gcc/optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
47
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
51
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
54
55 See expr.h for documentation of these optabs. */
56
57 optab optab_table[OTI_MAX];
58
59 rtx libfunc_table[LTI_MAX];
60
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
63
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
66
67 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
68 gives the gen_function to make a branch to test that condition. */
69
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
71
72 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
75
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
77
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (e.g. for the ARM). */
83
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
86
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
89
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
92
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
97
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
127
128 /* Current libcall id. It doesn't matter what these are, as long
129 as they are unique to each libcall that is emitted. */
130 static HOST_WIDE_INT libcall_id = 0;
131
132 #ifndef HAVE_conditional_trap
133 #define HAVE_conditional_trap 0
134 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
135 #endif
136
137 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
138 #if ENABLE_DECIMAL_BID_FORMAT
139 #define DECIMAL_PREFIX "bid_"
140 #else
141 #define DECIMAL_PREFIX "dpd_"
142 #endif
143
144 \f
145 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
146 the result of operation CODE applied to OP0 (and OP1 if it is a binary
147 operation).
148
149 If the last insn does not set TARGET, don't do anything, but return 1.
150
151 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
152 don't add the REG_EQUAL note but return 0. Our caller can then try
153 again, ensuring that TARGET is not one of the operands. */
154
155 static int
156 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
157 {
158 rtx last_insn, insn, set;
159 rtx note;
160
161 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
162
163 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
164 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
165 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
166 && GET_RTX_CLASS (code) != RTX_COMPARE
167 && GET_RTX_CLASS (code) != RTX_UNARY)
168 return 1;
169
170 if (GET_CODE (target) == ZERO_EXTRACT)
171 return 1;
172
173 for (last_insn = insns;
174 NEXT_INSN (last_insn) != NULL_RTX;
175 last_insn = NEXT_INSN (last_insn))
176 ;
177
178 set = single_set (last_insn);
179 if (set == NULL_RTX)
180 return 1;
181
182 if (! rtx_equal_p (SET_DEST (set), target)
183 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
184 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
185 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
186 return 1;
187
188 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
189 besides the last insn. */
190 if (reg_overlap_mentioned_p (target, op0)
191 || (op1 && reg_overlap_mentioned_p (target, op1)))
192 {
193 insn = PREV_INSN (last_insn);
194 while (insn != NULL_RTX)
195 {
196 if (reg_set_p (target, insn))
197 return 0;
198
199 insn = PREV_INSN (insn);
200 }
201 }
202
203 if (GET_RTX_CLASS (code) == RTX_UNARY)
204 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
205 else
206 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
207
208 set_unique_reg_note (last_insn, REG_EQUAL, note);
209
210 return 1;
211 }
212 \f
213 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
214 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
215 not actually do a sign-extend or zero-extend, but can leave the
216 higher-order bits of the result rtx undefined, for example, in the case
217 of logical operations, but not right shifts. */
218
219 static rtx
220 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
221 int unsignedp, int no_extend)
222 {
223 rtx result;
224
225 /* If we don't have to extend and this is a constant, return it. */
226 if (no_extend && GET_MODE (op) == VOIDmode)
227 return op;
228
229 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
230 extend since it will be more efficient to do so unless the signedness of
231 a promoted object differs from our extension. */
232 if (! no_extend
233 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
234 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
235 return convert_modes (mode, oldmode, op, unsignedp);
236
237 /* If MODE is no wider than a single word, we return a paradoxical
238 SUBREG. */
239 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
240 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
241
242 /* Otherwise, get an object of MODE, clobber it, and set the low-order
243 part to OP. */
244
245 result = gen_reg_rtx (mode);
246 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
247 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
248 return result;
249 }
250 \f
251 /* Return the optab used for computing the operation given by
252 the tree code, CODE. This function is not always usable (for
253 example, it cannot give complete results for multiplication
254 or division) but probably ought to be relied on more widely
255 throughout the expander. */
256 optab
257 optab_for_tree_code (enum tree_code code, tree type)
258 {
259 bool trapv;
260 switch (code)
261 {
262 case BIT_AND_EXPR:
263 return and_optab;
264
265 case BIT_IOR_EXPR:
266 return ior_optab;
267
268 case BIT_NOT_EXPR:
269 return one_cmpl_optab;
270
271 case BIT_XOR_EXPR:
272 return xor_optab;
273
274 case TRUNC_MOD_EXPR:
275 case CEIL_MOD_EXPR:
276 case FLOOR_MOD_EXPR:
277 case ROUND_MOD_EXPR:
278 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
279
280 case RDIV_EXPR:
281 case TRUNC_DIV_EXPR:
282 case CEIL_DIV_EXPR:
283 case FLOOR_DIV_EXPR:
284 case ROUND_DIV_EXPR:
285 case EXACT_DIV_EXPR:
286 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
287
288 case LSHIFT_EXPR:
289 return ashl_optab;
290
291 case RSHIFT_EXPR:
292 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
293
294 case LROTATE_EXPR:
295 return rotl_optab;
296
297 case RROTATE_EXPR:
298 return rotr_optab;
299
300 case MAX_EXPR:
301 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
302
303 case MIN_EXPR:
304 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
305
306 case REALIGN_LOAD_EXPR:
307 return vec_realign_load_optab;
308
309 case WIDEN_SUM_EXPR:
310 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
311
312 case DOT_PROD_EXPR:
313 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
314
315 case REDUC_MAX_EXPR:
316 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
317
318 case REDUC_MIN_EXPR:
319 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
320
321 case REDUC_PLUS_EXPR:
322 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
323
324 case VEC_LSHIFT_EXPR:
325 return vec_shl_optab;
326
327 case VEC_RSHIFT_EXPR:
328 return vec_shr_optab;
329
330 case VEC_WIDEN_MULT_HI_EXPR:
331 return TYPE_UNSIGNED (type) ?
332 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
333
334 case VEC_WIDEN_MULT_LO_EXPR:
335 return TYPE_UNSIGNED (type) ?
336 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
337
338 case VEC_UNPACK_HI_EXPR:
339 return TYPE_UNSIGNED (type) ?
340 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
341
342 case VEC_UNPACK_LO_EXPR:
343 return TYPE_UNSIGNED (type) ?
344 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
345
346 case VEC_UNPACK_FLOAT_HI_EXPR:
347 /* The signedness is determined from the input operand. */
348 return TYPE_UNSIGNED (type) ?
349 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
350
351 case VEC_UNPACK_FLOAT_LO_EXPR:
352 /* The signedness is determined from the input operand. */
353 return TYPE_UNSIGNED (type) ?
354 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
355
356 case VEC_PACK_TRUNC_EXPR:
357 return vec_pack_trunc_optab;
358
359 case VEC_PACK_SAT_EXPR:
360 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
361
362 case VEC_PACK_FIX_TRUNC_EXPR:
363 /* The signedness is determined from the output operand. */
364 return TYPE_UNSIGNED (type) ?
365 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
366
367 default:
368 break;
369 }
370
371 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
372 switch (code)
373 {
374 case POINTER_PLUS_EXPR:
375 case PLUS_EXPR:
376 return trapv ? addv_optab : add_optab;
377
378 case MINUS_EXPR:
379 return trapv ? subv_optab : sub_optab;
380
381 case MULT_EXPR:
382 return trapv ? smulv_optab : smul_optab;
383
384 case NEGATE_EXPR:
385 return trapv ? negv_optab : neg_optab;
386
387 case ABS_EXPR:
388 return trapv ? absv_optab : abs_optab;
389
390 case VEC_EXTRACT_EVEN_EXPR:
391 return vec_extract_even_optab;
392
393 case VEC_EXTRACT_ODD_EXPR:
394 return vec_extract_odd_optab;
395
396 case VEC_INTERLEAVE_HIGH_EXPR:
397 return vec_interleave_high_optab;
398
399 case VEC_INTERLEAVE_LOW_EXPR:
400 return vec_interleave_low_optab;
401
402 default:
403 return NULL;
404 }
405 }
406 \f
407
408 /* Expand vector widening operations.
409
410 There are two different classes of operations handled here:
411 1) Operations whose result is wider than all the arguments to the operation.
412 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
413 In this case OP0 and optionally OP1 would be initialized,
414 but WIDE_OP wouldn't (not relevant for this case).
415 2) Operations whose result is of the same size as the last argument to the
416 operation, but wider than all the other arguments to the operation.
417 Examples: WIDEN_SUM_EXPR, DOT_PROD_EXPR.
418 In this case WIDE_OP, OP0, and optionally OP1, would be initialized.
419
420 E.g., when called to expand the following operations, this is how
421 the arguments will be initialized:
422 nops OP0 OP1 WIDE_OP
423 widening-sum 2 oprnd0 - oprnd1
424 widening-dot-product 3 oprnd0 oprnd1 oprnd2
425 widening-mult 2 oprnd0 oprnd1 -
426 type-promotion (vec-unpack) 1 oprnd0 - - */
427
428 rtx
429 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
430 int unsignedp)
431 {
432 tree oprnd0, oprnd1, oprnd2;
433 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
434 optab widen_pattern_optab;
435 int icode;
436 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
437 rtx temp;
438 rtx pat;
439 rtx xop0, xop1, wxop;
440 int nops = TREE_OPERAND_LENGTH (exp);
441
442 oprnd0 = TREE_OPERAND (exp, 0);
443 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
444 widen_pattern_optab =
445 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
446 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
447 gcc_assert (icode != CODE_FOR_nothing);
448 xmode0 = insn_data[icode].operand[1].mode;
449
450 if (nops >= 2)
451 {
452 oprnd1 = TREE_OPERAND (exp, 1);
453 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
454 xmode1 = insn_data[icode].operand[2].mode;
455 }
456
457 /* The last operand is of a wider mode than the rest of the operands. */
458 if (nops == 2)
459 {
460 wmode = tmode1;
461 wxmode = xmode1;
462 }
463 else if (nops == 3)
464 {
465 gcc_assert (tmode1 == tmode0);
466 gcc_assert (op1);
467 oprnd2 = TREE_OPERAND (exp, 2);
468 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
469 wxmode = insn_data[icode].operand[3].mode;
470 }
471
472 if (!wide_op)
473 wmode = wxmode = insn_data[icode].operand[0].mode;
474
475 if (!target
476 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
477 temp = gen_reg_rtx (wmode);
478 else
479 temp = target;
480
481 xop0 = op0;
482 xop1 = op1;
483 wxop = wide_op;
484
485 /* In case the insn wants input operands in modes different from
486 those of the actual operands, convert the operands. It would
487 seem that we don't need to convert CONST_INTs, but we do, so
488 that they're properly zero-extended, sign-extended or truncated
489 for their mode. */
490
491 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
492 xop0 = convert_modes (xmode0,
493 GET_MODE (op0) != VOIDmode
494 ? GET_MODE (op0)
495 : tmode0,
496 xop0, unsignedp);
497
498 if (op1)
499 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
500 xop1 = convert_modes (xmode1,
501 GET_MODE (op1) != VOIDmode
502 ? GET_MODE (op1)
503 : tmode1,
504 xop1, unsignedp);
505
506 if (wide_op)
507 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
508 wxop = convert_modes (wxmode,
509 GET_MODE (wide_op) != VOIDmode
510 ? GET_MODE (wide_op)
511 : wmode,
512 wxop, unsignedp);
513
514 /* Now, if insn's predicates don't allow our operands, put them into
515 pseudo regs. */
516
517 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
518 && xmode0 != VOIDmode)
519 xop0 = copy_to_mode_reg (xmode0, xop0);
520
521 if (op1)
522 {
523 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
524 && xmode1 != VOIDmode)
525 xop1 = copy_to_mode_reg (xmode1, xop1);
526
527 if (wide_op)
528 {
529 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
530 && wxmode != VOIDmode)
531 wxop = copy_to_mode_reg (wxmode, wxop);
532
533 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
534 }
535 else
536 pat = GEN_FCN (icode) (temp, xop0, xop1);
537 }
538 else
539 {
540 if (wide_op)
541 {
542 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
543 && wxmode != VOIDmode)
544 wxop = copy_to_mode_reg (wxmode, wxop);
545
546 pat = GEN_FCN (icode) (temp, xop0, wxop);
547 }
548 else
549 pat = GEN_FCN (icode) (temp, xop0);
550 }
551
552 emit_insn (pat);
553 return temp;
554 }
555
556 /* Generate code to perform an operation specified by TERNARY_OPTAB
557 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
558
559 UNSIGNEDP is for the case where we have to widen the operands
560 to perform the operation. It says to use zero-extension.
561
562 If TARGET is nonzero, the value
563 is generated there, if it is convenient to do so.
564 In all cases an rtx is returned for the locus of the value;
565 this may or may not be TARGET. */
566
567 rtx
568 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
569 rtx op1, rtx op2, rtx target, int unsignedp)
570 {
571 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
572 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
573 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
574 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
575 rtx temp;
576 rtx pat;
577 rtx xop0 = op0, xop1 = op1, xop2 = op2;
578
579 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
580 != CODE_FOR_nothing);
581
582 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
583 temp = gen_reg_rtx (mode);
584 else
585 temp = target;
586
587 /* In case the insn wants input operands in modes different from
588 those of the actual operands, convert the operands. It would
589 seem that we don't need to convert CONST_INTs, but we do, so
590 that they're properly zero-extended, sign-extended or truncated
591 for their mode. */
592
593 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
594 xop0 = convert_modes (mode0,
595 GET_MODE (op0) != VOIDmode
596 ? GET_MODE (op0)
597 : mode,
598 xop0, unsignedp);
599
600 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
601 xop1 = convert_modes (mode1,
602 GET_MODE (op1) != VOIDmode
603 ? GET_MODE (op1)
604 : mode,
605 xop1, unsignedp);
606
607 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
608 xop2 = convert_modes (mode2,
609 GET_MODE (op2) != VOIDmode
610 ? GET_MODE (op2)
611 : mode,
612 xop2, unsignedp);
613
614 /* Now, if insn's predicates don't allow our operands, put them into
615 pseudo regs. */
616
617 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
618 && mode0 != VOIDmode)
619 xop0 = copy_to_mode_reg (mode0, xop0);
620
621 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
622 && mode1 != VOIDmode)
623 xop1 = copy_to_mode_reg (mode1, xop1);
624
625 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
626 && mode2 != VOIDmode)
627 xop2 = copy_to_mode_reg (mode2, xop2);
628
629 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
630
631 emit_insn (pat);
632 return temp;
633 }
634
635
636 /* Like expand_binop, but return a constant rtx if the result can be
637 calculated at compile time. The arguments and return value are
638 otherwise the same as for expand_binop. */
639
640 static rtx
641 simplify_expand_binop (enum machine_mode mode, optab binoptab,
642 rtx op0, rtx op1, rtx target, int unsignedp,
643 enum optab_methods methods)
644 {
645 if (CONSTANT_P (op0) && CONSTANT_P (op1))
646 {
647 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
648
649 if (x)
650 return x;
651 }
652
653 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
654 }
655
656 /* Like simplify_expand_binop, but always put the result in TARGET.
657 Return true if the expansion succeeded. */
658
659 bool
660 force_expand_binop (enum machine_mode mode, optab binoptab,
661 rtx op0, rtx op1, rtx target, int unsignedp,
662 enum optab_methods methods)
663 {
664 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
665 target, unsignedp, methods);
666 if (x == 0)
667 return false;
668 if (x != target)
669 emit_move_insn (target, x);
670 return true;
671 }
672
673 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
674
675 rtx
676 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
677 {
678 enum insn_code icode;
679 rtx rtx_op1, rtx_op2;
680 enum machine_mode mode1;
681 enum machine_mode mode2;
682 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
683 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
684 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
685 optab shift_optab;
686 rtx pat;
687
688 switch (TREE_CODE (vec_shift_expr))
689 {
690 case VEC_RSHIFT_EXPR:
691 shift_optab = vec_shr_optab;
692 break;
693 case VEC_LSHIFT_EXPR:
694 shift_optab = vec_shl_optab;
695 break;
696 default:
697 gcc_unreachable ();
698 }
699
700 icode = (int) shift_optab->handlers[(int) mode].insn_code;
701 gcc_assert (icode != CODE_FOR_nothing);
702
703 mode1 = insn_data[icode].operand[1].mode;
704 mode2 = insn_data[icode].operand[2].mode;
705
706 rtx_op1 = expand_normal (vec_oprnd);
707 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
708 && mode1 != VOIDmode)
709 rtx_op1 = force_reg (mode1, rtx_op1);
710
711 rtx_op2 = expand_normal (shift_oprnd);
712 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
713 && mode2 != VOIDmode)
714 rtx_op2 = force_reg (mode2, rtx_op2);
715
716 if (!target
717 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
718 target = gen_reg_rtx (mode);
719
720 /* Emit the instruction. */
721 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
722 gcc_assert (pat);
723 emit_insn (pat);
724
725 return target;
726 }
727
728 /* This subroutine of expand_doubleword_shift handles the cases in which
729 the effective shift value is >= BITS_PER_WORD. The arguments and return
730 value are the same as for the parent routine, except that SUPERWORD_OP1
731 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
732 INTO_TARGET may be null if the caller has decided to calculate it. */
733
734 static bool
735 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
736 rtx outof_target, rtx into_target,
737 int unsignedp, enum optab_methods methods)
738 {
739 if (into_target != 0)
740 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
741 into_target, unsignedp, methods))
742 return false;
743
744 if (outof_target != 0)
745 {
746 /* For a signed right shift, we must fill OUTOF_TARGET with copies
747 of the sign bit; otherwise we must fill it with zeros. */
748 if (binoptab != ashr_optab)
749 emit_move_insn (outof_target, CONST0_RTX (word_mode));
750 else
751 if (!force_expand_binop (word_mode, binoptab,
752 outof_input, GEN_INT (BITS_PER_WORD - 1),
753 outof_target, unsignedp, methods))
754 return false;
755 }
756 return true;
757 }
758
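/* Illustration only, not part of the original file: a standalone sketch of
   the superword case handled above, using hypothetical 8-bit words and a
   16-bit doubleword.  For an arithmetic right shift by a count N in
   [8, 16), every bit of the result comes from the high input word: the
   "into" half is that word shifted right by N - 8, and the "outof" half is
   filled with copies of its sign bit (a shift by BITS_PER_WORD - 1, which
   is what the code above emits for ashr_optab).  The function name is made
   up for this sketch, and arithmetic ">>" on signed values is assumed,
   matching what ashr requires.  */

static unsigned short
example_superword_ashr16 (unsigned char high_word, unsigned int n)
{
  /* Low ("into") half of the result: high word shifted by the excess count.  */
  unsigned char into = (unsigned char) ((signed char) high_word >> (n - 8));
  /* High ("outof") half of the result: sign fill.  */
  unsigned char outof = (unsigned char) ((signed char) high_word >> 7);
  return (unsigned short) (((unsigned int) outof << 8) | into);
}
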
759 /* This subroutine of expand_doubleword_shift handles the cases in which
760 the effective shift value is < BITS_PER_WORD. The arguments and return
761 value are the same as for the parent routine. */
762
763 static bool
764 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
765 rtx outof_input, rtx into_input, rtx op1,
766 rtx outof_target, rtx into_target,
767 int unsignedp, enum optab_methods methods,
768 unsigned HOST_WIDE_INT shift_mask)
769 {
770 optab reverse_unsigned_shift, unsigned_shift;
771 rtx tmp, carries;
772
773 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
774 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
775
776 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
777 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
778 the opposite direction to BINOPTAB. */
779 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
780 {
781 carries = outof_input;
782 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
783 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
784 0, true, methods);
785 }
786 else
787 {
788 /* We must avoid shifting by BITS_PER_WORD bits since that is either
789 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
790 has unknown behavior. Do a single shift first, then shift by the
791 remainder. It's OK to use ~OP1 as the remainder if shift counts
792 are truncated to the mode size. */
793 carries = expand_binop (word_mode, reverse_unsigned_shift,
794 outof_input, const1_rtx, 0, unsignedp, methods);
795 if (shift_mask == BITS_PER_WORD - 1)
796 {
797 tmp = immed_double_const (-1, -1, op1_mode);
798 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
799 0, true, methods);
800 }
801 else
802 {
803 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
804 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
805 0, true, methods);
806 }
807 }
808 if (tmp == 0 || carries == 0)
809 return false;
810 carries = expand_binop (word_mode, reverse_unsigned_shift,
811 carries, tmp, 0, unsignedp, methods);
812 if (carries == 0)
813 return false;
814
815 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
816 so the result can go directly into INTO_TARGET if convenient. */
817 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
818 into_target, unsignedp, methods);
819 if (tmp == 0)
820 return false;
821
822 /* Now OR in the bits carried over from OUTOF_INPUT. */
823 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
824 into_target, unsignedp, methods))
825 return false;
826
827 /* Use a standard word_mode shift for the out-of half. */
828 if (outof_target != 0)
829 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
830 outof_target, unsignedp, methods))
831 return false;
832
833 return true;
834 }
835
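/* Illustration only, not part of the original file: a standalone sketch of
   the subword case handled above for a left shift, using hypothetical
   8-bit words and a 16-bit doubleword.  For 0 < N < 8 the code computes

     into  = (into_input << N) | (outof_input >> (8 - N))
     outof = outof_input << N

   but it builds the (8 - N) shift in two steps, ">> 1" followed by
   ">> (7 - N)", so that no single word-mode shift count can reach the
   word size.  The function name is made up for this sketch.  */

static unsigned short
example_subword_shl16 (unsigned char outof_input /* low word */,
                       unsigned char into_input  /* high word */,
                       unsigned int n            /* 0 < n < 8 */)
{
  /* Bits carried from the low word into the high word, shifted in the
     direction opposite to the main shift.  */
  unsigned char carries = (unsigned char) ((outof_input >> 1) >> (7 - n));
  unsigned char into = (unsigned char) ((into_input << n) | carries);
  unsigned char outof = (unsigned char) (outof_input << n);
  return (unsigned short) (((unsigned int) into << 8) | outof);
}
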
836
837 #ifdef HAVE_conditional_move
838 /* Try implementing expand_doubleword_shift using conditional moves.
839 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
840 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
841 are the shift counts to use in the former and latter case. All other
842 arguments are the same as the parent routine. */
843
844 static bool
845 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
846 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
847 rtx outof_input, rtx into_input,
848 rtx subword_op1, rtx superword_op1,
849 rtx outof_target, rtx into_target,
850 int unsignedp, enum optab_methods methods,
851 unsigned HOST_WIDE_INT shift_mask)
852 {
853 rtx outof_superword, into_superword;
854
855 /* Put the superword version of the output into OUTOF_SUPERWORD and
856 INTO_SUPERWORD. */
857 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
858 if (outof_target != 0 && subword_op1 == superword_op1)
859 {
860 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
861 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
862 into_superword = outof_target;
863 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
864 outof_superword, 0, unsignedp, methods))
865 return false;
866 }
867 else
868 {
869 into_superword = gen_reg_rtx (word_mode);
870 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
871 outof_superword, into_superword,
872 unsignedp, methods))
873 return false;
874 }
875
876 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
877 if (!expand_subword_shift (op1_mode, binoptab,
878 outof_input, into_input, subword_op1,
879 outof_target, into_target,
880 unsignedp, methods, shift_mask))
881 return false;
882
883 /* Select between them. Do the INTO half first because INTO_SUPERWORD
884 might be the current value of OUTOF_TARGET. */
885 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
886 into_target, into_superword, word_mode, false))
887 return false;
888
889 if (outof_target != 0)
890 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
891 outof_target, outof_superword,
892 word_mode, false))
893 return false;
894
895 return true;
896 }
897 #endif
898
899 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
900 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
901 input operand; the shift moves bits in the direction OUTOF_INPUT->
902 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
903 of the target. OP1 is the shift count and OP1_MODE is its mode.
904 If OP1 is constant, it will have been truncated as appropriate
905 and is known to be nonzero.
906
907 If SHIFT_MASK is zero, the result of word shifts is undefined when the
908 shift count is outside the range [0, BITS_PER_WORD). This routine must
909 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
910
911 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
912 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
913 fill with zeros or sign bits as appropriate.
914
915 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
916 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
917 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
918 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
919 are undefined.
920
921 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
922 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
923 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
924 function wants to calculate it itself.
925
926 Return true if the shift could be successfully synthesized. */
927
928 static bool
929 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
930 rtx outof_input, rtx into_input, rtx op1,
931 rtx outof_target, rtx into_target,
932 int unsignedp, enum optab_methods methods,
933 unsigned HOST_WIDE_INT shift_mask)
934 {
935 rtx superword_op1, tmp, cmp1, cmp2;
936 rtx subword_label, done_label;
937 enum rtx_code cmp_code;
938
939 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
940 fill the result with sign or zero bits as appropriate. If so, the value
941 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
942 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
943 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
944
945 This isn't worthwhile for constant shifts since the optimizers will
946 cope better with in-range shift counts. */
947 if (shift_mask >= BITS_PER_WORD
948 && outof_target != 0
949 && !CONSTANT_P (op1))
950 {
951 if (!expand_doubleword_shift (op1_mode, binoptab,
952 outof_input, into_input, op1,
953 0, into_target,
954 unsignedp, methods, shift_mask))
955 return false;
956 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
957 outof_target, unsignedp, methods))
958 return false;
959 return true;
960 }
961
962 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
963 is true when the effective shift value is less than BITS_PER_WORD.
964 Set SUPERWORD_OP1 to the shift count that should be used to shift
965 OUTOF_INPUT into INTO_TARGET when the condition is false. */
966 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
967 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
968 {
969 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
970 is a subword shift count. */
971 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
972 0, true, methods);
973 cmp2 = CONST0_RTX (op1_mode);
974 cmp_code = EQ;
975 superword_op1 = op1;
976 }
977 else
978 {
979 /* Set CMP1 to OP1 - BITS_PER_WORD. */
980 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
981 0, true, methods);
982 cmp2 = CONST0_RTX (op1_mode);
983 cmp_code = LT;
984 superword_op1 = cmp1;
985 }
986 if (cmp1 == 0)
987 return false;
988
989 /* If we can compute the condition at compile time, pick the
990 appropriate subroutine. */
991 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
992 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
993 {
994 if (tmp == const0_rtx)
995 return expand_superword_shift (binoptab, outof_input, superword_op1,
996 outof_target, into_target,
997 unsignedp, methods);
998 else
999 return expand_subword_shift (op1_mode, binoptab,
1000 outof_input, into_input, op1,
1001 outof_target, into_target,
1002 unsignedp, methods, shift_mask);
1003 }
1004
1005 #ifdef HAVE_conditional_move
1006 /* Try using conditional moves to generate straight-line code. */
1007 {
1008 rtx start = get_last_insn ();
1009 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1010 cmp_code, cmp1, cmp2,
1011 outof_input, into_input,
1012 op1, superword_op1,
1013 outof_target, into_target,
1014 unsignedp, methods, shift_mask))
1015 return true;
1016 delete_insns_since (start);
1017 }
1018 #endif
1019
1020 /* As a last resort, use branches to select the correct alternative. */
1021 subword_label = gen_label_rtx ();
1022 done_label = gen_label_rtx ();
1023
1024 NO_DEFER_POP;
1025 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1026 0, 0, subword_label);
1027 OK_DEFER_POP;
1028
1029 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1030 outof_target, into_target,
1031 unsignedp, methods))
1032 return false;
1033
1034 emit_jump_insn (gen_jump (done_label));
1035 emit_barrier ();
1036 emit_label (subword_label);
1037
1038 if (!expand_subword_shift (op1_mode, binoptab,
1039 outof_input, into_input, op1,
1040 outof_target, into_target,
1041 unsignedp, methods, shift_mask))
1042 return false;
1043
1044 emit_label (done_label);
1045 return true;
1046 }
1047 \f
1048 /* Subroutine of expand_binop. Perform a double word multiplication of
1049 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1050 as the target's word_mode. This function returns NULL_RTX if anything
1051 goes wrong, in which case it may have already emitted instructions
1052 which need to be deleted.
1053
1054 If we want to multiply two two-word values and have normal and widening
1055 multiplies of single-word values, we can do this with three smaller
1056 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1057 because we are not operating on one word at a time.
1058
1059 The multiplication proceeds as follows:
1060 _______________________
1061 [__op0_high_|__op0_low__]
1062 _______________________
1063 * [__op1_high_|__op1_low__]
1064 _______________________________________________
1065 _______________________
1066 (1) [__op0_low__*__op1_low__]
1067 _______________________
1068 (2a) [__op0_low__*__op1_high_]
1069 _______________________
1070 (2b) [__op0_high_*__op1_low__]
1071 _______________________
1072 (3) [__op0_high_*__op1_high_]
1073
1074
1075 This gives a 4-word result. Since we are only interested in the
1076 lower 2 words, partial result (3) and the upper words of (2a) and
1077 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1078 calculated using non-widening multiplication.
1079
1080 (1), however, needs to be calculated with an unsigned widening
1081 multiplication. If this operation is not directly supported we
1082 try using a signed widening multiplication and adjust the result.
1083 This adjustment works as follows:
1084
1085 If both operands are positive then no adjustment is needed.
1086
1087 If the operands have different signs, for example op0_low < 0 and
1088 op1_low >= 0, the instruction treats the most significant bit of
1089 op0_low as a sign bit instead of a bit with significance
1090 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1091 with 2**BITS_PER_WORD - op0_low, and two's complements the
1092 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1093 the result.
1094
1095 Similarly, if both operands are negative, we need to add
1096 (op0_low + op1_low) * 2**BITS_PER_WORD.
1097
1098 We use a trick to adjust quickly. We logically shift op0_low right
1099 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1100 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1101 logical shift exists, we do an arithmetic right shift and subtract
1102 the 0 or -1. */
1103
1104 static rtx
1105 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1106 bool umulp, enum optab_methods methods)
1107 {
1108 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1109 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1110 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1111 rtx product, adjust, product_high, temp;
1112
1113 rtx op0_high = operand_subword_force (op0, high, mode);
1114 rtx op0_low = operand_subword_force (op0, low, mode);
1115 rtx op1_high = operand_subword_force (op1, high, mode);
1116 rtx op1_low = operand_subword_force (op1, low, mode);
1117
1118 /* If we're using an unsigned multiply to directly compute the product
1119 of the low-order words of the operands and perform any required
1120 adjustments of the operands, we begin by trying two more multiplications
1121 and then computing the appropriate sum.
1122
1123 We have checked above that the required addition is provided.
1124 Full-word addition will normally always succeed, especially if
1125 it is provided at all, so we don't worry about its failure. The
1126 multiplication may well fail, however, so we do handle that. */
1127
1128 if (!umulp)
1129 {
1130 /* ??? This could be done with emit_store_flag where available. */
1131 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1132 NULL_RTX, 1, methods);
1133 if (temp)
1134 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1135 NULL_RTX, 0, OPTAB_DIRECT);
1136 else
1137 {
1138 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1139 NULL_RTX, 0, methods);
1140 if (!temp)
1141 return NULL_RTX;
1142 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1143 NULL_RTX, 0, OPTAB_DIRECT);
1144 }
1145
1146 if (!op0_high)
1147 return NULL_RTX;
1148 }
1149
1150 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1151 NULL_RTX, 0, OPTAB_DIRECT);
1152 if (!adjust)
1153 return NULL_RTX;
1154
1155 /* OP0_HIGH should now be dead. */
1156
1157 if (!umulp)
1158 {
1159 /* ??? This could be done with emit_store_flag where available. */
1160 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1161 NULL_RTX, 1, methods);
1162 if (temp)
1163 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1164 NULL_RTX, 0, OPTAB_DIRECT);
1165 else
1166 {
1167 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1168 NULL_RTX, 0, methods);
1169 if (!temp)
1170 return NULL_RTX;
1171 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1172 NULL_RTX, 0, OPTAB_DIRECT);
1173 }
1174
1175 if (!op1_high)
1176 return NULL_RTX;
1177 }
1178
1179 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1180 NULL_RTX, 0, OPTAB_DIRECT);
1181 if (!temp)
1182 return NULL_RTX;
1183
1184 /* OP1_HIGH should now be dead. */
1185
1186 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1187 adjust, 0, OPTAB_DIRECT);
1188
1189 if (target && !REG_P (target))
1190 target = NULL_RTX;
1191
1192 if (umulp)
1193 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1194 target, 1, OPTAB_DIRECT);
1195 else
1196 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1197 target, 1, OPTAB_DIRECT);
1198
1199 if (!product)
1200 return NULL_RTX;
1201
1202 product_high = operand_subword (product, high, 1, mode);
1203 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1204 REG_P (product_high) ? product_high : adjust,
1205 0, OPTAB_DIRECT);
1206 emit_move_insn (product_high, adjust);
1207 return product;
1208 }
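
/* Illustration only, not part of the original file: a standalone sketch of
   the signed-to-unsigned adjustment described in the comment before
   expand_doubleword_mult, worked out with hypothetical 8-bit words and a
   16-bit doubleword.  A signed widening multiply of the low words differs
   from the unsigned one by 2**8 * (s0 * op1_low + s1 * op0_low), where s0
   and s1 are the top bits of op0_low and op1_low; folding s0 into op0_high
   and s1 into op1_high before forming the cross products (2a) and (2b)
   adds exactly that difference to the high word.  The function name is
   made up for this sketch.  */

static unsigned short
example_doubleword_mult16 (unsigned char op0_low, unsigned char op0_high,
                           unsigned char op1_low, unsigned char op1_high)
{
  /* What smul_widen_optab would give us for the low words.  */
  short sprod = (short) ((signed char) op0_low * (signed char) op1_low);

  /* The "logical shift right by BITS_PER_WORD - 1" trick: 0 or 1.  */
  unsigned char s0 = (unsigned char) (op0_low >> 7);
  unsigned char s1 = (unsigned char) (op1_low >> 7);

  /* Cross products (2a) and (2b) with the sign-bit corrections folded in;
     only their low 8 bits matter for the low doubleword of the result.  */
  unsigned char adjust = (unsigned char) ((op0_high + s0) * op1_low
                                          + (op1_high + s1) * op0_low);

  /* Add the adjustment into the high word of the widened product.  */
  return (unsigned short) ((unsigned short) sprod
                           + ((unsigned int) adjust << 8));
}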
1209 \f
1210 /* Wrapper around expand_binop which takes an rtx code to specify
1211 the operation to perform, not an optab pointer. All other
1212 arguments are the same. */
1213 rtx
1214 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1215 rtx op1, rtx target, int unsignedp,
1216 enum optab_methods methods)
1217 {
1218 optab binop = code_to_optab[(int) code];
1219 gcc_assert (binop);
1220
1221 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1222 }
1223
1224 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1225 binop. Order them according to commutative_operand_precedence and, if
1226 possible, try to put TARGET or a pseudo first. */
1227 static bool
1228 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1229 {
1230 int op0_prec = commutative_operand_precedence (op0);
1231 int op1_prec = commutative_operand_precedence (op1);
1232
1233 if (op0_prec < op1_prec)
1234 return true;
1235
1236 if (op0_prec > op1_prec)
1237 return false;
1238
1239 /* With equal precedence, both orders are ok, but it is better if the
1240 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1241 if (target == 0 || REG_P (target))
1242 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1243 else
1244 return rtx_equal_p (op1, target);
1245 }
1246
1247
1248 /* Helper function for expand_binop: handle the case where there
1249 is an insn that directly implements the indicated operation.
1250 Returns null if this is not possible. */
1251 static rtx
1252 expand_binop_directly (enum machine_mode mode, optab binoptab,
1253 rtx op0, rtx op1,
1254 rtx target, int unsignedp, enum optab_methods methods,
1255 int commutative_op, rtx last)
1256 {
1257 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1258 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1259 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1260 enum machine_mode tmp_mode;
1261 rtx pat;
1262 rtx xop0 = op0, xop1 = op1;
1263 rtx temp;
1264
1265 if (target)
1266 temp = target;
1267 else
1268 temp = gen_reg_rtx (mode);
1269
1270 /* If it is a commutative operator and the modes would match
1271 if we would swap the operands, we can save the conversions. */
1272 if (commutative_op)
1273 {
1274 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1275 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1276 {
1277 rtx tmp;
1278
1279 tmp = op0; op0 = op1; op1 = tmp;
1280 tmp = xop0; xop0 = xop1; xop1 = tmp;
1281 }
1282 }
1283
1284 /* In case the insn wants input operands in modes different from
1285 those of the actual operands, convert the operands. It would
1286 seem that we don't need to convert CONST_INTs, but we do, so
1287 that they're properly zero-extended, sign-extended or truncated
1288 for their mode. */
1289
1290 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1291 xop0 = convert_modes (mode0,
1292 GET_MODE (op0) != VOIDmode
1293 ? GET_MODE (op0)
1294 : mode,
1295 xop0, unsignedp);
1296
1297 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1298 xop1 = convert_modes (mode1,
1299 GET_MODE (op1) != VOIDmode
1300 ? GET_MODE (op1)
1301 : mode,
1302 xop1, unsignedp);
1303
1304 /* Now, if insn's predicates don't allow our operands, put them into
1305 pseudo regs. */
1306
1307 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1308 && mode0 != VOIDmode)
1309 xop0 = copy_to_mode_reg (mode0, xop0);
1310
1311 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1312 && mode1 != VOIDmode)
1313 xop1 = copy_to_mode_reg (mode1, xop1);
1314
1315 if (binoptab == vec_pack_trunc_optab
1316 || binoptab == vec_pack_usat_optab
1317 || binoptab == vec_pack_ssat_optab
1318 || binoptab == vec_pack_ufix_trunc_optab
1319 || binoptab == vec_pack_sfix_trunc_optab)
1320 {
1321 /* The mode of the result is different from the mode of the
1322 arguments. */
1323 tmp_mode = insn_data[icode].operand[0].mode;
1324 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1325 return 0;
1326 }
1327 else
1328 tmp_mode = mode;
1329
1330 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1331 temp = gen_reg_rtx (tmp_mode);
1332
1333 pat = GEN_FCN (icode) (temp, xop0, xop1);
1334 if (pat)
1335 {
1336 /* If PAT is composed of more than one insn, try to add an appropriate
1337 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1338 operand, call expand_binop again, this time without a target. */
1339 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1340 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1341 {
1342 delete_insns_since (last);
1343 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1344 unsignedp, methods);
1345 }
1346
1347 emit_insn (pat);
1348 return temp;
1349 }
1350
1351 delete_insns_since (last);
1352 return NULL_RTX;
1353 }
1354
1355 /* Generate code to perform an operation specified by BINOPTAB
1356 on operands OP0 and OP1, with result having machine-mode MODE.
1357
1358 UNSIGNEDP is for the case where we have to widen the operands
1359 to perform the operation. It says to use zero-extension.
1360
1361 If TARGET is nonzero, the value
1362 is generated there, if it is convenient to do so.
1363 In all cases an rtx is returned for the locus of the value;
1364 this may or may not be TARGET. */
1365
1366 rtx
1367 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1368 rtx target, int unsignedp, enum optab_methods methods)
1369 {
1370 enum optab_methods next_methods
1371 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1372 ? OPTAB_WIDEN : methods);
1373 enum mode_class class;
1374 enum machine_mode wider_mode;
1375 rtx temp;
1376 int commutative_op = 0;
1377 int shift_op = (binoptab->code == ASHIFT
1378 || binoptab->code == ASHIFTRT
1379 || binoptab->code == LSHIFTRT
1380 || binoptab->code == ROTATE
1381 || binoptab->code == ROTATERT);
1382 rtx entry_last = get_last_insn ();
1383 rtx last;
1384
1385 class = GET_MODE_CLASS (mode);
1386
1387 /* If subtracting an integer constant, convert this into an addition of
1388 the negated constant. */
1389
1390 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1391 {
1392 op1 = negate_rtx (mode, op1);
1393 binoptab = add_optab;
1394 }
1395
1396 /* If we are inside an appropriately-short loop and we are optimizing,
1397 force expensive constants into a register. */
1398 if (CONSTANT_P (op0) && optimize
1399 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1400 {
1401 if (GET_MODE (op0) != VOIDmode)
1402 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1403 op0 = force_reg (mode, op0);
1404 }
1405
1406 if (CONSTANT_P (op1) && optimize
1407 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1408 {
1409 if (GET_MODE (op1) != VOIDmode)
1410 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1411 op1 = force_reg (mode, op1);
1412 }
1413
1414 /* Record where to delete back to if we backtrack. */
1415 last = get_last_insn ();
1416
1417 /* If operation is commutative,
1418 try to make the first operand a register.
1419 Even better, try to make it the same as the target.
1420 Also try to make the last operand a constant. */
1421 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1422 || binoptab == smul_widen_optab
1423 || binoptab == umul_widen_optab
1424 || binoptab == smul_highpart_optab
1425 || binoptab == umul_highpart_optab)
1426 {
1427 commutative_op = 1;
1428
1429 if (swap_commutative_operands_with_target (target, op0, op1))
1430 {
1431 temp = op1;
1432 op1 = op0;
1433 op0 = temp;
1434 }
1435 }
1436
1437 /* If we can do it with a three-operand insn, do so. */
1438
1439 if (methods != OPTAB_MUST_WIDEN
1440 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1441 {
1442 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1443 unsignedp, methods, commutative_op, last);
1444 if (temp)
1445 return temp;
1446 }
1447
1448 /* If we were trying to rotate, and that didn't work, try rotating
1449 the other direction before falling back to shifts and bitwise-or. */
1450 if (((binoptab == rotl_optab
1451 && rotr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1452 || (binoptab == rotr_optab
1453 && rotl_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing))
1454 && class == MODE_INT)
1455 {
1456 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1457 rtx newop1;
1458 unsigned int bits = GET_MODE_BITSIZE (mode);
1459
1460 if (GET_CODE (op1) == CONST_INT)
1461 newop1 = GEN_INT (bits - INTVAL (op1));
1462 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1463 newop1 = negate_rtx (mode, op1);
1464 else
1465 newop1 = expand_binop (mode, sub_optab,
1466 GEN_INT (bits), op1,
1467 NULL_RTX, unsignedp, OPTAB_DIRECT);
1468
1469 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1470 target, unsignedp, methods,
1471 commutative_op, last);
1472 if (temp)
1473 return temp;
1474 }
1475
1476 /* If this is a multiply, see if we can do a widening operation that
1477 takes operands of this mode and makes a wider mode. */
1478
1479 if (binoptab == smul_optab
1480 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1481 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1482 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1483 != CODE_FOR_nothing))
1484 {
1485 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1486 unsignedp ? umul_widen_optab : smul_widen_optab,
1487 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1488
1489 if (temp != 0)
1490 {
1491 if (GET_MODE_CLASS (mode) == MODE_INT
1492 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1493 GET_MODE_BITSIZE (GET_MODE (temp))))
1494 return gen_lowpart (mode, temp);
1495 else
1496 return convert_to_mode (mode, temp, unsignedp);
1497 }
1498 }
1499
1500 /* Look for a wider mode of the same class for which we think we
1501 can open-code the operation. Check for a widening multiply at the
1502 wider mode as well. */
1503
1504 if (CLASS_HAS_WIDER_MODES_P (class)
1505 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1506 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1507 wider_mode != VOIDmode;
1508 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1509 {
1510 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1511 || (binoptab == smul_optab
1512 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1513 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1514 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1515 != CODE_FOR_nothing)))
1516 {
1517 rtx xop0 = op0, xop1 = op1;
1518 int no_extend = 0;
1519
1520 /* For certain integer operations, we need not actually extend
1521 the narrow operands, as long as we will truncate
1522 the results to the same narrowness. */
1523
1524 if ((binoptab == ior_optab || binoptab == and_optab
1525 || binoptab == xor_optab
1526 || binoptab == add_optab || binoptab == sub_optab
1527 || binoptab == smul_optab || binoptab == ashl_optab)
1528 && class == MODE_INT)
1529 no_extend = 1;
1530
1531 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1532
1533 /* The second operand of a shift must always be extended. */
1534 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1535 no_extend && binoptab != ashl_optab);
1536
1537 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1538 unsignedp, OPTAB_DIRECT);
1539 if (temp)
1540 {
1541 if (class != MODE_INT
1542 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1543 GET_MODE_BITSIZE (wider_mode)))
1544 {
1545 if (target == 0)
1546 target = gen_reg_rtx (mode);
1547 convert_move (target, temp, 0);
1548 return target;
1549 }
1550 else
1551 return gen_lowpart (mode, temp);
1552 }
1553 else
1554 delete_insns_since (last);
1555 }
1556 }
1557
1558 /* These can be done a word at a time. */
1559 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1560 && class == MODE_INT
1561 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1562 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1563 {
1564 int i;
1565 rtx insns;
1566 rtx equiv_value;
1567
1568 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1569 won't be accurate, so use a new target. */
1570 if (target == 0 || target == op0 || target == op1)
1571 target = gen_reg_rtx (mode);
1572
1573 start_sequence ();
1574
1575 /* Do the actual arithmetic. */
1576 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1577 {
1578 rtx target_piece = operand_subword (target, i, 1, mode);
1579 rtx x = expand_binop (word_mode, binoptab,
1580 operand_subword_force (op0, i, mode),
1581 operand_subword_force (op1, i, mode),
1582 target_piece, unsignedp, next_methods);
1583
1584 if (x == 0)
1585 break;
1586
1587 if (target_piece != x)
1588 emit_move_insn (target_piece, x);
1589 }
1590
1591 insns = get_insns ();
1592 end_sequence ();
1593
1594 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1595 {
1596 if (binoptab->code != UNKNOWN)
1597 equiv_value
1598 = gen_rtx_fmt_ee (binoptab->code, mode,
1599 copy_rtx (op0), copy_rtx (op1));
1600 else
1601 equiv_value = 0;
1602
1603 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1604 return target;
1605 }
1606 }
1607
1608 /* Synthesize double word shifts from single word shifts. */
1609 if ((binoptab == lshr_optab || binoptab == ashl_optab
1610 || binoptab == ashr_optab)
1611 && class == MODE_INT
1612 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1613 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1614 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1615 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1616 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1617 {
1618 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1619 enum machine_mode op1_mode;
1620
1621 double_shift_mask = targetm.shift_truncation_mask (mode);
1622 shift_mask = targetm.shift_truncation_mask (word_mode);
1623 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1624
1625 /* Apply the truncation to constant shifts. */
1626 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1627 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1628
1629 if (op1 == CONST0_RTX (op1_mode))
1630 return op0;
1631
1632 /* Make sure that this is a combination that expand_doubleword_shift
1633 can handle. See the comments there for details. */
1634 if (double_shift_mask == 0
1635 || (shift_mask == BITS_PER_WORD - 1
1636 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1637 {
1638 rtx insns, equiv_value;
1639 rtx into_target, outof_target;
1640 rtx into_input, outof_input;
1641 int left_shift, outof_word;
1642
1643 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1644 won't be accurate, so use a new target. */
1645 if (target == 0 || target == op0 || target == op1)
1646 target = gen_reg_rtx (mode);
1647
1648 start_sequence ();
1649
1650 /* OUTOF_* is the word we are shifting bits away from, and
1651 INTO_* is the word that we are shifting bits towards, thus
1652 they differ depending on the direction of the shift and
1653 WORDS_BIG_ENDIAN. */
1654
1655 left_shift = binoptab == ashl_optab;
1656 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1657
1658 outof_target = operand_subword (target, outof_word, 1, mode);
1659 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1660
1661 outof_input = operand_subword_force (op0, outof_word, mode);
1662 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1663
1664 if (expand_doubleword_shift (op1_mode, binoptab,
1665 outof_input, into_input, op1,
1666 outof_target, into_target,
1667 unsignedp, next_methods, shift_mask))
1668 {
1669 insns = get_insns ();
1670 end_sequence ();
1671
1672 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1673 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1674 return target;
1675 }
1676 end_sequence ();
1677 }
1678 }
1679
1680 /* Synthesize double word rotates from single word shifts. */
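/* Illustrative sketch only, for a rotate-left by a constant S with
   0 < S < BITS_PER_WORD (rotate-right is symmetric):

     high' = (high << S) | ((unsigned) low >> (BITS_PER_WORD - S));
     low'  = (low << S) | ((unsigned) high >> (BITS_PER_WORD - S));

   S == BITS_PER_WORD is just a word swap, and larger counts are
   handled below by choosing the two shift counts accordingly. */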
1681 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1682 && class == MODE_INT
1683 && GET_CODE (op1) == CONST_INT
1684 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1685 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1686 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1687 {
1688 rtx insns;
1689 rtx into_target, outof_target;
1690 rtx into_input, outof_input;
1691 rtx inter;
1692 int shift_count, left_shift, outof_word;
1693
1694 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1695 won't be accurate, so use a new target. Do this also if target is not
1696 a REG, first because having a register instead may open optimization
1697 opportunities, and second because if target and op0 happen to be MEMs
1698 designating the same location, we would risk clobbering it too early
1699 in the code sequence we generate below. */
1700 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1701 target = gen_reg_rtx (mode);
1702
1703 start_sequence ();
1704
1705 shift_count = INTVAL (op1);
1706
1707 /* OUTOF_* is the word we are shifting bits away from, and
1708 INTO_* is the word that we are shifting bits towards, thus
1709 they differ depending on the direction of the shift and
1710 WORDS_BIG_ENDIAN. */
1711
1712 left_shift = (binoptab == rotl_optab);
1713 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1714
1715 outof_target = operand_subword (target, outof_word, 1, mode);
1716 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1717
1718 outof_input = operand_subword_force (op0, outof_word, mode);
1719 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1720
1721 if (shift_count == BITS_PER_WORD)
1722 {
1723 /* This is just a word swap. */
1724 emit_move_insn (outof_target, into_input);
1725 emit_move_insn (into_target, outof_input);
1726 inter = const0_rtx;
1727 }
1728 else
1729 {
1730 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1731 rtx first_shift_count, second_shift_count;
1732 optab reverse_unsigned_shift, unsigned_shift;
1733
1734 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1735 ? lshr_optab : ashl_optab);
1736
1737 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1738 ? ashl_optab : lshr_optab);
1739
1740 if (shift_count > BITS_PER_WORD)
1741 {
1742 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1743 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1744 }
1745 else
1746 {
1747 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1748 second_shift_count = GEN_INT (shift_count);
1749 }
1750
1751 into_temp1 = expand_binop (word_mode, unsigned_shift,
1752 outof_input, first_shift_count,
1753 NULL_RTX, unsignedp, next_methods);
1754 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1755 into_input, second_shift_count,
1756 NULL_RTX, unsignedp, next_methods);
1757
1758 if (into_temp1 != 0 && into_temp2 != 0)
1759 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1760 into_target, unsignedp, next_methods);
1761 else
1762 inter = 0;
1763
1764 if (inter != 0 && inter != into_target)
1765 emit_move_insn (into_target, inter);
1766
1767 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1768 into_input, first_shift_count,
1769 NULL_RTX, unsignedp, next_methods);
1770 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1771 outof_input, second_shift_count,
1772 NULL_RTX, unsignedp, next_methods);
1773
1774 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1775 inter = expand_binop (word_mode, ior_optab,
1776 outof_temp1, outof_temp2,
1777 outof_target, unsignedp, next_methods);
1778
1779 if (inter != 0 && inter != outof_target)
1780 emit_move_insn (outof_target, inter);
1781 }
1782
1783 insns = get_insns ();
1784 end_sequence ();
1785
1786 if (inter != 0)
1787 {
1788 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1789 block to help the register allocator a bit. But a multi-word
1790 rotate will need all the input bits when setting the output
1791 bits, so there clearly is a conflict between the input and
1792 output registers. So we can't use a no-conflict block here. */
1793 emit_insn (insns);
1794 return target;
1795 }
1796 }
1797
1798 /* These can be done a word at a time by propagating carries. */
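/* Illustrative sketch only, for a two-word unsigned addition:

     low'  = low0 + low1;
     carry = (low' < low0);
     high' = high0 + high1 + carry;

   where the unsigned LT comparison detects the wrap-around.
   Subtraction is analogous, with an unsigned GT test detecting the
   borrow. The loop below handles any number of words, IOR-ing the
   two possible carry sources at each step. */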
1799 if ((binoptab == add_optab || binoptab == sub_optab)
1800 && class == MODE_INT
1801 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1802 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1803 {
1804 unsigned int i;
1805 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1806 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1807 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1808 rtx xop0, xop1, xtarget;
1809
1810 /* We can handle either a 1 or -1 value for the carry. If
1811 STORE_FLAG_VALUE is one of those, use it. Otherwise, use 1 since
1812 it is the one easiest to get. */
1813 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1814 int normalizep = STORE_FLAG_VALUE;
1815 #else
1816 int normalizep = 1;
1817 #endif
1818
1819 /* Prepare the operands. */
1820 xop0 = force_reg (mode, op0);
1821 xop1 = force_reg (mode, op1);
1822
1823 xtarget = gen_reg_rtx (mode);
1824
1825 if (target == 0 || !REG_P (target))
1826 target = xtarget;
1827
1828 /* Indicate for flow that the entire target reg is being set. */
1829 if (REG_P (target))
1830 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1831
1832 /* Do the actual arithmetic. */
1833 for (i = 0; i < nwords; i++)
1834 {
1835 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1836 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1837 rtx op0_piece = operand_subword_force (xop0, index, mode);
1838 rtx op1_piece = operand_subword_force (xop1, index, mode);
1839 rtx x;
1840
1841 /* Main add/subtract of the input operands. */
1842 x = expand_binop (word_mode, binoptab,
1843 op0_piece, op1_piece,
1844 target_piece, unsignedp, next_methods);
1845 if (x == 0)
1846 break;
1847
1848 if (i + 1 < nwords)
1849 {
1850 /* Store carry from main add/subtract. */
1851 carry_out = gen_reg_rtx (word_mode);
1852 carry_out = emit_store_flag_force (carry_out,
1853 (binoptab == add_optab
1854 ? LT : GT),
1855 x, op0_piece,
1856 word_mode, 1, normalizep);
1857 }
1858
1859 if (i > 0)
1860 {
1861 rtx newx;
1862
1863 /* Add/subtract previous carry to main result. */
1864 newx = expand_binop (word_mode,
1865 normalizep == 1 ? binoptab : otheroptab,
1866 x, carry_in,
1867 NULL_RTX, 1, next_methods);
1868
1869 if (i + 1 < nwords)
1870 {
1871 /* Get the carry out from adding/subtracting the carry in. */
1872 rtx carry_tmp = gen_reg_rtx (word_mode);
1873 carry_tmp = emit_store_flag_force (carry_tmp,
1874 (binoptab == add_optab
1875 ? LT : GT),
1876 newx, x,
1877 word_mode, 1, normalizep);
1878
1879 /* Logical-ior the two possible carries together. */
1880 carry_out = expand_binop (word_mode, ior_optab,
1881 carry_out, carry_tmp,
1882 carry_out, 0, next_methods);
1883 if (carry_out == 0)
1884 break;
1885 }
1886 emit_move_insn (target_piece, newx);
1887 }
1888 else
1889 {
1890 if (x != target_piece)
1891 emit_move_insn (target_piece, x);
1892 }
1893
1894 carry_in = carry_out;
1895 }
1896
1897 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1898 {
1899 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1900 || ! rtx_equal_p (target, xtarget))
1901 {
1902 rtx temp = emit_move_insn (target, xtarget);
1903
1904 set_unique_reg_note (temp,
1905 REG_EQUAL,
1906 gen_rtx_fmt_ee (binoptab->code, mode,
1907 copy_rtx (xop0),
1908 copy_rtx (xop1)));
1909 }
1910 else
1911 target = xtarget;
1912
1913 return target;
1914 }
1915
1916 else
1917 delete_insns_since (last);
1918 }
1919
1920 /* Attempt to synthesize double word multiplies using a sequence of word
1921 mode multiplications. We first attempt to generate a sequence using a
1922 more efficient unsigned widening multiply, and if that fails we then
1923 try using a signed widening multiply. */
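/* Illustrative identity, writing W for BITS_PER_WORD and working
   modulo 2**(2*W):

     (hi0*2**W + lo0) * (hi1*2**W + lo1)
       == widening_mult (lo0, lo1) + ((hi0*lo1 + lo0*hi1) << W)

   which is assumed to be roughly what expand_doubleword_mult builds
   out of word_mode multiplies and additions. */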
1924
1925 if (binoptab == smul_optab
1926 && class == MODE_INT
1927 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1928 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1929 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1930 {
1931 rtx product = NULL_RTX;
1932
1933 if (umul_widen_optab->handlers[(int) mode].insn_code
1934 != CODE_FOR_nothing)
1935 {
1936 product = expand_doubleword_mult (mode, op0, op1, target,
1937 true, methods);
1938 if (!product)
1939 delete_insns_since (last);
1940 }
1941
1942 if (product == NULL_RTX
1943 && smul_widen_optab->handlers[(int) mode].insn_code
1944 != CODE_FOR_nothing)
1945 {
1946 product = expand_doubleword_mult (mode, op0, op1, target,
1947 false, methods);
1948 if (!product)
1949 delete_insns_since (last);
1950 }
1951
1952 if (product != NULL_RTX)
1953 {
1954 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1955 {
1956 temp = emit_move_insn (target ? target : product, product);
1957 set_unique_reg_note (temp,
1958 REG_EQUAL,
1959 gen_rtx_fmt_ee (MULT, mode,
1960 copy_rtx (op0),
1961 copy_rtx (op1)));
1962 }
1963 return product;
1964 }
1965 }
1966
1967 /* It can't be open-coded in this mode.
1968 Use a library call if one is available and caller says that's ok. */
1969
1970 if (binoptab->handlers[(int) mode].libfunc
1971 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1972 {
1973 rtx insns;
1974 rtx op1x = op1;
1975 enum machine_mode op1_mode = mode;
1976 rtx value;
1977
1978 start_sequence ();
1979
1980 if (shift_op)
1981 {
1982 op1_mode = targetm.libgcc_shift_count_mode ();
1983 /* Specify unsigned here,
1984 since negative shift counts are meaningless. */
1985 op1x = convert_to_mode (op1_mode, op1, 1);
1986 }
1987
1988 if (GET_MODE (op0) != VOIDmode
1989 && GET_MODE (op0) != mode)
1990 op0 = convert_to_mode (mode, op0, unsignedp);
1991
1992 /* Pass 1 for NO_QUEUE so we don't lose any increments
1993 if the libcall is cse'd or moved. */
1994 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1995 NULL_RTX, LCT_CONST, mode, 2,
1996 op0, mode, op1x, op1_mode);
1997
1998 insns = get_insns ();
1999 end_sequence ();
2000
2001 target = gen_reg_rtx (mode);
2002 emit_libcall_block (insns, target, value,
2003 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2004
2005 return target;
2006 }
2007
2008 delete_insns_since (last);
2009
2010 /* It can't be done in this mode. Can we do it in a wider mode? */
2011
2012 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2013 || methods == OPTAB_MUST_WIDEN))
2014 {
2015 /* Caller says, don't even try. */
2016 delete_insns_since (entry_last);
2017 return 0;
2018 }
2019
2020 /* Compute the value of METHODS to pass to recursive calls.
2021 Don't allow widening to be tried recursively. */
2022
2023 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2024
2025 /* Look for a wider mode of the same class for which it appears we can do
2026 the operation. */
2027
2028 if (CLASS_HAS_WIDER_MODES_P (class))
2029 {
2030 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2031 wider_mode != VOIDmode;
2032 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2033 {
2034 if ((binoptab->handlers[(int) wider_mode].insn_code
2035 != CODE_FOR_nothing)
2036 || (methods == OPTAB_LIB
2037 && binoptab->handlers[(int) wider_mode].libfunc))
2038 {
2039 rtx xop0 = op0, xop1 = op1;
2040 int no_extend = 0;
2041
2042 /* For certain integer operations, we need not actually extend
2043 the narrow operands, as long as we will truncate
2044 the results to the same narrowness. */
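/* For example, the low 8 bits of a 32-bit sum depend only on the
   low 8 bits of the inputs, so a QImode addition can be done in
   SImode and truncated regardless of how the operands were
   extended. The same holds for AND, IOR, XOR, SUB, MULT and for
   the value (but not the count) operand of ASHIFT. */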
2045
2046 if ((binoptab == ior_optab || binoptab == and_optab
2047 || binoptab == xor_optab
2048 || binoptab == add_optab || binoptab == sub_optab
2049 || binoptab == smul_optab || binoptab == ashl_optab)
2050 && class == MODE_INT)
2051 no_extend = 1;
2052
2053 xop0 = widen_operand (xop0, wider_mode, mode,
2054 unsignedp, no_extend);
2055
2056 /* The second operand of a shift must always be extended. */
2057 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2058 no_extend && binoptab != ashl_optab);
2059
2060 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2061 unsignedp, methods);
2062 if (temp)
2063 {
2064 if (class != MODE_INT
2065 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2066 GET_MODE_BITSIZE (wider_mode)))
2067 {
2068 if (target == 0)
2069 target = gen_reg_rtx (mode);
2070 convert_move (target, temp, 0);
2071 return target;
2072 }
2073 else
2074 return gen_lowpart (mode, temp);
2075 }
2076 else
2077 delete_insns_since (last);
2078 }
2079 }
2080 }
2081
2082 delete_insns_since (entry_last);
2083 return 0;
2084 }
2085 \f
2086 /* Expand a binary operator which has both signed and unsigned forms.
2087 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2088 signed operations.
2089
2090 If we widen unsigned operands, we may use a signed wider operation instead
2091 of an unsigned wider operation, since the result would be the same. */
2092
2093 rtx
2094 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2095 rtx op0, rtx op1, rtx target, int unsignedp,
2096 enum optab_methods methods)
2097 {
2098 rtx temp;
2099 optab direct_optab = unsignedp ? uoptab : soptab;
2100 struct optab wide_soptab;
2101
2102 /* Do it without widening, if possible. */
2103 temp = expand_binop (mode, direct_optab, op0, op1, target,
2104 unsignedp, OPTAB_DIRECT);
2105 if (temp || methods == OPTAB_DIRECT)
2106 return temp;
2107
2108 /* Try widening to a signed int. Make a fake signed optab that
2109 hides any signed insn for direct use. */
2110 wide_soptab = *soptab;
2111 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2112 wide_soptab.handlers[(int) mode].libfunc = 0;
2113
2114 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2115 unsignedp, OPTAB_WIDEN);
2116
2117 /* For unsigned operands, try widening to an unsigned int. */
2118 if (temp == 0 && unsignedp)
2119 temp = expand_binop (mode, uoptab, op0, op1, target,
2120 unsignedp, OPTAB_WIDEN);
2121 if (temp || methods == OPTAB_WIDEN)
2122 return temp;
2123
2124 /* Use the right-width lib call if one exists. */
2125 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2126 if (temp || methods == OPTAB_LIB)
2127 return temp;
2128
2129 /* Must widen and use a lib call, use either signed or unsigned. */
2130 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2131 unsignedp, methods);
2132 if (temp != 0)
2133 return temp;
2134 if (unsignedp)
2135 return expand_binop (mode, uoptab, op0, op1, target,
2136 unsignedp, methods);
2137 return 0;
2138 }
2139 \f
2140 /* Generate code to perform an operation specified by UNOPPTAB
2141 on operand OP0, with two results to TARG0 and TARG1.
2142 We assume that the order of the operands for the instruction
2143 is TARG0, TARG1, OP0.
2144
2145 Either TARG0 or TARG1 may be zero, but what that means is that
2146 the result is not actually wanted. We will generate it into
2147 a dummy pseudo-reg and discard it. They may not both be zero.
2148
2149 Returns 1 if this operation can be performed; 0 if not. */
2150
2151 int
2152 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2153 int unsignedp)
2154 {
2155 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2156 enum mode_class class;
2157 enum machine_mode wider_mode;
2158 rtx entry_last = get_last_insn ();
2159 rtx last;
2160
2161 class = GET_MODE_CLASS (mode);
2162
2163 if (!targ0)
2164 targ0 = gen_reg_rtx (mode);
2165 if (!targ1)
2166 targ1 = gen_reg_rtx (mode);
2167
2168 /* Record where to go back to if we fail. */
2169 last = get_last_insn ();
2170
2171 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2172 {
2173 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2174 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2175 rtx pat;
2176 rtx xop0 = op0;
2177
2178 if (GET_MODE (xop0) != VOIDmode
2179 && GET_MODE (xop0) != mode0)
2180 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2181
2182 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2183 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2184 xop0 = copy_to_mode_reg (mode0, xop0);
2185
2186 /* We could handle this, but we should always be called with a pseudo
2187 for our targets and all insns should take them as outputs. */
2188 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2189 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2190
2191 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2192 if (pat)
2193 {
2194 emit_insn (pat);
2195 return 1;
2196 }
2197 else
2198 delete_insns_since (last);
2199 }
2200
2201 /* It can't be done in this mode. Can we do it in a wider mode? */
2202
2203 if (CLASS_HAS_WIDER_MODES_P (class))
2204 {
2205 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2206 wider_mode != VOIDmode;
2207 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2208 {
2209 if (unoptab->handlers[(int) wider_mode].insn_code
2210 != CODE_FOR_nothing)
2211 {
2212 rtx t0 = gen_reg_rtx (wider_mode);
2213 rtx t1 = gen_reg_rtx (wider_mode);
2214 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2215
2216 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2217 {
2218 convert_move (targ0, t0, unsignedp);
2219 convert_move (targ1, t1, unsignedp);
2220 return 1;
2221 }
2222 else
2223 delete_insns_since (last);
2224 }
2225 }
2226 }
2227
2228 delete_insns_since (entry_last);
2229 return 0;
2230 }
2231 \f
2232 /* Generate code to perform an operation specified by BINOPTAB
2233 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2234 We assume that the order of the operands for the instruction
2235 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2236 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2237
2238 Either TARG0 or TARG1 may be zero, but what that means is that
2239 the result is not actually wanted. We will generate it into
2240 a dummy pseudo-reg and discard it. They may not both be zero.
2241
2242 Returns 1 if this operation can be performed; 0 if not. */
2243
2244 int
2245 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2246 int unsignedp)
2247 {
2248 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2249 enum mode_class class;
2250 enum machine_mode wider_mode;
2251 rtx entry_last = get_last_insn ();
2252 rtx last;
2253
2254 class = GET_MODE_CLASS (mode);
2255
2256 /* If we are inside an appropriately-short loop and we are optimizing,
2257 force expensive constants into a register. */
2258 if (CONSTANT_P (op0) && optimize
2259 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2260 op0 = force_reg (mode, op0);
2261
2262 if (CONSTANT_P (op1) && optimize
2263 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2264 op1 = force_reg (mode, op1);
2265
2266 if (!targ0)
2267 targ0 = gen_reg_rtx (mode);
2268 if (!targ1)
2269 targ1 = gen_reg_rtx (mode);
2270
2271 /* Record where to go back to if we fail. */
2272 last = get_last_insn ();
2273
2274 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2275 {
2276 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2277 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2278 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2279 rtx pat;
2280 rtx xop0 = op0, xop1 = op1;
2281
2282 /* In case the insn wants input operands in modes different from
2283 those of the actual operands, convert the operands. It would
2284 seem that we don't need to convert CONST_INTs, but we do, so
2285 that they're properly zero-extended, sign-extended or truncated
2286 for their mode. */
2287
2288 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2289 xop0 = convert_modes (mode0,
2290 GET_MODE (op0) != VOIDmode
2291 ? GET_MODE (op0)
2292 : mode,
2293 xop0, unsignedp);
2294
2295 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2296 xop1 = convert_modes (mode1,
2297 GET_MODE (op1) != VOIDmode
2298 ? GET_MODE (op1)
2299 : mode,
2300 xop1, unsignedp);
2301
2302 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2303 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2304 xop0 = copy_to_mode_reg (mode0, xop0);
2305
2306 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2307 xop1 = copy_to_mode_reg (mode1, xop1);
2308
2309 /* We could handle this, but we should always be called with a pseudo
2310 for our targets and all insns should take them as outputs. */
2311 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2312 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2313
2314 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2315 if (pat)
2316 {
2317 emit_insn (pat);
2318 return 1;
2319 }
2320 else
2321 delete_insns_since (last);
2322 }
2323
2324 /* It can't be done in this mode. Can we do it in a wider mode? */
2325
2326 if (CLASS_HAS_WIDER_MODES_P (class))
2327 {
2328 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2329 wider_mode != VOIDmode;
2330 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2331 {
2332 if (binoptab->handlers[(int) wider_mode].insn_code
2333 != CODE_FOR_nothing)
2334 {
2335 rtx t0 = gen_reg_rtx (wider_mode);
2336 rtx t1 = gen_reg_rtx (wider_mode);
2337 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2338 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2339
2340 if (expand_twoval_binop (binoptab, cop0, cop1,
2341 t0, t1, unsignedp))
2342 {
2343 convert_move (targ0, t0, unsignedp);
2344 convert_move (targ1, t1, unsignedp);
2345 return 1;
2346 }
2347 else
2348 delete_insns_since (last);
2349 }
2350 }
2351 }
2352
2353 delete_insns_since (entry_last);
2354 return 0;
2355 }
2356
2357 /* Expand the two-valued library call indicated by BINOPTAB, but
2358 preserve only one of the values. If TARG0 is non-NULL, the first
2359 value is placed into TARG0; otherwise the second value is placed
2360 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2361 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2362 This routine assumes that the value returned by the library call is
2363 as if the return value was of an integral mode twice as wide as the
2364 mode of OP0. Returns 1 if the call was successful. */
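/* For example, with SImode operands on a 32-bit target the libfunc
   is treated as returning a DImode value; the code below reads the
   TARG0 value from byte offset 0 of that value and the TARG1 value
   from byte offset GET_MODE_SIZE (SImode). */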
2365
2366 bool
2367 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2368 rtx targ0, rtx targ1, enum rtx_code code)
2369 {
2370 enum machine_mode mode;
2371 enum machine_mode libval_mode;
2372 rtx libval;
2373 rtx insns;
2374
2375 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2376 gcc_assert (!targ0 != !targ1);
2377
2378 mode = GET_MODE (op0);
2379 if (!binoptab->handlers[(int) mode].libfunc)
2380 return false;
2381
2382 /* The value returned by the library function will have twice as
2383 many bits as the nominal MODE. */
2384 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2385 MODE_INT);
2386 start_sequence ();
2387 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2388 NULL_RTX, LCT_CONST,
2389 libval_mode, 2,
2390 op0, mode,
2391 op1, mode);
2392 /* Get the part of LIBVAL containing the value that we want. */
2393 libval = simplify_gen_subreg (mode, libval, libval_mode,
2394 targ0 ? 0 : GET_MODE_SIZE (mode));
2395 insns = get_insns ();
2396 end_sequence ();
2397 /* Move the result into the desired location. */
2398 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2399 gen_rtx_fmt_ee (code, mode, op0, op1));
2400
2401 return true;
2402 }
2403
2404 \f
2405 /* Wrapper around expand_unop which takes an rtx code to specify
2406 the operation to perform, not an optab pointer. All other
2407 arguments are the same. */
2408 rtx
2409 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2410 rtx target, int unsignedp)
2411 {
2412 optab unop = code_to_optab[(int) code];
2413 gcc_assert (unop);
2414
2415 return expand_unop (mode, unop, op0, target, unsignedp);
2416 }
2417
2418 /* Try calculating
2419 (clz:narrow x)
2420 as
2421 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
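/* For example, CLZ of the QImode value 0x10 can be found in SImode:
   clz (0x00000010) == 27, and 27 - (32 - 8) == 3, the QImode
   result. */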
2422 static rtx
2423 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2424 {
2425 enum mode_class class = GET_MODE_CLASS (mode);
2426 if (CLASS_HAS_WIDER_MODES_P (class))
2427 {
2428 enum machine_mode wider_mode;
2429 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2430 wider_mode != VOIDmode;
2431 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2432 {
2433 if (clz_optab->handlers[(int) wider_mode].insn_code
2434 != CODE_FOR_nothing)
2435 {
2436 rtx xop0, temp, last;
2437
2438 last = get_last_insn ();
2439
2440 if (target == 0)
2441 target = gen_reg_rtx (mode);
2442 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2443 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2444 if (temp != 0)
2445 temp = expand_binop (wider_mode, sub_optab, temp,
2446 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2447 - GET_MODE_BITSIZE (mode)),
2448 target, true, OPTAB_DIRECT);
2449 if (temp == 0)
2450 delete_insns_since (last);
2451
2452 return temp;
2453 }
2454 }
2455 }
2456 return 0;
2457 }
2458
2459 /* Try calculating
2460 (bswap:narrow x)
2461 as
2462 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
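/* For example, bswap of the HImode value 0xABCD via SImode:
   bswap (0x0000ABCD) == 0xCDAB0000, and a logical right shift by
   32 - 16 bits leaves 0xCDAB, the HImode result. */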
2463 static rtx
2464 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2465 {
2466 enum mode_class class = GET_MODE_CLASS (mode);
2467 enum machine_mode wider_mode;
2468 rtx x, last;
2469
2470 if (!CLASS_HAS_WIDER_MODES_P (class))
2471 return NULL_RTX;
2472
2473 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2474 wider_mode != VOIDmode;
2475 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2476 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2477 goto found;
2478 return NULL_RTX;
2479
2480 found:
2481 last = get_last_insn ();
2482
2483 x = widen_operand (op0, wider_mode, mode, true, true);
2484 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2485
2486 if (x != 0)
2487 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2488 size_int (GET_MODE_BITSIZE (wider_mode)
2489 - GET_MODE_BITSIZE (mode)),
2490 NULL_RTX, true);
2491
2492 if (x != 0)
2493 {
2494 if (target == 0)
2495 target = gen_reg_rtx (mode);
2496 emit_move_insn (target, gen_lowpart (mode, x));
2497 }
2498 else
2499 delete_insns_since (last);
2500
2501 return target;
2502 }
2503
2504 /* Try calculating bswap as two bswaps of two word-sized operands. */
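/* For example, for a double-word value on a 32-bit target: byte-swap
   each 32-bit half with the word_mode bswap pattern, then store the
   swapped low half into the high word of the result and vice versa,
   which is what the moves below do. */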
2505
2506 static rtx
2507 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2508 {
2509 rtx t0, t1;
2510
2511 t1 = expand_unop (word_mode, bswap_optab,
2512 operand_subword_force (op, 0, mode), NULL_RTX, true);
2513 t0 = expand_unop (word_mode, bswap_optab,
2514 operand_subword_force (op, 1, mode), NULL_RTX, true);
2515
2516 if (target == 0)
2517 target = gen_reg_rtx (mode);
2518 if (REG_P (target))
2519 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2520 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2521 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2522
2523 return target;
2524 }
2525
2526 /* Try calculating (parity x) as (and (popcount x) 1), where
2527 popcount can also be done in a wider mode. */
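/* For example, parity (0b1101) == popcount (0b1101) & 1 == 3 & 1
   == 1, i.e. an odd number of set bits. */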
2528 static rtx
2529 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2530 {
2531 enum mode_class class = GET_MODE_CLASS (mode);
2532 if (CLASS_HAS_WIDER_MODES_P (class))
2533 {
2534 enum machine_mode wider_mode;
2535 for (wider_mode = mode; wider_mode != VOIDmode;
2536 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2537 {
2538 if (popcount_optab->handlers[(int) wider_mode].insn_code
2539 != CODE_FOR_nothing)
2540 {
2541 rtx xop0, temp, last;
2542
2543 last = get_last_insn ();
2544
2545 if (target == 0)
2546 target = gen_reg_rtx (mode);
2547 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2548 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2549 true);
2550 if (temp != 0)
2551 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2552 target, true, OPTAB_DIRECT);
2553 if (temp == 0)
2554 delete_insns_since (last);
2555
2556 return temp;
2557 }
2558 }
2559 }
2560 return 0;
2561 }
2562
2563 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2564 conditions, VAL may already be a SUBREG against which we cannot generate
2565 a further SUBREG. In this case, we expect forcing the value into a
2566 register will work around the situation. */
2567
2568 static rtx
2569 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2570 enum machine_mode imode)
2571 {
2572 rtx ret;
2573 ret = lowpart_subreg (omode, val, imode);
2574 if (ret == NULL)
2575 {
2576 val = force_reg (imode, val);
2577 ret = lowpart_subreg (omode, val, imode);
2578 gcc_assert (ret != NULL);
2579 }
2580 return ret;
2581 }
2582
2583 /* Expand a floating point absolute value or negation operation via a
2584 logical operation on the sign bit. */
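/* For example, for IEEE single precision the sign is bit 31, so ABS
   becomes an AND with 0x7fffffff and NEG an XOR with 0x80000000 on
   the value viewed as a 32-bit integer. */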
2585
2586 static rtx
2587 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2588 rtx op0, rtx target)
2589 {
2590 const struct real_format *fmt;
2591 int bitpos, word, nwords, i;
2592 enum machine_mode imode;
2593 HOST_WIDE_INT hi, lo;
2594 rtx temp, insns;
2595
2596 /* The format has to have a simple sign bit. */
2597 fmt = REAL_MODE_FORMAT (mode);
2598 if (fmt == NULL)
2599 return NULL_RTX;
2600
2601 bitpos = fmt->signbit_rw;
2602 if (bitpos < 0)
2603 return NULL_RTX;
2604
2605 /* Don't create negative zeros if the format doesn't support them. */
2606 if (code == NEG && !fmt->has_signed_zero)
2607 return NULL_RTX;
2608
2609 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2610 {
2611 imode = int_mode_for_mode (mode);
2612 if (imode == BLKmode)
2613 return NULL_RTX;
2614 word = 0;
2615 nwords = 1;
2616 }
2617 else
2618 {
2619 imode = word_mode;
2620
2621 if (FLOAT_WORDS_BIG_ENDIAN)
2622 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2623 else
2624 word = bitpos / BITS_PER_WORD;
2625 bitpos = bitpos % BITS_PER_WORD;
2626 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2627 }
2628
2629 if (bitpos < HOST_BITS_PER_WIDE_INT)
2630 {
2631 hi = 0;
2632 lo = (HOST_WIDE_INT) 1 << bitpos;
2633 }
2634 else
2635 {
2636 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2637 lo = 0;
2638 }
2639 if (code == ABS)
2640 lo = ~lo, hi = ~hi;
2641
2642 if (target == 0 || target == op0)
2643 target = gen_reg_rtx (mode);
2644
2645 if (nwords > 1)
2646 {
2647 start_sequence ();
2648
2649 for (i = 0; i < nwords; ++i)
2650 {
2651 rtx targ_piece = operand_subword (target, i, 1, mode);
2652 rtx op0_piece = operand_subword_force (op0, i, mode);
2653
2654 if (i == word)
2655 {
2656 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2657 op0_piece,
2658 immed_double_const (lo, hi, imode),
2659 targ_piece, 1, OPTAB_LIB_WIDEN);
2660 if (temp != targ_piece)
2661 emit_move_insn (targ_piece, temp);
2662 }
2663 else
2664 emit_move_insn (targ_piece, op0_piece);
2665 }
2666
2667 insns = get_insns ();
2668 end_sequence ();
2669
2670 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2671 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2672 }
2673 else
2674 {
2675 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2676 gen_lowpart (imode, op0),
2677 immed_double_const (lo, hi, imode),
2678 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2679 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2680
2681 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2682 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2683 }
2684
2685 return target;
2686 }
2687
2688 /* Generate code to perform an operation specified by UNOPTAB
2689 on operand OP0, with result having machine-mode MODE.
2690
2691 UNSIGNEDP is for the case where we have to widen the operands
2692 to perform the operation. It says to use zero-extension.
2693
2694 If TARGET is nonzero, the value
2695 is generated there, if it is convenient to do so.
2696 In all cases an rtx is returned for the locus of the value;
2697 this may or may not be TARGET. */
2698
2699 rtx
2700 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2701 int unsignedp)
2702 {
2703 enum mode_class class;
2704 enum machine_mode wider_mode;
2705 rtx temp;
2706 rtx last = get_last_insn ();
2707 rtx pat;
2708
2709 class = GET_MODE_CLASS (mode);
2710
2711 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2712 {
2713 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2714 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2715 rtx xop0 = op0;
2716
2717 if (target)
2718 temp = target;
2719 else
2720 temp = gen_reg_rtx (mode);
2721
2722 if (GET_MODE (xop0) != VOIDmode
2723 && GET_MODE (xop0) != mode0)
2724 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2725
2726 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2727
2728 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2729 xop0 = copy_to_mode_reg (mode0, xop0);
2730
2731 if (!insn_data[icode].operand[0].predicate (temp, mode))
2732 temp = gen_reg_rtx (mode);
2733
2734 pat = GEN_FCN (icode) (temp, xop0);
2735 if (pat)
2736 {
2737 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2738 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2739 {
2740 delete_insns_since (last);
2741 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2742 }
2743
2744 emit_insn (pat);
2745
2746 return temp;
2747 }
2748 else
2749 delete_insns_since (last);
2750 }
2751
2752 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2753
2754 /* Widening clz needs special treatment. */
2755 if (unoptab == clz_optab)
2756 {
2757 temp = widen_clz (mode, op0, target);
2758 if (temp)
2759 return temp;
2760 else
2761 goto try_libcall;
2762 }
2763
2764 /* Widening (or narrowing) bswap needs special treatment. */
2765 if (unoptab == bswap_optab)
2766 {
2767 temp = widen_bswap (mode, op0, target);
2768 if (temp)
2769 return temp;
2770
2771 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2772 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2773 {
2774 temp = expand_doubleword_bswap (mode, op0, target);
2775 if (temp)
2776 return temp;
2777 }
2778
2779 goto try_libcall;
2780 }
2781
2782 if (CLASS_HAS_WIDER_MODES_P (class))
2783 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2784 wider_mode != VOIDmode;
2785 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2786 {
2787 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2788 {
2789 rtx xop0 = op0;
2790
2791 /* For certain operations, we need not actually extend
2792 the narrow operand, as long as we will truncate the
2793 results to the same narrowness. */
2794
2795 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2796 (unoptab == neg_optab
2797 || unoptab == one_cmpl_optab)
2798 && class == MODE_INT);
2799
2800 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2801 unsignedp);
2802
2803 if (temp)
2804 {
2805 if (class != MODE_INT
2806 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2807 GET_MODE_BITSIZE (wider_mode)))
2808 {
2809 if (target == 0)
2810 target = gen_reg_rtx (mode);
2811 convert_move (target, temp, 0);
2812 return target;
2813 }
2814 else
2815 return gen_lowpart (mode, temp);
2816 }
2817 else
2818 delete_insns_since (last);
2819 }
2820 }
2821
2822 /* These can be done a word at a time. */
2823 if (unoptab == one_cmpl_optab
2824 && class == MODE_INT
2825 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2826 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2827 {
2828 int i;
2829 rtx insns;
2830
2831 if (target == 0 || target == op0)
2832 target = gen_reg_rtx (mode);
2833
2834 start_sequence ();
2835
2836 /* Do the actual arithmetic. */
2837 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2838 {
2839 rtx target_piece = operand_subword (target, i, 1, mode);
2840 rtx x = expand_unop (word_mode, unoptab,
2841 operand_subword_force (op0, i, mode),
2842 target_piece, unsignedp);
2843
2844 if (target_piece != x)
2845 emit_move_insn (target_piece, x);
2846 }
2847
2848 insns = get_insns ();
2849 end_sequence ();
2850
2851 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2852 gen_rtx_fmt_e (unoptab->code, mode,
2853 copy_rtx (op0)));
2854 return target;
2855 }
2856
2857 if (unoptab->code == NEG)
2858 {
2859 /* Try negating floating point values by flipping the sign bit. */
2860 if (SCALAR_FLOAT_MODE_P (mode))
2861 {
2862 temp = expand_absneg_bit (NEG, mode, op0, target);
2863 if (temp)
2864 return temp;
2865 }
2866
2867 /* If there is no negation pattern, and we have no negative zero,
2868 try subtracting from zero. */
2869 if (!HONOR_SIGNED_ZEROS (mode))
2870 {
2871 temp = expand_binop (mode, (unoptab == negv_optab
2872 ? subv_optab : sub_optab),
2873 CONST0_RTX (mode), op0, target,
2874 unsignedp, OPTAB_DIRECT);
2875 if (temp)
2876 return temp;
2877 }
2878 }
2879
2880 /* Try calculating parity (x) as popcount (x) % 2. */
2881 if (unoptab == parity_optab)
2882 {
2883 temp = expand_parity (mode, op0, target);
2884 if (temp)
2885 return temp;
2886 }
2887
2888 try_libcall:
2889 /* Now try a library call in this mode. */
2890 if (unoptab->handlers[(int) mode].libfunc)
2891 {
2892 rtx insns;
2893 rtx value;
2894 enum machine_mode outmode = mode;
2895
2896 /* All of these functions return small values. Thus we choose to
2897 have them return something that isn't a double-word. */
2898 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2899 || unoptab == popcount_optab || unoptab == parity_optab)
2900 outmode
2901 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2902
2903 start_sequence ();
2904
2905 /* Pass 1 for NO_QUEUE so we don't lose any increments
2906 if the libcall is cse'd or moved. */
2907 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2908 NULL_RTX, LCT_CONST, outmode,
2909 1, op0, mode);
2910 insns = get_insns ();
2911 end_sequence ();
2912
2913 target = gen_reg_rtx (outmode);
2914 emit_libcall_block (insns, target, value,
2915 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2916
2917 return target;
2918 }
2919
2920 /* It can't be done in this mode. Can we do it in a wider mode? */
2921
2922 if (CLASS_HAS_WIDER_MODES_P (class))
2923 {
2924 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2925 wider_mode != VOIDmode;
2926 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2927 {
2928 if ((unoptab->handlers[(int) wider_mode].insn_code
2929 != CODE_FOR_nothing)
2930 || unoptab->handlers[(int) wider_mode].libfunc)
2931 {
2932 rtx xop0 = op0;
2933
2934 /* For certain operations, we need not actually extend
2935 the narrow operand, as long as we will truncate the
2936 results to the same narrowness. */
2937
2938 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2939 (unoptab == neg_optab
2940 || unoptab == one_cmpl_optab)
2941 && class == MODE_INT);
2942
2943 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2944 unsignedp);
2945
2946 /* If we are generating clz using wider mode, adjust the
2947 result. */
2948 if (unoptab == clz_optab && temp != 0)
2949 temp = expand_binop (wider_mode, sub_optab, temp,
2950 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2951 - GET_MODE_BITSIZE (mode)),
2952 target, true, OPTAB_DIRECT);
2953
2954 if (temp)
2955 {
2956 if (class != MODE_INT)
2957 {
2958 if (target == 0)
2959 target = gen_reg_rtx (mode);
2960 convert_move (target, temp, 0);
2961 return target;
2962 }
2963 else
2964 return gen_lowpart (mode, temp);
2965 }
2966 else
2967 delete_insns_since (last);
2968 }
2969 }
2970 }
2971
2972 /* One final attempt at implementing negation via subtraction,
2973 this time allowing widening of the operand. */
2974 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2975 {
2976 rtx temp;
2977 temp = expand_binop (mode,
2978 unoptab == negv_optab ? subv_optab : sub_optab,
2979 CONST0_RTX (mode), op0,
2980 target, unsignedp, OPTAB_LIB_WIDEN);
2981 if (temp)
2982 return temp;
2983 }
2984
2985 return 0;
2986 }
2987 \f
2988 /* Emit code to compute the absolute value of OP0, with result to
2989 TARGET if convenient. (TARGET may be 0.) The return value says
2990 where the result actually is to be found.
2991
2992 MODE is the mode of the operand; the mode of the result is
2993 different but can be deduced from MODE. */
2996
2997 rtx
2998 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2999 int result_unsignedp)
3000 {
3001 rtx temp;
3002
3003 if (! flag_trapv)
3004 result_unsignedp = 1;
3005
3006 /* First try to do it with a special abs instruction. */
3007 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3008 op0, target, 0);
3009 if (temp != 0)
3010 return temp;
3011
3012 /* For floating point modes, try clearing the sign bit. */
3013 if (SCALAR_FLOAT_MODE_P (mode))
3014 {
3015 temp = expand_absneg_bit (ABS, mode, op0, target);
3016 if (temp)
3017 return temp;
3018 }
3019
3020 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3021 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
3022 && !HONOR_SIGNED_ZEROS (mode))
3023 {
3024 rtx last = get_last_insn ();
3025
3026 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3027 if (temp != 0)
3028 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3029 OPTAB_WIDEN);
3030
3031 if (temp != 0)
3032 return temp;
3033
3034 delete_insns_since (last);
3035 }
3036
3037 /* If this machine has expensive jumps, we can do integer absolute
3038 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3039 where W is the width of MODE. */
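/* Worked example for W == 32 and x == -5: the arithmetic shift
   yields -1 (all ones), (-5 ^ -1) == 4, and 4 - (-1) == 5. For
   x >= 0 the shift yields 0 and the expression reduces to x. */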
3040
3041 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3042 {
3043 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3044 size_int (GET_MODE_BITSIZE (mode) - 1),
3045 NULL_RTX, 0);
3046
3047 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3048 OPTAB_LIB_WIDEN);
3049 if (temp != 0)
3050 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3051 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3052
3053 if (temp != 0)
3054 return temp;
3055 }
3056
3057 return NULL_RTX;
3058 }
3059
3060 rtx
3061 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3062 int result_unsignedp, int safe)
3063 {
3064 rtx temp, op1;
3065
3066 if (! flag_trapv)
3067 result_unsignedp = 1;
3068
3069 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3070 if (temp != 0)
3071 return temp;
3072
3073 /* If that does not win, use conditional jump and negate. */
3074
3075 /* It is safe to use the target if it is the same
3076 as the source and it is also a pseudo register. */
3077 if (op0 == target && REG_P (op0)
3078 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3079 safe = 1;
3080
3081 op1 = gen_label_rtx ();
3082 if (target == 0 || ! safe
3083 || GET_MODE (target) != mode
3084 || (MEM_P (target) && MEM_VOLATILE_P (target))
3085 || (REG_P (target)
3086 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3087 target = gen_reg_rtx (mode);
3088
3089 emit_move_insn (target, op0);
3090 NO_DEFER_POP;
3091
3092 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3093 NULL_RTX, NULL_RTX, op1);
3094
3095 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3096 target, target, 0);
3097 if (op0 != target)
3098 emit_move_insn (target, op0);
3099 emit_label (op1);
3100 OK_DEFER_POP;
3101 return target;
3102 }
3103
3104 /* A subroutine of expand_copysign, perform the copysign operation using the
3105 abs and neg primitives advertised to exist on the target. The assumption
3106 is that we have a split register file, and leaving op0 in fp registers,
3107 and not playing with subregs so much, will help the register allocator. */
3108
3109 static rtx
3110 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3111 int bitpos, bool op0_is_abs)
3112 {
3113 enum machine_mode imode;
3114 int icode;
3115 rtx sign, label;
3116
3117 if (target == op1)
3118 target = NULL_RTX;
3119
3120 /* Check if the back end provides an insn that handles signbit for the
3121 argument's mode. */
3122 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3123 if (icode != CODE_FOR_nothing)
3124 {
3125 imode = insn_data[icode].operand[0].mode;
3126 sign = gen_reg_rtx (imode);
3127 emit_unop_insn (icode, sign, op1, UNKNOWN);
3128 }
3129 else
3130 {
3131 HOST_WIDE_INT hi, lo;
3132
3133 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3134 {
3135 imode = int_mode_for_mode (mode);
3136 if (imode == BLKmode)
3137 return NULL_RTX;
3138 op1 = gen_lowpart (imode, op1);
3139 }
3140 else
3141 {
3142 int word;
3143
3144 imode = word_mode;
3145 if (FLOAT_WORDS_BIG_ENDIAN)
3146 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3147 else
3148 word = bitpos / BITS_PER_WORD;
3149 bitpos = bitpos % BITS_PER_WORD;
3150 op1 = operand_subword_force (op1, word, mode);
3151 }
3152
3153 if (bitpos < HOST_BITS_PER_WIDE_INT)
3154 {
3155 hi = 0;
3156 lo = (HOST_WIDE_INT) 1 << bitpos;
3157 }
3158 else
3159 {
3160 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3161 lo = 0;
3162 }
3163
3164 sign = gen_reg_rtx (imode);
3165 sign = expand_binop (imode, and_optab, op1,
3166 immed_double_const (lo, hi, imode),
3167 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3168 }
3169
3170 if (!op0_is_abs)
3171 {
3172 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3173 if (op0 == NULL)
3174 return NULL_RTX;
3175 target = op0;
3176 }
3177 else
3178 {
3179 if (target == NULL_RTX)
3180 target = copy_to_reg (op0);
3181 else
3182 emit_move_insn (target, op0);
3183 }
3184
3185 label = gen_label_rtx ();
3186 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3187
3188 if (GET_CODE (op0) == CONST_DOUBLE)
3189 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3190 else
3191 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3192 if (op0 != target)
3193 emit_move_insn (target, op0);
3194
3195 emit_label (label);
3196
3197 return target;
3198 }
3199
3200
3201 /* A subroutine of expand_copysign, perform the entire copysign operation
3202 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3203 is true if op0 is known to have its sign bit clear. */
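/* In effect this computes, for IEEE single precision for example,
   (x & 0x7fffffff) | (y & 0x80000000) on the integer images of the
   operands; when OP0_IS_ABS the first mask is known to be
   redundant. */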
3204
3205 static rtx
3206 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3207 int bitpos, bool op0_is_abs)
3208 {
3209 enum machine_mode imode;
3210 HOST_WIDE_INT hi, lo;
3211 int word, nwords, i;
3212 rtx temp, insns;
3213
3214 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3215 {
3216 imode = int_mode_for_mode (mode);
3217 if (imode == BLKmode)
3218 return NULL_RTX;
3219 word = 0;
3220 nwords = 1;
3221 }
3222 else
3223 {
3224 imode = word_mode;
3225
3226 if (FLOAT_WORDS_BIG_ENDIAN)
3227 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3228 else
3229 word = bitpos / BITS_PER_WORD;
3230 bitpos = bitpos % BITS_PER_WORD;
3231 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3232 }
3233
3234 if (bitpos < HOST_BITS_PER_WIDE_INT)
3235 {
3236 hi = 0;
3237 lo = (HOST_WIDE_INT) 1 << bitpos;
3238 }
3239 else
3240 {
3241 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3242 lo = 0;
3243 }
3244
3245 if (target == 0 || target == op0 || target == op1)
3246 target = gen_reg_rtx (mode);
3247
3248 if (nwords > 1)
3249 {
3250 start_sequence ();
3251
3252 for (i = 0; i < nwords; ++i)
3253 {
3254 rtx targ_piece = operand_subword (target, i, 1, mode);
3255 rtx op0_piece = operand_subword_force (op0, i, mode);
3256
3257 if (i == word)
3258 {
3259 if (!op0_is_abs)
3260 op0_piece = expand_binop (imode, and_optab, op0_piece,
3261 immed_double_const (~lo, ~hi, imode),
3262 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3263
3264 op1 = expand_binop (imode, and_optab,
3265 operand_subword_force (op1, i, mode),
3266 immed_double_const (lo, hi, imode),
3267 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3268
3269 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3270 targ_piece, 1, OPTAB_LIB_WIDEN);
3271 if (temp != targ_piece)
3272 emit_move_insn (targ_piece, temp);
3273 }
3274 else
3275 emit_move_insn (targ_piece, op0_piece);
3276 }
3277
3278 insns = get_insns ();
3279 end_sequence ();
3280
3281 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3282 }
3283 else
3284 {
3285 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3286 immed_double_const (lo, hi, imode),
3287 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3288
3289 op0 = gen_lowpart (imode, op0);
3290 if (!op0_is_abs)
3291 op0 = expand_binop (imode, and_optab, op0,
3292 immed_double_const (~lo, ~hi, imode),
3293 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3294
3295 temp = expand_binop (imode, ior_optab, op0, op1,
3296 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3297 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3298 }
3299
3300 return target;
3301 }
3302
3303 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3304 scalar floating point mode. Return NULL if we do not know how to
3305 expand the operation inline. */
3306
3307 rtx
3308 expand_copysign (rtx op0, rtx op1, rtx target)
3309 {
3310 enum machine_mode mode = GET_MODE (op0);
3311 const struct real_format *fmt;
3312 bool op0_is_abs;
3313 rtx temp;
3314
3315 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3316 gcc_assert (GET_MODE (op1) == mode);
3317
3318 /* First try to do it with a special instruction. */
3319 temp = expand_binop (mode, copysign_optab, op0, op1,
3320 target, 0, OPTAB_DIRECT);
3321 if (temp)
3322 return temp;
3323
3324 fmt = REAL_MODE_FORMAT (mode);
3325 if (fmt == NULL || !fmt->has_signed_zero)
3326 return NULL_RTX;
3327
3328 op0_is_abs = false;
3329 if (GET_CODE (op0) == CONST_DOUBLE)
3330 {
3331 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3332 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3333 op0_is_abs = true;
3334 }
3335
3336 if (fmt->signbit_ro >= 0
3337 && (GET_CODE (op0) == CONST_DOUBLE
3338 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3339 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3340 {
3341 temp = expand_copysign_absneg (mode, op0, op1, target,
3342 fmt->signbit_ro, op0_is_abs);
3343 if (temp)
3344 return temp;
3345 }
3346
3347 if (fmt->signbit_rw < 0)
3348 return NULL_RTX;
3349 return expand_copysign_bit (mode, op0, op1, target,
3350 fmt->signbit_rw, op0_is_abs);
3351 }
3352 \f
3353 /* Generate an instruction whose insn-code is INSN_CODE,
3354 with two operands: an output TARGET and an input OP0.
3355 TARGET *must* be nonzero, and the output is always stored there.
3356 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3357 the value that is stored into TARGET. */
3358
3359 void
3360 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3361 {
3362 rtx temp;
3363 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3364 rtx pat;
3365
3366 temp = target;
3367
3368 /* Now, if insn does not accept our operands, put them into pseudos. */
3369
3370 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3371 op0 = copy_to_mode_reg (mode0, op0);
3372
3373 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3374 temp = gen_reg_rtx (GET_MODE (temp));
3375
3376 pat = GEN_FCN (icode) (temp, op0);
3377
3378 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3379 add_equal_note (pat, temp, code, op0, NULL_RTX);
3380
3381 emit_insn (pat);
3382
3383 if (temp != target)
3384 emit_move_insn (target, temp);
3385 }
3386 \f
3387 struct no_conflict_data
3388 {
3389 rtx target, first, insn;
3390 bool must_stay;
3391 };
3392
3393 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3394 Set P->must_stay if the currently examined clobber / store has to stay
3395 in the list of insns that constitute the actual no_conflict block /
3396 libcall block. */
3397 static void
3398 no_conflict_move_test (rtx dest, rtx set, void *p0)
3399 {
3400 struct no_conflict_data *p= p0;
3401
3402 /* If this insn directly contributes to setting the target, it must stay. */
3403 if (reg_overlap_mentioned_p (p->target, dest))
3404 p->must_stay = true;
3405 /* If we haven't committed to keeping any other insns in the list yet,
3406 there is nothing more to check. */
3407 else if (p->insn == p->first)
3408 return;
3409 /* If this insn sets / clobbers a register that feeds one of the insns
3410 already in the list, this insn has to stay too. */
3411 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3412 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3413 || reg_used_between_p (dest, p->first, p->insn)
3414 /* Likewise if this insn depends on a register set by a previous
3415 insn in the list, or if it sets a result (presumably a hard
3416 register) that is set or clobbered by a previous insn.
3417 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3418 SET_DEST perform the former check on the address, and the latter
3419 check on the MEM. */
3420 || (GET_CODE (set) == SET
3421 && (modified_in_p (SET_SRC (set), p->first)
3422 || modified_in_p (SET_DEST (set), p->first)
3423 || modified_between_p (SET_SRC (set), p->first, p->insn)
3424 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3425 p->must_stay = true;
3426 }
3427
3428 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3429 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3430 is possible to do so. */
3431
3432 void
3433 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3434 {
3435 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3436 {
3437 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3438 encapsulated region would not be in one basic block, i.e. when
3439 there is a control_flow_insn_p insn between FIRST and LAST. */
3440 bool attach_libcall_retval_notes = true;
3441 rtx insn, next = NEXT_INSN (last);
3442
3443 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3444 if (control_flow_insn_p (insn))
3445 {
3446 attach_libcall_retval_notes = false;
3447 break;
3448 }
3449
3450 if (attach_libcall_retval_notes)
3451 {
3452 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3453 REG_NOTES (first));
3454 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3455 REG_NOTES (last));
3456 next = NEXT_INSN (last);
3457 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3458 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3459 GEN_INT (libcall_id),
3460 REG_NOTES (insn));
3461 libcall_id++;
3462 }
3463 }
3464 }
3465
3466 /* Emit code to perform a series of operations on a multi-word quantity, one
3467 word at a time.
3468
3469 Such a block is preceded by a CLOBBER of the output, consists of multiple
3470 insns, each setting one word of the output, and is followed by a SET copying
3471 the output to itself.
3472
3473 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3474 note indicating that it doesn't conflict with the (also multi-word)
3475 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3476 notes.
3477
3478 INSNS is a block of code generated to perform the operation, not including
3479 the CLOBBER and final copy. All insns that compute intermediate values
3480 are first emitted, followed by the block as described above.
3481
3482 TARGET, OP0, and OP1 are the output and inputs of the operations,
3483 respectively. OP1 may be zero for a unary operation.
3484
3485 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3486 on the last insn.
3487
3488 If TARGET is not a register, INSNS is simply emitted with no special
3489 processing. Likewise if anything in INSNS is not an INSN or if
3490 there is a libcall block inside INSNS.
3491
3492 The final insn emitted is returned. */
3493
3494 rtx
3495 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3496 {
3497 rtx prev, next, first, last, insn;
3498
3499 if (!REG_P (target) || reload_in_progress)
3500 return emit_insn (insns);
3501 else
3502 for (insn = insns; insn; insn = NEXT_INSN (insn))
3503 if (!NONJUMP_INSN_P (insn)
3504 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3505 return emit_insn (insns);
3506
3507 /* First emit all insns that do not store into words of the output and remove
3508 these from the list. */
3509 for (insn = insns; insn; insn = next)
3510 {
3511 rtx note;
3512 struct no_conflict_data data;
3513
3514 next = NEXT_INSN (insn);
3515
3516 /* Some ports (cris) create libcall regions of their own. We must
3517 avoid any potential nesting of LIBCALLs. */
3518 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3519 remove_note (insn, note);
3520 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3521 remove_note (insn, note);
3522 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3523 remove_note (insn, note);
3524
3525 data.target = target;
3526 data.first = insns;
3527 data.insn = insn;
3528 data.must_stay = 0;
3529 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3530 if (! data.must_stay)
3531 {
3532 if (PREV_INSN (insn))
3533 NEXT_INSN (PREV_INSN (insn)) = next;
3534 else
3535 insns = next;
3536
3537 if (next)
3538 PREV_INSN (next) = PREV_INSN (insn);
3539
3540 add_insn (insn);
3541 }
3542 }
3543
3544 prev = get_last_insn ();
3545
3546 /* Now write the CLOBBER of the output, followed by the setting of each
3547 of the words, followed by the final copy. */
3548 if (target != op0 && target != op1)
3549 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3550
3551 for (insn = insns; insn; insn = next)
3552 {
3553 next = NEXT_INSN (insn);
3554 add_insn (insn);
3555
3556 if (op1 && REG_P (op1))
3557 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3558 REG_NOTES (insn));
3559
3560 if (op0 && REG_P (op0))
3561 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3562 REG_NOTES (insn));
3563 }
3564
3565 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3566 != CODE_FOR_nothing)
3567 {
3568 last = emit_move_insn (target, target);
3569 if (equiv)
3570 set_unique_reg_note (last, REG_EQUAL, equiv);
3571 }
3572 else
3573 {
3574 last = get_last_insn ();
3575
3576 /* Remove any existing REG_EQUAL note from "last", or else it will
3577 be mistaken for a note referring to the full contents of the
3578 alleged libcall value when found together with the REG_RETVAL
3579 note added below. An existing note can come from an insn
3580 expansion at "last". */
3581 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3582 }
3583
3584 if (prev == 0)
3585 first = get_insns ();
3586 else
3587 first = NEXT_INSN (prev);
3588
3589 maybe_encapsulate_block (first, last, equiv);
3590
3591 return last;
3592 }
3593 \f
3594 /* Emit code to make a call to a constant function or a library call.
3595
3596 INSNS is a list containing all insns emitted in the call.
3597 These insns leave the result in RESULT. Our job is to copy RESULT
3598 to TARGET, which is logically equivalent to EQUIV.
3599
3600 We first emit any insns that set a pseudo on the assumption that these are
3601 loading constants into registers; doing so allows them to be safely cse'ed
3602 between blocks. Then we emit all the other insns in the block, followed by
3603 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3604 note with an operand of EQUIV.
3605
3606 Moving assignments to pseudos outside of the block is done to improve
3607 the generated code, but is not required to generate correct code,
3608 hence being unable to move an assignment is not grounds for not making
3609 a libcall block. There are two reasons why it is safe to leave these
3610 insns inside the block: First, we know that these pseudos cannot be
3611 used in generated RTL outside the block since they are created for
3612 temporary purposes within the block. Second, CSE will not record the
3613 values of anything set inside a libcall block, so we know they must
3614 be dead at the end of the block.
3615
3616 Except for the first group of insns (the ones setting pseudos), the
3617 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
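/* An illustrative sketch (insn shapes and register numbers are
   hypothetical): expanding a DImode division through a library routine
   might hand this function a sequence roughly of the form

       (set (reg:DI 101) ...)            ; argument setup
       (call_insn ... "__divdi3" ...)    ; leaves the value in RESULT

   after which the copy of RESULT into TARGET emitted below carries the
   REG_EQUAL note holding EQUIV, e.g. (div:DI (reg:DI 101) (reg:DI 102)),
   and the REG_LIBCALL/REG_RETVAL notes delimit the whole block.  */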
3618 void
3619 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3620 {
3621 rtx final_dest = target;
3622 rtx prev, next, first, last, insn;
3623
3624 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3625 into a MEM later. Protect the libcall block from this change. */
3626 if (! REG_P (target) || REG_USERVAR_P (target))
3627 target = gen_reg_rtx (GET_MODE (target));
3628
3629 /* If we're using non-call exceptions, a libcall corresponding to an
3630 operation that may trap may also trap. */
3631 if (flag_non_call_exceptions && may_trap_p (equiv))
3632 {
3633 for (insn = insns; insn; insn = NEXT_INSN (insn))
3634 if (CALL_P (insn))
3635 {
3636 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3637
3638 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3639 remove_note (insn, note);
3640 }
3641 }
3642 else
3643 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3644 reg note to indicate that this call cannot throw or execute a nonlocal
3645 goto (unless there is already a REG_EH_REGION note, in which case
3646 we update it). */
3647 for (insn = insns; insn; insn = NEXT_INSN (insn))
3648 if (CALL_P (insn))
3649 {
3650 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3651
3652 if (note != 0)
3653 XEXP (note, 0) = constm1_rtx;
3654 else
3655 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3656 REG_NOTES (insn));
3657 }
3658
3659 /* First emit all insns that set pseudos. Remove them from the list as
3660 we go. Avoid insns that set pseudos which were referenced in previous
3661 insns. These can be generated by move_by_pieces, for example,
3662 to update an address. Similarly, avoid insns that reference things
3663 set in previous insns. */
3664
3665 for (insn = insns; insn; insn = next)
3666 {
3667 rtx set = single_set (insn);
3668 rtx note;
3669
3670 /* Some ports (cris) create libcall regions on their own. We must
3671 avoid any potential nesting of LIBCALLs. */
3672 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3673 remove_note (insn, note);
3674 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3675 remove_note (insn, note);
3676 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3677 remove_note (insn, note);
3678
3679 next = NEXT_INSN (insn);
3680
3681 if (set != 0 && REG_P (SET_DEST (set))
3682 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3683 {
3684 struct no_conflict_data data;
3685
3686 data.target = const0_rtx;
3687 data.first = insns;
3688 data.insn = insn;
3689 data.must_stay = 0;
3690 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3691 if (! data.must_stay)
3692 {
3693 if (PREV_INSN (insn))
3694 NEXT_INSN (PREV_INSN (insn)) = next;
3695 else
3696 insns = next;
3697
3698 if (next)
3699 PREV_INSN (next) = PREV_INSN (insn);
3700
3701 add_insn (insn);
3702 }
3703 }
3704
3705 /* Some ports use a loop to copy large arguments onto the stack.
3706 Don't move anything outside such a loop. */
3707 if (LABEL_P (insn))
3708 break;
3709 }
3710
3711 prev = get_last_insn ();
3712
3713 /* Write the remaining insns followed by the final copy. */
3714
3715 for (insn = insns; insn; insn = next)
3716 {
3717 next = NEXT_INSN (insn);
3718
3719 add_insn (insn);
3720 }
3721
3722 last = emit_move_insn (target, result);
3723 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3724 != CODE_FOR_nothing)
3725 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3726 else
3727 {
3728 /* Remove any existing REG_EQUAL note from "last", or else it will
3729 be mistaken for a note referring to the full contents of the
3730 libcall value when found together with the REG_RETVAL note added
3731 below. An existing note can come from an insn expansion at
3732 "last". */
3733 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3734 }
3735
3736 if (final_dest != target)
3737 emit_move_insn (final_dest, target);
3738
3739 if (prev == 0)
3740 first = get_insns ();
3741 else
3742 first = NEXT_INSN (prev);
3743
3744 maybe_encapsulate_block (first, last, equiv);
3745 }
3746 \f
3747 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3748 PURPOSE describes how this comparison will be used. CODE is the rtx
3749 comparison code we will be using.
3750
3751 ??? Actually, CODE is slightly weaker than that. A target is still
3752 required to implement all of the normal bcc operations, but not
3753 required to implement all (or any) of the unordered bcc operations. */
3754
3755 int
3756 can_compare_p (enum rtx_code code, enum machine_mode mode,
3757 enum can_compare_purpose purpose)
3758 {
3759 do
3760 {
3761 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3762 {
3763 if (purpose == ccp_jump)
3764 return bcc_gen_fctn[(int) code] != NULL;
3765 else if (purpose == ccp_store_flag)
3766 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3767 else
3768 /* There's only one cmov entry point, and it's allowed to fail. */
3769 return 1;
3770 }
3771 if (purpose == ccp_jump
3772 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3773 return 1;
3774 if (purpose == ccp_cmov
3775 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3776 return 1;
3777 if (purpose == ccp_store_flag
3778 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3779 return 1;
3780 mode = GET_MODE_WIDER_MODE (mode);
3781 }
3782 while (mode != VOIDmode);
3783
3784 return 0;
3785 }
3786
3787 /* This function is called when we are going to emit a compare instruction that
3788 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3789
3790 *PMODE is the mode of the inputs (in case they are const_int).
3791 *PUNSIGNEDP nonzero says that the operands are unsigned;
3792 this matters if they need to be widened.
3793
3794 If they have mode BLKmode, then SIZE specifies the size of both operands.
3795
3796 This function performs all the setup necessary so that the caller only has
3797 to emit a single comparison insn. This setup can involve doing a BLKmode
3798 comparison or emitting a library call to perform the comparison if no insn
3799 is available to handle it.
3800 The values which are passed in through pointers can be modified; the caller
3801 should perform the comparison on the modified values. Constant
3802 comparisons must have already been folded. */
3803
3804 static void
3805 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3806 enum machine_mode *pmode, int *punsignedp,
3807 enum can_compare_purpose purpose)
3808 {
3809 enum machine_mode mode = *pmode;
3810 rtx x = *px, y = *py;
3811 int unsignedp = *punsignedp;
3812
3813 /* If we are inside an appropriately-short loop and we are optimizing,
3814 force expensive constants into a register. */
3815 if (CONSTANT_P (x) && optimize
3816 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3817 x = force_reg (mode, x);
3818
3819 if (CONSTANT_P (y) && optimize
3820 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3821 y = force_reg (mode, y);
3822
3823 #ifdef HAVE_cc0
3824 /* Make sure we have a canonical comparison. The RTL
3825 documentation states that canonical comparisons are required only
3826 for targets which have cc0. */
3827 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3828 #endif
3829
3830 /* Don't let both operands fail to indicate the mode. */
3831 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3832 x = force_reg (mode, x);
3833
3834 /* Handle all BLKmode compares. */
3835
3836 if (mode == BLKmode)
3837 {
3838 enum machine_mode cmp_mode, result_mode;
3839 enum insn_code cmp_code;
3840 tree length_type;
3841 rtx libfunc;
3842 rtx result;
3843 rtx opalign
3844 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3845
3846 gcc_assert (size);
3847
3848 /* Try to use a memory block compare insn - cmpstr, cmpstrn
3849 or cmpmem will do. */
3850 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3851 cmp_mode != VOIDmode;
3852 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3853 {
3854 cmp_code = cmpmem_optab[cmp_mode];
3855 if (cmp_code == CODE_FOR_nothing)
3856 cmp_code = cmpstr_optab[cmp_mode];
3857 if (cmp_code == CODE_FOR_nothing)
3858 cmp_code = cmpstrn_optab[cmp_mode];
3859 if (cmp_code == CODE_FOR_nothing)
3860 continue;
3861
3862 /* Must make sure the size fits the insn's mode. */
3863 if ((GET_CODE (size) == CONST_INT
3864 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3865 || (GET_MODE_BITSIZE (GET_MODE (size))
3866 > GET_MODE_BITSIZE (cmp_mode)))
3867 continue;
3868
3869 result_mode = insn_data[cmp_code].operand[0].mode;
3870 result = gen_reg_rtx (result_mode);
3871 size = convert_to_mode (cmp_mode, size, 1);
3872 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3873
3874 *px = result;
3875 *py = const0_rtx;
3876 *pmode = result_mode;
3877 return;
3878 }
3879
3880 /* Otherwise call a library function, memcmp. */
3881 libfunc = memcmp_libfunc;
3882 length_type = sizetype;
3883 result_mode = TYPE_MODE (integer_type_node);
3884 cmp_mode = TYPE_MODE (length_type);
3885 size = convert_to_mode (TYPE_MODE (length_type), size,
3886 TYPE_UNSIGNED (length_type));
3887
3888 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3889 result_mode, 3,
3890 XEXP (x, 0), Pmode,
3891 XEXP (y, 0), Pmode,
3892 size, cmp_mode);
3893 *px = result;
3894 *py = const0_rtx;
3895 *pmode = result_mode;
3896 return;
3897 }
3898
3899 /* Don't allow operands to the compare to trap, as that can put the
3900 compare and branch in different basic blocks. */
3901 if (flag_non_call_exceptions)
3902 {
3903 if (may_trap_p (x))
3904 x = force_reg (mode, x);
3905 if (may_trap_p (y))
3906 y = force_reg (mode, y);
3907 }
3908
3909 *px = x;
3910 *py = y;
3911 if (can_compare_p (*pcomparison, mode, purpose))
3912 return;
3913
3914 /* Handle a lib call just for the mode we are using. */
3915
3916 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3917 {
3918 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3919 rtx result;
3920
3921 /* If we want unsigned, and this mode has a distinct unsigned
3922 comparison routine, use that. */
3923 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3924 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3925
3926 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3927 targetm.libgcc_cmp_return_mode (),
3928 2, x, mode, y, mode);
3929
3930 /* There are two kinds of comparison routines. Biased routines
3931 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3932 of gcc expect that the comparison operation is equivalent
3933 to the modified comparison. For signed comparisons compare the
3934 result against 1 in the biased case, and zero in the unbiased
3935 case. For unsigned comparisons always compare against 1 after
3936 biasing the unbiased result by adding 1. This gives us a way to
3937 represent LTU. */
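/* Worked example for X < Y: a biased routine (e.g. __cmpdi2) returns 0
   when X < Y, so the signed test becomes RESULT < 1; an unbiased
   routine returns -1, so the signed test is RESULT < 0, while the
   unsigned test adds 1 first and checks RESULT + 1 <u 1, which only
   holds when the routine returned -1.  */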
3938 *px = result;
3939 *pmode = word_mode;
3940 *py = const1_rtx;
3941
3942 if (!TARGET_LIB_INT_CMP_BIASED)
3943 {
3944 if (*punsignedp)
3945 *px = plus_constant (result, 1);
3946 else
3947 *py = const0_rtx;
3948 }
3949 return;
3950 }
3951
3952 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3953 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3954 }
3955
3956 /* Before emitting an insn with code ICODE, make sure that X, which is going
3957 to be used for operand OPNUM of the insn, is converted from mode MODE to
3958 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3959 that it is accepted by the operand predicate. Return the new value. */
3960
3961 static rtx
3962 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3963 enum machine_mode wider_mode, int unsignedp)
3964 {
3965 if (mode != wider_mode)
3966 x = convert_modes (wider_mode, mode, x, unsignedp);
3967
3968 if (!insn_data[icode].operand[opnum].predicate
3969 (x, insn_data[icode].operand[opnum].mode))
3970 {
3971 if (reload_completed)
3972 return NULL_RTX;
3973 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3974 }
3975
3976 return x;
3977 }
3978
3979 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3980 we can do the comparison.
3981 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3982 be NULL_RTX which indicates that only a comparison is to be generated. */
3983
3984 static void
3985 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3986 enum rtx_code comparison, int unsignedp, rtx label)
3987 {
3988 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3989 enum mode_class class = GET_MODE_CLASS (mode);
3990 enum machine_mode wider_mode = mode;
3991
3992 /* Try combined insns first. */
3993 do
3994 {
3995 enum insn_code icode;
3996 PUT_MODE (test, wider_mode);
3997
3998 if (label)
3999 {
4000 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
4001
4002 if (icode != CODE_FOR_nothing
4003 && insn_data[icode].operand[0].predicate (test, wider_mode))
4004 {
4005 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4006 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4007 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4008 return;
4009 }
4010 }
4011
4012 /* Handle some compares against zero. */
4013 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
4014 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4015 {
4016 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4017 emit_insn (GEN_FCN (icode) (x));
4018 if (label)
4019 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4020 return;
4021 }
4022
4023 /* Handle compares for which there is a directly suitable insn. */
4024
4025 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
4026 if (icode != CODE_FOR_nothing)
4027 {
4028 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4029 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4030 emit_insn (GEN_FCN (icode) (x, y));
4031 if (label)
4032 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4033 return;
4034 }
4035
4036 if (!CLASS_HAS_WIDER_MODES_P (class))
4037 break;
4038
4039 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4040 }
4041 while (wider_mode != VOIDmode);
4042
4043 gcc_unreachable ();
4044 }
4045
4046 /* Generate code to compare X with Y so that the condition codes are
4047 set and to jump to LABEL if the condition is true. If X is a
4048 constant and Y is not a constant, then the comparison is swapped to
4049 ensure that the comparison RTL has the canonical form.
4050
4051 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4052 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4053 the proper branch condition code.
4054
4055 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4056
4057 MODE is the mode of the inputs (in case they are const_int).
4058
4059 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4060 be passed unchanged to emit_cmp_insn, then potentially converted into an
4061 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
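/* A typical use (purely illustrative): to branch to LABEL when the
   SImode value X is less than Y, with both treated as signed:

       emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 0, label);  */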
4062
4063 void
4064 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4065 enum machine_mode mode, int unsignedp, rtx label)
4066 {
4067 rtx op0 = x, op1 = y;
4068
4069 /* Swap operands and condition to ensure canonical RTL. */
4070 if (swap_commutative_operands_p (x, y))
4071 {
4072 /* If we're not emitting a branch, callers are required to pass
4073 operands in an order conforming to canonical RTL. We relax this
4074 for commutative comparisons so callers using EQ don't need to do
4075 swapping by hand. */
4076 gcc_assert (label || (comparison == swap_condition (comparison)));
4077
4078 op0 = y, op1 = x;
4079 comparison = swap_condition (comparison);
4080 }
4081
4082 #ifdef HAVE_cc0
4083 /* If OP0 is still a constant, then both X and Y must be constants.
4084 Force X into a register to create canonical RTL. */
4085 if (CONSTANT_P (op0))
4086 op0 = force_reg (mode, op0);
4087 #endif
4088
4089 if (unsignedp)
4090 comparison = unsigned_condition (comparison);
4091
4092 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4093 ccp_jump);
4094 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4095 }
4096
4097 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4098
4099 void
4100 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4101 enum machine_mode mode, int unsignedp)
4102 {
4103 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4104 }
4105 \f
4106 /* Emit a library call comparison between floating point X and Y.
4107 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
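/* For example, an SFmode A < B on a target without a floating point
   compare insn is typically rewritten as a call to __ltsf2 followed by
   a test of the call's result against zero; the exact libfunc and the
   bias of its return value depend on the target's libgcc setup.  */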
4108
4109 static void
4110 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4111 enum machine_mode *pmode, int *punsignedp)
4112 {
4113 enum rtx_code comparison = *pcomparison;
4114 enum rtx_code swapped = swap_condition (comparison);
4115 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4116 rtx x = *px;
4117 rtx y = *py;
4118 enum machine_mode orig_mode = GET_MODE (x);
4119 enum machine_mode mode;
4120 rtx value, target, insns, equiv;
4121 rtx libfunc = 0;
4122 bool reversed_p = false;
4123
4124 for (mode = orig_mode;
4125 mode != VOIDmode;
4126 mode = GET_MODE_WIDER_MODE (mode))
4127 {
4128 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4129 break;
4130
4131 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4132 {
4133 rtx tmp;
4134 tmp = x; x = y; y = tmp;
4135 comparison = swapped;
4136 break;
4137 }
4138
4139 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4140 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4141 {
4142 comparison = reversed;
4143 reversed_p = true;
4144 break;
4145 }
4146 }
4147
4148 gcc_assert (mode != VOIDmode);
4149
4150 if (mode != orig_mode)
4151 {
4152 x = convert_to_mode (mode, x, 0);
4153 y = convert_to_mode (mode, y, 0);
4154 }
4155
4156 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4157 the RTL. This allows the RTL optimizers to delete the libcall if the
4158 condition can be determined at compile-time. */
4159 if (comparison == UNORDERED)
4160 {
4161 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4162 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4163 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4164 temp, const_true_rtx, equiv);
4165 }
4166 else
4167 {
4168 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4169 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4170 {
4171 rtx true_rtx, false_rtx;
4172
4173 switch (comparison)
4174 {
4175 case EQ:
4176 true_rtx = const0_rtx;
4177 false_rtx = const_true_rtx;
4178 break;
4179
4180 case NE:
4181 true_rtx = const_true_rtx;
4182 false_rtx = const0_rtx;
4183 break;
4184
4185 case GT:
4186 true_rtx = const1_rtx;
4187 false_rtx = const0_rtx;
4188 break;
4189
4190 case GE:
4191 true_rtx = const0_rtx;
4192 false_rtx = constm1_rtx;
4193 break;
4194
4195 case LT:
4196 true_rtx = constm1_rtx;
4197 false_rtx = const0_rtx;
4198 break;
4199
4200 case LE:
4201 true_rtx = const0_rtx;
4202 false_rtx = const1_rtx;
4203 break;
4204
4205 default:
4206 gcc_unreachable ();
4207 }
4208 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4209 equiv, true_rtx, false_rtx);
4210 }
4211 }
4212
4213 start_sequence ();
4214 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4215 word_mode, 2, x, mode, y, mode);
4216 insns = get_insns ();
4217 end_sequence ();
4218
4219 target = gen_reg_rtx (word_mode);
4220 emit_libcall_block (insns, target, value, equiv);
4221
4222 if (comparison == UNORDERED
4223 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4224 comparison = reversed_p ? EQ : NE;
4225
4226 *px = target;
4227 *py = const0_rtx;
4228 *pmode = word_mode;
4229 *pcomparison = comparison;
4230 *punsignedp = 0;
4231 }
4232 \f
4233 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4234
4235 void
4236 emit_indirect_jump (rtx loc)
4237 {
4238 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4239 (loc, Pmode))
4240 loc = copy_to_mode_reg (Pmode, loc);
4241
4242 emit_jump_insn (gen_indirect_jump (loc));
4243 emit_barrier ();
4244 }
4245 \f
4246 #ifdef HAVE_conditional_move
4247
4248 /* Emit a conditional move instruction if the machine supports one for that
4249 condition and machine mode.
4250
4251 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4252 the mode to use should they be constants. If it is VOIDmode, they cannot
4253 both be constants.
4254
4255 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4256 should be stored there. MODE is the mode to use should they be constants.
4257 If it is VOIDmode, they cannot both be constants.
4258
4259 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4260 is not supported. */
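/* A hypothetical usage sketch: computing the signed MAX of two SImode
   registers A and B into TARGET when a conditional move is available:

       rtx r = emit_conditional_move (target, GT, a, b, SImode,
                                      a, b, SImode, 0);
       if (r == NULL_RTX)
         ...fall back to a compare-and-branch sequence...  */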
4261
4262 rtx
4263 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4264 enum machine_mode cmode, rtx op2, rtx op3,
4265 enum machine_mode mode, int unsignedp)
4266 {
4267 rtx tem, subtarget, comparison, insn;
4268 enum insn_code icode;
4269 enum rtx_code reversed;
4270
4271 /* If one operand is constant, make it the second one. Only do this
4272 if the other operand is not constant as well. */
4273
4274 if (swap_commutative_operands_p (op0, op1))
4275 {
4276 tem = op0;
4277 op0 = op1;
4278 op1 = tem;
4279 code = swap_condition (code);
4280 }
4281
4282 /* get_condition will prefer to generate LT and GT even if the old
4283 comparison was against zero, so undo that canonicalization here since
4284 comparisons against zero are cheaper. */
4285 if (code == LT && op1 == const1_rtx)
4286 code = LE, op1 = const0_rtx;
4287 else if (code == GT && op1 == constm1_rtx)
4288 code = GE, op1 = const0_rtx;
4289
4290 if (cmode == VOIDmode)
4291 cmode = GET_MODE (op0);
4292
4293 if (swap_commutative_operands_p (op2, op3)
4294 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4295 != UNKNOWN))
4296 {
4297 tem = op2;
4298 op2 = op3;
4299 op3 = tem;
4300 code = reversed;
4301 }
4302
4303 if (mode == VOIDmode)
4304 mode = GET_MODE (op2);
4305
4306 icode = movcc_gen_code[mode];
4307
4308 if (icode == CODE_FOR_nothing)
4309 return 0;
4310
4311 if (!target)
4312 target = gen_reg_rtx (mode);
4313
4314 subtarget = target;
4315
4316 /* If the insn doesn't accept these operands, put them in pseudos. */
4317
4318 if (!insn_data[icode].operand[0].predicate
4319 (subtarget, insn_data[icode].operand[0].mode))
4320 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4321
4322 if (!insn_data[icode].operand[2].predicate
4323 (op2, insn_data[icode].operand[2].mode))
4324 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4325
4326 if (!insn_data[icode].operand[3].predicate
4327 (op3, insn_data[icode].operand[3].mode))
4328 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4329
4330 /* Everything should now be in the suitable form, so emit the compare insn
4331 and then the conditional move. */
4332
4333 comparison
4334 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4335
4336 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4337 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4338 return NULL and let the caller figure out how best to deal with this
4339 situation. */
4340 if (GET_CODE (comparison) != code)
4341 return NULL_RTX;
4342
4343 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4344
4345 /* If that failed, then give up. */
4346 if (insn == 0)
4347 return 0;
4348
4349 emit_insn (insn);
4350
4351 if (subtarget != target)
4352 convert_move (target, subtarget, 0);
4353
4354 return target;
4355 }
4356
4357 /* Return nonzero if a conditional move of mode MODE is supported.
4358
4359 This function is for combine so it can tell whether an insn that looks
4360 like a conditional move is actually supported by the hardware. If we
4361 guess wrong we lose a bit on optimization, but that's it. */
4362 /* ??? sparc64 supports conditionally moving integer values based on fp
4363 comparisons, and vice versa. How do we handle them? */
4364
4365 int
4366 can_conditionally_move_p (enum machine_mode mode)
4367 {
4368 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4369 return 1;
4370
4371 return 0;
4372 }
4373
4374 #endif /* HAVE_conditional_move */
4375
4376 /* Emit a conditional addition instruction if the machine supports one for that
4377 condition and machine mode.
4378
4379 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4380 the mode to use should they be constants. If it is VOIDmode, they cannot
4381 both be constants.
4382
4383 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4384 should be stored there. MODE is the mode to use should they be constants.
4385 If it is VOIDmode, they cannot both be constants.
4386
4387 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4388 is not supported. */
4389
4390 rtx
4391 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4392 enum machine_mode cmode, rtx op2, rtx op3,
4393 enum machine_mode mode, int unsignedp)
4394 {
4395 rtx tem, subtarget, comparison, insn;
4396 enum insn_code icode;
4397 enum rtx_code reversed;
4398
4399 /* If one operand is constant, make it the second one. Only do this
4400 if the other operand is not constant as well. */
4401
4402 if (swap_commutative_operands_p (op0, op1))
4403 {
4404 tem = op0;
4405 op0 = op1;
4406 op1 = tem;
4407 code = swap_condition (code);
4408 }
4409
4410 /* get_condition will prefer to generate LT and GT even if the old
4411 comparison was against zero, so undo that canonicalization here since
4412 comparisons against zero are cheaper. */
4413 if (code == LT && op1 == const1_rtx)
4414 code = LE, op1 = const0_rtx;
4415 else if (code == GT && op1 == constm1_rtx)
4416 code = GE, op1 = const0_rtx;
4417
4418 if (cmode == VOIDmode)
4419 cmode = GET_MODE (op0);
4420
4421 if (swap_commutative_operands_p (op2, op3)
4422 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4423 != UNKNOWN))
4424 {
4425 tem = op2;
4426 op2 = op3;
4427 op3 = tem;
4428 code = reversed;
4429 }
4430
4431 if (mode == VOIDmode)
4432 mode = GET_MODE (op2);
4433
4434 icode = addcc_optab->handlers[(int) mode].insn_code;
4435
4436 if (icode == CODE_FOR_nothing)
4437 return 0;
4438
4439 if (!target)
4440 target = gen_reg_rtx (mode);
4441
4442 /* If the insn doesn't accept these operands, put them in pseudos. */
4443
4444 if (!insn_data[icode].operand[0].predicate
4445 (target, insn_data[icode].operand[0].mode))
4446 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4447 else
4448 subtarget = target;
4449
4450 if (!insn_data[icode].operand[2].predicate
4451 (op2, insn_data[icode].operand[2].mode))
4452 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4453
4454 if (!insn_data[icode].operand[3].predicate
4455 (op3, insn_data[icode].operand[3].mode))
4456 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4457
4458 /* Everything should now be in the suitable form, so emit the compare insn
4459 and then the conditional move. */
4460
4461 comparison
4462 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4463
4464 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4465 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4466 return NULL and let the caller figure out how best to deal with this
4467 situation. */
4468 if (GET_CODE (comparison) != code)
4469 return NULL_RTX;
4470
4471 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4472
4473 /* If that failed, then give up. */
4474 if (insn == 0)
4475 return 0;
4476
4477 emit_insn (insn);
4478
4479 if (subtarget != target)
4480 convert_move (target, subtarget, 0);
4481
4482 return target;
4483 }
4484 \f
4485 /* These functions attempt to generate an insn body, rather than
4486 emitting the insn, but if the gen function already emits them, we
4487 make no attempt to turn them back into naked patterns. */
4488
4489 /* Generate and return an insn body to add Y to X. */
4490
4491 rtx
4492 gen_add2_insn (rtx x, rtx y)
4493 {
4494 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4495
4496 gcc_assert (insn_data[icode].operand[0].predicate
4497 (x, insn_data[icode].operand[0].mode));
4498 gcc_assert (insn_data[icode].operand[1].predicate
4499 (x, insn_data[icode].operand[1].mode));
4500 gcc_assert (insn_data[icode].operand[2].predicate
4501 (y, insn_data[icode].operand[2].mode));
4502
4503 return GEN_FCN (icode) (x, x, y);
4504 }
4505
4506 /* Generate and return an insn body to add r1 and c,
4507 storing the result in r0. */
4508 rtx
4509 gen_add3_insn (rtx r0, rtx r1, rtx c)
4510 {
4511 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4512
4513 if (icode == CODE_FOR_nothing
4514 || !(insn_data[icode].operand[0].predicate
4515 (r0, insn_data[icode].operand[0].mode))
4516 || !(insn_data[icode].operand[1].predicate
4517 (r1, insn_data[icode].operand[1].mode))
4518 || !(insn_data[icode].operand[2].predicate
4519 (c, insn_data[icode].operand[2].mode)))
4520 return NULL_RTX;
4521
4522 return GEN_FCN (icode) (r0, r1, c);
4523 }
4524
4525 int
4526 have_add2_insn (rtx x, rtx y)
4527 {
4528 int icode;
4529
4530 gcc_assert (GET_MODE (x) != VOIDmode);
4531
4532 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4533
4534 if (icode == CODE_FOR_nothing)
4535 return 0;
4536
4537 if (!(insn_data[icode].operand[0].predicate
4538 (x, insn_data[icode].operand[0].mode))
4539 || !(insn_data[icode].operand[1].predicate
4540 (x, insn_data[icode].operand[1].mode))
4541 || !(insn_data[icode].operand[2].predicate
4542 (y, insn_data[icode].operand[2].mode)))
4543 return 0;
4544
4545 return 1;
4546 }
4547
4548 /* Generate and return an insn body to subtract Y from X. */
4549
4550 rtx
4551 gen_sub2_insn (rtx x, rtx y)
4552 {
4553 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4554
4555 gcc_assert (insn_data[icode].operand[0].predicate
4556 (x, insn_data[icode].operand[0].mode));
4557 gcc_assert (insn_data[icode].operand[1].predicate
4558 (x, insn_data[icode].operand[1].mode));
4559 gcc_assert (insn_data[icode].operand[2].predicate
4560 (y, insn_data[icode].operand[2].mode));
4561
4562 return GEN_FCN (icode) (x, x, y);
4563 }
4564
4565 /* Generate and return an insn body to subtract c from r1,
4566 storing the result in r0. */
4567 rtx
4568 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4569 {
4570 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4571
4572 if (icode == CODE_FOR_nothing
4573 || !(insn_data[icode].operand[0].predicate
4574 (r0, insn_data[icode].operand[0].mode))
4575 || !(insn_data[icode].operand[1].predicate
4576 (r1, insn_data[icode].operand[1].mode))
4577 || !(insn_data[icode].operand[2].predicate
4578 (c, insn_data[icode].operand[2].mode)))
4579 return NULL_RTX;
4580
4581 return GEN_FCN (icode) (r0, r1, c);
4582 }
4583
4584 int
4585 have_sub2_insn (rtx x, rtx y)
4586 {
4587 int icode;
4588
4589 gcc_assert (GET_MODE (x) != VOIDmode);
4590
4591 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4592
4593 if (icode == CODE_FOR_nothing)
4594 return 0;
4595
4596 if (!(insn_data[icode].operand[0].predicate
4597 (x, insn_data[icode].operand[0].mode))
4598 || !(insn_data[icode].operand[1].predicate
4599 (x, insn_data[icode].operand[1].mode))
4600 || !(insn_data[icode].operand[2].predicate
4601 (y, insn_data[icode].operand[2].mode)))
4602 return 0;
4603
4604 return 1;
4605 }
4606
4607 /* Generate the body of an instruction to copy Y into X.
4608 It may be a list of insns, if one insn isn't enough. */
4609
4610 rtx
4611 gen_move_insn (rtx x, rtx y)
4612 {
4613 rtx seq;
4614
4615 start_sequence ();
4616 emit_move_insn_1 (x, y);
4617 seq = get_insns ();
4618 end_sequence ();
4619 return seq;
4620 }
4621 \f
4622 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4623 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4624 no such operation exists, CODE_FOR_nothing will be returned. */
4625
4626 enum insn_code
4627 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4628 int unsignedp)
4629 {
4630 convert_optab tab;
4631 #ifdef HAVE_ptr_extend
4632 if (unsignedp < 0)
4633 return CODE_FOR_ptr_extend;
4634 #endif
4635
4636 tab = unsignedp ? zext_optab : sext_optab;
4637 return tab->handlers[to_mode][from_mode].insn_code;
4638 }
4639
4640 /* Generate the body of an insn to extend Y (with mode MFROM)
4641 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4642
4643 rtx
4644 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4645 enum machine_mode mfrom, int unsignedp)
4646 {
4647 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4648 return GEN_FCN (icode) (x, y);
4649 }
4650 \f
4651 /* can_fix_p and can_float_p say whether the target machine
4652 can directly convert a given fixed point type to
4653 a given floating point type, or vice versa.
4654 The returned value is the CODE_FOR_... value to use,
4655 or CODE_FOR_nothing if these modes cannot be directly converted.
4656
4657 *TRUNCP_PTR is set to 1 if it is necessary to output
4658 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4659
4660 static enum insn_code
4661 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4662 int unsignedp, int *truncp_ptr)
4663 {
4664 convert_optab tab;
4665 enum insn_code icode;
4666
4667 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4668 icode = tab->handlers[fixmode][fltmode].insn_code;
4669 if (icode != CODE_FOR_nothing)
4670 {
4671 *truncp_ptr = 0;
4672 return icode;
4673 }
4674
4675 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4676 for this to work. We need to rework the fix* and ftrunc* patterns
4677 and documentation. */
4678 tab = unsignedp ? ufix_optab : sfix_optab;
4679 icode = tab->handlers[fixmode][fltmode].insn_code;
4680 if (icode != CODE_FOR_nothing
4681 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4682 {
4683 *truncp_ptr = 1;
4684 return icode;
4685 }
4686
4687 *truncp_ptr = 0;
4688 return CODE_FOR_nothing;
4689 }
4690
4691 static enum insn_code
4692 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4693 int unsignedp)
4694 {
4695 convert_optab tab;
4696
4697 tab = unsignedp ? ufloat_optab : sfloat_optab;
4698 return tab->handlers[fltmode][fixmode].insn_code;
4699 }
4700 \f
4701 /* Generate code to convert FROM to floating point
4702 and store in TO. FROM must be fixed point and not VOIDmode.
4703 UNSIGNEDP nonzero means regard FROM as unsigned.
4704 Normally this is done by correcting the final value
4705 if it is negative. */
4706
4707 void
4708 expand_float (rtx to, rtx from, int unsignedp)
4709 {
4710 enum insn_code icode;
4711 rtx target = to;
4712 enum machine_mode fmode, imode;
4713 bool can_do_signed = false;
4714
4715 /* Crash now, because we won't be able to decide which mode to use. */
4716 gcc_assert (GET_MODE (from) != VOIDmode);
4717
4718 /* Look for an insn to do the conversion. Do it in the specified
4719 modes if possible; otherwise convert either input, output or both to
4720 wider mode. If the integer mode is wider than the mode of FROM,
4721 we can do the conversion signed even if the input is unsigned. */
4722
4723 for (fmode = GET_MODE (to); fmode != VOIDmode;
4724 fmode = GET_MODE_WIDER_MODE (fmode))
4725 for (imode = GET_MODE (from); imode != VOIDmode;
4726 imode = GET_MODE_WIDER_MODE (imode))
4727 {
4728 int doing_unsigned = unsignedp;
4729
4730 if (fmode != GET_MODE (to)
4731 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4732 continue;
4733
4734 icode = can_float_p (fmode, imode, unsignedp);
4735 if (icode == CODE_FOR_nothing && unsignedp)
4736 {
4737 enum insn_code scode = can_float_p (fmode, imode, 0);
4738 if (scode != CODE_FOR_nothing)
4739 can_do_signed = true;
4740 if (imode != GET_MODE (from))
4741 icode = scode, doing_unsigned = 0;
4742 }
4743
4744 if (icode != CODE_FOR_nothing)
4745 {
4746 if (imode != GET_MODE (from))
4747 from = convert_to_mode (imode, from, unsignedp);
4748
4749 if (fmode != GET_MODE (to))
4750 target = gen_reg_rtx (fmode);
4751
4752 emit_unop_insn (icode, target, from,
4753 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4754
4755 if (target != to)
4756 convert_move (to, target, 0);
4757 return;
4758 }
4759 }
4760
4761 /* Unsigned integer, and no way to convert directly. For binary
4762 floating point modes, convert as signed, then conditionally adjust
4763 the result. */
4764 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4765 {
4766 rtx label = gen_label_rtx ();
4767 rtx temp;
4768 REAL_VALUE_TYPE offset;
4769
4770 /* Look for a usable floating mode FMODE wider than the source and at
4771 least as wide as the target. Using FMODE will avoid rounding woes
4772 with unsigned values greater than the signed maximum value. */
4773
4774 for (fmode = GET_MODE (to); fmode != VOIDmode;
4775 fmode = GET_MODE_WIDER_MODE (fmode))
4776 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4777 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4778 break;
4779
4780 if (fmode == VOIDmode)
4781 {
4782 /* There is no such mode. Pretend the target is wide enough. */
4783 fmode = GET_MODE (to);
4784
4785 /* Avoid double-rounding when TO is narrower than FROM. */
4786 if ((significand_size (fmode) + 1)
4787 < GET_MODE_BITSIZE (GET_MODE (from)))
4788 {
4789 rtx temp1;
4790 rtx neglabel = gen_label_rtx ();
4791
4792 /* Don't use TARGET if it isn't a register, is a hard register,
4793 or is the wrong mode. */
4794 if (!REG_P (target)
4795 || REGNO (target) < FIRST_PSEUDO_REGISTER
4796 || GET_MODE (target) != fmode)
4797 target = gen_reg_rtx (fmode);
4798
4799 imode = GET_MODE (from);
4800 do_pending_stack_adjust ();
4801
4802 /* Test whether the sign bit is set. */
4803 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4804 0, neglabel);
4805
4806 /* The sign bit is not set. Convert as signed. */
4807 expand_float (target, from, 0);
4808 emit_jump_insn (gen_jump (label));
4809 emit_barrier ();
4810
4811 /* The sign bit is set.
4812 Convert to a usable (positive signed) value by shifting right
4813 one bit, while remembering if a nonzero bit was shifted
4814 out; i.e., compute (from & 1) | (from >> 1). */
4815
4816 emit_label (neglabel);
4817 temp = expand_binop (imode, and_optab, from, const1_rtx,
4818 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4819 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4820 NULL_RTX, 1);
4821 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4822 OPTAB_LIB_WIDEN);
4823 expand_float (target, temp, 0);
4824
4825 /* Multiply by 2 to undo the shift above. */
4826 temp = expand_binop (fmode, add_optab, target, target,
4827 target, 0, OPTAB_LIB_WIDEN);
4828 if (temp != target)
4829 emit_move_insn (target, temp);
4830
4831 do_pending_stack_adjust ();
4832 emit_label (label);
4833 goto done;
4834 }
4835 }
4836
4837 /* If we are about to do some arithmetic to correct for an
4838 unsigned operand, do it in a pseudo-register. */
4839
4840 if (GET_MODE (to) != fmode
4841 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4842 target = gen_reg_rtx (fmode);
4843
4844 /* Convert as signed integer to floating. */
4845 expand_float (target, from, 0);
4846
4847 /* If FROM is negative (and therefore TO is negative),
4848 correct its value by 2**bitwidth. */
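/* Worked example for SImode: the unsigned input 0xFFFFFFFF is first
   converted as the signed value -1, giving -1.0; since that is
   negative we add 2**32, yielding 4294967295.0, the correct result.  */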
4849
4850 do_pending_stack_adjust ();
4851 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4852 0, label);
4853
4854
4855 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4856 temp = expand_binop (fmode, add_optab, target,
4857 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4858 target, 0, OPTAB_LIB_WIDEN);
4859 if (temp != target)
4860 emit_move_insn (target, temp);
4861
4862 do_pending_stack_adjust ();
4863 emit_label (label);
4864 goto done;
4865 }
4866
4867 /* No hardware instruction available; call a library routine. */
4868 {
4869 rtx libfunc;
4870 rtx insns;
4871 rtx value;
4872 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4873
4874 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4875 from = convert_to_mode (SImode, from, unsignedp);
4876
4877 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4878 gcc_assert (libfunc);
4879
4880 start_sequence ();
4881
4882 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4883 GET_MODE (to), 1, from,
4884 GET_MODE (from));
4885 insns = get_insns ();
4886 end_sequence ();
4887
4888 emit_libcall_block (insns, target, value,
4889 gen_rtx_FLOAT (GET_MODE (to), from));
4890 }
4891
4892 done:
4893
4894 /* Copy result to requested destination
4895 if we have been computing in a temp location. */
4896
4897 if (target != to)
4898 {
4899 if (GET_MODE (target) == GET_MODE (to))
4900 emit_move_insn (to, target);
4901 else
4902 convert_move (to, target, 0);
4903 }
4904 }
4905 \f
4906 /* Generate code to convert FROM to fixed point and store in TO. FROM
4907 must be floating point. */
4908
4909 void
4910 expand_fix (rtx to, rtx from, int unsignedp)
4911 {
4912 enum insn_code icode;
4913 rtx target = to;
4914 enum machine_mode fmode, imode;
4915 int must_trunc = 0;
4916
4917 /* We first try to find a pair of modes, one real and one integer, at
4918 least as wide as FROM and TO, respectively, in which we can open-code
4919 this conversion. If the integer mode is wider than the mode of TO,
4920 we can do the conversion either signed or unsigned. */
4921
4922 for (fmode = GET_MODE (from); fmode != VOIDmode;
4923 fmode = GET_MODE_WIDER_MODE (fmode))
4924 for (imode = GET_MODE (to); imode != VOIDmode;
4925 imode = GET_MODE_WIDER_MODE (imode))
4926 {
4927 int doing_unsigned = unsignedp;
4928
4929 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4930 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4931 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4932
4933 if (icode != CODE_FOR_nothing)
4934 {
4935 if (fmode != GET_MODE (from))
4936 from = convert_to_mode (fmode, from, 0);
4937
4938 if (must_trunc)
4939 {
4940 rtx temp = gen_reg_rtx (GET_MODE (from));
4941 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4942 temp, 0);
4943 }
4944
4945 if (imode != GET_MODE (to))
4946 target = gen_reg_rtx (imode);
4947
4948 emit_unop_insn (icode, target, from,
4949 doing_unsigned ? UNSIGNED_FIX : FIX);
4950 if (target != to)
4951 convert_move (to, target, unsignedp);
4952 return;
4953 }
4954 }
4955
4956 /* For an unsigned conversion, there is one more way to do it.
4957 If a signed conversion is available, we generate code that compares
4958 the real value to the largest representable positive number. If it
4959 is smaller, the conversion is done normally. Otherwise, subtract
4960 one plus the highest signed number, convert, and add it back.
4961
4962 We only need to check all real modes, since we know we didn't find
4963 anything with a wider integer mode.
4964
4965 This code used to extend the FP value into a mode wider than the
4966 destination. This is not needed. Consider, for instance, conversion
4967 from SFmode into DImode.
4968
4969 The hot path through the code is dealing with inputs smaller than 2^63
4970 and doing just the conversion, so there are no bits to lose.
4971
4972 In the other path we know the value is positive in the range 2^63..2^64-1
4973 inclusive (for any other input, overflow happens and the result is undefined).
4974 So we know that the most significant bit set in the mantissa corresponds to
4975 2^63. The subtraction of 2^63 should not generate any rounding as it
4976 simply clears out that bit. The rest is trivial. */
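/* Worked example for a DFmode -> unsigned DImode conversion: an input
   of 2^63 + 4096 (representable exactly in DFmode) compares >= 2^63,
   so we subtract 2^63 (exact, since it merely clears the top bit),
   fix the remaining 4096.0 as a signed value, and XOR the result with
   1 << 63, giving 0x8000000000001000 = 2^63 + 4096 as the unsigned
   result.  */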
4977
4978 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4979 for (fmode = GET_MODE (from); fmode != VOIDmode;
4980 fmode = GET_MODE_WIDER_MODE (fmode))
4981 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4982 &must_trunc))
4983 {
4984 int bitsize;
4985 REAL_VALUE_TYPE offset;
4986 rtx limit, lab1, lab2, insn;
4987
4988 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4989 real_2expN (&offset, bitsize - 1);
4990 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4991 lab1 = gen_label_rtx ();
4992 lab2 = gen_label_rtx ();
4993
4994 if (fmode != GET_MODE (from))
4995 from = convert_to_mode (fmode, from, 0);
4996
4997 /* See if we need to do the subtraction. */
4998 do_pending_stack_adjust ();
4999 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5000 0, lab1);
5001
5002 /* If not, do the signed "fix" and branch around fixup code. */
5003 expand_fix (to, from, 0);
5004 emit_jump_insn (gen_jump (lab2));
5005 emit_barrier ();
5006
5007 /* Otherwise, subtract 2**(N-1), convert to signed number,
5008 then add 2**(N-1). Do the addition using XOR since this
5009 will often generate better code. */
5010 emit_label (lab1);
5011 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5012 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5013 expand_fix (to, target, 0);
5014 target = expand_binop (GET_MODE (to), xor_optab, to,
5015 gen_int_mode
5016 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5017 GET_MODE (to)),
5018 to, 1, OPTAB_LIB_WIDEN);
5019
5020 if (target != to)
5021 emit_move_insn (to, target);
5022
5023 emit_label (lab2);
5024
5025 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
5026 != CODE_FOR_nothing)
5027 {
5028 /* Make a place for a REG_NOTE and add it. */
5029 insn = emit_move_insn (to, to);
5030 set_unique_reg_note (insn,
5031 REG_EQUAL,
5032 gen_rtx_fmt_e (UNSIGNED_FIX,
5033 GET_MODE (to),
5034 copy_rtx (from)));
5035 }
5036
5037 return;
5038 }
5039
5040 /* We can't do it with an insn, so use a library call. But first ensure
5041 that the mode of TO is at least as wide as SImode, since those are the
5042 only library calls we know about. */
5043
5044 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5045 {
5046 target = gen_reg_rtx (SImode);
5047
5048 expand_fix (target, from, unsignedp);
5049 }
5050 else
5051 {
5052 rtx insns;
5053 rtx value;
5054 rtx libfunc;
5055
5056 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5057 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
5058 gcc_assert (libfunc);
5059
5060 start_sequence ();
5061
5062 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5063 GET_MODE (to), 1, from,
5064 GET_MODE (from));
5065 insns = get_insns ();
5066 end_sequence ();
5067
5068 emit_libcall_block (insns, target, value,
5069 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5070 GET_MODE (to), from));
5071 }
5072
5073 if (target != to)
5074 {
5075 if (GET_MODE (to) == GET_MODE (target))
5076 emit_move_insn (to, target);
5077 else
5078 convert_move (to, target, 0);
5079 }
5080 }
5081
5082 /* Generate code to convert FROM to fixed point and store in TO. FROM
5083 must be floating point, TO must be signed. Use the conversion optab
5084 TAB to do the conversion. */
5085
5086 bool
5087 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5088 {
5089 enum insn_code icode;
5090 rtx target = to;
5091 enum machine_mode fmode, imode;
5092
5093 /* We first try to find a pair of modes, one real and one integer, at
5094 least as wide as FROM and TO, respectively, in which we can open-code
5095 this conversion. If the integer mode is wider than the mode of TO,
5096 we can do the conversion either signed or unsigned. */
5097
5098 for (fmode = GET_MODE (from); fmode != VOIDmode;
5099 fmode = GET_MODE_WIDER_MODE (fmode))
5100 for (imode = GET_MODE (to); imode != VOIDmode;
5101 imode = GET_MODE_WIDER_MODE (imode))
5102 {
5103 icode = tab->handlers[imode][fmode].insn_code;
5104 if (icode != CODE_FOR_nothing)
5105 {
5106 if (fmode != GET_MODE (from))
5107 from = convert_to_mode (fmode, from, 0);
5108
5109 if (imode != GET_MODE (to))
5110 target = gen_reg_rtx (imode);
5111
5112 emit_unop_insn (icode, target, from, UNKNOWN);
5113 if (target != to)
5114 convert_move (to, target, 0);
5115 return true;
5116 }
5117 }
5118
5119 return false;
5120 }
5121 \f
5122 /* Report whether we have an instruction to perform the operation
5123 specified by CODE on operands of mode MODE. */
5124 int
5125 have_insn_for (enum rtx_code code, enum machine_mode mode)
5126 {
5127 return (code_to_optab[(int) code] != 0
5128 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5129 != CODE_FOR_nothing));
5130 }
5131
5132 /* Create a blank optab. */
5133 static optab
5134 new_optab (void)
5135 {
5136 int i;
5137 optab op = ggc_alloc (sizeof (struct optab));
5138 for (i = 0; i < NUM_MACHINE_MODES; i++)
5139 {
5140 op->handlers[i].insn_code = CODE_FOR_nothing;
5141 op->handlers[i].libfunc = 0;
5142 }
5143
5144 return op;
5145 }
5146
5147 static convert_optab
5148 new_convert_optab (void)
5149 {
5150 int i, j;
5151 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5152 for (i = 0; i < NUM_MACHINE_MODES; i++)
5153 for (j = 0; j < NUM_MACHINE_MODES; j++)
5154 {
5155 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5156 op->handlers[i][j].libfunc = 0;
5157 }
5158 return op;
5159 }
5160
5161 /* Same, but fill in its code as CODE, and write it into the
5162 code_to_optab table. */
5163 static inline optab
5164 init_optab (enum rtx_code code)
5165 {
5166 optab op = new_optab ();
5167 op->code = code;
5168 code_to_optab[(int) code] = op;
5169 return op;
5170 }
5171
5172 /* Same, but fill in its code as CODE, and do _not_ write it into
5173 the code_to_optab table. */
5174 static inline optab
5175 init_optabv (enum rtx_code code)
5176 {
5177 optab op = new_optab ();
5178 op->code = code;
5179 return op;
5180 }
5181
5182 /* Conversion optabs never go in the code_to_optab table. */
5183 static inline convert_optab
5184 init_convert_optab (enum rtx_code code)
5185 {
5186 convert_optab op = new_convert_optab ();
5187 op->code = code;
5188 return op;
5189 }
5190
5191 /* Initialize the libfunc fields of an entire group of entries in some
5192 optab. Each entry is set equal to a string consisting of a leading
5193 pair of underscores followed by a generic operation name followed by
5194 a mode name (downshifted to lowercase) followed by a single character
5195 representing the number of operands for the given operation (which is
5196 usually one of the characters '2', '3', or '4').
5197
5198 OPTABLE is the table in which libfunc fields are to be initialized.
5199 FIRST_MODE is the first machine mode index in the given optab to
5200 initialize.
5201 LAST_MODE is the last machine mode index in the given optab to
5202 initialize.
5203 OPNAME is the generic (string) name of the operation.
5204 SUFFIX is the character which specifies the number of operands for
5205 the given generic operation.
5206 */
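/* For example, an optab initialized here with OPNAME "add" and SUFFIX '3'
   gets "__adddf3" as its DFmode entry (the libgcc addition routine), and
   OPNAME "mul" with SUFFIX '3' gives "__muldi3" for DImode.  */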
5207
5208 static void
5209 init_libfuncs (optab optable, int first_mode, int last_mode,
5210 const char *opname, int suffix)
5211 {
5212 int mode;
5213 unsigned opname_len = strlen (opname);
5214
5215 for (mode = first_mode; (int) mode <= (int) last_mode;
5216 mode = (enum machine_mode) ((int) mode + 1))
5217 {
5218 const char *mname = GET_MODE_NAME (mode);
5219 unsigned mname_len = strlen (mname);
5220 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5221 char *p;
5222 const char *q;
5223
5224 p = libfunc_name;
5225 *p++ = '_';
5226 *p++ = '_';
5227 for (q = opname; *q; )
5228 *p++ = *q++;
5229 for (q = mname; *q; q++)
5230 *p++ = TOLOWER (*q);
5231 *p++ = suffix;
5232 *p = '\0';
5233
5234 optable->handlers[(int) mode].libfunc
5235 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5236 }
5237 }
5238
5239 /* Initialize the libfunc fields of an entire group of entries in some
5240 optab which correspond to all integer mode operations. The parameters
5241 have the same meaning as similarly named ones for the `init_libfuncs'
5242 routine. (See above). */
5243
5244 static void
5245 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5246 {
5247 int maxsize = 2*BITS_PER_WORD;
5248 if (maxsize < LONG_LONG_TYPE_SIZE)
5249 maxsize = LONG_LONG_TYPE_SIZE;
5250 init_libfuncs (optable, word_mode,
5251 mode_for_size (maxsize, MODE_INT, 0),
5252 opname, suffix);
5253 }
5254
5255 /* Initialize the libfunc fields of an entire group of entries in some
5256 optab which correspond to all real mode operations. The parameters
5257 have the same meaning as similarly named ones for the `init_libfuncs'
5258 routine. (See above). */
5259
5260 static void
5261 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5262 {
5263 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5264
5265 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5266 depending on the low level floating format used. */
5267 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5268 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5269
5270 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5271 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5272 dec_opname, suffix);
5273 }
5274
5275 /* Initialize the libfunc fields of an entire group of entries of an
5276 inter-mode-class conversion optab. The string formation rules are
5277 similar to the ones for init_libfuncs, above, but instead of having
5278 a mode name and an operand count these functions have two mode names
5279 and no operand count. */
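/* For example, with OPNAME "float", FROM_CLASS MODE_INT and TO_CLASS
   MODE_FLOAT, the SImode -> DFmode entry is named "__floatsidf";
   entries involving a decimal float mode instead use the variant with
   the configured DECIMAL_PREFIX (bid_ or dpd_) spliced in after the
   leading underscores, as built below.  */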
5280 static void
5281 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5282 enum mode_class from_class,
5283 enum mode_class to_class)
5284 {
5285 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5286 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5287 size_t opname_len = strlen (opname);
5288 size_t max_mname_len = 0;
5289
5290 enum machine_mode fmode, tmode;
5291 const char *fname, *tname;
5292 const char *q;
5293 char *libfunc_name, *suffix;
5294 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5295 char *p;
5296
5297 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5298 depends on which underlying decimal floating point format is used. */
5299 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5300
5301 for (fmode = first_from_mode;
5302 fmode != VOIDmode;
5303 fmode = GET_MODE_WIDER_MODE (fmode))
5304 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5305
5306 for (tmode = first_to_mode;
5307 tmode != VOIDmode;
5308 tmode = GET_MODE_WIDER_MODE (tmode))
5309 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5310
5311 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5312 nondec_name[0] = '_';
5313 nondec_name[1] = '_';
5314 memcpy (&nondec_name[2], opname, opname_len);
5315 nondec_suffix = nondec_name + opname_len + 2;
5316
5317 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5318 dec_name[0] = '_';
5319 dec_name[1] = '_';
5320 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5321 memcpy (&dec_name[2+dec_len], opname, opname_len);
5322 dec_suffix = dec_name + dec_len + opname_len + 2;
5323
5324 for (fmode = first_from_mode; fmode != VOIDmode;
5325 fmode = GET_MODE_WIDER_MODE (fmode))
5326 for (tmode = first_to_mode; tmode != VOIDmode;
5327 tmode = GET_MODE_WIDER_MODE (tmode))
5328 {
5329 fname = GET_MODE_NAME (fmode);
5330 tname = GET_MODE_NAME (tmode);
5331
5332 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5333 {
5334 libfunc_name = dec_name;
5335 suffix = dec_suffix;
5336 }
5337 else
5338 {
5339 libfunc_name = nondec_name;
5340 suffix = nondec_suffix;
5341 }
5342
5343 p = suffix;
5344 for (q = fname; *q; p++, q++)
5345 *p = TOLOWER (*q);
5346 for (q = tname; *q; p++, q++)
5347 *p = TOLOWER (*q);
5348
5349 *p = '\0';
5350
5351 tab->handlers[tmode][fmode].libfunc
5352 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5353 p - libfunc_name));
5354 }
5355 }
5356
5357 /* Initialize the libfunc fields of an entire group of entries of an
5358 intra-mode-class conversion optab. The string formation rules are
5359 similar to the ones for init_libfuncs, above. WIDENING says whether
5360 the optab goes from narrow to wide modes or vice versa. These functions
5361 have two mode names _and_ an operand count. */
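/* For example (illustrative): registering sext_optab with OPNAME
"extend" and WIDENING true over MODE_FLOAT yields "__extendsfdf2",
while registering trunc_optab with "trunc" and WIDENING false yields
"__truncdfsf2"; both are standard libgcc routines. */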
5362 static void
5363 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5364 enum mode_class class, bool widening)
5365 {
5366 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5367 size_t opname_len = strlen (opname);
5368 size_t max_mname_len = 0;
5369
5370 enum machine_mode nmode, wmode;
5371 const char *nname, *wname;
5372 const char *q;
5373 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5374 char *libfunc_name, *suffix;
5375 char *p;
5376
5377 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5378 depends on which underlying decimal floating point format is used. */
5379 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5380
5381 for (nmode = first_mode; nmode != VOIDmode;
5382 nmode = GET_MODE_WIDER_MODE (nmode))
5383 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5384
5385 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5386 nondec_name[0] = '_';
5387 nondec_name[1] = '_';
5388 memcpy (&nondec_name[2], opname, opname_len);
5389 nondec_suffix = nondec_name + opname_len + 2;
5390
5391 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5392 dec_name[0] = '_';
5393 dec_name[1] = '_';
5394 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5395 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5396 dec_suffix = dec_name + dec_len + opname_len + 2;
5397
5398 for (nmode = first_mode; nmode != VOIDmode;
5399 nmode = GET_MODE_WIDER_MODE (nmode))
5400 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5401 wmode = GET_MODE_WIDER_MODE (wmode))
5402 {
5403 nname = GET_MODE_NAME (nmode);
5404 wname = GET_MODE_NAME (wmode);
5405
5406 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5407 {
5408 libfunc_name = dec_name;
5409 suffix = dec_suffix;
5410 }
5411 else
5412 {
5413 libfunc_name = nondec_name;
5414 suffix = nondec_suffix;
5415 }
5416
5417 p = suffix;
5418 for (q = widening ? nname : wname; *q; p++, q++)
5419 *p = TOLOWER (*q);
5420 for (q = widening ? wname : nname; *q; p++, q++)
5421 *p = TOLOWER (*q);
5422
5423 *p++ = '2';
5424 *p = '\0';
5425
5426 tab->handlers[widening ? wmode : nmode]
5427 [widening ? nmode : wmode].libfunc
5428 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5429 p - libfunc_name));
5430 }
5431 }
5432
5433
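/* Return a SYMBOL_REF rtx for the library function NAME, carrying the
flags that targetm.encode_section_info assigns to an external function
declaration. */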
5434 rtx
5435 init_one_libfunc (const char *name)
5436 {
5437 rtx symbol;
5438
5439 /* Create a FUNCTION_DECL that can be passed to
5440 targetm.encode_section_info. */
5441 /* ??? We don't have any type information except that this is
5442 a function. Pretend this is "int foo()". */
5443 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5444 build_function_type (integer_type_node, NULL_TREE));
5445 DECL_ARTIFICIAL (decl) = 1;
5446 DECL_EXTERNAL (decl) = 1;
5447 TREE_PUBLIC (decl) = 1;
5448
5449 symbol = XEXP (DECL_RTL (decl), 0);
5450
5451 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5452 are the flags assigned by targetm.encode_section_info. */
5453 SET_SYMBOL_REF_DECL (symbol, 0);
5454
5455 return symbol;
5456 }
5457
5458 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5459 MODE to NAME, which should be either 0 or a string constant. */
5460 void
5461 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5462 {
5463 if (name)
5464 optable->handlers[mode].libfunc = init_one_libfunc (name);
5465 else
5466 optable->handlers[mode].libfunc = 0;
5467 }
5468
5469 /* Call this to reset the function entry for one conversion optab
5470 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5471 either 0 or a string constant. */
5472 void
5473 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5474 enum machine_mode fmode, const char *name)
5475 {
5476 if (name)
5477 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5478 else
5479 optable->handlers[tmode][fmode].libfunc = 0;
5480 }
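
/* These two entry points are what a target's TARGET_INIT_LIBFUNCS hook
typically uses to override the defaults chosen by init_optabs below.
A sketch (hypothetical names, not taken from any particular port):

set_optab_libfunc (sdiv_optab, SImode, "__my_divsi3");
set_conv_libfunc (sfloat_optab, DFmode, SImode, "__my_floatsidf");

Passing a null NAME removes the libcall entry altogether. */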
5481
5482 /* Call this once to initialize the contents of the optabs
5483 appropriately for the current target machine. */
5484
5485 void
5486 init_optabs (void)
5487 {
5488 unsigned int i;
5489 enum machine_mode int_mode;
5490
5491 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5492
5493 for (i = 0; i < NUM_RTX_CODE; i++)
5494 setcc_gen_code[i] = CODE_FOR_nothing;
5495
5496 #ifdef HAVE_conditional_move
5497 for (i = 0; i < NUM_MACHINE_MODES; i++)
5498 movcc_gen_code[i] = CODE_FOR_nothing;
5499 #endif
5500
5501 for (i = 0; i < NUM_MACHINE_MODES; i++)
5502 {
5503 vcond_gen_code[i] = CODE_FOR_nothing;
5504 vcondu_gen_code[i] = CODE_FOR_nothing;
5505 }
5506
5507 add_optab = init_optab (PLUS);
5508 addv_optab = init_optabv (PLUS);
5509 sub_optab = init_optab (MINUS);
5510 subv_optab = init_optabv (MINUS);
5511 smul_optab = init_optab (MULT);
5512 smulv_optab = init_optabv (MULT);
5513 smul_highpart_optab = init_optab (UNKNOWN);
5514 umul_highpart_optab = init_optab (UNKNOWN);
5515 smul_widen_optab = init_optab (UNKNOWN);
5516 umul_widen_optab = init_optab (UNKNOWN);
5517 usmul_widen_optab = init_optab (UNKNOWN);
5518 smadd_widen_optab = init_optab (UNKNOWN);
5519 umadd_widen_optab = init_optab (UNKNOWN);
5520 smsub_widen_optab = init_optab (UNKNOWN);
5521 umsub_widen_optab = init_optab (UNKNOWN);
5522 sdiv_optab = init_optab (DIV);
5523 sdivv_optab = init_optabv (DIV);
5524 sdivmod_optab = init_optab (UNKNOWN);
5525 udiv_optab = init_optab (UDIV);
5526 udivmod_optab = init_optab (UNKNOWN);
5527 smod_optab = init_optab (MOD);
5528 umod_optab = init_optab (UMOD);
5529 fmod_optab = init_optab (UNKNOWN);
5530 remainder_optab = init_optab (UNKNOWN);
5531 ftrunc_optab = init_optab (UNKNOWN);
5532 and_optab = init_optab (AND);
5533 ior_optab = init_optab (IOR);
5534 xor_optab = init_optab (XOR);
5535 ashl_optab = init_optab (ASHIFT);
5536 ashr_optab = init_optab (ASHIFTRT);
5537 lshr_optab = init_optab (LSHIFTRT);
5538 rotl_optab = init_optab (ROTATE);
5539 rotr_optab = init_optab (ROTATERT);
5540 smin_optab = init_optab (SMIN);
5541 smax_optab = init_optab (SMAX);
5542 umin_optab = init_optab (UMIN);
5543 umax_optab = init_optab (UMAX);
5544 pow_optab = init_optab (UNKNOWN);
5545 atan2_optab = init_optab (UNKNOWN);
5546
5547 /* These three have codes assigned exclusively for the sake of
5548 have_insn_for. */
5549 mov_optab = init_optab (SET);
5550 movstrict_optab = init_optab (STRICT_LOW_PART);
5551 cmp_optab = init_optab (COMPARE);
5552
5553 storent_optab = init_optab (UNKNOWN);
5554
5555 ucmp_optab = init_optab (UNKNOWN);
5556 tst_optab = init_optab (UNKNOWN);
5557
5558 eq_optab = init_optab (EQ);
5559 ne_optab = init_optab (NE);
5560 gt_optab = init_optab (GT);
5561 ge_optab = init_optab (GE);
5562 lt_optab = init_optab (LT);
5563 le_optab = init_optab (LE);
5564 unord_optab = init_optab (UNORDERED);
5565
5566 neg_optab = init_optab (NEG);
5567 negv_optab = init_optabv (NEG);
5568 abs_optab = init_optab (ABS);
5569 absv_optab = init_optabv (ABS);
5570 addcc_optab = init_optab (UNKNOWN);
5571 one_cmpl_optab = init_optab (NOT);
5572 bswap_optab = init_optab (BSWAP);
5573 ffs_optab = init_optab (FFS);
5574 clz_optab = init_optab (CLZ);
5575 ctz_optab = init_optab (CTZ);
5576 popcount_optab = init_optab (POPCOUNT);
5577 parity_optab = init_optab (PARITY);
5578 sqrt_optab = init_optab (SQRT);
5579 floor_optab = init_optab (UNKNOWN);
5580 ceil_optab = init_optab (UNKNOWN);
5581 round_optab = init_optab (UNKNOWN);
5582 btrunc_optab = init_optab (UNKNOWN);
5583 nearbyint_optab = init_optab (UNKNOWN);
5584 rint_optab = init_optab (UNKNOWN);
5585 sincos_optab = init_optab (UNKNOWN);
5586 sin_optab = init_optab (UNKNOWN);
5587 asin_optab = init_optab (UNKNOWN);
5588 cos_optab = init_optab (UNKNOWN);
5589 acos_optab = init_optab (UNKNOWN);
5590 exp_optab = init_optab (UNKNOWN);
5591 exp10_optab = init_optab (UNKNOWN);
5592 exp2_optab = init_optab (UNKNOWN);
5593 expm1_optab = init_optab (UNKNOWN);
5594 ldexp_optab = init_optab (UNKNOWN);
5595 scalb_optab = init_optab (UNKNOWN);
5596 logb_optab = init_optab (UNKNOWN);
5597 ilogb_optab = init_optab (UNKNOWN);
5598 log_optab = init_optab (UNKNOWN);
5599 log10_optab = init_optab (UNKNOWN);
5600 log2_optab = init_optab (UNKNOWN);
5601 log1p_optab = init_optab (UNKNOWN);
5602 tan_optab = init_optab (UNKNOWN);
5603 atan_optab = init_optab (UNKNOWN);
5604 copysign_optab = init_optab (UNKNOWN);
5605 signbit_optab = init_optab (UNKNOWN);
5606
5607 isinf_optab = init_optab (UNKNOWN);
5608
5609 strlen_optab = init_optab (UNKNOWN);
5610 cbranch_optab = init_optab (UNKNOWN);
5611 cmov_optab = init_optab (UNKNOWN);
5612 cstore_optab = init_optab (UNKNOWN);
5613 push_optab = init_optab (UNKNOWN);
5614
5615 reduc_smax_optab = init_optab (UNKNOWN);
5616 reduc_umax_optab = init_optab (UNKNOWN);
5617 reduc_smin_optab = init_optab (UNKNOWN);
5618 reduc_umin_optab = init_optab (UNKNOWN);
5619 reduc_splus_optab = init_optab (UNKNOWN);
5620 reduc_uplus_optab = init_optab (UNKNOWN);
5621
5622 ssum_widen_optab = init_optab (UNKNOWN);
5623 usum_widen_optab = init_optab (UNKNOWN);
5624 sdot_prod_optab = init_optab (UNKNOWN);
5625 udot_prod_optab = init_optab (UNKNOWN);
5626
5627 vec_extract_optab = init_optab (UNKNOWN);
5628 vec_extract_even_optab = init_optab (UNKNOWN);
5629 vec_extract_odd_optab = init_optab (UNKNOWN);
5630 vec_interleave_high_optab = init_optab (UNKNOWN);
5631 vec_interleave_low_optab = init_optab (UNKNOWN);
5632 vec_set_optab = init_optab (UNKNOWN);
5633 vec_init_optab = init_optab (UNKNOWN);
5634 vec_shl_optab = init_optab (UNKNOWN);
5635 vec_shr_optab = init_optab (UNKNOWN);
5636 vec_realign_load_optab = init_optab (UNKNOWN);
5637 movmisalign_optab = init_optab (UNKNOWN);
5638 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5639 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5640 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5641 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5642 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5643 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5644 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5645 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5646 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5647 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5648 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5649 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5650 vec_pack_trunc_optab = init_optab (UNKNOWN);
5651 vec_pack_usat_optab = init_optab (UNKNOWN);
5652 vec_pack_ssat_optab = init_optab (UNKNOWN);
5653 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5654 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5655
5656 powi_optab = init_optab (UNKNOWN);
5657
5658 /* Conversions. */
5659 sext_optab = init_convert_optab (SIGN_EXTEND);
5660 zext_optab = init_convert_optab (ZERO_EXTEND);
5661 trunc_optab = init_convert_optab (TRUNCATE);
5662 sfix_optab = init_convert_optab (FIX);
5663 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5664 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5665 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5666 sfloat_optab = init_convert_optab (FLOAT);
5667 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5668 lrint_optab = init_convert_optab (UNKNOWN);
5669 lround_optab = init_convert_optab (UNKNOWN);
5670 lfloor_optab = init_convert_optab (UNKNOWN);
5671 lceil_optab = init_convert_optab (UNKNOWN);
5672
5673 for (i = 0; i < NUM_MACHINE_MODES; i++)
5674 {
5675 movmem_optab[i] = CODE_FOR_nothing;
5676 cmpstr_optab[i] = CODE_FOR_nothing;
5677 cmpstrn_optab[i] = CODE_FOR_nothing;
5678 cmpmem_optab[i] = CODE_FOR_nothing;
5679 setmem_optab[i] = CODE_FOR_nothing;
5680
5681 sync_add_optab[i] = CODE_FOR_nothing;
5682 sync_sub_optab[i] = CODE_FOR_nothing;
5683 sync_ior_optab[i] = CODE_FOR_nothing;
5684 sync_and_optab[i] = CODE_FOR_nothing;
5685 sync_xor_optab[i] = CODE_FOR_nothing;
5686 sync_nand_optab[i] = CODE_FOR_nothing;
5687 sync_old_add_optab[i] = CODE_FOR_nothing;
5688 sync_old_sub_optab[i] = CODE_FOR_nothing;
5689 sync_old_ior_optab[i] = CODE_FOR_nothing;
5690 sync_old_and_optab[i] = CODE_FOR_nothing;
5691 sync_old_xor_optab[i] = CODE_FOR_nothing;
5692 sync_old_nand_optab[i] = CODE_FOR_nothing;
5693 sync_new_add_optab[i] = CODE_FOR_nothing;
5694 sync_new_sub_optab[i] = CODE_FOR_nothing;
5695 sync_new_ior_optab[i] = CODE_FOR_nothing;
5696 sync_new_and_optab[i] = CODE_FOR_nothing;
5697 sync_new_xor_optab[i] = CODE_FOR_nothing;
5698 sync_new_nand_optab[i] = CODE_FOR_nothing;
5699 sync_compare_and_swap[i] = CODE_FOR_nothing;
5700 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5701 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5702 sync_lock_release[i] = CODE_FOR_nothing;
5703
5704 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5705 }
5706
5707 /* Fill in the optabs with the insns we support. */
5708 init_all_optabs ();
5709
5710 /* The ffs function operates on `int'. Fall back on it if we do not
5711 have a libgcc2 function for that width. */
5712 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5713 ffs_optab->handlers[(int) int_mode].libfunc = init_one_libfunc ("ffs");
5714
5715 /* Initialize the optabs with the names of the library functions. */
5716 init_integral_libfuncs (add_optab, "add", '3');
5717 init_floating_libfuncs (add_optab, "add", '3');
5718 init_integral_libfuncs (addv_optab, "addv", '3');
5719 init_floating_libfuncs (addv_optab, "add", '3');
5720 init_integral_libfuncs (sub_optab, "sub", '3');
5721 init_floating_libfuncs (sub_optab, "sub", '3');
5722 init_integral_libfuncs (subv_optab, "subv", '3');
5723 init_floating_libfuncs (subv_optab, "sub", '3');
5724 init_integral_libfuncs (smul_optab, "mul", '3');
5725 init_floating_libfuncs (smul_optab, "mul", '3');
5726 init_integral_libfuncs (smulv_optab, "mulv", '3');
5727 init_floating_libfuncs (smulv_optab, "mul", '3');
5728 init_integral_libfuncs (sdiv_optab, "div", '3');
5729 init_floating_libfuncs (sdiv_optab, "div", '3');
5730 init_integral_libfuncs (sdivv_optab, "divv", '3');
5731 init_integral_libfuncs (udiv_optab, "udiv", '3');
5732 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5733 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5734 init_integral_libfuncs (smod_optab, "mod", '3');
5735 init_integral_libfuncs (umod_optab, "umod", '3');
5736 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5737 init_integral_libfuncs (and_optab, "and", '3');
5738 init_integral_libfuncs (ior_optab, "ior", '3');
5739 init_integral_libfuncs (xor_optab, "xor", '3');
5740 init_integral_libfuncs (ashl_optab, "ashl", '3');
5741 init_integral_libfuncs (ashr_optab, "ashr", '3');
5742 init_integral_libfuncs (lshr_optab, "lshr", '3');
5743 init_integral_libfuncs (smin_optab, "min", '3');
5744 init_floating_libfuncs (smin_optab, "min", '3');
5745 init_integral_libfuncs (smax_optab, "max", '3');
5746 init_floating_libfuncs (smax_optab, "max", '3');
5747 init_integral_libfuncs (umin_optab, "umin", '3');
5748 init_integral_libfuncs (umax_optab, "umax", '3');
5749 init_integral_libfuncs (neg_optab, "neg", '2');
5750 init_floating_libfuncs (neg_optab, "neg", '2');
5751 init_integral_libfuncs (negv_optab, "negv", '2');
5752 init_floating_libfuncs (negv_optab, "neg", '2');
5753 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5754 init_integral_libfuncs (ffs_optab, "ffs", '2');
5755 init_integral_libfuncs (clz_optab, "clz", '2');
5756 init_integral_libfuncs (ctz_optab, "ctz", '2');
5757 init_integral_libfuncs (popcount_optab, "popcount", '2');
5758 init_integral_libfuncs (parity_optab, "parity", '2');
5759
5760 /* Comparison libcalls for integers MUST come in pairs,
5761 signed/unsigned. */
5762 init_integral_libfuncs (cmp_optab, "cmp", '2');
5763 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5764 init_floating_libfuncs (cmp_optab, "cmp", '2');
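/* For instance, the DImode pair is "__cmpdi2" / "__ucmpdi2"; libgcc
defines both to return 0, 1 or 2 for less-than, equal and
greater-than respectively, which is why the signed and unsigned
entries must stay in sync. */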
5765
5766 /* EQ etc are floating point only. */
5767 init_floating_libfuncs (eq_optab, "eq", '2');
5768 init_floating_libfuncs (ne_optab, "ne", '2');
5769 init_floating_libfuncs (gt_optab, "gt", '2');
5770 init_floating_libfuncs (ge_optab, "ge", '2');
5771 init_floating_libfuncs (lt_optab, "lt", '2');
5772 init_floating_libfuncs (le_optab, "le", '2');
5773 init_floating_libfuncs (unord_optab, "unord", '2');
5774
5775 init_floating_libfuncs (powi_optab, "powi", '2');
5776
5777 /* Conversions. */
5778 init_interclass_conv_libfuncs (sfloat_optab, "float",
5779 MODE_INT, MODE_FLOAT);
5780 init_interclass_conv_libfuncs (sfloat_optab, "float",
5781 MODE_INT, MODE_DECIMAL_FLOAT);
5782 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5783 MODE_INT, MODE_FLOAT);
5784 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5785 MODE_INT, MODE_DECIMAL_FLOAT);
5786 init_interclass_conv_libfuncs (sfix_optab, "fix",
5787 MODE_FLOAT, MODE_INT);
5788 init_interclass_conv_libfuncs (sfix_optab, "fix",
5789 MODE_DECIMAL_FLOAT, MODE_INT);
5790 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5791 MODE_FLOAT, MODE_INT);
5792 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5793 MODE_DECIMAL_FLOAT, MODE_INT);
5794 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5795 MODE_INT, MODE_DECIMAL_FLOAT);
5796 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5797 MODE_INT, MODE_FLOAT);
5798 init_interclass_conv_libfuncs (lround_optab, "lround",
5799 MODE_INT, MODE_FLOAT);
5800 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5801 MODE_INT, MODE_FLOAT);
5802 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5803 MODE_INT, MODE_FLOAT);
5804
5805 /* sext_optab is also used for FLOAT_EXTEND. */
5806 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5807 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5808 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5809 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5810 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5811 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5812 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5813 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5814
5815 /* Explicitly initialize the bswap libfuncs since we need them to be
5816 valid for things other than word_mode. */
5817 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5818 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5819
5820 /* Use cabs for double complex abs, since systems generally have cabs.
5821 Don't define any libcall for float complex, so that cabs will be used. */
5822 if (complex_double_type_node)
5823 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5824 = init_one_libfunc ("cabs");
5825
5826 abort_libfunc = init_one_libfunc ("abort");
5827 memcpy_libfunc = init_one_libfunc ("memcpy");
5828 memmove_libfunc = init_one_libfunc ("memmove");
5829 memcmp_libfunc = init_one_libfunc ("memcmp");
5830 memset_libfunc = init_one_libfunc ("memset");
5831 setbits_libfunc = init_one_libfunc ("__setbits");
5832
5833 #ifndef DONT_USE_BUILTIN_SETJMP
5834 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5835 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5836 #else
5837 setjmp_libfunc = init_one_libfunc ("setjmp");
5838 longjmp_libfunc = init_one_libfunc ("longjmp");
5839 #endif
5840 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5841 unwind_sjlj_unregister_libfunc
5842 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5843
5844 /* For function entry/exit instrumentation. */
5845 profile_function_entry_libfunc
5846 = init_one_libfunc ("__cyg_profile_func_enter");
5847 profile_function_exit_libfunc
5848 = init_one_libfunc ("__cyg_profile_func_exit");
5849
5850 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5851
5852 if (HAVE_conditional_trap)
5853 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5854
5855 /* Allow the target to add more libcalls or rename some, etc. */
5856 targetm.init_libfuncs ();
5857 }
5858
5859 #ifdef DEBUG
5860
5861 /* Print information about the current contents of the optabs on
5862 STDERR. */
5863
5864 static void
5865 debug_optab_libfuncs (void)
5866 {
5867 int i;
5868 int j;
5869 int k;
5870
5871 /* Dump the arithmetic optabs. */
5872 for (i = 0; i != (int) OTI_MAX; i++)
5873 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5874 {
5875 optab o;
5876 struct optab_handlers *h;
5877
5878 o = optab_table[i];
5879 h = &o->handlers[j];
5880 if (h->libfunc)
5881 {
5882 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5883 fprintf (stderr, "%s\t%s:\t%s\n",
5884 GET_RTX_NAME (o->code),
5885 GET_MODE_NAME (j),
5886 XSTR (h->libfunc, 0));
5887 }
5888 }
5889
5890 /* Dump the conversion optabs. */
5891 for (i = 0; i < (int) COI_MAX; ++i)
5892 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5893 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5894 {
5895 convert_optab o;
5896 struct optab_handlers *h;
5897
5898 o = convert_optab_table[i];
5899 h = &o->handlers[j][k];
5900 if (h->libfunc)
5901 {
5902 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5903 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5904 GET_RTX_NAME (o->code),
5905 GET_MODE_NAME (j),
5906 GET_MODE_NAME (k),
5907 XSTR (h->libfunc, 0));
5908 }
5909 }
5910 }
5911
5912 #endif /* DEBUG */
5913
5914 \f
5915 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5916 CODE. Return 0 on failure. */
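/* A sketch of the emitted sequence (the exact RTL depends on the
target's cmpMODE and conditional_trap patterns): first the compare
insn for OP1 and OP2, then a conditional trap insn whose condition
uses CODE and whose trap code operand is TCODE. */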
5917
5918 rtx
5919 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5920 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5921 {
5922 enum machine_mode mode = GET_MODE (op1);
5923 enum insn_code icode;
5924 rtx insn;
5925
5926 if (!HAVE_conditional_trap)
5927 return 0;
5928
5929 if (mode == VOIDmode)
5930 return 0;
5931
5932 icode = cmp_optab->handlers[(int) mode].insn_code;
5933 if (icode == CODE_FOR_nothing)
5934 return 0;
5935
5936 start_sequence ();
5937 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5938 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5939 if (!op1 || !op2)
5940 {
5941 end_sequence ();
5942 return 0;
5943 }
5944 emit_insn (GEN_FCN (icode) (op1, op2));
5945
5946 PUT_CODE (trap_rtx, code);
5947 gcc_assert (HAVE_conditional_trap);
5948 insn = gen_conditional_trap (trap_rtx, tcode);
5949 if (insn)
5950 {
5951 emit_insn (insn);
5952 insn = get_insns ();
5953 }
5954 end_sequence ();
5955
5956 return insn;
5957 }
5958
5959 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5960 or unsigned operation code. */
5961
5962 static enum rtx_code
5963 get_rtx_code (enum tree_code tcode, bool unsignedp)
5964 {
5965 enum rtx_code code;
5966 switch (tcode)
5967 {
5968 case EQ_EXPR:
5969 code = EQ;
5970 break;
5971 case NE_EXPR:
5972 code = NE;
5973 break;
5974 case LT_EXPR:
5975 code = unsignedp ? LTU : LT;
5976 break;
5977 case LE_EXPR:
5978 code = unsignedp ? LEU : LE;
5979 break;
5980 case GT_EXPR:
5981 code = unsignedp ? GTU : GT;
5982 break;
5983 case GE_EXPR:
5984 code = unsignedp ? GEU : GE;
5985 break;
5986
5987 case UNORDERED_EXPR:
5988 code = UNORDERED;
5989 break;
5990 case ORDERED_EXPR:
5991 code = ORDERED;
5992 break;
5993 case UNLT_EXPR:
5994 code = UNLT;
5995 break;
5996 case UNLE_EXPR:
5997 code = UNLE;
5998 break;
5999 case UNGT_EXPR:
6000 code = UNGT;
6001 break;
6002 case UNGE_EXPR:
6003 code = UNGE;
6004 break;
6005 case UNEQ_EXPR:
6006 code = UNEQ;
6007 break;
6008 case LTGT_EXPR:
6009 code = LTGT;
6010 break;
6011
6012 default:
6013 gcc_unreachable ();
6014 }
6015 return code;
6016 }
6017
6018 /* Return the comparison rtx for COND. Use UNSIGNEDP to select signed or
6019 unsigned operators. Do not generate a compare instruction. */
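/* For example (a sketch): for the tree comparison "a < b" on unsigned
operands, get_rtx_code returns LTU and the result is the rtx
(ltu:VOIDmode op0 op1), with the operands forced into registers if
the vcond pattern's predicates reject them. */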
6020
6021 static rtx
6022 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6023 {
6024 enum rtx_code rcode;
6025 tree t_op0, t_op1;
6026 rtx rtx_op0, rtx_op1;
6027
6028 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
6029 ensures that the condition is a relational operation. */
6030 gcc_assert (COMPARISON_CLASS_P (cond));
6031
6032 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6033 t_op0 = TREE_OPERAND (cond, 0);
6034 t_op1 = TREE_OPERAND (cond, 1);
6035
6036 /* Expand operands. */
6037 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6038 EXPAND_STACK_PARM);
6039 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6040 EXPAND_STACK_PARM);
6041
6042 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6043 && GET_MODE (rtx_op0) != VOIDmode)
6044 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6045
6046 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6047 && GET_MODE (rtx_op1) != VOIDmode)
6048 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6049
6050 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6051 }
6052
6053 /* Return insn code for VEC_COND_EXPR EXPR. */
6054
6055 static inline enum insn_code
6056 get_vcond_icode (tree expr, enum machine_mode mode)
6057 {
6058 enum insn_code icode = CODE_FOR_nothing;
6059
6060 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6061 icode = vcondu_gen_code[mode];
6062 else
6063 icode = vcond_gen_code[mode];
6064 return icode;
6065 }
6066
6067 /* Return TRUE iff appropriate vector insns are available
6068 for the vector cond expr EXPR in mode VMODE. */
6069
6070 bool
6071 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6072 {
6073 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6074 return false;
6075 return true;
6076 }
6077
6078 /* Generate insns for VEC_COND_EXPR. */
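/* The insn emitted below follows the operand layout of the vcond and
vcondu named patterns: operand 0 is the destination, operands 1 and 2
are the two value vectors, operand 3 is the comparison, and operands
4 and 5 are the values being compared. */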
6079
6080 rtx
6081 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6082 {
6083 enum insn_code icode;
6084 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6085 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6086 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6087
6088 icode = get_vcond_icode (vec_cond_expr, mode);
6089 if (icode == CODE_FOR_nothing)
6090 return 0;
6091
6092 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6093 target = gen_reg_rtx (mode);
6094
6095 /* Get comparison rtx. First expand both cond expr operands. */
6096 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6097 unsignedp, icode);
6098 cc_op0 = XEXP (comparison, 0);
6099 cc_op1 = XEXP (comparison, 1);
6100 /* Expand both operands and force them into registers, if required. */
6101 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6102 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6103 && mode != VOIDmode)
6104 rtx_op1 = force_reg (mode, rtx_op1);
6105
6106 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6107 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6108 && mode != VOIDmode)
6109 rtx_op2 = force_reg (mode, rtx_op2);
6110
6111 /* Emit instruction! */
6112 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6113 comparison, cc_op0, cc_op1));
6114
6115 return target;
6116 }
6117
6118 \f
6119 /* This is an internal subroutine of the other compare_and_swap expanders.
6120 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6121 operation. TARGET is an optional place to store the value result of
6122 the operation. ICODE is the particular instruction to expand. Return
6123 the result of the operation. */
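/* A sketch of the expansion: the pattern is invoked as
GEN_FCN (icode) (target, mem, old_val, new_val), i.e. operand 0
receives the value read from MEM, operand 1 is the memory, operand 2
the expected value and operand 3 the replacement. */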
6124
6125 static rtx
6126 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6127 rtx target, enum insn_code icode)
6128 {
6129 enum machine_mode mode = GET_MODE (mem);
6130 rtx insn;
6131
6132 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6133 target = gen_reg_rtx (mode);
6134
6135 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6136 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6137 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6138 old_val = force_reg (mode, old_val);
6139
6140 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6141 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6142 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6143 new_val = force_reg (mode, new_val);
6144
6145 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6146 if (insn == NULL_RTX)
6147 return NULL_RTX;
6148 emit_insn (insn);
6149
6150 return target;
6151 }
6152
6153 /* Expand a compare-and-swap operation and return its value. */
6154
6155 rtx
6156 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6157 {
6158 enum machine_mode mode = GET_MODE (mem);
6159 enum insn_code icode = sync_compare_and_swap[mode];
6160
6161 if (icode == CODE_FOR_nothing)
6162 return NULL_RTX;
6163
6164 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6165 }
6166
6167 /* Expand a compare-and-swap operation and store true into the result if
6168 the operation was successful and false otherwise. Return the result.
6169 Unlike other routines, TARGET is not optional. */
6170
6171 rtx
6172 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6173 {
6174 enum machine_mode mode = GET_MODE (mem);
6175 enum insn_code icode;
6176 rtx subtarget, label0, label1;
6177
6178 /* If the target supports a compare-and-swap pattern that simultaneously
6179 sets some flag for success, then use it. Otherwise use the regular
6180 compare-and-swap and follow that immediately with a compare insn. */
6181 icode = sync_compare_and_swap_cc[mode];
6182 switch (icode)
6183 {
6184 default:
6185 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6186 NULL_RTX, icode);
6187 if (subtarget != NULL_RTX)
6188 break;
6189
6190 /* FALLTHRU */
6191 case CODE_FOR_nothing:
6192 icode = sync_compare_and_swap[mode];
6193 if (icode == CODE_FOR_nothing)
6194 return NULL_RTX;
6195
6196 /* Ensure that if old_val == mem, we're not comparing
6197 against an old value. */
6198 if (MEM_P (old_val))
6199 old_val = force_reg (mode, old_val);
6200
6201 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6202 NULL_RTX, icode);
6203 if (subtarget == NULL_RTX)
6204 return NULL_RTX;
6205
6206 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6207 }
6208
6209 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6210 setcc instruction from the beginning. We don't work too hard here,
6211 but it's nice to not be stupid about initial code gen either. */
6212 if (STORE_FLAG_VALUE == 1)
6213 {
6214 icode = setcc_gen_code[EQ];
6215 if (icode != CODE_FOR_nothing)
6216 {
6217 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6218 rtx insn;
6219
6220 subtarget = target;
6221 if (!insn_data[icode].operand[0].predicate (target, cmode))
6222 subtarget = gen_reg_rtx (cmode);
6223
6224 insn = GEN_FCN (icode) (subtarget);
6225 if (insn)
6226 {
6227 emit_insn (insn);
6228 if (GET_MODE (target) != GET_MODE (subtarget))
6229 {
6230 convert_move (target, subtarget, 1);
6231 subtarget = target;
6232 }
6233 return subtarget;
6234 }
6235 }
6236 }
6237
6238 /* Without an appropriate setcc instruction, use a set of branches to
6239 get 1 and 0 stored into target. Presumably if the target has a
6240 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6241
6242 label0 = gen_label_rtx ();
6243 label1 = gen_label_rtx ();
6244
6245 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6246 emit_move_insn (target, const0_rtx);
6247 emit_jump_insn (gen_jump (label1));
6248 emit_barrier ();
6249 emit_label (label0);
6250 emit_move_insn (target, const1_rtx);
6251 emit_label (label1);
6252
6253 return target;
6254 }
6255
6256 /* This is a helper function for the other atomic operations. This function
6257 emits a loop that contains SEQ that iterates until a compare-and-swap
6258 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6259 a set of instructions that takes a value from OLD_REG as an input and
6260 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6261 set to the current contents of MEM. After SEQ, a compare-and-swap will
6262 attempt to update MEM with NEW_REG. The function returns true when the
6263 loop was generated successfully. */
6264
6265 static bool
6266 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6267 {
6268 enum machine_mode mode = GET_MODE (mem);
6269 enum insn_code icode;
6270 rtx label, cmp_reg, subtarget;
6271
6272 /* The loop we want to generate looks like
6273
6274 cmp_reg = mem;
6275 label:
6276 old_reg = cmp_reg;
6277 seq;
6278 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6279 if (cmp_reg != old_reg)
6280 goto label;
6281
6282 Note that we only do the plain load from memory once. Subsequent
6283 iterations use the value loaded by the compare-and-swap pattern. */
6284
6285 label = gen_label_rtx ();
6286 cmp_reg = gen_reg_rtx (mode);
6287
6288 emit_move_insn (cmp_reg, mem);
6289 emit_label (label);
6290 emit_move_insn (old_reg, cmp_reg);
6291 if (seq)
6292 emit_insn (seq);
6293
6294 /* If the target supports a compare-and-swap pattern that simultaneously
6295 sets some flag for success, then use it. Otherwise use the regular
6296 compare-and-swap and follow that immediately with a compare insn. */
6297 icode = sync_compare_and_swap_cc[mode];
6298 switch (icode)
6299 {
6300 default:
6301 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6302 cmp_reg, icode);
6303 if (subtarget != NULL_RTX)
6304 {
6305 gcc_assert (subtarget == cmp_reg);
6306 break;
6307 }
6308
6309 /* FALLTHRU */
6310 case CODE_FOR_nothing:
6311 icode = sync_compare_and_swap[mode];
6312 if (icode == CODE_FOR_nothing)
6313 return false;
6314
6315 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6316 cmp_reg, icode);
6317 if (subtarget == NULL_RTX)
6318 return false;
6319 if (subtarget != cmp_reg)
6320 emit_move_insn (cmp_reg, subtarget);
6321
6322 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6323 }
6324
6325 /* ??? Mark this jump predicted not taken? */
6326 emit_jump_insn (bcc_gen_fctn[NE] (label));
6327
6328 return true;
6329 }
6330
6331 /* This function generates the atomic operation MEM CODE= VAL. In this
6332 case, we do not care about any resulting value. Returns NULL if we
6333 cannot generate the operation. */
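/* For example (a sketch): __sync_fetch_and_add (&x, n) with its result
unused reaches here with CODE == PLUS; if the target defines a
sync_add<mode> pattern we emit it directly, otherwise we fall back to
a compare-and-swap loop that recomputes t1 = t0 + n on each
iteration. */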
6334
6335 rtx
6336 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6337 {
6338 enum machine_mode mode = GET_MODE (mem);
6339 enum insn_code icode;
6340 rtx insn;
6341
6342 /* Look to see if the target supports the operation directly. */
6343 switch (code)
6344 {
6345 case PLUS:
6346 icode = sync_add_optab[mode];
6347 break;
6348 case IOR:
6349 icode = sync_ior_optab[mode];
6350 break;
6351 case XOR:
6352 icode = sync_xor_optab[mode];
6353 break;
6354 case AND:
6355 icode = sync_and_optab[mode];
6356 break;
6357 case NOT:
6358 icode = sync_nand_optab[mode];
6359 break;
6360
6361 case MINUS:
6362 icode = sync_sub_optab[mode];
6363 if (icode == CODE_FOR_nothing)
6364 {
6365 icode = sync_add_optab[mode];
6366 if (icode != CODE_FOR_nothing)
6367 {
6368 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6369 code = PLUS;
6370 }
6371 }
6372 break;
6373
6374 default:
6375 gcc_unreachable ();
6376 }
6377
6378 /* Generate the direct operation, if present. */
6379 if (icode != CODE_FOR_nothing)
6380 {
6381 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6382 val = convert_modes (mode, GET_MODE (val), val, 1);
6383 if (!insn_data[icode].operand[1].predicate (val, mode))
6384 val = force_reg (mode, val);
6385
6386 insn = GEN_FCN (icode) (mem, val);
6387 if (insn)
6388 {
6389 emit_insn (insn);
6390 return const0_rtx;
6391 }
6392 }
6393
6394 /* Failing that, generate a compare-and-swap loop in which we perform the
6395 operation with normal arithmetic instructions. */
6396 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6397 {
6398 rtx t0 = gen_reg_rtx (mode), t1;
6399
6400 start_sequence ();
6401
6402 t1 = t0;
6403 if (code == NOT)
6404 {
6405 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6406 code = AND;
6407 }
6408 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6409 true, OPTAB_LIB_WIDEN);
6410
6411 insn = get_insns ();
6412 end_sequence ();
6413
6414 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6415 return const0_rtx;
6416 }
6417
6418 return NULL_RTX;
6419 }
6420
6421 /* This function generates the atomic operation MEM CODE= VAL. In this
6422 case, we do care about the resulting value: if AFTER is true then
6423 return the value MEM holds after the operation; if AFTER is false
6424 then return the value MEM holds before the operation. TARGET is an
6425 optional place for the result value to be stored. */
6426
6427 rtx
6428 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6429 bool after, rtx target)
6430 {
6431 enum machine_mode mode = GET_MODE (mem);
6432 enum insn_code old_code, new_code, icode;
6433 bool compensate;
6434 rtx insn;
6435
6436 /* Look to see if the target supports the operation directly. */
6437 switch (code)
6438 {
6439 case PLUS:
6440 old_code = sync_old_add_optab[mode];
6441 new_code = sync_new_add_optab[mode];
6442 break;
6443 case IOR:
6444 old_code = sync_old_ior_optab[mode];
6445 new_code = sync_new_ior_optab[mode];
6446 break;
6447 case XOR:
6448 old_code = sync_old_xor_optab[mode];
6449 new_code = sync_new_xor_optab[mode];
6450 break;
6451 case AND:
6452 old_code = sync_old_and_optab[mode];
6453 new_code = sync_new_and_optab[mode];
6454 break;
6455 case NOT:
6456 old_code = sync_old_nand_optab[mode];
6457 new_code = sync_new_nand_optab[mode];
6458 break;
6459
6460 case MINUS:
6461 old_code = sync_old_sub_optab[mode];
6462 new_code = sync_new_sub_optab[mode];
6463 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6464 {
6465 old_code = sync_old_add_optab[mode];
6466 new_code = sync_new_add_optab[mode];
6467 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6468 {
6469 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6470 code = PLUS;
6471 }
6472 }
6473 break;
6474
6475 default:
6476 gcc_unreachable ();
6477 }
6478
6479 /* If the target supports the proper new/old operation, great. But
6480 if we only support the opposite old/new operation, check to see if we
6481 can compensate. When the old value is supported, we can always
6482 perform the operation again with normal arithmetic. When only the
6483 new value is supported, we can handle the operation only if it
6484 is reversible. */
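/* Concretely (a sketch): for a fetch-and-add where only the old-value
pattern exists but AFTER is true, we emit that pattern and then
compute target = target + val; if only the new-value pattern exists
and AFTER is false, we emit it and undo the operation with
target = target - val, which is why only reversible codes qualify. */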
6485 compensate = false;
6486 if (after)
6487 {
6488 icode = new_code;
6489 if (icode == CODE_FOR_nothing)
6490 {
6491 icode = old_code;
6492 if (icode != CODE_FOR_nothing)
6493 compensate = true;
6494 }
6495 }
6496 else
6497 {
6498 icode = old_code;
6499 if (icode == CODE_FOR_nothing
6500 && (code == PLUS || code == MINUS || code == XOR))
6501 {
6502 icode = new_code;
6503 if (icode != CODE_FOR_nothing)
6504 compensate = true;
6505 }
6506 }
6507
6508 /* If we found something supported, great. */
6509 if (icode != CODE_FOR_nothing)
6510 {
6511 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6512 target = gen_reg_rtx (mode);
6513
6514 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6515 val = convert_modes (mode, GET_MODE (val), val, 1);
6516 if (!insn_data[icode].operand[2].predicate (val, mode))
6517 val = force_reg (mode, val);
6518
6519 insn = GEN_FCN (icode) (target, mem, val);
6520 if (insn)
6521 {
6522 emit_insn (insn);
6523
6524 /* If we need to compensate for using an operation with the
6525 wrong return value, do so now. */
6526 if (compensate)
6527 {
6528 if (!after)
6529 {
6530 if (code == PLUS)
6531 code = MINUS;
6532 else if (code == MINUS)
6533 code = PLUS;
6534 }
6535
6536 if (code == NOT)
6537 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6538 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6539 true, OPTAB_LIB_WIDEN);
6540 }
6541
6542 return target;
6543 }
6544 }
6545
6546 /* Failing that, generate a compare-and-swap loop in which we perform the
6547 operation with normal arithmetic instructions. */
6548 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6549 {
6550 rtx t0 = gen_reg_rtx (mode), t1;
6551
6552 if (!target || !register_operand (target, mode))
6553 target = gen_reg_rtx (mode);
6554
6555 start_sequence ();
6556
6557 if (!after)
6558 emit_move_insn (target, t0);
6559 t1 = t0;
6560 if (code == NOT)
6561 {
6562 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6563 code = AND;
6564 }
6565 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6566 true, OPTAB_LIB_WIDEN);
6567 if (after)
6568 emit_move_insn (target, t1);
6569
6570 insn = get_insns ();
6571 end_sequence ();
6572
6573 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6574 return target;
6575 }
6576
6577 return NULL_RTX;
6578 }
6579
6580 /* This function expands a test-and-set operation. Ideally we atomically
6581 store VAL in MEM and return the previous value in MEM. Some targets
6582 may not support this operation and only support VAL with the constant 1;
6583 in this case the return value will still be 0/1, but the exact value
6584 stored in MEM is target defined. TARGET is an optional place to stick
6585 the return value. */
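/* For example (a sketch): the __sync_lock_test_and_set builtin used in
a spin-lock acquire loop is expanded through this routine; on targets
that only accept storing the constant 1, the 0/1 return value is
still enough for a "while (__sync_lock_test_and_set (&lock, 1));"
loop. */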
6586
6587 rtx
6588 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6589 {
6590 enum machine_mode mode = GET_MODE (mem);
6591 enum insn_code icode;
6592 rtx insn;
6593
6594 /* If the target supports the test-and-set directly, great. */
6595 icode = sync_lock_test_and_set[mode];
6596 if (icode != CODE_FOR_nothing)
6597 {
6598 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6599 target = gen_reg_rtx (mode);
6600
6601 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6602 val = convert_modes (mode, GET_MODE (val), val, 1);
6603 if (!insn_data[icode].operand[2].predicate (val, mode))
6604 val = force_reg (mode, val);
6605
6606 insn = GEN_FCN (icode) (target, mem, val);
6607 if (insn)
6608 {
6609 emit_insn (insn);
6610 return target;
6611 }
6612 }
6613
6614 /* Otherwise, use a compare-and-swap loop for the exchange. */
6615 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6616 {
6617 if (!target || !register_operand (target, mode))
6618 target = gen_reg_rtx (mode);
6619 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6620 val = convert_modes (mode, GET_MODE (val), val, 1);
6621 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6622 return target;
6623 }
6624
6625 return NULL_RTX;
6626 }
6627
6628 #include "gt-optabs.h"