1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "predict.h"
30 #include "tm_p.h"
31 #include "expmed.h"
32 #include "optabs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36
37 /* Include insn-config.h before expr.h so that HAVE_conditional_move
38 is properly defined. */
39 #include "stor-layout.h"
40 #include "except.h"
41 #include "dojump.h"
42 #include "explow.h"
43 #include "expr.h"
44 #include "optabs-tree.h"
45 #include "libfuncs.h"
46
47 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
48 machine_mode *);
49 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
50 static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);
51
52 /* Debug facility for use in GDB. */
53 void debug_optab_libfuncs (void);
54 \f
55 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
56 the result of operation CODE applied to OP0 (and OP1 if it is a binary
57 operation).
58
59 If the last insn does not set TARGET, don't do anything, but return 1.
60
61 If the last insn or a previous insn sets TARGET and TARGET is one of OP0
62 or OP1, don't add the REG_EQUAL note but return 0. Our caller can then
63 try again, ensuring that TARGET is not one of the operands. */
64
65 static int
66 add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
67 {
68 rtx_insn *last_insn;
69 rtx set;
70 rtx note;
71
72 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
73
74 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
75 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
76 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
77 && GET_RTX_CLASS (code) != RTX_COMPARE
78 && GET_RTX_CLASS (code) != RTX_UNARY)
79 return 1;
80
81 if (GET_CODE (target) == ZERO_EXTRACT)
82 return 1;
83
84 for (last_insn = insns;
85 NEXT_INSN (last_insn) != NULL_RTX;
86 last_insn = NEXT_INSN (last_insn))
87 ;
88
89 /* If TARGET is in OP0 or OP1, punt. We'd end up with a note referencing
90 a value changing in the insn, so the note would be invalid for CSE. */
91 if (reg_overlap_mentioned_p (target, op0)
92 || (op1 && reg_overlap_mentioned_p (target, op1)))
93 {
94 if (MEM_P (target)
95 && (rtx_equal_p (target, op0)
96 || (op1 && rtx_equal_p (target, op1))))
97 {
98 /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
99 over expanding it as temp = MEM op X, MEM = temp. If the target
100 supports MEM = MEM op X instructions, it is sometimes too hard
101 to reconstruct that form later, especially if X is also a memory,
102 and due to multiple occurrences of addresses the address might
103 be forced into register unnecessarily.
104             Note that not emitting the REG_EQUAL note might inhibit
105 CSE in some cases. */
106 set = single_set (last_insn);
107 if (set
108 && GET_CODE (SET_SRC (set)) == code
109 && MEM_P (SET_DEST (set))
110 && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
111 || (op1 && rtx_equal_p (SET_DEST (set),
112 XEXP (SET_SRC (set), 1)))))
113 return 1;
114 }
115 return 0;
116 }
117
118 set = set_for_reg_notes (last_insn);
119 if (set == NULL_RTX)
120 return 1;
121
122 if (! rtx_equal_p (SET_DEST (set), target)
123 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
124 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
125 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
126 return 1;
127
128 if (GET_RTX_CLASS (code) == RTX_UNARY)
129 switch (code)
130 {
131 case FFS:
132 case CLZ:
133 case CTZ:
134 case CLRSB:
135 case POPCOUNT:
136 case PARITY:
137 case BSWAP:
138 if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
139 {
140 note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
141 if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
142 > GET_MODE_UNIT_SIZE (GET_MODE (target)))
143 note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
144 note, GET_MODE (op0));
145 else
146 note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
147 note, GET_MODE (op0));
148 break;
149 }
150 /* FALLTHRU */
151 default:
152 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
153 break;
154 }
155 else
156 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
157
158 set_unique_reg_note (last_insn, REG_EQUAL, note);
159
160 return 1;
161 }
162 \f
163 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
164    for a widening operation would be.  In most cases this would be the mode of
165    OP0, but if that's a constant it'll be VOIDmode, which isn't useful.  */
166
167 static machine_mode
168 widened_mode (machine_mode to_mode, rtx op0, rtx op1)
169 {
170 machine_mode m0 = GET_MODE (op0);
171 machine_mode m1 = GET_MODE (op1);
172 machine_mode result;
173
174 if (m0 == VOIDmode && m1 == VOIDmode)
175 return to_mode;
176 else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
177 result = m1;
178 else
179 result = m0;
180
181 if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
182 return to_mode;
183
184 return result;
185 }
186 \f
187 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
188 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
189 not actually do a sign-extend or zero-extend, but can leave the
190 higher-order bits of the result rtx undefined, for example, in the case
191 of logical operations, but not right shifts. */
192
193 static rtx
194 widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
195 int unsignedp, int no_extend)
196 {
197 rtx result;
198 scalar_int_mode int_mode;
199
200 /* If we don't have to extend and this is a constant, return it. */
201 if (no_extend && GET_MODE (op) == VOIDmode)
202 return op;
203
204 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
205 extend since it will be more efficient to do so unless the signedness of
206 a promoted object differs from our extension. */
207 if (! no_extend
208 || !is_a <scalar_int_mode> (mode, &int_mode)
209 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
210 && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
211 return convert_modes (mode, oldmode, op, unsignedp);
212
213 /* If MODE is no wider than a single word, we return a lowpart or paradoxical
214 SUBREG. */
215 if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
216 return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));
217
218 /* Otherwise, get an object of MODE, clobber it, and set the low-order
219 part to OP. */
220
221 result = gen_reg_rtx (int_mode);
222 emit_clobber (result);
223 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
224 return result;
225 }
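/* Editorial illustration, not in the original source: the NO_EXTEND case
   above is safe for an operation such as IOR on two QImode values widened
   to SImode, because the low byte of the result is correct even when the
   upper 24 bits of the widened operands are garbage.  A logical right
   shift, by contrast, would pull those undefined high-order bits down into
   the live part of the result, which is why shifts need a real extension.  */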
226 \f
227 /* Expand vector widening operations.
228
229 There are two different classes of operations handled here:
230 1) Operations whose result is wider than all the arguments to the operation.
231 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
232 In this case OP0 and optionally OP1 would be initialized,
233 but WIDE_OP wouldn't (not relevant for this case).
234 2) Operations whose result is of the same size as the last argument to the
235 operation, but wider than all the other arguments to the operation.
236 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
237       In this case WIDE_OP, OP0 and optionally OP1 would all be initialized.
238
239 E.g, when called to expand the following operations, this is how
240 the arguments will be initialized:
241 nops OP0 OP1 WIDE_OP
242 widening-sum 2 oprnd0 - oprnd1
243 widening-dot-product 3 oprnd0 oprnd1 oprnd2
244 widening-mult 2 oprnd0 oprnd1 -
245 type-promotion (vec-unpack) 1 oprnd0 - - */
246
247 rtx
248 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
249 rtx target, int unsignedp)
250 {
251 struct expand_operand eops[4];
252 tree oprnd0, oprnd1, oprnd2;
253 machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
254 optab widen_pattern_optab;
255 enum insn_code icode;
256 int nops = TREE_CODE_LENGTH (ops->code);
257 int op;
258
259 oprnd0 = ops->op0;
260 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
261 widen_pattern_optab =
262 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
263 if (ops->code == WIDEN_MULT_PLUS_EXPR
264 || ops->code == WIDEN_MULT_MINUS_EXPR)
265 icode = find_widening_optab_handler (widen_pattern_optab,
266 TYPE_MODE (TREE_TYPE (ops->op2)),
267 tmode0);
268 else
269 icode = optab_handler (widen_pattern_optab, tmode0);
270 gcc_assert (icode != CODE_FOR_nothing);
271
272 if (nops >= 2)
273 {
274 oprnd1 = ops->op1;
275 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
276 }
277
278 /* The last operand is of a wider mode than the rest of the operands. */
279 if (nops == 2)
280 wmode = tmode1;
281 else if (nops == 3)
282 {
283 gcc_assert (tmode1 == tmode0);
284 gcc_assert (op1);
285 oprnd2 = ops->op2;
286 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
287 }
288
289 op = 0;
290 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
291 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
292 if (op1)
293 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
294 if (wide_op)
295 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
296 expand_insn (icode, op, eops);
297 return eops[0].value;
298 }
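/* Illustrative mapping of the table above (editorial note): expanding
   DOT_PROD_EXPR with two V8HImode multiplicands accumulating into a
   V4SImode sum passes the V8HI operands as OP0 and OP1 and the V4SI
   accumulator as WIDE_OP, so wmode becomes V4SImode while tmode0 and
   tmode1 are both V8HImode.  */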
299
300 /* Generate code to perform an operation specified by TERNARY_OPTAB
301 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
302
303 UNSIGNEDP is for the case where we have to widen the operands
304 to perform the operation. It says to use zero-extension.
305
306 If TARGET is nonzero, the value
307 is generated there, if it is convenient to do so.
308 In all cases an rtx is returned for the locus of the value;
309 this may or may not be TARGET. */
310
311 rtx
312 expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
313 rtx op1, rtx op2, rtx target, int unsignedp)
314 {
315 struct expand_operand ops[4];
316 enum insn_code icode = optab_handler (ternary_optab, mode);
317
318 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
319
320 create_output_operand (&ops[0], target, mode);
321 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
322 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
323 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
324 expand_insn (icode, 4, ops);
325 return ops[0].value;
326 }
327
328
329 /* Like expand_binop, but return a constant rtx if the result can be
330 calculated at compile time. The arguments and return value are
331 otherwise the same as for expand_binop. */
332
333 rtx
334 simplify_expand_binop (machine_mode mode, optab binoptab,
335 rtx op0, rtx op1, rtx target, int unsignedp,
336 enum optab_methods methods)
337 {
338 if (CONSTANT_P (op0) && CONSTANT_P (op1))
339 {
340 rtx x = simplify_binary_operation (optab_to_code (binoptab),
341 mode, op0, op1);
342 if (x)
343 return x;
344 }
345
346 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
347 }
348
349 /* Like simplify_expand_binop, but always put the result in TARGET.
350 Return true if the expansion succeeded. */
351
352 bool
353 force_expand_binop (machine_mode mode, optab binoptab,
354 rtx op0, rtx op1, rtx target, int unsignedp,
355 enum optab_methods methods)
356 {
357 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
358 target, unsignedp, methods);
359 if (x == 0)
360 return false;
361 if (x != target)
362 emit_move_insn (target, x);
363 return true;
364 }
365
366 /* Create a new vector value in VMODE with all elements set to OP. The
367 mode of OP must be the element mode of VMODE. If OP is a constant,
368 then the return value will be a constant. */
369
370 rtx
371 expand_vector_broadcast (machine_mode vmode, rtx op)
372 {
373 enum insn_code icode;
374 rtvec vec;
375 rtx ret;
376 int i, n;
377
378 gcc_checking_assert (VECTOR_MODE_P (vmode));
379
380 if (valid_for_const_vector_p (vmode, op))
381 return gen_const_vec_duplicate (vmode, op);
382
383 icode = optab_handler (vec_duplicate_optab, vmode);
384 if (icode != CODE_FOR_nothing)
385 {
386 struct expand_operand ops[2];
387 create_output_operand (&ops[0], NULL_RTX, vmode);
388 create_input_operand (&ops[1], op, GET_MODE (op));
389 expand_insn (icode, 2, ops);
390 return ops[0].value;
391 }
392
393 /* ??? If the target doesn't have a vec_init, then we have no easy way
394 of performing this operation. Most of this sort of generic support
395 is hidden away in the vector lowering support in gimple. */
396 icode = convert_optab_handler (vec_init_optab, vmode,
397 GET_MODE_INNER (vmode));
398 if (icode == CODE_FOR_nothing)
399 return NULL;
400
401 n = GET_MODE_NUNITS (vmode);
402 vec = rtvec_alloc (n);
403 for (i = 0; i < n; ++i)
404 RTVEC_ELT (vec, i) = op;
405 ret = gen_reg_rtx (vmode);
406 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
407
408 return ret;
409 }
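/* Editorial note: the vec_init fallback above simply emits an N-element
   PARALLEL in which every element is OP, e.g. broadcasting a QImode X
   into V4QImode as (parallel [X X X X]) handed to the target's
   vec_init pattern for that mode pair.  */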
410
411 /* This subroutine of expand_doubleword_shift handles the cases in which
412 the effective shift value is >= BITS_PER_WORD. The arguments and return
413 value are the same as for the parent routine, except that SUPERWORD_OP1
414 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
415 INTO_TARGET may be null if the caller has decided to calculate it. */
416
417 static bool
418 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
419 rtx outof_target, rtx into_target,
420 int unsignedp, enum optab_methods methods)
421 {
422 if (into_target != 0)
423 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
424 into_target, unsignedp, methods))
425 return false;
426
427 if (outof_target != 0)
428 {
429 /* For a signed right shift, we must fill OUTOF_TARGET with copies
430 of the sign bit, otherwise we must fill it with zeros. */
431 if (binoptab != ashr_optab)
432 emit_move_insn (outof_target, CONST0_RTX (word_mode));
433 else
434 if (!force_expand_binop (word_mode, binoptab, outof_input,
435 gen_int_shift_amount (word_mode,
436 BITS_PER_WORD - 1),
437 outof_target, unsignedp, methods))
438 return false;
439 }
440 return true;
441 }
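/* Worked example (editorial, assuming 32-bit words): for a 64-bit logical
   right shift whose count is known to be in [32, 63], the code above emits
   into_target = outof_input >> SUPERWORD_OP1 (the caller passes either the
   count minus 32 or, when the target truncates shift counts, the count
   itself) and then simply zeroes OUTOF_TARGET; an arithmetic right shift
   instead fills OUTOF_TARGET with outof_input >> 31, i.e. with copies of
   the sign bit.  */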
442
443 /* This subroutine of expand_doubleword_shift handles the cases in which
444 the effective shift value is < BITS_PER_WORD. The arguments and return
445 value are the same as for the parent routine. */
446
447 static bool
448 expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
449 rtx outof_input, rtx into_input, rtx op1,
450 rtx outof_target, rtx into_target,
451 int unsignedp, enum optab_methods methods,
452 unsigned HOST_WIDE_INT shift_mask)
453 {
454 optab reverse_unsigned_shift, unsigned_shift;
455 rtx tmp, carries;
456
457 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
458 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
459
460 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
461 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
462 the opposite direction to BINOPTAB. */
463 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
464 {
465 carries = outof_input;
466 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
467 op1_mode), op1_mode);
468 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
469 0, true, methods);
470 }
471 else
472 {
473 /* We must avoid shifting by BITS_PER_WORD bits since that is either
474 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
475 has unknown behavior. Do a single shift first, then shift by the
476 remainder. It's OK to use ~OP1 as the remainder if shift counts
477 are truncated to the mode size. */
478 carries = expand_binop (word_mode, reverse_unsigned_shift,
479 outof_input, const1_rtx, 0, unsignedp, methods);
480 if (shift_mask == BITS_PER_WORD - 1)
481 {
482 tmp = immed_wide_int_const
483 (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
484 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
485 0, true, methods);
486 }
487 else
488 {
489 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
490 op1_mode), op1_mode);
491 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
492 0, true, methods);
493 }
494 }
495 if (tmp == 0 || carries == 0)
496 return false;
497 carries = expand_binop (word_mode, reverse_unsigned_shift,
498 carries, tmp, 0, unsignedp, methods);
499 if (carries == 0)
500 return false;
501
502 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
503 so the result can go directly into INTO_TARGET if convenient. */
504 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
505 into_target, unsignedp, methods);
506 if (tmp == 0)
507 return false;
508
509 /* Now OR in the bits carried over from OUTOF_INPUT. */
510 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
511 into_target, unsignedp, methods))
512 return false;
513
514 /* Use a standard word_mode shift for the out-of half. */
515 if (outof_target != 0)
516 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
517 outof_target, unsignedp, methods))
518 return false;
519
520 return true;
521 }
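/* Worked example (editorial, assuming 32-bit words): for a 64-bit logical
   right shift by a count C in (0, 32), the code above computes
   into_target = (into_input >> C) | (outof_input << (32 - C)) and
   outof_target = outof_input >> C.  When C is not a compile-time constant
   and counts are truncated to 5 bits, the carry term is built as
   ((outof_input << 1) << (~C & 31)), which equals outof_input << (32 - C)
   without ever shifting by exactly 32.  */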
522
523
524 /* Try implementing expand_doubleword_shift using conditional moves.
525 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
526 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
527 are the shift counts to use in the former and latter case. All other
528 arguments are the same as the parent routine. */
529
530 static bool
531 expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
532 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
533 rtx outof_input, rtx into_input,
534 rtx subword_op1, rtx superword_op1,
535 rtx outof_target, rtx into_target,
536 int unsignedp, enum optab_methods methods,
537 unsigned HOST_WIDE_INT shift_mask)
538 {
539 rtx outof_superword, into_superword;
540
541 /* Put the superword version of the output into OUTOF_SUPERWORD and
542 INTO_SUPERWORD. */
543 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
544 if (outof_target != 0 && subword_op1 == superword_op1)
545 {
546 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
547 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
548 into_superword = outof_target;
549 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
550 outof_superword, 0, unsignedp, methods))
551 return false;
552 }
553 else
554 {
555 into_superword = gen_reg_rtx (word_mode);
556 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
557 outof_superword, into_superword,
558 unsignedp, methods))
559 return false;
560 }
561
562 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
563 if (!expand_subword_shift (op1_mode, binoptab,
564 outof_input, into_input, subword_op1,
565 outof_target, into_target,
566 unsignedp, methods, shift_mask))
567 return false;
568
569 /* Select between them. Do the INTO half first because INTO_SUPERWORD
570 might be the current value of OUTOF_TARGET. */
571 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
572 into_target, into_superword, word_mode, false))
573 return false;
574
575 if (outof_target != 0)
576 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
577 outof_target, outof_superword,
578 word_mode, false))
579 return false;
580
581 return true;
582 }
583
584 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
585 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
586 input operand; the shift moves bits in the direction OUTOF_INPUT->
587 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
588 of the target. OP1 is the shift count and OP1_MODE is its mode.
589 If OP1 is constant, it will have been truncated as appropriate
590 and is known to be nonzero.
591
592 If SHIFT_MASK is zero, the result of word shifts is undefined when the
593 shift count is outside the range [0, BITS_PER_WORD). This routine must
594 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
595
596 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
597 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
598 fill with zeros or sign bits as appropriate.
599
600 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
601 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
602 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
603    In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
604 are undefined.
605
606 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
607 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
608 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
609 function wants to calculate it itself.
610
611 Return true if the shift could be successfully synthesized. */
612
613 static bool
614 expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
615 rtx outof_input, rtx into_input, rtx op1,
616 rtx outof_target, rtx into_target,
617 int unsignedp, enum optab_methods methods,
618 unsigned HOST_WIDE_INT shift_mask)
619 {
620 rtx superword_op1, tmp, cmp1, cmp2;
621 enum rtx_code cmp_code;
622
623 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
624 fill the result with sign or zero bits as appropriate. If so, the value
625 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
626 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
627 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
628
629 This isn't worthwhile for constant shifts since the optimizers will
630 cope better with in-range shift counts. */
631 if (shift_mask >= BITS_PER_WORD
632 && outof_target != 0
633 && !CONSTANT_P (op1))
634 {
635 if (!expand_doubleword_shift (op1_mode, binoptab,
636 outof_input, into_input, op1,
637 0, into_target,
638 unsignedp, methods, shift_mask))
639 return false;
640 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
641 outof_target, unsignedp, methods))
642 return false;
643 return true;
644 }
645
646 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
647 is true when the effective shift value is less than BITS_PER_WORD.
648 Set SUPERWORD_OP1 to the shift count that should be used to shift
649 OUTOF_INPUT into INTO_TARGET when the condition is false. */
650 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
651 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
652 {
653 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
654 is a subword shift count. */
655 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
656 0, true, methods);
657 cmp2 = CONST0_RTX (op1_mode);
658 cmp_code = EQ;
659 superword_op1 = op1;
660 }
661 else
662 {
663 /* Set CMP1 to OP1 - BITS_PER_WORD. */
664 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
665 0, true, methods);
666 cmp2 = CONST0_RTX (op1_mode);
667 cmp_code = LT;
668 superword_op1 = cmp1;
669 }
670 if (cmp1 == 0)
671 return false;
672
673 /* If we can compute the condition at compile time, pick the
674 appropriate subroutine. */
675 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
676 if (tmp != 0 && CONST_INT_P (tmp))
677 {
678 if (tmp == const0_rtx)
679 return expand_superword_shift (binoptab, outof_input, superword_op1,
680 outof_target, into_target,
681 unsignedp, methods);
682 else
683 return expand_subword_shift (op1_mode, binoptab,
684 outof_input, into_input, op1,
685 outof_target, into_target,
686 unsignedp, methods, shift_mask);
687 }
688
689 /* Try using conditional moves to generate straight-line code. */
690 if (HAVE_conditional_move)
691 {
692 rtx_insn *start = get_last_insn ();
693 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
694 cmp_code, cmp1, cmp2,
695 outof_input, into_input,
696 op1, superword_op1,
697 outof_target, into_target,
698 unsignedp, methods, shift_mask))
699 return true;
700 delete_insns_since (start);
701 }
702
703 /* As a last resort, use branches to select the correct alternative. */
704 rtx_code_label *subword_label = gen_label_rtx ();
705 rtx_code_label *done_label = gen_label_rtx ();
706
707 NO_DEFER_POP;
708 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
709 0, 0, subword_label,
710 profile_probability::uninitialized ());
711 OK_DEFER_POP;
712
713 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
714 outof_target, into_target,
715 unsignedp, methods))
716 return false;
717
718 emit_jump_insn (targetm.gen_jump (done_label));
719 emit_barrier ();
720 emit_label (subword_label);
721
722 if (!expand_subword_shift (op1_mode, binoptab,
723 outof_input, into_input, op1,
724 outof_target, into_target,
725 unsignedp, methods, shift_mask))
726 return false;
727
728 emit_label (done_label);
729 return true;
730 }
731 \f
732 /* Subroutine of expand_binop. Perform a double word multiplication of
733 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
734    as the target's word_mode.  This function returns NULL_RTX if anything
735 goes wrong, in which case it may have already emitted instructions
736 which need to be deleted.
737
738 If we want to multiply two two-word values and have normal and widening
739 multiplies of single-word values, we can do this with three smaller
740 multiplications.
741
742 The multiplication proceeds as follows:
743 _______________________
744 [__op0_high_|__op0_low__]
745 _______________________
746 * [__op1_high_|__op1_low__]
747 _______________________________________________
748 _______________________
749 (1) [__op0_low__*__op1_low__]
750 _______________________
751 (2a) [__op0_low__*__op1_high_]
752 _______________________
753 (2b) [__op0_high_*__op1_low__]
754 _______________________
755 (3) [__op0_high_*__op1_high_]
756
757
758 This gives a 4-word result. Since we are only interested in the
759 lower 2 words, partial result (3) and the upper words of (2a) and
760 (2b) don't need to be calculated. Hence (2a) and (2b) can be
761 calculated using non-widening multiplication.
762
763 (1), however, needs to be calculated with an unsigned widening
764 multiplication. If this operation is not directly supported we
765 try using a signed widening multiplication and adjust the result.
766 This adjustment works as follows:
767
768 If both operands are positive then no adjustment is needed.
769
770 If the operands have different signs, for example op0_low < 0 and
771 op1_low >= 0, the instruction treats the most significant bit of
772 op0_low as a sign bit instead of a bit with significance
773 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
774 with 2**BITS_PER_WORD - op0_low, and two's complements the
775 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
776 the result.
777
778 Similarly, if both operands are negative, we need to add
779 (op0_low + op1_low) * 2**BITS_PER_WORD.
780
781 We use a trick to adjust quickly. We logically shift op0_low right
782 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
783 op0_high (op1_high) before it is used to calculate 2b (2a). If no
784 logical shift exists, we do an arithmetic right shift and subtract
785 the 0 or -1. */
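/* Worked example of the adjustment (editorial, shrunk to 8-bit words for
   brevity): let op0_low = 0x90 and op1_low = 0x03.  Unsigned, the product
   is 144 * 3 = 432 = 0x01B0; a signed widening multiply instead computes
   -112 * 3 = -336 = 0xFEB0.  The difference is exactly
   op1_low * 2**BITS_PER_WORD = 0x0300, which the shift trick restores by
   adding op0_low >> 7 (here 1) into op0_high before forming the
   op0_high * op1_low partial product.  */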
786
787 static rtx
788 expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
789 bool umulp, enum optab_methods methods)
790 {
791 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
792 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
793 rtx wordm1 = (umulp ? NULL_RTX
794 : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
795 rtx product, adjust, product_high, temp;
796
797 rtx op0_high = operand_subword_force (op0, high, mode);
798 rtx op0_low = operand_subword_force (op0, low, mode);
799 rtx op1_high = operand_subword_force (op1, high, mode);
800 rtx op1_low = operand_subword_force (op1, low, mode);
801
802   /* Whether we're using an unsigned multiply to compute the low-word
803      product directly, or a signed multiply that requires the operand
804      adjustments described above, we begin by trying the two cross
805      multiplications (2a and 2b) and then computing the appropriate sum.
806
807 We have checked above that the required addition is provided.
808 Full-word addition will normally always succeed, especially if
809 it is provided at all, so we don't worry about its failure. The
810 multiplication may well fail, however, so we do handle that. */
811
812 if (!umulp)
813 {
814 /* ??? This could be done with emit_store_flag where available. */
815 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
816 NULL_RTX, 1, methods);
817 if (temp)
818 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
819 NULL_RTX, 0, OPTAB_DIRECT);
820 else
821 {
822 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
823 NULL_RTX, 0, methods);
824 if (!temp)
825 return NULL_RTX;
826 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
827 NULL_RTX, 0, OPTAB_DIRECT);
828 }
829
830 if (!op0_high)
831 return NULL_RTX;
832 }
833
834 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
835 NULL_RTX, 0, OPTAB_DIRECT);
836 if (!adjust)
837 return NULL_RTX;
838
839 /* OP0_HIGH should now be dead. */
840
841 if (!umulp)
842 {
843 /* ??? This could be done with emit_store_flag where available. */
844 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
845 NULL_RTX, 1, methods);
846 if (temp)
847 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
848 NULL_RTX, 0, OPTAB_DIRECT);
849 else
850 {
851 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
852 NULL_RTX, 0, methods);
853 if (!temp)
854 return NULL_RTX;
855 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
856 NULL_RTX, 0, OPTAB_DIRECT);
857 }
858
859 if (!op1_high)
860 return NULL_RTX;
861 }
862
863 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
864 NULL_RTX, 0, OPTAB_DIRECT);
865 if (!temp)
866 return NULL_RTX;
867
868 /* OP1_HIGH should now be dead. */
869
870 adjust = expand_binop (word_mode, add_optab, adjust, temp,
871 NULL_RTX, 0, OPTAB_DIRECT);
872
873 if (target && !REG_P (target))
874 target = NULL_RTX;
875
876 /* *_widen_optab needs to determine operand mode, make sure at least
877 one operand has non-VOID mode. */
878 if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
879 op0_low = force_reg (word_mode, op0_low);
880
881 if (umulp)
882 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
883 target, 1, OPTAB_DIRECT);
884 else
885 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
886 target, 1, OPTAB_DIRECT);
887
888 if (!product)
889 return NULL_RTX;
890
891 product_high = operand_subword (product, high, 1, mode);
892 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
893 NULL_RTX, 0, OPTAB_DIRECT);
894 emit_move_insn (product_high, adjust);
895 return product;
896 }
897 \f
898 /* Wrapper around expand_binop which takes an rtx code to specify
899 the operation to perform, not an optab pointer. All other
900 arguments are the same. */
901 rtx
902 expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
903 rtx op1, rtx target, int unsignedp,
904 enum optab_methods methods)
905 {
906 optab binop = code_to_optab (code);
907 gcc_assert (binop);
908
909 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
910 }
911
912 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
913 binop. Order them according to commutative_operand_precedence and, if
914 possible, try to put TARGET or a pseudo first. */
915 static bool
916 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
917 {
918 int op0_prec = commutative_operand_precedence (op0);
919 int op1_prec = commutative_operand_precedence (op1);
920
921 if (op0_prec < op1_prec)
922 return true;
923
924 if (op0_prec > op1_prec)
925 return false;
926
927 /* With equal precedence, both orders are ok, but it is better if the
928 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
929 if (target == 0 || REG_P (target))
930 return (REG_P (op1) && !REG_P (op0)) || target == op1;
931 else
932 return rtx_equal_p (op1, target);
933 }
934
935 /* Return true if BINOPTAB implements a shift operation. */
936
937 static bool
938 shift_optab_p (optab binoptab)
939 {
940 switch (optab_to_code (binoptab))
941 {
942 case ASHIFT:
943 case SS_ASHIFT:
944 case US_ASHIFT:
945 case ASHIFTRT:
946 case LSHIFTRT:
947 case ROTATE:
948 case ROTATERT:
949 return true;
950
951 default:
952 return false;
953 }
954 }
955
956 /* Return true if BINOPTAB implements a commutative binary operation. */
957
958 static bool
959 commutative_optab_p (optab binoptab)
960 {
961 return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
962 || binoptab == smul_widen_optab
963 || binoptab == umul_widen_optab
964 || binoptab == smul_highpart_optab
965 || binoptab == umul_highpart_optab);
966 }
967
968 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
969 optimizing, and if the operand is a constant that costs more than
970 1 instruction, force the constant into a register and return that
971 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
972
973 static rtx
974 avoid_expensive_constant (machine_mode mode, optab binoptab,
975 int opn, rtx x, bool unsignedp)
976 {
977 bool speed = optimize_insn_for_speed_p ();
978
979 if (mode != VOIDmode
980 && optimize
981 && CONSTANT_P (x)
982 && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
983 > set_src_cost (x, mode, speed)))
984 {
985 if (CONST_INT_P (x))
986 {
987 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
988 if (intval != INTVAL (x))
989 x = GEN_INT (intval);
990 }
991 else
992 x = convert_modes (mode, VOIDmode, x, unsignedp);
993 x = force_reg (mode, x);
994 }
995 return x;
996 }
997
998 /* Helper function for expand_binop: handle the case where there
999 is an insn ICODE that directly implements the indicated operation.
1000 Returns null if this is not possible. */
1001 static rtx
1002 expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
1003 rtx op0, rtx op1,
1004 rtx target, int unsignedp, enum optab_methods methods,
1005 rtx_insn *last)
1006 {
1007 machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
1008 machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
1009 machine_mode mode0, mode1, tmp_mode;
1010 struct expand_operand ops[3];
1011 bool commutative_p;
1012 rtx_insn *pat;
1013 rtx xop0 = op0, xop1 = op1;
1014 bool canonicalize_op1 = false;
1015
1016 /* If it is a commutative operator and the modes would match
1017 if we would swap the operands, we can save the conversions. */
1018 commutative_p = commutative_optab_p (binoptab);
1019 if (commutative_p
1020 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1021       && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
1022 std::swap (xop0, xop1);
1023
1024 /* If we are optimizing, force expensive constants into a register. */
1025 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1026 if (!shift_optab_p (binoptab))
1027 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1028 else
1029 /* Shifts and rotates often use a different mode for op1 from op0;
1030 for VOIDmode constants we don't know the mode, so force it
1031 to be canonicalized using convert_modes. */
1032 canonicalize_op1 = true;
1033
1034 /* In case the insn wants input operands in modes different from
1035 those of the actual operands, convert the operands. It would
1036 seem that we don't need to convert CONST_INTs, but we do, so
1037 that they're properly zero-extended, sign-extended or truncated
1038 for their mode. */
1039
1040 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1041 if (xmode0 != VOIDmode && xmode0 != mode0)
1042 {
1043 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1044 mode0 = xmode0;
1045 }
1046
1047 mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
1048 ? GET_MODE (xop1) : mode);
1049 if (xmode1 != VOIDmode && xmode1 != mode1)
1050 {
1051 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1052 mode1 = xmode1;
1053 }
1054
1055 /* If operation is commutative,
1056 try to make the first operand a register.
1057 Even better, try to make it the same as the target.
1058 Also try to make the last operand a constant. */
1059 if (commutative_p
1060 && swap_commutative_operands_with_target (target, xop0, xop1))
1061 std::swap (xop0, xop1);
1062
1063 /* Now, if insn's predicates don't allow our operands, put them into
1064 pseudo regs. */
1065
1066 if (binoptab == vec_pack_trunc_optab
1067 || binoptab == vec_pack_usat_optab
1068 || binoptab == vec_pack_ssat_optab
1069 || binoptab == vec_pack_ufix_trunc_optab
1070 || binoptab == vec_pack_sfix_trunc_optab)
1071 {
1072       /* The mode of the result is different from the mode of the
1073 arguments. */
1074 tmp_mode = insn_data[(int) icode].operand[0].mode;
1075 if (VECTOR_MODE_P (mode)
1076 && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1077 {
1078 delete_insns_since (last);
1079 return NULL_RTX;
1080 }
1081 }
1082 else
1083 tmp_mode = mode;
1084
1085 create_output_operand (&ops[0], target, tmp_mode);
1086 create_input_operand (&ops[1], xop0, mode0);
1087 create_input_operand (&ops[2], xop1, mode1);
1088 pat = maybe_gen_insn (icode, 3, ops);
1089 if (pat)
1090 {
1091 /* If PAT is composed of more than one insn, try to add an appropriate
1092 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1093 operand, call expand_binop again, this time without a target. */
1094 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1095 && ! add_equal_note (pat, ops[0].value,
1096 optab_to_code (binoptab),
1097 ops[1].value, ops[2].value))
1098 {
1099 delete_insns_since (last);
1100 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1101 unsignedp, methods);
1102 }
1103
1104 emit_insn (pat);
1105 return ops[0].value;
1106 }
1107 delete_insns_since (last);
1108 return NULL_RTX;
1109 }
1110
1111 /* Generate code to perform an operation specified by BINOPTAB
1112 on operands OP0 and OP1, with result having machine-mode MODE.
1113
1114 UNSIGNEDP is for the case where we have to widen the operands
1115 to perform the operation. It says to use zero-extension.
1116
1117 If TARGET is nonzero, the value
1118 is generated there, if it is convenient to do so.
1119 In all cases an rtx is returned for the locus of the value;
1120 this may or may not be TARGET. */
1121
1122 rtx
1123 expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
1124 rtx target, int unsignedp, enum optab_methods methods)
1125 {
1126 enum optab_methods next_methods
1127 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1128 ? OPTAB_WIDEN : methods);
1129 enum mode_class mclass;
1130 enum insn_code icode;
1131 machine_mode wider_mode;
1132 scalar_int_mode int_mode;
1133 rtx libfunc;
1134 rtx temp;
1135 rtx_insn *entry_last = get_last_insn ();
1136 rtx_insn *last;
1137
1138 mclass = GET_MODE_CLASS (mode);
1139
1140 /* If subtracting an integer constant, convert this into an addition of
1141 the negated constant. */
1142
1143 if (binoptab == sub_optab && CONST_INT_P (op1))
1144 {
1145 op1 = negate_rtx (mode, op1);
1146 binoptab = add_optab;
1147 }
1148   /* For shifts, an invalid constant op1 might have been expanded in a
1149      different mode than MODE.  Since such values are invalid, force them
1150      into a register to avoid further problems during expansion.  */
1151 else if (CONST_INT_P (op1)
1152 && shift_optab_p (binoptab)
1153 && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
1154 {
1155 op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
1156 op1 = force_reg (GET_MODE_INNER (mode), op1);
1157 }
1158
1159 /* Record where to delete back to if we backtrack. */
1160 last = get_last_insn ();
1161
1162 /* If we can do it with a three-operand insn, do so. */
1163
1164 if (methods != OPTAB_MUST_WIDEN)
1165 {
1166 if (convert_optab_p (binoptab))
1167 {
1168 machine_mode from_mode = widened_mode (mode, op0, op1);
1169 icode = find_widening_optab_handler (binoptab, mode, from_mode);
1170 }
1171 else
1172 icode = optab_handler (binoptab, mode);
1173 if (icode != CODE_FOR_nothing)
1174 {
1175 temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
1176 target, unsignedp, methods, last);
1177 if (temp)
1178 return temp;
1179 }
1180 }
1181
1182 /* If we were trying to rotate, and that didn't work, try rotating
1183 the other direction before falling back to shifts and bitwise-or. */
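  /* Editorial note: this works because rotl (x, n) is equivalent to
     rotr (x, bits - n) (and vice versa); NEWOP1 below is that
     complementary count, negated directly when the target truncates
     shift counts to bits - 1.  */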
1184 if (((binoptab == rotl_optab
1185 && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
1186 || (binoptab == rotr_optab
1187 && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
1188 && is_int_mode (mode, &int_mode))
1189 {
1190 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1191 rtx newop1;
1192 unsigned int bits = GET_MODE_PRECISION (int_mode);
1193
1194 if (CONST_INT_P (op1))
1195 newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
1196 else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
1197 newop1 = negate_rtx (GET_MODE (op1), op1);
1198 else
1199 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1200 gen_int_mode (bits, GET_MODE (op1)), op1,
1201 NULL_RTX, unsignedp, OPTAB_DIRECT);
1202
1203 temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
1204 target, unsignedp, methods, last);
1205 if (temp)
1206 return temp;
1207 }
1208
1209 /* If this is a multiply, see if we can do a widening operation that
1210 takes operands of this mode and makes a wider mode. */
1211
1212 if (binoptab == smul_optab
1213 && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
1214 && (convert_optab_handler ((unsignedp
1215 ? umul_widen_optab
1216 : smul_widen_optab),
1217 wider_mode, mode) != CODE_FOR_nothing))
1218 {
1219 /* *_widen_optab needs to determine operand mode, make sure at least
1220 one operand has non-VOID mode. */
1221 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
1222 op0 = force_reg (mode, op0);
1223 temp = expand_binop (wider_mode,
1224 unsignedp ? umul_widen_optab : smul_widen_optab,
1225 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1226
1227 if (temp != 0)
1228 {
1229 if (GET_MODE_CLASS (mode) == MODE_INT
1230 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1231 return gen_lowpart (mode, temp);
1232 else
1233 return convert_to_mode (mode, temp, unsignedp);
1234 }
1235 }
1236
1237 /* If this is a vector shift by a scalar, see if we can do a vector
1238 shift by a vector. If so, broadcast the scalar into a vector. */
1239 if (mclass == MODE_VECTOR_INT)
1240 {
1241 optab otheroptab = unknown_optab;
1242
1243 if (binoptab == ashl_optab)
1244 otheroptab = vashl_optab;
1245 else if (binoptab == ashr_optab)
1246 otheroptab = vashr_optab;
1247 else if (binoptab == lshr_optab)
1248 otheroptab = vlshr_optab;
1249 else if (binoptab == rotl_optab)
1250 otheroptab = vrotl_optab;
1251 else if (binoptab == rotr_optab)
1252 otheroptab = vrotr_optab;
1253
1254 if (otheroptab
1255 && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
1256 {
1257 /* The scalar may have been extended to be too wide. Truncate
1258 it back to the proper size to fit in the broadcast vector. */
1259 scalar_mode inner_mode = GET_MODE_INNER (mode);
1260 if (!CONST_INT_P (op1)
1261 && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
1262 > GET_MODE_BITSIZE (inner_mode)))
1263 op1 = force_reg (inner_mode,
1264 simplify_gen_unary (TRUNCATE, inner_mode, op1,
1265 GET_MODE (op1)));
1266 rtx vop1 = expand_vector_broadcast (mode, op1);
1267 if (vop1)
1268 {
1269 temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
1270 target, unsignedp, methods, last);
1271 if (temp)
1272 return temp;
1273 }
1274 }
1275 }
1276
1277 /* Look for a wider mode of the same class for which we think we
1278 can open-code the operation. Check for a widening multiply at the
1279 wider mode as well. */
1280
1281 if (CLASS_HAS_WIDER_MODES_P (mclass)
1282 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1283 FOR_EACH_WIDER_MODE (wider_mode, mode)
1284 {
1285 machine_mode next_mode;
1286 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1287 || (binoptab == smul_optab
1288 && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
1289 && (find_widening_optab_handler ((unsignedp
1290 ? umul_widen_optab
1291 : smul_widen_optab),
1292 next_mode, mode)
1293 != CODE_FOR_nothing)))
1294 {
1295 rtx xop0 = op0, xop1 = op1;
1296 int no_extend = 0;
1297
1298 /* For certain integer operations, we need not actually extend
1299 the narrow operands, as long as we will truncate
1300 the results to the same narrowness. */
1301
1302 if ((binoptab == ior_optab || binoptab == and_optab
1303 || binoptab == xor_optab
1304 || binoptab == add_optab || binoptab == sub_optab
1305 || binoptab == smul_optab || binoptab == ashl_optab)
1306 && mclass == MODE_INT)
1307 {
1308 no_extend = 1;
1309 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1310 xop0, unsignedp);
1311 if (binoptab != ashl_optab)
1312 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1313 xop1, unsignedp);
1314 }
1315
1316 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1317
1318 /* The second operand of a shift must always be extended. */
1319 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1320 no_extend && binoptab != ashl_optab);
1321
1322 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1323 unsignedp, OPTAB_DIRECT);
1324 if (temp)
1325 {
1326 if (mclass != MODE_INT
1327 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1328 {
1329 if (target == 0)
1330 target = gen_reg_rtx (mode);
1331 convert_move (target, temp, 0);
1332 return target;
1333 }
1334 else
1335 return gen_lowpart (mode, temp);
1336 }
1337 else
1338 delete_insns_since (last);
1339 }
1340 }
1341
1342 /* If operation is commutative,
1343 try to make the first operand a register.
1344 Even better, try to make it the same as the target.
1345 Also try to make the last operand a constant. */
1346 if (commutative_optab_p (binoptab)
1347 && swap_commutative_operands_with_target (target, op0, op1))
1348 std::swap (op0, op1);
1349
1350 /* These can be done a word at a time. */
1351 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1352 && is_int_mode (mode, &int_mode)
1353 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
1354 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1355 {
1356 int i;
1357 rtx_insn *insns;
1358
1359 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1360 won't be accurate, so use a new target. */
1361 if (target == 0
1362 || target == op0
1363 || target == op1
1364 || !valid_multiword_target_p (target))
1365 target = gen_reg_rtx (int_mode);
1366
1367 start_sequence ();
1368
1369 /* Do the actual arithmetic. */
1370 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
1371 {
1372 rtx target_piece = operand_subword (target, i, 1, int_mode);
1373 rtx x = expand_binop (word_mode, binoptab,
1374 operand_subword_force (op0, i, int_mode),
1375 operand_subword_force (op1, i, int_mode),
1376 target_piece, unsignedp, next_methods);
1377
1378 if (x == 0)
1379 break;
1380
1381 if (target_piece != x)
1382 emit_move_insn (target_piece, x);
1383 }
1384
1385 insns = get_insns ();
1386 end_sequence ();
1387
1388 if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
1389 {
1390 emit_insn (insns);
1391 return target;
1392 }
1393 }
1394
1395 /* Synthesize double word shifts from single word shifts. */
1396 if ((binoptab == lshr_optab || binoptab == ashl_optab
1397 || binoptab == ashr_optab)
1398 && is_int_mode (mode, &int_mode)
1399 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1400 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1401 && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
1402 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1403 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1404 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1405 {
1406 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1407 scalar_int_mode op1_mode;
1408
1409 double_shift_mask = targetm.shift_truncation_mask (int_mode);
1410 shift_mask = targetm.shift_truncation_mask (word_mode);
1411 op1_mode = (GET_MODE (op1) != VOIDmode
1412 ? as_a <scalar_int_mode> (GET_MODE (op1))
1413 : word_mode);
1414
1415 /* Apply the truncation to constant shifts. */
1416 if (double_shift_mask > 0 && CONST_INT_P (op1))
1417 op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);
1418
1419 if (op1 == CONST0_RTX (op1_mode))
1420 return op0;
1421
1422 /* Make sure that this is a combination that expand_doubleword_shift
1423 can handle. See the comments there for details. */
1424 if (double_shift_mask == 0
1425 || (shift_mask == BITS_PER_WORD - 1
1426 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1427 {
1428 rtx_insn *insns;
1429 rtx into_target, outof_target;
1430 rtx into_input, outof_input;
1431 int left_shift, outof_word;
1432
1433 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1434 won't be accurate, so use a new target. */
1435 if (target == 0
1436 || target == op0
1437 || target == op1
1438 || !valid_multiword_target_p (target))
1439 target = gen_reg_rtx (int_mode);
1440
1441 start_sequence ();
1442
1443 /* OUTOF_* is the word we are shifting bits away from, and
1444 INTO_* is the word that we are shifting bits towards, thus
1445 they differ depending on the direction of the shift and
1446 WORDS_BIG_ENDIAN. */
1447
1448 left_shift = binoptab == ashl_optab;
1449 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1450
1451 outof_target = operand_subword (target, outof_word, 1, int_mode);
1452 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1453
1454 outof_input = operand_subword_force (op0, outof_word, int_mode);
1455 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1456
1457 if (expand_doubleword_shift (op1_mode, binoptab,
1458 outof_input, into_input, op1,
1459 outof_target, into_target,
1460 unsignedp, next_methods, shift_mask))
1461 {
1462 insns = get_insns ();
1463 end_sequence ();
1464
1465 emit_insn (insns);
1466 return target;
1467 }
1468 end_sequence ();
1469 }
1470 }
1471
1472 /* Synthesize double word rotates from single word shifts. */
1473 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1474 && is_int_mode (mode, &int_mode)
1475 && CONST_INT_P (op1)
1476 && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
1477 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1478 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1479 {
1480 rtx_insn *insns;
1481 rtx into_target, outof_target;
1482 rtx into_input, outof_input;
1483 rtx inter;
1484 int shift_count, left_shift, outof_word;
1485
1486 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1487 won't be accurate, so use a new target. Do this also if target is not
1488 a REG, first because having a register instead may open optimization
1489 opportunities, and second because if target and op0 happen to be MEMs
1490 designating the same location, we would risk clobbering it too early
1491 in the code sequence we generate below. */
1492 if (target == 0
1493 || target == op0
1494 || target == op1
1495 || !REG_P (target)
1496 || !valid_multiword_target_p (target))
1497 target = gen_reg_rtx (int_mode);
1498
1499 start_sequence ();
1500
1501 shift_count = INTVAL (op1);
1502
1503 /* OUTOF_* is the word we are shifting bits away from, and
1504 INTO_* is the word that we are shifting bits towards, thus
1505 they differ depending on the direction of the shift and
1506 WORDS_BIG_ENDIAN. */
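      /* Worked example (editorial, assuming 32-bit words and little-endian
	 word order): rotating a 64-bit value left by 10 computes
	 high' = (high << 10) | (low >> 22) and
	 low' = (low << 10) | (high >> 22); the code below derives the
	 10/22 pair as second_shift_count/first_shift_count.  */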
1507
1508 left_shift = (binoptab == rotl_optab);
1509 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1510
1511 outof_target = operand_subword (target, outof_word, 1, int_mode);
1512 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1513
1514 outof_input = operand_subword_force (op0, outof_word, int_mode);
1515 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1516
1517 if (shift_count == BITS_PER_WORD)
1518 {
1519 /* This is just a word swap. */
1520 emit_move_insn (outof_target, into_input);
1521 emit_move_insn (into_target, outof_input);
1522 inter = const0_rtx;
1523 }
1524 else
1525 {
1526 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1527 HOST_WIDE_INT first_shift_count, second_shift_count;
1528 optab reverse_unsigned_shift, unsigned_shift;
1529
1530 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1531 ? lshr_optab : ashl_optab);
1532
1533 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1534 ? ashl_optab : lshr_optab);
1535
1536 if (shift_count > BITS_PER_WORD)
1537 {
1538 first_shift_count = shift_count - BITS_PER_WORD;
1539 second_shift_count = 2 * BITS_PER_WORD - shift_count;
1540 }
1541 else
1542 {
1543 first_shift_count = BITS_PER_WORD - shift_count;
1544 second_shift_count = shift_count;
1545 }
1546 rtx first_shift_count_rtx
1547 = gen_int_shift_amount (word_mode, first_shift_count);
1548 rtx second_shift_count_rtx
1549 = gen_int_shift_amount (word_mode, second_shift_count);
1550
1551 into_temp1 = expand_binop (word_mode, unsigned_shift,
1552 outof_input, first_shift_count_rtx,
1553 NULL_RTX, unsignedp, next_methods);
1554 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1555 into_input, second_shift_count_rtx,
1556 NULL_RTX, unsignedp, next_methods);
1557
1558 if (into_temp1 != 0 && into_temp2 != 0)
1559 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1560 into_target, unsignedp, next_methods);
1561 else
1562 inter = 0;
1563
1564 if (inter != 0 && inter != into_target)
1565 emit_move_insn (into_target, inter);
1566
1567 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1568 into_input, first_shift_count_rtx,
1569 NULL_RTX, unsignedp, next_methods);
1570 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1571 outof_input, second_shift_count_rtx,
1572 NULL_RTX, unsignedp, next_methods);
1573
1574 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1575 inter = expand_binop (word_mode, ior_optab,
1576 outof_temp1, outof_temp2,
1577 outof_target, unsignedp, next_methods);
1578
1579 if (inter != 0 && inter != outof_target)
1580 emit_move_insn (outof_target, inter);
1581 }
1582
1583 insns = get_insns ();
1584 end_sequence ();
1585
1586 if (inter != 0)
1587 {
1588 emit_insn (insns);
1589 return target;
1590 }
1591 }
1592
1593 /* These can be done a word at a time by propagating carries. */
1594 if ((binoptab == add_optab || binoptab == sub_optab)
1595 && is_int_mode (mode, &int_mode)
1596 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1597 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1598 {
1599 unsigned int i;
1600 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1601 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1602 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1603 rtx xop0, xop1, xtarget;
1604
1605       /* We can handle either a 1 or -1 value for the carry.  If
1606          STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since it is the
1607 one easiest to get. */
1608 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1609 int normalizep = STORE_FLAG_VALUE;
1610 #else
1611 int normalizep = 1;
1612 #endif
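      /* Illustrative trace (editorial, shrunk to 8-bit words, with
	 normalizep == 1): adding 0x01FF and 0x0002 first forms the low
	 word 0xFF + 0x02 = 0x01 (wrapped); since 0x01 < 0xFF by the
	 unsigned LT test below, a carry of 1 is recorded and then added
	 into the high-word sum, giving 0x0201.  */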
1613
1614 /* Prepare the operands. */
1615 xop0 = force_reg (int_mode, op0);
1616 xop1 = force_reg (int_mode, op1);
1617
1618 xtarget = gen_reg_rtx (int_mode);
1619
1620 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1621 target = xtarget;
1622
1623 /* Indicate for flow that the entire target reg is being set. */
1624 if (REG_P (target))
1625 emit_clobber (xtarget);
1626
1627 /* Do the actual arithmetic. */
1628 for (i = 0; i < nwords; i++)
1629 {
1630 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1631 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1632 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1633 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1634 rtx x;
1635
1636 /* Main add/subtract of the input operands. */
1637 x = expand_binop (word_mode, binoptab,
1638 op0_piece, op1_piece,
1639 target_piece, unsignedp, next_methods);
1640 if (x == 0)
1641 break;
1642
1643 if (i + 1 < nwords)
1644 {
1645 /* Store carry from main add/subtract. */
1646 carry_out = gen_reg_rtx (word_mode);
1647 carry_out = emit_store_flag_force (carry_out,
1648 (binoptab == add_optab
1649 ? LT : GT),
1650 x, op0_piece,
1651 word_mode, 1, normalizep);
1652 }
1653
1654 if (i > 0)
1655 {
1656 rtx newx;
1657
1658 /* Add/subtract previous carry to main result. */
1659 newx = expand_binop (word_mode,
1660 normalizep == 1 ? binoptab : otheroptab,
1661 x, carry_in,
1662 NULL_RTX, 1, next_methods);
1663
1664 if (i + 1 < nwords)
1665 {
1666 /* Get out carry from adding/subtracting carry in. */
1667 rtx carry_tmp = gen_reg_rtx (word_mode);
1668 carry_tmp = emit_store_flag_force (carry_tmp,
1669 (binoptab == add_optab
1670 ? LT : GT),
1671 newx, x,
1672 word_mode, 1, normalizep);
1673
1674 /* Logical-ior the two possible carries together. */
1675 carry_out = expand_binop (word_mode, ior_optab,
1676 carry_out, carry_tmp,
1677 carry_out, 0, next_methods);
1678 if (carry_out == 0)
1679 break;
1680 }
1681 emit_move_insn (target_piece, newx);
1682 }
1683 else
1684 {
1685 if (x != target_piece)
1686 emit_move_insn (target_piece, x);
1687 }
1688
1689 carry_in = carry_out;
1690 }
1691
1692 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
1693 {
1694 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
1695 || ! rtx_equal_p (target, xtarget))
1696 {
1697 rtx_insn *temp = emit_move_insn (target, xtarget);
1698
1699 set_dst_reg_note (temp, REG_EQUAL,
1700 gen_rtx_fmt_ee (optab_to_code (binoptab),
1701 int_mode, copy_rtx (xop0),
1702 copy_rtx (xop1)),
1703 target);
1704 }
1705 else
1706 target = xtarget;
1707
1708 return target;
1709 }
1710
1711 else
1712 delete_insns_since (last);
1713 }
1714
1715 /* Attempt to synthesize double word multiplies using a sequence of word
1716 mode multiplications. We first attempt to generate a sequence using a
1717 more efficient unsigned widening multiply, and if that fails we then
1718 try using a signed widening multiply. */
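/* A sketch of the identity used, with W = BITS_PER_WORD and each operand
   split as hi * 2^W + lo: modulo 2^(2*W),
     (hi0 * 2^W + lo0) * (hi1 * 2^W + lo1)
       == umul_widen (lo0, lo1) + ((hi0 * lo1 + hi1 * lo0) << W),
   i.e. one widening multiply plus two word-mode multiplies and adds. */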
1719
1720 if (binoptab == smul_optab
1721 && is_int_mode (mode, &int_mode)
1722 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1723 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1724 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1725 {
1726 rtx product = NULL_RTX;
1727 if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
1728 != CODE_FOR_nothing)
1729 {
1730 product = expand_doubleword_mult (int_mode, op0, op1, target,
1731 true, methods);
1732 if (!product)
1733 delete_insns_since (last);
1734 }
1735
1736 if (product == NULL_RTX
1737 && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
1738 != CODE_FOR_nothing))
1739 {
1740 product = expand_doubleword_mult (int_mode, op0, op1, target,
1741 false, methods);
1742 if (!product)
1743 delete_insns_since (last);
1744 }
1745
1746 if (product != NULL_RTX)
1747 {
1748 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
1749 {
1750 rtx_insn *move = emit_move_insn (target ? target : product,
1751 product);
1752 set_dst_reg_note (move,
1753 REG_EQUAL,
1754 gen_rtx_fmt_ee (MULT, int_mode,
1755 copy_rtx (op0),
1756 copy_rtx (op1)),
1757 target ? target : product);
1758 }
1759 return product;
1760 }
1761 }
1762
1763 /* It can't be open-coded in this mode.
1764 Use a library call if one is available and caller says that's ok. */
1765
1766 libfunc = optab_libfunc (binoptab, mode);
1767 if (libfunc
1768 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1769 {
1770 rtx_insn *insns;
1771 rtx op1x = op1;
1772 machine_mode op1_mode = mode;
1773 rtx value;
1774
1775 start_sequence ();
1776
1777 if (shift_optab_p (binoptab))
1778 {
1779 op1_mode = targetm.libgcc_shift_count_mode ();
1780 /* Specify unsigned here,
1781 since negative shift counts are meaningless. */
1782 op1x = convert_to_mode (op1_mode, op1, 1);
1783 }
1784
1785 if (GET_MODE (op0) != VOIDmode
1786 && GET_MODE (op0) != mode)
1787 op0 = convert_to_mode (mode, op0, unsignedp);
1788
1789 /* Pass 1 for NO_QUEUE so we don't lose any increments
1790 if the libcall is cse'd or moved. */
1791 value = emit_library_call_value (libfunc,
1792 NULL_RTX, LCT_CONST, mode,
1793 op0, mode, op1x, op1_mode);
1794
1795 insns = get_insns ();
1796 end_sequence ();
1797
1798 bool trapv = trapv_binoptab_p (binoptab);
1799 target = gen_reg_rtx (mode);
1800 emit_libcall_block_1 (insns, target, value,
1801 trapv ? NULL_RTX
1802 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1803 mode, op0, op1), trapv);
1804
1805 return target;
1806 }
1807
1808 delete_insns_since (last);
1809
1810 /* It can't be done in this mode. Can we do it in a wider mode? */
1811
1812 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1813 || methods == OPTAB_MUST_WIDEN))
1814 {
1815 /* Caller says, don't even try. */
1816 delete_insns_since (entry_last);
1817 return 0;
1818 }
1819
1820 /* Compute the value of METHODS to pass to recursive calls.
1821 Don't allow widening to be tried recursively. */
1822
1823 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1824
1825 /* Look for a wider mode of the same class for which it appears we can do
1826 the operation. */
1827
1828 if (CLASS_HAS_WIDER_MODES_P (mclass))
1829 {
1830 /* This code doesn't make sense for conversion optabs, since we
1831 wouldn't then want to extend the operands to be the same size
1832 as the result. */
1833 gcc_assert (!convert_optab_p (binoptab));
1834 FOR_EACH_WIDER_MODE (wider_mode, mode)
1835 {
1836 if (optab_handler (binoptab, wider_mode)
1837 || (methods == OPTAB_LIB
1838 && optab_libfunc (binoptab, wider_mode)))
1839 {
1840 rtx xop0 = op0, xop1 = op1;
1841 int no_extend = 0;
1842
1843 /* For certain integer operations, we need not actually extend
1844 the narrow operands, as long as we will truncate
1845 the results to the same narrowness. */
1846
1847 if ((binoptab == ior_optab || binoptab == and_optab
1848 || binoptab == xor_optab
1849 || binoptab == add_optab || binoptab == sub_optab
1850 || binoptab == smul_optab || binoptab == ashl_optab)
1851 && mclass == MODE_INT)
1852 no_extend = 1;
1853
1854 xop0 = widen_operand (xop0, wider_mode, mode,
1855 unsignedp, no_extend);
1856
1857 /* The second operand of a shift must always be extended. */
1858 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1859 no_extend && binoptab != ashl_optab);
1860
1861 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1862 unsignedp, methods);
1863 if (temp)
1864 {
1865 if (mclass != MODE_INT
1866 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1867 {
1868 if (target == 0)
1869 target = gen_reg_rtx (mode);
1870 convert_move (target, temp, 0);
1871 return target;
1872 }
1873 else
1874 return gen_lowpart (mode, temp);
1875 }
1876 else
1877 delete_insns_since (last);
1878 }
1879 }
1880 }
1881
1882 delete_insns_since (entry_last);
1883 return 0;
1884 }
1885 \f
1886 /* Expand a binary operator which has both signed and unsigned forms.
1887 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1888 signed operations.
1889
1890 If we widen unsigned operands, we may use a signed wider operation instead
1891 of an unsigned wider operation, since the result would be the same. */
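/* For example, an unsigned QImode division widened to a signed HImode
   division of zero-extended operands: both inputs are then nonnegative,
   so the signed operation yields the same low bits. */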
1892
1893 rtx
1894 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1895 rtx op0, rtx op1, rtx target, int unsignedp,
1896 enum optab_methods methods)
1897 {
1898 rtx temp;
1899 optab direct_optab = unsignedp ? uoptab : soptab;
1900 bool save_enable;
1901
1902 /* Do it without widening, if possible. */
1903 temp = expand_binop (mode, direct_optab, op0, op1, target,
1904 unsignedp, OPTAB_DIRECT);
1905 if (temp || methods == OPTAB_DIRECT)
1906 return temp;
1907
1908 /* Try widening to a signed int. Disable any direct use of any
1909 signed insn in the current mode. */
1910 save_enable = swap_optab_enable (soptab, mode, false);
1911
1912 temp = expand_binop (mode, soptab, op0, op1, target,
1913 unsignedp, OPTAB_WIDEN);
1914
1915 /* For unsigned operands, try widening to an unsigned int. */
1916 if (!temp && unsignedp)
1917 temp = expand_binop (mode, uoptab, op0, op1, target,
1918 unsignedp, OPTAB_WIDEN);
1919 if (temp || methods == OPTAB_WIDEN)
1920 goto egress;
1921
1922 /* Use the right width libcall if that exists. */
1923 temp = expand_binop (mode, direct_optab, op0, op1, target,
1924 unsignedp, OPTAB_LIB);
1925 if (temp || methods == OPTAB_LIB)
1926 goto egress;
1927
1928 /* Must widen and use a libcall, use either signed or unsigned. */
1929 temp = expand_binop (mode, soptab, op0, op1, target,
1930 unsignedp, methods);
1931 if (!temp && unsignedp)
1932 temp = expand_binop (mode, uoptab, op0, op1, target,
1933 unsignedp, methods);
1934
1935 egress:
1936 /* Undo the fiddling above. */
1937 if (save_enable)
1938 swap_optab_enable (soptab, mode, true);
1939 return temp;
1940 }
1941 \f
1942 /* Generate code to perform an operation specified by UNOPPTAB
1943 on operand OP0, with two results to TARG0 and TARG1.
1944 We assume that the order of the operands for the instruction
1945 is TARG0, TARG1, OP0.
1946
1947 Either TARG0 or TARG1 may be zero, but what that means is that
1948 the result is not actually wanted. We will generate it into
1949 a dummy pseudo-reg and discard it. They may not both be zero.
1950
1951 Returns 1 if this operation can be performed; 0 if not. */
1952
1953 int
1954 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1955 int unsignedp)
1956 {
1957 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1958 enum mode_class mclass;
1959 machine_mode wider_mode;
1960 rtx_insn *entry_last = get_last_insn ();
1961 rtx_insn *last;
1962
1963 mclass = GET_MODE_CLASS (mode);
1964
1965 if (!targ0)
1966 targ0 = gen_reg_rtx (mode);
1967 if (!targ1)
1968 targ1 = gen_reg_rtx (mode);
1969
1970 /* Record where to go back to if we fail. */
1971 last = get_last_insn ();
1972
1973 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1974 {
1975 struct expand_operand ops[3];
1976 enum insn_code icode = optab_handler (unoptab, mode);
1977
1978 create_fixed_operand (&ops[0], targ0);
1979 create_fixed_operand (&ops[1], targ1);
1980 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1981 if (maybe_expand_insn (icode, 3, ops))
1982 return 1;
1983 }
1984
1985 /* It can't be done in this mode. Can we do it in a wider mode? */
1986
1987 if (CLASS_HAS_WIDER_MODES_P (mclass))
1988 {
1989 FOR_EACH_WIDER_MODE (wider_mode, mode)
1990 {
1991 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1992 {
1993 rtx t0 = gen_reg_rtx (wider_mode);
1994 rtx t1 = gen_reg_rtx (wider_mode);
1995 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1996
1997 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1998 {
1999 convert_move (targ0, t0, unsignedp);
2000 convert_move (targ1, t1, unsignedp);
2001 return 1;
2002 }
2003 else
2004 delete_insns_since (last);
2005 }
2006 }
2007 }
2008
2009 delete_insns_since (entry_last);
2010 return 0;
2011 }
2012 \f
2013 /* Generate code to perform an operation specified by BINOPTAB
2014 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2015 We assume that the order of the operands for the instruction
2016 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2017 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2018
2019 Either TARG0 or TARG1 may be zero, but what that means is that
2020 the result is not actually wanted. We will generate it into
2021 a dummy pseudo-reg and discard it. They may not both be zero.
2022
2023 Returns 1 if this operation can be performed; 0 if not. */
2024
2025 int
2026 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2027 int unsignedp)
2028 {
2029 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2030 enum mode_class mclass;
2031 machine_mode wider_mode;
2032 rtx_insn *entry_last = get_last_insn ();
2033 rtx_insn *last;
2034
2035 mclass = GET_MODE_CLASS (mode);
2036
2037 if (!targ0)
2038 targ0 = gen_reg_rtx (mode);
2039 if (!targ1)
2040 targ1 = gen_reg_rtx (mode);
2041
2042 /* Record where to go back to if we fail. */
2043 last = get_last_insn ();
2044
2045 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2046 {
2047 struct expand_operand ops[4];
2048 enum insn_code icode = optab_handler (binoptab, mode);
2049 machine_mode mode0 = insn_data[icode].operand[1].mode;
2050 machine_mode mode1 = insn_data[icode].operand[2].mode;
2051 rtx xop0 = op0, xop1 = op1;
2052
2053 /* If we are optimizing, force expensive constants into a register. */
2054 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2055 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2056
2057 create_fixed_operand (&ops[0], targ0);
2058 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2059 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2060 create_fixed_operand (&ops[3], targ1);
2061 if (maybe_expand_insn (icode, 4, ops))
2062 return 1;
2063 delete_insns_since (last);
2064 }
2065
2066 /* It can't be done in this mode. Can we do it in a wider mode? */
2067
2068 if (CLASS_HAS_WIDER_MODES_P (mclass))
2069 {
2070 FOR_EACH_WIDER_MODE (wider_mode, mode)
2071 {
2072 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2073 {
2074 rtx t0 = gen_reg_rtx (wider_mode);
2075 rtx t1 = gen_reg_rtx (wider_mode);
2076 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2077 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2078
2079 if (expand_twoval_binop (binoptab, cop0, cop1,
2080 t0, t1, unsignedp))
2081 {
2082 convert_move (targ0, t0, unsignedp);
2083 convert_move (targ1, t1, unsignedp);
2084 return 1;
2085 }
2086 else
2087 delete_insns_since (last);
2088 }
2089 }
2090 }
2091
2092 delete_insns_since (entry_last);
2093 return 0;
2094 }
2095
2096 /* Expand the two-valued library call indicated by BINOPTAB, but
2097 preserve only one of the values. If TARG0 is non-NULL, the first
2098 value is placed into TARG0; otherwise the second value is placed
2099 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2100 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2101 This routine assumes that the value returned by the library call is
2102 as if the return value were of an integral mode twice as wide as the
2103 mode of OP0. Returns true if the call was successful. */
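/* A typical use is a divmod-style libcall: for SImode operands the call
   returns one DImode value whose two SImode halves carry the two results,
   and the subreg below extracts whichever half was asked for. */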
2104
2105 bool
2106 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2107 rtx targ0, rtx targ1, enum rtx_code code)
2108 {
2109 machine_mode mode;
2110 machine_mode libval_mode;
2111 rtx libval;
2112 rtx_insn *insns;
2113 rtx libfunc;
2114
2115 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2116 gcc_assert (!targ0 != !targ1);
2117
2118 mode = GET_MODE (op0);
2119 libfunc = optab_libfunc (binoptab, mode);
2120 if (!libfunc)
2121 return false;
2122
2123 /* The value returned by the library function will have twice as
2124 many bits as the nominal MODE. */
2125 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2126 start_sequence ();
2127 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2128 libval_mode,
2129 op0, mode,
2130 op1, mode);
2131 /* Get the part of LIBVAL containing the value that we want. */
2132 libval = simplify_gen_subreg (mode, libval, libval_mode,
2133 targ0 ? 0 : GET_MODE_SIZE (mode));
2134 insns = get_insns ();
2135 end_sequence ();
2136 /* Move the result into the desired location. */
2137 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2138 gen_rtx_fmt_ee (code, mode, op0, op1));
2139
2140 return true;
2141 }
2142
2143 \f
2144 /* Wrapper around expand_unop which takes an rtx code to specify
2145 the operation to perform, not an optab pointer. All other
2146 arguments are the same. */
2147 rtx
2148 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2149 rtx target, int unsignedp)
2150 {
2151 optab unop = code_to_optab (code);
2152 gcc_assert (unop);
2153
2154 return expand_unop (mode, unop, op0, target, unsignedp);
2155 }
2156
2157 /* Try calculating
2158 (clz:narrow x)
2159 as
2160 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2161
2162 A similar operation can be used for clrsb. UNOPTAB says which operation
2163 we are trying to expand. */
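/* For example, clz on a QImode value computed via SImode:
     clz:QI (x) == clz:SI (zero_extend:SI (x)) - (32 - 8),
   since the zero extension contributes exactly 24 extra leading zeros. */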
2164 static rtx
2165 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2166 {
2167 opt_scalar_int_mode wider_mode_iter;
2168 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2169 {
2170 scalar_int_mode wider_mode = wider_mode_iter.require ();
2171 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2172 {
2173 rtx xop0, temp;
2174 rtx_insn *last;
2175
2176 last = get_last_insn ();
2177
2178 if (target == 0)
2179 target = gen_reg_rtx (mode);
2180 xop0 = widen_operand (op0, wider_mode, mode,
2181 unoptab != clrsb_optab, false);
2182 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2183 unoptab != clrsb_optab);
2184 if (temp != 0)
2185 temp = expand_binop
2186 (wider_mode, sub_optab, temp,
2187 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2188 - GET_MODE_PRECISION (mode),
2189 wider_mode),
2190 target, true, OPTAB_DIRECT);
2191 if (temp == 0)
2192 delete_insns_since (last);
2193
2194 return temp;
2195 }
2196 }
2197 return 0;
2198 }
2199
2200 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2201 quantities, choosing which based on whether the high word is nonzero. */
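/* In C-like terms, with hypothetical word-mode helpers clz32 ():
     clz64 (x) == (hi != 0) ? clz32 (hi) : 32 + clz32 (lo),
   so only one of the two word-mode clz results is used at run time. */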
2202 static rtx
2203 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2204 {
2205 rtx xop0 = force_reg (mode, op0);
2206 rtx subhi = gen_highpart (word_mode, xop0);
2207 rtx sublo = gen_lowpart (word_mode, xop0);
2208 rtx_code_label *hi0_label = gen_label_rtx ();
2209 rtx_code_label *after_label = gen_label_rtx ();
2210 rtx_insn *seq;
2211 rtx temp, result;
2212
2213 /* If we were not given a target, use a word_mode register, not a
2214 'mode' register. The result will fit, and nobody is expecting
2215 anything bigger (the return type of __builtin_clz* is int). */
2216 if (!target)
2217 target = gen_reg_rtx (word_mode);
2218
2219 /* In any case, write to a word_mode scratch in both branches of the
2220 conditional, so that there is a single move insn setting 'target'
2221 on which to tag a REG_EQUAL note. */
2222 result = gen_reg_rtx (word_mode);
2223
2224 start_sequence ();
2225
2226 /* If the high word is not equal to zero,
2227 then clz of the full value is clz of the high word. */
2228 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2229 word_mode, true, hi0_label);
2230
2231 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2232 if (!temp)
2233 goto fail;
2234
2235 if (temp != result)
2236 convert_move (result, temp, true);
2237
2238 emit_jump_insn (targetm.gen_jump (after_label));
2239 emit_barrier ();
2240
2241 /* Else clz of the full value is clz of the low word plus the number
2242 of bits in the high word. */
2243 emit_label (hi0_label);
2244
2245 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2246 if (!temp)
2247 goto fail;
2248 temp = expand_binop (word_mode, add_optab, temp,
2249 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2250 result, true, OPTAB_DIRECT);
2251 if (!temp)
2252 goto fail;
2253 if (temp != result)
2254 convert_move (result, temp, true);
2255
2256 emit_label (after_label);
2257 convert_move (target, result, true);
2258
2259 seq = get_insns ();
2260 end_sequence ();
2261
2262 add_equal_note (seq, target, CLZ, xop0, 0);
2263 emit_insn (seq);
2264 return target;
2265
2266 fail:
2267 end_sequence ();
2268 return 0;
2269 }
2270
2271 /* Try calculating popcount of a double-word quantity as two popcounts of
2272 word-sized quantities and summing up the results. */
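/* I.e. popcount64 (x) == popcount32 (lo) + popcount32 (hi), since the two
   halves partition the bits; popcount32 () stands in for the word-mode
   instruction. */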
2273 static rtx
2274 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2275 {
2276 rtx t0, t1, t;
2277 rtx_insn *seq;
2278
2279 start_sequence ();
2280
2281 t0 = expand_unop_direct (word_mode, popcount_optab,
2282 operand_subword_force (op0, 0, mode), NULL_RTX,
2283 true);
2284 t1 = expand_unop_direct (word_mode, popcount_optab,
2285 operand_subword_force (op0, 1, mode), NULL_RTX,
2286 true);
2287 if (!t0 || !t1)
2288 {
2289 end_sequence ();
2290 return NULL_RTX;
2291 }
2292
2293 /* If we were not given a target, use a word_mode register, not a
2294 'mode' register. The result will fit, and nobody is expecting
2295 anything bigger (the return type of __builtin_popcount* is int). */
2296 if (!target)
2297 target = gen_reg_rtx (word_mode);
2298
2299 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2300
2301 seq = get_insns ();
2302 end_sequence ();
2303
2304 add_equal_note (seq, t, POPCOUNT, op0, 0);
2305 emit_insn (seq);
2306 return t;
2307 }
2308
2309 /* Try calculating
2310 (parity:wide x)
2311 as
2312 (parity:narrow (low (x) ^ high (x))) */
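/* This is valid because parity is the sum of all bits modulo 2, and XOR
   adds the corresponding bits of the two halves modulo 2. */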
2313 static rtx
2314 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2315 {
2316 rtx t = expand_binop (word_mode, xor_optab,
2317 operand_subword_force (op0, 0, mode),
2318 operand_subword_force (op0, 1, mode),
2319 NULL_RTX, 0, OPTAB_DIRECT);
2320 return expand_unop (word_mode, parity_optab, t, target, true);
2321 }
2322
2323 /* Try calculating
2324 (bswap:narrow x)
2325 as
2326 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
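/* For example, a HImode bswap of 0xAABB done in SImode: the operand is
   zero-extended to 0x0000AABB, bswap:SI gives 0xBBAA0000, and the shift
   right by 32 - 16 leaves the desired 0xBBAA. */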
2327 static rtx
2328 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2329 {
2330 rtx x;
2331 rtx_insn *last;
2332 opt_scalar_int_mode wider_mode_iter;
2333
2334 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2335 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2336 != CODE_FOR_nothing)
2337 break;
2338
2339 if (!wider_mode_iter.exists ())
2340 return NULL_RTX;
2341
2342 scalar_int_mode wider_mode = wider_mode_iter.require ();
2343 last = get_last_insn ();
2344
2345 x = widen_operand (op0, wider_mode, mode, true, true);
2346 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2347
2348 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2349 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2350 if (x != 0)
2351 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2352 GET_MODE_BITSIZE (wider_mode)
2353 - GET_MODE_BITSIZE (mode),
2354 NULL_RTX, true);
2355
2356 if (x != 0)
2357 {
2358 if (target == 0)
2359 target = gen_reg_rtx (mode);
2360 emit_move_insn (target, gen_lowpart (mode, x));
2361 }
2362 else
2363 delete_insns_since (last);
2364
2365 return target;
2366 }
2367
2368 /* Try calculating bswap as two bswaps of two word-sized operands. */
2369
2370 static rtx
2371 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2372 {
2373 rtx t0, t1;
2374
2375 t1 = expand_unop (word_mode, bswap_optab,
2376 operand_subword_force (op, 0, mode), NULL_RTX, true);
2377 t0 = expand_unop (word_mode, bswap_optab,
2378 operand_subword_force (op, 1, mode), NULL_RTX, true);
2379
2380 if (target == 0 || !valid_multiword_target_p (target))
2381 target = gen_reg_rtx (mode);
2382 if (REG_P (target))
2383 emit_clobber (target);
2384 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2385 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2386
2387 return target;
2388 }
2389
2390 /* Try calculating (parity x) as (and (popcount x) 1), where
2391 popcount can also be done in a wider mode. */
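/* E.g. popcount (0b1011) == 3 and 3 & 1 == 1, the parity. Widening by
   zero extension is safe because the extra zero bits do not change the
   population count. */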
2392 static rtx
2393 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2394 {
2395 enum mode_class mclass = GET_MODE_CLASS (mode);
2396 opt_scalar_int_mode wider_mode_iter;
2397 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2398 {
2399 scalar_int_mode wider_mode = wider_mode_iter.require ();
2400 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2401 {
2402 rtx xop0, temp;
2403 rtx_insn *last;
2404
2405 last = get_last_insn ();
2406
2407 if (target == 0 || GET_MODE (target) != wider_mode)
2408 target = gen_reg_rtx (wider_mode);
2409
2410 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2411 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2412 true);
2413 if (temp != 0)
2414 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2415 target, true, OPTAB_DIRECT);
2416
2417 if (temp)
2418 {
2419 if (mclass != MODE_INT
2420 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2421 return convert_to_mode (mode, temp, 0);
2422 else
2423 return gen_lowpart (mode, temp);
2424 }
2425 else
2426 delete_insns_since (last);
2427 }
2428 }
2429 return 0;
2430 }
2431
2432 /* Try calculating ctz(x) as K - clz(x & -x),
2433 where K is GET_MODE_PRECISION(mode) - 1.
2434
2435 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2436 don't have to worry about what the hardware does in that case. (If
2437 the clz instruction produces the usual value at 0, which is K, the
2438 result of this code sequence will be -1; expand_ffs, below, relies
2439 on this. It might be nice to have it be K instead, for consistency
2440 with the (very few) processors that provide a ctz with a defined
2441 value, but that would take one more instruction, and it would be
2442 less convenient for expand_ffs anyway.) */
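/* Worked example in 8 bits: for x = 0b00101000, x & -x = 0b00001000
   isolates the lowest set bit; clz of that is 4, and K - 4 = 7 - 4 = 3,
   the index of the lowest set bit, i.e. ctz (x). */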
2443
2444 static rtx
2445 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2446 {
2447 rtx_insn *seq;
2448 rtx temp;
2449
2450 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2451 return 0;
2452
2453 start_sequence ();
2454
2455 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2456 if (temp)
2457 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2458 true, OPTAB_DIRECT);
2459 if (temp)
2460 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2461 if (temp)
2462 temp = expand_binop (mode, sub_optab,
2463 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2464 temp, target,
2465 true, OPTAB_DIRECT);
2466 if (temp == 0)
2467 {
2468 end_sequence ();
2469 return 0;
2470 }
2471
2472 seq = get_insns ();
2473 end_sequence ();
2474
2475 add_equal_note (seq, temp, CTZ, op0, 0);
2476 emit_insn (seq);
2477 return temp;
2478 }
2479
2480
2481 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2482 else with the sequence used by expand_clz.
2483
2484 The ffs builtin promises to return zero for a zero value and ctz/clz
2485 may have an undefined value in that case. If they do not give us a
2486 convenient value, we have to generate a test and branch. */
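/* In other words the goal is ffs (x) == (x ? ctz (x) + 1 : 0); when ctz
   happens to yield -1 at zero, the final add of 1 already produces 0 and
   the test and branch can be omitted. */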
2487 static rtx
2488 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2489 {
2490 HOST_WIDE_INT val = 0;
2491 bool defined_at_zero = false;
2492 rtx temp;
2493 rtx_insn *seq;
2494
2495 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2496 {
2497 start_sequence ();
2498
2499 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2500 if (!temp)
2501 goto fail;
2502
2503 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2504 }
2505 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2506 {
2507 start_sequence ();
2508 temp = expand_ctz (mode, op0, 0);
2509 if (!temp)
2510 goto fail;
2511
2512 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2513 {
2514 defined_at_zero = true;
2515 val = (GET_MODE_PRECISION (mode) - 1) - val;
2516 }
2517 }
2518 else
2519 return 0;
2520
2521 if (defined_at_zero && val == -1)
2522 /* No correction needed at zero. */;
2523 else
2524 {
2525 /* We don't try to do anything clever with the situation found
2526 on some processors (e.g. Alpha) where ctz(0:mode) ==
2527 bitsize(mode). If someone can think of a way to send N to -1
2528 and leave alone all values in the range 0..N-1 (where N is a
2529 power of two), cheaper than this test-and-branch, please add it.
2530
2531 The test-and-branch is done after the operation itself, in case
2532 the operation sets condition codes that can be recycled for this.
2533 (This is true on i386, for instance.) */
2534
2535 rtx_code_label *nonzero_label = gen_label_rtx ();
2536 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2537 mode, true, nonzero_label);
2538
2539 convert_move (temp, GEN_INT (-1), false);
2540 emit_label (nonzero_label);
2541 }
2542
2543 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2544 to produce a value in the range 0..bitsize. */
2545 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2546 target, false, OPTAB_DIRECT);
2547 if (!temp)
2548 goto fail;
2549
2550 seq = get_insns ();
2551 end_sequence ();
2552
2553 add_equal_note (seq, temp, FFS, op0, 0);
2554 emit_insn (seq);
2555 return temp;
2556
2557 fail:
2558 end_sequence ();
2559 return 0;
2560 }
2561
2562 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2563 conditions, VAL may already be a SUBREG against which we cannot generate
2564 a further SUBREG. In this case, we expect forcing the value into a
2565 register will work around the situation. */
2566
2567 static rtx
2568 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2569 machine_mode imode)
2570 {
2571 rtx ret;
2572 ret = lowpart_subreg (omode, val, imode);
2573 if (ret == NULL)
2574 {
2575 val = force_reg (imode, val);
2576 ret = lowpart_subreg (omode, val, imode);
2577 gcc_assert (ret != NULL);
2578 }
2579 return ret;
2580 }
2581
2582 /* Expand a floating point absolute value or negation operation via a
2583 logical operation on the sign bit. */
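/* On the integer image of the float this is
     neg (x) == x ^ SIGN_MASK and abs (x) == x & ~SIGN_MASK,
   where SIGN_MASK has only the sign bit set. */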
2584
2585 static rtx
2586 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2587 rtx op0, rtx target)
2588 {
2589 const struct real_format *fmt;
2590 int bitpos, word, nwords, i;
2591 scalar_int_mode imode;
2592 rtx temp;
2593 rtx_insn *insns;
2594
2595 /* The format has to have a simple sign bit. */
2596 fmt = REAL_MODE_FORMAT (mode);
2597 if (fmt == NULL)
2598 return NULL_RTX;
2599
2600 bitpos = fmt->signbit_rw;
2601 if (bitpos < 0)
2602 return NULL_RTX;
2603
2604 /* Don't create negative zeros if the format doesn't support them. */
2605 if (code == NEG && !fmt->has_signed_zero)
2606 return NULL_RTX;
2607
2608 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2609 {
2610 if (!int_mode_for_mode (mode).exists (&imode))
2611 return NULL_RTX;
2612 word = 0;
2613 nwords = 1;
2614 }
2615 else
2616 {
2617 imode = word_mode;
2618
2619 if (FLOAT_WORDS_BIG_ENDIAN)
2620 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2621 else
2622 word = bitpos / BITS_PER_WORD;
2623 bitpos = bitpos % BITS_PER_WORD;
2624 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2625 }
2626
2627 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2628 if (code == ABS)
2629 mask = ~mask;
2630
2631 if (target == 0
2632 || target == op0
2633 || (nwords > 1 && !valid_multiword_target_p (target)))
2634 target = gen_reg_rtx (mode);
2635
2636 if (nwords > 1)
2637 {
2638 start_sequence ();
2639
2640 for (i = 0; i < nwords; ++i)
2641 {
2642 rtx targ_piece = operand_subword (target, i, 1, mode);
2643 rtx op0_piece = operand_subword_force (op0, i, mode);
2644
2645 if (i == word)
2646 {
2647 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2648 op0_piece,
2649 immed_wide_int_const (mask, imode),
2650 targ_piece, 1, OPTAB_LIB_WIDEN);
2651 if (temp != targ_piece)
2652 emit_move_insn (targ_piece, temp);
2653 }
2654 else
2655 emit_move_insn (targ_piece, op0_piece);
2656 }
2657
2658 insns = get_insns ();
2659 end_sequence ();
2660
2661 emit_insn (insns);
2662 }
2663 else
2664 {
2665 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2666 gen_lowpart (imode, op0),
2667 immed_wide_int_const (mask, imode),
2668 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2669 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2670
2671 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2672 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2673 target);
2674 }
2675
2676 return target;
2677 }
2678
2679 /* As expand_unop, but will fail rather than attempt the operation in a
2680 different mode or with a libcall. */
2681 static rtx
2682 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2683 int unsignedp)
2684 {
2685 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2686 {
2687 struct expand_operand ops[2];
2688 enum insn_code icode = optab_handler (unoptab, mode);
2689 rtx_insn *last = get_last_insn ();
2690 rtx_insn *pat;
2691
2692 create_output_operand (&ops[0], target, mode);
2693 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2694 pat = maybe_gen_insn (icode, 2, ops);
2695 if (pat)
2696 {
2697 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2698 && ! add_equal_note (pat, ops[0].value,
2699 optab_to_code (unoptab),
2700 ops[1].value, NULL_RTX))
2701 {
2702 delete_insns_since (last);
2703 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2704 }
2705
2706 emit_insn (pat);
2707
2708 return ops[0].value;
2709 }
2710 }
2711 return 0;
2712 }
2713
2714 /* Generate code to perform an operation specified by UNOPTAB
2715 on operand OP0, with result having machine-mode MODE.
2716
2717 UNSIGNEDP is for the case where we have to widen the operands
2718 to perform the operation. It says to use zero-extension.
2719
2720 If TARGET is nonzero, the value
2721 is generated there, if it is convenient to do so.
2722 In all cases an rtx is returned for the locus of the value;
2723 this may or may not be TARGET. */
2724
2725 rtx
2726 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2727 int unsignedp)
2728 {
2729 enum mode_class mclass = GET_MODE_CLASS (mode);
2730 machine_mode wider_mode;
2731 scalar_int_mode int_mode;
2732 scalar_float_mode float_mode;
2733 rtx temp;
2734 rtx libfunc;
2735
2736 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2737 if (temp)
2738 return temp;
2739
2740 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2741
2742 /* Widening (or narrowing) clz needs special treatment. */
2743 if (unoptab == clz_optab)
2744 {
2745 if (is_a <scalar_int_mode> (mode, &int_mode))
2746 {
2747 temp = widen_leading (int_mode, op0, target, unoptab);
2748 if (temp)
2749 return temp;
2750
2751 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2752 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2753 {
2754 temp = expand_doubleword_clz (int_mode, op0, target);
2755 if (temp)
2756 return temp;
2757 }
2758 }
2759
2760 goto try_libcall;
2761 }
2762
2763 if (unoptab == clrsb_optab)
2764 {
2765 if (is_a <scalar_int_mode> (mode, &int_mode))
2766 {
2767 temp = widen_leading (int_mode, op0, target, unoptab);
2768 if (temp)
2769 return temp;
2770 }
2771 goto try_libcall;
2772 }
2773
2774 if (unoptab == popcount_optab
2775 && is_a <scalar_int_mode> (mode, &int_mode)
2776 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2777 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2778 && optimize_insn_for_speed_p ())
2779 {
2780 temp = expand_doubleword_popcount (int_mode, op0, target);
2781 if (temp)
2782 return temp;
2783 }
2784
2785 if (unoptab == parity_optab
2786 && is_a <scalar_int_mode> (mode, &int_mode)
2787 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2788 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2789 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2790 && optimize_insn_for_speed_p ())
2791 {
2792 temp = expand_doubleword_parity (int_mode, op0, target);
2793 if (temp)
2794 return temp;
2795 }
2796
2797 /* Widening (or narrowing) bswap needs special treatment. */
2798 if (unoptab == bswap_optab)
2799 {
2800 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2801 or ROTATERT. First try these directly; if this fails, then try the
2802 obvious pair of shifts with allowed widening, as this will probably
2803 always be more efficient than the other fallback methods. */
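/* Concretely, for a 16-bit value either rotate by 8 swaps the two bytes,
   turning 0xAABB into 0xBBAA; the shift fallback computes the same value
   as (x << 8) | (x >> 8) in HImode. */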
2804 if (mode == HImode)
2805 {
2806 rtx_insn *last;
2807 rtx temp1, temp2;
2808
2809 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2810 {
2811 temp = expand_binop (mode, rotl_optab, op0,
2812 gen_int_shift_amount (mode, 8),
2813 target, unsignedp, OPTAB_DIRECT);
2814 if (temp)
2815 return temp;
2816 }
2817
2818 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2819 {
2820 temp = expand_binop (mode, rotr_optab, op0,
2821 gen_int_shift_amount (mode, 8),
2822 target, unsignedp, OPTAB_DIRECT);
2823 if (temp)
2824 return temp;
2825 }
2826
2827 last = get_last_insn ();
2828
2829 temp1 = expand_binop (mode, ashl_optab, op0,
2830 gen_int_shift_amount (mode, 8), NULL_RTX,
2831 unsignedp, OPTAB_WIDEN);
2832 temp2 = expand_binop (mode, lshr_optab, op0,
2833 gen_int_shift_amount (mode, 8), NULL_RTX,
2834 unsignedp, OPTAB_WIDEN);
2835 if (temp1 && temp2)
2836 {
2837 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2838 unsignedp, OPTAB_WIDEN);
2839 if (temp)
2840 return temp;
2841 }
2842
2843 delete_insns_since (last);
2844 }
2845
2846 if (is_a <scalar_int_mode> (mode, &int_mode))
2847 {
2848 temp = widen_bswap (int_mode, op0, target);
2849 if (temp)
2850 return temp;
2851
2852 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2853 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2854 {
2855 temp = expand_doubleword_bswap (mode, op0, target);
2856 if (temp)
2857 return temp;
2858 }
2859 }
2860
2861 goto try_libcall;
2862 }
2863
2864 if (CLASS_HAS_WIDER_MODES_P (mclass))
2865 FOR_EACH_WIDER_MODE (wider_mode, mode)
2866 {
2867 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2868 {
2869 rtx xop0 = op0;
2870 rtx_insn *last = get_last_insn ();
2871
2872 /* For certain operations, we need not actually extend
2873 the narrow operand, as long as we will truncate the
2874 results to the same narrowness. */
2875
2876 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2877 (unoptab == neg_optab
2878 || unoptab == one_cmpl_optab)
2879 && mclass == MODE_INT);
2880
2881 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2882 unsignedp);
2883
2884 if (temp)
2885 {
2886 if (mclass != MODE_INT
2887 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2888 {
2889 if (target == 0)
2890 target = gen_reg_rtx (mode);
2891 convert_move (target, temp, 0);
2892 return target;
2893 }
2894 else
2895 return gen_lowpart (mode, temp);
2896 }
2897 else
2898 delete_insns_since (last);
2899 }
2900 }
2901
2902 /* These can be done a word at a time. */
2903 if (unoptab == one_cmpl_optab
2904 && is_int_mode (mode, &int_mode)
2905 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
2906 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2907 {
2908 int i;
2909 rtx_insn *insns;
2910
2911 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2912 target = gen_reg_rtx (int_mode);
2913
2914 start_sequence ();
2915
2916 /* Do the actual arithmetic. */
2917 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
2918 {
2919 rtx target_piece = operand_subword (target, i, 1, int_mode);
2920 rtx x = expand_unop (word_mode, unoptab,
2921 operand_subword_force (op0, i, int_mode),
2922 target_piece, unsignedp);
2923
2924 if (target_piece != x)
2925 emit_move_insn (target_piece, x);
2926 }
2927
2928 insns = get_insns ();
2929 end_sequence ();
2930
2931 emit_insn (insns);
2932 return target;
2933 }
2934
2935 if (optab_to_code (unoptab) == NEG)
2936 {
2937 /* Try negating floating point values by flipping the sign bit. */
2938 if (is_a <scalar_float_mode> (mode, &float_mode))
2939 {
2940 temp = expand_absneg_bit (NEG, float_mode, op0, target);
2941 if (temp)
2942 return temp;
2943 }
2944
2945 /* If there is no negation pattern, and we have no negative zero,
2946 try subtracting from zero. */
2947 if (!HONOR_SIGNED_ZEROS (mode))
2948 {
2949 temp = expand_binop (mode, (unoptab == negv_optab
2950 ? subv_optab : sub_optab),
2951 CONST0_RTX (mode), op0, target,
2952 unsignedp, OPTAB_DIRECT);
2953 if (temp)
2954 return temp;
2955 }
2956 }
2957
2958 /* Try calculating parity (x) as popcount (x) % 2. */
2959 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
2960 {
2961 temp = expand_parity (int_mode, op0, target);
2962 if (temp)
2963 return temp;
2964 }
2965
2966 /* Try implementing ffs (x) in terms of clz (x). */
2967 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
2968 {
2969 temp = expand_ffs (int_mode, op0, target);
2970 if (temp)
2971 return temp;
2972 }
2973
2974 /* Try implementing ctz (x) in terms of clz (x). */
2975 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
2976 {
2977 temp = expand_ctz (int_mode, op0, target);
2978 if (temp)
2979 return temp;
2980 }
2981
2982 try_libcall:
2983 /* Now try a library call in this mode. */
2984 libfunc = optab_libfunc (unoptab, mode);
2985 if (libfunc)
2986 {
2987 rtx_insn *insns;
2988 rtx value;
2989 rtx eq_value;
2990 machine_mode outmode = mode;
2991
2992 /* All of these functions return small values. Thus we choose to
2993 have them return something that isn't a double-word. */
2994 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2995 || unoptab == clrsb_optab || unoptab == popcount_optab
2996 || unoptab == parity_optab)
2997 outmode
2998 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2999 optab_libfunc (unoptab, mode)));
3000
3001 start_sequence ();
3002
3003 /* Pass 1 for NO_QUEUE so we don't lose any increments
3004 if the libcall is cse'd or moved. */
3005 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3006 op0, mode);
3007 insns = get_insns ();
3008 end_sequence ();
3009
3010 target = gen_reg_rtx (outmode);
3011 bool trapv = trapv_unoptab_p (unoptab);
3012 if (trapv)
3013 eq_value = NULL_RTX;
3014 else
3015 {
3016 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3017 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3018 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3019 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3020 eq_value = simplify_gen_unary (ZERO_EXTEND,
3021 outmode, eq_value, mode);
3022 }
3023 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3024
3025 return target;
3026 }
3027
3028 /* It can't be done in this mode. Can we do it in a wider mode? */
3029
3030 if (CLASS_HAS_WIDER_MODES_P (mclass))
3031 {
3032 FOR_EACH_WIDER_MODE (wider_mode, mode)
3033 {
3034 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3035 || optab_libfunc (unoptab, wider_mode))
3036 {
3037 rtx xop0 = op0;
3038 rtx_insn *last = get_last_insn ();
3039
3040 /* For certain operations, we need not actually extend
3041 the narrow operand, as long as we will truncate the
3042 results to the same narrowness. */
3043 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3044 (unoptab == neg_optab
3045 || unoptab == one_cmpl_optab
3046 || unoptab == bswap_optab)
3047 && mclass == MODE_INT);
3048
3049 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3050 unsignedp);
3051
3052 /* If we are generating clz using wider mode, adjust the
3053 result. Similarly for clrsb. */
3054 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3055 && temp != 0)
3056 {
3057 scalar_int_mode wider_int_mode
3058 = as_a <scalar_int_mode> (wider_mode);
3059 int_mode = as_a <scalar_int_mode> (mode);
3060 temp = expand_binop
3061 (wider_mode, sub_optab, temp,
3062 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3063 - GET_MODE_PRECISION (int_mode),
3064 wider_int_mode),
3065 target, true, OPTAB_DIRECT);
3066 }
3067
3068 /* Likewise for bswap. */
3069 if (unoptab == bswap_optab && temp != 0)
3070 {
3071 scalar_int_mode wider_int_mode
3072 = as_a <scalar_int_mode> (wider_mode);
3073 int_mode = as_a <scalar_int_mode> (mode);
3074 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3075 == GET_MODE_BITSIZE (wider_int_mode)
3076 && GET_MODE_PRECISION (int_mode)
3077 == GET_MODE_BITSIZE (int_mode));
3078
3079 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3080 GET_MODE_BITSIZE (wider_int_mode)
3081 - GET_MODE_BITSIZE (int_mode),
3082 NULL_RTX, true);
3083 }
3084
3085 if (temp)
3086 {
3087 if (mclass != MODE_INT)
3088 {
3089 if (target == 0)
3090 target = gen_reg_rtx (mode);
3091 convert_move (target, temp, 0);
3092 return target;
3093 }
3094 else
3095 return gen_lowpart (mode, temp);
3096 }
3097 else
3098 delete_insns_since (last);
3099 }
3100 }
3101 }
3102
3103 /* One final attempt at implementing negation via subtraction,
3104 this time allowing widening of the operand. */
3105 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3106 {
3107 rtx temp;
3108 temp = expand_binop (mode,
3109 unoptab == negv_optab ? subv_optab : sub_optab,
3110 CONST0_RTX (mode), op0,
3111 target, unsignedp, OPTAB_LIB_WIDEN);
3112 if (temp)
3113 return temp;
3114 }
3115
3116 return 0;
3117 }
3118 \f
3119 /* Emit code to compute the absolute value of OP0, with result to
3120 TARGET if convenient. (TARGET may be 0.) The return value says
3121 where the result actually is to be found.
3122
3123 MODE is the mode of the operand; the mode of the result is
3124 different but can be deduced from MODE. */
3127
3128 rtx
3129 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3130 int result_unsignedp)
3131 {
3132 rtx temp;
3133
3134 if (GET_MODE_CLASS (mode) != MODE_INT
3135 || ! flag_trapv)
3136 result_unsignedp = 1;
3137
3138 /* First try to do it with a special abs instruction. */
3139 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3140 op0, target, 0);
3141 if (temp != 0)
3142 return temp;
3143
3144 /* For floating point modes, try clearing the sign bit. */
3145 scalar_float_mode float_mode;
3146 if (is_a <scalar_float_mode> (mode, &float_mode))
3147 {
3148 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3149 if (temp)
3150 return temp;
3151 }
3152
3153 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3154 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3155 && !HONOR_SIGNED_ZEROS (mode))
3156 {
3157 rtx_insn *last = get_last_insn ();
3158
3159 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3160 op0, NULL_RTX, 0);
3161 if (temp != 0)
3162 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3163 OPTAB_WIDEN);
3164
3165 if (temp != 0)
3166 return temp;
3167
3168 delete_insns_since (last);
3169 }
3170
3171 /* If this machine has expensive jumps, we can do integer absolute
3172 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3173 where W is the width of MODE. */
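/* To see why: for x < 0 the arithmetic shift yields s = -1, and
   (x ^ -1) - (-1) == ~x + 1 == -x; for x >= 0 it yields s = 0 and the
   expression reduces to x. */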
3174
3175 scalar_int_mode int_mode;
3176 if (is_int_mode (mode, &int_mode)
3177 && BRANCH_COST (optimize_insn_for_speed_p (),
3178 false) >= 2)
3179 {
3180 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3181 GET_MODE_PRECISION (int_mode) - 1,
3182 NULL_RTX, 0);
3183
3184 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3185 OPTAB_LIB_WIDEN);
3186 if (temp != 0)
3187 temp = expand_binop (int_mode,
3188 result_unsignedp ? sub_optab : subv_optab,
3189 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3190
3191 if (temp != 0)
3192 return temp;
3193 }
3194
3195 return NULL_RTX;
3196 }
3197
3198 rtx
3199 expand_abs (machine_mode mode, rtx op0, rtx target,
3200 int result_unsignedp, int safe)
3201 {
3202 rtx temp;
3203 rtx_code_label *op1;
3204
3205 if (GET_MODE_CLASS (mode) != MODE_INT
3206 || ! flag_trapv)
3207 result_unsignedp = 1;
3208
3209 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3210 if (temp != 0)
3211 return temp;
3212
3213 /* If that does not win, use conditional jump and negate. */
3214
3215 /* It is safe to use the target if it is the same as the source
3216 and is also a pseudo register. */
3217 if (op0 == target && REG_P (op0)
3218 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3219 safe = 1;
3220
3221 op1 = gen_label_rtx ();
3222 if (target == 0 || ! safe
3223 || GET_MODE (target) != mode
3224 || (MEM_P (target) && MEM_VOLATILE_P (target))
3225 || (REG_P (target)
3226 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3227 target = gen_reg_rtx (mode);
3228
3229 emit_move_insn (target, op0);
3230 NO_DEFER_POP;
3231
3232 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3233 NULL_RTX, NULL, op1,
3234 profile_probability::uninitialized ());
3235
3236 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3237 target, target, 0);
3238 if (op0 != target)
3239 emit_move_insn (target, op0);
3240 emit_label (op1);
3241 OK_DEFER_POP;
3242 return target;
3243 }
3244
3245 /* Emit code to compute the one's complement absolute value of OP0
3246 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3247 (TARGET may be NULL_RTX.) The return value says where the result
3248 actually is to be found.
3249
3250 MODE is the mode of the operand; the mode of the result is
3251 different but can be deduced from MODE. */
3252
3253 rtx
3254 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3255 {
3256 rtx temp;
3257
3258 /* Not applicable for floating point modes. */
3259 if (FLOAT_MODE_P (mode))
3260 return NULL_RTX;
3261
3262 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3263 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3264 {
3265 rtx_insn *last = get_last_insn ();
3266
3267 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3268 if (temp != 0)
3269 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3270 OPTAB_WIDEN);
3271
3272 if (temp != 0)
3273 return temp;
3274
3275 delete_insns_since (last);
3276 }
3277
3278 /* If this machine has expensive jumps, we can do one's complement
3279 absolute value of X as (((signed) x >> (W-1)) ^ x). */
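/* Here the shift yields s = 0 for nonnegative x, leaving x unchanged,
   and s = -1 for negative x, in which case x ^ s == ~x. */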
3280
3281 scalar_int_mode int_mode;
3282 if (is_int_mode (mode, &int_mode)
3283 && BRANCH_COST (optimize_insn_for_speed_p (),
3284 false) >= 2)
3285 {
3286 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3287 GET_MODE_PRECISION (int_mode) - 1,
3288 NULL_RTX, 0);
3289
3290 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3291 OPTAB_LIB_WIDEN);
3292
3293 if (temp != 0)
3294 return temp;
3295 }
3296
3297 return NULL_RTX;
3298 }
3299
3300 /* A subroutine of expand_copysign, perform the copysign operation using the
3301 abs and neg primitives advertised to exist on the target. The assumption
3302 is that we have a split register file, and leaving op0 in fp registers,
3303 and not playing with subregs so much, will help the register allocator. */
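/* In outline this computes
     copysign (x, y) == signbit (y) ? -fabs (x) : fabs (x)
   by extracting the sign of OP1, forcing |OP0| into the result, and
   conditionally negating it. */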
3304
3305 static rtx
3306 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3307 int bitpos, bool op0_is_abs)
3308 {
3309 scalar_int_mode imode;
3310 enum insn_code icode;
3311 rtx sign;
3312 rtx_code_label *label;
3313
3314 if (target == op1)
3315 target = NULL_RTX;
3316
3317 /* Check if the back end provides an insn that handles signbit for the
3318 argument's mode. */
3319 icode = optab_handler (signbit_optab, mode);
3320 if (icode != CODE_FOR_nothing)
3321 {
3322 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3323 sign = gen_reg_rtx (imode);
3324 emit_unop_insn (icode, sign, op1, UNKNOWN);
3325 }
3326 else
3327 {
3328 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3329 {
3330 if (!int_mode_for_mode (mode).exists (&imode))
3331 return NULL_RTX;
3332 op1 = gen_lowpart (imode, op1);
3333 }
3334 else
3335 {
3336 int word;
3337
3338 imode = word_mode;
3339 if (FLOAT_WORDS_BIG_ENDIAN)
3340 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3341 else
3342 word = bitpos / BITS_PER_WORD;
3343 bitpos = bitpos % BITS_PER_WORD;
3344 op1 = operand_subword_force (op1, word, mode);
3345 }
3346
3347 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3348 sign = expand_binop (imode, and_optab, op1,
3349 immed_wide_int_const (mask, imode),
3350 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3351 }
3352
3353 if (!op0_is_abs)
3354 {
3355 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3356 if (op0 == NULL)
3357 return NULL_RTX;
3358 target = op0;
3359 }
3360 else
3361 {
3362 if (target == NULL_RTX)
3363 target = copy_to_reg (op0);
3364 else
3365 emit_move_insn (target, op0);
3366 }
3367
3368 label = gen_label_rtx ();
3369 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3370
3371 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3372 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3373 else
3374 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3375 if (op0 != target)
3376 emit_move_insn (target, op0);
3377
3378 emit_label (label);
3379
3380 return target;
3381 }
3382
3383
3384 /* A subroutine of expand_copysign, perform the entire copysign operation
3385 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3386 is true if op0 is known to have its sign bit clear. */
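/* For IEEE single precision this amounts to
     result = (op0 & 0x7fffffff) | (op1 & 0x80000000),
   performed one word at a time when the mode is wider than a word. */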
3387
3388 static rtx
3389 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3390 int bitpos, bool op0_is_abs)
3391 {
3392 scalar_int_mode imode;
3393 int word, nwords, i;
3394 rtx temp;
3395 rtx_insn *insns;
3396
3397 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3398 {
3399 if (!int_mode_for_mode (mode).exists (&imode))
3400 return NULL_RTX;
3401 word = 0;
3402 nwords = 1;
3403 }
3404 else
3405 {
3406 imode = word_mode;
3407
3408 if (FLOAT_WORDS_BIG_ENDIAN)
3409 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3410 else
3411 word = bitpos / BITS_PER_WORD;
3412 bitpos = bitpos % BITS_PER_WORD;
3413 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3414 }
3415
3416 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3417
3418 if (target == 0
3419 || target == op0
3420 || target == op1
3421 || (nwords > 1 && !valid_multiword_target_p (target)))
3422 target = gen_reg_rtx (mode);
3423
3424 if (nwords > 1)
3425 {
3426 start_sequence ();
3427
3428 for (i = 0; i < nwords; ++i)
3429 {
3430 rtx targ_piece = operand_subword (target, i, 1, mode);
3431 rtx op0_piece = operand_subword_force (op0, i, mode);
3432
3433 if (i == word)
3434 {
3435 if (!op0_is_abs)
3436 op0_piece
3437 = expand_binop (imode, and_optab, op0_piece,
3438 immed_wide_int_const (~mask, imode),
3439 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3440 op1 = expand_binop (imode, and_optab,
3441 operand_subword_force (op1, i, mode),
3442 immed_wide_int_const (mask, imode),
3443 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3444
3445 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3446 targ_piece, 1, OPTAB_LIB_WIDEN);
3447 if (temp != targ_piece)
3448 emit_move_insn (targ_piece, temp);
3449 }
3450 else
3451 emit_move_insn (targ_piece, op0_piece);
3452 }
3453
3454 insns = get_insns ();
3455 end_sequence ();
3456
3457 emit_insn (insns);
3458 }
3459 else
3460 {
3461 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3462 immed_wide_int_const (mask, imode),
3463 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3464
3465 op0 = gen_lowpart (imode, op0);
3466 if (!op0_is_abs)
3467 op0 = expand_binop (imode, and_optab, op0,
3468 immed_wide_int_const (~mask, imode),
3469 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3470
3471 temp = expand_binop (imode, ior_optab, op0, op1,
3472 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3473 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3474 }
3475
3476 return target;
3477 }
3478
3479 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3480 scalar floating point mode. Return NULL if we do not know how to
3481 expand the operation inline. */
3482
3483 rtx
3484 expand_copysign (rtx op0, rtx op1, rtx target)
3485 {
3486 scalar_float_mode mode;
3487 const struct real_format *fmt;
3488 bool op0_is_abs;
3489 rtx temp;
3490
3491 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3492 gcc_assert (GET_MODE (op1) == mode);
3493
3494 /* First try to do it with a special instruction. */
3495 temp = expand_binop (mode, copysign_optab, op0, op1,
3496 target, 0, OPTAB_DIRECT);
3497 if (temp)
3498 return temp;
3499
3500 fmt = REAL_MODE_FORMAT (mode);
3501 if (fmt == NULL || !fmt->has_signed_zero)
3502 return NULL_RTX;
3503
3504 op0_is_abs = false;
3505 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3506 {
3507 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3508 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3509 op0_is_abs = true;
3510 }
3511
3512 if (fmt->signbit_ro >= 0
3513 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3514 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3515 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3516 {
3517 temp = expand_copysign_absneg (mode, op0, op1, target,
3518 fmt->signbit_ro, op0_is_abs);
3519 if (temp)
3520 return temp;
3521 }
3522
3523 if (fmt->signbit_rw < 0)
3524 return NULL_RTX;
3525 return expand_copysign_bit (mode, op0, op1, target,
3526 fmt->signbit_rw, op0_is_abs);
3527 }
3528 \f
3529 /* Generate an instruction whose insn-code is INSN_CODE,
3530 with two operands: an output TARGET and an input OP0.
3531 TARGET *must* be nonzero, and the output is always stored there.
3532 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3533 the value that is stored into TARGET.
3534
3535 Return false if expansion failed. */
3536
3537 bool
3538 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3539 enum rtx_code code)
3540 {
3541 struct expand_operand ops[2];
3542 rtx_insn *pat;
3543
3544 create_output_operand (&ops[0], target, GET_MODE (target));
3545 create_input_operand (&ops[1], op0, GET_MODE (op0));
3546 pat = maybe_gen_insn (icode, 2, ops);
3547 if (!pat)
3548 return false;
3549
3550 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3551 && code != UNKNOWN)
3552 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3553
3554 emit_insn (pat);
3555
3556 if (ops[0].value != target)
3557 emit_move_insn (target, ops[0].value);
3558 return true;
3559 }
3560 /* Generate an instruction whose insn-code is INSN_CODE,
3561 with two operands: an output TARGET and an input OP0.
3562 TARGET *must* be nonzero, and the output is always stored there.
3563 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3564 the value that is stored into TARGET. */
3565
3566 void
3567 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3568 {
3569 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3570 gcc_assert (ok);
3571 }
3572 \f
3573 struct no_conflict_data
3574 {
3575 rtx target;
3576 rtx_insn *first, *insn;
3577 bool must_stay;
3578 };
3579
3580 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3581 the currently examined clobber / store has to stay in the list of
3582 insns that constitute the actual libcall block. */
3583 static void
3584 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3585 {
3586 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3587
3588 /* If this insn directly contributes to setting the target, it must stay. */
3589 if (reg_overlap_mentioned_p (p->target, dest))
3590 p->must_stay = true;
3591 /* If we haven't committed to keeping any other insns in the list yet,
3592 there is nothing more to check. */
3593 else if (p->insn == p->first)
3594 return;
3595 /* If this insn sets / clobbers a register that feeds one of the insns
3596 already in the list, this insn has to stay too. */
3597 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3598 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3599 || reg_used_between_p (dest, p->first, p->insn)
3600 /* Likewise if this insn depends on a register set by a previous
3601 insn in the list, or if it sets a result (presumably a hard
3602 register) that is set or clobbered by a previous insn.
3603 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3604 SET_DEST perform the former check on the address, and the latter
3605 check on the MEM. */
3606 || (GET_CODE (set) == SET
3607 && (modified_in_p (SET_SRC (set), p->first)
3608 || modified_in_p (SET_DEST (set), p->first)
3609 || modified_between_p (SET_SRC (set), p->first, p->insn)
3610 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3611 p->must_stay = true;
3612 }
3613
3614 \f
3615 /* Emit code to make a call to a constant function or a library call.
3616
3617 INSNS is a list containing all insns emitted in the call.
3618 These insns leave the result in RESULT. Our job is to copy RESULT
3619 to TARGET, which is logically equivalent to EQUIV.
3620
3621 We first emit any insns that set a pseudo on the assumption that these are
3622 loading constants into registers; doing so allows them to be safely cse'ed
3623 between blocks. Then we emit all the other insns in the block, followed by
3624 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3625 note with an operand of EQUIV. */
3626
3627 static void
3628 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3629 bool equiv_may_trap)
3630 {
3631 rtx final_dest = target;
3632 rtx_insn *next, *last, *insn;
3633
3634 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3635 into a MEM later. Protect the libcall block from this change. */
3636 if (! REG_P (target) || REG_USERVAR_P (target))
3637 target = gen_reg_rtx (GET_MODE (target));
3638
3639 /* If we're using non-call exceptions, a libcall corresponding to an
3640 operation that may trap may also trap. */
3641 /* ??? See the comment in front of make_reg_eh_region_note. */
3642 if (cfun->can_throw_non_call_exceptions
3643 && (equiv_may_trap || may_trap_p (equiv)))
3644 {
3645 for (insn = insns; insn; insn = NEXT_INSN (insn))
3646 if (CALL_P (insn))
3647 {
3648 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3649 if (note)
3650 {
3651 int lp_nr = INTVAL (XEXP (note, 0));
3652 if (lp_nr == 0 || lp_nr == INT_MIN)
3653 remove_note (insn, note);
3654 }
3655 }
3656 }
3657 else
3658 {
3659 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3660 reg note to indicate that this call cannot throw or execute a nonlocal
3661 goto (unless there is already a REG_EH_REGION note, in which case
3662 we update it). */
3663 for (insn = insns; insn; insn = NEXT_INSN (insn))
3664 if (CALL_P (insn))
3665 make_reg_eh_region_note_nothrow_nononlocal (insn);
3666 }
3667
3668 /* First emit all insns that set pseudos. Remove them from the list as
3669 we go. Avoid insns that set pseudos which were referenced in previous
3670 insns. These can be generated by move_by_pieces, for example,
3671 to update an address. Similarly, avoid insns that reference things
3672 set in previous insns. */
3673
3674 for (insn = insns; insn; insn = next)
3675 {
3676 rtx set = single_set (insn);
3677
3678 next = NEXT_INSN (insn);
3679
3680 if (set != 0 && REG_P (SET_DEST (set))
3681 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3682 {
3683 struct no_conflict_data data;
3684
3685 data.target = const0_rtx;
3686 data.first = insns;
3687 data.insn = insn;
3688 data.must_stay = 0;
3689 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3690 if (! data.must_stay)
3691 {
3692 if (PREV_INSN (insn))
3693 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3694 else
3695 insns = next;
3696
3697 if (next)
3698 SET_PREV_INSN (next) = PREV_INSN (insn);
3699
3700 add_insn (insn);
3701 }
3702 }
3703
3704 /* Some ports use a loop to copy large arguments onto the stack.
3705 Don't move anything outside such a loop. */
3706 if (LABEL_P (insn))
3707 break;
3708 }
3709
3710 /* Write the remaining insns followed by the final copy. */
3711 for (insn = insns; insn; insn = next)
3712 {
3713 next = NEXT_INSN (insn);
3714
3715 add_insn (insn);
3716 }
3717
3718 last = emit_move_insn (target, result);
3719 if (equiv)
3720 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3721
3722 if (final_dest != target)
3723 emit_move_insn (final_dest, target);
3724 }
3725
3726 void
3727 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3728 {
3729 emit_libcall_block_1 (insns, target, result, equiv, false);
3730 }
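/* Editorial sketch, mirroring the pattern that prepare_float_lib_cmp
   uses later in this file: a typical caller wraps the libcall emission
   in a sequence and hands the insns to emit_libcall_block together with
   an EQUIV rtx describing the call's semantics.  The function name and
   its LIBFUNC/X/Y/MODE parameters are hypothetical placeholders.  */
#if 0
static rtx
expand_add_via_libcall_sketch (machine_mode mode, rtx x, rtx y, rtx libfunc)
{
  rtx value, target = gen_reg_rtx (mode);
  rtx_insn *insns;

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  /* The EQUIV rtx becomes a REG_EQUAL note, letting the optimizers
     delete the call if the result is known at compile time.  */
  emit_libcall_block (insns, target, value,
		      gen_rtx_fmt_ee (PLUS, mode, x, y));
  return target;
}
#endif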
3731 \f
3732 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3733 PURPOSE describes how this comparison will be used. CODE is the rtx
3734 comparison code we will be using.
3735
3736 ??? Actually, CODE is slightly weaker than that. A target is still
3737 required to implement all of the normal bcc operations, but not
3738 required to implement all (or any) of the unordered bcc operations. */
3739
3740 int
3741 can_compare_p (enum rtx_code code, machine_mode mode,
3742 enum can_compare_purpose purpose)
3743 {
3744 rtx test;
3745 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3746 do
3747 {
3748 enum insn_code icode;
3749
3750 if (purpose == ccp_jump
3751 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3752 && insn_operand_matches (icode, 0, test))
3753 return 1;
3754 if (purpose == ccp_store_flag
3755 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3756 && insn_operand_matches (icode, 1, test))
3757 return 1;
3758 if (purpose == ccp_cmov
3759 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3760 return 1;
3761
3762 mode = GET_MODE_WIDER_MODE (mode).else_void ();
3763 PUT_MODE (test, mode);
3764 }
3765 while (mode != VOIDmode);
3766
3767 return 0;
3768 }
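/* Editorial sketch (hypothetical pseudos A and B): callers typically
   probe can_compare_p before committing to a branching strategy, e.g.

     if (can_compare_p (GT, SImode, ccp_jump))
       emit_cmp_and_jump_insns (a, b, GT, NULL_RTX, SImode, 0, label,
				profile_probability::uninitialized ());

   i.e. only emit the compare-and-branch once some mode at least as wide
   as SImode is known to have a usable cbranch pattern for GT.  */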
3769
3770 /* This function is called when we are going to emit a compare instruction that
3771 compares the values found in X and Y, using the rtl operator COMPARISON.
3772
3773 If they have mode BLKmode, then SIZE specifies the size of both operands.
3774
3775 UNSIGNEDP nonzero says that the operands are unsigned;
3776 this matters if they need to be widened (as given by METHODS).
3777
3778 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3779 if we failed to produce one.
3780
3781 *PMODE is the mode of the inputs (in case they are const_int).
3782
3783 This function performs all the setup necessary so that the caller only has
3784 to emit a single comparison insn. This setup can involve doing a BLKmode
3785 comparison or emitting a library call to perform the comparison if no insn
3786 is available to handle it.
3787 The values which are passed in through pointers can be modified; the caller
3788 should perform the comparison on the modified values. Constant
3789 comparisons must have already been folded. */
3790
3791 static void
3792 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3793 int unsignedp, enum optab_methods methods,
3794 rtx *ptest, machine_mode *pmode)
3795 {
3796 machine_mode mode = *pmode;
3797 rtx libfunc, test;
3798 machine_mode cmp_mode;
3799 enum mode_class mclass;
3800
3801 /* The other methods are not needed. */
3802 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3803 || methods == OPTAB_LIB_WIDEN);
3804
3805 /* If we are optimizing, force expensive constants into a register. */
3806 if (CONSTANT_P (x) && optimize
3807 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3808 > COSTS_N_INSNS (1)))
3809 x = force_reg (mode, x);
3810
3811 if (CONSTANT_P (y) && optimize
3812 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3813 > COSTS_N_INSNS (1)))
3814 y = force_reg (mode, y);
3815
3816 #if HAVE_cc0
3817 /* Make sure we have a canonical comparison. The RTL
3818 documentation states that canonical comparisons are required only
3819 for targets which have cc0. */
3820 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3821 #endif
3822
3823 /* Don't let both operands fail to indicate the mode. */
3824 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3825 x = force_reg (mode, x);
3826 if (mode == VOIDmode)
3827 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3828
3829 /* Handle all BLKmode compares. */
3830
3831 if (mode == BLKmode)
3832 {
3833 machine_mode result_mode;
3834 enum insn_code cmp_code;
3835 rtx result;
3836 rtx opalign
3837 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3838
3839 gcc_assert (size);
3840
3841 /* Try to use a memory block compare insn - cmpstr, cmpstrn
3842 or cmpmem will do. */
3843 opt_scalar_int_mode cmp_mode_iter;
3844 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
3845 {
3846 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
3847 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3848 if (cmp_code == CODE_FOR_nothing)
3849 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3850 if (cmp_code == CODE_FOR_nothing)
3851 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3852 if (cmp_code == CODE_FOR_nothing)
3853 continue;
3854
3855 /* Must make sure the size fits the insn's mode. */
3856 if (CONST_INT_P (size)
3857 ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
3858 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
3859 > GET_MODE_BITSIZE (cmp_mode)))
3860 continue;
3861
3862 result_mode = insn_data[cmp_code].operand[0].mode;
3863 result = gen_reg_rtx (result_mode);
3864 size = convert_to_mode (cmp_mode, size, 1);
3865 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3866
3867 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3868 *pmode = result_mode;
3869 return;
3870 }
3871
3872 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3873 goto fail;
3874
3875 /* Otherwise call a library function. */
3876 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3877
3878 x = result;
3879 y = const0_rtx;
3880 mode = TYPE_MODE (integer_type_node);
3881 methods = OPTAB_LIB_WIDEN;
3882 unsignedp = false;
3883 }
3884
3885 /* Don't allow operands to the compare to trap, as that can put the
3886 compare and branch in different basic blocks. */
3887 if (cfun->can_throw_non_call_exceptions)
3888 {
3889 if (may_trap_p (x))
3890 x = copy_to_reg (x);
3891 if (may_trap_p (y))
3892 y = copy_to_reg (y);
3893 }
3894
3895 if (GET_MODE_CLASS (mode) == MODE_CC)
3896 {
3897 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3898 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3899 gcc_assert (icode != CODE_FOR_nothing
3900 && insn_operand_matches (icode, 0, test));
3901 *ptest = test;
3902 return;
3903 }
3904
3905 mclass = GET_MODE_CLASS (mode);
3906 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3907 FOR_EACH_MODE_FROM (cmp_mode, mode)
3908 {
3909 enum insn_code icode;
3910 icode = optab_handler (cbranch_optab, cmp_mode);
3911 if (icode != CODE_FOR_nothing
3912 && insn_operand_matches (icode, 0, test))
3913 {
3914 rtx_insn *last = get_last_insn ();
3915 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3916 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3917 if (op0 && op1
3918 && insn_operand_matches (icode, 1, op0)
3919 && insn_operand_matches (icode, 2, op1))
3920 {
3921 XEXP (test, 0) = op0;
3922 XEXP (test, 1) = op1;
3923 *ptest = test;
3924 *pmode = cmp_mode;
3925 return;
3926 }
3927 delete_insns_since (last);
3928 }
3929
3930 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3931 break;
3932 }
3933
3934 if (methods != OPTAB_LIB_WIDEN)
3935 goto fail;
3936
3937 if (!SCALAR_FLOAT_MODE_P (mode))
3938 {
3939 rtx result;
3940 machine_mode ret_mode;
3941
3942 /* Handle a libcall just for the mode we are using. */
3943 libfunc = optab_libfunc (cmp_optab, mode);
3944 gcc_assert (libfunc);
3945
3946 /* If we want unsigned, and this mode has a distinct unsigned
3947 comparison routine, use that. */
3948 if (unsignedp)
3949 {
3950 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3951 if (ulibfunc)
3952 libfunc = ulibfunc;
3953 }
3954
3955 ret_mode = targetm.libgcc_cmp_return_mode ();
3956 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3957 ret_mode, x, mode, y, mode);
3958
3959 /* There are two kinds of comparison routines. Biased routines
3960 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3961 of gcc expect that the comparison operation is equivalent
3962 to the modified comparison. For signed comparisons compare the
3963 result against 1 in the biased case, and zero in the unbiased
3964 case. For unsigned comparisons always compare against 1 after
3965 biasing the unbiased result by adding 1. This gives us a way to
3966 represent LTU.
3967 The comparisons in the fixed-point helper library are always
3968 biased. */
3969 x = result;
3970 y = const1_rtx;
3971
3972 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3973 {
3974 if (unsignedp)
3975 x = plus_constant (ret_mode, result, 1);
3976 else
3977 y = const0_rtx;
3978 }
3979
3980 *pmode = ret_mode;
3981 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3982 ptest, pmode);
3983 }
3984 else
3985 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3986
3987 return;
3988
3989 fail:
3990 *ptest = NULL_RTX;
3991 }
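/* Editorial worked example for the biased/unbiased note above.  A
   biased __cmpsi2-style helper returns 0/1/2 for less/equal/greater,
   so "x < y" is tested as "helper (x, y) < 1"; an unbiased helper
   returns -1/0/1, so the same test becomes "helper (x, y) < 0".  For
   unsigned comparisons against an unbiased helper, the result is first
   biased by adding 1, giving 0/1/2 again, so that e.g. LTU holds
   exactly when "result + 1 <u 1".  */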
3992
3993 /* Before emitting an insn with code ICODE, make sure that X, which is going
3994 to be used for operand OPNUM of the insn, is converted from mode MODE to
3995 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3996 that it is accepted by the operand predicate. Return the new value. */
3997
3998 rtx
3999 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4000 machine_mode wider_mode, int unsignedp)
4001 {
4002 if (mode != wider_mode)
4003 x = convert_modes (wider_mode, mode, x, unsignedp);
4004
4005 if (!insn_operand_matches (icode, opnum, x))
4006 {
4007 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4008 if (reload_completed)
4009 return NULL_RTX;
4010 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4011 return NULL_RTX;
4012 x = copy_to_mode_reg (op_mode, x);
4013 }
4014
4015 return x;
4016 }
4017
4018 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4019 we can do the branch. */
4020
4021 static void
4022 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4023 profile_probability prob)
4024 {
4025 machine_mode optab_mode;
4026 enum mode_class mclass;
4027 enum insn_code icode;
4028 rtx_insn *insn;
4029
4030 mclass = GET_MODE_CLASS (mode);
4031 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4032 icode = optab_handler (cbranch_optab, optab_mode);
4033
4034 gcc_assert (icode != CODE_FOR_nothing);
4035 gcc_assert (insn_operand_matches (icode, 0, test));
4036 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4037 XEXP (test, 1), label));
4038 if (prob.initialized_p ()
4039 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4040 && insn
4041 && JUMP_P (insn)
4042 && any_condjump_p (insn)
4043 && !find_reg_note (insn, REG_BR_PROB, 0))
4044 add_reg_br_prob_note (insn, prob);
4045 }
4046
4047 /* Generate code to compare X with Y so that the condition codes are
4048 set and to jump to LABEL if the condition is true. If X is a
4049 constant and Y is not a constant, then the comparison is swapped to
4050 ensure that the comparison RTL has the canonical form.
4051
4052 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4053 need to be widened. UNSIGNEDP is also used to select the proper
4054 branch condition code.
4055
4056 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4057
4058 MODE is the mode of the inputs (in case they are const_int).
4059
4060 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4061 It will be potentially converted into an unsigned variant based on
4062 UNSIGNEDP to select a proper jump instruction.
4063
4064 PROB is the probability of jumping to LABEL. */
4065
4066 void
4067 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4068 machine_mode mode, int unsignedp, rtx label,
4069 profile_probability prob)
4070 {
4071 rtx op0 = x, op1 = y;
4072 rtx test;
4073
4074 /* Swap operands and condition to ensure canonical RTL. */
4075 if (swap_commutative_operands_p (x, y)
4076 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4077 {
4078 op0 = y, op1 = x;
4079 comparison = swap_condition (comparison);
4080 }
4081
4082 /* If OP0 is still a constant, then both X and Y must be constants
4083 or the opposite comparison is not supported. Force X into a register
4084 to create canonical RTL. */
4085 if (CONSTANT_P (op0))
4086 op0 = force_reg (mode, op0);
4087
4088 if (unsignedp)
4089 comparison = unsigned_condition (comparison);
4090
4091 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4092 &test, &mode);
4093 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4094 }
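/* Editorial sketch: a minimal use of the routine above, as done by
   expand_float further down when testing the sign bit of FROM:

     emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
			      0, neglabel);

   i.e. branch to NEGLABEL when FROM < 0, compared signed in mode IMODE.
   The probability argument is omitted there, which implies the
   declaration supplies a default (uninitialized) probability.  */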
4095
4096 \f
4097 /* Emit a library call comparison between floating point X and Y.
4098 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4099
4100 static void
4101 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4102 rtx *ptest, machine_mode *pmode)
4103 {
4104 enum rtx_code swapped = swap_condition (comparison);
4105 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4106 machine_mode orig_mode = GET_MODE (x);
4107 machine_mode mode;
4108 rtx true_rtx, false_rtx;
4109 rtx value, target, equiv;
4110 rtx_insn *insns;
4111 rtx libfunc = 0;
4112 bool reversed_p = false;
4113 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4114
4115 FOR_EACH_MODE_FROM (mode, orig_mode)
4116 {
4117 if (code_to_optab (comparison)
4118 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4119 break;
4120
4121 if (code_to_optab (swapped)
4122 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4123 {
4124 std::swap (x, y);
4125 comparison = swapped;
4126 break;
4127 }
4128
4129 if (code_to_optab (reversed)
4130 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4131 {
4132 comparison = reversed;
4133 reversed_p = true;
4134 break;
4135 }
4136 }
4137
4138 gcc_assert (mode != VOIDmode);
4139
4140 if (mode != orig_mode)
4141 {
4142 x = convert_to_mode (mode, x, 0);
4143 y = convert_to_mode (mode, y, 0);
4144 }
4145
4146 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4147 the RTL. This allows the RTL optimizers to delete the libcall if the
4148 condition can be determined at compile-time. */
4149 if (comparison == UNORDERED
4150 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4151 {
4152 true_rtx = const_true_rtx;
4153 false_rtx = const0_rtx;
4154 }
4155 else
4156 {
4157 switch (comparison)
4158 {
4159 case EQ:
4160 true_rtx = const0_rtx;
4161 false_rtx = const_true_rtx;
4162 break;
4163
4164 case NE:
4165 true_rtx = const_true_rtx;
4166 false_rtx = const0_rtx;
4167 break;
4168
4169 case GT:
4170 true_rtx = const1_rtx;
4171 false_rtx = const0_rtx;
4172 break;
4173
4174 case GE:
4175 true_rtx = const0_rtx;
4176 false_rtx = constm1_rtx;
4177 break;
4178
4179 case LT:
4180 true_rtx = constm1_rtx;
4181 false_rtx = const0_rtx;
4182 break;
4183
4184 case LE:
4185 true_rtx = const0_rtx;
4186 false_rtx = const1_rtx;
4187 break;
4188
4189 default:
4190 gcc_unreachable ();
4191 }
4192 }
4193
4194 if (comparison == UNORDERED)
4195 {
4196 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4197 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4198 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4199 temp, const_true_rtx, equiv);
4200 }
4201 else
4202 {
4203 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4204 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4205 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4206 equiv, true_rtx, false_rtx);
4207 }
4208
4209 start_sequence ();
4210 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4211 cmp_mode, x, mode, y, mode);
4212 insns = get_insns ();
4213 end_sequence ();
4214
4215 target = gen_reg_rtx (cmp_mode);
4216 emit_libcall_block (insns, target, value, equiv);
4217
4218 if (comparison == UNORDERED
4219 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4220 || reversed_p)
4221 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4222 else
4223 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4224
4225 *pmode = cmp_mode;
4226 }
4227 \f
4228 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4229
4230 void
4231 emit_indirect_jump (rtx loc)
4232 {
4233 if (!targetm.have_indirect_jump ())
4234 sorry ("indirect jumps are not available on this target");
4235 else
4236 {
4237 struct expand_operand ops[1];
4238 create_address_operand (&ops[0], loc);
4239 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4240 emit_barrier ();
4241 }
4242 }
4243 \f
4244
4245 /* Emit a conditional move instruction if the machine supports one for that
4246 condition and machine mode.
4247
4248 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4249 the mode to use should they be constants. If it is VOIDmode, they cannot
4250 both be constants.
4251
4252 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4253 should be stored there. MODE is the mode to use should they be constants.
4254 If it is VOIDmode, they cannot both be constants.
4255
4256 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4257 is not supported. */
4258
4259 rtx
4260 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4261 machine_mode cmode, rtx op2, rtx op3,
4262 machine_mode mode, int unsignedp)
4263 {
4264 rtx comparison;
4265 rtx_insn *last;
4266 enum insn_code icode;
4267 enum rtx_code reversed;
4268
4269 /* If the two source operands are identical, that's just a move. */
4270
4271 if (rtx_equal_p (op2, op3))
4272 {
4273 if (!target)
4274 target = gen_reg_rtx (mode);
4275
4276 emit_move_insn (target, op3);
4277 return target;
4278 }
4279
4280 /* If one operand is constant, make it the second one. Only do this
4281 if the other operand is not constant as well. */
4282
4283 if (swap_commutative_operands_p (op0, op1))
4284 {
4285 std::swap (op0, op1);
4286 code = swap_condition (code);
4287 }
4288
4289 /* get_condition will prefer to generate LT and GT even if the old
4290 comparison was against zero, so undo that canonicalization here since
4291 comparisons against zero are cheaper. */
4292 if (code == LT && op1 == const1_rtx)
4293 code = LE, op1 = const0_rtx;
4294 else if (code == GT && op1 == constm1_rtx)
4295 code = GE, op1 = const0_rtx;
4296
4297 if (cmode == VOIDmode)
4298 cmode = GET_MODE (op0);
4299
4300 enum rtx_code orig_code = code;
4301 bool swapped = false;
4302 if (swap_commutative_operands_p (op2, op3)
4303 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4304 != UNKNOWN))
4305 {
4306 std::swap (op2, op3);
4307 code = reversed;
4308 swapped = true;
4309 }
4310
4311 if (mode == VOIDmode)
4312 mode = GET_MODE (op2);
4313
4314 icode = direct_optab_handler (movcc_optab, mode);
4315
4316 if (icode == CODE_FOR_nothing)
4317 return NULL_RTX;
4318
4319 if (!target)
4320 target = gen_reg_rtx (mode);
4321
4322 for (int pass = 0; ; pass++)
4323 {
4324 code = unsignedp ? unsigned_condition (code) : code;
4325 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4326
4327 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4328 punt and let the caller figure out how best to deal with this
4329 situation. */
4330 if (COMPARISON_P (comparison))
4331 {
4332 saved_pending_stack_adjust save;
4333 save_pending_stack_adjust (&save);
4334 last = get_last_insn ();
4335 do_pending_stack_adjust ();
4336 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4337 GET_CODE (comparison), NULL_RTX, unsignedp,
4338 OPTAB_WIDEN, &comparison, &cmode);
4339 if (comparison)
4340 {
4341 struct expand_operand ops[4];
4342
4343 create_output_operand (&ops[0], target, mode);
4344 create_fixed_operand (&ops[1], comparison);
4345 create_input_operand (&ops[2], op2, mode);
4346 create_input_operand (&ops[3], op3, mode);
4347 if (maybe_expand_insn (icode, 4, ops))
4348 {
4349 if (ops[0].value != target)
4350 convert_move (target, ops[0].value, false);
4351 return target;
4352 }
4353 }
4354 delete_insns_since (last);
4355 restore_pending_stack_adjust (&save);
4356 }
4357
4358 if (pass == 1)
4359 return NULL_RTX;
4360
4361 /* If the preferred op2/op3 order is not usable, retry with the other
4362 operand order; perhaps it will expand successfully. */
4363 if (swapped)
4364 code = orig_code;
4365 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4366 NULL))
4367 != UNKNOWN)
4368 code = reversed;
4369 else
4370 return NULL_RTX;
4371 std::swap (op2, op3);
4372 }
4373 }
4374
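/* Editorial sketch (hypothetical SImode pseudos A, B, X, Y; the function
   name is invented): expanding "r = a < b ? x : y" with the routine
   above.  */
#if 0
static rtx
expand_select_sketch (rtx a, rtx b, rtx x, rtx y)
{
  rtx r = emit_conditional_move (NULL_RTX, LT, a, b, SImode,
				 x, y, SImode, 0);
  if (!r)
    {
      /* No usable movcc pattern for the mode (or the comparison folded
	 to a constant): fall back to compare, branch and two moves.  */
    }
  return r;
}
#endif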
4375
4376 /* Emit a conditional negate or bitwise complement using the
4377 negcc or notcc optabs if available. Return NULL_RTX if such operations
4378 are not available. Otherwise return the RTX holding the result.
4379 TARGET is the desired destination of the result. COMP is the comparison
4380 on which to negate. If COND is true move into TARGET the negation
4381 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4382 CODE is either NEG or NOT. MODE is the machine mode in which the
4383 operation is performed. */
4384
4385 rtx
4386 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4387 machine_mode mode, rtx cond, rtx op1,
4388 rtx op2)
4389 {
4390 optab op = unknown_optab;
4391 if (code == NEG)
4392 op = negcc_optab;
4393 else if (code == NOT)
4394 op = notcc_optab;
4395 else
4396 gcc_unreachable ();
4397
4398 insn_code icode = direct_optab_handler (op, mode);
4399
4400 if (icode == CODE_FOR_nothing)
4401 return NULL_RTX;
4402
4403 if (!target)
4404 target = gen_reg_rtx (mode);
4405
4406 rtx_insn *last = get_last_insn ();
4407 struct expand_operand ops[4];
4408
4409 create_output_operand (&ops[0], target, mode);
4410 create_fixed_operand (&ops[1], cond);
4411 create_input_operand (&ops[2], op1, mode);
4412 create_input_operand (&ops[3], op2, mode);
4413
4414 if (maybe_expand_insn (icode, 4, ops))
4415 {
4416 if (ops[0].value != target)
4417 convert_move (target, ops[0].value, false);
4418
4419 return target;
4420 }
4421 delete_insns_since (last);
4422 return NULL_RTX;
4423 }
4424
4425 /* Emit a conditional addition instruction if the machine supports one for that
4426 condition and machine mode.
4427
4428 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4429 the mode to use should they be constants. If it is VOIDmode, they cannot
4430 both be constants.
4431
4432 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4433 should be stored there. MODE is the mode to use should they be constants.
4434 If it is VOIDmode, they cannot both be constants.
4435
4436 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4437 is not supported. */
4438
4439 rtx
4440 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4441 machine_mode cmode, rtx op2, rtx op3,
4442 machine_mode mode, int unsignedp)
4443 {
4444 rtx comparison;
4445 rtx_insn *last;
4446 enum insn_code icode;
4447
4448 /* If one operand is constant, make it the second one. Only do this
4449 if the other operand is not constant as well. */
4450
4451 if (swap_commutative_operands_p (op0, op1))
4452 {
4453 std::swap (op0, op1);
4454 code = swap_condition (code);
4455 }
4456
4457 /* get_condition will prefer to generate LT and GT even if the old
4458 comparison was against zero, so undo that canonicalization here since
4459 comparisons against zero are cheaper. */
4460 if (code == LT && op1 == const1_rtx)
4461 code = LE, op1 = const0_rtx;
4462 else if (code == GT && op1 == constm1_rtx)
4463 code = GE, op1 = const0_rtx;
4464
4465 if (cmode == VOIDmode)
4466 cmode = GET_MODE (op0);
4467
4468 if (mode == VOIDmode)
4469 mode = GET_MODE (op2);
4470
4471 icode = optab_handler (addcc_optab, mode);
4472
4473 if (icode == CODE_FOR_nothing)
4474 return 0;
4475
4476 if (!target)
4477 target = gen_reg_rtx (mode);
4478
4479 code = unsignedp ? unsigned_condition (code) : code;
4480 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4481
4482 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4483 return NULL and let the caller figure out how best to deal with this
4484 situation. */
4485 if (!COMPARISON_P (comparison))
4486 return NULL_RTX;
4487
4488 do_pending_stack_adjust ();
4489 last = get_last_insn ();
4490 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4491 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4492 &comparison, &cmode);
4493 if (comparison)
4494 {
4495 struct expand_operand ops[4];
4496
4497 create_output_operand (&ops[0], target, mode);
4498 create_fixed_operand (&ops[1], comparison);
4499 create_input_operand (&ops[2], op2, mode);
4500 create_input_operand (&ops[3], op3, mode);
4501 if (maybe_expand_insn (icode, 4, ops))
4502 {
4503 if (ops[0].value != target)
4504 convert_move (target, ops[0].value, false);
4505 return target;
4506 }
4507 }
4508 delete_insns_since (last);
4509 return NULL_RTX;
4510 }
4511 \f
4512 /* These functions attempt to generate an insn body, rather than
4513 emitting the insn, but if the gen function already emits them, we
4514 make no attempt to turn them back into naked patterns. */
4515
4516 /* Generate and return an insn body to add Y to X. */
4517
4518 rtx_insn *
4519 gen_add2_insn (rtx x, rtx y)
4520 {
4521 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4522
4523 gcc_assert (insn_operand_matches (icode, 0, x));
4524 gcc_assert (insn_operand_matches (icode, 1, x));
4525 gcc_assert (insn_operand_matches (icode, 2, y));
4526
4527 return GEN_FCN (icode) (x, x, y);
4528 }
4529
4530 /* Generate and return an insn body to add r1 and c,
4531 storing the result in r0. */
4532
4533 rtx_insn *
4534 gen_add3_insn (rtx r0, rtx r1, rtx c)
4535 {
4536 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4537
4538 if (icode == CODE_FOR_nothing
4539 || !insn_operand_matches (icode, 0, r0)
4540 || !insn_operand_matches (icode, 1, r1)
4541 || !insn_operand_matches (icode, 2, c))
4542 return NULL;
4543
4544 return GEN_FCN (icode) (r0, r1, c);
4545 }
4546
4547 int
4548 have_add2_insn (rtx x, rtx y)
4549 {
4550 enum insn_code icode;
4551
4552 gcc_assert (GET_MODE (x) != VOIDmode);
4553
4554 icode = optab_handler (add_optab, GET_MODE (x));
4555
4556 if (icode == CODE_FOR_nothing)
4557 return 0;
4558
4559 if (!insn_operand_matches (icode, 0, x)
4560 || !insn_operand_matches (icode, 1, x)
4561 || !insn_operand_matches (icode, 2, y))
4562 return 0;
4563
4564 return 1;
4565 }
4566
4567 /* Generate and return an insn body to add Y and Z into X (addptr pattern). */
4568
4569 rtx_insn *
4570 gen_addptr3_insn (rtx x, rtx y, rtx z)
4571 {
4572 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4573
4574 gcc_assert (insn_operand_matches (icode, 0, x));
4575 gcc_assert (insn_operand_matches (icode, 1, y));
4576 gcc_assert (insn_operand_matches (icode, 2, z));
4577
4578 return GEN_FCN (icode) (x, y, z);
4579 }
4580
4581 /* Return true if the target implements an addptr pattern and X, Y,
4582 and Z are valid for the pattern predicates. */
4583
4584 int
4585 have_addptr3_insn (rtx x, rtx y, rtx z)
4586 {
4587 enum insn_code icode;
4588
4589 gcc_assert (GET_MODE (x) != VOIDmode);
4590
4591 icode = optab_handler (addptr3_optab, GET_MODE (x));
4592
4593 if (icode == CODE_FOR_nothing)
4594 return 0;
4595
4596 if (!insn_operand_matches (icode, 0, x)
4597 || !insn_operand_matches (icode, 1, y)
4598 || !insn_operand_matches (icode, 2, z))
4599 return 0;
4600
4601 return 1;
4602 }
4603
4604 /* Generate and return an insn body to subtract Y from X. */
4605
4606 rtx_insn *
4607 gen_sub2_insn (rtx x, rtx y)
4608 {
4609 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4610
4611 gcc_assert (insn_operand_matches (icode, 0, x));
4612 gcc_assert (insn_operand_matches (icode, 1, x));
4613 gcc_assert (insn_operand_matches (icode, 2, y));
4614
4615 return GEN_FCN (icode) (x, x, y);
4616 }
4617
4618 /* Generate and return an insn body to subtract c from r1,
4619 storing the result in r0. */
4620
4621 rtx_insn *
4622 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4623 {
4624 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4625
4626 if (icode == CODE_FOR_nothing
4627 || !insn_operand_matches (icode, 0, r0)
4628 || !insn_operand_matches (icode, 1, r1)
4629 || !insn_operand_matches (icode, 2, c))
4630 return NULL;
4631
4632 return GEN_FCN (icode) (r0, r1, c);
4633 }
4634
4635 int
4636 have_sub2_insn (rtx x, rtx y)
4637 {
4638 enum insn_code icode;
4639
4640 gcc_assert (GET_MODE (x) != VOIDmode);
4641
4642 icode = optab_handler (sub_optab, GET_MODE (x));
4643
4644 if (icode == CODE_FOR_nothing)
4645 return 0;
4646
4647 if (!insn_operand_matches (icode, 0, x)
4648 || !insn_operand_matches (icode, 1, x)
4649 || !insn_operand_matches (icode, 2, y))
4650 return 0;
4651
4652 return 1;
4653 }
4654 \f
4655 /* Generate the body of an insn to extend Y (with mode MFROM)
4656 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4657
4658 rtx_insn *
4659 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4660 machine_mode mfrom, int unsignedp)
4661 {
4662 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4663 return GEN_FCN (icode) (x, y);
4664 }
4665 \f
4666 /* Generate code to convert FROM to floating point
4667 and store in TO. FROM must be fixed point and not VOIDmode.
4668 UNSIGNEDP nonzero means regard FROM as unsigned.
4669 Normally this is done by correcting the final value
4670 if it is negative. */
4671
4672 void
4673 expand_float (rtx to, rtx from, int unsignedp)
4674 {
4675 enum insn_code icode;
4676 rtx target = to;
4677 scalar_mode from_mode, to_mode;
4678 machine_mode fmode, imode;
4679 bool can_do_signed = false;
4680
4681 /* Crash now, because we won't be able to decide which mode to use. */
4682 gcc_assert (GET_MODE (from) != VOIDmode);
4683
4684 /* Look for an insn to do the conversion. Do it in the specified
4685 modes if possible; otherwise convert either input, output or both to
4686 wider mode. If the integer mode is wider than the mode of FROM,
4687 we can do the conversion signed even if the input is unsigned. */
4688
4689 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4690 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4691 {
4692 int doing_unsigned = unsignedp;
4693
4694 if (fmode != GET_MODE (to)
4695 && (significand_size (fmode)
4696 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
4697 continue;
4698
4699 icode = can_float_p (fmode, imode, unsignedp);
4700 if (icode == CODE_FOR_nothing && unsignedp)
4701 {
4702 enum insn_code scode = can_float_p (fmode, imode, 0);
4703 if (scode != CODE_FOR_nothing)
4704 can_do_signed = true;
4705 if (imode != GET_MODE (from))
4706 icode = scode, doing_unsigned = 0;
4707 }
4708
4709 if (icode != CODE_FOR_nothing)
4710 {
4711 if (imode != GET_MODE (from))
4712 from = convert_to_mode (imode, from, unsignedp);
4713
4714 if (fmode != GET_MODE (to))
4715 target = gen_reg_rtx (fmode);
4716
4717 emit_unop_insn (icode, target, from,
4718 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4719
4720 if (target != to)
4721 convert_move (to, target, 0);
4722 return;
4723 }
4724 }
4725
4726 /* Unsigned integer, and no way to convert directly. Convert as signed,
4727 then unconditionally adjust the result. */
4728 if (unsignedp
4729 && can_do_signed
4730 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
4731 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
4732 {
4733 opt_scalar_mode fmode_iter;
4734 rtx_code_label *label = gen_label_rtx ();
4735 rtx temp;
4736 REAL_VALUE_TYPE offset;
4737
4738 /* Look for a usable floating mode FMODE wider than the source and at
4739 least as wide as the target. Using FMODE will avoid rounding woes
4740 with unsigned values greater than the signed maximum value. */
4741
4742 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
4743 {
4744 scalar_mode fmode = fmode_iter.require ();
4745 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
4746 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
4747 break;
4748 }
4749
4750 if (!fmode_iter.exists (&fmode))
4751 {
4752 /* There is no such mode. Pretend the target is wide enough. */
4753 fmode = to_mode;
4754
4755 /* Avoid double-rounding when TO is narrower than FROM. */
4756 if ((significand_size (fmode) + 1)
4757 < GET_MODE_PRECISION (from_mode))
4758 {
4759 rtx temp1;
4760 rtx_code_label *neglabel = gen_label_rtx ();
4761
4762 /* Don't use TARGET if it isn't a register, is a hard register,
4763 or is the wrong mode. */
4764 if (!REG_P (target)
4765 || REGNO (target) < FIRST_PSEUDO_REGISTER
4766 || GET_MODE (target) != fmode)
4767 target = gen_reg_rtx (fmode);
4768
4769 imode = from_mode;
4770 do_pending_stack_adjust ();
4771
4772 /* Test whether the sign bit is set. */
4773 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4774 0, neglabel);
4775
4776 /* The sign bit is not set. Convert as signed. */
4777 expand_float (target, from, 0);
4778 emit_jump_insn (targetm.gen_jump (label));
4779 emit_barrier ();
4780
4781 /* The sign bit is set.
4782 Convert to a usable (positive signed) value by shifting right
4783 one bit, while remembering if a nonzero bit was shifted
4784 out; i.e., compute (from & 1) | (from >> 1). */
4785
4786 emit_label (neglabel);
4787 temp = expand_binop (imode, and_optab, from, const1_rtx,
4788 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4789 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4790 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4791 OPTAB_LIB_WIDEN);
4792 expand_float (target, temp, 0);
4793
4794 /* Multiply by 2 to undo the shift above. */
4795 temp = expand_binop (fmode, add_optab, target, target,
4796 target, 0, OPTAB_LIB_WIDEN);
4797 if (temp != target)
4798 emit_move_insn (target, temp);
4799
4800 do_pending_stack_adjust ();
4801 emit_label (label);
4802 goto done;
4803 }
4804 }
4805
4806 /* If we are about to do some arithmetic to correct for an
4807 unsigned operand, do it in a pseudo-register. */
4808
4809 if (to_mode != fmode
4810 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4811 target = gen_reg_rtx (fmode);
4812
4813 /* Convert as signed integer to floating. */
4814 expand_float (target, from, 0);
4815
4816 /* If FROM is negative (and therefore TO is negative),
4817 correct its value by 2**bitwidth. */
4818
4819 do_pending_stack_adjust ();
4820 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
4821 0, label);
4822
4823
4824 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
4825 temp = expand_binop (fmode, add_optab, target,
4826 const_double_from_real_value (offset, fmode),
4827 target, 0, OPTAB_LIB_WIDEN);
4828 if (temp != target)
4829 emit_move_insn (target, temp);
4830
4831 do_pending_stack_adjust ();
4832 emit_label (label);
4833 goto done;
4834 }
4835
4836 /* No hardware instruction available; call a library routine. */
4837 {
4838 rtx libfunc;
4839 rtx_insn *insns;
4840 rtx value;
4841 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4842
4843 if (is_narrower_int_mode (GET_MODE (from), SImode))
4844 from = convert_to_mode (SImode, from, unsignedp);
4845
4846 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4847 gcc_assert (libfunc);
4848
4849 start_sequence ();
4850
4851 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4852 GET_MODE (to), from, GET_MODE (from));
4853 insns = get_insns ();
4854 end_sequence ();
4855
4856 emit_libcall_block (insns, target, value,
4857 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4858 GET_MODE (to), from));
4859 }
4860
4861 done:
4862
4863 /* Copy result to requested destination
4864 if we have been computing in a temp location. */
4865
4866 if (target != to)
4867 {
4868 if (GET_MODE (target) == GET_MODE (to))
4869 emit_move_insn (to, target);
4870 else
4871 convert_move (to, target, 0);
4872 }
4873 }
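/* Editorial worked example for the unsigned paths above (assumptions:
   32-bit FROM, IEEE single-precision target).  For from = 0xFFFFFFFF:

     - wide-FMODE path: convert as signed, giving -1.0 in the wider
       format, then add 2**32 to correct the value;

     - halve/double path: compute (from & 1) | (from >> 1) = 0x7FFFFFFF,
       convert that now-positive value as signed, then double it with
       the final addition -- the OR-ed in low bit records whether a
       nonzero bit was shifted out, so the doubled result still rounds
       correctly.  */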
4874 \f
4875 /* Generate code to convert FROM to fixed point and store in TO. FROM
4876 must be floating point. */
4877
4878 void
4879 expand_fix (rtx to, rtx from, int unsignedp)
4880 {
4881 enum insn_code icode;
4882 rtx target = to;
4883 machine_mode fmode, imode;
4884 opt_scalar_mode fmode_iter;
4885 bool must_trunc = false;
4886
4887 /* We first try to find a pair of modes, one real and one integer, at
4888 least as wide as FROM and TO, respectively, in which we can open-code
4889 this conversion. If the integer mode is wider than the mode of TO,
4890 we can do the conversion either signed or unsigned. */
4891
4892 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4893 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
4894 {
4895 int doing_unsigned = unsignedp;
4896
4897 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4898 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4899 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4900
4901 if (icode != CODE_FOR_nothing)
4902 {
4903 rtx_insn *last = get_last_insn ();
4904 if (fmode != GET_MODE (from))
4905 from = convert_to_mode (fmode, from, 0);
4906
4907 if (must_trunc)
4908 {
4909 rtx temp = gen_reg_rtx (GET_MODE (from));
4910 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4911 temp, 0);
4912 }
4913
4914 if (imode != GET_MODE (to))
4915 target = gen_reg_rtx (imode);
4916
4917 if (maybe_emit_unop_insn (icode, target, from,
4918 doing_unsigned ? UNSIGNED_FIX : FIX))
4919 {
4920 if (target != to)
4921 convert_move (to, target, unsignedp);
4922 return;
4923 }
4924 delete_insns_since (last);
4925 }
4926 }
4927
4928 /* For an unsigned conversion, there is one more way to do it.
4929 If a signed conversion is available, we generate code that compares
4930 the real value to the largest representable positive number. If it
4931 is smaller, the conversion is done normally. Otherwise, subtract
4932 one plus the highest signed number, convert, and add it back.
4933
4934 We only need to check all real modes, since we know we didn't find
4935 anything with a wider integer mode.
4936
4937 This code used to extend the FP value into a mode wider than the destination.
4938 This is needed for decimal float modes which cannot accurately
4939 represent one plus the highest signed number of the same size, but
4940 not for binary modes. Consider, for instance, conversion from SFmode
4941 into DImode.
4942
4943 The hot path through the code is dealing with inputs smaller than 2^63
4944 and doing just the conversion, so there are no bits to lose.
4945
4946 In the other path we know the value is positive in the range 2^63..2^64-1
4947 inclusive (for any other input, overflow happens and the result is
4948 undefined), so we know that the most significant bit set in the
4949 mantissa corresponds to 2^63. The subtraction of 2^63 should not
4950 generate any rounding as it simply clears out that bit. The rest is trivial. */
4951
4952 scalar_int_mode to_mode;
4953 if (unsignedp
4954 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
4955 && HWI_COMPUTABLE_MODE_P (to_mode))
4956 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
4957 {
4958 scalar_mode fmode = fmode_iter.require ();
4959 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
4960 0, &must_trunc)
4961 && (!DECIMAL_FLOAT_MODE_P (fmode)
4962 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
4963 {
4964 int bitsize;
4965 REAL_VALUE_TYPE offset;
4966 rtx limit;
4967 rtx_code_label *lab1, *lab2;
4968 rtx_insn *insn;
4969
4970 bitsize = GET_MODE_PRECISION (to_mode);
4971 real_2expN (&offset, bitsize - 1, fmode);
4972 limit = const_double_from_real_value (offset, fmode);
4973 lab1 = gen_label_rtx ();
4974 lab2 = gen_label_rtx ();
4975
4976 if (fmode != GET_MODE (from))
4977 from = convert_to_mode (fmode, from, 0);
4978
4979 /* See if we need to do the subtraction. */
4980 do_pending_stack_adjust ();
4981 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
4982 GET_MODE (from), 0, lab1);
4983
4984 /* If not, do the signed "fix" and branch around fixup code. */
4985 expand_fix (to, from, 0);
4986 emit_jump_insn (targetm.gen_jump (lab2));
4987 emit_barrier ();
4988
4989 /* Otherwise, subtract 2**(N-1), convert to signed number,
4990 then add 2**(N-1). Do the addition using XOR since this
4991 will often generate better code. */
4992 emit_label (lab1);
4993 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4994 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4995 expand_fix (to, target, 0);
4996 target = expand_binop (to_mode, xor_optab, to,
4997 gen_int_mode
4998 (HOST_WIDE_INT_1 << (bitsize - 1),
4999 to_mode),
5000 to, 1, OPTAB_LIB_WIDEN);
5001
5002 if (target != to)
5003 emit_move_insn (to, target);
5004
5005 emit_label (lab2);
5006
5007 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5008 {
5009 /* Make a place for a REG_NOTE and add it. */
5010 insn = emit_move_insn (to, to);
5011 set_dst_reg_note (insn, REG_EQUAL,
5012 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5013 copy_rtx (from)),
5014 to);
5015 }
5016
5017 return;
5018 }
5019 }
5020
5021 /* We can't do it with an insn, so use a library call. But first ensure
5022 that the mode of TO is at least as wide as SImode, since those are the
5023 only library calls we know about. */
5024
5025 if (is_narrower_int_mode (GET_MODE (to), SImode))
5026 {
5027 target = gen_reg_rtx (SImode);
5028
5029 expand_fix (target, from, unsignedp);
5030 }
5031 else
5032 {
5033 rtx_insn *insns;
5034 rtx value;
5035 rtx libfunc;
5036
5037 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5038 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5039 gcc_assert (libfunc);
5040
5041 start_sequence ();
5042
5043 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5044 GET_MODE (to), from, GET_MODE (from));
5045 insns = get_insns ();
5046 end_sequence ();
5047
5048 emit_libcall_block (insns, target, value,
5049 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5050 GET_MODE (to), from));
5051 }
5052
5053 if (target != to)
5054 {
5055 if (GET_MODE (to) == GET_MODE (target))
5056 emit_move_insn (to, target);
5057 else
5058 convert_move (to, target, 0);
5059 }
5060 }
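/* Editorial worked example for the unsigned-fix fallback above
   (assumptions: to_mode is DImode, so N == 64, and FROM holds the
   representable double 2**63 + 2**32):

     FROM >= limit (2**63), so the fixup path is taken:
       FROM - 2**63              = 2**32  (exact: the subtraction only
					   clears the top mantissa bit)
       signed fix of that value  = 0x100000000
       XOR with 1 << 63          = 0x8000000100000000 == 2**63 + 2**32

   The XOR re-inserts the 2**63 bit without a signed addition that could
   overflow, matching the "often generate better code" remark above.  */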
5061
5062
5063 /* Promote integer arguments for a libcall if necessary.
5064 emit_library_call_value cannot do the promotion because it does not
5065 know if it should do a signed or unsigned promotion. This is because
5066 there are no tree types defined for libcalls. */
5067
5068 static rtx
5069 prepare_libcall_arg (rtx arg, int uintp)
5070 {
5071 scalar_int_mode mode;
5072 machine_mode arg_mode;
5073 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5074 {
5075 /* If we need to promote the integer function argument we need to do
5076 it here instead of inside emit_library_call_value because in
5077 emit_library_call_value we don't know if we should do a signed or
5078 unsigned promotion. */
5079
5080 int unsigned_p = 0;
5081 arg_mode = promote_function_mode (NULL_TREE, mode,
5082 &unsigned_p, NULL_TREE, 0);
5083 if (arg_mode != mode)
5084 return convert_to_mode (arg_mode, arg, uintp);
5085 }
5086 return arg;
5087 }
5088
5089 /* Generate code to convert FROM to TO, at least one of which is fixed-point.
5090 If UINTP is true, either TO or FROM is an unsigned integer.
5091 If SATP is true, we need to saturate the result. */
5092
5093 void
5094 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5095 {
5096 machine_mode to_mode = GET_MODE (to);
5097 machine_mode from_mode = GET_MODE (from);
5098 convert_optab tab;
5099 enum rtx_code this_code;
5100 enum insn_code code;
5101 rtx_insn *insns;
5102 rtx value;
5103 rtx libfunc;
5104
5105 if (to_mode == from_mode)
5106 {
5107 emit_move_insn (to, from);
5108 return;
5109 }
5110
5111 if (uintp)
5112 {
5113 tab = satp ? satfractuns_optab : fractuns_optab;
5114 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5115 }
5116 else
5117 {
5118 tab = satp ? satfract_optab : fract_optab;
5119 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5120 }
5121 code = convert_optab_handler (tab, to_mode, from_mode);
5122 if (code != CODE_FOR_nothing)
5123 {
5124 emit_unop_insn (code, to, from, this_code);
5125 return;
5126 }
5127
5128 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5129 gcc_assert (libfunc);
5130
5131 from = prepare_libcall_arg (from, uintp);
5132 from_mode = GET_MODE (from);
5133
5134 start_sequence ();
5135 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5136 from, from_mode);
5137 insns = get_insns ();
5138 end_sequence ();
5139
5140 emit_libcall_block (insns, to, value,
5141 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5142 }
5143
5144 /* Generate code to convert FROM to fixed point and store in TO. FROM
5145 must be floating point, TO must be signed. Use the conversion optab
5146 TAB to do the conversion. */
5147
5148 bool
5149 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5150 {
5151 enum insn_code icode;
5152 rtx target = to;
5153 machine_mode fmode, imode;
5154
5155 /* We first try to find a pair of modes, one real and one integer, at
5156 least as wide as FROM and TO, respectively, in which we can open-code
5157 this conversion. If the integer mode is wider than the mode of TO,
5158 we can do the conversion either signed or unsigned. */
5159
5160 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5161 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5162 {
5163 icode = convert_optab_handler (tab, imode, fmode);
5164 if (icode != CODE_FOR_nothing)
5165 {
5166 rtx_insn *last = get_last_insn ();
5167 if (fmode != GET_MODE (from))
5168 from = convert_to_mode (fmode, from, 0);
5169
5170 if (imode != GET_MODE (to))
5171 target = gen_reg_rtx (imode);
5172
5173 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5174 {
5175 delete_insns_since (last);
5176 continue;
5177 }
5178 if (target != to)
5179 convert_move (to, target, 0);
5180 return true;
5181 }
5182 }
5183
5184 return false;
5185 }
5186 \f
5187 /* Report whether we have an instruction to perform the operation
5188 specified by CODE on operands of mode MODE. */
5189 int
5190 have_insn_for (enum rtx_code code, machine_mode mode)
5191 {
5192 return (code_to_optab (code)
5193 && (optab_handler (code_to_optab (code), mode)
5194 != CODE_FOR_nothing));
5195 }
5196
5197 /* Print information about the current contents of the optabs on
5198 STDERR. */
5199
5200 DEBUG_FUNCTION void
5201 debug_optab_libfuncs (void)
5202 {
5203 int i, j, k;
5204
5205 /* Dump the arithmetic optabs. */
5206 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5207 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5208 {
5209 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5210 if (l)
5211 {
5212 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5213 fprintf (stderr, "%s\t%s:\t%s\n",
5214 GET_RTX_NAME (optab_to_code ((optab) i)),
5215 GET_MODE_NAME (j),
5216 XSTR (l, 0));
5217 }
5218 }
5219
5220 /* Dump the conversion optabs. */
5221 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5222 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5223 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5224 {
5225 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5226 (machine_mode) k);
5227 if (l)
5228 {
5229 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5230 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5231 GET_RTX_NAME (optab_to_code ((optab) i)),
5232 GET_MODE_NAME (j),
5233 GET_MODE_NAME (k),
5234 XSTR (l, 0));
5235 }
5236 }
5237 }
5238
5239 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5240 CODE. Return 0 on failure. */
5241
5242 rtx_insn *
5243 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5244 {
5245 machine_mode mode = GET_MODE (op1);
5246 enum insn_code icode;
5247 rtx_insn *insn;
5248 rtx trap_rtx;
5249
5250 if (mode == VOIDmode)
5251 return 0;
5252
5253 icode = optab_handler (ctrap_optab, mode);
5254 if (icode == CODE_FOR_nothing)
5255 return 0;
5256
5257 /* Some targets only accept a zero trap code. */
5258 if (!insn_operand_matches (icode, 3, tcode))
5259 return 0;
5260
5261 do_pending_stack_adjust ();
5262 start_sequence ();
5263 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5264 &trap_rtx, &mode);
5265 if (!trap_rtx)
5266 insn = NULL;
5267 else
5268 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5269 tcode);
5270
5271 /* If that failed, then give up. */
5272 if (insn == 0)
5273 {
5274 end_sequence ();
5275 return 0;
5276 }
5277
5278 emit_insn (insn);
5279 insn = get_insns ();
5280 end_sequence ();
5281 return insn;
5282 }
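/* Editorial sketch (a fragment as it might appear in an expander;
   DIVISOR is a hypothetical SImode pseudo): using gen_cond_trap to trap
   on a zero divisor.  */
#if 0
  rtx_insn *seq = gen_cond_trap (EQ, divisor, const0_rtx, const0_rtx);
  if (seq)
    emit_insn (seq);
  /* else: emit an explicit compare, branch and trap sequence.  A zero
     trap code is passed because, per the comment above, some targets
     accept nothing else.  */
#endif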
5283
5284 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5285 or unsigned operation code. */
5286
5287 enum rtx_code
5288 get_rtx_code (enum tree_code tcode, bool unsignedp)
5289 {
5290 enum rtx_code code;
5291 switch (tcode)
5292 {
5293 case EQ_EXPR:
5294 code = EQ;
5295 break;
5296 case NE_EXPR:
5297 code = NE;
5298 break;
5299 case LT_EXPR:
5300 code = unsignedp ? LTU : LT;
5301 break;
5302 case LE_EXPR:
5303 code = unsignedp ? LEU : LE;
5304 break;
5305 case GT_EXPR:
5306 code = unsignedp ? GTU : GT;
5307 break;
5308 case GE_EXPR:
5309 code = unsignedp ? GEU : GE;
5310 break;
5311
5312 case UNORDERED_EXPR:
5313 code = UNORDERED;
5314 break;
5315 case ORDERED_EXPR:
5316 code = ORDERED;
5317 break;
5318 case UNLT_EXPR:
5319 code = UNLT;
5320 break;
5321 case UNLE_EXPR:
5322 code = UNLE;
5323 break;
5324 case UNGT_EXPR:
5325 code = UNGT;
5326 break;
5327 case UNGE_EXPR:
5328 code = UNGE;
5329 break;
5330 case UNEQ_EXPR:
5331 code = UNEQ;
5332 break;
5333 case LTGT_EXPR:
5334 code = LTGT;
5335 break;
5336
5337 case BIT_AND_EXPR:
5338 code = AND;
5339 break;
5340
5341 case BIT_IOR_EXPR:
5342 code = IOR;
5343 break;
5344
5345 default:
5346 gcc_unreachable ();
5347 }
5348 return code;
5349 }
5350
5351 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5352 select signed or unsigned operators. OPNO holds the index of the
5353 first comparison operand for insn ICODE. Do not generate the
5354 compare instruction itself. */
5355
5356 static rtx
5357 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5358 tree t_op0, tree t_op1, bool unsignedp,
5359 enum insn_code icode, unsigned int opno)
5360 {
5361 struct expand_operand ops[2];
5362 rtx rtx_op0, rtx_op1;
5363 machine_mode m0, m1;
5364 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5365
5366 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5367
5368 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5369 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5370 cases, use the original mode. */
5371 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5372 EXPAND_STACK_PARM);
5373 m0 = GET_MODE (rtx_op0);
5374 if (m0 == VOIDmode)
5375 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5376
5377 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5378 EXPAND_STACK_PARM);
5379 m1 = GET_MODE (rtx_op1);
5380 if (m1 == VOIDmode)
5381 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5382
5383 create_input_operand (&ops[0], rtx_op0, m0);
5384 create_input_operand (&ops[1], rtx_op1, m1);
5385 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5386 gcc_unreachable ();
5387 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5388 }
5389
5390 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5391 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5392 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5393 shift. */
5394 static rtx
5395 shift_amt_for_vec_perm_mask (rtx sel)
5396 {
5397 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5398 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5399
5400 if (GET_CODE (sel) != CONST_VECTOR)
5401 return NULL_RTX;
5402
5403 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5404 if (first >= nelt)
5405 return NULL_RTX;
5406 for (i = 1; i < nelt; i++)
5407 {
5408 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5409 unsigned int expected = i + first;
5410 /* Indices into the second vector are all equivalent. */
5411 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5412 return NULL_RTX;
5413 }
5414
5415 return gen_int_shift_amount (GET_MODE (sel), first * bitsize);
5416 }
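/* Illustration: for a V4SI selector { 1, 2, 3, 4 } with a zero second
   operand, FIRST is 1 and each later index grows by one (index 4 merely
   selects a zero element), so the routine above reports a shift of
   1 * 32 = 32 bits.  A selector such as { 1, 3, 2, 0 } is not a shift
   and yields NULL_RTX.  */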
5417
5418 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5419
5420 static rtx
5421 expand_vec_perm_1 (enum insn_code icode, rtx target,
5422 rtx v0, rtx v1, rtx sel)
5423 {
5424 machine_mode tmode = GET_MODE (target);
5425 machine_mode smode = GET_MODE (sel);
5426 struct expand_operand ops[4];
5427
5428 create_output_operand (&ops[0], target, tmode);
5429 create_input_operand (&ops[3], sel, smode);
5430
5431 /* Make an effort to preserve v0 == v1. The target expander is able to
5432 rely on this to determine if we're permuting a single input operand. */
5433 if (rtx_equal_p (v0, v1))
5434 {
5435 if (!insn_operand_matches (icode, 1, v0))
5436 v0 = force_reg (tmode, v0);
5437 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5438 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5439
5440 create_fixed_operand (&ops[1], v0);
5441 create_fixed_operand (&ops[2], v0);
5442 }
5443 else
5444 {
5445 create_input_operand (&ops[1], v0, tmode);
5446 create_input_operand (&ops[2], v1, tmode);
5447 }
5448
5449 if (maybe_expand_insn (icode, 4, ops))
5450 return ops[0].value;
5451 return NULL_RTX;
5452 }
5453
5454 /* Generate instructions for vec_perm optab given its mode
5455 and three operands. */
5456
5457 rtx
5458 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5459 {
5460 enum insn_code icode;
5461 machine_mode qimode;
5462 unsigned int i, w, e, u;
5463 rtx tmp, sel_qi = NULL;
5464 rtvec vec;
5465
5466 if (!target || GET_MODE (target) != mode)
5467 target = gen_reg_rtx (mode);
5468
5469 w = GET_MODE_SIZE (mode);
5470 e = GET_MODE_NUNITS (mode);
5471 u = GET_MODE_UNIT_SIZE (mode);
5472
5473 /* Set QIMODE to a different vector mode with byte elements.
5474 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5475 if (!qimode_for_vec_perm (mode).exists (&qimode))
5476 qimode = VOIDmode;
5477
5478 /* If the input is a constant, expand it specially. */
5479 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5480 if (GET_CODE (sel) == CONST_VECTOR)
5481 {
5482 /* See if this can be handled with a vec_shr. We only do this if the
5483 second vector is all zeroes. */
5484 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5485 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5486 ? optab_handler (vec_shr_optab, qimode)
5487 : CODE_FOR_nothing);
5488 rtx shift_amt = NULL_RTX;
5489 if (v1 == CONST0_RTX (GET_MODE (v1))
5490 && (shift_code != CODE_FOR_nothing
5491 || shift_code_qi != CODE_FOR_nothing))
5492 {
5493 shift_amt = shift_amt_for_vec_perm_mask (sel);
5494 if (shift_amt)
5495 {
5496 struct expand_operand ops[3];
5497 if (shift_code != CODE_FOR_nothing)
5498 {
5499 create_output_operand (&ops[0], target, mode);
5500 create_input_operand (&ops[1], v0, mode);
5501 create_convert_operand_from_type (&ops[2], shift_amt,
5502 sizetype);
5503 if (maybe_expand_insn (shift_code, 3, ops))
5504 return ops[0].value;
5505 }
5506 if (shift_code_qi != CODE_FOR_nothing)
5507 {
5508 tmp = gen_reg_rtx (qimode);
5509 create_output_operand (&ops[0], tmp, qimode);
5510 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5511 qimode);
5512 create_convert_operand_from_type (&ops[2], shift_amt,
5513 sizetype);
5514 if (maybe_expand_insn (shift_code_qi, 3, ops))
5515 return gen_lowpart (mode, ops[0].value);
5516 }
5517 }
5518 }
5519
5520 icode = direct_optab_handler (vec_perm_const_optab, mode);
5521 if (icode != CODE_FOR_nothing)
5522 {
5523 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5524 if (tmp)
5525 return tmp;
5526 }
5527
5528 /* Fall back to a constant byte-based permutation. */
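/* For instance, lowering a V2SI permutation with selector { 1, 2 } to
   V8QI (u == 4 bytes per element) produces the byte selector
   { 4, 5, 6, 7, 8, 9, 10, 11 }; the "& (2 * e - 1)" below wraps each
   index into the double-width concatenation of V0 and V1, just as
   vec_perm itself does.  */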
5529 if (qimode != VOIDmode)
5530 {
5531 vec = rtvec_alloc (w);
5532 for (i = 0; i < e; ++i)
5533 {
5534 unsigned int j, this_e;
5535
5536 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5537 this_e &= 2 * e - 1;
5538 this_e *= u;
5539
5540 for (j = 0; j < u; ++j)
5541 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5542 }
5543 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5544
5545 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5546 if (icode != CODE_FOR_nothing)
5547 {
5548 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5549 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5550 gen_lowpart (qimode, v1), sel_qi);
5551 if (tmp)
5552 return gen_lowpart (mode, tmp);
5553 }
5554 }
5555 }
5556
5557 /* Otherwise expand as a fully variable permutation. */
5558 icode = direct_optab_handler (vec_perm_optab, mode);
5559 if (icode != CODE_FOR_nothing)
5560 {
5561 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5562 if (tmp)
5563 return tmp;
5564 }
5565
5566 /* As a special case to aid several targets, lower the element-based
5567 permutation to a byte-based permutation and try again. */
5568 if (qimode == VOIDmode)
5569 return NULL_RTX;
5570 icode = direct_optab_handler (vec_perm_optab, qimode);
5571 if (icode == CODE_FOR_nothing)
5572 return NULL_RTX;
5573
5574 if (sel_qi == NULL)
5575 {
5576 /* Multiply each element by its byte size. */
5577 machine_mode selmode = GET_MODE (sel);
5578 if (u == 2)
5579 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5580 NULL, 0, OPTAB_DIRECT);
5581 else
5582 sel = expand_simple_binop (selmode, ASHIFT, sel,
5583 gen_int_shift_amount (selmode,
5584 exact_log2 (u)),
5585 NULL, 0, OPTAB_DIRECT);
5586 gcc_assert (sel != NULL);
5587
5588 /* Broadcast the low byte of each element into each of its bytes. */
5589 vec = rtvec_alloc (w);
5590 for (i = 0; i < w; ++i)
5591 {
5592 int this_e = i / u * u;
5593 if (BYTES_BIG_ENDIAN)
5594 this_e += u - 1;
5595 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5596 }
5597 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5598 sel = gen_lowpart (qimode, sel);
5599 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5600 gcc_assert (sel != NULL);
5601
5602 /* Add the byte offset to each byte element. */
5603 /* Note that the definition of the indices here is memory ordering,
5604 so there should be no difference between big and little endian. */
5605 vec = rtvec_alloc (w);
5606 for (i = 0; i < w; ++i)
5607 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5608 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5609 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5610 sel, 0, OPTAB_DIRECT);
5611 gcc_assert (sel_qi != NULL);
5612 }
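/* Worked example of the lowering above (little endian, V4HI, u == 2):
   a variable selector { a, b, c, d } is first doubled to
   { 2a, 2b, 2c, 2d }, broadcasting the low byte of each element then
   gives { 2a, 2a, 2b, 2b, 2c, 2c, 2d, 2d }, and adding the offsets
   { 0, 1, 0, 1, ... } yields { 2a, 2a+1, 2b, 2b+1, ... }, so each byte
   of an element is fetched in memory order.  */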
5613
5614 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5615 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5616 gen_lowpart (qimode, v1), sel_qi);
5617 if (tmp)
5618 tmp = gen_lowpart (mode, tmp);
5619 return tmp;
5620 }
5621
5622 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5623 three operands. */
5624
5625 rtx
5626 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5627 rtx target)
5628 {
5629 struct expand_operand ops[4];
5630 machine_mode mode = TYPE_MODE (vec_cond_type);
5631 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5632 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5633 rtx mask, rtx_op1, rtx_op2;
5634
5635 if (icode == CODE_FOR_nothing)
5636 return 0;
5637
5638 mask = expand_normal (op0);
5639 rtx_op1 = expand_normal (op1);
5640 rtx_op2 = expand_normal (op2);
5641
5642 mask = force_reg (mask_mode, mask);
5643 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5644
5645 create_output_operand (&ops[0], target, mode);
5646 create_input_operand (&ops[1], rtx_op1, mode);
5647 create_input_operand (&ops[2], rtx_op2, mode);
5648 create_input_operand (&ops[3], mask, mask_mode);
5649 expand_insn (icode, 4, ops);
5650
5651 return ops[0].value;
5652 }
5653
5654 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5655 three operands. */
5656
5657 rtx
5658 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5659 rtx target)
5660 {
5661 struct expand_operand ops[6];
5662 enum insn_code icode;
5663 rtx comparison, rtx_op1, rtx_op2;
5664 machine_mode mode = TYPE_MODE (vec_cond_type);
5665 machine_mode cmp_op_mode;
5666 bool unsignedp;
5667 tree op0a, op0b;
5668 enum tree_code tcode;
5669
5670 if (COMPARISON_CLASS_P (op0))
5671 {
5672 op0a = TREE_OPERAND (op0, 0);
5673 op0b = TREE_OPERAND (op0, 1);
5674 tcode = TREE_CODE (op0);
5675 }
5676 else
5677 {
5678 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5679 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5680 != CODE_FOR_nothing)
5681 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5682 op2, target);
5683 /* Fake op0 < 0. */
5684 else
5685 {
5686 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5687 == MODE_VECTOR_INT);
5688 op0a = op0;
5689 op0b = build_zero_cst (TREE_TYPE (op0));
5690 tcode = LT_EXPR;
5691 }
5692 }
5693 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5694 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5695
5696
5697 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5698 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5699
5700 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5701 if (icode == CODE_FOR_nothing)
5702 {
5703 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5704 icode = get_vcond_eq_icode (mode, cmp_op_mode);
5705 if (icode == CODE_FOR_nothing)
5706 return 0;
5707 }
5708
5709 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5710 icode, 4);
5711 rtx_op1 = expand_normal (op1);
5712 rtx_op2 = expand_normal (op2);
5713
5714 create_output_operand (&ops[0], target, mode);
5715 create_input_operand (&ops[1], rtx_op1, mode);
5716 create_input_operand (&ops[2], rtx_op2, mode);
5717 create_fixed_operand (&ops[3], comparison);
5718 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5719 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5720 expand_insn (icode, 6, ops);
5721 return ops[0].value;
5722 }
5723
5724 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
5725 Use TARGET for the result if nonnull and convenient. */
5726
5727 rtx
5728 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
5729 {
5730 struct expand_operand ops[3];
5731 enum insn_code icode;
5732 machine_mode emode = GET_MODE_INNER (vmode);
5733
5734 icode = direct_optab_handler (vec_series_optab, vmode);
5735 gcc_assert (icode != CODE_FOR_nothing);
5736
5737 create_output_operand (&ops[0], target, vmode);
5738 create_input_operand (&ops[1], op0, emode);
5739 create_input_operand (&ops[2], op1, emode);
5740
5741 expand_insn (icode, 3, ops);
5742 return ops[0].value;
5743 }
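/* As an illustration, VEC_SERIES_EXPR <0, 1> in mode V4SI produces the
   linear series { 0, 1, 2, 3 }; element I of the result is
   OP0 + I * OP1.  */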
5744
5745 /* Generate insns for a vector comparison into a mask. */
5746
5747 rtx
5748 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5749 {
5750 struct expand_operand ops[4];
5751 enum insn_code icode;
5752 rtx comparison;
5753 machine_mode mask_mode = TYPE_MODE (type);
5754 machine_mode vmode;
5755 bool unsignedp;
5756 tree op0a, op0b;
5757 enum tree_code tcode;
5758
5759 op0a = TREE_OPERAND (exp, 0);
5760 op0b = TREE_OPERAND (exp, 1);
5761 tcode = TREE_CODE (exp);
5762
5763 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5764 vmode = TYPE_MODE (TREE_TYPE (op0a));
5765
5766 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5767 if (icode == CODE_FOR_nothing)
5768 {
5769 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5770 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5771 if (icode == CODE_FOR_nothing)
5772 return 0;
5773 }
5774
5775 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5776 unsignedp, icode, 2);
5777 create_output_operand (&ops[0], target, mask_mode);
5778 create_fixed_operand (&ops[1], comparison);
5779 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5780 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5781 expand_insn (icode, 4, ops);
5782 return ops[0].value;
5783 }
5784
5785 /* Expand a highpart multiply. */
5786
5787 rtx
5788 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5789 rtx target, bool uns_p)
5790 {
5791 struct expand_operand eops[3];
5792 enum insn_code icode;
5793 int method, i, nunits;
5794 machine_mode wmode;
5795 rtx m1, m2, perm;
5796 optab tab1, tab2;
5797 rtvec v;
5798
5799 method = can_mult_highpart_p (mode, uns_p);
5800 switch (method)
5801 {
5802 case 0:
5803 return NULL_RTX;
5804 case 1:
5805 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5806 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5807 OPTAB_LIB_WIDEN);
5808 case 2:
5809 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5810 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5811 break;
5812 case 3:
5813 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5814 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5815 if (BYTES_BIG_ENDIAN)
5816 std::swap (tab1, tab2);
5817 break;
5818 default:
5819 gcc_unreachable ();
5820 }
5821
5822 icode = optab_handler (tab1, mode);
5823 nunits = GET_MODE_NUNITS (mode);
5824 wmode = insn_data[icode].operand[0].mode;
5825 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5826 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5827
5828 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5829 create_input_operand (&eops[1], op0, mode);
5830 create_input_operand (&eops[2], op1, mode);
5831 expand_insn (icode, 3, eops);
5832 m1 = gen_lowpart (mode, eops[0].value);
5833
5834 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5835 create_input_operand (&eops[1], op0, mode);
5836 create_input_operand (&eops[2], op1, mode);
5837 expand_insn (optab_handler (tab2, mode), 3, eops);
5838 m2 = gen_lowpart (mode, eops[0].value);
5839
5840 v = rtvec_alloc (nunits);
5841 if (method == 2)
5842 {
5843 for (i = 0; i < nunits; ++i)
5844 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5845 + ((i & 1) ? nunits : 0));
5846 perm = gen_rtx_CONST_VECTOR (mode, v);
5847 }
5848 else
5849 {
5850 int base = BYTES_BIG_ENDIAN ? 0 : 1;
5851 perm = gen_const_vec_series (mode, GEN_INT (base), GEN_INT (2));
5852 }
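/* Example of the selectors built above (little endian, nunits == 4):
   each widened product spans two narrow elements with its highpart in
   the odd position, so for method 2 the permutation { 1, 5, 3, 7 }
   interleaves the even products in M1 with the odd products in M2,
   while for method 3 the highparts form the series { 1, 3, 5, 7 }.  */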
5853
5854 return expand_vec_perm (mode, m1, m2, perm, target);
5855 }
5856 \f
5857 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5858 pattern. */
5859
5860 static void
5861 find_cc_set (rtx x, const_rtx pat, void *data)
5862 {
5863 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5864 && GET_CODE (pat) == SET)
5865 {
5866 rtx *p_cc_reg = (rtx *) data;
5867 gcc_assert (!*p_cc_reg);
5868 *p_cc_reg = x;
5869 }
5870 }
5871
5872 /* This is a helper function for the other atomic operations. This function
5873 emits a loop that contains SEQ that iterates until a compare-and-swap
5874 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5875 a set of instructions that takes a value from OLD_REG as an input and
5876 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5877 set to the current contents of MEM. After SEQ, a compare-and-swap will
5878 attempt to update MEM with NEW_REG. The function returns true when the
5879 loop was generated successfully. */
5880
5881 static bool
5882 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5883 {
5884 machine_mode mode = GET_MODE (mem);
5885 rtx_code_label *label;
5886 rtx cmp_reg, success, oldval;
5887
5888 /* The loop we want to generate looks like
5889
5890 cmp_reg = mem;
5891 label:
5892 old_reg = cmp_reg;
5893 seq;
5894 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5895 if (success)
5896 goto label;
5897
5898 Note that we only do the plain load from memory once. Subsequent
5899 iterations use the value loaded by the compare-and-swap pattern. */
5900
5901 label = gen_label_rtx ();
5902 cmp_reg = gen_reg_rtx (mode);
5903
5904 emit_move_insn (cmp_reg, mem);
5905 emit_label (label);
5906 emit_move_insn (old_reg, cmp_reg);
5907 if (seq)
5908 emit_insn (seq);
5909
5910 success = NULL_RTX;
5911 oldval = cmp_reg;
5912 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5913 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5914 MEMMODEL_RELAXED))
5915 return false;
5916
5917 if (oldval != cmp_reg)
5918 emit_move_insn (cmp_reg, oldval);
5919
5920 /* Mark this jump predicted not taken. */
5921 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5922 GET_MODE (success), 1, label,
5923 profile_probability::guessed_never ());
5924 return true;
5925 }
5926
5927
5928 /* This function tries to emit an atomic_exchange instruction. VAL is written
5929 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5930 using TARGET if possible. */
5931
5932 static rtx
5933 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5934 {
5935 machine_mode mode = GET_MODE (mem);
5936 enum insn_code icode;
5937
5938 /* If the target supports the exchange directly, great. */
5939 icode = direct_optab_handler (atomic_exchange_optab, mode);
5940 if (icode != CODE_FOR_nothing)
5941 {
5942 struct expand_operand ops[4];
5943
5944 create_output_operand (&ops[0], target, mode);
5945 create_fixed_operand (&ops[1], mem);
5946 create_input_operand (&ops[2], val, mode);
5947 create_integer_operand (&ops[3], model);
5948 if (maybe_expand_insn (icode, 4, ops))
5949 return ops[0].value;
5950 }
5951
5952 return NULL_RTX;
5953 }
5954
5955 /* This function tries to implement an atomic exchange operation using
5956 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5957 The previous contents of *MEM are returned, using TARGET if possible.
5958 Since this instruction is an acquire barrier only, stronger memory
5959 models may require additional barriers to be emitted. */
5960
5961 static rtx
5962 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5963 enum memmodel model)
5964 {
5965 machine_mode mode = GET_MODE (mem);
5966 enum insn_code icode;
5967 rtx_insn *last_insn = get_last_insn ();
5968
5969 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5970
5971 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5972 exists, and the memory model is stronger than acquire, add a release
5973 barrier before the instruction. */
5974
5975 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5976 expand_mem_thread_fence (model);
5977
5978 if (icode != CODE_FOR_nothing)
5979 {
5980 struct expand_operand ops[3];
5981 create_output_operand (&ops[0], target, mode);
5982 create_fixed_operand (&ops[1], mem);
5983 create_input_operand (&ops[2], val, mode);
5984 if (maybe_expand_insn (icode, 3, ops))
5985 return ops[0].value;
5986 }
5987
5988 /* If an external test-and-set libcall is provided, use that instead of
5989 any external compare-and-swap that we might get from the compare-and-
5990 swap-loop expansion later. */
5991 if (!can_compare_and_swap_p (mode, false))
5992 {
5993 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5994 if (libfunc != NULL)
5995 {
5996 rtx addr;
5997
5998 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5999 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6000 mode, addr, ptr_mode,
6001 val, mode);
6002 }
6003 }
6004
6005 /* If the test_and_set can't be emitted, eliminate any barrier that might
6006 have been emitted. */
6007 delete_insns_since (last_insn);
6008 return NULL_RTX;
6009 }
6010
6011 /* This function tries to implement an atomic exchange operation using a
6012 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6013 *MEM are returned, using TARGET if possible. No memory model is required
6014 since a compare_and_swap loop is seq-cst. */
6015
6016 static rtx
6017 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6018 {
6019 machine_mode mode = GET_MODE (mem);
6020
6021 if (can_compare_and_swap_p (mode, true))
6022 {
6023 if (!target || !register_operand (target, mode))
6024 target = gen_reg_rtx (mode);
6025 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6026 return target;
6027 }
6028
6029 return NULL_RTX;
6030 }
6031
6032 /* This function tries to implement an atomic test-and-set operation
6033 using the atomic_test_and_set instruction pattern. A boolean value
6034 is returned from the operation, using TARGET if possible. */
6035
6036 static rtx
6037 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6038 {
6039 machine_mode pat_bool_mode;
6040 struct expand_operand ops[3];
6041
6042 if (!targetm.have_atomic_test_and_set ())
6043 return NULL_RTX;
6044
6045 /* While we always get QImode from __atomic_test_and_set, we get
6046 other memory modes from __sync_lock_test_and_set. Note that we
6047 use no endian adjustment here. This matches the 4.6 behavior
6048 in the Sparc backend. */
6049 enum insn_code icode = targetm.code_for_atomic_test_and_set;
6050 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6051 if (GET_MODE (mem) != QImode)
6052 mem = adjust_address_nv (mem, QImode, 0);
6053
6054 pat_bool_mode = insn_data[icode].operand[0].mode;
6055 create_output_operand (&ops[0], target, pat_bool_mode);
6056 create_fixed_operand (&ops[1], mem);
6057 create_integer_operand (&ops[2], model);
6058
6059 if (maybe_expand_insn (icode, 3, ops))
6060 return ops[0].value;
6061 return NULL_RTX;
6062 }
6063
6064 /* This function expands the legacy __sync_lock_test_and_set operation, which is
6065 generally an atomic exchange. Some limited targets only allow the
6066 constant 1 to be stored. This is an ACQUIRE operation.
6067
6068 TARGET is an optional place to stick the return value.
6069 MEM is where VAL is stored. */
6070
6071 rtx
6072 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6073 {
6074 rtx ret;
6075
6076 /* Try an atomic_exchange first. */
6077 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6078 if (ret)
6079 return ret;
6080
6081 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6082 MEMMODEL_SYNC_ACQUIRE);
6083 if (ret)
6084 return ret;
6085
6086 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6087 if (ret)
6088 return ret;
6089
6090 /* If there are no other options, try atomic_test_and_set if the value
6091 being stored is 1. */
6092 if (val == const1_rtx)
6093 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6094
6095 return ret;
6096 }
6097
6098 /* This function expands the atomic test_and_set operation:
6099 atomically store a boolean TRUE into MEM and return the previous value.
6100
6101 MEMMODEL is the memory model variant to use.
6102 TARGET is an optional place to stick the return value. */
6103
6104 rtx
6105 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6106 {
6107 machine_mode mode = GET_MODE (mem);
6108 rtx ret, trueval, subtarget;
6109
6110 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6111 if (ret)
6112 return ret;
6113
6114 /* Be binary compatible with non-default settings of trueval, and different
6115 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6116 another only has atomic-exchange. */
6117 if (targetm.atomic_test_and_set_trueval == 1)
6118 {
6119 trueval = const1_rtx;
6120 subtarget = target ? target : gen_reg_rtx (mode);
6121 }
6122 else
6123 {
6124 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6125 subtarget = gen_reg_rtx (mode);
6126 }
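/* (For example, with a trueval of 0xff, as with Sparc's ldstub, the
   exchange result is 0 or 0xff; the emit_store_flag_force call at the
   end of this function reduces it to a 0/1 boolean.)  */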
6127
6128 /* Try the atomic-exchange optab... */
6129 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6130
6131 /* ... then an atomic-compare-and-swap loop ... */
6132 if (!ret)
6133 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6134
6135 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6136 if (!ret)
6137 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6138
6139 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6140 things with the value 1. Thus we try again without trueval. */
6141 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6142 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6143
6144 /* Failing all else, assume a single threaded environment and simply
6145 perform the operation. */
6146 if (!ret)
6147 {
6148 /* If the result is ignored skip the move to target. */
6149 if (subtarget != const0_rtx)
6150 emit_move_insn (subtarget, mem);
6151
6152 emit_move_insn (mem, trueval);
6153 ret = subtarget;
6154 }
6155
6156 /* Recall that we have to return a boolean value; rectify if trueval
6157 is not exactly one. */
6158 if (targetm.atomic_test_and_set_trueval != 1)
6159 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6160
6161 return ret;
6162 }
6163
6164 /* This function expands the atomic exchange operation:
6165 atomically store VAL in MEM and return the previous value in MEM.
6166
6167 MEMMODEL is the memory model variant to use.
6168 TARGET is an optional place to stick the return value. */
6169
6170 rtx
6171 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6172 {
6173 machine_mode mode = GET_MODE (mem);
6174 rtx ret;
6175
6176 /* If loads are not atomic for the required size and we are not called to
6177 provide a __sync builtin, do not do anything so that we stay consistent
6178 with atomic loads of the same size. */
6179 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6180 return NULL_RTX;
6181
6182 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6183
6184 /* Next try a compare-and-swap loop for the exchange. */
6185 if (!ret)
6186 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6187
6188 return ret;
6189 }
6190
6191 /* This function expands the atomic compare exchange operation:
6192
6193 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6194 *PTARGET_OVAL is an optional place to store the old value from memory.
6195 Both target parameters may be NULL or const0_rtx to indicate that we do
6196 not care about that return value. Both target parameters are updated on
6197 success to the actual location of the corresponding result.
6198
6199 MEMMODEL is the memory model variant to use.
6200
6201 The return value of the function is true for success. */
6202
6203 bool
6204 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6205 rtx mem, rtx expected, rtx desired,
6206 bool is_weak, enum memmodel succ_model,
6207 enum memmodel fail_model)
6208 {
6209 machine_mode mode = GET_MODE (mem);
6210 struct expand_operand ops[8];
6211 enum insn_code icode;
6212 rtx target_oval, target_bool = NULL_RTX;
6213 rtx libfunc;
6214
6215 /* If loads are not atomic for the required size and we are not called to
6216 provide a __sync builtin, do not do anything so that we stay consistent
6217 with atomic loads of the same size. */
6218 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6219 return false;
6220
6221 /* Load expected into a register for the compare and swap. */
6222 if (MEM_P (expected))
6223 expected = copy_to_reg (expected);
6224
6225 /* Make sure we always have some place to put the return oldval.
6226 Further, make sure that place is distinct from the input expected,
6227 just in case we need that path down below. */
6228 if (ptarget_oval && *ptarget_oval == const0_rtx)
6229 ptarget_oval = NULL;
6230
6231 if (ptarget_oval == NULL
6232 || (target_oval = *ptarget_oval) == NULL
6233 || reg_overlap_mentioned_p (expected, target_oval))
6234 target_oval = gen_reg_rtx (mode);
6235
6236 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6237 if (icode != CODE_FOR_nothing)
6238 {
6239 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6240
6241 if (ptarget_bool && *ptarget_bool == const0_rtx)
6242 ptarget_bool = NULL;
6243
6244 /* Make sure we always have a place for the bool operand. */
6245 if (ptarget_bool == NULL
6246 || (target_bool = *ptarget_bool) == NULL
6247 || GET_MODE (target_bool) != bool_mode)
6248 target_bool = gen_reg_rtx (bool_mode);
6249
6250 /* Emit the compare_and_swap. */
6251 create_output_operand (&ops[0], target_bool, bool_mode);
6252 create_output_operand (&ops[1], target_oval, mode);
6253 create_fixed_operand (&ops[2], mem);
6254 create_input_operand (&ops[3], expected, mode);
6255 create_input_operand (&ops[4], desired, mode);
6256 create_integer_operand (&ops[5], is_weak);
6257 create_integer_operand (&ops[6], succ_model);
6258 create_integer_operand (&ops[7], fail_model);
6259 if (maybe_expand_insn (icode, 8, ops))
6260 {
6261 /* Return success/failure. */
6262 target_bool = ops[0].value;
6263 target_oval = ops[1].value;
6264 goto success;
6265 }
6266 }
6267
6268 /* Otherwise fall back to the original __sync_val_compare_and_swap
6269 which is always seq-cst. */
6270 icode = optab_handler (sync_compare_and_swap_optab, mode);
6271 if (icode != CODE_FOR_nothing)
6272 {
6273 rtx cc_reg;
6274
6275 create_output_operand (&ops[0], target_oval, mode);
6276 create_fixed_operand (&ops[1], mem);
6277 create_input_operand (&ops[2], expected, mode);
6278 create_input_operand (&ops[3], desired, mode);
6279 if (!maybe_expand_insn (icode, 4, ops))
6280 return false;
6281
6282 target_oval = ops[0].value;
6283
6284 /* If the caller isn't interested in the boolean return value,
6285 skip the computation of it. */
6286 if (ptarget_bool == NULL)
6287 goto success;
6288
6289 /* Otherwise, work out if the compare-and-swap succeeded. */
6290 cc_reg = NULL_RTX;
6291 if (have_insn_for (COMPARE, CCmode))
6292 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6293 if (cc_reg)
6294 {
6295 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6296 const0_rtx, VOIDmode, 0, 1);
6297 goto success;
6298 }
6299 goto success_bool_from_val;
6300 }
6301
6302 /* Also check for library support for __sync_val_compare_and_swap. */
6303 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6304 if (libfunc != NULL)
6305 {
6306 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6307 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6308 mode, addr, ptr_mode,
6309 expected, mode, desired, mode);
6310 emit_move_insn (target_oval, target);
6311
6312 /* Compute the boolean return value only if requested. */
6313 if (ptarget_bool)
6314 goto success_bool_from_val;
6315 else
6316 goto success;
6317 }
6318
6319 /* Failure. */
6320 return false;
6321
6322 success_bool_from_val:
6323 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6324 expected, VOIDmode, 1, 1);
6325 success:
6326 /* Make sure that the oval output winds up where the caller asked. */
6327 if (ptarget_oval)
6328 *ptarget_oval = target_oval;
6329 if (ptarget_bool)
6330 *ptarget_bool = target_bool;
6331 return true;
6332 }
6333
6334 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6335
6336 static void
6337 expand_asm_memory_blockage (void)
6338 {
6339 rtx asm_op, clob;
6340
6341 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6342 rtvec_alloc (0), rtvec_alloc (0),
6343 rtvec_alloc (0), UNKNOWN_LOCATION);
6344 MEM_VOLATILE_P (asm_op) = 1;
6345
6346 clob = gen_rtx_SCRATCH (VOIDmode);
6347 clob = gen_rtx_MEM (BLKmode, clob);
6348 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6349
6350 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6351 }
6352
6353 /* Do not propagate memory accesses across this point. */
6354
6355 static void
6356 expand_memory_blockage (void)
6357 {
6358 if (targetm.have_memory_blockage ())
6359 emit_insn (targetm.gen_memory_blockage ());
6360 else
6361 expand_asm_memory_blockage ();
6362 }
6363
6364 /* This routine will either emit the mem_thread_fence pattern or issue a
6365 sync_synchronize to generate a fence for memory model MEMMODEL. */
6366
6367 void
6368 expand_mem_thread_fence (enum memmodel model)
6369 {
6370 if (is_mm_relaxed (model))
6371 return;
6372 if (targetm.have_mem_thread_fence ())
6373 {
6374 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6375 expand_memory_blockage ();
6376 }
6377 else if (targetm.have_memory_barrier ())
6378 emit_insn (targetm.gen_memory_barrier ());
6379 else if (synchronize_libfunc != NULL_RTX)
6380 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
6381 else
6382 expand_memory_blockage ();
6383 }
6384
6385 /* Emit a signal fence with given memory model. */
6386
6387 void
6388 expand_mem_signal_fence (enum memmodel model)
6389 {
6390 /* No machine barrier is required to implement a signal fence, but
6391 a compiler memory barrier must be issued, except for relaxed MM. */
6392 if (!is_mm_relaxed (model))
6393 expand_memory_blockage ();
6394 }
6395
6396 /* This function expands the atomic load operation:
6397 return the atomically loaded value in MEM.
6398
6399 MEMMODEL is the memory model variant to use.
6400 TARGET is an optional place to stick the return value. */
6401
6402 rtx
6403 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6404 {
6405 machine_mode mode = GET_MODE (mem);
6406 enum insn_code icode;
6407
6408 /* If the target supports the load directly, great. */
6409 icode = direct_optab_handler (atomic_load_optab, mode);
6410 if (icode != CODE_FOR_nothing)
6411 {
6412 struct expand_operand ops[3];
6413 rtx_insn *last = get_last_insn ();
6414 if (is_mm_seq_cst (model))
6415 expand_memory_blockage ();
6416
6417 create_output_operand (&ops[0], target, mode);
6418 create_fixed_operand (&ops[1], mem);
6419 create_integer_operand (&ops[2], model);
6420 if (maybe_expand_insn (icode, 3, ops))
6421 {
6422 if (!is_mm_relaxed (model))
6423 expand_memory_blockage ();
6424 return ops[0].value;
6425 }
6426 delete_insns_since (last);
6427 }
6428
6429 /* If the size of the object is greater than word size on this target,
6430 then we assume that a load will not be atomic. We could try to
6431 emulate a load with a compare-and-swap operation, but the store that
6432 such an emulation performs would be incorrect if this is a volatile
6433 atomic load or if it targets read-only-mapped memory. */
6434 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6435 /* If there is no atomic load, leave the library call. */
6436 return NULL_RTX;
6437
6438 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6439 if (!target || target == const0_rtx)
6440 target = gen_reg_rtx (mode);
6441
6442 /* For SEQ_CST, emit a barrier before the load. */
6443 if (is_mm_seq_cst (model))
6444 expand_mem_thread_fence (model);
6445
6446 emit_move_insn (target, mem);
6447
6448 /* Emit the appropriate barrier after the load. */
6449 expand_mem_thread_fence (model);
6450
6451 return target;
6452 }
6453
6454 /* This function expands the atomic store operation:
6455 Atomically store VAL in MEM.
6456 MEMMODEL is the memory model variant to use.
6457 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6458 The function returns const0_rtx if a pattern was emitted. */
6459
6460 rtx
6461 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6462 {
6463 machine_mode mode = GET_MODE (mem);
6464 enum insn_code icode;
6465 struct expand_operand ops[3];
6466
6467 /* If the target supports the store directly, great. */
6468 icode = direct_optab_handler (atomic_store_optab, mode);
6469 if (icode != CODE_FOR_nothing)
6470 {
6471 rtx_insn *last = get_last_insn ();
6472 if (!is_mm_relaxed (model))
6473 expand_memory_blockage ();
6474 create_fixed_operand (&ops[0], mem);
6475 create_input_operand (&ops[1], val, mode);
6476 create_integer_operand (&ops[2], model);
6477 if (maybe_expand_insn (icode, 3, ops))
6478 {
6479 if (is_mm_seq_cst (model))
6480 expand_memory_blockage ();
6481 return const0_rtx;
6482 }
6483 delete_insns_since (last);
6484 }
6485
6486 /* If using __sync_lock_release is a viable alternative, try it.
6487 Note that this will not be set to true if we are expanding a generic
6488 __atomic_store_n. */
6489 if (use_release)
6490 {
6491 icode = direct_optab_handler (sync_lock_release_optab, mode);
6492 if (icode != CODE_FOR_nothing)
6493 {
6494 create_fixed_operand (&ops[0], mem);
6495 create_input_operand (&ops[1], const0_rtx, mode);
6496 if (maybe_expand_insn (icode, 2, ops))
6497 {
6498 /* lock_release is only a release barrier. */
6499 if (is_mm_seq_cst (model))
6500 expand_mem_thread_fence (model);
6501 return const0_rtx;
6502 }
6503 }
6504 }
6505
6506 /* If the size of the object is greater than word size on this target,
6507 a default store will not be atomic. */
6508 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6509 {
6510 /* If loads are atomic or we are called to provide a __sync builtin,
6511 we can try an atomic_exchange and throw away the result. Otherwise,
6512 don't do anything so that we do not create an inconsistency between
6513 loads and stores. */
6514 if (can_atomic_load_p (mode) || is_mm_sync (model))
6515 {
6516 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6517 if (!target)
6518 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6519 val);
6520 if (target)
6521 return const0_rtx;
6522 }
6523 return NULL_RTX;
6524 }
6525
6526 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6527 expand_mem_thread_fence (model);
6528
6529 emit_move_insn (mem, val);
6530
6531 /* For SEQ_CST, also emit a barrier after the store. */
6532 if (is_mm_seq_cst (model))
6533 expand_mem_thread_fence (model);
6534
6535 return const0_rtx;
6536 }
6537
6538
6539 /* Structure containing the pointers and values required to process the
6540 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6541
6542 struct atomic_op_functions
6543 {
6544 direct_optab mem_fetch_before;
6545 direct_optab mem_fetch_after;
6546 direct_optab mem_no_result;
6547 optab fetch_before;
6548 optab fetch_after;
6549 direct_optab no_result;
6550 enum rtx_code reverse_code;
6551 };
6552
6553
6554 /* Fill in structure pointed to by OP with the various optab entries for an
6555 operation of type CODE. */
6556
6557 static void
6558 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6559 {
6560 gcc_assert (op != NULL);
6561
6562 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6563 in the source code during compilation, and the optab entries are not
6564 computable until runtime. Fill in the values at runtime. */
6565 switch (code)
6566 {
6567 case PLUS:
6568 op->mem_fetch_before = atomic_fetch_add_optab;
6569 op->mem_fetch_after = atomic_add_fetch_optab;
6570 op->mem_no_result = atomic_add_optab;
6571 op->fetch_before = sync_old_add_optab;
6572 op->fetch_after = sync_new_add_optab;
6573 op->no_result = sync_add_optab;
6574 op->reverse_code = MINUS;
6575 break;
6576 case MINUS:
6577 op->mem_fetch_before = atomic_fetch_sub_optab;
6578 op->mem_fetch_after = atomic_sub_fetch_optab;
6579 op->mem_no_result = atomic_sub_optab;
6580 op->fetch_before = sync_old_sub_optab;
6581 op->fetch_after = sync_new_sub_optab;
6582 op->no_result = sync_sub_optab;
6583 op->reverse_code = PLUS;
6584 break;
6585 case XOR:
6586 op->mem_fetch_before = atomic_fetch_xor_optab;
6587 op->mem_fetch_after = atomic_xor_fetch_optab;
6588 op->mem_no_result = atomic_xor_optab;
6589 op->fetch_before = sync_old_xor_optab;
6590 op->fetch_after = sync_new_xor_optab;
6591 op->no_result = sync_xor_optab;
6592 op->reverse_code = XOR;
6593 break;
6594 case AND:
6595 op->mem_fetch_before = atomic_fetch_and_optab;
6596 op->mem_fetch_after = atomic_and_fetch_optab;
6597 op->mem_no_result = atomic_and_optab;
6598 op->fetch_before = sync_old_and_optab;
6599 op->fetch_after = sync_new_and_optab;
6600 op->no_result = sync_and_optab;
6601 op->reverse_code = UNKNOWN;
6602 break;
6603 case IOR:
6604 op->mem_fetch_before = atomic_fetch_or_optab;
6605 op->mem_fetch_after = atomic_or_fetch_optab;
6606 op->mem_no_result = atomic_or_optab;
6607 op->fetch_before = sync_old_ior_optab;
6608 op->fetch_after = sync_new_ior_optab;
6609 op->no_result = sync_ior_optab;
6610 op->reverse_code = UNKNOWN;
6611 break;
6612 case NOT:
6613 op->mem_fetch_before = atomic_fetch_nand_optab;
6614 op->mem_fetch_after = atomic_nand_fetch_optab;
6615 op->mem_no_result = atomic_nand_optab;
6616 op->fetch_before = sync_old_nand_optab;
6617 op->fetch_after = sync_new_nand_optab;
6618 op->no_result = sync_nand_optab;
6619 op->reverse_code = UNKNOWN;
6620 break;
6621 default:
6622 gcc_unreachable ();
6623 }
6624 }
6625
6626 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
6627 using memory order MODEL. If AFTER is true the operation needs to return
6628 the value of *MEM after the operation, otherwise the previous value.
6629 TARGET is an optional place to place the result. The result is unused if
6630 it is const0_rtx.
6631 Return the result if there is a better sequence, otherwise NULL_RTX. */
6632
6633 static rtx
6634 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6635 enum memmodel model, bool after)
6636 {
6637 /* If the value is prefetched, or not used, it may be possible to replace
6638 the sequence with a native exchange operation. */
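/* (These rewrites are safe because X & 0 == 0 and X | -1 == -1 for any
   previous value X, so the value stored does not depend on the value
   fetched.)  */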
6639 if (!after || target == const0_rtx)
6640 {
6641 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6642 if (code == AND && val == const0_rtx)
6643 {
6644 if (target == const0_rtx)
6645 target = gen_reg_rtx (GET_MODE (mem));
6646 return maybe_emit_atomic_exchange (target, mem, val, model);
6647 }
6648
6649 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6650 if (code == IOR && val == constm1_rtx)
6651 {
6652 if (target == const0_rtx)
6653 target = gen_reg_rtx (GET_MODE (mem));
6654 return maybe_emit_atomic_exchange (target, mem, val, model);
6655 }
6656 }
6657
6658 return NULL_RTX;
6659 }
6660
6661 /* Try to emit an instruction for a specific operation variation.
6662 OPTAB contains the OP functions.
6663 TARGET is an optional place to return the result. const0_rtx means unused.
6664 MEM is the memory location to operate on.
6665 VAL is the value to use in the operation.
6666 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6667 MODEL is the memory model, if used.
6668 AFTER is true if the returned result is the value after the operation. */
6669
6670 static rtx
6671 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6672 rtx val, bool use_memmodel, enum memmodel model, bool after)
6673 {
6674 machine_mode mode = GET_MODE (mem);
6675 struct expand_operand ops[4];
6676 enum insn_code icode;
6677 int op_counter = 0;
6678 int num_ops;
6679
6680 /* Check to see if there is a result returned. */
6681 if (target == const0_rtx)
6682 {
6683 if (use_memmodel)
6684 {
6685 icode = direct_optab_handler (optab->mem_no_result, mode);
6686 create_integer_operand (&ops[2], model);
6687 num_ops = 3;
6688 }
6689 else
6690 {
6691 icode = direct_optab_handler (optab->no_result, mode);
6692 num_ops = 2;
6693 }
6694 }
6695 /* Otherwise, we need to generate a result. */
6696 else
6697 {
6698 if (use_memmodel)
6699 {
6700 icode = direct_optab_handler (after ? optab->mem_fetch_after
6701 : optab->mem_fetch_before, mode);
6702 create_integer_operand (&ops[3], model);
6703 num_ops = 4;
6704 }
6705 else
6706 {
6707 icode = optab_handler (after ? optab->fetch_after
6708 : optab->fetch_before, mode);
6709 num_ops = 3;
6710 }
6711 create_output_operand (&ops[op_counter++], target, mode);
6712 }
6713 if (icode == CODE_FOR_nothing)
6714 return NULL_RTX;
6715
6716 create_fixed_operand (&ops[op_counter++], mem);
6717 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6718 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6719
6720 if (maybe_expand_insn (icode, num_ops, ops))
6721 return (target == const0_rtx ? const0_rtx : ops[0].value);
6722
6723 return NULL_RTX;
6724 }
6725
6726
6727 /* This function expands an atomic fetch_OP or OP_fetch operation:
6728 TARGET is an optional place to stick the return value. const0_rtx indicates
6729 the result is unused.
6730 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6731 CODE is the operation being performed (OP)
6732 MEMMODEL is the memory model variant to use.
6733 AFTER is true to return the result of the operation (OP_fetch).
6734 AFTER is false to return the value before the operation (fetch_OP).
6735
6736 This function will *only* generate instructions if there is a direct
6737 optab. No compare and swap loops or libcalls will be generated. */
6738
6739 static rtx
6740 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6741 enum rtx_code code, enum memmodel model,
6742 bool after)
6743 {
6744 machine_mode mode = GET_MODE (mem);
6745 struct atomic_op_functions optab;
6746 rtx result;
6747 bool unused_result = (target == const0_rtx);
6748
6749 get_atomic_op_for_code (&optab, code);
6750
6751 /* Check to see if there are any better instructions. */
6752 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6753 if (result)
6754 return result;
6755
6756 /* Check for the case where the result isn't used and try those patterns. */
6757 if (unused_result)
6758 {
6759 /* Try the memory model variant first. */
6760 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6761 if (result)
6762 return result;
6763
6764 /* Next try the old style without a memory model. */
6765 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6766 if (result)
6767 return result;
6768
6769 /* There is no no-result pattern, so try patterns with a result. */
6770 target = NULL_RTX;
6771 }
6772
6773 /* Try the __atomic version. */
6774 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6775 if (result)
6776 return result;
6777
6778 /* Try the older __sync version. */
6779 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6780 if (result)
6781 return result;
6782
6783 /* If the fetch value can be calculated from the other variation of fetch,
6784 try that operation. */
6785 if (after || unused_result || optab.reverse_code != UNKNOWN)
6786 {
6787 /* Try the __atomic version, then the older __sync version. */
6788 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6789 if (!result)
6790 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6791
6792 if (result)
6793 {
6794 /* If the result isn't used, no need to do compensation code. */
6795 if (unused_result)
6796 return result;
6797
6798 /* Issue compensation code. fetch_after == fetch_before OP val.
6799 fetch_before == fetch_after REVERSE_OP val. */
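/* E.g. if only add_fetch is available, fetch_add is recovered as
   add_fetch (mem, val) - val via REVERSE_CODE (MINUS).  NAND has no
   reverse, so only its after-value can be reconstructed: the NOT case
   below computes ~(result & val) from the fetched before-value.  */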
6800 if (!after)
6801 code = optab.reverse_code;
6802 if (code == NOT)
6803 {
6804 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6805 true, OPTAB_LIB_WIDEN);
6806 result = expand_simple_unop (mode, NOT, result, target, true);
6807 }
6808 else
6809 result = expand_simple_binop (mode, code, result, val, target,
6810 true, OPTAB_LIB_WIDEN);
6811 return result;
6812 }
6813 }
6814
6815 /* No direct opcode can be generated. */
6816 return NULL_RTX;
6817 }
6818
6819
6820
6821 /* This function expands an atomic fetch_OP or OP_fetch operation:
6822 TARGET is an optional place to stick the return value. const0_rtx indicates
6823 the result is unused.
6824 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6825 CODE is the operation being performed (OP)
6826 MEMMODEL is the memory model variant to use.
6827 AFTER is true to return the result of the operation (OP_fetch).
6828 AFTER is false to return the value before the operation (fetch_OP). */
6829 rtx
6830 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6831 enum memmodel model, bool after)
6832 {
6833 machine_mode mode = GET_MODE (mem);
6834 rtx result;
6835 bool unused_result = (target == const0_rtx);
6836
6837 /* If loads are not atomic for the required size and we are not called to
6838 provide a __sync builtin, do not do anything so that we stay consistent
6839 with atomic loads of the same size. */
6840 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6841 return NULL_RTX;
6842
6843 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6844 after);
6845
6846 if (result)
6847 return result;
6848
6849 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6850 if (code == PLUS || code == MINUS)
6851 {
6852 rtx tmp;
6853 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6854
6855 start_sequence ();
6856 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6857 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6858 model, after);
6859 if (result)
6860 {
6861 /* The reverse operation worked, so emit the insns and return. */
6862 tmp = get_insns ();
6863 end_sequence ();
6864 emit_insn (tmp);
6865 return result;
6866 }
6867
6868 /* The reverse operation did not work, so throw away the negation code and continue. */
6869 end_sequence ();
6870 }
6871
6872 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6873 if (!can_compare_and_swap_p (mode, false))
6874 {
6875 rtx libfunc;
6876 bool fixup = false;
6877 enum rtx_code orig_code = code;
6878 struct atomic_op_functions optab;
6879
6880 get_atomic_op_for_code (&optab, code);
6881 libfunc = optab_libfunc (after ? optab.fetch_after
6882 : optab.fetch_before, mode);
6883 if (libfunc == NULL
6884 && (after || unused_result || optab.reverse_code != UNKNOWN))
6885 {
6886 fixup = true;
6887 if (!after)
6888 code = optab.reverse_code;
6889 libfunc = optab_libfunc (after ? optab.fetch_before
6890 : optab.fetch_after, mode);
6891 }
6892 if (libfunc != NULL)
6893 {
6894 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6895 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6896 addr, ptr_mode, val, mode);
6897
6898 if (!unused_result && fixup)
6899 result = expand_simple_binop (mode, code, result, val, target,
6900 true, OPTAB_LIB_WIDEN);
6901 return result;
6902 }
6903
6904 /* We need the original code for any further attempts. */
6905 code = orig_code;
6906 }
6907
6908 /* If nothing else has succeeded, default to a compare and swap loop. */
6909 if (can_compare_and_swap_p (mode, true))
6910 {
6911 rtx_insn *insn;
6912 rtx t0 = gen_reg_rtx (mode), t1;
6913
6914 start_sequence ();
6915
6916 /* If the result is used, get a register for it. */
6917 if (!unused_result)
6918 {
6919 if (!target || !register_operand (target, mode))
6920 target = gen_reg_rtx (mode);
6921 /* If fetch_before, copy the value now. */
6922 if (!after)
6923 emit_move_insn (target, t0);
6924 }
6925 else
6926 target = const0_rtx;
6927
6928 t1 = t0;
6929 if (code == NOT)
6930 {
6931 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6932 true, OPTAB_LIB_WIDEN);
6933 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6934 }
6935 else
6936 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6937 OPTAB_LIB_WIDEN);
6938
6939 /* For after, copy the value now. */
6940 if (!unused_result && after)
6941 emit_move_insn (target, t1);
6942 insn = get_insns ();
6943 end_sequence ();
6944
6945 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6946 return target;
6947 }
6948
6949 return NULL_RTX;
6950 }
6951 \f
6952 /* Return true if OPERAND is suitable for operand number OPNO of
6953 instruction ICODE. */
6954
6955 bool
6956 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6957 {
6958 return (!insn_data[(int) icode].operand[opno].predicate
6959 || (insn_data[(int) icode].operand[opno].predicate
6960 (operand, insn_data[(int) icode].operand[opno].mode)));
6961 }
6962 \f
6963 /* TARGET is a target of a multiword operation that we are going to
6964 implement as a series of word-mode operations. Return true if
6965 TARGET is suitable for this purpose. */
6966
6967 bool
6968 valid_multiword_target_p (rtx target)
6969 {
6970 machine_mode mode;
6971 int i;
6972
6973 mode = GET_MODE (target);
6974 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6975 if (!validate_subreg (word_mode, mode, target, i))
6976 return false;
6977 return true;
6978 }
6979
6980 /* Make OP describe an input operand that has value INTVAL and that has
6981 no inherent mode. This function should only be used for operands that
6982 are always expand-time constants. The backend may request that INTVAL
6983 be copied into a different kind of rtx, but it must specify the mode
6984 of that rtx if so. */
6985
6986 void
6987 create_integer_operand (struct expand_operand *op, poly_int64 intval)
6988 {
6989 create_expand_operand (op, EXPAND_INTEGER,
6990 gen_int_mode (intval, MAX_MODE_INT),
6991 VOIDmode, false, intval);
6992 }
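/* (Note: the EXPAND_INTEGER case of maybe_legitimize_operand below
   converts this value to the predicate's mode, but only when truncation
   to that mode preserves the value.)  */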
6993
6994 /* Like maybe_legitimize_operand, but do not change the code of the
6995 current rtx value. */
6996
6997 static bool
6998 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6999 struct expand_operand *op)
7000 {
7001 /* See if the operand matches in its current form. */
7002 if (insn_operand_matches (icode, opno, op->value))
7003 return true;
7004
7005 /* If the operand is a memory whose address has no side effects,
7006 try forcing the address into a non-virtual pseudo register.
7007 The check for side effects is important because copy_to_mode_reg
7008 cannot handle things like auto-modified addresses. */
7009 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
7010 {
7011 rtx addr, mem;
7012
7013 mem = op->value;
7014 addr = XEXP (mem, 0);
7015 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
7016 && !side_effects_p (addr))
7017 {
7018 rtx_insn *last;
7019 machine_mode mode;
7020
7021 last = get_last_insn ();
7022 mode = get_address_mode (mem);
7023 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7024 if (insn_operand_matches (icode, opno, mem))
7025 {
7026 op->value = mem;
7027 return true;
7028 }
7029 delete_insns_since (last);
7030 }
7031 }
7032
7033 return false;
7034 }
7035
7036 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7037 on success, storing the new operand value back in OP. */
7038
7039 static bool
7040 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7041 struct expand_operand *op)
7042 {
7043 machine_mode mode, imode;
7044 bool old_volatile_ok, result;
7045
7046 mode = op->mode;
7047 switch (op->type)
7048 {
7049 case EXPAND_FIXED:
7050 old_volatile_ok = volatile_ok;
7051 volatile_ok = true;
7052 result = maybe_legitimize_operand_same_code (icode, opno, op);
7053 volatile_ok = old_volatile_ok;
7054 return result;
7055
7056 case EXPAND_OUTPUT:
7057 gcc_assert (mode != VOIDmode);
7058 if (op->value
7059 && op->value != const0_rtx
7060 && GET_MODE (op->value) == mode
7061 && maybe_legitimize_operand_same_code (icode, opno, op))
7062 return true;
7063
7064 op->value = gen_reg_rtx (mode);
7065 op->target = 0;
7066 break;
7067
7068 case EXPAND_INPUT:
7069 input:
7070 gcc_assert (mode != VOIDmode);
7071 gcc_assert (GET_MODE (op->value) == VOIDmode
7072 || GET_MODE (op->value) == mode);
7073 if (maybe_legitimize_operand_same_code (icode, opno, op))
7074 return true;
7075
7076 op->value = copy_to_mode_reg (mode, op->value);
7077 break;
7078
7079 case EXPAND_CONVERT_TO:
7080 gcc_assert (mode != VOIDmode);
7081 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7082 goto input;
7083
7084 case EXPAND_CONVERT_FROM:
7085 if (GET_MODE (op->value) != VOIDmode)
7086 mode = GET_MODE (op->value);
7087 else
7088 /* The caller must tell us what mode this value has. */
7089 gcc_assert (mode != VOIDmode);
7090
7091 imode = insn_data[(int) icode].operand[opno].mode;
7092 if (imode != VOIDmode && imode != mode)
7093 {
7094 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7095 mode = imode;
7096 }
7097 goto input;
7098
7099 case EXPAND_ADDRESS:
7100 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7101 op->value);
7102 goto input;
7103
7104 case EXPAND_INTEGER:
7105 mode = insn_data[(int) icode].operand[opno].mode;
7106 if (mode != VOIDmode
7107 && known_eq (trunc_int_for_mode (op->int_value, mode),
7108 op->int_value))
7109 {
7110 op->value = gen_int_mode (op->int_value, mode);
7111 goto input;
7112 }
7113 break;
7114 }
7115 return insn_operand_matches (icode, opno, op->value);
7116 }
7117
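/* Worked example, not part of the original file: for an EXPAND_INTEGER
   operand with int_value 300 matched against an insn operand of mode
   QImode, trunc_int_for_mode (300, QImode) yields 44, so the constant
   is not representable and control falls through to the final
   insn_operand_matches test.  With int_value 100 the known_eq test
   holds, gen_int_mode produces (const_int 100), and the operand is
   then handled like any other input.  */
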
7118 /* Make OP describe an input operand that should have the same value
7119 as VALUE, after any mode conversion that the target might request.
7120 TYPE is the type of VALUE. */
7121
7122 void
7123 create_convert_operand_from_type (struct expand_operand *op,
7124 rtx value, tree type)
7125 {
7126 create_convert_operand_from (op, value, TYPE_MODE (type),
7127 TYPE_UNSIGNED (type));
7128 }
7129
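/* Illustrative usage, not part of the original file (ICODE, TARGET and
   the tree expression ARG are placeholders):

     struct expand_operand ops[2];
     rtx val = expand_normal (arg);
     create_output_operand (&ops[0], target,
			    TYPE_MODE (TREE_TYPE (arg)));
     create_convert_operand_from_type (&ops[1], val, TREE_TYPE (arg));
     expand_insn (icode, 2, ops);

   The signedness recorded from TYPE_UNSIGNED decides whether any mode
   widening requested by the insn uses sign or zero extension.  */
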
7130 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7131 of instruction ICODE. Return true on success, leaving the new operand
7132 values in the OPS themselves. Emit no code on failure. */
7133
7134 bool
7135 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7136 unsigned int nops, struct expand_operand *ops)
7137 {
7138 rtx_insn *last;
7139 unsigned int i;
7140
7141 last = get_last_insn ();
7142 for (i = 0; i < nops; i++)
7143 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7144 {
7145 delete_insns_since (last);
7146 return false;
7147 }
7148 return true;
7149 }
7150
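/* Note, not part of the original file: because LAST is recorded before
   the loop, a failure on operand I also deletes the set-up code
   already emitted for operands 0..I-1 (copies, conversions, reloaded
   addresses), so a caller that tries several instruction codes in
   turn never accumulates dead insns from the failed attempts.  */
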
7151 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7152 as its operands. Return the instruction pattern on success,
7153 and emit any necessary set-up code. Return null and emit no
7154 code on failure. */
7155
7156 rtx_insn *
7157 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7158 struct expand_operand *ops)
7159 {
7160 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7161 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7162 return NULL;
7163
7164 switch (nops)
7165 {
7166 case 1:
7167 return GEN_FCN (icode) (ops[0].value);
7168 case 2:
7169 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7170 case 3:
7171 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7172 case 4:
7173 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7174 ops[3].value);
7175 case 5:
7176 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7177 ops[3].value, ops[4].value);
7178 case 6:
7179 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7180 ops[3].value, ops[4].value, ops[5].value);
7181 case 7:
7182 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7183 ops[3].value, ops[4].value, ops[5].value,
7184 ops[6].value);
7185 case 8:
7186 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7187 ops[3].value, ops[4].value, ops[5].value,
7188 ops[6].value, ops[7].value);
7189 case 9:
7190 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7191 ops[3].value, ops[4].value, ops[5].value,
7192 ops[6].value, ops[7].value, ops[8].value);
7193 }
7194 gcc_unreachable ();
7195 }
7196
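/* Illustrative usage, not part of the original file (ICODE and the
   operand values are placeholders):

     struct expand_operand ops[3];
     create_output_operand (&ops[0], target, mode);
     create_input_operand (&ops[1], op0, mode);
     create_input_operand (&ops[2], op1, mode);
     rtx_insn *pat = maybe_gen_insn (icode, 3, ops);
     if (pat)
       emit_insn (pat);

   which is the pattern that maybe_expand_insn below packages up.  */
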
7197 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7198 as its operands. Return true on success; on failure, return false
7199 and emit no code. */
7199
7200 bool
7201 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7202 struct expand_operand *ops)
7203 {
7204 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7205 if (pat)
7206 {
7207 emit_insn (pat);
7208 return true;
7209 }
7210 return false;
7211 }
7212
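/* A typical caller, sketched here rather than taken from the original
   file (SOME_OPTAB and the fallback are placeholders):

     enum insn_code icode = optab_handler (some_optab, mode);
     if (icode != CODE_FOR_nothing)
       {
	 struct expand_operand ops[2];
	 create_output_operand (&ops[0], target, mode);
	 create_input_operand (&ops[1], op0, mode);
	 if (maybe_expand_insn (icode, 2, ops))
	   return ops[0].value;
       }
     ... fall back to a libcall or an open-coded expansion ...

   Returning ops[0].value rather than TARGET matters: legitimization
   may have replaced the suggested output with a fresh pseudo.  */
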
7213 /* Like maybe_expand_insn, but for jumps. */
7214
7215 bool
7216 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7217 struct expand_operand *ops)
7218 {
7219 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7220 if (pat)
7221 {
7222 emit_jump_insn (pat);
7223 return true;
7224 }
7225 return false;
7226 }
7227
7228 /* Emit instruction ICODE, using [OPS, OPS + NOPS) as its operands.
7229 Abort if the operands cannot be legitimized. */
7230
7231 void
7232 expand_insn (enum insn_code icode, unsigned int nops,
7233 struct expand_operand *ops)
7234 {
7235 if (!maybe_expand_insn (icode, nops, ops))
7236 gcc_unreachable ();
7237 }
7238
7239 /* Like expand_insn, but for jumps. */
7240
7241 void
7242 expand_jump_insn (enum insn_code icode, unsigned int nops,
7243 struct expand_operand *ops)
7244 {
7245 if (!maybe_expand_jump_insn (icode, nops, ops))
7246 gcc_unreachable ();
7247 }
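
/* Illustrative sketch for the jump variant, not part of the original
   file, assuming a cbranch-style operand layout (comparison rtx, both
   comparison inputs, branch label); TEST is a comparison rtx such as
   (lt (reg) (reg)) and LABEL is a code_label, both placeholders:

     struct expand_operand ops[4];
     create_fixed_operand (&ops[0], test);
     create_fixed_operand (&ops[1], XEXP (test, 0));
     create_fixed_operand (&ops[2], XEXP (test, 1));
     create_fixed_operand (&ops[3], label);
     expand_jump_insn (optab_handler (cbranch_optab, mode), 4, ops);  */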