1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "predict.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "expmed.h"
33 #include "optabs.h"
34 #include "emit-rtl.h"
35 #include "diagnostic-core.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "dojump.h"
39 #include "explow.h"
40 #include "expr.h"
41 #include "langhooks.h"
42
43 struct target_expmed default_target_expmed;
44 #if SWITCHABLE_TARGET
45 struct target_expmed *this_target_expmed = &default_target_expmed;
46 #endif
47
48 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT,
51 unsigned HOST_WIDE_INT,
52 rtx, bool);
53 static void store_fixed_bit_field_1 (rtx, unsigned HOST_WIDE_INT,
54 unsigned HOST_WIDE_INT,
55 rtx, bool);
56 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
57 unsigned HOST_WIDE_INT,
58 unsigned HOST_WIDE_INT,
59 unsigned HOST_WIDE_INT,
60 rtx, bool);
61 static rtx extract_fixed_bit_field (machine_mode, rtx,
62 unsigned HOST_WIDE_INT,
63 unsigned HOST_WIDE_INT, rtx, int, bool);
64 static rtx extract_fixed_bit_field_1 (machine_mode, rtx,
65 unsigned HOST_WIDE_INT,
66 unsigned HOST_WIDE_INT, rtx, int, bool);
67 static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
68 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
69 unsigned HOST_WIDE_INT, int, bool);
70 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode, rtx_code_label *);
71 static rtx expand_smod_pow2 (machine_mode, rtx, HOST_WIDE_INT);
72 static rtx expand_sdiv_pow2 (machine_mode, rtx, HOST_WIDE_INT);
73
74 /* Return a constant integer mask value of mode MODE with BITSIZE ones
75 followed by BITPOS zeros, or the complement of that if COMPLEMENT.
76 The mask is truncated if necessary to the width of mode MODE. The
77 mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */
78
79 static inline rtx
80 mask_rtx (machine_mode mode, int bitpos, int bitsize, bool complement)
81 {
82 return immed_wide_int_const
83 (wi::shifted_mask (bitpos, bitsize, complement,
84 GET_MODE_PRECISION (mode)), mode);
85 }
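/* Illustrative example: with MODE == SImode, BITPOS == 8, BITSIZE == 4
   and COMPLEMENT == false, the function would return (const_int 0xf00),
   i.e. bits 8 to 11 set; with COMPLEMENT == true it would return the
   inverse, (const_int 0xfffff0ff).  */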
86
87 /* Test whether a value is zero or a power of two. */
88 #define EXACT_POWER_OF_2_OR_ZERO_P(x) \
89 (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
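/* For example, EXACT_POWER_OF_2_OR_ZERO_P (8) is true since 8 & 7 == 0,
   EXACT_POWER_OF_2_OR_ZERO_P (6) is false since 6 & 5 == 4, and
   EXACT_POWER_OF_2_OR_ZERO_P (0) is true, hence the "OR_ZERO" in the
   name.  */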
90
91 struct init_expmed_rtl
92 {
93 rtx reg;
94 rtx plus;
95 rtx neg;
96 rtx mult;
97 rtx sdiv;
98 rtx udiv;
99 rtx sdiv_32;
100 rtx smod_32;
101 rtx wide_mult;
102 rtx wide_lshr;
103 rtx wide_trunc;
104 rtx shift;
105 rtx shift_mult;
106 rtx shift_add;
107 rtx shift_sub0;
108 rtx shift_sub1;
109 rtx zext;
110 rtx trunc;
111
112 rtx pow2[MAX_BITS_PER_WORD];
113 rtx cint[MAX_BITS_PER_WORD];
114 };
115
116 static void
117 init_expmed_one_conv (struct init_expmed_rtl *all, machine_mode to_mode,
118 machine_mode from_mode, bool speed)
119 {
120 int to_size, from_size;
121 rtx which;
122
123 to_size = GET_MODE_PRECISION (to_mode);
124 from_size = GET_MODE_PRECISION (from_mode);
125
126 /* Most partial integers have a precision less than the "full"
127 integer they require for storage. If one doesn't, reduce its
128 bit size by one here, so that the comparison below still treats
129 the partial mode as narrower than the full integer mode. */
130 if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
131 && pow2p_hwi (to_size))
132 to_size --;
133 if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
134 && pow2p_hwi (from_size))
135 from_size --;
136
137 /* Assume cost of zero-extend and sign-extend is the same. */
138 which = (to_size < from_size ? all->trunc : all->zext);
139
140 PUT_MODE (all->reg, from_mode);
141 set_convert_cost (to_mode, from_mode, speed,
142 set_src_cost (which, to_mode, speed));
143 }
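/* Illustrative note: for TO_MODE == DImode and FROM_MODE == SImode the
   conversion cost recorded above comes from the zero-extension pattern,
   whereas for TO_MODE == SImode and FROM_MODE == DImode it comes from
   the truncation pattern; sign extension is assumed to cost the same
   as zero extension.  */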
144
145 static void
146 init_expmed_one_mode (struct init_expmed_rtl *all,
147 machine_mode mode, int speed)
148 {
149 int m, n, mode_bitsize;
150 machine_mode mode_from;
151
152 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
153
154 PUT_MODE (all->reg, mode);
155 PUT_MODE (all->plus, mode);
156 PUT_MODE (all->neg, mode);
157 PUT_MODE (all->mult, mode);
158 PUT_MODE (all->sdiv, mode);
159 PUT_MODE (all->udiv, mode);
160 PUT_MODE (all->sdiv_32, mode);
161 PUT_MODE (all->smod_32, mode);
162 PUT_MODE (all->wide_trunc, mode);
163 PUT_MODE (all->shift, mode);
164 PUT_MODE (all->shift_mult, mode);
165 PUT_MODE (all->shift_add, mode);
166 PUT_MODE (all->shift_sub0, mode);
167 PUT_MODE (all->shift_sub1, mode);
168 PUT_MODE (all->zext, mode);
169 PUT_MODE (all->trunc, mode);
170
171 set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
172 set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
173 set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
174 set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
175 set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));
176
177 set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
178 <= 2 * add_cost (speed, mode)));
179 set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
180 <= 4 * add_cost (speed, mode)));
181
182 set_shift_cost (speed, mode, 0, 0);
183 {
184 int cost = add_cost (speed, mode);
185 set_shiftadd_cost (speed, mode, 0, cost);
186 set_shiftsub0_cost (speed, mode, 0, cost);
187 set_shiftsub1_cost (speed, mode, 0, cost);
188 }
189
190 n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
191 for (m = 1; m < n; m++)
192 {
193 XEXP (all->shift, 1) = all->cint[m];
194 XEXP (all->shift_mult, 1) = all->pow2[m];
195
196 set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
197 set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
198 speed));
199 set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
200 speed));
201 set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
202 speed));
203 }
204
205 scalar_int_mode int_mode_to;
206 if (is_a <scalar_int_mode> (mode, &int_mode_to))
207 {
208 for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
209 mode_from = (machine_mode)(mode_from + 1))
210 init_expmed_one_conv (all, int_mode_to, mode_from, speed);
211
212 scalar_int_mode wider_mode;
213 if (GET_MODE_CLASS (int_mode_to) == MODE_INT
214 && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
215 {
216 PUT_MODE (all->zext, wider_mode);
217 PUT_MODE (all->wide_mult, wider_mode);
218 PUT_MODE (all->wide_lshr, wider_mode);
219 XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize);
220
221 set_mul_widen_cost (speed, wider_mode,
222 set_src_cost (all->wide_mult, wider_mode, speed));
223 set_mul_highpart_cost (speed, int_mode_to,
224 set_src_cost (all->wide_trunc,
225 int_mode_to, speed));
226 }
227 }
228 }
229
230 void
231 init_expmed (void)
232 {
233 struct init_expmed_rtl all;
234 machine_mode mode = QImode;
235 int m, speed;
236
237 memset (&all, 0, sizeof all);
238 for (m = 1; m < MAX_BITS_PER_WORD; m++)
239 {
240 all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
241 all.cint[m] = GEN_INT (m);
242 }
243
244 /* Avoid using hard regs in ways which may be unsupported. */
245 all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
246 all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
247 all.neg = gen_rtx_NEG (mode, all.reg);
248 all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
249 all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
250 all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
251 all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
252 all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
253 all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
254 all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
255 all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
256 all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
257 all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
258 all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
259 all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
260 all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
261 all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
262 all.trunc = gen_rtx_TRUNCATE (mode, all.reg);
263
264 for (speed = 0; speed < 2; speed++)
265 {
266 crtl->maybe_hot_insn_p = speed;
267 set_zero_cost (speed, set_src_cost (const0_rtx, mode, speed));
268
269 for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
270 mode = (machine_mode)(mode + 1))
271 init_expmed_one_mode (&all, mode, speed);
272
273 if (MIN_MODE_PARTIAL_INT != VOIDmode)
274 for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
275 mode = (machine_mode)(mode + 1))
276 init_expmed_one_mode (&all, mode, speed);
277
278 if (MIN_MODE_VECTOR_INT != VOIDmode)
279 for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
280 mode = (machine_mode)(mode + 1))
281 init_expmed_one_mode (&all, mode, speed);
282 }
283
284 if (alg_hash_used_p ())
285 {
286 struct alg_hash_entry *p = alg_hash_entry_ptr (0);
287 memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
288 }
289 else
290 set_alg_hash_used_p (true);
291 default_rtl_profile ();
292
293 ggc_free (all.trunc);
294 ggc_free (all.shift_sub1);
295 ggc_free (all.shift_sub0);
296 ggc_free (all.shift_add);
297 ggc_free (all.shift_mult);
298 ggc_free (all.shift);
299 ggc_free (all.wide_trunc);
300 ggc_free (all.wide_lshr);
301 ggc_free (all.wide_mult);
302 ggc_free (all.zext);
303 ggc_free (all.smod_32);
304 ggc_free (all.sdiv_32);
305 ggc_free (all.udiv);
306 ggc_free (all.sdiv);
307 ggc_free (all.mult);
308 ggc_free (all.neg);
309 ggc_free (all.plus);
310 ggc_free (all.reg);
311 }
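/* Illustrative note: the loop above fills every cost table twice, once
   with SPEED == 0 (optimizing for size) and once with SPEED == 1
   (optimizing for speed), so later queries such as add_cost (speed, mode)
   can select the variant that matches the current optimization goal.  */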
312
313 /* Return an rtx representing minus the value of X.
314 MODE is the intended mode of the result,
315 useful if X is a CONST_INT. */
316
317 rtx
318 negate_rtx (machine_mode mode, rtx x)
319 {
320 rtx result = simplify_unary_operation (NEG, mode, x, mode);
321
322 if (result == 0)
323 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
324
325 return result;
326 }
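/* For example, negate_rtx (SImode, const_int 5) folds directly to
   (const_int -5) via simplify_unary_operation; only operands that
   cannot be simplified at compile time reach expand_unop.  */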
327
328 /* Whether reverse storage order is supported on the target. */
329 static int reverse_storage_order_supported = -1;
330
331 /* Check whether reverse storage order is supported on the target. */
332
333 static void
334 check_reverse_storage_order_support (void)
335 {
336 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
337 {
338 reverse_storage_order_supported = 0;
339 sorry ("reverse scalar storage order");
340 }
341 else
342 reverse_storage_order_supported = 1;
343 }
344
345 /* Whether reverse FP storage order is supported on the target. */
346 static int reverse_float_storage_order_supported = -1;
347
348 /* Check whether reverse FP storage order is supported on the target. */
349
350 static void
351 check_reverse_float_storage_order_support (void)
352 {
353 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
354 {
355 reverse_float_storage_order_supported = 0;
356 sorry ("reverse floating-point scalar storage order");
357 }
358 else
359 reverse_float_storage_order_supported = 1;
360 }
361
362 /* Return an rtx representing value of X with reverse storage order.
363 MODE is the intended mode of the result,
364 useful if X is a CONST_INT. */
365
366 rtx
367 flip_storage_order (machine_mode mode, rtx x)
368 {
369 scalar_int_mode int_mode;
370 rtx result;
371
372 if (mode == QImode)
373 return x;
374
375 if (COMPLEX_MODE_P (mode))
376 {
377 rtx real = read_complex_part (x, false);
378 rtx imag = read_complex_part (x, true);
379
380 real = flip_storage_order (GET_MODE_INNER (mode), real);
381 imag = flip_storage_order (GET_MODE_INNER (mode), imag);
382
383 return gen_rtx_CONCAT (mode, real, imag);
384 }
385
386 if (__builtin_expect (reverse_storage_order_supported < 0, 0))
387 check_reverse_storage_order_support ();
388
389 if (!is_a <scalar_int_mode> (mode, &int_mode))
390 {
391 if (FLOAT_MODE_P (mode)
392 && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
393 check_reverse_float_storage_order_support ();
394
395 if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
396 {
397 sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
398 return x;
399 }
400 x = gen_lowpart (int_mode, x);
401 }
402
403 result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
404 if (result == 0)
405 result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);
406
407 if (int_mode != mode)
408 result = gen_lowpart (mode, result);
409
410 return result;
411 }
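/* Illustrative example: for a scalar SImode value, flipping the storage
   order amounts to a 32-bit byte swap, so (const_int 0x12345678) would
   become (const_int 0x78563412); QImode values are returned unchanged
   because a single byte has no storage order.  */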
412
413 /* Adjust bitfield memory MEM so that it points to the first unit of mode
414 MODE that contains a bitfield of size BITSIZE at bit position BITNUM.
415 If MODE is BLKmode, return a reference to every byte in the bitfield.
416 Set *NEW_BITNUM to the bit position of the field within the new memory. */
417
418 static rtx
419 narrow_bit_field_mem (rtx mem, machine_mode mode,
420 unsigned HOST_WIDE_INT bitsize,
421 unsigned HOST_WIDE_INT bitnum,
422 unsigned HOST_WIDE_INT *new_bitnum)
423 {
424 if (mode == BLKmode)
425 {
426 *new_bitnum = bitnum % BITS_PER_UNIT;
427 HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
428 HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
429 / BITS_PER_UNIT);
430 return adjust_bitfield_address_size (mem, mode, offset, size);
431 }
432 else
433 {
434 unsigned int unit = GET_MODE_BITSIZE (mode);
435 *new_bitnum = bitnum % unit;
436 HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
437 return adjust_bitfield_address (mem, mode, offset);
438 }
439 }
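/* Worked example: narrowing to MODE == HImode for an 8-bit field at
   BITNUM == 37 gives unit == 16, so *NEW_BITNUM becomes 37 % 16 == 5
   and the address is advanced by (37 - 5) / BITS_PER_UNIT == 4 bytes.  */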
440
441 /* The caller wants to perform insertion or extraction PATTERN on a
442 bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
443 BITREGION_START and BITREGION_END are as for store_bit_field
444 and FIELDMODE is the natural mode of the field.
445
446 Search for a mode that is compatible with the memory access
447 restrictions and (where applicable) with a register insertion or
448 extraction. Return the new memory on success, storing the adjusted
449 bit position in *NEW_BITNUM. Return null otherwise. */
450
451 static rtx
452 adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
453 rtx op0, HOST_WIDE_INT bitsize,
454 HOST_WIDE_INT bitnum,
455 unsigned HOST_WIDE_INT bitregion_start,
456 unsigned HOST_WIDE_INT bitregion_end,
457 machine_mode fieldmode,
458 unsigned HOST_WIDE_INT *new_bitnum)
459 {
460 bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
461 bitregion_end, MEM_ALIGN (op0),
462 MEM_VOLATILE_P (op0));
463 machine_mode best_mode;
464 if (iter.next_mode (&best_mode))
465 {
466 /* We can use a memory in BEST_MODE. See whether this is true for
467 any wider modes. All other things being equal, we prefer to
468 use the widest mode possible because it tends to expose more
469 CSE opportunities. */
470 if (!iter.prefer_smaller_modes ())
471 {
472 /* Limit the search to the mode required by the corresponding
473 register insertion or extraction instruction, if any. */
474 machine_mode limit_mode = word_mode;
475 extraction_insn insn;
476 if (get_best_reg_extraction_insn (&insn, pattern,
477 GET_MODE_BITSIZE (best_mode),
478 fieldmode))
479 limit_mode = insn.field_mode;
480
481 machine_mode wider_mode;
482 while (iter.next_mode (&wider_mode)
483 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
484 best_mode = wider_mode;
485 }
486 return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
487 new_bitnum);
488 }
489 return NULL_RTX;
490 }
491
492 /* Return true if a bitfield of size BITSIZE at bit number BITNUM within
493 a structure of mode STRUCT_MODE represents a lowpart subreg. The subreg
494 offset is then BITNUM / BITS_PER_UNIT. */
495
496 static bool
497 lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
498 unsigned HOST_WIDE_INT bitsize,
499 machine_mode struct_mode)
500 {
501 if (BYTES_BIG_ENDIAN)
502 return (bitnum % BITS_PER_UNIT == 0
503 && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
504 || (bitnum + bitsize) % BITS_PER_WORD == 0));
505 else
506 return bitnum % BITS_PER_WORD == 0;
507 }
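/* Illustrative example: on a 32-bit little-endian target, a 16-bit
   field at BITNUM == 32 within a DImode structure is a lowpart subreg
   because 32 % BITS_PER_WORD == 0; on a big-endian target the same
   field is not, since it neither ends on a word boundary nor at the
   end of the structure mode.  */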
508
509 /* Return true if -fstrict-volatile-bitfields applies to an access of OP0
510 containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
511 Return false if the access would touch memory outside the range
512 BITREGION_START to BITREGION_END for conformance to the C++ memory
513 model. */
514
515 static bool
516 strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
517 unsigned HOST_WIDE_INT bitnum,
518 machine_mode fieldmode,
519 unsigned HOST_WIDE_INT bitregion_start,
520 unsigned HOST_WIDE_INT bitregion_end)
521 {
522 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
523
524 /* -fstrict-volatile-bitfields must be enabled and we must have a
525 volatile MEM. */
526 if (!MEM_P (op0)
527 || !MEM_VOLATILE_P (op0)
528 || flag_strict_volatile_bitfields <= 0)
529 return false;
530
531 /* Non-integral modes likely only happen with packed structures.
532 Punt. */
533 if (!SCALAR_INT_MODE_P (fieldmode))
534 return false;
535
536 /* The bit size must not be larger than the field mode, and
537 the field mode must not be larger than a word. */
538 if (bitsize > modesize || modesize > BITS_PER_WORD)
539 return false;
540
541 /* Check for cases of unaligned fields that must be split. */
542 if (bitnum % modesize + bitsize > modesize)
543 return false;
544
545 /* The memory must be sufficiently aligned for a MODESIZE access.
546 This condition guarantees that the memory access will not
547 touch anything after the end of the structure. */
548 if (MEM_ALIGN (op0) < modesize)
549 return false;
550
551 /* Check for cases where the C++ memory model applies. */
552 if (bitregion_end != 0
553 && (bitnum - bitnum % modesize < bitregion_start
554 || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
555 return false;
556
557 return true;
558 }
559
560 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
561 bit number BITNUM can be treated as a simple value of mode MODE. */
562
563 static bool
564 simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
565 unsigned HOST_WIDE_INT bitnum, machine_mode mode)
566 {
567 return (MEM_P (op0)
568 && bitnum % BITS_PER_UNIT == 0
569 && bitsize == GET_MODE_BITSIZE (mode)
570 && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
571 || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
572 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
573 }
574 \f
575 /* Try to use instruction INSV to store VALUE into a field of OP0.
576 BITSIZE and BITNUM are as for store_bit_field. */
577
578 static bool
579 store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
580 unsigned HOST_WIDE_INT bitsize,
581 unsigned HOST_WIDE_INT bitnum,
582 rtx value)
583 {
584 struct expand_operand ops[4];
585 rtx value1;
586 rtx xop0 = op0;
587 rtx_insn *last = get_last_insn ();
588 bool copy_back = false;
589
590 machine_mode op_mode = insv->field_mode;
591 unsigned int unit = GET_MODE_BITSIZE (op_mode);
592 if (bitsize == 0 || bitsize > unit)
593 return false;
594
595 if (MEM_P (xop0))
596 /* Get a reference to the first byte of the field. */
597 xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
598 &bitnum);
599 else
600 {
601 /* Convert from counting within OP0 to counting in OP_MODE. */
602 if (BYTES_BIG_ENDIAN)
603 bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));
604
605 /* If xop0 is a register, we need it in OP_MODE
606 to make it acceptable to the format of insv. */
607 if (GET_CODE (xop0) == SUBREG)
608 /* We can't just change the mode, because this might clobber op0,
609 and we will need the original value of op0 if insv fails. */
610 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
611 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
612 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
613 }
614
615 /* If the destination is a paradoxical subreg such that we need a
616 truncate to the inner mode, perform the insertion on a temporary and
617 truncate the result to the original destination. Note that we can't
618 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
619 X) 0)) is (reg:N X). */
620 if (GET_CODE (xop0) == SUBREG
621 && REG_P (SUBREG_REG (xop0))
622 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
623 op_mode))
624 {
625 rtx tem = gen_reg_rtx (op_mode);
626 emit_move_insn (tem, xop0);
627 xop0 = tem;
628 copy_back = true;
629 }
630
631 /* There is a similar overflow check at the start of store_bit_field_1,
632 but that one only handles the case where the field lies completely
633 outside the register. The field can also lie partially in the
634 register, in which case we need to adjust BITSIZE for this partial
635 overflow. Without this fix, pr48335-2.c would be miscompiled on
636 big-endian targets that have a bit-insert instruction, such as arm
637 and aarch64. */
638 if (bitsize + bitnum > unit && bitnum < unit)
639 {
640 warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
641 "destination object, data truncated into %wu-bit",
642 bitsize, unit - bitnum);
643 bitsize = unit - bitnum;
644 }
645
646 /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
647 "backwards" from the size of the unit we are inserting into.
648 Otherwise, we count bits from the most significant on a
649 BYTES/BITS_BIG_ENDIAN machine. */
650
651 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
652 bitnum = unit - bitsize - bitnum;
653
654 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
655 value1 = value;
656 if (GET_MODE (value) != op_mode)
657 {
658 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
659 {
660 rtx tmp;
661 /* Optimization: Don't bother really extending VALUE
662 if it has all the bits we will actually use. However,
663 if we must narrow it, be sure we do it correctly. */
664
665 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
666 {
667 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
668 if (! tmp)
669 tmp = simplify_gen_subreg (op_mode,
670 force_reg (GET_MODE (value),
671 value1),
672 GET_MODE (value), 0);
673 }
674 else
675 {
676 tmp = gen_lowpart_if_possible (op_mode, value1);
677 if (! tmp)
678 tmp = gen_lowpart (op_mode, force_reg (GET_MODE (value),
679 value1));
680 }
681 value1 = tmp;
682 }
683 else if (CONST_INT_P (value))
684 value1 = gen_int_mode (INTVAL (value), op_mode);
685 else
686 /* Parse phase is supposed to make VALUE's data type
687 match that of the component reference, which is a type
688 at least as wide as the field; so VALUE should have
689 a mode that corresponds to that type. */
690 gcc_assert (CONSTANT_P (value));
691 }
692
693 create_fixed_operand (&ops[0], xop0);
694 create_integer_operand (&ops[1], bitsize);
695 create_integer_operand (&ops[2], bitnum);
696 create_input_operand (&ops[3], value1, op_mode);
697 if (maybe_expand_insn (insv->icode, 4, ops))
698 {
699 if (copy_back)
700 convert_move (op0, xop0, true);
701 return true;
702 }
703 delete_insns_since (last);
704 return false;
705 }
706
707 /* A subroutine of store_bit_field, with the same arguments. Return true
708 if the operation could be implemented.
709
710 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
711 no other way of implementing the operation. If FALLBACK_P is false,
712 return false instead. */
713
714 static bool
715 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
716 unsigned HOST_WIDE_INT bitnum,
717 unsigned HOST_WIDE_INT bitregion_start,
718 unsigned HOST_WIDE_INT bitregion_end,
719 machine_mode fieldmode,
720 rtx value, bool reverse, bool fallback_p)
721 {
722 rtx op0 = str_rtx;
723 rtx orig_value;
724
725 while (GET_CODE (op0) == SUBREG)
726 {
727 /* The following line once was done only if WORDS_BIG_ENDIAN,
728 but I think that is a mistake. WORDS_BIG_ENDIAN is
729 meaningful at a much higher level; when structures are copied
730 between memory and regs, the higher-numbered regs
731 always get higher addresses. */
732 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
733 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
734 int byte_offset = 0;
735
736 /* Paradoxical subregs need special handling on big-endian machines. */
737 if (paradoxical_subreg_p (op0))
738 {
739 int difference = inner_mode_size - outer_mode_size;
740
741 if (WORDS_BIG_ENDIAN)
742 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
743 if (BYTES_BIG_ENDIAN)
744 byte_offset += difference % UNITS_PER_WORD;
745 }
746 else
747 byte_offset = SUBREG_BYTE (op0);
748
749 bitnum += byte_offset * BITS_PER_UNIT;
750 op0 = SUBREG_REG (op0);
751 }
752
753 /* No action is needed if the target is a register and if the field
754 lies completely outside that register. This can occur if the source
755 code contains an out-of-bounds access to a small array. */
756 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
757 return true;
758
759 /* Use vec_set patterns for inserting parts of vectors whenever
760 available. */
761 if (VECTOR_MODE_P (GET_MODE (op0))
762 && !MEM_P (op0)
763 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
764 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
765 && bitsize == GET_MODE_UNIT_BITSIZE (GET_MODE (op0))
766 && !(bitnum % GET_MODE_UNIT_BITSIZE (GET_MODE (op0))))
767 {
768 struct expand_operand ops[3];
769 machine_mode outermode = GET_MODE (op0);
770 machine_mode innermode = GET_MODE_INNER (outermode);
771 enum insn_code icode = optab_handler (vec_set_optab, outermode);
772 int pos = bitnum / GET_MODE_BITSIZE (innermode);
773
774 create_fixed_operand (&ops[0], op0);
775 create_input_operand (&ops[1], value, innermode);
776 create_integer_operand (&ops[2], pos);
777 if (maybe_expand_insn (icode, 3, ops))
778 return true;
779 }
780
781 /* If the target is a register, overwriting the entire object, or storing
782 a full-word or multi-word field can be done with just a SUBREG. */
783 if (!MEM_P (op0)
784 && bitsize == GET_MODE_BITSIZE (fieldmode)
785 && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
786 || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
787 {
788 /* Use the subreg machinery either to narrow OP0 to the required
789 words or to cope with mode punning between equal-sized modes.
790 In the latter case, use subreg on the rhs side, not lhs. */
791 rtx sub;
792
793 if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
794 {
795 sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
796 if (sub)
797 {
798 if (reverse)
799 sub = flip_storage_order (GET_MODE (op0), sub);
800 emit_move_insn (op0, sub);
801 return true;
802 }
803 }
804 else
805 {
806 sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
807 bitnum / BITS_PER_UNIT);
808 if (sub)
809 {
810 if (reverse)
811 value = flip_storage_order (fieldmode, value);
812 emit_move_insn (sub, value);
813 return true;
814 }
815 }
816 }
817
818 /* If the target is memory, storing any naturally aligned field can be
819 done with a simple store. For targets that support fast unaligned
820 memory, any naturally sized, unit aligned field can be done directly. */
821 if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
822 {
823 op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
824 if (reverse)
825 value = flip_storage_order (fieldmode, value);
826 emit_move_insn (op0, value);
827 return true;
828 }
829
830 /* Make sure we are playing with integral modes. Pun with subregs
831 if we aren't. This must come after the entire register case above,
832 since that case is valid for any mode. The following cases are only
833 valid for integral modes. */
834 opt_scalar_int_mode opt_imode = int_mode_for_mode (GET_MODE (op0));
835 scalar_int_mode imode;
836 if (!opt_imode.exists (&imode) || imode != GET_MODE (op0))
837 {
838 if (MEM_P (op0))
839 op0 = adjust_bitfield_address_size (op0, opt_imode.else_blk (),
840 0, MEM_SIZE (op0));
841 else
842 op0 = gen_lowpart (opt_imode.require (), op0);
843 }
844
845 /* Storing an lsb-aligned field in a register
846 can be done with a movstrict instruction. */
847
848 if (!MEM_P (op0)
849 && !reverse
850 && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
851 && bitsize == GET_MODE_BITSIZE (fieldmode)
852 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
853 {
854 struct expand_operand ops[2];
855 enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
856 rtx arg0 = op0;
857 unsigned HOST_WIDE_INT subreg_off;
858
859 if (GET_CODE (arg0) == SUBREG)
860 {
861 /* Else we've got some float mode source being extracted into
862 a different float mode destination -- this combination of
863 subregs results in Severe Tire Damage. */
864 gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
865 || GET_MODE_CLASS (fieldmode) == MODE_INT
866 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
867 arg0 = SUBREG_REG (arg0);
868 }
869
870 subreg_off = bitnum / BITS_PER_UNIT;
871 if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
872 {
873 arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
874
875 create_fixed_operand (&ops[0], arg0);
876 /* Shrink the source operand to FIELDMODE. */
877 create_convert_operand_to (&ops[1], value, fieldmode, false);
878 if (maybe_expand_insn (icode, 2, ops))
879 return true;
880 }
881 }
882
883 /* Handle fields bigger than a word. */
884
885 if (bitsize > BITS_PER_WORD)
886 {
887 /* Here we transfer the words of the field
888 in the order least significant first.
889 This is because the most significant word is the one which may
890 be less than full.
891 However, only do that if the value is not BLKmode. */
892
893 const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
894 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
895 unsigned int i;
896 rtx_insn *last;
897
898 /* This is the mode we must force value to, so that there will be enough
899 subwords to extract. Note that fieldmode will often (always?) be
900 VOIDmode, because that is what store_field uses to indicate that this
901 is a bit field, but passing VOIDmode to operand_subword_force
902 is not allowed. */
903 fieldmode = GET_MODE (value);
904 if (fieldmode == VOIDmode)
905 fieldmode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);
906
907 last = get_last_insn ();
908 for (i = 0; i < nwords; i++)
909 {
910 /* If I is 0, use the low-order word in both field and target;
911 if I is 1, use the next to lowest word; and so on. */
912 unsigned int wordnum = (backwards
913 ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
914 - i - 1
915 : i);
916 unsigned int bit_offset = (backwards ^ reverse
917 ? MAX ((int) bitsize - ((int) i + 1)
918 * BITS_PER_WORD,
919 0)
920 : (int) i * BITS_PER_WORD);
921 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
922 unsigned HOST_WIDE_INT new_bitsize =
923 MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);
924
925 /* If the remaining chunk is narrower than a full word, we have
926 to make sure that on big-endian machines the higher-order
927 bits are used. */
928 if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
929 value_word = simplify_expand_binop (word_mode, lshr_optab,
930 value_word,
931 GEN_INT (BITS_PER_WORD
932 - new_bitsize),
933 NULL_RTX, true,
934 OPTAB_LIB_WIDEN);
935
936 if (!store_bit_field_1 (op0, new_bitsize,
937 bitnum + bit_offset,
938 bitregion_start, bitregion_end,
939 word_mode,
940 value_word, reverse, fallback_p))
941 {
942 delete_insns_since (last);
943 return false;
944 }
945 }
946 return true;
947 }
948
949 /* If VALUE has a floating-point or complex mode, access it as an
950 integer of the corresponding size. This can occur on a machine
951 with 64 bit registers that uses SFmode for float. It can also
952 occur for unaligned float or complex fields. */
953 orig_value = value;
954 if (GET_MODE (value) != VOIDmode
955 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
956 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
957 {
958 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)).require ());
959 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
960 }
961
962 /* If OP0 is a multi-word register, narrow it to the affected word.
963 If the region spans two words, defer to store_split_bit_field.
964 Don't do this if op0 is a single hard register wider than word
965 such as a float or vector register. */
966 if (!MEM_P (op0)
967 && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD
968 && (!REG_P (op0)
969 || !HARD_REGISTER_P (op0)
970 || HARD_REGNO_NREGS (REGNO (op0), GET_MODE (op0)) != 1))
971 {
972 if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
973 {
974 if (!fallback_p)
975 return false;
976
977 store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
978 bitregion_end, value, reverse);
979 return true;
980 }
981 op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
982 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
983 gcc_assert (op0);
984 bitnum %= BITS_PER_WORD;
985 }
986
987 /* From here on we can assume that the field to be stored in fits
988 within a word. If the destination is a register, it too fits
989 in a word. */
990
991 extraction_insn insv;
992 if (!MEM_P (op0)
993 && !reverse
994 && get_best_reg_extraction_insn (&insv, EP_insv,
995 GET_MODE_BITSIZE (GET_MODE (op0)),
996 fieldmode)
997 && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
998 return true;
999
1000 /* If OP0 is a memory, try copying it to a register and seeing if a
1001 cheap register alternative is available. */
1002 if (MEM_P (op0) && !reverse)
1003 {
1004 if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
1005 fieldmode)
1006 && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
1007 return true;
1008
1009 rtx_insn *last = get_last_insn ();
1010
1011 /* Try loading part of OP0 into a register, inserting the bitfield
1012 into that, and then copying the result back to OP0. */
1013 unsigned HOST_WIDE_INT bitpos;
1014 rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
1015 bitregion_start, bitregion_end,
1016 fieldmode, &bitpos);
1017 if (xop0)
1018 {
1019 rtx tempreg = copy_to_reg (xop0);
1020 if (store_bit_field_1 (tempreg, bitsize, bitpos,
1021 bitregion_start, bitregion_end,
1022 fieldmode, orig_value, reverse, false))
1023 {
1024 emit_move_insn (xop0, tempreg);
1025 return true;
1026 }
1027 delete_insns_since (last);
1028 }
1029 }
1030
1031 if (!fallback_p)
1032 return false;
1033
1034 store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
1035 bitregion_end, value, reverse);
1036 return true;
1037 }
1038
1039 /* Generate code to store value from rtx VALUE
1040 into a bit-field within structure STR_RTX
1041 containing BITSIZE bits starting at bit BITNUM.
1042
1043 BITREGION_START is bitpos of the first bitfield in this region.
1044 BITREGION_END is the bitpos of the ending bitfield in this region.
1045 These two fields are 0, if the C++ memory model does not apply,
1046 or we are not interested in keeping track of bitfield regions.
1047
1048 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
1049
1050 If REVERSE is true, the store is to be done in reverse order. */
1051
1052 void
1053 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1054 unsigned HOST_WIDE_INT bitnum,
1055 unsigned HOST_WIDE_INT bitregion_start,
1056 unsigned HOST_WIDE_INT bitregion_end,
1057 machine_mode fieldmode,
1058 rtx value, bool reverse)
1059 {
1060 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1061 if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
1062 bitregion_start, bitregion_end))
1063 {
1064 /* Storing of a full word can be done with a simple store.
1065 We know here that the field can be accessed with one single
1066 instruction. For targets that support unaligned memory,
1067 an unaligned access may be necessary. */
1068 if (bitsize == GET_MODE_BITSIZE (fieldmode))
1069 {
1070 str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
1071 bitnum / BITS_PER_UNIT);
1072 if (reverse)
1073 value = flip_storage_order (fieldmode, value);
1074 gcc_assert (bitnum % BITS_PER_UNIT == 0);
1075 emit_move_insn (str_rtx, value);
1076 }
1077 else
1078 {
1079 rtx temp;
1080
1081 str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum,
1082 &bitnum);
1083 gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (fieldmode));
1084 temp = copy_to_reg (str_rtx);
1085 if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0,
1086 fieldmode, value, reverse, true))
1087 gcc_unreachable ();
1088
1089 emit_move_insn (str_rtx, temp);
1090 }
1091
1092 return;
1093 }
1094
1095 /* Under the C++0x memory model, we must not touch bits outside the
1096 bit region. Adjust the address to start at the beginning of the
1097 bit region. */
1098 if (MEM_P (str_rtx) && bitregion_start > 0)
1099 {
1100 machine_mode bestmode;
1101 HOST_WIDE_INT offset, size;
1102
1103 gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);
1104
1105 offset = bitregion_start / BITS_PER_UNIT;
1106 bitnum -= bitregion_start;
1107 size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
1108 bitregion_end -= bitregion_start;
1109 bitregion_start = 0;
1110 bestmode = get_best_mode (bitsize, bitnum,
1111 bitregion_start, bitregion_end,
1112 MEM_ALIGN (str_rtx), VOIDmode,
1113 MEM_VOLATILE_P (str_rtx));
1114 str_rtx = adjust_bitfield_address_size (str_rtx, bestmode, offset, size);
1115 }
1116
1117 if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
1118 bitregion_start, bitregion_end,
1119 fieldmode, value, reverse, true))
1120 gcc_unreachable ();
1121 }
1122 \f
1123 /* Use shifts and boolean operations to store VALUE into a bit field of
1124 width BITSIZE in OP0, starting at bit BITNUM.
1125
1126 If REVERSE is true, the store is to be done in reverse order. */
1127
1128 static void
1129 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1130 unsigned HOST_WIDE_INT bitnum,
1131 unsigned HOST_WIDE_INT bitregion_start,
1132 unsigned HOST_WIDE_INT bitregion_end,
1133 rtx value, bool reverse)
1134 {
1135 /* There is a case not handled here:
1136 a structure with a known alignment of just a halfword
1137 and a field split across two aligned halfwords within the structure.
1138 Or likewise a structure with a known alignment of just a byte
1139 and a field split across two bytes.
1140 Such cases are not supposed to be able to occur. */
1141
1142 if (MEM_P (op0))
1143 {
1144 machine_mode mode = GET_MODE (op0);
1145 if (GET_MODE_BITSIZE (mode) == 0
1146 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
1147 mode = word_mode;
1148 mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
1149 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
1150
1151 if (mode == VOIDmode)
1152 {
1153 /* The only way this should occur is if the field spans word
1154 boundaries. */
1155 store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
1156 bitregion_end, value, reverse);
1157 return;
1158 }
1159
1160 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
1161 }
1162
1163 store_fixed_bit_field_1 (op0, bitsize, bitnum, value, reverse);
1164 }
1165
1166 /* Helper function for store_fixed_bit_field, stores
1167 the bit field always using the MODE of OP0. */
1168
1169 static void
1170 store_fixed_bit_field_1 (rtx op0, unsigned HOST_WIDE_INT bitsize,
1171 unsigned HOST_WIDE_INT bitnum,
1172 rtx value, bool reverse)
1173 {
1174 machine_mode mode;
1175 rtx temp;
1176 int all_zero = 0;
1177 int all_one = 0;
1178
1179 mode = GET_MODE (op0);
1180 gcc_assert (SCALAR_INT_MODE_P (mode));
1181
1182 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1183 for invalid input, such as f5 from gcc.dg/pr48335-2.c. */
1184
1185 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
1186 /* BITNUM is the distance between our msb
1187 and that of the containing datum.
1188 Convert it to the distance from the lsb. */
1189 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
1190
1191 /* Now BITNUM is always the distance between our lsb
1192 and that of OP0. */
1193
1194 /* Shift VALUE left by BITNUM bits. If VALUE is not constant,
1195 we must first convert its mode to MODE. */
1196
1197 if (CONST_INT_P (value))
1198 {
1199 unsigned HOST_WIDE_INT v = UINTVAL (value);
1200
1201 if (bitsize < HOST_BITS_PER_WIDE_INT)
1202 v &= (HOST_WIDE_INT_1U << bitsize) - 1;
1203
1204 if (v == 0)
1205 all_zero = 1;
1206 else if ((bitsize < HOST_BITS_PER_WIDE_INT
1207 && v == (HOST_WIDE_INT_1U << bitsize) - 1)
1208 || (bitsize == HOST_BITS_PER_WIDE_INT
1209 && v == HOST_WIDE_INT_M1U))
1210 all_one = 1;
1211
1212 value = lshift_value (mode, v, bitnum);
1213 }
1214 else
1215 {
1216 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
1217 && bitnum + bitsize != GET_MODE_BITSIZE (mode));
1218
1219 if (GET_MODE (value) != mode)
1220 value = convert_to_mode (mode, value, 1);
1221
1222 if (must_and)
1223 value = expand_binop (mode, and_optab, value,
1224 mask_rtx (mode, 0, bitsize, 0),
1225 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1226 if (bitnum > 0)
1227 value = expand_shift (LSHIFT_EXPR, mode, value,
1228 bitnum, NULL_RTX, 1);
1229 }
1230
1231 if (reverse)
1232 value = flip_storage_order (mode, value);
1233
1234 /* Now clear the chosen bits in OP0,
1235 except that if VALUE is -1 we need not bother. */
1236 /* We keep the intermediates in registers to allow CSE to combine
1237 consecutive bitfield assignments. */
1238
1239 temp = force_reg (mode, op0);
1240
1241 if (! all_one)
1242 {
1243 rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
1244 if (reverse)
1245 mask = flip_storage_order (mode, mask);
1246 temp = expand_binop (mode, and_optab, temp, mask,
1247 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1248 temp = force_reg (mode, temp);
1249 }
1250
1251 /* Now logical-or VALUE into OP0, unless it is zero. */
1252
1253 if (! all_zero)
1254 {
1255 temp = expand_binop (mode, ior_optab, temp, value,
1256 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1257 temp = force_reg (mode, temp);
1258 }
1259
1260 if (op0 != temp)
1261 {
1262 op0 = copy_rtx (op0);
1263 emit_move_insn (op0, temp);
1264 }
1265 }
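/* Worked example: storing the 4-bit constant 0x9 at BITNUM == 8 into an
   SImode OP0 on a little-endian target reduces to
   temp = (op0 & ~0xf00) | 0x900, i.e. a single AND with the inverted
   mask followed by a single IOR with the shifted value.  */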
1266 \f
1267 /* Store a bit field that is split across multiple accessible memory objects.
1268
1269 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1270 BITSIZE is the field width; BITPOS the position of its first bit
1271 (within the word).
1272 VALUE is the value to store.
1273
1274 If REVERSE is true, the store is to be done in reverse order.
1275
1276 This does not yet handle fields wider than BITS_PER_WORD. */
1277
1278 static void
1279 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1280 unsigned HOST_WIDE_INT bitpos,
1281 unsigned HOST_WIDE_INT bitregion_start,
1282 unsigned HOST_WIDE_INT bitregion_end,
1283 rtx value, bool reverse)
1284 {
1285 unsigned int unit, total_bits, bitsdone = 0;
1286
1287 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1288 much at a time. */
1289 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1290 unit = BITS_PER_WORD;
1291 else
1292 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1293
1294 /* If OP0 is a memory with a mode, then UNIT must not be larger than
1295 OP0's mode as well. Otherwise, store_fixed_bit_field will call us
1296 again, and we will mutually recurse forever. */
1297 if (MEM_P (op0) && GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1298 unit = MIN (unit, GET_MODE_BITSIZE (GET_MODE (op0)));
1299
1300 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1301 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1302 that VALUE might be a floating-point constant. */
1303 if (CONSTANT_P (value) && !CONST_INT_P (value))
1304 {
1305 rtx word = gen_lowpart_common (word_mode, value);
1306
1307 if (word && (value != word))
1308 value = word;
1309 else
1310 value = gen_lowpart_common (word_mode,
1311 force_reg (GET_MODE (value) != VOIDmode
1312 ? GET_MODE (value)
1313 : word_mode, value));
1314 }
1315
1316 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1317
1318 while (bitsdone < bitsize)
1319 {
1320 unsigned HOST_WIDE_INT thissize;
1321 unsigned HOST_WIDE_INT thispos;
1322 unsigned HOST_WIDE_INT offset;
1323 rtx part, word;
1324
1325 offset = (bitpos + bitsdone) / unit;
1326 thispos = (bitpos + bitsdone) % unit;
1327
1328 /* When region of bytes we can touch is restricted, decrease
1329 UNIT close to the end of the region as needed. If op0 is a REG
1330 or SUBREG of REG, don't do this, as there can't be data races
1331 on a register and we can expand shorter code in some cases. */
1332 if (bitregion_end
1333 && unit > BITS_PER_UNIT
1334 && bitpos + bitsdone - thispos + unit > bitregion_end + 1
1335 && !REG_P (op0)
1336 && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
1337 {
1338 unit = unit / 2;
1339 continue;
1340 }
1341
1342 /* THISSIZE must not overrun a word boundary. Otherwise,
1343 store_fixed_bit_field will call us again, and we will mutually
1344 recurse forever. */
1345 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1346 thissize = MIN (thissize, unit - thispos);
1347
1348 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
1349 {
1350 /* Fetch successively less significant portions. */
1351 if (CONST_INT_P (value))
1352 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1353 >> (bitsize - bitsdone - thissize))
1354 & ((HOST_WIDE_INT_1 << thissize) - 1));
1355 /* Likewise, but the source is little-endian. */
1356 else if (reverse)
1357 part = extract_fixed_bit_field (word_mode, value, thissize,
1358 bitsize - bitsdone - thissize,
1359 NULL_RTX, 1, false);
1360 else
1361 {
1362 int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1363 /* The args are chosen so that the last part includes the
1364 lsb. Give extract_bit_field the value it needs (with
1365 endianness compensation) to fetch the piece we want. */
1366 part = extract_fixed_bit_field (word_mode, value, thissize,
1367 total_bits - bitsize + bitsdone,
1368 NULL_RTX, 1, false);
1369 }
1370 }
1371 else
1372 {
1373 /* Fetch successively more significant portions. */
1374 if (CONST_INT_P (value))
1375 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1376 >> bitsdone)
1377 & ((HOST_WIDE_INT_1 << thissize) - 1));
1378 /* Likewise, but the source is big-endian. */
1379 else if (reverse)
1380 part = extract_fixed_bit_field (word_mode, value, thissize,
1381 total_bits - bitsdone - thissize,
1382 NULL_RTX, 1, false);
1383 else
1384 part = extract_fixed_bit_field (word_mode, value, thissize,
1385 bitsdone, NULL_RTX, 1, false);
1386 }
1387
1388 /* If OP0 is a register, then handle OFFSET here. */
1389 if (SUBREG_P (op0) || REG_P (op0))
1390 {
1391 machine_mode op0_mode = GET_MODE (op0);
1392 if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
1393 word = offset ? const0_rtx : op0;
1394 else
1395 word = operand_subword_force (op0, offset * unit / BITS_PER_WORD,
1396 GET_MODE (op0));
1397 offset &= BITS_PER_WORD / unit - 1;
1398 }
1399 else
1400 word = op0;
1401
1402 /* OFFSET is in UNITs, and UNIT is in bits. If WORD is const0_rtx,
1403 it is just an out-of-bounds access. Ignore it. */
1404 if (word != const0_rtx)
1405 store_fixed_bit_field (word, thissize, offset * unit + thispos,
1406 bitregion_start, bitregion_end, part,
1407 reverse);
1408 bitsdone += thissize;
1409 }
1410 }
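/* Illustrative example: on a 32-bit target, storing a 16-bit field at
   BITPOS == 24 of a multi-word register is split into an 8-bit piece at
   bits 24-31 of the first word and an 8-bit piece at bits 0-7 of the
   second word, each stored with store_fixed_bit_field.  */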
1411 \f
1412 /* A subroutine of extract_bit_field_1 that converts return value X
1413 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1414 to extract_bit_field. */
1415
1416 static rtx
1417 convert_extracted_bit_field (rtx x, machine_mode mode,
1418 machine_mode tmode, bool unsignedp)
1419 {
1420 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1421 return x;
1422
1423 /* If TMODE is not a scalar integral mode, first convert X to the
1424 integer mode of that size and then access it as a value of TMODE
1425 (e.g. a floating-point value) via a SUBREG. */
1426 if (!SCALAR_INT_MODE_P (tmode))
1427 {
1428 scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
1429 x = convert_to_mode (int_mode, x, unsignedp);
1430 x = force_reg (int_mode, x);
1431 return gen_lowpart (tmode, x);
1432 }
1433
1434 return convert_to_mode (tmode, x, unsignedp);
1435 }
1436
1437 /* Try to use an ext(z)v pattern to extract a field from OP0.
1438 Return the extracted value on success, otherwise return null.
1439 EXT_MODE is the mode of the extraction and the other arguments
1440 are as for extract_bit_field. */
1441
1442 static rtx
1443 extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
1444 unsigned HOST_WIDE_INT bitsize,
1445 unsigned HOST_WIDE_INT bitnum,
1446 int unsignedp, rtx target,
1447 machine_mode mode, machine_mode tmode)
1448 {
1449 struct expand_operand ops[4];
1450 rtx spec_target = target;
1451 rtx spec_target_subreg = 0;
1452 machine_mode ext_mode = extv->field_mode;
1453 unsigned unit = GET_MODE_BITSIZE (ext_mode);
1454
1455 if (bitsize == 0 || unit < bitsize)
1456 return NULL_RTX;
1457
1458 if (MEM_P (op0))
1459 /* Get a reference to the first byte of the field. */
1460 op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
1461 &bitnum);
1462 else
1463 {
1464 /* Convert from counting within OP0 to counting in EXT_MODE. */
1465 if (BYTES_BIG_ENDIAN)
1466 bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1467
1468 /* If op0 is a register, we need it in EXT_MODE to make it
1469 acceptable to the format of ext(z)v. */
1470 if (GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1471 return NULL_RTX;
1472 if (REG_P (op0) && GET_MODE (op0) != ext_mode)
1473 op0 = gen_lowpart_SUBREG (ext_mode, op0);
1474 }
1475
1476 /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
1477 "backwards" from the size of the unit we are extracting from.
1478 Otherwise, we count bits from the most significant on a
1479 BYTES/BITS_BIG_ENDIAN machine. */
1480
1481 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1482 bitnum = unit - bitsize - bitnum;
1483
1484 if (target == 0)
1485 target = spec_target = gen_reg_rtx (tmode);
1486
1487 if (GET_MODE (target) != ext_mode)
1488 {
1489 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1490 between the mode of the extraction (word_mode) and the target
1491 mode. Instead, create a temporary and use convert_move to set
1492 the target. */
1493 if (REG_P (target)
1494 && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
1495 {
1496 target = gen_lowpart (ext_mode, target);
1497 if (GET_MODE_PRECISION (ext_mode)
1498 > GET_MODE_PRECISION (GET_MODE (spec_target)))
1499 spec_target_subreg = target;
1500 }
1501 else
1502 target = gen_reg_rtx (ext_mode);
1503 }
1504
1505 create_output_operand (&ops[0], target, ext_mode);
1506 create_fixed_operand (&ops[1], op0);
1507 create_integer_operand (&ops[2], bitsize);
1508 create_integer_operand (&ops[3], bitnum);
1509 if (maybe_expand_insn (extv->icode, 4, ops))
1510 {
1511 target = ops[0].value;
1512 if (target == spec_target)
1513 return target;
1514 if (target == spec_target_subreg)
1515 return spec_target;
1516 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1517 }
1518 return NULL_RTX;
1519 }
1520
1521 /* A subroutine of extract_bit_field, with the same arguments.
1522 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1523 if we can find no other means of implementing the operation.
1524 If FALLBACK_P is false, return NULL instead. */
1525
1526 static rtx
1527 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1528 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1529 machine_mode mode, machine_mode tmode,
1530 bool reverse, bool fallback_p, rtx *alt_rtl)
1531 {
1532 rtx op0 = str_rtx;
1533 machine_mode mode1;
1534
1535 if (tmode == VOIDmode)
1536 tmode = mode;
1537
1538 while (GET_CODE (op0) == SUBREG)
1539 {
1540 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1541 op0 = SUBREG_REG (op0);
1542 }
1543
1544 /* If we have an out-of-bounds access to a register, just return an
1545 uninitialized register of the required mode. This can occur if the
1546 source code contains an out-of-bounds access to a small array. */
1547 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1548 return gen_reg_rtx (tmode);
1549
1550 if (REG_P (op0)
1551 && mode == GET_MODE (op0)
1552 && bitnum == 0
1553 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1554 {
1555 if (reverse)
1556 op0 = flip_storage_order (mode, op0);
1557 /* We're trying to extract a full register from itself. */
1558 return op0;
1559 }
1560
1561 /* First try to check for vector from vector extractions. */
1562 if (VECTOR_MODE_P (GET_MODE (op0))
1563 && !MEM_P (op0)
1564 && VECTOR_MODE_P (tmode)
1565 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode))
1566 {
1567 machine_mode new_mode = GET_MODE (op0);
1568 if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
1569 {
1570 new_mode = mode_for_vector (GET_MODE_INNER (tmode),
1571 GET_MODE_BITSIZE (GET_MODE (op0))
1572 / GET_MODE_UNIT_BITSIZE (tmode));
1573 if (!VECTOR_MODE_P (new_mode)
1574 || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
1575 || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
1576 || !targetm.vector_mode_supported_p (new_mode))
1577 new_mode = VOIDmode;
1578 }
1579 if (new_mode != VOIDmode
1580 && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
1581 != CODE_FOR_nothing)
1582 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (tmode)
1583 == bitnum / GET_MODE_BITSIZE (tmode)))
1584 {
1585 struct expand_operand ops[3];
1586 machine_mode outermode = new_mode;
1587 machine_mode innermode = tmode;
1588 enum insn_code icode
1589 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1590 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1591
1592 if (new_mode != GET_MODE (op0))
1593 op0 = gen_lowpart (new_mode, op0);
1594 create_output_operand (&ops[0], target, innermode);
1595 ops[0].target = 1;
1596 create_input_operand (&ops[1], op0, outermode);
1597 create_integer_operand (&ops[2], pos);
1598 if (maybe_expand_insn (icode, 3, ops))
1599 {
1600 if (alt_rtl && ops[0].target)
1601 *alt_rtl = target;
1602 target = ops[0].value;
1603 if (GET_MODE (target) != mode)
1604 return gen_lowpart (tmode, target);
1605 return target;
1606 }
1607 }
1608 }
1609
1610 /* See if we can get a better vector mode before extracting. */
1611 if (VECTOR_MODE_P (GET_MODE (op0))
1612 && !MEM_P (op0)
1613 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1614 {
1615 machine_mode new_mode;
1616
1617 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1618 new_mode = MIN_MODE_VECTOR_FLOAT;
1619 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1620 new_mode = MIN_MODE_VECTOR_FRACT;
1621 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1622 new_mode = MIN_MODE_VECTOR_UFRACT;
1623 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1624 new_mode = MIN_MODE_VECTOR_ACCUM;
1625 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1626 new_mode = MIN_MODE_VECTOR_UACCUM;
1627 else
1628 new_mode = MIN_MODE_VECTOR_INT;
1629
1630 FOR_EACH_MODE_FROM (new_mode, new_mode)
1631 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1632 && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode)
1633 && targetm.vector_mode_supported_p (new_mode))
1634 break;
1635 if (new_mode != VOIDmode)
1636 op0 = gen_lowpart (new_mode, op0);
1637 }
1638
1639 /* Use vec_extract patterns for extracting parts of vectors whenever
1640 available. */
1641 if (VECTOR_MODE_P (GET_MODE (op0))
1642 && !MEM_P (op0)
1643 && (convert_optab_handler (vec_extract_optab, GET_MODE (op0),
1644 GET_MODE_INNER (GET_MODE (op0)))
1645 != CODE_FOR_nothing)
1646 && ((bitnum + bitsize - 1) / GET_MODE_UNIT_BITSIZE (GET_MODE (op0))
1647 == bitnum / GET_MODE_UNIT_BITSIZE (GET_MODE (op0))))
1648 {
1649 struct expand_operand ops[3];
1650 machine_mode outermode = GET_MODE (op0);
1651 machine_mode innermode = GET_MODE_INNER (outermode);
1652 enum insn_code icode
1653 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1654 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1655
1656 create_output_operand (&ops[0], target, innermode);
1657 ops[0].target = 1;
1658 create_input_operand (&ops[1], op0, outermode);
1659 create_integer_operand (&ops[2], pos);
1660 if (maybe_expand_insn (icode, 3, ops))
1661 {
1662 if (alt_rtl && ops[0].target)
1663 *alt_rtl = target;
1664 target = ops[0].value;
1665 if (GET_MODE (target) != mode)
1666 return gen_lowpart (tmode, target);
1667 return target;
1668 }
1669 }
1670
1671 /* Make sure we are playing with integral modes. Pun with subregs
1672 if we aren't. */
1673 opt_scalar_int_mode opt_imode = int_mode_for_mode (GET_MODE (op0));
1674 scalar_int_mode imode;
1675 if (!opt_imode.exists (&imode) || imode != GET_MODE (op0))
1676 {
1677 if (MEM_P (op0))
1678 op0 = adjust_bitfield_address_size (op0, opt_imode.else_blk (),
1679 0, MEM_SIZE (op0));
1680 else if (opt_imode.exists (&imode))
1681 {
1682 op0 = gen_lowpart (imode, op0);
1683
1684 /* If we got a SUBREG, force it into a register since we
1685 aren't going to be able to do another SUBREG on it. */
1686 if (GET_CODE (op0) == SUBREG)
1687 op0 = force_reg (imode, op0);
1688 }
1689 else
1690 {
1691 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
1692 rtx mem = assign_stack_temp (GET_MODE (op0), size);
1693 emit_move_insn (mem, op0);
1694 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1695 }
1696 }
1697
1698 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1699 If that's wrong, the solution is to test for it and set TARGET to 0
1700 if needed. */
1701
1702 /* Get the mode of the field to use for atomic access or subreg
1703 conversion. */
1704 mode1 = mode;
1705 if (SCALAR_INT_MODE_P (tmode))
1706 {
1707 machine_mode try_mode = mode_for_size (bitsize,
1708 GET_MODE_CLASS (tmode), 0);
1709 if (try_mode != BLKmode)
1710 mode1 = try_mode;
1711 }
1712 gcc_assert (mode1 != BLKmode);
1713
1714 /* Extraction of a full MODE1 value can be done with a subreg as long
1715 as the least significant bit of the value is the least significant
1716 bit of either OP0 or a word of OP0. */
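/* For example (illustrative), extracting the full low word of a DImode
   register as SImode, or the word starting at bit 32, satisfies this and
   reduces to a simple subreg at the corresponding byte offset.  */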
1717 if (!MEM_P (op0)
1718 && !reverse
1719 && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
1720 && bitsize == GET_MODE_BITSIZE (mode1)
1721 && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
1722 {
1723 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1724 bitnum / BITS_PER_UNIT);
1725 if (sub)
1726 return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1727 }
1728
1729 /* Extraction of a full MODE1 value can be done with a load as long as
1730 the field is on a byte boundary and is sufficiently aligned. */
1731 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
1732 {
1733 op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
1734 if (reverse)
1735 op0 = flip_storage_order (mode1, op0);
1736 return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1737 }
1738
1739 /* Handle fields bigger than a word. */
1740
1741 if (bitsize > BITS_PER_WORD)
1742 {
1743 /* Here we transfer the words of the field
1744 in the order least significant first.
1745 This is because the most significant word is the one which may
1746 be less than full. */
1747
1748 const bool backwards = WORDS_BIG_ENDIAN;
1749 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1750 unsigned int i;
1751 rtx_insn *last;
1752
1753 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1754 target = gen_reg_rtx (mode);
1755
1756 /* In case we're about to clobber a base register or something
1757 (see gcc.c-torture/execute/20040625-1.c). */
1758 if (reg_mentioned_p (target, str_rtx))
1759 target = gen_reg_rtx (mode);
1760
1761 /* Indicate for flow that the entire target reg is being set. */
1762 emit_clobber (target);
1763
1764 last = get_last_insn ();
1765 for (i = 0; i < nwords; i++)
1766 {
1767 /* If I is 0, use the low-order word in both field and target;
1768 if I is 1, use the next to lowest word; and so on. */
1769 /* Word number in TARGET to use. */
1770 unsigned int wordnum
1771 = (backwards
1772 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1773 : i);
1774 /* Offset from start of field in OP0. */
1775 unsigned int bit_offset = (backwards ^ reverse
1776 ? MAX ((int) bitsize - ((int) i + 1)
1777 * BITS_PER_WORD,
1778 0)
1779 : (int) i * BITS_PER_WORD);
1780 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1781 rtx result_part
1782 = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1783 bitsize - i * BITS_PER_WORD),
1784 bitnum + bit_offset, 1, target_part,
1785 mode, word_mode, reverse, fallback_p, NULL);
1786
1787 gcc_assert (target_part);
1788 if (!result_part)
1789 {
1790 delete_insns_since (last);
1791 return NULL;
1792 }
1793
1794 if (result_part != target_part)
1795 emit_move_insn (target_part, result_part);
1796 }
1797
1798 if (unsignedp)
1799 {
1800 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1801 need to be zeroed out. */
1802 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1803 {
1804 unsigned int i, total_words;
1805
1806 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1807 for (i = nwords; i < total_words; i++)
1808 emit_move_insn
1809 (operand_subword (target,
1810 backwards ? total_words - i - 1 : i,
1811 1, VOIDmode),
1812 const0_rtx);
1813 }
1814 return target;
1815 }
1816
1817 /* Signed bit field: sign-extend with two arithmetic shifts. */
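/* For instance (illustrative), a 48-bit signed field extracted into a
   64-bit MODE is shifted left by 16 and then arithmetically right by 16
   so that its sign bit is propagated into the upper bits.  */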
1818 target = expand_shift (LSHIFT_EXPR, mode, target,
1819 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1820 return expand_shift (RSHIFT_EXPR, mode, target,
1821 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1822 }
1823
1824 /* If OP0 is a multi-word register, narrow it to the affected word.
1825 If the region spans two words, defer to extract_split_bit_field. */
1826 if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1827 {
1828 if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1829 {
1830 if (!fallback_p)
1831 return NULL_RTX;
1832 target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp,
1833 reverse);
1834 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1835 }
1836 op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
1837 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1838 bitnum %= BITS_PER_WORD;
1839 }
1840
1841 /* From here on we know the desired field is smaller than a word.
1842 If OP0 is a register, it too fits within a word. */
1843 enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1844 extraction_insn extv;
1845 if (!MEM_P (op0)
1846 && !reverse
1847 /* ??? We could limit the structure size to the part of OP0 that
1848 contains the field, with appropriate checks for endianness
1849 and TRULY_NOOP_TRUNCATION. */
1850 && get_best_reg_extraction_insn (&extv, pattern,
1851 GET_MODE_BITSIZE (GET_MODE (op0)),
1852 tmode))
1853 {
1854 rtx result = extract_bit_field_using_extv (&extv, op0, bitsize, bitnum,
1855 unsignedp, target, mode,
1856 tmode);
1857 if (result)
1858 return result;
1859 }
1860
1861 /* If OP0 is a memory, try copying it to a register and seeing if a
1862 cheap register alternative is available. */
1863 if (MEM_P (op0) && !reverse)
1864 {
1865 if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1866 tmode))
1867 {
1868 rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
1869 bitnum, unsignedp,
1870 target, mode,
1871 tmode);
1872 if (result)
1873 return result;
1874 }
1875
1876 rtx_insn *last = get_last_insn ();
1877
1878 /* Try loading part of OP0 into a register and extracting the
1879 bitfield from that. */
1880 unsigned HOST_WIDE_INT bitpos;
1881 rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1882 0, 0, tmode, &bitpos);
1883 if (xop0)
1884 {
1885 xop0 = copy_to_reg (xop0);
1886 rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1887 unsignedp, target,
1888 mode, tmode, reverse, false, NULL);
1889 if (result)
1890 return result;
1891 delete_insns_since (last);
1892 }
1893 }
1894
1895 if (!fallback_p)
1896 return NULL;
1897
1898 /* Find a correspondingly-sized integer field, so we can apply
1899 shifts and masks to it. */
1900 scalar_int_mode int_mode;
1901 if (!int_mode_for_mode (tmode).exists (&int_mode))
1902 /* If this fails, we should probably push op0 out to memory and then
1903 do a load. */
1904 int_mode = int_mode_for_mode (mode).require ();
1905
1906 target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum, target,
1907 unsignedp, reverse);
1908
1909 /* Complex values must be reversed piecewise, so we need to undo the global
1910 reversal, convert to the complex mode and reverse again. */
1911 if (reverse && COMPLEX_MODE_P (tmode))
1912 {
1913 target = flip_storage_order (int_mode, target);
1914 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
1915 target = flip_storage_order (tmode, target);
1916 }
1917 else
1918 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
1919
1920 return target;
1921 }
1922
1923 /* Generate code to extract a byte-field from STR_RTX
1924 containing BITSIZE bits, starting at BITNUM,
1925 and put it in TARGET if possible (if TARGET is nonzero).
1926 Regardless of TARGET, we return the rtx for where the value is placed.
1927
1928 STR_RTX is the structure containing the byte (a REG or MEM).
1929 UNSIGNEDP is nonzero if this is an unsigned bit field.
1930 MODE is the natural mode of the field value once extracted.
1931 TMODE is the mode the caller would like the value to have;
1932 but the value may be returned with type MODE instead.
1933
1934 If REVERSE is true, the extraction is to be done in reverse order.
1935
1936 If a TARGET is specified and we can store in it at no extra cost,
1937 we do so, and return TARGET.
1938 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1939 if they are equally easy. */
1940
1941 rtx
1942 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1943 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1944 machine_mode mode, machine_mode tmode, bool reverse,
1945 rtx *alt_rtl)
1946 {
1947 machine_mode mode1;
1948
1949 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1950 if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
1951 mode1 = GET_MODE (str_rtx);
1952 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1953 mode1 = GET_MODE (target);
1954 else
1955 mode1 = tmode;
1956
1957 if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
1958 {
1959 /* Extraction of a full MODE1 value can be done with a simple load.
1960 We know here that the field can be accessed with a single
1961 instruction. For targets that support unaligned memory,
1962 an unaligned access may be necessary. */
1963 if (bitsize == GET_MODE_BITSIZE (mode1))
1964 {
1965 rtx result = adjust_bitfield_address (str_rtx, mode1,
1966 bitnum / BITS_PER_UNIT);
1967 if (reverse)
1968 result = flip_storage_order (mode1, result);
1969 gcc_assert (bitnum % BITS_PER_UNIT == 0);
1970 return convert_extracted_bit_field (result, mode, tmode, unsignedp);
1971 }
1972
1973 str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum,
1974 &bitnum);
1975 gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (mode1));
1976 str_rtx = copy_to_reg (str_rtx);
1977 }
1978
1979 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1980 target, mode, tmode, reverse, true, alt_rtl);
1981 }
1982 \f
1983 /* Use shifts and boolean operations to extract a field of BITSIZE bits
1984 from bit BITNUM of OP0.
1985
1986 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1987 If REVERSE is true, the extraction is to be done in reverse order.
1988
1989 If TARGET is nonzero, attempts to store the value there
1990 and return TARGET, but this is not guaranteed.
1991 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1992
1993 static rtx
1994 extract_fixed_bit_field (machine_mode tmode, rtx op0,
1995 unsigned HOST_WIDE_INT bitsize,
1996 unsigned HOST_WIDE_INT bitnum, rtx target,
1997 int unsignedp, bool reverse)
1998 {
1999 if (MEM_P (op0))
2000 {
2001 machine_mode mode
2002 = get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0), word_mode,
2003 MEM_VOLATILE_P (op0));
2004
2005 if (mode == VOIDmode)
2006 /* The only way this should occur is if the field spans word
2007 boundaries. */
2008 return extract_split_bit_field (op0, bitsize, bitnum, unsignedp,
2009 reverse);
2010
2011 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
2012 }
2013
2014 return extract_fixed_bit_field_1 (tmode, op0, bitsize, bitnum,
2015 target, unsignedp, reverse);
2016 }
2017
2018 /* Helper function for extract_fixed_bit_field, extracts
2019 the bit field always using the MODE of OP0. */
2020
2021 static rtx
2022 extract_fixed_bit_field_1 (machine_mode tmode, rtx op0,
2023 unsigned HOST_WIDE_INT bitsize,
2024 unsigned HOST_WIDE_INT bitnum, rtx target,
2025 int unsignedp, bool reverse)
2026 {
2027 machine_mode mode = GET_MODE (op0);
2028 gcc_assert (SCALAR_INT_MODE_P (mode));
2029
2030 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
2031 for invalid input, such as extract equivalent of f5 from
2032 gcc.dg/pr48335-2.c. */
2033
2034 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2035 /* BITNUM is the distance between our msb and that of OP0.
2036 Convert it to the distance from the lsb. */
2037 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
2038
2039 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
2040 We have reduced the big-endian case to the little-endian case. */
2041 if (reverse)
2042 op0 = flip_storage_order (mode, op0);
2043
2044 if (unsignedp)
2045 {
2046 if (bitnum)
2047 {
2048 /* If the field does not already start at the lsb,
2049 shift it so it does. */
2050 /* Maybe propagate the target for the shift. */
2051 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2052 if (tmode != mode)
2053 subtarget = 0;
2054 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
2055 }
2056 /* Convert the value to the desired mode. */
2057 if (mode != tmode)
2058 op0 = convert_to_mode (tmode, op0, 1);
2059
2060 /* Unless the msb of the field used to be the msb when we shifted,
2061 mask out the upper bits. */
2062
2063 if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
2064 return expand_binop (GET_MODE (op0), and_optab, op0,
2065 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
2066 target, 1, OPTAB_LIB_WIDEN);
2067 return op0;
2068 }
2069
2070 /* To extract a signed bit-field, first shift its msb to the msb of the word,
2071 then arithmetic-shift its lsb to the lsb of the word. */
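/* Illustrative example: a 5-bit field whose lsb is at bit 1 fits in
   QImode, so it is shifted left by 8 - (5 + 1) = 2 to put its msb at
   bit 7, then arithmetic-shifted right by 8 - 5 = 3 to sign-extend.  */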
2072 op0 = force_reg (mode, op0);
2073
2074 /* Find the narrowest integer mode that contains the field. */
2075
2076 FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
2077 if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
2078 {
2079 op0 = convert_to_mode (mode, op0, 0);
2080 break;
2081 }
2082
2083 if (mode != tmode)
2084 target = 0;
2085
2086 if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
2087 {
2088 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
2089 /* Maybe propagate the target for the shift. */
2090 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2091 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
2092 }
2093
2094 return expand_shift (RSHIFT_EXPR, mode, op0,
2095 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
2096 }
2097
2098 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
2099 VALUE << BITPOS. */
2100
2101 static rtx
2102 lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
2103 int bitpos)
2104 {
2105 return immed_wide_int_const (wi::lshift (value, bitpos), mode);
2106 }
2107 \f
2108 /* Extract a bit field that is split across two words
2109 and return an RTX for the result.
2110
2111 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2112 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2113 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
2114
2115 If REVERSE is true, the extraction is to be done in reverse order. */
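/* Illustrative example: with 32-bit words, a 16-bit field at BITPOS 24
   spans two words and is extracted as two 8-bit parts, one from each
   word, which are shifted into place and combined with IOR.  */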
2116
2117 static rtx
2118 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2119 unsigned HOST_WIDE_INT bitpos, int unsignedp,
2120 bool reverse)
2121 {
2122 unsigned int unit;
2123 unsigned int bitsdone = 0;
2124 rtx result = NULL_RTX;
2125 int first = 1;
2126
2127 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2128 much at a time. */
2129 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2130 unit = BITS_PER_WORD;
2131 else
2132 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2133
2134 while (bitsdone < bitsize)
2135 {
2136 unsigned HOST_WIDE_INT thissize;
2137 rtx part, word;
2138 unsigned HOST_WIDE_INT thispos;
2139 unsigned HOST_WIDE_INT offset;
2140
2141 offset = (bitpos + bitsdone) / unit;
2142 thispos = (bitpos + bitsdone) % unit;
2143
2144 /* THISSIZE must not overrun a word boundary. Otherwise,
2145 extract_fixed_bit_field will call us again, and we will mutually
2146 recurse forever. */
2147 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2148 thissize = MIN (thissize, unit - thispos);
2149
2150 /* If OP0 is a register, then handle OFFSET here. */
2151 if (SUBREG_P (op0) || REG_P (op0))
2152 {
2153 word = operand_subword_force (op0, offset, GET_MODE (op0));
2154 offset = 0;
2155 }
2156 else
2157 word = op0;
2158
2159 /* Extract the parts in bit-counting order,
2160 whose meaning is determined by BYTES_BIG_ENDIAN.
2161 OFFSET is in UNITs, and UNIT is in bits. */
2162 part = extract_fixed_bit_field (word_mode, word, thissize,
2163 offset * unit + thispos, 0, 1, reverse);
2164 bitsdone += thissize;
2165
2166 /* Shift this part into place for the result. */
2167 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2168 {
2169 if (bitsize != bitsdone)
2170 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2171 bitsize - bitsdone, 0, 1);
2172 }
2173 else
2174 {
2175 if (bitsdone != thissize)
2176 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2177 bitsdone - thissize, 0, 1);
2178 }
2179
2180 if (first)
2181 result = part;
2182 else
2183 /* Combine the parts with bitwise or. This works
2184 because we extracted each part as an unsigned bit field. */
2185 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2186 OPTAB_LIB_WIDEN);
2187
2188 first = 0;
2189 }
2190
2191 /* Unsigned bit field: we are done. */
2192 if (unsignedp)
2193 return result;
2194 /* Signed bit field: sign-extend with two arithmetic shifts. */
2195 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2196 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2197 return expand_shift (RSHIFT_EXPR, word_mode, result,
2198 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2199 }
2200 \f
2201 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2202 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2203 MODE, fill the upper bits with zeros. Fail if the layout of either
2204 mode is unknown (as for CC modes) or if the extraction would involve
2205 unprofitable mode punning. Return the value on success, otherwise
2206 return null.
2207
2208 This is different from gen_lowpart* in these respects:
2209
2210 - the returned value must always be considered an rvalue
2211
2212 - when MODE is wider than SRC_MODE, the extraction involves
2213 a zero extension
2214
2215 - when MODE is smaller than SRC_MODE, the extraction involves
2216 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2217
2218 In other words, this routine performs a computation, whereas the
2219 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2220 operations. */
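/* For example (illustrative), reading a DImode value in SImode keeps the
   low 32 bits (a truncation), while reading an SImode value in DImode
   zero-fills the upper 32 bits; both remain subject to the
   mode-tieability checks below.  */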
2221
2222 rtx
2223 extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
2224 {
2225 machine_mode int_mode, src_int_mode;
2226
2227 if (mode == src_mode)
2228 return src;
2229
2230 if (CONSTANT_P (src))
2231 {
2232 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2233 fails, it will happily create (subreg (symbol_ref)) or similar
2234 invalid SUBREGs. */
2235 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2236 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2237 if (ret)
2238 return ret;
2239
2240 if (GET_MODE (src) == VOIDmode
2241 || !validate_subreg (mode, src_mode, src, byte))
2242 return NULL_RTX;
2243
2244 src = force_reg (GET_MODE (src), src);
2245 return gen_rtx_SUBREG (mode, src, byte);
2246 }
2247
2248 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2249 return NULL_RTX;
2250
2251 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2252 && MODES_TIEABLE_P (mode, src_mode))
2253 {
2254 rtx x = gen_lowpart_common (mode, src);
2255 if (x)
2256 return x;
2257 }
2258
2259 if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
2260 || !int_mode_for_mode (mode).exists (&int_mode))
2261 return NULL_RTX;
2262
2263 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2264 return NULL_RTX;
2265 if (!MODES_TIEABLE_P (int_mode, mode))
2266 return NULL_RTX;
2267
2268 src = gen_lowpart (src_int_mode, src);
2269 src = convert_modes (int_mode, src_int_mode, src, true);
2270 src = gen_lowpart (mode, src);
2271 return src;
2272 }
2273 \f
2274 /* Add INC into TARGET. */
2275
2276 void
2277 expand_inc (rtx target, rtx inc)
2278 {
2279 rtx value = expand_binop (GET_MODE (target), add_optab,
2280 target, inc,
2281 target, 0, OPTAB_LIB_WIDEN);
2282 if (value != target)
2283 emit_move_insn (target, value);
2284 }
2285
2286 /* Subtract DEC from TARGET. */
2287
2288 void
2289 expand_dec (rtx target, rtx dec)
2290 {
2291 rtx value = expand_binop (GET_MODE (target), sub_optab,
2292 target, dec,
2293 target, 0, OPTAB_LIB_WIDEN);
2294 if (value != target)
2295 emit_move_insn (target, value);
2296 }
2297 \f
2298 /* Output a shift instruction for expression code CODE,
2299 with SHIFTED being the rtx for the value to shift,
2300 and AMOUNT the rtx for the amount to shift by.
2301 Store the result in the rtx TARGET, if that is convenient.
2302 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2303 Return the rtx for where the value is.
2304 If that cannot be done, abort the compilation unless MAY_FAIL is true,
2305 in which case 0 is returned. */
2306
2307 static rtx
2308 expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
2309 rtx amount, rtx target, int unsignedp, bool may_fail = false)
2310 {
2311 rtx op1, temp = 0;
2312 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2313 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2314 optab lshift_optab = ashl_optab;
2315 optab rshift_arith_optab = ashr_optab;
2316 optab rshift_uns_optab = lshr_optab;
2317 optab lrotate_optab = rotl_optab;
2318 optab rrotate_optab = rotr_optab;
2319 machine_mode op1_mode;
2320 machine_mode scalar_mode = mode;
2321 int attempt;
2322 bool speed = optimize_insn_for_speed_p ();
2323
2324 if (VECTOR_MODE_P (mode))
2325 scalar_mode = GET_MODE_INNER (mode);
2326 op1 = amount;
2327 op1_mode = GET_MODE (op1);
2328
2329 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2330 shift amount is a vector, use the vector/vector shift patterns. */
2331 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2332 {
2333 lshift_optab = vashl_optab;
2334 rshift_arith_optab = vashr_optab;
2335 rshift_uns_optab = vlshr_optab;
2336 lrotate_optab = vrotl_optab;
2337 rrotate_optab = vrotr_optab;
2338 }
2339
2340 /* We previously detected shift counts computed by NEGATE_EXPR
2341 and shifted in the other direction, but that does not work
2342 on all machines. */
2343
2344 if (SHIFT_COUNT_TRUNCATED)
2345 {
2346 if (CONST_INT_P (op1)
2347 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2348 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2349 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2350 % GET_MODE_BITSIZE (scalar_mode));
2351 else if (GET_CODE (op1) == SUBREG
2352 && subreg_lowpart_p (op1)
2353 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2354 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2355 op1 = SUBREG_REG (op1);
2356 }
2357
2358 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
2359 prefer left rotation; if op1 is from bitsize / 2 + 1 to
2360 bitsize - 1, use the other direction of rotate with a 1 .. bitsize / 2 - 1
2361 amount instead. */
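/* For instance (illustrative), in a 32-bit mode a rotate right by 24
   becomes a rotate left by 8, and a rotate left by 20 becomes a rotate
   right by 12.  */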
2362 if (rotate
2363 && CONST_INT_P (op1)
2364 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2365 GET_MODE_BITSIZE (scalar_mode) - 1))
2366 {
2367 op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2368 left = !left;
2369 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2370 }
2371
2372 /* Rotation of 16-bit values by 8 bits is effectively equivalent to a bswaphi.
2373 Note that this is not the case for bigger values. For instance a rotation
2374 of 0x01020304 by 16 bits gives 0x03040102 which is different from
2375 0x04030201 (bswapsi). */
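/* For the 16-bit case itself, e.g. 0x1234 rotated by 8 bits is 0x3412,
   which is exactly bswaphi (illustrative).  */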
2376 if (rotate
2377 && CONST_INT_P (op1)
2378 && INTVAL (op1) == BITS_PER_UNIT
2379 && GET_MODE_SIZE (scalar_mode) == 2
2380 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing)
2381 return expand_unop (HImode, bswap_optab, shifted, NULL_RTX,
2382 unsignedp);
2383
2384 if (op1 == const0_rtx)
2385 return shifted;
2386
2387 /* Check whether it's cheaper to implement a left shift by a constant
2388 bit count as a sequence of additions. */
2389 if (code == LSHIFT_EXPR
2390 && CONST_INT_P (op1)
2391 && INTVAL (op1) > 0
2392 && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
2393 && INTVAL (op1) < MAX_BITS_PER_WORD
2394 && (shift_cost (speed, mode, INTVAL (op1))
2395 > INTVAL (op1) * add_cost (speed, mode))
2396 && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2397 {
2398 int i;
2399 for (i = 0; i < INTVAL (op1); i++)
2400 {
2401 temp = force_reg (mode, shifted);
2402 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2403 unsignedp, OPTAB_LIB_WIDEN);
2404 }
2405 return shifted;
2406 }
2407
2408 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2409 {
2410 enum optab_methods methods;
2411
2412 if (attempt == 0)
2413 methods = OPTAB_DIRECT;
2414 else if (attempt == 1)
2415 methods = OPTAB_WIDEN;
2416 else
2417 methods = OPTAB_LIB_WIDEN;
2418
2419 if (rotate)
2420 {
2421 /* Widening does not work for rotation. */
2422 if (methods == OPTAB_WIDEN)
2423 continue;
2424 else if (methods == OPTAB_LIB_WIDEN)
2425 {
2426 /* If we have been unable to open-code this by a rotation,
2427 do it as the IOR of two shifts. I.e., to rotate A
2428 by N bits, compute
2429 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2430 where C is the bitsize of A.
2431
2432 It is theoretically possible that the target machine might
2433 not be able to perform either shift and hence we would
2434 be making two libcalls rather than just the one for the
2435 shift (similarly if IOR could not be done). We will allow
2436 this extremely unlikely lossage to avoid complicating the
2437 code below. */
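/* Illustrative instance: for C == 32 and a constant N == 8 this computes
   (A << 8) | ((unsigned) A >> 24), since (-8) & 31 == 24; the masking
   also keeps the second shift amount in range for a variable N.  */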
2438
2439 rtx subtarget = target == shifted ? 0 : target;
2440 rtx new_amount, other_amount;
2441 rtx temp1;
2442
2443 new_amount = op1;
2444 if (op1 == const0_rtx)
2445 return shifted;
2446 else if (CONST_INT_P (op1))
2447 other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode)
2448 - INTVAL (op1));
2449 else
2450 {
2451 other_amount
2452 = simplify_gen_unary (NEG, GET_MODE (op1),
2453 op1, GET_MODE (op1));
2454 HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
2455 other_amount
2456 = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2457 gen_int_mode (mask, GET_MODE (op1)));
2458 }
2459
2460 shifted = force_reg (mode, shifted);
2461
2462 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2463 mode, shifted, new_amount, 0, 1);
2464 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2465 mode, shifted, other_amount,
2466 subtarget, 1);
2467 return expand_binop (mode, ior_optab, temp, temp1, target,
2468 unsignedp, methods);
2469 }
2470
2471 temp = expand_binop (mode,
2472 left ? lrotate_optab : rrotate_optab,
2473 shifted, op1, target, unsignedp, methods);
2474 }
2475 else if (unsignedp)
2476 temp = expand_binop (mode,
2477 left ? lshift_optab : rshift_uns_optab,
2478 shifted, op1, target, unsignedp, methods);
2479
2480 /* Do arithmetic shifts.
2481 Also, if we are going to widen the operand, we can just as well
2482 use an arithmetic right-shift instead of a logical one. */
2483 if (temp == 0 && ! rotate
2484 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2485 {
2486 enum optab_methods methods1 = methods;
2487
2488 /* If trying to widen a log shift to an arithmetic shift,
2489 don't accept an arithmetic shift of the same size. */
2490 if (unsignedp)
2491 methods1 = OPTAB_MUST_WIDEN;
2492
2493 /* Arithmetic shift */
2494
2495 temp = expand_binop (mode,
2496 left ? lshift_optab : rshift_arith_optab,
2497 shifted, op1, target, unsignedp, methods1);
2498 }
2499
2500 /* We used to try extzv here for logical right shifts, but that was
2501 only useful for one machine, the VAX, and caused poor code
2502 generation there for lshrdi3, so the code was deleted and a
2503 define_expand for lshrsi3 was added to vax.md. */
2504 }
2505
2506 gcc_assert (temp != NULL_RTX || may_fail);
2507 return temp;
2508 }
2509
2510 /* Output a shift instruction for expression code CODE,
2511 with SHIFTED being the rtx for the value to shift,
2512 and AMOUNT the amount to shift by.
2513 Store the result in the rtx TARGET, if that is convenient.
2514 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2515 Return the rtx for where the value is. */
2516
2517 rtx
2518 expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2519 int amount, rtx target, int unsignedp)
2520 {
2521 return expand_shift_1 (code, mode,
2522 shifted, GEN_INT (amount), target, unsignedp);
2523 }
2524
2525 /* Likewise, but return 0 if that cannot be done. */
2526
2527 static rtx
2528 maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2529 int amount, rtx target, int unsignedp)
2530 {
2531 return expand_shift_1 (code, mode,
2532 shifted, GEN_INT (amount), target, unsignedp, true);
2533 }
2534
2535 /* Output a shift instruction for expression code CODE,
2536 with SHIFTED being the rtx for the value to shift,
2537 and AMOUNT the tree for the amount to shift by.
2538 Store the result in the rtx TARGET, if that is convenient.
2539 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2540 Return the rtx for where the value is. */
2541
2542 rtx
2543 expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2544 tree amount, rtx target, int unsignedp)
2545 {
2546 return expand_shift_1 (code, mode,
2547 shifted, expand_normal (amount), target, unsignedp);
2548 }
2549
2550 \f
2551 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2552 const struct mult_cost *, machine_mode mode);
2553 static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2554 const struct algorithm *, enum mult_variant);
2555 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2556 static rtx extract_high_half (machine_mode, rtx);
2557 static rtx expmed_mult_highpart (machine_mode, rtx, rtx, rtx, int, int);
2558 static rtx expmed_mult_highpart_optab (machine_mode, rtx, rtx, rtx,
2559 int, int);
2560 /* Compute and return the best algorithm for multiplying by T.
2561 The algorithm must cost less than COST_LIMIT.
2562 If retval.cost >= COST_LIMIT, no algorithm was found and all
2563 other fields of the returned struct are undefined.
2564 MODE is the machine mode of the multiplication. */
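/* Illustrative example: T == 10 can be synthesized as ((X << 2) + X) << 1,
   i.e. an alg_add_t2_m step for 5 followed by an alg_shift step; whether
   this beats a hardware multiply depends on the target's cost model.  */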
2565
2566 static void
2567 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2568 const struct mult_cost *cost_limit, machine_mode mode)
2569 {
2570 int m;
2571 struct algorithm *alg_in, *best_alg;
2572 struct mult_cost best_cost;
2573 struct mult_cost new_limit;
2574 int op_cost, op_latency;
2575 unsigned HOST_WIDE_INT orig_t = t;
2576 unsigned HOST_WIDE_INT q;
2577 int maxm, hash_index;
2578 bool cache_hit = false;
2579 enum alg_code cache_alg = alg_zero;
2580 bool speed = optimize_insn_for_speed_p ();
2581 machine_mode imode;
2582 struct alg_hash_entry *entry_ptr;
2583
2584 /* Indicate that no algorithm is yet found. If no algorithm
2585 is found, this value will be returned and indicate failure. */
2586 alg_out->cost.cost = cost_limit->cost + 1;
2587 alg_out->cost.latency = cost_limit->latency + 1;
2588
2589 if (cost_limit->cost < 0
2590 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2591 return;
2592
2593 /* Be prepared for vector modes. */
2594 imode = GET_MODE_INNER (mode);
2595
2596 maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2597
2598 /* Restrict the bits of "t" to the multiplication's mode. */
2599 t &= GET_MODE_MASK (imode);
2600
2601 /* t == 1 can be done in zero cost. */
2602 if (t == 1)
2603 {
2604 alg_out->ops = 1;
2605 alg_out->cost.cost = 0;
2606 alg_out->cost.latency = 0;
2607 alg_out->op[0] = alg_m;
2608 return;
2609 }
2610
2611 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2612 fail now. */
2613 if (t == 0)
2614 {
2615 if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2616 return;
2617 else
2618 {
2619 alg_out->ops = 1;
2620 alg_out->cost.cost = zero_cost (speed);
2621 alg_out->cost.latency = zero_cost (speed);
2622 alg_out->op[0] = alg_zero;
2623 return;
2624 }
2625 }
2626
2627 /* We'll be needing a couple extra algorithm structures now. */
2628
2629 alg_in = XALLOCA (struct algorithm);
2630 best_alg = XALLOCA (struct algorithm);
2631 best_cost = *cost_limit;
2632
2633 /* Compute the hash index. */
2634 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2635
2636 /* See if we already know what to do for T. */
2637 entry_ptr = alg_hash_entry_ptr (hash_index);
2638 if (entry_ptr->t == t
2639 && entry_ptr->mode == mode
2640 && entry_ptr->speed == speed
2641 && entry_ptr->alg != alg_unknown)
2642 {
2643 cache_alg = entry_ptr->alg;
2644
2645 if (cache_alg == alg_impossible)
2646 {
2647 /* The cache tells us that it's impossible to synthesize
2648 multiplication by T within entry_ptr->cost. */
2649 if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2650 /* COST_LIMIT is at least as restrictive as the one
2651 recorded in the hash table, in which case we have no
2652 hope of synthesizing a multiplication. Just
2653 return. */
2654 return;
2655
2656 /* If we get here, COST_LIMIT is less restrictive than the
2657 one recorded in the hash table, so we may be able to
2658 synthesize a multiplication. Proceed as if we didn't
2659 have the cache entry. */
2660 }
2661 else
2662 {
2663 if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2664 /* The cached algorithm shows that this multiplication
2665 requires more cost than COST_LIMIT. Just return. This
2666 way, we don't clobber this cache entry with
2667 alg_impossible but retain useful information. */
2668 return;
2669
2670 cache_hit = true;
2671
2672 switch (cache_alg)
2673 {
2674 case alg_shift:
2675 goto do_alg_shift;
2676
2677 case alg_add_t_m2:
2678 case alg_sub_t_m2:
2679 goto do_alg_addsub_t_m2;
2680
2681 case alg_add_factor:
2682 case alg_sub_factor:
2683 goto do_alg_addsub_factor;
2684
2685 case alg_add_t2_m:
2686 goto do_alg_add_t2_m;
2687
2688 case alg_sub_t2_m:
2689 goto do_alg_sub_t2_m;
2690
2691 default:
2692 gcc_unreachable ();
2693 }
2694 }
2695 }
2696
2697 /* If we have a group of zero bits at the low-order part of T, try
2698 multiplying by the remaining bits and then doing a shift. */
2699
2700 if ((t & 1) == 0)
2701 {
2702 do_alg_shift:
2703 m = ctz_or_zero (t); /* m = number of low zero bits */
2704 if (m < maxm)
2705 {
2706 q = t >> m;
2707 /* The function expand_shift will choose between a shift and
2708 a sequence of additions, so the observed cost is given as
2709 MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)). */
2710 op_cost = m * add_cost (speed, mode);
2711 if (shift_cost (speed, mode, m) < op_cost)
2712 op_cost = shift_cost (speed, mode, m);
2713 new_limit.cost = best_cost.cost - op_cost;
2714 new_limit.latency = best_cost.latency - op_cost;
2715 synth_mult (alg_in, q, &new_limit, mode);
2716
2717 alg_in->cost.cost += op_cost;
2718 alg_in->cost.latency += op_cost;
2719 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2720 {
2721 best_cost = alg_in->cost;
2722 std::swap (alg_in, best_alg);
2723 best_alg->log[best_alg->ops] = m;
2724 best_alg->op[best_alg->ops] = alg_shift;
2725 }
2726
2727 /* See if treating ORIG_T as a signed number yields a better
2728 sequence. Try this sequence only for a negative ORIG_T
2729 as it would be useless for a non-negative ORIG_T. */
2730 if ((HOST_WIDE_INT) orig_t < 0)
2731 {
2732 /* Shift ORIG_T as follows because a right shift of a
2733 negative-valued signed type is implementation
2734 defined. */
2735 q = ~(~orig_t >> m);
2736 /* The function expand_shift will choose between a shift
2737 and a sequence of additions, so the observed cost is
2738 given as MIN (m * add_cost(speed, mode),
2739 shift_cost(speed, mode, m)). */
2740 op_cost = m * add_cost (speed, mode);
2741 if (shift_cost (speed, mode, m) < op_cost)
2742 op_cost = shift_cost (speed, mode, m);
2743 new_limit.cost = best_cost.cost - op_cost;
2744 new_limit.latency = best_cost.latency - op_cost;
2745 synth_mult (alg_in, q, &new_limit, mode);
2746
2747 alg_in->cost.cost += op_cost;
2748 alg_in->cost.latency += op_cost;
2749 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2750 {
2751 best_cost = alg_in->cost;
2752 std::swap (alg_in, best_alg);
2753 best_alg->log[best_alg->ops] = m;
2754 best_alg->op[best_alg->ops] = alg_shift;
2755 }
2756 }
2757 }
2758 if (cache_hit)
2759 goto done;
2760 }
2761
2762 /* If we have an odd number, add or subtract one. */
2763 if ((t & 1) != 0)
2764 {
2765 unsigned HOST_WIDE_INT w;
2766
2767 do_alg_addsub_t_m2:
2768 for (w = 1; (w & t) != 0; w <<= 1)
2769 ;
2770 /* If T was -1, then W will be zero after the loop. This is another
2771 case where T ends with ...111. Handling this with (T + 1) and
2772 subtracting 1 produces slightly better code and results in much
2773 faster algorithm selection than treating it like the ...0111 case
2774 below. */
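/* E.g. for T == 7 (so W == 8 here) the preferred sequence is X * 8 - X
   rather than building 7 bit by bit (illustrative).  */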
2775 if (w == 0
2776 || (w > 2
2777 /* Reject the case where t is 3.
2778 Thus we prefer addition in that case. */
2779 && t != 3))
2780 {
2781 /* T ends with ...111. Multiply by (T + 1) and subtract T. */
2782
2783 op_cost = add_cost (speed, mode);
2784 new_limit.cost = best_cost.cost - op_cost;
2785 new_limit.latency = best_cost.latency - op_cost;
2786 synth_mult (alg_in, t + 1, &new_limit, mode);
2787
2788 alg_in->cost.cost += op_cost;
2789 alg_in->cost.latency += op_cost;
2790 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2791 {
2792 best_cost = alg_in->cost;
2793 std::swap (alg_in, best_alg);
2794 best_alg->log[best_alg->ops] = 0;
2795 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2796 }
2797 }
2798 else
2799 {
2800 /* T ends with ...01 or ...011. Multiply by (T - 1) and add T. */
2801
2802 op_cost = add_cost (speed, mode);
2803 new_limit.cost = best_cost.cost - op_cost;
2804 new_limit.latency = best_cost.latency - op_cost;
2805 synth_mult (alg_in, t - 1, &new_limit, mode);
2806
2807 alg_in->cost.cost += op_cost;
2808 alg_in->cost.latency += op_cost;
2809 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2810 {
2811 best_cost = alg_in->cost;
2812 std::swap (alg_in, best_alg);
2813 best_alg->log[best_alg->ops] = 0;
2814 best_alg->op[best_alg->ops] = alg_add_t_m2;
2815 }
2816 }
2817
2818 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2819 quickly with a - a * n for some appropriate constant n. */
2820 m = exact_log2 (-orig_t + 1);
2821 if (m >= 0 && m < maxm)
2822 {
2823 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2824 /* If the target has a cheap shift-and-subtract insn use
2825 that in preference to a shift insn followed by a sub insn.
2826 Assume that the shift-and-sub is "atomic" with a latency
2827 equal to its cost; otherwise assume that on superscalar
2828 hardware the shift may be executed concurrently with the
2829 earlier steps in the algorithm. */
2830 if (shiftsub1_cost (speed, mode, m) <= op_cost)
2831 {
2832 op_cost = shiftsub1_cost (speed, mode, m);
2833 op_latency = op_cost;
2834 }
2835 else
2836 op_latency = add_cost (speed, mode);
2837
2838 new_limit.cost = best_cost.cost - op_cost;
2839 new_limit.latency = best_cost.latency - op_latency;
2840 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2841 &new_limit, mode);
2842
2843 alg_in->cost.cost += op_cost;
2844 alg_in->cost.latency += op_latency;
2845 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2846 {
2847 best_cost = alg_in->cost;
2848 std::swap (alg_in, best_alg);
2849 best_alg->log[best_alg->ops] = m;
2850 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2851 }
2852 }
2853
2854 if (cache_hit)
2855 goto done;
2856 }
2857
2858 /* Look for factors of t of the form
2859 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2860 If we find such a factor, we can multiply by t using an algorithm that
2861 multiplies by q, shift the result by m and add/subtract it to itself.
2862
2863 We search for large factors first and loop down, even if large factors
2864 are less probable than small; if we find a large factor we will find a
2865 good sequence quickly, and therefore be able to prune (by decreasing
2866 COST_LIMIT) the search. */
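/* Illustrative example: T == 15 factors as 3 * (2**2 + 1), so X * 15 can
   be computed as Y = X * 3 followed by Y + (Y << 2), subject to the cost
   comparison below.  */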
2867
2868 do_alg_addsub_factor:
2869 for (m = floor_log2 (t - 1); m >= 2; m--)
2870 {
2871 unsigned HOST_WIDE_INT d;
2872
2873 d = (HOST_WIDE_INT_1U << m) + 1;
2874 if (t % d == 0 && t > d && m < maxm
2875 && (!cache_hit || cache_alg == alg_add_factor))
2876 {
2877 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2878 if (shiftadd_cost (speed, mode, m) <= op_cost)
2879 op_cost = shiftadd_cost (speed, mode, m);
2880
2881 op_latency = op_cost;
2882
2883
2884 new_limit.cost = best_cost.cost - op_cost;
2885 new_limit.latency = best_cost.latency - op_latency;
2886 synth_mult (alg_in, t / d, &new_limit, mode);
2887
2888 alg_in->cost.cost += op_cost;
2889 alg_in->cost.latency += op_latency;
2890 if (alg_in->cost.latency < op_cost)
2891 alg_in->cost.latency = op_cost;
2892 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2893 {
2894 best_cost = alg_in->cost;
2895 std::swap (alg_in, best_alg);
2896 best_alg->log[best_alg->ops] = m;
2897 best_alg->op[best_alg->ops] = alg_add_factor;
2898 }
2899 /* Other factors will have been taken care of in the recursion. */
2900 break;
2901 }
2902
2903 d = (HOST_WIDE_INT_1U << m) - 1;
2904 if (t % d == 0 && t > d && m < maxm
2905 && (!cache_hit || cache_alg == alg_sub_factor))
2906 {
2907 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2908 if (shiftsub0_cost (speed, mode, m) <= op_cost)
2909 op_cost = shiftsub0_cost (speed, mode, m);
2910
2911 op_latency = op_cost;
2912
2913 new_limit.cost = best_cost.cost - op_cost;
2914 new_limit.latency = best_cost.latency - op_latency;
2915 synth_mult (alg_in, t / d, &new_limit, mode);
2916
2917 alg_in->cost.cost += op_cost;
2918 alg_in->cost.latency += op_latency;
2919 if (alg_in->cost.latency < op_cost)
2920 alg_in->cost.latency = op_cost;
2921 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2922 {
2923 best_cost = alg_in->cost;
2924 std::swap (alg_in, best_alg);
2925 best_alg->log[best_alg->ops] = m;
2926 best_alg->op[best_alg->ops] = alg_sub_factor;
2927 }
2928 break;
2929 }
2930 }
2931 if (cache_hit)
2932 goto done;
2933
2934 /* Try shift-and-add (load effective address) instructions,
2935 i.e. do a*3, a*5, a*9. */
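/* E.g. X * 9 becomes (X << 3) + X, found here via q = 8, m = 3
   (illustrative).  */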
2936 if ((t & 1) != 0)
2937 {
2938 do_alg_add_t2_m:
2939 q = t - 1;
2940 m = ctz_hwi (q);
2941 if (q && m < maxm)
2942 {
2943 op_cost = shiftadd_cost (speed, mode, m);
2944 new_limit.cost = best_cost.cost - op_cost;
2945 new_limit.latency = best_cost.latency - op_cost;
2946 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2947
2948 alg_in->cost.cost += op_cost;
2949 alg_in->cost.latency += op_cost;
2950 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2951 {
2952 best_cost = alg_in->cost;
2953 std::swap (alg_in, best_alg);
2954 best_alg->log[best_alg->ops] = m;
2955 best_alg->op[best_alg->ops] = alg_add_t2_m;
2956 }
2957 }
2958 if (cache_hit)
2959 goto done;
2960
2961 do_alg_sub_t2_m:
2962 q = t + 1;
2963 m = ctz_hwi (q);
2964 if (q && m < maxm)
2965 {
2966 op_cost = shiftsub0_cost (speed, mode, m);
2967 new_limit.cost = best_cost.cost - op_cost;
2968 new_limit.latency = best_cost.latency - op_cost;
2969 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2970
2971 alg_in->cost.cost += op_cost;
2972 alg_in->cost.latency += op_cost;
2973 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2974 {
2975 best_cost = alg_in->cost;
2976 std::swap (alg_in, best_alg);
2977 best_alg->log[best_alg->ops] = m;
2978 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2979 }
2980 }
2981 if (cache_hit)
2982 goto done;
2983 }
2984
2985 done:
2986 /* If best_cost has not decreased, we have not found any algorithm. */
2987 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2988 {
2989 /* We failed to find an algorithm. Record alg_impossible for
2990 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2991 we are asked to find an algorithm for T within the same or
2992 lower COST_LIMIT, we can immediately return to the
2993 caller. */
2994 entry_ptr->t = t;
2995 entry_ptr->mode = mode;
2996 entry_ptr->speed = speed;
2997 entry_ptr->alg = alg_impossible;
2998 entry_ptr->cost = *cost_limit;
2999 return;
3000 }
3001
3002 /* Cache the result. */
3003 if (!cache_hit)
3004 {
3005 entry_ptr->t = t;
3006 entry_ptr->mode = mode;
3007 entry_ptr->speed = speed;
3008 entry_ptr->alg = best_alg->op[best_alg->ops];
3009 entry_ptr->cost.cost = best_cost.cost;
3010 entry_ptr->cost.latency = best_cost.latency;
3011 }
3012
3013 /* If we are getting too long a sequence for `struct algorithm'
3014 to record, make this search fail. */
3015 if (best_alg->ops == MAX_BITS_PER_WORD)
3016 return;
3017
3018 /* Copy the algorithm from temporary space to the space at alg_out.
3019 We avoid using structure assignment because the majority of
3020 best_alg is normally undefined, and this is a critical function. */
3021 alg_out->ops = best_alg->ops + 1;
3022 alg_out->cost = best_cost;
3023 memcpy (alg_out->op, best_alg->op,
3024 alg_out->ops * sizeof *alg_out->op);
3025 memcpy (alg_out->log, best_alg->log,
3026 alg_out->ops * sizeof *alg_out->log);
3027 }
3028 \f
3029 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
3030 Try three variations:
3031
3032 - a shift/add sequence based on VAL itself
3033 - a shift/add sequence based on -VAL, followed by a negation
3034 - a shift/add sequence based on VAL - 1, followed by an addition.
3035
3036 Return true if the cheapest of these cost less than MULT_COST,
3037 describing the algorithm in *ALG and final fixup in *VARIANT. */
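/* For example (illustrative), for VAL == 7 the candidates include
   X * 8 - X (VAL itself), a sequence for -7 followed by a negation, and
   X * 6 + X (VAL - 1 followed by an addition).  */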
3038
3039 bool
3040 choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
3041 struct algorithm *alg, enum mult_variant *variant,
3042 int mult_cost)
3043 {
3044 struct algorithm alg2;
3045 struct mult_cost limit;
3046 int op_cost;
3047 bool speed = optimize_insn_for_speed_p ();
3048
3049 /* Fail quickly for impossible bounds. */
3050 if (mult_cost < 0)
3051 return false;
3052
3053 /* Ensure that mult_cost provides a reasonable upper bound.
3054 Any constant multiplication can be performed with fewer
3055 than 2 * bits additions. */
3056 op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
3057 if (mult_cost > op_cost)
3058 mult_cost = op_cost;
3059
3060 *variant = basic_variant;
3061 limit.cost = mult_cost;
3062 limit.latency = mult_cost;
3063 synth_mult (alg, val, &limit, mode);
3064
3065 /* This works only if the inverted value actually fits in an
3066 `unsigned int'. */
3067 if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
3068 {
3069 op_cost = neg_cost (speed, mode);
3070 if (MULT_COST_LESS (&alg->cost, mult_cost))
3071 {
3072 limit.cost = alg->cost.cost - op_cost;
3073 limit.latency = alg->cost.latency - op_cost;
3074 }
3075 else
3076 {
3077 limit.cost = mult_cost - op_cost;
3078 limit.latency = mult_cost - op_cost;
3079 }
3080
3081 synth_mult (&alg2, -val, &limit, mode);
3082 alg2.cost.cost += op_cost;
3083 alg2.cost.latency += op_cost;
3084 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3085 *alg = alg2, *variant = negate_variant;
3086 }
3087
3088 /* This proves very useful for division-by-constant. */
3089 op_cost = add_cost (speed, mode);
3090 if (MULT_COST_LESS (&alg->cost, mult_cost))
3091 {
3092 limit.cost = alg->cost.cost - op_cost;
3093 limit.latency = alg->cost.latency - op_cost;
3094 }
3095 else
3096 {
3097 limit.cost = mult_cost - op_cost;
3098 limit.latency = mult_cost - op_cost;
3099 }
3100
3101 synth_mult (&alg2, val - 1, &limit, mode);
3102 alg2.cost.cost += op_cost;
3103 alg2.cost.latency += op_cost;
3104 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3105 *alg = alg2, *variant = add_variant;
3106
3107 return MULT_COST_LESS (&alg->cost, mult_cost);
3108 }
3109
3110 /* A subroutine of expand_mult, used for constant multiplications.
3111 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
3112 convenient. Use the shift/add sequence described by ALG and apply
3113 the final fixup specified by VARIANT. */
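/* Illustrative example: for VAL == 5 the algorithm is alg_m (ACCUM = OP0)
   followed by alg_add_t2_m with log 2 (ACCUM = (ACCUM << 2) + OP0), and
   VAL_SO_FAR tracks 1 and then 5 for the final consistency check.  */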
3114
3115 static rtx
3116 expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
3117 rtx target, const struct algorithm *alg,
3118 enum mult_variant variant)
3119 {
3120 unsigned HOST_WIDE_INT val_so_far;
3121 rtx_insn *insn;
3122 rtx accum, tem;
3123 int opno;
3124 machine_mode nmode;
3125
3126 /* Avoid referencing memory over and over, and invalid sharing
3127 on SUBREGs. */
3128 op0 = force_reg (mode, op0);
3129
3130 /* ACCUM starts out either as OP0 or as a zero, depending on
3131 the first operation. */
3132
3133 if (alg->op[0] == alg_zero)
3134 {
3135 accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
3136 val_so_far = 0;
3137 }
3138 else if (alg->op[0] == alg_m)
3139 {
3140 accum = copy_to_mode_reg (mode, op0);
3141 val_so_far = 1;
3142 }
3143 else
3144 gcc_unreachable ();
3145
3146 for (opno = 1; opno < alg->ops; opno++)
3147 {
3148 int log = alg->log[opno];
3149 rtx shift_subtarget = optimize ? 0 : accum;
3150 rtx add_target
3151 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
3152 && !optimize)
3153 ? target : 0;
3154 rtx accum_target = optimize ? 0 : accum;
3155 rtx accum_inner;
3156
3157 switch (alg->op[opno])
3158 {
3159 case alg_shift:
3160 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3161 /* REG_EQUAL note will be attached to the following insn. */
3162 emit_move_insn (accum, tem);
3163 val_so_far <<= log;
3164 break;
3165
3166 case alg_add_t_m2:
3167 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3168 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3169 add_target ? add_target : accum_target);
3170 val_so_far += HOST_WIDE_INT_1U << log;
3171 break;
3172
3173 case alg_sub_t_m2:
3174 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3175 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3176 add_target ? add_target : accum_target);
3177 val_so_far -= HOST_WIDE_INT_1U << log;
3178 break;
3179
3180 case alg_add_t2_m:
3181 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3182 log, shift_subtarget, 0);
3183 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3184 add_target ? add_target : accum_target);
3185 val_so_far = (val_so_far << log) + 1;
3186 break;
3187
3188 case alg_sub_t2_m:
3189 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3190 log, shift_subtarget, 0);
3191 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3192 add_target ? add_target : accum_target);
3193 val_so_far = (val_so_far << log) - 1;
3194 break;
3195
3196 case alg_add_factor:
3197 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3198 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3199 add_target ? add_target : accum_target);
3200 val_so_far += val_so_far << log;
3201 break;
3202
3203 case alg_sub_factor:
3204 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3205 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3206 (add_target
3207 ? add_target : (optimize ? 0 : tem)));
3208 val_so_far = (val_so_far << log) - val_so_far;
3209 break;
3210
3211 default:
3212 gcc_unreachable ();
3213 }
3214
3215 if (SCALAR_INT_MODE_P (mode))
3216 {
3217 /* Write a REG_EQUAL note on the last insn so that we can cse
3218 multiplication sequences. Note that if ACCUM is a SUBREG,
3219 we've set the inner register and must properly indicate that. */
3220 tem = op0, nmode = mode;
3221 accum_inner = accum;
3222 if (GET_CODE (accum) == SUBREG)
3223 {
3224 accum_inner = SUBREG_REG (accum);
3225 nmode = GET_MODE (accum_inner);
3226 tem = gen_lowpart (nmode, op0);
3227 }
3228
3229 insn = get_last_insn ();
3230 set_dst_reg_note (insn, REG_EQUAL,
3231 gen_rtx_MULT (nmode, tem,
3232 gen_int_mode (val_so_far, nmode)),
3233 accum_inner);
3234 }
3235 }
3236
3237 if (variant == negate_variant)
3238 {
3239 val_so_far = -val_so_far;
3240 accum = expand_unop (mode, neg_optab, accum, target, 0);
3241 }
3242 else if (variant == add_variant)
3243 {
3244 val_so_far = val_so_far + 1;
3245 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3246 }
3247
3248 /* Compare only the bits of val and val_so_far that are significant
3249 in the result mode, to avoid sign-/zero-extension confusion. */
3250 nmode = GET_MODE_INNER (mode);
3251 val &= GET_MODE_MASK (nmode);
3252 val_so_far &= GET_MODE_MASK (nmode);
3253 gcc_assert (val == (HOST_WIDE_INT) val_so_far);
3254
3255 return accum;
3256 }
3257
3258 /* Perform a multiplication and return an rtx for the result.
3259 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3260 TARGET is a suggestion for where to store the result (an rtx).
3261
3262 We check specially for a constant integer as OP1.
3263 If you want this check for OP0 as well, then before calling
3264 you should swap the two operands if OP0 would be constant. */
3265
3266 rtx
3267 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3268 int unsignedp)
3269 {
3270 enum mult_variant variant;
3271 struct algorithm algorithm;
3272 rtx scalar_op1;
3273 int max_cost;
3274 bool speed = optimize_insn_for_speed_p ();
3275 bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3276
3277 if (CONSTANT_P (op0))
3278 std::swap (op0, op1);
3279
3280 /* For vectors, there are several simplifications that can be made if
3281 all elements of the vector constant are identical. */
3282 scalar_op1 = unwrap_const_vec_duplicate (op1);
3283
3284 if (INTEGRAL_MODE_P (mode))
3285 {
3286 rtx fake_reg;
3287 HOST_WIDE_INT coeff;
3288 bool is_neg;
3289 int mode_bitsize;
3290
3291 if (op1 == CONST0_RTX (mode))
3292 return op1;
3293 if (op1 == CONST1_RTX (mode))
3294 return op0;
3295 if (op1 == CONSTM1_RTX (mode))
3296 return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3297 op0, target, 0);
3298
3299 if (do_trapv)
3300 goto skip_synth;
3301
3302 /* If mode is integer vector mode, check if the backend supports
3303 vector lshift (by scalar or vector) at all. If not, we can't use
3304 synthesized multiply. */
3305 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
3306 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
3307 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
3308 goto skip_synth;
3309
3310 /* These are the operations that are potentially turned into
3311 a sequence of shifts and additions. */
3312 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3313
3314 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3315 less than or equal in size to `unsigned int' this doesn't matter.
3316 If the mode is larger than `unsigned int', then synth_mult works
3317 only if the constant value exactly fits in an `unsigned int' without
3318 any truncation. This means that multiplying by negative values does
3319 not work; results are off by 2^32 on a 32 bit machine. */
3320 if (CONST_INT_P (scalar_op1))
3321 {
3322 coeff = INTVAL (scalar_op1);
3323 is_neg = coeff < 0;
3324 }
3325 #if TARGET_SUPPORTS_WIDE_INT
3326 else if (CONST_WIDE_INT_P (scalar_op1))
3327 #else
3328 else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3329 #endif
3330 {
3331 int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
3332 /* Perfect power of 2 (other than 1, which is handled above). */
3333 if (shift > 0)
3334 return expand_shift (LSHIFT_EXPR, mode, op0,
3335 shift, target, unsignedp);
3336 else
3337 goto skip_synth;
3338 }
3339 else
3340 goto skip_synth;
3341
3342 /* We used to test optimize here, on the grounds that it's better to
3343 produce a smaller program when -O is not used. But this causes
3344 such a terrible slowdown sometimes that it seems better to always
3345 use synth_mult. */
3346
3347 /* Special case powers of two. */
3348 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3349 && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3350 return expand_shift (LSHIFT_EXPR, mode, op0,
3351 floor_log2 (coeff), target, unsignedp);
3352
3353 fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3354
3355 /* Attempt to handle multiplication of DImode values by negative
3356 coefficients, by performing the multiplication by a positive
3357 multiplier and then negating the result. */
3358 if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3359 {
3360 /* It's safe to use -coeff even for INT_MIN, as the
3361 result is interpreted as an unsigned coefficient.
3362 Exclude cost of op0 from max_cost to match the cost
3363 calculation of the synth_mult. */
3364 coeff = -(unsigned HOST_WIDE_INT) coeff;
3365 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
3366 mode, speed)
3367 - neg_cost (speed, mode));
3368 if (max_cost <= 0)
3369 goto skip_synth;
3370
3371 /* Special case powers of two. */
3372 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3373 {
3374 rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3375 floor_log2 (coeff), target, unsignedp);
3376 return expand_unop (mode, neg_optab, temp, target, 0);
3377 }
3378
3379 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3380 max_cost))
3381 {
3382 rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3383 &algorithm, variant);
3384 return expand_unop (mode, neg_optab, temp, target, 0);
3385 }
3386 goto skip_synth;
3387 }
3388
3389 /* Exclude cost of op0 from max_cost to match the cost
3390 calculation of the synth_mult. */
3391 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
3392 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3393 return expand_mult_const (mode, op0, coeff, target,
3394 &algorithm, variant);
3395 }
3396 skip_synth:
3397
3398 /* Expand x*2.0 as x+x. */
3399 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
3400 && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
3401 {
3402 op0 = force_reg (GET_MODE (op0), op0);
3403 return expand_binop (mode, add_optab, op0, op0,
3404 target, unsignedp, OPTAB_LIB_WIDEN);
3405 }
3406
3407 /* This used to use umul_optab if unsigned, but for non-widening multiply
3408 there is no difference between signed and unsigned. */
3409 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3410 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3411 gcc_assert (op0);
3412 return op0;
3413 }
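
/* A minimal illustrative sketch of the kind of shift-and-add sequence that
   choose_mult_variant/expand_mult_const aim to produce for a constant
   multiplier, assuming 32-bit unsigned arithmetic.  The helper name is
   invented for the example and is not used elsewhere in this file.  */

static unsigned int
example_mult_by_10 (unsigned int x)
{
  /* 10 = 8 + 2, so x * 10 == (x << 3) + (x << 1).  Whether such a sequence
     beats a hardware multiply depends on the shift/add/mult costs that the
     code above consults.  */
  return (x << 3) + (x << 1);
}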
3414
3415 /* Return a cost estimate for multiplying a register by the given
3416 COEFFicient in the given MODE and SPEED. */
3417
3418 int
3419 mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3420 {
3421 int max_cost;
3422 struct algorithm algorithm;
3423 enum mult_variant variant;
3424
3425 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3426 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
3427 mode, speed);
3428 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3429 return algorithm.cost.cost;
3430 else
3431 return max_cost;
3432 }
3433
3434 /* Perform a widening multiplication and return an rtx for the result.
3435 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3436 TARGET is a suggestion for where to store the result (an rtx).
3437 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3438 or smul_widen_optab.
3439
3440 We check specially for a constant integer as OP1, comparing the
3441 cost of a widening multiply against the cost of a sequence of shifts
3442 and adds. */
3443
3444 rtx
3445 expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3446 int unsignedp, optab this_optab)
3447 {
3448 bool speed = optimize_insn_for_speed_p ();
3449 rtx cop1;
3450
3451 if (CONST_INT_P (op1)
3452 && GET_MODE (op0) != VOIDmode
3453 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3454 this_optab == umul_widen_optab))
3455 && CONST_INT_P (cop1)
3456 && (INTVAL (cop1) >= 0
3457 || HWI_COMPUTABLE_MODE_P (mode)))
3458 {
3459 HOST_WIDE_INT coeff = INTVAL (cop1);
3460 int max_cost;
3461 enum mult_variant variant;
3462 struct algorithm algorithm;
3463
3464 if (coeff == 0)
3465 return CONST0_RTX (mode);
3466
3467 /* Special case powers of two. */
3468 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3469 {
3470 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3471 return expand_shift (LSHIFT_EXPR, mode, op0,
3472 floor_log2 (coeff), target, unsignedp);
3473 }
3474
3475 /* Exclude cost of op0 from max_cost to match the cost
3476 calculation of the synth_mult. */
3477 max_cost = mul_widen_cost (speed, mode);
3478 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3479 max_cost))
3480 {
3481 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3482 return expand_mult_const (mode, op0, coeff, target,
3483 &algorithm, variant);
3484 }
3485 }
3486 return expand_binop (mode, this_optab, op0, op1, target,
3487 unsignedp, OPTAB_LIB_WIDEN);
3488 }
3489 \f
3490 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3491 replace division by D, and put the least significant N bits of the result
3492 in *MULTIPLIER_PTR and return the most significant bit.
3493
3494 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
3495 needed precision is in PRECISION (should be <= N).
3496
3497 PRECISION should be as small as possible so this function can choose
3498 the multiplier more freely.
3499
3500 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
3501 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3502
3503 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3504 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3505
3506 unsigned HOST_WIDE_INT
3507 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3508 unsigned HOST_WIDE_INT *multiplier_ptr,
3509 int *post_shift_ptr, int *lgup_ptr)
3510 {
3511 int lgup, post_shift;
3512 int pow, pow2;
3513
3514 /* lgup = ceil(log2(divisor)); */
3515 lgup = ceil_log2 (d);
3516
3517 gcc_assert (lgup <= n);
3518
3519 pow = n + lgup;
3520 pow2 = n + lgup - precision;
3521
3522 /* mlow = 2^(N + lgup)/d */
3523 wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3524 wide_int mlow = wi::udiv_trunc (val, d);
3525
3526 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3527 val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3528 wide_int mhigh = wi::udiv_trunc (val, d);
3529
3530 /* If precision == N, then mlow, mhigh exceed 2^N
3531 (but they do not exceed 2^(N+1)). */
3532
3533 /* Reduce to lowest terms. */
3534 for (post_shift = lgup; post_shift > 0; post_shift--)
3535 {
3536 unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3537 HOST_BITS_PER_WIDE_INT);
3538 unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3539 HOST_BITS_PER_WIDE_INT);
3540 if (ml_lo >= mh_lo)
3541 break;
3542
3543 mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3544 mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3545 }
3546
3547 *post_shift_ptr = post_shift;
3548 *lgup_ptr = lgup;
3549 if (n < HOST_BITS_PER_WIDE_INT)
3550 {
3551 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
3552 *multiplier_ptr = mhigh.to_uhwi () & mask;
3553 return mhigh.to_uhwi () >= mask;
3554 }
3555 else
3556 {
3557 *multiplier_ptr = mhigh.to_uhwi ();
3558 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3559 }
3560 }
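
/* A worked instance of the computation above, assuming D = 7 and
   N = PRECISION = 32: mhigh ends up as the 33-bit value 0x124924925, so
   *MULTIPLIER_PTR is 0x24924925, *POST_SHIFT_PTR is 3 and the returned high
   bit is nonzero.  The callers below turn that nonzero high bit into the
   classic subtract/shift/add fixup sketched here; the helper is illustrative
   only and is not used elsewhere in this file.  */

static unsigned int
example_udiv_by_7 (unsigned int x)
{
  /* High 32 bits of x * 0x24924925.  */
  unsigned int t1 = (unsigned int) (((unsigned long long) x * 0x24924925u) >> 32);
  unsigned int t2 = (x - t1) >> 1;   /* cannot underflow: t1 <= x */
  return (t1 + t2) >> 2;             /* post_shift - 1 == 2 */
}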
3561
3562 /* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
3563 congruent to 1 (mod 2**N). */
3564
3565 static unsigned HOST_WIDE_INT
3566 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3567 {
3568 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3569
3570 /* The algorithm notes that the choice y = x satisfies
3571 x*y == 1 mod 2^3, since x is assumed odd.
3572 Each iteration doubles the number of bits of significance in y. */
3573
3574 unsigned HOST_WIDE_INT mask;
3575 unsigned HOST_WIDE_INT y = x;
3576 int nbit = 3;
3577
3578 mask = (n == HOST_BITS_PER_WIDE_INT
3579 ? HOST_WIDE_INT_M1U
3580 : (HOST_WIDE_INT_1U << n) - 1);
3581
3582 while (nbit < n)
3583 {
3584 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3585 nbit *= 2;
3586 }
3587 return y;
3588 }
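
/* A self-contained sketch of the same doubling iteration, specialized to
   N = 8 for illustration; the helper name is invented for the example.
   Starting from y = x (correct to 3 bits for odd x), each pass doubles the
   number of correct low-order bits.  For x = 7 this yields 183, and indeed
   7 * 183 == 1281 == 5 * 256 + 1.  */

static unsigned int
example_invert_mod256 (unsigned int x)  /* X must be odd.  */
{
  unsigned int y = x;
  for (int nbit = 3; nbit < 8; nbit *= 2)
    y = y * (2 - x * y) & 0xff;         /* modulo 2^8 */
  return y;
}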
3589
3590 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
3591 performed with the wrong signedness. ADJ_OPERAND is already the high half
3592 of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed
3593 product to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
3594 become signed.
3595
3596 The result is put in TARGET if that is convenient.
3597
3598 MODE is the mode of operation. */
3599
3600 rtx
3601 expand_mult_highpart_adjust (machine_mode mode, rtx adj_operand, rtx op0,
3602 rtx op1, rtx target, int unsignedp)
3603 {
3604 rtx tem;
3605 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3606
3607 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3608 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3609 tem = expand_and (mode, tem, op1, NULL_RTX);
3610 adj_operand
3611 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3612 adj_operand);
3613
3614 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3615 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3616 tem = expand_and (mode, tem, op0, NULL_RTX);
3617 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3618 target);
3619
3620 return target;
3621 }
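
/* A minimal sketch of the adjustment above for 32-bit operands, assuming the
   high half of the unsigned product is already available: the signed high
   part is obtained by subtracting each operand wherever the other operand is
   negative (the opposite direction adds instead, which is what ADJ_CODE
   selects).  The helper is illustrative only.  */

static unsigned int
example_smul_highpart_via_umul (int op0, int op1)
{
  unsigned int u0 = (unsigned int) op0, u1 = (unsigned int) op1;
  unsigned int uhi = (unsigned int) (((unsigned long long) u0 * u1) >> 32);

  /* The arithmetic shift + AND sequence emitted above, (op0 >> 31) & op1,
     plays the role of these sign tests.  */
  uhi -= (op0 < 0 ? u1 : 0);
  uhi -= (op1 < 0 ? u0 : 0);
  return uhi;   /* == the high 32 bits of (long long) op0 * op1 */
}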
3622
3623 /* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3624
3625 static rtx
3626 extract_high_half (machine_mode mode, rtx op)
3627 {
3628 machine_mode wider_mode;
3629
3630 if (mode == word_mode)
3631 return gen_highpart (mode, op);
3632
3633 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3634
3635 wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3636 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3637 GET_MODE_BITSIZE (mode), 0, 1);
3638 return convert_modes (mode, wider_mode, op, 0);
3639 }
3640
3641 /* Like expmed_mult_highpart, but only consider using a multiplication
3642 optab. OP1 is an rtx for the constant operand. */
3643
3644 static rtx
3645 expmed_mult_highpart_optab (machine_mode mode, rtx op0, rtx op1,
3646 rtx target, int unsignedp, int max_cost)
3647 {
3648 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3649 machine_mode wider_mode;
3650 optab moptab;
3651 rtx tem;
3652 int size;
3653 bool speed = optimize_insn_for_speed_p ();
3654
3655 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3656
3657 wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3658 size = GET_MODE_BITSIZE (mode);
3659
3660 /* First, try using a multiplication insn that generates only the needed
3661 high part of the product, with the signedness given by UNSIGNEDP. */
3662 if (mul_highpart_cost (speed, mode) < max_cost)
3663 {
3664 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3665 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3666 unsignedp, OPTAB_DIRECT);
3667 if (tem)
3668 return tem;
3669 }
3670
3671 /* Second, the same as above, but with the signedness opposite to UNSIGNEDP.
3672 The result then needs to be adjusted after the multiplication. */
3673 if (size - 1 < BITS_PER_WORD
3674 && (mul_highpart_cost (speed, mode)
3675 + 2 * shift_cost (speed, mode, size-1)
3676 + 4 * add_cost (speed, mode) < max_cost))
3677 {
3678 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3679 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3680 unsignedp, OPTAB_DIRECT);
3681 if (tem)
3682 /* We used the wrong signedness. Adjust the result. */
3683 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3684 tem, unsignedp);
3685 }
3686
3687 /* Try widening multiplication. */
3688 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3689 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3690 && mul_widen_cost (speed, wider_mode) < max_cost)
3691 {
3692 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3693 unsignedp, OPTAB_WIDEN);
3694 if (tem)
3695 return extract_high_half (mode, tem);
3696 }
3697
3698 /* Try widening the mode and performing a non-widening multiplication. */
3699 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3700 && size - 1 < BITS_PER_WORD
3701 && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3702 < max_cost))
3703 {
3704 rtx_insn *insns;
3705 rtx wop0, wop1;
3706
3707 /* We need to widen the operands, for example to ensure the
3708 constant multiplier is correctly sign or zero extended.
3709 Use a sequence to clean up any instructions emitted by
3710 the conversions if things don't work out. */
3711 start_sequence ();
3712 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3713 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3714 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3715 unsignedp, OPTAB_WIDEN);
3716 insns = get_insns ();
3717 end_sequence ();
3718
3719 if (tem)
3720 {
3721 emit_insn (insns);
3722 return extract_high_half (mode, tem);
3723 }
3724 }
3725
3726 /* Try widening multiplication of opposite signedness, and adjust. */
3727 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3728 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3729 && size - 1 < BITS_PER_WORD
3730 && (mul_widen_cost (speed, wider_mode)
3731 + 2 * shift_cost (speed, mode, size-1)
3732 + 4 * add_cost (speed, mode) < max_cost))
3733 {
3734 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3735 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3736 if (tem != 0)
3737 {
3738 tem = extract_high_half (mode, tem);
3739 /* We used the wrong signedness. Adjust the result. */
3740 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3741 target, unsignedp);
3742 }
3743 }
3744
3745 return 0;
3746 }
3747
3748 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3749 putting the high half of the result in TARGET if that is convenient,
3750 and return where the result is. If the operation cannot be performed,
3751 0 is returned.
3752
3753 MODE is the mode of operation and result.
3754
3755 UNSIGNEDP nonzero means unsigned multiply.
3756
3757 MAX_COST is the total allowed cost for the expanded RTL. */
3758
3759 static rtx
3760 expmed_mult_highpart (machine_mode mode, rtx op0, rtx op1,
3761 rtx target, int unsignedp, int max_cost)
3762 {
3763 machine_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3764 unsigned HOST_WIDE_INT cnst1;
3765 int extra_cost;
3766 bool sign_adjust = false;
3767 enum mult_variant variant;
3768 struct algorithm alg;
3769 rtx tem;
3770 bool speed = optimize_insn_for_speed_p ();
3771
3772 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3773 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3774 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3775
3776 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3777
3778 /* We can't optimize modes wider than BITS_PER_WORD.
3779 ??? We might be able to perform double-word arithmetic if
3780 mode == word_mode; however, all the cost calculations in
3781 synth_mult etc. assume single-word operations. */
3782 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3783 return expmed_mult_highpart_optab (mode, op0, op1, target,
3784 unsignedp, max_cost);
3785
3786 extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3787
3788 /* Check whether we try to multiply by a negative constant. */
3789 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3790 {
3791 sign_adjust = true;
3792 extra_cost += add_cost (speed, mode);
3793 }
3794
3795 /* See whether shift/add multiplication is cheap enough. */
3796 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3797 max_cost - extra_cost))
3798 {
3799 /* See whether the specialized multiplication optabs are
3800 cheaper than the shift/add version. */
3801 tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3802 alg.cost.cost + extra_cost);
3803 if (tem)
3804 return tem;
3805
3806 tem = convert_to_mode (wider_mode, op0, unsignedp);
3807 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3808 tem = extract_high_half (mode, tem);
3809
3810 /* Adjust result for signedness. */
3811 if (sign_adjust)
3812 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3813
3814 return tem;
3815 }
3816 return expmed_mult_highpart_optab (mode, op0, op1, target,
3817 unsignedp, max_cost);
3818 }
3819
3820
3821 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3822
3823 static rtx
3824 expand_smod_pow2 (machine_mode mode, rtx op0, HOST_WIDE_INT d)
3825 {
3826 rtx result, temp, shift;
3827 rtx_code_label *label;
3828 int logd;
3829 int prec = GET_MODE_PRECISION (mode);
3830
3831 logd = floor_log2 (d);
3832 result = gen_reg_rtx (mode);
3833
3834 /* Avoid conditional branches when they're expensive. */
3835 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3836 && optimize_insn_for_speed_p ())
3837 {
3838 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3839 mode, 0, -1);
3840 if (signmask)
3841 {
3842 HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
3843 signmask = force_reg (mode, signmask);
3844 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3845
3846 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3847 which instruction sequence to use. If logical right shifts
3848 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3849 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3850
3851 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3852 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3853 || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
3854 > COSTS_N_INSNS (2)))
3855 {
3856 temp = expand_binop (mode, xor_optab, op0, signmask,
3857 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3858 temp = expand_binop (mode, sub_optab, temp, signmask,
3859 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3860 temp = expand_binop (mode, and_optab, temp,
3861 gen_int_mode (masklow, mode),
3862 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3863 temp = expand_binop (mode, xor_optab, temp, signmask,
3864 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3865 temp = expand_binop (mode, sub_optab, temp, signmask,
3866 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3867 }
3868 else
3869 {
3870 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3871 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3872 signmask = force_reg (mode, signmask);
3873
3874 temp = expand_binop (mode, add_optab, op0, signmask,
3875 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3876 temp = expand_binop (mode, and_optab, temp,
3877 gen_int_mode (masklow, mode),
3878 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3879 temp = expand_binop (mode, sub_optab, temp, signmask,
3880 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3881 }
3882 return temp;
3883 }
3884 }
3885
3886 /* Mask contains the mode's signbit and the significant bits of the
3887 modulus. By including the signbit in the operation, many targets
3888 can avoid an explicit compare operation in the following comparison
3889 against zero. */
3890 wide_int mask = wi::mask (logd, false, prec);
3891 mask = wi::set_bit (mask, prec - 1);
3892
3893 temp = expand_binop (mode, and_optab, op0,
3894 immed_wide_int_const (mask, mode),
3895 result, 1, OPTAB_LIB_WIDEN);
3896 if (temp != result)
3897 emit_move_insn (result, temp);
3898
3899 label = gen_label_rtx ();
3900 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3901
3902 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3903 0, OPTAB_LIB_WIDEN);
3904
3905 mask = wi::mask (logd, true, prec);
3906 temp = expand_binop (mode, ior_optab, temp,
3907 immed_wide_int_const (mask, mode),
3908 result, 1, OPTAB_LIB_WIDEN);
3909 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3910 0, OPTAB_LIB_WIDEN);
3911 if (temp != result)
3912 emit_move_insn (result, temp);
3913 emit_label (label);
3914 return result;
3915 }
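
/* A compact sketch of the branchless path above for 32-bit values, assuming
   D is a power of two with LOGD = floor_log2 (D); it mirrors the
   "2 XORs, 2 SUBs and an AND" variant and is illustrative only.  Unsigned
   arithmetic is used internally so the wrap-around is well defined.  */

static int
example_smod_pow2 (int x, int logd)
{
  unsigned int ux = (unsigned int) x;
  unsigned int mask = (1u << logd) - 1;
  unsigned int signmask = -(unsigned int) (x < 0);  /* all ones iff x < 0 */
  unsigned int t = (ux ^ signmask) - signmask;      /* absolute value of x */
  t &= mask;                                        /* |x| mod 2^logd */
  return (int) ((t ^ signmask) - signmask);         /* restore the sign of x */
}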
3916
3917 /* Expand signed division of OP0 by a power of two D in mode MODE.
3918 This routine is only called for positive values of D. */
3919
3920 static rtx
3921 expand_sdiv_pow2 (machine_mode mode, rtx op0, HOST_WIDE_INT d)
3922 {
3923 rtx temp;
3924 rtx_code_label *label;
3925 int logd;
3926
3927 logd = floor_log2 (d);
3928
3929 if (d == 2
3930 && BRANCH_COST (optimize_insn_for_speed_p (),
3931 false) >= 1)
3932 {
3933 temp = gen_reg_rtx (mode);
3934 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3935 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3936 0, OPTAB_LIB_WIDEN);
3937 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3938 }
3939
3940 if (HAVE_conditional_move
3941 && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
3942 {
3943 rtx temp2;
3944
3945 start_sequence ();
3946 temp2 = copy_to_mode_reg (mode, op0);
3947 temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
3948 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3949 temp = force_reg (mode, temp);
3950
3951 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3952 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3953 mode, temp, temp2, mode, 0);
3954 if (temp2)
3955 {
3956 rtx_insn *seq = get_insns ();
3957 end_sequence ();
3958 emit_insn (seq);
3959 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3960 }
3961 end_sequence ();
3962 }
3963
3964 if (BRANCH_COST (optimize_insn_for_speed_p (),
3965 false) >= 2)
3966 {
3967 int ushift = GET_MODE_BITSIZE (mode) - logd;
3968
3969 temp = gen_reg_rtx (mode);
3970 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3971 if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
3972 || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
3973 > COSTS_N_INSNS (1))
3974 temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
3975 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3976 else
3977 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3978 ushift, NULL_RTX, 1);
3979 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3980 0, OPTAB_LIB_WIDEN);
3981 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3982 }
3983
3984 label = gen_label_rtx ();
3985 temp = copy_to_mode_reg (mode, op0);
3986 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3987 expand_inc (temp, gen_int_mode (d - 1, mode));
3988 emit_label (label);
3989 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3990 }
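
/* A minimal sketch of the branch-free strategy used above when branches are
   expensive, for 32-bit values and a positive power-of-two divisor
   D == 1 << LOGD: add D - 1 to negative dividends only, then shift right
   arithmetically.  It assumes >> on a negative int is an arithmetic shift,
   as on the usual targets; the helper is illustrative only.  */

static int
example_sdiv_pow2 (int x, int logd)
{
  /* x >> 31 is -1 for negative x and 0 otherwise, playing the role of the
     store-flag value emitted above; masking it with D - 1 gives the bias.  */
  int bias = (x >> 31) & ((1 << logd) - 1);
  return (x + bias) >> logd;   /* truncating signed division by 2^logd */
}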
3991 \f
3992 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3993 if that is convenient, and returning where the result is.
3994 You may request either the quotient or the remainder as the result;
3995 specify REM_FLAG nonzero to get the remainder.
3996
3997 CODE is the expression code for which kind of division this is;
3998 it controls how rounding is done. MODE is the machine mode to use.
3999 UNSIGNEDP nonzero means do unsigned division. */
4000
4001 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
4002 and then correct it by or'ing in missing high bits
4003 if result of ANDI is nonzero.
4004 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
4005 This could optimize to a bfexts instruction.
4006 But C doesn't use these operations, so their optimizations are
4007 left for later. */
4008 /* ??? For modulo, we don't actually need the highpart of the first product,
4009 the low part will do nicely. And for small divisors, the second multiply
4010 can also be a low-part only multiply or even be completely left out.
4011 E.g. to calculate the remainder of a division by 3 with a 32 bit
4012 multiply, multiply with 0x55555556 and extract the upper two bits;
4013 the result is exact for inputs up to 0x1fffffff.
4014 The input range can be reduced by using cross-sum rules.
4015 For odd divisors >= 3, the following table gives right shift counts
4016 so that if a number is shifted by an integer multiple of the given
4017 amount, the remainder stays the same:
4018 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
4019 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
4020 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
4021 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
4022 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
4023
4024 Cross-sum rules for even numbers can be derived by leaving as many bits
4025 to the right alone as the divisor has zeros to the right.
4026 E.g. if x is an unsigned 32 bit number:
4027 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
4028 */
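
/* A self-contained sketch of the remainder-by-3 trick mentioned above,
   assuming 32-bit unsigned arithmetic; the helper name is invented for the
   example.  The top two bits of the truncated product equal x mod 3, exactly
   for x up to 0x1fffffff as noted.  */

static unsigned int
example_umod3 (unsigned int x)
{
  unsigned int t = x * 0x55555556u;  /* low 32 bits of x * ((2^32 + 2) / 3) */
  return t >> 30;                    /* upper two bits == x % 3 */
}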
4029
4030 rtx
4031 expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
4032 rtx op0, rtx op1, rtx target, int unsignedp)
4033 {
4034 machine_mode compute_mode;
4035 rtx tquotient;
4036 rtx quotient = 0, remainder = 0;
4037 rtx_insn *last;
4038 int size;
4039 rtx_insn *insn;
4040 optab optab1, optab2;
4041 int op1_is_constant, op1_is_pow2 = 0;
4042 int max_cost, extra_cost;
4043 static HOST_WIDE_INT last_div_const = 0;
4044 bool speed = optimize_insn_for_speed_p ();
4045
4046 op1_is_constant = CONST_INT_P (op1);
4047 if (op1_is_constant)
4048 {
4049 wide_int ext_op1 = rtx_mode_t (op1, mode);
4050 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4051 || (! unsignedp
4052 && wi::popcount (wi::neg (ext_op1)) == 1));
4053 }
4054
4055 /*
4056 This is the structure of expand_divmod:
4057
4058 First comes code to fix up the operands so we can perform the operations
4059 correctly and efficiently.
4060
4061 Second comes a switch statement with code specific for each rounding mode.
4062 For some special operands this code emits all RTL for the desired
4063 operation, for other cases, it generates only a quotient and stores it in
4064 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
4065 to indicate that it has not done anything.
4066
4067 Last comes code that finishes the operation. If QUOTIENT is set and
4068 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
4069 QUOTIENT is not set, it is computed using trunc rounding.
4070
4071 We try to generate special code for division and remainder when OP1 is a
4072 constant. If |OP1| = 2**n we can use shifts and some other fast
4073 operations. For other values of OP1, we compute a carefully selected
4074 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
4075 by m.
4076
4077 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
4078 half of the product. Different strategies for generating the product are
4079 implemented in expmed_mult_highpart.
4080
4081 If what we actually want is the remainder, we generate that by another
4082 by-constant multiplication and a subtraction. */
4083
4084 /* We shouldn't be called with OP1 == const1_rtx, but some of the
4085 code below will malfunction if we are, so check here and handle
4086 the special case if so. */
4087 if (op1 == const1_rtx)
4088 return rem_flag ? const0_rtx : op0;
4089
4090 /* When dividing by -1, we could get an overflow.
4091 negv_optab can handle overflows. */
4092 if (! unsignedp && op1 == constm1_rtx)
4093 {
4094 if (rem_flag)
4095 return const0_rtx;
4096 return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
4097 ? negv_optab : neg_optab, op0, target, 0);
4098 }
4099
4100 if (target
4101 /* Don't use the function value register as a target
4102 since we have to read it as well as write it,
4103 and function-inlining gets confused by this. */
4104 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
4105 /* Don't clobber an operand while doing a multi-step calculation. */
4106 || ((rem_flag || op1_is_constant)
4107 && (reg_mentioned_p (target, op0)
4108 || (MEM_P (op0) && MEM_P (target))))
4109 || reg_mentioned_p (target, op1)
4110 || (MEM_P (op1) && MEM_P (target))))
4111 target = 0;
4112
4113 /* Get the mode in which to perform this computation. Normally it will
4114 be MODE, but sometimes we can't do the desired operation in MODE.
4115 If so, pick a wider mode in which we can do the operation. Convert
4116 to that mode at the start to avoid repeated conversions.
4117
4118 First see what operations we need. These depend on the expression
4119 we are evaluating. (We assume that divxx3 insns exist under the
4120 same conditions that modxx3 insns do, and that these insns don't normally
4121 fail. If these assumptions are not correct, we may generate less
4122 efficient code in some cases.)
4123
4124 Then see if we find a mode in which we can open-code that operation
4125 (either a division, modulus, or shift). Finally, check for the smallest
4126 mode for which we can do the operation with a library call. */
4127
4128 /* We might want to refine this now that we have division-by-constant
4129 optimization. Since expmed_mult_highpart tries so many variants, it is
4130 not straightforward to generalize this. Maybe we should make an array
4131 of possible modes in init_expmed? Save this for GCC 2.7. */
4132
4133 optab1 = (op1_is_pow2
4134 ? (unsignedp ? lshr_optab : ashr_optab)
4135 : (unsignedp ? udiv_optab : sdiv_optab));
4136 optab2 = (op1_is_pow2 ? optab1
4137 : (unsignedp ? udivmod_optab : sdivmod_optab));
4138
4139 FOR_EACH_MODE_FROM (compute_mode, mode)
4140 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4141 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4142 break;
4143
4144 if (compute_mode == VOIDmode)
4145 FOR_EACH_MODE_FROM (compute_mode, mode)
4146 if (optab_libfunc (optab1, compute_mode)
4147 || optab_libfunc (optab2, compute_mode))
4148 break;
4149
4150 /* If we still couldn't find a mode, use MODE, but expand_binop will
4151 probably die. */
4152 if (compute_mode == VOIDmode)
4153 compute_mode = mode;
4154
4155 if (target && GET_MODE (target) == compute_mode)
4156 tquotient = target;
4157 else
4158 tquotient = gen_reg_rtx (compute_mode);
4159
4160 size = GET_MODE_BITSIZE (compute_mode);
4161 #if 0
4162 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4163 (mode), and thereby get better code when OP1 is a constant. Do that
4164 later. It will require going over all usages of SIZE below. */
4165 size = GET_MODE_BITSIZE (mode);
4166 #endif
4167
4168 /* Only deduct something for a REM if the last divide done was
4169 for a different constant. Then set the constant of the last
4170 divide. */
4171 max_cost = (unsignedp
4172 ? udiv_cost (speed, compute_mode)
4173 : sdiv_cost (speed, compute_mode));
4174 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4175 && INTVAL (op1) == last_div_const))
4176 max_cost -= (mul_cost (speed, compute_mode)
4177 + add_cost (speed, compute_mode));
4178
4179 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4180
4181 /* Now convert to the best mode to use. */
4182 if (compute_mode != mode)
4183 {
4184 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4185 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4186
4187 /* convert_modes may have placed op1 into a register, so we
4188 must recompute the following. */
4189 op1_is_constant = CONST_INT_P (op1);
4190 if (op1_is_constant)
4191 {
4192 wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
4193 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4194 || (! unsignedp
4195 && wi::popcount (wi::neg (ext_op1)) == 1));
4196 }
4197 else
4198 op1_is_pow2 = 0;
4199 }
4200
4201 /* If one of the operands is a volatile MEM, copy it into a register. */
4202
4203 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4204 op0 = force_reg (compute_mode, op0);
4205 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4206 op1 = force_reg (compute_mode, op1);
4207
4208 /* If we need the remainder or if OP1 is constant, we need to
4209 put OP0 in a register in case it has any queued subexpressions. */
4210 if (rem_flag || op1_is_constant)
4211 op0 = force_reg (compute_mode, op0);
4212
4213 last = get_last_insn ();
4214
4215 /* Promote floor rounding to trunc rounding for unsigned operations. */
4216 if (unsignedp)
4217 {
4218 if (code == FLOOR_DIV_EXPR)
4219 code = TRUNC_DIV_EXPR;
4220 if (code == FLOOR_MOD_EXPR)
4221 code = TRUNC_MOD_EXPR;
4222 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4223 code = TRUNC_DIV_EXPR;
4224 }
4225
4226 if (op1 != const0_rtx)
4227 switch (code)
4228 {
4229 case TRUNC_MOD_EXPR:
4230 case TRUNC_DIV_EXPR:
4231 if (op1_is_constant)
4232 {
4233 if (unsignedp)
4234 {
4235 unsigned HOST_WIDE_INT mh, ml;
4236 int pre_shift, post_shift;
4237 int dummy;
4238 wide_int wd = rtx_mode_t (op1, compute_mode);
4239 unsigned HOST_WIDE_INT d = wd.to_uhwi ();
4240
4241 if (wi::popcount (wd) == 1)
4242 {
4243 pre_shift = floor_log2 (d);
4244 if (rem_flag)
4245 {
4246 unsigned HOST_WIDE_INT mask
4247 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4248 remainder
4249 = expand_binop (compute_mode, and_optab, op0,
4250 gen_int_mode (mask, compute_mode),
4251 remainder, 1,
4252 OPTAB_LIB_WIDEN);
4253 if (remainder)
4254 return gen_lowpart (mode, remainder);
4255 }
4256 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4257 pre_shift, tquotient, 1);
4258 }
4259 else if (size <= HOST_BITS_PER_WIDE_INT)
4260 {
4261 if (d >= (HOST_WIDE_INT_1U << (size - 1)))
4262 {
4263 /* Most significant bit of divisor is set; emit an scc
4264 insn. */
4265 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4266 compute_mode, 1, 1);
4267 }
4268 else
4269 {
4270 /* Find a suitable multiplier and right shift count
4271 instead of multiplying with D. */
4272
4273 mh = choose_multiplier (d, size, size,
4274 &ml, &post_shift, &dummy);
4275
4276 /* If the suggested multiplier is more than SIZE bits,
4277 we can do better for even divisors, using an
4278 initial right shift. */
4279 if (mh != 0 && (d & 1) == 0)
4280 {
4281 pre_shift = ctz_or_zero (d);
4282 mh = choose_multiplier (d >> pre_shift, size,
4283 size - pre_shift,
4284 &ml, &post_shift, &dummy);
4285 gcc_assert (!mh);
4286 }
4287 else
4288 pre_shift = 0;
4289
4290 if (mh != 0)
4291 {
4292 rtx t1, t2, t3, t4;
4293
4294 if (post_shift - 1 >= BITS_PER_WORD)
4295 goto fail1;
4296
4297 extra_cost
4298 = (shift_cost (speed, compute_mode, post_shift - 1)
4299 + shift_cost (speed, compute_mode, 1)
4300 + 2 * add_cost (speed, compute_mode));
4301 t1 = expmed_mult_highpart
4302 (compute_mode, op0,
4303 gen_int_mode (ml, compute_mode),
4304 NULL_RTX, 1, max_cost - extra_cost);
4305 if (t1 == 0)
4306 goto fail1;
4307 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4308 op0, t1),
4309 NULL_RTX);
4310 t3 = expand_shift (RSHIFT_EXPR, compute_mode,
4311 t2, 1, NULL_RTX, 1);
4312 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4313 t1, t3),
4314 NULL_RTX);
4315 quotient = expand_shift
4316 (RSHIFT_EXPR, compute_mode, t4,
4317 post_shift - 1, tquotient, 1);
4318 }
4319 else
4320 {
4321 rtx t1, t2;
4322
4323 if (pre_shift >= BITS_PER_WORD
4324 || post_shift >= BITS_PER_WORD)
4325 goto fail1;
4326
4327 t1 = expand_shift
4328 (RSHIFT_EXPR, compute_mode, op0,
4329 pre_shift, NULL_RTX, 1);
4330 extra_cost
4331 = (shift_cost (speed, compute_mode, pre_shift)
4332 + shift_cost (speed, compute_mode, post_shift));
4333 t2 = expmed_mult_highpart
4334 (compute_mode, t1,
4335 gen_int_mode (ml, compute_mode),
4336 NULL_RTX, 1, max_cost - extra_cost);
4337 if (t2 == 0)
4338 goto fail1;
4339 quotient = expand_shift
4340 (RSHIFT_EXPR, compute_mode, t2,
4341 post_shift, tquotient, 1);
4342 }
4343 }
4344 }
4345 else /* Mode too wide to use tricky code */
4346 break;
4347
4348 insn = get_last_insn ();
4349 if (insn != last)
4350 set_dst_reg_note (insn, REG_EQUAL,
4351 gen_rtx_UDIV (compute_mode, op0, op1),
4352 quotient);
4353 }
4354 else /* TRUNC_DIV, signed */
4355 {
4356 unsigned HOST_WIDE_INT ml;
4357 int lgup, post_shift;
4358 rtx mlr;
4359 HOST_WIDE_INT d = INTVAL (op1);
4360 unsigned HOST_WIDE_INT abs_d;
4361
4362 /* Since d might be INT_MIN, we have to cast to
4363 unsigned HOST_WIDE_INT before negating to avoid
4364 undefined signed overflow. */
4365 abs_d = (d >= 0
4366 ? (unsigned HOST_WIDE_INT) d
4367 : - (unsigned HOST_WIDE_INT) d);
4368
4369 /* n rem d = n rem -d */
4370 if (rem_flag && d < 0)
4371 {
4372 d = abs_d;
4373 op1 = gen_int_mode (abs_d, compute_mode);
4374 }
4375
4376 if (d == 1)
4377 quotient = op0;
4378 else if (d == -1)
4379 quotient = expand_unop (compute_mode, neg_optab, op0,
4380 tquotient, 0);
4381 else if (size <= HOST_BITS_PER_WIDE_INT
4382 && abs_d == HOST_WIDE_INT_1U << (size - 1))
4383 {
4384 /* This case is not handled correctly below. */
4385 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4386 compute_mode, 1, 1);
4387 if (quotient == 0)
4388 goto fail1;
4389 }
4390 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4391 && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
4392 && (rem_flag
4393 ? smod_pow2_cheap (speed, compute_mode)
4394 : sdiv_pow2_cheap (speed, compute_mode))
4395 /* We assume that the cheap metric is true if the
4396 optab has an expander for this mode. */
4397 && ((optab_handler ((rem_flag ? smod_optab
4398 : sdiv_optab),
4399 compute_mode)
4400 != CODE_FOR_nothing)
4401 || (optab_handler (sdivmod_optab,
4402 compute_mode)
4403 != CODE_FOR_nothing)))
4404 ;
4405 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d)
4406 && (size <= HOST_BITS_PER_WIDE_INT
4407 || abs_d != (unsigned HOST_WIDE_INT) d))
4408 {
4409 if (rem_flag)
4410 {
4411 remainder = expand_smod_pow2 (compute_mode, op0, d);
4412 if (remainder)
4413 return gen_lowpart (mode, remainder);
4414 }
4415
4416 if (sdiv_pow2_cheap (speed, compute_mode)
4417 && ((optab_handler (sdiv_optab, compute_mode)
4418 != CODE_FOR_nothing)
4419 || (optab_handler (sdivmod_optab, compute_mode)
4420 != CODE_FOR_nothing)))
4421 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4422 compute_mode, op0,
4423 gen_int_mode (abs_d,
4424 compute_mode),
4425 NULL_RTX, 0);
4426 else
4427 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4428
4429 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4430 negate the quotient. */
4431 if (d < 0)
4432 {
4433 insn = get_last_insn ();
4434 if (insn != last
4435 && abs_d < (HOST_WIDE_INT_1U
4436 << (HOST_BITS_PER_WIDE_INT - 1)))
4437 set_dst_reg_note (insn, REG_EQUAL,
4438 gen_rtx_DIV (compute_mode, op0,
4439 gen_int_mode
4440 (abs_d,
4441 compute_mode)),
4442 quotient);
4443
4444 quotient = expand_unop (compute_mode, neg_optab,
4445 quotient, quotient, 0);
4446 }
4447 }
4448 else if (size <= HOST_BITS_PER_WIDE_INT)
4449 {
4450 choose_multiplier (abs_d, size, size - 1,
4451 &ml, &post_shift, &lgup);
4452 if (ml < HOST_WIDE_INT_1U << (size - 1))
4453 {
4454 rtx t1, t2, t3;
4455
4456 if (post_shift >= BITS_PER_WORD
4457 || size - 1 >= BITS_PER_WORD)
4458 goto fail1;
4459
4460 extra_cost = (shift_cost (speed, compute_mode, post_shift)
4461 + shift_cost (speed, compute_mode, size - 1)
4462 + add_cost (speed, compute_mode));
4463 t1 = expmed_mult_highpart
4464 (compute_mode, op0, gen_int_mode (ml, compute_mode),
4465 NULL_RTX, 0, max_cost - extra_cost);
4466 if (t1 == 0)
4467 goto fail1;
4468 t2 = expand_shift
4469 (RSHIFT_EXPR, compute_mode, t1,
4470 post_shift, NULL_RTX, 0);
4471 t3 = expand_shift
4472 (RSHIFT_EXPR, compute_mode, op0,
4473 size - 1, NULL_RTX, 0);
4474 if (d < 0)
4475 quotient
4476 = force_operand (gen_rtx_MINUS (compute_mode,
4477 t3, t2),
4478 tquotient);
4479 else
4480 quotient
4481 = force_operand (gen_rtx_MINUS (compute_mode,
4482 t2, t3),
4483 tquotient);
4484 }
4485 else
4486 {
4487 rtx t1, t2, t3, t4;
4488
4489 if (post_shift >= BITS_PER_WORD
4490 || size - 1 >= BITS_PER_WORD)
4491 goto fail1;
4492
4493 ml |= HOST_WIDE_INT_M1U << (size - 1);
4494 mlr = gen_int_mode (ml, compute_mode);
4495 extra_cost = (shift_cost (speed, compute_mode, post_shift)
4496 + shift_cost (speed, compute_mode, size - 1)
4497 + 2 * add_cost (speed, compute_mode));
4498 t1 = expmed_mult_highpart (compute_mode, op0, mlr,
4499 NULL_RTX, 0,
4500 max_cost - extra_cost);
4501 if (t1 == 0)
4502 goto fail1;
4503 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4504 t1, op0),
4505 NULL_RTX);
4506 t3 = expand_shift
4507 (RSHIFT_EXPR, compute_mode, t2,
4508 post_shift, NULL_RTX, 0);
4509 t4 = expand_shift
4510 (RSHIFT_EXPR, compute_mode, op0,
4511 size - 1, NULL_RTX, 0);
4512 if (d < 0)
4513 quotient
4514 = force_operand (gen_rtx_MINUS (compute_mode,
4515 t4, t3),
4516 tquotient);
4517 else
4518 quotient
4519 = force_operand (gen_rtx_MINUS (compute_mode,
4520 t3, t4),
4521 tquotient);
4522 }
4523 }
4524 else /* Too wide mode to use tricky code */
4525 break;
4526
4527 insn = get_last_insn ();
4528 if (insn != last)
4529 set_dst_reg_note (insn, REG_EQUAL,
4530 gen_rtx_DIV (compute_mode, op0, op1),
4531 quotient);
4532 }
4533 break;
4534 }
4535 fail1:
4536 delete_insns_since (last);
4537 break;
4538
4539 case FLOOR_DIV_EXPR:
4540 case FLOOR_MOD_EXPR:
4541 /* We will come here only for signed operations. */
4542 if (op1_is_constant && size <= HOST_BITS_PER_WIDE_INT)
4543 {
4544 unsigned HOST_WIDE_INT mh, ml;
4545 int pre_shift, lgup, post_shift;
4546 HOST_WIDE_INT d = INTVAL (op1);
4547
4548 if (d > 0)
4549 {
4550 /* We could just as easily deal with negative constants here,
4551 but it does not seem worth the trouble for GCC 2.6. */
4552 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4553 {
4554 pre_shift = floor_log2 (d);
4555 if (rem_flag)
4556 {
4557 unsigned HOST_WIDE_INT mask
4558 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4559 remainder = expand_binop
4560 (compute_mode, and_optab, op0,
4561 gen_int_mode (mask, compute_mode),
4562 remainder, 0, OPTAB_LIB_WIDEN);
4563 if (remainder)
4564 return gen_lowpart (mode, remainder);
4565 }
4566 quotient = expand_shift
4567 (RSHIFT_EXPR, compute_mode, op0,
4568 pre_shift, tquotient, 0);
4569 }
4570 else
4571 {
4572 rtx t1, t2, t3, t4;
4573
4574 mh = choose_multiplier (d, size, size - 1,
4575 &ml, &post_shift, &lgup);
4576 gcc_assert (!mh);
4577
4578 if (post_shift < BITS_PER_WORD
4579 && size - 1 < BITS_PER_WORD)
4580 {
4581 t1 = expand_shift
4582 (RSHIFT_EXPR, compute_mode, op0,
4583 size - 1, NULL_RTX, 0);
4584 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4585 NULL_RTX, 0, OPTAB_WIDEN);
4586 extra_cost = (shift_cost (speed, compute_mode, post_shift)
4587 + shift_cost (speed, compute_mode, size - 1)
4588 + 2 * add_cost (speed, compute_mode));
4589 t3 = expmed_mult_highpart
4590 (compute_mode, t2, gen_int_mode (ml, compute_mode),
4591 NULL_RTX, 1, max_cost - extra_cost);
4592 if (t3 != 0)
4593 {
4594 t4 = expand_shift
4595 (RSHIFT_EXPR, compute_mode, t3,
4596 post_shift, NULL_RTX, 1);
4597 quotient = expand_binop (compute_mode, xor_optab,
4598 t4, t1, tquotient, 0,
4599 OPTAB_WIDEN);
4600 }
4601 }
4602 }
4603 }
4604 else
4605 {
4606 rtx nsign, t1, t2, t3, t4;
4607 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4608 op0, constm1_rtx), NULL_RTX);
4609 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4610 0, OPTAB_WIDEN);
4611 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
4612 size - 1, NULL_RTX, 0);
4613 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4614 NULL_RTX);
4615 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4616 NULL_RTX, 0);
4617 if (t4)
4618 {
4619 rtx t5;
4620 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4621 NULL_RTX, 0);
4622 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4623 t4, t5),
4624 tquotient);
4625 }
4626 }
4627 }
4628
4629 if (quotient != 0)
4630 break;
4631 delete_insns_since (last);
4632
4633 /* Try using an instruction that produces both the quotient and
4634 remainder, using truncation. We can easily compensate the quotient
4635 or remainder to get floor rounding, once we have the remainder.
4636 Notice that we also compute the final remainder value here,
4637 and return the result right away. */
4638 if (target == 0 || GET_MODE (target) != compute_mode)
4639 target = gen_reg_rtx (compute_mode);
4640
4641 if (rem_flag)
4642 {
4643 remainder
4644 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4645 quotient = gen_reg_rtx (compute_mode);
4646 }
4647 else
4648 {
4649 quotient
4650 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4651 remainder = gen_reg_rtx (compute_mode);
4652 }
4653
4654 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4655 quotient, remainder, 0))
4656 {
4657 /* This could be computed with a branch-less sequence.
4658 Save that for later. */
4659 rtx tem;
4660 rtx_code_label *label = gen_label_rtx ();
4661 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4662 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4663 NULL_RTX, 0, OPTAB_WIDEN);
4664 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4665 expand_dec (quotient, const1_rtx);
4666 expand_inc (remainder, op1);
4667 emit_label (label);
4668 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4669 }
4670
4671 /* No luck with division elimination or divmod. Have to do it
4672 by conditionally adjusting op0 *and* the result. */
4673 {
4674 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4675 rtx adjusted_op0;
4676 rtx tem;
4677
4678 quotient = gen_reg_rtx (compute_mode);
4679 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4680 label1 = gen_label_rtx ();
4681 label2 = gen_label_rtx ();
4682 label3 = gen_label_rtx ();
4683 label4 = gen_label_rtx ();
4684 label5 = gen_label_rtx ();
4685 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4686 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4687 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4688 quotient, 0, OPTAB_LIB_WIDEN);
4689 if (tem != quotient)
4690 emit_move_insn (quotient, tem);
4691 emit_jump_insn (targetm.gen_jump (label5));
4692 emit_barrier ();
4693 emit_label (label1);
4694 expand_inc (adjusted_op0, const1_rtx);
4695 emit_jump_insn (targetm.gen_jump (label4));
4696 emit_barrier ();
4697 emit_label (label2);
4698 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4699 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4700 quotient, 0, OPTAB_LIB_WIDEN);
4701 if (tem != quotient)
4702 emit_move_insn (quotient, tem);
4703 emit_jump_insn (targetm.gen_jump (label5));
4704 emit_barrier ();
4705 emit_label (label3);
4706 expand_dec (adjusted_op0, const1_rtx);
4707 emit_label (label4);
4708 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4709 quotient, 0, OPTAB_LIB_WIDEN);
4710 if (tem != quotient)
4711 emit_move_insn (quotient, tem);
4712 expand_dec (quotient, const1_rtx);
4713 emit_label (label5);
4714 }
4715 break;
4716
4717 case CEIL_DIV_EXPR:
4718 case CEIL_MOD_EXPR:
4719 if (unsignedp)
4720 {
4721 if (op1_is_constant
4722 && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4723 && (size <= HOST_BITS_PER_WIDE_INT
4724 || INTVAL (op1) >= 0))
4725 {
4726 rtx t1, t2, t3;
4727 unsigned HOST_WIDE_INT d = INTVAL (op1);
4728 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4729 floor_log2 (d), tquotient, 1);
4730 t2 = expand_binop (compute_mode, and_optab, op0,
4731 gen_int_mode (d - 1, compute_mode),
4732 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4733 t3 = gen_reg_rtx (compute_mode);
4734 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4735 compute_mode, 1, 1);
4736 if (t3 == 0)
4737 {
4738 rtx_code_label *lab;
4739 lab = gen_label_rtx ();
4740 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4741 expand_inc (t1, const1_rtx);
4742 emit_label (lab);
4743 quotient = t1;
4744 }
4745 else
4746 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4747 t1, t3),
4748 tquotient);
4749 break;
4750 }
4751
4752 /* Try using an instruction that produces both the quotient and
4753 remainder, using truncation. We can easily compensate the
4754 quotient or remainder to get ceiling rounding, once we have the
4755 remainder. Notice that we also compute the final remainder
4756 value here, and return the result right away. */
4757 if (target == 0 || GET_MODE (target) != compute_mode)
4758 target = gen_reg_rtx (compute_mode);
4759
4760 if (rem_flag)
4761 {
4762 remainder = (REG_P (target)
4763 ? target : gen_reg_rtx (compute_mode));
4764 quotient = gen_reg_rtx (compute_mode);
4765 }
4766 else
4767 {
4768 quotient = (REG_P (target)
4769 ? target : gen_reg_rtx (compute_mode));
4770 remainder = gen_reg_rtx (compute_mode);
4771 }
4772
4773 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4774 remainder, 1))
4775 {
4776 /* This could be computed with a branch-less sequence.
4777 Save that for later. */
4778 rtx_code_label *label = gen_label_rtx ();
4779 do_cmp_and_jump (remainder, const0_rtx, EQ,
4780 compute_mode, label);
4781 expand_inc (quotient, const1_rtx);
4782 expand_dec (remainder, op1);
4783 emit_label (label);
4784 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4785 }
4786
4787 /* No luck with division elimination or divmod. Have to do it
4788 by conditionally adjusting op0 *and* the result. */
4789 {
4790 rtx_code_label *label1, *label2;
4791 rtx adjusted_op0, tem;
4792
4793 quotient = gen_reg_rtx (compute_mode);
4794 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4795 label1 = gen_label_rtx ();
4796 label2 = gen_label_rtx ();
4797 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4798 compute_mode, label1);
4799 emit_move_insn (quotient, const0_rtx);
4800 emit_jump_insn (targetm.gen_jump (label2));
4801 emit_barrier ();
4802 emit_label (label1);
4803 expand_dec (adjusted_op0, const1_rtx);
4804 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4805 quotient, 1, OPTAB_LIB_WIDEN);
4806 if (tem != quotient)
4807 emit_move_insn (quotient, tem);
4808 expand_inc (quotient, const1_rtx);
4809 emit_label (label2);
4810 }
4811 }
4812 else /* signed */
4813 {
4814 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4815 && INTVAL (op1) >= 0)
4816 {
4817 /* This is extremely similar to the code for the unsigned case
4818 above. For 2.7 we should merge these variants, but for
4819 2.6.1 I don't want to touch the code for unsigned since that
4820 gets used in C. The signed case will only be used by other
4821 languages (Ada). */
4822
4823 rtx t1, t2, t3;
4824 unsigned HOST_WIDE_INT d = INTVAL (op1);
4825 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4826 floor_log2 (d), tquotient, 0);
4827 t2 = expand_binop (compute_mode, and_optab, op0,
4828 gen_int_mode (d - 1, compute_mode),
4829 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4830 t3 = gen_reg_rtx (compute_mode);
4831 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4832 compute_mode, 1, 1);
4833 if (t3 == 0)
4834 {
4835 rtx_code_label *lab;
4836 lab = gen_label_rtx ();
4837 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4838 expand_inc (t1, const1_rtx);
4839 emit_label (lab);
4840 quotient = t1;
4841 }
4842 else
4843 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4844 t1, t3),
4845 tquotient);
4846 break;
4847 }
4848
4849 /* Try using an instruction that produces both the quotient and
4850 remainder, using truncation. We can easily compensate the
4851 quotient or remainder to get ceiling rounding, once we have the
4852 remainder. Notice that we also compute the final remainder
4853 value here, and return the result right away. */
4854 if (target == 0 || GET_MODE (target) != compute_mode)
4855 target = gen_reg_rtx (compute_mode);
4856 if (rem_flag)
4857 {
4858 remainder = (REG_P (target)
4859 ? target : gen_reg_rtx (compute_mode));
4860 quotient = gen_reg_rtx (compute_mode);
4861 }
4862 else
4863 {
4864 quotient = (REG_P (target)
4865 ? target : gen_reg_rtx (compute_mode));
4866 remainder = gen_reg_rtx (compute_mode);
4867 }
4868
4869 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4870 remainder, 0))
4871 {
4872 /* This could be computed with a branch-less sequence.
4873 Save that for later. */
4874 rtx tem;
4875 rtx_code_label *label = gen_label_rtx ();
4876 do_cmp_and_jump (remainder, const0_rtx, EQ,
4877 compute_mode, label);
4878 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4879 NULL_RTX, 0, OPTAB_WIDEN);
4880 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4881 expand_inc (quotient, const1_rtx);
4882 expand_dec (remainder, op1);
4883 emit_label (label);
4884 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4885 }
4886
4887 /* No luck with division elimination or divmod. Have to do it
4888 by conditionally adjusting op0 *and* the result. */
4889 {
4890 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4891 rtx adjusted_op0;
4892 rtx tem;
4893
4894 quotient = gen_reg_rtx (compute_mode);
4895 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4896 label1 = gen_label_rtx ();
4897 label2 = gen_label_rtx ();
4898 label3 = gen_label_rtx ();
4899 label4 = gen_label_rtx ();
4900 label5 = gen_label_rtx ();
4901 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4902 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4903 compute_mode, label1);
4904 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4905 quotient, 0, OPTAB_LIB_WIDEN);
4906 if (tem != quotient)
4907 emit_move_insn (quotient, tem);
4908 emit_jump_insn (targetm.gen_jump (label5));
4909 emit_barrier ();
4910 emit_label (label1);
4911 expand_dec (adjusted_op0, const1_rtx);
4912 emit_jump_insn (targetm.gen_jump (label4));
4913 emit_barrier ();
4914 emit_label (label2);
4915 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4916 compute_mode, label3);
4917 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4918 quotient, 0, OPTAB_LIB_WIDEN);
4919 if (tem != quotient)
4920 emit_move_insn (quotient, tem);
4921 emit_jump_insn (targetm.gen_jump (label5));
4922 emit_barrier ();
4923 emit_label (label3);
4924 expand_inc (adjusted_op0, const1_rtx);
4925 emit_label (label4);
4926 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4927 quotient, 0, OPTAB_LIB_WIDEN);
4928 if (tem != quotient)
4929 emit_move_insn (quotient, tem);
4930 expand_inc (quotient, const1_rtx);
4931 emit_label (label5);
4932 }
4933 }
4934 break;
4935
4936 case EXACT_DIV_EXPR:
4937 if (op1_is_constant && size <= HOST_BITS_PER_WIDE_INT)
4938 {
4939 HOST_WIDE_INT d = INTVAL (op1);
4940 unsigned HOST_WIDE_INT ml;
4941 int pre_shift;
4942 rtx t1;
4943
4944 pre_shift = ctz_or_zero (d);
4945 ml = invert_mod2n (d >> pre_shift, size);
4946 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4947 pre_shift, NULL_RTX, unsignedp);
4948 quotient = expand_mult (compute_mode, t1,
4949 gen_int_mode (ml, compute_mode),
4950 NULL_RTX, 1);
4951
4952 insn = get_last_insn ();
4953 set_dst_reg_note (insn, REG_EQUAL,
4954 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4955 compute_mode, op0, op1),
4956 quotient);
4957 }
4958 break;
4959
4960 case ROUND_DIV_EXPR:
4961 case ROUND_MOD_EXPR:
4962 if (unsignedp)
4963 {
4964 rtx tem;
4965 rtx_code_label *label;
4966 label = gen_label_rtx ();
4967 quotient = gen_reg_rtx (compute_mode);
4968 remainder = gen_reg_rtx (compute_mode);
4969 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4970 {
4971 rtx tem;
4972 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4973 quotient, 1, OPTAB_LIB_WIDEN);
4974 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4975 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4976 remainder, 1, OPTAB_LIB_WIDEN);
4977 }
4978 tem = plus_constant (compute_mode, op1, -1);
4979 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
4980 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4981 expand_inc (quotient, const1_rtx);
4982 expand_dec (remainder, op1);
4983 emit_label (label);
4984 }
4985 else
4986 {
4987 rtx abs_rem, abs_op1, tem, mask;
4988 rtx_code_label *label;
4989 label = gen_label_rtx ();
4990 quotient = gen_reg_rtx (compute_mode);
4991 remainder = gen_reg_rtx (compute_mode);
4992 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4993 {
4994 rtx tem;
4995 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4996 quotient, 0, OPTAB_LIB_WIDEN);
4997 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4998 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4999 remainder, 0, OPTAB_LIB_WIDEN);
5000 }
5001 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
5002 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
5003 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
5004 1, NULL_RTX, 1);
5005 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
5006 tem = expand_binop (compute_mode, xor_optab, op0, op1,
5007 NULL_RTX, 0, OPTAB_WIDEN);
5008 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
5009 size - 1, NULL_RTX, 0);
5010 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
5011 NULL_RTX, 0, OPTAB_WIDEN);
5012 tem = expand_binop (compute_mode, sub_optab, tem, mask,
5013 NULL_RTX, 0, OPTAB_WIDEN);
5014 expand_inc (quotient, tem);
5015 tem = expand_binop (compute_mode, xor_optab, mask, op1,
5016 NULL_RTX, 0, OPTAB_WIDEN);
5017 tem = expand_binop (compute_mode, sub_optab, tem, mask,
5018 NULL_RTX, 0, OPTAB_WIDEN);
5019 expand_dec (remainder, tem);
5020 emit_label (label);
5021 }
5022 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5023
5024 default:
5025 gcc_unreachable ();
5026 }
5027
5028 if (quotient == 0)
5029 {
5030 if (target && GET_MODE (target) != compute_mode)
5031 target = 0;
5032
5033 if (rem_flag)
5034 {
5035 /* Try to produce the remainder without producing the quotient.
5036 If we seem to have a divmod pattern that does not require widening,
5037 don't try widening here. We should really have a WIDEN argument
5038 to expand_twoval_binop, since what we'd really like to do here is
5039 1) try a mod insn in compute_mode
5040 2) try a divmod insn in compute_mode
5041 3) try a div insn in compute_mode and multiply-subtract to get
5042 remainder
5043 4) try the same things with widening allowed. */
5044 remainder
5045 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5046 op0, op1, target,
5047 unsignedp,
5048 ((optab_handler (optab2, compute_mode)
5049 != CODE_FOR_nothing)
5050 ? OPTAB_DIRECT : OPTAB_WIDEN));
5051 if (remainder == 0)
5052 {
5053 /* No luck there. Can we do remainder and divide at once
5054 without a library call? */
5055 remainder = gen_reg_rtx (compute_mode);
5056 if (! expand_twoval_binop ((unsignedp
5057 ? udivmod_optab
5058 : sdivmod_optab),
5059 op0, op1,
5060 NULL_RTX, remainder, unsignedp))
5061 remainder = 0;
5062 }
5063
5064 if (remainder)
5065 return gen_lowpart (mode, remainder);
5066 }
5067
5068 /* Produce the quotient. Try a quotient insn, but not a library call.
5069 If we have a divmod in this mode, use it in preference to widening
5070 the div (for this test we assume it will not fail). Note that optab2
5071 is set to whichever of the two optabs the call below will use. */
5072 quotient
5073 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
5074 op0, op1, rem_flag ? NULL_RTX : target,
5075 unsignedp,
5076 ((optab_handler (optab2, compute_mode)
5077 != CODE_FOR_nothing)
5078 ? OPTAB_DIRECT : OPTAB_WIDEN));
5079
5080 if (quotient == 0)
5081 {
5082 /* No luck there. Try a quotient-and-remainder insn,
5083 keeping the quotient alone. */
5084 quotient = gen_reg_rtx (compute_mode);
5085 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
5086 op0, op1,
5087 quotient, NULL_RTX, unsignedp))
5088 {
5089 quotient = 0;
5090 if (! rem_flag)
5091 /* Still no luck. If we are not computing the remainder,
5092 use a library call for the quotient. */
5093 quotient = sign_expand_binop (compute_mode,
5094 udiv_optab, sdiv_optab,
5095 op0, op1, target,
5096 unsignedp, OPTAB_LIB_WIDEN);
5097 }
5098 }
5099 }
5100
5101 if (rem_flag)
5102 {
5103 if (target && GET_MODE (target) != compute_mode)
5104 target = 0;
5105
5106 if (quotient == 0)
5107 {
5108 /* No divide instruction either. Use library for remainder. */
5109 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5110 op0, op1, target,
5111 unsignedp, OPTAB_LIB_WIDEN);
5112 /* No remainder function. Try a quotient-and-remainder
5113 function, keeping the remainder. */
5114 if (!remainder)
5115 {
5116 remainder = gen_reg_rtx (compute_mode);
5117 if (!expand_twoval_binop_libfunc
5118 (unsignedp ? udivmod_optab : sdivmod_optab,
5119 op0, op1,
5120 NULL_RTX, remainder,
5121 unsignedp ? UMOD : MOD))
5122 remainder = NULL_RTX;
5123 }
5124 }
5125 else
5126 {
5127 /* We divided. Now finish doing X - Y * (X / Y). */
5128 remainder = expand_mult (compute_mode, quotient, op1,
5129 NULL_RTX, unsignedp);
5130 remainder = expand_binop (compute_mode, sub_optab, op0,
5131 remainder, target, unsignedp,
5132 OPTAB_LIB_WIDEN);
5133 }
5134 }
5135
5136 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5137 }
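
/* A minimal stand-alone sketch, for illustration only, of the
   EXACT_DIV_EXPR strategy above; it is not referenced anywhere in the
   compiler and assumes a 32-bit unsigned int.  For an odd divisor D,
   repeated Newton steps compute an inverse of D modulo 2^32 (each step
   doubles the number of correct low-order bits, starting from the three
   bits that D itself already gets right); the real invert_mod2n helper
   may compute the inverse differently.  Multiplying an exact multiple
   of D by this inverse yields the quotient.  */

static unsigned int
exact_udiv_by_const_sketch (unsigned int x, unsigned int d)
{
  /* D must be odd and X must be an exact multiple of D.  */
  unsigned int m = d;      /* correct mod 2^3 */
  m *= 2 - d * m;          /* correct mod 2^6 */
  m *= 2 - d * m;          /* correct mod 2^12 */
  m *= 2 - d * m;          /* correct mod 2^24 */
  m *= 2 - d * m;          /* correct mod 2^48, hence mod 2^32 */
  return x * m;            /* the exact quotient X / D */
}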
5138 \f
5139 /* Return a tree node with data type TYPE, describing the value of X.
5140 Usually this is a VAR_DECL, if there is no obvious better choice.
5141 X may be an expression; however, we only support those expressions
5142 generated by loop.c. */
5143
5144 tree
5145 make_tree (tree type, rtx x)
5146 {
5147 tree t;
5148
5149 switch (GET_CODE (x))
5150 {
5151 case CONST_INT:
5152 case CONST_WIDE_INT:
5153 t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
5154 return t;
5155
5156 case CONST_DOUBLE:
5157 STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
5158 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
5159 t = wide_int_to_tree (type,
5160 wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
5161 HOST_BITS_PER_WIDE_INT * 2));
5162 else
5163 t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));
5164
5165 return t;
5166
5167 case CONST_VECTOR:
5168 {
5169 int units = CONST_VECTOR_NUNITS (x);
5170 tree itype = TREE_TYPE (type);
5171 tree *elts;
5172 int i;
5173
5174 /* Build a tree with vector elements. */
5175 elts = XALLOCAVEC (tree, units);
5176 for (i = units - 1; i >= 0; --i)
5177 {
5178 rtx elt = CONST_VECTOR_ELT (x, i);
5179 elts[i] = make_tree (itype, elt);
5180 }
5181
5182 return build_vector (type, elts);
5183 }
5184
5185 case PLUS:
5186 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5187 make_tree (type, XEXP (x, 1)));
5188
5189 case MINUS:
5190 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5191 make_tree (type, XEXP (x, 1)));
5192
5193 case NEG:
5194 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5195
5196 case MULT:
5197 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5198 make_tree (type, XEXP (x, 1)));
5199
5200 case ASHIFT:
5201 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5202 make_tree (type, XEXP (x, 1)));
5203
5204 case LSHIFTRT:
5205 t = unsigned_type_for (type);
5206 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5207 make_tree (t, XEXP (x, 0)),
5208 make_tree (type, XEXP (x, 1))));
5209
5210 case ASHIFTRT:
5211 t = signed_type_for (type);
5212 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5213 make_tree (t, XEXP (x, 0)),
5214 make_tree (type, XEXP (x, 1))));
5215
5216 case DIV:
5217 if (TREE_CODE (type) != REAL_TYPE)
5218 t = signed_type_for (type);
5219 else
5220 t = type;
5221
5222 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5223 make_tree (t, XEXP (x, 0)),
5224 make_tree (t, XEXP (x, 1))));
5225 case UDIV:
5226 t = unsigned_type_for (type);
5227 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5228 make_tree (t, XEXP (x, 0)),
5229 make_tree (t, XEXP (x, 1))));
5230
5231 case SIGN_EXTEND:
5232 case ZERO_EXTEND:
5233 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5234 GET_CODE (x) == ZERO_EXTEND);
5235 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5236
5237 case CONST:
5238 return make_tree (type, XEXP (x, 0));
5239
5240 case SYMBOL_REF:
5241 t = SYMBOL_REF_DECL (x);
5242 if (t)
5243 return fold_convert (type, build_fold_addr_expr (t));
5244 /* fall through. */
5245
5246 default:
5247 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5248
5249 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5250 address mode to pointer mode. */
5251 if (POINTER_TYPE_P (type))
5252 x = convert_memory_address_addr_space
5253 (SCALAR_INT_TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5254
5255 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5256 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5257 t->decl_with_rtl.rtl = x;
5258
5259 return t;
5260 }
5261 }
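/* For instance, given some REG in Pmode, make_tree (sizetype,
   gen_rtx_PLUS (Pmode, reg, GEN_INT (4))) builds a PLUS_EXPR of a
   synthesized VAR_DECL (whose rtl is the REG, via the default case
   above) and the integer constant 4.  */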
5262 \f
5263 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5264 and returning TARGET.
5265
5266 If TARGET is 0, a pseudo-register or constant is returned. */
5267
5268 rtx
5269 expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
5270 {
5271 rtx tem = 0;
5272
5273 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5274 tem = simplify_binary_operation (AND, mode, op0, op1);
5275 if (tem == 0)
5276 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5277
5278 if (target == 0)
5279 target = tem;
5280 else if (tem != target)
5281 emit_move_insn (target, tem);
5282 return target;
5283 }
5284
5285 /* Helper function for emit_store_flag. */
5286 rtx
5287 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5288 machine_mode mode, machine_mode compare_mode,
5289 int unsignedp, rtx x, rtx y, int normalizep,
5290 machine_mode target_mode)
5291 {
5292 struct expand_operand ops[4];
5293 rtx op0, comparison, subtarget;
5294 rtx_insn *last;
5295 machine_mode result_mode = targetm.cstore_mode (icode);
5296
5297 last = get_last_insn ();
5298 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5299 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5300 if (!x || !y)
5301 {
5302 delete_insns_since (last);
5303 return NULL_RTX;
5304 }
5305
5306 if (target_mode == VOIDmode)
5307 target_mode = result_mode;
5308 if (!target)
5309 target = gen_reg_rtx (target_mode);
5310
5311 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5312
5313 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5314 create_fixed_operand (&ops[1], comparison);
5315 create_fixed_operand (&ops[2], x);
5316 create_fixed_operand (&ops[3], y);
5317 if (!maybe_expand_insn (icode, 4, ops))
5318 {
5319 delete_insns_since (last);
5320 return NULL_RTX;
5321 }
5322 subtarget = ops[0].value;
5323
5324 /* If we are converting to a wider mode, first convert to
5325 TARGET_MODE, then normalize. This produces better combining
5326 opportunities on machines that have a SIGN_EXTRACT when we are
5327 testing a single bit. This mostly benefits the 68k.
5328
5329 If STORE_FLAG_VALUE does not have the sign bit set when
5330 interpreted in MODE, we can do this conversion as unsigned, which
5331 is usually more efficient. */
5332 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5333 {
5334 convert_move (target, subtarget,
5335 val_signbit_known_clear_p (result_mode,
5336 STORE_FLAG_VALUE));
5337 op0 = target;
5338 result_mode = target_mode;
5339 }
5340 else
5341 op0 = subtarget;
5342
5343 /* If we want to keep subexpressions around, don't reuse our last
5344 target. */
5345 if (optimize)
5346 subtarget = 0;
5347
5348 /* Now normalize to the proper value in MODE. Sometimes we don't
5349 have to do anything. */
5350 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5351 ;
5352 /* STORE_FLAG_VALUE might be the most negative number, so write
5353 the comparison this way to avoid a compile-time warning. */
5354 else if (- normalizep == STORE_FLAG_VALUE)
5355 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5356
5357 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5358 it hard to use a value of just the sign bit due to ANSI integer
5359 constant typing rules. */
5360 else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
5361 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5362 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5363 normalizep == 1);
5364 else
5365 {
5366 gcc_assert (STORE_FLAG_VALUE & 1);
5367
5368 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5369 if (normalizep == -1)
5370 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5371 }
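/* For example, if the cstore pattern yields STORE_FLAG_VALUE == 1 but
   NORMALIZEP == -1, the -NORMALIZEP == STORE_FLAG_VALUE case above
   simply negates the 0/1 result to get 0/-1; if STORE_FLAG_VALUE is
   the sign bit (say 0x80000000 in SImode), a single right shift by
   GET_MODE_BITSIZE (RESULT_MODE) - 1 gives 0/1 (logical shift, when
   NORMALIZEP == 1) or 0/-1 (arithmetic shift).  */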
5372
5373 /* If we were converting to a smaller mode, do the conversion now. */
5374 if (target_mode != result_mode)
5375 {
5376 convert_move (target, op0, 0);
5377 return target;
5378 }
5379 else
5380 return op0;
5381 }
5382
5383
5384 /* A subroutine of emit_store_flag only including "tricks" that do not
5385 need a recursive call. These are kept separate to avoid infinite
5386 loops. */
5387
5388 static rtx
5389 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5390 machine_mode mode, int unsignedp, int normalizep,
5391 machine_mode target_mode)
5392 {
5393 rtx subtarget;
5394 enum insn_code icode;
5395 machine_mode compare_mode;
5396 enum mode_class mclass;
5397 enum rtx_code scode;
5398
5399 if (unsignedp)
5400 code = unsigned_condition (code);
5401 scode = swap_condition (code);
5402
5403 /* If one operand is constant, make it the second one. Only do this
5404 if the other operand is not constant as well. */
5405
5406 if (swap_commutative_operands_p (op0, op1))
5407 {
5408 std::swap (op0, op1);
5409 code = swap_condition (code);
5410 }
5411
5412 if (mode == VOIDmode)
5413 mode = GET_MODE (op0);
5414
5415 /* For some comparisons with 1 and -1, we can convert this to
5416 comparisons with zero. This will often produce more opportunities for
5417 store-flag insns. */
5418
5419 switch (code)
5420 {
5421 case LT:
5422 if (op1 == const1_rtx)
5423 op1 = const0_rtx, code = LE;
5424 break;
5425 case LE:
5426 if (op1 == constm1_rtx)
5427 op1 = const0_rtx, code = LT;
5428 break;
5429 case GE:
5430 if (op1 == const1_rtx)
5431 op1 = const0_rtx, code = GT;
5432 break;
5433 case GT:
5434 if (op1 == constm1_rtx)
5435 op1 = const0_rtx, code = GE;
5436 break;
5437 case GEU:
5438 if (op1 == const1_rtx)
5439 op1 = const0_rtx, code = NE;
5440 break;
5441 case LTU:
5442 if (op1 == const1_rtx)
5443 op1 = const0_rtx, code = EQ;
5444 break;
5445 default:
5446 break;
5447 }
5448
5449 /* If we are comparing a double-word integer with zero or -1, we can
5450 convert the comparison into one involving a single word. */
5451 scalar_int_mode int_mode;
5452 if (is_int_mode (mode, &int_mode)
5453 && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
5454 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5455 {
5456 rtx tem;
5457 if ((code == EQ || code == NE)
5458 && (op1 == const0_rtx || op1 == constm1_rtx))
5459 {
5460 rtx op00, op01;
5461
5462 /* Do a logical OR or AND of the two words and compare the
5463 result. */
5464 op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
5465 op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
5466 tem = expand_binop (word_mode,
5467 op1 == const0_rtx ? ior_optab : and_optab,
5468 op00, op01, NULL_RTX, unsignedp,
5469 OPTAB_DIRECT);
5470
5471 if (tem != 0)
5472 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5473 unsignedp, normalizep);
5474 }
5475 else if ((code == LT || code == GE) && op1 == const0_rtx)
5476 {
5477 rtx op0h;
5478
5479 /* If testing the sign bit, can just test on high word. */
5480 op0h = simplify_gen_subreg (word_mode, op0, int_mode,
5481 subreg_highpart_offset (word_mode,
5482 int_mode));
5483 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5484 unsignedp, normalizep);
5485 }
5486 else
5487 tem = NULL_RTX;
5488
5489 if (tem)
5490 {
5491 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5492 return tem;
5493 if (!target)
5494 target = gen_reg_rtx (target_mode);
5495
5496 convert_move (target, tem,
5497 !val_signbit_known_set_p (word_mode,
5498 (normalizep ? normalizep
5499 : STORE_FLAG_VALUE)));
5500 return target;
5501 }
5502 }
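/* For instance, on a 32-bit target a DImode test X == 0 becomes
   (XLOW | XHIGH) == 0, X == -1 becomes (XLOW & XHIGH) == -1, and
   X < 0 or X >= 0 only needs to look at the sign bit of the high
   word.  */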
5503
5504 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5505 complement of A (for GE) and shifting the sign bit to the low bit. */
5506 if (op1 == const0_rtx && (code == LT || code == GE)
5507 && is_int_mode (mode, &int_mode)
5508 && (normalizep || STORE_FLAG_VALUE == 1
5509 || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
5510 {
5511 subtarget = target;
5512
5513 if (!target)
5514 target_mode = int_mode;
5515
5516 /* If the result is to be wider than OP0, it is best to convert it
5517 first. If it is to be narrower, it is *incorrect* to convert it
5518 first. */
5519 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (int_mode))
5520 {
5521 op0 = convert_modes (target_mode, int_mode, op0, 0);
5522 mode = target_mode;
5523 }
5524
5525 if (target_mode != mode)
5526 subtarget = 0;
5527
5528 if (code == GE)
5529 op0 = expand_unop (mode, one_cmpl_optab, op0,
5530 ((STORE_FLAG_VALUE == 1 || normalizep)
5531 ? 0 : subtarget), 0);
5532
5533 if (STORE_FLAG_VALUE == 1 || normalizep)
5534 /* If we are supposed to produce a 0/1 value, we want to do
5535 a logical shift from the sign bit to the low-order bit; for
5536 a -1/0 value, we do an arithmetic shift. */
5537 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5538 GET_MODE_BITSIZE (mode) - 1,
5539 subtarget, normalizep != -1);
5540
5541 if (mode != target_mode)
5542 op0 = convert_modes (target_mode, mode, op0, 0);
5543
5544 return op0;
5545 }
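/* As a concrete case: for A < 0 with STORE_FLAG_VALUE == 1 and
   NORMALIZEP == 1 this emits a single logical right shift of A by
   GET_MODE_BITSIZE (MODE) - 1, leaving exactly the sign bit in the
   low-order position; for A >= 0 the operand is first complemented,
   so the shift yields 1 precisely when the sign bit of A was clear.  */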
5546
5547 mclass = GET_MODE_CLASS (mode);
5548 FOR_EACH_MODE_FROM (compare_mode, mode)
5549 {
5550 machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5551 icode = optab_handler (cstore_optab, optab_mode);
5552 if (icode != CODE_FOR_nothing)
5553 {
5554 do_pending_stack_adjust ();
5555 rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
5556 unsignedp, op0, op1, normalizep, target_mode);
5557 if (tem)
5558 return tem;
5559
5560 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5561 {
5562 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5563 unsignedp, op1, op0, normalizep, target_mode);
5564 if (tem)
5565 return tem;
5566 }
5567 break;
5568 }
5569 }
5570
5571 return 0;
5572 }
5573
5574 /* Subroutine of emit_store_flag that handles cases in which the operands
5575 are scalar integers. SUBTARGET is the target to use for temporary
5576 operations and TRUEVAL is the value to store when the condition is
5577 true. All other arguments are as for emit_store_flag. */
5578
5579 rtx
5580 emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0,
5581 rtx op1, machine_mode mode, int unsignedp,
5582 int normalizep, rtx trueval)
5583 {
5584 machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5585 rtx_insn *last = get_last_insn ();
5586 rtx tem;
5587
5588 /* If this is an equality comparison of integers, we can try to exclusive-or
5589 (or subtract) the two operands and use a recursive call to try the
5590 comparison with zero. Don't do any of these cases if branches are
5591 very cheap. */
5592
5593 if ((code == EQ || code == NE) && op1 != const0_rtx)
5594 {
5595 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5596 OPTAB_WIDEN);
5597
5598 if (tem == 0)
5599 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5600 OPTAB_WIDEN);
5601 if (tem != 0)
5602 tem = emit_store_flag (target, code, tem, const0_rtx,
5603 mode, unsignedp, normalizep);
5604 if (tem != 0)
5605 return tem;
5606
5607 delete_insns_since (last);
5608 }
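/* In other words, "X == Y" (or "X != Y") is retried as
   "(X ^ Y) == 0", falling back to "(X - Y) == 0" when no XOR pattern
   is available, so that the zero-based tricks further down can
   apply.  */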
5609
5610 /* For integer comparisons, try the reverse comparison. However, for
5611 small X, and if we'd have to extend anyway, implementing "X != 0"
5612 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5613 rtx_code rcode = reverse_condition (code);
5614 if (can_compare_p (rcode, mode, ccp_store_flag)
5615 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5616 && code == NE
5617 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5618 && op1 == const0_rtx))
5619 {
5620 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5621 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5622
5623 /* Again, for the reverse comparison, use either an addition or an XOR. */
5624 if (want_add
5625 && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
5626 optimize_insn_for_speed_p ()) == 0)
5627 {
5628 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5629 STORE_FLAG_VALUE, target_mode);
5630 if (tem != 0)
5631 tem = expand_binop (target_mode, add_optab, tem,
5632 gen_int_mode (normalizep, target_mode),
5633 target, 0, OPTAB_WIDEN);
5634 }
5635 else if (!want_add
5636 && rtx_cost (trueval, mode, XOR, 1,
5637 optimize_insn_for_speed_p ()) == 0)
5638 {
5639 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5640 normalizep, target_mode);
5641 if (tem != 0)
5642 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5643 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5644 }
5645
5646 if (tem != 0)
5647 return tem;
5648 delete_insns_since (last);
5649 }
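/* For example, with STORE_FLAG_VALUE == 1 and NORMALIZEP == -1, the
   WANT_ADD path computes "X < Y ? -1 : 0" as the reverse scc result
   plus NORMALIZEP, i.e. (X >= Y) - 1; the XOR path instead flips the
   normalized reverse result with TRUEVAL when that XOR is essentially
   free.  */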
5650
5651 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5652 the constant zero. Reject all other comparisons at this point. Only
5653 do LE and GT if branches are expensive since they are expensive on
5654 2-operand machines. */
5655
5656 if (op1 != const0_rtx
5657 || (code != EQ && code != NE
5658 && (BRANCH_COST (optimize_insn_for_speed_p (),
5659 false) <= 1 || (code != LE && code != GT))))
5660 return 0;
5661
5662 /* Try to put the result of the comparison in the sign bit. Assume we can't
5663 do the necessary operation below. */
5664
5665 tem = 0;
5666
5667 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5668 the sign bit set. */
5669
5670 if (code == LE)
5671 {
5672 /* This is destructive, so SUBTARGET can't be OP0. */
5673 if (rtx_equal_p (subtarget, op0))
5674 subtarget = 0;
5675
5676 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5677 OPTAB_WIDEN);
5678 if (tem)
5679 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5680 OPTAB_WIDEN);
5681 }
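/* Quick check of the identity: A == 0 gives 0 | -1 == -1 (sign bit
   set), A == 3 gives 3 | 2 == 3 (clear), and A == -4 gives
   -4 | -5 == -1 (set), so the sign bit of A | (A - 1) is set exactly
   when A <= 0.  */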
5682
5683 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5684 number of bits in the mode of OP0, minus one. */
5685
5686 if (code == GT)
5687 {
5688 if (rtx_equal_p (subtarget, op0))
5689 subtarget = 0;
5690
5691 tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
5692 GET_MODE_BITSIZE (mode) - 1,
5693 subtarget, 0);
5694 if (tem)
5695 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5696 OPTAB_WIDEN);
5697 }
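/* Quick check, in a 32-bit mode: A == 5 gives (5 >> 31) - 5 == -5
   (sign bit set), A == 0 gives 0 (clear), and A == -2 gives
   -1 - (-2) == 1 (clear), so the sign bit is set exactly when A > 0;
   the shift is an arithmetic shift by the mode width minus one.  */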
5698
5699 if (code == EQ || code == NE)
5700 {
5701 /* For EQ or NE, one way to do the comparison is to apply an operation
5702 that converts the operand into a positive number if it is nonzero
5703 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5704 for NE we negate. This puts the result in the sign bit. Then we
5705 normalize with a shift, if needed.
5706
5707 Two operations that can do the above actions are ABS and FFS, so try
5708 them. If that doesn't work, and MODE is smaller than a full word,
5709 we can use zero-extension to the wider mode (an unsigned conversion)
5710 as the operation. */
5711
5712 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5713 that is compensated by the subsequent overflow when subtracting
5714 one / negating. */
5715
5716 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5717 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5718 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5719 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5720 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5721 {
5722 tem = convert_modes (word_mode, mode, op0, 1);
5723 mode = word_mode;
5724 }
5725
5726 if (tem != 0)
5727 {
5728 if (code == EQ)
5729 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5730 0, OPTAB_WIDEN);
5731 else
5732 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5733 }
5734
5735 /* If we couldn't do it that way, for NE we can "or" the two's complement
5736 of the value with itself. For EQ, we take the one's complement of
5737 that "or", which is an extra insn, so we only handle EQ if branches
5738 are expensive. */
5739
5740 if (tem == 0
5741 && (code == NE
5742 || BRANCH_COST (optimize_insn_for_speed_p (),
5743 false) > 1))
5744 {
5745 if (rtx_equal_p (subtarget, op0))
5746 subtarget = 0;
5747
5748 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5749 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5750 OPTAB_WIDEN);
5751
5752 if (tem && code == EQ)
5753 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5754 }
5755 }
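/* Sanity check of the ABS variant for NE: A != 0 becomes -abs (A),
   which is negative for every nonzero A (including INT_MIN, where the
   overflow noted above leaves it negative) and 0 for A == 0; the
   final shift below then normalizes that sign bit.  */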
5756
5757 if (tem && normalizep)
5758 tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
5759 GET_MODE_BITSIZE (mode) - 1,
5760 subtarget, normalizep == 1);
5761
5762 if (tem)
5763 {
5764 if (!target)
5765 ;
5766 else if (GET_MODE (tem) != target_mode)
5767 {
5768 convert_move (target, tem, 0);
5769 tem = target;
5770 }
5771 else if (!subtarget)
5772 {
5773 emit_move_insn (target, tem);
5774 tem = target;
5775 }
5776 }
5777 else
5778 delete_insns_since (last);
5779
5780 return tem;
5781 }
5782
5783 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5784 and storing in TARGET. Normally return TARGET.
5785 Return 0 if that cannot be done.
5786
5787 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5788 it is VOIDmode, they cannot both be CONST_INT.
5789
5790 UNSIGNEDP is for the case where we have to widen the operands
5791 to perform the operation. It says to use zero-extension.
5792
5793 NORMALIZEP is 1 if we should convert the result to be either zero
5794 or one. NORMALIZEP is -1 if we should convert the result to be
5795 either zero or -1. If NORMALIZEP is zero, the result will be left
5796 "raw" out of the scc insn. */
5797
5798 rtx
5799 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5800 machine_mode mode, int unsignedp, int normalizep)
5801 {
5802 machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5803 enum rtx_code rcode;
5804 rtx subtarget;
5805 rtx tem, trueval;
5806 rtx_insn *last;
5807
5808 /* If we compare constants, we shouldn't use a store-flag operation,
5809 but a constant load. We can get there via the vanilla route that
5810 usually generates a compare-branch sequence, but will in this case
5811 fold the comparison to a constant, and thus elide the branch. */
5812 if (CONSTANT_P (op0) && CONSTANT_P (op1))
5813 return NULL_RTX;
5814
5815 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5816 target_mode);
5817 if (tem)
5818 return tem;
5819
5820 /* If we reached here, we can't do this with a scc insn, however there
5821 are some comparisons that can be done in other ways. Don't do any
5822 of these cases if branches are very cheap. */
5823 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5824 return 0;
5825
5826 /* See what we need to return. We can only return a 1, -1, or the
5827 sign bit. */
5828
5829 if (normalizep == 0)
5830 {
5831 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5832 normalizep = STORE_FLAG_VALUE;
5833
5834 else if (val_signbit_p (mode, STORE_FLAG_VALUE))
5835 ;
5836 else
5837 return 0;
5838 }
5839
5840 last = get_last_insn ();
5841
5842 /* If optimizing, use different pseudo registers for each insn, instead
5843 of reusing the same pseudo. This leads to better CSE, but slows
5844 down the compiler, since there are more pseudos. */
5845 subtarget = (!optimize
5846 && (target_mode == mode)) ? target : NULL_RTX;
5847 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5848
5849 /* For floating-point comparisons, try the reverse comparison or try
5850 changing the "orderedness" of the comparison. */
5851 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5852 {
5853 enum rtx_code first_code;
5854 bool and_them;
5855
5856 rcode = reverse_condition_maybe_unordered (code);
5857 if (can_compare_p (rcode, mode, ccp_store_flag)
5858 && (code == ORDERED || code == UNORDERED
5859 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5860 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5861 {
5862 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5863 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5864
5865 /* For the reverse comparison, use either an addition or an XOR. */
5866 if (want_add
5867 && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
5868 optimize_insn_for_speed_p ()) == 0)
5869 {
5870 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5871 STORE_FLAG_VALUE, target_mode);
5872 if (tem)
5873 return expand_binop (target_mode, add_optab, tem,
5874 gen_int_mode (normalizep, target_mode),
5875 target, 0, OPTAB_WIDEN);
5876 }
5877 else if (!want_add
5878 && rtx_cost (trueval, mode, XOR, 1,
5879 optimize_insn_for_speed_p ()) == 0)
5880 {
5881 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5882 normalizep, target_mode);
5883 if (tem)
5884 return expand_binop (target_mode, xor_optab, tem, trueval,
5885 target, INTVAL (trueval) >= 0,
5886 OPTAB_WIDEN);
5887 }
5888 }
5889
5890 delete_insns_since (last);
5891
5892 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5893 if (code == ORDERED || code == UNORDERED)
5894 return 0;
5895
5896 and_them = split_comparison (code, mode, &first_code, &code);
5897
5898 /* If there are no NaNs, the first comparison should always fall through.
5899 Effectively change the comparison to the other one. */
5900 if (!HONOR_NANS (mode))
5901 {
5902 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5903 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5904 target_mode);
5905 }
5906
5907 if (!HAVE_conditional_move)
5908 return 0;
5909
5910 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5911 conditional move. */
5912 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5913 normalizep, target_mode);
5914 if (tem == 0)
5915 return 0;
5916
5917 if (and_them)
5918 tem = emit_conditional_move (target, code, op0, op1, mode,
5919 tem, const0_rtx, GET_MODE (tem), 0);
5920 else
5921 tem = emit_conditional_move (target, code, op0, op1, mode,
5922 trueval, tem, GET_MODE (tem), 0);
5923
5924 if (tem == 0)
5925 delete_insns_since (last);
5926 return tem;
5927 }
5928
5929 /* The remaining tricks only apply to integer comparisons. */
5930
5931 scalar_int_mode int_mode;
5932 if (is_int_mode (mode, &int_mode))
5933 return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
5934 unsignedp, normalizep, trueval);
5935
5936 return 0;
5937 }
5938
5939 /* Like emit_store_flag, but always succeeds. */
5940
5941 rtx
5942 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5943 machine_mode mode, int unsignedp, int normalizep)
5944 {
5945 rtx tem;
5946 rtx_code_label *label;
5947 rtx trueval, falseval;
5948
5949 /* First see if emit_store_flag can do the job. */
5950 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5951 if (tem != 0)
5952 return tem;
5953
5954 if (!target)
5955 target = gen_reg_rtx (word_mode);
5956
5957 /* If this failed, we have to do this with set/compare/jump/set code.
5958 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5959 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5960 if (code == NE
5961 && GET_MODE_CLASS (mode) == MODE_INT
5962 && REG_P (target)
5963 && op0 == target
5964 && op1 == const0_rtx)
5965 {
5966 label = gen_label_rtx ();
5967 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
5968 NULL_RTX, NULL, label,
5969 profile_probability::uninitialized ());
5970 emit_move_insn (target, trueval);
5971 emit_label (label);
5972 return target;
5973 }
5974
5975 if (!REG_P (target)
5976 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5977 target = gen_reg_rtx (GET_MODE (target));
5978
5979 /* Jump in the right direction if the target cannot implement CODE
5980 but can jump on its reverse condition. */
5981 falseval = const0_rtx;
5982 if (! can_compare_p (code, mode, ccp_jump)
5983 && (! FLOAT_MODE_P (mode)
5984 || code == ORDERED || code == UNORDERED
5985 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5986 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5987 {
5988 enum rtx_code rcode;
5989 if (FLOAT_MODE_P (mode))
5990 rcode = reverse_condition_maybe_unordered (code);
5991 else
5992 rcode = reverse_condition (code);
5993
5994 /* Canonicalize to UNORDERED for the libcall. */
5995 if (can_compare_p (rcode, mode, ccp_jump)
5996 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5997 {
5998 falseval = trueval;
5999 trueval = const0_rtx;
6000 code = rcode;
6001 }
6002 }
6003
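/* The fallback sequence emitted below is, in effect:
     target = TRUEVAL;
     if (OP0 <CODE> OP1) goto done;
     target = FALSEVAL;
   done:
   with TRUEVAL and FALSEVAL swapped above when only the reverse
   condition can be compared directly.  */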
6004 emit_move_insn (target, trueval);
6005 label = gen_label_rtx ();
6006 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
6007 label, profile_probability::uninitialized ());
6008
6009 emit_move_insn (target, falseval);
6010 emit_label (label);
6011
6012 return target;
6013 }
6014 \f
6015 /* Perform a possibly multi-word comparison and conditional jump to LABEL
6016 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
6017 now a thin wrapper around do_compare_rtx_and_jump. */
6018
6019 static void
6020 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
6021 rtx_code_label *label)
6022 {
6023 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
6024 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
6025 NULL, label, profile_probability::uninitialized ());
6026 }