/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "langhooks.h"
#include "tree-vector-builder.h"

struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif

static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT,
                                      poly_uint64, poly_uint64,
                                      machine_mode, rtx, bool, bool);
static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   poly_uint64, poly_uint64,
                                   rtx, scalar_int_mode, bool);
static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
                                     unsigned HOST_WIDE_INT,
                                     unsigned HOST_WIDE_INT,
                                     rtx, scalar_int_mode, bool);
static void store_split_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   poly_uint64, poly_uint64,
                                   rtx, scalar_int_mode, bool);
static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
                                       unsigned HOST_WIDE_INT,
                                       unsigned HOST_WIDE_INT, int, rtx,
                                       machine_mode, machine_mode, bool, bool);
static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int, bool);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode, rtx_code_label *);
static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);

/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static inline rtx
mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
                       GET_MODE_PRECISION (mode)), mode);
}
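
/* For example, with a 32-bit MODE, mask_rtx (SImode, 4, 8, false)
   yields the constant 0x00000ff0 (eight ones starting at bit 4), and
   mask_rtx (SImode, 4, 8, true) yields its complement 0xfffff00f.  */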

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
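
/* For example, 8 & 7 == 0, so 8 passes the test, while 6 & 5 == 4, so 6
   is rejected: subtracting 1 from a power of two clears its single set
   bit and sets every bit below it.  */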

struct init_expmed_rtl
{
  rtx reg;
  rtx plus;
  rtx neg;
  rtx mult;
  rtx sdiv;
  rtx udiv;
  rtx sdiv_32;
  rtx smod_32;
  rtx wide_mult;
  rtx wide_lshr;
  rtx wide_trunc;
  rtx shift;
  rtx shift_mult;
  rtx shift_add;
  rtx shift_sub0;
  rtx shift_sub1;
  rtx zext;
  rtx trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};

static void
init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode,
                      scalar_int_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  to_size = GET_MODE_PRECISION (to_mode);
  from_size = GET_MODE_PRECISION (from_mode);

  /* Most partial integers have a precision less than the "full"
     integer they require for storage.  If one doesn't, reduce the
     bit size by one for the comparison below.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (to_size))
    to_size --;
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (from_size))
    from_size --;

  /* Assume the cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? all->trunc : all->zext);

  PUT_MODE (all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed,
                    set_src_cost (which, to_mode, speed));
}

static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (all->reg, mode);
  PUT_MODE (all->plus, mode);
  PUT_MODE (all->neg, mode);
  PUT_MODE (all->mult, mode);
  PUT_MODE (all->sdiv, mode);
  PUT_MODE (all->udiv, mode);
  PUT_MODE (all->sdiv_32, mode);
  PUT_MODE (all->smod_32, mode);
  PUT_MODE (all->wide_trunc, mode);
  PUT_MODE (all->shift, mode);
  PUT_MODE (all->shift_mult, mode);
  PUT_MODE (all->shift_add, mode);
  PUT_MODE (all->shift_sub0, mode);
  PUT_MODE (all->shift_sub1, mode);
  PUT_MODE (all->zext, mode);
  PUT_MODE (all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
  set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
  set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
  set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
  set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (all->shift, 1) = all->cint[m];
      XEXP (all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
                                                       speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
                                                        speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
                                                        speed));
    }

  scalar_int_mode int_mode_to;
  if (is_a <scalar_int_mode> (mode, &int_mode_to))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, int_mode_to,
                              as_a <scalar_int_mode> (mode_from), speed);

      scalar_int_mode wider_mode;
      if (GET_MODE_CLASS (int_mode_to) == MODE_INT
          && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
        {
          PUT_MODE (all->zext, wider_mode);
          PUT_MODE (all->wide_mult, wider_mode);
          PUT_MODE (all->wide_lshr, wider_mode);
          XEXP (all->wide_lshr, 1)
            = gen_int_shift_amount (wider_mode, mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (all->wide_mult, wider_mode, speed));
          set_mul_highpart_cost (speed, int_mode_to,
                                 set_src_cost (all->wide_trunc,
                                               int_mode_to, speed));
        }
    }
}

void
init_expmed (void)
{
  struct init_expmed_rtl all;
  machine_mode mode = QImode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
      all.cint[m] = GEN_INT (m);
    }

  /* Avoid using hard regs in ways which may be unsupported.  */
  all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
  all.neg = gen_rtx_NEG (mode, all.reg);
  all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
  all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
  all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
  all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
  all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
  all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
  all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
  all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
  all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
  all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
  all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
  all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
  all.trunc = gen_rtx_TRUNCATE (mode, all.reg);

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, mode, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();

  ggc_free (all.trunc);
  ggc_free (all.shift_sub1);
  ggc_free (all.shift_sub0);
  ggc_free (all.shift_add);
  ggc_free (all.shift_mult);
  ggc_free (all.shift);
  ggc_free (all.wide_trunc);
  ggc_free (all.wide_lshr);
  ggc_free (all.wide_mult);
  ggc_free (all.zext);
  ggc_free (all.smod_32);
  ggc_free (all.sdiv_32);
  ggc_free (all.udiv);
  ggc_free (all.sdiv);
  ggc_free (all.mult);
  ggc_free (all.neg);
  ggc_free (all.plus);
  ggc_free (all.reg);
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Whether reverse storage order is supported on the target.  */
static int reverse_storage_order_supported = -1;

/* Check whether reverse storage order is supported on the target.  */

static void
check_reverse_storage_order_support (void)
{
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_storage_order_supported = 0;
      sorry ("reverse scalar storage order");
    }
  else
    reverse_storage_order_supported = 1;
}

/* Whether reverse FP storage order is supported on the target.  */
static int reverse_float_storage_order_supported = -1;

/* Check whether reverse FP storage order is supported on the target.  */

static void
check_reverse_float_storage_order_support (void)
{
  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_float_storage_order_supported = 0;
      sorry ("reverse floating-point scalar storage order");
    }
  else
    reverse_float_storage_order_supported = 1;
}

/* Return an rtx representing the value of X with reverse storage order.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
flip_storage_order (machine_mode mode, rtx x)
{
  scalar_int_mode int_mode;
  rtx result;

  if (mode == QImode)
    return x;

  if (COMPLEX_MODE_P (mode))
    {
      rtx real = read_complex_part (x, false);
      rtx imag = read_complex_part (x, true);

      real = flip_storage_order (GET_MODE_INNER (mode), real);
      imag = flip_storage_order (GET_MODE_INNER (mode), imag);

      return gen_rtx_CONCAT (mode, real, imag);
    }

  if (__builtin_expect (reverse_storage_order_supported < 0, 0))
    check_reverse_storage_order_support ();

  if (!is_a <scalar_int_mode> (mode, &int_mode))
    {
      if (FLOAT_MODE_P (mode)
          && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
        check_reverse_float_storage_order_support ();

      if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
        {
          sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
          return x;
        }
      x = gen_lowpart (int_mode, x);
    }

  result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
  if (result == 0)
    result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);

  if (int_mode != mode)
    result = gen_lowpart (mode, result);

  return result;
}
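
/* As an illustration: for a 32-bit scalar integer, flipping the storage
   order amounts to a byte swap, so the SImode constant 0x12345678
   becomes 0x78563412.  Complex modes recurse on the real and imaginary
   parts, and other scalar modes are first punned to an integer mode of
   the same precision.  */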

/* If MODE is set, adjust bitfield memory MEM so that it points to the
   first unit of mode MODE that contains a bitfield of size BITSIZE at
   bit position BITNUM.  If MODE is not set, return a BLKmode reference
   to every byte in the bitfield.  Set *NEW_BITNUM to the bit position
   of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  scalar_int_mode imode;
  if (mode.exists (&imode))
    {
      unsigned int unit = GET_MODE_BITSIZE (imode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, imode, offset);
    }
  else
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, BLKmode, offset, size);
    }
}
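
/* Worked example with illustrative numbers: for BITNUM 37 and a 32-bit
   IMODE, *NEW_BITNUM becomes 37 % 32 = 5 and the byte offset is
   (37 - 5) / 8 = 4, i.e. the field starts at bit 5 of the second
   32-bit unit.  Without a mode, *NEW_BITNUM becomes 37 % 8 = 5 at byte
   offset 37 / 8 = 4, with a BLKmode size just large enough to cover
   *NEW_BITNUM + BITSIZE bits.  */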

/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              poly_uint64 bitregion_start,
                              poly_uint64 bitregion_end,
                              machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  scalar_int_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          scalar_int_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          scalar_int_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}

/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (poly_uint64 bitnum, poly_uint64 bitsize,
                     machine_mode struct_mode)
{
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (struct_mode);
  if (BYTES_BIG_ENDIAN)
    return (multiple_p (bitnum, BITS_PER_UNIT)
            && (known_eq (bitnum + bitsize, GET_MODE_BITSIZE (struct_mode))
                || multiple_p (bitnum + bitsize,
                               regsize * BITS_PER_UNIT)));
  else
    return multiple_p (bitnum, regsize * BITS_PER_UNIT);
}
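
/* Illustration with hypothetical target parameters: on a little-endian
   target whose REGMODE_NATURAL_SIZE is 8 bytes, a field placed at
   BITNUM 0 or 64 is a lowpart subreg, whereas one at BITNUM 32 is not.
   On a big-endian target the test instead keys off where the field
   ends.  */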

/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
   Return false if the access would touch memory outside the range
   BITREGION_START to BITREGION_END for conformance to the C++ memory
   model.  */

static bool
strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            scalar_int_mode fieldmode,
                            poly_uint64 bitregion_start,
                            poly_uint64 bitregion_end)
{
  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);

  /* -fstrict-volatile-bitfields must be enabled and we must have a
     volatile MEM.  */
  if (!MEM_P (op0)
      || !MEM_VOLATILE_P (op0)
      || flag_strict_volatile_bitfields <= 0)
    return false;

  /* The bit size must not be larger than the field mode, and
     the field mode must not be larger than a word.  */
  if (bitsize > modesize || modesize > BITS_PER_WORD)
    return false;

  /* Check for cases of unaligned fields that must be split.  */
  if (bitnum % modesize + bitsize > modesize)
    return false;

  /* The memory must be sufficiently aligned for a MODESIZE access.
     This condition guarantees that the memory access will not
     touch anything after the end of the structure.  */
  if (MEM_ALIGN (op0) < modesize)
    return false;

  /* Check for cases where the C++ memory model applies.  */
  if (maybe_ne (bitregion_end, 0U)
      && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
          || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
                       bitregion_end)))
    return false;

  return true;
}
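
/* Example of the intent, with illustrative numbers: for a volatile MEM
   aligned to 32 bits, a 3-bit field at BITNUM 16 with a 32-bit SImode
   FIELDMODE satisfies every check above, so -fstrict-volatile-bitfields
   forces the access to be performed as a single full SImode load or
   store rather than a narrower or wider one.  */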

/* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.
   Store the byte offset in *BYTENUM if so.  */

static bool
simple_mem_bitfield_p (rtx op0, poly_uint64 bitsize, poly_uint64 bitnum,
                       machine_mode mode, poly_uint64 *bytenum)
{
  return (MEM_P (op0)
          && multiple_p (bitnum, BITS_PER_UNIT, bytenum)
          && known_eq (bitsize, GET_MODE_BITSIZE (mode))
          && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
              || (multiple_p (bitnum, GET_MODE_ALIGNMENT (mode))
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
\f
/* Try to use instruction INSV to store VALUE into a field of OP0.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
   BLKmode MEM.  VALUE_MODE is the mode of VALUE.  BITSIZE and BITNUM
   are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            opt_scalar_int_mode op0_mode,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            rtx value, scalar_int_mode value_mode)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx_insn *last = get_last_insn ();
  bool copy_back = false;

  scalar_int_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* There is a similar overflow check at the start of store_bit_field_1,
     but that one only handles the case where the field lies completely
     outside the register.  The field can also lie partially in the
     register, in which case we need to adjust BITSIZE for the partial
     overflow.  Without this fix, pr48335-2.c would break on big-endian
     targets that have a bit-insert instruction, such as arm and
     aarch64.  */
  if (bitsize + bitnum > unit && bitnum < unit)
    {
      warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
               "destination object, data truncated into %wu-bit",
               bitsize, unit - bitnum);
      bitsize = unit - bitnum;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (value_mode != op_mode)
    {
      if (GET_MODE_BITSIZE (value_mode) >= bitsize)
        {
          rtx tmp;
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (value_mode) < GET_MODE_SIZE (op_mode))
            {
              tmp = simplify_subreg (op_mode, value1, value_mode, 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (value_mode, value1),
                                           value_mode, 0);
            }
          else
            {
              tmp = gen_lowpart_if_possible (op_mode, value1);
              if (! tmp)
                tmp = gen_lowpart (op_mode, force_reg (value_mode, value1));
            }
          value1 = tmp;
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* The parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}

/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
                   poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                   machine_mode fieldmode,
                   rtx value, bool reverse, bool fallback_p)
{
  rtx op0 = str_rtx;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  machine_mode outermode = GET_MODE (op0);
  scalar_mode innermode = GET_MODE_INNER (outermode);
  poly_uint64 pos;
  if (VECTOR_MODE_P (outermode)
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
      && fieldmode == innermode
      && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
      && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (vec_set_optab, outermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.
         In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;
      HOST_WIDE_INT regnum;
      poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (op0));
      if (known_eq (bitnum, 0U)
          && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
        {
          sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
          if (sub)
            {
              if (reverse)
                sub = flip_storage_order (GET_MODE (op0), sub);
              emit_move_insn (op0, sub);
              return true;
            }
        }
      else if (constant_multiple_p (bitnum, regsize * BITS_PER_UNIT, &regnum)
               && multiple_p (bitsize, regsize * BITS_PER_UNIT))
        {
          sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     regnum * regsize);
          if (sub)
            {
              if (reverse)
                value = flip_storage_order (fieldmode, value);
              emit_move_insn (sub, value);
              return true;
            }
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  poly_uint64 bytenum;
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode, &bytenum))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bytenum);
      if (reverse)
        value = flip_storage_order (fieldmode, value);
      emit_move_insn (op0, value);
      return true;
    }

  /* It's possible we'll need to handle other cases here for
     polynomial bitnum and bitsize.  */

  /* From here on we need to be looking at a fixed-size insertion.  */
  unsigned HOST_WIDE_INT ibitsize = bitsize.to_constant ();
  unsigned HOST_WIDE_INT ibitnum = bitnum.to_constant ();

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
  scalar_int_mode imode;
  if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
    {
      if (MEM_P (op0))
        op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
                                            0, MEM_SIZE (op0));
      else
        op0 = gen_lowpart (op0_mode.require (), op0);
    }

  return store_integral_bit_field (op0, op0_mode, ibitsize, ibitnum,
                                   bitregion_start, bitregion_end,
                                   fieldmode, value, reverse, fallback_p);
}

/* Subroutine of store_bit_field_1, with the same arguments, except
   that BITSIZE and BITNUM are constant.  Handle cases specific to
   integral modes.  If OP0_MODE is defined, it is the mode of OP0,
   otherwise OP0 is a BLKmode MEM.  */

static bool
store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                          unsigned HOST_WIDE_INT bitsize,
                          unsigned HOST_WIDE_INT bitnum,
                          poly_uint64 bitregion_start,
                          poly_uint64 bitregion_end,
                          machine_mode fieldmode,
                          rtx value, bool reverse, bool fallback_p)
{
  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && !reverse
      && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx_insn *last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.

         The mode must be fixed-size, since insertions into variable-sized
         objects are meant to be handled before calling this function.  */
      fixed_size_mode value_mode = as_a <fixed_size_mode> (GET_MODE (value));
      if (value_mode == VOIDmode)
        value_mode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (value_mode) / UNITS_PER_WORD
                                    - i - 1
                                  : i);
          unsigned int bit_offset = (backwards ^ reverse
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, value_mode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big-endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            {
              int shift = BITS_PER_WORD - new_bitsize;
              rtx shift_rtx = gen_int_shift_amount (word_mode, shift);
              value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                  value_word, shift_rtx,
                                                  NULL_RTX, true,
                                                  OPTAB_LIB_WIDEN);
            }

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, reverse, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  rtx orig_value = value;
  scalar_int_mode value_mode;
  if (GET_MODE (value) == VOIDmode)
    /* By this point we've dealt with values that are bigger than a word,
       so word_mode is a conservatively correct choice.  */
    value_mode = word_mode;
  else if (!is_a <scalar_int_mode> (GET_MODE (value), &value_mode))
    {
      value_mode = int_mode_for_mode (GET_MODE (value)).require ();
      value = gen_reg_rtx (value_mode);
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.
     Don't do this if op0 is a single hard register wider than word
     such as a float or vector register.  */
  if (!MEM_P (op0)
      && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD
      && (!REG_P (op0)
          || !HARD_REGISTER_P (op0)
          || hard_regno_nregs (REGNO (op0), op0_mode.require ()) != 1))
    {
      if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return true;
        }
      op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      op0_mode = word_mode;
      bitnum %= BITS_PER_WORD;
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && !reverse
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (op0_mode.require ()),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, op0_mode,
                                     bitsize, bitnum, value, value_mode))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0) && !reverse)
    {
      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                        fieldmode)
          && store_bit_field_using_insv (&insv, op0, op0_mode,
                                         bitsize, bitnum, value, value_mode))
        return true;

      rtx_insn *last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, reverse, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, op0_mode, bitsize, bitnum, bitregion_start,
                         bitregion_end, value, value_mode, reverse);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is the bit position of the first bitfield in this region.
   BITREGION_END is the bit position of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.

   If REVERSE is true, the store is to be done in reverse order.  */

void
store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
                 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                 machine_mode fieldmode,
                 rtx value, bool reverse)
{
  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  unsigned HOST_WIDE_INT ibitsize = 0, ibitnum = 0;
  scalar_int_mode int_mode;
  if (bitsize.is_constant (&ibitsize)
      && bitnum.is_constant (&ibitnum)
      && is_a <scalar_int_mode> (fieldmode, &int_mode)
      && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum, int_mode,
                                     bitregion_start, bitregion_end))
    {
      /* Storing of a full word can be done with a simple store.
         We know here that the field can be accessed with one single
         instruction.  For targets that support unaligned memory,
         an unaligned access may be necessary.  */
      if (ibitsize == GET_MODE_BITSIZE (int_mode))
        {
          str_rtx = adjust_bitfield_address (str_rtx, int_mode,
                                             ibitnum / BITS_PER_UNIT);
          if (reverse)
            value = flip_storage_order (int_mode, value);
          gcc_assert (ibitnum % BITS_PER_UNIT == 0);
          emit_move_insn (str_rtx, value);
        }
      else
        {
          rtx temp;

          str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize,
                                          ibitnum, &ibitnum);
          gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
          temp = copy_to_reg (str_rtx);
          if (!store_bit_field_1 (temp, ibitsize, ibitnum, 0, 0,
                                  int_mode, value, reverse, true))
            gcc_unreachable ();

          emit_move_insn (str_rtx, temp);
        }

      return;
    }

  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
    {
      scalar_int_mode best_mode;
      machine_mode addr_mode = VOIDmode;

      poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
      bitnum -= bitregion_start;
      poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      if (bitsize.is_constant (&ibitsize)
          && bitnum.is_constant (&ibitnum)
          && get_best_mode (ibitsize, ibitnum,
                            bitregion_start, bitregion_end,
                            MEM_ALIGN (str_rtx), INT_MAX,
                            MEM_VOLATILE_P (str_rtx), &best_mode))
        addr_mode = best_mode;
      str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
                                              offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, reverse, true))
    gcc_unreachable ();
}
\f
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  If OP0_MODE is defined,
   it is the mode of OP0, otherwise OP0 is a BLKmode MEM.  VALUE_MODE is
   the mode of VALUE.

   If REVERSE is true, the store is to be done in reverse order.  */

static void
store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  scalar_int_mode best_mode;
  if (MEM_P (op0))
    {
      unsigned int max_bitsize = BITS_PER_WORD;
      scalar_int_mode imode;
      if (op0_mode.exists (&imode) && GET_MODE_BITSIZE (imode) < max_bitsize)
        max_bitsize = GET_MODE_BITSIZE (imode);

      if (!get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                          MEM_ALIGN (op0), max_bitsize, MEM_VOLATILE_P (op0),
                          &best_mode))
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return;
        }

      op0 = narrow_bit_field_mem (op0, best_mode, bitsize, bitnum, &bitnum);
    }
  else
    best_mode = op0_mode.require ();

  store_fixed_bit_field_1 (op0, best_mode, bitsize, bitnum,
                           value, value_mode, reverse);
}

/* Helper function for store_fixed_bit_field, stores
   the bit field always using MODE, which is the mode of OP0.  The other
   arguments are as for store_fixed_bit_field.  */

static void
store_fixed_bit_field_1 (rtx op0, scalar_int_mode mode,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitnum,
                         rtx value, scalar_int_mode value_mode, bool reverse)
{
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      unsigned HOST_WIDE_INT v = UINTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= (HOST_WIDE_INT_1U << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == (HOST_WIDE_INT_1U << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT
                   && v == HOST_WIDE_INT_M1U))
        all_one = 1;

      value = lshift_value (mode, v, bitnum);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (value_mode) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (value_mode != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  if (reverse)
    value = flip_storage_order (mode, value);

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
      if (reverse)
        mask = flip_storage_order (mode, mask);
      temp = expand_binop (mode, and_optab, temp, mask,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
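
/* A minimal freestanding sketch of the shift-and-mask sequence above,
   using hypothetical names and a plain 32-bit word instead of rtl, for
   exposition only:

     #include <stdint.h>

     static uint32_t
     store_bits_sketch (uint32_t word, uint32_t value,
                        unsigned int bitnum, unsigned int bitsize)
     {
       uint32_t mask = (bitsize < 32
                        ? (UINT32_C (1) << bitsize) - 1 : UINT32_MAX);
       value &= mask;                    // mask VALUE to BITSIZE bits
       word &= ~(mask << bitnum);        // clear the chosen bits in OP0
       return word | (value << bitnum);  // logical-or the field in
     }

   For instance, store_bits_sketch (0xffffffff, 0x5, 4, 3) clears bits
   4..6 and stores 0b101 there, giving 0xffffffdf.  */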
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store, which has mode VALUE_MODE.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
   a BLKmode MEM.

   If REVERSE is true, the store is to be done in reverse order.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  unsigned int unit, total_bits, bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If OP0 is a memory with a mode, then UNIT must not be larger than
     OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
     again, and we will mutually recurse forever.  */
  if (MEM_P (op0) && op0_mode.exists ())
    unit = MIN (unit, GET_MODE_BITSIZE (op0_mode.require ()));

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode, force_reg (value_mode, value));
      value_mode = word_mode;
    }

  total_bits = GET_MODE_BITSIZE (value_mode);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;
      rtx part;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When the region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (maybe_ne (bitregion_end, 0U)
          && unit > BITS_PER_UNIT
          && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is little-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            bitsize - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1, false);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is big-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize, bitsdone, NULL_RTX,
                                            1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.  */
      rtx op0_piece = op0;
      opt_scalar_int_mode op0_piece_mode = op0_mode;
      if (SUBREG_P (op0) || REG_P (op0))
        {
          scalar_int_mode imode;
          if (op0_mode.exists (&imode)
              && GET_MODE_SIZE (imode) < UNITS_PER_WORD)
            {
              if (offset)
                op0_piece = const0_rtx;
            }
          else
            {
              op0_piece = operand_subword_force (op0,
                                                 offset * unit / BITS_PER_WORD,
                                                 GET_MODE (op0));
              op0_piece_mode = word_mode;
            }
          offset &= BITS_PER_WORD / unit - 1;
        }

      /* OFFSET is in UNITs, and UNIT is in bits.  If OP0_PIECE is
         const0_rtx, it is just an out-of-bounds access.  Ignore it.  */
      if (op0_piece != const0_rtx)
        store_fixed_bit_field (op0_piece, op0_piece_mode, thissize,
                               offset * unit + thispos, bitregion_start,
                               bitregion_end, part, word_mode, reverse);
      bitsdone += thissize;
    }
}
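
/* Worked example with illustrative numbers: with 32-bit words, storing
   a 16-bit field at BITPOS 24 cannot be done in one aligned access, so
   the loop above runs twice: the first iteration stores 8 bits at
   OFFSET 0, THISPOS 24 (up to the word boundary), and the second stores
   the remaining 8 bits at OFFSET 1, THISPOS 0, fetching each part of
   VALUE with extract_fixed_bit_field.  */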
\f
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, machine_mode mode,
                             machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If TMODE is not a scalar integer mode, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
      x = convert_to_mode (int_mode, x, unsignedp);
      x = force_reg (int_mode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}

/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXTV describes the extraction instruction to use.  If OP0_MODE
   is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
   The other arguments are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              opt_scalar_int_mode op0_mode,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              machine_mode mode, machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  scalar_int_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && op0_mode.require () != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && op0_mode.require () != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (partial_subreg_p (GET_MODE (spec_target), ext_mode))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}

/* See whether it would be valid to extract the part of OP0 described
   by BITNUM and BITSIZE into a value of mode MODE using a subreg
   operation.  Return the subreg if so, otherwise return null.  */

static rtx
extract_bit_field_as_subreg (machine_mode mode, rtx op0,
                             poly_uint64 bitsize, poly_uint64 bitnum)
{
  poly_uint64 bytenum;
  if (multiple_p (bitnum, BITS_PER_UNIT, &bytenum)
      && known_eq (bitsize, GET_MODE_BITSIZE (mode))
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op0)))
    return simplify_gen_subreg (mode, op0, GET_MODE (op0), bytenum);
  return NULL_RTX;
}
1589
1590 /* A subroutine of extract_bit_field, with the same arguments.
1591 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1592 if we can find no other means of implementing the operation.
1593 if FALLBACK_P is false, return NULL instead. */
1594
1595 static rtx
1596 extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1597 int unsignedp, rtx target, machine_mode mode,
1598 machine_mode tmode, bool reverse, bool fallback_p,
1599 rtx *alt_rtl)
1600 {
1601 rtx op0 = str_rtx;
1602 machine_mode mode1;
1603
1604 if (tmode == VOIDmode)
1605 tmode = mode;
1606
1607 while (GET_CODE (op0) == SUBREG)
1608 {
1609 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1610 op0 = SUBREG_REG (op0);
1611 }
1612
1613 /* If we have an out-of-bounds access to a register, just return an
1614 uninitialized register of the required mode. This can occur if the
1615 source code contains an out-of-bounds access to a small array. */
1616 if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
1617 return gen_reg_rtx (tmode);
1618
1619 if (REG_P (op0)
1620 && mode == GET_MODE (op0)
1621 && known_eq (bitnum, 0U)
1622 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
1623 {
1624 if (reverse)
1625 op0 = flip_storage_order (mode, op0);
1626 /* We're trying to extract a full register from itself. */
1627 return op0;
1628 }
1629
1630 /* First try to check for vector from vector extractions. */
1631 if (VECTOR_MODE_P (GET_MODE (op0))
1632 && !MEM_P (op0)
1633 && VECTOR_MODE_P (tmode)
1634 && known_eq (bitsize, GET_MODE_BITSIZE (tmode))
1635 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode))
1636 {
1637 machine_mode new_mode = GET_MODE (op0);
1638 if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
1639 {
1640 scalar_mode inner_mode = GET_MODE_INNER (tmode);
1641 unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0))
1642 / GET_MODE_UNIT_BITSIZE (tmode));
1643 if (!mode_for_vector (inner_mode, nunits).exists (&new_mode)
1644 || !VECTOR_MODE_P (new_mode)
1645 || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
1646 || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
1647 || !targetm.vector_mode_supported_p (new_mode))
1648 new_mode = VOIDmode;
1649 }
1650 poly_uint64 pos;
1651 if (new_mode != VOIDmode
1652 && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
1653 != CODE_FOR_nothing)
1654 && multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
1655 {
1656 struct expand_operand ops[3];
1657 machine_mode outermode = new_mode;
1658 machine_mode innermode = tmode;
1659 enum insn_code icode
1660 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1661
1662 if (new_mode != GET_MODE (op0))
1663 op0 = gen_lowpart (new_mode, op0);
1664 create_output_operand (&ops[0], target, innermode);
1665 ops[0].target = 1;
1666 create_input_operand (&ops[1], op0, outermode);
1667 create_integer_operand (&ops[2], pos);
1668 if (maybe_expand_insn (icode, 3, ops))
1669 {
1670 if (alt_rtl && ops[0].target)
1671 *alt_rtl = target;
1672 target = ops[0].value;
1673 if (GET_MODE (target) != mode)
1674 return gen_lowpart (tmode, target);
1675 return target;
1676 }
1677 }
1678 }
1679
1680 /* See if we can get a better vector mode before extracting. */
1681 if (VECTOR_MODE_P (GET_MODE (op0))
1682 && !MEM_P (op0)
1683 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1684 {
1685 machine_mode new_mode;
1686
1687 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1688 new_mode = MIN_MODE_VECTOR_FLOAT;
1689 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1690 new_mode = MIN_MODE_VECTOR_FRACT;
1691 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1692 new_mode = MIN_MODE_VECTOR_UFRACT;
1693 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1694 new_mode = MIN_MODE_VECTOR_ACCUM;
1695 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1696 new_mode = MIN_MODE_VECTOR_UACCUM;
1697 else
1698 new_mode = MIN_MODE_VECTOR_INT;
1699
1700 FOR_EACH_MODE_FROM (new_mode, new_mode)
1701 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1702 && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode)
1703 && targetm.vector_mode_supported_p (new_mode))
1704 break;
1705 if (new_mode != VOIDmode)
1706 op0 = gen_lowpart (new_mode, op0);
1707 }
1708
1709 /* Use vec_extract patterns for extracting parts of vectors whenever
1710 available. */
1711 machine_mode outermode = GET_MODE (op0);
1712 scalar_mode innermode = GET_MODE_INNER (outermode);
1713 poly_uint64 pos;
1714 if (VECTOR_MODE_P (outermode)
1715 && !MEM_P (op0)
1716 && (convert_optab_handler (vec_extract_optab, outermode, innermode)
1717 != CODE_FOR_nothing)
1718 && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
1719 && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
1720 {
1721 struct expand_operand ops[3];
1722 enum insn_code icode
1723 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1724
1725 create_output_operand (&ops[0], target, innermode);
1726 ops[0].target = 1;
1727 create_input_operand (&ops[1], op0, outermode);
1728 create_integer_operand (&ops[2], pos);
1729 if (maybe_expand_insn (icode, 3, ops))
1730 {
1731 if (alt_rtl && ops[0].target)
1732 *alt_rtl = target;
1733 target = ops[0].value;
1734 if (GET_MODE (target) != mode)
1735 return gen_lowpart (tmode, target);
1736 return target;
1737 }
1738 }
1739
1740 /* Make sure we are playing with integral modes. Pun with subregs
1741 if we aren't. */
1742 opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
1743 scalar_int_mode imode;
1744 if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
1745 {
1746 if (MEM_P (op0))
1747 op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
1748 0, MEM_SIZE (op0));
1749 else if (op0_mode.exists (&imode))
1750 {
1751 op0 = gen_lowpart (imode, op0);
1752
1753 /* If we got a SUBREG, force it into a register since we
1754 aren't going to be able to do another SUBREG on it. */
1755 if (GET_CODE (op0) == SUBREG)
1756 op0 = force_reg (imode, op0);
1757 }
1758 else
1759 {
1760 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
1761 rtx mem = assign_stack_temp (GET_MODE (op0), size);
1762 emit_move_insn (mem, op0);
1763 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1764 }
1765 }
1766
1767 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1768 If that's wrong, the solution is to test for it and set TARGET to 0
1769 if needed. */
1770
1771 /* Get the mode of the field to use for atomic access or subreg
1772 conversion. */
1773 if (!SCALAR_INT_MODE_P (tmode)
1774 || !mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0).exists (&mode1))
1775 mode1 = mode;
1776 gcc_assert (mode1 != BLKmode);
1777
1778 /* Extraction of a full MODE1 value can be done with a subreg as long
1779 as the least significant bit of the value is the least significant
1780 bit of either OP0 or a word of OP0. */
1781 if (!MEM_P (op0) && !reverse)
1782 {
1783 rtx sub = extract_bit_field_as_subreg (mode1, op0, bitsize, bitnum);
1784 if (sub)
1785 return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1786 }
1787
1788 /* Extraction of a full MODE1 value can be done with a load as long as
1789 the field is on a byte boundary and is sufficiently aligned. */
1790 poly_uint64 bytenum;
1791 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1, &bytenum))
1792 {
1793 op0 = adjust_bitfield_address (op0, mode1, bytenum);
1794 if (reverse)
1795 op0 = flip_storage_order (mode1, op0);
1796 return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1797 }
1798
1799 /* If we have a memory source and a non-constant bit offset, restrict
1800 the memory to the referenced bytes. This is a worst-case fallback
1801 but is useful for things like vector booleans. */
1802 if (MEM_P (op0) && !bitnum.is_constant ())
1803 {
1804 bytenum = bits_to_bytes_round_down (bitnum);
1805 bitnum = num_trailing_bits (bitnum);
1806 poly_uint64 bytesize = bits_to_bytes_round_up (bitnum + bitsize);
1807 op0 = adjust_bitfield_address_size (op0, BLKmode, bytenum, bytesize);
1808 op0_mode = opt_scalar_int_mode ();
1809 }
1810
1811 /* It's possible we'll need to handle other cases here for
1812 polynomial bitnum and bitsize. */
1813
1814 /* From here on we need to be looking at a fixed-size extraction. */
1815 return extract_integral_bit_field (op0, op0_mode, bitsize.to_constant (),
1816 bitnum.to_constant (), unsignedp,
1817 target, mode, tmode, reverse, fallback_p);
1818 }
1819
1820 /* Subroutine of extract_bit_field_1, with the same arguments, except
1821 that BITSIZE and BITNUM are constant. Handle cases specific to
1822 integral modes. If OP0_MODE is defined, it is the mode of OP0,
1823 otherwise OP0 is a BLKmode MEM. */
1824
1825 static rtx
1826 extract_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1827 unsigned HOST_WIDE_INT bitsize,
1828 unsigned HOST_WIDE_INT bitnum, int unsignedp,
1829 rtx target, machine_mode mode, machine_mode tmode,
1830 bool reverse, bool fallback_p)
1831 {
1832 /* Handle fields bigger than a word. */
1833
1834 if (bitsize > BITS_PER_WORD)
1835 {
1836 /* Here we transfer the words of the field
1837 in the order least significant first.
1838 This is because the most significant word is the one which may
1839 be less than full. */
1840
1841 const bool backwards = WORDS_BIG_ENDIAN;
1842 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1843 unsigned int i;
1844 rtx_insn *last;
1845
1846 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1847 target = gen_reg_rtx (mode);
1848
1849 /* In case we're about to clobber a base register or something
1850 (see gcc.c-torture/execute/20040625-1.c). */
1851 if (reg_mentioned_p (target, op0))
1852 target = gen_reg_rtx (mode);
1853
1854 /* Indicate for flow that the entire target reg is being set. */
1855 emit_clobber (target);
1856
1857 /* The mode must be fixed-size, since extract_bit_field_1 handles
1858 extractions from variable-sized objects before calling this
1859 function. */
1860 unsigned int target_size = GET_MODE_SIZE (GET_MODE (target));
1861 last = get_last_insn ();
1862 for (i = 0; i < nwords; i++)
1863 {
1864 /* If I is 0, use the low-order word in both field and target;
1865 if I is 1, use the next to lowest word; and so on. */
1866 /* Word number in TARGET to use. */
1867 unsigned int wordnum
1868 = (backwards ? target_size / UNITS_PER_WORD - i - 1 : i);
1869 /* Offset from start of field in OP0. */
1870 unsigned int bit_offset = (backwards ^ reverse
1871 ? MAX ((int) bitsize - ((int) i + 1)
1872 * BITS_PER_WORD,
1873 0)
1874 : (int) i * BITS_PER_WORD);
1875 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1876 rtx result_part
1877 = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1878 bitsize - i * BITS_PER_WORD),
1879 bitnum + bit_offset, 1, target_part,
1880 mode, word_mode, reverse, fallback_p, NULL);
1881
1882 gcc_assert (target_part);
1883 if (!result_part)
1884 {
1885 delete_insns_since (last);
1886 return NULL;
1887 }
1888
1889 if (result_part != target_part)
1890 emit_move_insn (target_part, result_part);
1891 }
1892
1893 if (unsignedp)
1894 {
1895 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1896 need to be zeroed out. */
1897 if (target_size > nwords * UNITS_PER_WORD)
1898 {
1899 unsigned int i, total_words;
1900
1901 total_words = target_size / UNITS_PER_WORD;
1902 for (i = nwords; i < total_words; i++)
1903 emit_move_insn
1904 (operand_subword (target,
1905 backwards ? total_words - i - 1 : i,
1906 1, VOIDmode),
1907 const0_rtx);
1908 }
1909 return target;
1910 }
1911
1912 /* Signed bit field: sign-extend with two arithmetic shifts. */
1913 target = expand_shift (LSHIFT_EXPR, mode, target,
1914 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1915 return expand_shift (RSHIFT_EXPR, mode, target,
1916 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1917 }
1918
1919 /* If OP0 is a multi-word register, narrow it to the affected word.
1920 If the region spans two words, defer to extract_split_bit_field. */
1921 if (!MEM_P (op0) && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD)
1922 {
1923 if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1924 {
1925 if (!fallback_p)
1926 return NULL_RTX;
1927 target = extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
1928 unsignedp, reverse);
1929 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1930 }
1931 op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
1932 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1933 op0_mode = word_mode;
1934 bitnum %= BITS_PER_WORD;
1935 }
1936
1937 /* From here on we know the desired field is smaller than a word.
1938 If OP0 is a register, it too fits within a word. */
1939 enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1940 extraction_insn extv;
1941 if (!MEM_P (op0)
1942 && !reverse
1943 /* ??? We could limit the structure size to the part of OP0 that
1944 contains the field, with appropriate checks for endianness
1945 and TARGET_TRULY_NOOP_TRUNCATION. */
1946 && get_best_reg_extraction_insn (&extv, pattern,
1947 GET_MODE_BITSIZE (op0_mode.require ()),
1948 tmode))
1949 {
1950 rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1951 bitsize, bitnum,
1952 unsignedp, target, mode,
1953 tmode);
1954 if (result)
1955 return result;
1956 }
1957
1958 /* If OP0 is a memory, try copying it to a register and seeing if a
1959 cheap register alternative is available. */
1960 if (MEM_P (op0) && !reverse)
1961 {
1962 if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1963 tmode))
1964 {
1965 rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1966 bitsize, bitnum,
1967 unsignedp, target, mode,
1968 tmode);
1969 if (result)
1970 return result;
1971 }
1972
1973 rtx_insn *last = get_last_insn ();
1974
1975 /* Try loading part of OP0 into a register and extracting the
1976 bitfield from that. */
1977 unsigned HOST_WIDE_INT bitpos;
1978 rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1979 0, 0, tmode, &bitpos);
1980 if (xop0)
1981 {
1982 xop0 = copy_to_reg (xop0);
1983 rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1984 unsignedp, target,
1985 mode, tmode, reverse, false, NULL);
1986 if (result)
1987 return result;
1988 delete_insns_since (last);
1989 }
1990 }
1991
1992 if (!fallback_p)
1993 return NULL;
1994
1995 /* Find a correspondingly-sized integer field, so we can apply
1996 shifts and masks to it. */
1997 scalar_int_mode int_mode;
1998 if (!int_mode_for_mode (tmode).exists (&int_mode))
1999 /* If this fails, we should probably push op0 out to memory and then
2000 do a load. */
2001 int_mode = int_mode_for_mode (mode).require ();
2002
2003 target = extract_fixed_bit_field (int_mode, op0, op0_mode, bitsize,
2004 bitnum, target, unsignedp, reverse);
2005
2006 /* Complex values must be reversed piecewise, so we need to undo the global
2007 reversal, convert to the complex mode and reverse again. */
2008 if (reverse && COMPLEX_MODE_P (tmode))
2009 {
2010 target = flip_storage_order (int_mode, target);
2011 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2012 target = flip_storage_order (tmode, target);
2013 }
2014 else
2015 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2016
2017 return target;
2018 }
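/* Illustrative sketch of the two-arithmetic-shift sign extension used above,
   written in plain C for a 32-bit word.  Assumes the usual arithmetic
   behaviour of right-shifting a signed value, which ISO C leaves
   implementation-defined.  */
static inline int
example_sign_extend_field (int field, int bitsize)
{
  int shift = 32 - bitsize;		/* left-justify the field's sign bit */
  return (field << shift) >> shift;	/* then arithmetic-shift it back */
}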
2019
2020 /* Generate code to extract a bit-field from STR_RTX
2021 containing BITSIZE bits, starting at BITNUM,
2022 and put it in TARGET if possible (if TARGET is nonzero).
2023 Regardless of TARGET, we return the rtx for where the value is placed.
2024
2025 STR_RTX is the structure containing the byte (a REG or MEM).
2026 UNSIGNEDP is nonzero if this is an unsigned bit field.
2027 MODE is the natural mode of the field value once extracted.
2028 TMODE is the mode the caller would like the value to have;
2029 but the value may be returned with type MODE instead.
2030
2031 If REVERSE is true, the extraction is to be done in reverse order.
2032
2033 If a TARGET is specified and we can store in it at no extra cost,
2034 we do so, and return TARGET.
2035 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
2036 if they are equally easy. */
2037
2038 rtx
2039 extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
2040 int unsignedp, rtx target, machine_mode mode,
2041 machine_mode tmode, bool reverse, rtx *alt_rtl)
2042 {
2043 machine_mode mode1;
2044
2045 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
2046 if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
2047 mode1 = GET_MODE (str_rtx);
2048 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
2049 mode1 = GET_MODE (target);
2050 else
2051 mode1 = tmode;
2052
2053 unsigned HOST_WIDE_INT ibitsize, ibitnum;
2054 scalar_int_mode int_mode;
2055 if (bitsize.is_constant (&ibitsize)
2056 && bitnum.is_constant (&ibitnum)
2057 && is_a <scalar_int_mode> (mode1, &int_mode)
2058 && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum,
2059 int_mode, 0, 0))
2060 {
2061 /* Extraction of a full INT_MODE value can be done with a simple load.
2062 We know here that the field can be accessed with one single
2063 instruction. For targets that support unaligned memory,
2064 an unaligned access may be necessary. */
2065 if (ibitsize == GET_MODE_BITSIZE (int_mode))
2066 {
2067 rtx result = adjust_bitfield_address (str_rtx, int_mode,
2068 ibitnum / BITS_PER_UNIT);
2069 if (reverse)
2070 result = flip_storage_order (int_mode, result);
2071 gcc_assert (ibitnum % BITS_PER_UNIT == 0);
2072 return convert_extracted_bit_field (result, mode, tmode, unsignedp);
2073 }
2074
2075 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize, ibitnum,
2076 &ibitnum);
2077 gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
2078 str_rtx = copy_to_reg (str_rtx);
2079 return extract_bit_field_1 (str_rtx, ibitsize, ibitnum, unsignedp,
2080 target, mode, tmode, reverse, true, alt_rtl);
2081 }
2082
2083 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
2084 target, mode, tmode, reverse, true, alt_rtl);
2085 }
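/* Illustrative sketch of a typical call (hypothetical helper, not from the
   original sources): extract an unsigned 8-bit field starting at bit 4 of
   SRC into a fresh QImode value.  */
static ATTRIBUTE_UNUSED rtx
example_extract_byte (rtx src)
{
  return extract_bit_field (src, 8, 4, /*unsignedp=*/1, NULL_RTX,
			    QImode, QImode, /*reverse=*/false,
			    /*alt_rtl=*/NULL);
}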
2086 \f
2087 /* Use shifts and boolean operations to extract a field of BITSIZE bits
2088 from bit BITNUM of OP0. If OP0_MODE is defined, it is the mode of OP0,
2089 otherwise OP0 is a BLKmode MEM.
2090
2091 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
2092 If REVERSE is true, the extraction is to be done in reverse order.
2093
2094 If TARGET is nonzero, attempts to store the value there
2095 and return TARGET, but this is not guaranteed.
2096 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
2097
2098 static rtx
2099 extract_fixed_bit_field (machine_mode tmode, rtx op0,
2100 opt_scalar_int_mode op0_mode,
2101 unsigned HOST_WIDE_INT bitsize,
2102 unsigned HOST_WIDE_INT bitnum, rtx target,
2103 int unsignedp, bool reverse)
2104 {
2105 scalar_int_mode mode;
2106 if (MEM_P (op0))
2107 {
2108 if (!get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
2109 BITS_PER_WORD, MEM_VOLATILE_P (op0), &mode))
2110 /* The only way this should occur is if the field spans word
2111 boundaries. */
2112 return extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
2113 unsignedp, reverse);
2114
2115 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
2116 }
2117 else
2118 mode = op0_mode.require ();
2119
2120 return extract_fixed_bit_field_1 (tmode, op0, mode, bitsize, bitnum,
2121 target, unsignedp, reverse);
2122 }
2123
2124 /* Helper function for extract_fixed_bit_field, extracts
2125 the bit field always using MODE, which is the mode of OP0.
2126 The other arguments are as for extract_fixed_bit_field. */
2127
2128 static rtx
2129 extract_fixed_bit_field_1 (machine_mode tmode, rtx op0, scalar_int_mode mode,
2130 unsigned HOST_WIDE_INT bitsize,
2131 unsigned HOST_WIDE_INT bitnum, rtx target,
2132 int unsignedp, bool reverse)
2133 {
2134 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
2135 for invalid input, such as extract equivalent of f5 from
2136 gcc.dg/pr48335-2.c. */
2137
2138 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2139 /* BITNUM is the distance between our msb and that of OP0.
2140 Convert it to the distance from the lsb. */
2141 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
2142
2143 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
2144 We have reduced the big-endian case to the little-endian case. */
2145 if (reverse)
2146 op0 = flip_storage_order (mode, op0);
2147
2148 if (unsignedp)
2149 {
2150 if (bitnum)
2151 {
2152 /* If the field does not already start at the lsb,
2153 shift it so it does. */
2154 /* Maybe propagate the target for the shift. */
2155 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2156 if (tmode != mode)
2157 subtarget = 0;
2158 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
2159 }
2160 /* Convert the value to the desired mode. TMODE must also be a
2161 scalar integer for this conversion to make sense, since we
2162 shouldn't reinterpret the bits. */
2163 scalar_int_mode new_mode = as_a <scalar_int_mode> (tmode);
2164 if (mode != new_mode)
2165 op0 = convert_to_mode (new_mode, op0, 1);
2166
2167 /* Unless the msb of the field used to be the msb when we shifted,
2168 mask out the upper bits. */
2169
2170 if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
2171 return expand_binop (new_mode, and_optab, op0,
2172 mask_rtx (new_mode, 0, bitsize, 0),
2173 target, 1, OPTAB_LIB_WIDEN);
2174 return op0;
2175 }
2176
2177 /* To extract a signed bit-field, first shift its msb to the msb of the word,
2178 then arithmetic-shift its lsb to the lsb of the word. */
2179 op0 = force_reg (mode, op0);
2180
2181 /* Find the narrowest integer mode that contains the field. */
2182
2183 opt_scalar_int_mode mode_iter;
2184 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2185 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize + bitnum)
2186 break;
2187
2188 mode = mode_iter.require ();
2189 op0 = convert_to_mode (mode, op0, 0);
2190
2191 if (mode != tmode)
2192 target = 0;
2193
2194 if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
2195 {
2196 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
2197 /* Maybe propagate the target for the shift. */
2198 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2199 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
2200 }
2201
2202 return expand_shift (RSHIFT_EXPR, mode, op0,
2203 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
2204 }
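/* Illustrative sketch of the unsigned shift-and-mask extraction above, in
   plain C for a 32-bit word with little-endian bit numbering.  */
static inline unsigned int
example_extract_unsigned (unsigned int word, int bitnum, int bitsize)
{
  /* BITSIZE ones in the low bits; guard the full-width case, where a
     32-bit shift count would be undefined in C.  */
  unsigned int mask = bitsize < 32 ? (1u << bitsize) - 1 : ~0u;
  return (word >> bitnum) & mask;	/* lsb-align the field, then mask */
}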
2205
2206 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
2207 VALUE << BITPOS. */
2208
2209 static rtx
2210 lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
2211 int bitpos)
2212 {
2213 return immed_wide_int_const (wi::lshift (value, bitpos), mode);
2214 }
2215 \f
2216 /* Extract a bit field that is split across two words
2217 and return an RTX for the result.
2218
2219 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2220 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2221 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
2222 If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
2223 a BLKmode MEM.
2224
2225 If REVERSE is true, the extraction is to be done in reverse order. */
2226
2227 static rtx
2228 extract_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
2229 unsigned HOST_WIDE_INT bitsize,
2230 unsigned HOST_WIDE_INT bitpos, int unsignedp,
2231 bool reverse)
2232 {
2233 unsigned int unit;
2234 unsigned int bitsdone = 0;
2235 rtx result = NULL_RTX;
2236 int first = 1;
2237
2238 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only
2239 handle that much at a time. */
2240 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2241 unit = BITS_PER_WORD;
2242 else
2243 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2244
2245 while (bitsdone < bitsize)
2246 {
2247 unsigned HOST_WIDE_INT thissize;
2248 rtx part;
2249 unsigned HOST_WIDE_INT thispos;
2250 unsigned HOST_WIDE_INT offset;
2251
2252 offset = (bitpos + bitsdone) / unit;
2253 thispos = (bitpos + bitsdone) % unit;
2254
2255 /* THISSIZE must not overrun a word boundary. Otherwise,
2256 extract_fixed_bit_field will call us again, and we will mutually
2257 recurse forever. */
2258 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2259 thissize = MIN (thissize, unit - thispos);
2260
2261 /* If OP0 is a register, then handle OFFSET here. */
2262 rtx op0_piece = op0;
2263 opt_scalar_int_mode op0_piece_mode = op0_mode;
2264 if (SUBREG_P (op0) || REG_P (op0))
2265 {
2266 op0_piece = operand_subword_force (op0, offset, op0_mode.require ());
2267 op0_piece_mode = word_mode;
2268 offset = 0;
2269 }
2270
2271 /* Extract the parts in bit-counting order,
2272 whose meaning is determined by BYTES_BIG_ENDIAN.
2273 OFFSET is in UNITs, and UNIT is in bits. */
2274 part = extract_fixed_bit_field (word_mode, op0_piece, op0_piece_mode,
2275 thissize, offset * unit + thispos,
2276 0, 1, reverse);
2277 bitsdone += thissize;
2278
2279 /* Shift this part into place for the result. */
2280 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2281 {
2282 if (bitsize != bitsdone)
2283 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2284 bitsize - bitsdone, 0, 1);
2285 }
2286 else
2287 {
2288 if (bitsdone != thissize)
2289 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2290 bitsdone - thissize, 0, 1);
2291 }
2292
2293 if (first)
2294 result = part;
2295 else
2296 /* Combine the parts with bitwise or. This works
2297 because we extracted each part as an unsigned bit field. */
2298 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2299 OPTAB_LIB_WIDEN);
2300
2301 first = 0;
2302 }
2303
2304 /* Unsigned bit field: we are done. */
2305 if (unsignedp)
2306 return result;
2307 /* Signed bit field: sign-extend with two arithmetic shifts. */
2308 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2309 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2310 return expand_shift (RSHIFT_EXPR, word_mode, result,
2311 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2312 }
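/* Illustrative sketch of how the loop above reassembles a field that
   straddles a 32-bit word boundary (little-endian bit order): each part is
   extracted as an unsigned field, shifted into place, and OR'd into the
   accumulated result.  */
static inline unsigned int
example_combine_split_field (unsigned int part_from_first_word,
			     unsigned int part_from_second_word,
			     int bits_in_first_word)
{
  return part_from_first_word
	 | (part_from_second_word << bits_in_first_word);
}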
2313 \f
2314 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2315 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2316 MODE, fill the upper bits with zeros. Fail if the layout of either
2317 mode is unknown (as for CC modes) or if the extraction would involve
2318 unprofitable mode punning. Return the value on success, otherwise
2319 return null.
2320
2321 This is different from gen_lowpart* in these respects:
2322
2323 - the returned value must always be considered an rvalue
2324
2325 - when MODE is wider than SRC_MODE, the extraction involves
2326 a zero extension
2327
2328 - when MODE is smaller than SRC_MODE, the extraction involves
2329 a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).
2330
2331 In other words, this routine performs a computation, whereas the
2332 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2333 operations. */
2334
2335 rtx
2336 extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
2337 {
2338 scalar_int_mode int_mode, src_int_mode;
2339
2340 if (mode == src_mode)
2341 return src;
2342
2343 if (CONSTANT_P (src))
2344 {
2345 /* simplify_gen_subreg can't be used here, since if simplify_subreg
2346 fails, it will happily create (subreg (symbol_ref)) or similar
2347 invalid SUBREGs. */
2348 poly_uint64 byte = subreg_lowpart_offset (mode, src_mode);
2349 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2350 if (ret)
2351 return ret;
2352
2353 if (GET_MODE (src) == VOIDmode
2354 || !validate_subreg (mode, src_mode, src, byte))
2355 return NULL_RTX;
2356
2357 src = force_reg (GET_MODE (src), src);
2358 return gen_rtx_SUBREG (mode, src, byte);
2359 }
2360
2361 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2362 return NULL_RTX;
2363
2364 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2365 && targetm.modes_tieable_p (mode, src_mode))
2366 {
2367 rtx x = gen_lowpart_common (mode, src);
2368 if (x)
2369 return x;
2370 }
2371
2372 if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
2373 || !int_mode_for_mode (mode).exists (&int_mode))
2374 return NULL_RTX;
2375
2376 if (!targetm.modes_tieable_p (src_int_mode, src_mode))
2377 return NULL_RTX;
2378 if (!targetm.modes_tieable_p (int_mode, mode))
2379 return NULL_RTX;
2380
2381 src = gen_lowpart (src_int_mode, src);
2382 src = convert_modes (int_mode, src_int_mode, src, true);
2383 src = gen_lowpart (mode, src);
2384 return src;
2385 }
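/* Illustrative sketch of the "preserve the bit pattern" contract of
   extract_low_bits, in plain C.  Assumes 32-bit float and unsigned int;
   memcpy is available here via system.h.  */
static inline unsigned int
example_float_bits (float f)
{
  unsigned int bits;
  memcpy (&bits, &f, sizeof bits);	/* reinterpret, don't convert */
  return bits;
}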
2386 \f
2387 /* Add INC into TARGET. */
2388
2389 void
2390 expand_inc (rtx target, rtx inc)
2391 {
2392 rtx value = expand_binop (GET_MODE (target), add_optab,
2393 target, inc,
2394 target, 0, OPTAB_LIB_WIDEN);
2395 if (value != target)
2396 emit_move_insn (target, value);
2397 }
2398
2399 /* Subtract DEC from TARGET. */
2400
2401 void
2402 expand_dec (rtx target, rtx dec)
2403 {
2404 rtx value = expand_binop (GET_MODE (target), sub_optab,
2405 target, dec,
2406 target, 0, OPTAB_LIB_WIDEN);
2407 if (value != target)
2408 emit_move_insn (target, value);
2409 }
2410 \f
2411 /* Output a shift instruction for expression code CODE,
2412 with SHIFTED being the rtx for the value to shift,
2413 and AMOUNT the rtx for the amount to shift by.
2414 Store the result in the rtx TARGET, if that is convenient.
2415 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2416 Return the rtx for where the value is.
2417 If that cannot be done, abort the compilation unless MAY_FAIL is true,
2418 in which case 0 is returned. */
2419
2420 static rtx
2421 expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
2422 rtx amount, rtx target, int unsignedp, bool may_fail = false)
2423 {
2424 rtx op1, temp = 0;
2425 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2426 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2427 optab lshift_optab = ashl_optab;
2428 optab rshift_arith_optab = ashr_optab;
2429 optab rshift_uns_optab = lshr_optab;
2430 optab lrotate_optab = rotl_optab;
2431 optab rrotate_optab = rotr_optab;
2432 machine_mode op1_mode;
2433 scalar_mode scalar_mode = GET_MODE_INNER (mode);
2434 int attempt;
2435 bool speed = optimize_insn_for_speed_p ();
2436
2437 op1 = amount;
2438 op1_mode = GET_MODE (op1);
2439
2440 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2441 shift amount is a vector, use the vector/vector shift patterns. */
2442 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2443 {
2444 lshift_optab = vashl_optab;
2445 rshift_arith_optab = vashr_optab;
2446 rshift_uns_optab = vlshr_optab;
2447 lrotate_optab = vrotl_optab;
2448 rrotate_optab = vrotr_optab;
2449 }
2450
2451 /* We used to detect shift counts computed by NEGATE_EXPR
2452 and shift in the other direction, but that does not work
2453 on all machines. */
2454
2455 if (SHIFT_COUNT_TRUNCATED)
2456 {
2457 if (CONST_INT_P (op1)
2458 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2459 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2460 op1 = gen_int_shift_amount (mode,
2461 (unsigned HOST_WIDE_INT) INTVAL (op1)
2462 % GET_MODE_BITSIZE (scalar_mode));
2463 else if (GET_CODE (op1) == SUBREG
2464 && subreg_lowpart_p (op1)
2465 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2466 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2467 op1 = SUBREG_REG (op1);
2468 }
2469
2470 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
2471 prefer left rotation; if op1 is from bitsize / 2 + 1 to
2472 bitsize - 1, rotate in the other direction by bitsize - op1
2473 (i.e. by 1 .. bitsize / 2 - 1) instead. */
2474 if (rotate
2475 && CONST_INT_P (op1)
2476 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2477 GET_MODE_BITSIZE (scalar_mode) - 1))
2478 {
2479 op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
2480 - INTVAL (op1)));
2481 left = !left;
2482 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2483 }
2484
2485 /* Rotation of 16-bit values by 8 bits is effectively equivalent to a bswaphi.
2486 Note that this is not the case for bigger values. For instance a rotation
2487 of 0x01020304 by 16 bits gives 0x03040102 which is different from
2488 0x04030201 (bswapsi). */
2489 if (rotate
2490 && CONST_INT_P (op1)
2491 && INTVAL (op1) == BITS_PER_UNIT
2492 && GET_MODE_SIZE (scalar_mode) == 2
2493 && optab_handler (bswap_optab, mode) != CODE_FOR_nothing)
2494 return expand_unop (mode, bswap_optab, shifted, NULL_RTX, unsignedp);
2495
2496 if (op1 == const0_rtx)
2497 return shifted;
2498
2499 /* Check whether it's cheaper to implement a left shift by a constant
2500 bit count by a sequence of additions. */
2501 if (code == LSHIFT_EXPR
2502 && CONST_INT_P (op1)
2503 && INTVAL (op1) > 0
2504 && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
2505 && INTVAL (op1) < MAX_BITS_PER_WORD
2506 && (shift_cost (speed, mode, INTVAL (op1))
2507 > INTVAL (op1) * add_cost (speed, mode))
2508 && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2509 {
2510 int i;
2511 for (i = 0; i < INTVAL (op1); i++)
2512 {
2513 temp = force_reg (mode, shifted);
2514 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2515 unsignedp, OPTAB_LIB_WIDEN);
2516 }
2517 return shifted;
2518 }
2519
2520 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2521 {
2522 enum optab_methods methods;
2523
2524 if (attempt == 0)
2525 methods = OPTAB_DIRECT;
2526 else if (attempt == 1)
2527 methods = OPTAB_WIDEN;
2528 else
2529 methods = OPTAB_LIB_WIDEN;
2530
2531 if (rotate)
2532 {
2533 /* Widening does not work for rotation. */
2534 if (methods == OPTAB_WIDEN)
2535 continue;
2536 else if (methods == OPTAB_LIB_WIDEN)
2537 {
2538 /* If we have been unable to open-code this by a rotation,
2539 do it as the IOR of two shifts. I.e., to rotate A
2540 by N bits, compute
2541 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2542 where C is the bitsize of A.
2543
2544 It is theoretically possible that the target machine might
2545 not be able to perform either shift and hence we would
2546 be making two libcalls rather than just the one for the
2547 shift (similarly if IOR could not be done). We will allow
2548 this extremely unlikely lossage to avoid complicating the
2549 code below. */
2550
2551 rtx subtarget = target == shifted ? 0 : target;
2552 rtx new_amount, other_amount;
2553 rtx temp1;
2554
2555 new_amount = op1;
2556 if (op1 == const0_rtx)
2557 return shifted;
2558 else if (CONST_INT_P (op1))
2559 other_amount = gen_int_shift_amount
2560 (mode, GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2561 else
2562 {
2563 other_amount
2564 = simplify_gen_unary (NEG, GET_MODE (op1),
2565 op1, GET_MODE (op1));
2566 HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
2567 other_amount
2568 = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2569 gen_int_mode (mask, GET_MODE (op1)));
2570 }
2571
2572 shifted = force_reg (mode, shifted);
2573
2574 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2575 mode, shifted, new_amount, 0, 1);
2576 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2577 mode, shifted, other_amount,
2578 subtarget, 1);
2579 return expand_binop (mode, ior_optab, temp, temp1, target,
2580 unsignedp, methods);
2581 }
2582
2583 temp = expand_binop (mode,
2584 left ? lrotate_optab : rrotate_optab,
2585 shifted, op1, target, unsignedp, methods);
2586 }
2587 else if (unsignedp)
2588 temp = expand_binop (mode,
2589 left ? lshift_optab : rshift_uns_optab,
2590 shifted, op1, target, unsignedp, methods);
2591
2592 /* Do arithmetic shifts.
2593 Also, if we are going to widen the operand, we can just as well
2594 use an arithmetic right-shift instead of a logical one. */
2595 if (temp == 0 && ! rotate
2596 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2597 {
2598 enum optab_methods methods1 = methods;
2599
2600 /* If trying to widen a log shift to an arithmetic shift,
2601 don't accept an arithmetic shift of the same size. */
2602 if (unsignedp)
2603 methods1 = OPTAB_MUST_WIDEN;
2604
2605 /* Arithmetic shift */
2606
2607 temp = expand_binop (mode,
2608 left ? lshift_optab : rshift_arith_optab,
2609 shifted, op1, target, unsignedp, methods1);
2610 }
2611
2612 /* We used to try extzv here for logical right shifts, but that was
2613 only useful for one machine, the VAX, and caused poor code
2614 generation there for lshrdi3, so the code was deleted and a
2615 define_expand for lshrsi3 was added to vax.md. */
2616 }
2617
2618 gcc_assert (temp != NULL_RTX || may_fail);
2619 return temp;
2620 }
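/* Illustrative sketch of the IOR-of-two-shifts rotate fallback above, in
   plain C for a 32-bit value.  Masking both counts with 31 keeps the shift
   amounts in [0, 31], so the N == 0 case never performs the undefined
   full-width shift.  */
static inline unsigned int
example_rotate_left (unsigned int x, unsigned int n)
{
  return (x << (n & 31)) | (x >> ((-n) & 31));
}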
2621
2622 /* Output a shift instruction for expression code CODE,
2623 with SHIFTED being the rtx for the value to shift,
2624 and AMOUNT the amount to shift by.
2625 Store the result in the rtx TARGET, if that is convenient.
2626 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2627 Return the rtx for where the value is. */
2628
2629 rtx
2630 expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2631 poly_int64 amount, rtx target, int unsignedp)
2632 {
2633 return expand_shift_1 (code, mode, shifted,
2634 gen_int_shift_amount (mode, amount),
2635 target, unsignedp);
2636 }
2637
2638 /* Likewise, but return 0 if that cannot be done. */
2639
2640 static rtx
2641 maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2642 int amount, rtx target, int unsignedp)
2643 {
2644 return expand_shift_1 (code, mode,
2645 shifted, GEN_INT (amount), target, unsignedp, true);
2646 }
2647
2648 /* Output a shift instruction for expression code CODE,
2649 with SHIFTED being the rtx for the value to shift,
2650 and AMOUNT the tree for the amount to shift by.
2651 Store the result in the rtx TARGET, if that is convenient.
2652 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2653 Return the rtx for where the value is. */
2654
2655 rtx
2656 expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2657 tree amount, rtx target, int unsignedp)
2658 {
2659 return expand_shift_1 (code, mode,
2660 shifted, expand_normal (amount), target, unsignedp);
2661 }
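/* Illustrative sketch (hypothetical caller, not from the original sources):
   using the constant-amount wrapper above to compute an unsigned SImode
   left shift by 3, letting expand_shift_1 pick the cheapest sequence for
   the target.  */
static ATTRIBUTE_UNUSED rtx
example_shift_left_3 (rtx x)
{
  return expand_shift (LSHIFT_EXPR, SImode, x, 3, NULL_RTX, 1);
}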
2662
2663 \f
2664 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2665 const struct mult_cost *, machine_mode mode);
2666 static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2667 const struct algorithm *, enum mult_variant);
2668 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2669 static rtx extract_high_half (scalar_int_mode, rtx);
2670 static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int);
2671 static rtx expmed_mult_highpart_optab (scalar_int_mode, rtx, rtx, rtx,
2672 int, int);
2673 /* Compute and return the best algorithm for multiplying by T.
2674 The algorithm must cost less than COST_LIMIT.
2675 If retval.cost >= COST_LIMIT, no algorithm was found and all
2676 other fields of the returned struct are undefined.
2677 MODE is the machine mode of the multiplication. */
2678
2679 static void
2680 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2681 const struct mult_cost *cost_limit, machine_mode mode)
2682 {
2683 int m;
2684 struct algorithm *alg_in, *best_alg;
2685 struct mult_cost best_cost;
2686 struct mult_cost new_limit;
2687 int op_cost, op_latency;
2688 unsigned HOST_WIDE_INT orig_t = t;
2689 unsigned HOST_WIDE_INT q;
2690 int maxm, hash_index;
2691 bool cache_hit = false;
2692 enum alg_code cache_alg = alg_zero;
2693 bool speed = optimize_insn_for_speed_p ();
2694 scalar_int_mode imode;
2695 struct alg_hash_entry *entry_ptr;
2696
2697 /* Indicate that no algorithm is yet found. If no algorithm
2698 is found, this value will be returned and will indicate failure. */
2699 alg_out->cost.cost = cost_limit->cost + 1;
2700 alg_out->cost.latency = cost_limit->latency + 1;
2701
2702 if (cost_limit->cost < 0
2703 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2704 return;
2705
2706 /* Be prepared for vector modes. */
2707 imode = as_a <scalar_int_mode> (GET_MODE_INNER (mode));
2708
2709 maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2710
2711 /* Restrict the bits of "t" to the multiplication's mode. */
2712 t &= GET_MODE_MASK (imode);
2713
2714 /* t == 1 can be done in zero cost. */
2715 if (t == 1)
2716 {
2717 alg_out->ops = 1;
2718 alg_out->cost.cost = 0;
2719 alg_out->cost.latency = 0;
2720 alg_out->op[0] = alg_m;
2721 return;
2722 }
2723
2724 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2725 fail now. */
2726 if (t == 0)
2727 {
2728 if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2729 return;
2730 else
2731 {
2732 alg_out->ops = 1;
2733 alg_out->cost.cost = zero_cost (speed);
2734 alg_out->cost.latency = zero_cost (speed);
2735 alg_out->op[0] = alg_zero;
2736 return;
2737 }
2738 }
2739
2740 /* We'll be needing a couple extra algorithm structures now. */
2741
2742 alg_in = XALLOCA (struct algorithm);
2743 best_alg = XALLOCA (struct algorithm);
2744 best_cost = *cost_limit;
2745
2746 /* Compute the hash index. */
2747 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2748
2749 /* See if we already know what to do for T. */
2750 entry_ptr = alg_hash_entry_ptr (hash_index);
2751 if (entry_ptr->t == t
2752 && entry_ptr->mode == mode
2753 && entry_ptr->speed == speed
2754 && entry_ptr->alg != alg_unknown)
2755 {
2756 cache_alg = entry_ptr->alg;
2757
2758 if (cache_alg == alg_impossible)
2759 {
2760 /* The cache tells us that it's impossible to synthesize
2761 multiplication by T within entry_ptr->cost. */
2762 if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2763 /* COST_LIMIT is at least as restrictive as the one
2764 recorded in the hash table, in which case we have no
2765 hope of synthesizing a multiplication. Just
2766 return. */
2767 return;
2768
2769 /* If we get here, COST_LIMIT is less restrictive than the
2770 one recorded in the hash table, so we may be able to
2771 synthesize a multiplication. Proceed as if we didn't
2772 have the cache entry. */
2773 }
2774 else
2775 {
2776 if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2777 /* The cached algorithm shows that this multiplication
2778 requires more cost than COST_LIMIT. Just return. This
2779 way, we don't clobber this cache entry with
2780 alg_impossible but retain useful information. */
2781 return;
2782
2783 cache_hit = true;
2784
2785 switch (cache_alg)
2786 {
2787 case alg_shift:
2788 goto do_alg_shift;
2789
2790 case alg_add_t_m2:
2791 case alg_sub_t_m2:
2792 goto do_alg_addsub_t_m2;
2793
2794 case alg_add_factor:
2795 case alg_sub_factor:
2796 goto do_alg_addsub_factor;
2797
2798 case alg_add_t2_m:
2799 goto do_alg_add_t2_m;
2800
2801 case alg_sub_t2_m:
2802 goto do_alg_sub_t2_m;
2803
2804 default:
2805 gcc_unreachable ();
2806 }
2807 }
2808 }
2809
2810 /* If we have a group of zero bits at the low-order part of T, try
2811 multiplying by the remaining bits and then doing a shift. */
2812
2813 if ((t & 1) == 0)
2814 {
2815 do_alg_shift:
2816 m = ctz_or_zero (t); /* m = number of low zero bits */
2817 if (m < maxm)
2818 {
2819 q = t >> m;
2820 /* The function expand_shift will choose between a shift and
2821 a sequence of additions, so the observed cost is given as
2822 MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)). */
2823 op_cost = m * add_cost (speed, mode);
2824 if (shift_cost (speed, mode, m) < op_cost)
2825 op_cost = shift_cost (speed, mode, m);
2826 new_limit.cost = best_cost.cost - op_cost;
2827 new_limit.latency = best_cost.latency - op_cost;
2828 synth_mult (alg_in, q, &new_limit, mode);
2829
2830 alg_in->cost.cost += op_cost;
2831 alg_in->cost.latency += op_cost;
2832 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2833 {
2834 best_cost = alg_in->cost;
2835 std::swap (alg_in, best_alg);
2836 best_alg->log[best_alg->ops] = m;
2837 best_alg->op[best_alg->ops] = alg_shift;
2838 }
2839
2840 /* See if treating ORIG_T as a signed number yields a better
2841 sequence. Try this sequence only for a negative ORIG_T
2842 as it would be useless for a non-negative ORIG_T. */
2843 if ((HOST_WIDE_INT) orig_t < 0)
2844 {
2845 /* Shift ORIG_T as follows because a right shift of a
2846 negative-valued signed type is implementation
2847 defined. */
2848 q = ~(~orig_t >> m);
2849 /* The function expand_shift will choose between a shift
2850 and a sequence of additions, so the observed cost is
2851 given as MIN (m * add_cost(speed, mode),
2852 shift_cost(speed, mode, m)). */
2853 op_cost = m * add_cost (speed, mode);
2854 if (shift_cost (speed, mode, m) < op_cost)
2855 op_cost = shift_cost (speed, mode, m);
2856 new_limit.cost = best_cost.cost - op_cost;
2857 new_limit.latency = best_cost.latency - op_cost;
2858 synth_mult (alg_in, q, &new_limit, mode);
2859
2860 alg_in->cost.cost += op_cost;
2861 alg_in->cost.latency += op_cost;
2862 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2863 {
2864 best_cost = alg_in->cost;
2865 std::swap (alg_in, best_alg);
2866 best_alg->log[best_alg->ops] = m;
2867 best_alg->op[best_alg->ops] = alg_shift;
2868 }
2869 }
2870 }
2871 if (cache_hit)
2872 goto done;
2873 }
2874
2875 /* If we have an odd number, add or subtract one. */
2876 if ((t & 1) != 0)
2877 {
2878 unsigned HOST_WIDE_INT w;
2879
2880 do_alg_addsub_t_m2:
2881 for (w = 1; (w & t) != 0; w <<= 1)
2882 ;
2883 /* If T was -1, then W will be zero after the loop. This is another
2884 case where T ends with ...111. Handling it by multiplying by
2885 (T + 1) and subtracting produces slightly better code and
2886 makes algorithm selection much faster than treating it like
2887 the ...0111 case below. */
2888 if (w == 0
2889 || (w > 2
2890 /* Reject the case where t is 3.
2891 We prefer addition in that case. */
2892 && t != 3))
2893 {
2894 /* T ends with ...111. Multiply by (T + 1) and subtract T. */
2895
2896 op_cost = add_cost (speed, mode);
2897 new_limit.cost = best_cost.cost - op_cost;
2898 new_limit.latency = best_cost.latency - op_cost;
2899 synth_mult (alg_in, t + 1, &new_limit, mode);
2900
2901 alg_in->cost.cost += op_cost;
2902 alg_in->cost.latency += op_cost;
2903 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2904 {
2905 best_cost = alg_in->cost;
2906 std::swap (alg_in, best_alg);
2907 best_alg->log[best_alg->ops] = 0;
2908 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2909 }
2910 }
2911 else
2912 {
2913 /* T ends with ...01 or ...011. Multiply by (T - 1) and add T. */
2914
2915 op_cost = add_cost (speed, mode);
2916 new_limit.cost = best_cost.cost - op_cost;
2917 new_limit.latency = best_cost.latency - op_cost;
2918 synth_mult (alg_in, t - 1, &new_limit, mode);
2919
2920 alg_in->cost.cost += op_cost;
2921 alg_in->cost.latency += op_cost;
2922 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2923 {
2924 best_cost = alg_in->cost;
2925 std::swap (alg_in, best_alg);
2926 best_alg->log[best_alg->ops] = 0;
2927 best_alg->op[best_alg->ops] = alg_add_t_m2;
2928 }
2929 }
2930
2931 /* We may be able to calculate a * -7, a * -15, a * -31, etc.,
2932 quickly with a - a * n for some appropriate constant n. */
2933 m = exact_log2 (-orig_t + 1);
2934 if (m >= 0 && m < maxm)
2935 {
2936 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2937 /* If the target has a cheap shift-and-subtract insn use
2938 that in preference to a shift insn followed by a sub insn.
2939 Assume that the shift-and-sub is "atomic" with a latency
2940 equal to its cost; otherwise assume that on superscalar
2941 hardware the shift may be executed concurrently with the
2942 earlier steps in the algorithm. */
2943 if (shiftsub1_cost (speed, mode, m) <= op_cost)
2944 {
2945 op_cost = shiftsub1_cost (speed, mode, m);
2946 op_latency = op_cost;
2947 }
2948 else
2949 op_latency = add_cost (speed, mode);
2950
2951 new_limit.cost = best_cost.cost - op_cost;
2952 new_limit.latency = best_cost.latency - op_latency;
2953 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2954 &new_limit, mode);
2955
2956 alg_in->cost.cost += op_cost;
2957 alg_in->cost.latency += op_latency;
2958 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2959 {
2960 best_cost = alg_in->cost;
2961 std::swap (alg_in, best_alg);
2962 best_alg->log[best_alg->ops] = m;
2963 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2964 }
2965 }
2966
2967 if (cache_hit)
2968 goto done;
2969 }
2970
2971 /* Look for factors of t of the form
2972 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2973 If we find such a factor, we can multiply by t using an algorithm that
2974 multiplies by q, shift the result by m and add/subtract it to itself.
2975
2976 We search for large factors first and loop down, even if large factors
2977 are less probable than small; if we find a large factor we will find a
2978 good sequence quickly, and therefore be able to prune (by decreasing
2979 COST_LIMIT) the search. */
2980
2981 do_alg_addsub_factor:
2982 for (m = floor_log2 (t - 1); m >= 2; m--)
2983 {
2984 unsigned HOST_WIDE_INT d;
2985
2986 d = (HOST_WIDE_INT_1U << m) + 1;
2987 if (t % d == 0 && t > d && m < maxm
2988 && (!cache_hit || cache_alg == alg_add_factor))
2989 {
2990 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2991 if (shiftadd_cost (speed, mode, m) <= op_cost)
2992 op_cost = shiftadd_cost (speed, mode, m);
2993
2994 op_latency = op_cost;
2995
2996
2997 new_limit.cost = best_cost.cost - op_cost;
2998 new_limit.latency = best_cost.latency - op_latency;
2999 synth_mult (alg_in, t / d, &new_limit, mode);
3000
3001 alg_in->cost.cost += op_cost;
3002 alg_in->cost.latency += op_latency;
3003 if (alg_in->cost.latency < op_cost)
3004 alg_in->cost.latency = op_cost;
3005 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3006 {
3007 best_cost = alg_in->cost;
3008 std::swap (alg_in, best_alg);
3009 best_alg->log[best_alg->ops] = m;
3010 best_alg->op[best_alg->ops] = alg_add_factor;
3011 }
3012 /* Other factors will have been taken care of in the recursion. */
3013 break;
3014 }
3015
3016 d = (HOST_WIDE_INT_1U << m) - 1;
3017 if (t % d == 0 && t > d && m < maxm
3018 && (!cache_hit || cache_alg == alg_sub_factor))
3019 {
3020 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
3021 if (shiftsub0_cost (speed, mode, m) <= op_cost)
3022 op_cost = shiftsub0_cost (speed, mode, m);
3023
3024 op_latency = op_cost;
3025
3026 new_limit.cost = best_cost.cost - op_cost;
3027 new_limit.latency = best_cost.latency - op_latency;
3028 synth_mult (alg_in, t / d, &new_limit, mode);
3029
3030 alg_in->cost.cost += op_cost;
3031 alg_in->cost.latency += op_latency;
3032 if (alg_in->cost.latency < op_cost)
3033 alg_in->cost.latency = op_cost;
3034 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3035 {
3036 best_cost = alg_in->cost;
3037 std::swap (alg_in, best_alg);
3038 best_alg->log[best_alg->ops] = m;
3039 best_alg->op[best_alg->ops] = alg_sub_factor;
3040 }
3041 break;
3042 }
3043 }
3044 if (cache_hit)
3045 goto done;
3046
3047 /* Try shift-and-add (load effective address) instructions,
3048 i.e. do a*3, a*5, a*9. */
3049 if ((t & 1) != 0)
3050 {
3051 do_alg_add_t2_m:
3052 q = t - 1;
3053 m = ctz_hwi (q);
3054 if (q && m < maxm)
3055 {
3056 op_cost = shiftadd_cost (speed, mode, m);
3057 new_limit.cost = best_cost.cost - op_cost;
3058 new_limit.latency = best_cost.latency - op_cost;
3059 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
3060
3061 alg_in->cost.cost += op_cost;
3062 alg_in->cost.latency += op_cost;
3063 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3064 {
3065 best_cost = alg_in->cost;
3066 std::swap (alg_in, best_alg);
3067 best_alg->log[best_alg->ops] = m;
3068 best_alg->op[best_alg->ops] = alg_add_t2_m;
3069 }
3070 }
3071 if (cache_hit)
3072 goto done;
3073
3074 do_alg_sub_t2_m:
3075 q = t + 1;
3076 m = ctz_hwi (q);
3077 if (q && m < maxm)
3078 {
3079 op_cost = shiftsub0_cost (speed, mode, m);
3080 new_limit.cost = best_cost.cost - op_cost;
3081 new_limit.latency = best_cost.latency - op_cost;
3082 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
3083
3084 alg_in->cost.cost += op_cost;
3085 alg_in->cost.latency += op_cost;
3086 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3087 {
3088 best_cost = alg_in->cost;
3089 std::swap (alg_in, best_alg);
3090 best_alg->log[best_alg->ops] = m;
3091 best_alg->op[best_alg->ops] = alg_sub_t2_m;
3092 }
3093 }
3094 if (cache_hit)
3095 goto done;
3096 }
3097
3098 done:
3099 /* If best_cost has not decreased, we have not found any algorithm. */
3100 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
3101 {
3102 /* We failed to find an algorithm. Record alg_impossible for
3103 this case (that is, <T, MODE, COST_LIMIT>) so that next time
3104 we are asked to find an algorithm for T within the same or
3105 lower COST_LIMIT, we can immediately return to the
3106 caller. */
3107 entry_ptr->t = t;
3108 entry_ptr->mode = mode;
3109 entry_ptr->speed = speed;
3110 entry_ptr->alg = alg_impossible;
3111 entry_ptr->cost = *cost_limit;
3112 return;
3113 }
3114
3115 /* Cache the result. */
3116 if (!cache_hit)
3117 {
3118 entry_ptr->t = t;
3119 entry_ptr->mode = mode;
3120 entry_ptr->speed = speed;
3121 entry_ptr->alg = best_alg->op[best_alg->ops];
3122 entry_ptr->cost.cost = best_cost.cost;
3123 entry_ptr->cost.latency = best_cost.latency;
3124 }
3125
3126 /* If the sequence is too long for `struct algorithm'
3127 to record, make this search fail. */
3128 if (best_alg->ops == MAX_BITS_PER_WORD)
3129 return;
3130
3131 /* Copy the algorithm from temporary space to the space at alg_out.
3132 We avoid using structure assignment because the majority of
3133 best_alg is normally undefined, and this is a critical function. */
3134 alg_out->ops = best_alg->ops + 1;
3135 alg_out->cost = best_cost;
3136 memcpy (alg_out->op, best_alg->op,
3137 alg_out->ops * sizeof *alg_out->op);
3138 memcpy (alg_out->log, best_alg->log,
3139 alg_out->ops * sizeof *alg_out->log);
3140 }
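/* Illustrative example of the kind of sequence synth_mult discovers.  For
   T = 10 it can strip the trailing zero bit with alg_shift and reduce the
   remaining factor 5 to a shift plus an add, giving
   x * 10 = ((x << 2) + x) << 1.  In plain C: */
static inline unsigned int
example_mult_by_10 (unsigned int x)
{
  unsigned int t = (x << 2) + x;	/* x * 5: one shift-and-add */
  return t << 1;			/* * 2: the deferred shift */
}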
3141 \f
3142 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
3143 Try three variations:
3144
3145 - a shift/add sequence based on VAL itself
3146 - a shift/add sequence based on -VAL, followed by a negation
3147 - a shift/add sequence based on VAL - 1, followed by an addition.
3148
3149 Return true if the cheapest of these costs less than MULT_COST,
3150 describing the algorithm in *ALG and final fixup in *VARIANT. */
3151
3152 bool
3153 choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
3154 struct algorithm *alg, enum mult_variant *variant,
3155 int mult_cost)
3156 {
3157 struct algorithm alg2;
3158 struct mult_cost limit;
3159 int op_cost;
3160 bool speed = optimize_insn_for_speed_p ();
3161
3162 /* Fail quickly for impossible bounds. */
3163 if (mult_cost < 0)
3164 return false;
3165
3166 /* Ensure that mult_cost provides a reasonable upper bound.
3167 Any constant multiplication can be performed with less
3168 than 2 * bits additions. */
3169 op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
3170 if (mult_cost > op_cost)
3171 mult_cost = op_cost;
3172
3173 *variant = basic_variant;
3174 limit.cost = mult_cost;
3175 limit.latency = mult_cost;
3176 synth_mult (alg, val, &limit, mode);
3177
3178 /* This works only if the inverted value actually fits in an
3179 `unsigned int'. */
3180 if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
3181 {
3182 op_cost = neg_cost (speed, mode);
3183 if (MULT_COST_LESS (&alg->cost, mult_cost))
3184 {
3185 limit.cost = alg->cost.cost - op_cost;
3186 limit.latency = alg->cost.latency - op_cost;
3187 }
3188 else
3189 {
3190 limit.cost = mult_cost - op_cost;
3191 limit.latency = mult_cost - op_cost;
3192 }
3193
3194 synth_mult (&alg2, -val, &limit, mode);
3195 alg2.cost.cost += op_cost;
3196 alg2.cost.latency += op_cost;
3197 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3198 *alg = alg2, *variant = negate_variant;
3199 }
3200
3201 /* This proves very useful for division-by-constant. */
3202 op_cost = add_cost (speed, mode);
3203 if (MULT_COST_LESS (&alg->cost, mult_cost))
3204 {
3205 limit.cost = alg->cost.cost - op_cost;
3206 limit.latency = alg->cost.latency - op_cost;
3207 }
3208 else
3209 {
3210 limit.cost = mult_cost - op_cost;
3211 limit.latency = mult_cost - op_cost;
3212 }
3213
3214 synth_mult (&alg2, val - 1, &limit, mode);
3215 alg2.cost.cost += op_cost;
3216 alg2.cost.latency += op_cost;
3217 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3218 *alg = alg2, *variant = add_variant;
3219
3220 return MULT_COST_LESS (&alg->cost, mult_cost);
3221 }
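/* Illustrative example of the three variants above for VAL = 7 (which one
   wins depends on the target's rtx costs):

     basic_variant:   x * 7 = (x << 3) - x
     negate_variant:  synthesize x * -7, then negate the result
     add_variant:     x * 6 = ((x << 1) + x) << 1, then add x  */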
3222
3223 /* A subroutine of expand_mult, used for constant multiplications.
3224 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
3225 convenient. Use the shift/add sequence described by ALG and apply
3226 the final fixup specified by VARIANT. */
3227
3228 static rtx
3229 expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
3230 rtx target, const struct algorithm *alg,
3231 enum mult_variant variant)
3232 {
3233 unsigned HOST_WIDE_INT val_so_far;
3234 rtx_insn *insn;
3235 rtx accum, tem;
3236 int opno;
3237 machine_mode nmode;
3238
3239 /* Avoid referencing memory over and over and invalid sharing
3240 on SUBREGs. */
3241 op0 = force_reg (mode, op0);
3242
3243 /* ACCUM starts out either as OP0 or as a zero, depending on
3244 the first operation. */
3245
3246 if (alg->op[0] == alg_zero)
3247 {
3248 accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
3249 val_so_far = 0;
3250 }
3251 else if (alg->op[0] == alg_m)
3252 {
3253 accum = copy_to_mode_reg (mode, op0);
3254 val_so_far = 1;
3255 }
3256 else
3257 gcc_unreachable ();
3258
3259 for (opno = 1; opno < alg->ops; opno++)
3260 {
3261 int log = alg->log[opno];
3262 rtx shift_subtarget = optimize ? 0 : accum;
3263 rtx add_target
3264 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
3265 && !optimize)
3266 ? target : 0;
3267 rtx accum_target = optimize ? 0 : accum;
3268 rtx accum_inner;
3269
3270 switch (alg->op[opno])
3271 {
3272 case alg_shift:
3273 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3274 /* REG_EQUAL note will be attached to the following insn. */
3275 emit_move_insn (accum, tem);
3276 val_so_far <<= log;
3277 break;
3278
3279 case alg_add_t_m2:
3280 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3281 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3282 add_target ? add_target : accum_target);
3283 val_so_far += HOST_WIDE_INT_1U << log;
3284 break;
3285
3286 case alg_sub_t_m2:
3287 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3288 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3289 add_target ? add_target : accum_target);
3290 val_so_far -= HOST_WIDE_INT_1U << log;
3291 break;
3292
3293 case alg_add_t2_m:
3294 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3295 log, shift_subtarget, 0);
3296 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3297 add_target ? add_target : accum_target);
3298 val_so_far = (val_so_far << log) + 1;
3299 break;
3300
3301 case alg_sub_t2_m:
3302 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3303 log, shift_subtarget, 0);
3304 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3305 add_target ? add_target : accum_target);
3306 val_so_far = (val_so_far << log) - 1;
3307 break;
3308
3309 case alg_add_factor:
3310 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3311 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3312 add_target ? add_target : accum_target);
3313 val_so_far += val_so_far << log;
3314 break;
3315
3316 case alg_sub_factor:
3317 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3318 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3319 (add_target
3320 ? add_target : (optimize ? 0 : tem)));
3321 val_so_far = (val_so_far << log) - val_so_far;
3322 break;
3323
3324 default:
3325 gcc_unreachable ();
3326 }
3327
3328 if (SCALAR_INT_MODE_P (mode))
3329 {
3330 /* Write a REG_EQUAL note on the last insn so that we can cse
3331 multiplication sequences. Note that if ACCUM is a SUBREG,
3332 we've set the inner register and must properly indicate that. */
3333 tem = op0, nmode = mode;
3334 accum_inner = accum;
3335 if (GET_CODE (accum) == SUBREG)
3336 {
3337 accum_inner = SUBREG_REG (accum);
3338 nmode = GET_MODE (accum_inner);
3339 tem = gen_lowpart (nmode, op0);
3340 }
3341
3342 insn = get_last_insn ();
3343 set_dst_reg_note (insn, REG_EQUAL,
3344 gen_rtx_MULT (nmode, tem,
3345 gen_int_mode (val_so_far, nmode)),
3346 accum_inner);
3347 }
3348 }
3349
3350 if (variant == negate_variant)
3351 {
3352 val_so_far = -val_so_far;
3353 accum = expand_unop (mode, neg_optab, accum, target, 0);
3354 }
3355 else if (variant == add_variant)
3356 {
3357 val_so_far = val_so_far + 1;
3358 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3359 }
3360
3361 /* Compare only the bits of val and val_so_far that are significant
3362 in the result mode, to avoid sign-/zero-extension confusion. */
3363 nmode = GET_MODE_INNER (mode);
3364 val &= GET_MODE_MASK (nmode);
3365 val_so_far &= GET_MODE_MASK (nmode);
3366 gcc_assert (val == (HOST_WIDE_INT) val_so_far);
3367
3368 return accum;
3369 }
3370
3371 /* Perform a multiplication and return an rtx for the result.
3372 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3373 TARGET is a suggestion for where to store the result (an rtx).
3374
3375 We check specially for a constant integer as OP1.
3376 If you want this check for OP0 as well, then before calling
3377 you should swap the two operands if OP0 would be constant. */
3378
3379 rtx
3380 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3381 int unsignedp, bool no_libcall)
3382 {
3383 enum mult_variant variant;
3384 struct algorithm algorithm;
3385 rtx scalar_op1;
3386 int max_cost;
3387 bool speed = optimize_insn_for_speed_p ();
3388 bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3389
3390 if (CONSTANT_P (op0))
3391 std::swap (op0, op1);
3392
3393 /* For vectors, there are several simplifications that can be made if
3394 all elements of the vector constant are identical. */
3395 scalar_op1 = unwrap_const_vec_duplicate (op1);
3396
3397 if (INTEGRAL_MODE_P (mode))
3398 {
3399 rtx fake_reg;
3400 HOST_WIDE_INT coeff;
3401 bool is_neg;
3402 int mode_bitsize;
3403
3404 if (op1 == CONST0_RTX (mode))
3405 return op1;
3406 if (op1 == CONST1_RTX (mode))
3407 return op0;
3408 if (op1 == CONSTM1_RTX (mode))
3409 return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3410 op0, target, 0);
3411
3412 if (do_trapv)
3413 goto skip_synth;
3414
3415 /* If MODE is an integer vector mode, check whether the backend
3416 supports vector left shift (by scalar or vector) at all. If not,
3417 we can't use a synthesized multiply. */
3418 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
3419 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
3420 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
3421 goto skip_synth;
3422
3423 /* These are the operations that are potentially turned into
3424 a sequence of shifts and additions. */
3425 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3426
3427 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3428 less than or equal in size to `unsigned int' this doesn't matter.
3429 If the mode is larger than `unsigned int', then synth_mult works
3430 only if the constant value exactly fits in an `unsigned int' without
3431 any truncation. This means that multiplying by negative values does
3432 not work; results are off by 2^32 on a 32-bit machine. */
3433 if (CONST_INT_P (scalar_op1))
3434 {
3435 coeff = INTVAL (scalar_op1);
3436 is_neg = coeff < 0;
3437 }
3438 #if TARGET_SUPPORTS_WIDE_INT
3439 else if (CONST_WIDE_INT_P (scalar_op1))
3440 #else
3441 else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3442 #endif
3443 {
3444 int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
3445 /* Perfect power of 2 (other than 1, which is handled above). */
3446 if (shift > 0)
3447 return expand_shift (LSHIFT_EXPR, mode, op0,
3448 shift, target, unsignedp);
3449 else
3450 goto skip_synth;
3451 }
3452 else
3453 goto skip_synth;
3454
3455 /* We used to test optimize here, on the grounds that it's better to
3456 produce a smaller program when -O is not used. But this causes
3457 such a terrible slowdown sometimes that it seems better to always
3458 use synth_mult. */
3459
3460 /* Special case powers of two. */
3461 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3462 && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3463 return expand_shift (LSHIFT_EXPR, mode, op0,
3464 floor_log2 (coeff), target, unsignedp);
3465
3466 fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3467
3468 /* Attempt to handle multiplication of DImode values by negative
3469 coefficients, by performing the multiplication by a positive
3470 multiplier and then inverting the result. */
3471 if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3472 {
3473 /* It's safe to use -coeff even for INT_MIN, as the
3474 result is interpreted as an unsigned coefficient.
3475 Exclude the cost of op0 from max_cost to match the cost
3476 calculation of synth_mult. */
3477 coeff = -(unsigned HOST_WIDE_INT) coeff;
3478 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
3479 mode, speed)
3480 - neg_cost (speed, mode));
3481 if (max_cost <= 0)
3482 goto skip_synth;
3483
3484 /* Special case powers of two. */
3485 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3486 {
3487 rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3488 floor_log2 (coeff), target, unsignedp);
3489 return expand_unop (mode, neg_optab, temp, target, 0);
3490 }
3491
3492 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3493 max_cost))
3494 {
3495 rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3496 &algorithm, variant);
3497 return expand_unop (mode, neg_optab, temp, target, 0);
3498 }
3499 goto skip_synth;
3500 }
3501
3502 /* Exclude the cost of op0 from max_cost to match the cost
3503 calculation of synth_mult. */
3504 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
3505 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3506 return expand_mult_const (mode, op0, coeff, target,
3507 &algorithm, variant);
3508 }
3509 skip_synth:
3510
3511 /* Expand x*2.0 as x+x. */
3512 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
3513 && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
3514 {
3515 op0 = force_reg (GET_MODE (op0), op0);
3516 return expand_binop (mode, add_optab, op0, op0,
3517 target, unsignedp,
3518 no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3519 }
3520
3521 /* This used to use umul_optab if unsigned, but for non-widening multiply
3522 there is no difference between signed and unsigned. */
3523 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3524 op0, op1, target, unsignedp,
3525 no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3526 gcc_assert (op0 || no_libcall);
3527 return op0;
3528 }
3529
3530 /* Return a cost estimate for multiplying a register by the given
3531 COEFFicient in the given MODE and SPEED. */
3532
3533 int
3534 mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3535 {
3536 int max_cost;
3537 struct algorithm algorithm;
3538 enum mult_variant variant;
3539
3540 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3541 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
3542 mode, speed);
3543 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3544 return algorithm.cost.cost;
3545 else
3546 return max_cost;
3547 }
3548
3549 /* Perform a widening multiplication and return an rtx for the result.
3550 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3551 TARGET is a suggestion for where to store the result (an rtx).
3552 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3553 or smul_widen_optab.
3554
3555 We check specially for a constant integer as OP1, comparing the
3556 cost of a widening multiply against the cost of a sequence of shifts
3557 and adds. */
3558
3559 rtx
3560 expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3561 int unsignedp, optab this_optab)
3562 {
3563 bool speed = optimize_insn_for_speed_p ();
3564 rtx cop1;
3565
3566 if (CONST_INT_P (op1)
3567 && GET_MODE (op0) != VOIDmode
3568 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3569 this_optab == umul_widen_optab))
3570 && CONST_INT_P (cop1)
3571 && (INTVAL (cop1) >= 0
3572 || HWI_COMPUTABLE_MODE_P (mode)))
3573 {
3574 HOST_WIDE_INT coeff = INTVAL (cop1);
3575 int max_cost;
3576 enum mult_variant variant;
3577 struct algorithm algorithm;
3578
3579 if (coeff == 0)
3580 return CONST0_RTX (mode);
3581
3582 /* Special case powers of two. */
3583 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3584 {
3585 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3586 return expand_shift (LSHIFT_EXPR, mode, op0,
3587 floor_log2 (coeff), target, unsignedp);
3588 }
3589
3590 /* Exclude the cost of op0 from max_cost to match the cost
3591 calculation of synth_mult. */
3592 max_cost = mul_widen_cost (speed, mode);
3593 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3594 max_cost))
3595 {
3596 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3597 return expand_mult_const (mode, op0, coeff, target,
3598 &algorithm, variant);
3599 }
3600 }
3601 return expand_binop (mode, this_optab, op0, op1, target,
3602 unsignedp, OPTAB_LIB_WIDEN);
3603 }
3604 \f
3605 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3606 replace division by D, and put the least significant N bits of the result
3607 in *MULTIPLIER_PTR and return the most significant bit.
3608
3609 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT);
3610 the needed precision is in PRECISION (should be <= N).
3611
3612 PRECISION should be as small as possible so that this function
3613 can choose the multiplier more freely.
3614
3615 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count
3616 that is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3617
3618 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3619 where m is the full N + 1 bit multiplier. */
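/* A worked example, for illustration: with D = 7 and N = PRECISION = 32,
lgup = 3, mlow = 2^35/7 = 0x124924924 and
mhigh = (2^35 + 2^3)/7 = 0x124924925. The reduction loop exits at once,
since mlow and mhigh agree after shifting out their low bit, so
*POST_SHIFT_PTR = 3, *MULTIPLIER_PTR = 0x24924925, and the returned
most significant bit is 1. Indeed, for every 32-bit unsigned x,
x/7 == (x * 0x124924925) >> 35. */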
3620
3621 unsigned HOST_WIDE_INT
3622 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3623 unsigned HOST_WIDE_INT *multiplier_ptr,
3624 int *post_shift_ptr, int *lgup_ptr)
3625 {
3626 int lgup, post_shift;
3627 int pow, pow2;
3628
3629 /* lgup = ceil(log2(divisor)); */
3630 lgup = ceil_log2 (d);
3631
3632 gcc_assert (lgup <= n);
3633
3634 pow = n + lgup;
3635 pow2 = n + lgup - precision;
3636
3637 /* mlow = 2^(N + lgup)/d */
3638 wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3639 wide_int mlow = wi::udiv_trunc (val, d);
3640
3641 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3642 val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3643 wide_int mhigh = wi::udiv_trunc (val, d);
3644
3645 /* If precision == N, then mlow, mhigh exceed 2^N
3646 (but they do not exceed 2^(N+1)). */
3647
3648 /* Reduce to lowest terms. */
3649 for (post_shift = lgup; post_shift > 0; post_shift--)
3650 {
3651 unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3652 HOST_BITS_PER_WIDE_INT);
3653 unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3654 HOST_BITS_PER_WIDE_INT);
3655 if (ml_lo >= mh_lo)
3656 break;
3657
3658 mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3659 mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3660 }
3661
3662 *post_shift_ptr = post_shift;
3663 *lgup_ptr = lgup;
3664 if (n < HOST_BITS_PER_WIDE_INT)
3665 {
3666 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
3667 *multiplier_ptr = mhigh.to_uhwi () & mask;
3668 return mhigh.to_uhwi () >= mask;
3669 }
3670 else
3671 {
3672 *multiplier_ptr = mhigh.to_uhwi ();
3673 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3674 }
3675 }
3676
3677 /* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
3678 congruent to 1 (mod 2**N). */
3679
3680 static unsigned HOST_WIDE_INT
3681 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3682 {
3683 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3684
3685 /* The algorithm relies on the fact that the choice y = x satisfies
3686 x*y == 1 (mod 2^3), since x is assumed odd.
3687 Each iteration doubles the number of bits of significance in y. */
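/* For illustration: with x = 7 and n = 8, the start value y = 7 is
correct to 3 bits (7*7 == 49 == 1 mod 2^3). One iteration gives
y = 7 * (2 - 7*7) mod 2^8 = 183, which is already exact:
7 * 183 == 1281 == 1 (mod 2^8); the second iteration then leaves
y unchanged. */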
3688
3689 unsigned HOST_WIDE_INT mask;
3690 unsigned HOST_WIDE_INT y = x;
3691 int nbit = 3;
3692
3693 mask = (n == HOST_BITS_PER_WIDE_INT
3694 ? HOST_WIDE_INT_M1U
3695 : (HOST_WIDE_INT_1U << n) - 1);
3696
3697 while (nbit < n)
3698 {
3699 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3700 nbit *= 2;
3701 }
3702 return y;
3703 }
3704
3705 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
3706 signedness of OP0 and OP1. ADJ_OPERAND is already the high half of the
3707 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3708 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3709 become signed.
3710
3711 The result is put in TARGET if that is convenient.
3712
3713 MODE is the mode of operation. */
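/* A sketch of why the adjustment works: for N-bit operands,
x_signed = x_unsigned - 2^N * s(x), where s(x) is 1 when X is negative
and 0 otherwise. The signed and unsigned products therefore differ by
2^N * (s(op0) * op1 + s(op1) * op0) plus a multiple of 2^(2N), so their
high halves differ by s(op0) * op1 + s(op1) * op0 modulo 2^N; each term
is computed below by masking one operand with the other's broadcast
sign bit. */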
3714
3715 rtx
3716 expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0,
3717 rtx op1, rtx target, int unsignedp)
3718 {
3719 rtx tem;
3720 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3721
3722 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3723 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3724 tem = expand_and (mode, tem, op1, NULL_RTX);
3725 adj_operand
3726 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3727 adj_operand);
3728
3729 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3730 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3731 tem = expand_and (mode, tem, op0, NULL_RTX);
3732 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3733 target);
3734
3735 return target;
3736 }
3737
3738 /* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3739
3740 static rtx
3741 extract_high_half (scalar_int_mode mode, rtx op)
3742 {
3743 if (mode == word_mode)
3744 return gen_highpart (mode, op);
3745
3746 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3747
3748 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3749 GET_MODE_BITSIZE (mode), 0, 1);
3750 return convert_modes (mode, wider_mode, op, 0);
3751 }
3752
3753 /* Like expmed_mult_highpart, but only consider using a multiplication
3754 optab. OP1 is an rtx for the constant operand. */
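/* In outline, the strategies below are tried in order, each guarded by
cost (and width) checks against MAX_COST:
1) a [su]mul_highpart instruction of the requested signedness;
2) one of the opposite signedness, followed by an adjustment;
3) a widening multiply, keeping only the high half;
4) a full multiply in the wider mode, followed by a shift;
5) a widening multiply of the opposite signedness, plus adjustment. */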
3755
3756 static rtx
3757 expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1,
3758 rtx target, int unsignedp, int max_cost)
3759 {
3760 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3761 optab moptab;
3762 rtx tem;
3763 int size;
3764 bool speed = optimize_insn_for_speed_p ();
3765
3766 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3767
3768 size = GET_MODE_BITSIZE (mode);
3769
3770 /* Firstly, try using a multiplication insn that only generates the needed
3771 high part of the product, with the signedness given by UNSIGNEDP. */
3772 if (mul_highpart_cost (speed, mode) < max_cost)
3773 {
3774 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3775 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3776 unsignedp, OPTAB_DIRECT);
3777 if (tem)
3778 return tem;
3779 }
3780
3781 /* Secondly, same as above, but use the sign flavor opposite to
3782 UNSIGNEDP; the result then needs to be adjusted. */
3783 if (size - 1 < BITS_PER_WORD
3784 && (mul_highpart_cost (speed, mode)
3785 + 2 * shift_cost (speed, mode, size-1)
3786 + 4 * add_cost (speed, mode) < max_cost))
3787 {
3788 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3789 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3790 unsignedp, OPTAB_DIRECT);
3791 if (tem)
3792 /* We used the wrong signedness. Adjust the result. */
3793 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3794 tem, unsignedp);
3795 }
3796
3797 /* Try widening multiplication. */
3798 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3799 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3800 && mul_widen_cost (speed, wider_mode) < max_cost)
3801 {
3802 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3803 unsignedp, OPTAB_WIDEN);
3804 if (tem)
3805 return extract_high_half (mode, tem);
3806 }
3807
3808 /* Try widening the mode and performing a non-widening multiplication. */
3809 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3810 && size - 1 < BITS_PER_WORD
3811 && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3812 < max_cost))
3813 {
3814 rtx_insn *insns;
3815 rtx wop0, wop1;
3816
3817 /* We need to widen the operands, for example to ensure the
3818 constant multiplier is correctly sign or zero extended.
3819 Use a sequence to clean up any instructions emitted by
3820 the conversions if things don't work out. */
3821 start_sequence ();
3822 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3823 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3824 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3825 unsignedp, OPTAB_WIDEN);
3826 insns = get_insns ();
3827 end_sequence ();
3828
3829 if (tem)
3830 {
3831 emit_insn (insns);
3832 return extract_high_half (mode, tem);
3833 }
3834 }
3835
3836 /* Try widening multiplication of opposite signedness, and adjust. */
3837 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3838 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3839 && size - 1 < BITS_PER_WORD
3840 && (mul_widen_cost (speed, wider_mode)
3841 + 2 * shift_cost (speed, mode, size-1)
3842 + 4 * add_cost (speed, mode) < max_cost))
3843 {
3844 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3845 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3846 if (tem != 0)
3847 {
3848 tem = extract_high_half (mode, tem);
3849 /* We used the wrong signedness. Adjust the result. */
3850 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3851 target, unsignedp);
3852 }
3853 }
3854
3855 return 0;
3856 }
3857
3858 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3859 putting the high half of the result in TARGET if that is convenient,
3860 and return where the result is. If the operation cannot be performed,
3861 0 is returned.
3862
3863 MODE is the mode of operation and result.
3864
3865 UNSIGNEDP nonzero means unsigned multiply.
3866
3867 MAX_COST is the total allowed cost for the expanded RTL. */
3868
3869 static rtx
3870 expmed_mult_highpart (scalar_int_mode mode, rtx op0, rtx op1,
3871 rtx target, int unsignedp, int max_cost)
3872 {
3873 unsigned HOST_WIDE_INT cnst1;
3874 int extra_cost;
3875 bool sign_adjust = false;
3876 enum mult_variant variant;
3877 struct algorithm alg;
3878 rtx tem;
3879 bool speed = optimize_insn_for_speed_p ();
3880
3881 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3882 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3883
3884 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3885
3886 /* We can't optimize modes wider than BITS_PER_WORD.
3887 ??? We might be able to perform double-word arithmetic if
3888 mode == word_mode; however, all the cost calculations in
3889 synth_mult etc. assume single-word operations. */
3890 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3891 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3892 return expmed_mult_highpart_optab (mode, op0, op1, target,
3893 unsignedp, max_cost);
3894
3895 extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3896
3897 /* Check whether we are trying to multiply by a negative constant. */
3898 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3899 {
3900 sign_adjust = true;
3901 extra_cost += add_cost (speed, mode);
3902 }
3903
3904 /* See whether shift/add multiplication is cheap enough. */
3905 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3906 max_cost - extra_cost))
3907 {
3908 /* See whether the specialized multiplication optabs are
3909 cheaper than the shift/add version. */
3910 tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3911 alg.cost.cost + extra_cost);
3912 if (tem)
3913 return tem;
3914
3915 tem = convert_to_mode (wider_mode, op0, unsignedp);
3916 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3917 tem = extract_high_half (mode, tem);
3918
3919 /* Adjust result for signedness. */
3920 if (sign_adjust)
3921 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3922
3923 return tem;
3924 }
3925 return expmed_mult_highpart_optab (mode, op0, op1, target,
3926 unsignedp, max_cost);
3927 }
3928
3929
3930 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3931
3932 static rtx
3933 expand_smod_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
3934 {
3935 rtx result, temp, shift;
3936 rtx_code_label *label;
3937 int logd;
3938 int prec = GET_MODE_PRECISION (mode);
3939
3940 logd = floor_log2 (d);
3941 result = gen_reg_rtx (mode);
3942
3943 /* Avoid conditional branches when they're expensive. */
3944 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3945 && optimize_insn_for_speed_p ())
3946 {
3947 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3948 mode, 0, -1);
3949 if (signmask)
3950 {
3951 HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
3952 signmask = force_reg (mode, signmask);
3953 shift = gen_int_shift_amount (mode, GET_MODE_BITSIZE (mode) - logd);
3954
3955 /* Use the rtx_cost of an LSHIFTRT instruction to determine
3956 which instruction sequence to use. If logical right shifts
3957 are expensive then use 2 XORs, 2 SUBs and an AND; otherwise
3958 use an LSHIFTRT, 1 ADD, 1 SUB and an AND. */
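/* In plain C terms (a sketch of what the RTL below computes), with
s = x >> (prec - 1), i.e. all ones when X is negative and zero
otherwise, the two sequences compute respectively
r = ((((x ^ s) - s) & (d - 1)) ^ s) - s
and, with t = (unsigned) s >> (prec - logd),
r = ((x + t) & (d - 1)) - t;
both yield x % d with the sign of X, as required for a signed
modulus. */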
3959
3960 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3961 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3962 || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
3963 > COSTS_N_INSNS (2)))
3964 {
3965 temp = expand_binop (mode, xor_optab, op0, signmask,
3966 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3967 temp = expand_binop (mode, sub_optab, temp, signmask,
3968 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3969 temp = expand_binop (mode, and_optab, temp,
3970 gen_int_mode (masklow, mode),
3971 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3972 temp = expand_binop (mode, xor_optab, temp, signmask,
3973 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3974 temp = expand_binop (mode, sub_optab, temp, signmask,
3975 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3976 }
3977 else
3978 {
3979 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3980 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3981 signmask = force_reg (mode, signmask);
3982
3983 temp = expand_binop (mode, add_optab, op0, signmask,
3984 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3985 temp = expand_binop (mode, and_optab, temp,
3986 gen_int_mode (masklow, mode),
3987 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3988 temp = expand_binop (mode, sub_optab, temp, signmask,
3989 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3990 }
3991 return temp;
3992 }
3993 }
3994
3995 /* Mask contains the mode's signbit and the significant bits of the
3996 modulus. By including the signbit in the operation, many targets
3997 can avoid an explicit compare operation in the following comparison
3998 against zero. */
3999 wide_int mask = wi::mask (logd, false, prec);
4000 mask = wi::set_bit (mask, prec - 1);
4001
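/* If the masked value turns out negative, the decrement / IOR /
increment below sign-extends its low bits: e.g. for prec == 32 and
d == 4, x == -5 masks to 0x80000003 and the adjustment yields
-1 == -5 % 4, while x == -4 masks to 0x80000000 and yields 0. */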
4002 temp = expand_binop (mode, and_optab, op0,
4003 immed_wide_int_const (mask, mode),
4004 result, 1, OPTAB_LIB_WIDEN);
4005 if (temp != result)
4006 emit_move_insn (result, temp);
4007
4008 label = gen_label_rtx ();
4009 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
4010
4011 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
4012 0, OPTAB_LIB_WIDEN);
4013
4014 mask = wi::mask (logd, true, prec);
4015 temp = expand_binop (mode, ior_optab, temp,
4016 immed_wide_int_const (mask, mode),
4017 result, 1, OPTAB_LIB_WIDEN);
4018 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
4019 0, OPTAB_LIB_WIDEN);
4020 if (temp != result)
4021 emit_move_insn (result, temp);
4022 emit_label (label);
4023 return result;
4024 }
4025
4026 /* Expand signed division of OP0 by a power of two D in mode MODE.
4027 This routine is only called for positive values of D. */
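/* For reference: truncating signed division by D == 2^logd is an
arithmetic right shift once a bias of D - 1 has been added to negative
operands, i.e. x / d == (x + (x < 0 ? d - 1 : 0)) >> logd. The
branches below differ only in how cheaply they materialize that
bias. */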
4028
4029 static rtx
4030 expand_sdiv_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
4031 {
4032 rtx temp;
4033 rtx_code_label *label;
4034 int logd;
4035
4036 logd = floor_log2 (d);
4037
4038 if (d == 2
4039 && BRANCH_COST (optimize_insn_for_speed_p (),
4040 false) >= 1)
4041 {
4042 temp = gen_reg_rtx (mode);
4043 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
4044 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
4045 0, OPTAB_LIB_WIDEN);
4046 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4047 }
4048
4049 if (HAVE_conditional_move
4050 && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
4051 {
4052 rtx temp2;
4053
4054 start_sequence ();
4055 temp2 = copy_to_mode_reg (mode, op0);
4056 temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
4057 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4058 temp = force_reg (mode, temp);
4059
4060 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
4061 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
4062 mode, temp, temp2, mode, 0);
4063 if (temp2)
4064 {
4065 rtx_insn *seq = get_insns ();
4066 end_sequence ();
4067 emit_insn (seq);
4068 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
4069 }
4070 end_sequence ();
4071 }
4072
4073 if (BRANCH_COST (optimize_insn_for_speed_p (),
4074 false) >= 2)
4075 {
4076 int ushift = GET_MODE_BITSIZE (mode) - logd;
4077
4078 temp = gen_reg_rtx (mode);
4079 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
4080 if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
4081 || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
4082 > COSTS_N_INSNS (1))
4083 temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
4084 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4085 else
4086 temp = expand_shift (RSHIFT_EXPR, mode, temp,
4087 ushift, NULL_RTX, 1);
4088 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
4089 0, OPTAB_LIB_WIDEN);
4090 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4091 }
4092
4093 label = gen_label_rtx ();
4094 temp = copy_to_mode_reg (mode, op0);
4095 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
4096 expand_inc (temp, gen_int_mode (d - 1, mode));
4097 emit_label (label);
4098 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4099 }
4100 \f
4101 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
4102 if that is convenient, and returning where the result is.
4103 You may request either the quotient or the remainder as the result;
4104 specify REM_FLAG nonzero to get the remainder.
4105
4106 CODE is the expression code for which kind of division this is;
4107 it controls how rounding is done. MODE is the machine mode to use.
4108 UNSIGNEDP nonzero means do unsigned division. */
4109
4110 /* ??? For CEIL_MOD_EXPR, we can compute a possibly incorrect remainder
4111 with ANDI and then correct it by OR'ing in the missing high bits
4112 if the result of the ANDI is nonzero.
4113 For ROUND_MOD_EXPR, we can use ANDI and then sign-extend the result.
4114 This could optimize to a bfexts instruction.
4115 But C doesn't use these operations, so their optimizations are
4116 left for later. */
4117 /* ??? For modulo, we don't actually need the highpart of the first product,
4118 the low part will do nicely. And for small divisors, the second multiply
4119 can also be a low-part only multiply or even be completely left out.
4120 E.g. to calculate the remainder of a division by 3 with a 32 bit
4121 multiply, multiply with 0x55555556 and extract the upper two bits;
4122 the result is exact for inputs up to 0x1fffffff.
4123 The input range can be reduced by using cross-sum rules.
4124 For odd divisors >= 3, the following table gives right shift counts
4125 so that if a number is shifted by an integer multiple of the given
4126 amount, the remainder stays the same:
4127 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
4128 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
4129 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
4130 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
4131 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
4132
4133 Cross-sum rules for even numbers can be derived by leaving as many bits
4134 to the right alone as the divisor has zeros to the right.
4135 E.g. if x is an unsigned 32 bit number:
4136 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
4137 */
4138
4139 rtx
4140 expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
4141 rtx op0, rtx op1, rtx target, int unsignedp)
4142 {
4143 machine_mode compute_mode;
4144 rtx tquotient;
4145 rtx quotient = 0, remainder = 0;
4146 rtx_insn *last;
4147 rtx_insn *insn;
4148 optab optab1, optab2;
4149 int op1_is_constant, op1_is_pow2 = 0;
4150 int max_cost, extra_cost;
4151 static HOST_WIDE_INT last_div_const = 0;
4152 bool speed = optimize_insn_for_speed_p ();
4153
4154 op1_is_constant = CONST_INT_P (op1);
4155 if (op1_is_constant)
4156 {
4157 wide_int ext_op1 = rtx_mode_t (op1, mode);
4158 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4159 || (! unsignedp
4160 && wi::popcount (wi::neg (ext_op1)) == 1));
4161 }
4162
4163 /*
4164 This is the structure of expand_divmod:
4165
4166 First comes code to fix up the operands so we can perform the operations
4167 correctly and efficiently.
4168
4169 Second comes a switch statement with code specific for each rounding mode.
4170 For some special operands this code emits all RTL for the desired
4171 operation, for other cases, it generates only a quotient and stores it in
4172 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
4173 to indicate that it has not done anything.
4174
4175 Last comes code that finishes the operation. If QUOTIENT is set and
4176 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
4177 QUOTIENT is not set, it is computed using trunc rounding.
4178
4179 We try to generate special code for division and remainder when OP1 is a
4180 constant. If |OP1| = 2**n we can use shifts and some other fast
4181 operations. For other values of OP1, we compute a carefully selected
4182 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
4183 by m.
4184
4185 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
4186 half of the product. Different strategies for generating the product are
4187 implemented in expmed_mult_highpart.
4188
4189 If what we actually want is the remainder, we generate that by another
4190 by-constant multiplication and a subtraction. */
4191
4192 /* We shouldn't be called with OP1 == const1_rtx, but some of the
4193 code below will malfunction if we are, so check here and handle
4194 the special case if so. */
4195 if (op1 == const1_rtx)
4196 return rem_flag ? const0_rtx : op0;
4197
4198 /* When dividing by -1, we could get an overflow.
4199 negv_optab can handle overflows. */
4200 if (! unsignedp && op1 == constm1_rtx)
4201 {
4202 if (rem_flag)
4203 return const0_rtx;
4204 return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
4205 ? negv_optab : neg_optab, op0, target, 0);
4206 }
4207
4208 if (target
4209 /* Don't use the function value register as a target
4210 since we have to read it as well as write it,
4211 and function-inlining gets confused by this. */
4212 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
4213 /* Don't clobber an operand while doing a multi-step calculation. */
4214 || ((rem_flag || op1_is_constant)
4215 && (reg_mentioned_p (target, op0)
4216 || (MEM_P (op0) && MEM_P (target))))
4217 || reg_mentioned_p (target, op1)
4218 || (MEM_P (op1) && MEM_P (target))))
4219 target = 0;
4220
4221 /* Get the mode in which to perform this computation. Normally it will
4222 be MODE, but sometimes we can't do the desired operation in MODE.
4223 If so, pick a wider mode in which we can do the operation. Convert
4224 to that mode at the start to avoid repeated conversions.
4225
4226 First see what operations we need. These depend on the expression
4227 we are evaluating. (We assume that divxx3 insns exist under the
4228 same conditions as modxx3 insns and that these insns don't normally
4229 fail. If these assumptions are not correct, we may generate less
4230 efficient code in some cases.)
4231
4232 Then see if we find a mode in which we can open-code that operation
4233 (either a division, modulus, or shift). Finally, check for the smallest
4234 mode for which we can do the operation with a library call. */
4235
4236 /* We might want to refine this now that we have division-by-constant
4237 optimization. Since expmed_mult_highpart tries so many variants, it is
4238 not straightforward to generalize this. Maybe we should make an array
4239 of possible modes in init_expmed? Save this for GCC 2.7. */
4240
4241 optab1 = (op1_is_pow2
4242 ? (unsignedp ? lshr_optab : ashr_optab)
4243 : (unsignedp ? udiv_optab : sdiv_optab));
4244 optab2 = (op1_is_pow2 ? optab1
4245 : (unsignedp ? udivmod_optab : sdivmod_optab));
4246
4247 FOR_EACH_MODE_FROM (compute_mode, mode)
4248 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4249 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4250 break;
4251
4252 if (compute_mode == VOIDmode)
4253 FOR_EACH_MODE_FROM (compute_mode, mode)
4254 if (optab_libfunc (optab1, compute_mode)
4255 || optab_libfunc (optab2, compute_mode))
4256 break;
4257
4258 /* If we still couldn't find a mode, use MODE, but expand_binop will
4259 probably die. */
4260 if (compute_mode == VOIDmode)
4261 compute_mode = mode;
4262
4263 if (target && GET_MODE (target) == compute_mode)
4264 tquotient = target;
4265 else
4266 tquotient = gen_reg_rtx (compute_mode);
4267
4268 #if 0
4269 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4270 (mode), and thereby get better code when OP1 is a constant. Do that
4271 later. It will require going over all usages of SIZE below. */
4272 size = GET_MODE_BITSIZE (mode);
4273 #endif
4274
4275 /* Only deduct something for a REM if the last divide done was
4276 for a different constant. Then set the constant of the last
4277 divide. */
4278 max_cost = (unsignedp
4279 ? udiv_cost (speed, compute_mode)
4280 : sdiv_cost (speed, compute_mode));
4281 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4282 && INTVAL (op1) == last_div_const))
4283 max_cost -= (mul_cost (speed, compute_mode)
4284 + add_cost (speed, compute_mode));
4285
4286 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4287
4288 /* Now convert to the best mode to use. */
4289 if (compute_mode != mode)
4290 {
4291 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4292 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4293
4294 /* convert_modes may have placed op1 into a register, so we
4295 must recompute the following. */
4296 op1_is_constant = CONST_INT_P (op1);
4297 if (op1_is_constant)
4298 {
4299 wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
4300 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4301 || (! unsignedp
4302 && wi::popcount (wi::neg (ext_op1)) == 1));
4303 }
4304 else
4305 op1_is_pow2 = 0;
4306 }
4307
4308 /* If one of the operands is a volatile MEM, copy it into a register. */
4309
4310 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4311 op0 = force_reg (compute_mode, op0);
4312 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4313 op1 = force_reg (compute_mode, op1);
4314
4315 /* If we need the remainder or if OP1 is constant, we need to
4316 put OP0 in a register in case it has any queued subexpressions. */
4317 if (rem_flag || op1_is_constant)
4318 op0 = force_reg (compute_mode, op0);
4319
4320 last = get_last_insn ();
4321
4322 /* Promote floor rounding to trunc rounding for unsigned operations. */
4323 if (unsignedp)
4324 {
4325 if (code == FLOOR_DIV_EXPR)
4326 code = TRUNC_DIV_EXPR;
4327 if (code == FLOOR_MOD_EXPR)
4328 code = TRUNC_MOD_EXPR;
4329 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4330 code = TRUNC_DIV_EXPR;
4331 }
4332
4333 if (op1 != const0_rtx)
4334 switch (code)
4335 {
4336 case TRUNC_MOD_EXPR:
4337 case TRUNC_DIV_EXPR:
4338 if (op1_is_constant)
4339 {
4340 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4341 int size = GET_MODE_BITSIZE (int_mode);
4342 if (unsignedp)
4343 {
4344 unsigned HOST_WIDE_INT mh, ml;
4345 int pre_shift, post_shift;
4346 int dummy;
4347 wide_int wd = rtx_mode_t (op1, int_mode);
4348 unsigned HOST_WIDE_INT d = wd.to_uhwi ();
4349
4350 if (wi::popcount (wd) == 1)
4351 {
4352 pre_shift = floor_log2 (d);
4353 if (rem_flag)
4354 {
4355 unsigned HOST_WIDE_INT mask
4356 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4357 remainder
4358 = expand_binop (int_mode, and_optab, op0,
4359 gen_int_mode (mask, int_mode),
4360 remainder, 1,
4361 OPTAB_LIB_WIDEN);
4362 if (remainder)
4363 return gen_lowpart (mode, remainder);
4364 }
4365 quotient = expand_shift (RSHIFT_EXPR, int_mode, op0,
4366 pre_shift, tquotient, 1);
4367 }
4368 else if (size <= HOST_BITS_PER_WIDE_INT)
4369 {
4370 if (d >= (HOST_WIDE_INT_1U << (size - 1)))
4371 {
4372 /* Most significant bit of divisor is set; emit an scc
4373 insn. */
4374 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4375 int_mode, 1, 1);
4376 }
4377 else
4378 {
4379 /* Find a suitable multiplier and right shift count
4380 instead of dividing by D directly. */
4381
4382 mh = choose_multiplier (d, size, size,
4383 &ml, &post_shift, &dummy);
4384
4385 /* If the suggested multiplier is more than SIZE bits,
4386 we can do better for even divisors, using an
4387 initial right shift. */
4388 if (mh != 0 && (d & 1) == 0)
4389 {
4390 pre_shift = ctz_or_zero (d);
4391 mh = choose_multiplier (d >> pre_shift, size,
4392 size - pre_shift,
4393 &ml, &post_shift, &dummy);
4394 gcc_assert (!mh);
4395 }
4396 else
4397 pre_shift = 0;
4398
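/* A sketch of the two cases below: when the N + 1 bit multiplier
m = 2^N + ml does not fit in N bits (mh != 0), the add-back sequence
t1 = highpart (x * ml);
q = (t1 + ((x - t1) >> 1)) >> (post_shift - 1);
evaluates (x * m) >> (N + post_shift) without needing an N + 1 bit
intermediate (cf. Granlund & Montgomery, PLDI 1994); otherwise a
single highpart multiply after the pre-shift suffices. */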
4399 if (mh != 0)
4400 {
4401 rtx t1, t2, t3, t4;
4402
4403 if (post_shift - 1 >= BITS_PER_WORD)
4404 goto fail1;
4405
4406 extra_cost
4407 = (shift_cost (speed, int_mode, post_shift - 1)
4408 + shift_cost (speed, int_mode, 1)
4409 + 2 * add_cost (speed, int_mode));
4410 t1 = expmed_mult_highpart
4411 (int_mode, op0, gen_int_mode (ml, int_mode),
4412 NULL_RTX, 1, max_cost - extra_cost);
4413 if (t1 == 0)
4414 goto fail1;
4415 t2 = force_operand (gen_rtx_MINUS (int_mode,
4416 op0, t1),
4417 NULL_RTX);
4418 t3 = expand_shift (RSHIFT_EXPR, int_mode,
4419 t2, 1, NULL_RTX, 1);
4420 t4 = force_operand (gen_rtx_PLUS (int_mode,
4421 t1, t3),
4422 NULL_RTX);
4423 quotient = expand_shift
4424 (RSHIFT_EXPR, int_mode, t4,
4425 post_shift - 1, tquotient, 1);
4426 }
4427 else
4428 {
4429 rtx t1, t2;
4430
4431 if (pre_shift >= BITS_PER_WORD
4432 || post_shift >= BITS_PER_WORD)
4433 goto fail1;
4434
4435 t1 = expand_shift
4436 (RSHIFT_EXPR, int_mode, op0,
4437 pre_shift, NULL_RTX, 1);
4438 extra_cost
4439 = (shift_cost (speed, int_mode, pre_shift)
4440 + shift_cost (speed, int_mode, post_shift));
4441 t2 = expmed_mult_highpart
4442 (int_mode, t1,
4443 gen_int_mode (ml, int_mode),
4444 NULL_RTX, 1, max_cost - extra_cost);
4445 if (t2 == 0)
4446 goto fail1;
4447 quotient = expand_shift
4448 (RSHIFT_EXPR, int_mode, t2,
4449 post_shift, tquotient, 1);
4450 }
4451 }
4452 }
4453 else /* Mode too wide to use the tricky code. */
4454 break;
4455
4456 insn = get_last_insn ();
4457 if (insn != last)
4458 set_dst_reg_note (insn, REG_EQUAL,
4459 gen_rtx_UDIV (int_mode, op0, op1),
4460 quotient);
4461 }
4462 else /* TRUNC_DIV, signed */
4463 {
4464 unsigned HOST_WIDE_INT ml;
4465 int lgup, post_shift;
4466 rtx mlr;
4467 HOST_WIDE_INT d = INTVAL (op1);
4468 unsigned HOST_WIDE_INT abs_d;
4469
4470 /* Since d might be INT_MIN, we have to cast to
4471 unsigned HOST_WIDE_INT before negating to avoid
4472 undefined signed overflow. */
4473 abs_d = (d >= 0
4474 ? (unsigned HOST_WIDE_INT) d
4475 : - (unsigned HOST_WIDE_INT) d);
4476
4477 /* n rem d = n rem -d */
4478 if (rem_flag && d < 0)
4479 {
4480 d = abs_d;
4481 op1 = gen_int_mode (abs_d, int_mode);
4482 }
4483
4484 if (d == 1)
4485 quotient = op0;
4486 else if (d == -1)
4487 quotient = expand_unop (int_mode, neg_optab, op0,
4488 tquotient, 0);
4489 else if (size <= HOST_BITS_PER_WIDE_INT
4490 && abs_d == HOST_WIDE_INT_1U << (size - 1))
4491 {
4492 /* This case is not handled correctly below. */
4493 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4494 int_mode, 1, 1);
4495 if (quotient == 0)
4496 goto fail1;
4497 }
4498 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4499 && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
4500 && (rem_flag
4501 ? smod_pow2_cheap (speed, int_mode)
4502 : sdiv_pow2_cheap (speed, int_mode))
4503 /* We assume that the cheap metric is true if the
4504 optab has an expander for this mode. */
4505 && ((optab_handler ((rem_flag ? smod_optab
4506 : sdiv_optab),
4507 int_mode)
4508 != CODE_FOR_nothing)
4509 || (optab_handler (sdivmod_optab, int_mode)
4510 != CODE_FOR_nothing)))
4511 ;
4512 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d)
4513 && (size <= HOST_BITS_PER_WIDE_INT
4514 || abs_d != (unsigned HOST_WIDE_INT) d))
4515 {
4516 if (rem_flag)
4517 {
4518 remainder = expand_smod_pow2 (int_mode, op0, d);
4519 if (remainder)
4520 return gen_lowpart (mode, remainder);
4521 }
4522
4523 if (sdiv_pow2_cheap (speed, int_mode)
4524 && ((optab_handler (sdiv_optab, int_mode)
4525 != CODE_FOR_nothing)
4526 || (optab_handler (sdivmod_optab, int_mode)
4527 != CODE_FOR_nothing)))
4528 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4529 int_mode, op0,
4530 gen_int_mode (abs_d,
4531 int_mode),
4532 NULL_RTX, 0);
4533 else
4534 quotient = expand_sdiv_pow2 (int_mode, op0, abs_d);
4535
4536 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4537 negate the quotient. */
4538 if (d < 0)
4539 {
4540 insn = get_last_insn ();
4541 if (insn != last
4542 && abs_d < (HOST_WIDE_INT_1U
4543 << (HOST_BITS_PER_WIDE_INT - 1)))
4544 set_dst_reg_note (insn, REG_EQUAL,
4545 gen_rtx_DIV (int_mode, op0,
4546 gen_int_mode
4547 (abs_d,
4548 int_mode)),
4549 quotient);
4550
4551 quotient = expand_unop (int_mode, neg_optab,
4552 quotient, quotient, 0);
4553 }
4554 }
4555 else if (size <= HOST_BITS_PER_WIDE_INT)
4556 {
4557 choose_multiplier (abs_d, size, size - 1,
4558 &ml, &post_shift, &lgup);
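/* Sketch: when the chosen multiplier ml fits in size - 1 bits, the
signed quotient for d > 0 is
q = (highpart (x * ml) >> post_shift) - (x >> (size - 1)),
where subtracting the broadcast sign bit corrects the rounding for
negative X; otherwise ml is used as a negative multiplier and an
extra add of X is needed before the shifts (cf. Granlund &
Montgomery, PLDI 1994). */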
4559 if (ml < HOST_WIDE_INT_1U << (size - 1))
4560 {
4561 rtx t1, t2, t3;
4562
4563 if (post_shift >= BITS_PER_WORD
4564 || size - 1 >= BITS_PER_WORD)
4565 goto fail1;
4566
4567 extra_cost = (shift_cost (speed, int_mode, post_shift)
4568 + shift_cost (speed, int_mode, size - 1)
4569 + add_cost (speed, int_mode));
4570 t1 = expmed_mult_highpart
4571 (int_mode, op0, gen_int_mode (ml, int_mode),
4572 NULL_RTX, 0, max_cost - extra_cost);
4573 if (t1 == 0)
4574 goto fail1;
4575 t2 = expand_shift
4576 (RSHIFT_EXPR, int_mode, t1,
4577 post_shift, NULL_RTX, 0);
4578 t3 = expand_shift
4579 (RSHIFT_EXPR, int_mode, op0,
4580 size - 1, NULL_RTX, 0);
4581 if (d < 0)
4582 quotient
4583 = force_operand (gen_rtx_MINUS (int_mode, t3, t2),
4584 tquotient);
4585 else
4586 quotient
4587 = force_operand (gen_rtx_MINUS (int_mode, t2, t3),
4588 tquotient);
4589 }
4590 else
4591 {
4592 rtx t1, t2, t3, t4;
4593
4594 if (post_shift >= BITS_PER_WORD
4595 || size - 1 >= BITS_PER_WORD)
4596 goto fail1;
4597
4598 ml |= HOST_WIDE_INT_M1U << (size - 1);
4599 mlr = gen_int_mode (ml, int_mode);
4600 extra_cost = (shift_cost (speed, int_mode, post_shift)
4601 + shift_cost (speed, int_mode, size - 1)
4602 + 2 * add_cost (speed, int_mode));
4603 t1 = expmed_mult_highpart (int_mode, op0, mlr,
4604 NULL_RTX, 0,
4605 max_cost - extra_cost);
4606 if (t1 == 0)
4607 goto fail1;
4608 t2 = force_operand (gen_rtx_PLUS (int_mode, t1, op0),
4609 NULL_RTX);
4610 t3 = expand_shift
4611 (RSHIFT_EXPR, int_mode, t2,
4612 post_shift, NULL_RTX, 0);
4613 t4 = expand_shift
4614 (RSHIFT_EXPR, int_mode, op0,
4615 size - 1, NULL_RTX, 0);
4616 if (d < 0)
4617 quotient
4618 = force_operand (gen_rtx_MINUS (int_mode, t4, t3),
4619 tquotient);
4620 else
4621 quotient
4622 = force_operand (gen_rtx_MINUS (int_mode, t3, t4),
4623 tquotient);
4624 }
4625 }
4626 else /* Mode too wide to use the tricky code. */
4627 break;
4628
4629 insn = get_last_insn ();
4630 if (insn != last)
4631 set_dst_reg_note (insn, REG_EQUAL,
4632 gen_rtx_DIV (int_mode, op0, op1),
4633 quotient);
4634 }
4635 break;
4636 }
4637 fail1:
4638 delete_insns_since (last);
4639 break;
4640
4641 case FLOOR_DIV_EXPR:
4642 case FLOOR_MOD_EXPR:
4643 /* We will come here only for signed operations. */
4644 if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
4645 {
4646 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4647 int size = GET_MODE_BITSIZE (int_mode);
4648 unsigned HOST_WIDE_INT mh, ml;
4649 int pre_shift, lgup, post_shift;
4650 HOST_WIDE_INT d = INTVAL (op1);
4651
4652 if (d > 0)
4653 {
4654 /* We could just as easily deal with negative constants here,
4655 but it does not seem worth the trouble for GCC 2.6. */
4656 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4657 {
4658 pre_shift = floor_log2 (d);
4659 if (rem_flag)
4660 {
4661 unsigned HOST_WIDE_INT mask
4662 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4663 remainder = expand_binop
4664 (int_mode, and_optab, op0,
4665 gen_int_mode (mask, int_mode),
4666 remainder, 0, OPTAB_LIB_WIDEN);
4667 if (remainder)
4668 return gen_lowpart (mode, remainder);
4669 }
4670 quotient = expand_shift
4671 (RSHIFT_EXPR, int_mode, op0,
4672 pre_shift, tquotient, 0);
4673 }
4674 else
4675 {
4676 rtx t1, t2, t3, t4;
4677
4678 mh = choose_multiplier (d, size, size - 1,
4679 &ml, &post_shift, &lgup);
4680 gcc_assert (!mh);
4681
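/* Sketch of the sequence below: with s = x >> (size - 1) (zero or
all ones), x ^ s equals x for x >= 0 and -x - 1 for x < 0. Since
for x < 0 and d > 0
floor (x/d) == -floor ((-x - 1)/d) - 1 == floor ((-x - 1)/d) ^ -1,
an unsigned multiply-based divide of x ^ s followed by another XOR
with s yields the floor quotient for either sign of X. */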
4682 if (post_shift < BITS_PER_WORD
4683 && size - 1 < BITS_PER_WORD)
4684 {
4685 t1 = expand_shift
4686 (RSHIFT_EXPR, int_mode, op0,
4687 size - 1, NULL_RTX, 0);
4688 t2 = expand_binop (int_mode, xor_optab, op0, t1,
4689 NULL_RTX, 0, OPTAB_WIDEN);
4690 extra_cost = (shift_cost (speed, int_mode, post_shift)
4691 + shift_cost (speed, int_mode, size - 1)
4692 + 2 * add_cost (speed, int_mode));
4693 t3 = expmed_mult_highpart
4694 (int_mode, t2, gen_int_mode (ml, int_mode),
4695 NULL_RTX, 1, max_cost - extra_cost);
4696 if (t3 != 0)
4697 {
4698 t4 = expand_shift
4699 (RSHIFT_EXPR, int_mode, t3,
4700 post_shift, NULL_RTX, 1);
4701 quotient = expand_binop (int_mode, xor_optab,
4702 t4, t1, tquotient, 0,
4703 OPTAB_WIDEN);
4704 }
4705 }
4706 }
4707 }
4708 else
4709 {
4710 rtx nsign, t1, t2, t3, t4;
4711 t1 = force_operand (gen_rtx_PLUS (int_mode,
4712 op0, constm1_rtx), NULL_RTX);
4713 t2 = expand_binop (int_mode, ior_optab, op0, t1, NULL_RTX,
4714 0, OPTAB_WIDEN);
4715 nsign = expand_shift (RSHIFT_EXPR, int_mode, t2,
4716 size - 1, NULL_RTX, 0);
4717 t3 = force_operand (gen_rtx_MINUS (int_mode, t1, nsign),
4718 NULL_RTX);
4719 t4 = expand_divmod (0, TRUNC_DIV_EXPR, int_mode, t3, op1,
4720 NULL_RTX, 0);
4721 if (t4)
4722 {
4723 rtx t5;
4724 t5 = expand_unop (int_mode, one_cmpl_optab, nsign,
4725 NULL_RTX, 0);
4726 quotient = force_operand (gen_rtx_PLUS (int_mode, t4, t5),
4727 tquotient);
4728 }
4729 }
4730 }
4731
4732 if (quotient != 0)
4733 break;
4734 delete_insns_since (last);
4735
4736 /* Try using an instruction that produces both the quotient and
4737 remainder, using truncation. We can easily compensate the quotient
4738 or remainder to get floor rounding, once we have the remainder.
4739 Notice that we also compute the final remainder value here,
4740 and return the result right away. */
4741 if (target == 0 || GET_MODE (target) != compute_mode)
4742 target = gen_reg_rtx (compute_mode);
4743
4744 if (rem_flag)
4745 {
4746 remainder
4747 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4748 quotient = gen_reg_rtx (compute_mode);
4749 }
4750 else
4751 {
4752 quotient
4753 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4754 remainder = gen_reg_rtx (compute_mode);
4755 }
4756
4757 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4758 quotient, remainder, 0))
4759 {
4760 /* This could be computed with a branchless sequence.
4761 Save that for later. */
4762 rtx tem;
4763 rtx_code_label *label = gen_label_rtx ();
4764 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4765 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4766 NULL_RTX, 0, OPTAB_WIDEN);
4767 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4768 expand_dec (quotient, const1_rtx);
4769 expand_inc (remainder, op1);
4770 emit_label (label);
4771 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4772 }
4773
4774 /* No luck with division elimination or divmod. Have to do it
4775 by conditionally adjusting op0 *and* the result. */
4776 {
4777 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4778 rtx adjusted_op0;
4779 rtx tem;
4780
4781 quotient = gen_reg_rtx (compute_mode);
4782 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4783 label1 = gen_label_rtx ();
4784 label2 = gen_label_rtx ();
4785 label3 = gen_label_rtx ();
4786 label4 = gen_label_rtx ();
4787 label5 = gen_label_rtx ();
4788 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4789 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4790 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4791 quotient, 0, OPTAB_LIB_WIDEN);
4792 if (tem != quotient)
4793 emit_move_insn (quotient, tem);
4794 emit_jump_insn (targetm.gen_jump (label5));
4795 emit_barrier ();
4796 emit_label (label1);
4797 expand_inc (adjusted_op0, const1_rtx);
4798 emit_jump_insn (targetm.gen_jump (label4));
4799 emit_barrier ();
4800 emit_label (label2);
4801 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4802 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4803 quotient, 0, OPTAB_LIB_WIDEN);
4804 if (tem != quotient)
4805 emit_move_insn (quotient, tem);
4806 emit_jump_insn (targetm.gen_jump (label5));
4807 emit_barrier ();
4808 emit_label (label3);
4809 expand_dec (adjusted_op0, const1_rtx);
4810 emit_label (label4);
4811 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4812 quotient, 0, OPTAB_LIB_WIDEN);
4813 if (tem != quotient)
4814 emit_move_insn (quotient, tem);
4815 expand_dec (quotient, const1_rtx);
4816 emit_label (label5);
4817 }
4818 break;
4819
4820 case CEIL_DIV_EXPR:
4821 case CEIL_MOD_EXPR:
4822 if (unsignedp)
4823 {
4824 if (op1_is_constant
4825 && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4826 && (HWI_COMPUTABLE_MODE_P (compute_mode)
4827 || INTVAL (op1) >= 0))
4828 {
4829 scalar_int_mode int_mode
4830 = as_a <scalar_int_mode> (compute_mode);
4831 rtx t1, t2, t3;
4832 unsigned HOST_WIDE_INT d = INTVAL (op1);
4833 t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
4834 floor_log2 (d), tquotient, 1);
4835 t2 = expand_binop (int_mode, and_optab, op0,
4836 gen_int_mode (d - 1, int_mode),
4837 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4838 t3 = gen_reg_rtx (int_mode);
4839 t3 = emit_store_flag (t3, NE, t2, const0_rtx, int_mode, 1, 1);
4840 if (t3 == 0)
4841 {
4842 rtx_code_label *lab;
4843 lab = gen_label_rtx ();
4844 do_cmp_and_jump (t2, const0_rtx, EQ, int_mode, lab);
4845 expand_inc (t1, const1_rtx);
4846 emit_label (lab);
4847 quotient = t1;
4848 }
4849 else
4850 quotient = force_operand (gen_rtx_PLUS (int_mode, t1, t3),
4851 tquotient);
4852 break;
4853 }
4854
4855 /* Try using an instruction that produces both the quotient and
4856 remainder, using truncation. We can easily compensate the
4857 quotient or remainder to get ceiling rounding, once we have the
4858 remainder. Notice that we also compute the final remainder
4859 value here, and return the result right away. */
4860 if (target == 0 || GET_MODE (target) != compute_mode)
4861 target = gen_reg_rtx (compute_mode);
4862
4863 if (rem_flag)
4864 {
4865 remainder = (REG_P (target)
4866 ? target : gen_reg_rtx (compute_mode));
4867 quotient = gen_reg_rtx (compute_mode);
4868 }
4869 else
4870 {
4871 quotient = (REG_P (target)
4872 ? target : gen_reg_rtx (compute_mode));
4873 remainder = gen_reg_rtx (compute_mode);
4874 }
4875
4876 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4877 remainder, 1))
4878 {
4879 /* This could be computed with a branchless sequence.
4880 Save that for later. */
4881 rtx_code_label *label = gen_label_rtx ();
4882 do_cmp_and_jump (remainder, const0_rtx, EQ,
4883 compute_mode, label);
4884 expand_inc (quotient, const1_rtx);
4885 expand_dec (remainder, op1);
4886 emit_label (label);
4887 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4888 }
4889
4890 /* No luck with division elimination or divmod. Have to do it
4891 by conditionally adjusting op0 *and* the result. */
4892 {
4893 rtx_code_label *label1, *label2;
4894 rtx adjusted_op0, tem;
4895
4896 quotient = gen_reg_rtx (compute_mode);
4897 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4898 label1 = gen_label_rtx ();
4899 label2 = gen_label_rtx ();
4900 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4901 compute_mode, label1);
4902 emit_move_insn (quotient, const0_rtx);
4903 emit_jump_insn (targetm.gen_jump (label2));
4904 emit_barrier ();
4905 emit_label (label1);
4906 expand_dec (adjusted_op0, const1_rtx);
4907 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4908 quotient, 1, OPTAB_LIB_WIDEN);
4909 if (tem != quotient)
4910 emit_move_insn (quotient, tem);
4911 expand_inc (quotient, const1_rtx);
4912 emit_label (label2);
4913 }
4914 }
4915 else /* signed */
4916 {
4917 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4918 && INTVAL (op1) >= 0)
4919 {
4920 /* This is extremely similar to the code for the unsigned case
4921 above. For 2.7 we should merge these variants, but for
4922 2.6.1 I don't want to touch the code for unsigned since that
4923 gets used in C. The signed case will only be used by other
4924 languages (Ada). */
4925
4926 rtx t1, t2, t3;
4927 unsigned HOST_WIDE_INT d = INTVAL (op1);
4928 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4929 floor_log2 (d), tquotient, 0);
4930 t2 = expand_binop (compute_mode, and_optab, op0,
4931 gen_int_mode (d - 1, compute_mode),
4932 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4933 t3 = gen_reg_rtx (compute_mode);
4934 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4935 compute_mode, 1, 1);
4936 if (t3 == 0)
4937 {
4938 rtx_code_label *lab;
4939 lab = gen_label_rtx ();
4940 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4941 expand_inc (t1, const1_rtx);
4942 emit_label (lab);
4943 quotient = t1;
4944 }
4945 else
4946 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4947 t1, t3),
4948 tquotient);
4949 break;
4950 }
4951
4952 /* Try using an instruction that produces both the quotient and
4953 remainder, using truncation. We can easily compensate the
4954 quotient or remainder to get ceiling rounding, once we have the
4955 remainder. Notice that we also compute the final remainder
4956 value here, and return the result right away. */
4957 if (target == 0 || GET_MODE (target) != compute_mode)
4958 target = gen_reg_rtx (compute_mode);
4959 if (rem_flag)
4960 {
4961 remainder = (REG_P (target)
4962 ? target : gen_reg_rtx (compute_mode));
4963 quotient = gen_reg_rtx (compute_mode);
4964 }
4965 else
4966 {
4967 quotient = (REG_P (target)
4968 ? target : gen_reg_rtx (compute_mode));
4969 remainder = gen_reg_rtx (compute_mode);
4970 }
4971
4972 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4973 remainder, 0))
4974 {
4975 /* This could be computed with a branch-less sequence.
4976 Save that for later. */
4977 rtx tem;
4978 rtx_code_label *label = gen_label_rtx ();
4979 do_cmp_and_jump (remainder, const0_rtx, EQ,
4980 compute_mode, label);
4981 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4982 NULL_RTX, 0, OPTAB_WIDEN);
4983 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4984 expand_inc (quotient, const1_rtx);
4985 expand_dec (remainder, op1);
4986 emit_label (label);
4987 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4988 }
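	    /* Editorial note (sketch, not original source): in the signed
	       case the truncated quotient needs the ceiling adjustment
	       only when the remainder is nonzero and x and y have the
	       same sign, i.e. when the exact quotient is positive:

	         int q = x / y, r = x % y;
	         if (r != 0 && (x ^ y) >= 0)
	           {
	             q += 1;
	             r -= y;
	           }

	       which is what the xor test and the two branches above
	       implement.  */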
4989
4990 /* No luck with division elimination or divmod. Have to do it
4991 by conditionally adjusting op0 *and* the result. */
4992 {
4993 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4994 rtx adjusted_op0;
4995 rtx tem;
4996
4997 quotient = gen_reg_rtx (compute_mode);
4998 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4999 label1 = gen_label_rtx ();
5000 label2 = gen_label_rtx ();
5001 label3 = gen_label_rtx ();
5002 label4 = gen_label_rtx ();
5003 label5 = gen_label_rtx ();
5004 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
5005 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
5006 compute_mode, label1);
5007 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5008 quotient, 0, OPTAB_LIB_WIDEN);
5009 if (tem != quotient)
5010 emit_move_insn (quotient, tem);
5011 emit_jump_insn (targetm.gen_jump (label5));
5012 emit_barrier ();
5013 emit_label (label1);
5014 expand_dec (adjusted_op0, const1_rtx);
5015 emit_jump_insn (targetm.gen_jump (label4));
5016 emit_barrier ();
5017 emit_label (label2);
5018 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
5019 compute_mode, label3);
5020 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5021 quotient, 0, OPTAB_LIB_WIDEN);
5022 if (tem != quotient)
5023 emit_move_insn (quotient, tem);
5024 emit_jump_insn (targetm.gen_jump (label5));
5025 emit_barrier ();
5026 emit_label (label3);
5027 expand_inc (adjusted_op0, const1_rtx);
5028 emit_label (label4);
5029 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5030 quotient, 0, OPTAB_LIB_WIDEN);
5031 if (tem != quotient)
5032 emit_move_insn (quotient, tem);
5033 expand_inc (quotient, const1_rtx);
5034 emit_label (label5);
5035 }
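	    /* Editorial note (sketch, not original source): the five
	       labels above are the branchy form of

	         if (y >= 0)
	           q = (x > 0) ? (x - 1) / y + 1 : x / y;
	         else
	           q = (x < 0) ? (x + 1) / y + 1 : x / y;

	       moving x one step toward zero before the truncating
	       division so that it lands on the ceiling, then adding
	       the one back.  */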
5036 }
5037 break;
5038
5039 case EXACT_DIV_EXPR:
5040 if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
5041 {
5042 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5043 int size = GET_MODE_BITSIZE (int_mode);
5044 HOST_WIDE_INT d = INTVAL (op1);
5045 unsigned HOST_WIDE_INT ml;
5046 int pre_shift;
5047 rtx t1;
5048
5049 pre_shift = ctz_or_zero (d);
5050 ml = invert_mod2n (d >> pre_shift, size);
5051 t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5052 pre_shift, NULL_RTX, unsignedp);
5053 quotient = expand_mult (int_mode, t1, gen_int_mode (ml, int_mode),
5054 NULL_RTX, 1);
5055
5056 insn = get_last_insn ();
5057 set_dst_reg_note (insn, REG_EQUAL,
5058 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5059 int_mode, op0, op1),
5060 quotient);
5061 }
5062 break;
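	/* Editorial note (sketch, not original source): EXACT_DIV_EXPR may
	   assume that x is a multiple of d, so the division reduces to a
	   right shift (for the factors of two in d) and a multiply by the
	   inverse of the odd part of d modulo 2**size.  For example, for a
	   32-bit exact division by 10, 0xcccccccd is the inverse of 5:

	     uint32_t
	     exact_div10 (uint32_t x)
	     {
	       return (x >> 1) * 0xcccccccdU;
	     }

	   exact_div10 (30) == 3 and exact_div10 (250) == 25; the result is
	   meaningless when x is not a multiple of 10.  */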
5063
5064 case ROUND_DIV_EXPR:
5065 case ROUND_MOD_EXPR:
5066 if (unsignedp)
5067 {
5068 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5069 rtx tem;
5070 rtx_code_label *label;
5071 label = gen_label_rtx ();
5072 quotient = gen_reg_rtx (int_mode);
5073 remainder = gen_reg_rtx (int_mode);
5074 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
5075 {
5076 rtx tem;
5077 quotient = expand_binop (int_mode, udiv_optab, op0, op1,
5078 quotient, 1, OPTAB_LIB_WIDEN);
5079 tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 1);
5080 remainder = expand_binop (int_mode, sub_optab, op0, tem,
5081 remainder, 1, OPTAB_LIB_WIDEN);
5082 }
5083 tem = plus_constant (int_mode, op1, -1);
5084 tem = expand_shift (RSHIFT_EXPR, int_mode, tem, 1, NULL_RTX, 1);
5085 do_cmp_and_jump (remainder, tem, LEU, int_mode, label);
5086 expand_inc (quotient, const1_rtx);
5087 expand_dec (remainder, op1);
5088 emit_label (label);
5089 }
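	    /* Editorial note (sketch, not original source): the unsigned
	       round-to-nearest above adjusts the truncated result when the
	       remainder exceeds half the divisor:

	         unsigned int q = x / y, r = x % y;
	         if (r > (y - 1) / 2)    // halfway cases round up
	           {
	             q += 1;
	             r -= y;
	           }
	    */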
5090 else
5091 {
5092 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5093 int size = GET_MODE_BITSIZE (int_mode);
5094 rtx abs_rem, abs_op1, tem, mask;
5095 rtx_code_label *label;
5096 label = gen_label_rtx ();
5097 quotient = gen_reg_rtx (int_mode);
5098 remainder = gen_reg_rtx (int_mode);
5099 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
5100 {
5101 rtx tem;
5102 quotient = expand_binop (int_mode, sdiv_optab, op0, op1,
5103 quotient, 0, OPTAB_LIB_WIDEN);
5104 tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 0);
5105 remainder = expand_binop (int_mode, sub_optab, op0, tem,
5106 remainder, 0, OPTAB_LIB_WIDEN);
5107 }
5108 abs_rem = expand_abs (int_mode, remainder, NULL_RTX, 1, 0);
5109 abs_op1 = expand_abs (int_mode, op1, NULL_RTX, 1, 0);
5110 tem = expand_shift (LSHIFT_EXPR, int_mode, abs_rem,
5111 1, NULL_RTX, 1);
5112 do_cmp_and_jump (tem, abs_op1, LTU, int_mode, label);
5113 tem = expand_binop (int_mode, xor_optab, op0, op1,
5114 NULL_RTX, 0, OPTAB_WIDEN);
5115 mask = expand_shift (RSHIFT_EXPR, int_mode, tem,
5116 size - 1, NULL_RTX, 0);
5117 tem = expand_binop (int_mode, xor_optab, mask, const1_rtx,
5118 NULL_RTX, 0, OPTAB_WIDEN);
5119 tem = expand_binop (int_mode, sub_optab, tem, mask,
5120 NULL_RTX, 0, OPTAB_WIDEN);
5121 expand_inc (quotient, tem);
5122 tem = expand_binop (int_mode, xor_optab, mask, op1,
5123 NULL_RTX, 0, OPTAB_WIDEN);
5124 tem = expand_binop (int_mode, sub_optab, tem, mask,
5125 NULL_RTX, 0, OPTAB_WIDEN);
5126 expand_dec (remainder, tem);
5127 emit_label (label);
5128 }
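	    /* Editorial note (sketch, not original source): the signed
	       branch-free tail above uses a sign mask in place of a second
	       branch.  Assuming 32-bit int and arithmetic right shift:

	         int q = x / y, r = x % y;
	         if (2 * abs (r) >= abs (y))      // rounding needed
	           {
	             int mask = (x ^ y) >> 31;    // 0 or -1, sign of quotient
	             q += (mask ^ 1) - mask;      // +1 or -1
	             r -= (mask ^ y) - mask;      // +y or -y
	           }
	    */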
5129 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5130
5131 default:
5132 gcc_unreachable ();
5133 }
5134
5135 if (quotient == 0)
5136 {
5137 if (target && GET_MODE (target) != compute_mode)
5138 target = 0;
5139
5140 if (rem_flag)
5141 {
5142 /* Try to produce the remainder without producing the quotient.
5143 If we seem to have a divmod pattern that does not require widening,
5144 don't try widening here. We should really have a WIDEN argument
5145 to expand_twoval_binop, since what we'd really like to do here is
5146 1) try a mod insn in compute_mode
5147 2) try a divmod insn in compute_mode
5148 3) try a div insn in compute_mode and multiply-subtract to get
5149 remainder
5150 4) try the same things with widening allowed. */
5151 remainder
5152 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5153 op0, op1, target,
5154 unsignedp,
5155 ((optab_handler (optab2, compute_mode)
5156 != CODE_FOR_nothing)
5157 ? OPTAB_DIRECT : OPTAB_WIDEN));
5158 if (remainder == 0)
5159 {
5160 /* No luck there. Can we do remainder and divide at once
5161 without a library call? */
5162 remainder = gen_reg_rtx (compute_mode);
5163 if (! expand_twoval_binop ((unsignedp
5164 ? udivmod_optab
5165 : sdivmod_optab),
5166 op0, op1,
5167 NULL_RTX, remainder, unsignedp))
5168 remainder = 0;
5169 }
5170
5171 if (remainder)
5172 return gen_lowpart (mode, remainder);
5173 }
5174
5175 /* Produce the quotient. Try a quotient insn, but not a library call.
5176 If we have a divmod in this mode, use it in preference to widening
5177 the div (for this test we assume it will not fail). Note that optab2
5178 is set to the one of the two optabs that the call below will use. */
5179 quotient
5180 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
5181 op0, op1, rem_flag ? NULL_RTX : target,
5182 unsignedp,
5183 ((optab_handler (optab2, compute_mode)
5184 != CODE_FOR_nothing)
5185 ? OPTAB_DIRECT : OPTAB_WIDEN));
5186
5187 if (quotient == 0)
5188 {
5189 /* No luck there. Try a quotient-and-remainder insn,
5190 keeping the quotient alone. */
5191 quotient = gen_reg_rtx (compute_mode);
5192 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
5193 op0, op1,
5194 quotient, NULL_RTX, unsignedp))
5195 {
5196 quotient = 0;
5197 if (! rem_flag)
5198 /* Still no luck. If we are not computing the remainder,
5199 use a library call for the quotient. */
5200 quotient = sign_expand_binop (compute_mode,
5201 udiv_optab, sdiv_optab,
5202 op0, op1, target,
5203 unsignedp, OPTAB_LIB_WIDEN);
5204 }
5205 }
5206 }
5207
5208 if (rem_flag)
5209 {
5210 if (target && GET_MODE (target) != compute_mode)
5211 target = 0;
5212
5213 if (quotient == 0)
5214 {
5215 /* No divide instruction either. Use library for remainder. */
5216 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5217 op0, op1, target,
5218 unsignedp, OPTAB_LIB_WIDEN);
5219 /* No remainder function. Try a quotient-and-remainder
5220 function, keeping the remainder. */
5221 if (!remainder)
5222 {
5223 remainder = gen_reg_rtx (compute_mode);
5224 if (!expand_twoval_binop_libfunc
5225 (unsignedp ? udivmod_optab : sdivmod_optab,
5226 op0, op1,
5227 NULL_RTX, remainder,
5228 unsignedp ? UMOD : MOD))
5229 remainder = NULL_RTX;
5230 }
5231 }
5232 else
5233 {
5234 /* We divided. Now finish doing X - Y * (X / Y). */
5235 remainder = expand_mult (compute_mode, quotient, op1,
5236 NULL_RTX, unsignedp);
5237 remainder = expand_binop (compute_mode, sub_optab, op0,
5238 remainder, target, unsignedp,
5239 OPTAB_LIB_WIDEN);
5240 }
5241 }
5242
5243 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5244 }
5245 \f
5246 /* Return a tree node with data type TYPE, describing the value of X.
5247 Usually this is a VAR_DECL, if there is no obvious better choice.
5248 X may be an expression; however, we only support those expressions
5249 generated by loop.c. */
5250
5251 tree
5252 make_tree (tree type, rtx x)
5253 {
5254 tree t;
5255
5256 switch (GET_CODE (x))
5257 {
5258 case CONST_INT:
5259 case CONST_WIDE_INT:
5260 t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
5261 return t;
5262
5263 case CONST_DOUBLE:
5264 STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
5265 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
5266 t = wide_int_to_tree (type,
5267 wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
5268 HOST_BITS_PER_WIDE_INT * 2));
5269 else
5270 t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));
5271
5272 return t;
5273
5274 case CONST_VECTOR:
5275 {
5276 unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
5277 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
5278 tree itype = TREE_TYPE (type);
5279
5280 /* Build a tree with vector elements. */
5281 tree_vector_builder elts (type, npatterns, nelts_per_pattern);
5282 unsigned int count = elts.encoded_nelts ();
5283 for (unsigned int i = 0; i < count; ++i)
5284 {
5285 rtx elt = CONST_VECTOR_ELT (x, i);
5286 elts.quick_push (make_tree (itype, elt));
5287 }
5288
5289 return elts.build ();
5290 }
5291
5292 case PLUS:
5293 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5294 make_tree (type, XEXP (x, 1)));
5295
5296 case MINUS:
5297 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5298 make_tree (type, XEXP (x, 1)));
5299
5300 case NEG:
5301 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5302
5303 case MULT:
5304 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5305 make_tree (type, XEXP (x, 1)));
5306
5307 case ASHIFT:
5308 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5309 make_tree (type, XEXP (x, 1)));
5310
5311 case LSHIFTRT:
5312 t = unsigned_type_for (type);
5313 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5314 make_tree (t, XEXP (x, 0)),
5315 make_tree (type, XEXP (x, 1))));
5316
5317 case ASHIFTRT:
5318 t = signed_type_for (type);
5319 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5320 make_tree (t, XEXP (x, 0)),
5321 make_tree (type, XEXP (x, 1))));
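  /* Editorial note (sketch, not original source): at the tree level the
     shift variant is carried by the operand's type rather than by the
     opcode, so the two cases above wrap RSHIFT_EXPR in a cast:

       (int) ((unsigned int) x >> n)   // LSHIFTRT, logical shift
       x >> n                          // ASHIFTRT, on signed x

     and similarly DIV/UDIV below select TRUNC_DIV_EXPR on a signed or
     unsigned type.  */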
5322
5323 case DIV:
5324 if (TREE_CODE (type) != REAL_TYPE)
5325 t = signed_type_for (type);
5326 else
5327 t = type;
5328
5329 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5330 make_tree (t, XEXP (x, 0)),
5331 make_tree (t, XEXP (x, 1))));
5332 case UDIV:
5333 t = unsigned_type_for (type);
5334 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5335 make_tree (t, XEXP (x, 0)),
5336 make_tree (t, XEXP (x, 1))));
5337
5338 case SIGN_EXTEND:
5339 case ZERO_EXTEND:
5340 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5341 GET_CODE (x) == ZERO_EXTEND);
5342 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5343
5344 case CONST:
5345 {
5346 rtx op = XEXP (x, 0);
5347 if (GET_CODE (op) == VEC_DUPLICATE)
5348 {
5349 tree elt_tree = make_tree (TREE_TYPE (type), XEXP (op, 0));
5350 return build_vector_from_val (type, elt_tree);
5351 }
5352 if (GET_CODE (op) == VEC_SERIES)
5353 {
5354 tree itype = TREE_TYPE (type);
5355 tree base_tree = make_tree (itype, XEXP (op, 0));
5356 tree step_tree = make_tree (itype, XEXP (op, 1));
5357 return build_vec_series (type, base_tree, step_tree);
5358 }
5359 return make_tree (type, op);
5360 }
5361
5362 case SYMBOL_REF:
5363 t = SYMBOL_REF_DECL (x);
5364 if (t)
5365 return fold_convert (type, build_fold_addr_expr (t));
5366 /* fall through. */
5367
5368 default:
5369 if (CONST_POLY_INT_P (x))
5370 return wide_int_to_tree (type, const_poly_int_value (x));
5371
5372 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5373
5374 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5375 address mode to pointer mode. */
5376 if (POINTER_TYPE_P (type))
5377 x = convert_memory_address_addr_space
5378 (SCALAR_INT_TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5379
5380 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5381 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5382 t->decl_with_rtl.rtl = x;
5383
5384 return t;
5385 }
5386 }
5387 \f
5388 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5389 and returning TARGET.
5390
5391 If TARGET is 0, a pseudo-register or constant is returned. */
5392
5393 rtx
5394 expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
5395 {
5396 rtx tem = 0;
5397
5398 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5399 tem = simplify_binary_operation (AND, mode, op0, op1);
5400 if (tem == 0)
5401 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5402
5403 if (target == 0)
5404 target = tem;
5405 else if (tem != target)
5406 emit_move_insn (target, tem);
5407 return target;
5408 }
5409
5410 /* Helper function for emit_store_flag. */
5411 rtx
5412 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5413 machine_mode mode, machine_mode compare_mode,
5414 int unsignedp, rtx x, rtx y, int normalizep,
5415 machine_mode target_mode)
5416 {
5417 struct expand_operand ops[4];
5418 rtx op0, comparison, subtarget;
5419 rtx_insn *last;
5420 scalar_int_mode result_mode = targetm.cstore_mode (icode);
5421 scalar_int_mode int_target_mode;
5422
5423 last = get_last_insn ();
5424 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5425 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5426 if (!x || !y)
5427 {
5428 delete_insns_since (last);
5429 return NULL_RTX;
5430 }
5431
5432 if (target_mode == VOIDmode)
5433 int_target_mode = result_mode;
5434 else
5435 int_target_mode = as_a <scalar_int_mode> (target_mode);
5436 if (!target)
5437 target = gen_reg_rtx (int_target_mode);
5438
5439 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5440
5441 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5442 create_fixed_operand (&ops[1], comparison);
5443 create_fixed_operand (&ops[2], x);
5444 create_fixed_operand (&ops[3], y);
5445 if (!maybe_expand_insn (icode, 4, ops))
5446 {
5447 delete_insns_since (last);
5448 return NULL_RTX;
5449 }
5450 subtarget = ops[0].value;
5451
5452 /* If we are converting to a wider mode, first convert to
5453 INT_TARGET_MODE, then normalize. This produces better combining
5454 opportunities on machines that have a SIGN_EXTRACT when we are
5455 testing a single bit. This mostly benefits the 68k.
5456
5457 If STORE_FLAG_VALUE does not have the sign bit set when
5458 interpreted in MODE, we can do this conversion as unsigned, which
5459 is usually more efficient. */
5460 if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (result_mode))
5461 {
5462 convert_move (target, subtarget,
5463 val_signbit_known_clear_p (result_mode,
5464 STORE_FLAG_VALUE));
5465 op0 = target;
5466 result_mode = int_target_mode;
5467 }
5468 else
5469 op0 = subtarget;
5470
5471 /* If we want to keep subexpressions around, don't reuse our last
5472 target. */
5473 if (optimize)
5474 subtarget = 0;
5475
5476 /* Now normalize to the proper value in MODE. Sometimes we don't
5477 have to do anything. */
5478 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5479 ;
5480 /* STORE_FLAG_VALUE might be the most negative number, so write
5481 the comparison this way to avoid a compile-time warning. */
5482 else if (- normalizep == STORE_FLAG_VALUE)
5483 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5484
5485 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5486 it hard to use a value of just the sign bit due to ANSI integer
5487 constant typing rules. */
5488 else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
5489 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5490 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5491 normalizep == 1);
5492 else
5493 {
5494 gcc_assert (STORE_FLAG_VALUE & 1);
5495
5496 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5497 if (normalizep == -1)
5498 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5499 }
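  /* Editorial note (sketch, not original source): "normalizing" maps the
     machine's raw comparison result, STORE_FLAG_VALUE, onto the 0/1 or
     0/-1 value the caller asked for.  E.g. with STORE_FLAG_VALUE == -1
     and normalizep == 1, the negation above gives

       raw -1  ->  1,   raw 0  ->  0

     while a STORE_FLAG_VALUE of just the sign bit is normalized with a
     single shift instead.  */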
5500
5501 /* If we were converting to a smaller mode, do the conversion now. */
5502 if (int_target_mode != result_mode)
5503 {
5504 convert_move (target, op0, 0);
5505 return target;
5506 }
5507 else
5508 return op0;
5509 }
5510
5511
5512 /* A subroutine of emit_store_flag only including "tricks" that do not
5513 need a recursive call. These are kept separate to avoid infinite
5514 loops. */
5515
5516 static rtx
5517 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5518 machine_mode mode, int unsignedp, int normalizep,
5519 machine_mode target_mode)
5520 {
5521 rtx subtarget;
5522 enum insn_code icode;
5523 machine_mode compare_mode;
5524 enum mode_class mclass;
5525 enum rtx_code scode;
5526
5527 if (unsignedp)
5528 code = unsigned_condition (code);
5529 scode = swap_condition (code);
5530
5531 /* If one operand is constant, make it the second one. Only do this
5532 if the other operand is not constant as well. */
5533
5534 if (swap_commutative_operands_p (op0, op1))
5535 {
5536 std::swap (op0, op1);
5537 code = swap_condition (code);
5538 }
5539
5540 if (mode == VOIDmode)
5541 mode = GET_MODE (op0);
5542
5543 /* For some comparisons with 1 and -1, we can convert this to
5544 comparisons with zero. This will often produce more opportunities for
5545 store-flag insns. */
5546
5547 switch (code)
5548 {
5549 case LT:
5550 if (op1 == const1_rtx)
5551 op1 = const0_rtx, code = LE;
5552 break;
5553 case LE:
5554 if (op1 == constm1_rtx)
5555 op1 = const0_rtx, code = LT;
5556 break;
5557 case GE:
5558 if (op1 == const1_rtx)
5559 op1 = const0_rtx, code = GT;
5560 break;
5561 case GT:
5562 if (op1 == constm1_rtx)
5563 op1 = const0_rtx, code = GE;
5564 break;
5565 case GEU:
5566 if (op1 == const1_rtx)
5567 op1 = const0_rtx, code = NE;
5568 break;
5569 case LTU:
5570 if (op1 == const1_rtx)
5571 op1 = const0_rtx, code = EQ;
5572 break;
5573 default:
5574 break;
5575 }
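  /* Editorial note (not original source): each rewrite above is an
     integer identity, e.g. x < 1 == x <= 0 for signed operands and,
     for unsigned operands, x >= 1 == x != 0 and x < 1 == x == 0;
     comparisons against zero are what most cstore patterns match
     directly.  */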
5576
5577 /* If we are comparing a double-word integer with zero or -1, we can
5578 convert the comparison into one involving a single word. */
5579 scalar_int_mode int_mode;
5580 if (is_int_mode (mode, &int_mode)
5581 && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
5582 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5583 {
5584 rtx tem;
5585 if ((code == EQ || code == NE)
5586 && (op1 == const0_rtx || op1 == constm1_rtx))
5587 {
5588 rtx op00, op01;
5589
5590 /* Do a logical OR or AND of the two words and compare the
5591 result. */
5592 op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
5593 op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
5594 tem = expand_binop (word_mode,
5595 op1 == const0_rtx ? ior_optab : and_optab,
5596 op00, op01, NULL_RTX, unsignedp,
5597 OPTAB_DIRECT);
5598
5599 if (tem != 0)
5600 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5601 unsignedp, normalizep);
5602 }
5603 else if ((code == LT || code == GE) && op1 == const0_rtx)
5604 {
5605 rtx op0h;
5606
5607 /* If testing the sign bit, we can just test the high word. */
5608 op0h = simplify_gen_subreg (word_mode, op0, int_mode,
5609 subreg_highpart_offset (word_mode,
5610 int_mode));
5611 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5612 unsignedp, normalizep);
5613 }
5614 else
5615 tem = NULL_RTX;
5616
5617 if (tem)
5618 {
5619 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5620 return tem;
5621 if (!target)
5622 target = gen_reg_rtx (target_mode);
5623
5624 convert_move (target, tem,
5625 !val_signbit_known_set_p (word_mode,
5626 (normalizep ? normalizep
5627 : STORE_FLAG_VALUE)));
5628 return target;
5629 }
5630 }
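  /* Editorial note (sketch, not original source): for a double-word x
     the cases above reduce to single-word operations, e.g. with 32-bit
     words and a 64-bit x split into lo and hi:

       x == 0    ->  (lo | hi) == 0
       x == -1   ->  (lo & hi) == -1
       x < 0     ->  hi < 0          // sign bit is in the high word
  */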
5631
5632 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5633 complement of A (for GE) and shifting the sign bit to the low bit. */
5634 if (op1 == const0_rtx && (code == LT || code == GE)
5635 && is_int_mode (mode, &int_mode)
5636 && (normalizep || STORE_FLAG_VALUE == 1
5637 || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
5638 {
5639 scalar_int_mode int_target_mode;
5640 subtarget = target;
5641
5642 if (!target)
5643 int_target_mode = int_mode;
5644 else
5645 {
5646 /* If the result is to be wider than OP0, it is best to convert it
5647 first. If it is to be narrower, it is *incorrect* to convert it
5648 first. */
5649 int_target_mode = as_a <scalar_int_mode> (target_mode);
5650 if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (int_mode))
5651 {
5652 op0 = convert_modes (int_target_mode, int_mode, op0, 0);
5653 int_mode = int_target_mode;
5654 }
5655 }
5656
5657 if (int_target_mode != int_mode)
5658 subtarget = 0;
5659
5660 if (code == GE)
5661 op0 = expand_unop (int_mode, one_cmpl_optab, op0,
5662 ((STORE_FLAG_VALUE == 1 || normalizep)
5663 ? 0 : subtarget), 0);
5664
5665 if (STORE_FLAG_VALUE == 1 || normalizep)
5666 /* If we are supposed to produce a 0/1 value, we want to do
5667 a logical shift from the sign bit to the low-order bit; for
5668 a -1/0 value, we do an arithmetic shift. */
5669 op0 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5670 GET_MODE_BITSIZE (int_mode) - 1,
5671 subtarget, normalizep != -1);
5672
5673 if (int_mode != int_target_mode)
5674 op0 = convert_modes (int_target_mode, int_mode, op0, 0);
5675
5676 return op0;
5677 }
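  /* Editorial note (sketch, not original source): for a 32-bit int the
     two cases above become

       x < 0   ->  (unsigned int) x >> 31
       x >= 0  ->  (unsigned int) ~x >> 31

     yielding 0/1; an arithmetic shift is used instead when the caller
     wants a 0/-1 result.  */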
5678
5679 mclass = GET_MODE_CLASS (mode);
5680 FOR_EACH_MODE_FROM (compare_mode, mode)
5681 {
5682 machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5683 icode = optab_handler (cstore_optab, optab_mode);
5684 if (icode != CODE_FOR_nothing)
5685 {
5686 do_pending_stack_adjust ();
5687 rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
5688 unsignedp, op0, op1, normalizep, target_mode);
5689 if (tem)
5690 return tem;
5691
5692 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5693 {
5694 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5695 unsignedp, op1, op0, normalizep, target_mode);
5696 if (tem)
5697 return tem;
5698 }
5699 break;
5700 }
5701 }
5702
5703 return 0;
5704 }
5705
5706 /* Subroutine of emit_store_flag that handles cases in which the operands
5707 are scalar integers. SUBTARGET is the target to use for temporary
5708 operations and TRUEVAL is the value to store when the condition is
5709 true. All other arguments are as for emit_store_flag. */
5710
5711 rtx
5712 emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0,
5713 rtx op1, scalar_int_mode mode, int unsignedp,
5714 int normalizep, rtx trueval)
5715 {
5716 machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5717 rtx_insn *last = get_last_insn ();
5718
5719 /* If this is an equality comparison of integers, we can try to exclusive-or
5720 (or subtract) the two operands and use a recursive call to try the
5721 comparison with zero. Don't do any of these cases if branches are
5722 very cheap. */
5723
5724 if ((code == EQ || code == NE) && op1 != const0_rtx)
5725 {
5726 rtx tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5727 OPTAB_WIDEN);
5728
5729 if (tem == 0)
5730 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5731 OPTAB_WIDEN);
5732 if (tem != 0)
5733 tem = emit_store_flag (target, code, tem, const0_rtx,
5734 mode, unsignedp, normalizep);
5735 if (tem != 0)
5736 return tem;
5737
5738 delete_insns_since (last);
5739 }
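  /* Editorial note (not original source): i.e. x == y is rewritten as
     (x ^ y) == 0, or as (x - y) == 0 when no xor pattern is available,
     so only the compare-with-zero tricks below are needed.  */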
5740
5741 /* For integer comparisons, try the reverse comparison. However, for
5742 small X, if we'd have to extend anyway, implementing "X != 0"
5743 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5744 rtx_code rcode = reverse_condition (code);
5745 if (can_compare_p (rcode, mode, ccp_store_flag)
5746 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5747 && code == NE
5748 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5749 && op1 == const0_rtx))
5750 {
5751 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5752 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5753
5754 /* Again, for the reverse comparison, use either an addition or an XOR. */
5755 if (want_add
5756 && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
5757 optimize_insn_for_speed_p ()) == 0)
5758 {
5759 rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5760 STORE_FLAG_VALUE, target_mode);
5761 if (tem != 0)
5762 tem = expand_binop (target_mode, add_optab, tem,
5763 gen_int_mode (normalizep, target_mode),
5764 target, 0, OPTAB_WIDEN);
5765 if (tem != 0)
5766 return tem;
5767 }
5768 else if (!want_add
5769 && rtx_cost (trueval, mode, XOR, 1,
5770 optimize_insn_for_speed_p ()) == 0)
5771 {
5772 rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5773 normalizep, target_mode);
5774 if (tem != 0)
5775 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5776 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5777 if (tem != 0)
5778 return tem;
5779 }
5780
5781 delete_insns_since (last);
5782 }
5783
5784 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5785 the constant zero. Reject all other comparisons at this point. Only
5786 do LE and GT if branches are expensive since they are expensive on
5787 2-operand machines. */
5788
5789 if (op1 != const0_rtx
5790 || (code != EQ && code != NE
5791 && (BRANCH_COST (optimize_insn_for_speed_p (),
5792 false) <= 1 || (code != LE && code != GT))))
5793 return 0;
5794
5795 /* Try to put the result of the comparison in the sign bit. Assume we can't
5796 do the necessary operation below. */
5797
5798 rtx tem = 0;
5799
5800 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5801 the sign bit set. */
5802
5803 if (code == LE)
5804 {
5805 /* This is destructive, so SUBTARGET can't be OP0. */
5806 if (rtx_equal_p (subtarget, op0))
5807 subtarget = 0;
5808
5809 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5810 OPTAB_WIDEN);
5811 if (tem)
5812 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5813 OPTAB_WIDEN);
5814 }
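  /* Editorial note (sketch, not original source): x | (x - 1) keeps the
     sign bit of any negative x, and for x == 0 it is 0 | -1 == -1, so
     its sign bit is set exactly when x <= 0.  E.g. x == 1 gives
     1 | 0 == 1 (sign clear); x == 0 gives -1 (sign set).  */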
5815
5816 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5817 number of bits in the mode of OP0, minus one. */
5818
5819 if (code == GT)
5820 {
5821 if (rtx_equal_p (subtarget, op0))
5822 subtarget = 0;
5823
5824 tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
5825 GET_MODE_BITSIZE (mode) - 1,
5826 subtarget, 0);
5827 if (tem)
5828 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5829 OPTAB_WIDEN);
5830 }
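  /* Editorial note (sketch, not original source): for a 32-bit int the
     test above is the sign bit of (x >> 31) - x: x > 0 gives 0 - x,
     which is negative; x == 0 gives 0; and x < 0 gives -1 - x == ~x,
     which is non-negative.  So the sign bit is set iff x > 0.  */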
5831
5832 if (code == EQ || code == NE)
5833 {
5834 /* For EQ or NE, one way to do the comparison is to apply an operation
5835 that converts the operand into a positive number if it is nonzero
5836 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5837 for NE we negate. This puts the result in the sign bit. Then we
5838 normalize with a shift, if needed.
5839
5840 Two operations that can do the above actions are ABS and FFS, so try
5841 them. If that doesn't work, and MODE is smaller than a full word,
5842 we can use zero-extension to the wider mode (an unsigned conversion)
5843 as the operation. */
5844
5845 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5846 that is compensated by the subsequent overflow when subtracting
5847 one / negating. */
5848
5849 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5850 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5851 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5852 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5853 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5854 {
5855 tem = convert_modes (word_mode, mode, op0, 1);
5856 mode = word_mode;
5857 }
5858
5859 if (tem != 0)
5860 {
5861 if (code == EQ)
5862 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5863 0, OPTAB_WIDEN);
5864 else
5865 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5866 }
5867
5868 /* If we couldn't do it that way, for NE we can "or" the two's complement
5869 of the value with itself. For EQ, we take the one's complement of
5870 that "or", which is an extra insn, so we only handle EQ if branches
5871 are expensive. */
5872
5873 if (tem == 0
5874 && (code == NE
5875 || BRANCH_COST (optimize_insn_for_speed_p (),
5876 false) > 1))
5877 {
5878 if (rtx_equal_p (subtarget, op0))
5879 subtarget = 0;
5880
5881 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5882 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5883 OPTAB_WIDEN);
5884
5885 if (tem && code == EQ)
5886 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5887 }
5888 }
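  /* Editorial note (sketch, not original source): with ABS available,
     x == 0 becomes the sign bit of abs (x) - 1 and x != 0 the sign bit
     of -abs (x): zero maps to -1 resp. 0, and any nonzero x maps to a
     non-negative resp. negative value.  INT_MIN works out because
     abs (INT_MIN) wraps back to INT_MIN and the subsequent subtract or
     negate overflows in the direction the comment above relies on.  */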
5889
5890 if (tem && normalizep)
5891 tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
5892 GET_MODE_BITSIZE (mode) - 1,
5893 subtarget, normalizep == 1);
5894
5895 if (tem)
5896 {
5897 if (!target)
5898 ;
5899 else if (GET_MODE (tem) != target_mode)
5900 {
5901 convert_move (target, tem, 0);
5902 tem = target;
5903 }
5904 else if (!subtarget)
5905 {
5906 emit_move_insn (target, tem);
5907 tem = target;
5908 }
5909 }
5910 else
5911 delete_insns_since (last);
5912
5913 return tem;
5914 }
5915
5916 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5917 and storing in TARGET. Normally return TARGET.
5918 Return 0 if that cannot be done.
5919
5920 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5921 it is VOIDmode, they cannot both be CONST_INT.
5922
5923 UNSIGNEDP is for the case where we have to widen the operands
5924 to perform the operation. It says to use zero-extension.
5925
5926 NORMALIZEP is 1 if we should convert the result to be either zero
5927 or one. NORMALIZEP is -1 if we should convert the result to be
5928 either zero or -1. If NORMALIZEP is zero, the result will be left
5929 "raw" out of the scc insn. */
5930
5931 rtx
5932 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5933 machine_mode mode, int unsignedp, int normalizep)
5934 {
5935 machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5936 enum rtx_code rcode;
5937 rtx subtarget;
5938 rtx tem, trueval;
5939 rtx_insn *last;
5940
5941 /* If we compare constants, we shouldn't use a store-flag operation,
5942 but a constant load. We can get there via the vanilla route that
5943 usually generates a compare-branch sequence, but will in this case
5944 fold the comparison to a constant, and thus elide the branch. */
5945 if (CONSTANT_P (op0) && CONSTANT_P (op1))
5946 return NULL_RTX;
5947
5948 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5949 target_mode);
5950 if (tem)
5951 return tem;
5952
5953 /* If we reached here, we can't do this with a scc insn; however, there
5954 are some comparisons that can be done in other ways. Don't do any
5955 of these cases if branches are very cheap. */
5956 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5957 return 0;
5958
5959 /* See what we need to return. We can only return a 1, -1, or the
5960 sign bit. */
5961
5962 if (normalizep == 0)
5963 {
5964 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5965 normalizep = STORE_FLAG_VALUE;
5966
5967 else if (val_signbit_p (mode, STORE_FLAG_VALUE))
5968 ;
5969 else
5970 return 0;
5971 }
5972
5973 last = get_last_insn ();
5974
5975 /* If optimizing, use different pseudo registers for each insn, instead
5976 of reusing the same pseudo. This leads to better CSE, but slows
5977 down the compiler, since there are more pseudos. */
5978 subtarget = (!optimize
5979 && (target_mode == mode)) ? target : NULL_RTX;
5980 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5981
5982 /* For floating-point comparisons, try the reverse comparison or try
5983 changing the "orderedness" of the comparison. */
5984 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5985 {
5986 enum rtx_code first_code;
5987 bool and_them;
5988
5989 rcode = reverse_condition_maybe_unordered (code);
5990 if (can_compare_p (rcode, mode, ccp_store_flag)
5991 && (code == ORDERED || code == UNORDERED
5992 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5993 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5994 {
5995 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5996 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5997
5998 /* For the reverse comparison, use either an addition or an XOR. */
5999 if (want_add
6000 && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
6001 optimize_insn_for_speed_p ()) == 0)
6002 {
6003 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
6004 STORE_FLAG_VALUE, target_mode);
6005 if (tem)
6006 return expand_binop (target_mode, add_optab, tem,
6007 gen_int_mode (normalizep, target_mode),
6008 target, 0, OPTAB_WIDEN);
6009 }
6010 else if (!want_add
6011 && rtx_cost (trueval, mode, XOR, 1,
6012 optimize_insn_for_speed_p ()) == 0)
6013 {
6014 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
6015 normalizep, target_mode);
6016 if (tem)
6017 return expand_binop (target_mode, xor_optab, tem, trueval,
6018 target, INTVAL (trueval) >= 0,
6019 OPTAB_WIDEN);
6020 }
6021 }
6022
6023 delete_insns_since (last);
6024
6025 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
6026 if (code == ORDERED || code == UNORDERED)
6027 return 0;
6028
6029 and_them = split_comparison (code, mode, &first_code, &code);
6030
6031 /* If there are no NaNs, the first comparison should always fall through.
6032 Effectively change the comparison to the other one. */
6033 if (!HONOR_NANS (mode))
6034 {
6035 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
6036 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
6037 target_mode);
6038 }
6039
6040 if (!HAVE_conditional_move)
6041 return 0;
6042
6043 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
6044 conditional move. */
6045 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
6046 normalizep, target_mode);
6047 if (tem == 0)
6048 return 0;
6049
6050 if (and_them)
6051 tem = emit_conditional_move (target, code, op0, op1, mode,
6052 tem, const0_rtx, GET_MODE (tem), 0);
6053 else
6054 tem = emit_conditional_move (target, code, op0, op1, mode,
6055 trueval, tem, GET_MODE (tem), 0);
6056
6057 if (tem == 0)
6058 delete_insns_since (last);
6059 return tem;
6060 }
6061
6062 /* The remaining tricks only apply to integer comparisons. */
6063
6064 scalar_int_mode int_mode;
6065 if (is_int_mode (mode, &int_mode))
6066 return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
6067 unsignedp, normalizep, trueval);
6068
6069 return 0;
6070 }
6071
6072 /* Like emit_store_flag, but always succeeds. */
6073
6074 rtx
6075 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
6076 machine_mode mode, int unsignedp, int normalizep)
6077 {
6078 rtx tem;
6079 rtx_code_label *label;
6080 rtx trueval, falseval;
6081
6082 /* First see if emit_store_flag can do the job. */
6083 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
6084 if (tem != 0)
6085 return tem;
6086
6087 if (!target)
6088 target = gen_reg_rtx (word_mode);
6089
6090 /* If this failed, we have to do this with set/compare/jump/set code.
6091 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
6092 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
6093 if (code == NE
6094 && GET_MODE_CLASS (mode) == MODE_INT
6095 && REG_P (target)
6096 && op0 == target
6097 && op1 == const0_rtx)
6098 {
6099 label = gen_label_rtx ();
6100 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
6101 NULL_RTX, NULL, label,
6102 profile_probability::uninitialized ());
6103 emit_move_insn (target, trueval);
6104 emit_label (label);
6105 return target;
6106 }
6107
6108 if (!REG_P (target)
6109 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
6110 target = gen_reg_rtx (GET_MODE (target));
6111
6112 /* Jump in the right direction if the target cannot implement CODE
6113 but can jump on its reverse condition. */
6114 falseval = const0_rtx;
6115 if (! can_compare_p (code, mode, ccp_jump)
6116 && (! FLOAT_MODE_P (mode)
6117 || code == ORDERED || code == UNORDERED
6118 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
6119 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
6120 {
6121 enum rtx_code rcode;
6122 if (FLOAT_MODE_P (mode))
6123 rcode = reverse_condition_maybe_unordered (code);
6124 else
6125 rcode = reverse_condition (code);
6126
6127 /* Canonicalize to UNORDERED for the libcall. */
6128 if (can_compare_p (rcode, mode, ccp_jump)
6129 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
6130 {
6131 falseval = trueval;
6132 trueval = const0_rtx;
6133 code = rcode;
6134 }
6135 }
6136
6137 emit_move_insn (target, trueval);
6138 label = gen_label_rtx ();
6139 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
6140 label, profile_probability::uninitialized ());
6141
6142 emit_move_insn (target, falseval);
6143 emit_label (label);
6144
6145 return target;
6146 }
6147 \f
6148 /* Perform possibly multi-word comparison and conditional jump to LABEL
6149 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
6150 now a thin wrapper around do_compare_rtx_and_jump. */
6151
6152 static void
6153 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
6154 rtx_code_label *label)
6155 {
6156 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
6157 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
6158 NULL, label, profile_probability::uninitialized ());
6159 }