/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "langhooks.h"
#include "tree-vector-builder.h"

struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif

static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT,
                                      poly_uint64, poly_uint64,
                                      machine_mode, rtx, bool, bool);
static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   poly_uint64, poly_uint64,
                                   rtx, scalar_int_mode, bool);
static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
                                     unsigned HOST_WIDE_INT,
                                     unsigned HOST_WIDE_INT,
                                     rtx, scalar_int_mode, bool);
static void store_split_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   poly_uint64, poly_uint64,
                                   rtx, scalar_int_mode, bool);
static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
                                       unsigned HOST_WIDE_INT,
                                       unsigned HOST_WIDE_INT, int, rtx,
                                       machine_mode, machine_mode, bool, bool);
static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int, bool);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode,
                             rtx_code_label *);
static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);

/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static inline rtx
mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
                       GET_MODE_PRECISION (mode)), mode);
}

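/* A worked example, added for illustration (values assumed, not from
   the original source): mask_rtx (QImode, 4, 3, false) builds the
   QImode constant 0x70 (binary 01110000: three ones starting at bit 4),
   while passing complement = true builds 0x8f instead.  */
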
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)

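/* Illustration added by the editor (assumed values): for x = 8,
   8 & 7 == 0, so the macro yields true (a power of two); for x = 12,
   12 & 11 == 8 != 0, so it yields false; and for x = 0 it also yields
   true, which is why "OR_ZERO" is part of the name.  */
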
struct init_expmed_rtl
{
  rtx reg;
  rtx plus;
  rtx neg;
  rtx mult;
  rtx sdiv;
  rtx udiv;
  rtx sdiv_32;
  rtx smod_32;
  rtx wide_mult;
  rtx wide_lshr;
  rtx wide_trunc;
  rtx shift;
  rtx shift_mult;
  rtx shift_add;
  rtx shift_sub0;
  rtx shift_sub1;
  rtx zext;
  rtx trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};

static void
init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode,
                      scalar_int_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  to_size = GET_MODE_PRECISION (to_mode);
  from_size = GET_MODE_PRECISION (from_mode);

  /* Most partial integers have a precision less than the "full"
     integer they require for storage.  If one doesn't, reduce its
     bit size by one for the comparison below.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (to_size))
    to_size --;
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (from_size))
    from_size --;

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? all->trunc : all->zext);

  PUT_MODE (all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed,
                    set_src_cost (which, to_mode, speed));
}

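/* Editor's illustration (assumed values, not tied to any particular
   target): a partial mode such as PSImode usually has a precision below
   that of SImode, so the size comparison above naturally classifies
   SImode -> PSImode as a truncation.  If a partial mode reported a
   power-of-two precision equal to the full mode's, the decrement above
   keeps it ranked strictly below the full mode.  */
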
static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (all->reg, mode);
  PUT_MODE (all->plus, mode);
  PUT_MODE (all->neg, mode);
  PUT_MODE (all->mult, mode);
  PUT_MODE (all->sdiv, mode);
  PUT_MODE (all->udiv, mode);
  PUT_MODE (all->sdiv_32, mode);
  PUT_MODE (all->smod_32, mode);
  PUT_MODE (all->wide_trunc, mode);
  PUT_MODE (all->shift, mode);
  PUT_MODE (all->shift_mult, mode);
  PUT_MODE (all->shift_add, mode);
  PUT_MODE (all->shift_sub0, mode);
  PUT_MODE (all->shift_sub1, mode);
  PUT_MODE (all->zext, mode);
  PUT_MODE (all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
  set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
  set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
  set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
  set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (all->shift, 1) = all->cint[m];
      XEXP (all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
                                                       speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
                                                        speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
                                                        speed));
    }

  scalar_int_mode int_mode_to;
  if (is_a <scalar_int_mode> (mode, &int_mode_to))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, int_mode_to,
                              as_a <scalar_int_mode> (mode_from), speed);

      scalar_int_mode wider_mode;
      if (GET_MODE_CLASS (int_mode_to) == MODE_INT
          && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
        {
          PUT_MODE (all->zext, wider_mode);
          PUT_MODE (all->wide_mult, wider_mode);
          PUT_MODE (all->wide_lshr, wider_mode);
          XEXP (all->wide_lshr, 1)
            = gen_int_shift_amount (wider_mode, mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (all->wide_mult, wider_mode, speed));
          set_mul_highpart_cost (speed, int_mode_to,
                                 set_src_cost (all->wide_trunc,
                                               int_mode_to, speed));
        }
    }
}

void
init_expmed (void)
{
  struct init_expmed_rtl all;
  machine_mode mode = QImode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
      all.cint[m] = GEN_INT (m);
    }

  /* Avoid using hard regs in ways which may be unsupported.  */
  all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
  all.neg = gen_rtx_NEG (mode, all.reg);
  all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
  all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
  all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
  all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
  all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
  all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
  all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
  all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
  all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
  all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
  all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
  all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
  all.trunc = gen_rtx_TRUNCATE (mode, all.reg);

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, mode, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();

  ggc_free (all.trunc);
  ggc_free (all.shift_sub1);
  ggc_free (all.shift_sub0);
  ggc_free (all.shift_add);
  ggc_free (all.shift_mult);
  ggc_free (all.shift);
  ggc_free (all.wide_trunc);
  ggc_free (all.wide_lshr);
  ggc_free (all.wide_mult);
  ggc_free (all.zext);
  ggc_free (all.smod_32);
  ggc_free (all.sdiv_32);
  ggc_free (all.udiv);
  ggc_free (all.sdiv);
  ggc_free (all.mult);
  ggc_free (all.neg);
  ggc_free (all.plus);
  ggc_free (all.reg);
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Whether reverse storage order is supported on the target.  */
static int reverse_storage_order_supported = -1;

/* Check whether reverse storage order is supported on the target.  */

static void
check_reverse_storage_order_support (void)
{
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_storage_order_supported = 0;
      sorry ("reverse scalar storage order");
    }
  else
    reverse_storage_order_supported = 1;
}

/* Whether reverse FP storage order is supported on the target.  */
static int reverse_float_storage_order_supported = -1;

/* Check whether reverse FP storage order is supported on the target.  */

static void
check_reverse_float_storage_order_support (void)
{
  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_float_storage_order_supported = 0;
      sorry ("reverse floating-point scalar storage order");
    }
  else
    reverse_float_storage_order_supported = 1;
}

/* Return an rtx representing the value of X with reverse storage order.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
flip_storage_order (machine_mode mode, rtx x)
{
  scalar_int_mode int_mode;
  rtx result;

  if (mode == QImode)
    return x;

  if (COMPLEX_MODE_P (mode))
    {
      rtx real = read_complex_part (x, false);
      rtx imag = read_complex_part (x, true);

      real = flip_storage_order (GET_MODE_INNER (mode), real);
      imag = flip_storage_order (GET_MODE_INNER (mode), imag);

      return gen_rtx_CONCAT (mode, real, imag);
    }

  if (__builtin_expect (reverse_storage_order_supported < 0, 0))
    check_reverse_storage_order_support ();

  if (!is_a <scalar_int_mode> (mode, &int_mode))
    {
      if (FLOAT_MODE_P (mode)
          && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
        check_reverse_float_storage_order_support ();

      if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
        {
          sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
          return x;
        }
      x = gen_lowpart (int_mode, x);
    }

  result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
  if (result == 0)
    result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);

  if (int_mode != mode)
    result = gen_lowpart (mode, result);

  return result;
}

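/* Worked example added for illustration (assumed values): flipping the
   storage order of the SImode constant 0x12345678 yields 0x78563412,
   i.e. a BSWAP.  For a non-integer mode such as SFmode, the value is
   first punned to a same-sized integer mode, byte-swapped there, and
   punned back.  */
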
/* If MODE is set, adjust bitfield memory MEM so that it points to the
   first unit of mode MODE that contains a bitfield of size BITSIZE at
   bit position BITNUM.  If MODE is not set, return a BLKmode reference
   to every byte in the bitfield.  Set *NEW_BITNUM to the bit position
   of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  scalar_int_mode imode;
  if (mode.exists (&imode))
    {
      unsigned int unit = GET_MODE_BITSIZE (imode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, imode, offset);
    }
  else
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, BLKmode, offset, size);
    }
}

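/* Example added for illustration (assumed numbers): for a field with
   BITSIZE = 3 at BITNUM = 37 and a 16-bit MODE, the new bit position is
   37 % 16 = 5 and the byte offset is (37 - 5) / 8 = 4.  In the BLKmode
   case the same field gives offset 37 / 8 = 4, *NEW_BITNUM = 37 % 8 = 5,
   and a size of (5 + 3 + 7) / 8 = 1 byte.  */
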
/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              poly_uint64 bitregion_start,
                              poly_uint64 bitregion_end,
                              machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  scalar_int_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          scalar_int_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          scalar_int_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}

/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (poly_uint64 bitnum, poly_uint64 bitsize,
                     machine_mode struct_mode)
{
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (struct_mode);
  if (BYTES_BIG_ENDIAN)
    return (multiple_p (bitnum, BITS_PER_UNIT)
            && (known_eq (bitnum + bitsize, GET_MODE_BITSIZE (struct_mode))
                || multiple_p (bitnum + bitsize,
                               regsize * BITS_PER_UNIT)));
  else
    return multiple_p (bitnum, regsize * BITS_PER_UNIT);
}

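/* Illustrative example (assumed little-endian target with 8-byte
   registers): within a DImode value, a 32-bit field at bit 0 is a
   lowpart and can be accessed as (subreg:SI (reg:DI ...) 0), whereas a
   32-bit field at bit 32 is not, because 32 is not a multiple of the
   64-bit register size.  */
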
/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
   Return false if the access would touch memory outside the range
   BITREGION_START to BITREGION_END for conformance to the C++ memory
   model.  */

static bool
strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            scalar_int_mode fieldmode,
                            poly_uint64 bitregion_start,
                            poly_uint64 bitregion_end)
{
  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);

  /* -fstrict-volatile-bitfields must be enabled and we must have a
     volatile MEM.  */
  if (!MEM_P (op0)
      || !MEM_VOLATILE_P (op0)
      || flag_strict_volatile_bitfields <= 0)
    return false;

  /* The bit size must not be larger than the field mode, and
     the field mode must not be larger than a word.  */
  if (bitsize > modesize || modesize > BITS_PER_WORD)
    return false;

  /* Check for cases of unaligned fields that must be split.  */
  if (bitnum % modesize + bitsize > modesize)
    return false;

  /* The memory must be sufficiently aligned for a MODESIZE access.
     This condition guarantees that the memory access will not
     touch anything after the end of the structure.  */
  if (MEM_ALIGN (op0) < modesize)
    return false;

  /* Check for cases where the C++ memory model applies.  */
  if (maybe_ne (bitregion_end, 0U)
      && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
          || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
                       bitregion_end)))
    return false;

  return true;
}

/* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.
   Store the byte offset in *BYTENUM if so.  */

static bool
simple_mem_bitfield_p (rtx op0, poly_uint64 bitsize, poly_uint64 bitnum,
                       machine_mode mode, poly_uint64 *bytenum)
{
  return (MEM_P (op0)
          && multiple_p (bitnum, BITS_PER_UNIT, bytenum)
          && known_eq (bitsize, GET_MODE_BITSIZE (mode))
          && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
              || (multiple_p (bitnum, GET_MODE_ALIGNMENT (mode))
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
\f
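/* Example added for illustration (assumed numbers): a 32-bit field at
   bit 64 of a MEM satisfies the check above when SImode alignment is
   met: BITNUM is byte-aligned (*BYTENUM = 8) and BITSIZE matches
   GET_MODE_BITSIZE (SImode), so the field can be read or written with
   one ordinary SImode move.  */
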
/* Try to use instruction INSV to store VALUE into a field of OP0.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
   BLKmode MEM.  VALUE_MODE is the mode of VALUE.  BITSIZE and BITNUM
   are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            opt_scalar_int_mode op0_mode,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            rtx value, scalar_int_mode value_mode)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx_insn *last = get_last_insn ();
  bool copy_back = false;

  scalar_int_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* There is a similar overflow check at the start of store_bit_field_1,
     but that only handles the case where the field lies completely
     outside the register.  There are also situations where the field
     lies partly in the register, and then we need to adjust BITSIZE
     for the partial overflow.  Without this fix, pr48335-2.c would be
     broken on big-endian targets that support bit-insert instructions,
     such as arm and aarch64.  */
  if (bitsize + bitnum > unit && bitnum < unit)
    {
      warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
               "destination object, data truncated into %wu-bit",
               bitsize, unit - bitnum);
      bitsize = unit - bitnum;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (value_mode != op_mode)
    {
      if (GET_MODE_BITSIZE (value_mode) >= bitsize)
        {
          rtx tmp;
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (value_mode) < GET_MODE_SIZE (op_mode))
            {
              tmp = simplify_subreg (op_mode, value1, value_mode, 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (value_mode, value1),
                                           value_mode, 0);
            }
          else
            {
              tmp = gen_lowpart_if_possible (op_mode, value1);
              if (! tmp)
                tmp = gen_lowpart (op_mode, force_reg (value_mode, value1));
            }
          value1 = tmp;
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* The parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}

/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
                   poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                   machine_mode fieldmode,
                   rtx value, bool reverse, bool fallback_p)
{
  rtx op0 = str_rtx;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  machine_mode outermode = GET_MODE (op0);
  scalar_mode innermode = GET_MODE_INNER (outermode);
  poly_uint64 pos;
  if (VECTOR_MODE_P (outermode)
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
      && fieldmode == innermode
      && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
      && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (vec_set_optab, outermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

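  /* Illustrative note (assumed numbers, not from the original source):
     with OUTERMODE V4SImode, a 32-bit store at BITNUM 64 passes the
     checks above with POS = 64 / 32 = 2, so the vec_set pattern replaces
     element 2 of the vector directly.  */
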
  /* If the target is a register and we are overwriting the entire object,
     or storing a full-word or multi-word field, the store can be done
     with just a SUBREG.  */
  if (!MEM_P (op0)
      && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.
         In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;
      HOST_WIDE_INT regnum;
      poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (op0));
      if (known_eq (bitnum, 0U)
          && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
        {
          sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
          if (sub)
            {
              if (reverse)
                sub = flip_storage_order (GET_MODE (op0), sub);
              emit_move_insn (op0, sub);
              return true;
            }
        }
      else if (constant_multiple_p (bitnum, regsize * BITS_PER_UNIT, &regnum)
               && multiple_p (bitsize, regsize * BITS_PER_UNIT))
        {
          sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     regnum * regsize);
          if (sub)
            {
              if (reverse)
                value = flip_storage_order (fieldmode, value);
              emit_move_insn (sub, value);
              return true;
            }
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  poly_uint64 bytenum;
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode, &bytenum))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bytenum);
      if (reverse)
        value = flip_storage_order (fieldmode, value);
      emit_move_insn (op0, value);
      return true;
    }

  /* It's possible we'll need to handle other cases here for
     polynomial bitnum and bitsize.  */

  /* From here on we need to be looking at a fixed-size insertion.  */
  unsigned HOST_WIDE_INT ibitsize = bitsize.to_constant ();
  unsigned HOST_WIDE_INT ibitnum = bitnum.to_constant ();

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
  scalar_int_mode imode;
  if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
    {
      if (MEM_P (op0))
        op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
                                            0, MEM_SIZE (op0));
      else
        op0 = gen_lowpart (op0_mode.require (), op0);
    }

  return store_integral_bit_field (op0, op0_mode, ibitsize, ibitnum,
                                   bitregion_start, bitregion_end,
                                   fieldmode, value, reverse, fallback_p);
}

/* Subroutine of store_bit_field_1, with the same arguments, except
   that BITSIZE and BITNUM are constant.  Handle cases specific to
   integral modes.  If OP0_MODE is defined, it is the mode of OP0,
   otherwise OP0 is a BLKmode MEM.  */

static bool
store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                          unsigned HOST_WIDE_INT bitsize,
                          unsigned HOST_WIDE_INT bitnum,
                          poly_uint64 bitregion_start,
                          poly_uint64 bitregion_end,
                          machine_mode fieldmode,
                          rtx value, bool reverse, bool fallback_p)
{
  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && !reverse
      && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
      && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx_insn *last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.

         The mode must be fixed-size, since insertions into variable-sized
         objects are meant to be handled before calling this function.  */
      fixed_size_mode value_mode = as_a <fixed_size_mode> (GET_MODE (value));
      if (value_mode == VOIDmode)
        value_mode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (value_mode) / UNITS_PER_WORD
                                  - i - 1
                                  : i);
          unsigned int bit_offset = (backwards ^ reverse
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, value_mode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't fill a full word, we have
             to make sure that on big-endian machines the higher-order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            {
              int shift = BITS_PER_WORD - new_bitsize;
              rtx shift_rtx = gen_int_shift_amount (word_mode, shift);
              value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                  value_word, shift_rtx,
                                                  NULL_RTX, true,
                                                  OPTAB_LIB_WIDEN);
            }

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, reverse, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  rtx orig_value = value;
  scalar_int_mode value_mode;
  if (GET_MODE (value) == VOIDmode)
    /* By this point we've dealt with values that are bigger than a word,
       so word_mode is a conservatively correct choice.  */
    value_mode = word_mode;
  else if (!is_a <scalar_int_mode> (GET_MODE (value), &value_mode))
    {
      value_mode = int_mode_for_mode (GET_MODE (value)).require ();
      value = gen_reg_rtx (value_mode);
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.
     Don't do this if op0 is a single hard register wider than word
     such as a float or vector register.  */
  if (!MEM_P (op0)
      && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD
      && (!REG_P (op0)
          || !HARD_REGISTER_P (op0)
          || hard_regno_nregs (REGNO (op0), op0_mode.require ()) != 1))
    {
      if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return true;
        }
      op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      op0_mode = word_mode;
      bitnum %= BITS_PER_WORD;
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && !reverse
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (op0_mode.require ()),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, op0_mode,
                                     bitsize, bitnum, value, value_mode))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0) && !reverse)
    {
      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                        fieldmode)
          && store_bit_field_using_insv (&insv, op0, op0_mode,
                                         bitsize, bitnum, value, value_mode))
        return true;

      rtx_insn *last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, reverse, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, op0_mode, bitsize, bitnum, bitregion_start,
                         bitregion_end, value, value_mode, reverse);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0 if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.

   If REVERSE is true, the store is to be done in reverse order.  */

void
store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
                 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                 machine_mode fieldmode,
                 rtx value, bool reverse)
{
  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  unsigned HOST_WIDE_INT ibitsize = 0, ibitnum = 0;
  scalar_int_mode int_mode;
  if (bitsize.is_constant (&ibitsize)
      && bitnum.is_constant (&ibitnum)
      && is_a <scalar_int_mode> (fieldmode, &int_mode)
      && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum, int_mode,
                                     bitregion_start, bitregion_end))
    {
      /* Storing a full word can be done with a simple store.
         We know here that the field can be accessed with a single
         instruction.  For targets that support unaligned memory
         accesses, an unaligned access may be necessary.  */
      if (ibitsize == GET_MODE_BITSIZE (int_mode))
        {
          str_rtx = adjust_bitfield_address (str_rtx, int_mode,
                                             ibitnum / BITS_PER_UNIT);
          if (reverse)
            value = flip_storage_order (int_mode, value);
          gcc_assert (ibitnum % BITS_PER_UNIT == 0);
          emit_move_insn (str_rtx, value);
        }
      else
        {
          rtx temp;

          str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize,
                                          ibitnum, &ibitnum);
          gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
          temp = copy_to_reg (str_rtx);
          if (!store_bit_field_1 (temp, ibitsize, ibitnum, 0, 0,
                                  int_mode, value, reverse, true))
            gcc_unreachable ();

          emit_move_insn (str_rtx, temp);
        }

      return;
    }

  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
    {
      scalar_int_mode best_mode;
      machine_mode addr_mode = VOIDmode;

      poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
      bitnum -= bitregion_start;
      poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      if (bitsize.is_constant (&ibitsize)
          && bitnum.is_constant (&ibitnum)
          && get_best_mode (ibitsize, ibitnum,
                            bitregion_start, bitregion_end,
                            MEM_ALIGN (str_rtx), INT_MAX,
                            MEM_VOLATILE_P (str_rtx), &best_mode))
        addr_mode = best_mode;
      str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
                                              offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, reverse, true))
    gcc_unreachable ();
}
\f
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  If OP0_MODE is defined,
   it is the mode of OP0, otherwise OP0 is a BLKmode MEM.  VALUE_MODE is
   the mode of VALUE.

   If REVERSE is true, the store is to be done in reverse order.  */

static void
store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  scalar_int_mode best_mode;
  if (MEM_P (op0))
    {
      unsigned int max_bitsize = BITS_PER_WORD;
      scalar_int_mode imode;
      if (op0_mode.exists (&imode) && GET_MODE_BITSIZE (imode) < max_bitsize)
        max_bitsize = GET_MODE_BITSIZE (imode);

      if (!get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                          MEM_ALIGN (op0), max_bitsize, MEM_VOLATILE_P (op0),
                          &best_mode))
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return;
        }

      op0 = narrow_bit_field_mem (op0, best_mode, bitsize, bitnum, &bitnum);
    }
  else
    best_mode = op0_mode.require ();

  store_fixed_bit_field_1 (op0, best_mode, bitsize, bitnum,
                           value, value_mode, reverse);
}

/* Helper function for store_fixed_bit_field; it always stores the
   bit field using MODE, which is the mode of OP0.  The other
   arguments are as for store_fixed_bit_field.  */

static void
store_fixed_bit_field_1 (rtx op0, scalar_int_mode mode,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitnum,
                         rtx value, scalar_int_mode value_mode, bool reverse)
{
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      unsigned HOST_WIDE_INT v = UINTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= (HOST_WIDE_INT_1U << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == (HOST_WIDE_INT_1U << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT
                   && v == HOST_WIDE_INT_M1U))
        all_one = 1;

      value = lshift_value (mode, v, bitnum);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (value_mode) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (value_mode != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  if (reverse)
    value = flip_storage_order (mode, value);

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
      if (reverse)
        mask = flip_storage_order (mode, mask);
      temp = expand_binop (mode, and_optab, temp, mask,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
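
/* Editor's sketch of the read-modify-write sequence emitted above, for
   illustration only (plain C, assumed 32-bit container): storing an
   unmasked VALUE into BITSIZE bits at BITNUM of WORD amounts to

     mask = ((1u << bitsize) - 1) << bitnum;
     word = (word & ~mask) | ((value << bitnum) & mask);

   with the clearing AND skipped when VALUE is all-ones and the IOR
   skipped when VALUE is zero, as tracked by ALL_ONE and ALL_ZERO.  */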
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store, which has mode VALUE_MODE.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
   a BLKmode MEM.

   If REVERSE is true, the store is to be done in reverse order.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  unsigned int unit, total_bits, bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
     that much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If OP0 is a memory with a mode, then UNIT must not be larger than
     OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
     again, and we will mutually recurse forever.  */
  if (MEM_P (op0) && op0_mode.exists ())
    unit = MIN (unit, GET_MODE_BITSIZE (op0_mode.require ()));

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode, force_reg (value_mode, value));
      value_mode = word_mode;
    }

  total_bits = GET_MODE_BITSIZE (value_mode);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;
      rtx part;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When the region of bytes we can touch is restricted, decrease
         UNIT near the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (maybe_ne (bitregion_end, 0U)
          && unit > BITS_PER_UNIT
          && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is little-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            bitsize - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1, false);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is big-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize, bitsdone, NULL_RTX,
                                            1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.  */
      rtx op0_piece = op0;
      opt_scalar_int_mode op0_piece_mode = op0_mode;
      if (SUBREG_P (op0) || REG_P (op0))
        {
          scalar_int_mode imode;
          if (op0_mode.exists (&imode)
              && GET_MODE_SIZE (imode) < UNITS_PER_WORD)
            {
              if (offset)
                op0_piece = const0_rtx;
            }
          else
            {
              op0_piece = operand_subword_force (op0,
                                                 offset * unit / BITS_PER_WORD,
                                                 GET_MODE (op0));
              op0_piece_mode = word_mode;
            }
          offset &= BITS_PER_WORD / unit - 1;
        }

      /* OFFSET is in UNITs, and UNIT is in bits.  If OP0_PIECE is
         const0_rtx, it is just an out-of-bounds access.  Ignore it.  */
      if (op0_piece != const0_rtx)
        store_fixed_bit_field (op0_piece, op0_piece_mode, thissize,
                               offset * unit + thispos, bitregion_start,
                               bitregion_end, part, word_mode, reverse);
      bitsdone += thissize;
    }
}
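
/* Worked example added for illustration (assumed 32-bit words,
   little-endian): a field with BITSIZE = 10 at BITPOS = 28 is stored in
   two passes of the loop above: first THISSIZE = 4 bits at THISPOS = 28
   of word 0, then the remaining 6 bits at THISPOS = 0 of word 1, with
   PART extracted from VALUE at BITSDONE = 0 and 4 respectively.  */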
\f
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, machine_mode mode,
                             machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If TMODE is not a scalar integral mode, first convert X to the
     integer mode of that size and then access it as TMODE (e.g. a
     floating-point mode) via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
      x = convert_to_mode (int_mode, x, unsignedp);
      x = force_reg (int_mode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}

/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXTV describes the extraction instruction to use.  If OP0_MODE
   is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
   The other arguments are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              opt_scalar_int_mode op0_mode,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              machine_mode mode, machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  scalar_int_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && op0_mode.require () != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && op0_mode.require () != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (partial_subreg_p (GET_MODE (spec_target), ext_mode))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}

/* See whether it would be valid to extract the part of OP0 described
   by BITNUM and BITSIZE into a value of mode MODE using a subreg
   operation.  Return the subreg if so, otherwise return null.  */

static rtx
extract_bit_field_as_subreg (machine_mode mode, rtx op0,
                             poly_uint64 bitsize, poly_uint64 bitnum)
{
  poly_uint64 bytenum;
  if (multiple_p (bitnum, BITS_PER_UNIT, &bytenum)
      && known_eq (bitsize, GET_MODE_BITSIZE (mode))
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op0)))
    return simplify_gen_subreg (mode, op0, GET_MODE (op0), bytenum);
  return NULL_RTX;
}

/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

1595 static rtx
1596 extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1597 int unsignedp, rtx target, machine_mode mode,
1598 machine_mode tmode, bool reverse, bool fallback_p,
1599 rtx *alt_rtl)
1600 {
1601 rtx op0 = str_rtx;
1602 machine_mode mode1;
1603
1604 if (tmode == VOIDmode)
1605 tmode = mode;
1606
1607 while (GET_CODE (op0) == SUBREG)
1608 {
1609 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1610 op0 = SUBREG_REG (op0);
1611 }
1612
1613 /* If we have an out-of-bounds access to a register, just return an
1614 uninitialized register of the required mode. This can occur if the
1615 source code contains an out-of-bounds access to a small array. */
1616 if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
1617 return gen_reg_rtx (tmode);
1618
1619 if (REG_P (op0)
1620 && mode == GET_MODE (op0)
1621 && known_eq (bitnum, 0U)
1622 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
1623 {
1624 if (reverse)
1625 op0 = flip_storage_order (mode, op0);
1626 /* We're trying to extract a full register from itself. */
1627 return op0;
1628 }
1629
1630 /* First try to check for vector from vector extractions. */
1631 if (VECTOR_MODE_P (GET_MODE (op0))
1632 && !MEM_P (op0)
1633 && VECTOR_MODE_P (tmode)
1634 && known_eq (bitsize, GET_MODE_BITSIZE (tmode))
1635 && maybe_gt (GET_MODE_SIZE (GET_MODE (op0)), GET_MODE_SIZE (tmode)))
1636 {
1637 machine_mode new_mode = GET_MODE (op0);
1638 if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
1639 {
1640 scalar_mode inner_mode = GET_MODE_INNER (tmode);
1641 poly_uint64 nunits;
1642 if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
1643 GET_MODE_UNIT_BITSIZE (tmode), &nunits)
1644 || !mode_for_vector (inner_mode, nunits).exists (&new_mode)
1645 || !VECTOR_MODE_P (new_mode)
1646 || maybe_ne (GET_MODE_SIZE (new_mode),
1647 GET_MODE_SIZE (GET_MODE (op0)))
1648 || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
1649 || !targetm.vector_mode_supported_p (new_mode))
1650 new_mode = VOIDmode;
1651 }
1652 poly_uint64 pos;
1653 if (new_mode != VOIDmode
1654 && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
1655 != CODE_FOR_nothing)
1656 && multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
1657 {
1658 struct expand_operand ops[3];
1659 machine_mode outermode = new_mode;
1660 machine_mode innermode = tmode;
1661 enum insn_code icode
1662 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1663
1664 if (new_mode != GET_MODE (op0))
1665 op0 = gen_lowpart (new_mode, op0);
1666 create_output_operand (&ops[0], target, innermode);
1667 ops[0].target = 1;
1668 create_input_operand (&ops[1], op0, outermode);
1669 create_integer_operand (&ops[2], pos);
1670 if (maybe_expand_insn (icode, 3, ops))
1671 {
1672 if (alt_rtl && ops[0].target)
1673 *alt_rtl = target;
1674 target = ops[0].value;
1675 if (GET_MODE (target) != mode)
1676 return gen_lowpart (tmode, target);
1677 return target;
1678 }
1679 }
1680 }
1681
1682 /* See if we can get a better vector mode before extracting. */
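/* E.g. if OP0 is V2DImode and we want an SFmode element, punning OP0
to V4SFmode first (when the target supports that mode) lets the
code below use a float vec_extract pattern.  */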
1683 if (VECTOR_MODE_P (GET_MODE (op0))
1684 && !MEM_P (op0)
1685 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1686 {
1687 machine_mode new_mode;
1688
1689 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1690 new_mode = MIN_MODE_VECTOR_FLOAT;
1691 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1692 new_mode = MIN_MODE_VECTOR_FRACT;
1693 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1694 new_mode = MIN_MODE_VECTOR_UFRACT;
1695 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1696 new_mode = MIN_MODE_VECTOR_ACCUM;
1697 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1698 new_mode = MIN_MODE_VECTOR_UACCUM;
1699 else
1700 new_mode = MIN_MODE_VECTOR_INT;
1701
1702 FOR_EACH_MODE_FROM (new_mode, new_mode)
1703 if (known_eq (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (GET_MODE (op0)))
1704 && known_eq (GET_MODE_UNIT_SIZE (new_mode), GET_MODE_SIZE (tmode))
1705 && targetm.vector_mode_supported_p (new_mode))
1706 break;
1707 if (new_mode != VOIDmode)
1708 op0 = gen_lowpart (new_mode, op0);
1709 }
1710
1711 /* Use vec_extract patterns for extracting parts of vectors whenever
1712 available. If that fails, see whether the current modes and bitregion
1713 give a natural subreg. */
1714 machine_mode outermode = GET_MODE (op0);
1715 if (VECTOR_MODE_P (outermode) && !MEM_P (op0))
1716 {
1717 scalar_mode innermode = GET_MODE_INNER (outermode);
1718 enum insn_code icode
1719 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1720 poly_uint64 pos;
1721 if (icode != CODE_FOR_nothing
1722 && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
1723 && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
1724 {
1725 struct expand_operand ops[3];
1726
1727 create_output_operand (&ops[0], target, innermode);
1728 ops[0].target = 1;
1729 create_input_operand (&ops[1], op0, outermode);
1730 create_integer_operand (&ops[2], pos);
1731 if (maybe_expand_insn (icode, 3, ops))
1732 {
1733 if (alt_rtl && ops[0].target)
1734 *alt_rtl = target;
1735 target = ops[0].value;
1736 if (GET_MODE (target) != mode)
1737 return gen_lowpart (tmode, target);
1738 return target;
1739 }
1740 }
1741 /* Using subregs is useful if we're extracting one register vector
1742 from a multi-register vector. extract_bit_field_as_subreg checks
1743 for valid bitsize and bitnum, so we don't need to do that here. */
1744 if (VECTOR_MODE_P (mode))
1745 {
1746 rtx sub = extract_bit_field_as_subreg (mode, op0, bitsize, bitnum);
1747 if (sub)
1748 return sub;
1749 }
1750 }
1751
1752 /* Make sure we are playing with integral modes. Pun with subregs
1753 if we aren't. */
1754 opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
1755 scalar_int_mode imode;
1756 if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
1757 {
1758 if (MEM_P (op0))
1759 op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
1760 0, MEM_SIZE (op0));
1761 else if (op0_mode.exists (&imode))
1762 {
1763 op0 = gen_lowpart (imode, op0);
1764
1765 /* If we got a SUBREG, force it into a register since we
1766 aren't going to be able to do another SUBREG on it. */
1767 if (GET_CODE (op0) == SUBREG)
1768 op0 = force_reg (imode, op0);
1769 }
1770 else
1771 {
1772 poly_int64 size = GET_MODE_SIZE (GET_MODE (op0));
1773 rtx mem = assign_stack_temp (GET_MODE (op0), size);
1774 emit_move_insn (mem, op0);
1775 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1776 }
1777 }
1778
1779 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1780 If that's wrong, the solution is to test for it and set TARGET to 0
1781 if needed. */
1782
1783 /* Get the mode of the field to use for atomic access or subreg
1784 conversion. */
1785 if (!SCALAR_INT_MODE_P (tmode)
1786 || !mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0).exists (&mode1))
1787 mode1 = mode;
1788 gcc_assert (mode1 != BLKmode);
1789
1790 /* Extraction of a full MODE1 value can be done with a subreg as long
1791 as the least significant bit of the value is the least significant
1792 bit of either OP0 or a word of OP0. */
1793 if (!MEM_P (op0) && !reverse)
1794 {
1795 rtx sub = extract_bit_field_as_subreg (mode1, op0, bitsize, bitnum);
1796 if (sub)
1797 return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1798 }
1799
1800 /* Extraction of a full MODE1 value can be done with a load as long as
1801 the field is on a byte boundary and is sufficiently aligned. */
1802 poly_uint64 bytenum;
1803 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1, &bytenum))
1804 {
1805 op0 = adjust_bitfield_address (op0, mode1, bytenum);
1806 if (reverse)
1807 op0 = flip_storage_order (mode1, op0);
1808 return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1809 }
1810
1811 /* If we have a memory source and a non-constant bit offset, restrict
1812 the memory to the referenced bytes. This is a worst-case fallback
1813 but is useful for things like vector booleans. */
1814 if (MEM_P (op0) && !bitnum.is_constant ())
1815 {
1816 bytenum = bits_to_bytes_round_down (bitnum);
1817 bitnum = num_trailing_bits (bitnum);
1818 poly_uint64 bytesize = bits_to_bytes_round_up (bitnum + bitsize);
1819 op0 = adjust_bitfield_address_size (op0, BLKmode, bytenum, bytesize);
1820 op0_mode = opt_scalar_int_mode ();
1821 }
1822
1823 /* It's possible we'll need to handle other cases here for
1824 polynomial bitnum and bitsize. */
1825
1826 /* From here on we need to be looking at a fixed-size extraction. */
1827 return extract_integral_bit_field (op0, op0_mode, bitsize.to_constant (),
1828 bitnum.to_constant (), unsignedp,
1829 target, mode, tmode, reverse, fallback_p);
1830 }
1831
1832 /* Subroutine of extract_bit_field_1, with the same arguments, except
1833 that BITSIZE and BITNUM are constant. Handle cases specific to
1834 integral modes. If OP0_MODE is defined, it is the mode of OP0,
1835 otherwise OP0 is a BLKmode MEM. */
1836
1837 static rtx
1838 extract_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1839 unsigned HOST_WIDE_INT bitsize,
1840 unsigned HOST_WIDE_INT bitnum, int unsignedp,
1841 rtx target, machine_mode mode, machine_mode tmode,
1842 bool reverse, bool fallback_p)
1843 {
1844 /* Handle fields bigger than a word. */
1845
1846 if (bitsize > BITS_PER_WORD)
1847 {
1848 /* Here we transfer the words of the field
1849 in the order least significant first.
1850 This is because the most significant word is the one which may
1851 be less than full. */
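/* E.g. with BITS_PER_WORD == 32 and bitsize == 70, nwords is 3 and
only the last, partial word (6 bits) needs a narrower extraction.  */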
1852
1853 const bool backwards = WORDS_BIG_ENDIAN;
1854 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1855 unsigned int i;
1856 rtx_insn *last;
1857
1858 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1859 target = gen_reg_rtx (mode);
1860
1861 /* In case we're about to clobber a base register or something
1862 (see gcc.c-torture/execute/20040625-1.c). */
1863 if (reg_mentioned_p (target, op0))
1864 target = gen_reg_rtx (mode);
1865
1866 /* Indicate for flow that the entire target reg is being set. */
1867 emit_clobber (target);
1868
1869 /* The mode must be fixed-size, since extract_bit_field_1 handles
1870 extractions from variable-sized objects before calling this
1871 function. */
1872 unsigned int target_size
1873 = GET_MODE_SIZE (GET_MODE (target)).to_constant ();
1874 last = get_last_insn ();
1875 for (i = 0; i < nwords; i++)
1876 {
1877 /* If I is 0, use the low-order word in both field and target;
1878 if I is 1, use the next to lowest word; and so on. */
1879 /* Word number in TARGET to use. */
1880 unsigned int wordnum
1881 = (backwards ? target_size / UNITS_PER_WORD - i - 1 : i);
1882 /* Offset from start of field in OP0. */
1883 unsigned int bit_offset = (backwards ^ reverse
1884 ? MAX ((int) bitsize - ((int) i + 1)
1885 * BITS_PER_WORD,
1886 0)
1887 : (int) i * BITS_PER_WORD);
1888 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1889 rtx result_part
1890 = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1891 bitsize - i * BITS_PER_WORD),
1892 bitnum + bit_offset, 1, target_part,
1893 mode, word_mode, reverse, fallback_p, NULL);
1894
1895 gcc_assert (target_part);
1896 if (!result_part)
1897 {
1898 delete_insns_since (last);
1899 return NULL;
1900 }
1901
1902 if (result_part != target_part)
1903 emit_move_insn (target_part, result_part);
1904 }
1905
1906 if (unsignedp)
1907 {
1908 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1909 need to be zeroed out. */
1910 if (target_size > nwords * UNITS_PER_WORD)
1911 {
1912 unsigned int i, total_words;
1913
1914 total_words = target_size / UNITS_PER_WORD;
1915 for (i = nwords; i < total_words; i++)
1916 emit_move_insn
1917 (operand_subword (target,
1918 backwards ? total_words - i - 1 : i,
1919 1, VOIDmode),
1920 const0_rtx);
1921 }
1922 return target;
1923 }
1924
1925 /* Signed bit field: sign-extend with two arithmetic shifts. */
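/* E.g. for a 40-bit field extracted into DImode this computes
(x << 24) >> 24, the second shift being arithmetic.  */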
1926 target = expand_shift (LSHIFT_EXPR, mode, target,
1927 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1928 return expand_shift (RSHIFT_EXPR, mode, target,
1929 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1930 }
1931
1932 /* If OP0 is a multi-word register, narrow it to the affected word.
1933 If the region spans two words, defer to extract_split_bit_field. */
1934 if (!MEM_P (op0) && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD)
1935 {
1936 if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1937 {
1938 if (!fallback_p)
1939 return NULL_RTX;
1940 target = extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
1941 unsignedp, reverse);
1942 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1943 }
1944 op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
1945 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1946 op0_mode = word_mode;
1947 bitnum %= BITS_PER_WORD;
1948 }
1949
1950 /* From here on we know the desired field is smaller than a word.
1951 If OP0 is a register, it too fits within a word. */
1952 enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1953 extraction_insn extv;
1954 if (!MEM_P (op0)
1955 && !reverse
1956 /* ??? We could limit the structure size to the part of OP0 that
1957 contains the field, with appropriate checks for endianness
1958 and TARGET_TRULY_NOOP_TRUNCATION. */
1959 && get_best_reg_extraction_insn (&extv, pattern,
1960 GET_MODE_BITSIZE (op0_mode.require ()),
1961 tmode))
1962 {
1963 rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1964 bitsize, bitnum,
1965 unsignedp, target, mode,
1966 tmode);
1967 if (result)
1968 return result;
1969 }
1970
1971 /* If OP0 is a memory, try copying it to a register and seeing if a
1972 cheap register alternative is available. */
1973 if (MEM_P (op0) && !reverse)
1974 {
1975 if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1976 tmode))
1977 {
1978 rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1979 bitsize, bitnum,
1980 unsignedp, target, mode,
1981 tmode);
1982 if (result)
1983 return result;
1984 }
1985
1986 rtx_insn *last = get_last_insn ();
1987
1988 /* Try loading part of OP0 into a register and extracting the
1989 bitfield from that. */
1990 unsigned HOST_WIDE_INT bitpos;
1991 rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1992 0, 0, tmode, &bitpos);
1993 if (xop0)
1994 {
1995 xop0 = copy_to_reg (xop0);
1996 rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1997 unsignedp, target,
1998 mode, tmode, reverse, false, NULL);
1999 if (result)
2000 return result;
2001 delete_insns_since (last);
2002 }
2003 }
2004
2005 if (!fallback_p)
2006 return NULL;
2007
2008 /* Find a correspondingly-sized integer field, so we can apply
2009 shifts and masks to it. */
2010 scalar_int_mode int_mode;
2011 if (!int_mode_for_mode (tmode).exists (&int_mode))
2012 /* If this fails, we should probably push op0 out to memory and then
2013 do a load. */
2014 int_mode = int_mode_for_mode (mode).require ();
2015
2016 target = extract_fixed_bit_field (int_mode, op0, op0_mode, bitsize,
2017 bitnum, target, unsignedp, reverse);
2018
2019 /* Complex values must be reversed piecewise, so we need to undo the global
2020 reversal, convert to the complex mode and reverse again. */
2021 if (reverse && COMPLEX_MODE_P (tmode))
2022 {
2023 target = flip_storage_order (int_mode, target);
2024 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2025 target = flip_storage_order (tmode, target);
2026 }
2027 else
2028 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2029
2030 return target;
2031 }
2032
2033 /* Generate code to extract a bit-field from STR_RTX
2034 containing BITSIZE bits, starting at BITNUM,
2035 and put it in TARGET if possible (if TARGET is nonzero).
2036 Regardless of TARGET, we return the rtx for where the value is placed.
2037
2038 STR_RTX is the structure containing the bit field (a REG or MEM).
2039 UNSIGNEDP is nonzero if this is an unsigned bit field.
2040 MODE is the natural mode of the field value once extracted.
2041 TMODE is the mode the caller would like the value to have;
2042 but the value may be returned with mode MODE instead.
2043
2044 If REVERSE is true, the extraction is to be done in reverse order.
2045
2046 If a TARGET is specified and we can store in it at no extra cost,
2047 we do so, and return TARGET.
2048 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
2049 if they are equally easy. */
2050
2051 rtx
2052 extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
2053 int unsignedp, rtx target, machine_mode mode,
2054 machine_mode tmode, bool reverse, rtx *alt_rtl)
2055 {
2056 machine_mode mode1;
2057
2058 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
2059 if (maybe_ne (GET_MODE_BITSIZE (GET_MODE (str_rtx)), 0))
2060 mode1 = GET_MODE (str_rtx);
2061 else if (target && maybe_ne (GET_MODE_BITSIZE (GET_MODE (target)), 0))
2062 mode1 = GET_MODE (target);
2063 else
2064 mode1 = tmode;
2065
2066 unsigned HOST_WIDE_INT ibitsize, ibitnum;
2067 scalar_int_mode int_mode;
2068 if (bitsize.is_constant (&ibitsize)
2069 && bitnum.is_constant (&ibitnum)
2070 && is_a <scalar_int_mode> (mode1, &int_mode)
2071 && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum,
2072 int_mode, 0, 0))
2073 {
2074 /* Extraction of a full INT_MODE value can be done with a simple load.
2075 We know here that the field can be accessed with one single
2076 instruction. For targets that support unaligned memory,
2077 an unaligned access may be necessary. */
2078 if (ibitsize == GET_MODE_BITSIZE (int_mode))
2079 {
2080 rtx result = adjust_bitfield_address (str_rtx, int_mode,
2081 ibitnum / BITS_PER_UNIT);
2082 if (reverse)
2083 result = flip_storage_order (int_mode, result);
2084 gcc_assert (ibitnum % BITS_PER_UNIT == 0);
2085 return convert_extracted_bit_field (result, mode, tmode, unsignedp);
2086 }
2087
2088 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize, ibitnum,
2089 &ibitnum);
2090 gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
2091 str_rtx = copy_to_reg (str_rtx);
2092 return extract_bit_field_1 (str_rtx, ibitsize, ibitnum, unsignedp,
2093 target, mode, tmode, reverse, true, alt_rtl);
2094 }
2095
2096 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
2097 target, mode, tmode, reverse, true, alt_rtl);
2098 }
2099 \f
2100 /* Use shifts and boolean operations to extract a field of BITSIZE bits
2101 from bit BITNUM of OP0. If OP0_MODE is defined, it is the mode of OP0,
2102 otherwise OP0 is a BLKmode MEM.
2103
2104 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
2105 If REVERSE is true, the extraction is to be done in reverse order.
2106
2107 If TARGET is nonzero, attempts to store the value there
2108 and return TARGET, but this is not guaranteed.
2109 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
2110
2111 static rtx
2112 extract_fixed_bit_field (machine_mode tmode, rtx op0,
2113 opt_scalar_int_mode op0_mode,
2114 unsigned HOST_WIDE_INT bitsize,
2115 unsigned HOST_WIDE_INT bitnum, rtx target,
2116 int unsignedp, bool reverse)
2117 {
2118 scalar_int_mode mode;
2119 if (MEM_P (op0))
2120 {
2121 if (!get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
2122 BITS_PER_WORD, MEM_VOLATILE_P (op0), &mode))
2123 /* The only way this should occur is if the field spans word
2124 boundaries. */
2125 return extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
2126 unsignedp, reverse);
2127
2128 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
2129 }
2130 else
2131 mode = op0_mode.require ();
2132
2133 return extract_fixed_bit_field_1 (tmode, op0, mode, bitsize, bitnum,
2134 target, unsignedp, reverse);
2135 }
2136
2137 /* Helper function for extract_fixed_bit_field; extracts
2138 the bit field always using MODE, which is the mode of OP0.
2139 The other arguments are as for extract_fixed_bit_field. */
2140
2141 static rtx
2142 extract_fixed_bit_field_1 (machine_mode tmode, rtx op0, scalar_int_mode mode,
2143 unsigned HOST_WIDE_INT bitsize,
2144 unsigned HOST_WIDE_INT bitnum, rtx target,
2145 int unsignedp, bool reverse)
2146 {
2147 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
2148 for invalid input, such as extract equivalent of f5 from
2149 gcc.dg/pr48335-2.c. */
2150
2151 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2152 /* BITNUM is the distance between our msb and that of OP0.
2153 Convert it to the distance from the lsb. */
2154 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
2155
2156 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
2157 We have reduced the big-endian case to the little-endian case. */
2158 if (reverse)
2159 op0 = flip_storage_order (mode, op0);
2160
2161 if (unsignedp)
2162 {
2163 if (bitnum)
2164 {
2165 /* If the field does not already start at the lsb,
2166 shift it so it does. */
2167 /* Maybe propagate the target for the shift. */
2168 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2169 if (tmode != mode)
2170 subtarget = 0;
2171 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
2172 }
2173 /* Convert the value to the desired mode. TMODE must also be a
2174 scalar integer for this conversion to make sense, since we
2175 shouldn't reinterpret the bits. */
2176 scalar_int_mode new_mode = as_a <scalar_int_mode> (tmode);
2177 if (mode != new_mode)
2178 op0 = convert_to_mode (new_mode, op0, 1);
2179
2180 /* Unless the msb of the field used to be the msb when we shifted,
2181 mask out the upper bits. */
2182
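/* E.g. a 5-bit field at bit 2 of a QImode value becomes
(x >> 2) & 0x1f; a 5-bit field at bit 3 ends at the msb,
so the shift alone suffices.  */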
2183 if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
2184 return expand_binop (new_mode, and_optab, op0,
2185 mask_rtx (new_mode, 0, bitsize, 0),
2186 target, 1, OPTAB_LIB_WIDEN);
2187 return op0;
2188 }
2189
2190 /* To extract a signed bit-field, first shift its msb to the msb of the word,
2191 then arithmetic-shift its lsb to the lsb of the word. */
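/* E.g. for a signed 5-bit field at bit 4 of an SImode value, the
narrowest mode covering bits 0-8 is HImode, and the result is
(x << 7) >> 11 with an arithmetic right shift.  */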
2192 op0 = force_reg (mode, op0);
2193
2194 /* Find the narrowest integer mode that contains the field. */
2195
2196 opt_scalar_int_mode mode_iter;
2197 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2198 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize + bitnum)
2199 break;
2200
2201 mode = mode_iter.require ();
2202 op0 = convert_to_mode (mode, op0, 0);
2203
2204 if (mode != tmode)
2205 target = 0;
2206
2207 if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
2208 {
2209 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
2210 /* Maybe propagate the target for the shift. */
2211 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2212 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
2213 }
2214
2215 return expand_shift (RSHIFT_EXPR, mode, op0,
2216 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
2217 }
2218
2219 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
2220 VALUE << BITPOS. */
2221
2222 static rtx
2223 lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
2224 int bitpos)
2225 {
2226 return immed_wide_int_const (wi::lshift (value, bitpos), mode);
2227 }
2228 \f
2229 /* Extract a bit field that is split across two words
2230 and return an RTX for the result.
2231
2232 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2233 BITSIZE is the field width; BITPOS is the position of its first bit in the word.
2234 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.
2235 If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
2236 a BLKmode MEM.
2237
2238 If REVERSE is true, the extraction is to be done in reverse order. */
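
/* For example, with BITS_PER_WORD == 32 on a little-endian target,
a 20-bit field at BITPOS 28 is built from two unsigned pieces,
4 bits from the first word and 16 bits from the second, combined
as (part2 << 4) | part1.  */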
2239
2240 static rtx
2241 extract_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
2242 unsigned HOST_WIDE_INT bitsize,
2243 unsigned HOST_WIDE_INT bitpos, int unsignedp,
2244 bool reverse)
2245 {
2246 unsigned int unit;
2247 unsigned int bitsdone = 0;
2248 rtx result = NULL_RTX;
2249 int first = 1;
2250
2251 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2252 much at a time. */
2253 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2254 unit = BITS_PER_WORD;
2255 else
2256 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2257
2258 while (bitsdone < bitsize)
2259 {
2260 unsigned HOST_WIDE_INT thissize;
2261 rtx part;
2262 unsigned HOST_WIDE_INT thispos;
2263 unsigned HOST_WIDE_INT offset;
2264
2265 offset = (bitpos + bitsdone) / unit;
2266 thispos = (bitpos + bitsdone) % unit;
2267
2268 /* THISSIZE must not overrun a word boundary. Otherwise,
2269 extract_fixed_bit_field will call us again, and we will mutually
2270 recurse forever. */
2271 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2272 thissize = MIN (thissize, unit - thispos);
2273
2274 /* If OP0 is a register, then handle OFFSET here. */
2275 rtx op0_piece = op0;
2276 opt_scalar_int_mode op0_piece_mode = op0_mode;
2277 if (SUBREG_P (op0) || REG_P (op0))
2278 {
2279 op0_piece = operand_subword_force (op0, offset, op0_mode.require ());
2280 op0_piece_mode = word_mode;
2281 offset = 0;
2282 }
2283
2284 /* Extract the parts in bit-counting order,
2285 whose meaning is determined by BYTES_BIG_ENDIAN.
2286 OFFSET is in UNITs, and UNIT is in bits. */
2287 part = extract_fixed_bit_field (word_mode, op0_piece, op0_piece_mode,
2288 thissize, offset * unit + thispos,
2289 0, 1, reverse);
2290 bitsdone += thissize;
2291
2292 /* Shift this part into place for the result. */
2293 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2294 {
2295 if (bitsize != bitsdone)
2296 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2297 bitsize - bitsdone, 0, 1);
2298 }
2299 else
2300 {
2301 if (bitsdone != thissize)
2302 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2303 bitsdone - thissize, 0, 1);
2304 }
2305
2306 if (first)
2307 result = part;
2308 else
2309 /* Combine the parts with bitwise or. This works
2310 because we extracted each part as an unsigned bit field. */
2311 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2312 OPTAB_LIB_WIDEN);
2313
2314 first = 0;
2315 }
2316
2317 /* Unsigned bit field: we are done. */
2318 if (unsignedp)
2319 return result;
2320 /* Signed bit field: sign-extend with two arithmetic shifts. */
2321 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2322 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2323 return expand_shift (RSHIFT_EXPR, word_mode, result,
2324 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2325 }
2326 \f
2327 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2328 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2329 MODE, fill the upper bits with zeros. Fail if the layout of either
2330 mode is unknown (as for CC modes) or if the extraction would involve
2331 unprofitable mode punning. Return the value on success, otherwise
2332 return null.
2333
2334 This is different from gen_lowpart* in these respects:
2335
2336 - the returned value must always be considered an rvalue
2337
2338 - when MODE is wider than SRC_MODE, the extraction involves
2339 a zero extension
2340
2341 - when MODE is smaller than SRC_MODE, the extraction involves
2342 a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).
2343
2344 In other words, this routine performs a computation, whereas the
2345 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2346 operations. */
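
/* E.g. extract_low_bits (DImode, SImode, x) zero-extends, while
extract_low_bits (SImode, DFmode, y) reads the low 32 bits of the
double's bit pattern, provided the target allows that punning.  */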
2347
2348 rtx
2349 extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
2350 {
2351 scalar_int_mode int_mode, src_int_mode;
2352
2353 if (mode == src_mode)
2354 return src;
2355
2356 if (CONSTANT_P (src))
2357 {
2358 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2359 fails, it will happily create (subreg (symbol_ref)) or similar
2360 invalid SUBREGs. */
2361 poly_uint64 byte = subreg_lowpart_offset (mode, src_mode);
2362 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2363 if (ret)
2364 return ret;
2365
2366 if (GET_MODE (src) == VOIDmode
2367 || !validate_subreg (mode, src_mode, src, byte))
2368 return NULL_RTX;
2369
2370 src = force_reg (GET_MODE (src), src);
2371 return gen_rtx_SUBREG (mode, src, byte);
2372 }
2373
2374 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2375 return NULL_RTX;
2376
2377 if (known_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
2378 && targetm.modes_tieable_p (mode, src_mode))
2379 {
2380 rtx x = gen_lowpart_common (mode, src);
2381 if (x)
2382 return x;
2383 }
2384
2385 if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
2386 || !int_mode_for_mode (mode).exists (&int_mode))
2387 return NULL_RTX;
2388
2389 if (!targetm.modes_tieable_p (src_int_mode, src_mode))
2390 return NULL_RTX;
2391 if (!targetm.modes_tieable_p (int_mode, mode))
2392 return NULL_RTX;
2393
2394 src = gen_lowpart (src_int_mode, src);
2395 if (!validate_subreg (int_mode, src_int_mode, src,
2396 subreg_lowpart_offset (int_mode, src_int_mode)))
2397 return NULL_RTX;
2398
2399 src = convert_modes (int_mode, src_int_mode, src, true);
2400 src = gen_lowpart (mode, src);
2401 return src;
2402 }
2403 \f
2404 /* Add INC into TARGET. */
2405
2406 void
2407 expand_inc (rtx target, rtx inc)
2408 {
2409 rtx value = expand_binop (GET_MODE (target), add_optab,
2410 target, inc,
2411 target, 0, OPTAB_LIB_WIDEN);
2412 if (value != target)
2413 emit_move_insn (target, value);
2414 }
2415
2416 /* Subtract DEC from TARGET. */
2417
2418 void
2419 expand_dec (rtx target, rtx dec)
2420 {
2421 rtx value = expand_binop (GET_MODE (target), sub_optab,
2422 target, dec,
2423 target, 0, OPTAB_LIB_WIDEN);
2424 if (value != target)
2425 emit_move_insn (target, value);
2426 }
2427 \f
2428 /* Output a shift instruction for expression code CODE,
2429 with SHIFTED being the rtx for the value to shift,
2430 and AMOUNT the rtx for the amount to shift by.
2431 Store the result in the rtx TARGET, if that is convenient.
2432 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2433 Return the rtx for where the value is.
2434 If that cannot be done, abort the compilation unless MAY_FAIL is true,
2435 in which case 0 is returned. */
2436
2437 static rtx
2438 expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
2439 rtx amount, rtx target, int unsignedp, bool may_fail = false)
2440 {
2441 rtx op1, temp = 0;
2442 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2443 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2444 optab lshift_optab = ashl_optab;
2445 optab rshift_arith_optab = ashr_optab;
2446 optab rshift_uns_optab = lshr_optab;
2447 optab lrotate_optab = rotl_optab;
2448 optab rrotate_optab = rotr_optab;
2449 machine_mode op1_mode;
2450 scalar_mode scalar_mode = GET_MODE_INNER (mode);
2451 int attempt;
2452 bool speed = optimize_insn_for_speed_p ();
2453
2454 op1 = amount;
2455 op1_mode = GET_MODE (op1);
2456
2457 /* Determine whether the shift/rotate amount is a vector or a scalar. If the
2458 shift amount is a vector, use the vector/vector shift patterns. */
2459 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2460 {
2461 lshift_optab = vashl_optab;
2462 rshift_arith_optab = vashr_optab;
2463 rshift_uns_optab = vlshr_optab;
2464 lrotate_optab = vrotl_optab;
2465 rrotate_optab = vrotr_optab;
2466 }
2467
2468 /* We used to detect shift counts computed by NEGATE_EXPR
2469 and shift in the other direction, but that does not work
2470 on all machines. */
2471
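/* E.g. with SHIFT_COUNT_TRUNCATED defined, an SImode shift by a
constant 33 is reduced here to a shift by 1, matching what the
hardware would do with the truncated count.  */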
2472 if (SHIFT_COUNT_TRUNCATED)
2473 {
2474 if (CONST_INT_P (op1)
2475 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2476 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2477 op1 = gen_int_shift_amount (mode,
2478 (unsigned HOST_WIDE_INT) INTVAL (op1)
2479 % GET_MODE_BITSIZE (scalar_mode));
2480 else if (GET_CODE (op1) == SUBREG
2481 && subreg_lowpart_p (op1)
2482 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2483 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2484 op1 = SUBREG_REG (op1);
2485 }
2486
2487 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
2488 prefer left rotation; if op1 is from bitsize / 2 + 1 to
2489 bitsize - 1, rotate in the other direction with an amount of
2490 1 .. bitsize / 2 - 1 instead. */
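/* E.g. an SImode rotate-left by 24 becomes a rotate-right by 8.  */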
2491 if (rotate
2492 && CONST_INT_P (op1)
2493 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2494 GET_MODE_BITSIZE (scalar_mode) - 1))
2495 {
2496 op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
2497 - INTVAL (op1)));
2498 left = !left;
2499 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2500 }
2501
2502 /* Rotation of 16-bit values by 8 bits is effectively equivalent to a bswaphi.
2503 Note that this is not the case for bigger values. For instance, a rotation
2504 of 0x01020304 by 16 bits gives 0x03040102, which is different from
2505 0x04030201 (bswapsi). */
2506 if (rotate
2507 && CONST_INT_P (op1)
2508 && INTVAL (op1) == BITS_PER_UNIT
2509 && GET_MODE_SIZE (scalar_mode) == 2
2510 && optab_handler (bswap_optab, mode) != CODE_FOR_nothing)
2511 return expand_unop (mode, bswap_optab, shifted, NULL_RTX, unsignedp);
2512
2513 if (op1 == const0_rtx)
2514 return shifted;
2515
2516 /* Check whether it's cheaper to implement a left shift by a constant
2517 bit count as a sequence of additions. */
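/* E.g. x << 2 can be done as t = x + x; t = t + t; when two
additions are cheaper than the one shift on this target.  */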
2518 if (code == LSHIFT_EXPR
2519 && CONST_INT_P (op1)
2520 && INTVAL (op1) > 0
2521 && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
2522 && INTVAL (op1) < MAX_BITS_PER_WORD
2523 && (shift_cost (speed, mode, INTVAL (op1))
2524 > INTVAL (op1) * add_cost (speed, mode))
2525 && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2526 {
2527 int i;
2528 for (i = 0; i < INTVAL (op1); i++)
2529 {
2530 temp = force_reg (mode, shifted);
2531 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2532 unsignedp, OPTAB_LIB_WIDEN);
2533 }
2534 return shifted;
2535 }
2536
2537 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2538 {
2539 enum optab_methods methods;
2540
2541 if (attempt == 0)
2542 methods = OPTAB_DIRECT;
2543 else if (attempt == 1)
2544 methods = OPTAB_WIDEN;
2545 else
2546 methods = OPTAB_LIB_WIDEN;
2547
2548 if (rotate)
2549 {
2550 /* Widening does not work for rotation. */
2551 if (methods == OPTAB_WIDEN)
2552 continue;
2553 else if (methods == OPTAB_LIB_WIDEN)
2554 {
2555 /* If we have been unable to open-code this by a rotation,
2556 do it as the IOR of two shifts. I.e., to rotate A
2557 by N bits, compute
2558 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2559 where C is the bitsize of A.
2560
2561 It is theoretically possible that the target machine might
2562 not be able to perform either shift and hence we would
2563 be making two libcalls rather than just the one for the
2564 shift (similarly if IOR could not be done). We will allow
2565 this extremely unlikely lossage to avoid complicating the
2566 code below. */
2567
2568 rtx subtarget = target == shifted ? 0 : target;
2569 rtx new_amount, other_amount;
2570 rtx temp1;
2571
2572 new_amount = op1;
2573 if (op1 == const0_rtx)
2574 return shifted;
2575 else if (CONST_INT_P (op1))
2576 other_amount = gen_int_shift_amount
2577 (mode, GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2578 else
2579 {
2580 other_amount
2581 = simplify_gen_unary (NEG, GET_MODE (op1),
2582 op1, GET_MODE (op1));
2583 HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
2584 other_amount
2585 = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2586 gen_int_mode (mask, GET_MODE (op1)));
2587 }
2588
2589 shifted = force_reg (mode, shifted);
2590
2591 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2592 mode, shifted, new_amount, 0, 1);
2593 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2594 mode, shifted, other_amount,
2595 subtarget, 1);
2596 return expand_binop (mode, ior_optab, temp, temp1, target,
2597 unsignedp, methods);
2598 }
2599
2600 temp = expand_binop (mode,
2601 left ? lrotate_optab : rrotate_optab,
2602 shifted, op1, target, unsignedp, methods);
2603 }
2604 else if (unsignedp)
2605 temp = expand_binop (mode,
2606 left ? lshift_optab : rshift_uns_optab,
2607 shifted, op1, target, unsignedp, methods);
2608
2609 /* Do arithmetic shifts.
2610 Also, if we are going to widen the operand, we can just as well
2611 use an arithmetic right-shift instead of a logical one. */
2612 if (temp == 0 && ! rotate
2613 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2614 {
2615 enum optab_methods methods1 = methods;
2616
2617 /* If trying to widen a log shift to an arithmetic shift,
2618 don't accept an arithmetic shift of the same size. */
2619 if (unsignedp)
2620 methods1 = OPTAB_MUST_WIDEN;
2621
2622 /* Arithmetic shift */
2623
2624 temp = expand_binop (mode,
2625 left ? lshift_optab : rshift_arith_optab,
2626 shifted, op1, target, unsignedp, methods1);
2627 }
2628
2629 /* We used to try extzv here for logical right shifts, but that was
2630 only useful for one machine, the VAX, and caused poor code
2631 generation there for lshrdi3, so the code was deleted and a
2632 define_expand for lshrsi3 was added to vax.md. */
2633 }
2634
2635 gcc_assert (temp != NULL_RTX || may_fail);
2636 return temp;
2637 }
2638
2639 /* Output a shift instruction for expression code CODE,
2640 with SHIFTED being the rtx for the value to shift,
2641 and AMOUNT the amount to shift by.
2642 Store the result in the rtx TARGET, if that is convenient.
2643 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2644 Return the rtx for where the value is. */
2645
2646 rtx
2647 expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2648 poly_int64 amount, rtx target, int unsignedp)
2649 {
2650 return expand_shift_1 (code, mode, shifted,
2651 gen_int_shift_amount (mode, amount),
2652 target, unsignedp);
2653 }
2654
2655 /* Likewise, but return 0 if that cannot be done. */
2656
2657 static rtx
2658 maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2659 int amount, rtx target, int unsignedp)
2660 {
2661 return expand_shift_1 (code, mode,
2662 shifted, GEN_INT (amount), target, unsignedp, true);
2663 }
2664
2665 /* Output a shift instruction for expression code CODE,
2666 with SHIFTED being the rtx for the value to shift,
2667 and AMOUNT the tree for the amount to shift by.
2668 Store the result in the rtx TARGET, if that is convenient.
2669 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2670 Return the rtx for where the value is. */
2671
2672 rtx
2673 expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2674 tree amount, rtx target, int unsignedp)
2675 {
2676 return expand_shift_1 (code, mode,
2677 shifted, expand_normal (amount), target, unsignedp);
2678 }
2679
2680 \f
2681 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2682 const struct mult_cost *, machine_mode mode);
2683 static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2684 const struct algorithm *, enum mult_variant);
2685 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2686 static rtx extract_high_half (scalar_int_mode, rtx);
2687 static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int);
2688 static rtx expmed_mult_highpart_optab (scalar_int_mode, rtx, rtx, rtx,
2689 int, int);
2690 /* Compute and return the best algorithm for multiplying by T.
2691 The algorithm must cost less than COST_LIMIT.
2692 If retval.cost >= COST_LIMIT, no algorithm was found and all
2693 other fields of the returned struct are undefined.
2694 MODE is the machine mode of the multiplication. */
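
/* For example, for T == 10 a typical cheap sequence (assuming shifts
and adds are cheaper than a multiply) is x * 10 = ((x << 2) + x) << 1,
recorded as alg_m, then alg_add_t2_m with log 2, then alg_shift with
log 1.  */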
2695
2696 static void
2697 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2698 const struct mult_cost *cost_limit, machine_mode mode)
2699 {
2700 int m;
2701 struct algorithm *alg_in, *best_alg;
2702 struct mult_cost best_cost;
2703 struct mult_cost new_limit;
2704 int op_cost, op_latency;
2705 unsigned HOST_WIDE_INT orig_t = t;
2706 unsigned HOST_WIDE_INT q;
2707 int maxm, hash_index;
2708 bool cache_hit = false;
2709 enum alg_code cache_alg = alg_zero;
2710 bool speed = optimize_insn_for_speed_p ();
2711 scalar_int_mode imode;
2712 struct alg_hash_entry *entry_ptr;
2713
2714 /* Indicate that no algorithm is yet found. If no algorithm
2715 is found, this value will be returned to indicate failure. */
2716 alg_out->cost.cost = cost_limit->cost + 1;
2717 alg_out->cost.latency = cost_limit->latency + 1;
2718
2719 if (cost_limit->cost < 0
2720 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2721 return;
2722
2723 /* Be prepared for vector modes. */
2724 imode = as_a <scalar_int_mode> (GET_MODE_INNER (mode));
2725
2726 maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2727
2728 /* Restrict the bits of "t" to the multiplication's mode. */
2729 t &= GET_MODE_MASK (imode);
2730
2731 /* t == 1 can be done at zero cost. */
2732 if (t == 1)
2733 {
2734 alg_out->ops = 1;
2735 alg_out->cost.cost = 0;
2736 alg_out->cost.latency = 0;
2737 alg_out->op[0] = alg_m;
2738 return;
2739 }
2740
2741 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2742 fail now. */
2743 if (t == 0)
2744 {
2745 if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2746 return;
2747 else
2748 {
2749 alg_out->ops = 1;
2750 alg_out->cost.cost = zero_cost (speed);
2751 alg_out->cost.latency = zero_cost (speed);
2752 alg_out->op[0] = alg_zero;
2753 return;
2754 }
2755 }
2756
2757 /* We'll be needing a couple extra algorithm structures now. */
2758
2759 alg_in = XALLOCA (struct algorithm);
2760 best_alg = XALLOCA (struct algorithm);
2761 best_cost = *cost_limit;
2762
2763 /* Compute the hash index. */
2764 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2765
2766 /* See if we already know what to do for T. */
2767 entry_ptr = alg_hash_entry_ptr (hash_index);
2768 if (entry_ptr->t == t
2769 && entry_ptr->mode == mode
2770 && entry_ptr->speed == speed
2771 && entry_ptr->alg != alg_unknown)
2772 {
2773 cache_alg = entry_ptr->alg;
2774
2775 if (cache_alg == alg_impossible)
2776 {
2777 /* The cache tells us that it's impossible to synthesize
2778 multiplication by T within entry_ptr->cost. */
2779 if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2780 /* COST_LIMIT is at least as restrictive as the one
2781 recorded in the hash table, in which case we have no
2782 hope of synthesizing a multiplication. Just
2783 return. */
2784 return;
2785
2786 /* If we get here, COST_LIMIT is less restrictive than the
2787 one recorded in the hash table, so we may be able to
2788 synthesize a multiplication. Proceed as if we didn't
2789 have the cache entry. */
2790 }
2791 else
2792 {
2793 if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2794 /* The cached algorithm shows that this multiplication
2795 requires more cost than COST_LIMIT. Just return. This
2796 way, we don't clobber this cache entry with
2797 alg_impossible but retain useful information. */
2798 return;
2799
2800 cache_hit = true;
2801
2802 switch (cache_alg)
2803 {
2804 case alg_shift:
2805 goto do_alg_shift;
2806
2807 case alg_add_t_m2:
2808 case alg_sub_t_m2:
2809 goto do_alg_addsub_t_m2;
2810
2811 case alg_add_factor:
2812 case alg_sub_factor:
2813 goto do_alg_addsub_factor;
2814
2815 case alg_add_t2_m:
2816 goto do_alg_add_t2_m;
2817
2818 case alg_sub_t2_m:
2819 goto do_alg_sub_t2_m;
2820
2821 default:
2822 gcc_unreachable ();
2823 }
2824 }
2825 }
2826
2827 /* If we have a group of zero bits at the low-order part of T, try
2828 multiplying by the remaining bits and then doing a shift. */
2829
2830 if ((t & 1) == 0)
2831 {
2832 do_alg_shift:
2833 m = ctz_or_zero (t); /* m = number of low zero bits */
2834 if (m < maxm)
2835 {
2836 q = t >> m;
2837 /* The function expand_shift will choose between a shift and
2838 a sequence of additions, so the observed cost is given as
2839 MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)). */
2840 op_cost = m * add_cost (speed, mode);
2841 if (shift_cost (speed, mode, m) < op_cost)
2842 op_cost = shift_cost (speed, mode, m);
2843 new_limit.cost = best_cost.cost - op_cost;
2844 new_limit.latency = best_cost.latency - op_cost;
2845 synth_mult (alg_in, q, &new_limit, mode);
2846
2847 alg_in->cost.cost += op_cost;
2848 alg_in->cost.latency += op_cost;
2849 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2850 {
2851 best_cost = alg_in->cost;
2852 std::swap (alg_in, best_alg);
2853 best_alg->log[best_alg->ops] = m;
2854 best_alg->op[best_alg->ops] = alg_shift;
2855 }
2856
2857 /* See if treating ORIG_T as a signed number yields a better
2858 sequence. Try this sequence only for a negative ORIG_T
2859 as it would be useless for a non-negative ORIG_T. */
2860 if ((HOST_WIDE_INT) orig_t < 0)
2861 {
2862 /* Shift ORIG_T as follows because a right shift of a
2863 negative signed value is
2864 implementation-defined. */
2865 q = ~(~orig_t >> m);
2866 /* The function expand_shift will choose between a shift
2867 and a sequence of additions, so the observed cost is
2868 given as MIN (m * add_cost(speed, mode),
2869 shift_cost(speed, mode, m)). */
2870 op_cost = m * add_cost (speed, mode);
2871 if (shift_cost (speed, mode, m) < op_cost)
2872 op_cost = shift_cost (speed, mode, m);
2873 new_limit.cost = best_cost.cost - op_cost;
2874 new_limit.latency = best_cost.latency - op_cost;
2875 synth_mult (alg_in, q, &new_limit, mode);
2876
2877 alg_in->cost.cost += op_cost;
2878 alg_in->cost.latency += op_cost;
2879 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2880 {
2881 best_cost = alg_in->cost;
2882 std::swap (alg_in, best_alg);
2883 best_alg->log[best_alg->ops] = m;
2884 best_alg->op[best_alg->ops] = alg_shift;
2885 }
2886 }
2887 }
2888 if (cache_hit)
2889 goto done;
2890 }
2891
2892 /* If we have an odd number, add or subtract one. */
2893 if ((t & 1) != 0)
2894 {
2895 unsigned HOST_WIDE_INT w;
2896
2897 do_alg_addsub_t_m2:
2898 for (w = 1; (w & t) != 0; w <<= 1)
2899 ;
2900 /* If T was -1, then W will be zero after the loop. This is another
2901 case where T ends with ...111. Handling this with (T + 1) and
2902 subtracting 1 produces slightly better code and makes algorithm
2903 selection much faster than treating it like the ...0111 case
2904 below. */
2905 if (w == 0
2906 || (w > 2
2907 /* Reject the case where t is 3.
2908 Thus we prefer addition in that case. */
2909 && t != 3))
2910 {
2911 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
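/* E.g. T == 7: synthesize x * 8 as x << 3, then subtract x once,
giving (x << 3) - x.  */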
2912
2913 op_cost = add_cost (speed, mode);
2914 new_limit.cost = best_cost.cost - op_cost;
2915 new_limit.latency = best_cost.latency - op_cost;
2916 synth_mult (alg_in, t + 1, &new_limit, mode);
2917
2918 alg_in->cost.cost += op_cost;
2919 alg_in->cost.latency += op_cost;
2920 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2921 {
2922 best_cost = alg_in->cost;
2923 std::swap (alg_in, best_alg);
2924 best_alg->log[best_alg->ops] = 0;
2925 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2926 }
2927 }
2928 else
2929 {
2930 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2931
2932 op_cost = add_cost (speed, mode);
2933 new_limit.cost = best_cost.cost - op_cost;
2934 new_limit.latency = best_cost.latency - op_cost;
2935 synth_mult (alg_in, t - 1, &new_limit, mode);
2936
2937 alg_in->cost.cost += op_cost;
2938 alg_in->cost.latency += op_cost;
2939 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2940 {
2941 best_cost = alg_in->cost;
2942 std::swap (alg_in, best_alg);
2943 best_alg->log[best_alg->ops] = 0;
2944 best_alg->op[best_alg->ops] = alg_add_t_m2;
2945 }
2946 }
2947
2948 /* We may be able to calculate a * -7, a * -15, a * -31, etc.
2949 quickly with a - a * n for some appropriate constant n. */
2950 m = exact_log2 (-orig_t + 1);
2951 if (m >= 0 && m < maxm)
2952 {
2953 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2954 /* If the target has a cheap shift-and-subtract insn use
2955 that in preference to a shift insn followed by a sub insn.
2956 Assume that the shift-and-sub is "atomic" with a latency
2957 equal to its cost; otherwise assume that on superscalar
2958 hardware the shift may be executed concurrently with the
2959 earlier steps in the algorithm. */
2960 if (shiftsub1_cost (speed, mode, m) <= op_cost)
2961 {
2962 op_cost = shiftsub1_cost (speed, mode, m);
2963 op_latency = op_cost;
2964 }
2965 else
2966 op_latency = add_cost (speed, mode);
2967
2968 new_limit.cost = best_cost.cost - op_cost;
2969 new_limit.latency = best_cost.latency - op_latency;
2970 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2971 &new_limit, mode);
2972
2973 alg_in->cost.cost += op_cost;
2974 alg_in->cost.latency += op_latency;
2975 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2976 {
2977 best_cost = alg_in->cost;
2978 std::swap (alg_in, best_alg);
2979 best_alg->log[best_alg->ops] = m;
2980 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2981 }
2982 }
2983
2984 if (cache_hit)
2985 goto done;
2986 }
2987
2988 /* Look for factors of t of the form
2989 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2990 If we find such a factor, we can multiply by t using an algorithm that
2991 multiplies by q, shifts the result by m, and adds/subtracts the unshifted result.
2992
2993 We search for large factors first and loop down, even if large factors
2994 are less probable than small; if we find a large factor we will find a
2995 good sequence quickly, and therefore be able to prune (by decreasing
2996 COST_LIMIT) the search. */
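
/* E.g. T == 45 factors as 3 * (2**4 - 1): first synthesize
x3 = (x << 1) + x, then compute (x3 << 4) - x3.  */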
2997
2998 do_alg_addsub_factor:
2999 for (m = floor_log2 (t - 1); m >= 2; m--)
3000 {
3001 unsigned HOST_WIDE_INT d;
3002
3003 d = (HOST_WIDE_INT_1U << m) + 1;
3004 if (t % d == 0 && t > d && m < maxm
3005 && (!cache_hit || cache_alg == alg_add_factor))
3006 {
3007 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
3008 if (shiftadd_cost (speed, mode, m) <= op_cost)
3009 op_cost = shiftadd_cost (speed, mode, m);
3010
3011 op_latency = op_cost;
3012
3013
3014 new_limit.cost = best_cost.cost - op_cost;
3015 new_limit.latency = best_cost.latency - op_latency;
3016 synth_mult (alg_in, t / d, &new_limit, mode);
3017
3018 alg_in->cost.cost += op_cost;
3019 alg_in->cost.latency += op_latency;
3020 if (alg_in->cost.latency < op_cost)
3021 alg_in->cost.latency = op_cost;
3022 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3023 {
3024 best_cost = alg_in->cost;
3025 std::swap (alg_in, best_alg);
3026 best_alg->log[best_alg->ops] = m;
3027 best_alg->op[best_alg->ops] = alg_add_factor;
3028 }
3029 /* Other factors will have been taken care of in the recursion. */
3030 break;
3031 }
3032
3033 d = (HOST_WIDE_INT_1U << m) - 1;
3034 if (t % d == 0 && t > d && m < maxm
3035 && (!cache_hit || cache_alg == alg_sub_factor))
3036 {
3037 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
3038 if (shiftsub0_cost (speed, mode, m) <= op_cost)
3039 op_cost = shiftsub0_cost (speed, mode, m);
3040
3041 op_latency = op_cost;
3042
3043 new_limit.cost = best_cost.cost - op_cost;
3044 new_limit.latency = best_cost.latency - op_latency;
3045 synth_mult (alg_in, t / d, &new_limit, mode);
3046
3047 alg_in->cost.cost += op_cost;
3048 alg_in->cost.latency += op_latency;
3049 if (alg_in->cost.latency < op_cost)
3050 alg_in->cost.latency = op_cost;
3051 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3052 {
3053 best_cost = alg_in->cost;
3054 std::swap (alg_in, best_alg);
3055 best_alg->log[best_alg->ops] = m;
3056 best_alg->op[best_alg->ops] = alg_sub_factor;
3057 }
3058 break;
3059 }
3060 }
3061 if (cache_hit)
3062 goto done;
3063
3064 /* Try shift-and-add (load effective address) instructions,
3065 i.e. do a*3, a*5, a*9. */
3066 if ((t & 1) != 0)
3067 {
3068 do_alg_add_t2_m:
3069 q = t - 1;
3070 m = ctz_hwi (q);
3071 if (q && m < maxm)
3072 {
3073 op_cost = shiftadd_cost (speed, mode, m);
3074 new_limit.cost = best_cost.cost - op_cost;
3075 new_limit.latency = best_cost.latency - op_cost;
3076 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
3077
3078 alg_in->cost.cost += op_cost;
3079 alg_in->cost.latency += op_cost;
3080 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3081 {
3082 best_cost = alg_in->cost;
3083 std::swap (alg_in, best_alg);
3084 best_alg->log[best_alg->ops] = m;
3085 best_alg->op[best_alg->ops] = alg_add_t2_m;
3086 }
3087 }
3088 if (cache_hit)
3089 goto done;
3090
3091 do_alg_sub_t2_m:
3092 q = t + 1;
3093 m = ctz_hwi (q);
3094 if (q && m < maxm)
3095 {
3096 op_cost = shiftsub0_cost (speed, mode, m);
3097 new_limit.cost = best_cost.cost - op_cost;
3098 new_limit.latency = best_cost.latency - op_cost;
3099 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
3100
3101 alg_in->cost.cost += op_cost;
3102 alg_in->cost.latency += op_cost;
3103 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
3104 {
3105 best_cost = alg_in->cost;
3106 std::swap (alg_in, best_alg);
3107 best_alg->log[best_alg->ops] = m;
3108 best_alg->op[best_alg->ops] = alg_sub_t2_m;
3109 }
3110 }
3111 if (cache_hit)
3112 goto done;
3113 }
3114
3115 done:
3116 /* If best_cost has not decreased, we have not found any algorithm. */
3117 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
3118 {
3119 /* We failed to find an algorithm. Record alg_impossible for
3120 this case (that is, <T, MODE, COST_LIMIT>) so that next time
3121 we are asked to find an algorithm for T within the same or
3122 lower COST_LIMIT, we can immediately return to the
3123 caller. */
3124 entry_ptr->t = t;
3125 entry_ptr->mode = mode;
3126 entry_ptr->speed = speed;
3127 entry_ptr->alg = alg_impossible;
3128 entry_ptr->cost = *cost_limit;
3129 return;
3130 }
3131
3132 /* Cache the result. */
3133 if (!cache_hit)
3134 {
3135 entry_ptr->t = t;
3136 entry_ptr->mode = mode;
3137 entry_ptr->speed = speed;
3138 entry_ptr->alg = best_alg->op[best_alg->ops];
3139 entry_ptr->cost.cost = best_cost.cost;
3140 entry_ptr->cost.latency = best_cost.latency;
3141 }
3142
3143 /* If we are getting too long a sequence for `struct algorithm'
3144 to record, make this search fail. */
3145 if (best_alg->ops == MAX_BITS_PER_WORD)
3146 return;
3147
3148 /* Copy the algorithm from temporary space to the space at alg_out.
3149 We avoid using structure assignment because the majority of
3150 best_alg is normally undefined, and this is a critical function. */
3151 alg_out->ops = best_alg->ops + 1;
3152 alg_out->cost = best_cost;
3153 memcpy (alg_out->op, best_alg->op,
3154 alg_out->ops * sizeof *alg_out->op);
3155 memcpy (alg_out->log, best_alg->log,
3156 alg_out->ops * sizeof *alg_out->log);
3157 }
3158 \f
3159 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
3160 Try three variations:
3161
3162 - a shift/add sequence based on VAL itself
3163 - a shift/add sequence based on -VAL, followed by a negation
3164 - a shift/add sequence based on VAL - 1, followed by an addition.
3165
3166 Return true if the cheapest of these costs less than MULT_COST,
3167 describing the algorithm in *ALG and final fixup in *VARIANT. */
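
/* E.g. for VAL == -7 the cheapest choice is typically the sequence
for 7, t = (x << 3) - x, followed by a negation (negate_variant).  */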
3168
3169 bool
3170 choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
3171 struct algorithm *alg, enum mult_variant *variant,
3172 int mult_cost)
3173 {
3174 struct algorithm alg2;
3175 struct mult_cost limit;
3176 int op_cost;
3177 bool speed = optimize_insn_for_speed_p ();
3178
3179 /* Fail quickly for impossible bounds. */
3180 if (mult_cost < 0)
3181 return false;
3182
3183 /* Ensure that mult_cost provides a reasonable upper bound.
3184 Any constant multiplication can be performed with less
3185 than 2 * bits additions. */
3186 op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
3187 if (mult_cost > op_cost)
3188 mult_cost = op_cost;
3189
3190 *variant = basic_variant;
3191 limit.cost = mult_cost;
3192 limit.latency = mult_cost;
3193 synth_mult (alg, val, &limit, mode);
3194
3195 /* This works only if the inverted value actually fits in an
3196 `unsigned int'. */
3197 if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
3198 {
3199 op_cost = neg_cost (speed, mode);
3200 if (MULT_COST_LESS (&alg->cost, mult_cost))
3201 {
3202 limit.cost = alg->cost.cost - op_cost;
3203 limit.latency = alg->cost.latency - op_cost;
3204 }
3205 else
3206 {
3207 limit.cost = mult_cost - op_cost;
3208 limit.latency = mult_cost - op_cost;
3209 }
3210
3211 synth_mult (&alg2, -val, &limit, mode);
3212 alg2.cost.cost += op_cost;
3213 alg2.cost.latency += op_cost;
3214 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3215 *alg = alg2, *variant = negate_variant;
3216 }
3217
3218 /* This proves very useful for division-by-constant. */
3219 op_cost = add_cost (speed, mode);
3220 if (MULT_COST_LESS (&alg->cost, mult_cost))
3221 {
3222 limit.cost = alg->cost.cost - op_cost;
3223 limit.latency = alg->cost.latency - op_cost;
3224 }
3225 else
3226 {
3227 limit.cost = mult_cost - op_cost;
3228 limit.latency = mult_cost - op_cost;
3229 }
3230
3231 synth_mult (&alg2, val - 1, &limit, mode);
3232 alg2.cost.cost += op_cost;
3233 alg2.cost.latency += op_cost;
3234 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3235 *alg = alg2, *variant = add_variant;
3236
3237 return MULT_COST_LESS (&alg->cost, mult_cost);
3238 }
3239
3240 /* A subroutine of expand_mult, used for constant multiplications.
3241 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
3242 convenient. Use the shift/add sequence described by ALG and apply
3243 the final fixup specified by VARIANT. */
3244
3245 static rtx
3246 expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
3247 rtx target, const struct algorithm *alg,
3248 enum mult_variant variant)
3249 {
3250 unsigned HOST_WIDE_INT val_so_far;
3251 rtx_insn *insn;
3252 rtx accum, tem;
3253 int opno;
3254 machine_mode nmode;
3255
3256 /* Avoid referencing memory over and over and invalid sharing
3257 on SUBREGs. */
3258 op0 = force_reg (mode, op0);
3259
3260 /* ACCUM starts out either as OP0 or as a zero, depending on
3261 the first operation. */
3262
3263 if (alg->op[0] == alg_zero)
3264 {
3265 accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
3266 val_so_far = 0;
3267 }
3268 else if (alg->op[0] == alg_m)
3269 {
3270 accum = copy_to_mode_reg (mode, op0);
3271 val_so_far = 1;
3272 }
3273 else
3274 gcc_unreachable ();
3275
3276 for (opno = 1; opno < alg->ops; opno++)
3277 {
3278 int log = alg->log[opno];
3279 rtx shift_subtarget = optimize ? 0 : accum;
3280 rtx add_target
3281 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
3282 && !optimize)
3283 ? target : 0;
3284 rtx accum_target = optimize ? 0 : accum;
3285 rtx accum_inner;
3286
3287 switch (alg->op[opno])
3288 {
3289 case alg_shift:
3290 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3291 /* REG_EQUAL note will be attached to the following insn. */
3292 emit_move_insn (accum, tem);
3293 val_so_far <<= log;
3294 break;
3295
3296 case alg_add_t_m2:
3297 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3298 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3299 add_target ? add_target : accum_target);
3300 val_so_far += HOST_WIDE_INT_1U << log;
3301 break;
3302
3303 case alg_sub_t_m2:
3304 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3305 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3306 add_target ? add_target : accum_target);
3307 val_so_far -= HOST_WIDE_INT_1U << log;
3308 break;
3309
3310 case alg_add_t2_m:
3311 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3312 log, shift_subtarget, 0);
3313 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3314 add_target ? add_target : accum_target);
3315 val_so_far = (val_so_far << log) + 1;
3316 break;
3317
3318 case alg_sub_t2_m:
3319 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3320 log, shift_subtarget, 0);
3321 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3322 add_target ? add_target : accum_target);
3323 val_so_far = (val_so_far << log) - 1;
3324 break;
3325
3326 case alg_add_factor:
3327 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3328 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3329 add_target ? add_target : accum_target);
3330 val_so_far += val_so_far << log;
3331 break;
3332
3333 case alg_sub_factor:
3334 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3335 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3336 (add_target
3337 ? add_target : (optimize ? 0 : tem)));
3338 val_so_far = (val_so_far << log) - val_so_far;
3339 break;
3340
3341 default:
3342 gcc_unreachable ();
3343 }
3344
3345 if (SCALAR_INT_MODE_P (mode))
3346 {
3347 /* Write a REG_EQUAL note on the last insn so that we can cse
3348 multiplication sequences. Note that if ACCUM is a SUBREG,
3349 we've set the inner register and must properly indicate that. */
3350 tem = op0, nmode = mode;
3351 accum_inner = accum;
3352 if (GET_CODE (accum) == SUBREG)
3353 {
3354 accum_inner = SUBREG_REG (accum);
3355 nmode = GET_MODE (accum_inner);
3356 tem = gen_lowpart (nmode, op0);
3357 }
3358
3359 /* Don't add a REG_EQUAL note if tem is a paradoxical SUBREG.
3360 In that case, only the low bits of accum would be guaranteed to
3361 be equal to the content of the REG_EQUAL note, the upper bits
3362 can be anything. */
3363 if (!paradoxical_subreg_p (tem))
3364 {
3365 insn = get_last_insn ();
3366 wide_int wval_so_far
3367 = wi::uhwi (val_so_far,
3368 GET_MODE_PRECISION (as_a <scalar_mode> (nmode)));
3369 rtx c = immed_wide_int_const (wval_so_far, nmode);
3370 set_dst_reg_note (insn, REG_EQUAL, gen_rtx_MULT (nmode, tem, c),
3371 accum_inner);
3372 }
3373 }
3374 }
3375
3376 if (variant == negate_variant)
3377 {
3378 val_so_far = -val_so_far;
3379 accum = expand_unop (mode, neg_optab, accum, target, 0);
3380 }
3381 else if (variant == add_variant)
3382 {
3383 val_so_far = val_so_far + 1;
3384 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3385 }
3386
3387 /* Compare only the bits of val and val_so_far that are significant
3388 in the result mode, to avoid sign-/zero-extension confusion. */
3389 nmode = GET_MODE_INNER (mode);
3390 val &= GET_MODE_MASK (nmode);
3391 val_so_far &= GET_MODE_MASK (nmode);
3392 gcc_assert (val == (HOST_WIDE_INT) val_so_far);
3393
3394 return accum;
3395 }
3396
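/* Editorial illustration (assumed decomposition, not from the
   original source): expanding x * 10 with the recipe
   { alg_m, alg_add_t_m2 (log 2), alg_shift (log 1) } proceeds as

       accum = x;                  val_so_far = 1
       accum = accum + (x << 2);   val_so_far = 1 + 4  = 5
       accum = accum << 1;         val_so_far = 5 << 1 = 10

   so the closing assertion can verify that the tracked val_so_far
   agrees with VAL in the significant bits of the result mode. */
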
3397 /* Perform a multiplication and return an rtx for the result.
3398 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3399 TARGET is a suggestion for where to store the result (an rtx).
3400
3401 We check specially for a constant integer as OP1.
3402 If you want this check for OP0 as well, then before calling
3403 you should swap the two operands if OP0 would be constant. */
3404
3405 rtx
3406 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3407 int unsignedp, bool no_libcall)
3408 {
3409 enum mult_variant variant;
3410 struct algorithm algorithm;
3411 rtx scalar_op1;
3412 int max_cost;
3413 bool speed = optimize_insn_for_speed_p ();
3414 bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3415
3416 if (CONSTANT_P (op0))
3417 std::swap (op0, op1);
3418
3419 /* For vectors, there are several simplifications that can be made if
3420 all elements of the vector constant are identical. */
3421 scalar_op1 = unwrap_const_vec_duplicate (op1);
3422
3423 if (INTEGRAL_MODE_P (mode))
3424 {
3425 rtx fake_reg;
3426 HOST_WIDE_INT coeff;
3427 bool is_neg;
3428 int mode_bitsize;
3429
3430 if (op1 == CONST0_RTX (mode))
3431 return op1;
3432 if (op1 == CONST1_RTX (mode))
3433 return op0;
3434 if (op1 == CONSTM1_RTX (mode))
3435 return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3436 op0, target, 0);
3437
3438 if (do_trapv)
3439 goto skip_synth;
3440
3441 /* If mode is integer vector mode, check if the backend supports
3442 vector lshift (by scalar or vector) at all. If not, we can't use
3443 synthesized multiply. */
3444 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
3445 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
3446 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
3447 goto skip_synth;
3448
3449 /* These are the operations that are potentially turned into
3450 a sequence of shifts and additions. */
3451 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3452
3453 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3454 less than or equal in size to `unsigned int' this doesn't matter.
3455 If the mode is larger than `unsigned int', then synth_mult works
3456 only if the constant value exactly fits in an `unsigned int' without
3457 any truncation. This means that multiplying by negative values does
3458 not work; results are off by 2^32 on a 32-bit machine. */
3459 if (CONST_INT_P (scalar_op1))
3460 {
3461 coeff = INTVAL (scalar_op1);
3462 is_neg = coeff < 0;
3463 }
3464 #if TARGET_SUPPORTS_WIDE_INT
3465 else if (CONST_WIDE_INT_P (scalar_op1))
3466 #else
3467 else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3468 #endif
3469 {
3470 int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
3471 /* Perfect power of 2 (other than 1, which is handled above). */
3472 if (shift > 0)
3473 return expand_shift (LSHIFT_EXPR, mode, op0,
3474 shift, target, unsignedp);
3475 else
3476 goto skip_synth;
3477 }
3478 else
3479 goto skip_synth;
3480
3481 /* We used to test optimize here, on the grounds that it's better to
3482 produce a smaller program when -O is not used. But this causes
3483 such a terrible slowdown sometimes that it seems better to always
3484 use synth_mult. */
3485
3486 /* Special case powers of two. */
3487 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3488 && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3489 return expand_shift (LSHIFT_EXPR, mode, op0,
3490 floor_log2 (coeff), target, unsignedp);
3491
3492 fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3493
3494 /* Attempt to handle multiplication of DImode values by negative
3495 coefficients, by performing the multiplication by a positive
3496 multiplier and then inverting the result. */
3497 if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3498 {
3499 /* It's safe to use -coeff even for INT_MIN, as the
3500 result is interpreted as an unsigned coefficient.
3501 Exclude cost of op0 from max_cost to match the cost
3502 calculation of the synth_mult. */
3503 coeff = -(unsigned HOST_WIDE_INT) coeff;
3504 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
3505 mode, speed)
3506 - neg_cost (speed, mode));
3507 if (max_cost <= 0)
3508 goto skip_synth;
3509
3510 /* Special case powers of two. */
3511 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3512 {
3513 rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3514 floor_log2 (coeff), target, unsignedp);
3515 return expand_unop (mode, neg_optab, temp, target, 0);
3516 }
3517
3518 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3519 max_cost))
3520 {
3521 rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3522 &algorithm, variant);
3523 return expand_unop (mode, neg_optab, temp, target, 0);
3524 }
3525 goto skip_synth;
3526 }
3527
3528 /* Exclude cost of op0 from max_cost to match the cost
3529 calculation of the synth_mult. */
3530 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
3531 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3532 return expand_mult_const (mode, op0, coeff, target,
3533 &algorithm, variant);
3534 }
3535 skip_synth:
3536
3537 /* Expand x*2.0 as x+x. */
3538 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
3539 && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
3540 {
3541 op0 = force_reg (GET_MODE (op0), op0);
3542 return expand_binop (mode, add_optab, op0, op0,
3543 target, unsignedp,
3544 no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3545 }
3546
3547 /* This used to use umul_optab if unsigned, but for non-widening multiply
3548 there is no difference between signed and unsigned. */
3549 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3550 op0, op1, target, unsignedp,
3551 no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3552 gcc_assert (op0 || no_libcall);
3553 return op0;
3554 }
3555
3556 /* Return a cost estimate for multiplying a register by the given
3557 COEFFicient in the given MODE and SPEED. */
3558
3559 int
3560 mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3561 {
3562 int max_cost;
3563 struct algorithm algorithm;
3564 enum mult_variant variant;
3565
3566 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3567 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
3568 mode, speed);
3569 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3570 return algorithm.cost.cost;
3571 else
3572 return max_cost;
3573 }
3574
3575 /* Perform a widening multiplication and return an rtx for the result.
3576 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3577 TARGET is a suggestion for where to store the result (an rtx).
3578 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3579 or smul_widen_optab.
3580
3581 We check specially for a constant integer as OP1, comparing the
3582 cost of a widening multiply against the cost of a sequence of shifts
3583 and adds. */
3584
3585 rtx
3586 expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3587 int unsignedp, optab this_optab)
3588 {
3589 bool speed = optimize_insn_for_speed_p ();
3590 rtx cop1;
3591
3592 if (CONST_INT_P (op1)
3593 && GET_MODE (op0) != VOIDmode
3594 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3595 this_optab == umul_widen_optab))
3596 && CONST_INT_P (cop1)
3597 && (INTVAL (cop1) >= 0
3598 || HWI_COMPUTABLE_MODE_P (mode)))
3599 {
3600 HOST_WIDE_INT coeff = INTVAL (cop1);
3601 int max_cost;
3602 enum mult_variant variant;
3603 struct algorithm algorithm;
3604
3605 if (coeff == 0)
3606 return CONST0_RTX (mode);
3607
3608 /* Special case powers of two. */
3609 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3610 {
3611 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3612 return expand_shift (LSHIFT_EXPR, mode, op0,
3613 floor_log2 (coeff), target, unsignedp);
3614 }
3615
3616 /* Exclude cost of op0 from max_cost to match the cost
3617 calculation of the synth_mult. */
3618 max_cost = mul_widen_cost (speed, mode);
3619 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3620 max_cost))
3621 {
3622 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3623 return expand_mult_const (mode, op0, coeff, target,
3624 &algorithm, variant);
3625 }
3626 }
3627 return expand_binop (mode, this_optab, op0, op1, target,
3628 unsignedp, OPTAB_LIB_WIDEN);
3629 }
3630 \f
3631 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3632 replace division by D, and put the least significant N bits of the result
3633 in *MULTIPLIER_PTR and return the most significant bit.
3634
3635 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3636 needed precision is in PRECISION (should be <= N).
3637
3638 PRECISION should be as small as possible so this function can choose
3639 the multiplier more freely.
3640
3641 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
3642 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3643
3644 Using this function, x/D will be equal to ((x * m) >> N) >> *POST_SHIFT_PTR,
3645 where m is the full N + 1 bit multiplier. */
3646
3647 unsigned HOST_WIDE_INT
3648 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3649 unsigned HOST_WIDE_INT *multiplier_ptr,
3650 int *post_shift_ptr, int *lgup_ptr)
3651 {
3652 int lgup, post_shift;
3653 int pow, pow2;
3654
3655 /* lgup = ceil(log2(divisor)); */
3656 lgup = ceil_log2 (d);
3657
3658 gcc_assert (lgup <= n);
3659
3660 pow = n + lgup;
3661 pow2 = n + lgup - precision;
3662
3663 /* mlow = 2^(N + lgup)/d */
3664 wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3665 wide_int mlow = wi::udiv_trunc (val, d);
3666
3667 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3668 val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3669 wide_int mhigh = wi::udiv_trunc (val, d);
3670
3671 /* If precision == N, then mlow, mhigh exceed 2^N
3672 (but they do not exceed 2^(N+1)). */
3673
3674 /* Reduce to lowest terms. */
3675 for (post_shift = lgup; post_shift > 0; post_shift--)
3676 {
3677 unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3678 HOST_BITS_PER_WIDE_INT);
3679 unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3680 HOST_BITS_PER_WIDE_INT);
3681 if (ml_lo >= mh_lo)
3682 break;
3683
3684 mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3685 mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3686 }
3687
3688 *post_shift_ptr = post_shift;
3689 *lgup_ptr = lgup;
3690 if (n < HOST_BITS_PER_WIDE_INT)
3691 {
3692 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
3693 *multiplier_ptr = mhigh.to_uhwi () & mask;
3694 return mhigh.to_uhwi () > mask;
3695 }
3696 else
3697 {
3698 *multiplier_ptr = mhigh.to_uhwi ();
3699 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3700 }
3701 }
3702
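/* Editorial worked example (values computed by hand): for
   choose_multiplier (3, 32, 32, ...), lgup == 2, mlow == 2^34/3
   and mhigh == (2^34 + 2^2)/3; the reduction loop shrinks these
   until post_shift == 1 and the multiplier is 0xAAAAAAAB.  That
   fits in 32 bits, so the return value is 0 and an unsigned
   32-bit division by 3 becomes

       x / 3 == high32 (x * 0xAAAAAAAB) >> 1

   where high32 denotes the upper half of the 64-bit product. */
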
3703 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3704 congruent to 1 (mod 2**N). */
3705
3706 static unsigned HOST_WIDE_INT
3707 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3708 {
3709 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3710
3711 /* The algorithm notes that the choice y = x satisfies
3712 x*y == 1 mod 2^3, since x is assumed odd.
3713 Each iteration doubles the number of bits of significance in y. */
3714
3715 unsigned HOST_WIDE_INT mask;
3716 unsigned HOST_WIDE_INT y = x;
3717 int nbit = 3;
3718
3719 mask = (n == HOST_BITS_PER_WIDE_INT
3720 ? HOST_WIDE_INT_M1U
3721 : (HOST_WIDE_INT_1U << n) - 1);
3722
3723 while (nbit < n)
3724 {
3725 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3726 nbit *= 2;
3727 }
3728 return y;
3729 }
3730
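/* Editorial worked example: for x == 7, n == 8 the seed y == 7 is
   already correct mod 2^3 (7*7 == 49 == 1 mod 8).  One iteration
   gives y == 7*(2 - 49) mod 256 == 183, and 7 * 183 == 1281
   == 5*256 + 1; a second iteration leaves 183 fixed while nbit
   doubles past 8, so invert_mod2n (7, 8) == 183 (0xB7). */
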
3731 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3732 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3733 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3734 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3735 become signed.
3736
3737 The result is put in TARGET if that is convenient.
3738
3739 MODE is the mode of operation. */
3740
3741 rtx
3742 expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0,
3743 rtx op1, rtx target, int unsignedp)
3744 {
3745 rtx tem;
3746 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3747
3748 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3749 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3750 tem = expand_and (mode, tem, op1, NULL_RTX);
3751 adj_operand
3752 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3753 adj_operand);
3754
3755 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3756 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3757 tem = expand_and (mode, tem, op0, NULL_RTX);
3758 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3759 target);
3760
3761 return target;
3762 }
3763
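/* Editorial note on the identity used above (not in the original
   source): writing an N-bit value as x_signed = x_unsigned
   - 2^N * [x < 0], the two high parts are related modulo 2^N by

       high_signed (x, y) = high_unsigned (x, y)
                            - [x < 0] * y - [y < 0] * x

   and symmetrically with + when converting the other way, which
   is why ADJ_CODE is PLUS for UNSIGNEDP.  The arithmetic shift by
   N - 1 produces the all-ones mask [x < 0], so the AND yields the
   conditional operand directly. */
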
3764 /* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3765
3766 static rtx
3767 extract_high_half (scalar_int_mode mode, rtx op)
3768 {
3769 if (mode == word_mode)
3770 return gen_highpart (mode, op);
3771
3772 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3773
3774 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3775 GET_MODE_BITSIZE (mode), 0, 1);
3776 return convert_modes (mode, wider_mode, op, 0);
3777 }
3778
3779 /* Like expmed_mult_highpart, but only consider using a multiplication
3780 optab. OP1 is an rtx for the constant operand. */
3781
3782 static rtx
3783 expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1,
3784 rtx target, int unsignedp, int max_cost)
3785 {
3786 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3787 optab moptab;
3788 rtx tem;
3789 int size;
3790 bool speed = optimize_insn_for_speed_p ();
3791
3792 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3793
3794 size = GET_MODE_BITSIZE (mode);
3795
3796 /* Firstly, try using a multiplication insn that only generates the needed
3797 high part of the product, and in the sign flavor of unsignedp. */
3798 if (mul_highpart_cost (speed, mode) < max_cost)
3799 {
3800 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3801 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3802 unsignedp, OPTAB_DIRECT);
3803 if (tem)
3804 return tem;
3805 }
3806
3807 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3808 Need to adjust the result after the multiplication. */
3809 if (size - 1 < BITS_PER_WORD
3810 && (mul_highpart_cost (speed, mode)
3811 + 2 * shift_cost (speed, mode, size-1)
3812 + 4 * add_cost (speed, mode) < max_cost))
3813 {
3814 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3815 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3816 unsignedp, OPTAB_DIRECT);
3817 if (tem)
3818 /* We used the wrong signedness. Adjust the result. */
3819 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3820 tem, unsignedp);
3821 }
3822
3823 /* Try widening multiplication. */
3824 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3825 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3826 && mul_widen_cost (speed, wider_mode) < max_cost)
3827 {
3828 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3829 unsignedp, OPTAB_WIDEN);
3830 if (tem)
3831 return extract_high_half (mode, tem);
3832 }
3833
3834 /* Try widening the mode and performing a non-widening multiplication. */
3835 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3836 && size - 1 < BITS_PER_WORD
3837 && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3838 < max_cost))
3839 {
3840 rtx_insn *insns;
3841 rtx wop0, wop1;
3842
3843 /* We need to widen the operands, for example to ensure the
3844 constant multiplier is correctly sign or zero extended.
3845 Use a sequence to clean up any instructions emitted by
3846 the conversions if things don't work out. */
3847 start_sequence ();
3848 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3849 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3850 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3851 unsignedp, OPTAB_WIDEN);
3852 insns = get_insns ();
3853 end_sequence ();
3854
3855 if (tem)
3856 {
3857 emit_insn (insns);
3858 return extract_high_half (mode, tem);
3859 }
3860 }
3861
3862 /* Try widening multiplication of opposite signedness, and adjust. */
3863 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3864 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3865 && size - 1 < BITS_PER_WORD
3866 && (mul_widen_cost (speed, wider_mode)
3867 + 2 * shift_cost (speed, mode, size-1)
3868 + 4 * add_cost (speed, mode) < max_cost))
3869 {
3870 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3871 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3872 if (tem != 0)
3873 {
3874 tem = extract_high_half (mode, tem);
3875 /* We used the wrong signedness. Adjust the result. */
3876 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3877 target, unsignedp);
3878 }
3879 }
3880
3881 return 0;
3882 }
3883
3884 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3885 putting the high half of the result in TARGET if that is convenient,
3886 and return where the result is. If the operation cannot be performed,
3887 0 is returned.
3888
3889 MODE is the mode of operation and result.
3890
3891 UNSIGNEDP nonzero means unsigned multiply.
3892
3893 MAX_COST is the total allowed cost for the expanded RTL. */
3894
3895 static rtx
3896 expmed_mult_highpart (scalar_int_mode mode, rtx op0, rtx op1,
3897 rtx target, int unsignedp, int max_cost)
3898 {
3899 unsigned HOST_WIDE_INT cnst1;
3900 int extra_cost;
3901 bool sign_adjust = false;
3902 enum mult_variant variant;
3903 struct algorithm alg;
3904 rtx tem;
3905 bool speed = optimize_insn_for_speed_p ();
3906
3907 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3908 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3909
3910 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3911
3912 /* We can't optimize modes wider than BITS_PER_WORD.
3913 ??? We might be able to perform double-word arithmetic if
3914 mode == word_mode, however all the cost calculations in
3915 synth_mult etc. assume single-word operations. */
3916 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3917 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3918 return expmed_mult_highpart_optab (mode, op0, op1, target,
3919 unsignedp, max_cost);
3920
3921 extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3922
3923 /* Check whether we try to multiply by a negative constant. */
3924 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3925 {
3926 sign_adjust = true;
3927 extra_cost += add_cost (speed, mode);
3928 }
3929
3930 /* See whether shift/add multiplication is cheap enough. */
3931 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3932 max_cost - extra_cost))
3933 {
3934 /* See whether the specialized multiplication optabs are
3935 cheaper than the shift/add version. */
3936 tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3937 alg.cost.cost + extra_cost);
3938 if (tem)
3939 return tem;
3940
3941 tem = convert_to_mode (wider_mode, op0, unsignedp);
3942 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3943 tem = extract_high_half (mode, tem);
3944
3945 /* Adjust result for signedness. */
3946 if (sign_adjust)
3947 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3948
3949 return tem;
3950 }
3951 return expmed_mult_highpart_optab (mode, op0, op1, target,
3952 unsignedp, max_cost);
3953 }
3954
3955
3956 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3957
3958 static rtx
3959 expand_smod_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
3960 {
3961 rtx result, temp, shift;
3962 rtx_code_label *label;
3963 int logd;
3964 int prec = GET_MODE_PRECISION (mode);
3965
3966 logd = floor_log2 (d);
3967 result = gen_reg_rtx (mode);
3968
3969 /* Avoid conditional branches when they're expensive. */
3970 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3971 && optimize_insn_for_speed_p ())
3972 {
3973 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3974 mode, 0, -1);
3975 if (signmask)
3976 {
3977 HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
3978 signmask = force_reg (mode, signmask);
3979 shift = gen_int_shift_amount (mode, GET_MODE_BITSIZE (mode) - logd);
3980
3981 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3982 which instruction sequence to use. If logical right shifts
3983 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3984 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3985
3986 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3987 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3988 || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
3989 > COSTS_N_INSNS (2)))
3990 {
3991 temp = expand_binop (mode, xor_optab, op0, signmask,
3992 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3993 temp = expand_binop (mode, sub_optab, temp, signmask,
3994 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3995 temp = expand_binop (mode, and_optab, temp,
3996 gen_int_mode (masklow, mode),
3997 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3998 temp = expand_binop (mode, xor_optab, temp, signmask,
3999 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4000 temp = expand_binop (mode, sub_optab, temp, signmask,
4001 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4002 }
4003 else
4004 {
4005 signmask = expand_binop (mode, lshr_optab, signmask, shift,
4006 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4007 signmask = force_reg (mode, signmask);
4008
4009 temp = expand_binop (mode, add_optab, op0, signmask,
4010 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4011 temp = expand_binop (mode, and_optab, temp,
4012 gen_int_mode (masklow, mode),
4013 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4014 temp = expand_binop (mode, sub_optab, temp, signmask,
4015 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4016 }
4017 return temp;
4018 }
4019 }
4020
4021 /* Mask contains the mode's signbit and the significant bits of the
4022 modulus. By including the signbit in the operation, many targets
4023 can avoid an explicit compare operation in the following comparison
4024 against zero. */
4025 wide_int mask = wi::mask (logd, false, prec);
4026 mask = wi::set_bit (mask, prec - 1);
4027
4028 temp = expand_binop (mode, and_optab, op0,
4029 immed_wide_int_const (mask, mode),
4030 result, 1, OPTAB_LIB_WIDEN);
4031 if (temp != result)
4032 emit_move_insn (result, temp);
4033
4034 label = gen_label_rtx ();
4035 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
4036
4037 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
4038 0, OPTAB_LIB_WIDEN);
4039
4040 mask = wi::mask (logd, true, prec);
4041 temp = expand_binop (mode, ior_optab, temp,
4042 immed_wide_int_const (mask, mode),
4043 result, 1, OPTAB_LIB_WIDEN);
4044 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
4045 0, OPTAB_LIB_WIDEN);
4046 if (temp != result)
4047 emit_move_insn (result, temp);
4048 emit_label (label);
4049 return result;
4050 }
4051
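/* Editorial worked example for the signmask path above: with
   x == -7 and d == 4 (logd == 2), signmask is -1, so the XOR/SUB
   sequence computes ((x ^ -1) - -1) == -x == 7, masks to
   7 & 3 == 3, and folds the sign back in with
   ((3 ^ -1) - -1) == -3, matching C's truncating -7 % 4 == -3.
   For nonnegative x, signmask is 0 and the whole sequence
   degenerates to x & 3. */
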
4052 /* Expand signed division of OP0 by a power of two D in mode MODE.
4053 This routine is only called for positive values of D. */
4054
4055 static rtx
4056 expand_sdiv_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
4057 {
4058 rtx temp;
4059 rtx_code_label *label;
4060 int logd;
4061
4062 logd = floor_log2 (d);
4063
4064 if (d == 2
4065 && BRANCH_COST (optimize_insn_for_speed_p (),
4066 false) >= 1)
4067 {
4068 temp = gen_reg_rtx (mode);
4069 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
4070 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
4071 0, OPTAB_LIB_WIDEN);
4072 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4073 }
4074
4075 if (HAVE_conditional_move
4076 && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
4077 {
4078 rtx temp2;
4079
4080 start_sequence ();
4081 temp2 = copy_to_mode_reg (mode, op0);
4082 temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
4083 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4084 temp = force_reg (mode, temp);
4085
4086 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
4087 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
4088 mode, temp, temp2, mode, 0);
4089 if (temp2)
4090 {
4091 rtx_insn *seq = get_insns ();
4092 end_sequence ();
4093 emit_insn (seq);
4094 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
4095 }
4096 end_sequence ();
4097 }
4098
4099 if (BRANCH_COST (optimize_insn_for_speed_p (),
4100 false) >= 2)
4101 {
4102 int ushift = GET_MODE_BITSIZE (mode) - logd;
4103
4104 temp = gen_reg_rtx (mode);
4105 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
4106 if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
4107 || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
4108 > COSTS_N_INSNS (1))
4109 temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
4110 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4111 else
4112 temp = expand_shift (RSHIFT_EXPR, mode, temp,
4113 ushift, NULL_RTX, 1);
4114 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
4115 0, OPTAB_LIB_WIDEN);
4116 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4117 }
4118
4119 label = gen_label_rtx ();
4120 temp = copy_to_mode_reg (mode, op0);
4121 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
4122 expand_inc (temp, gen_int_mode (d - 1, mode));
4123 emit_label (label);
4124 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
4125 }
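
/* Editorial worked example for the branchless paths above: signed
   division by a power of two must round toward zero, so negative
   operands get d - 1 added before the arithmetic shift.  For
   x == -7 and d == 4, (x + 3) >> 2 == -1, matching C's
   -7 / 4 == -1, whereas the plain shift x >> 2 would give the
   floor value -2. */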
4126 \f
4127 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
4128 if that is convenient, and returning where the result is.
4129 You may request either the quotient or the remainder as the result;
4130 specify REM_FLAG nonzero to get the remainder.
4131
4132 CODE is the expression code for which kind of division this is;
4133 it controls how rounding is done. MODE is the machine mode to use.
4134 UNSIGNEDP nonzero means do unsigned division. */
4135
4136 /* ??? For CEIL_MOD_EXPR, we can compute an incorrect remainder with ANDI
4137 and then correct it by or'ing in the missing high bits
4138 if the result of the ANDI is nonzero.
4139 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
4140 This could optimize to a bfexts instruction.
4141 But C doesn't use these operations, so their optimizations are
4142 left for later. */
4143 /* ??? For modulo, we don't actually need the high part of the first product;
4144 the low part will do nicely. And for small divisors, the second multiply
4145 can also be a low-part only multiply or even be completely left out.
4146 E.g. to calculate the remainder of a division by 3 with a 32 bit
4147 multiply, multiply with 0x55555556 and extract the upper two bits;
4148 the result is exact for inputs up to 0x1fffffff.
4149 The input range can be reduced by using cross-sum rules.
4150 For odd divisors >= 3, the following table gives right shift counts
4151 so that if a number is shifted by an integer multiple of the given
4152 amount, the remainder stays the same:
4153 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
4154 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
4155 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
4156 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
4157 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
4158
4159 Cross-sum rules for even numbers can be derived by leaving as many bits
4160 to the right alone as the divisor has zeros to the right.
4161 E.g. if x is an unsigned 32 bit number:
4162 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
4163 */
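/* Editorial check of the remainder-by-3 trick above (values
   computed by hand): the low half of x * 0x55555556 approximates
   the fraction x/3, so its top two bits are the remainder.  For
   x == 5 the 64-bit product is 0x1AAAAAAAE, and its low 32 bits
   shifted right by 30 give 2; for x == 7 the product is
   0x25555555A, giving 1.  Both agree with x mod 3. */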
4164
4165 rtx
4166 expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
4167 rtx op0, rtx op1, rtx target, int unsignedp)
4168 {
4169 machine_mode compute_mode;
4170 rtx tquotient;
4171 rtx quotient = 0, remainder = 0;
4172 rtx_insn *last;
4173 rtx_insn *insn;
4174 optab optab1, optab2;
4175 int op1_is_constant, op1_is_pow2 = 0;
4176 int max_cost, extra_cost;
4177 static HOST_WIDE_INT last_div_const = 0;
4178 bool speed = optimize_insn_for_speed_p ();
4179
4180 op1_is_constant = CONST_INT_P (op1);
4181 if (op1_is_constant)
4182 {
4183 wide_int ext_op1 = rtx_mode_t (op1, mode);
4184 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4185 || (! unsignedp
4186 && wi::popcount (wi::neg (ext_op1)) == 1));
4187 }
4188
4189 /*
4190 This is the structure of expand_divmod:
4191
4192 First comes code to fix up the operands so we can perform the operations
4193 correctly and efficiently.
4194
4195 Second comes a switch statement with code specific for each rounding mode.
4196 For some special operands this code emits all RTL for the desired
4197 operation; in other cases, it generates only a quotient and stores it in
4198 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
4199 to indicate that it has not done anything.
4200
4201 Last comes code that finishes the operation. If QUOTIENT is set and
4202 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
4203 QUOTIENT is not set, it is computed using trunc rounding.
4204
4205 We try to generate special code for division and remainder when OP1 is a
4206 constant. If |OP1| = 2**n we can use shifts and some other fast
4207 operations. For other values of OP1, we compute a carefully selected
4208 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
4209 by m.
4210
4211 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
4212 half of the product. Different strategies for generating the product are
4213 implemented in expmed_mult_highpart.
4214
4215 If what we actually want is the remainder, we generate that by another
4216 by-constant multiplication and a subtraction. */
4217
4218 /* We shouldn't be called with OP1 == const1_rtx, but some of the
4219 code below will malfunction if we are, so check here and handle
4220 the special case if so. */
4221 if (op1 == const1_rtx)
4222 return rem_flag ? const0_rtx : op0;
4223
4224 /* When dividing by -1, we could get an overflow.
4225 negv_optab can handle overflows. */
4226 if (! unsignedp && op1 == constm1_rtx)
4227 {
4228 if (rem_flag)
4229 return const0_rtx;
4230 return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
4231 ? negv_optab : neg_optab, op0, target, 0);
4232 }
4233
4234 if (target
4235 /* Don't use the function value register as a target
4236 since we have to read it as well as write it,
4237 and function-inlining gets confused by this. */
4238 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
4239 /* Don't clobber an operand while doing a multi-step calculation. */
4240 || ((rem_flag || op1_is_constant)
4241 && (reg_mentioned_p (target, op0)
4242 || (MEM_P (op0) && MEM_P (target))))
4243 || reg_mentioned_p (target, op1)
4244 || (MEM_P (op1) && MEM_P (target))))
4245 target = 0;
4246
4247 /* Get the mode in which to perform this computation. Normally it will
4248 be MODE, but sometimes we can't do the desired operation in MODE.
4249 If so, pick a wider mode in which we can do the operation. Convert
4250 to that mode at the start to avoid repeated conversions.
4251
4252 First see what operations we need. These depend on the expression
4253 we are evaluating. (We assume that divxx3 insns exist under the
4254 same conditions that modxx3 insns and that these insns don't normally
4255 fail. If these assumptions are not correct, we may generate less
4256 efficient code in some cases.)
4257
4258 Then see if we find a mode in which we can open-code that operation
4259 (either a division, modulus, or shift). Finally, check for the smallest
4260 mode for which we can do the operation with a library call. */
4261
4262 /* We might want to refine this now that we have division-by-constant
4263 optimization. Since expmed_mult_highpart tries so many variants, it is
4264 not straightforward to generalize this. Maybe we should make an array
4265 of possible modes in init_expmed? Save this for GCC 2.7. */
4266
4267 optab1 = (op1_is_pow2
4268 ? (unsignedp ? lshr_optab : ashr_optab)
4269 : (unsignedp ? udiv_optab : sdiv_optab));
4270 optab2 = (op1_is_pow2 ? optab1
4271 : (unsignedp ? udivmod_optab : sdivmod_optab));
4272
4273 FOR_EACH_MODE_FROM (compute_mode, mode)
4274 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4275 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4276 break;
4277
4278 if (compute_mode == VOIDmode)
4279 FOR_EACH_MODE_FROM (compute_mode, mode)
4280 if (optab_libfunc (optab1, compute_mode)
4281 || optab_libfunc (optab2, compute_mode))
4282 break;
4283
4284 /* If we still couldn't find a mode, use MODE, but expand_binop will
4285 probably die. */
4286 if (compute_mode == VOIDmode)
4287 compute_mode = mode;
4288
4289 if (target && GET_MODE (target) == compute_mode)
4290 tquotient = target;
4291 else
4292 tquotient = gen_reg_rtx (compute_mode);
4293
4294 #if 0
4295 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4296 (mode), and thereby get better code when OP1 is a constant. Do that
4297 later. It will require going over all usages of SIZE below. */
4298 size = GET_MODE_BITSIZE (mode);
4299 #endif
4300
4301 /* Only deduct something for a REM if the last divide done was
4302 for a different constant. Then set the constant of the last
4303 divide. */
4304 max_cost = (unsignedp
4305 ? udiv_cost (speed, compute_mode)
4306 : sdiv_cost (speed, compute_mode));
4307 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4308 && INTVAL (op1) == last_div_const))
4309 max_cost -= (mul_cost (speed, compute_mode)
4310 + add_cost (speed, compute_mode));
4311
4312 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4313
4314 /* Now convert to the best mode to use. */
4315 if (compute_mode != mode)
4316 {
4317 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4318 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4319
4320 /* convert_modes may have placed op1 into a register, so we
4321 must recompute the following. */
4322 op1_is_constant = CONST_INT_P (op1);
4323 if (op1_is_constant)
4324 {
4325 wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
4326 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4327 || (! unsignedp
4328 && wi::popcount (wi::neg (ext_op1)) == 1));
4329 }
4330 else
4331 op1_is_pow2 = 0;
4332 }
4333
4334 /* If one of the operands is a volatile MEM, copy it into a register. */
4335
4336 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4337 op0 = force_reg (compute_mode, op0);
4338 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4339 op1 = force_reg (compute_mode, op1);
4340
4341 /* If we need the remainder or if OP1 is constant, we need to
4342 put OP0 in a register in case it has any queued subexpressions. */
4343 if (rem_flag || op1_is_constant)
4344 op0 = force_reg (compute_mode, op0);
4345
4346 last = get_last_insn ();
4347
4348 /* Promote floor rounding to trunc rounding for unsigned operations. */
4349 if (unsignedp)
4350 {
4351 if (code == FLOOR_DIV_EXPR)
4352 code = TRUNC_DIV_EXPR;
4353 if (code == FLOOR_MOD_EXPR)
4354 code = TRUNC_MOD_EXPR;
4355 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4356 code = TRUNC_DIV_EXPR;
4357 }
4358
4359 if (op1 != const0_rtx)
4360 switch (code)
4361 {
4362 case TRUNC_MOD_EXPR:
4363 case TRUNC_DIV_EXPR:
4364 if (op1_is_constant)
4365 {
4366 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4367 int size = GET_MODE_BITSIZE (int_mode);
4368 if (unsignedp)
4369 {
4370 unsigned HOST_WIDE_INT mh, ml;
4371 int pre_shift, post_shift;
4372 int dummy;
4373 wide_int wd = rtx_mode_t (op1, int_mode);
4374 unsigned HOST_WIDE_INT d = wd.to_uhwi ();
4375
4376 if (wi::popcount (wd) == 1)
4377 {
4378 pre_shift = floor_log2 (d);
4379 if (rem_flag)
4380 {
4381 unsigned HOST_WIDE_INT mask
4382 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4383 remainder
4384 = expand_binop (int_mode, and_optab, op0,
4385 gen_int_mode (mask, int_mode),
4386 remainder, 1,
4387 OPTAB_LIB_WIDEN);
4388 if (remainder)
4389 return gen_lowpart (mode, remainder);
4390 }
4391 quotient = expand_shift (RSHIFT_EXPR, int_mode, op0,
4392 pre_shift, tquotient, 1);
4393 }
4394 else if (size <= HOST_BITS_PER_WIDE_INT)
4395 {
4396 if (d >= (HOST_WIDE_INT_1U << (size - 1)))
4397 {
4398 /* Most significant bit of divisor is set; the quotient can
4399 only be 0 or 1, so emit an scc insn. */
4400 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4401 int_mode, 1, 1);
4402 }
4403 else
4404 {
4405 /* Find a suitable multiplier and right shift count
4406 instead of multiplying with D. */
4407
4408 mh = choose_multiplier (d, size, size,
4409 &ml, &post_shift, &dummy);
4410
4411 /* If the suggested multiplier is more than SIZE bits,
4412 we can do better for even divisors, using an
4413 initial right shift. */
4414 if (mh != 0 && (d & 1) == 0)
4415 {
4416 pre_shift = ctz_or_zero (d);
4417 mh = choose_multiplier (d >> pre_shift, size,
4418 size - pre_shift,
4419 &ml, &post_shift, &dummy);
4420 gcc_assert (!mh);
4421 }
4422 else
4423 pre_shift = 0;
4424
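/* Editorial worked example (values computed by hand): for d == 7
   in 32 bits, choose_multiplier returns a 33-bit multiplier, so
   mh == 1 with ml == 0x24924925 and post_shift == 3.  The
   sequence below therefore computes

       t1 = high32 (x * 0x24924925)
       quotient = (t1 + ((x - t1) >> 1)) >> 2

   where high32 is shorthand for the unsigned high-part multiply;
   the add-back recovers the dropped 33rd multiplier bit without
   needing a wider addition. */
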
4425 if (mh != 0)
4426 {
4427 rtx t1, t2, t3, t4;
4428
4429 if (post_shift - 1 >= BITS_PER_WORD)
4430 goto fail1;
4431
4432 extra_cost
4433 = (shift_cost (speed, int_mode, post_shift - 1)
4434 + shift_cost (speed, int_mode, 1)
4435 + 2 * add_cost (speed, int_mode));
4436 t1 = expmed_mult_highpart
4437 (int_mode, op0, gen_int_mode (ml, int_mode),
4438 NULL_RTX, 1, max_cost - extra_cost);
4439 if (t1 == 0)
4440 goto fail1;
4441 t2 = force_operand (gen_rtx_MINUS (int_mode,
4442 op0, t1),
4443 NULL_RTX);
4444 t3 = expand_shift (RSHIFT_EXPR, int_mode,
4445 t2, 1, NULL_RTX, 1);
4446 t4 = force_operand (gen_rtx_PLUS (int_mode,
4447 t1, t3),
4448 NULL_RTX);
4449 quotient = expand_shift
4450 (RSHIFT_EXPR, int_mode, t4,
4451 post_shift - 1, tquotient, 1);
4452 }
4453 else
4454 {
4455 rtx t1, t2;
4456
4457 if (pre_shift >= BITS_PER_WORD
4458 || post_shift >= BITS_PER_WORD)
4459 goto fail1;
4460
4461 t1 = expand_shift
4462 (RSHIFT_EXPR, int_mode, op0,
4463 pre_shift, NULL_RTX, 1);
4464 extra_cost
4465 = (shift_cost (speed, int_mode, pre_shift)
4466 + shift_cost (speed, int_mode, post_shift));
4467 t2 = expmed_mult_highpart
4468 (int_mode, t1,
4469 gen_int_mode (ml, int_mode),
4470 NULL_RTX, 1, max_cost - extra_cost);
4471 if (t2 == 0)
4472 goto fail1;
4473 quotient = expand_shift
4474 (RSHIFT_EXPR, int_mode, t2,
4475 post_shift, tquotient, 1);
4476 }
4477 }
4478 }
4479 else /* Too wide mode to use tricky code */
4480 break;
4481
4482 insn = get_last_insn ();
4483 if (insn != last)
4484 set_dst_reg_note (insn, REG_EQUAL,
4485 gen_rtx_UDIV (int_mode, op0, op1),
4486 quotient);
4487 }
4488 else /* TRUNC_DIV, signed */
4489 {
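/* Editorial worked example (values computed by hand): for d == 7
   in 32 bits, choose_multiplier (7, 32, 31, ...) yields
   ml == 0x92492493 with post_shift == 2.  That value has bit 31
   set, so the second branch below sign-extends it and compensates
   with an add of op0, computing

       quotient = ((high32s (x * ml) + x) >> 2) - (x >> 31)

   where high32s is shorthand for the signed high-part multiply;
   this is the usual signed magic-number division. */
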
4490 unsigned HOST_WIDE_INT ml;
4491 int lgup, post_shift;
4492 rtx mlr;
4493 HOST_WIDE_INT d = INTVAL (op1);
4494 unsigned HOST_WIDE_INT abs_d;
4495
4496 /* Not prepared to handle division/remainder by
4497 0xffffffffffffffff8000000000000000 etc. */
4498 if (d == HOST_WIDE_INT_MIN && size > HOST_BITS_PER_WIDE_INT)
4499 break;
4500
4501 /* Since d might be INT_MIN, we have to cast to
4502 unsigned HOST_WIDE_INT before negating to avoid
4503 undefined signed overflow. */
4504 abs_d = (d >= 0
4505 ? (unsigned HOST_WIDE_INT) d
4506 : - (unsigned HOST_WIDE_INT) d);
4507
4508 /* n rem d = n rem -d */
4509 if (rem_flag && d < 0)
4510 {
4511 d = abs_d;
4512 op1 = gen_int_mode (abs_d, int_mode);
4513 }
4514
4515 if (d == 1)
4516 quotient = op0;
4517 else if (d == -1)
4518 quotient = expand_unop (int_mode, neg_optab, op0,
4519 tquotient, 0);
4520 else if (size <= HOST_BITS_PER_WIDE_INT
4521 && abs_d == HOST_WIDE_INT_1U << (size - 1))
4522 {
4523 /* This case is not handled correctly below. */
4524 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4525 int_mode, 1, 1);
4526 if (quotient == 0)
4527 goto fail1;
4528 }
4529 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4530 && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
4531 && (rem_flag
4532 ? smod_pow2_cheap (speed, int_mode)
4533 : sdiv_pow2_cheap (speed, int_mode))
4534 /* We assume that the cheap metric is true if the
4535 optab has an expander for this mode. */
4536 && ((optab_handler ((rem_flag ? smod_optab
4537 : sdiv_optab),
4538 int_mode)
4539 != CODE_FOR_nothing)
4540 || (optab_handler (sdivmod_optab, int_mode)
4541 != CODE_FOR_nothing)))
4542 ;
4543 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4544 {
4545 if (rem_flag)
4546 {
4547 remainder = expand_smod_pow2 (int_mode, op0, d);
4548 if (remainder)
4549 return gen_lowpart (mode, remainder);
4550 }
4551
4552 if (sdiv_pow2_cheap (speed, int_mode)
4553 && ((optab_handler (sdiv_optab, int_mode)
4554 != CODE_FOR_nothing)
4555 || (optab_handler (sdivmod_optab, int_mode)
4556 != CODE_FOR_nothing)))
4557 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4558 int_mode, op0,
4559 gen_int_mode (abs_d,
4560 int_mode),
4561 NULL_RTX, 0);
4562 else
4563 quotient = expand_sdiv_pow2 (int_mode, op0, abs_d);
4564
4565 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4566 negate the quotient. */
4567 if (d < 0)
4568 {
4569 insn = get_last_insn ();
4570 if (insn != last
4571 && abs_d < (HOST_WIDE_INT_1U
4572 << (HOST_BITS_PER_WIDE_INT - 1)))
4573 set_dst_reg_note (insn, REG_EQUAL,
4574 gen_rtx_DIV (int_mode, op0,
4575 gen_int_mode
4576 (abs_d,
4577 int_mode)),
4578 quotient);
4579
4580 quotient = expand_unop (int_mode, neg_optab,
4581 quotient, quotient, 0);
4582 }
4583 }
4584 else if (size <= HOST_BITS_PER_WIDE_INT)
4585 {
4586 choose_multiplier (abs_d, size, size - 1,
4587 &ml, &post_shift, &lgup);
4588 if (ml < HOST_WIDE_INT_1U << (size - 1))
4589 {
4590 rtx t1, t2, t3;
4591
4592 if (post_shift >= BITS_PER_WORD
4593 || size - 1 >= BITS_PER_WORD)
4594 goto fail1;
4595
4596 extra_cost = (shift_cost (speed, int_mode, post_shift)
4597 + shift_cost (speed, int_mode, size - 1)
4598 + add_cost (speed, int_mode));
4599 t1 = expmed_mult_highpart
4600 (int_mode, op0, gen_int_mode (ml, int_mode),
4601 NULL_RTX, 0, max_cost - extra_cost);
4602 if (t1 == 0)
4603 goto fail1;
4604 t2 = expand_shift
4605 (RSHIFT_EXPR, int_mode, t1,
4606 post_shift, NULL_RTX, 0);
4607 t3 = expand_shift
4608 (RSHIFT_EXPR, int_mode, op0,
4609 size - 1, NULL_RTX, 0);
4610 if (d < 0)
4611 quotient
4612 = force_operand (gen_rtx_MINUS (int_mode, t3, t2),
4613 tquotient);
4614 else
4615 quotient
4616 = force_operand (gen_rtx_MINUS (int_mode, t2, t3),
4617 tquotient);
4618 }
4619 else
4620 {
4621 rtx t1, t2, t3, t4;
4622
4623 if (post_shift >= BITS_PER_WORD
4624 || size - 1 >= BITS_PER_WORD)
4625 goto fail1;
4626
4627 ml |= HOST_WIDE_INT_M1U << (size - 1);
4628 mlr = gen_int_mode (ml, int_mode);
4629 extra_cost = (shift_cost (speed, int_mode, post_shift)
4630 + shift_cost (speed, int_mode, size - 1)
4631 + 2 * add_cost (speed, int_mode));
4632 t1 = expmed_mult_highpart (int_mode, op0, mlr,
4633 NULL_RTX, 0,
4634 max_cost - extra_cost);
4635 if (t1 == 0)
4636 goto fail1;
4637 t2 = force_operand (gen_rtx_PLUS (int_mode, t1, op0),
4638 NULL_RTX);
4639 t3 = expand_shift
4640 (RSHIFT_EXPR, int_mode, t2,
4641 post_shift, NULL_RTX, 0);
4642 t4 = expand_shift
4643 (RSHIFT_EXPR, int_mode, op0,
4644 size - 1, NULL_RTX, 0);
4645 if (d < 0)
4646 quotient
4647 = force_operand (gen_rtx_MINUS (int_mode, t4, t3),
4648 tquotient);
4649 else
4650 quotient
4651 = force_operand (gen_rtx_MINUS (int_mode, t3, t4),
4652 tquotient);
4653 }
4654 }
4655 else /* Too wide mode to use tricky code */
4656 break;
4657
4658 insn = get_last_insn ();
4659 if (insn != last)
4660 set_dst_reg_note (insn, REG_EQUAL,
4661 gen_rtx_DIV (int_mode, op0, op1),
4662 quotient);
4663 }
4664 break;
4665 }
4666 fail1:
4667 delete_insns_since (last);
4668 break;
4669
4670 case FLOOR_DIV_EXPR:
4671 case FLOOR_MOD_EXPR:
4672 /* We will come here only for signed operations. */
4673 if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
4674 {
4675 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4676 int size = GET_MODE_BITSIZE (int_mode);
4677 unsigned HOST_WIDE_INT mh, ml;
4678 int pre_shift, lgup, post_shift;
4679 HOST_WIDE_INT d = INTVAL (op1);
4680
4681 if (d > 0)
4682 {
4683 /* We could just as easily deal with negative constants here,
4684 but it does not seem worth the trouble for GCC 2.6. */
4685 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4686 {
4687 pre_shift = floor_log2 (d);
4688 if (rem_flag)
4689 {
4690 unsigned HOST_WIDE_INT mask
4691 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4692 remainder = expand_binop
4693 (int_mode, and_optab, op0,
4694 gen_int_mode (mask, int_mode),
4695 remainder, 0, OPTAB_LIB_WIDEN);
4696 if (remainder)
4697 return gen_lowpart (mode, remainder);
4698 }
4699 quotient = expand_shift
4700 (RSHIFT_EXPR, int_mode, op0,
4701 pre_shift, tquotient, 0);
4702 }
4703 else
4704 {
4705 rtx t1, t2, t3, t4;
4706
4707 mh = choose_multiplier (d, size, size - 1,
4708 &ml, &post_shift, &lgup);
4709 gcc_assert (!mh);
4710
4711 if (post_shift < BITS_PER_WORD
4712 && size - 1 < BITS_PER_WORD)
4713 {
4714 t1 = expand_shift
4715 (RSHIFT_EXPR, int_mode, op0,
4716 size - 1, NULL_RTX, 0);
4717 t2 = expand_binop (int_mode, xor_optab, op0, t1,
4718 NULL_RTX, 0, OPTAB_WIDEN);
4719 extra_cost = (shift_cost (speed, int_mode, post_shift)
4720 + shift_cost (speed, int_mode, size - 1)
4721 + 2 * add_cost (speed, int_mode));
4722 t3 = expmed_mult_highpart
4723 (int_mode, t2, gen_int_mode (ml, int_mode),
4724 NULL_RTX, 1, max_cost - extra_cost);
4725 if (t3 != 0)
4726 {
4727 t4 = expand_shift
4728 (RSHIFT_EXPR, int_mode, t3,
4729 post_shift, NULL_RTX, 1);
4730 quotient = expand_binop (int_mode, xor_optab,
4731 t4, t1, tquotient, 0,
4732 OPTAB_WIDEN);
4733 }
4734 }
4735 }
4736 }
4737 else
4738 {
4739 rtx nsign, t1, t2, t3, t4;
4740 t1 = force_operand (gen_rtx_PLUS (int_mode,
4741 op0, constm1_rtx), NULL_RTX);
4742 t2 = expand_binop (int_mode, ior_optab, op0, t1, NULL_RTX,
4743 0, OPTAB_WIDEN);
4744 nsign = expand_shift (RSHIFT_EXPR, int_mode, t2,
4745 size - 1, NULL_RTX, 0);
4746 t3 = force_operand (gen_rtx_MINUS (int_mode, t1, nsign),
4747 NULL_RTX);
4748 t4 = expand_divmod (0, TRUNC_DIV_EXPR, int_mode, t3, op1,
4749 NULL_RTX, 0);
4750 if (t4)
4751 {
4752 rtx t5;
4753 t5 = expand_unop (int_mode, one_cmpl_optab, nsign,
4754 NULL_RTX, 0);
4755 quotient = force_operand (gen_rtx_PLUS (int_mode, t4, t5),
4756 tquotient);
4757 }
4758 }
4759 }
4760
4761 if (quotient != 0)
4762 break;
4763 delete_insns_since (last);
4764
4765 /* Try using an instruction that produces both the quotient and
4766 remainder, using truncation. We can easily compensate the quotient
4767 or remainder to get floor rounding, once we have the remainder.
4768 Notice that we also compute the final remainder value here,
4769 and return the result right away. */
4770 if (target == 0 || GET_MODE (target) != compute_mode)
4771 target = gen_reg_rtx (compute_mode);
4772
4773 if (rem_flag)
4774 {
4775 remainder
4776 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4777 quotient = gen_reg_rtx (compute_mode);
4778 }
4779 else
4780 {
4781 quotient
4782 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4783 remainder = gen_reg_rtx (compute_mode);
4784 }
4785
4786 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4787 quotient, remainder, 0))
4788 {
4789 /* This could be computed with a branch-less sequence.
4790 Save that for later. */
4791 rtx tem;
4792 rtx_code_label *label = gen_label_rtx ();
4793 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4794 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4795 NULL_RTX, 0, OPTAB_WIDEN);
4796 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4797 expand_dec (quotient, const1_rtx);
4798 expand_inc (remainder, op1);
4799 emit_label (label);
4800 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4801 }
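
/* Editorial worked example of the compensation above: for
   -7 floor/ 2, the truncating divmod gives quotient -3 and
   remainder -1; the remainder is nonzero and op0 ^ op1 is
   negative, so the quotient becomes -4 and the remainder 1,
   i.e. -7 == -4*2 + 1. */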
4802
4803 /* No luck with division elimination or divmod. Have to do it
4804 by conditionally adjusting op0 *and* the result. */
4805 {
4806 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4807 rtx adjusted_op0;
4808 rtx tem;
4809
4810 quotient = gen_reg_rtx (compute_mode);
4811 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4812 label1 = gen_label_rtx ();
4813 label2 = gen_label_rtx ();
4814 label3 = gen_label_rtx ();
4815 label4 = gen_label_rtx ();
4816 label5 = gen_label_rtx ();
4817 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4818 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4819 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4820 quotient, 0, OPTAB_LIB_WIDEN);
4821 if (tem != quotient)
4822 emit_move_insn (quotient, tem);
4823 emit_jump_insn (targetm.gen_jump (label5));
4824 emit_barrier ();
4825 emit_label (label1);
4826 expand_inc (adjusted_op0, const1_rtx);
4827 emit_jump_insn (targetm.gen_jump (label4));
4828 emit_barrier ();
4829 emit_label (label2);
4830 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4831 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4832 quotient, 0, OPTAB_LIB_WIDEN);
4833 if (tem != quotient)
4834 emit_move_insn (quotient, tem);
4835 emit_jump_insn (targetm.gen_jump (label5));
4836 emit_barrier ();
4837 emit_label (label3);
4838 expand_dec (adjusted_op0, const1_rtx);
4839 emit_label (label4);
4840 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4841 quotient, 0, OPTAB_LIB_WIDEN);
4842 if (tem != quotient)
4843 emit_move_insn (quotient, tem);
4844 expand_dec (quotient, const1_rtx);
4845 emit_label (label5);
4846 }
4847 break;
4848
4849 case CEIL_DIV_EXPR:
4850 case CEIL_MOD_EXPR:
4851 if (unsignedp)
4852 {
4853 if (op1_is_constant
4854 && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4855 && (HWI_COMPUTABLE_MODE_P (compute_mode)
4856 || INTVAL (op1) >= 0))
4857 {
4858 scalar_int_mode int_mode
4859 = as_a <scalar_int_mode> (compute_mode);
4860 rtx t1, t2, t3;
4861 unsigned HOST_WIDE_INT d = INTVAL (op1);
4862 t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
4863 floor_log2 (d), tquotient, 1);
4864 t2 = expand_binop (int_mode, and_optab, op0,
4865 gen_int_mode (d - 1, int_mode),
4866 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4867 t3 = gen_reg_rtx (int_mode);
4868 t3 = emit_store_flag (t3, NE, t2, const0_rtx, int_mode, 1, 1);
4869 if (t3 == 0)
4870 {
4871 rtx_code_label *lab;
4872 lab = gen_label_rtx ();
4873 do_cmp_and_jump (t2, const0_rtx, EQ, int_mode, lab);
4874 expand_inc (t1, const1_rtx);
4875 emit_label (lab);
4876 quotient = t1;
4877 }
4878 else
4879 quotient = force_operand (gen_rtx_PLUS (int_mode, t1, t3),
4880 tquotient);
4881 break;
4882 }
4883
4884 /* Try using an instruction that produces both the quotient and
4885 remainder, using truncation. We can easily compensate the
4886 quotient or remainder to get ceiling rounding, once we have the
4887 remainder. Notice that we also compute the final remainder
4888 value here, and return the result right away. */
4889 if (target == 0 || GET_MODE (target) != compute_mode)
4890 target = gen_reg_rtx (compute_mode);
4891
4892 if (rem_flag)
4893 {
4894 remainder = (REG_P (target)
4895 ? target : gen_reg_rtx (compute_mode));
4896 quotient = gen_reg_rtx (compute_mode);
4897 }
4898 else
4899 {
4900 quotient = (REG_P (target)
4901 ? target : gen_reg_rtx (compute_mode));
4902 remainder = gen_reg_rtx (compute_mode);
4903 }
4904
4905 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4906 remainder, 1))
4907 {
4908 /* This could be computed with a branch-less sequence.
4909 Save that for later. */
4910 rtx_code_label *label = gen_label_rtx ();
4911 do_cmp_and_jump (remainder, const0_rtx, EQ,
4912 compute_mode, label);
4913 expand_inc (quotient, const1_rtx);
4914 expand_dec (remainder, op1);
4915 emit_label (label);
4916 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4917 }
4918
4919 /* No luck with division elimination or divmod. Have to do it
4920 by conditionally adjusting op0 *and* the result. */
4921 {
4922 rtx_code_label *label1, *label2;
4923 rtx adjusted_op0, tem;
4924
4925 quotient = gen_reg_rtx (compute_mode);
4926 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4927 label1 = gen_label_rtx ();
4928 label2 = gen_label_rtx ();
4929 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4930 compute_mode, label1);
4931 emit_move_insn (quotient, const0_rtx);
4932 emit_jump_insn (targetm.gen_jump (label2));
4933 emit_barrier ();
4934 emit_label (label1);
4935 expand_dec (adjusted_op0, const1_rtx);
4936 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4937 quotient, 1, OPTAB_LIB_WIDEN);
4938 if (tem != quotient)
4939 emit_move_insn (quotient, tem);
4940 expand_inc (quotient, const1_rtx);
4941 emit_label (label2);
4942 }
4943 }
4944 else /* signed */
4945 {
4946 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4947 && INTVAL (op1) >= 0)
4948 {
4949 /* This is extremely similar to the code for the unsigned case
4950 above. For 2.7 we should merge these variants, but for
4951 2.6.1 I don't want to touch the code for unsigned since that
4952 gets used in C. The signed case will only be used by other
4953 languages (Ada). */
4954
4955 rtx t1, t2, t3;
4956 unsigned HOST_WIDE_INT d = INTVAL (op1);
4957 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4958 floor_log2 (d), tquotient, 0);
4959 t2 = expand_binop (compute_mode, and_optab, op0,
4960 gen_int_mode (d - 1, compute_mode),
4961 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4962 t3 = gen_reg_rtx (compute_mode);
4963 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4964 compute_mode, 1, 1);
4965 if (t3 == 0)
4966 {
4967 rtx_code_label *lab;
4968 lab = gen_label_rtx ();
4969 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4970 expand_inc (t1, const1_rtx);
4971 emit_label (lab);
4972 quotient = t1;
4973 }
4974 else
4975 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4976 t1, t3),
4977 tquotient);
4978 break;
4979 }
4980
4981 /* Try using an instruction that produces both the quotient and
4982 remainder, using truncation. We can easily compensate the
4983 quotient or remainder to get ceiling rounding, once we have the
4984 remainder. Notice that we also compute the final remainder
4985 value here, and return the result right away. */
4986 if (target == 0 || GET_MODE (target) != compute_mode)
4987 target = gen_reg_rtx (compute_mode);
4988 if (rem_flag)
4989 {
4990 remainder = (REG_P (target)
4991 ? target : gen_reg_rtx (compute_mode));
4992 quotient = gen_reg_rtx (compute_mode);
4993 }
4994 else
4995 {
4996 quotient = (REG_P (target)
4997 ? target : gen_reg_rtx (compute_mode));
4998 remainder = gen_reg_rtx (compute_mode);
4999 }
5000
5001 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
5002 remainder, 0))
5003 {
5004 /* This could be computed with a branch-less sequence.
5005 Save that for later. */
5006 rtx tem;
5007 rtx_code_label *label = gen_label_rtx ();
5008 do_cmp_and_jump (remainder, const0_rtx, EQ,
5009 compute_mode, label);
5010 tem = expand_binop (compute_mode, xor_optab, op0, op1,
5011 NULL_RTX, 0, OPTAB_WIDEN);
5012 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
5013 expand_inc (quotient, const1_rtx);
5014 expand_dec (remainder, op1);
5015 emit_label (label);
5016 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5017 }
5018
5019 /* No luck with division elimination or divmod. Have to do it
5020 by conditionally adjusting op0 *and* the result. */
5021 {
5022 rtx_code_label *label1, *label2, *label3, *label4, *label5;
5023 rtx adjusted_op0;
5024 rtx tem;
5025
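/* Case analysis for the labels below: if op0 and op1 are both
   positive, quotient = (op0 - 1) / op1 + 1; if both are negative,
   quotient = (op0 + 1) / op1 + 1; otherwise the truncating
   quotient is <= 0, so truncation already rounds toward +infinity
   and plain op0 / op1 suffices.  For example (illustrative),
   7 / 2 becomes (7 - 1) / 2 + 1 = 4 = ceil (3.5).  */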
5026 quotient = gen_reg_rtx (compute_mode);
5027 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
5028 label1 = gen_label_rtx ();
5029 label2 = gen_label_rtx ();
5030 label3 = gen_label_rtx ();
5031 label4 = gen_label_rtx ();
5032 label5 = gen_label_rtx ();
5033 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
5034 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
5035 compute_mode, label1);
5036 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5037 quotient, 0, OPTAB_LIB_WIDEN);
5038 if (tem != quotient)
5039 emit_move_insn (quotient, tem);
5040 emit_jump_insn (targetm.gen_jump (label5));
5041 emit_barrier ();
5042 emit_label (label1);
5043 expand_dec (adjusted_op0, const1_rtx);
5044 emit_jump_insn (targetm.gen_jump (label4));
5045 emit_barrier ();
5046 emit_label (label2);
5047 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
5048 compute_mode, label3);
5049 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5050 quotient, 0, OPTAB_LIB_WIDEN);
5051 if (tem != quotient)
5052 emit_move_insn (quotient, tem);
5053 emit_jump_insn (targetm.gen_jump (label5));
5054 emit_barrier ();
5055 emit_label (label3);
5056 expand_inc (adjusted_op0, const1_rtx);
5057 emit_label (label4);
5058 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5059 quotient, 0, OPTAB_LIB_WIDEN);
5060 if (tem != quotient)
5061 emit_move_insn (quotient, tem);
5062 expand_inc (quotient, const1_rtx);
5063 emit_label (label5);
5064 }
5065 }
5066 break;
5067
5068 case EXACT_DIV_EXPR:
5069 if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
5070 {
5071 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5072 int size = GET_MODE_BITSIZE (int_mode);
5073 HOST_WIDE_INT d = INTVAL (op1);
5074 unsigned HOST_WIDE_INT ml;
5075 int pre_shift;
5076 rtx t1;
5077
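/* An exact division can be done by shifting out the divisor's
   trailing zero bits and multiplying by the multiplicative inverse
   of its odd part modulo 2**size.  For example (illustrative),
   d = 6 in a 32-bit mode: pre_shift = 1 and ml = 0xaaaaaaab, the
   inverse of 3, since 3 * 0xaaaaaaab == 1 (mod 2**32); 42 / 6 is
   then (42 >> 1) * 0xaaaaaaab == 7 (mod 2**32).  */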
5078 pre_shift = ctz_or_zero (d);
5079 ml = invert_mod2n (d >> pre_shift, size);
5080 t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5081 pre_shift, NULL_RTX, unsignedp);
5082 quotient = expand_mult (int_mode, t1, gen_int_mode (ml, int_mode),
5083 NULL_RTX, 1);
5084
5085 insn = get_last_insn ();
5086 set_dst_reg_note (insn, REG_EQUAL,
5087 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5088 int_mode, op0, op1),
5089 quotient);
5090 }
5091 break;
5092
5093 case ROUND_DIV_EXPR:
5094 case ROUND_MOD_EXPR:
5095 if (unsignedp)
5096 {
5097 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5098 rtx tem;
5099 rtx_code_label *label;
5100 label = gen_label_rtx ();
5101 quotient = gen_reg_rtx (int_mode);
5102 remainder = gen_reg_rtx (int_mode);
5103 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
5104 {
5105 rtx tem;
5106 quotient = expand_binop (int_mode, udiv_optab, op0, op1,
5107 quotient, 1, OPTAB_LIB_WIDEN);
5108 tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 1);
5109 remainder = expand_binop (int_mode, sub_optab, op0, tem,
5110 remainder, 1, OPTAB_LIB_WIDEN);
5111 }
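/* Round to nearest: bump the quotient (and subtract op1 from the
   remainder) when remainder > (op1 - 1) / 2, so exact halves round
   upward.  For example (illustrative), 7 / 2 gives quotient 3 and
   remainder 1 > (2 - 1) / 2 = 0, hence quotient 4 = round (3.5).  */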
5112 tem = plus_constant (int_mode, op1, -1);
5113 tem = expand_shift (RSHIFT_EXPR, int_mode, tem, 1, NULL_RTX, 1);
5114 do_cmp_and_jump (remainder, tem, LEU, int_mode, label);
5115 expand_inc (quotient, const1_rtx);
5116 expand_dec (remainder, op1);
5117 emit_label (label);
5118 }
5119 else
5120 {
5121 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5122 int size = GET_MODE_BITSIZE (int_mode);
5123 rtx abs_rem, abs_op1, tem, mask;
5124 rtx_code_label *label;
5125 label = gen_label_rtx ();
5126 quotient = gen_reg_rtx (int_mode);
5127 remainder = gen_reg_rtx (int_mode);
5128 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
5129 {
5130 rtx tem;
5131 quotient = expand_binop (int_mode, sdiv_optab, op0, op1,
5132 quotient, 0, OPTAB_LIB_WIDEN);
5133 tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 0);
5134 remainder = expand_binop (int_mode, sub_optab, op0, tem,
5135 remainder, 0, OPTAB_LIB_WIDEN);
5136 }
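/* Round to nearest with halves away from zero: adjust whenever
   2 * abs (remainder) >= abs (op1).  The mask, the smeared sign
   bit of op0 ^ op1, turns the adjustment into a branch-free +1 or
   -1 on the quotient and a matching subtraction or addition of
   op1 on the remainder.  */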
5137 abs_rem = expand_abs (int_mode, remainder, NULL_RTX, 1, 0);
5138 abs_op1 = expand_abs (int_mode, op1, NULL_RTX, 1, 0);
5139 tem = expand_shift (LSHIFT_EXPR, int_mode, abs_rem,
5140 1, NULL_RTX, 1);
5141 do_cmp_and_jump (tem, abs_op1, LTU, int_mode, label);
5142 tem = expand_binop (int_mode, xor_optab, op0, op1,
5143 NULL_RTX, 0, OPTAB_WIDEN);
5144 mask = expand_shift (RSHIFT_EXPR, int_mode, tem,
5145 size - 1, NULL_RTX, 0);
5146 tem = expand_binop (int_mode, xor_optab, mask, const1_rtx,
5147 NULL_RTX, 0, OPTAB_WIDEN);
5148 tem = expand_binop (int_mode, sub_optab, tem, mask,
5149 NULL_RTX, 0, OPTAB_WIDEN);
5150 expand_inc (quotient, tem);
5151 tem = expand_binop (int_mode, xor_optab, mask, op1,
5152 NULL_RTX, 0, OPTAB_WIDEN);
5153 tem = expand_binop (int_mode, sub_optab, tem, mask,
5154 NULL_RTX, 0, OPTAB_WIDEN);
5155 expand_dec (remainder, tem);
5156 emit_label (label);
5157 }
5158 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5159
5160 default:
5161 gcc_unreachable ();
5162 }
5163
5164 if (quotient == 0)
5165 {
5166 if (target && GET_MODE (target) != compute_mode)
5167 target = 0;
5168
5169 if (rem_flag)
5170 {
5171 /* Try to produce the remainder without producing the quotient.
5172 If we seem to have a divmod pattern that does not require widening,
5173 don't try widening here. We should really have a WIDEN argument
5174 to expand_twoval_binop, since what we'd really like to do here is
5175 1) try a mod insn in compute_mode
5176 2) try a divmod insn in compute_mode
5177 3) try a div insn in compute_mode and multiply-subtract to get
5178 remainder
5179 4) try the same things with widening allowed. */
5180 remainder
5181 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5182 op0, op1, target,
5183 unsignedp,
5184 ((optab_handler (optab2, compute_mode)
5185 != CODE_FOR_nothing)
5186 ? OPTAB_DIRECT : OPTAB_WIDEN));
5187 if (remainder == 0)
5188 {
5189 /* No luck there. Can we do remainder and divide at once
5190 without a library call? */
5191 remainder = gen_reg_rtx (compute_mode);
5192 if (! expand_twoval_binop ((unsignedp
5193 ? udivmod_optab
5194 : sdivmod_optab),
5195 op0, op1,
5196 NULL_RTX, remainder, unsignedp))
5197 remainder = 0;
5198 }
5199
5200 if (remainder)
5201 return gen_lowpart (mode, remainder);
5202 }
5203
5204 /* Produce the quotient. Try a quotient insn, but not a library call.
5205 If we have a divmod in this mode, use it in preference to widening
5206 the div (for this test we assume it will not fail). Note that optab2
5207 is set to the one of the two optabs that the call below will use. */
5208 quotient
5209 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
5210 op0, op1, rem_flag ? NULL_RTX : target,
5211 unsignedp,
5212 ((optab_handler (optab2, compute_mode)
5213 != CODE_FOR_nothing)
5214 ? OPTAB_DIRECT : OPTAB_WIDEN));
5215
5216 if (quotient == 0)
5217 {
5218 /* No luck there. Try a quotient-and-remainder insn,
5219 keeping the quotient alone. */
5220 quotient = gen_reg_rtx (compute_mode);
5221 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
5222 op0, op1,
5223 quotient, NULL_RTX, unsignedp))
5224 {
5225 quotient = 0;
5226 if (! rem_flag)
5227 /* Still no luck. If we are not computing the remainder,
5228 use a library call for the quotient. */
5229 quotient = sign_expand_binop (compute_mode,
5230 udiv_optab, sdiv_optab,
5231 op0, op1, target,
5232 unsignedp, OPTAB_LIB_WIDEN);
5233 }
5234 }
5235 }
5236
5237 if (rem_flag)
5238 {
5239 if (target && GET_MODE (target) != compute_mode)
5240 target = 0;
5241
5242 if (quotient == 0)
5243 {
5244 /* No divide instruction either. Use library for remainder. */
5245 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5246 op0, op1, target,
5247 unsignedp, OPTAB_LIB_WIDEN);
5248 /* No remainder function. Try a quotient-and-remainder
5249 function, keeping the remainder. */
5250 if (!remainder)
5251 {
5252 remainder = gen_reg_rtx (compute_mode);
5253 if (!expand_twoval_binop_libfunc
5254 (unsignedp ? udivmod_optab : sdivmod_optab,
5255 op0, op1,
5256 NULL_RTX, remainder,
5257 unsignedp ? UMOD : MOD))
5258 remainder = NULL_RTX;
5259 }
5260 }
5261 else
5262 {
5263 /* We divided. Now finish doing X - Y * (X / Y). */
5264 remainder = expand_mult (compute_mode, quotient, op1,
5265 NULL_RTX, unsignedp);
5266 remainder = expand_binop (compute_mode, sub_optab, op0,
5267 remainder, target, unsignedp,
5268 OPTAB_LIB_WIDEN);
5269 }
5270 }
5271
5272 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5273 }
5274 \f
5275 /* Return a tree node with data type TYPE, describing the value of X.
5276 Usually this is a VAR_DECL, if there is no obvious better choice.
5277 X may be an expression; however, we only support those expressions
5278 generated by loop.c. */
5279
5280 tree
5281 make_tree (tree type, rtx x)
5282 {
5283 tree t;
5284
5285 switch (GET_CODE (x))
5286 {
5287 case CONST_INT:
5288 case CONST_WIDE_INT:
5289 t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
5290 return t;
5291
5292 case CONST_DOUBLE:
5293 STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
5294 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
5295 t = wide_int_to_tree (type,
5296 wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
5297 HOST_BITS_PER_WIDE_INT * 2));
5298 else
5299 t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));
5300
5301 return t;
5302
5303 case CONST_VECTOR:
5304 {
5305 unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
5306 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
5307 tree itype = TREE_TYPE (type);
5308
5309 /* Build a tree with vector elements. */
5310 tree_vector_builder elts (type, npatterns, nelts_per_pattern);
5311 unsigned int count = elts.encoded_nelts ();
5312 for (unsigned int i = 0; i < count; ++i)
5313 {
5314 rtx elt = CONST_VECTOR_ELT (x, i);
5315 elts.quick_push (make_tree (itype, elt));
5316 }
5317
5318 return elts.build ();
5319 }
5320
5321 case PLUS:
5322 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5323 make_tree (type, XEXP (x, 1)));
5324
5325 case MINUS:
5326 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5327 make_tree (type, XEXP (x, 1)));
5328
5329 case NEG:
5330 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5331
5332 case MULT:
5333 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5334 make_tree (type, XEXP (x, 1)));
5335
5336 case ASHIFT:
5337 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5338 make_tree (type, XEXP (x, 1)));
5339
5340 case LSHIFTRT:
5341 t = unsigned_type_for (type);
5342 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5343 make_tree (t, XEXP (x, 0)),
5344 make_tree (type, XEXP (x, 1))));
5345
5346 case ASHIFTRT:
5347 t = signed_type_for (type);
5348 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5349 make_tree (t, XEXP (x, 0)),
5350 make_tree (type, XEXP (x, 1))));
5351
5352 case DIV:
5353 if (TREE_CODE (type) != REAL_TYPE)
5354 t = signed_type_for (type);
5355 else
5356 t = type;
5357
5358 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5359 make_tree (t, XEXP (x, 0)),
5360 make_tree (t, XEXP (x, 1))));
5361 case UDIV:
5362 t = unsigned_type_for (type);
5363 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5364 make_tree (t, XEXP (x, 0)),
5365 make_tree (t, XEXP (x, 1))));
5366
5367 case SIGN_EXTEND:
5368 case ZERO_EXTEND:
5369 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5370 GET_CODE (x) == ZERO_EXTEND);
5371 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5372
5373 case CONST:
5374 return make_tree (type, XEXP (x, 0));
5375
5376 case SYMBOL_REF:
5377 t = SYMBOL_REF_DECL (x);
5378 if (t)
5379 return fold_convert (type, build_fold_addr_expr (t));
5380 /* fall through. */
5381
5382 default:
5383 if (CONST_POLY_INT_P (x))
5384 return wide_int_to_tree (type, const_poly_int_value (x));
5385
5386 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5387
5388 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5389 address mode to pointer mode. */
5390 if (POINTER_TYPE_P (type))
5391 x = convert_memory_address_addr_space
5392 (SCALAR_INT_TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5393
5394 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5395 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5396 t->decl_with_rtl.rtl = x;
5397
5398 return t;
5399 }
5400 }
5401 \f
5402 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5403 and returning TARGET.
5404
5405 If TARGET is 0, a pseudo-register or constant is returned. */
5406
5407 rtx
5408 expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
5409 {
5410 rtx tem = 0;
5411
5412 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5413 tem = simplify_binary_operation (AND, mode, op0, op1);
5414 if (tem == 0)
5415 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5416
5417 if (target == 0)
5418 target = tem;
5419 else if (tem != target)
5420 emit_move_insn (target, tem);
5421 return target;
5422 }
5423
5424 /* Helper function for emit_store_flag. */
5425 rtx
5426 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5427 machine_mode mode, machine_mode compare_mode,
5428 int unsignedp, rtx x, rtx y, int normalizep,
5429 machine_mode target_mode)
5430 {
5431 struct expand_operand ops[4];
5432 rtx op0, comparison, subtarget;
5433 rtx_insn *last;
5434 scalar_int_mode result_mode = targetm.cstore_mode (icode);
5435 scalar_int_mode int_target_mode;
5436
5437 last = get_last_insn ();
5438 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5439 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5440 if (!x || !y)
5441 {
5442 delete_insns_since (last);
5443 return NULL_RTX;
5444 }
5445
5446 if (target_mode == VOIDmode)
5447 int_target_mode = result_mode;
5448 else
5449 int_target_mode = as_a <scalar_int_mode> (target_mode);
5450 if (!target)
5451 target = gen_reg_rtx (int_target_mode);
5452
5453 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5454
5455 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5456 create_fixed_operand (&ops[1], comparison);
5457 create_fixed_operand (&ops[2], x);
5458 create_fixed_operand (&ops[3], y);
5459 if (!maybe_expand_insn (icode, 4, ops))
5460 {
5461 delete_insns_since (last);
5462 return NULL_RTX;
5463 }
5464 subtarget = ops[0].value;
5465
5466 /* If we are converting to a wider mode, first convert to
5467 INT_TARGET_MODE, then normalize. This produces better combining
5468 opportunities on machines that have a SIGN_EXTRACT when we are
5469 testing a single bit. This mostly benefits the 68k.
5470
5471 If STORE_FLAG_VALUE does not have the sign bit set when
5472 interpreted in MODE, we can do this conversion as unsigned, which
5473 is usually more efficient. */
5474 if (GET_MODE_PRECISION (int_target_mode) > GET_MODE_PRECISION (result_mode))
5475 {
5476 gcc_assert (GET_MODE_PRECISION (result_mode) != 1
5477 || STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1);
5478
5479 bool unsignedp = (STORE_FLAG_VALUE >= 0);
5480 convert_move (target, subtarget, unsignedp);
5481
5482 op0 = target;
5483 result_mode = int_target_mode;
5484 }
5485 else
5486 op0 = subtarget;
5487
5488 /* If we want to keep subexpressions around, don't reuse our last
5489 target. */
5490 if (optimize)
5491 subtarget = 0;
5492
5493 /* Now normalize to the proper value in MODE. Sometimes we don't
5494 have to do anything. */
5495 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5496 ;
5497 /* STORE_FLAG_VALUE might be the most negative number, so write
5498 the comparison this way to avoid a compile-time warning. */
5499 else if (- normalizep == STORE_FLAG_VALUE)
5500 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5501
5502 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5503 it hard to use a value of just the sign bit due to ANSI integer
5504 constant typing rules. */
5505 else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
5506 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5507 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5508 normalizep == 1);
5509 else
5510 {
5511 gcc_assert (STORE_FLAG_VALUE & 1);
5512
5513 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5514 if (normalizep == -1)
5515 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5516 }
5517
5518 /* If we were converting to a smaller mode, do the conversion now. */
5519 if (int_target_mode != result_mode)
5520 {
5521 convert_move (target, op0, 0);
5522 return target;
5523 }
5524 else
5525 return op0;
5526 }
5527
5528
5529 /* A subroutine of emit_store_flag only including "tricks" that do not
5530 need a recursive call. These are kept separate to avoid infinite
5531 loops. */
5532
5533 static rtx
5534 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5535 machine_mode mode, int unsignedp, int normalizep,
5536 machine_mode target_mode)
5537 {
5538 rtx subtarget;
5539 enum insn_code icode;
5540 machine_mode compare_mode;
5541 enum mode_class mclass;
5542 enum rtx_code scode;
5543
5544 if (unsignedp)
5545 code = unsigned_condition (code);
5546 scode = swap_condition (code);
5547
5548 /* If one operand is constant, make it the second one. Only do this
5549 if the other operand is not constant as well. */
5550
5551 if (swap_commutative_operands_p (op0, op1))
5552 {
5553 std::swap (op0, op1);
5554 code = swap_condition (code);
5555 }
5556
5557 if (mode == VOIDmode)
5558 mode = GET_MODE (op0);
5559
5560 if (CONST_SCALAR_INT_P (op1))
5561 canonicalize_comparison (mode, &code, &op1);
5562
5563 /* For some comparisons with 1 and -1, we can convert this to
5564 comparisons with zero. This will often produce more opportunities for
5565 store-flag insns. */
5566
5567 switch (code)
5568 {
5569 case LT:
5570 if (op1 == const1_rtx)
5571 op1 = const0_rtx, code = LE;
5572 break;
5573 case LE:
5574 if (op1 == constm1_rtx)
5575 op1 = const0_rtx, code = LT;
5576 break;
5577 case GE:
5578 if (op1 == const1_rtx)
5579 op1 = const0_rtx, code = GT;
5580 break;
5581 case GT:
5582 if (op1 == constm1_rtx)
5583 op1 = const0_rtx, code = GE;
5584 break;
5585 case GEU:
5586 if (op1 == const1_rtx)
5587 op1 = const0_rtx, code = NE;
5588 break;
5589 case LTU:
5590 if (op1 == const1_rtx)
5591 op1 = const0_rtx, code = EQ;
5592 break;
5593 default:
5594 break;
5595 }
5596
5597 /* If we are comparing a double-word integer with zero or -1, we can
5598 convert the comparison into one involving a single word. */
5599 scalar_int_mode int_mode;
5600 if (is_int_mode (mode, &int_mode)
5601 && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
5602 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5603 {
5604 rtx tem;
5605 if ((code == EQ || code == NE)
5606 && (op1 == const0_rtx || op1 == constm1_rtx))
5607 {
5608 rtx op00, op01;
5609
5610 /* Do a logical OR (for the zero test) or AND (for the -1 test)
5611 of the two words and compare the result. */
5612 op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
5613 op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
5614 tem = expand_binop (word_mode,
5615 op1 == const0_rtx ? ior_optab : and_optab,
5616 op00, op01, NULL_RTX, unsignedp,
5617 OPTAB_DIRECT);
5618
5619 if (tem != 0)
5620 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5621 unsignedp, normalizep);
5622 }
5623 else if ((code == LT || code == GE) && op1 == const0_rtx)
5624 {
5625 rtx op0h;
5626
5627 /* If testing the sign bit, can just test on high word. */
5628 op0h = simplify_gen_subreg (word_mode, op0, int_mode,
5629 subreg_highpart_offset (word_mode,
5630 int_mode));
5631 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5632 unsignedp, normalizep);
5633 }
5634 else
5635 tem = NULL_RTX;
5636
5637 if (tem)
5638 {
5639 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5640 return tem;
5641 if (!target)
5642 target = gen_reg_rtx (target_mode);
5643
5644 convert_move (target, tem,
5645 !val_signbit_known_set_p (word_mode,
5646 (normalizep ? normalizep
5647 : STORE_FLAG_VALUE)));
5648 return target;
5649 }
5650 }
5651
5652 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5653 complement of A (for GE) and shifting the sign bit to the low bit. */
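/* For example (illustrative), in a 32-bit mode: A < 0 becomes the
   logical shift (unsigned) A >> 31 when a 0/1 result is wanted,
   and A >= 0 becomes (unsigned) ~A >> 31; the arithmetic shift
   yields the corresponding 0/-1 forms.  */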
5654 if (op1 == const0_rtx && (code == LT || code == GE)
5655 && is_int_mode (mode, &int_mode)
5656 && (normalizep || STORE_FLAG_VALUE == 1
5657 || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
5658 {
5659 scalar_int_mode int_target_mode;
5660 subtarget = target;
5661
5662 if (!target)
5663 int_target_mode = int_mode;
5664 else
5665 {
5666 /* If the result is to be wider than OP0, it is best to convert it
5667 first. If it is to be narrower, it is *incorrect* to convert it
5668 first. */
5669 int_target_mode = as_a <scalar_int_mode> (target_mode);
5670 if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (int_mode))
5671 {
5672 op0 = convert_modes (int_target_mode, int_mode, op0, 0);
5673 int_mode = int_target_mode;
5674 }
5675 }
5676
5677 if (int_target_mode != int_mode)
5678 subtarget = 0;
5679
5680 if (code == GE)
5681 op0 = expand_unop (int_mode, one_cmpl_optab, op0,
5682 ((STORE_FLAG_VALUE == 1 || normalizep)
5683 ? 0 : subtarget), 0);
5684
5685 if (STORE_FLAG_VALUE == 1 || normalizep)
5686 /* If we are supposed to produce a 0/1 value, we want to do
5687 a logical shift from the sign bit to the low-order bit; for
5688 a -1/0 value, we do an arithmetic shift. */
5689 op0 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5690 GET_MODE_BITSIZE (int_mode) - 1,
5691 subtarget, normalizep != -1);
5692
5693 if (int_mode != int_target_mode)
5694 op0 = convert_modes (int_target_mode, int_mode, op0, 0);
5695
5696 return op0;
5697 }
5698
5699 mclass = GET_MODE_CLASS (mode);
5700 FOR_EACH_MODE_FROM (compare_mode, mode)
5701 {
5702 machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5703 icode = optab_handler (cstore_optab, optab_mode);
5704 if (icode != CODE_FOR_nothing)
5705 {
5706 do_pending_stack_adjust ();
5707 rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
5708 unsignedp, op0, op1, normalizep, target_mode);
5709 if (tem)
5710 return tem;
5711
5712 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5713 {
5714 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5715 unsignedp, op1, op0, normalizep, target_mode);
5716 if (tem)
5717 return tem;
5718 }
5719 break;
5720 }
5721 }
5722
5723 return 0;
5724 }
5725
5726 /* Subroutine of emit_store_flag that handles cases in which the operands
5727 are scalar integers. SUBTARGET is the target to use for temporary
5728 operations and TRUEVAL is the value to store when the condition is
5729 true. All other arguments are as for emit_store_flag. */
5730
5731 rtx
5732 emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0,
5733 rtx op1, scalar_int_mode mode, int unsignedp,
5734 int normalizep, rtx trueval)
5735 {
5736 machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5737 rtx_insn *last = get_last_insn ();
5738
5739 /* If this is an equality comparison of integers, we can try to exclusive-or
5740 (or subtract) the two operands and use a recursive call to try the
5741 comparison with zero. Don't do any of these cases if branches are
5742 very cheap. */
5743
5744 if ((code == EQ || code == NE) && op1 != const0_rtx)
5745 {
5746 rtx tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5747 OPTAB_WIDEN);
5748
5749 if (tem == 0)
5750 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5751 OPTAB_WIDEN);
5752 if (tem != 0)
5753 tem = emit_store_flag (target, code, tem, const0_rtx,
5754 mode, unsignedp, normalizep);
5755 if (tem != 0)
5756 return tem;
5757
5758 delete_insns_since (last);
5759 }
5760
5761 /* For integer comparisons, try the reverse comparison. However, for
5762 small X and if we'd have anyway to extend, implementing "X != 0"
5763 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5764 rtx_code rcode = reverse_condition (code);
5765 if (can_compare_p (rcode, mode, ccp_store_flag)
5766 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5767 && code == NE
5768 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5769 && op1 == const0_rtx))
5770 {
5771 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5772 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5773
5774 /* Again, for the reverse comparison, use either an addition or a XOR. */
5775 if (want_add
5776 && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
5777 optimize_insn_for_speed_p ()) == 0)
5778 {
5779 rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5780 STORE_FLAG_VALUE, target_mode);
5781 if (tem != 0)
5782 tem = expand_binop (target_mode, add_optab, tem,
5783 gen_int_mode (normalizep, target_mode),
5784 target, 0, OPTAB_WIDEN);
5785 if (tem != 0)
5786 return tem;
5787 }
5788 else if (!want_add
5789 && rtx_cost (trueval, mode, XOR, 1,
5790 optimize_insn_for_speed_p ()) == 0)
5791 {
5792 rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5793 normalizep, target_mode);
5794 if (tem != 0)
5795 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5796 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5797 if (tem != 0)
5798 return tem;
5799 }
5800
5801 delete_insns_since (last);
5802 }
5803
5804 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5805 the constant zero. Reject all other comparisons at this point. Only
5806 do LE and GT if branches are expensive since they are expensive on
5807 2-operand machines. */
5808
5809 if (op1 != const0_rtx
5810 || (code != EQ && code != NE
5811 && (BRANCH_COST (optimize_insn_for_speed_p (),
5812 false) <= 1 || (code != LE && code != GT))))
5813 return 0;
5814
5815 /* Try to put the result of the comparison in the sign bit. Assume we can't
5816 do the necessary operation below. */
5817
5818 rtx tem = 0;
5819
5820 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5821 the sign bit set. */
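/* For example (illustrative): A = 0 gives 0 | -1 = -1 (sign bit
   set); A = 5 gives 5 | 4 = 5 (sign bit clear); any negative A
   keeps its sign bit through the OR.  */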
5822
5823 if (code == LE)
5824 {
5825 /* This is destructive, so SUBTARGET can't be OP0. */
5826 if (rtx_equal_p (subtarget, op0))
5827 subtarget = 0;
5828
5829 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5830 OPTAB_WIDEN);
5831 if (tem)
5832 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5833 OPTAB_WIDEN);
5834 }
5835
5836 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5837 number of bits in the mode of OP0, minus one. */
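/* For example (illustrative), in a 32-bit mode: A = 5 gives
   (5 >> 31) - 5 = -5 (sign bit set); A = 0 gives 0 - 0 = 0; and
   A = -3 gives -1 - (-3) = 2 (sign bit clear).  */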
5838
5839 if (code == GT)
5840 {
5841 if (rtx_equal_p (subtarget, op0))
5842 subtarget = 0;
5843
5844 tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
5845 GET_MODE_BITSIZE (mode) - 1,
5846 subtarget, 0);
5847 if (tem)
5848 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5849 OPTAB_WIDEN);
5850 }
5851
5852 if (code == EQ || code == NE)
5853 {
5854 /* For EQ or NE, one way to do the comparison is to apply an operation
5855 that converts the operand into a positive number if it is nonzero
5856 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5857 for NE we negate. This puts the result in the sign bit. Then we
5858 normalize with a shift, if needed.
5859
5860 Two operations that can do the above actions are ABS and FFS, so try
5861 them. If that doesn't work, and MODE is smaller than a full word,
5862 we can use zero-extension to the wider mode (an unsigned conversion)
5863 as the operation. */
5864
5865 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5866 that is compensated by the subsequent overflow when subtracting
5867 one / negating. */
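/* For example (illustrative), for EQ in a 32-bit mode:
   abs (A) - 1 is negative exactly when A == 0, since abs (A) >= 1
   for any other A; for A == INT_MIN, abs (A) is INT_MIN and
   INT_MIN - 1 wraps to INT_MAX, correctly leaving the sign bit
   clear.  */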
5868
5869 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5870 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5871 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5872 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5873 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5874 {
5875 tem = convert_modes (word_mode, mode, op0, 1);
5876 mode = word_mode;
5877 }
5878
5879 if (tem != 0)
5880 {
5881 if (code == EQ)
5882 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5883 0, OPTAB_WIDEN);
5884 else
5885 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5886 }
5887
5888 /* If we couldn't do it that way, for NE we can "or" the two's complement
5889 of the value with itself. For EQ, we take the one's complement of
5890 that "or", which is an extra insn, so we only handle EQ if branches
5891 are expensive. */
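/* For example (illustrative): A = 0 gives 0 | 0 = 0; any A != 0
   gives -A | A with the sign bit set, because at least one of A
   and -A is negative (A == INT_MIN is its own negation and is
   itself negative).  */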
5892
5893 if (tem == 0
5894 && (code == NE
5895 || BRANCH_COST (optimize_insn_for_speed_p (),
5896 false) > 1))
5897 {
5898 if (rtx_equal_p (subtarget, op0))
5899 subtarget = 0;
5900
5901 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5902 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5903 OPTAB_WIDEN);
5904
5905 if (tem && code == EQ)
5906 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5907 }
5908 }
5909
5910 if (tem && normalizep)
5911 tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
5912 GET_MODE_BITSIZE (mode) - 1,
5913 subtarget, normalizep == 1);
5914
5915 if (tem)
5916 {
5917 if (!target)
5918 ;
5919 else if (GET_MODE (tem) != target_mode)
5920 {
5921 convert_move (target, tem, 0);
5922 tem = target;
5923 }
5924 else if (!subtarget)
5925 {
5926 emit_move_insn (target, tem);
5927 tem = target;
5928 }
5929 }
5930 else
5931 delete_insns_since (last);
5932
5933 return tem;
5934 }
5935
5936 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5937 and storing in TARGET. Normally return TARGET.
5938 Return 0 if that cannot be done.
5939
5940 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5941 it is VOIDmode, they cannot both be CONST_INT.
5942
5943 UNSIGNEDP is for the case where we have to widen the operands
5944 to perform the operation. It says to use zero-extension.
5945
5946 NORMALIZEP is 1 if we should convert the result to be either zero
5947 or one. NORMALIZEP is -1 if we should convert the result to be
5948 either zero or -1. If NORMALIZEP is zero, the result will be left
5949 "raw" out of the scc insn. */
5950
5951 rtx
5952 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5953 machine_mode mode, int unsignedp, int normalizep)
5954 {
5955 machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5956 enum rtx_code rcode;
5957 rtx subtarget;
5958 rtx tem, trueval;
5959 rtx_insn *last;
5960
5961 /* If we compare constants, we shouldn't use a store-flag operation,
5962 but a constant load. We can get there via the vanilla route that
5963 usually generates a compare-branch sequence, but will in this case
5964 fold the comparison to a constant, and thus elide the branch. */
5965 if (CONSTANT_P (op0) && CONSTANT_P (op1))
5966 return NULL_RTX;
5967
5968 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5969 target_mode);
5970 if (tem)
5971 return tem;
5972
5973 /* If we reached here, we can't do this with a scc insn, however there
5974 are some comparisons that can be done in other ways. Don't do any
5975 of these cases if branches are very cheap. */
5976 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5977 return 0;
5978
5979 /* See what we need to return. We can only return a 1, -1, or the
5980 sign bit. */
5981
5982 if (normalizep == 0)
5983 {
5984 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5985 normalizep = STORE_FLAG_VALUE;
5986
5987 else if (val_signbit_p (mode, STORE_FLAG_VALUE))
5988 ;
5989 else
5990 return 0;
5991 }
5992
5993 last = get_last_insn ();
5994
5995 /* If optimizing, use different pseudo registers for each insn, instead
5996 of reusing the same pseudo. This leads to better CSE, but slows
5997 down the compiler, since there are more pseudos. */
5998 subtarget = (!optimize
5999 && (target_mode == mode)) ? target : NULL_RTX;
6000 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
6001
6002 /* For floating-point comparisons, try the reverse comparison or try
6003 changing the "orderedness" of the comparison. */
6004 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
6005 {
6006 enum rtx_code first_code;
6007 bool and_them;
6008
6009 rcode = reverse_condition_maybe_unordered (code);
6010 if (can_compare_p (rcode, mode, ccp_store_flag)
6011 && (code == ORDERED || code == UNORDERED
6012 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
6013 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
6014 {
6015 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
6016 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
6017
6018 /* For the reverse comparison, use either an addition or a XOR. */
6019 if (want_add
6020 && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
6021 optimize_insn_for_speed_p ()) == 0)
6022 {
6023 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
6024 STORE_FLAG_VALUE, target_mode);
6025 if (tem)
6026 return expand_binop (target_mode, add_optab, tem,
6027 gen_int_mode (normalizep, target_mode),
6028 target, 0, OPTAB_WIDEN);
6029 }
6030 else if (!want_add
6031 && rtx_cost (trueval, mode, XOR, 1,
6032 optimize_insn_for_speed_p ()) == 0)
6033 {
6034 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
6035 normalizep, target_mode);
6036 if (tem)
6037 return expand_binop (target_mode, xor_optab, tem, trueval,
6038 target, INTVAL (trueval) >= 0,
6039 OPTAB_WIDEN);
6040 }
6041 }
6042
6043 delete_insns_since (last);
6044
6045 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
6046 if (code == ORDERED || code == UNORDERED)
6047 return 0;
6048
6049 and_them = split_comparison (code, mode, &first_code, &code);
6050
6051 /* If there are no NaNs, the first comparison should always fall through.
6052 Effectively change the comparison to the other one. */
6053 if (!HONOR_NANS (mode))
6054 {
6055 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
6056 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
6057 target_mode);
6058 }
6059
6060 if (!HAVE_conditional_move)
6061 return 0;
6062
6063 /* Do not turn a trapping comparison into a non-trapping one. */
6064 if ((code != EQ && code != NE && code != UNEQ && code != LTGT)
6065 && flag_trapping_math)
6066 return 0;
6067
6068 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
6069 conditional move. */
6070 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
6071 normalizep, target_mode);
6072 if (tem == 0)
6073 return 0;
6074
6075 if (and_them)
6076 tem = emit_conditional_move (target, code, op0, op1, mode,
6077 tem, const0_rtx, GET_MODE (tem), 0);
6078 else
6079 tem = emit_conditional_move (target, code, op0, op1, mode,
6080 trueval, tem, GET_MODE (tem), 0);
6081
6082 if (tem == 0)
6083 delete_insns_since (last);
6084 return tem;
6085 }
6086
6087 /* The remaining tricks only apply to integer comparisons. */
6088
6089 scalar_int_mode int_mode;
6090 if (is_int_mode (mode, &int_mode))
6091 return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
6092 unsignedp, normalizep, trueval);
6093
6094 return 0;
6095 }
6096
6097 /* Like emit_store_flag, but always succeeds. */
6098
6099 rtx
6100 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
6101 machine_mode mode, int unsignedp, int normalizep)
6102 {
6103 rtx tem;
6104 rtx_code_label *label;
6105 rtx trueval, falseval;
6106
6107 /* First see if emit_store_flag can do the job. */
6108 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
6109 if (tem != 0)
6110 return tem;
6111
6112 /* If one operand is constant, make it the second one. Only do this
6113 if the other operand is not constant as well. */
6114 if (swap_commutative_operands_p (op0, op1))
6115 {
6116 std::swap (op0, op1);
6117 code = swap_condition (code);
6118 }
6119
6120 if (mode == VOIDmode)
6121 mode = GET_MODE (op0);
6122
6123 if (!target)
6124 target = gen_reg_rtx (word_mode);
6125
6126 /* If this failed, we have to do this with set/compare/jump/set code.
6127 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
6128 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
6129 if (code == NE
6130 && GET_MODE_CLASS (mode) == MODE_INT
6131 && REG_P (target)
6132 && op0 == target
6133 && op1 == const0_rtx)
6134 {
6135 label = gen_label_rtx ();
6136 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
6137 NULL_RTX, NULL, label,
6138 profile_probability::uninitialized ());
6139 emit_move_insn (target, trueval);
6140 emit_label (label);
6141 return target;
6142 }
6143
6144 if (!REG_P (target)
6145 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
6146 target = gen_reg_rtx (GET_MODE (target));
6147
6148 /* Jump in the right direction if the target cannot implement CODE
6149 but can jump on its reverse condition. */
6150 falseval = const0_rtx;
6151 if (! can_compare_p (code, mode, ccp_jump)
6152 && (! FLOAT_MODE_P (mode)
6153 || code == ORDERED || code == UNORDERED
6154 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
6155 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
6156 {
6157 enum rtx_code rcode;
6158 if (FLOAT_MODE_P (mode))
6159 rcode = reverse_condition_maybe_unordered (code);
6160 else
6161 rcode = reverse_condition (code);
6162
6163 /* Canonicalize to UNORDERED for the libcall. */
6164 if (can_compare_p (rcode, mode, ccp_jump)
6165 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
6166 {
6167 falseval = trueval;
6168 trueval = const0_rtx;
6169 code = rcode;
6170 }
6171 }
6172
6173 emit_move_insn (target, trueval);
6174 label = gen_label_rtx ();
6175 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
6176 label, profile_probability::uninitialized ());
6177
6178 emit_move_insn (target, falseval);
6179 emit_label (label);
6180
6181 return target;
6182 }
6183
6184 /* Helper function for canonicalize_cmp_for_target. Swap between inclusive
6185 and exclusive ranges in order to create an equivalent comparison. See
6186 canonicalize_cmp_for_target for the possible cases. */
6187
6188 static enum rtx_code
6189 equivalent_cmp_code (enum rtx_code code)
6190 {
6191 switch (code)
6192 {
6193 case GT:
6194 return GE;
6195 case GE:
6196 return GT;
6197 case LT:
6198 return LE;
6199 case LE:
6200 return LT;
6201 case GTU:
6202 return GEU;
6203 case GEU:
6204 return GTU;
6205 case LTU:
6206 return LEU;
6207 case LEU:
6208 return LTU;
6209
6210 default:
6211 return code;
6212 }
6213 }
6214
6215 /* Choose the more appropriate immediate in scalar integer comparisons. The
6216 purpose of this is to end up with an immediate which can be loaded into a
6217 register in fewer moves, if possible.
6218
6219 For each integer comparison there exists an equivalent choice:
6220 i) a > b or a >= b + 1
6221 ii) a <= b or a < b + 1
6222 iii) a >= b or a > b - 1
6223 iv) a < b or a <= b - 1
6224
6225 MODE is the mode of the first operand.
6226 CODE points to the comparison code.
6227 IMM points to the rtx containing the immediate. *IMM must satisfy
6228 CONST_SCALAR_INT_P on entry and continues to satisfy CONST_SCALAR_INT_P
6229 on exit. */
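/* For example (hypothetical target costs): if loading 0x1000 into
   a register is cheaper than loading 0xfff, then "x <= 0xfff"
   (LEU) is rewritten as "x < 0x1000" (LTU), per case ii) above.  */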
6230
6231 void
6232 canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
6233 {
6234 if (!SCALAR_INT_MODE_P (mode))
6235 return;
6236
6237 int to_add = 0;
6238 enum signop sgn = unsigned_condition_p (*code) ? UNSIGNED : SIGNED;
6239
6240 /* Extract the immediate value from the rtx. */
6241 wide_int imm_val = rtx_mode_t (*imm, mode);
6242
6243 if (*code == GT || *code == GTU || *code == LE || *code == LEU)
6244 to_add = 1;
6245 else if (*code == GE || *code == GEU || *code == LT || *code == LTU)
6246 to_add = -1;
6247 else
6248 return;
6249
6250 /* Check for overflow/underflow in the case of signed values and
6251 wrapping around in the case of unsigned values. If either occurs,
6252 cancel the optimization. */
6253 wi::overflow_type overflow = wi::OVF_NONE;
6254 wide_int imm_modif;
6255
6256 if (to_add == 1)
6257 imm_modif = wi::add (imm_val, 1, sgn, &overflow);
6258 else
6259 imm_modif = wi::sub (imm_val, 1, sgn, &overflow);
6260
6261 if (overflow)
6262 return;
6263
6264 /* The following creates a pseudo; if we cannot do that, bail out. */
6265 if (!can_create_pseudo_p ())
6266 return;
6267
6268 rtx reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
6269 rtx new_imm = immed_wide_int_const (imm_modif, mode);
6270
6271 rtx_insn *old_rtx = gen_move_insn (reg, *imm);
6272 rtx_insn *new_rtx = gen_move_insn (reg, new_imm);
6273
6274 /* Update the immediate and the code. */
6275 if (insn_cost (old_rtx, true) > insn_cost (new_rtx, true))
6276 {
6277 *code = equivalent_cmp_code (*code);
6278 *imm = new_imm;
6279 }
6280 }
6281
6282
6283 \f
6284 /* Perform possibly multi-word comparison and conditional jump to LABEL
6285 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
6286 now a thin wrapper around do_compare_rtx_and_jump. */
6287
6288 static void
6289 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
6290 rtx_code_label *label)
6291 {
6292 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
6293 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
6294 NULL, label, profile_probability::uninitialized ());
6295 }