/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "langhooks.h"
#include "df.h"
#include "target.h"
#include "expmed.h"

struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx lshift_value (enum machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);

/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static inline rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
                       GET_MODE_PRECISION (mode)), mode);
}
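
/* For example, assuming a 32-bit SImode, mask_rtx (SImode, 4, 8, false)
   yields the constant 0x00000ff0, and mask_rtx (SImode, 4, 8, true) its
   complement 0xfffff00f.  */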

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
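
/* This relies on the usual bit trick: X & (X - 1) clears the lowest set
   bit of X, so the result is zero exactly when X has at most one bit set.
   For instance, 8 & 7 == 0 (a power of two), 12 & 11 == 8 (not a power
   of two), and 0 passes trivially.  */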

struct init_expmed_rtl
{
  struct rtx_def reg;
  struct rtx_def plus;
  struct rtx_def neg;
  struct rtx_def mult;
  struct rtx_def sdiv;
  struct rtx_def udiv;
  struct rtx_def sdiv_32;
  struct rtx_def smod_32;
  struct rtx_def wide_mult;
  struct rtx_def wide_lshr;
  struct rtx_def wide_trunc;
  struct rtx_def shift;
  struct rtx_def shift_mult;
  struct rtx_def shift_add;
  struct rtx_def shift_sub0;
  struct rtx_def shift_sub1;
  struct rtx_def zext;
  struct rtx_def trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};

static void
init_expmed_one_conv (struct init_expmed_rtl *all, enum machine_mode to_mode,
                      enum machine_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  /* We're given no information about the true size of a partial integer,
     only the size of the "full" integer it requires for storage.  For
     comparison purposes here, reduce the bit size by one in that case.  */
  to_size = (GET_MODE_BITSIZE (to_mode)
             - (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT));
  from_size = (GET_MODE_BITSIZE (from_mode)
               - (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT));

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? &all->trunc : &all->zext);

  PUT_MODE (&all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
}
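
/* For example, on a hypothetical target where PSImode occupies 32 bits of
   storage, GET_MODE_BITSIZE reports 32 for both SImode and PSImode; the
   adjustment above compares PSImode as 31 bits, so a conversion from
   SImode to PSImode is costed as a truncation rather than an extension.  */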

static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      enum machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  enum machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (&all->reg, mode);
  PUT_MODE (&all->plus, mode);
  PUT_MODE (&all->neg, mode);
  PUT_MODE (&all->mult, mode);
  PUT_MODE (&all->sdiv, mode);
  PUT_MODE (&all->udiv, mode);
  PUT_MODE (&all->sdiv_32, mode);
  PUT_MODE (&all->smod_32, mode);
  PUT_MODE (&all->wide_trunc, mode);
  PUT_MODE (&all->shift, mode);
  PUT_MODE (&all->shift_mult, mode);
  PUT_MODE (&all->shift_add, mode);
  PUT_MODE (&all->shift_sub0, mode);
  PUT_MODE (&all->shift_sub1, mode);
  PUT_MODE (&all->zext, mode);
  PUT_MODE (&all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (&all->plus, speed));
  set_neg_cost (speed, mode, set_src_cost (&all->neg, speed));
  set_mul_cost (speed, mode, set_src_cost (&all->mult, speed));
  set_sdiv_cost (speed, mode, set_src_cost (&all->sdiv, speed));
  set_udiv_cost (speed, mode, set_src_cost (&all->udiv, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (&all->sdiv_32, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (&all->smod_32, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (&all->shift, 1) = all->cint[m];
      XEXP (&all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (&all->shift, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (&all->shift_add, speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (&all->shift_sub0, speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (&all->shift_sub1, speed));
    }

  if (SCALAR_INT_MODE_P (mode))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (enum machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, mode, mode_from, speed);
    }
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all->zext, wider_mode);
          PUT_MODE (&all->wide_mult, wider_mode);
          PUT_MODE (&all->wide_lshr, wider_mode);
          XEXP (&all->wide_lshr, 1) = GEN_INT (mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (&all->wide_mult, speed));
          set_mul_highpart_cost (speed, mode,
                                 set_src_cost (&all->wide_trunc, speed));
        }
    }
}
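
/* After this runs for, say, SImode, shift_cost (speed, SImode, 3) holds
   the cost of (ashift:SI (reg) (const_int 3)) and
   shiftadd_cost (speed, SImode, 3) the cost of
   (plus:SI (mult:SI (reg) (const_int 8)) (reg)), figures that the
   multiply-synthesis code can later weigh against mul_cost.  */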

void
init_expmed (void)
{
  struct init_expmed_rtl all;
  enum machine_mode mode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      all.cint[m] = GEN_INT (m);
    }

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? all.cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  PUT_CODE (&all.trunc, TRUNCATE);
  XEXP (&all.trunc, 0) = &all.reg;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (enum machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Adjust bitfield memory MEM so that it points to the first unit of mode
   MODE that contains a bitfield of size BITSIZE at bit position BITNUM.
   If MODE is BLKmode, return a reference to every byte in the bitfield.
   Set *NEW_BITNUM to the bit position of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, enum machine_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  if (mode == BLKmode)
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, mode, offset, size);
    }
  else
    {
      unsigned int unit = GET_MODE_BITSIZE (mode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, mode, offset);
    }
}
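
/* For instance, narrowing to QImode with BITSIZE == 8 and BITNUM == 53
   gives UNIT == 8, so *NEW_BITNUM becomes 5 and the address is advanced
   by (53 - 5) / 8 == 6 bytes.  */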

/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              unsigned HOST_WIDE_INT bitregion_start,
                              unsigned HOST_WIDE_INT bitregion_end,
                              enum machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  enum machine_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          enum machine_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          enum machine_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}

/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
                     unsigned HOST_WIDE_INT bitsize,
                     enum machine_mode struct_mode)
{
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
            && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
                || (bitnum + bitsize) % BITS_PER_WORD == 0));
  else
    return bitnum % BITS_PER_WORD == 0;
}
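
/* For example, on a little-endian target with 32-bit words, a field at
   BITNUM == 32 of a DImode structure qualifies: it is the lowpart of the
   second word, reachable as a subreg at byte offset 32 / BITS_PER_UNIT == 4.  */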

/* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum, enum machine_mode mode)
{
  return (MEM_P (op0)
          && bitnum % BITS_PER_UNIT == 0
          && bitsize == GET_MODE_BITSIZE (mode)
          && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
              || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
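
/* E.g. a 32-bit field starting at bit 64 of a sufficiently aligned MEM
   satisfies this for SImode, so the bitfield access degenerates into a
   plain SImode load or store.  */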
\f
/* Try to use instruction INSV to store VALUE into a field of OP0.
   BITSIZE and BITNUM are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum, rtx value)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx last = get_last_insn ();
  bool copy_back = false;

  enum machine_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;
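
  /* For instance, with UNIT == 32, BITSIZE == 8 and BITNUM == 4, the
     position passed to the insv pattern becomes 32 - 8 - 4 == 20 when the
     two endianness conventions differ.  */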

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (GET_MODE (value) != op_mode)
    {
      if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
        {
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
            {
              rtx tmp;

              tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (GET_MODE (value),
                                                      value1),
                                           GET_MODE (value), 0);
              value1 = tmp;
            }
          else
            value1 = gen_lowpart (op_mode, value1);
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* Parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}

/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  rtx op0 = str_rtx;
  rtx orig_value;

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
      int byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
          || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.
         In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;

      if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
        {
          sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
          if (sub)
            {
              emit_move_insn (op0, sub);
              return true;
            }
        }
      else
        {
          sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
          if (sub)
            {
              emit_move_insn (sub, value);
              return true;
            }
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
                                    - i - 1
                                  : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                value_word,
                                                GEN_INT (BITS_PER_WORD
                                                         - new_bitsize),
                                                NULL_RTX, true,
                                                OPTAB_LIB_WIDEN);

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return true;
        }
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (GET_MODE (op0)),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      /* Do not use unaligned memory insvs for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      if (!(MEM_VOLATILE_P (op0)
            && flag_strict_volatile_bitfields > 0)
          && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                           fieldmode)
          && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
        return true;

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
                         bitregion_end, value);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 enum machine_mode fieldmode,
                 rtx value)
{
  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      enum machine_mode bestmode;
      HOST_WIDE_INT offset, size;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      bestmode = get_best_mode (bitsize, bitnum,
                                bitregion_start, bitregion_end,
                                MEM_ALIGN (str_rtx), VOIDmode,
                                MEM_VOLATILE_P (str_rtx));
      str_rtx = adjust_bitfield_address_size (str_rtx, bestmode, offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, true))
    gcc_unreachable ();
}
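
/* For example, if the bit region begins at bit 32 of the underlying
   object, the address above is advanced by 4 bytes, BITNUM drops by 32
   and the region is renumbered to start at 0, so later accesses cannot
   touch bytes outside the region, as the C++11 memory model requires.  */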
\f
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  enum machine_mode mode;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (MEM_P (op0))
    {
      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;

      if (bitregion_end)
        maxbits = bitregion_end - bitregion_start + 1;

      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;

      if (MEM_VOLATILE_P (op0)
          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
          && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
          && flag_strict_volatile_bitfields > 0)
        mode = GET_MODE (op0);
      else
        mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                              MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return;
        }

      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
    }

  mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, v, bitnum);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitnum, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
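
/* As a concrete illustration (little-endian, 32-bit MODE): storing an
   8-bit field at BITNUM == 8 expands to roughly

     temp = op0 & 0xffff00ff;             AND with mask_rtx (mode, 8, 8, 1)
     temp |= (value & 0xff) << 8;
     op0 = temp;

   where the all_one and all_zero checks elide the AND or the IOR when
   VALUE is a compile-time constant of all-ones or zero.  */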
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (bitregion_end
          && unit > BITS_PER_UNIT
          && bitpos + bitsdone - thispos + unit > bitregion_end + 1
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            {
              int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
              /* The args are chosen so that the last part includes the
                 lsb.  Give extract_bit_field the value it needs (with
                 endianness compensation) to fetch the piece we want.  */
              part = extract_fixed_bit_field (word_mode, value, thissize,
                                              total_bits - bitsize + bitsdone,
                                              NULL_RTX, 1);
            }
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD)
                            + (offset * unit / BITS_PER_WORD);
          enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
          if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
            word = word_offset ? const0_rtx : op0;
          else
            word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                          GET_MODE (SUBREG_REG (op0)));
          offset &= BITS_PER_WORD / unit - 1;
        }
      else if (REG_P (op0))
        {
          enum machine_mode op0_mode = GET_MODE (op0);
          if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
            word = offset ? const0_rtx : op0;
          else
            word = operand_subword_force (op0, offset * unit / BITS_PER_WORD,
                                          GET_MODE (op0));
          offset &= BITS_PER_WORD / unit - 1;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.  If WORD is const0_rtx,
         it is just an out-of-bounds access.  Ignore it.  */
      if (word != const0_rtx)
        store_fixed_bit_field (word, thissize, offset * unit + thispos,
                               bitregion_start, bitregion_end, part);
      bitsdone += thissize;
    }
}
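
/* For instance, splitting a 16-bit store at BITPOS == 24 with UNIT == 32
   takes two iterations: the first stores MIN (16, 32 - 24) == 8 bits at
   offset 0, the second stores the remaining 8 bits at the start of the
   next unit.  */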
\f
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
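
/* For example, with TMODE == SFmode on a 32-bit target, the extracted
   bits are first converted to SImode and then reinterpreted as SFmode
   through a lowpart subreg; a direct integer-to-float conversion would
   change the bit pattern instead of preserving it.  */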

/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXT_MODE is the mode of the extraction and the other arguments
   are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              enum machine_mode mode, enum machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && GET_MODE (op0) != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (GET_MODE_PRECISION (ext_mode)
              > GET_MODE_PRECISION (GET_MODE (spec_target)))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}

/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_output_operand (&ops[0], target, innermode);
      create_input_operand (&ops[1], op0, outermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        {
          target = ops[0].value;
          if (GET_MODE (target) != mode)
            return gen_lowpart (tmode, target);
          return target;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
        else if (imode != BLKmode)
          {
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
        else if (REG_P (op0))
          {
            rtx reg, subreg;
            imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
                                            MODE_INT);
            reg = gen_reg_rtx (imode);
            subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
            emit_move_insn (subreg, op0);
            op0 = reg;
            bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
          }
        else
          {
            HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
            rtx mem = assign_stack_temp (GET_MODE (op0), size);
            emit_move_insn (mem, op0);
            op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
          }
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If the bitfield is volatile, we need to make sure the access
     remains on a type-aligned boundary.  */
  if (GET_CODE (op0) == MEM
      && MEM_VOLATILE_P (op0)
      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
      && flag_strict_volatile_bitfields > 0)
    goto no_subreg_mode_swap;

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = mode;
  if (SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode try_mode = mode_for_size (bitsize,
                                                  GET_MODE_CLASS (tmode), 0);
      if (try_mode != BLKmode)
        mode1 = try_mode;
    }
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (mode1)
      && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
    {
      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
    {
      op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int backwards = WORDS_BIG_ENDIAN;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (backwards
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                             bitsize - i * BITS_PER_WORD),
                                   bitnum + bit_offset, 1, target_part,
                                   mode, word_mode, fallback_p);

          gcc_assert (target_part);
          if (!result_part)
            {
              delete_insns_since (last);
              return NULL;
            }

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    backwards ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
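      /* E.g. extracting a signed 40-bit field into a 64-bit MODE shifts
         left by 24 and then arithmetically right by 24, replicating the
         field's sign bit across the upper 24 bits.  */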
1581 target = expand_shift (LSHIFT_EXPR, mode, target,
1582 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1583 return expand_shift (RSHIFT_EXPR, mode, target,
1584 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1585 }
1586
1587 /* If OP0 is a multi-word register, narrow it to the affected word.
1588 If the region spans two words, defer to extract_split_bit_field. */
1589 if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1590 {
1591 op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
1592 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1593 bitnum %= BITS_PER_WORD;
1594 if (bitnum + bitsize > BITS_PER_WORD)
1595 {
1596 if (!fallback_p)
1597 return NULL_RTX;
1598 target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
1599 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1600 }
1601 }
1602
1603 /* From here on we know the desired field is smaller than a word.
1604 If OP0 is a register, it too fits within a word. */
1605 enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1606 extraction_insn extv;
1607 if (!MEM_P (op0)
1608 /* ??? We could limit the structure size to the part of OP0 that
1609 contains the field, with appropriate checks for endianness
1610 and TRULY_NOOP_TRUNCATION. */
1611 && get_best_reg_extraction_insn (&extv, pattern,
1612 GET_MODE_BITSIZE (GET_MODE (op0)),
1613 tmode))
1614 {
1615 rtx result = extract_bit_field_using_extv (&extv, op0, bitsize, bitnum,
1616 unsignedp, target, mode,
1617 tmode);
1618 if (result)
1619 return result;
1620 }
1621
1622 /* If OP0 is a memory, try copying it to a register and seeing if a
1623 cheap register alternative is available. */
1624 if (MEM_P (op0))
1625 {
1626 /* Do not use extv/extzv for volatile bitfields when
1627 -fstrict-volatile-bitfields is in effect. */
1628 if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
1629 && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1630 tmode))
1631 {
1632 rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
1633 bitnum, unsignedp,
1634 target, mode,
1635 tmode);
1636 if (result)
1637 return result;
1638 }
1639
1640 rtx last = get_last_insn ();
1641
1642 /* Try loading part of OP0 into a register and extracting the
1643 bitfield from that. */
1644 unsigned HOST_WIDE_INT bitpos;
1645 rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1646 0, 0, tmode, &bitpos);
1647 if (xop0)
1648 {
1649 xop0 = copy_to_reg (xop0);
1650 rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1651 unsignedp, target,
1652 mode, tmode, false);
1653 if (result)
1654 return result;
1655 delete_insns_since (last);
1656 }
1657 }
1658
1659 if (!fallback_p)
1660 return NULL;
1661
1662 /* Find a correspondingly-sized integer field, so we can apply
1663 shifts and masks to it. */
1664 int_mode = int_mode_for_mode (tmode);
1665 if (int_mode == BLKmode)
1666 int_mode = int_mode_for_mode (mode);
1667 /* Should probably push op0 out to memory and then do a load. */
1668 gcc_assert (int_mode != BLKmode);
1669
1670 target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
1671 target, unsignedp);
1672 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1673 }
1674
1675 /* Generate code to extract a bit-field from STR_RTX
1676 containing BITSIZE bits, starting at BITNUM,
1677 and put it in TARGET if possible (if TARGET is nonzero).
1678 Regardless of TARGET, we return the rtx for where the value is placed.
1679
1680 STR_RTX is the structure containing the byte (a REG or MEM).
1681 UNSIGNEDP is nonzero if this is an unsigned bit field.
1682 MODE is the natural mode of the field value once extracted.
1683 TMODE is the mode the caller would like the value to have;
1684 but the value may be returned with type MODE instead.
1685
1686 If a TARGET is specified and we can store in it at no extra cost,
1687 we do so, and return TARGET.
1688 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1689 if they are equally easy. */
1690
1691 rtx
1692 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1693 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1694 enum machine_mode mode, enum machine_mode tmode)
1695 {
1696 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1697 target, mode, tmode, true);
1698 }
1699 \f
1700 /* Use shifts and boolean operations to extract a field of BITSIZE bits
1701 from bit BITNUM of OP0.
1702
1703 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1704 If TARGET is nonzero, attempts to store the value there
1705 and return TARGET, but this is not guaranteed.
1706 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1707
1708 static rtx
1709 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1710 unsigned HOST_WIDE_INT bitsize,
1711 unsigned HOST_WIDE_INT bitnum, rtx target,
1712 int unsignedp)
1713 {
1714 enum machine_mode mode;
1715
1716 if (MEM_P (op0))
1717 {
1718 /* Get the proper mode to use for this field. We want a mode that
1719 includes the entire field. If such a mode would be larger than
1720 a word, we won't be doing the extraction the normal way. */
1721
1722 if (MEM_VOLATILE_P (op0)
1723 && flag_strict_volatile_bitfields > 0)
1724 {
1725 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1726 mode = GET_MODE (op0);
1727 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1728 mode = GET_MODE (target);
1729 else
1730 mode = tmode;
1731 }
1732 else
1733 mode = get_best_mode (bitsize, bitnum, 0, 0,
1734 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1735
1736 if (mode == VOIDmode)
1737 /* The only way this should occur is if the field spans word
1738 boundaries. */
1739 return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
1740
1741 unsigned int total_bits = GET_MODE_BITSIZE (mode);
1742 HOST_WIDE_INT bit_offset = bitnum - bitnum % total_bits;
1743
1744 /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
1745 if it results in a multi-word access where we otherwise wouldn't
1746 have one. So, check for that case here. */
1747 if (MEM_P (op0)
1748 && MEM_VOLATILE_P (op0)
1749 && flag_strict_volatile_bitfields > 0
1750 && bitnum % BITS_PER_UNIT + bitsize <= total_bits
1751 && bitnum % GET_MODE_BITSIZE (mode) + bitsize > total_bits)
1752 {
1753 /* If the target doesn't support unaligned access, give up and
1754 split the access into two. */
1755 if (STRICT_ALIGNMENT)
1756 return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
1757 bit_offset = bitnum - bitnum % BITS_PER_UNIT;
1758 }
1759 op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
1760 bitnum -= bit_offset;
1761 }
1762
1763 mode = GET_MODE (op0);
1764 gcc_assert (SCALAR_INT_MODE_P (mode));
1765
1766 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1767 for invalid input, such as extracting the equivalent of f5 from
1768 gcc.dg/pr48335-2.c. */
1769
1770 if (BYTES_BIG_ENDIAN)
1771 /* BITNUM is the distance between our msb and that of OP0.
1772 Convert it to the distance from the lsb. */
1773 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
1774
1775 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
1776 We have reduced the big-endian case to the little-endian case. */
1777
1778 if (unsignedp)
1779 {
1780 if (bitnum)
1781 {
1782 /* If the field does not already start at the lsb,
1783 shift it so it does. */
1784 /* Maybe propagate the target for the shift. */
1785 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1786 if (tmode != mode)
1787 subtarget = 0;
1788 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
1789 }
1790 /* Convert the value to the desired mode. */
1791 if (mode != tmode)
1792 op0 = convert_to_mode (tmode, op0, 1);
1793
1794 /* Unless the msb of the field used to be the msb when we shifted,
1795 mask out the upper bits. */
1796
1797 if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
1798 return expand_binop (GET_MODE (op0), and_optab, op0,
1799 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1800 target, 1, OPTAB_LIB_WIDEN);
1801 return op0;
1802 }
1803
1804 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1805 then arithmetic-shift its lsb to the lsb of the word. */
1806 op0 = force_reg (mode, op0);
1807
1808 /* Find the narrowest integer mode that contains the field. */
1809
1810 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1811 mode = GET_MODE_WIDER_MODE (mode))
1812 if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
1813 {
1814 op0 = convert_to_mode (mode, op0, 0);
1815 break;
1816 }
1817
1818 if (mode != tmode)
1819 target = 0;
1820
1821 if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
1822 {
1823 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
1824 /* Maybe propagate the target for the shift. */
1825 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1826 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1827 }
1828
1829 return expand_shift (RSHIFT_EXPR, mode, op0,
1830 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
1831 }
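
/* Editor's illustration (not part of GCC): the shift-and-mask strategy
   above, specialized to a 32-bit word held in a plain unsigned int.
   BITNUM counts from the lsb, as in the little-endian case the code
   reduces to; the function names are hypothetical.  */

static unsigned int
example_extract_unsigned (unsigned int word, int bitnum, int bitsize)
{
  /* Shift the field down to the lsb, then mask off the upper bits.
     The conditional avoids the undefined shift when BITSIZE == 32.  */
  unsigned int mask = (bitsize < 32 ? 1u << bitsize : 0) - 1;
  return (word >> bitnum) & mask;
}

static int
example_extract_signed (unsigned int word, int bitnum, int bitsize)
{
  /* Shift the field's msb up to bit 31, then arithmetic-shift it back
     down; assumes two's complement and an arithmetic signed >>.  */
  return (int) (word << (32 - bitnum - bitsize)) >> (32 - bitsize);
}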
1832
1833 /* Return a constant integer rtx (CONST_INT, CONST_WIDE_INT or
1834 CONST_DOUBLE) with the value VALUE << BITPOS. */
1835
1836 static rtx
1837 lshift_value (enum machine_mode mode, unsigned HOST_WIDE_INT value,
1838 int bitpos)
1839 {
1840 return immed_wide_int_const (wi::lshift (value, bitpos), mode);
1841 }
1842 \f
1843 /* Extract a bit field that is split across two words
1844 and return an RTX for the result.
1845
1846 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1847 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1848 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend. */
1849
1850 static rtx
1851 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1852 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1853 {
1854 unsigned int unit;
1855 unsigned int bitsdone = 0;
1856 rtx result = NULL_RTX;
1857 int first = 1;
1858
1859 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1860 much at a time. */
1861 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1862 unit = BITS_PER_WORD;
1863 else
1864 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1865
1866 while (bitsdone < bitsize)
1867 {
1868 unsigned HOST_WIDE_INT thissize;
1869 rtx part, word;
1870 unsigned HOST_WIDE_INT thispos;
1871 unsigned HOST_WIDE_INT offset;
1872
1873 offset = (bitpos + bitsdone) / unit;
1874 thispos = (bitpos + bitsdone) % unit;
1875
1876 /* THISSIZE must not overrun a word boundary. Otherwise,
1877 extract_fixed_bit_field will call us again, and we will mutually
1878 recurse forever. */
1879 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1880 thissize = MIN (thissize, unit - thispos);
1881
1882 /* If OP0 is a register, then handle OFFSET here.
1883
1884 When handling multiword bitfields, extract_bit_field may pass
1885 down a word_mode SUBREG of a larger REG for a bitfield that actually
1886 crosses a word boundary. Thus, for a SUBREG, we must find
1887 the current word starting from the base register. */
1888 if (GET_CODE (op0) == SUBREG)
1889 {
1890 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1891 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1892 GET_MODE (SUBREG_REG (op0)));
1893 offset = 0;
1894 }
1895 else if (REG_P (op0))
1896 {
1897 word = operand_subword_force (op0, offset, GET_MODE (op0));
1898 offset = 0;
1899 }
1900 else
1901 word = op0;
1902
1903 /* Extract the parts in bit-counting order,
1904 whose meaning is determined by BYTES_BIG_ENDIAN.
1905 OFFSET is in UNITs, and UNIT is in bits. */
1906 part = extract_fixed_bit_field (word_mode, word, thissize,
1907 offset * unit + thispos, 0, 1);
1908 bitsdone += thissize;
1909
1910 /* Shift this part into place for the result. */
1911 if (BYTES_BIG_ENDIAN)
1912 {
1913 if (bitsize != bitsdone)
1914 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1915 bitsize - bitsdone, 0, 1);
1916 }
1917 else
1918 {
1919 if (bitsdone != thissize)
1920 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1921 bitsdone - thissize, 0, 1);
1922 }
1923
1924 if (first)
1925 result = part;
1926 else
1927 /* Combine the parts with bitwise or. This works
1928 because we extracted each part as an unsigned bit field. */
1929 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1930 OPTAB_LIB_WIDEN);
1931
1932 first = 0;
1933 }
1934
1935 /* Unsigned bit field: we are done. */
1936 if (unsignedp)
1937 return result;
1938 /* Signed bit field: sign-extend with two arithmetic shifts. */
1939 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1940 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1941 return expand_shift (RSHIFT_EXPR, word_mode, result,
1942 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1943 }
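
/* Editor's illustration (not part of GCC): a field that straddles two
   32-bit words, extracted the way the loop above does it.  Each part is
   pulled out as an unsigned field and the parts are combined with OR.
   Little-endian bit numbering is assumed, with 0 < BITPOS < 32,
   BITPOS + BITSIZE > 32 and BITSIZE - (32 - BITPOS) < 32.  */

static unsigned long long
example_extract_split (unsigned int lo_word, unsigned int hi_word,
                       int bitpos, int bitsize)
{
  int lo_bits = 32 - bitpos;            /* bits taken from the first word */
  unsigned long long part0 = lo_word >> bitpos;
  unsigned long long part1 = hi_word & ((1u << (bitsize - lo_bits)) - 1);
  return part0 | (part1 << lo_bits);    /* combine with bitwise OR */
}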
1944 \f
1945 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
1946 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
1947 MODE, fill the upper bits with zeros. Fail if the layout of either
1948 mode is unknown (as for CC modes) or if the extraction would involve
1949 unprofitable mode punning. Return the value on success, otherwise
1950 return null.
1951
1952 This is different from gen_lowpart* in these respects:
1953
1954 - the returned value must always be considered an rvalue
1955
1956 - when MODE is wider than SRC_MODE, the extraction involves
1957 a zero extension
1958
1959 - when MODE is smaller than SRC_MODE, the extraction involves
1960 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
1961
1962 In other words, this routine performs a computation, whereas the
1963 gen_lowpart* routines are conceptually lvalue or rvalue subreg
1964 operations. */
1965
1966 rtx
1967 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
1968 {
1969 enum machine_mode int_mode, src_int_mode;
1970
1971 if (mode == src_mode)
1972 return src;
1973
1974 if (CONSTANT_P (src))
1975 {
1976 /* simplify_gen_subreg can't be used here, as if simplify_subreg
1977 fails, it will happily create (subreg (symbol_ref)) or similar
1978 invalid SUBREGs. */
1979 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
1980 rtx ret = simplify_subreg (mode, src, src_mode, byte);
1981 if (ret)
1982 return ret;
1983
1984 if (GET_MODE (src) == VOIDmode
1985 || !validate_subreg (mode, src_mode, src, byte))
1986 return NULL_RTX;
1987
1988 src = force_reg (GET_MODE (src), src);
1989 return gen_rtx_SUBREG (mode, src, byte);
1990 }
1991
1992 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
1993 return NULL_RTX;
1994
1995 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
1996 && MODES_TIEABLE_P (mode, src_mode))
1997 {
1998 rtx x = gen_lowpart_common (mode, src);
1999 if (x)
2000 return x;
2001 }
2002
2003 src_int_mode = int_mode_for_mode (src_mode);
2004 int_mode = int_mode_for_mode (mode);
2005 if (src_int_mode == BLKmode || int_mode == BLKmode)
2006 return NULL_RTX;
2007
2008 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2009 return NULL_RTX;
2010 if (!MODES_TIEABLE_P (int_mode, mode))
2011 return NULL_RTX;
2012
2013 src = gen_lowpart (src_int_mode, src);
2014 src = convert_modes (int_mode, src_int_mode, src, true);
2015 src = gen_lowpart (mode, src);
2016 return src;
2017 }
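
/* Editor's illustration (not part of GCC): the rvalue character of
   extract_low_bits, sketched with host types.  Reading the low 16 bits
   of a float goes through an integer mode of the same size and then
   truncates; memcpy (from <string.h>, already available here) stands in
   for the mode punning.  Assumes 32-bit float and unsigned int.  */

static unsigned short
example_low_bits_of_float (float f)
{
  unsigned int bits;                    /* same-size integer mode */
  memcpy (&bits, &f, sizeof bits);      /* bit pattern, not a conversion */
  return (unsigned short) bits;         /* truncate to the low 16 bits */
}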
2018 \f
2019 /* Add INC into TARGET. */
2020
2021 void
2022 expand_inc (rtx target, rtx inc)
2023 {
2024 rtx value = expand_binop (GET_MODE (target), add_optab,
2025 target, inc,
2026 target, 0, OPTAB_LIB_WIDEN);
2027 if (value != target)
2028 emit_move_insn (target, value);
2029 }
2030
2031 /* Subtract DEC from TARGET. */
2032
2033 void
2034 expand_dec (rtx target, rtx dec)
2035 {
2036 rtx value = expand_binop (GET_MODE (target), sub_optab,
2037 target, dec,
2038 target, 0, OPTAB_LIB_WIDEN);
2039 if (value != target)
2040 emit_move_insn (target, value);
2041 }
2042 \f
2043 /* Output a shift instruction for expression code CODE,
2044 with SHIFTED being the rtx for the value to shift,
2045 and AMOUNT the rtx for the amount to shift by.
2046 Store the result in the rtx TARGET, if that is convenient.
2047 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2048 Return the rtx for where the value is. */
2049
2050 static rtx
2051 expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
2052 rtx amount, rtx target, int unsignedp)
2053 {
2054 rtx op1, temp = 0;
2055 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2056 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2057 optab lshift_optab = ashl_optab;
2058 optab rshift_arith_optab = ashr_optab;
2059 optab rshift_uns_optab = lshr_optab;
2060 optab lrotate_optab = rotl_optab;
2061 optab rrotate_optab = rotr_optab;
2062 enum machine_mode op1_mode;
2063 int attempt;
2064 bool speed = optimize_insn_for_speed_p ();
2065
2066 op1 = amount;
2067 op1_mode = GET_MODE (op1);
2068
2069 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2070 shift amount is a vector, use the vector/vector shift patterns. */
2071 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2072 {
2073 lshift_optab = vashl_optab;
2074 rshift_arith_optab = vashr_optab;
2075 rshift_uns_optab = vlshr_optab;
2076 lrotate_optab = vrotl_optab;
2077 rrotate_optab = vrotr_optab;
2078 }
2079
2080 /* We used to detect shift counts computed by NEGATE_EXPR
2081 and shift in the other direction, but that does not
2082 work on all machines. */
2083
2084 if (SHIFT_COUNT_TRUNCATED)
2085 {
2086 if (CONST_INT_P (op1)
2087 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2088 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2089 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2090 % GET_MODE_BITSIZE (mode));
2091 else if (GET_CODE (op1) == SUBREG
2092 && subreg_lowpart_p (op1)
2093 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2094 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2095 op1 = SUBREG_REG (op1);
2096 }
2097
2098 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
2099 prefer left rotation; if op1 is from bitsize / 2 + 1 to
2100 bitsize - 1, rotate in the other direction with an amount of
2101 1 .. bitsize / 2 - 1 instead. */
2102 if (rotate
2103 && CONST_INT_P (op1)
2104 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (mode) / 2 + left,
2105 GET_MODE_BITSIZE (mode) - 1))
2106 {
2107 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
2108 left = !left;
2109 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2110 }
2111
2112 if (op1 == const0_rtx)
2113 return shifted;
2114
2115 /* Check whether it's cheaper to implement a left shift by a constant
2116 bit count by a sequence of additions. */
2117 if (code == LSHIFT_EXPR
2118 && CONST_INT_P (op1)
2119 && INTVAL (op1) > 0
2120 && INTVAL (op1) < GET_MODE_PRECISION (mode)
2121 && INTVAL (op1) < MAX_BITS_PER_WORD
2122 && (shift_cost (speed, mode, INTVAL (op1))
2123 > INTVAL (op1) * add_cost (speed, mode))
2124 && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2125 {
2126 int i;
2127 for (i = 0; i < INTVAL (op1); i++)
2128 {
2129 temp = force_reg (mode, shifted);
2130 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2131 unsignedp, OPTAB_LIB_WIDEN);
2132 }
2133 return shifted;
2134 }
2135
2136 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2137 {
2138 enum optab_methods methods;
2139
2140 if (attempt == 0)
2141 methods = OPTAB_DIRECT;
2142 else if (attempt == 1)
2143 methods = OPTAB_WIDEN;
2144 else
2145 methods = OPTAB_LIB_WIDEN;
2146
2147 if (rotate)
2148 {
2149 /* Widening does not work for rotation. */
2150 if (methods == OPTAB_WIDEN)
2151 continue;
2152 else if (methods == OPTAB_LIB_WIDEN)
2153 {
2154 /* If we have been unable to open-code this by a rotation,
2155 do it as the IOR of two shifts. I.e., to rotate A
2156 by N bits, compute
2157 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2158 where C is the bitsize of A.
2159
2160 It is theoretically possible that the target machine might
2161 not be able to perform either shift and hence we would
2162 be making two libcalls rather than just the one for the
2163 shift (similarly if IOR could not be done). We will allow
2164 this extremely unlikely lossage to avoid complicating the
2165 code below. */
2166
2167 rtx subtarget = target == shifted ? 0 : target;
2168 rtx new_amount, other_amount;
2169 rtx temp1;
2170
2171 new_amount = op1;
2172 if (op1 == const0_rtx)
2173 return shifted;
2174 else if (CONST_INT_P (op1))
2175 other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
2176 - INTVAL (op1));
2177 else
2178 {
2179 other_amount
2180 = simplify_gen_unary (NEG, GET_MODE (op1),
2181 op1, GET_MODE (op1));
2182 HOST_WIDE_INT mask = GET_MODE_PRECISION (mode) - 1;
2183 other_amount
2184 = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2185 gen_int_mode (mask, GET_MODE (op1)));
2186 }
2187
2188 shifted = force_reg (mode, shifted);
2189
2190 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2191 mode, shifted, new_amount, 0, 1);
2192 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2193 mode, shifted, other_amount,
2194 subtarget, 1);
2195 return expand_binop (mode, ior_optab, temp, temp1, target,
2196 unsignedp, methods);
2197 }
2198
2199 temp = expand_binop (mode,
2200 left ? lrotate_optab : rrotate_optab,
2201 shifted, op1, target, unsignedp, methods);
2202 }
2203 else if (unsignedp)
2204 temp = expand_binop (mode,
2205 left ? lshift_optab : rshift_uns_optab,
2206 shifted, op1, target, unsignedp, methods);
2207
2208 /* Do arithmetic shifts.
2209 Also, if we are going to widen the operand, we can just as well
2210 use an arithmetic right-shift instead of a logical one. */
2211 if (temp == 0 && ! rotate
2212 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2213 {
2214 enum optab_methods methods1 = methods;
2215
2216 /* If trying to widen a logical shift to an arithmetic shift,
2217 don't accept an arithmetic shift of the same size. */
2218 if (unsignedp)
2219 methods1 = OPTAB_MUST_WIDEN;
2220
2221 /* Arithmetic shift */
2222
2223 temp = expand_binop (mode,
2224 left ? lshift_optab : rshift_arith_optab,
2225 shifted, op1, target, unsignedp, methods1);
2226 }
2227
2228 /* We used to try extzv here for logical right shifts, but that was
2229 only useful for one machine, the VAX, and caused poor code
2230 generation there for lshrdi3, so the code was deleted and a
2231 define_expand for lshrsi3 was added to vax.md. */
2232 }
2233
2234 gcc_assert (temp);
2235 return temp;
2236 }
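
/* Editor's illustration (not part of GCC): the rotate-as-two-shifts
   fallback used above, (A << N) | ((unsigned) A >> ((-N) & (C - 1))),
   written out for a 32-bit unsigned int.  Masking the negated count
   keeps the right-shift amount in range even when N is 0.  */

static unsigned int
example_rotate_left (unsigned int a, unsigned int n)
{
  n &= 31;                      /* counts are taken modulo the bitsize */
  return (a << n) | (a >> ((-n) & 31));
}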
2237
2238 /* Output a shift instruction for expression code CODE,
2239 with SHIFTED being the rtx for the value to shift,
2240 and AMOUNT the amount to shift by.
2241 Store the result in the rtx TARGET, if that is convenient.
2242 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2243 Return the rtx for where the value is. */
2244
2245 rtx
2246 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2247 int amount, rtx target, int unsignedp)
2248 {
2249 return expand_shift_1 (code, mode,
2250 shifted, GEN_INT (amount), target, unsignedp);
2251 }
2252
2253 /* Output a shift instruction for expression code CODE,
2254 with SHIFTED being the rtx for the value to shift,
2255 and AMOUNT the tree for the amount to shift by.
2256 Store the result in the rtx TARGET, if that is convenient.
2257 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2258 Return the rtx for where the value is. */
2259
2260 rtx
2261 expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2262 tree amount, rtx target, int unsignedp)
2263 {
2264 return expand_shift_1 (code, mode,
2265 shifted, expand_normal (amount), target, unsignedp);
2266 }
2267
2268 \f
2269 /* Indicates the type of fixup needed after a constant multiplication.
2270 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2271 the result should be negated, and ADD_VARIANT means that the
2272 multiplicand should be added to the result. */
2273 enum mult_variant {basic_variant, negate_variant, add_variant};
2274
2275 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2276 const struct mult_cost *, enum machine_mode mode);
2277 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2278 struct algorithm *, enum mult_variant *, int);
2279 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2280 const struct algorithm *, enum mult_variant);
2281 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2282 static rtx extract_high_half (enum machine_mode, rtx);
2283 static rtx expmed_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2284 static rtx expmed_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2285 int, int);
2286 /* Compute and return the best algorithm for multiplying by T.
2287 The algorithm must cost less than COST_LIMIT.
2288 If retval.cost >= COST_LIMIT, no algorithm was found and all
2289 other fields of the returned struct are undefined.
2290 MODE is the machine mode of the multiplication. */
2291
2292 static void
2293 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2294 const struct mult_cost *cost_limit, enum machine_mode mode)
2295 {
2296 int m;
2297 struct algorithm *alg_in, *best_alg;
2298 struct mult_cost best_cost;
2299 struct mult_cost new_limit;
2300 int op_cost, op_latency;
2301 unsigned HOST_WIDE_INT orig_t = t;
2302 unsigned HOST_WIDE_INT q;
2303 int maxm, hash_index;
2304 bool cache_hit = false;
2305 enum alg_code cache_alg = alg_zero;
2306 bool speed = optimize_insn_for_speed_p ();
2307 enum machine_mode imode;
2308 struct alg_hash_entry *entry_ptr;
2309
2310 /* Indicate that no algorithm is yet found. If no algorithm
2311 is found, this value will be returned and indicate failure. */
2312 alg_out->cost.cost = cost_limit->cost + 1;
2313 alg_out->cost.latency = cost_limit->latency + 1;
2314
2315 if (cost_limit->cost < 0
2316 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2317 return;
2318
2319 /* Be prepared for vector modes. */
2320 imode = GET_MODE_INNER (mode);
2321 if (imode == VOIDmode)
2322 imode = mode;
2323
2324 maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2325
2326 /* Restrict the bits of "t" to the multiplication's mode. */
2327 t &= GET_MODE_MASK (imode);
2328
2329 /* t == 1 can be done in zero cost. */
2330 if (t == 1)
2331 {
2332 alg_out->ops = 1;
2333 alg_out->cost.cost = 0;
2334 alg_out->cost.latency = 0;
2335 alg_out->op[0] = alg_m;
2336 return;
2337 }
2338
2339 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2340 fail now. */
2341 if (t == 0)
2342 {
2343 if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2344 return;
2345 else
2346 {
2347 alg_out->ops = 1;
2348 alg_out->cost.cost = zero_cost (speed);
2349 alg_out->cost.latency = zero_cost (speed);
2350 alg_out->op[0] = alg_zero;
2351 return;
2352 }
2353 }
2354
2355 /* We'll be needing a couple extra algorithm structures now. */
2356
2357 alg_in = XALLOCA (struct algorithm);
2358 best_alg = XALLOCA (struct algorithm);
2359 best_cost = *cost_limit;
2360
2361 /* Compute the hash index. */
2362 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2363
2364 /* See if we already know what to do for T. */
2365 entry_ptr = alg_hash_entry_ptr (hash_index);
2366 if (entry_ptr->t == t
2367 && entry_ptr->mode == mode
2369 && entry_ptr->speed == speed
2370 && entry_ptr->alg != alg_unknown)
2371 {
2372 cache_alg = entry_ptr->alg;
2373
2374 if (cache_alg == alg_impossible)
2375 {
2376 /* The cache tells us that it's impossible to synthesize
2377 multiplication by T within entry_ptr->cost. */
2378 if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2379 /* COST_LIMIT is at least as restrictive as the one
2380 recorded in the hash table, in which case we have no
2381 hope of synthesizing a multiplication. Just
2382 return. */
2383 return;
2384
2385 /* If we get here, COST_LIMIT is less restrictive than the
2386 one recorded in the hash table, so we may be able to
2387 synthesize a multiplication. Proceed as if we didn't
2388 have the cache entry. */
2389 }
2390 else
2391 {
2392 if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2393 /* The cached algorithm shows that this multiplication
2394 requires more cost than COST_LIMIT. Just return. This
2395 way, we don't clobber this cache entry with
2396 alg_impossible but retain useful information. */
2397 return;
2398
2399 cache_hit = true;
2400
2401 switch (cache_alg)
2402 {
2403 case alg_shift:
2404 goto do_alg_shift;
2405
2406 case alg_add_t_m2:
2407 case alg_sub_t_m2:
2408 goto do_alg_addsub_t_m2;
2409
2410 case alg_add_factor:
2411 case alg_sub_factor:
2412 goto do_alg_addsub_factor;
2413
2414 case alg_add_t2_m:
2415 goto do_alg_add_t2_m;
2416
2417 case alg_sub_t2_m:
2418 goto do_alg_sub_t2_m;
2419
2420 default:
2421 gcc_unreachable ();
2422 }
2423 }
2424 }
2425
2426 /* If we have a group of zero bits at the low-order part of T, try
2427 multiplying by the remaining bits and then doing a shift. */
2428
2429 if ((t & 1) == 0)
2430 {
2431 do_alg_shift:
2432 m = floor_log2 (t & -t); /* m = number of low zero bits */
2433 if (m < maxm)
2434 {
2435 q = t >> m;
2436 /* The function expand_shift will choose between a shift and
2437 a sequence of additions, so the observed cost is given as
2438 MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)). */
2439 op_cost = m * add_cost (speed, mode);
2440 if (shift_cost (speed, mode, m) < op_cost)
2441 op_cost = shift_cost (speed, mode, m);
2442 new_limit.cost = best_cost.cost - op_cost;
2443 new_limit.latency = best_cost.latency - op_cost;
2444 synth_mult (alg_in, q, &new_limit, mode);
2445
2446 alg_in->cost.cost += op_cost;
2447 alg_in->cost.latency += op_cost;
2448 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2449 {
2450 struct algorithm *x;
2451 best_cost = alg_in->cost;
2452 x = alg_in, alg_in = best_alg, best_alg = x;
2453 best_alg->log[best_alg->ops] = m;
2454 best_alg->op[best_alg->ops] = alg_shift;
2455 }
2456
2457 /* See if treating ORIG_T as a signed number yields a better
2458 sequence. Try this sequence only for a negative ORIG_T
2459 as it would be useless for a non-negative ORIG_T. */
2460 if ((HOST_WIDE_INT) orig_t < 0)
2461 {
2462 /* Shift ORIG_T as follows because a right shift of a
2463 negative-valued signed type is
2464 implementation-defined. */
2465 q = ~(~orig_t >> m);
2466 /* The function expand_shift will choose between a shift
2467 and a sequence of additions, so the observed cost is
2468 given as MIN (m * add_cost(speed, mode),
2469 shift_cost(speed, mode, m)). */
2470 op_cost = m * add_cost (speed, mode);
2471 if (shift_cost (speed, mode, m) < op_cost)
2472 op_cost = shift_cost (speed, mode, m);
2473 new_limit.cost = best_cost.cost - op_cost;
2474 new_limit.latency = best_cost.latency - op_cost;
2475 synth_mult (alg_in, q, &new_limit, mode);
2476
2477 alg_in->cost.cost += op_cost;
2478 alg_in->cost.latency += op_cost;
2479 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2480 {
2481 struct algorithm *x;
2482 best_cost = alg_in->cost;
2483 x = alg_in, alg_in = best_alg, best_alg = x;
2484 best_alg->log[best_alg->ops] = m;
2485 best_alg->op[best_alg->ops] = alg_shift;
2486 }
2487 }
2488 }
2489 if (cache_hit)
2490 goto done;
2491 }
2492
2493 /* If we have an odd number, add or subtract one. */
2494 if ((t & 1) != 0)
2495 {
2496 unsigned HOST_WIDE_INT w;
2497
2498 do_alg_addsub_t_m2:
2499 for (w = 1; (w & t) != 0; w <<= 1)
2500 ;
2501 /* If T was -1, then W will be zero after the loop. This is another
2502 case where T ends with ...111. Handling this with (T + 1) and
2503 subtracting 1 produces slightly better code and results in much
2504 faster algorithm selection than treating it like the ...0111 case
2505 below. */
2506 if (w == 0
2507 || (w > 2
2508 /* Reject the case where t is 3.
2509 Thus we prefer addition in that case. */
2510 && t != 3))
2511 {
2512 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2513
2514 op_cost = add_cost (speed, mode);
2515 new_limit.cost = best_cost.cost - op_cost;
2516 new_limit.latency = best_cost.latency - op_cost;
2517 synth_mult (alg_in, t + 1, &new_limit, mode);
2518
2519 alg_in->cost.cost += op_cost;
2520 alg_in->cost.latency += op_cost;
2521 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2522 {
2523 struct algorithm *x;
2524 best_cost = alg_in->cost;
2525 x = alg_in, alg_in = best_alg, best_alg = x;
2526 best_alg->log[best_alg->ops] = 0;
2527 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2528 }
2529 }
2530 else
2531 {
2532 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2533
2534 op_cost = add_cost (speed, mode);
2535 new_limit.cost = best_cost.cost - op_cost;
2536 new_limit.latency = best_cost.latency - op_cost;
2537 synth_mult (alg_in, t - 1, &new_limit, mode);
2538
2539 alg_in->cost.cost += op_cost;
2540 alg_in->cost.latency += op_cost;
2541 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2542 {
2543 struct algorithm *x;
2544 best_cost = alg_in->cost;
2545 x = alg_in, alg_in = best_alg, best_alg = x;
2546 best_alg->log[best_alg->ops] = 0;
2547 best_alg->op[best_alg->ops] = alg_add_t_m2;
2548 }
2549 }
2550
2551 /* We may be able to calculate a * -7, a * -15, a * -31, etc.
2552 quickly with a - a * n for some appropriate constant n. */
2553 m = exact_log2 (-orig_t + 1);
2554 if (m >= 0 && m < maxm)
2555 {
2556 op_cost = shiftsub1_cost (speed, mode, m);
2557 new_limit.cost = best_cost.cost - op_cost;
2558 new_limit.latency = best_cost.latency - op_cost;
2559 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2560 &new_limit, mode);
2561
2562 alg_in->cost.cost += op_cost;
2563 alg_in->cost.latency += op_cost;
2564 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2565 {
2566 struct algorithm *x;
2567 best_cost = alg_in->cost;
2568 x = alg_in, alg_in = best_alg, best_alg = x;
2569 best_alg->log[best_alg->ops] = m;
2570 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2571 }
2572 }
2573
2574 if (cache_hit)
2575 goto done;
2576 }
2577
2578 /* Look for factors of t of the form
2579 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2580 If we find such a factor, we can multiply by t using an algorithm that
2581 multiplies by q, shift the result by m and add/subtract it to itself.
2582
2583 We search for large factors first and loop down, even if large factors
2584 are less probable than small; if we find a large factor we will find a
2585 good sequence quickly, and therefore be able to prune (by decreasing
2586 COST_LIMIT) the search. */
2587
2588 do_alg_addsub_factor:
2589 for (m = floor_log2 (t - 1); m >= 2; m--)
2590 {
2591 unsigned HOST_WIDE_INT d;
2592
2593 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2594 if (t % d == 0 && t > d && m < maxm
2595 && (!cache_hit || cache_alg == alg_add_factor))
2596 {
2597 /* If the target has a cheap shift-and-add instruction use
2598 that in preference to a shift insn followed by an add insn.
2599 Assume that the shift-and-add is "atomic" with a latency
2600 equal to its cost, otherwise assume that on superscalar
2601 hardware the shift may be executed concurrently with the
2602 earlier steps in the algorithm. */
2603 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2604 if (shiftadd_cost (speed, mode, m) < op_cost)
2605 {
2606 op_cost = shiftadd_cost (speed, mode, m);
2607 op_latency = op_cost;
2608 }
2609 else
2610 op_latency = add_cost (speed, mode);
2611
2612 new_limit.cost = best_cost.cost - op_cost;
2613 new_limit.latency = best_cost.latency - op_latency;
2614 synth_mult (alg_in, t / d, &new_limit, mode);
2615
2616 alg_in->cost.cost += op_cost;
2617 alg_in->cost.latency += op_latency;
2618 if (alg_in->cost.latency < op_cost)
2619 alg_in->cost.latency = op_cost;
2620 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2621 {
2622 struct algorithm *x;
2623 best_cost = alg_in->cost;
2624 x = alg_in, alg_in = best_alg, best_alg = x;
2625 best_alg->log[best_alg->ops] = m;
2626 best_alg->op[best_alg->ops] = alg_add_factor;
2627 }
2628 /* Other factors will have been taken care of in the recursion. */
2629 break;
2630 }
2631
2632 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2633 if (t % d == 0 && t > d && m < maxm
2634 && (!cache_hit || cache_alg == alg_sub_factor))
2635 {
2636 /* If the target has a cheap shift-and-subtract insn use
2637 that in preference to a shift insn followed by a sub insn.
2638 Assume that the shift-and-sub is "atomic" with a latency
2639 equal to its cost, otherwise assume that on superscalar
2640 hardware the shift may be executed concurrently with the
2641 earlier steps in the algorithm. */
2642 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2643 if (shiftsub0_cost (speed, mode, m) < op_cost)
2644 {
2645 op_cost = shiftsub0_cost (speed, mode, m);
2646 op_latency = op_cost;
2647 }
2648 else
2649 op_latency = add_cost (speed, mode);
2650
2651 new_limit.cost = best_cost.cost - op_cost;
2652 new_limit.latency = best_cost.latency - op_latency;
2653 synth_mult (alg_in, t / d, &new_limit, mode);
2654
2655 alg_in->cost.cost += op_cost;
2656 alg_in->cost.latency += op_latency;
2657 if (alg_in->cost.latency < op_cost)
2658 alg_in->cost.latency = op_cost;
2659 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2660 {
2661 struct algorithm *x;
2662 best_cost = alg_in->cost;
2663 x = alg_in, alg_in = best_alg, best_alg = x;
2664 best_alg->log[best_alg->ops] = m;
2665 best_alg->op[best_alg->ops] = alg_sub_factor;
2666 }
2667 break;
2668 }
2669 }
2670 if (cache_hit)
2671 goto done;
2672
2673 /* Try shift-and-add (load effective address) instructions,
2674 i.e. do a*3, a*5, a*9. */
2675 if ((t & 1) != 0)
2676 {
2677 do_alg_add_t2_m:
2678 q = t - 1;
2679 q = q & -q;
2680 m = exact_log2 (q);
2681 if (m >= 0 && m < maxm)
2682 {
2683 op_cost = shiftadd_cost (speed, mode, m);
2684 new_limit.cost = best_cost.cost - op_cost;
2685 new_limit.latency = best_cost.latency - op_cost;
2686 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2687
2688 alg_in->cost.cost += op_cost;
2689 alg_in->cost.latency += op_cost;
2690 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2691 {
2692 struct algorithm *x;
2693 best_cost = alg_in->cost;
2694 x = alg_in, alg_in = best_alg, best_alg = x;
2695 best_alg->log[best_alg->ops] = m;
2696 best_alg->op[best_alg->ops] = alg_add_t2_m;
2697 }
2698 }
2699 if (cache_hit)
2700 goto done;
2701
2702 do_alg_sub_t2_m:
2703 q = t + 1;
2704 q = q & -q;
2705 m = exact_log2 (q);
2706 if (m >= 0 && m < maxm)
2707 {
2708 op_cost = shiftsub0_cost (speed, mode, m);
2709 new_limit.cost = best_cost.cost - op_cost;
2710 new_limit.latency = best_cost.latency - op_cost;
2711 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2712
2713 alg_in->cost.cost += op_cost;
2714 alg_in->cost.latency += op_cost;
2715 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2716 {
2717 struct algorithm *x;
2718 best_cost = alg_in->cost;
2719 x = alg_in, alg_in = best_alg, best_alg = x;
2720 best_alg->log[best_alg->ops] = m;
2721 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2722 }
2723 }
2724 if (cache_hit)
2725 goto done;
2726 }
2727
2728 done:
2729 /* If best_cost has not decreased, we have not found any algorithm. */
2730 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2731 {
2732 /* We failed to find an algorithm. Record alg_impossible for
2733 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2734 we are asked to find an algorithm for T within the same or
2735 lower COST_LIMIT, we can immediately return to the
2736 caller. */
2737 entry_ptr->t = t;
2738 entry_ptr->mode = mode;
2739 entry_ptr->speed = speed;
2740 entry_ptr->alg = alg_impossible;
2741 entry_ptr->cost = *cost_limit;
2742 return;
2743 }
2744
2745 /* Cache the result. */
2746 if (!cache_hit)
2747 {
2748 entry_ptr->t = t;
2749 entry_ptr->mode = mode;
2750 entry_ptr->speed = speed;
2751 entry_ptr->alg = best_alg->op[best_alg->ops];
2752 entry_ptr->cost.cost = best_cost.cost;
2753 entry_ptr->cost.latency = best_cost.latency;
2754 }
2755
2756 /* If we are getting too long a sequence for `struct algorithm'
2757 to record, make this search fail. */
2758 if (best_alg->ops == MAX_BITS_PER_WORD)
2759 return;
2760
2761 /* Copy the algorithm from temporary space to the space at alg_out.
2762 We avoid using structure assignment because the majority of
2763 best_alg is normally undefined, and this is a critical function. */
2764 alg_out->ops = best_alg->ops + 1;
2765 alg_out->cost = best_cost;
2766 memcpy (alg_out->op, best_alg->op,
2767 alg_out->ops * sizeof *alg_out->op);
2768 memcpy (alg_out->log, best_alg->log,
2769 alg_out->ops * sizeof *alg_out->log);
2770 }
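
/* Editor's illustration (not part of GCC): the kind of sequence
   synth_mult discovers.  t = 10 is even, so alg_shift reduces it to
   t = 5, which alg_add_t2_m handles as (x << 2) + x; t = -7 uses the
   a - a * n pattern noted above.  Assumes the usual wrapping
   two's-complement behaviour of signed shifts.  */

static int
example_mult_by_10 (int x)
{
  int t = (x << 2) + x;         /* x * 5  (alg_add_t2_m, log = 2) */
  return t << 1;                /* x * 10 (alg_shift, log = 1)    */
}

static int
example_mult_by_minus_7 (int x)
{
  return x - (x << 3);          /* x * (1 - 8) = x * -7 (alg_sub_t_m2) */
}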
2771 \f
2772 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2773 Try three variations:
2774
2775 - a shift/add sequence based on VAL itself
2776 - a shift/add sequence based on -VAL, followed by a negation
2777 - a shift/add sequence based on VAL - 1, followed by an addition.
2778
2779 Return true if the cheapest of these cost less than MULT_COST,
2780 describing the algorithm in *ALG and final fixup in *VARIANT. */
2781
2782 static bool
2783 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2784 struct algorithm *alg, enum mult_variant *variant,
2785 int mult_cost)
2786 {
2787 struct algorithm alg2;
2788 struct mult_cost limit;
2789 int op_cost;
2790 bool speed = optimize_insn_for_speed_p ();
2791
2792 /* Fail quickly for impossible bounds. */
2793 if (mult_cost < 0)
2794 return false;
2795
2796 /* Ensure that mult_cost provides a reasonable upper bound.
2797 Any constant multiplication can be performed with fewer
2798 than 2 * bits additions. */
2799 op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
2800 if (mult_cost > op_cost)
2801 mult_cost = op_cost;
2802
2803 *variant = basic_variant;
2804 limit.cost = mult_cost;
2805 limit.latency = mult_cost;
2806 synth_mult (alg, val, &limit, mode);
2807
2808 /* This works only if the inverted value actually fits in an
2809 `unsigned int'. */
2810 if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
2811 {
2812 op_cost = neg_cost (speed, mode);
2813 if (MULT_COST_LESS (&alg->cost, mult_cost))
2814 {
2815 limit.cost = alg->cost.cost - op_cost;
2816 limit.latency = alg->cost.latency - op_cost;
2817 }
2818 else
2819 {
2820 limit.cost = mult_cost - op_cost;
2821 limit.latency = mult_cost - op_cost;
2822 }
2823
2824 synth_mult (&alg2, -val, &limit, mode);
2825 alg2.cost.cost += op_cost;
2826 alg2.cost.latency += op_cost;
2827 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2828 *alg = alg2, *variant = negate_variant;
2829 }
2830
2831 /* This proves very useful for division-by-constant. */
2832 op_cost = add_cost (speed, mode);
2833 if (MULT_COST_LESS (&alg->cost, mult_cost))
2834 {
2835 limit.cost = alg->cost.cost - op_cost;
2836 limit.latency = alg->cost.latency - op_cost;
2837 }
2838 else
2839 {
2840 limit.cost = mult_cost - op_cost;
2841 limit.latency = mult_cost - op_cost;
2842 }
2843
2844 synth_mult (&alg2, val - 1, &limit, mode);
2845 alg2.cost.cost += op_cost;
2846 alg2.cost.latency += op_cost;
2847 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2848 *alg = alg2, *variant = add_variant;
2849
2850 return MULT_COST_LESS (&alg->cost, mult_cost);
2851 }
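
/* Editor's illustration (not part of GCC): the three fixup variants,
   shown for x * 7 under the assumption that shifts and adds are cheap.
   basic_variant synthesizes 7 directly, negate_variant synthesizes -7
   and negates, and add_variant synthesizes 6 = 7 - 1 and adds the
   multiplicand back.  */

static int
example_mult_7_basic (int x)
{
  return (x << 3) - x;                  /* 8x - x */
}

static int
example_mult_7_negate (int x)
{
  return -(x - (x << 3));               /* -(x * -7) */
}

static int
example_mult_7_add (int x)
{
  return (((x << 1) + x) << 1) + x;     /* 6x + x */
}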
2852
2853 /* A subroutine of expand_mult, used for constant multiplications.
2854 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2855 convenient. Use the shift/add sequence described by ALG and apply
2856 the final fixup specified by VARIANT. */
2857
2858 static rtx
2859 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2860 rtx target, const struct algorithm *alg,
2861 enum mult_variant variant)
2862 {
2863 HOST_WIDE_INT val_so_far;
2864 rtx insn, accum, tem;
2865 int opno;
2866 enum machine_mode nmode;
2867
2868 /* Avoid referencing memory over and over, and avoid invalid
2869 sharing of SUBREGs. */
2870 op0 = force_reg (mode, op0);
2871
2872 /* ACCUM starts out either as OP0 or as a zero, depending on
2873 the first operation. */
2874
2875 if (alg->op[0] == alg_zero)
2876 {
2877 accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
2878 val_so_far = 0;
2879 }
2880 else if (alg->op[0] == alg_m)
2881 {
2882 accum = copy_to_mode_reg (mode, op0);
2883 val_so_far = 1;
2884 }
2885 else
2886 gcc_unreachable ();
2887
2888 for (opno = 1; opno < alg->ops; opno++)
2889 {
2890 int log = alg->log[opno];
2891 rtx shift_subtarget = optimize ? 0 : accum;
2892 rtx add_target
2893 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2894 && !optimize)
2895 ? target : 0;
2896 rtx accum_target = optimize ? 0 : accum;
2897 rtx accum_inner;
2898
2899 switch (alg->op[opno])
2900 {
2901 case alg_shift:
2902 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2903 /* REG_EQUAL note will be attached to the following insn. */
2904 emit_move_insn (accum, tem);
2905 val_so_far <<= log;
2906 break;
2907
2908 case alg_add_t_m2:
2909 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2910 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2911 add_target ? add_target : accum_target);
2912 val_so_far += (HOST_WIDE_INT) 1 << log;
2913 break;
2914
2915 case alg_sub_t_m2:
2916 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2917 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2918 add_target ? add_target : accum_target);
2919 val_so_far -= (HOST_WIDE_INT) 1 << log;
2920 break;
2921
2922 case alg_add_t2_m:
2923 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2924 log, shift_subtarget, 0);
2925 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2926 add_target ? add_target : accum_target);
2927 val_so_far = (val_so_far << log) + 1;
2928 break;
2929
2930 case alg_sub_t2_m:
2931 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2932 log, shift_subtarget, 0);
2933 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2934 add_target ? add_target : accum_target);
2935 val_so_far = (val_so_far << log) - 1;
2936 break;
2937
2938 case alg_add_factor:
2939 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2940 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2941 add_target ? add_target : accum_target);
2942 val_so_far += val_so_far << log;
2943 break;
2944
2945 case alg_sub_factor:
2946 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2947 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2948 (add_target
2949 ? add_target : (optimize ? 0 : tem)));
2950 val_so_far = (val_so_far << log) - val_so_far;
2951 break;
2952
2953 default:
2954 gcc_unreachable ();
2955 }
2956
2957 if (SCALAR_INT_MODE_P (mode))
2958 {
2959 /* Write a REG_EQUAL note on the last insn so that we can cse
2960 multiplication sequences. Note that if ACCUM is a SUBREG,
2961 we've set the inner register and must properly indicate that. */
2962 tem = op0, nmode = mode;
2963 accum_inner = accum;
2964 if (GET_CODE (accum) == SUBREG)
2965 {
2966 accum_inner = SUBREG_REG (accum);
2967 nmode = GET_MODE (accum_inner);
2968 tem = gen_lowpart (nmode, op0);
2969 }
2970
2971 insn = get_last_insn ();
2972 set_dst_reg_note (insn, REG_EQUAL,
2973 gen_rtx_MULT (nmode, tem,
2974 gen_int_mode (val_so_far, nmode)),
2975 accum_inner);
2976 }
2977 }
2978
2979 if (variant == negate_variant)
2980 {
2981 val_so_far = -val_so_far;
2982 accum = expand_unop (mode, neg_optab, accum, target, 0);
2983 }
2984 else if (variant == add_variant)
2985 {
2986 val_so_far = val_so_far + 1;
2987 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2988 }
2989
2990 /* Compare only the bits of val and val_so_far that are significant
2991 in the result mode, to avoid sign-/zero-extension confusion. */
2992 nmode = GET_MODE_INNER (mode);
2993 if (nmode == VOIDmode)
2994 nmode = mode;
2995 val &= GET_MODE_MASK (nmode);
2996 val_so_far &= GET_MODE_MASK (nmode);
2997 gcc_assert (val == val_so_far);
2998
2999 return accum;
3000 }
3001
3002 /* Perform a multiplication and return an rtx for the result.
3003 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3004 TARGET is a suggestion for where to store the result (an rtx).
3005
3006 We check specially for a constant integer as OP1.
3007 If you want this check for OP0 as well, then before calling
3008 you should swap the two operands if OP0 would be constant. */
3009
3010 rtx
3011 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3012 int unsignedp)
3013 {
3014 enum mult_variant variant;
3015 struct algorithm algorithm;
3016 rtx scalar_op1;
3017 int max_cost;
3018 bool speed = optimize_insn_for_speed_p ();
3019 bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3020
3021 if (CONSTANT_P (op0))
3022 {
3023 rtx temp = op0;
3024 op0 = op1;
3025 op1 = temp;
3026 }
3027
3028 /* For vectors, there are several simplifications that can be made if
3029 all elements of the vector constant are identical. */
3030 scalar_op1 = op1;
3031 if (GET_CODE (op1) == CONST_VECTOR)
3032 {
3033 int i, n = CONST_VECTOR_NUNITS (op1);
3034 scalar_op1 = CONST_VECTOR_ELT (op1, 0);
3035 for (i = 1; i < n; ++i)
3036 if (!rtx_equal_p (scalar_op1, CONST_VECTOR_ELT (op1, i)))
3037 goto skip_scalar;
3038 }
3039
3040 if (INTEGRAL_MODE_P (mode))
3041 {
3042 rtx fake_reg;
3043 HOST_WIDE_INT coeff;
3044 bool is_neg;
3045 int mode_bitsize;
3046
3047 if (op1 == CONST0_RTX (mode))
3048 return op1;
3049 if (op1 == CONST1_RTX (mode))
3050 return op0;
3051 if (op1 == CONSTM1_RTX (mode))
3052 return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3053 op0, target, 0);
3054
3055 if (do_trapv)
3056 goto skip_synth;
3057
3058 /* These are the operations that are potentially turned into
3059 a sequence of shifts and additions. */
3060 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3061
3062 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3063 less than or equal in size to `unsigned int' this doesn't matter.
3064 If the mode is larger than `unsigned int', then synth_mult works
3065 only if the constant value exactly fits in an `unsigned int' without
3066 any truncation. This means that multiplying by negative values does
3067 not work; results are off by 2^32 on a 32-bit machine. */
3068 if (CONST_INT_P (scalar_op1))
3069 {
3070 coeff = INTVAL (scalar_op1);
3071 is_neg = coeff < 0;
3072 }
3073 #if TARGET_SUPPORTS_WIDE_INT
3074 else if (CONST_WIDE_INT_P (scalar_op1))
3075 #else
3076 else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3077 #endif
3078 {
3079 int shift = wi::exact_log2 (std::make_pair (scalar_op1, mode));
3080 /* Perfect power of 2 (other than 1, which is handled above). */
3081 if (shift > 0)
3082 return expand_shift (LSHIFT_EXPR, mode, op0,
3083 shift, target, unsignedp);
3084 else
3085 goto skip_synth;
3086 }
3087 else
3088 goto skip_synth;
3089
3090 /* We used to test optimize here, on the grounds that it's better to
3091 produce a smaller program when -O is not used. But this causes
3092 such a terrible slowdown sometimes that it seems better to always
3093 use synth_mult. */
3094
3095 /* Special case powers of two. */
3096 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3097 && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3098 return expand_shift (LSHIFT_EXPR, mode, op0,
3099 floor_log2 (coeff), target, unsignedp);
3100
3101 fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3102
3103 /* Attempt to handle multiplication of DImode values by negative
3104 coefficients, by performing the multiplication by a positive
3105 multiplier and then inverting the result. */
3106 if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3107 {
3108 /* It's safe to use -coeff even for INT_MIN, as the
3109 result is interpreted as an unsigned coefficient.
3110 Exclude cost of op0 from max_cost to match the cost
3111 calculation of the synth_mult. */
3112 coeff = -(unsigned HOST_WIDE_INT) coeff;
3113 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed)
3114 - neg_cost (speed, mode));
3115 if (max_cost <= 0)
3116 goto skip_synth;
3117
3118 /* Special case powers of two. */
3119 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3120 {
3121 rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3122 floor_log2 (coeff), target, unsignedp);
3123 return expand_unop (mode, neg_optab, temp, target, 0);
3124 }
3125
3126 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3127 max_cost))
3128 {
3129 rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3130 &algorithm, variant);
3131 return expand_unop (mode, neg_optab, temp, target, 0);
3132 }
3133 goto skip_synth;
3134 }
3135
3136 /* Exclude cost of op0 from max_cost to match the cost
3137 calculation of the synth_mult. */
3138 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
3139 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3140 return expand_mult_const (mode, op0, coeff, target,
3141 &algorithm, variant);
3142 }
3143 skip_synth:
3144
3145 /* Expand x*2.0 as x+x. */
3146 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1))
3147 {
3148 REAL_VALUE_TYPE d;
3149 REAL_VALUE_FROM_CONST_DOUBLE (d, scalar_op1);
3150
3151 if (REAL_VALUES_EQUAL (d, dconst2))
3152 {
3153 op0 = force_reg (GET_MODE (op0), op0);
3154 return expand_binop (mode, add_optab, op0, op0,
3155 target, unsignedp, OPTAB_LIB_WIDEN);
3156 }
3157 }
3158 skip_scalar:
3159
3160 /* This used to use umul_optab if unsigned, but for non-widening multiply
3161 there is no difference between signed and unsigned. */
3162 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3163 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3164 gcc_assert (op0);
3165 return op0;
3166 }
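
/* Editor's illustration (not part of GCC): the power-of-two special
   cases above.  A positive power-of-two coefficient becomes a single
   shift; a negative one is handled by multiplying by the corresponding
   positive coefficient and negating the result.  */

static int
example_mult_by_8 (int x)
{
  return x << 3;                /* x * 8 */
}

static int
example_mult_by_minus_8 (int x)
{
  return -(x << 3);             /* x * -8, positive multiply then negate */
}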
3167
3168 /* Return a cost estimate for multiplying a register by the given
3169 COEFFicient in the given MODE and SPEED. */
3170
3171 int
3172 mult_by_coeff_cost (HOST_WIDE_INT coeff, enum machine_mode mode, bool speed)
3173 {
3174 int max_cost;
3175 struct algorithm algorithm;
3176 enum mult_variant variant;
3177
3178 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3179 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg), speed);
3180 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3181 return algorithm.cost.cost;
3182 else
3183 return max_cost;
3184 }
3185
3186 /* Perform a widening multiplication and return an rtx for the result.
3187 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3188 TARGET is a suggestion for where to store the result (an rtx).
3189 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3190 or smul_widen_optab.
3191
3192 We check specially for a constant integer as OP1, comparing the
3193 cost of a widening multiply against the cost of a sequence of shifts
3194 and adds. */
3195
3196 rtx
3197 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3198 int unsignedp, optab this_optab)
3199 {
3200 bool speed = optimize_insn_for_speed_p ();
3201 rtx cop1;
3202
3203 if (CONST_INT_P (op1)
3204 && GET_MODE (op0) != VOIDmode
3205 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3206 this_optab == umul_widen_optab))
3207 && CONST_INT_P (cop1)
3208 && (INTVAL (cop1) >= 0
3209 || HWI_COMPUTABLE_MODE_P (mode)))
3210 {
3211 HOST_WIDE_INT coeff = INTVAL (cop1);
3212 int max_cost;
3213 enum mult_variant variant;
3214 struct algorithm algorithm;
3215
3216 /* Special case powers of two. */
3217 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3218 {
3219 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3220 return expand_shift (LSHIFT_EXPR, mode, op0,
3221 floor_log2 (coeff), target, unsignedp);
3222 }
3223
3224 /* Exclude cost of op0 from max_cost to match the cost
3225 calculation of the synth_mult. */
3226 max_cost = mul_widen_cost (speed, mode);
3227 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3228 max_cost))
3229 {
3230 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3231 return expand_mult_const (mode, op0, coeff, target,
3232 &algorithm, variant);
3233 }
3234 }
3235 return expand_binop (mode, this_optab, op0, op1, target,
3236 unsignedp, OPTAB_LIB_WIDEN);
3237 }
3238 \f
3239 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3240 replace division by D, and put the least significant N bits of the result
3241 in *MULTIPLIER_PTR and return the most significant bit.
3242
3243 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3244 needed precision is in PRECISION (should be <= N).
3245
3246 PRECISION should be as small as possible so this function can choose
3247 multiplier more freely.
3248
3249 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
3250 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3251
3252 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3253 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3254
3255 unsigned HOST_WIDE_INT
3256 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3257 unsigned HOST_WIDE_INT *multiplier_ptr,
3258 int *post_shift_ptr, int *lgup_ptr)
3259 {
3260 int lgup, post_shift;
3261 int pow, pow2;
3262
3263 /* lgup = ceil(log2(divisor)); */
3264 lgup = ceil_log2 (d);
3265
3266 gcc_assert (lgup <= n);
3267
3268 pow = n + lgup;
3269 pow2 = n + lgup - precision;
3270
3271 /* mlow = 2^(N + lgup)/d */
3272 wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3273 wide_int mlow = wi::udiv_trunc (val, d);
3274
3275 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3276 val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3277 wide_int mhigh = wi::udiv_trunc (val, d);
3278
3279 /* If precision == N, then mlow, mhigh exceed 2^N
3280 (but they do not exceed 2^(N+1)). */
3281
3282 /* Reduce to lowest terms. */
3283 for (post_shift = lgup; post_shift > 0; post_shift--)
3284 {
3285 unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3286 HOST_BITS_PER_WIDE_INT);
3287 unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3288 HOST_BITS_PER_WIDE_INT);
3289 if (ml_lo >= mh_lo)
3290 break;
3291
3292 mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3293 mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3294 }
3295
3296 *post_shift_ptr = post_shift;
3297 *lgup_ptr = lgup;
3298 if (n < HOST_BITS_PER_WIDE_INT)
3299 {
3300 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3301 *multiplier_ptr = mhigh.to_uhwi () & mask;
3302 return mhigh.to_uhwi () >= mask;
3303 }
3304 else
3305 {
3306 *multiplier_ptr = mhigh.to_uhwi ();
3307 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3308 }
3309 }
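
/* Editor's illustration (not part of GCC): what choose_multiplier
   computes for d = 3 with n = precision = 32.  The reduction loop
   leaves the 32-bit multiplier 0xAAAAAAAB = ceil(2^33 / 3) with a
   post-shift of 1, so x / 3 == (x * 0xAAAAAAAB) >> 33 for every
   32-bit unsigned x.  */

static unsigned int
example_udiv_by_3 (unsigned int x)
{
  return (unsigned int) ((x * 0xAAAAAAABULL) >> 33);
}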
3310
3311 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3312 congruent to 1 (mod 2**N). */
3313
3314 static unsigned HOST_WIDE_INT
3315 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3316 {
3317 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3318
3319 /* The algorithm notes that the choice y = x satisfies
3320 x*y == 1 mod 2^3, since x is assumed odd.
3321 Each iteration doubles the number of bits of significance in y. */
3322
3323 unsigned HOST_WIDE_INT mask;
3324 unsigned HOST_WIDE_INT y = x;
3325 int nbit = 3;
3326
3327 mask = (n == HOST_BITS_PER_WIDE_INT
3328 ? ~(unsigned HOST_WIDE_INT) 0
3329 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3330
3331 while (nbit < n)
3332 {
3333 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3334 nbit *= 2;
3335 }
3336 return y;
3337 }
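
/* Editor's illustration (not part of GCC): the iteration above for
   x = 3 and n = 32.  Starting from y = x, which is correct mod 2^3,
   each y *= 2 - x*y step doubles the bits of precision; unsigned int
   arithmetic supplies the mod 2^32 masking.  The result is 0xAAAAAAAB,
   and indeed 3 * 0xAAAAAAAB == 2^33 + 1 == 1 (mod 2^32).  */

static unsigned int
example_invert_3_mod_2_32 (void)
{
  unsigned int x = 3, y = x;
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);        /* wraps modulo 2^32 automatically */
  return y;                     /* 0xAAAAAAAB */
}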
3338
3339 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3340 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3341 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3342 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3343 become signed.
3344
3345 The result is put in TARGET if that is convenient.
3346
3347 MODE is the mode of operation. */
3348
3349 rtx
3350 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3351 rtx op1, rtx target, int unsignedp)
3352 {
3353 rtx tem;
3354 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3355
3356 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3357 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3358 tem = expand_and (mode, tem, op1, NULL_RTX);
3359 adj_operand
3360 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3361 adj_operand);
3362
3363 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3364 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3365 tem = expand_and (mode, tem, op0, NULL_RTX);
3366 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3367 target);
3368
3369 return target;
3370 }
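
/* The identity behind the adjustment above (a sketch, with H ()
   denoting the high N bits of the 2N-bit product of two N-bit values
   and [P] meaning 1 if P holds, else 0):

     H_signed (x, y) == H_unsigned (x, y) - [x < 0] * y - [y < 0] * x

   modulo 2^N, and the same with PLUS in the other direction, because
   reading an N-bit value as signed subtracts 2^N * [x < 0] from its
   unsigned reading. The arithmetic shift by N - 1 yields an all-ones
   mask exactly for negative inputs, so the AND computes [x < 0] * y
   without a branch.  */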
3371
3372 /* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3373
3374 static rtx
3375 extract_high_half (enum machine_mode mode, rtx op)
3376 {
3377 enum machine_mode wider_mode;
3378
3379 if (mode == word_mode)
3380 return gen_highpart (mode, op);
3381
3382 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3383
3384 wider_mode = GET_MODE_WIDER_MODE (mode);
3385 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3386 GET_MODE_BITSIZE (mode), 0, 1);
3387 return convert_modes (mode, wider_mode, op, 0);
3388 }
3389
3390 /* Like expmed_mult_highpart, but only consider using a multiplication
3391 optab. OP1 is an rtx for the constant operand. */
3392
3393 static rtx
3394 expmed_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3395 rtx target, int unsignedp, int max_cost)
3396 {
3397 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3398 enum machine_mode wider_mode;
3399 optab moptab;
3400 rtx tem;
3401 int size;
3402 bool speed = optimize_insn_for_speed_p ();
3403
3404 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3405
3406 wider_mode = GET_MODE_WIDER_MODE (mode);
3407 size = GET_MODE_BITSIZE (mode);
3408
3409 /* Firstly, try using a multiplication insn that only generates the needed
3410 high part of the product, and in the sign flavor of unsignedp. */
3411 if (mul_highpart_cost (speed, mode) < max_cost)
3412 {
3413 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3414 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3415 unsignedp, OPTAB_DIRECT);
3416 if (tem)
3417 return tem;
3418 }
3419
3420 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3421 Need to adjust the result after the multiplication. */
3422 if (size - 1 < BITS_PER_WORD
3423 && (mul_highpart_cost (speed, mode)
3424 + 2 * shift_cost (speed, mode, size-1)
3425 + 4 * add_cost (speed, mode) < max_cost))
3426 {
3427 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3428 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3429 unsignedp, OPTAB_DIRECT);
3430 if (tem)
3431 /* We used the wrong signedness. Adjust the result. */
3432 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3433 tem, unsignedp);
3434 }
3435
3436 /* Try widening multiplication. */
3437 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3438 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3439 && mul_widen_cost (speed, wider_mode) < max_cost)
3440 {
3441 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3442 unsignedp, OPTAB_WIDEN);
3443 if (tem)
3444 return extract_high_half (mode, tem);
3445 }
3446
3447 /* Try widening the mode and performing a non-widening multiplication. */
3448 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3449 && size - 1 < BITS_PER_WORD
3450 && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3451 < max_cost))
3452 {
3453 rtx insns, wop0, wop1;
3454
3455 /* We need to widen the operands, for example to ensure the
3456 constant multiplier is correctly sign or zero extended.
3457 Use a sequence to clean up any instructions emitted by
3458 the conversions if things don't work out. */
3459 start_sequence ();
3460 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3461 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3462 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3463 unsignedp, OPTAB_WIDEN);
3464 insns = get_insns ();
3465 end_sequence ();
3466
3467 if (tem)
3468 {
3469 emit_insn (insns);
3470 return extract_high_half (mode, tem);
3471 }
3472 }
3473
3474 /* Try widening multiplication of opposite signedness, and adjust. */
3475 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3476 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3477 && size - 1 < BITS_PER_WORD
3478 && (mul_widen_cost (speed, wider_mode)
3479 + 2 * shift_cost (speed, mode, size-1)
3480 + 4 * add_cost (speed, mode) < max_cost))
3481 {
3482 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3483 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3484 if (tem != 0)
3485 {
3486 tem = extract_high_half (mode, tem);
3487 /* We used the wrong signedness. Adjust the result. */
3488 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3489 target, unsignedp);
3490 }
3491 }
3492
3493 return 0;
3494 }
3495
3496 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3497 putting the high half of the result in TARGET if that is convenient,
3498 and return where the result is. If the operation cannot be performed,
3499 0 is returned.
3500
3501 MODE is the mode of operation and result.
3502
3503 UNSIGNEDP nonzero means unsigned multiply.
3504
3505 MAX_COST is the total allowed cost for the expanded RTL. */
3506
3507 static rtx
3508 expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3509 rtx target, int unsignedp, int max_cost)
3510 {
3511 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3512 unsigned HOST_WIDE_INT cnst1;
3513 int extra_cost;
3514 bool sign_adjust = false;
3515 enum mult_variant variant;
3516 struct algorithm alg;
3517 rtx tem;
3518 bool speed = optimize_insn_for_speed_p ();
3519
3520 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3521 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3522 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3523
3524 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3525
3526 /* We can't optimize modes wider than BITS_PER_WORD.
3527 ??? We might be able to perform double-word arithmetic if
3528 mode == word_mode, however all the cost calculations in
3529 synth_mult etc. assume single-word operations. */
3530 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3531 return expmed_mult_highpart_optab (mode, op0, op1, target,
3532 unsignedp, max_cost);
3533
3534 extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3535
3536 /* Check whether we try to multiply by a negative constant. */
3537 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3538 {
3539 sign_adjust = true;
3540 extra_cost += add_cost (speed, mode);
3541 }
3542
3543 /* See whether shift/add multiplication is cheap enough. */
3544 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3545 max_cost - extra_cost))
3546 {
3547 /* See whether the specialized multiplication optabs are
3548 cheaper than the shift/add version. */
3549 tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3550 alg.cost.cost + extra_cost);
3551 if (tem)
3552 return tem;
3553
3554 tem = convert_to_mode (wider_mode, op0, unsignedp);
3555 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3556 tem = extract_high_half (mode, tem);
3557
3558 /* Adjust result for signedness. */
3559 if (sign_adjust)
3560 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3561
3562 return tem;
3563 }
3564 return expmed_mult_highpart_optab (mode, op0, op1, target,
3565 unsignedp, max_cost);
3566 }
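
/* The sign adjustment above relies on the identity (a sketch): if the
   N-bit constant C has its sign bit set, then as a signed value it
   equals C_unsigned - 2^N, so modulo 2^N

     high (x * C_signed) == high (x * C_unsigned) - x,

   which is the single extra MINUS accounted for in EXTRA_COST and
   emitted when SIGN_ADJUST is set.  */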
3567
3568
3569 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3570
3571 static rtx
3572 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3573 {
3574 rtx result, temp, shift, label;
3575 int logd;
3576 int prec = GET_MODE_PRECISION (mode);
3577
3578 logd = floor_log2 (d);
3579 result = gen_reg_rtx (mode);
3580
3581 /* Avoid conditional branches when they're expensive. */
3582 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3583 && optimize_insn_for_speed_p ())
3584 {
3585 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3586 mode, 0, -1);
3587 if (signmask)
3588 {
3589 HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3590 signmask = force_reg (mode, signmask);
3591 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3592
3593 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3594 which instruction sequence to use. If logical right shifts
3595 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3596 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3597
3598 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3599 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3600 || (set_src_cost (temp, optimize_insn_for_speed_p ())
3601 > COSTS_N_INSNS (2)))
3602 {
3603 temp = expand_binop (mode, xor_optab, op0, signmask,
3604 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3605 temp = expand_binop (mode, sub_optab, temp, signmask,
3606 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3607 temp = expand_binop (mode, and_optab, temp,
3608 gen_int_mode (masklow, mode),
3609 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3610 temp = expand_binop (mode, xor_optab, temp, signmask,
3611 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3612 temp = expand_binop (mode, sub_optab, temp, signmask,
3613 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3614 }
3615 else
3616 {
3617 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3618 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3619 signmask = force_reg (mode, signmask);
3620
3621 temp = expand_binop (mode, add_optab, op0, signmask,
3622 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3623 temp = expand_binop (mode, and_optab, temp,
3624 gen_int_mode (masklow, mode),
3625 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3626 temp = expand_binop (mode, sub_optab, temp, signmask,
3627 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3628 }
3629 return temp;
3630 }
3631 }
3632
3633 /* Mask contains the mode's signbit and the significant bits of the
3634 modulus. By including the signbit in the operation, many targets
3635 can avoid an explicit compare operation in the following comparison
3636 against zero. */
3637 wide_int mask = wi::mask (logd, false, prec);
3638 mask = wi::set_bit (mask, prec - 1);
3639
3640 temp = expand_binop (mode, and_optab, op0,
3641 immed_wide_int_const (mask, mode),
3642 result, 1, OPTAB_LIB_WIDEN);
3643 if (temp != result)
3644 emit_move_insn (result, temp);
3645
3646 label = gen_label_rtx ();
3647 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3648
3649 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3650 0, OPTAB_LIB_WIDEN);
3651
3652 mask = wi::mask (logd, true, prec);
3653 temp = expand_binop (mode, ior_optab, temp,
3654 immed_wide_int_const (mask, mode),
3655 result, 1, OPTAB_LIB_WIDEN);
3656 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3657 0, OPTAB_LIB_WIDEN);
3658 if (temp != result)
3659 emit_move_insn (result, temp);
3660 emit_label (label);
3661 return result;
3662 }
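
/* Worked example of the branchless path above (illustrative; 32-bit
   mode, d = 8, logd = 3, op0 = -13, so signmask = -1 and
   masklow = 7):
     XOR/SUB variant: ((-13 ^ -1) - -1) & 7 = 13 & 7 = 5, then
       (5 ^ -1) - -1 = -5, matching -13 % 8 == -5 in C.
     LSHIFTRT variant: signmask >> (32 - 3) = 7, and
       ((-13 + 7) & 7) - 7 = 2 - 7 = -5 as well.
   For nonnegative OP0 the mask is zero and both variants collapse to
   op0 & 7.  */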
3663
3664 /* Expand signed division of OP0 by a power of two D in mode MODE.
3665 This routine is only called for positive values of D. */
3666
3667 static rtx
3668 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3669 {
3670 rtx temp, label;
3671 int logd;
3672
3673 logd = floor_log2 (d);
3674
3675 if (d == 2
3676 && BRANCH_COST (optimize_insn_for_speed_p (),
3677 false) >= 1)
3678 {
3679 temp = gen_reg_rtx (mode);
3680 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3681 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3682 0, OPTAB_LIB_WIDEN);
3683 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3684 }
3685
3686 #ifdef HAVE_conditional_move
3687 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3688 >= 2)
3689 {
3690 rtx temp2;
3691
3692 /* ??? emit_conditional_move forces a stack adjustment via
3693 compare_from_rtx so, if the sequence is discarded, it will
3694 be lost. Do it now instead. */
3695 do_pending_stack_adjust ();
3696
3697 start_sequence ();
3698 temp2 = copy_to_mode_reg (mode, op0);
3699 temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
3700 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3701 temp = force_reg (mode, temp);
3702
3703 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3704 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3705 mode, temp, temp2, mode, 0);
3706 if (temp2)
3707 {
3708 rtx seq = get_insns ();
3709 end_sequence ();
3710 emit_insn (seq);
3711 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3712 }
3713 end_sequence ();
3714 }
3715 #endif
3716
3717 if (BRANCH_COST (optimize_insn_for_speed_p (),
3718 false) >= 2)
3719 {
3720 int ushift = GET_MODE_BITSIZE (mode) - logd;
3721
3722 temp = gen_reg_rtx (mode);
3723 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3724 if (shift_cost (optimize_insn_for_speed_p (), mode, ushift)
3725 > COSTS_N_INSNS (1))
3726 temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
3727 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3728 else
3729 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3730 ushift, NULL_RTX, 1);
3731 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3732 0, OPTAB_LIB_WIDEN);
3733 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3734 }
3735
3736 label = gen_label_rtx ();
3737 temp = copy_to_mode_reg (mode, op0);
3738 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3739 expand_inc (temp, gen_int_mode (d - 1, mode));
3740 emit_label (label);
3741 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3742 }
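
/* Worked example (illustrative): signed x / 4, so logd = 2. The
   branchless forms above compute (x < 0 ? x + 3 : x) >> 2; for
   x = -7 that is (-7 + 3) >> 2 = -1, the truncating quotient, where a
   bare arithmetic shift would give the floor, -2. For d = 2 the bias
   is just the sign bit: (-7 + 1) >> 1 = -3.  */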
3743 \f
3744 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3745 if that is convenient, and returning where the result is.
3746 You may request either the quotient or the remainder as the result;
3747 specify REM_FLAG nonzero to get the remainder.
3748
3749 CODE is the expression code for which kind of division this is;
3750 it controls how rounding is done. MODE is the machine mode to use.
3751 UNSIGNEDP nonzero means do unsigned division. */
3752
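/* For reference (illustrative), with x = -7 and y = 2 the signed
   rounding flavors give
     TRUNC: q = -3, r = -1     FLOOR: q = -4, r = 1
     CEIL:  q = -3, r = -1     ROUND: q = -4, r = 1
   and in each case x == q * y + r.  */
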
3753 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3754 and then correct it by or'ing in missing high bits
3755 if result of ANDI is nonzero.
3756 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3757 This could optimize to a bfexts instruction.
3758 But C doesn't use these operations, so their optimizations are
3759 left for later. */
3760 /* ??? For modulo, we don't actually need the highpart of the first product,
3761 the low part will do nicely. And for small divisors, the second multiply
3762 can also be a low-part only multiply or even be completely left out.
3763 E.g. to calculate the remainder of a division by 3 with a 32 bit
3764 multiply, multiply with 0x55555556 and extract the upper two bits;
3765 the result is exact for inputs up to 0x1fffffff.
3766 The input range can be reduced by using cross-sum rules.
3767 For odd divisors >= 3, the following table gives right shift counts
3768 so that if a number is shifted by an integer multiple of the given
3769 amount, the remainder stays the same:
3770 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3771 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3772 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3773 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3774 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3775
3776 Cross-sum rules for even numbers can be derived by leaving as many bits
3777 to the right alone as the divisor has zeros to the right.
3778 E.g. if x is an unsigned 32 bit number:
3779 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3780 */
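
/* A quick check of the multiply trick above (illustrative): with
   M = 0x55555556 = (2^32 + 2) / 3 and r = x mod 3, the low 32 bits of
   x * M equal (r * 2^32 + 2 * x) / 3, so for small enough x the top
   two bits of that low half are exactly r:
     x = 5: (uint32_t) (5 * 0x55555556ULL) >> 30 == 2 == 5 % 3
     x = 7: (uint32_t) (7 * 0x55555556ULL) >> 30 == 1 == 7 % 3  */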
3781
3782 rtx
3783 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3784 rtx op0, rtx op1, rtx target, int unsignedp)
3785 {
3786 enum machine_mode compute_mode;
3787 rtx tquotient;
3788 rtx quotient = 0, remainder = 0;
3789 rtx last;
3790 int size;
3791 rtx insn;
3792 optab optab1, optab2;
3793 int op1_is_constant, op1_is_pow2 = 0;
3794 int max_cost, extra_cost;
3795 static HOST_WIDE_INT last_div_const = 0;
3796 bool speed = optimize_insn_for_speed_p ();
3797
3798 op1_is_constant = CONST_INT_P (op1);
3799 if (op1_is_constant)
3800 {
3801 unsigned HOST_WIDE_INT ext_op1 = UINTVAL (op1);
3802 if (unsignedp)
3803 ext_op1 &= GET_MODE_MASK (mode);
3804 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3805 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3806 }
3807
3808 /*
3809 This is the structure of expand_divmod:
3810
3811 First comes code to fix up the operands so we can perform the operations
3812 correctly and efficiently.
3813
3814 Second comes a switch statement with code specific for each rounding mode.
3815 For some special operands this code emits all RTL for the desired
3816 operation, for other cases, it generates only a quotient and stores it in
3817 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3818 to indicate that it has not done anything.
3819
3820 Last comes code that finishes the operation. If QUOTIENT is set and
3821 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3822 QUOTIENT is not set, it is computed using trunc rounding.
3823
3824 We try to generate special code for division and remainder when OP1 is a
3825 constant. If |OP1| = 2**n we can use shifts and some other fast
3826 operations. For other values of OP1, we compute a carefully selected
3827 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3828 by m.
3829
3830 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3831 half of the product. Different strategies for generating the product are
3832 implemented in expmed_mult_highpart.
3833
3834 If what we actually want is the remainder, we generate that by another
3835 by-constant multiplication and a subtraction. */
3836
3837 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3838 code below will malfunction if we are, so check here and handle
3839 the special case if so. */
3840 if (op1 == const1_rtx)
3841 return rem_flag ? const0_rtx : op0;
3842
3843 /* When dividing by -1, we could get an overflow.
3844 negv_optab can handle overflows. */
3845 if (! unsignedp && op1 == constm1_rtx)
3846 {
3847 if (rem_flag)
3848 return const0_rtx;
3849 return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
3850 ? negv_optab : neg_optab, op0, target, 0);
3851 }
3852
3853 if (target
3854 /* Don't use the function value register as a target
3855 since we have to read it as well as write it,
3856 and function-inlining gets confused by this. */
3857 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3858 /* Don't clobber an operand while doing a multi-step calculation. */
3859 || ((rem_flag || op1_is_constant)
3860 && (reg_mentioned_p (target, op0)
3861 || (MEM_P (op0) && MEM_P (target))))
3862 || reg_mentioned_p (target, op1)
3863 || (MEM_P (op1) && MEM_P (target))))
3864 target = 0;
3865
3866 /* Get the mode in which to perform this computation. Normally it will
3867 be MODE, but sometimes we can't do the desired operation in MODE.
3868 If so, pick a wider mode in which we can do the operation. Convert
3869 to that mode at the start to avoid repeated conversions.
3870
3871 First see what operations we need. These depend on the expression
3872 we are evaluating. (We assume that divxx3 insns exist under the
3873 same conditions as modxx3 insns and that these insns don't normally
3874 fail. If these assumptions are not correct, we may generate less
3875 efficient code in some cases.)
3876
3877 Then see if we find a mode in which we can open-code that operation
3878 (either a division, modulus, or shift). Finally, check for the smallest
3879 mode for which we can do the operation with a library call. */
3880
3881 /* We might want to refine this now that we have division-by-constant
3882 optimization. Since expmed_mult_highpart tries so many variants, it is
3883 not straightforward to generalize this. Maybe we should make an array
3884 of possible modes in init_expmed? Save this for GCC 2.7. */
3885
3886 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3887 ? (unsignedp ? lshr_optab : ashr_optab)
3888 : (unsignedp ? udiv_optab : sdiv_optab));
3889 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3890 ? optab1
3891 : (unsignedp ? udivmod_optab : sdivmod_optab));
3892
3893 for (compute_mode = mode; compute_mode != VOIDmode;
3894 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3895 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3896 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3897 break;
3898
3899 if (compute_mode == VOIDmode)
3900 for (compute_mode = mode; compute_mode != VOIDmode;
3901 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3902 if (optab_libfunc (optab1, compute_mode)
3903 || optab_libfunc (optab2, compute_mode))
3904 break;
3905
3906 /* If we still couldn't find a mode, use MODE, but expand_binop will
3907 probably die. */
3908 if (compute_mode == VOIDmode)
3909 compute_mode = mode;
3910
3911 if (target && GET_MODE (target) == compute_mode)
3912 tquotient = target;
3913 else
3914 tquotient = gen_reg_rtx (compute_mode);
3915
3916 size = GET_MODE_BITSIZE (compute_mode);
3917 #if 0
3918 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3919 (mode), and thereby get better code when OP1 is a constant. Do that
3920 later. It will require going over all usages of SIZE below. */
3921 size = GET_MODE_BITSIZE (mode);
3922 #endif
3923
3924 /* Only deduct something for a REM if the last divide done was
3925 for a different constant. Then set the constant of the last
3926 divide. */
3927 max_cost = (unsignedp
3928 ? udiv_cost (speed, compute_mode)
3929 : sdiv_cost (speed, compute_mode));
3930 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3931 && INTVAL (op1) == last_div_const))
3932 max_cost -= (mul_cost (speed, compute_mode)
3933 + add_cost (speed, compute_mode));
3934
3935 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3936
3937 /* Now convert to the best mode to use. */
3938 if (compute_mode != mode)
3939 {
3940 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3941 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3942
3943 /* convert_modes may have placed op1 into a register, so we
3944 must recompute the following. */
3945 op1_is_constant = CONST_INT_P (op1);
3946 op1_is_pow2 = (op1_is_constant
3947 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3948 || (! unsignedp
3949 && EXACT_POWER_OF_2_OR_ZERO_P (-UINTVAL (op1))))));
3950 }
3951
3952 /* If one of the operands is a volatile MEM, copy it into a register. */
3953
3954 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3955 op0 = force_reg (compute_mode, op0);
3956 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3957 op1 = force_reg (compute_mode, op1);
3958
3959 /* If we need the remainder or if OP1 is constant, we need to
3960 put OP0 in a register in case it has any queued subexpressions. */
3961 if (rem_flag || op1_is_constant)
3962 op0 = force_reg (compute_mode, op0);
3963
3964 last = get_last_insn ();
3965
3966 /* Promote floor rounding to trunc rounding for unsigned operations. */
3967 if (unsignedp)
3968 {
3969 if (code == FLOOR_DIV_EXPR)
3970 code = TRUNC_DIV_EXPR;
3971 if (code == FLOOR_MOD_EXPR)
3972 code = TRUNC_MOD_EXPR;
3973 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3974 code = TRUNC_DIV_EXPR;
3975 }
3976
3977 if (op1 != const0_rtx)
3978 switch (code)
3979 {
3980 case TRUNC_MOD_EXPR:
3981 case TRUNC_DIV_EXPR:
3982 if (op1_is_constant)
3983 {
3984 if (unsignedp)
3985 {
3986 unsigned HOST_WIDE_INT mh, ml;
3987 int pre_shift, post_shift;
3988 int dummy;
3989 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3990 & GET_MODE_MASK (compute_mode));
3991
3992 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3993 {
3994 pre_shift = floor_log2 (d);
3995 if (rem_flag)
3996 {
3997 unsigned HOST_WIDE_INT mask
3998 = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1;
3999 remainder
4000 = expand_binop (compute_mode, and_optab, op0,
4001 gen_int_mode (mask, compute_mode),
4002 remainder, 1,
4003 OPTAB_LIB_WIDEN);
4004 if (remainder)
4005 return gen_lowpart (mode, remainder);
4006 }
4007 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4008 pre_shift, tquotient, 1);
4009 }
4010 else if (size <= HOST_BITS_PER_WIDE_INT)
4011 {
4012 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4013 {
4014 /* Most significant bit of divisor is set; emit an scc
4015 insn. */
4016 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4017 compute_mode, 1, 1);
4018 }
4019 else
4020 {
4021 /* Find a suitable multiplier and right shift count
4022 instead of multiplying with D. */
4023
4024 mh = choose_multiplier (d, size, size,
4025 &ml, &post_shift, &dummy);
4026
4027 /* If the suggested multiplier is more than SIZE bits,
4028 we can do better for even divisors, using an
4029 initial right shift. */
4030 if (mh != 0 && (d & 1) == 0)
4031 {
4032 pre_shift = floor_log2 (d & -d);
4033 mh = choose_multiplier (d >> pre_shift, size,
4034 size - pre_shift,
4035 &ml, &post_shift, &dummy);
4036 gcc_assert (!mh);
4037 }
4038 else
4039 pre_shift = 0;
4040
4041 if (mh != 0)
4042 {
4043 rtx t1, t2, t3, t4;
4044
4045 if (post_shift - 1 >= BITS_PER_WORD)
4046 goto fail1;
4047
4048 extra_cost
4049 = (shift_cost (speed, compute_mode, post_shift - 1)
4050 + shift_cost (speed, compute_mode, 1)
4051 + 2 * add_cost (speed, compute_mode));
4052 t1 = expmed_mult_highpart
4053 (compute_mode, op0,
4054 gen_int_mode (ml, compute_mode),
4055 NULL_RTX, 1, max_cost - extra_cost);
4056 if (t1 == 0)
4057 goto fail1;
4058 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4059 op0, t1),
4060 NULL_RTX);
4061 t3 = expand_shift (RSHIFT_EXPR, compute_mode,
4062 t2, 1, NULL_RTX, 1);
4063 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4064 t1, t3),
4065 NULL_RTX);
4066 quotient = expand_shift
4067 (RSHIFT_EXPR, compute_mode, t4,
4068 post_shift - 1, tquotient, 1);
4069 }
4070 else
4071 {
4072 rtx t1, t2;
4073
4074 if (pre_shift >= BITS_PER_WORD
4075 || post_shift >= BITS_PER_WORD)
4076 goto fail1;
4077
4078 t1 = expand_shift
4079 (RSHIFT_EXPR, compute_mode, op0,
4080 pre_shift, NULL_RTX, 1);
4081 extra_cost
4082 = (shift_cost (speed, compute_mode, pre_shift)
4083 + shift_cost (speed, compute_mode, post_shift));
4084 t2 = expmed_mult_highpart
4085 (compute_mode, t1,
4086 gen_int_mode (ml, compute_mode),
4087 NULL_RTX, 1, max_cost - extra_cost);
4088 if (t2 == 0)
4089 goto fail1;
4090 quotient = expand_shift
4091 (RSHIFT_EXPR, compute_mode, t2,
4092 post_shift, tquotient, 1);
4093 }
4094 }
4095 }
4096 else /* Mode too wide to use tricky code */
4097 break;
4098
4099 insn = get_last_insn ();
4100 if (insn != last)
4101 set_dst_reg_note (insn, REG_EQUAL,
4102 gen_rtx_UDIV (compute_mode, op0, op1),
4103 quotient);
4104 }
4105 else /* TRUNC_DIV, signed */
4106 {
4107 unsigned HOST_WIDE_INT ml;
4108 int lgup, post_shift;
4109 rtx mlr;
4110 HOST_WIDE_INT d = INTVAL (op1);
4111 unsigned HOST_WIDE_INT abs_d;
4112
4113 /* Since d might be INT_MIN, we have to cast to
4114 unsigned HOST_WIDE_INT before negating to avoid
4115 undefined signed overflow. */
4116 abs_d = (d >= 0
4117 ? (unsigned HOST_WIDE_INT) d
4118 : - (unsigned HOST_WIDE_INT) d);
4119
4120 /* n rem d = n rem -d */
4121 if (rem_flag && d < 0)
4122 {
4123 d = abs_d;
4124 op1 = gen_int_mode (abs_d, compute_mode);
4125 }
4126
4127 if (d == 1)
4128 quotient = op0;
4129 else if (d == -1)
4130 quotient = expand_unop (compute_mode, neg_optab, op0,
4131 tquotient, 0);
4132 else if (HOST_BITS_PER_WIDE_INT >= size
4133 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4134 {
4135 /* This case is not handled correctly below. */
4136 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4137 compute_mode, 1, 1);
4138 if (quotient == 0)
4139 goto fail1;
4140 }
4141 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4142 && (rem_flag
4143 ? smod_pow2_cheap (speed, compute_mode)
4144 : sdiv_pow2_cheap (speed, compute_mode))
4145 /* We assume that the cheap metric is true if the
4146 optab has an expander for this mode. */
4147 && ((optab_handler ((rem_flag ? smod_optab
4148 : sdiv_optab),
4149 compute_mode)
4150 != CODE_FOR_nothing)
4151 || (optab_handler (sdivmod_optab,
4152 compute_mode)
4153 != CODE_FOR_nothing)))
4154 ;
4155 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4156 {
4157 if (rem_flag)
4158 {
4159 remainder = expand_smod_pow2 (compute_mode, op0, d);
4160 if (remainder)
4161 return gen_lowpart (mode, remainder);
4162 }
4163
4164 if (sdiv_pow2_cheap (speed, compute_mode)
4165 && ((optab_handler (sdiv_optab, compute_mode)
4166 != CODE_FOR_nothing)
4167 || (optab_handler (sdivmod_optab, compute_mode)
4168 != CODE_FOR_nothing)))
4169 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4170 compute_mode, op0,
4171 gen_int_mode (abs_d,
4172 compute_mode),
4173 NULL_RTX, 0);
4174 else
4175 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4176
4177 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4178 negate the quotient. */
4179 if (d < 0)
4180 {
4181 insn = get_last_insn ();
4182 if (insn != last
4183 && abs_d < ((unsigned HOST_WIDE_INT) 1
4184 << (HOST_BITS_PER_WIDE_INT - 1)))
4185 set_dst_reg_note (insn, REG_EQUAL,
4186 gen_rtx_DIV (compute_mode, op0,
4187 gen_int_mode
4188 (abs_d,
4189 compute_mode)),
4190 quotient);
4191
4192 quotient = expand_unop (compute_mode, neg_optab,
4193 quotient, quotient, 0);
4194 }
4195 }
4196 else if (size <= HOST_BITS_PER_WIDE_INT)
4197 {
4198 choose_multiplier (abs_d, size, size - 1,
4199 &ml, &post_shift, &lgup);
4200 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4201 {
4202 rtx t1, t2, t3;
4203
4204 if (post_shift >= BITS_PER_WORD
4205 || size - 1 >= BITS_PER_WORD)
4206 goto fail1;
4207
4208 extra_cost = (shift_cost (speed, compute_mode, post_shift)
4209 + shift_cost (speed, compute_mode, size - 1)
4210 + add_cost (speed, compute_mode));
4211 t1 = expmed_mult_highpart
4212 (compute_mode, op0, gen_int_mode (ml, compute_mode),
4213 NULL_RTX, 0, max_cost - extra_cost);
4214 if (t1 == 0)
4215 goto fail1;
4216 t2 = expand_shift
4217 (RSHIFT_EXPR, compute_mode, t1,
4218 post_shift, NULL_RTX, 0);
4219 t3 = expand_shift
4220 (RSHIFT_EXPR, compute_mode, op0,
4221 size - 1, NULL_RTX, 0);
4222 if (d < 0)
4223 quotient
4224 = force_operand (gen_rtx_MINUS (compute_mode,
4225 t3, t2),
4226 tquotient);
4227 else
4228 quotient
4229 = force_operand (gen_rtx_MINUS (compute_mode,
4230 t2, t3),
4231 tquotient);
4232 }
4233 else
4234 {
4235 rtx t1, t2, t3, t4;
4236
4237 if (post_shift >= BITS_PER_WORD
4238 || size - 1 >= BITS_PER_WORD)
4239 goto fail1;
4240
4241 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4242 mlr = gen_int_mode (ml, compute_mode);
4243 extra_cost = (shift_cost (speed, compute_mode, post_shift)
4244 + shift_cost (speed, compute_mode, size - 1)
4245 + 2 * add_cost (speed, compute_mode));
4246 t1 = expmed_mult_highpart (compute_mode, op0, mlr,
4247 NULL_RTX, 0,
4248 max_cost - extra_cost);
4249 if (t1 == 0)
4250 goto fail1;
4251 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4252 t1, op0),
4253 NULL_RTX);
4254 t3 = expand_shift
4255 (RSHIFT_EXPR, compute_mode, t2,
4256 post_shift, NULL_RTX, 0);
4257 t4 = expand_shift
4258 (RSHIFT_EXPR, compute_mode, op0,
4259 size - 1, NULL_RTX, 0);
4260 if (d < 0)
4261 quotient
4262 = force_operand (gen_rtx_MINUS (compute_mode,
4263 t4, t3),
4264 tquotient);
4265 else
4266 quotient
4267 = force_operand (gen_rtx_MINUS (compute_mode,
4268 t3, t4),
4269 tquotient);
4270 }
4271 }
4272 else /* Mode too wide to use tricky code */
4273 break;
4274
4275 insn = get_last_insn ();
4276 if (insn != last)
4277 set_dst_reg_note (insn, REG_EQUAL,
4278 gen_rtx_DIV (compute_mode, op0, op1),
4279 quotient);
4280 }
4281 break;
4282 }
4283 fail1:
4284 delete_insns_since (last);
4285 break;
4286
4287 case FLOOR_DIV_EXPR:
4288 case FLOOR_MOD_EXPR:
4289 /* We will come here only for signed operations. */
4290 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4291 {
4292 unsigned HOST_WIDE_INT mh, ml;
4293 int pre_shift, lgup, post_shift;
4294 HOST_WIDE_INT d = INTVAL (op1);
4295
4296 if (d > 0)
4297 {
4298 /* We could just as easily deal with negative constants here,
4299 but it does not seem worth the trouble for GCC 2.6. */
4300 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4301 {
4302 pre_shift = floor_log2 (d);
4303 if (rem_flag)
4304 {
4305 unsigned HOST_WIDE_INT mask
4306 = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1;
4307 remainder = expand_binop
4308 (compute_mode, and_optab, op0,
4309 gen_int_mode (mask, compute_mode),
4310 remainder, 0, OPTAB_LIB_WIDEN);
4311 if (remainder)
4312 return gen_lowpart (mode, remainder);
4313 }
4314 quotient = expand_shift
4315 (RSHIFT_EXPR, compute_mode, op0,
4316 pre_shift, tquotient, 0);
4317 }
4318 else
4319 {
4320 rtx t1, t2, t3, t4;
4321
4322 mh = choose_multiplier (d, size, size - 1,
4323 &ml, &post_shift, &lgup);
4324 gcc_assert (!mh);
4325
4326 if (post_shift < BITS_PER_WORD
4327 && size - 1 < BITS_PER_WORD)
4328 {
4329 t1 = expand_shift
4330 (RSHIFT_EXPR, compute_mode, op0,
4331 size - 1, NULL_RTX, 0);
4332 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4333 NULL_RTX, 0, OPTAB_WIDEN);
4334 extra_cost = (shift_cost (speed, compute_mode, post_shift)
4335 + shift_cost (speed, compute_mode, size - 1)
4336 + 2 * add_cost (speed, compute_mode));
4337 t3 = expmed_mult_highpart
4338 (compute_mode, t2, gen_int_mode (ml, compute_mode),
4339 NULL_RTX, 1, max_cost - extra_cost);
4340 if (t3 != 0)
4341 {
4342 t4 = expand_shift
4343 (RSHIFT_EXPR, compute_mode, t3,
4344 post_shift, NULL_RTX, 1);
4345 quotient = expand_binop (compute_mode, xor_optab,
4346 t4, t1, tquotient, 0,
4347 OPTAB_WIDEN);
4348 }
4349 }
4350 }
4351 }
4352 else
4353 {
4354 rtx nsign, t1, t2, t3, t4;
4355 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4356 op0, constm1_rtx), NULL_RTX);
4357 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4358 0, OPTAB_WIDEN);
4359 nsign = expand_shift
4360 (RSHIFT_EXPR, compute_mode, t2,
4361 size - 1, NULL_RTX, 0);
4362 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4363 NULL_RTX);
4364 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4365 NULL_RTX, 0);
4366 if (t4)
4367 {
4368 rtx t5;
4369 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4370 NULL_RTX, 0);
4371 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4372 t4, t5),
4373 tquotient);
4374 }
4375 }
4376 }
4377
4378 if (quotient != 0)
4379 break;
4380 delete_insns_since (last);
4381
4382 /* Try using an instruction that produces both the quotient and
4383 remainder, using truncation. We can easily compensate the quotient
4384 or remainder to get floor rounding, once we have the remainder.
4385 Notice that we also compute the final remainder value here,
4386 and return the result right away. */
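/* The compensation used below (a sketch): starting from the
   truncating q and r, floor rounding needs q -= 1 and r += y exactly
   when r != 0 and x and y have opposite signs; e.g. -7 / 2 truncates
   to q = -3, r = -1 and is fixed up to q = -4, r = 1.  */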
4387 if (target == 0 || GET_MODE (target) != compute_mode)
4388 target = gen_reg_rtx (compute_mode);
4389
4390 if (rem_flag)
4391 {
4392 remainder
4393 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4394 quotient = gen_reg_rtx (compute_mode);
4395 }
4396 else
4397 {
4398 quotient
4399 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4400 remainder = gen_reg_rtx (compute_mode);
4401 }
4402
4403 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4404 quotient, remainder, 0))
4405 {
4406 /* This could be computed with a branch-less sequence.
4407 Save that for later. */
4408 rtx tem;
4409 rtx label = gen_label_rtx ();
4410 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4411 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4412 NULL_RTX, 0, OPTAB_WIDEN);
4413 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4414 expand_dec (quotient, const1_rtx);
4415 expand_inc (remainder, op1);
4416 emit_label (label);
4417 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4418 }
4419
4420 /* No luck with division elimination or divmod. Have to do it
4421 by conditionally adjusting op0 *and* the result. */
4422 {
4423 rtx label1, label2, label3, label4, label5;
4424 rtx adjusted_op0;
4425 rtx tem;
4426
4427 quotient = gen_reg_rtx (compute_mode);
4428 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4429 label1 = gen_label_rtx ();
4430 label2 = gen_label_rtx ();
4431 label3 = gen_label_rtx ();
4432 label4 = gen_label_rtx ();
4433 label5 = gen_label_rtx ();
4434 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4435 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4436 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4437 quotient, 0, OPTAB_LIB_WIDEN);
4438 if (tem != quotient)
4439 emit_move_insn (quotient, tem);
4440 emit_jump_insn (gen_jump (label5));
4441 emit_barrier ();
4442 emit_label (label1);
4443 expand_inc (adjusted_op0, const1_rtx);
4444 emit_jump_insn (gen_jump (label4));
4445 emit_barrier ();
4446 emit_label (label2);
4447 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4448 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4449 quotient, 0, OPTAB_LIB_WIDEN);
4450 if (tem != quotient)
4451 emit_move_insn (quotient, tem);
4452 emit_jump_insn (gen_jump (label5));
4453 emit_barrier ();
4454 emit_label (label3);
4455 expand_dec (adjusted_op0, const1_rtx);
4456 emit_label (label4);
4457 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4458 quotient, 0, OPTAB_LIB_WIDEN);
4459 if (tem != quotient)
4460 emit_move_insn (quotient, tem);
4461 expand_dec (quotient, const1_rtx);
4462 emit_label (label5);
4463 }
4464 break;
4465
4466 case CEIL_DIV_EXPR:
4467 case CEIL_MOD_EXPR:
4468 if (unsignedp)
4469 {
4470 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4471 {
4472 rtx t1, t2, t3;
4473 unsigned HOST_WIDE_INT d = INTVAL (op1);
4474 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4475 floor_log2 (d), tquotient, 1);
4476 t2 = expand_binop (compute_mode, and_optab, op0,
4477 gen_int_mode (d - 1, compute_mode),
4478 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4479 t3 = gen_reg_rtx (compute_mode);
4480 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4481 compute_mode, 1, 1);
4482 if (t3 == 0)
4483 {
4484 rtx lab;
4485 lab = gen_label_rtx ();
4486 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4487 expand_inc (t1, const1_rtx);
4488 emit_label (lab);
4489 quotient = t1;
4490 }
4491 else
4492 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4493 t1, t3),
4494 tquotient);
4495 break;
4496 }
4497
4498 /* Try using an instruction that produces both the quotient and
4499 remainder, using truncation. We can easily compensate the
4500 quotient or remainder to get ceiling rounding, once we have the
4501 remainder. Notice that we also compute the final remainder
4502 value here, and return the result right away. */
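/* The compensation used below (a sketch): starting from the
   truncating q and r, unsigned ceiling rounding needs q += 1 and
   r -= y exactly when r != 0; e.g. 7 / 2 truncates to q = 3, r = 1
   and is fixed up to q = 4, r = -1 (i.e. x - q * y).  */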
4503 if (target == 0 || GET_MODE (target) != compute_mode)
4504 target = gen_reg_rtx (compute_mode);
4505
4506 if (rem_flag)
4507 {
4508 remainder = (REG_P (target)
4509 ? target : gen_reg_rtx (compute_mode));
4510 quotient = gen_reg_rtx (compute_mode);
4511 }
4512 else
4513 {
4514 quotient = (REG_P (target)
4515 ? target : gen_reg_rtx (compute_mode));
4516 remainder = gen_reg_rtx (compute_mode);
4517 }
4518
4519 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4520 remainder, 1))
4521 {
4522 /* This could be computed with a branch-less sequence.
4523 Save that for later. */
4524 rtx label = gen_label_rtx ();
4525 do_cmp_and_jump (remainder, const0_rtx, EQ,
4526 compute_mode, label);
4527 expand_inc (quotient, const1_rtx);
4528 expand_dec (remainder, op1);
4529 emit_label (label);
4530 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4531 }
4532
4533 /* No luck with division elimination or divmod. Have to do it
4534 by conditionally adjusting op0 *and* the result. */
4535 {
4536 rtx label1, label2;
4537 rtx adjusted_op0, tem;
4538
4539 quotient = gen_reg_rtx (compute_mode);
4540 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4541 label1 = gen_label_rtx ();
4542 label2 = gen_label_rtx ();
4543 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4544 compute_mode, label1);
4545 emit_move_insn (quotient, const0_rtx);
4546 emit_jump_insn (gen_jump (label2));
4547 emit_barrier ();
4548 emit_label (label1);
4549 expand_dec (adjusted_op0, const1_rtx);
4550 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4551 quotient, 1, OPTAB_LIB_WIDEN);
4552 if (tem != quotient)
4553 emit_move_insn (quotient, tem);
4554 expand_inc (quotient, const1_rtx);
4555 emit_label (label2);
4556 }
4557 }
4558 else /* signed */
4559 {
4560 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4561 && INTVAL (op1) >= 0)
4562 {
4563 /* This is extremely similar to the code for the unsigned case
4564 above. For 2.7 we should merge these variants, but for
4565 2.6.1 I don't want to touch the code for unsigned since that
4566 gets used in C. The signed case will only be used by other
4567 languages (Ada). */
4568
4569 rtx t1, t2, t3;
4570 unsigned HOST_WIDE_INT d = INTVAL (op1);
4571 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4572 floor_log2 (d), tquotient, 0);
4573 t2 = expand_binop (compute_mode, and_optab, op0,
4574 gen_int_mode (d - 1, compute_mode),
4575 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4576 t3 = gen_reg_rtx (compute_mode);
4577 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4578 compute_mode, 1, 1);
4579 if (t3 == 0)
4580 {
4581 rtx lab;
4582 lab = gen_label_rtx ();
4583 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4584 expand_inc (t1, const1_rtx);
4585 emit_label (lab);
4586 quotient = t1;
4587 }
4588 else
4589 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4590 t1, t3),
4591 tquotient);
4592 break;
4593 }
4594
4595 /* Try using an instruction that produces both the quotient and
4596 remainder, using truncation. We can easily compensate the
4597 quotient or remainder to get ceiling rounding, once we have the
4598 remainder. Notice that we also compute the final remainder
4599 value here, and return the result right away. */
4600 if (target == 0 || GET_MODE (target) != compute_mode)
4601 target = gen_reg_rtx (compute_mode);
4602 if (rem_flag)
4603 {
4604 remainder = (REG_P (target)
4605 ? target : gen_reg_rtx (compute_mode));
4606 quotient = gen_reg_rtx (compute_mode);
4607 }
4608 else
4609 {
4610 quotient = (REG_P (target)
4611 ? target : gen_reg_rtx (compute_mode));
4612 remainder = gen_reg_rtx (compute_mode);
4613 }
4614
4615 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4616 remainder, 0))
4617 {
4618 /* This could be computed with a branch-less sequence.
4619 Save that for later. */
4620 rtx tem;
4621 rtx label = gen_label_rtx ();
4622 do_cmp_and_jump (remainder, const0_rtx, EQ,
4623 compute_mode, label);
4624 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4625 NULL_RTX, 0, OPTAB_WIDEN);
4626 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4627 expand_inc (quotient, const1_rtx);
4628 expand_dec (remainder, op1);
4629 emit_label (label);
4630 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4631 }
4632
4633 /* No luck with division elimination or divmod. Have to do it
4634 by conditionally adjusting op0 *and* the result. */
4635 {
4636 rtx label1, label2, label3, label4, label5;
4637 rtx adjusted_op0;
4638 rtx tem;
4639
4640 quotient = gen_reg_rtx (compute_mode);
4641 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4642 label1 = gen_label_rtx ();
4643 label2 = gen_label_rtx ();
4644 label3 = gen_label_rtx ();
4645 label4 = gen_label_rtx ();
4646 label5 = gen_label_rtx ();
4647 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4648 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4649 compute_mode, label1);
4650 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4651 quotient, 0, OPTAB_LIB_WIDEN);
4652 if (tem != quotient)
4653 emit_move_insn (quotient, tem);
4654 emit_jump_insn (gen_jump (label5));
4655 emit_barrier ();
4656 emit_label (label1);
4657 expand_dec (adjusted_op0, const1_rtx);
4658 emit_jump_insn (gen_jump (label4));
4659 emit_barrier ();
4660 emit_label (label2);
4661 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4662 compute_mode, label3);
4663 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4664 quotient, 0, OPTAB_LIB_WIDEN);
4665 if (tem != quotient)
4666 emit_move_insn (quotient, tem);
4667 emit_jump_insn (gen_jump (label5));
4668 emit_barrier ();
4669 emit_label (label3);
4670 expand_inc (adjusted_op0, const1_rtx);
4671 emit_label (label4);
4672 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4673 quotient, 0, OPTAB_LIB_WIDEN);
4674 if (tem != quotient)
4675 emit_move_insn (quotient, tem);
4676 expand_inc (quotient, const1_rtx);
4677 emit_label (label5);
4678 }
4679 }
4680 break;
4681
4682 case EXACT_DIV_EXPR:
4683 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4684 {
4685 HOST_WIDE_INT d = INTVAL (op1);
4686 unsigned HOST_WIDE_INT ml;
4687 int pre_shift;
4688 rtx t1;
4689
4690 pre_shift = floor_log2 (d & -d);
4691 ml = invert_mod2n (d >> pre_shift, size);
4692 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4693 pre_shift, NULL_RTX, unsignedp);
4694 quotient = expand_mult (compute_mode, t1,
4695 gen_int_mode (ml, compute_mode),
4696 NULL_RTX, 1);
4697
4698 insn = get_last_insn ();
4699 set_dst_reg_note (insn, REG_EQUAL,
4700 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4701 compute_mode, op0, op1),
4702 quotient);
4703 }
4704 break;
4705
4706 case ROUND_DIV_EXPR:
4707 case ROUND_MOD_EXPR:
4708 if (unsignedp)
4709 {
4710 rtx tem;
4711 rtx label;
4712 label = gen_label_rtx ();
4713 quotient = gen_reg_rtx (compute_mode);
4714 remainder = gen_reg_rtx (compute_mode);
4715 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4716 {
4717 rtx tem;
4718 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4719 quotient, 1, OPTAB_LIB_WIDEN);
4720 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4721 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4722 remainder, 1, OPTAB_LIB_WIDEN);
4723 }
4724 tem = plus_constant (compute_mode, op1, -1);
4725 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
4726 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4727 expand_inc (quotient, const1_rtx);
4728 expand_dec (remainder, op1);
4729 emit_label (label);
4730 }
4731 else
4732 {
4733 rtx abs_rem, abs_op1, tem, mask;
4734 rtx label;
4735 label = gen_label_rtx ();
4736 quotient = gen_reg_rtx (compute_mode);
4737 remainder = gen_reg_rtx (compute_mode);
4738 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4739 {
4740 rtx tem;
4741 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4742 quotient, 0, OPTAB_LIB_WIDEN);
4743 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4744 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4745 remainder, 0, OPTAB_LIB_WIDEN);
4746 }
4747 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4748 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4749 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4750 1, NULL_RTX, 1);
4751 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4752 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4753 NULL_RTX, 0, OPTAB_WIDEN);
4754 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4755 size - 1, NULL_RTX, 0);
4756 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4757 NULL_RTX, 0, OPTAB_WIDEN);
4758 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4759 NULL_RTX, 0, OPTAB_WIDEN);
4760 expand_inc (quotient, tem);
4761 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4762 NULL_RTX, 0, OPTAB_WIDEN);
4763 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4764 NULL_RTX, 0, OPTAB_WIDEN);
4765 expand_dec (remainder, tem);
4766 emit_label (label);
4767 }
4768 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4769
4770 default:
4771 gcc_unreachable ();
4772 }
4773
4774 if (quotient == 0)
4775 {
4776 if (target && GET_MODE (target) != compute_mode)
4777 target = 0;
4778
4779 if (rem_flag)
4780 {
4781 /* Try to produce the remainder without producing the quotient.
4782 If we seem to have a divmod pattern that does not require widening,
4783 don't try widening here. We should really have a WIDEN argument
4784 to expand_twoval_binop, since what we'd really like to do here is
4785 1) try a mod insn in compute_mode
4786 2) try a divmod insn in compute_mode
4787 3) try a div insn in compute_mode and multiply-subtract to get
4788 remainder
4789 4) try the same things with widening allowed. */
4790 remainder
4791 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4792 op0, op1, target,
4793 unsignedp,
4794 ((optab_handler (optab2, compute_mode)
4795 != CODE_FOR_nothing)
4796 ? OPTAB_DIRECT : OPTAB_WIDEN));
4797 if (remainder == 0)
4798 {
4799 /* No luck there. Can we do remainder and divide at once
4800 without a library call? */
4801 remainder = gen_reg_rtx (compute_mode);
4802 if (! expand_twoval_binop ((unsignedp
4803 ? udivmod_optab
4804 : sdivmod_optab),
4805 op0, op1,
4806 NULL_RTX, remainder, unsignedp))
4807 remainder = 0;
4808 }
4809
4810 if (remainder)
4811 return gen_lowpart (mode, remainder);
4812 }
4813
4814 /* Produce the quotient. Try a quotient insn, but not a library call.
4815 If we have a divmod in this mode, use it in preference to widening
4816 the div (for this test we assume it will not fail). Note that optab2
4817 is set to the one of the two optabs that the call below will use. */
4818 quotient
4819 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4820 op0, op1, rem_flag ? NULL_RTX : target,
4821 unsignedp,
4822 ((optab_handler (optab2, compute_mode)
4823 != CODE_FOR_nothing)
4824 ? OPTAB_DIRECT : OPTAB_WIDEN));
4825
4826 if (quotient == 0)
4827 {
4828 /* No luck there. Try a quotient-and-remainder insn,
4829 keeping the quotient alone. */
4830 quotient = gen_reg_rtx (compute_mode);
4831 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4832 op0, op1,
4833 quotient, NULL_RTX, unsignedp))
4834 {
4835 quotient = 0;
4836 if (! rem_flag)
4837 /* Still no luck. If we are not computing the remainder,
4838 use a library call for the quotient. */
4839 quotient = sign_expand_binop (compute_mode,
4840 udiv_optab, sdiv_optab,
4841 op0, op1, target,
4842 unsignedp, OPTAB_LIB_WIDEN);
4843 }
4844 }
4845 }
4846
4847 if (rem_flag)
4848 {
4849 if (target && GET_MODE (target) != compute_mode)
4850 target = 0;
4851
4852 if (quotient == 0)
4853 {
4854 /* No divide instruction either. Use library for remainder. */
4855 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4856 op0, op1, target,
4857 unsignedp, OPTAB_LIB_WIDEN);
4858 /* No remainder function. Try a quotient-and-remainder
4859 function, keeping the remainder. */
4860 if (!remainder)
4861 {
4862 remainder = gen_reg_rtx (compute_mode);
4863 if (!expand_twoval_binop_libfunc
4864 (unsignedp ? udivmod_optab : sdivmod_optab,
4865 op0, op1,
4866 NULL_RTX, remainder,
4867 unsignedp ? UMOD : MOD))
4868 remainder = NULL_RTX;
4869 }
4870 }
4871 else
4872 {
4873 /* We divided. Now finish doing X - Y * (X / Y). */
4874 remainder = expand_mult (compute_mode, quotient, op1,
4875 NULL_RTX, unsignedp);
4876 remainder = expand_binop (compute_mode, sub_optab, op0,
4877 remainder, target, unsignedp,
4878 OPTAB_LIB_WIDEN);
4879 }
4880 }
4881
4882 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4883 }
4884 \f
4885 /* Return a tree node with data type TYPE, describing the value of X.
4886 Usually this is a VAR_DECL, if there is no obvious better choice.
4887 X may be an expression; however, we only support those expressions
4888 generated by loop.c. */
4889
4890 tree
4891 make_tree (tree type, rtx x)
4892 {
4893 tree t;
4894
4895 switch (GET_CODE (x))
4896 {
4897 case CONST_INT:
4898 case CONST_WIDE_INT:
4899 t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type)));
4900 return t;
4901
4902 case CONST_DOUBLE:
4903 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
4904 t = wide_int_to_tree (type,
4905 wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
4906 HOST_BITS_PER_WIDE_INT * 2));
4907 else
4908 {
4909 REAL_VALUE_TYPE d;
4910
4911 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4912 t = build_real (type, d);
4913 }
4914
4915 return t;
4916
4917 case CONST_VECTOR:
4918 {
4919 int units = CONST_VECTOR_NUNITS (x);
4920 tree itype = TREE_TYPE (type);
4921 tree *elts;
4922 int i;
4923
4924 /* Build a tree with vector elements. */
4925 elts = XALLOCAVEC (tree, units);
4926 for (i = units - 1; i >= 0; --i)
4927 {
4928 rtx elt = CONST_VECTOR_ELT (x, i);
4929 elts[i] = make_tree (itype, elt);
4930 }
4931
4932 return build_vector (type, elts);
4933 }
4934
4935 case PLUS:
4936 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4937 make_tree (type, XEXP (x, 1)));
4938
4939 case MINUS:
4940 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4941 make_tree (type, XEXP (x, 1)));
4942
4943 case NEG:
4944 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4945
4946 case MULT:
4947 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4948 make_tree (type, XEXP (x, 1)));
4949
4950 case ASHIFT:
4951 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4952 make_tree (type, XEXP (x, 1)));
4953
4954 case LSHIFTRT:
4955 t = unsigned_type_for (type);
4956 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4957 make_tree (t, XEXP (x, 0)),
4958 make_tree (type, XEXP (x, 1))));
4959
4960 case ASHIFTRT:
4961 t = signed_type_for (type);
4962 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4963 make_tree (t, XEXP (x, 0)),
4964 make_tree (type, XEXP (x, 1))));
4965
4966 case DIV:
4967 if (TREE_CODE (type) != REAL_TYPE)
4968 t = signed_type_for (type);
4969 else
4970 t = type;
4971
4972 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4973 make_tree (t, XEXP (x, 0)),
4974 make_tree (t, XEXP (x, 1))));
4975 case UDIV:
4976 t = unsigned_type_for (type);
4977 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4978 make_tree (t, XEXP (x, 0)),
4979 make_tree (t, XEXP (x, 1))));
4980
4981 case SIGN_EXTEND:
4982 case ZERO_EXTEND:
4983 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4984 GET_CODE (x) == ZERO_EXTEND);
4985 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4986
4987 case CONST:
4988 return make_tree (type, XEXP (x, 0));
4989
4990 case SYMBOL_REF:
4991 t = SYMBOL_REF_DECL (x);
4992 if (t)
4993 return fold_convert (type, build_fold_addr_expr (t));
4994 /* else fall through. */
4995
4996 default:
4997 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
4998
4999 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5000 address mode to pointer mode. */
5001 if (POINTER_TYPE_P (type))
5002 x = convert_memory_address_addr_space
5003 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5004
5005 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5006 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5007 t->decl_with_rtl.rtl = x;
5008
5009 return t;
5010 }
5011 }
5012 \f
5013 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5014 and returning TARGET.
5015
5016 If TARGET is 0, a pseudo-register or constant is returned. */
5017
5018 rtx
5019 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5020 {
5021 rtx tem = 0;
5022
5023 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5024 tem = simplify_binary_operation (AND, mode, op0, op1);
5025 if (tem == 0)
5026 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5027
5028 if (target == 0)
5029 target = tem;
5030 else if (tem != target)
5031 emit_move_insn (target, tem);
5032 return target;
5033 }

/* Helper function for emit_store_flag.  */
static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             enum machine_mode mode, enum machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             enum machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, last, comparison, subtarget;
  enum machine_mode result_mode = targetm.cstore_mode (icode);

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    val_signbit_known_clear_p (result_mode,
                                               STORE_FLAG_VALUE));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compile-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        GET_MODE_BITSIZE (result_mode) - 1, subtarget,
                        normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
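
/* Editor's note -- an illustrative sketch, not part of the original
   source: the normalization above can be traced with a concrete case.
   Suppose the scc pattern produces -1 for true (STORE_FLAG_VALUE == -1)
   but the caller asked for 0/1 (normalizep == 1).  Then
   - normalizep == STORE_FLAG_VALUE, so the result is negated:
   -(-1) -> 1 and -0 -> 0.  If instead STORE_FLAG_VALUE were just the
   sign bit (e.g. 0x80000000 in SImode), a single right shift by
   GET_MODE_BITSIZE - 1 produces the 0/1 or 0/-1 result directly.  */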


/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   enum machine_mode mode, int unsignedp, int normalizep,
                   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }
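
  /* Editor's note -- an illustrative sketch, not part of the original
     source: for instance, the unsigned comparison "x >= 1" (GEU with
     const1_rtx) is rewritten above as "x != 0", and "x < 1" (LTU) as
     "x == 0"; equality tests against zero have the richest set of
     store-flag expansions further down.  */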

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, we can just test the high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode,
                                                              mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        !val_signbit_known_set_p (word_mode,
                                                  (normalizep ? normalizep
                                                   : STORE_FLAG_VALUE)));
          return target;
        }
    }
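
  /* Editor's note -- an illustrative sketch, not part of the original
     source: on a 32-bit target, a DImode test "x == 0" becomes
     "(xlow | xhigh) == 0" in SImode, "x == -1" becomes
     "(xlow & xhigh) == -1", and "x < 0" simply tests the sign of the
     high word.  */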

  /* If this is A < 0 or A >= 0, we can do this by taking the one's
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || val_signbit_p (mode, STORE_FLAG_VALUE)))
    {
      subtarget = target;

      if (!target)
        target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            GET_MODE_BITSIZE (mode) - 1,
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
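
  /* Editor's note -- an illustrative sketch, not part of the original
     source: with normalizep == 1 in SImode, "A < 0" expands to the
     logical shift "(unsigned) A >> 31", which is 1 exactly when the
     sign bit is set, and "A >= 0" expands to "(unsigned) ~A >> 31".
     With normalizep == -1 the shift is arithmetic, giving 0/-1.  */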

  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          tem = emit_cstore (target, icode, code, mode, compare_mode,
                             unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep, target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}

/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  /* If we compare constants, we shouldn't use a store-flag operation,
     but a constant load.  We can get there via the vanilla route that
     usually generates a compare-branch sequence, but will in this case
     fold the comparison to a constant, and thus elide the branch.  */
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return NULL_RTX;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
                           target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
        ;
      else
        return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
          && (code == ORDERED || code == UNORDERED
              || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
              || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
        {
          int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                          || (STORE_FLAG_VALUE == -1 && normalizep == 1));

          /* For the reverse comparison, use either an addition or a XOR.  */
          if (want_add
              && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                           optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       STORE_FLAG_VALUE, target_mode);
              if (tem)
                return expand_binop (target_mode, add_optab, tem,
                                     gen_int_mode (normalizep, target_mode),
                                     target, 0, OPTAB_WIDEN);
            }
          else if (!want_add
                   && rtx_cost (trueval, XOR, 1,
                                optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       normalizep, target_mode);
              if (tem)
                return expand_binop (target_mode, xor_optab, tem, trueval,
                                     target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
            }
        }

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
        return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
         Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
        {
          gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
          return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
                                    target_mode);
        }

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
         conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
                               normalizep, target_mode);
      if (tem == 0)
        return 0;

      if (and_them)
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     tem, const0_rtx, GET_MODE (tem), 0);
      else
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
        delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }

  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem != 0)
        return tem;

      delete_insns_since (last);
    }
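
  /* Editor's note -- an illustrative sketch, not part of the original
     source: "x == y" is reduced above to "(x ^ y) == 0" (or to
     "(x - y) == 0" when no xor pattern exists), and the recursive call
     can then use the zero-comparison tricks below, e.g. the sign-bit
     trick for NE.  */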

  /* For integer comparisons, try the reverse comparison.  However, for
     small X, if we would have to extend it anyway, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
            && code == NE
            && GET_MODE_SIZE (mode) < UNITS_PER_WORD
            && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
          && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                       optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   STORE_FLAG_VALUE, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, add_optab, tem,
                                gen_int_mode (normalizep, target_mode),
                                target, 0, OPTAB_WIDEN);
        }
      else if (!want_add
               && rtx_cost (trueval, XOR, 1,
                            optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   normalizep, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
                                INTVAL (trueval) >= 0, OPTAB_WIDEN);
        }

      if (tem != 0)
        return tem;
      delete_insns_since (last);
    }
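
  /* Editor's note -- an illustrative sketch, not part of the original
     source: the fixup after the reversed comparison is cheap either
     way.  With STORE_FLAG_VALUE == 1 and normalizep == 1, the reversed
     scc yields 1 when the original condition fails, so XORing with
     trueval (1) flips it back; with normalizep == -1 the reversed 0/1
     result plus (-1) gives the desired -1/0.  */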

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST (optimize_insn_for_speed_p (),
                           false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
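
  /* Editor's note -- an illustrative worked example, not part of the
     original source: in a 32-bit mode, A = 5 gives 5 | 4 = 5 (sign bit
     clear, so A <= 0 is false); A = 0 gives 0 | -1 = -1 and A = -3
     gives -3 | -4 = -3, both with the sign bit set, so A <= 0 holds.  */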

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          GET_MODE_BITSIZE (mode) - 1,
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }
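
  /* Editor's note -- an illustrative worked example, not part of the
     original source: with BITS = 31 the arithmetic shift gives 0 for
     A >= 0 and -1 for A < 0.  A = 5 yields 0 - 5 = -5 (sign bit set,
     A > 0 holds); A = 0 yields 0; A = -3 yields -1 - (-3) = 2 (sign
     bit clear, A > 0 fails).  */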

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */

      if (tem == 0
          && (code == NE
              || BRANCH_COST (optimize_insn_for_speed_p (),
                              false) > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }
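
  /* Editor's note -- an illustrative worked example, not part of the
     original source: for NE, (-A) | A has the sign bit set exactly when
     A != 0, e.g. -5 | 5 = -1 but -0 | 0 = 0; even A = INT_MIN works,
     since -INT_MIN wraps back to INT_MIN, whose sign bit is set.  For
     EQ the one's complement flips that sign bit.  */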

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        GET_MODE_BITSIZE (mode) - 1,
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
        ;
      else if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}

/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If that failed, we have to do this with set/compare/jump/set code.
     For "foo != 0" with foo already in OP0, we can just replace OP0
     with 1 when it is nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
                               mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
          || code == ORDERED || code == UNORDERED
          || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
          || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
          || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
        {
          falseval = trueval;
          trueval = const0_rtx;
          code = rcode;
        }
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
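
/* Editor's note -- an illustrative sketch, not part of the original
   source: the general fallback above emits the equivalent of

     target = trueval;
     if (op0 <code> op1) goto done;
     target = falseval;
   done:;

   i.e. a set/compare/jump/set sequence, which always succeeds as long
   as the comparison itself can be branched on.  */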
\f
/* Perform a possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label, -1);
}