/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "langhooks.h"
#include "df.h"
#include "target.h"
#include "expmed.h"
struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx lshift_value (enum machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
                       GET_MODE_PRECISION (mode)), mode);
}
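
/* Illustrative example (not from the original source): with a 32-bit
   MODE, BITSIZE == 4 and BITPOS == 8, mask_rtx yields the constant
   0x00000f00; with COMPLEMENT set it yields 0xfffff0ff instead.  */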
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
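
/* Illustrative example: 8 is 1000 in binary and 8 & 7 == 0, so the
   macro yields true; 12 is 1100 and 12 & 11 == 8, so it yields false.
   0 also satisfies the test, hence the "OR_ZERO" in the name.  */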
struct init_expmed_rtl
{
  struct rtx_def reg;
  struct rtx_def plus;
  struct rtx_def neg;
  struct rtx_def mult;
  struct rtx_def sdiv;
  struct rtx_def udiv;
  struct rtx_def sdiv_32;
  struct rtx_def smod_32;
  struct rtx_def wide_mult;
  struct rtx_def wide_lshr;
  struct rtx_def wide_trunc;
  struct rtx_def shift;
  struct rtx_def shift_mult;
  struct rtx_def shift_add;
  struct rtx_def shift_sub0;
  struct rtx_def shift_sub1;
  struct rtx_def zext;
  struct rtx_def trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};
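
/* The rtx_def fields above are statically allocated rtl skeletons:
   init_expmed stitches each operation together once with
   PUT_CODE/PUT_MODE/XEXP and hands it to set_src_cost, so probing the
   target's rtx costs below needs no freshly allocated rtl.  */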
static void
init_expmed_one_conv (struct init_expmed_rtl *all, enum machine_mode to_mode,
                      enum machine_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  /* We're given no information about the true size of a partial integer,
     only the size of the "full" integer it requires for storage.  For
     comparison purposes here, reduce the bit size by one in that case.  */
  to_size = (GET_MODE_BITSIZE (to_mode)
             - (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT));
  from_size = (GET_MODE_BITSIZE (from_mode)
               - (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT));

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? &all->trunc : &all->zext);

  PUT_MODE (&all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
}
static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      enum machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  enum machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (&all->reg, mode);
  PUT_MODE (&all->plus, mode);
  PUT_MODE (&all->neg, mode);
  PUT_MODE (&all->mult, mode);
  PUT_MODE (&all->sdiv, mode);
  PUT_MODE (&all->udiv, mode);
  PUT_MODE (&all->sdiv_32, mode);
  PUT_MODE (&all->smod_32, mode);
  PUT_MODE (&all->wide_trunc, mode);
  PUT_MODE (&all->shift, mode);
  PUT_MODE (&all->shift_mult, mode);
  PUT_MODE (&all->shift_add, mode);
  PUT_MODE (&all->shift_sub0, mode);
  PUT_MODE (&all->shift_sub1, mode);
  PUT_MODE (&all->zext, mode);
  PUT_MODE (&all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (&all->plus, speed));
  set_neg_cost (speed, mode, set_src_cost (&all->neg, speed));
  set_mul_cost (speed, mode, set_src_cost (&all->mult, speed));
  set_sdiv_cost (speed, mode, set_src_cost (&all->sdiv, speed));
  set_udiv_cost (speed, mode, set_src_cost (&all->udiv, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (&all->sdiv_32, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (&all->smod_32, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (&all->shift, 1) = all->cint[m];
      XEXP (&all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (&all->shift, speed));
      set_shiftadd_cost (speed, mode, m,
                         set_src_cost (&all->shift_add, speed));
      set_shiftsub0_cost (speed, mode, m,
                          set_src_cost (&all->shift_sub0, speed));
      set_shiftsub1_cost (speed, mode, m,
                          set_src_cost (&all->shift_sub1, speed));
    }

  if (SCALAR_INT_MODE_P (mode))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (enum machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, mode, mode_from, speed);
    }
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all->zext, wider_mode);
          PUT_MODE (&all->wide_mult, wider_mode);
          PUT_MODE (&all->wide_lshr, wider_mode);
          XEXP (&all->wide_lshr, 1) = GEN_INT (mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (&all->wide_mult, speed));
          set_mul_highpart_cost (speed, mode,
                                 set_src_cost (&all->wide_trunc, speed));
        }
    }
}
void
init_expmed (void)
{
  struct init_expmed_rtl all;
  enum machine_mode mode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      all.cint[m] = GEN_INT (m);
    }

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? all.cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  PUT_CODE (&all.trunc, TRUNCATE);
  XEXP (&all.trunc, 0) = &all.reg;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (enum machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (enum machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Adjust bitfield memory MEM so that it points to the first unit of mode
   MODE that contains a bitfield of size BITSIZE at bit position BITNUM.
   If MODE is BLKmode, return a reference to every byte in the bitfield.
   Set *NEW_BITNUM to the bit position of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, enum machine_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  if (mode == BLKmode)
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, mode, offset, size);
    }
  else
    {
      unsigned int unit = GET_MODE_BITSIZE (mode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, mode, offset);
    }
}
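
/* Illustrative example: narrowing a bitfield of size 3 at bit 37 to
   QImode gives *NEW_BITNUM == 37 % 8 == 5 and a byte offset of
   (37 - 5) / 8 == 4, i.e. bit 5 of byte 4 of the original memory.  */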
/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              unsigned HOST_WIDE_INT bitregion_start,
                              unsigned HOST_WIDE_INT bitregion_end,
                              enum machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  enum machine_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          enum machine_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          enum machine_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}
/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
                     unsigned HOST_WIDE_INT bitsize,
                     enum machine_mode struct_mode)
{
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
            && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
                || (bitnum + bitsize) % BITS_PER_WORD == 0));
  else
    return bitnum % BITS_PER_WORD == 0;
}
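
/* Illustrative example, assuming 32-bit SImode: for an 8-bit field in
   an SImode structure, a little-endian target treats BITNUM == 0 (or any
   word boundary) as the lowpart, while a BYTES_BIG_ENDIAN target treats
   BITNUM == 24 as the lowpart, since there bitnum + bitsize equals
   GET_MODE_BITSIZE (SImode).  */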
/* Return true if OP is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum, enum machine_mode mode)
{
  return (MEM_P (op0)
          && bitnum % BITS_PER_UNIT == 0
          && bitsize == GET_MODE_BITSIZE (mode)
          && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
              || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}
/* Try to use instruction INSV to store VALUE into a field of OP0.
   BITSIZE and BITNUM are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum, rtx value)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx last = get_last_insn ();
  bool copy_back = false;

  enum machine_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (GET_MODE (value) != op_mode)
    {
      if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
        {
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
            {
              rtx tmp;

              tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (GET_MODE (value),
                                                      value1),
                                           GET_MODE (value), 0);
              value1 = tmp;
            }
          else
            value1 = gen_lowpart (op_mode, value1);
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* Parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}
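
/* Illustrative example of the bit-numbering conversion above: when
   BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN and the insertion unit is 32 bits,
   a field of size 8 at bit 4 becomes bitnum = 32 - 8 - 4 = 20.  */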
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  rtx op0 = str_rtx;
  rtx orig_value;

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
      int byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
          || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.
         In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;

      if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
        {
          sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
          if (sub)
            {
              emit_move_insn (op0, sub);
              return true;
            }
        }
      else
        {
          sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
          if (sub)
            {
              emit_move_insn (sub, value);
              return true;
            }
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
                                  - i - 1
                                  : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                value_word,
                                                GEN_INT (BITS_PER_WORD
                                                         - new_bitsize),
                                                NULL_RTX, true,
                                                OPTAB_LIB_WIDEN);

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return true;
        }
    }

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (GET_MODE (op0)),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      /* Do not use unaligned memory insvs for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      if (!(MEM_VOLATILE_P (op0)
            && flag_strict_volatile_bitfields > 0)
          && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                           fieldmode)
          && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
        return true;

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
                         bitregion_end, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 enum machine_mode fieldmode,
                 rtx value)
{
  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      enum machine_mode bestmode;
      HOST_WIDE_INT offset, size;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      bestmode = get_best_mode (bitsize, bitnum,
                                bitregion_start, bitregion_end,
                                MEM_ALIGN (str_rtx), VOIDmode,
                                MEM_VOLATILE_P (str_rtx));
      str_rtx = adjust_bitfield_address_size (str_rtx, bestmode, offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, true))
    gcc_unreachable ();
}
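
/* Illustrative example of the bit-region adjustment above: for
   bitregion_start == 32 and bitnum == 40, the address is advanced by
   32 / 8 == 4 bytes and the field is then at bit 40 - 32 == 8 of the
   narrowed reference, so bits below the region are never touched.  */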
/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  enum machine_mode mode;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (MEM_P (op0))
    {
      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;

      if (bitregion_end)
        maxbits = bitregion_end - bitregion_start + 1;

      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;

      if (MEM_VOLATILE_P (op0)
          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
          && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
          && flag_strict_volatile_bitfields > 0)
        mode = GET_MODE (op0);
      else
        mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                              MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
                                 bitregion_end, value);
          return;
        }

      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
    }

  mode = GET_MODE (op0);
  gcc_assert (SCALAR_INT_MODE_P (mode));

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, v, bitnum);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitnum, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (bitregion_end
          && unit > BITS_PER_UNIT
          && bitpos + bitsdone - thispos + unit > bitregion_end + 1
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            {
              int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
              /* The args are chosen so that the last part includes the
                 lsb.  Give extract_bit_field the value it needs (with
                 endianness compensation) to fetch the piece we want.  */
              part = extract_fixed_bit_field (word_mode, value, thissize,
                                              total_bits - bitsize + bitsdone,
                                              NULL_RTX, 1);
            }
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD)
                            + (offset * unit / BITS_PER_WORD);
          enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
          if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
            word = word_offset ? const0_rtx : op0;
          else
            word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                          GET_MODE (SUBREG_REG (op0)));
          offset &= BITS_PER_WORD / unit - 1;
        }
      else if (REG_P (op0))
        {
          enum machine_mode op0_mode = GET_MODE (op0);
          if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
            word = offset ? const0_rtx : op0;
          else
            word = operand_subword_force (op0, offset * unit / BITS_PER_WORD,
                                          GET_MODE (op0));
          offset &= BITS_PER_WORD / unit - 1;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.  If WORD is const0_rtx,
         it is just an out-of-bounds access.  Ignore it.  */
      if (word != const0_rtx)
        store_fixed_bit_field (word, thissize, offset * unit + thispos,
                               bitregion_start, bitregion_end, part);
      bitsdone += thissize;
    }
}
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXT_MODE is the mode of the extraction and the other arguments
   are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              enum machine_mode mode, enum machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && GET_MODE (op0) != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (GET_MODE_PRECISION (ext_mode)
              > GET_MODE_PRECISION (GET_MODE (spec_target)))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_extract_optab, outermode);
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_output_operand (&ops[0], target, innermode);
      create_input_operand (&ops[1], op0, outermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        {
          target = ops[0].value;
          if (GET_MODE (target) != mode)
            return gen_lowpart (tmode, target);
          return target;
        }
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
        else if (imode != BLKmode)
          {
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
        else if (REG_P (op0))
          {
            rtx reg, subreg;
            imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
                                            MODE_INT);
            reg = gen_reg_rtx (imode);
            subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
            emit_move_insn (subreg, op0);
            op0 = reg;
            bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
          }
        else
          {
            HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
            rtx mem = assign_stack_temp (GET_MODE (op0), size);
            emit_move_insn (mem, op0);
            op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
          }
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If the bitfield is volatile, we need to make sure the access
     remains on a type-aligned boundary.  */
  if (GET_CODE (op0) == MEM
      && MEM_VOLATILE_P (op0)
      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
      && flag_strict_volatile_bitfields > 0)
    goto no_subreg_mode_swap;

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = mode;
  if (SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode try_mode = mode_for_size (bitsize,
                                                  GET_MODE_CLASS (tmode), 0);
      if (try_mode != BLKmode)
        mode1 = try_mode;
    }
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0)
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (mode1)
      && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
    {
      rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
      if (sub)
        return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
    {
      op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int backwards = WORDS_BIG_ENDIAN;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (backwards
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                             bitsize - i * BITS_PER_WORD),
                                   bitnum + bit_offset, 1, target_part,
                                   mode, word_mode, fallback_p);

          gcc_assert (target_part);
          if (!result_part)
            {
              delete_insns_since (last);
              return NULL;
            }

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    backwards ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to extract_split_bit_field.  */
  if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
    {
      op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      bitnum %= BITS_PER_WORD;
      if (bitnum + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return NULL_RTX;

          target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
          return convert_extracted_bit_field (target, mode, tmode, unsignedp);
        }
    }

  /* From here on we know the desired field is smaller than a word.
     If OP0 is a register, it too fits within a word.  */
  enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
  extraction_insn extv;
  if (!MEM_P (op0)
      /* ??? We could limit the structure size to the part of OP0 that
         contains the field, with appropriate checks for endianness
         and TRULY_NOOP_TRUNCATION.  */
      && get_best_reg_extraction_insn (&extv, pattern,
                                       GET_MODE_BITSIZE (GET_MODE (op0)),
                                       tmode))
    {
      rtx result = extract_bit_field_using_extv (&extv, op0, bitsize, bitnum,
                                                 unsignedp, target, mode,
                                                 tmode);
      if (result)
        return result;
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0))
    {
      /* Do not use extv/extzv for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
          && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
                                           tmode))
        {
          rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
                                                     bitnum, unsignedp,
                                                     target, mode,
                                                     tmode);
          if (result)
            return result;
        }

      rtx last = get_last_insn ();

      /* Try loading part of OP0 into a register and extracting the
         bitfield from that.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
                                               0, 0, tmode, &bitpos);
      if (xop0)
        {
          xop0 = copy_to_reg (xop0);
          rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
                                            unsignedp, target,
                                            mode, tmode, false);
          if (result)
            return result;
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return NULL;

  /* Find a correspondingly-sized integer field, so we can apply
     shifts and masks to it.  */
  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
                                    target, unsignedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
                              target, mode, tmode, true);
}
1700 /* Use shifts and boolean operations to extract a field of BITSIZE bits
1701 from bit BITNUM of OP0.
1703 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1704 If TARGET is nonzero, attempts to store the value there
1705 and return TARGET, but this is not guaranteed.
1706 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1709 extract_fixed_bit_field (enum machine_mode tmode
, rtx op0
,
1710 unsigned HOST_WIDE_INT bitsize
,
1711 unsigned HOST_WIDE_INT bitnum
, rtx target
,
1714 enum machine_mode mode
;
1718 /* Get the proper mode to use for this field. We want a mode that
1719 includes the entire field. If such a mode would be larger than
1720 a word, we won't be doing the extraction the normal way. */
1722 if (MEM_VOLATILE_P (op0
)
1723 && flag_strict_volatile_bitfields
> 0)
1725 if (GET_MODE_BITSIZE (GET_MODE (op0
)) > 0)
1726 mode
= GET_MODE (op0
);
1727 else if (target
&& GET_MODE_BITSIZE (GET_MODE (target
)) > 0)
1728 mode
= GET_MODE (target
);
1733 mode
= get_best_mode (bitsize
, bitnum
, 0, 0,
1734 MEM_ALIGN (op0
), word_mode
, MEM_VOLATILE_P (op0
));
1736 if (mode
== VOIDmode
)
1737 /* The only way this should occur is if the field spans word
1739 return extract_split_bit_field (op0
, bitsize
, bitnum
, unsignedp
);
1741 unsigned int total_bits
= GET_MODE_BITSIZE (mode
);
1742 HOST_WIDE_INT bit_offset
= bitnum
- bitnum
% total_bits
;
1744 /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
1745 if it results in a multi-word access where we otherwise wouldn't
1746 have one. So, check for that case here. */
1748 && MEM_VOLATILE_P (op0
)
1749 && flag_strict_volatile_bitfields
> 0
1750 && bitnum
% BITS_PER_UNIT
+ bitsize
<= total_bits
1751 && bitnum
% GET_MODE_BITSIZE (mode
) + bitsize
> total_bits
)
1753 /* If the target doesn't support unaligned access, give up and
1754 split the access into two. */
1755 if (STRICT_ALIGNMENT
)
1756 return extract_split_bit_field (op0
, bitsize
, bitnum
, unsignedp
);
1757 bit_offset
= bitnum
- bitnum
% BITS_PER_UNIT
;
1759 op0
= adjust_bitfield_address (op0
, mode
, bit_offset
/ BITS_PER_UNIT
);
1760 bitnum
-= bit_offset
;
1763 mode
= GET_MODE (op0
);
1764 gcc_assert (SCALAR_INT_MODE_P (mode
));
1766 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1767 for invalid input, such as extract equivalent of f5 from
1768 gcc.dg/pr48335-2.c. */
1770 if (BYTES_BIG_ENDIAN
)
1771 /* BITNUM is the distance between our msb and that of OP0.
1772 Convert it to the distance from the lsb. */
1773 bitnum
= GET_MODE_BITSIZE (mode
) - bitsize
- bitnum
;
1775 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
1776 We have reduced the big-endian case to the little-endian case. */
1782 /* If the field does not already start at the lsb,
1783 shift it so it does. */
1784 /* Maybe propagate the target for the shift. */
1785 rtx subtarget
= (target
!= 0 && REG_P (target
) ? target
: 0);
1788 op0
= expand_shift (RSHIFT_EXPR
, mode
, op0
, bitnum
, subtarget
, 1);
1790 /* Convert the value to the desired mode. */
1792 op0
= convert_to_mode (tmode
, op0
, 1);
1794 /* Unless the msb of the field used to be the msb when we shifted,
1795 mask out the upper bits. */
1797 if (GET_MODE_BITSIZE (mode
) != bitnum
+ bitsize
)
1798 return expand_binop (GET_MODE (op0
), and_optab
, op0
,
1799 mask_rtx (GET_MODE (op0
), 0, bitsize
, 0),
1800 target
, 1, OPTAB_LIB_WIDEN
);
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
    {
      int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);

      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
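/* An illustrative example of the signed path above: a signed 8-bit field
   whose lsb is bit 4 of a 32-bit word is extracted by shifting left by
   32 - (8 + 4) = 20, which moves the field's msb to bit 31, and then
   arithmetic-shifting right by 32 - 8 = 24, which sign-extends the field
   into the full word.  */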
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE << BITPOS.  */

static rtx
lshift_value (enum machine_mode mode, unsigned HOST_WIDE_INT value,
	      int bitpos)
{
  return immed_wide_int_const (wi::lshift (value, bitpos), mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.  */
      part = extract_fixed_bit_field (word_mode, word, thissize,
				      offset * unit + thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsize - bitsdone, 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsdone - thissize, 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 BITS_PER_WORD - bitsize, NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
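/* An illustrative example of the splitting above: with 32-bit words, a
   16-bit field at bit position 24 is assembled from two unsigned parts,
   8 bits from the tail of the first word and 8 bits from the head of
   the second, combined with IOR before the optional final sign
   extension.  */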
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
   the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
   MODE, fill the upper bits with zeros.  Fail if the layout of either
   mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null.

   This is different from gen_lowpart* in these respects:

     - the returned value must always be considered an rvalue

     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension

     - when MODE is smaller than SRC_MODE, the extraction involves
       a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).

   In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */

rtx
extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
{
  enum machine_mode int_mode, src_int_mode;

  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
      && MODES_TIEABLE_P (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
	return x;
    }

  src_int_mode = int_mode_for_mode (src_mode);
  int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
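/* Usage note: a call such as extract_low_bits (SImode, DImode, x) asks
   for the low 32 bits of X as an rvalue.  For sources in a non-integer
   mode the value is punned to the integer mode of the same size,
   converted there, and punned back, provided the mode pairs are
   tieable; otherwise the function fails and returns null.  */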
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the rtx for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

static rtx
expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
		rtx amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  enum machine_mode op1_mode;
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = amount;
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (CONST_INT_P (op1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1)
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
	       && SCALAR_INT_MODE_P (GET_MODE (op1)))
	op1 = SUBREG_REG (op1);
    }

  /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
     prefer left rotation, if op1 is from bitsize / 2 + 1 to
     bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
     amount instead.  */
  if (rotate
      && CONST_INT_P (op1)
      && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (mode) / 2 + left,
		   GET_MODE_BITSIZE (mode) - 1))
    {
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      left = !left;
      code = left ? LROTATE_EXPR : RROTATE_EXPR;
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_PRECISION (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && (shift_cost (speed, mode, INTVAL (op1))
	  > INTVAL (op1) * add_cost (speed, mode))
      && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
	methods = OPTAB_DIRECT;
      else if (attempt == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute
		 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx new_amount, other_amount;
	      rtx temp1;

	      new_amount = op1;
	      if (op1 == const0_rtx)
		return shifted;
	      else if (CONST_INT_P (op1))
		other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
					- INTVAL (op1));
	      else
		{
		  other_amount
		    = simplify_gen_unary (NEG, GET_MODE (op1),
					  op1, GET_MODE (op1));
		  HOST_WIDE_INT mask = GET_MODE_PRECISION (mode) - 1;
		  other_amount
		    = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
					   gen_int_mode (mask, GET_MODE (op1)));
		}

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				     mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				      mode, shifted, other_amount,
				      subtarget, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? lrotate_optab : rrotate_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? lshift_optab : rshift_uns_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? lshift_optab : rshift_arith_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}
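/* An illustrative example of the rotate fallback in expand_shift_1:
   rotating a 32-bit value A left by 8 on a target without a rotate
   instruction is emitted as (A << 8) | ((unsigned) A >> 24), where
   24 == (-8) & 31 as in the formula above.  */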
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      int amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, GEN_INT (amount), target, unsignedp);
}

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
		       tree amount, rtx target, int unsignedp)
{
  return expand_shift_1 (code, mode,
			 shifted, expand_normal (amount), target, unsignedp);
}
/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expmed_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expmed_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);

/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */
static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm, hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();
  enum machine_mode imode;
  struct alg_hash_entry *entry_ptr;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Be prepared for vector modes.  */
  imode = GET_MODE_INNER (mode);
  if (imode == VOIDmode)
    imode = mode;

  maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (imode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost (speed);
	  alg_out->cost.latency = zero_cost (speed);
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  entry_ptr = alg_hash_entry_ptr (hash_index);
  if (entry_ptr->t == t
      && entry_ptr->mode == mode
      && entry_ptr->speed == speed
      && entry_ptr->alg != alg_unknown)
    {
      cache_alg = entry_ptr->alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within entry_ptr->cost.  */
	  if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
	  op_cost = m * add_cost (speed, mode);
	  if (shift_cost (speed, mode, m) < op_cost)
	    op_cost = shift_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }

	  /* See if treating ORIG_T as a signed number yields a better
	     sequence.  Try this sequence only for a negative ORIG_T
	     as it would be useless for a non-negative ORIG_T.  */
	  if ((HOST_WIDE_INT) orig_t < 0)
	    {
	      /* Shift ORIG_T as follows because a right shift of a
		 negative-valued signed type is implementation
		 defined.  */
	      q = ~(~orig_t >> m);
	      /* The function expand_shift will choose between a shift
		 and a sequence of additions, so the observed cost is
		 given as MIN (m * add_cost(speed, mode),
		 shift_cost(speed, mode, m)).  */
	      op_cost = m * add_cost (speed, mode);
	      if (shift_cost (speed, mode, m) < op_cost)
		op_cost = shift_cost (speed, mode, m);
	      new_limit.cost = best_cost.cost - op_cost;
	      new_limit.latency = best_cost.latency - op_cost;
	      synth_mult (alg_in, q, &new_limit, mode);

	      alg_in->cost.cost += op_cost;
	      alg_in->cost.latency += op_cost;
	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
		{
		  struct algorithm *x;
		  best_cost = alg_in->cost;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->log[best_alg->ops] = m;
		  best_alg->op[best_alg->ops] = alg_shift;
		}
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
	 quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub1_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
		      &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}

      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftadd_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftadd_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftsub0_cost (speed, mode, m) < op_cost)
	    {
	      op_cost = shiftsub0_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub0_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = alg_impossible;
      entry_ptr->cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = best_alg->op[best_alg->ops];
      entry_ptr->cost.cost = best_cost.cost;
      entry_ptr->cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
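/* An illustrative example of what synth_mult can find: t == 10 (binary
   1010) factors as 5 << 1, and 5 == (1 << 2) + 1, so x * 10 can be
   synthesized as ((x << 2) + x) << 1 -- one shift-and-add followed by
   one shift -- whenever that is cheaper than the target's multiply
   instruction.  */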
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these cost less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
    {
      op_cost = neg_cost (speed, mode);
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost (speed, mode);
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
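/* For instance, VAL == 7 typically comes out cheapest via VAL itself,
   as (x << 3) - x, while VAL == -7 may select negate_variant (compute
   7 * x, then negate).  add_variant tends to pay off for the large
   "magic" multipliers used in division by constants, where VAL - 1
   often has a simpler bit pattern than VAL.  */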
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize)
	  ? target : 0;
      rtx accum_target = optimize ? 0 : accum;
      rtx accum_inner;

      switch (alg->op[opno])
	{
	case alg_shift:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  /* REG_EQUAL note will be attached to the following insn.  */
	  emit_move_insn (accum, tem);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (SCALAR_INT_MODE_P (mode))
	{
	  /* Write a REG_EQUAL note on the last insn so that we can cse
	     multiplication sequences.  Note that if ACCUM is a SUBREG,
	     we've set the inner register and must properly indicate that.  */
	  tem = op0, nmode = mode;
	  accum_inner = accum;
	  if (GET_CODE (accum) == SUBREG)
	    {
	      accum_inner = SUBREG_REG (accum);
	      nmode = GET_MODE (accum_inner);
	      tem = gen_lowpart (nmode, op0);
	    }

	  insn = get_last_insn ();
	  set_dst_reg_note (insn, REG_EQUAL,
			    gen_rtx_MULT (nmode, tem,
					  gen_int_mode (val_so_far, nmode)),
			    accum_inner);
	}
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  nmode = GET_MODE_INNER (mode);
  if (nmode == VOIDmode)
    nmode = mode;
  val &= GET_MODE_MASK (nmode);
  val_so_far &= GET_MODE_MASK (nmode);
  gcc_assert (val == val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  rtx scalar_op1;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();
  bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;

  if (CONSTANT_P (op0))
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* For vectors, there are several simplifications that can be made if
     all elements of the vector constant are identical.  */
  scalar_op1 = op1;
  if (GET_CODE (op1) == CONST_VECTOR)
    {
      int i, n = CONST_VECTOR_NUNITS (op1);
      scalar_op1 = CONST_VECTOR_ELT (op1, 0);
      for (i = 1; i < n; ++i)
	if (!rtx_equal_p (scalar_op1, CONST_VECTOR_ELT (op1, i)))
	  goto skip_scalar;
    }

  if (INTEGRAL_MODE_P (mode))
    {
      rtx fake_reg;
      HOST_WIDE_INT coeff;
      bool is_neg;
      int mode_bitsize;

      if (op1 == CONST0_RTX (mode))
	return op1;
      if (op1 == CONST1_RTX (mode))
	return op0;
      if (op1 == CONSTM1_RTX (mode))
	return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
			    op0, target, 0);

      if (do_trapv)
	goto skip_synth;

      /* These are the operations that are potentially turned into
	 a sequence of shifts and additions.  */
      mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */
      if (CONST_INT_P (scalar_op1))
	{
	  coeff = INTVAL (scalar_op1);
	  is_neg = coeff < 0;
	}
#if TARGET_SUPPORTS_WIDE_INT
      else if (CONST_WIDE_INT_P (scalar_op1))
#else
      else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
#endif
	{
	  int shift = wi::exact_log2 (std::make_pair (scalar_op1, mode));
	  /* Perfect power of 2 (other than 1, which is handled above).  */
	  if (shift > 0)
	    return expand_shift (LSHIFT_EXPR, mode, op0,
				 shift, target, unsignedp);
	  else
	    goto skip_synth;
	}
      else
	goto skip_synth;

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
	  && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
	return expand_shift (LSHIFT_EXPR, mode, op0,
			     floor_log2 (coeff), target, unsignedp);

      fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* Attempt to handle multiplication of DImode values by negative
	 coefficients, by performing the multiplication by a positive
	 multiplier and then inverting the result.  */
      if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
	{
	  /* It's safe to use -coeff even for INT_MIN, as the
	     result is interpreted as an unsigned coefficient.
	     Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  coeff = -(unsigned HOST_WIDE_INT) coeff;
	  max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed)
		      - neg_cost (speed, mode));
	  if (max_cost <= 0)
	    goto skip_synth;

	  /* Special case powers of two.  */
	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	    {
	      rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
				       floor_log2 (coeff), target, unsignedp);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }

	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
				   max_cost))
	    {
	      rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
					    &algorithm, variant);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }
	  goto skip_synth;
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
	return expand_mult_const (mode, op0, coeff, target,
				  &algorithm, variant);
    }
 skip_synth:

  /* Expand x*2.0 as x+x.  */
  if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, scalar_op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }
 skip_scalar:

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
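/* For example, expand_mult with OP1 == 9 in SImode will normally be
   expanded through choose_mult_variant/expand_mult_const as
   (x << 3) + x rather than a mult insn, while OP1 == 16 is caught even
   earlier by the power-of-two special case and becomes a plain shift.  */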
/* Return a cost estimate for multiplying a register by the given
   COEFFicient in the given MODE and SPEED.  */

int
mult_by_coeff_cost (HOST_WIDE_INT coeff, enum machine_mode mode, bool speed)
{
  int max_cost;
  struct algorithm algorithm;
  enum mult_variant variant;

  rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg), speed);
  if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
    return algorithm.cost.cost;
  else
    return max_cost;
}
/* Perform a widening multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).
   THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
   or smul_widen_optab.

   We check specially for a constant integer as OP1, comparing the
   cost of a widening multiply against the cost of a sequence of shifts
   and adds.  */

rtx
expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();
  rtx cop1;

  if (CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
				this_optab == umul_widen_optab))
      && CONST_INT_P (cop1)
      && (INTVAL (cop1) >= 0
	  || HWI_COMPUTABLE_MODE_P (mode)))
    {
      HOST_WIDE_INT coeff = INTVAL (cop1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_shift (LSHIFT_EXPR, mode, op0,
			       floor_log2 (coeff), target, unsignedp);
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = mul_widen_cost (speed, mode);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
			       max_cost))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_mult_const (mode, op0, coeff, target,
				    &algorithm, variant);
	}
    }
  return expand_binop (mode, this_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB_WIDEN);
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr, int *lgup_ptr)
{
  int lgup, post_shift;
  int pow, pow2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* mlow = 2^(N + lgup)/d */
  wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
  wide_int mlow = wi::udiv_trunc (val, d);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
  wide_int mhigh = wi::udiv_trunc (val, d);

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
						       HOST_BITS_PER_WIDE_INT);
      unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
						       HOST_BITS_PER_WIDE_INT);
      if (ml_lo >= mh_lo)
	break;

      mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
      mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh.to_uhwi () & mask;
      return mhigh.to_uhwi () >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh.to_uhwi ();
      return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
    }
}
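/* Worked example: for D == 3 with N == PRECISION == 32, this computes
   mhigh == 0xAAAAAAAB with *POST_SHIFT_PTR == 1 and returns 0, so an
   unsigned 32-bit division by 3 becomes the high half of the 64-bit
   product x * 0xAAAAAAAB, shifted right by one more bit.  */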
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
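/* For example, invert_mod2n (3, 8) yields 171: 3 * 171 == 513 ==
   2 * 256 + 1, which is congruent to 1 mod 2^8.  Since convergence is
   quadratic, even N == 64 needs only a handful of iterations.  */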
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
/* Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     GET_MODE_BITSIZE (mode), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expmed_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expmed_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost (speed, mode) < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost (speed, mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && mul_widen_cost (speed, wider_mode) < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
	  < max_cost))
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
	{
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost (speed, wider_mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation can not be performed,
   0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;
  bool speed = optimize_insn_for_speed_p ();

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (HWI_COMPUTABLE_MODE_P (mode));

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expmed_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost (speed, mode);
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expmed_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx result, temp, shift, label;
  int logd;
  int prec = GET_MODE_PRECISION (mode);

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
	  signmask = force_reg (mode, signmask);
	  shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
	      || (set_src_cost (temp, optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (2)))
	    {
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp,
				   gen_int_mode (masklow, mode),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp,
				   gen_int_mode (masklow, mode),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  return temp;
	}
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */
  wide_int mask = wi::mask (logd, false, prec);
  mask = wi::set_bit (mask, prec - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_wide_int_const (mask, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  mask = wi::mask (logd, true, prec);
  temp = expand_binop (mode, ior_optab, temp,
		       immed_wide_int_const (mask, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
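/* An illustrative example of the branchless path above: for D == 4 and
   a negative X such as -7, signmask is -1, so the XOR/SUB sequence
   computes -((-X) & 3) == -(7 & 3) == -3, matching C's truncating
   division rule (-7 % 4 == -3); for non-negative X it reduces to
   X & 3.  */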
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  int logd;

  logd = floor_log2 (d);

  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST (optimize_insn_for_speed_p (), false)
      >= 2)
    {
      rtx temp2;

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST (optimize_insn_for_speed_p (),
                   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost (optimize_insn_for_speed_p (), mode, ushift)
          > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             ushift, NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, gen_int_mode (d - 1, mode));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
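
/* For illustration only (an editor's sketch, not part of GCC; the name is
   hypothetical): the branch-free shift variant above, in plain C for a
   32-bit int, assuming two's complement and an arithmetic right shift.

     int
     sdiv_pow2_sketch (int x, int logd)
     {
       int sign = x >> 31;                              (0 or -1)
       unsigned bias = (unsigned) sign >> (32 - logd);  (0 or d - 1)
       return (x + (int) bias) >> logd;                 (arithmetic shift)
     }

   E.g. sdiv_pow2_sketch (-7, 2) == -1, matching C's truncating -7 / 4,
   where a bare arithmetic shift would yield -2.  */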
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12.

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
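
/* For illustration only (an editor's sketch, not part of GCC; the name is
   hypothetical): the remainder-by-3 trick described above, using only a
   32x32->32 bit multiply.  Per the comment, it is exact for inputs up to
   0x1fffffff.

     unsigned
     umod3_sketch (unsigned x)
     {
       return (x * 0x55555556u) >> 30;
     }

   0x55555556 / 2^32 is approximately 1/3, so the top two bits of the low
   half of the product are the fraction bits of x/3, i.e. the remainder.
   E.g. umod3_sketch (5) == 2 and umod3_sketch (6) == 0.  */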
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  bool speed = optimize_insn_for_speed_p ();

  op1_is_constant = CONST_INT_P (op1);
  if (op1_is_constant)
    {
      unsigned HOST_WIDE_INT ext_op1 = UINTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                      || (! unsignedp
                          && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expmed_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
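
  /* For illustration only (an editor's sketch, not part of GCC; the name
     is hypothetical): the fixed-point m = 1/OP1 strategy above, for
     unsigned division by 5 in 32 bits.  0xCCCCCCCD is ceil (2^34 / 5), so
     the quotient is the upper half of the product shifted right by 2.

       unsigned
       udiv5_sketch (unsigned x)
       {
         return (unsigned) (((unsigned long long) x * 0xCCCCCCCDu) >> 34);
       }

     choose_multiplier, used below, picks such constants and the matching
     shift counts for a general divisor.  */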
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns do, and that these insns don't
     normally fail.  If these assumptions are not correct, we may generate
     less efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expmed_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
        || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab_libfunc (optab1, compute_mode)
          || optab_libfunc (optab2, compute_mode))
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = (unsignedp
              ? udiv_cost (speed, compute_mode)
              : sdiv_cost (speed, compute_mode));
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= (mul_cost (speed, compute_mode)
                 + add_cost (speed, compute_mode));

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = CONST_INT_P (op1);
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-UINTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        unsigned HOST_WIDE_INT mask
                          = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1;
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          gen_int_mode (mask, compute_mode),
                                          remainder, 1, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             pre_shift, tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag_force (tquotient, GEU,
                                                          op0, op1,
                                                          compute_mode, 1, 1);
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost (speed, compute_mode, post_shift - 1)
                                 + shift_cost (speed, compute_mode, 1)
                                 + 2 * add_cost (speed, compute_mode));
                            t1 = expmed_mult_highpart
                              (compute_mode, op0,
                               gen_int_mode (ml, compute_mode),
                               NULL_RTX, 1, max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, compute_mode,
                                               t2, 1, NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               post_shift - 1, tquotient, 1);
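
                            /* For illustration only (an editor's sketch,
                               not part of GCC; the name is hypothetical):
                               this mh != 0 path for D = 7 in 32 bits.  The
                               multiplier 0x24924925 = floor (2^32 / 7) + 1
                               has one bit too many, so the quotient needs
                               the sub/shift/add fixup built above:

                                 unsigned
                                 udiv7_sketch (unsigned x)
                                 {
                                   unsigned t1
                                     = (unsigned) (((unsigned long long) x
                                                    * 0x24924925u) >> 32);
                                   unsigned t2 = (x - t1) >> 1;
                                   return (t1 + t2) >> 2;  (post_shift - 1)
                                 }

                               E.g. udiv7_sketch (0xffffffffu)
                               == 0x24924924.  */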
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               pre_shift, NULL_RTX, 1);
                            extra_cost
                              = (shift_cost (speed, compute_mode, pre_shift)
                                 + shift_cost (speed, compute_mode, post_shift));
                            t2 = expmed_mult_highpart
                              (compute_mode, t1,
                               gen_int_mode (ml, compute_mode),
                               NULL_RTX, 1, max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               post_shift, tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_UDIV (compute_mode, op0, op1),
                                    quotient);
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d;

                /* Since d might be INT_MIN, we have to cast to
                   unsigned HOST_WIDE_INT before negating to avoid
                   undefined signed overflow.  */
                abs_d = (d >= 0
                         ? (unsigned HOST_WIDE_INT) d
                         : - (unsigned HOST_WIDE_INT) d);

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (HOST_BITS_PER_WIDE_INT >= size
                         && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag
                             ? smod_pow2_cheap (speed, compute_mode)
                             : sdiv_pow2_cheap (speed, compute_mode))
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && ((optab_handler ((rem_flag ? smod_optab
                                              : sdiv_optab),
                                             compute_mode)
                              != CODE_FOR_nothing)
                             || (optab_handler (sdivmod_optab,
                                                compute_mode)
                                 != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap (speed, compute_mode)
                        && ((optab_handler (sdiv_optab, compute_mode)
                             != CODE_FOR_nothing)
                            || (optab_handler (sdivmod_optab, compute_mode)
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_dst_reg_note (insn, REG_EQUAL,
                                            gen_rtx_DIV (compute_mode, op0,
                                                         gen_int_mode
                                                           (abs_d,
                                                            compute_mode)),
                                            quotient);

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost (speed, compute_mode,
                                                  post_shift)
                                      + shift_cost (speed, compute_mode,
                                                    size - 1)
                                      + add_cost (speed, compute_mode));
                        t1 = expmed_mult_highpart
                          (compute_mode, op0, gen_int_mode (ml, compute_mode),
                           NULL_RTX, 0, max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           post_shift, NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        mlr = gen_int_mode (ml, compute_mode);
                        extra_cost = (shift_cost (speed, compute_mode,
                                                  post_shift)
                                      + shift_cost (speed, compute_mode,
                                                    size - 1)
                                      + 2 * add_cost (speed, compute_mode));
                        t1 = expmed_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           post_shift, NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           size - 1, NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last)
                  set_dst_reg_note (insn, REG_EQUAL,
                                    gen_rtx_DIV (compute_mode, op0, op1),
                                    quotient);
              }
            break;

          fail1:
            delete_insns_since (last);
            break;
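
          /* For illustration only (an editor's sketch, not part of GCC;
             the name is hypothetical): the signed multiplier path above
             for D = 7 in 32 bits.  There ml has its top bit set, so OP0
             is added to the high part (t2 = t1 + op0) and the sign
             correction t4 = op0 >> (size - 1) is subtracted:

               int
               sdiv7_sketch (int x)
               {
                 int t1 = (int) (((long long) x * (int) 0x92492493) >> 32);
                 int t2 = t1 + x;
                 int t3 = t2 >> 2;   (arithmetic shift by post_shift)
                 int t4 = x >> 31;   (arithmetic shift by size - 1)
                 return t3 - t4;
               }

             E.g. sdiv7_sketch (20) == 2 and sdiv7_sketch (-7) == -1.  */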
          case FLOOR_DIV_EXPR:
          case FLOOR_MOD_EXPR:
            /* We will come here only for signed operations.  */
            if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, lgup, post_shift;
                HOST_WIDE_INT d = INTVAL (op1);

                if (d > 0)
                  {
                    /* We could just as easily deal with negative constants
                       here, but it does not seem worth the trouble for
                       GCC 2.6.  */
                    if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                      {
                        pre_shift = floor_log2 (d);
                        if (rem_flag)
                          {
                            unsigned HOST_WIDE_INT mask
                              = ((unsigned HOST_WIDE_INT) 1 << pre_shift) - 1;
                            remainder = expand_binop
                              (compute_mode, and_optab, op0,
                               gen_int_mode (mask, compute_mode),
                               remainder, 0, OPTAB_LIB_WIDEN);
                            if (remainder)
                              return gen_lowpart (mode, remainder);
                          }
                        quotient = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           pre_shift, tquotient, 0);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        mh = choose_multiplier (d, size, size - 1,
                                                &ml, &post_shift, &lgup);
                        gcc_assert (!mh);

                        if (post_shift < BITS_PER_WORD
                            && size - 1 < BITS_PER_WORD)
                          {
                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               size - 1, NULL_RTX, 0);
                            t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                               NULL_RTX, 0, OPTAB_WIDEN);
                            extra_cost = (shift_cost (speed, compute_mode,
                                                      post_shift)
                                          + shift_cost (speed, compute_mode,
                                                        size - 1)
                                          + 2 * add_cost (speed, compute_mode));
                            t3 = expmed_mult_highpart
                              (compute_mode, t2,
                               gen_int_mode (ml, compute_mode),
                               NULL_RTX, 1, max_cost - extra_cost);
                            if (t3 != 0)
                              {
                                t4 = expand_shift
                                  (RSHIFT_EXPR, compute_mode, t3,
                                   post_shift, NULL_RTX, 1);
                                quotient = expand_binop (compute_mode,
                                                         xor_optab,
                                                         t4, t1, tquotient, 0,
                                                         OPTAB_WIDEN);
                              }
                          }
                      }
                  }
                else
                  {
                    rtx nsign, t1, t2, t3, t4;
                    t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                      op0, constm1_rtx),
                                        NULL_RTX);
                    t2 = expand_binop (compute_mode, ior_optab, op0, t1,
                                       NULL_RTX, 0, OPTAB_WIDEN);
                    nsign = expand_shift
                      (RSHIFT_EXPR, compute_mode, t2,
                       size - 1, NULL_RTX, 0);
                    t3 = force_operand (gen_rtx_MINUS (compute_mode,
                                                       t1, nsign),
                                        NULL_RTX);
                    t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode,
                                        t3, op1, NULL_RTX, 0);
                    if (t4)
                      {
                        rtx t5;
                        t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                          NULL_RTX, 0);
                        quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                                t4, t5),
                                                  tquotient);
                      }
                  }
              }

            if (quotient != 0)
              break;
            delete_insns_since (last);
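
            /* For illustration only (an editor's note, not part of GCC):
               floor rounding differs from the truncating division the
               hardware provides only when the remainder is nonzero and
               the operands' signs differ, e.g. in plain C (hypothetical
               name):

                 int
                 floor_div_sketch (int x, int y)
                 {
                   int q = x / y, r = x % y;   (truncating)
                   return q - (r != 0 && (x ^ y) < 0);
                 }

               so floor_div_sketch (-7, 2) == -4 while -7 / 2 == -3.  The
               divmod fallback below applies exactly this adjustment with
               a compare and jump.  */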
            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get floor rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder
                  = REG_P (target) ? target : gen_reg_rtx (compute_mode);
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient
                  = REG_P (target) ? target : gen_reg_rtx (compute_mode);
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                     quotient, remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
                expand_dec (quotient, const1_rtx);
                expand_inc (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }
            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_inc (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_dec (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_dec (quotient, const1_rtx);
              emit_label (label5);
            }
            break;
          case CEIL_DIV_EXPR:
          case CEIL_MOD_EXPR:
            if (unsignedp)
              {
                if (op1_is_constant
                    && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
                  {
                    rtx t1, t2, t3;
                    unsigned HOST_WIDE_INT d = INTVAL (op1);
                    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                       floor_log2 (d), tquotient, 1);
                    t2 = expand_binop (compute_mode, and_optab, op0,
                                       gen_int_mode (d - 1, compute_mode),
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
                    t3 = gen_reg_rtx (compute_mode);
                    t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                          compute_mode, 1, 1);
                    if (t3 == 0)
                      {
                        rtx lab;
                        lab = gen_label_rtx ();
                        do_cmp_and_jump (t2, const0_rtx, EQ,
                                         compute_mode, lab);
                        expand_inc (t1, const1_rtx);
                        emit_label (lab);
                        quotient = t1;
                      }
                    else
                      quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                tquotient);
                    break;
                  }
                /* Try using an instruction that produces both the quotient
                   and remainder, using truncation.  We can easily compensate
                   the quotient or remainder to get ceiling rounding, once we
                   have the remainder.  Notice that we compute also the final
                   remainder value here, and return the result right away.  */
                if (target == 0 || GET_MODE (target) != compute_mode)
                  target = gen_reg_rtx (compute_mode);

                if (rem_flag)
                  {
                    remainder = (REG_P (target)
                                 ? target : gen_reg_rtx (compute_mode));
                    quotient = gen_reg_rtx (compute_mode);
                  }
                else
                  {
                    quotient = (REG_P (target)
                                ? target : gen_reg_rtx (compute_mode));
                    remainder = gen_reg_rtx (compute_mode);
                  }

                if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                         remainder, 1))
                  {
                    /* This could be computed with a branch-less sequence.
                       Save that for later.  */
                    rtx label = gen_label_rtx ();
                    do_cmp_and_jump (remainder, const0_rtx, EQ,
                                     compute_mode, label);
                    expand_inc (quotient, const1_rtx);
                    expand_dec (remainder, op1);
                    emit_label (label);
                    return gen_lowpart (mode,
                                        rem_flag ? remainder : quotient);
                  }
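
                /* For illustration only (an editor's note, not part of
                   GCC): for unsigned operands, ceiling rounding is a
                   truncating divide plus a fixup when the remainder is
                   nonzero, e.g. in plain C (hypothetical name):

                     unsigned
                     ceil_udiv_sketch (unsigned x, unsigned y)
                     {
                       return x / y + (x % y != 0);
                     }

                   which is what the increment after the divmod above, and
                   the store-flag sum in the power-of-two case, compute.  */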
                /* No luck with division elimination or divmod.  Have to do
                   it by conditionally adjusting op0 *and* the result.  */
                {
                  rtx label1, label2;
                  rtx adjusted_op0, tem;

                  quotient = gen_reg_rtx (compute_mode);
                  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
                  label1 = gen_label_rtx ();
                  label2 = gen_label_rtx ();
                  do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                                   compute_mode, label1);
                  emit_move_insn (quotient, const0_rtx);
                  emit_jump_insn (gen_jump (label2));
                  emit_barrier ();
                  emit_label (label1);
                  expand_dec (adjusted_op0, const1_rtx);
                  tem = expand_binop (compute_mode, udiv_optab,
                                      adjusted_op0, op1,
                                      quotient, 1, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  expand_inc (quotient, const1_rtx);
                  emit_label (label2);
                }
              }
            else                /* signed */
              {
                if (op1_is_constant
                    && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                    && INTVAL (op1) >= 0)
                  {
                    /* This is extremely similar to the code for the unsigned
                       case above.  For 2.7 we should merge these variants,
                       but for 2.6.1 I don't want to touch the code for
                       unsigned since that gets used in C.  The signed case
                       will only be used by other languages (Ada).  */

                    rtx t1, t2, t3;
                    unsigned HOST_WIDE_INT d = INTVAL (op1);
                    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                       floor_log2 (d), tquotient, 0);
                    t2 = expand_binop (compute_mode, and_optab, op0,
                                       gen_int_mode (d - 1, compute_mode),
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
                    t3 = gen_reg_rtx (compute_mode);
                    t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                          compute_mode, 1, 1);
                    if (t3 == 0)
                      {
                        rtx lab;
                        lab = gen_label_rtx ();
                        do_cmp_and_jump (t2, const0_rtx, EQ,
                                         compute_mode, lab);
                        expand_inc (t1, const1_rtx);
                        emit_label (lab);
                        quotient = t1;
                      }
                    else
                      quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                tquotient);
                    break;
                  }
                /* Try using an instruction that produces both the quotient
                   and remainder, using truncation.  We can easily compensate
                   the quotient or remainder to get ceiling rounding, once we
                   have the remainder.  Notice that we compute also the final
                   remainder value here, and return the result right away.  */
                if (target == 0 || GET_MODE (target) != compute_mode)
                  target = gen_reg_rtx (compute_mode);

                if (rem_flag)
                  {
                    remainder = (REG_P (target)
                                 ? target : gen_reg_rtx (compute_mode));
                    quotient = gen_reg_rtx (compute_mode);
                  }
                else
                  {
                    quotient = (REG_P (target)
                                ? target : gen_reg_rtx (compute_mode));
                    remainder = gen_reg_rtx (compute_mode);
                  }

                if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                         remainder, 0))
                  {
                    /* This could be computed with a branch-less sequence.
                       Save that for later.  */
                    rtx tem;
                    rtx label = gen_label_rtx ();
                    do_cmp_and_jump (remainder, const0_rtx, EQ,
                                     compute_mode, label);
                    tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                        NULL_RTX, 0, OPTAB_WIDEN);
                    do_cmp_and_jump (tem, const0_rtx, LT,
                                     compute_mode, label);
                    expand_inc (quotient, const1_rtx);
                    expand_dec (remainder, op1);
                    emit_label (label);
                    return gen_lowpart (mode,
                                        rem_flag ? remainder : quotient);
                  }
                /* No luck with division elimination or divmod.  Have to do
                   it by conditionally adjusting op0 *and* the result.  */
                {
                  rtx label1, label2, label3, label4, label5;
                  rtx adjusted_op0;
                  rtx tem;

                  quotient = gen_reg_rtx (compute_mode);
                  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
                  label1 = gen_label_rtx ();
                  label2 = gen_label_rtx ();
                  label3 = gen_label_rtx ();
                  label4 = gen_label_rtx ();
                  label5 = gen_label_rtx ();
                  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
                  do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                                   compute_mode, label1);
                  tem = expand_binop (compute_mode, sdiv_optab,
                                      adjusted_op0, op1,
                                      quotient, 0, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  emit_jump_insn (gen_jump (label5));
                  emit_barrier ();
                  emit_label (label1);
                  expand_dec (adjusted_op0, const1_rtx);
                  emit_jump_insn (gen_jump (label4));
                  emit_barrier ();
                  emit_label (label2);
                  do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                                   compute_mode, label3);
                  tem = expand_binop (compute_mode, sdiv_optab,
                                      adjusted_op0, op1,
                                      quotient, 0, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  emit_jump_insn (gen_jump (label5));
                  emit_barrier ();
                  emit_label (label3);
                  expand_inc (adjusted_op0, const1_rtx);
                  emit_label (label4);
                  tem = expand_binop (compute_mode, sdiv_optab,
                                      adjusted_op0, op1,
                                      quotient, 0, OPTAB_LIB_WIDEN);
                  if (tem != quotient)
                    emit_move_insn (quotient, tem);
                  expand_inc (quotient, const1_rtx);
                  emit_label (label5);
                }
              }
            break;
          case EXACT_DIV_EXPR:
            if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
              {
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT ml;
                int pre_shift;
                rtx t1;

                pre_shift = floor_log2 (d & -d);
                ml = invert_mod2n (d >> pre_shift, size);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   pre_shift, NULL_RTX, unsignedp);
                quotient = expand_mult (compute_mode, t1,
                                        gen_int_mode (ml, compute_mode),
                                        NULL_RTX, 1);

                insn = get_last_insn ();
                set_dst_reg_note (insn, REG_EQUAL,
                                  gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                  compute_mode, op0, op1),
                                  quotient);
              }
            break;
          case ROUND_DIV_EXPR:
          case ROUND_MOD_EXPR:
            if (unsignedp)
              {
                rtx tem;
                rtx label;
                label = gen_label_rtx ();
                quotient = gen_reg_rtx (compute_mode);
                remainder = gen_reg_rtx (compute_mode);
                if (expand_twoval_binop (udivmod_optab, op0, op1,
                                         quotient, remainder, 1) == 0)
                  {
                    quotient = expand_binop (compute_mode, udiv_optab,
                                             op0, op1,
                                             quotient, 1, OPTAB_LIB_WIDEN);
                    tem = expand_mult (compute_mode, quotient, op1,
                                       NULL_RTX, 1);
                    remainder = expand_binop (compute_mode, sub_optab,
                                              op0, tem,
                                              remainder, 1, OPTAB_LIB_WIDEN);
                  }
                tem = plus_constant (compute_mode, op1, -1);
                tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                    1, NULL_RTX, 1);
                do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
              }
            else
              {
                rtx abs_rem, abs_op1, tem, mask;
                rtx label;
                label = gen_label_rtx ();
                quotient = gen_reg_rtx (compute_mode);
                remainder = gen_reg_rtx (compute_mode);
                if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                         quotient, remainder, 0) == 0)
                  {
                    quotient = expand_binop (compute_mode, sdiv_optab,
                                             op0, op1,
                                             quotient, 0, OPTAB_LIB_WIDEN);
                    tem = expand_mult (compute_mode, quotient, op1,
                                       NULL_RTX, 0);
                    remainder = expand_binop (compute_mode, sub_optab,
                                              op0, tem,
                                              remainder, 0, OPTAB_LIB_WIDEN);
                  }
                abs_rem = expand_abs (compute_mode, remainder,
                                      NULL_RTX, 1, 0);
                abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
                tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                    1, NULL_RTX, 1);
                do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                     size - 1, NULL_RTX, 0);
                tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                expand_inc (quotient, tem);
                tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                expand_dec (remainder, tem);
                emit_label (label);
              }
            return gen_lowpart (mode, rem_flag ? remainder : quotient);

          default:
            gcc_unreachable ();
          }
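
  /* For illustration only (an editor's note, not part of GCC): the ROUND_*
     cases round the quotient to nearest, halves away from zero.  The
     unsigned variant above is equivalent to this plain C (hypothetical
     name):

       unsigned
       round_udiv_sketch (unsigned x, unsigned y)
       {
         unsigned q = x / y, r = x % y;
         return q + (r > (y - 1) / 2);
       }

     The signed variant instead compares 2*|r| with |y| and then applies
     +1 or -1 built from the sign mask of op0 ^ op1, avoiding a branch.  */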
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require
             widening, don't try widening here.  We should really have a
             WIDEN argument to expand_twoval_binop, since what we'd really
             like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                the remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab_handler (optab2, compute_mode)
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that
         optab2 is set to the one of the two optabs that the call below will
         use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab_handler (optab2, compute_mode)
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp
                                     ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              quotient = 0;
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              if (! rem_flag)
                quotient
                  = sign_expand_binop (compute_mode,
                                       udiv_optab, sdiv_optab,
                                       op0, op1, target,
                                       unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_WIDE_INT:
      t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type)));
      return t;

    case CONST_DOUBLE:
      if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
        t = wide_int_to_tree (type,
                              wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
                                                    HOST_BITS_PER_WIDE_INT * 2));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        tree *elts;
        int i;

        /* Build a tree with vector elements.  */
        elts = XALLOCAVEC (tree, units);
        for (i = units - 1; i >= 0; --i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            elts[i] = make_tree (itype, elt);
          }

        return build_vector (type, elts);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type_for (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = unsigned_type_for (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, we might need to convert X from
         address mode to pointer mode.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address_addr_space
              (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0,
                        OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
             enum machine_mode mode, enum machine_mode compare_mode,
             int unsignedp, rtx x, rtx y, int normalizep,
             enum machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, last, comparison, subtarget;
  enum machine_mode result_mode = targetm.cstore_mode (icode);

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    target_mode = result_mode;
  if (!target)
    target = gen_reg_rtx (target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
                    val_signbit_known_clear_p (result_mode,
                                               STORE_FLAG_VALUE));
      op0 = target;
      result_mode = target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
                        GET_MODE_BITSIZE (result_mode) - 1, subtarget,
                        normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
        op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
                   enum machine_mode mode, int unsignedp, int normalizep,
                   enum machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;
  rtx tem;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01;

          /* Do a logical OR or AND of the two words and compare the
             result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          tem = expand_binop (word_mode,
                              op1 == const0_rtx ? ior_optab : and_optab,
                              op00, op01, NULL_RTX, unsignedp,
                              OPTAB_DIRECT);

          if (tem != 0)
            tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
                                   unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode,
                                                              mode));
          tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
                                 unsignedp, normalizep);
        }
      else
        tem = NULL_RTX;

      if (tem)
        {
          if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
            return tem;
          if (!target)
            target = gen_reg_rtx (target_mode);

          convert_move (target, tem,
                        !val_signbit_known_set_p (word_mode,
                                                  (normalizep ? normalizep
                                                   : STORE_FLAG_VALUE)));
          return target;
        }
    }
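
  /* For illustration only (an editor's sketch, not part of GCC; names are
     hypothetical): the double-word cases above, for a 64-bit value held in
     two 32-bit words, assuming two's complement.  Equality against 0 or -1
     needs only an OR or AND of the halves; the sign needs only the high
     half.

       int dw_eq0_sketch (unsigned lo, unsigned hi)
         { return (lo | hi) == 0; }
       int dw_eqm1_sketch (unsigned lo, unsigned hi)
         { return (lo & hi) == 0xffffffffu; }
       int dw_lt0_sketch (unsigned lo, unsigned hi)
         { return hi >> 31; }
  */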
  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || val_signbit_p (mode, STORE_FLAG_VALUE)))
    {
      subtarget = target;

      if (!target)
        target_mode = mode;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            GET_MODE_BITSIZE (mode) - 1,
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
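
  /* For illustration only (an editor's sketch, not part of GCC; names are
     hypothetical): the shift trick above for 32-bit operands, assuming
     two's complement.  A logical shift yields the 0/1 form, an arithmetic
     shift the -1/0 form; GE is LT applied to the ones' complement.

       int lt0_sketch (int x) { return (unsigned) x >> 31; }   (x < 0)
       int ge0_sketch (int x) { return (unsigned) ~x >> 31; }  (x >= 0)
  */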
  mclass = GET_MODE_CLASS (mode);
  for (compare_mode = mode; compare_mode != VOIDmode;
       compare_mode = GET_MODE_WIDER_MODE (compare_mode))
    {
      enum machine_mode optab_mode
        = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
        {
          do_pending_stack_adjust ();
          tem = emit_cstore (target, icode, code, mode, compare_mode,
                             unsignedp, op0, op1, normalizep, target_mode);
          if (tem)
            return tem;

          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              tem = emit_cstore (target, icode, scode, mode, compare_mode,
                                 unsignedp, op1, op0, normalizep,
                                 target_mode);
              if (tem)
                return tem;
            }
          break;
        }
    }

  return 0;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, last, trueval;

  /* If we compare constants, we shouldn't use a store-flag operation,
     but a constant load.  We can get there via the vanilla route that
     usually generates a compare-branch sequence, but will in this case
     fold the comparison to a constant, and thus elide the branch.  */
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return NULL_RTX;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp,
                           normalizep, target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
        ;
      else
        return 0;
    }
  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
          && (code == ORDERED || code == UNORDERED
              || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
              || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
        {
          int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                          || (STORE_FLAG_VALUE == -1 && normalizep == 1));

          /* For the reverse comparison, use either an addition or a XOR.  */
          if (want_add
              && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                           optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       STORE_FLAG_VALUE, target_mode);
              if (tem)
                return expand_binop (target_mode, add_optab, tem,
                                     gen_int_mode (normalizep, target_mode),
                                     target, 0, OPTAB_WIDEN);
            }
          else if (!want_add
                   && rtx_cost (trueval, XOR, 1,
                                optimize_insn_for_speed_p ()) == 0)
            {
              tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                       normalizep, target_mode);
              if (tem)
                return expand_binop (target_mode, xor_optab, tem, trueval,
                                     target, INTVAL (trueval) >= 0,
                                     OPTAB_WIDEN);
            }
        }

      delete_insns_since (last);
      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
        return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall
         through.  Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
        {
          gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
          return emit_store_flag_1 (target, code, op0, op1, mode, 0,
                                    normalizep, target_mode);
        }

#ifdef HAVE_conditional_move
      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
         conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
                               normalizep, target_mode);
      if (tem == 0)
        return 0;

      if (and_them)
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     tem, const0_rtx, GET_MODE (tem), 0);
      else
        tem = emit_conditional_move (target, code, op0, op1, mode,
                                     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
        delete_insns_since (last);
      return tem;
#else
      return 0;
#endif
    }
  /* The remaining tricks only apply to integer comparisons.  */

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return 0;

  /* If this is an equality comparison of integers, we can try to
     exclusive-or (or subtract) the two operands and use a recursive call
     to try the comparison with zero.  Don't do any of these cases if
     branches are very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem != 0)
        return tem;

      delete_insns_since (last);
    }
  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
            && code == NE
            && GET_MODE_SIZE (mode) < UNITS_PER_WORD
            && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
                      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a
         XOR.  */
      if (want_add
          && rtx_cost (GEN_INT (normalizep), PLUS, 1,
                       optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   STORE_FLAG_VALUE, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, add_optab, tem,
                                gen_int_mode (normalizep, target_mode),
                                target, 0, OPTAB_WIDEN);
        }
      else if (!want_add
               && rtx_cost (trueval, XOR, 1,
                            optimize_insn_for_speed_p ()) == 0)
        {
          tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
                                   normalizep, target_mode);
          if (tem != 0)
            tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
                                INTVAL (trueval) >= 0, OPTAB_WIDEN);
        }

      if (tem != 0)
        return tem;
      delete_insns_since (last);
    }
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST (optimize_insn_for_speed_p (),
                           false) <= 1 || (code != LE && code != GT))))
    return 0;
  /* Try to put the result of the comparison in the sign bit.  Assume we
     can't do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          GET_MODE_BITSIZE (mode) - 1,
                          subtarget, 0);
      if (tem)
        tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                            OPTAB_WIDEN);
    }
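
  /* For illustration only (an editor's sketch, not part of GCC; names are
     hypothetical): the two sign-bit tricks above in plain C for a 32-bit
     int, assuming two's complement wraparound.  The comparison result
     lands in the sign bit, which the normalization shift further down
     extracts.

       int le0_signbit_sketch (int x) { return x | (x - 1); }
       int gt0_signbit_sketch (int x) { return (x >> 31) - x; }

     For LE, x | (x - 1) is negative exactly when x <= 0 (for x == 0 the
     subtraction wraps to -1).  For GT, (x >> 31) - x is negative exactly
     when x > 0.  */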
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so
         try them.  If that doesn't work, and MODE is smaller than a full
         word, we can use zero-extension to the wider mode (an unsigned
         conversion) as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's
         complement of the value with itself.  For EQ, we take the one's
         complement of that "or", which is an extra insn, so we only handle
         EQ if branches are expensive.  */

      if (tem == 0
          && (code == NE
              || BRANCH_COST (optimize_insn_for_speed_p (),
                              false) > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        GET_MODE_BITSIZE (mode) - 1,
                        subtarget, normalizep == 1);
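
  /* For illustration only (an editor's sketch, not part of GCC; names are
     hypothetical): the neg/ior fallback above in plain C for a 32-bit int,
     assuming two's complement.  -x | x has the sign bit set exactly when x
     is nonzero, so after the normalizing shift:

       int ne0_sketch (int x) { return (unsigned) (-x | x) >> 31; }
       int eq0_sketch (int x) { return (unsigned) ~(-x | x) >> 31; }
  */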
  if (tem)
    {
      if (!target)
        ;
      else if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
                               mode, NULL_RTX, NULL_RTX, label, -1);
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
          || code == ORDERED || code == UNORDERED
          || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
          || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
          || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
        {
          falseval = trueval;
          trueval = const0_rtx;
          code = rcode;
        }
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label, -1);

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label, -1);
}