/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-98, 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field PARAMS ((rtx, int, int, int, rtx, int));
static void store_split_bit_field PARAMS ((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx, int,
                                            int, int, rtx, int, int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
                             int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
                                 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, int, int, int, int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
                                     enum machine_mode, rtx));

/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
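/* For example (an editorial gloss, not from the original source):
   shift_cost[5] ends up holding the cost computed in init_expmed below
   for shifting a word_mode register left by the constant 5, and
   mul_highpart_cost[(int) SImode] the cost of taking the high half of a
   widening SImode multiply; the modes named here are illustrative.  */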
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
\f
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
   else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  int insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS (fieldmode, align)
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);

          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }

          emit_insn (GEN_FCN (icode)
                     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
        }
      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */
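      /* Illustration (values invented for this note): storing a 40-bit
         field on a 32-bit target with WORDS_BIG_ENDIAN == 0 gives
         nwords == 2; the i == 0 pass below stores a full 32-bit word at
         BITNUM and the i == 1 pass stores the remaining 8 bits, so only
         the final, most significant transfer can be a partial word.  */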

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }

  /* From here on we can assume that the field to be stored in
     lies within a single word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
        maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                  && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
\f
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
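  /* For instance (numbers invented for this note): with TOTAL_BITS == 32,
     a big-endian BITPOS of 4 for an 8-bit field becomes 32 - 8 - 4 == 20,
     the same lsb-relative position a little-endian target would use.  */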

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
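/* Illustration (values invented for this note): with 32-bit words on a
   little-endian target, a 10-bit field at BITPOS 28 is handled in two
   passes of the loop below: the first stores the 4 least significant
   bits of VALUE at bits 28..31 of word 0, the second stores the
   remaining 6 bits at bits 0..5 of word 1.  On a big-endian target the
   portions are fetched most significant first instead.  */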

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
\f
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  int extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  int extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS (mode, align)
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();   /* Should probably push op0 out to memory and then
                   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (extzv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extzv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                          && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (extv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
                          && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
\f
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
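  /* Worked case (numbers invented for this note): for a 5-bit signed
     field with BITPOS 0 in a 32-bit mode, both shift counts below come
     out to 27; the field value 0b11010 becomes 0xD0000000 after the
     left shift and 0xFFFFFFFA (-6) after the arithmetic right shift.  */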
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
\f
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
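/* Illustration (values invented for this note): with a 32-bit
   HOST_WIDE_INT, mask_rtx (SImode, 8, 4, 0) computes
   (-1 << 8) & (~(unsigned) 0 >> 20) == 0x00000F00, i.e. four one bits
   starting at bit 8; with COMPLEMENT nonzero the low word would be
   ~0x00000F00 == 0xFFFFF0FF instead.  */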
1674
1675 static rtx
1676 mask_rtx (mode, bitpos, bitsize, complement)
1677 enum machine_mode mode;
1678 int bitpos, bitsize, complement;
1679 {
1680 HOST_WIDE_INT masklow, maskhigh;
1681
1682 if (bitpos < HOST_BITS_PER_WIDE_INT)
1683 masklow = (HOST_WIDE_INT) -1 << bitpos;
1684 else
1685 masklow = 0;
1686
1687 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1688 masklow &= ((unsigned HOST_WIDE_INT) -1
1689 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1690
1691 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1692 maskhigh = -1;
1693 else
1694 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1695
1696 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1697 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1698 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1699 else
1700 maskhigh = 0;
1701
1702 if (complement)
1703 {
1704 maskhigh = ~maskhigh;
1705 masklow = ~masklow;
1706 }
1707
1708 return immed_double_const (masklow, maskhigh, mode);
1709 }
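#if 0
/* Illustrative sketch only, never compiled: the value mask_rtx computes,
redone in plain host arithmetic for a mode no wider than one host word
(mask_value is a hypothetical helper, not part of this file).
E.g. bitpos 4, bitsize 8 gives 0xff0; with COMPLEMENT, ~0xff0. */
unsigned long
mask_value (bitpos, bitsize, complement)
int bitpos, bitsize, complement;
{
unsigned long mask = ~0UL << bitpos;
if (bitpos + bitsize < (int) (8 * sizeof mask))
mask &= ~(~0UL << (bitpos + bitsize));
return complement ? ~mask : mask;
}
#endif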
1710
1711 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1712 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1713
1714 static rtx
1715 lshift_value (mode, value, bitpos, bitsize)
1716 enum machine_mode mode;
1717 rtx value;
1718 int bitpos, bitsize;
1719 {
1720 unsigned HOST_WIDE_INT v = INTVAL (value);
1721 HOST_WIDE_INT low, high;
1722
1723 if (bitsize < HOST_BITS_PER_WIDE_INT)
1724 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1725
1726 if (bitpos < HOST_BITS_PER_WIDE_INT)
1727 {
1728 low = v << bitpos;
1729 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1730 }
1731 else
1732 {
1733 low = 0;
1734 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1735 }
1736
1737 return immed_double_const (low, high, mode);
1738 }
1739 \f
1740 /* Extract a bit field that is split across two words
1741 and return an RTX for the result.
1742
1743 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1744 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1745 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1746
1747 ALIGN is the known alignment of OP0, measured in bytes.
1748 This is also the size of the memory objects to be used. */
1749
1750 static rtx
1751 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1752 rtx op0;
1753 int bitsize, bitpos, unsignedp, align;
1754 {
1755 int unit;
1756 int bitsdone = 0;
1757 rtx result = NULL_RTX;
1758 int first = 1;
1759
1760 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1761 much at a time. */
1762 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1763 unit = BITS_PER_WORD;
1764 else
1765 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1766
1767 while (bitsdone < bitsize)
1768 {
1769 int thissize;
1770 rtx part, word;
1771 int thispos;
1772 int offset;
1773
1774 offset = (bitpos + bitsdone) / unit;
1775 thispos = (bitpos + bitsdone) % unit;
1776
1777 /* THISSIZE must not overrun a word boundary. Otherwise,
1778 extract_fixed_bit_field will call us again, and we will mutually
1779 recurse forever. */
1780 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1781 thissize = MIN (thissize, unit - thispos);
1782
1783 /* If OP0 is a register, then handle OFFSET here.
1784
1785 When handling multiword bitfields, extract_bit_field may pass
1786 down a word_mode SUBREG of a larger REG for a bitfield that actually
1787 crosses a word boundary. Thus, for a SUBREG, we must find
1788 the current word starting from the base register. */
1789 if (GET_CODE (op0) == SUBREG)
1790 {
1791 word = operand_subword_force (SUBREG_REG (op0),
1792 SUBREG_WORD (op0) + offset,
1793 GET_MODE (SUBREG_REG (op0)));
1794 offset = 0;
1795 }
1796 else if (GET_CODE (op0) == REG)
1797 {
1798 word = operand_subword_force (op0, offset, GET_MODE (op0));
1799 offset = 0;
1800 }
1801 else
1802 word = op0;
1803
1804 /* Extract the parts in bit-counting order,
1805 whose meaning is determined by BYTES_PER_UNIT.
1806 OFFSET is in UNITs, and UNIT is in bits.
1807 extract_fixed_bit_field wants offset in bytes. */
1808 part = extract_fixed_bit_field (word_mode, word,
1809 offset * unit / BITS_PER_UNIT,
1810 thissize, thispos, 0, 1, align);
1811 bitsdone += thissize;
1812
1813 /* Shift this part into place for the result. */
1814 if (BYTES_BIG_ENDIAN)
1815 {
1816 if (bitsize != bitsdone)
1817 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1818 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1819 }
1820 else
1821 {
1822 if (bitsdone != thissize)
1823 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1824 build_int_2 (bitsdone - thissize, 0), 0, 1);
1825 }
1826
1827 if (first)
1828 result = part;
1829 else
1830 /* Combine the parts with bitwise or. This works
1831 because we extracted each part as an unsigned bit field. */
1832 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1833 OPTAB_LIB_WIDEN);
1834
1835 first = 0;
1836 }
1837
1838 /* Unsigned bit field: we are done. */
1839 if (unsignedp)
1840 return result;
1841 /* Signed bit field: sign-extend with a left shift and an arithmetic right shift. */
1842 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1843 build_int_2 (BITS_PER_WORD - bitsize, 0),
1844 NULL_RTX, 0);
1845 return expand_shift (RSHIFT_EXPR, word_mode, result,
1846 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1847 }
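#if 0
/* Illustrative sketch only, never compiled: the same two-shift
sign-extension idiom used above, applied to an 8-bit field in the low
bits of a 32-bit word (sign_extend_8 is hypothetical, and an arithmetic
right shift of a signed int is assumed). */
int
sign_extend_8 (field)
unsigned int field;
{
return ((int) (field << 24)) >> 24; /* BITS_PER_WORD - bitsize = 24 */
}
#endif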
1848 \f
1849 /* Add INC into TARGET. */
1850
1851 void
1852 expand_inc (target, inc)
1853 rtx target, inc;
1854 {
1855 rtx value = expand_binop (GET_MODE (target), add_optab,
1856 target, inc,
1857 target, 0, OPTAB_LIB_WIDEN);
1858 if (value != target)
1859 emit_move_insn (target, value);
1860 }
1861
1862 /* Subtract DEC from TARGET. */
1863
1864 void
1865 expand_dec (target, dec)
1866 rtx target, dec;
1867 {
1868 rtx value = expand_binop (GET_MODE (target), sub_optab,
1869 target, dec,
1870 target, 0, OPTAB_LIB_WIDEN);
1871 if (value != target)
1872 emit_move_insn (target, value);
1873 }
1874 \f
1875 /* Output a shift instruction for expression code CODE,
1876 with SHIFTED being the rtx for the value to shift,
1877 and AMOUNT the tree for the amount to shift by.
1878 Store the result in the rtx TARGET, if that is convenient.
1879 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1880 Return the rtx for where the value is. */
1881
1882 rtx
1883 expand_shift (code, mode, shifted, amount, target, unsignedp)
1884 enum tree_code code;
1885 register enum machine_mode mode;
1886 rtx shifted;
1887 tree amount;
1888 register rtx target;
1889 int unsignedp;
1890 {
1891 register rtx op1, temp = 0;
1892 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1893 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1894 int try;
1895
1896 /* Formerly we detected shift counts computed by NEGATE_EXPR
1897 and shifted in the other direction, but that does not work
1898 on all machines. */
1899
1900 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1901
1902 #ifdef SHIFT_COUNT_TRUNCATED
1903 if (SHIFT_COUNT_TRUNCATED)
1904 {
1905 if (GET_CODE (op1) == CONST_INT
1906 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1907 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1908 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1909 % GET_MODE_BITSIZE (mode));
1910 else if (GET_CODE (op1) == SUBREG
1911 && SUBREG_WORD (op1) == 0)
1912 op1 = SUBREG_REG (op1);
1913 }
1914 #endif
1915
1916 if (op1 == const0_rtx)
1917 return shifted;
1918
1919 for (try = 0; temp == 0 && try < 3; try++)
1920 {
1921 enum optab_methods methods;
1922
1923 if (try == 0)
1924 methods = OPTAB_DIRECT;
1925 else if (try == 1)
1926 methods = OPTAB_WIDEN;
1927 else
1928 methods = OPTAB_LIB_WIDEN;
1929
1930 if (rotate)
1931 {
1932 /* Widening does not work for rotation. */
1933 if (methods == OPTAB_WIDEN)
1934 continue;
1935 else if (methods == OPTAB_LIB_WIDEN)
1936 {
1937 /* If we have been unable to open-code this by a rotation,
1938 do it as the IOR of two shifts. I.e., to rotate A
1939 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1940 where C is the bitsize of A.
1941
1942 It is theoretically possible that the target machine might
1943 not be able to perform either shift and hence we would
1944 be making two libcalls rather than just the one for the
1945 shift (similarly if IOR could not be done). We will allow
1946 this extremely unlikely lossage to avoid complicating the
1947 code below. */
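#if 0
/* Illustrative sketch only, never compiled: the two-shift form of a
rotate that this branch expands to, for a 32-bit value and a count
0 < N < 32 (rotl32 is a hypothetical helper). */
unsigned int
rotl32 (a, n)
unsigned int a;
int n;
{
return (a << n) | (a >> (32 - n));
}
#endif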
1948
1949 rtx subtarget = target == shifted ? 0 : target;
1950 rtx temp1;
1951 tree type = TREE_TYPE (amount);
1952 tree new_amount = make_tree (type, op1);
1953 tree other_amount
1954 = fold (build (MINUS_EXPR, type,
1955 convert (type,
1956 build_int_2 (GET_MODE_BITSIZE (mode),
1957 0)),
1958 amount));
1959
1960 shifted = force_reg (mode, shifted);
1961
1962 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1963 mode, shifted, new_amount, subtarget, 1);
1964 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1965 mode, shifted, other_amount, 0, 1);
1966 return expand_binop (mode, ior_optab, temp, temp1, target,
1967 unsignedp, methods);
1968 }
1969
1970 temp = expand_binop (mode,
1971 left ? rotl_optab : rotr_optab,
1972 shifted, op1, target, unsignedp, methods);
1973
1974 /* If we don't have the rotate, but we are rotating by a constant
1975 that is in range, try a rotate in the opposite direction. */
1976
1977 if (temp == 0 && GET_CODE (op1) == CONST_INT
1978 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1979 temp = expand_binop (mode,
1980 left ? rotr_optab : rotl_optab,
1981 shifted,
1982 GEN_INT (GET_MODE_BITSIZE (mode)
1983 - INTVAL (op1)),
1984 target, unsignedp, methods);
1985 }
1986 else if (unsignedp)
1987 temp = expand_binop (mode,
1988 left ? ashl_optab : lshr_optab,
1989 shifted, op1, target, unsignedp, methods);
1990
1991 /* Do arithmetic shifts.
1992 Also, if we are going to widen the operand, we can just as well
1993 use an arithmetic right-shift instead of a logical one. */
1994 if (temp == 0 && ! rotate
1995 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1996 {
1997 enum optab_methods methods1 = methods;
1998
1999 /* If trying to widen a logical shift to an arithmetic shift,
2000 don't accept an arithmetic shift of the same size. */
2001 if (unsignedp)
2002 methods1 = OPTAB_MUST_WIDEN;
2003
2004 /* Arithmetic shift */
2005
2006 temp = expand_binop (mode,
2007 left ? ashl_optab : ashr_optab,
2008 shifted, op1, target, unsignedp, methods1);
2009 }
2010
2011 /* We used to try extzv here for logical right shifts, but that was
2012 only useful for one machine, the VAX, and caused poor code
2013 generation there for lshrdi3, so the code was deleted and a
2014 define_expand for lshrsi3 was added to vax.md. */
2015 }
2016
2017 if (temp == 0)
2018 abort ();
2019 return temp;
2020 }
2021 \f
2022 enum alg_code { alg_zero, alg_m, alg_shift,
2023 alg_add_t_m2, alg_sub_t_m2,
2024 alg_add_factor, alg_sub_factor,
2025 alg_add_t2_m, alg_sub_t2_m,
2026 alg_add, alg_subtract, alg_factor, alg_shiftop };
2027
2028 /* This structure records a sequence of operations.
2029 `ops' is the number of operations recorded.
2030 `cost' is their total cost.
2031 The operations are stored in `op' and the corresponding
2032 logarithms of the integer coefficients in `log'.
2033
2034 These are the operations:
2035 alg_zero total := 0;
2036 alg_m total := multiplicand;
2037 alg_shift total := total * coeff;
2038 alg_add_t_m2 total := total + multiplicand * coeff;
2039 alg_sub_t_m2 total := total - multiplicand * coeff;
2040 alg_add_factor total := total * coeff + total;
2041 alg_sub_factor total := total * coeff - total;
2042 alg_add_t2_m total := total * coeff + multiplicand;
2043 alg_sub_t2_m total := total * coeff - multiplicand;
2044
2045 The first operand must be either alg_zero or alg_m. */
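/* For instance, one algorithm synth_mult can find for T = 10 is
alg_m (total := x), then alg_add_t2_m with log 2 (total := 4*total + x
= 5x), then alg_shift with log 1 (total := 2*total = 10x); that is,
x * 10 computed as ((x << 2) + x) << 1. */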
2046
2047 struct algorithm
2048 {
2049 short cost;
2050 short ops;
2051 /* The sizes of the OP and LOG fields are not directly related to the
2052 word size, but the worst case arises when the multiplicand has few
2053 consecutive ones or zeros, i.e., is something like 10101010101...
2054 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2055 wordsize operations in total. */
2056 enum alg_code op[MAX_BITS_PER_WORD];
2057 char log[MAX_BITS_PER_WORD];
2058 };
2059
2060 static void synth_mult PARAMS ((struct algorithm *,
2061 unsigned HOST_WIDE_INT,
2062 int));
2063 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2064 int, int,
2065 unsigned HOST_WIDE_INT *,
2066 int *, int *));
2067 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2068 int));
2069 /* Compute and return the best algorithm for multiplying by T.
2070 The algorithm must cost less than COST_LIMIT.
2071 If retval.cost >= COST_LIMIT, no algorithm was found and all
2072 other fields of the returned struct are undefined. */
2073
2074 static void
2075 synth_mult (alg_out, t, cost_limit)
2076 struct algorithm *alg_out;
2077 unsigned HOST_WIDE_INT t;
2078 int cost_limit;
2079 {
2080 int m;
2081 struct algorithm *alg_in, *best_alg;
2082 int cost;
2083 unsigned HOST_WIDE_INT q;
2084
2085 /* Indicate that no algorithm is yet found. If no algorithm
2086 is found, this value will be returned and indicate failure. */
2087 alg_out->cost = cost_limit;
2088
2089 if (cost_limit <= 0)
2090 return;
2091
2092 /* t == 1 can be done at zero cost. */
2093 if (t == 1)
2094 {
2095 alg_out->ops = 1;
2096 alg_out->cost = 0;
2097 alg_out->op[0] = alg_m;
2098 return;
2099 }
2100
2101 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2102 fail now. */
2103 if (t == 0)
2104 {
2105 if (zero_cost >= cost_limit)
2106 return;
2107 else
2108 {
2109 alg_out->ops = 1;
2110 alg_out->cost = zero_cost;
2111 alg_out->op[0] = alg_zero;
2112 return;
2113 }
2114 }
2115
2116 /* We'll need a couple of extra algorithm structures now. */
2117
2118 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2119 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2120
2121 /* If we have a group of zero bits at the low-order part of T, try
2122 multiplying by the remaining bits and then doing a shift. */
2123
2124 if ((t & 1) == 0)
2125 {
2126 m = floor_log2 (t & -t); /* m = number of low zero bits */
2127 q = t >> m;
2128 cost = shift_cost[m];
2129 synth_mult (alg_in, q, cost_limit - cost);
2130
2131 cost += alg_in->cost;
2132 if (cost < cost_limit)
2133 {
2134 struct algorithm *x;
2135 x = alg_in, alg_in = best_alg, best_alg = x;
2136 best_alg->log[best_alg->ops] = m;
2137 best_alg->op[best_alg->ops] = alg_shift;
2138 cost_limit = cost;
2139 }
2140 }
2141
2142 /* If we have an odd number, add or subtract one. */
2143 if ((t & 1) != 0)
2144 {
2145 unsigned HOST_WIDE_INT w;
2146
2147 for (w = 1; (w & t) != 0; w <<= 1)
2148 ;
2149 /* If T was -1, then W will be zero after the loop. This is another
2150 case where T ends with ...111. Handling it as (T + 1) followed by
2151 a subtract produces slightly better code and selects an algorithm
2152 much faster than treating it like the ...0111 case
2153 below. */
2154 if (w == 0
2155 || (w > 2
2156 /* Reject the case where t is 3.
2157 Thus we prefer addition in that case. */
2158 && t != 3))
2159 {
2160 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2161
2162 cost = add_cost;
2163 synth_mult (alg_in, t + 1, cost_limit - cost);
2164
2165 cost += alg_in->cost;
2166 if (cost < cost_limit)
2167 {
2168 struct algorithm *x;
2169 x = alg_in, alg_in = best_alg, best_alg = x;
2170 best_alg->log[best_alg->ops] = 0;
2171 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2172 cost_limit = cost;
2173 }
2174 }
2175 else
2176 {
2177 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2178
2179 cost = add_cost;
2180 synth_mult (alg_in, t - 1, cost_limit - cost);
2181
2182 cost += alg_in->cost;
2183 if (cost < cost_limit)
2184 {
2185 struct algorithm *x;
2186 x = alg_in, alg_in = best_alg, best_alg = x;
2187 best_alg->log[best_alg->ops] = 0;
2188 best_alg->op[best_alg->ops] = alg_add_t_m2;
2189 cost_limit = cost;
2190 }
2191 }
2192 }
2193
2194 /* Look for factors of t of the form
2195 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2196 If we find such a factor, we can multiply by t using an algorithm that
2197 multiplies by q, shifts the result left by m, and adds or subtracts the unshifted value.
2198
2199 We search for large factors first and loop down, even though large factors
2200 are less probable than small ones; if we find a large factor we will find a
2201 good sequence quickly, and therefore be able to prune (by decreasing
2202 COST_LIMIT) the search. */
2203
2204 for (m = floor_log2 (t - 1); m >= 2; m--)
2205 {
2206 unsigned HOST_WIDE_INT d;
2207
2208 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2209 if (t % d == 0 && t > d)
2210 {
2211 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2212 synth_mult (alg_in, t / d, cost_limit - cost);
2213
2214 cost += alg_in->cost;
2215 if (cost < cost_limit)
2216 {
2217 struct algorithm *x;
2218 x = alg_in, alg_in = best_alg, best_alg = x;
2219 best_alg->log[best_alg->ops] = m;
2220 best_alg->op[best_alg->ops] = alg_add_factor;
2221 cost_limit = cost;
2222 }
2223 /* Other factors will have been taken care of in the recursion. */
2224 break;
2225 }
2226
2227 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2228 if (t % d == 0 && t > d)
2229 {
2230 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2231 synth_mult (alg_in, t / d, cost_limit - cost);
2232
2233 cost += alg_in->cost;
2234 if (cost < cost_limit)
2235 {
2236 struct algorithm *x;
2237 x = alg_in, alg_in = best_alg, best_alg = x;
2238 best_alg->log[best_alg->ops] = m;
2239 best_alg->op[best_alg->ops] = alg_sub_factor;
2240 cost_limit = cost;
2241 }
2242 break;
2243 }
2244 }
2245
2246 /* Try shift-and-add (load effective address) instructions,
2247 i.e. do a*3, a*5, a*9. */
2248 if ((t & 1) != 0)
2249 {
2250 q = t - 1;
2251 q = q & -q;
2252 m = exact_log2 (q);
2253 if (m >= 0)
2254 {
2255 cost = shiftadd_cost[m];
2256 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2257
2258 cost += alg_in->cost;
2259 if (cost < cost_limit)
2260 {
2261 struct algorithm *x;
2262 x = alg_in, alg_in = best_alg, best_alg = x;
2263 best_alg->log[best_alg->ops] = m;
2264 best_alg->op[best_alg->ops] = alg_add_t2_m;
2265 cost_limit = cost;
2266 }
2267 }
2268
2269 q = t + 1;
2270 q = q & -q;
2271 m = exact_log2 (q);
2272 if (m >= 0)
2273 {
2274 cost = shiftsub_cost[m];
2275 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2276
2277 cost += alg_in->cost;
2278 if (cost < cost_limit)
2279 {
2280 struct algorithm *x;
2281 x = alg_in, alg_in = best_alg, best_alg = x;
2282 best_alg->log[best_alg->ops] = m;
2283 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2284 cost_limit = cost;
2285 }
2286 }
2287 }
2288
2289 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2290 we have not found any algorithm. */
2291 if (cost_limit == alg_out->cost)
2292 return;
2293
2294 /* If the sequence is getting too long for `struct algorithm'
2295 to record, make this search fail. */
2296 if (best_alg->ops == MAX_BITS_PER_WORD)
2297 return;
2298
2299 /* Copy the algorithm from temporary space to the space at alg_out.
2300 We avoid using structure assignment because the majority of
2301 best_alg is normally undefined, and this is a critical function. */
2302 alg_out->ops = best_alg->ops + 1;
2303 alg_out->cost = cost_limit;
2304 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2305 alg_out->ops * sizeof *alg_out->op);
2306 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2307 alg_out->ops * sizeof *alg_out->log);
2308 }
2309 \f
2310 /* Perform a multiplication and return an rtx for the result.
2311 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2312 TARGET is a suggestion for where to store the result (an rtx).
2313
2314 We check specially for a constant integer as OP1.
2315 If you want this check for OP0 as well, then before calling
2316 you should swap the two operands if OP0 would be constant. */
2317
2318 rtx
2319 expand_mult (mode, op0, op1, target, unsignedp)
2320 enum machine_mode mode;
2321 register rtx op0, op1, target;
2322 int unsignedp;
2323 {
2324 rtx const_op1 = op1;
2325
2326 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2327 less than or equal in size to `unsigned int' this doesn't matter.
2328 If the mode is larger than `unsigned int', then synth_mult works only
2329 if the constant value exactly fits in an `unsigned int' without any
2330 truncation. This means that multiplying by negative values does
2331 not work; results are off by 2^32 on a 32 bit machine. */
2332
2333 /* If we are multiplying in DImode, it may still be a win
2334 to try to work with shifts and adds. */
2335 if (GET_CODE (op1) == CONST_DOUBLE
2336 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2337 && HOST_BITS_PER_INT >= BITS_PER_WORD
2338 && CONST_DOUBLE_HIGH (op1) == 0)
2339 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2340 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2341 && GET_CODE (op1) == CONST_INT
2342 && INTVAL (op1) < 0)
2343 const_op1 = 0;
2344
2345 /* We used to test optimize here, on the grounds that it's better to
2346 produce a smaller program when -O is not used.
2347 But this causes such a terrible slowdown sometimes
2348 that it seems better to use synth_mult always. */
2349
2350 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2351 {
2352 struct algorithm alg;
2353 struct algorithm alg2;
2354 HOST_WIDE_INT val = INTVAL (op1);
2355 HOST_WIDE_INT val_so_far;
2356 rtx insn;
2357 int mult_cost;
2358 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2359
2360 /* Try to do the computation three ways: multiply by the negative of OP1
2361 and then negate, do the multiplication directly, or multiply by
2362 OP1 - 1 and then add OP0 once. */
2363
2364 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2365 mult_cost = MIN (12 * add_cost, mult_cost);
2366
2367 synth_mult (&alg, val, mult_cost);
2368
2369 /* This works only if the inverted value actually fits in an
2370 `unsigned int'. */
2371 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2372 {
2373 synth_mult (&alg2, - val,
2374 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2375 if (alg2.cost + negate_cost < alg.cost)
2376 alg = alg2, variant = negate_variant;
2377 }
2378
2379 /* This proves very useful for division-by-constant. */
2380 synth_mult (&alg2, val - 1,
2381 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2382 if (alg2.cost + add_cost < alg.cost)
2383 alg = alg2, variant = add_variant;
2384
2385 if (alg.cost < mult_cost)
2386 {
2387 /* We found something cheaper than a multiply insn. */
2388 int opno;
2389 rtx accum, tem;
2390
2391 op0 = protect_from_queue (op0, 0);
2392
2393 /* Avoid referencing memory over and over.
2394 For speed, but also for correctness when mem is volatile. */
2395 if (GET_CODE (op0) == MEM)
2396 op0 = force_reg (mode, op0);
2397
2398 /* ACCUM starts out either as OP0 or as a zero, depending on
2399 the first operation. */
2400
2401 if (alg.op[0] == alg_zero)
2402 {
2403 accum = copy_to_mode_reg (mode, const0_rtx);
2404 val_so_far = 0;
2405 }
2406 else if (alg.op[0] == alg_m)
2407 {
2408 accum = copy_to_mode_reg (mode, op0);
2409 val_so_far = 1;
2410 }
2411 else
2412 abort ();
2413
2414 for (opno = 1; opno < alg.ops; opno++)
2415 {
2416 int log = alg.log[opno];
2417 int preserve = preserve_subexpressions_p ();
2418 rtx shift_subtarget = preserve ? 0 : accum;
2419 rtx add_target
2420 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2421 && ! preserve)
2422 ? target : 0;
2423 rtx accum_target = preserve ? 0 : accum;
2424
2425 switch (alg.op[opno])
2426 {
2427 case alg_shift:
2428 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2429 build_int_2 (log, 0), NULL_RTX, 0);
2430 val_so_far <<= log;
2431 break;
2432
2433 case alg_add_t_m2:
2434 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2435 build_int_2 (log, 0), NULL_RTX, 0);
2436 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2437 add_target
2438 ? add_target : accum_target);
2439 val_so_far += (HOST_WIDE_INT) 1 << log;
2440 break;
2441
2442 case alg_sub_t_m2:
2443 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2444 build_int_2 (log, 0), NULL_RTX, 0);
2445 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2446 add_target
2447 ? add_target : accum_target);
2448 val_so_far -= (HOST_WIDE_INT) 1 << log;
2449 break;
2450
2451 case alg_add_t2_m:
2452 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2453 build_int_2 (log, 0), shift_subtarget,
2454 0);
2455 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2456 add_target
2457 ? add_target : accum_target);
2458 val_so_far = (val_so_far << log) + 1;
2459 break;
2460
2461 case alg_sub_t2_m:
2462 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2463 build_int_2 (log, 0), shift_subtarget,
2464 0);
2465 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2466 add_target
2467 ? add_target : accum_target);
2468 val_so_far = (val_so_far << log) - 1;
2469 break;
2470
2471 case alg_add_factor:
2472 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2473 build_int_2 (log, 0), NULL_RTX, 0);
2474 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2475 add_target
2476 ? add_target : accum_target);
2477 val_so_far += val_so_far << log;
2478 break;
2479
2480 case alg_sub_factor:
2481 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2482 build_int_2 (log, 0), NULL_RTX, 0);
2483 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2484 (add_target ? add_target
2485 : preserve ? 0 : tem));
2486 val_so_far = (val_so_far << log) - val_so_far;
2487 break;
2488
2489 default:
2490 abort ();
2491 }
2492
2493 /* Write a REG_EQUAL note on the last insn so that we can cse
2494 multiplication sequences. */
2495
2496 insn = get_last_insn ();
2497 set_unique_reg_note (insn,
2498 REG_EQUAL,
2499 gen_rtx_MULT (mode, op0,
2500 GEN_INT (val_so_far)));
2501 }
2502
2503 if (variant == negate_variant)
2504 {
2505 val_so_far = - val_so_far;
2506 accum = expand_unop (mode, neg_optab, accum, target, 0);
2507 }
2508 else if (variant == add_variant)
2509 {
2510 val_so_far = val_so_far + 1;
2511 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2512 }
2513
2514 if (val != val_so_far)
2515 abort ();
2516
2517 return accum;
2518 }
2519 }
2520
2521 /* This used to use umul_optab if unsigned, but for non-widening multiply
2522 there is no difference between signed and unsigned. */
2523 op0 = expand_binop (mode, smul_optab,
2524 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2525 if (op0 == 0)
2526 abort ();
2527 return op0;
2528 }
2529 \f
2530 /* Return the smallest n such that 2**n >= X. */
2531
2532 int
2533 ceil_log2 (x)
2534 unsigned HOST_WIDE_INT x;
2535 {
2536 return floor_log2 (x - 1) + 1;
2537 }
2538
2539 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2540 replace division by D, and put the least significant N bits of the result
2541 in *MULTIPLIER_PTR and return the most significant bit.
2542
2543 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2544 needed precision is in PRECISION (should be <= N).
2545
2546 PRECISION should be as small as possible so this function can choose
2547 the multiplier more freely.
2548
2549 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2550 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2551
2552 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2553 where m is the full N + 1 bit multiplier. */
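/* Worked example, checked by hand assuming a 64-bit HOST_WIDE_INT:
for D = 7 and N = PRECISION = 32, lgup = 3, mlow = 2^35 / 7
= 0x124924924 and mhigh = (2^35 + 2^3) / 7 = 0x124924925. The low
32 bits 0x24924925 go in *MULTIPLIER_PTR, *POST_SHIFT_PTR and
*LGUP_PTR are both 3, and the return value is 1 because bit N of the
multiplier is set, so the caller must use an add-and-shift fixup
sequence. */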
2554
2555 static
2556 unsigned HOST_WIDE_INT
2557 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2558 unsigned HOST_WIDE_INT d;
2559 int n;
2560 int precision;
2561 unsigned HOST_WIDE_INT *multiplier_ptr;
2562 int *post_shift_ptr;
2563 int *lgup_ptr;
2564 {
2565 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2566 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2567 int lgup, post_shift;
2568 int pow, pow2;
2569 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2570
2571 /* lgup = ceil(log2(divisor)); */
2572 lgup = ceil_log2 (d);
2573
2574 if (lgup > n)
2575 abort ();
2576
2577 pow = n + lgup;
2578 pow2 = n + lgup - precision;
2579
2580 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2581 {
2582 /* We could handle this with some effort, but this case is much better
2583 handled directly with a scc insn, so rely on caller using that. */
2584 abort ();
2585 }
2586
2587 /* mlow = 2^(N + lgup)/d */
2588 if (pow >= HOST_BITS_PER_WIDE_INT)
2589 {
2590 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2591 nl = 0;
2592 }
2593 else
2594 {
2595 nh = 0;
2596 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2597 }
2598 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2599 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2600
2601 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2602 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2603 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2604 else
2605 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2606 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2607 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2608
2609 if (mhigh_hi && nh - d >= d)
2610 abort ();
2611 if (mhigh_hi > 1 || mlow_hi > 1)
2612 abort ();
2613 /* Assert that mlow < mhigh. */
2614 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2615 abort ();
2616
2617 /* If precision == N, then mlow, mhigh exceed 2^N
2618 (but they do not exceed 2^(N+1)). */
2619
2620 /* Reduce to lowest terms */
2621 for (post_shift = lgup; post_shift > 0; post_shift--)
2622 {
2623 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2624 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2625 if (ml_lo >= mh_lo)
2626 break;
2627
2628 mlow_hi = 0;
2629 mlow_lo = ml_lo;
2630 mhigh_hi = 0;
2631 mhigh_lo = mh_lo;
2632 }
2633
2634 *post_shift_ptr = post_shift;
2635 *lgup_ptr = lgup;
2636 if (n < HOST_BITS_PER_WIDE_INT)
2637 {
2638 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2639 *multiplier_ptr = mhigh_lo & mask;
2640 return mhigh_lo >= mask;
2641 }
2642 else
2643 {
2644 *multiplier_ptr = mhigh_lo;
2645 return mhigh_hi;
2646 }
2647 }
2648
2649 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2650 congruent to 1 (mod 2**N). */
2651
2652 static unsigned HOST_WIDE_INT
2653 invert_mod2n (x, n)
2654 unsigned HOST_WIDE_INT x;
2655 int n;
2656 {
2657 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2658
2659 /* The algorithm notes that the choice y = x satisfies
2660 x*y == 1 mod 2^3, since x is assumed odd.
2661 Each iteration doubles the number of bits of significance in y. */
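/* Worked example, checked by hand: for x = 3 and n = 8 the iteration
gives y = 3 (exact mod 2^3), then y = 3 * (2 - 3*3) & 0xff = 0xeb
(exact mod 2^6), then y = 0xeb * (2 - 3*0xeb) & 0xff = 0xab = 171,
and indeed 3 * 171 = 513 == 1 (mod 256). */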
2662
2663 unsigned HOST_WIDE_INT mask;
2664 unsigned HOST_WIDE_INT y = x;
2665 int nbit = 3;
2666
2667 mask = (n == HOST_BITS_PER_WIDE_INT
2668 ? ~(unsigned HOST_WIDE_INT) 0
2669 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2670
2671 while (nbit < n)
2672 {
2673 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2674 nbit *= 2;
2675 }
2676 return y;
2677 }
2678
2679 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2680 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2681 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2682 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2683 become signed.
2684
2685 The result is put in TARGET if that is convenient.
2686
2687 MODE is the mode of operation. */
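/* In effect, writing S(V) for the all-ones mask (V >> (N-1)) of a
negative N-bit value, the identity used is

unsigned_high (OP0, OP1)
== signed_high (OP0, OP1) + (S(OP0) & OP1) + (S(OP1) & OP0) (mod 2^N)

and the signed flavor subtracts the same two correction terms. */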
2688
2689 rtx
2690 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2691 enum machine_mode mode;
2692 register rtx adj_operand, op0, op1, target;
2693 int unsignedp;
2694 {
2695 rtx tem;
2696 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2697
2698 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2699 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2700 NULL_RTX, 0);
2701 tem = expand_and (tem, op1, NULL_RTX);
2702 adj_operand
2703 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2704 adj_operand);
2705
2706 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2707 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2708 NULL_RTX, 0);
2709 tem = expand_and (tem, op0, NULL_RTX);
2710 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2711 target);
2712
2713 return target;
2714 }
2715
2716 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2717 in TARGET if that is convenient, and return where the result is. If the
2718 operation can not be performed, 0 is returned.
2719
2720 MODE is the mode of operation and result.
2721
2722 UNSIGNEDP nonzero means unsigned multiply.
2723
2724 MAX_COST is the total allowed cost for the expanded RTL. */
2725
2726 rtx
2727 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2728 enum machine_mode mode;
2729 register rtx op0, target;
2730 unsigned HOST_WIDE_INT cnst1;
2731 int unsignedp;
2732 int max_cost;
2733 {
2734 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2735 optab mul_highpart_optab;
2736 optab moptab;
2737 rtx tem;
2738 int size = GET_MODE_BITSIZE (mode);
2739 rtx op1, wide_op1;
2740
2741 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2742 if (size > HOST_BITS_PER_WIDE_INT)
2743 abort ();
2744
2745 op1 = GEN_INT (cnst1);
2746
2747 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2748 wide_op1 = op1;
2749 else
2750 wide_op1
2751 = immed_double_const (cnst1,
2752 (unsignedp
2753 ? (HOST_WIDE_INT) 0
2754 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2755 wider_mode);
2756
2757 /* expand_mult handles constant multiplication of word_mode
2758 or narrower. It does a poor job for large modes. */
2759 if (size < BITS_PER_WORD
2760 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2761 {
2762 /* We have to do this, since expand_binop doesn't do conversion for
2763 multiply. Maybe change expand_binop to handle widening multiply? */
2764 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2765
2766 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2767 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2768 build_int_2 (size, 0), NULL_RTX, 1);
2769 return convert_modes (mode, wider_mode, tem, unsignedp);
2770 }
2771
2772 if (target == 0)
2773 target = gen_reg_rtx (mode);
2774
2775 /* First, try using a multiplication insn that generates only the needed
2776 high part of the product, in the sign flavor given by UNSIGNEDP. */
2777 if (mul_highpart_cost[(int) mode] < max_cost)
2778 {
2779 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2780 target = expand_binop (mode, mul_highpart_optab,
2781 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2782 if (target)
2783 return target;
2784 }
2785
2786 /* Second, same as above, but use the sign flavor opposite to UNSIGNEDP.
2787 Need to adjust the result after the multiplication. */
2788 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2789 {
2790 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2791 target = expand_binop (mode, mul_highpart_optab,
2792 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2793 if (target)
2794 /* We used the wrong signedness. Adjust the result. */
2795 return expand_mult_highpart_adjust (mode, target, op0,
2796 op1, target, unsignedp);
2797 }
2798
2799 /* Try widening multiplication. */
2800 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2801 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2802 && mul_widen_cost[(int) wider_mode] < max_cost)
2803 {
2804 op1 = force_reg (mode, op1);
2805 goto try;
2806 }
2807
2808 /* Try widening the mode and performing a non-widening multiplication. */
2809 moptab = smul_optab;
2810 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2811 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2812 {
2813 op1 = wide_op1;
2814 goto try;
2815 }
2816
2817 /* Try widening multiplication of opposite signedness, and adjust. */
2818 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2819 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2820 && (mul_widen_cost[(int) wider_mode]
2821 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2822 {
2823 rtx regop1 = force_reg (mode, op1);
2824 tem = expand_binop (wider_mode, moptab, op0, regop1,
2825 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2826 if (tem != 0)
2827 {
2828 /* Extract the high half of the just generated product. */
2829 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2830 build_int_2 (size, 0), NULL_RTX, 1);
2831 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2832 /* We used the wrong signedness. Adjust the result. */
2833 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2834 target, unsignedp);
2835 }
2836 }
2837
2838 return 0;
2839
2840 try:
2841 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2842 tem = expand_binop (wider_mode, moptab, op0, op1,
2843 NULL_RTX, unsignedp, OPTAB_WIDEN);
2844 if (tem == 0)
2845 return 0;
2846
2847 /* Extract the high half of the just generated product. */
2848 if (mode == word_mode)
2849 {
2850 return gen_highpart (mode, tem);
2851 }
2852 else
2853 {
2854 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2855 build_int_2 (size, 0), NULL_RTX, 1);
2856 return convert_modes (mode, wider_mode, tem, unsignedp);
2857 }
2858 }
2859 \f
2860 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2861 if that is convenient, and returning where the result is.
2862 You may request either the quotient or the remainder as the result;
2863 specify REM_FLAG nonzero to get the remainder.
2864
2865 CODE is the expression code for which kind of division this is;
2866 it controls how rounding is done. MODE is the machine mode to use.
2867 UNSIGNEDP nonzero means do unsigned division. */
2868
2869 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2870 and then correct it by or'ing in missing high bits
2871 if result of ANDI is nonzero.
2872 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2873 This could optimize to a bfexts instruction.
2874 But C doesn't use these operations, so their optimizations are
2875 left for later. */
2876 /* ??? For modulo, we don't actually need the highpart of the first product,
2877 the low part will do nicely. And for small divisors, the second multiply
2878 can also be a low-part only multiply or even be completely left out.
2879 E.g. to calculate the remainder of a division by 3 with a 32 bit
2880 multiply, multiply by 0x55555556 and extract the upper two bits;
2881 the result is exact for inputs up to 0x1fffffff.
2882 The input range can be reduced by using cross-sum rules.
2883 For odd divisors >= 3, the following table gives right shift counts
2884 so that if a number is shifted by an integer multiple of the given
2885 amount, the remainder stays the same:
2886 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2887 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2888 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2889 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2890 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2891
2892 Cross-sum rules for even numbers can be derived by leaving as many bits
2893 to the right alone as the divisor has zeros to the right.
2894 E.g. if x is an unsigned 32 bit number:
2895 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2896 */
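#if 0
/* Illustrative sketch only, never compiled: the remainder-by-3 trick
described above, assuming a 32-bit unsigned int (urem3 is a
hypothetical helper; exact only for x <= 0x1fffffff as stated). */
unsigned int
urem3 (x)
unsigned int x;
{
return (x * 0x55555556) >> 30; /* upper two bits of the low product */
}
#endif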
2897
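/* X & (X - 1) clears the lowest set bit, so the result is zero exactly
for zero and for powers of 2: e.g. 8 & 7 == 0, while 12 & 11 == 8. */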
2898 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2899
2900 rtx
2901 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2902 int rem_flag;
2903 enum tree_code code;
2904 enum machine_mode mode;
2905 register rtx op0, op1, target;
2906 int unsignedp;
2907 {
2908 enum machine_mode compute_mode;
2909 register rtx tquotient;
2910 rtx quotient = 0, remainder = 0;
2911 rtx last;
2912 int size;
2913 rtx insn, set;
2914 optab optab1, optab2;
2915 int op1_is_constant, op1_is_pow2;
2916 int max_cost, extra_cost;
2917 static HOST_WIDE_INT last_div_const = 0;
2918
2919 op1_is_constant = GET_CODE (op1) == CONST_INT;
2920 op1_is_pow2 = (op1_is_constant
2921 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2922 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2923
2924 /*
2925 This is the structure of expand_divmod:
2926
2927 First comes code to fix up the operands so we can perform the operations
2928 correctly and efficiently.
2929
2930 Second comes a switch statement with code specific for each rounding mode.
2931 For some special operands this code emits all RTL for the desired
2932 operation, for other cases, it generates only a quotient and stores it in
2933 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2934 to indicate that it has not done anything.
2935
2936 Last comes code that finishes the operation. If QUOTIENT is set and
2937 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2938 QUOTIENT is not set, it is computed using trunc rounding.
2939
2940 We try to generate special code for division and remainder when OP1 is a
2941 constant. If |OP1| = 2**n we can use shifts and some other fast
2942 operations. For other values of OP1, we compute a carefully selected
2943 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2944 by m.
2945
2946 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2947 half of the product. Different strategies for generating the product are
2948 implemented in expand_mult_highpart.
2949
2950 If what we actually want is the remainder, we generate it with another
2951 multiplication by the constant and a subtraction. */
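#if 0
/* Illustrative sketch only, never compiled: the shape of the code the
unsigned TRUNC_DIV case below emits for x / 7 on a 32-bit machine,
with multiplier 0x24924925 and post-shift 3 from choose_multiplier
(udiv7 and the 64-bit intermediate are hypothetical). */
unsigned int
udiv7 (x)
unsigned int x;
{
unsigned int t1 = ((unsigned long long) x * 0x24924925) >> 32;
unsigned int t2 = (x - t1) >> 1; /* fixup: bit 32 of the multiplier is set */
return (t1 + t2) >> 2; /* post_shift - 1 */
}
#endif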
2952
2953 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2954 code below will malfunction if we are, so check here and handle
2955 the special case if so. */
2956 if (op1 == const1_rtx)
2957 return rem_flag ? const0_rtx : op0;
2958
2959 if (target
2960 /* Don't use the function value register as a target
2961 since we have to read it as well as write it,
2962 and function-inlining gets confused by this. */
2963 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2964 /* Don't clobber an operand while doing a multi-step calculation. */
2965 || ((rem_flag || op1_is_constant)
2966 && (reg_mentioned_p (target, op0)
2967 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2968 || reg_mentioned_p (target, op1)
2969 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2970 target = 0;
2971
2972 /* Get the mode in which to perform this computation. Normally it will
2973 be MODE, but sometimes we can't do the desired operation in MODE.
2974 If so, pick a wider mode in which we can do the operation. Convert
2975 to that mode at the start to avoid repeated conversions.
2976
2977 First see what operations we need. These depend on the expression
2978 we are evaluating. (We assume that divxx3 insns exist under the
2979 same conditions as modxx3 insns and that these insns don't normally
2980 fail. If these assumptions are not correct, we may generate less
2981 efficient code in some cases.)
2982
2983 Then see if we find a mode in which we can open-code that operation
2984 (either a division, modulus, or shift). Finally, check for the smallest
2985 mode for which we can do the operation with a library call. */
2986
2987 /* We might want to refine this now that we have division-by-constant
2988 optimization. Since expand_mult_highpart tries so many variants, it is
2989 not straightforward to generalize this. Maybe we should make an array
2990 of possible modes in init_expmed? Save this for GCC 2.7. */
2991
2992 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2993 : (unsignedp ? udiv_optab : sdiv_optab));
2994 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2995
2996 for (compute_mode = mode; compute_mode != VOIDmode;
2997 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2998 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2999 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3000 break;
3001
3002 if (compute_mode == VOIDmode)
3003 for (compute_mode = mode; compute_mode != VOIDmode;
3004 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3005 if (optab1->handlers[(int) compute_mode].libfunc
3006 || optab2->handlers[(int) compute_mode].libfunc)
3007 break;
3008
3009 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3010 in expand_binop. */
3011 if (compute_mode == VOIDmode)
3012 compute_mode = mode;
3013
3014 if (target && GET_MODE (target) == compute_mode)
3015 tquotient = target;
3016 else
3017 tquotient = gen_reg_rtx (compute_mode);
3018
3019 size = GET_MODE_BITSIZE (compute_mode);
3020 #if 0
3021 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3022 (mode), and thereby get better code when OP1 is a constant. Do that
3023 later. It will require going over all usages of SIZE below. */
3024 size = GET_MODE_BITSIZE (mode);
3025 #endif
3026
3027 /* Only deduct something for a REM if the last divide done was
3028 for a different constant. Then set the constant of the last
3029 divide. */
3030 max_cost = div_cost[(int) compute_mode]
3031 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3032 && INTVAL (op1) == last_div_const)
3033 ? mul_cost[(int) compute_mode] + add_cost : 0);
3034
3035 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3036
3037 /* Now convert to the best mode to use. */
3038 if (compute_mode != mode)
3039 {
3040 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3041 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3042
3043 /* convert_modes may have placed op1 into a register, so we
3044 must recompute the following. */
3045 op1_is_constant = GET_CODE (op1) == CONST_INT;
3046 op1_is_pow2 = (op1_is_constant
3047 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3048 || (! unsignedp
3049 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3050 }
3051
3052 /* If one of the operands is a volatile MEM, copy it into a register. */
3053
3054 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3055 op0 = force_reg (compute_mode, op0);
3056 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3057 op1 = force_reg (compute_mode, op1);
3058
3059 /* If we need the remainder or if OP1 is constant, we need to
3060 put OP0 in a register in case it has any queued subexpressions. */
3061 if (rem_flag || op1_is_constant)
3062 op0 = force_reg (compute_mode, op0);
3063
3064 last = get_last_insn ();
3065
3066 /* Promote floor rounding to trunc rounding for unsigned operations. */
3067 if (unsignedp)
3068 {
3069 if (code == FLOOR_DIV_EXPR)
3070 code = TRUNC_DIV_EXPR;
3071 if (code == FLOOR_MOD_EXPR)
3072 code = TRUNC_MOD_EXPR;
3073 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3074 code = TRUNC_DIV_EXPR;
3075 }
3076
3077 if (op1 != const0_rtx)
3078 switch (code)
3079 {
3080 case TRUNC_MOD_EXPR:
3081 case TRUNC_DIV_EXPR:
3082 if (op1_is_constant)
3083 {
3084 if (unsignedp)
3085 {
3086 unsigned HOST_WIDE_INT mh, ml;
3087 int pre_shift, post_shift;
3088 int dummy;
3089 unsigned HOST_WIDE_INT d = INTVAL (op1);
3090
3091 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3092 {
3093 pre_shift = floor_log2 (d);
3094 if (rem_flag)
3095 {
3096 remainder
3097 = expand_binop (compute_mode, and_optab, op0,
3098 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3099 remainder, 1,
3100 OPTAB_LIB_WIDEN);
3101 if (remainder)
3102 return gen_lowpart (mode, remainder);
3103 }
3104 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3105 build_int_2 (pre_shift, 0),
3106 tquotient, 1);
3107 }
3108 else if (size <= HOST_BITS_PER_WIDE_INT)
3109 {
3110 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3111 {
3112 /* Most significant bit of divisor is set; emit an scc
3113 insn. */
3114 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3115 compute_mode, 1, 1);
3116 if (quotient == 0)
3117 goto fail1;
3118 }
3119 else
3120 {
3121 /* Find a suitable multiplier and right shift count
3122 instead of dividing by D. */
3123
3124 mh = choose_multiplier (d, size, size,
3125 &ml, &post_shift, &dummy);
3126
3127 /* If the suggested multiplier is more than SIZE bits,
3128 we can do better for even divisors, using an
3129 initial right shift. */
3130 if (mh != 0 && (d & 1) == 0)
3131 {
3132 pre_shift = floor_log2 (d & -d);
3133 mh = choose_multiplier (d >> pre_shift, size,
3134 size - pre_shift,
3135 &ml, &post_shift, &dummy);
3136 if (mh)
3137 abort ();
3138 }
3139 else
3140 pre_shift = 0;
3141
3142 if (mh != 0)
3143 {
3144 rtx t1, t2, t3, t4;
3145
3146 extra_cost = (shift_cost[post_shift - 1]
3147 + shift_cost[1] + 2 * add_cost);
3148 t1 = expand_mult_highpart (compute_mode, op0, ml,
3149 NULL_RTX, 1,
3150 max_cost - extra_cost);
3151 if (t1 == 0)
3152 goto fail1;
3153 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3154 op0, t1),
3155 NULL_RTX);
3156 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3157 build_int_2 (1, 0), NULL_RTX,1);
3158 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3159 t1, t3),
3160 NULL_RTX);
3161 quotient
3162 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3163 build_int_2 (post_shift - 1, 0),
3164 tquotient, 1);
3165 }
3166 else
3167 {
3168 rtx t1, t2;
3169
3170 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3171 build_int_2 (pre_shift, 0),
3172 NULL_RTX, 1);
3173 extra_cost = (shift_cost[pre_shift]
3174 + shift_cost[post_shift]);
3175 t2 = expand_mult_highpart (compute_mode, t1, ml,
3176 NULL_RTX, 1,
3177 max_cost - extra_cost);
3178 if (t2 == 0)
3179 goto fail1;
3180 quotient
3181 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3182 build_int_2 (post_shift, 0),
3183 tquotient, 1);
3184 }
3185 }
3186 }
3187 else /* Too wide mode to use tricky code */
3188 break;
3189
3190 insn = get_last_insn ();
3191 if (insn != last
3192 && (set = single_set (insn)) != 0
3193 && SET_DEST (set) == quotient)
3194 set_unique_reg_note (insn,
3195 REG_EQUAL,
3196 gen_rtx_UDIV (compute_mode, op0, op1));
3197 }
3198 else /* TRUNC_DIV, signed */
3199 {
3200 unsigned HOST_WIDE_INT ml;
3201 int lgup, post_shift;
3202 HOST_WIDE_INT d = INTVAL (op1);
3203 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3204
3205 /* n rem d = n rem -d */
3206 if (rem_flag && d < 0)
3207 {
3208 d = abs_d;
3209 op1 = GEN_INT (abs_d);
3210 }
3211
3212 if (d == 1)
3213 quotient = op0;
3214 else if (d == -1)
3215 quotient = expand_unop (compute_mode, neg_optab, op0,
3216 tquotient, 0);
3217 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3218 {
3219 /* This case is not handled correctly below. */
3220 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3221 compute_mode, 1, 1);
3222 if (quotient == 0)
3223 goto fail1;
3224 }
3225 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3226 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3227 ;
3228 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3229 {
3230 lgup = floor_log2 (abs_d);
3231 if (abs_d != 2 && BRANCH_COST < 3)
3232 {
3233 rtx label = gen_label_rtx ();
3234 rtx t1;
3235
3236 t1 = copy_to_mode_reg (compute_mode, op0);
3237 do_cmp_and_jump (t1, const0_rtx, GE,
3238 compute_mode, label);
3239 expand_inc (t1, GEN_INT (abs_d - 1));
3240 emit_label (label);
3241 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3242 build_int_2 (lgup, 0),
3243 tquotient, 0);
3244 }
3245 else
3246 {
3247 rtx t1, t2, t3;
3248 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3249 build_int_2 (size - 1, 0),
3250 NULL_RTX, 0);
3251 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3252 build_int_2 (size - lgup, 0),
3253 NULL_RTX, 1);
3254 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3255 op0, t2),
3256 NULL_RTX);
3257 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3258 build_int_2 (lgup, 0),
3259 tquotient, 0);
3260 }
3261
3262 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3263 the quotient. */
3264 if (d < 0)
3265 {
3266 insn = get_last_insn ();
3267 if (insn != last
3268 && (set = single_set (insn)) != 0
3269 && SET_DEST (set) == quotient
3270 && abs_d < ((unsigned HOST_WIDE_INT) 1
3271 << (HOST_BITS_PER_WIDE_INT - 1)))
3272 set_unique_reg_note (insn,
3273 REG_EQUAL,
3274 gen_rtx_DIV (compute_mode,
3275 op0,
3276 GEN_INT (abs_d)));
3277
3278 quotient = expand_unop (compute_mode, neg_optab,
3279 quotient, quotient, 0);
3280 }
3281 }
3282 else if (size <= HOST_BITS_PER_WIDE_INT)
3283 {
3284 choose_multiplier (abs_d, size, size - 1,
3285 &ml, &post_shift, &lgup);
3286 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3287 {
3288 rtx t1, t2, t3;
3289
3290 extra_cost = (shift_cost[post_shift]
3291 + shift_cost[size - 1] + add_cost);
3292 t1 = expand_mult_highpart (compute_mode, op0, ml,
3293 NULL_RTX, 0,
3294 max_cost - extra_cost);
3295 if (t1 == 0)
3296 goto fail1;
3297 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3298 build_int_2 (post_shift, 0), NULL_RTX, 0);
3299 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3300 build_int_2 (size - 1, 0), NULL_RTX, 0);
3301 if (d < 0)
3302 quotient
3303 = force_operand (gen_rtx_MINUS (compute_mode,
3304 t3, t2),
3305 tquotient);
3306 else
3307 quotient
3308 = force_operand (gen_rtx_MINUS (compute_mode,
3309 t2, t3),
3310 tquotient);
3311 }
3312 else
3313 {
3314 rtx t1, t2, t3, t4;
3315
3316 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3317 extra_cost = (shift_cost[post_shift]
3318 + shift_cost[size - 1] + 2 * add_cost);
3319 t1 = expand_mult_highpart (compute_mode, op0, ml,
3320 NULL_RTX, 0,
3321 max_cost - extra_cost);
3322 if (t1 == 0)
3323 goto fail1;
3324 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3325 t1, op0),
3326 NULL_RTX);
3327 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3328 build_int_2 (post_shift, 0),
3329 NULL_RTX, 0);
3330 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3331 build_int_2 (size - 1, 0),
3332 NULL_RTX, 0);
3333 if (d < 0)
3334 quotient
3335 = force_operand (gen_rtx_MINUS (compute_mode,
3336 t4, t3),
3337 tquotient);
3338 else
3339 quotient
3340 = force_operand (gen_rtx_MINUS (compute_mode,
3341 t3, t4),
3342 tquotient);
3343 }
3344 }
3345 else /* Too wide mode to use tricky code */
3346 break;
3347
3348 insn = get_last_insn ();
3349 if (insn != last
3350 && (set = single_set (insn)) != 0
3351 && SET_DEST (set) == quotient)
3352 set_unique_reg_note (insn,
3353 REG_EQUAL,
3354 gen_rtx_DIV (compute_mode, op0, op1));
3355 }
3356 break;
3357 }
3358 fail1:
3359 delete_insns_since (last);
3360 break;
3361
3362 case FLOOR_DIV_EXPR:
3363 case FLOOR_MOD_EXPR:
3364 /* We will come here only for signed operations. */
3365 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3366 {
3367 unsigned HOST_WIDE_INT mh, ml;
3368 int pre_shift, lgup, post_shift;
3369 HOST_WIDE_INT d = INTVAL (op1);
3370
3371 if (d > 0)
3372 {
3373 /* We could just as easily deal with negative constants here,
3374 but it does not seem worth the trouble for GCC 2.6. */
3375 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3376 {
3377 pre_shift = floor_log2 (d);
3378 if (rem_flag)
3379 {
3380 remainder = expand_binop (compute_mode, and_optab, op0,
3381 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3382 remainder, 0, OPTAB_LIB_WIDEN);
3383 if (remainder)
3384 return gen_lowpart (mode, remainder);
3385 }
3386 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3387 build_int_2 (pre_shift, 0),
3388 tquotient, 0);
3389 }
3390 else
3391 {
3392 rtx t1, t2, t3, t4;
3393
3394 mh = choose_multiplier (d, size, size - 1,
3395 &ml, &post_shift, &lgup);
3396 if (mh)
3397 abort ();
3398
3399 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3400 build_int_2 (size - 1, 0), NULL_RTX, 0);
3401 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3402 NULL_RTX, 0, OPTAB_WIDEN);
3403 extra_cost = (shift_cost[post_shift]
3404 + shift_cost[size - 1] + 2 * add_cost);
3405 t3 = expand_mult_highpart (compute_mode, t2, ml,
3406 NULL_RTX, 1,
3407 max_cost - extra_cost);
3408 if (t3 != 0)
3409 {
3410 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3411 build_int_2 (post_shift, 0),
3412 NULL_RTX, 1);
3413 quotient = expand_binop (compute_mode, xor_optab,
3414 t4, t1, tquotient, 0,
3415 OPTAB_WIDEN);
3416 }
3417 }
3418 }
3419 else
3420 {
3421 rtx nsign, t1, t2, t3, t4;
3422 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3423 op0, constm1_rtx), NULL_RTX);
3424 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3425 0, OPTAB_WIDEN);
3426 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3427 build_int_2 (size - 1, 0), NULL_RTX, 0);
3428 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3429 NULL_RTX);
3430 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3431 NULL_RTX, 0);
3432 if (t4)
3433 {
3434 rtx t5;
3435 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3436 NULL_RTX, 0);
3437 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3438 t4, t5),
3439 tquotient);
3440 }
3441 }
3442 }
3443
3444 if (quotient != 0)
3445 break;
3446 delete_insns_since (last);
3447
3448 /* Try using an instruction that produces both the quotient and
3449 remainder, using truncation. We can easily compensate the quotient
3450 or remainder to get floor rounding, once we have the remainder.
3451 Notice that we also compute the final remainder value here,
3452 and return the result right away. */
3453 if (target == 0 || GET_MODE (target) != compute_mode)
3454 target = gen_reg_rtx (compute_mode);
3455
3456 if (rem_flag)
3457 {
3458 remainder
3459 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3460 quotient = gen_reg_rtx (compute_mode);
3461 }
3462 else
3463 {
3464 quotient
3465 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3466 remainder = gen_reg_rtx (compute_mode);
3467 }
3468
3469 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3470 quotient, remainder, 0))
3471 {
3472 /* This could be computed with a branch-less sequence.
3473 Save that for later. */
3474 rtx tem;
3475 rtx label = gen_label_rtx ();
3476 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3477 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3478 NULL_RTX, 0, OPTAB_WIDEN);
3479 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3480 expand_dec (quotient, const1_rtx);
3481 expand_inc (remainder, op1);
3482 emit_label (label);
3483 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3484 }
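/* A worked example of the compensation just emitted: for op0 = -7,
   op1 = 2 the truncating divmod gives quotient -3, remainder -1.
   The remainder is non-zero and op0 ^ op1 is negative (the signs
   differ), so neither jump is taken and we adjust: the quotient
   becomes -4 and the remainder becomes -1 + 2 = 1, i.e.
   floor(-7/2) = -4 with remainder 1. */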
3485
3486 /* No luck with division elimination or divmod. Have to do it
3487 by conditionally adjusting op0 *and* the result. */
3488 {
3489 rtx label1, label2, label3, label4, label5;
3490 rtx adjusted_op0;
3491 rtx tem;
3492
3493 quotient = gen_reg_rtx (compute_mode);
3494 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3495 label1 = gen_label_rtx ();
3496 label2 = gen_label_rtx ();
3497 label3 = gen_label_rtx ();
3498 label4 = gen_label_rtx ();
3499 label5 = gen_label_rtx ();
3500 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3501 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3502 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3503 quotient, 0, OPTAB_LIB_WIDEN);
3504 if (tem != quotient)
3505 emit_move_insn (quotient, tem);
3506 emit_jump_insn (gen_jump (label5));
3507 emit_barrier ();
3508 emit_label (label1);
3509 expand_inc (adjusted_op0, const1_rtx);
3510 emit_jump_insn (gen_jump (label4));
3511 emit_barrier ();
3512 emit_label (label2);
3513 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3514 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3515 quotient, 0, OPTAB_LIB_WIDEN);
3516 if (tem != quotient)
3517 emit_move_insn (quotient, tem);
3518 emit_jump_insn (gen_jump (label5));
3519 emit_barrier ();
3520 emit_label (label3);
3521 expand_dec (adjusted_op0, const1_rtx);
3522 emit_label (label4);
3523 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3524 quotient, 0, OPTAB_LIB_WIDEN);
3525 if (tem != quotient)
3526 emit_move_insn (quotient, tem);
3527 expand_dec (quotient, const1_rtx);
3528 emit_label (label5);
3529 }
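/* In C terms, the label structure above implements (a sketch for
   exposition only):

	if (op1 >= 0)
	  quotient = op0 >= 0 ? op0 / op1 : (op0 + 1) / op1 - 1;
	else
	  quotient = op0 <= 0 ? op0 / op1 : (op0 - 1) / op1 - 1;

   e.g. op0 = -7, op1 = 2 takes the second arm of the first case:
   (-7 + 1) / 2 - 1 = -4 = floor(-7/2). */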
3530 break;
3531
3532 case CEIL_DIV_EXPR:
3533 case CEIL_MOD_EXPR:
3534 if (unsignedp)
3535 {
3536 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3537 {
3538 rtx t1, t2, t3;
3539 unsigned HOST_WIDE_INT d = INTVAL (op1);
3540 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3541 build_int_2 (floor_log2 (d), 0),
3542 tquotient, 1);
3543 t2 = expand_binop (compute_mode, and_optab, op0,
3544 GEN_INT (d - 1),
3545 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3546 t3 = gen_reg_rtx (compute_mode);
3547 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3548 compute_mode, 1, 1);
3549 if (t3 == 0)
3550 {
3551 rtx lab;
3552 lab = gen_label_rtx ();
3553 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3554 expand_inc (t1, const1_rtx);
3555 emit_label (lab);
3556 quotient = t1;
3557 }
3558 else
3559 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3560 t1, t3),
3561 tquotient);
3562 break;
3563 }
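/* Illustration: op0 = 7, op1 = 4 gives t1 = 7 >> 2 = 1,
   t2 = 7 & 3 = 3, t3 = (t2 != 0) = 1, quotient = 1 + 1 = 2
   = ceil(7/4). When no store-flag insn exists, the same adjustment
   is made with the compare-and-increment emitted above. */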
3564
3565 /* Try using an instruction that produces both the quotient and
3566 remainder, using truncation. We can easily compensate the
3567 quotient or remainder to get ceiling rounding, once we have the
3568 remainder. Notice that we also compute the final remainder
3569 value here, and return the result right away. */
3570 if (target == 0 || GET_MODE (target) != compute_mode)
3571 target = gen_reg_rtx (compute_mode);
3572
3573 if (rem_flag)
3574 {
3575 remainder = (GET_CODE (target) == REG
3576 ? target : gen_reg_rtx (compute_mode));
3577 quotient = gen_reg_rtx (compute_mode);
3578 }
3579 else
3580 {
3581 quotient = (GET_CODE (target) == REG
3582 ? target : gen_reg_rtx (compute_mode));
3583 remainder = gen_reg_rtx (compute_mode);
3584 }
3585
3586 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3587 remainder, 1))
3588 {
3589 /* This could be computed with a branch-less sequence.
3590 Save that for later. */
3591 rtx label = gen_label_rtx ();
3592 do_cmp_and_jump (remainder, const0_rtx, EQ,
3593 compute_mode, label);
3594 expand_inc (quotient, const1_rtx);
3595 expand_dec (remainder, op1);
3596 emit_label (label);
3597 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3598 }
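/* Illustration of the compensation: op0 = 7, op1 = 2 gives a
   truncating quotient of 3 and remainder 1; the remainder is
   non-zero, so the quotient becomes 4 = ceil(7/2) and the remainder
   1 - 2, i.e. the negative ceiling remainder in the computation
   mode. */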
3599
3600 /* No luck with division elimination or divmod. Have to do it
3601 by conditionally adjusting op0 *and* the result. */
3602 {
3603 rtx label1, label2;
3604 rtx adjusted_op0, tem;
3605
3606 quotient = gen_reg_rtx (compute_mode);
3607 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3608 label1 = gen_label_rtx ();
3609 label2 = gen_label_rtx ();
3610 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3611 compute_mode, label1);
3612 emit_move_insn (quotient, const0_rtx);
3613 emit_jump_insn (gen_jump (label2));
3614 emit_barrier ();
3615 emit_label (label1);
3616 expand_dec (adjusted_op0, const1_rtx);
3617 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3618 quotient, 1, OPTAB_LIB_WIDEN);
3619 if (tem != quotient)
3620 emit_move_insn (quotient, tem);
3621 expand_inc (quotient, const1_rtx);
3622 emit_label (label2);
3623 }
3624 }
3625 else /* signed */
3626 {
3627 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3628 && INTVAL (op1) >= 0)
3629 {
3630 /* This is extremely similar to the code for the unsigned case
3631 above. For 2.7 we should merge these variants, but for
3632 2.6.1 I don't want to touch the code for unsigned since that
3633 gets used in C. The signed case will only be used by other
3634 languages (Ada). */
3635
3636 rtx t1, t2, t3;
3637 unsigned HOST_WIDE_INT d = INTVAL (op1);
3638 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3639 build_int_2 (floor_log2 (d), 0),
3640 tquotient, 0);
3641 t2 = expand_binop (compute_mode, and_optab, op0,
3642 GEN_INT (d - 1),
3643 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3644 t3 = gen_reg_rtx (compute_mode);
3645 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3646 compute_mode, 1, 1);
3647 if (t3 == 0)
3648 {
3649 rtx lab;
3650 lab = gen_label_rtx ();
3651 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3652 expand_inc (t1, const1_rtx);
3653 emit_label (lab);
3654 quotient = t1;
3655 }
3656 else
3657 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3658 t1, t3),
3659 tquotient);
3660 break;
3661 }
3662
3663 /* Try using an instruction that produces both the quotient and
3664 remainder, using truncation. We can easily compensate the
3665 quotient or remainder to get ceiling rounding, once we have the
3666 remainder. Notice that we also compute the final remainder
3667 value here, and return the result right away. */
3668 if (target == 0 || GET_MODE (target) != compute_mode)
3669 target = gen_reg_rtx (compute_mode);
3670 if (rem_flag)
3671 {
3672 remainder = (GET_CODE (target) == REG
3673 ? target : gen_reg_rtx (compute_mode));
3674 quotient = gen_reg_rtx (compute_mode);
3675 }
3676 else
3677 {
3678 quotient = (GET_CODE (target) == REG
3679 ? target : gen_reg_rtx (compute_mode));
3680 remainder = gen_reg_rtx (compute_mode);
3681 }
3682
3683 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3684 remainder, 0))
3685 {
3686 /* This could be computed with a branch-less sequence.
3687 Save that for later. */
3688 rtx tem;
3689 rtx label = gen_label_rtx ();
3690 do_cmp_and_jump (remainder, const0_rtx, EQ,
3691 compute_mode, label);
3692 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3693 NULL_RTX, 0, OPTAB_WIDEN);
3694 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3695 expand_inc (quotient, const1_rtx);
3696 expand_dec (remainder, op1);
3697 emit_label (label);
3698 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3699 }
3700
3701 /* No luck with division elimination or divmod. Have to do it
3702 by conditionally adjusting op0 *and* the result. */
3703 {
3704 rtx label1, label2, label3, label4, label5;
3705 rtx adjusted_op0;
3706 rtx tem;
3707
3708 quotient = gen_reg_rtx (compute_mode);
3709 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3710 label1 = gen_label_rtx ();
3711 label2 = gen_label_rtx ();
3712 label3 = gen_label_rtx ();
3713 label4 = gen_label_rtx ();
3714 label5 = gen_label_rtx ();
3715 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3716 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3717 compute_mode, label1);
3718 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3719 quotient, 0, OPTAB_LIB_WIDEN);
3720 if (tem != quotient)
3721 emit_move_insn (quotient, tem);
3722 emit_jump_insn (gen_jump (label5));
3723 emit_barrier ();
3724 emit_label (label1);
3725 expand_dec (adjusted_op0, const1_rtx);
3726 emit_jump_insn (gen_jump (label4));
3727 emit_barrier ();
3728 emit_label (label2);
3729 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3730 compute_mode, label3);
3731 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3732 quotient, 0, OPTAB_LIB_WIDEN);
3733 if (tem != quotient)
3734 emit_move_insn (quotient, tem);
3735 emit_jump_insn (gen_jump (label5));
3736 emit_barrier ();
3737 emit_label (label3);
3738 expand_inc (adjusted_op0, const1_rtx);
3739 emit_label (label4);
3740 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3741 quotient, 0, OPTAB_LIB_WIDEN);
3742 if (tem != quotient)
3743 emit_move_insn (quotient, tem);
3744 expand_inc (quotient, const1_rtx);
3745 emit_label (label5);
3746 }
3747 }
3748 break;
3749
3750 case EXACT_DIV_EXPR:
3751 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3752 {
3753 HOST_WIDE_INT d = INTVAL (op1);
3754 unsigned HOST_WIDE_INT ml;
3755 int post_shift;
3756 rtx t1;
3757
3758 post_shift = floor_log2 (d & -d);
3759 ml = invert_mod2n (d >> post_shift, size);
3760 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3761 unsignedp);
3762 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3763 build_int_2 (post_shift, 0),
3764 NULL_RTX, unsignedp);
3765
3766 insn = get_last_insn ();
3767 set_unique_reg_note (insn,
3768 REG_EQUAL,
3769 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3770 compute_mode,
3771 op0, op1));
3772 }
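/* Worked example of the inverse-multiplication trick, for size = 8
   and d = 10: post_shift = 1 and ml is the inverse of 5 mod 2**8,
   namely 205 (5 * 205 = 1025 == 1 mod 256). For op0 = 30 the
   product 30 * 205 = 6150 is 6 mod 256, and 6 >> 1 = 3 = 30 / 10.
   This is valid only because EXACT_DIV_EXPR guarantees that op0 is
   a multiple of op1. */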
3773 break;
3774
3775 case ROUND_DIV_EXPR:
3776 case ROUND_MOD_EXPR:
3777 if (unsignedp)
3778 {
3779 rtx tem;
3780 rtx label;
3781 label = gen_label_rtx ();
3782 quotient = gen_reg_rtx (compute_mode);
3783 remainder = gen_reg_rtx (compute_mode);
3784 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3785 {
3786 rtx tem;
3787 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3788 quotient, 1, OPTAB_LIB_WIDEN);
3789 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3790 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3791 remainder, 1, OPTAB_LIB_WIDEN);
3792 }
3793 tem = plus_constant (op1, -1);
3794 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3795 build_int_2 (1, 0), NULL_RTX, 1);
3796 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3797 expand_inc (quotient, const1_rtx);
3798 expand_dec (remainder, op1);
3799 emit_label (label);
3800 }
3801 else
3802 {
3803 rtx abs_rem, abs_op1, tem, mask;
3804 rtx label;
3805 label = gen_label_rtx ();
3806 quotient = gen_reg_rtx (compute_mode);
3807 remainder = gen_reg_rtx (compute_mode);
3808 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3809 {
3810 rtx tem;
3811 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3812 quotient, 0, OPTAB_LIB_WIDEN);
3813 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3814 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3815 remainder, 0, OPTAB_LIB_WIDEN);
3816 }
3817 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
3818 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
3819 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3820 build_int_2 (1, 0), NULL_RTX, 1);
3821 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3822 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3823 NULL_RTX, 0, OPTAB_WIDEN);
3824 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3825 build_int_2 (size - 1, 0), NULL_RTX, 0);
3826 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3827 NULL_RTX, 0, OPTAB_WIDEN);
3828 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3829 NULL_RTX, 0, OPTAB_WIDEN);
3830 expand_inc (quotient, tem);
3831 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3832 NULL_RTX, 0, OPTAB_WIDEN);
3833 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3834 NULL_RTX, 0, OPTAB_WIDEN);
3835 expand_dec (remainder, tem);
3836 emit_label (label);
3837 }
3838 return gen_lowpart (mode, rem_flag ? remainder : quotient);
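/* Illustration of the rounding adjustment: unsigned 7 / 2 gives
   quotient 3, remainder 1; since 1 > (2 - 1) >> 1 = 0, the quotient
   is bumped to 4 = round(3.5). In the signed case MASK is the sign
   of op0 ^ op1 (zero or all ones), and (MASK ^ 1) - MASK evaluates
   to 1 when the operands agree in sign and -1 when they differ, so
   the quotient is moved away from zero by one whenever
   2 * |remainder| >= |op1|. */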
3839
3840 default:
3841 abort ();
3842 }
3843
3844 if (quotient == 0)
3845 {
3846 if (target && GET_MODE (target) != compute_mode)
3847 target = 0;
3848
3849 if (rem_flag)
3850 {
3851 /* Try to produce the remainder without producing the quotient.
3852 If we seem to have a divmod pattern that does not require widening,
3853 don't try widening here. We should really have a WIDEN argument
3854 to expand_twoval_binop, since what we'd really like to do here is
3855 1) try a mod insn in compute_mode
3856 2) try a divmod insn in compute_mode
3857 3) try a div insn in compute_mode and multiply-subtract to get
3858 remainder
3859 4) try the same things with widening allowed. */
3860 remainder
3861 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3862 op0, op1, target,
3863 unsignedp,
3864 ((optab2->handlers[(int) compute_mode].insn_code
3865 != CODE_FOR_nothing)
3866 ? OPTAB_DIRECT : OPTAB_WIDEN));
3867 if (remainder == 0)
3868 {
3869 /* No luck there. Can we do remainder and divide at once
3870 without a library call? */
3871 remainder = gen_reg_rtx (compute_mode);
3872 if (! expand_twoval_binop ((unsignedp
3873 ? udivmod_optab
3874 : sdivmod_optab),
3875 op0, op1,
3876 NULL_RTX, remainder, unsignedp))
3877 remainder = 0;
3878 }
3879
3880 if (remainder)
3881 return gen_lowpart (mode, remainder);
3882 }
3883
3884 /* Produce the quotient. Try a quotient insn, but not a library call.
3885 If we have a divmod in this mode, use it in preference to widening
3886 the div (for this test we assume it will not fail). Note that optab2
3887 is set to whichever of the two optabs the call below will use. */
3888 quotient
3889 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3890 op0, op1, rem_flag ? NULL_RTX : target,
3891 unsignedp,
3892 ((optab2->handlers[(int) compute_mode].insn_code
3893 != CODE_FOR_nothing)
3894 ? OPTAB_DIRECT : OPTAB_WIDEN));
3895
3896 if (quotient == 0)
3897 {
3898 /* No luck there. Try a quotient-and-remainder insn,
3899 keeping the quotient alone. */
3900 quotient = gen_reg_rtx (compute_mode);
3901 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3902 op0, op1,
3903 quotient, NULL_RTX, unsignedp))
3904 {
3905 quotient = 0;
3906 if (! rem_flag)
3907 /* Still no luck. If we are not computing the remainder,
3908 use a library call for the quotient. */
3909 quotient = sign_expand_binop (compute_mode,
3910 udiv_optab, sdiv_optab,
3911 op0, op1, target,
3912 unsignedp, OPTAB_LIB_WIDEN);
3913 }
3914 }
3915 }
3916
3917 if (rem_flag)
3918 {
3919 if (target && GET_MODE (target) != compute_mode)
3920 target = 0;
3921
3922 if (quotient == 0)
3923 /* No divide instruction either. Use library for remainder. */
3924 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3925 op0, op1, target,
3926 unsignedp, OPTAB_LIB_WIDEN);
3927 else
3928 {
3929 /* We divided. Now finish doing X - Y * (X / Y). */
3930 remainder = expand_mult (compute_mode, quotient, op1,
3931 NULL_RTX, unsignedp);
3932 remainder = expand_binop (compute_mode, sub_optab, op0,
3933 remainder, target, unsignedp,
3934 OPTAB_LIB_WIDEN);
3935 }
3936 }
3937
3938 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3939 }
3940 \f
3941 /* Return a tree node with data type TYPE, describing the value of X.
3942 Usually this is an RTL_EXPR, if there is no obvious better choice.
3943 X may be an expression; however, we only support those expressions
3944 generated by loop.c. */
3945
3946 tree
3947 make_tree (type, x)
3948 tree type;
3949 rtx x;
3950 {
3951 tree t;
3952
3953 switch (GET_CODE (x))
3954 {
3955 case CONST_INT:
3956 t = build_int_2 (INTVAL (x),
3957 (TREE_UNSIGNED (type)
3958 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3959 || INTVAL (x) >= 0 ? 0 : -1);
3960 TREE_TYPE (t) = type;
3961 return t;
3962
3963 case CONST_DOUBLE:
3964 if (GET_MODE (x) == VOIDmode)
3965 {
3966 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3967 TREE_TYPE (t) = type;
3968 }
3969 else
3970 {
3971 REAL_VALUE_TYPE d;
3972
3973 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3974 t = build_real (type, d);
3975 }
3976
3977 return t;
3978
3979 case PLUS:
3980 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3981 make_tree (type, XEXP (x, 1))));
3982
3983 case MINUS:
3984 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3985 make_tree (type, XEXP (x, 1))));
3986
3987 case NEG:
3988 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3989
3990 case MULT:
3991 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3992 make_tree (type, XEXP (x, 1))));
3993
3994 case ASHIFT:
3995 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3996 make_tree (type, XEXP (x, 1))));
3997
3998 case LSHIFTRT:
3999 return fold (convert (type,
4000 build (RSHIFT_EXPR, unsigned_type (type),
4001 make_tree (unsigned_type (type),
4002 XEXP (x, 0)),
4003 make_tree (type, XEXP (x, 1)))));
4004
4005 case ASHIFTRT:
4006 return fold (convert (type,
4007 build (RSHIFT_EXPR, signed_type (type),
4008 make_tree (signed_type (type), XEXP (x, 0)),
4009 make_tree (type, XEXP (x, 1)))));
4010
4011 case DIV:
4012 if (TREE_CODE (type) != REAL_TYPE)
4013 t = signed_type (type);
4014 else
4015 t = type;
4016
4017 return fold (convert (type,
4018 build (TRUNC_DIV_EXPR, t,
4019 make_tree (t, XEXP (x, 0)),
4020 make_tree (t, XEXP (x, 1)))));
4021 case UDIV:
4022 t = unsigned_type (type);
4023 return fold (convert (type,
4024 build (TRUNC_DIV_EXPR, t,
4025 make_tree (t, XEXP (x, 0)),
4026 make_tree (t, XEXP (x, 1)))));
4027 default:
4028 t = make_node (RTL_EXPR);
4029 TREE_TYPE (t) = type;
4030 RTL_EXPR_RTL (t) = x;
4031 /* There are no insns to be output
4032 when this rtl_expr is used. */
4033 RTL_EXPR_SEQUENCE (t) = 0;
4034 return t;
4035 }
4036 }
4037
4038 /* Return an rtx representing the value of X * MULT + ADD.
4039 TARGET is a suggestion for where to store the result (an rtx).
4040 MODE is the machine mode for the computation.
4041 X and MULT must have mode MODE. ADD may have a different mode;
4042 a VOIDmode ADD is treated as having mode MODE.
4043 UNSIGNEDP is non-zero to do unsigned multiplication.
4044 This may emit insns. */
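/* For example (a hypothetical call, not taken from this file),
   expand_mult_add (idx, NULL_RTX, GEN_INT (4), base, Pmode, 1)
   would compute idx * 4 + base with unsigned Pmode arithmetic. */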
4045
4046 rtx
4047 expand_mult_add (x, target, mult, add, mode, unsignedp)
4048 rtx x, target, mult, add;
4049 enum machine_mode mode;
4050 int unsignedp;
4051 {
4052 tree type = type_for_mode (mode, unsignedp);
4053 tree add_type = (GET_MODE (add) == VOIDmode
4054 ? type : type_for_mode (GET_MODE (add), unsignedp));
4055 tree result = fold (build (PLUS_EXPR, type,
4056 fold (build (MULT_EXPR, type,
4057 make_tree (type, x),
4058 make_tree (type, mult))),
4059 make_tree (add_type, add)));
4060
4061 return expand_expr (result, target, VOIDmode, 0);
4062 }
4063 \f
4064 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4065 and returning TARGET.
4066
4067 If TARGET is 0, a pseudo-register or constant is returned. */
4068
4069 rtx
4070 expand_and (op0, op1, target)
4071 rtx op0, op1, target;
4072 {
4073 enum machine_mode mode = VOIDmode;
4074 rtx tem;
4075
4076 if (GET_MODE (op0) != VOIDmode)
4077 mode = GET_MODE (op0);
4078 else if (GET_MODE (op1) != VOIDmode)
4079 mode = GET_MODE (op1);
4080
4081 if (mode != VOIDmode)
4082 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4083 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4084 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4085 else
4086 abort ();
4087
4088 if (target == 0)
4089 target = tem;
4090 else if (tem != target)
4091 emit_move_insn (target, tem);
4092 return target;
4093 }
4094 \f
4095 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4096 and storing in TARGET. Normally return TARGET.
4097 Return 0 if that cannot be done.
4098
4099 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4100 it is VOIDmode, they cannot both be CONST_INT.
4101
4102 UNSIGNEDP is for the case where we have to widen the operands
4103 to perform the operation. It says to use zero-extension.
4104
4105 NORMALIZEP is 1 if we should convert the result to be either zero
4106 or one. NORMALIZEP is -1 if we should convert the result to be
4107 either zero or -1. If NORMALIZEP is zero, the result will be left
4108 "raw" out of the scc insn. */
4109
4110 rtx
4111 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4112 rtx target;
4113 enum rtx_code code;
4114 rtx op0, op1;
4115 enum machine_mode mode;
4116 int unsignedp;
4117 int normalizep;
4118 {
4119 rtx subtarget;
4120 enum insn_code icode;
4121 enum machine_mode compare_mode;
4122 enum machine_mode target_mode = GET_MODE (target);
4123 rtx tem;
4124 rtx last = get_last_insn ();
4125 rtx pattern, comparison;
4126
4127 if (unsignedp)
4128 code = unsigned_condition (code);
4129
4130 /* If one operand is constant, make it the second one. Only do this
4131 if the other operand is not constant as well. */
4132
4133 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4134 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4135 {
4136 tem = op0;
4137 op0 = op1;
4138 op1 = tem;
4139 code = swap_condition (code);
4140 }
4141
4142 if (mode == VOIDmode)
4143 mode = GET_MODE (op0);
4144
4145 /* For some comparisons with 1 and -1, we can convert this to
4146 comparisons with zero. This will often produce more opportunities for
4147 store-flag insns. */
4148
4149 switch (code)
4150 {
4151 case LT:
4152 if (op1 == const1_rtx)
4153 op1 = const0_rtx, code = LE;
4154 break;
4155 case LE:
4156 if (op1 == constm1_rtx)
4157 op1 = const0_rtx, code = LT;
4158 break;
4159 case GE:
4160 if (op1 == const1_rtx)
4161 op1 = const0_rtx, code = GT;
4162 break;
4163 case GT:
4164 if (op1 == constm1_rtx)
4165 op1 = const0_rtx, code = GE;
4166 break;
4167 case GEU:
4168 if (op1 == const1_rtx)
4169 op1 = const0_rtx, code = NE;
4170 break;
4171 case LTU:
4172 if (op1 == const1_rtx)
4173 op1 = const0_rtx, code = EQ;
4174 break;
4175 default:
4176 break;
4177 }
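/* For example, A < 1 has become A <= 0, and the unsigned A >= 1 has
   become A != 0; comparisons against zero are the ones most likely
   to have store-flag patterns. */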
4178
4179 /* From now on, we won't change CODE, so set ICODE now. */
4180 icode = setcc_gen_code[(int) code];
4181
4182 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4183 complement of A (for GE) and shifting the sign bit to the low bit. */
4184 if (op1 == const0_rtx && (code == LT || code == GE)
4185 && GET_MODE_CLASS (mode) == MODE_INT
4186 && (normalizep || STORE_FLAG_VALUE == 1
4187 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4188 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4189 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4190 {
4191 subtarget = target;
4192
4193 /* If the result is to be wider than OP0, it is best to convert it
4194 first. If it is to be narrower, it is *incorrect* to convert it
4195 first. */
4196 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4197 {
4198 op0 = protect_from_queue (op0, 0);
4199 op0 = convert_modes (target_mode, mode, op0, 0);
4200 mode = target_mode;
4201 }
4202
4203 if (target_mode != mode)
4204 subtarget = 0;
4205
4206 if (code == GE)
4207 op0 = expand_unop (mode, one_cmpl_optab, op0,
4208 ((STORE_FLAG_VALUE == 1 || normalizep)
4209 ? 0 : subtarget), 0);
4210
4211 if (STORE_FLAG_VALUE == 1 || normalizep)
4212 /* If we are supposed to produce a 0/1 value, we want to do
4213 a logical shift from the sign bit to the low-order bit; for
4214 a -1/0 value, we do an arithmetic shift. */
4215 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4216 size_int (GET_MODE_BITSIZE (mode) - 1),
4217 subtarget, normalizep != -1);
4218
4219 if (mode != target_mode)
4220 op0 = convert_modes (target_mode, mode, op0, 0);
4221
4222 return op0;
4223 }
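/* Illustration: in a 32-bit mode with NORMALIZEP == 1, A < 0 is
   computed as (unsigned) A >> 31 and A >= 0 as (unsigned) ~A >> 31;
   for a -1/0 result the final shift is arithmetic instead. */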
4224
4225 if (icode != CODE_FOR_nothing)
4226 {
4227 insn_operand_predicate_fn pred;
4228
4229 /* We think we may be able to do this with a scc insn. Emit the
4230 comparison and then the scc insn.
4231
4232 compare_from_rtx may call emit_queue, which would be deleted below
4233 if the scc insn fails. So call it ourselves before setting LAST.
4234 Likewise for do_pending_stack_adjust. */
4235
4236 emit_queue ();
4237 do_pending_stack_adjust ();
4238 last = get_last_insn ();
4239
4240 comparison
4241 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4242 if (GET_CODE (comparison) == CONST_INT)
4243 return (comparison == const0_rtx ? const0_rtx
4244 : normalizep == 1 ? const1_rtx
4245 : normalizep == -1 ? constm1_rtx
4246 : const_true_rtx);
4247
4248 /* If the code of COMPARISON doesn't match CODE, something is
4249 wrong; we can no longer be sure that we have the operation.
4250 We could handle this case, but it should not happen. */
4251
4252 if (GET_CODE (comparison) != code)
4253 abort ();
4254
4255 /* Get a reference to the target in the proper mode for this insn. */
4256 compare_mode = insn_data[(int) icode].operand[0].mode;
4257 subtarget = target;
4258 pred = insn_data[(int) icode].operand[0].predicate;
4259 if (preserve_subexpressions_p ()
4260 || ! (*pred) (subtarget, compare_mode))
4261 subtarget = gen_reg_rtx (compare_mode);
4262
4263 pattern = GEN_FCN (icode) (subtarget);
4264 if (pattern)
4265 {
4266 emit_insn (pattern);
4267
4268 /* If we are converting to a wider mode, first convert to
4269 TARGET_MODE, then normalize. This produces better combining
4270 opportunities on machines that have a SIGN_EXTRACT when we are
4271 testing a single bit. This mostly benefits the 68k.
4272
4273 If STORE_FLAG_VALUE does not have the sign bit set when
4274 interpreted in COMPARE_MODE, we can do this conversion as
4275 unsigned, which is usually more efficient. */
4276 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4277 {
4278 convert_move (target, subtarget,
4279 (GET_MODE_BITSIZE (compare_mode)
4280 <= HOST_BITS_PER_WIDE_INT)
4281 && 0 == (STORE_FLAG_VALUE
4282 & ((HOST_WIDE_INT) 1
4283 << (GET_MODE_BITSIZE (compare_mode) - 1))));
4284 op0 = target;
4285 compare_mode = target_mode;
4286 }
4287 else
4288 op0 = subtarget;
4289
4290 /* If we want to keep subexpressions around, don't reuse our
4291 last target. */
4292
4293 if (preserve_subexpressions_p ())
4294 subtarget = 0;
4295
4296 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4297 we don't have to do anything. */
4298 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4299 ;
4300 /* STORE_FLAG_VALUE might be the most negative number, so write
4301 the comparison this way to avoid a compile-time warning. */
4302 else if (- normalizep == STORE_FLAG_VALUE)
4303 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4304
4305 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4306 makes it hard to use a value of just the sign bit due to
4307 ANSI integer constant typing rules. */
4308 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4309 && (STORE_FLAG_VALUE
4310 & ((HOST_WIDE_INT) 1
4311 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4312 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4313 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4314 subtarget, normalizep == 1);
4315 else if (STORE_FLAG_VALUE & 1)
4316 {
4317 op0 = expand_and (op0, const1_rtx, subtarget);
4318 if (normalizep == -1)
4319 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4320 }
4321 else
4322 abort ();
4323
4324 /* If we were converting to a smaller mode, do the
4325 conversion now. */
4326 if (target_mode != compare_mode)
4327 {
4328 convert_move (target, op0, 0);
4329 return target;
4330 }
4331 else
4332 return op0;
4333 }
4334 }
4335
4336 delete_insns_since (last);
4337
4338 /* If expensive optimizations, use different pseudo registers for each
4339 insn, instead of reusing the same pseudo. This leads to better CSE,
4340 but slows down the compiler, since there are more pseudos. */
4341 subtarget = (!flag_expensive_optimizations
4342 && (target_mode == mode)) ? target : NULL_RTX;
4343
4344 /* If we reached here, we can't do this with a scc insn. However, there
4345 are some comparisons that can be done directly. For example, if
4346 this is an equality comparison of integers, we can try to exclusive-or
4347 (or subtract) the two operands and use a recursive call to try the
4348 comparison with zero. Don't do any of these cases if branches are
4349 very cheap. */
4350
4351 if (BRANCH_COST > 0
4352 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4353 && op1 != const0_rtx)
4354 {
4355 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4356 OPTAB_WIDEN);
4357
4358 if (tem == 0)
4359 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4360 OPTAB_WIDEN);
4361 if (tem != 0)
4362 tem = emit_store_flag (target, code, tem, const0_rtx,
4363 mode, unsignedp, normalizep);
4364 if (tem == 0)
4365 delete_insns_since (last);
4366 return tem;
4367 }
4368
4369 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4370 the constant zero. Reject all other comparisons at this point. Only
4371 do LE and GT if branches are expensive since they are expensive on
4372 2-operand machines. */
4373
4374 if (BRANCH_COST == 0
4375 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4376 || (code != EQ && code != NE
4377 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4378 return 0;
4379
4380 /* See what we need to return. We can only return a 1, -1, or the
4381 sign bit. */
4382
4383 if (normalizep == 0)
4384 {
4385 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4386 normalizep = STORE_FLAG_VALUE;
4387
4388 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4389 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4390 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4391 ;
4392 else
4393 return 0;
4394 }
4395
4396 /* Try to put the result of the comparison in the sign bit. Assume we can't
4397 do the necessary operation below. */
4398
4399 tem = 0;
4400
4401 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4402 the sign bit set. */
4403
4404 if (code == LE)
4405 {
4406 /* This is destructive, so SUBTARGET can't be OP0. */
4407 if (rtx_equal_p (subtarget, op0))
4408 subtarget = 0;
4409
4410 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4411 OPTAB_WIDEN);
4412 if (tem)
4413 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4414 OPTAB_WIDEN);
4415 }
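/* Illustration: A = 0 gives 0 | -1 = -1 (sign bit set); A = 3 gives
   3 | 2 = 3 (clear); A = -2 gives -2 | -3 = -1 (set). The sign bit
   of A | (A - 1) is therefore exactly A <= 0. */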
4416
4417 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4418 number of bits in the mode of OP0, minus one. */
4419
4420 if (code == GT)
4421 {
4422 if (rtx_equal_p (subtarget, op0))
4423 subtarget = 0;
4424
4425 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4426 size_int (GET_MODE_BITSIZE (mode) - 1),
4427 subtarget, 0);
4428 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4429 OPTAB_WIDEN);
4430 }
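/* Illustration in 32 bits: A = 5 gives (5 >> 31) - 5 = -5 (sign bit
   set); A = 0 gives 0 (clear); A = -3 gives -1 - (-3) = 2 (clear).
   The sign bit of (A >> (BITS - 1)) - A is therefore exactly A > 0,
   even for the most negative number, where the subtraction wraps to
   the most positive one. */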
4431
4432 if (code == EQ || code == NE)
4433 {
4434 /* For EQ or NE, one way to do the comparison is to apply an operation
4435 that converts the operand into a positive number if it is non-zero
4436 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4437 for NE we negate. This puts the result in the sign bit. Then we
4438 normalize with a shift, if needed.
4439
4440 Two operations that can do the above actions are ABS and FFS, so try
4441 them. If that doesn't work, and MODE is smaller than a full word,
4442 we can use zero-extension to the wider mode (an unsigned conversion)
4443 as the operation. */
4444
4445 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4446 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4447 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4448 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4449 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4450 {
4451 op0 = protect_from_queue (op0, 0);
4452 tem = convert_modes (word_mode, mode, op0, 1);
4453 mode = word_mode;
4454 }
4455
4456 if (tem != 0)
4457 {
4458 if (code == EQ)
4459 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4460 0, OPTAB_WIDEN);
4461 else
4462 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4463 }
4464
4465 /* If we couldn't do it that way, for NE we can "or" the two's complement
4466 of the value with itself. For EQ, we take the one's complement of
4467 that "or", which is an extra insn, so we only handle EQ if branches
4468 are expensive. */
4469
4470 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4471 {
4472 if (rtx_equal_p (subtarget, op0))
4473 subtarget = 0;
4474
4475 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4476 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4477 OPTAB_WIDEN);
4478
4479 if (tem && code == EQ)
4480 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4481 }
4482 }
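/* Illustration: for EQ, ABS (x) - 1 is negative exactly when x == 0
   (for the most negative number ABS is a no-op and the subtraction
   wraps to the most positive value, leaving the sign bit clear).
   For NE without ABS or FFS, -x | x has the sign bit set exactly
   when x != 0, and the one's complement of that handles EQ. */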
4483
4484 if (tem && normalizep)
4485 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4486 size_int (GET_MODE_BITSIZE (mode) - 1),
4487 subtarget, normalizep == 1);
4488
4489 if (tem)
4490 {
4491 if (GET_MODE (tem) != target_mode)
4492 {
4493 convert_move (target, tem, 0);
4494 tem = target;
4495 }
4496 else if (!subtarget)
4497 {
4498 emit_move_insn (target, tem);
4499 tem = target;
4500 }
4501 }
4502 else
4503 delete_insns_since (last);
4504
4505 return tem;
4506 }
4507
4508 /* Like emit_store_flag, but always succeeds. */
4509
4510 rtx
4511 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4512 rtx target;
4513 enum rtx_code code;
4514 rtx op0, op1;
4515 enum machine_mode mode;
4516 int unsignedp;
4517 int normalizep;
4518 {
4519 rtx tem, label;
4520
4521 /* First see if emit_store_flag can do the job. */
4522 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4523 if (tem != 0)
4524 return tem;
4525
4526 if (normalizep == 0)
4527 normalizep = 1;
4528
4529 /* If this failed, we have to do this with set/compare/jump/set code. */
4530
4531 if (GET_CODE (target) != REG
4532 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4533 target = gen_reg_rtx (GET_MODE (target));
4534
4535 emit_move_insn (target, const1_rtx);
4536 label = gen_label_rtx ();
4537 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4538 NULL_RTX, label);
4539
4540 emit_move_insn (target, const0_rtx);
4541 emit_label (label);
4542
4543 return target;
4544 }
4545 \f
4546 /* Perform possibly multi-word comparison and conditional jump to LABEL
4547 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE
4548
4549 The algorithm is based on the code in expr.c:do_jump.
4550
4551 Note that this does not perform a general comparison. Only variants
4552 generated within expmed.c are correctly handled; others abort (but could
4553 be handled if needed). */
4554
4555 static void
4556 do_cmp_and_jump (arg1, arg2, op, mode, label)
4557 rtx arg1, arg2, label;
4558 enum rtx_code op;
4559 enum machine_mode mode;
4560 {
4561 /* If this mode is an integer mode too wide to compare properly,
4562 compare word by word. Rely on cse to optimize constant cases. */
4563
4564 if (GET_MODE_CLASS (mode) == MODE_INT
4565 && ! can_compare_p (op, mode, ccp_jump))
4566 {
4567 rtx label2 = gen_label_rtx ();
4568
4569 switch (op)
4570 {
4571 case LTU:
4572 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4573 break;
4574
4575 case LEU:
4576 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4577 break;
4578
4579 case LT:
4580 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4581 break;
4582
4583 case GT:
4584 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4585 break;
4586
4587 case GE:
4588 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4589 break;
4590
4591 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4592 that's the only equality operation we do. */
4593 case EQ:
4594 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4595 abort ();
4596 do_jump_by_parts_equality_rtx (arg1, label2, label);
4597 break;
4598
4599 case NE:
4600 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4601 abort ();
4602 do_jump_by_parts_equality_rtx (arg1, label, label2);
4603 break;
4604
4605 default:
4606 abort ();
4607 }
4608
4609 emit_label (label2);
4610 }
4611 else
4612 {
4613 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);
4614 }
4615 }