/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, rtx));
static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, rtx));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
					    unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT,
					    rtx, int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
			     int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
				 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
				     enum machine_mode, rtx));

/* Nonzero means division or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

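/* If the target does not define SLOW_UNALIGNED_ACCESS, assume that
   unaligned accesses are slow exactly when the target requires strict
   alignment.  */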
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

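/* Fill in the cost tables above by building sample RTL patterns and
   asking the target, via recog and rtx_cost, which ones it recognizes
   and what they cost.  */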
void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

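  /* For each shift count M, plug M into the sample insns built above and
     ask recog whether the target accepts the resulting pattern; if so,
     record its cost, else leave the 32000 sentinel (effectively
     infinite).  */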
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

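  /* A signed division or modulus by a power of two (32 is used as a
     sample) is considered cheap if it costs no more than two
     additions.  */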
  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

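  /* Record per-mode costs of division and multiplication and, where a
     wider mode exists, of widening multiplication and of the high-part
     multiply pattern (a widening multiply shifted right by the mode's
     bit size, then truncated back).  */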
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */
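/* For example, mode_for_extraction (EP_insv, 3) is the mode that the
   insv pattern wants for the value operand being inserted.  */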
enum machine_mode
mode_for_extraction (pattern, opno)
     enum extraction_pattern pattern;
     int opno;
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

\f
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
	  ? (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	     || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD));
	  else
	    op0 = adjust_address (op0, fieldmode, offset);
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
		  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
		  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.
	     Compute offset as multiple of this unit, counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = adjust_address (op0, bestmode, offset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		value1 = simplify_gen_subreg (maxmode, value1,
					      GET_MODE (value1), 0);
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = GEN_INT (trunc_int_for_mode (INTVAL (value), maxmode));
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
	}
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
\f
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     rtx value;
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

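  /* If VALUE is a constant, note whether the truncated field value is all
     zeros or all ones, so the masking and IOR steps below can be
     skipped.  */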
  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
\f
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
\f
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  mode1 = (VECTOR_MODE_P (tmode)
	   ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0)))
	&& GET_MODE_SIZE (mode1) != 0
	&& byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, mode1, offset);
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
	extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
	extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
\f
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp)
     enum machine_mode tmode;
     rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
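  /* Example: for an 8-bit field at BITPOS 4, the narrowest mode holding
     12 bits (typically HImode, 16 bits) is chosen below; the value is
     shifted left by 4 bits and then arithmetic-shifted right by 8.  */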
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
\f
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
1678
1679 static rtx
1680 mask_rtx (mode, bitpos, bitsize, complement)
1681 enum machine_mode mode;
1682 int bitpos, bitsize, complement;
1683 {
1684 HOST_WIDE_INT masklow, maskhigh;
1685
1686 if (bitpos < HOST_BITS_PER_WIDE_INT)
1687 masklow = (HOST_WIDE_INT) -1 << bitpos;
1688 else
1689 masklow = 0;
1690
1691 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1692 masklow &= ((unsigned HOST_WIDE_INT) -1
1693 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1694
1695 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1696 maskhigh = -1;
1697 else
1698 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1699
1700 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1701 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1702 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1703 else
1704 maskhigh = 0;
1705
1706 if (complement)
1707 {
1708 maskhigh = ~maskhigh;
1709 masklow = ~masklow;
1710 }
1711
1712 return immed_double_const (masklow, maskhigh, mode);
1713 }
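/* Editor's sketch (illustrative, not GCC code): the mask mask_rtx
   constructs, computed directly with 64-bit host arithmetic.  The name
   is hypothetical and bitpos + bitsize <= 64 is assumed.  */
static unsigned long long
mask_value_sketch (int bitpos, int bitsize)
{
  unsigned long long mask = ~0ULL << bitpos;	/* ones from BITPOS upward */
  if (bitpos + bitsize < 64)
    mask &= ~0ULL >> (64 - bitpos - bitsize);	/* clear bits above the field */
  return mask;		/* e.g. bitpos == 8, bitsize == 4 yields 0xf00 */
}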
1714
1715 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1716 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1717
1718 static rtx
1719 lshift_value (mode, value, bitpos, bitsize)
1720 enum machine_mode mode;
1721 rtx value;
1722 int bitpos, bitsize;
1723 {
1724 unsigned HOST_WIDE_INT v = INTVAL (value);
1725 HOST_WIDE_INT low, high;
1726
1727 if (bitsize < HOST_BITS_PER_WIDE_INT)
1728 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1729
1730 if (bitpos < HOST_BITS_PER_WIDE_INT)
1731 {
1732 low = v << bitpos;
1733 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1734 }
1735 else
1736 {
1737 low = 0;
1738 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1739 }
1740
1741 return immed_double_const (low, high, mode);
1742 }
1743 \f
1744 /* Extract a bit field that is split across two words
1745 and return an RTX for the result.
1746
1747 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1748 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1749 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1750
1751 static rtx
1752 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1753 rtx op0;
1754 unsigned HOST_WIDE_INT bitsize, bitpos;
1755 int unsignedp;
1756 {
1757 unsigned int unit;
1758 unsigned int bitsdone = 0;
1759 rtx result = NULL_RTX;
1760 int first = 1;
1761
1762 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can handle only that
1763 much at a time. */
1764 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1765 unit = BITS_PER_WORD;
1766 else
1767 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1768
1769 while (bitsdone < bitsize)
1770 {
1771 unsigned HOST_WIDE_INT thissize;
1772 rtx part, word;
1773 unsigned HOST_WIDE_INT thispos;
1774 unsigned HOST_WIDE_INT offset;
1775
1776 offset = (bitpos + bitsdone) / unit;
1777 thispos = (bitpos + bitsdone) % unit;
1778
1779 /* THISSIZE must not overrun a word boundary. Otherwise,
1780 extract_fixed_bit_field will call us again, and we will mutually
1781 recurse forever. */
1782 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1783 thissize = MIN (thissize, unit - thispos);
1784
1785 /* If OP0 is a register, then handle OFFSET here.
1786
1787 When handling multiword bitfields, extract_bit_field may pass
1788 down a word_mode SUBREG of a larger REG for a bitfield that actually
1789 crosses a word boundary. Thus, for a SUBREG, we must find
1790 the current word starting from the base register. */
1791 if (GET_CODE (op0) == SUBREG)
1792 {
1793 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1794 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1795 GET_MODE (SUBREG_REG (op0)));
1796 offset = 0;
1797 }
1798 else if (GET_CODE (op0) == REG)
1799 {
1800 word = operand_subword_force (op0, offset, GET_MODE (op0));
1801 offset = 0;
1802 }
1803 else
1804 word = op0;
1805
1806 /* Extract the parts in bit-counting order,
1807 whose meaning is determined by BYTES_PER_UNIT.
1808 OFFSET is in UNITs, and UNIT is in bits.
1809 extract_fixed_bit_field wants offset in bytes. */
1810 part = extract_fixed_bit_field (word_mode, word,
1811 offset * unit / BITS_PER_UNIT,
1812 thissize, thispos, 0, 1);
1813 bitsdone += thissize;
1814
1815 /* Shift this part into place for the result. */
1816 if (BYTES_BIG_ENDIAN)
1817 {
1818 if (bitsize != bitsdone)
1819 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1820 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1821 }
1822 else
1823 {
1824 if (bitsdone != thissize)
1825 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1826 build_int_2 (bitsdone - thissize, 0), 0, 1);
1827 }
1828
1829 if (first)
1830 result = part;
1831 else
1832 /* Combine the parts with bitwise or. This works
1833 because we extracted each part as an unsigned bit field. */
1834 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1835 OPTAB_LIB_WIDEN);
1836
1837 first = 0;
1838 }
1839
1840 /* Unsigned bit field: we are done. */
1841 if (unsignedp)
1842 return result;
1843 /* Signed bit field: sign-extend with two arithmetic shifts. */
1844 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1845 build_int_2 (BITS_PER_WORD - bitsize, 0),
1846 NULL_RTX, 0);
1847 return expand_shift (RSHIFT_EXPR, word_mode, result,
1848 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1849 }
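/* Editor's sketch (illustrative only): the split extraction above, for an
   unsigned field straddling two 32-bit words on a little-endian target.
   Names are hypothetical; assumes bitpos < 32 < bitpos + bitsize and
   bitsize <= 32, so both shift counts stay in range.  */
static unsigned int
extract_split_sketch (unsigned int w0, unsigned int w1,
		      unsigned int bitpos, unsigned int bitsize)
{
  unsigned int size0 = 32 - bitpos;		/* bits taken from W0 */
  unsigned int part0 = w0 >> bitpos;		/* low part, already at bit 0 */
  unsigned int part1 = w1 & ((1u << (bitsize - size0)) - 1);
  return part0 | (part1 << size0);		/* IOR the parts together */
}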
1850 \f
1851 /* Add INC into TARGET. */
1852
1853 void
1854 expand_inc (target, inc)
1855 rtx target, inc;
1856 {
1857 rtx value = expand_binop (GET_MODE (target), add_optab,
1858 target, inc,
1859 target, 0, OPTAB_LIB_WIDEN);
1860 if (value != target)
1861 emit_move_insn (target, value);
1862 }
1863
1864 /* Subtract DEC from TARGET. */
1865
1866 void
1867 expand_dec (target, dec)
1868 rtx target, dec;
1869 {
1870 rtx value = expand_binop (GET_MODE (target), sub_optab,
1871 target, dec,
1872 target, 0, OPTAB_LIB_WIDEN);
1873 if (value != target)
1874 emit_move_insn (target, value);
1875 }
1876 \f
1877 /* Output a shift instruction for expression code CODE,
1878 with SHIFTED being the rtx for the value to shift,
1879 and AMOUNT the tree for the amount to shift by.
1880 Store the result in the rtx TARGET, if that is convenient.
1881 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1882 Return the rtx for where the value is. */
1883
1884 rtx
1885 expand_shift (code, mode, shifted, amount, target, unsignedp)
1886 enum tree_code code;
1887 enum machine_mode mode;
1888 rtx shifted;
1889 tree amount;
1890 rtx target;
1891 int unsignedp;
1892 {
1893 rtx op1, temp = 0;
1894 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1895 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1896 int try;
1897
1898 /* We used to detect shift counts computed by NEGATE_EXPR
1899 and shift in the other direction, but that does not work
1900 on all machines. */
1901
1902 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1903
1904 #ifdef SHIFT_COUNT_TRUNCATED
1905 if (SHIFT_COUNT_TRUNCATED)
1906 {
1907 if (GET_CODE (op1) == CONST_INT
1908 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1909 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1910 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1911 % GET_MODE_BITSIZE (mode));
1912 else if (GET_CODE (op1) == SUBREG
1913 && SUBREG_BYTE (op1) == 0)
1914 op1 = SUBREG_REG (op1);
1915 }
1916 #endif
1917
1918 if (op1 == const0_rtx)
1919 return shifted;
1920
1921 for (try = 0; temp == 0 && try < 3; try++)
1922 {
1923 enum optab_methods methods;
1924
1925 if (try == 0)
1926 methods = OPTAB_DIRECT;
1927 else if (try == 1)
1928 methods = OPTAB_WIDEN;
1929 else
1930 methods = OPTAB_LIB_WIDEN;
1931
1932 if (rotate)
1933 {
1934 /* Widening does not work for rotation. */
1935 if (methods == OPTAB_WIDEN)
1936 continue;
1937 else if (methods == OPTAB_LIB_WIDEN)
1938 {
1939 /* If we have been unable to open-code this by a rotation,
1940 do it as the IOR of two shifts. I.e., to rotate A
1941 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1942 where C is the bitsize of A.
1943
1944 It is theoretically possible that the target machine might
1945 not be able to perform either shift and hence we would
1946 be making two libcalls rather than just the one for the
1947 shift (similarly if IOR could not be done). We will allow
1948 this extremely unlikely lossage to avoid complicating the
1949 code below. */
1950
1951 rtx subtarget = target == shifted ? 0 : target;
1952 rtx temp1;
1953 tree type = TREE_TYPE (amount);
1954 tree new_amount = make_tree (type, op1);
1955 tree other_amount
1956 = fold (build (MINUS_EXPR, type,
1957 convert (type,
1958 build_int_2 (GET_MODE_BITSIZE (mode),
1959 0)),
1960 amount));
1961
1962 shifted = force_reg (mode, shifted);
1963
1964 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1965 mode, shifted, new_amount, subtarget, 1);
1966 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1967 mode, shifted, other_amount, 0, 1);
1968 return expand_binop (mode, ior_optab, temp, temp1, target,
1969 unsignedp, methods);
1970 }
1971
1972 temp = expand_binop (mode,
1973 left ? rotl_optab : rotr_optab,
1974 shifted, op1, target, unsignedp, methods);
1975
1976 /* If we don't have the rotate, but we are rotating by a constant
1977 that is in range, try a rotate in the opposite direction. */
1978
1979 if (temp == 0 && GET_CODE (op1) == CONST_INT
1980 && INTVAL (op1) > 0
1981 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1982 temp = expand_binop (mode,
1983 left ? rotr_optab : rotl_optab,
1984 shifted,
1985 GEN_INT (GET_MODE_BITSIZE (mode)
1986 - INTVAL (op1)),
1987 target, unsignedp, methods);
1988 }
1989 else if (unsignedp)
1990 temp = expand_binop (mode,
1991 left ? ashl_optab : lshr_optab,
1992 shifted, op1, target, unsignedp, methods);
1993
1994 /* Do arithmetic shifts.
1995 Also, if we are going to widen the operand, we can just as well
1996 use an arithmetic right-shift instead of a logical one. */
1997 if (temp == 0 && ! rotate
1998 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1999 {
2000 enum optab_methods methods1 = methods;
2001
2002 /* If trying to widen a log shift to an arithmetic shift,
2003 don't accept an arithmetic shift of the same size. */
2004 if (unsignedp)
2005 methods1 = OPTAB_MUST_WIDEN;
2006
2007 /* Arithmetic shift */
2008
2009 temp = expand_binop (mode,
2010 left ? ashl_optab : ashr_optab,
2011 shifted, op1, target, unsignedp, methods1);
2012 }
2013
2014 /* We used to try extzv here for logical right shifts, but that was
2015 only useful for one machine, the VAX, and caused poor code
2016 generation there for lshrdi3, so the code was deleted and a
2017 define_expand for lshrsi3 was added to vax.md. */
2018 }
2019
2020 if (temp == 0)
2021 abort ();
2022 return temp;
2023 }
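/* Editor's sketch (not GCC code): the rotate-as-two-shifts fallback used
   above, instantiated for a 32-bit value.  Masking the count keeps both
   shifts well-defined when N is 0 or a multiple of 32.  */
static unsigned int
rotate_left_sketch (unsigned int a, unsigned int n)
{
  n &= 31;
  return (a << n) | (a >> ((32 - n) & 31));
}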
2024 \f
2025 enum alg_code { alg_zero, alg_m, alg_shift,
2026 alg_add_t_m2, alg_sub_t_m2,
2027 alg_add_factor, alg_sub_factor,
2028 alg_add_t2_m, alg_sub_t2_m,
2029 alg_add, alg_subtract, alg_factor, alg_shiftop };
2030
2031 /* This structure records a sequence of operations.
2032 `ops' is the number of operations recorded.
2033 `cost' is their total cost.
2034 The operations are stored in `op' and the corresponding
2035 logarithms of the integer coefficients in `log'.
2036
2037 These are the operations:
2038 alg_zero total := 0;
2039 alg_m total := multiplicand;
2040 alg_shift total := total * coeff;
2041 alg_add_t_m2 total := total + multiplicand * coeff;
2042 alg_sub_t_m2 total := total - multiplicand * coeff;
2043 alg_add_factor total := total * coeff + total;
2044 alg_sub_factor total := total * coeff - total;
2045 alg_add_t2_m total := total * coeff + multiplicand;
2046 alg_sub_t2_m total := total * coeff - multiplicand;
2047
2048 The first operation must be either alg_zero or alg_m. */
2049
2050 struct algorithm
2051 {
2052 short cost;
2053 short ops;
2054 /* The sizes of the OP and LOG fields are not directly related to the
2055 word size, but the worst case arises when the multiplicand has few
2056 consecutive ones or zeros, i.e., looks like 10101010101...
2057 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2058 wordsize operations in total. */
2059 enum alg_code op[MAX_BITS_PER_WORD];
2060 char log[MAX_BITS_PER_WORD];
2061 };
2062
2063 static void synth_mult PARAMS ((struct algorithm *,
2064 unsigned HOST_WIDE_INT,
2065 int));
2066 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2067 int, int,
2068 unsigned HOST_WIDE_INT *,
2069 int *, int *));
2070 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2071 int));
2072 /* Compute and return the best algorithm for multiplying by T.
2073 The algorithm must cost less than COST_LIMIT.
2074 If retval.cost >= COST_LIMIT, no algorithm was found and all
2075 other fields of the returned struct are undefined. */
2076
2077 static void
2078 synth_mult (alg_out, t, cost_limit)
2079 struct algorithm *alg_out;
2080 unsigned HOST_WIDE_INT t;
2081 int cost_limit;
2082 {
2083 int m;
2084 struct algorithm *alg_in, *best_alg;
2085 int cost;
2086 unsigned HOST_WIDE_INT q;
2087
2088 /* Indicate that no algorithm is yet found. If no algorithm
2089 is found, this value will be returned to indicate failure. */
2090 alg_out->cost = cost_limit;
2091
2092 if (cost_limit <= 0)
2093 return;
2094
2095 /* t == 1 can be done at zero cost. */
2096 if (t == 1)
2097 {
2098 alg_out->ops = 1;
2099 alg_out->cost = 0;
2100 alg_out->op[0] = alg_m;
2101 return;
2102 }
2103
2104 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2105 fail now. */
2106 if (t == 0)
2107 {
2108 if (zero_cost >= cost_limit)
2109 return;
2110 else
2111 {
2112 alg_out->ops = 1;
2113 alg_out->cost = zero_cost;
2114 alg_out->op[0] = alg_zero;
2115 return;
2116 }
2117 }
2118
2119 /* We'll need a couple of extra algorithm structures now. */
2120
2121 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2122 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2123
2124 /* If we have a group of zero bits at the low-order part of T, try
2125 multiplying by the remaining bits and then doing a shift. */
2126
2127 if ((t & 1) == 0)
2128 {
2129 m = floor_log2 (t & -t); /* m = number of low zero bits */
2130 if (m < BITS_PER_WORD)
2131 {
2132 q = t >> m;
2133 cost = shift_cost[m];
2134 synth_mult (alg_in, q, cost_limit - cost);
2135
2136 cost += alg_in->cost;
2137 if (cost < cost_limit)
2138 {
2139 struct algorithm *x;
2140 x = alg_in, alg_in = best_alg, best_alg = x;
2141 best_alg->log[best_alg->ops] = m;
2142 best_alg->op[best_alg->ops] = alg_shift;
2143 cost_limit = cost;
2144 }
2145 }
2146 }
2147
2148 /* If we have an odd number, add or subtract one. */
2149 if ((t & 1) != 0)
2150 {
2151 unsigned HOST_WIDE_INT w;
2152
2153 for (w = 1; (w & t) != 0; w <<= 1)
2154 ;
2155 /* If T was -1, then W will be zero after the loop. This is another
2156 case where T ends with ...111. Handling it as (T + 1) - 1, i.e.
2157 multiplying by (T + 1) and subtracting 1, produces slightly better
2158 code and makes algorithm selection much faster than treating it
2159 like the ...0111 case below. */
2160 if (w == 0
2161 || (w > 2
2162 /* Reject the case where t is 3.
2163 Thus we prefer addition in that case. */
2164 && t != 3))
2165 {
2166 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2167
2168 cost = add_cost;
2169 synth_mult (alg_in, t + 1, cost_limit - cost);
2170
2171 cost += alg_in->cost;
2172 if (cost < cost_limit)
2173 {
2174 struct algorithm *x;
2175 x = alg_in, alg_in = best_alg, best_alg = x;
2176 best_alg->log[best_alg->ops] = 0;
2177 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2178 cost_limit = cost;
2179 }
2180 }
2181 else
2182 {
2183 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2184
2185 cost = add_cost;
2186 synth_mult (alg_in, t - 1, cost_limit - cost);
2187
2188 cost += alg_in->cost;
2189 if (cost < cost_limit)
2190 {
2191 struct algorithm *x;
2192 x = alg_in, alg_in = best_alg, best_alg = x;
2193 best_alg->log[best_alg->ops] = 0;
2194 best_alg->op[best_alg->ops] = alg_add_t_m2;
2195 cost_limit = cost;
2196 }
2197 }
2198 }
2199
2200 /* Look for factors of t of the form
2201 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2202 If we find such a factor, we can multiply by t using an algorithm that
2203 multiplies by q, then shifts the result left by m and adds or subtracts it.
2204
2205 We search for large factors first and loop down, even though large factors
2206 are less probable than small ones; if we find a large factor we will find a
2207 good sequence quickly, and therefore be able to prune (by decreasing
2208 COST_LIMIT) the search. */
2209
2210 for (m = floor_log2 (t - 1); m >= 2; m--)
2211 {
2212 unsigned HOST_WIDE_INT d;
2213
2214 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2215 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2216 {
2217 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2218 synth_mult (alg_in, t / d, cost_limit - cost);
2219
2220 cost += alg_in->cost;
2221 if (cost < cost_limit)
2222 {
2223 struct algorithm *x;
2224 x = alg_in, alg_in = best_alg, best_alg = x;
2225 best_alg->log[best_alg->ops] = m;
2226 best_alg->op[best_alg->ops] = alg_add_factor;
2227 cost_limit = cost;
2228 }
2229 /* Other factors will have been taken care of in the recursion. */
2230 break;
2231 }
2232
2233 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2234 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2235 {
2236 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2237 synth_mult (alg_in, t / d, cost_limit - cost);
2238
2239 cost += alg_in->cost;
2240 if (cost < cost_limit)
2241 {
2242 struct algorithm *x;
2243 x = alg_in, alg_in = best_alg, best_alg = x;
2244 best_alg->log[best_alg->ops] = m;
2245 best_alg->op[best_alg->ops] = alg_sub_factor;
2246 cost_limit = cost;
2247 }
2248 break;
2249 }
2250 }
2251
2252 /* Try shift-and-add (load effective address) instructions,
2253 i.e. do a*3, a*5, a*9. */
2254 if ((t & 1) != 0)
2255 {
2256 q = t - 1;
2257 q = q & -q;
2258 m = exact_log2 (q);
2259 if (m >= 0 && m < BITS_PER_WORD)
2260 {
2261 cost = shiftadd_cost[m];
2262 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2263
2264 cost += alg_in->cost;
2265 if (cost < cost_limit)
2266 {
2267 struct algorithm *x;
2268 x = alg_in, alg_in = best_alg, best_alg = x;
2269 best_alg->log[best_alg->ops] = m;
2270 best_alg->op[best_alg->ops] = alg_add_t2_m;
2271 cost_limit = cost;
2272 }
2273 }
2274
2275 q = t + 1;
2276 q = q & -q;
2277 m = exact_log2 (q);
2278 if (m >= 0 && m < BITS_PER_WORD)
2279 {
2280 cost = shiftsub_cost[m];
2281 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2282
2283 cost += alg_in->cost;
2284 if (cost < cost_limit)
2285 {
2286 struct algorithm *x;
2287 x = alg_in, alg_in = best_alg, best_alg = x;
2288 best_alg->log[best_alg->ops] = m;
2289 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2290 cost_limit = cost;
2291 }
2292 }
2293 }
2294
2295 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2296 we have not found any algorithm. */
2297 if (cost_limit == alg_out->cost)
2298 return;
2299
2300 /* If the sequence is too long for `struct algorithm'
2301 to record, make this search fail. */
2302 if (best_alg->ops == MAX_BITS_PER_WORD)
2303 return;
2304
2305 /* Copy the algorithm from temporary space to the space at alg_out.
2306 We avoid using structure assignment because the majority of
2307 best_alg is normally undefined, and this is a critical function. */
2308 alg_out->ops = best_alg->ops + 1;
2309 alg_out->cost = cost_limit;
2310 memcpy (alg_out->op, best_alg->op,
2311 alg_out->ops * sizeof *alg_out->op);
2312 memcpy (alg_out->log, best_alg->log,
2313 alg_out->ops * sizeof *alg_out->log);
2314 }
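/* Editor's worked example (illustrative): for t = 10, synth_mult can find
     op[0] = alg_m        total := x
     op[1] = alg_add_t2_m total := total*4 + x   (log[1] = 2)
     op[2] = alg_shift    total := total*2       (log[2] = 1)
   i.e. x*10 = ((x << 2) + x) << 1, costing two shifts and an add
   instead of a multiply insn.  */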
2315 \f
2316 /* Perform a multiplication and return an rtx for the result.
2317 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2318 TARGET is a suggestion for where to store the result (an rtx).
2319
2320 We check specially for a constant integer as OP1.
2321 If you want this check for OP0 as well, then before calling
2322 you should swap the two operands if OP0 would be constant. */
2323
2324 rtx
2325 expand_mult (mode, op0, op1, target, unsignedp)
2326 enum machine_mode mode;
2327 rtx op0, op1, target;
2328 int unsignedp;
2329 {
2330 rtx const_op1 = op1;
2331
2332 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2333 less than or equal in size to `unsigned int' this doesn't matter.
2334 If the mode is larger than `unsigned int', then synth_mult works only
2335 if the constant value exactly fits in an `unsigned int' without any
2336 truncation. This means that multiplying by negative values does
2337 not work; results are off by 2^32 on a 32-bit machine. */
2338
2339 /* If we are multiplying in DImode, it may still be a win
2340 to try to work with shifts and adds. */
2341 if (GET_CODE (op1) == CONST_DOUBLE
2342 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2343 && HOST_BITS_PER_INT >= BITS_PER_WORD
2344 && CONST_DOUBLE_HIGH (op1) == 0)
2345 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2346 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2347 && GET_CODE (op1) == CONST_INT
2348 && INTVAL (op1) < 0)
2349 const_op1 = 0;
2350
2351 /* We used to test optimize here, on the grounds that it's better to
2352 produce a smaller program when -O is not used.
2353 But this causes such a terrible slowdown sometimes
2354 that it seems better to use synth_mult always. */
2355
2356 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2357 && (unsignedp || ! flag_trapv))
2358 {
2359 struct algorithm alg;
2360 struct algorithm alg2;
2361 HOST_WIDE_INT val = INTVAL (op1);
2362 HOST_WIDE_INT val_so_far;
2363 rtx insn;
2364 int mult_cost;
2365 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2366
2367 /* op0 must be register to make mult_cost match the precomputed
2368 shiftadd_cost array. */
2369 op0 = force_reg (mode, op0);
2370
2371 /* Try to do the computation three ways: multiply by the negative of OP1
2372 and then negate, do the multiplication directly, or do multiplication
2373 by OP1 - 1. */
2374
2375 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2376 mult_cost = MIN (12 * add_cost, mult_cost);
2377
2378 synth_mult (&alg, val, mult_cost);
2379
2380 /* This works only if the inverted value actually fits in an
2381 `unsigned int' */
2382 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2383 {
2384 synth_mult (&alg2, - val,
2385 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2386 if (alg2.cost + negate_cost < alg.cost)
2387 alg = alg2, variant = negate_variant;
2388 }
2389
2390 /* This proves very useful for division-by-constant. */
2391 synth_mult (&alg2, val - 1,
2392 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2393 if (alg2.cost + add_cost < alg.cost)
2394 alg = alg2, variant = add_variant;
2395
2396 if (alg.cost < mult_cost)
2397 {
2398 /* We found something cheaper than a multiply insn. */
2399 int opno;
2400 rtx accum, tem;
2401 enum machine_mode nmode;
2402
2403 op0 = protect_from_queue (op0, 0);
2404
2405 /* Avoid referencing memory over and over.
2406 For speed, but also for correctness when mem is volatile. */
2407 if (GET_CODE (op0) == MEM)
2408 op0 = force_reg (mode, op0);
2409
2410 /* ACCUM starts out either as OP0 or as a zero, depending on
2411 the first operation. */
2412
2413 if (alg.op[0] == alg_zero)
2414 {
2415 accum = copy_to_mode_reg (mode, const0_rtx);
2416 val_so_far = 0;
2417 }
2418 else if (alg.op[0] == alg_m)
2419 {
2420 accum = copy_to_mode_reg (mode, op0);
2421 val_so_far = 1;
2422 }
2423 else
2424 abort ();
2425
2426 for (opno = 1; opno < alg.ops; opno++)
2427 {
2428 int log = alg.log[opno];
2429 int preserve = preserve_subexpressions_p ();
2430 rtx shift_subtarget = preserve ? 0 : accum;
2431 rtx add_target
2432 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2433 && ! preserve)
2434 ? target : 0;
2435 rtx accum_target = preserve ? 0 : accum;
2436
2437 switch (alg.op[opno])
2438 {
2439 case alg_shift:
2440 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2441 build_int_2 (log, 0), NULL_RTX, 0);
2442 val_so_far <<= log;
2443 break;
2444
2445 case alg_add_t_m2:
2446 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2447 build_int_2 (log, 0), NULL_RTX, 0);
2448 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2449 add_target
2450 ? add_target : accum_target);
2451 val_so_far += (HOST_WIDE_INT) 1 << log;
2452 break;
2453
2454 case alg_sub_t_m2:
2455 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2456 build_int_2 (log, 0), NULL_RTX, 0);
2457 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2458 add_target
2459 ? add_target : accum_target);
2460 val_so_far -= (HOST_WIDE_INT) 1 << log;
2461 break;
2462
2463 case alg_add_t2_m:
2464 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2465 build_int_2 (log, 0), shift_subtarget,
2466 0);
2467 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2468 add_target
2469 ? add_target : accum_target);
2470 val_so_far = (val_so_far << log) + 1;
2471 break;
2472
2473 case alg_sub_t2_m:
2474 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2475 build_int_2 (log, 0), shift_subtarget,
2476 0);
2477 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2478 add_target
2479 ? add_target : accum_target);
2480 val_so_far = (val_so_far << log) - 1;
2481 break;
2482
2483 case alg_add_factor:
2484 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2485 build_int_2 (log, 0), NULL_RTX, 0);
2486 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2487 add_target
2488 ? add_target : accum_target);
2489 val_so_far += val_so_far << log;
2490 break;
2491
2492 case alg_sub_factor:
2493 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2494 build_int_2 (log, 0), NULL_RTX, 0);
2495 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2496 (add_target ? add_target
2497 : preserve ? 0 : tem));
2498 val_so_far = (val_so_far << log) - val_so_far;
2499 break;
2500
2501 default:
2502 abort ();
2503 }
2504
2505 /* Write a REG_EQUAL note on the last insn so that we can cse
2506 multiplication sequences. Note that if ACCUM is a SUBREG,
2507 we've set the inner register and must properly indicate
2508 that. */
2509
2510 tem = op0, nmode = mode;
2511 if (GET_CODE (accum) == SUBREG)
2512 {
2513 nmode = GET_MODE (SUBREG_REG (accum));
2514 tem = gen_lowpart (nmode, op0);
2515 }
2516
2517 insn = get_last_insn ();
2518 set_unique_reg_note (insn,
2519 REG_EQUAL,
2520 gen_rtx_MULT (nmode, tem,
2521 GEN_INT (val_so_far)));
2522 }
2523
2524 if (variant == negate_variant)
2525 {
2526 val_so_far = - val_so_far;
2527 accum = expand_unop (mode, neg_optab, accum, target, 0);
2528 }
2529 else if (variant == add_variant)
2530 {
2531 val_so_far = val_so_far + 1;
2532 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2533 }
2534
2535 if (val != val_so_far)
2536 abort ();
2537
2538 return accum;
2539 }
2540 }
2541
2542 /* This used to use umul_optab if unsigned, but for non-widening multiply
2543 there is no difference between signed and unsigned. */
2544 op0 = expand_binop (mode,
2545 ! unsignedp
2546 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2547 ? smulv_optab : smul_optab,
2548 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2549 if (op0 == 0)
2550 abort ();
2551 return op0;
2552 }
2553 \f
2554 /* Return the smallest n such that 2**n >= X. */
2555
2556 int
2557 ceil_log2 (x)
2558 unsigned HOST_WIDE_INT x;
2559 {
2560 return floor_log2 (x - 1) + 1;
2561 }
2562
2563 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2564 replace division by D, and put the least significant N bits of the result
2565 in *MULTIPLIER_PTR and return the most significant bit.
2566
2567 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2568 needed precision is in PRECISION (should be <= N).
2569
2570 PRECISION should be as small as possible so this function can choose
2571 multiplier more freely.
2572
2573 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2574 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2575
2576 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2577 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2578
2579 static
2580 unsigned HOST_WIDE_INT
2581 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2582 unsigned HOST_WIDE_INT d;
2583 int n;
2584 int precision;
2585 unsigned HOST_WIDE_INT *multiplier_ptr;
2586 int *post_shift_ptr;
2587 int *lgup_ptr;
2588 {
2589 HOST_WIDE_INT mhigh_hi, mlow_hi;
2590 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2591 int lgup, post_shift;
2592 int pow, pow2;
2593 unsigned HOST_WIDE_INT nl, dummy1;
2594 HOST_WIDE_INT nh, dummy2;
2595
2596 /* lgup = ceil(log2(divisor)); */
2597 lgup = ceil_log2 (d);
2598
2599 if (lgup > n)
2600 abort ();
2601
2602 pow = n + lgup;
2603 pow2 = n + lgup - precision;
2604
2605 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2606 {
2607 /* We could handle this with some effort, but this case is much better
2608 handled directly with a scc insn, so rely on caller using that. */
2609 abort ();
2610 }
2611
2612 /* mlow = 2^(N + lgup)/d */
2613 if (pow >= HOST_BITS_PER_WIDE_INT)
2614 {
2615 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2616 nl = 0;
2617 }
2618 else
2619 {
2620 nh = 0;
2621 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2622 }
2623 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2624 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2625
2626 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2627 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2628 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2629 else
2630 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2631 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2632 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2633
2634 if (mhigh_hi && nh - d >= d)
2635 abort ();
2636 if (mhigh_hi > 1 || mlow_hi > 1)
2637 abort ();
2638 /* Assert that mlow < mhigh. */
2639 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2640 abort ();
2641
2642 /* If precision == N, then mlow, mhigh exceed 2^N
2643 (but they do not exceed 2^(N+1)). */
2644
2645 /* Reduce to lowest terms */
2646 for (post_shift = lgup; post_shift > 0; post_shift--)
2647 {
2648 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2649 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2650 if (ml_lo >= mh_lo)
2651 break;
2652
2653 mlow_hi = 0;
2654 mlow_lo = ml_lo;
2655 mhigh_hi = 0;
2656 mhigh_lo = mh_lo;
2657 }
2658
2659 *post_shift_ptr = post_shift;
2660 *lgup_ptr = lgup;
2661 if (n < HOST_BITS_PER_WIDE_INT)
2662 {
2663 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2664 *multiplier_ptr = mhigh_lo & mask;
2665 return mhigh_lo >= mask;
2666 }
2667 else
2668 {
2669 *multiplier_ptr = mhigh_lo;
2670 return mhigh_hi;
2671 }
2672 }
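/* Editor's worked example (illustrative): choose_multiplier (7, 32, 32)
   yields lgup = 3 and the 33-bit multiplier 2^32 + 0x24924925; the low
   32 bits go to *MULTIPLIER_PTR, the return value 1 reports the 33rd
   bit, and *POST_SHIFT_PTR = 3.  expand_divmod turns that into the
   sequence sketched below; the helper is hypothetical, not GCC code.  */
static unsigned int
udiv7_sketch (unsigned int x)
{
  unsigned int t1
    = (unsigned int) (((unsigned long long) x * 0x24924925u) >> 32);
  return (t1 + ((x - t1) >> 1)) >> 2;	/* >> (post_shift - 1) after fixup */
}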
2673
2674 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2675 congruent to 1 (mod 2**N). */
2676
2677 static unsigned HOST_WIDE_INT
2678 invert_mod2n (x, n)
2679 unsigned HOST_WIDE_INT x;
2680 int n;
2681 {
2682 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2683
2684 /* The algorithm notes that the choice y = x satisfies
2685 x*y == 1 mod 2^3, since x is assumed odd.
2686 Each iteration doubles the number of bits of significance in y. */
2687
2688 unsigned HOST_WIDE_INT mask;
2689 unsigned HOST_WIDE_INT y = x;
2690 int nbit = 3;
2691
2692 mask = (n == HOST_BITS_PER_WIDE_INT
2693 ? ~(unsigned HOST_WIDE_INT) 0
2694 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2695
2696 while (nbit < n)
2697 {
2698 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2699 nbit *= 2;
2700 }
2701 return y;
2702 }
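/* Editor's sketch (not GCC code): the same Newton iteration for a fixed
   32-bit width.  For odd x, y = x is already correct mod 2^3, and each
   step doubles the number of correct low-order bits.  */
static unsigned int
invert_mod2_32_sketch (unsigned int x)
{
  unsigned int y = x;			/* x*y == 1 (mod 2^3) for odd x */
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);		/* now correct mod 2^(2*nbit) */
  return y;				/* x * y == 1 (mod 2^32) */
}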
2703
2704 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2705 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2706 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2707 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2708 become signed.
2709
2710 The result is put in TARGET if that is convenient.
2711
2712 MODE is the mode of operation. */
2713
2714 rtx
2715 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2716 enum machine_mode mode;
2717 rtx adj_operand, op0, op1, target;
2718 int unsignedp;
2719 {
2720 rtx tem;
2721 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2722
2723 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2724 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2725 NULL_RTX, 0);
2726 tem = expand_and (tem, op1, NULL_RTX);
2727 adj_operand
2728 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2729 adj_operand);
2730
2731 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2732 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2733 NULL_RTX, 0);
2734 tem = expand_and (tem, op0, NULL_RTX);
2735 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2736 target);
2737
2738 return target;
2739 }
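/* Editor's note (illustrative): the two shift/and/add-or-subtract steps
   above implement the identity  mulhi_s (x, y) = mulhi_u (x, y)
   - (x < 0 ? y : 0) - (y < 0 ? x : 0)  (mod 2^N), sketched here for
   32 bits with hypothetical names; MULHI_U is assumed to be the high
   word of the unsigned 32x32->64 product.  */
static int
mulhi_s_from_u_sketch (unsigned int x, unsigned int y, unsigned int mulhi_u)
{
  unsigned int adj = mulhi_u;
  adj -= ((int) x < 0) ? y : 0;	/* subtract Y where X's sign bit is set */
  adj -= ((int) y < 0) ? x : 0;	/* and X where Y's sign bit is set */
  return (int) adj;		/* the signed high half, mod 2^32 */
}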
2740
2741 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2742 in TARGET if that is convenient, and return where the result is. If the
2743 operation cannot be performed, 0 is returned.
2744
2745 MODE is the mode of operation and result.
2746
2747 UNSIGNEDP nonzero means unsigned multiply.
2748
2749 MAX_COST is the total allowed cost for the expanded RTL. */
2750
2751 rtx
2752 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2753 enum machine_mode mode;
2754 rtx op0, target;
2755 unsigned HOST_WIDE_INT cnst1;
2756 int unsignedp;
2757 int max_cost;
2758 {
2759 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2760 optab mul_highpart_optab;
2761 optab moptab;
2762 rtx tem;
2763 int size = GET_MODE_BITSIZE (mode);
2764 rtx op1, wide_op1;
2765
2766 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2767 if (size > HOST_BITS_PER_WIDE_INT)
2768 abort ();
2769
2770 op1 = GEN_INT (trunc_int_for_mode (cnst1, mode));
2771
2772 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2773 wide_op1 = op1;
2774 else
2775 wide_op1
2776 = immed_double_const (cnst1,
2777 (unsignedp
2778 ? (HOST_WIDE_INT) 0
2779 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2780 wider_mode);
2781
2782 /* expand_mult handles constant multiplication of word_mode
2783 or narrower. It does a poor job for large modes. */
2784 if (size < BITS_PER_WORD
2785 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2786 {
2787 /* We have to do this, since expand_binop doesn't do conversion for
2788 multiply. Maybe change expand_binop to handle widening multiply? */
2789 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2790
2791 /* We know that this can't have signed overflow, so pretend this is
2792 an unsigned multiply. */
2793 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2794 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2795 build_int_2 (size, 0), NULL_RTX, 1);
2796 return convert_modes (mode, wider_mode, tem, unsignedp);
2797 }
2798
2799 if (target == 0)
2800 target = gen_reg_rtx (mode);
2801
2802 /* First, try using a multiplication insn that generates only the needed
2803 high part of the product, with the signedness given by UNSIGNEDP. */
2804 if (mul_highpart_cost[(int) mode] < max_cost)
2805 {
2806 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2807 target = expand_binop (mode, mul_highpart_optab,
2808 op0, op1, target, unsignedp, OPTAB_DIRECT);
2809 if (target)
2810 return target;
2811 }
2812
2813 /* Second, try the same, but with the opposite signedness; the result
2814 must then be adjusted after the multiplication. */
2815 if (size - 1 < BITS_PER_WORD
2816 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2817 < max_cost))
2818 {
2819 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2820 target = expand_binop (mode, mul_highpart_optab,
2821 op0, op1, target, unsignedp, OPTAB_DIRECT);
2822 if (target)
2823 /* We used the wrong signedness. Adjust the result. */
2824 return expand_mult_highpart_adjust (mode, target, op0,
2825 op1, target, unsignedp);
2826 }
2827
2828 /* Try widening multiplication. */
2829 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2830 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2831 && mul_widen_cost[(int) wider_mode] < max_cost)
2832 {
2833 op1 = force_reg (mode, op1);
2834 goto try;
2835 }
2836
2837 /* Try widening the mode and perform a non-widening multiplication. */
2838 moptab = smul_optab;
2839 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2840 && size - 1 < BITS_PER_WORD
2841 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2842 {
2843 op1 = wide_op1;
2844 goto try;
2845 }
2846
2847 /* Try widening multiplication of opposite signedness, and adjust. */
2848 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2849 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2850 && size - 1 < BITS_PER_WORD
2851 && (mul_widen_cost[(int) wider_mode]
2852 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2853 {
2854 rtx regop1 = force_reg (mode, op1);
2855 tem = expand_binop (wider_mode, moptab, op0, regop1,
2856 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2857 if (tem != 0)
2858 {
2859 /* Extract the high half of the just generated product. */
2860 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2861 build_int_2 (size, 0), NULL_RTX, 1);
2862 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2863 /* We used the wrong signedness. Adjust the result. */
2864 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2865 target, unsignedp);
2866 }
2867 }
2868
2869 return 0;
2870
2871 try:
2872 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2873 tem = expand_binop (wider_mode, moptab, op0, op1,
2874 NULL_RTX, unsignedp, OPTAB_WIDEN);
2875 if (tem == 0)
2876 return 0;
2877
2878 /* Extract the high half of the just generated product. */
2879 if (mode == word_mode)
2880 {
2881 return gen_highpart (mode, tem);
2882 }
2883 else
2884 {
2885 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2886 build_int_2 (size, 0), NULL_RTX, 1);
2887 return convert_modes (mode, wider_mode, tem, unsignedp);
2888 }
2889 }
2890 \f
2891 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2892 if that is convenient, and returning where the result is.
2893 You may request either the quotient or the remainder as the result;
2894 specify REM_FLAG nonzero to get the remainder.
2895
2896 CODE is the expression code for which kind of division this is;
2897 it controls how rounding is done. MODE is the machine mode to use.
2898 UNSIGNEDP nonzero means do unsigned division. */
2899
2900 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2901 and then correct it by or'ing in missing high bits
2902 if result of ANDI is nonzero.
2903 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2904 This could optimize to a bfexts instruction.
2905 But C doesn't use these operations, so their optimizations are
2906 left for later. */
2907 /* ??? For modulo, we don't actually need the highpart of the first product;
2908 the low part will do nicely. And for small divisors, the second multiply
2909 can also be a low-part only multiply or even be completely left out.
2910 E.g. to calculate the remainder of a division by 3 with a 32 bit
2911 multiply, multiply with 0x55555556 and extract the upper two bits;
2912 the result is exact for inputs up to 0x1fffffff.
2913 The input range can be reduced by using cross-sum rules.
2914 For odd divisors >= 3, the following table gives right shift counts
2915 so that if a number is shifted by an integer multiple of the given
2916 amount, the remainder stays the same:
2917 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2918 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2919 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2920 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2921 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2922
2923 Cross-sum rules for even numbers can be derived by leaving as many bits
2924 to the right alone as the divisor has zeros to the right.
2925 E.g. if x is an unsigned 32 bit number:
2926 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2927 */
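/* Editor's sketch of the mod-3 trick described above (illustrative, not
   GCC code): the top two bits of the low 32-bit product give the
   remainder, exactly for inputs up to 0x1fffffff.  */
static unsigned int
mod3_sketch (unsigned int x)
{
  return (x * 0x55555556u) >> 30;	/* x mod 3 for x <= 0x1fffffff */
}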
2928
2929 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2930
2931 rtx
2932 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2933 int rem_flag;
2934 enum tree_code code;
2935 enum machine_mode mode;
2936 rtx op0, op1, target;
2937 int unsignedp;
2938 {
2939 enum machine_mode compute_mode;
2940 rtx tquotient;
2941 rtx quotient = 0, remainder = 0;
2942 rtx last;
2943 int size;
2944 rtx insn, set;
2945 optab optab1, optab2;
2946 int op1_is_constant, op1_is_pow2;
2947 int max_cost, extra_cost;
2948 static HOST_WIDE_INT last_div_const = 0;
2949
2950 op1_is_constant = GET_CODE (op1) == CONST_INT;
2951 op1_is_pow2 = (op1_is_constant
2952 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2953 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2954
2955 /*
2956 This is the structure of expand_divmod:
2957
2958 First comes code to fix up the operands so we can perform the operations
2959 correctly and efficiently.
2960
2961 Second comes a switch statement with code specific for each rounding mode.
2962 For some special operands this code emits all RTL for the desired
2963 operation; in other cases it generates only a quotient and stores it in
2964 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2965 to indicate that it has not done anything.
2966
2967 Last comes code that finishes the operation. If QUOTIENT is set and
2968 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2969 QUOTIENT is not set, it is computed using trunc rounding.
2970
2971 We try to generate special code for division and remainder when OP1 is a
2972 constant. If |OP1| = 2**n we can use shifts and some other fast
2973 operations. For other values of OP1, we compute a carefully selected
2974 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2975 by m.
2976
2977 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2978 half of the product. Different strategies for generating the product are
2979 implemented in expand_mult_highpart.
2980
2981 If what we actually want is the remainder, we generate that by another
2982 by-constant multiplication and a subtraction. */
2983
2984 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2985 code below will malfunction if we are, so check here and handle
2986 the special case if so. */
2987 if (op1 == const1_rtx)
2988 return rem_flag ? const0_rtx : op0;
2989
2990 /* When dividing by -1, we could get an overflow.
2991 negv_optab can handle overflows. */
2992 if (! unsignedp && op1 == constm1_rtx)
2993 {
2994 if (rem_flag)
2995 return const0_rtx;
2996 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
2997 ? negv_optab : neg_optab, op0, target, 0);
2998 }
2999
3000 if (target
3001 /* Don't use the function value register as a target
3002 since we have to read it as well as write it,
3003 and function-inlining gets confused by this. */
3004 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3005 /* Don't clobber an operand while doing a multi-step calculation. */
3006 || ((rem_flag || op1_is_constant)
3007 && (reg_mentioned_p (target, op0)
3008 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3009 || reg_mentioned_p (target, op1)
3010 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3011 target = 0;
3012
3013 /* Get the mode in which to perform this computation. Normally it will
3014 be MODE, but sometimes we can't do the desired operation in MODE.
3015 If so, pick a wider mode in which we can do the operation. Convert
3016 to that mode at the start to avoid repeated conversions.
3017
3018 First see what operations we need. These depend on the expression
3019 we are evaluating. (We assume that divxx3 insns exist under the
3020 same conditions as modxx3 insns and that these insns don't normally
3021 fail. If these assumptions are not correct, we may generate less
3022 efficient code in some cases.)
3023
3024 Then see if we find a mode in which we can open-code that operation
3025 (either a division, modulus, or shift). Finally, check for the smallest
3026 mode for which we can do the operation with a library call. */
3027
3028 /* We might want to refine this now that we have division-by-constant
3029 optimization. Since expand_mult_highpart tries so many variants, it is
3030 not straightforward to generalize this. Maybe we should make an array
3031 of possible modes in init_expmed? Save this for GCC 2.7. */
3032
3033 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3034 : (unsignedp ? udiv_optab : sdiv_optab));
3035 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3036
3037 for (compute_mode = mode; compute_mode != VOIDmode;
3038 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3039 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3040 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3041 break;
3042
3043 if (compute_mode == VOIDmode)
3044 for (compute_mode = mode; compute_mode != VOIDmode;
3045 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3046 if (optab1->handlers[(int) compute_mode].libfunc
3047 || optab2->handlers[(int) compute_mode].libfunc)
3048 break;
3049
3050 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3051 in expand_binop. */
3052 if (compute_mode == VOIDmode)
3053 compute_mode = mode;
3054
3055 if (target && GET_MODE (target) == compute_mode)
3056 tquotient = target;
3057 else
3058 tquotient = gen_reg_rtx (compute_mode);
3059
3060 size = GET_MODE_BITSIZE (compute_mode);
3061 #if 0
3062 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3063 (mode), and thereby get better code when OP1 is a constant. Do that
3064 later. It will require going over all usages of SIZE below. */
3065 size = GET_MODE_BITSIZE (mode);
3066 #endif
3067
3068 /* Only deduct something for a REM if the last divide done was
3069 for a different constant. Then set the constant of the last
3070 divide. */
3071 max_cost = div_cost[(int) compute_mode]
3072 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3073 && INTVAL (op1) == last_div_const)
3074 ? mul_cost[(int) compute_mode] + add_cost : 0);
3075
3076 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3077
3078 /* Now convert to the best mode to use. */
3079 if (compute_mode != mode)
3080 {
3081 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3082 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3083
3084 /* convert_modes may have placed op1 into a register, so we
3085 must recompute the following. */
3086 op1_is_constant = GET_CODE (op1) == CONST_INT;
3087 op1_is_pow2 = (op1_is_constant
3088 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3089 || (! unsignedp
3090 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3091 }
3092
3093 /* If one of the operands is a volatile MEM, copy it into a register. */
3094
3095 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3096 op0 = force_reg (compute_mode, op0);
3097 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3098 op1 = force_reg (compute_mode, op1);
3099
3100 /* If we need the remainder or if OP1 is constant, we need to
3101 put OP0 in a register in case it has any queued subexpressions. */
3102 if (rem_flag || op1_is_constant)
3103 op0 = force_reg (compute_mode, op0);
3104
3105 last = get_last_insn ();
3106
3107 /* Promote floor rounding to trunc rounding for unsigned operations. */
3108 if (unsignedp)
3109 {
3110 if (code == FLOOR_DIV_EXPR)
3111 code = TRUNC_DIV_EXPR;
3112 if (code == FLOOR_MOD_EXPR)
3113 code = TRUNC_MOD_EXPR;
3114 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3115 code = TRUNC_DIV_EXPR;
3116 }
3117
3118 if (op1 != const0_rtx)
3119 switch (code)
3120 {
3121 case TRUNC_MOD_EXPR:
3122 case TRUNC_DIV_EXPR:
3123 if (op1_is_constant)
3124 {
3125 if (unsignedp)
3126 {
3127 unsigned HOST_WIDE_INT mh, ml;
3128 int pre_shift, post_shift;
3129 int dummy;
3130 unsigned HOST_WIDE_INT d = INTVAL (op1);
3131
3132 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3133 {
3134 pre_shift = floor_log2 (d);
3135 if (rem_flag)
3136 {
3137 remainder
3138 = expand_binop (compute_mode, and_optab, op0,
3139 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3140 remainder, 1,
3141 OPTAB_LIB_WIDEN);
3142 if (remainder)
3143 return gen_lowpart (mode, remainder);
3144 }
3145 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3146 build_int_2 (pre_shift, 0),
3147 tquotient, 1);
3148 }
3149 else if (size <= HOST_BITS_PER_WIDE_INT)
3150 {
3151 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3152 {
3153 /* Most significant bit of divisor is set; emit an scc
3154 insn. */
3155 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3156 compute_mode, 1, 1);
3157 if (quotient == 0)
3158 goto fail1;
3159 }
3160 else
3161 {
3162 /* Find a suitable multiplier and right shift count
3163 instead of multiplying with D. */
3164
3165 mh = choose_multiplier (d, size, size,
3166 &ml, &post_shift, &dummy);
3167
3168 /* If the suggested multiplier is more than SIZE bits,
3169 we can do better for even divisors, using an
3170 initial right shift. */
3171 if (mh != 0 && (d & 1) == 0)
3172 {
3173 pre_shift = floor_log2 (d & -d);
3174 mh = choose_multiplier (d >> pre_shift, size,
3175 size - pre_shift,
3176 &ml, &post_shift, &dummy);
3177 if (mh)
3178 abort ();
3179 }
3180 else
3181 pre_shift = 0;
3182
3183 if (mh != 0)
3184 {
3185 rtx t1, t2, t3, t4;
3186
3187 if (post_shift - 1 >= BITS_PER_WORD)
3188 goto fail1;
3189
3190 extra_cost = (shift_cost[post_shift - 1]
3191 + shift_cost[1] + 2 * add_cost);
3192 t1 = expand_mult_highpart (compute_mode, op0, ml,
3193 NULL_RTX, 1,
3194 max_cost - extra_cost);
3195 if (t1 == 0)
3196 goto fail1;
3197 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3198 op0, t1),
3199 NULL_RTX);
3200 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3201 build_int_2 (1, 0), NULL_RTX,1);
3202 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3203 t1, t3),
3204 NULL_RTX);
3205 quotient
3206 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3207 build_int_2 (post_shift - 1, 0),
3208 tquotient, 1);
3209 }
3210 else
3211 {
3212 rtx t1, t2;
3213
3214 if (pre_shift >= BITS_PER_WORD
3215 || post_shift >= BITS_PER_WORD)
3216 goto fail1;
3217
3218 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3219 build_int_2 (pre_shift, 0),
3220 NULL_RTX, 1);
3221 extra_cost = (shift_cost[pre_shift]
3222 + shift_cost[post_shift]);
3223 t2 = expand_mult_highpart (compute_mode, t1, ml,
3224 NULL_RTX, 1,
3225 max_cost - extra_cost);
3226 if (t2 == 0)
3227 goto fail1;
3228 quotient
3229 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3230 build_int_2 (post_shift, 0),
3231 tquotient, 1);
3232 }
3233 }
3234 }
3235 else /* Mode too wide to use the tricky code */
3236 break;
3237
3238 insn = get_last_insn ();
3239 if (insn != last
3240 && (set = single_set (insn)) != 0
3241 && SET_DEST (set) == quotient)
3242 set_unique_reg_note (insn,
3243 REG_EQUAL,
3244 gen_rtx_UDIV (compute_mode, op0, op1));
3245 }
3246 else /* TRUNC_DIV, signed */
3247 {
3248 unsigned HOST_WIDE_INT ml;
3249 int lgup, post_shift;
3250 HOST_WIDE_INT d = INTVAL (op1);
3251 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3252
3253 /* n rem d = n rem -d */
3254 if (rem_flag && d < 0)
3255 {
3256 d = abs_d;
3257 op1 = GEN_INT (trunc_int_for_mode (abs_d, compute_mode));
3258 }
3259
3260 if (d == 1)
3261 quotient = op0;
3262 else if (d == -1)
3263 quotient = expand_unop (compute_mode, neg_optab, op0,
3264 tquotient, 0);
3265 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3266 {
3267 /* This case is not handled correctly below. */
3268 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3269 compute_mode, 1, 1);
3270 if (quotient == 0)
3271 goto fail1;
3272 }
3273 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3274 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3275 /* ??? The cheap metric is computed only for
3276 word_mode. If this operation is wider, this may
3277 not be so. Assume true if the optab has an
3278 expander for this mode. */
3279 && (((rem_flag ? smod_optab : sdiv_optab)
3280 ->handlers[(int) compute_mode].insn_code
3281 != CODE_FOR_nothing)
3282 || (sdivmod_optab->handlers[(int) compute_mode]
3283 .insn_code != CODE_FOR_nothing)))
3284 ;
3285 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3286 {
3287 lgup = floor_log2 (abs_d);
3288 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3289 {
3290 rtx label = gen_label_rtx ();
3291 rtx t1;
3292
3293 t1 = copy_to_mode_reg (compute_mode, op0);
3294 do_cmp_and_jump (t1, const0_rtx, GE,
3295 compute_mode, label);
3296 expand_inc (t1, GEN_INT (trunc_int_for_mode
3297 (abs_d - 1, compute_mode)));
3298 emit_label (label);
3299 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3300 build_int_2 (lgup, 0),
3301 tquotient, 0);
3302 }
3303 else
3304 {
3305 rtx t1, t2, t3;
3306 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3307 build_int_2 (size - 1, 0),
3308 NULL_RTX, 0);
3309 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3310 build_int_2 (size - lgup, 0),
3311 NULL_RTX, 1);
3312 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3313 op0, t2),
3314 NULL_RTX);
3315 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3316 build_int_2 (lgup, 0),
3317 tquotient, 0);
3318 }
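/* Editor's illustration (not in the original): for abs_d = 4 and a
   32-bit X, the branch-free sequence above computes
     t1 = x >> 31                     (arithmetic; the sign mask)
     t2 = (unsigned) t1 >> 30         (0 for x >= 0, 3 for x < 0)
     t3 = x + t2
     q  = t3 >> 2                     (arithmetic; rounds toward zero)
   e.g. x = -7 gives t2 = 3 and (-7 + 3) >> 2 = -1 = trunc (-7/4).  */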
3319
3320 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3321 the quotient. */
3322 if (d < 0)
3323 {
3324 insn = get_last_insn ();
3325 if (insn != last
3326 && (set = single_set (insn)) != 0
3327 && SET_DEST (set) == quotient
3328 && abs_d < ((unsigned HOST_WIDE_INT) 1
3329 << (HOST_BITS_PER_WIDE_INT - 1)))
3330 set_unique_reg_note (insn,
3331 REG_EQUAL,
3332 gen_rtx_DIV (compute_mode,
3333 op0,
3334 GEN_INT
3335 (trunc_int_for_mode
3336 (abs_d,
3337 compute_mode))));
3338
3339 quotient = expand_unop (compute_mode, neg_optab,
3340 quotient, quotient, 0);
3341 }
3342 }
3343 else if (size <= HOST_BITS_PER_WIDE_INT)
3344 {
3345 choose_multiplier (abs_d, size, size - 1,
3346 &ml, &post_shift, &lgup);
3347 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3348 {
3349 rtx t1, t2, t3;
3350
3351 if (post_shift >= BITS_PER_WORD
3352 || size - 1 >= BITS_PER_WORD)
3353 goto fail1;
3354
3355 extra_cost = (shift_cost[post_shift]
3356 + shift_cost[size - 1] + add_cost);
3357 t1 = expand_mult_highpart (compute_mode, op0, ml,
3358 NULL_RTX, 0,
3359 max_cost - extra_cost);
3360 if (t1 == 0)
3361 goto fail1;
3362 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3363 build_int_2 (post_shift, 0), NULL_RTX, 0);
3364 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3365 build_int_2 (size - 1, 0), NULL_RTX, 0);
3366 if (d < 0)
3367 quotient
3368 = force_operand (gen_rtx_MINUS (compute_mode,
3369 t3, t2),
3370 tquotient);
3371 else
3372 quotient
3373 = force_operand (gen_rtx_MINUS (compute_mode,
3374 t2, t3),
3375 tquotient);
3376 }
3377 else
3378 {
3379 rtx t1, t2, t3, t4;
3380
3381 if (post_shift >= BITS_PER_WORD
3382 || size - 1 >= BITS_PER_WORD)
3383 goto fail1;
3384
3385 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3386 extra_cost = (shift_cost[post_shift]
3387 + shift_cost[size - 1] + 2 * add_cost);
3388 t1 = expand_mult_highpart (compute_mode, op0, ml,
3389 NULL_RTX, 0,
3390 max_cost - extra_cost);
3391 if (t1 == 0)
3392 goto fail1;
3393 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3394 t1, op0),
3395 NULL_RTX);
3396 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3397 build_int_2 (post_shift, 0),
3398 NULL_RTX, 0);
3399 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3400 build_int_2 (size - 1, 0),
3401 NULL_RTX, 0);
3402 if (d < 0)
3403 quotient
3404 = force_operand (gen_rtx_MINUS (compute_mode,
3405 t4, t3),
3406 tquotient);
3407 else
3408 quotient
3409 = force_operand (gen_rtx_MINUS (compute_mode,
3410 t3, t4),
3411 tquotient);
3412 }
3413 }
3414 else /* Mode too wide to use the tricky code */
3415 break;
3416
3417 insn = get_last_insn ();
3418 if (insn != last
3419 && (set = single_set (insn)) != 0
3420 && SET_DEST (set) == quotient)
3421 set_unique_reg_note (insn,
3422 REG_EQUAL,
3423 gen_rtx_DIV (compute_mode, op0, op1));
3424 }
3425 break;
3426 }
3427 fail1:
3428 delete_insns_since (last);
3429 break;
3430
3431 case FLOOR_DIV_EXPR:
3432 case FLOOR_MOD_EXPR:
3433 /* We will come here only for signed operations. */
3434 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3435 {
3436 unsigned HOST_WIDE_INT mh, ml;
3437 int pre_shift, lgup, post_shift;
3438 HOST_WIDE_INT d = INTVAL (op1);
3439
3440 if (d > 0)
3441 {
3442 /* We could just as easily deal with negative constants here,
3443 but it does not seem worth the trouble for GCC 2.6. */
3444 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3445 {
3446 pre_shift = floor_log2 (d);
3447 if (rem_flag)
3448 {
3449 remainder = expand_binop (compute_mode, and_optab, op0,
3450 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3451 remainder, 0, OPTAB_LIB_WIDEN);
3452 if (remainder)
3453 return gen_lowpart (mode, remainder);
3454 }
3455 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3456 build_int_2 (pre_shift, 0),
3457 tquotient, 0);
3458 }
3459 else
3460 {
3461 rtx t1, t2, t3, t4;
3462
3463 mh = choose_multiplier (d, size, size - 1,
3464 &ml, &post_shift, &lgup);
3465 if (mh)
3466 abort ();
3467
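		      /* Added commentary, not in the original source: this
			 computes floor (x / d) for d > 0 without a branch.
			 With xsign = x >> (size - 1), x ^ xsign is x when
			 x >= 0 and -x - 1 when x < 0, so it is always
			 non-negative and the unsigned multiply-and-shift
			 yields floor ((x ^ xsign) / d).  The final XOR with
			 xsign maps that back, using
			 -floor ((-x - 1) / d) - 1 == floor (x / d) for
			 x < 0.  */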
3468 if (post_shift < BITS_PER_WORD
3469 && size - 1 < BITS_PER_WORD)
3470 {
3471 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3472 build_int_2 (size - 1, 0),
3473 NULL_RTX, 0);
3474 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3475 NULL_RTX, 0, OPTAB_WIDEN);
3476 extra_cost = (shift_cost[post_shift]
3477 + shift_cost[size - 1] + 2 * add_cost);
3478 t3 = expand_mult_highpart (compute_mode, t2, ml,
3479 NULL_RTX, 1,
3480 max_cost - extra_cost);
3481 if (t3 != 0)
3482 {
3483 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3484 build_int_2 (post_shift, 0),
3485 NULL_RTX, 1);
3486 quotient = expand_binop (compute_mode, xor_optab,
3487 t4, t1, tquotient, 0,
3488 OPTAB_WIDEN);
3489 }
3490 }
3491 }
3492 }
3493 else
3494 {
3495 rtx nsign, t1, t2, t3, t4;
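	      /* Added commentary, not in the original source: this branch
		 handles constant d < 0.  NSIGN is -1 when op0 <= 0 and 0
		 when op0 > 0, since op0 | (op0 - 1) has its sign bit set
		 exactly when op0 <= 0.  The branchless identity used is

		   floor (x / d) == trunc ((x - 1) / d) - 1   if x > 0
		   floor (x / d) == trunc (x / d)             if x <= 0

		 which holds because for d < 0 the quotient is negative in
		 the first case and non-negative in the second.  */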
3496 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3497 op0, constm1_rtx), NULL_RTX);
3498 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3499 0, OPTAB_WIDEN);
3500 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3501 build_int_2 (size - 1, 0), NULL_RTX, 0);
3502 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3503 NULL_RTX);
3504 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3505 NULL_RTX, 0);
3506 if (t4)
3507 {
3508 rtx t5;
3509 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3510 NULL_RTX, 0);
3511 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3512 t4, t5),
3513 tquotient);
3514 }
3515 }
3516 }
3517
3518 if (quotient != 0)
3519 break;
3520 delete_insns_since (last);
3521
3522 /* Try using an instruction that produces both the quotient and
3523 remainder, using truncation. We can easily compensate the quotient
3524 or remainder to get floor rounding, once we have the remainder.
3525 	 Notice that we also compute the final remainder value here,
3526 and return the result right away. */
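      /* Added note, not in the original source: with truncating q = x / y
	 and r = x % y, the adjustment emitted below is

	   if (r != 0 && (x ^ y) < 0)
	     q -= 1, r += y;

	 which converts truncation to floor rounding.  */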
3527 if (target == 0 || GET_MODE (target) != compute_mode)
3528 target = gen_reg_rtx (compute_mode);
3529
3530 if (rem_flag)
3531 {
3532 remainder
3533 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3534 quotient = gen_reg_rtx (compute_mode);
3535 }
3536 else
3537 {
3538 quotient
3539 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3540 remainder = gen_reg_rtx (compute_mode);
3541 }
3542
3543 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3544 quotient, remainder, 0))
3545 {
3546 /* This could be computed with a branch-less sequence.
3547 Save that for later. */
3548 rtx tem;
3549 rtx label = gen_label_rtx ();
3550 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3551 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3552 NULL_RTX, 0, OPTAB_WIDEN);
3553 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3554 expand_dec (quotient, const1_rtx);
3555 expand_inc (remainder, op1);
3556 emit_label (label);
3557 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3558 }
3559
3560 /* No luck with division elimination or divmod. Have to do it
3561 by conditionally adjusting op0 *and* the result. */
3562 {
3563 rtx label1, label2, label3, label4, label5;
3564 rtx adjusted_op0;
3565 rtx tem;
3566
3567 quotient = gen_reg_rtx (compute_mode);
3568 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3569 label1 = gen_label_rtx ();
3570 label2 = gen_label_rtx ();
3571 label3 = gen_label_rtx ();
3572 label4 = gen_label_rtx ();
3573 label5 = gen_label_rtx ();
3574 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3575 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3576 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3577 quotient, 0, OPTAB_LIB_WIDEN);
3578 if (tem != quotient)
3579 emit_move_insn (quotient, tem);
3580 emit_jump_insn (gen_jump (label5));
3581 emit_barrier ();
3582 emit_label (label1);
3583 expand_inc (adjusted_op0, const1_rtx);
3584 emit_jump_insn (gen_jump (label4));
3585 emit_barrier ();
3586 emit_label (label2);
3587 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3588 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3589 quotient, 0, OPTAB_LIB_WIDEN);
3590 if (tem != quotient)
3591 emit_move_insn (quotient, tem);
3592 emit_jump_insn (gen_jump (label5));
3593 emit_barrier ();
3594 emit_label (label3);
3595 expand_dec (adjusted_op0, const1_rtx);
3596 emit_label (label4);
3597 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3598 quotient, 0, OPTAB_LIB_WIDEN);
3599 if (tem != quotient)
3600 emit_move_insn (quotient, tem);
3601 expand_dec (quotient, const1_rtx);
3602 emit_label (label5);
3603 }
3604 break;
3605
3606 case CEIL_DIV_EXPR:
3607 case CEIL_MOD_EXPR:
3608 if (unsignedp)
3609 {
3610 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3611 {
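	      /* Added note, not in the original source: for unsigned x and
		 d a power of 2, the sequence below computes

		   q = (x >> log2 (d)) + ((x & (d - 1)) != 0)

		 i.e. the truncated quotient plus one whenever any remainder
		 bits are set, which is ceiling rounding.  */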
3612 rtx t1, t2, t3;
3613 unsigned HOST_WIDE_INT d = INTVAL (op1);
3614 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3615 build_int_2 (floor_log2 (d), 0),
3616 tquotient, 1);
3617 t2 = expand_binop (compute_mode, and_optab, op0,
3618 GEN_INT (d - 1),
3619 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3620 t3 = gen_reg_rtx (compute_mode);
3621 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3622 compute_mode, 1, 1);
3623 if (t3 == 0)
3624 {
3625 rtx lab;
3626 lab = gen_label_rtx ();
3627 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3628 expand_inc (t1, const1_rtx);
3629 emit_label (lab);
3630 quotient = t1;
3631 }
3632 else
3633 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3634 t1, t3),
3635 tquotient);
3636 break;
3637 }
3638
3639 /* Try using an instruction that produces both the quotient and
3640 remainder, using truncation. We can easily compensate the
3641 quotient or remainder to get ceiling rounding, once we have the
3642 	     remainder.  Notice that we also compute the final remainder
3643 value here, and return the result right away. */
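	  /* Added note, not in the original source: with truncating
	     unsigned q = x / y and r = x % y, ceiling rounding is

	       if (r != 0)
		 q += 1, r -= y;

	     which is the branch sequence emitted below.  */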
3644 if (target == 0 || GET_MODE (target) != compute_mode)
3645 target = gen_reg_rtx (compute_mode);
3646
3647 if (rem_flag)
3648 {
3649 remainder = (GET_CODE (target) == REG
3650 ? target : gen_reg_rtx (compute_mode));
3651 quotient = gen_reg_rtx (compute_mode);
3652 }
3653 else
3654 {
3655 quotient = (GET_CODE (target) == REG
3656 ? target : gen_reg_rtx (compute_mode));
3657 remainder = gen_reg_rtx (compute_mode);
3658 }
3659
3660 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3661 remainder, 1))
3662 {
3663 /* This could be computed with a branch-less sequence.
3664 Save that for later. */
3665 rtx label = gen_label_rtx ();
3666 do_cmp_and_jump (remainder, const0_rtx, EQ,
3667 compute_mode, label);
3668 expand_inc (quotient, const1_rtx);
3669 expand_dec (remainder, op1);
3670 emit_label (label);
3671 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3672 }
3673
3674 /* No luck with division elimination or divmod. Have to do it
3675 by conditionally adjusting op0 *and* the result. */
3676 {
3677 rtx label1, label2;
3678 rtx adjusted_op0, tem;
3679
3680 quotient = gen_reg_rtx (compute_mode);
3681 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3682 label1 = gen_label_rtx ();
3683 label2 = gen_label_rtx ();
3684 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3685 compute_mode, label1);
3686 emit_move_insn (quotient, const0_rtx);
3687 emit_jump_insn (gen_jump (label2));
3688 emit_barrier ();
3689 emit_label (label1);
3690 expand_dec (adjusted_op0, const1_rtx);
3691 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3692 quotient, 1, OPTAB_LIB_WIDEN);
3693 if (tem != quotient)
3694 emit_move_insn (quotient, tem);
3695 expand_inc (quotient, const1_rtx);
3696 emit_label (label2);
3697 }
3698 }
3699 else /* signed */
3700 {
3701 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3702 && INTVAL (op1) >= 0)
3703 {
3704 /* This is extremely similar to the code for the unsigned case
3705 above. For 2.7 we should merge these variants, but for
3706 2.6.1 I don't want to touch the code for unsigned since that
3707 		 gets used in C.  The signed case will only be used by other
3708 languages (Ada). */
3709
3710 rtx t1, t2, t3;
3711 unsigned HOST_WIDE_INT d = INTVAL (op1);
3712 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3713 build_int_2 (floor_log2 (d), 0),
3714 tquotient, 0);
3715 t2 = expand_binop (compute_mode, and_optab, op0,
3716 GEN_INT (d - 1),
3717 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3718 t3 = gen_reg_rtx (compute_mode);
3719 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3720 compute_mode, 1, 1);
3721 if (t3 == 0)
3722 {
3723 rtx lab;
3724 lab = gen_label_rtx ();
3725 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3726 expand_inc (t1, const1_rtx);
3727 emit_label (lab);
3728 quotient = t1;
3729 }
3730 else
3731 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3732 t1, t3),
3733 tquotient);
3734 break;
3735 }
3736
3737 /* Try using an instruction that produces both the quotient and
3738 remainder, using truncation. We can easily compensate the
3739 quotient or remainder to get ceiling rounding, once we have the
3740 	     remainder.  Notice that we also compute the final remainder
3741 value here, and return the result right away. */
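	  /* Added note, not in the original source: in the signed case the
	     quotient only moves toward +infinity when the operands have
	     the same sign, so the adjustment emitted below is

	       if (r != 0 && (x ^ y) >= 0)
		 q += 1, r -= y;  */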
3742 if (target == 0 || GET_MODE (target) != compute_mode)
3743 target = gen_reg_rtx (compute_mode);
3744 if (rem_flag)
3745 {
3746 	      remainder = (GET_CODE (target) == REG
3747 ? target : gen_reg_rtx (compute_mode));
3748 quotient = gen_reg_rtx (compute_mode);
3749 }
3750 else
3751 {
3752 quotient = (GET_CODE (target) == REG
3753 ? target : gen_reg_rtx (compute_mode));
3754 remainder = gen_reg_rtx (compute_mode);
3755 }
3756
3757 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3758 remainder, 0))
3759 {
3760 /* This could be computed with a branch-less sequence.
3761 Save that for later. */
3762 rtx tem;
3763 rtx label = gen_label_rtx ();
3764 do_cmp_and_jump (remainder, const0_rtx, EQ,
3765 compute_mode, label);
3766 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3767 NULL_RTX, 0, OPTAB_WIDEN);
3768 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3769 expand_inc (quotient, const1_rtx);
3770 expand_dec (remainder, op1);
3771 emit_label (label);
3772 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3773 }
3774
3775 /* No luck with division elimination or divmod. Have to do it
3776 by conditionally adjusting op0 *and* the result. */
3777 {
3778 rtx label1, label2, label3, label4, label5;
3779 rtx adjusted_op0;
3780 rtx tem;
3781
3782 quotient = gen_reg_rtx (compute_mode);
3783 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3784 label1 = gen_label_rtx ();
3785 label2 = gen_label_rtx ();
3786 label3 = gen_label_rtx ();
3787 label4 = gen_label_rtx ();
3788 label5 = gen_label_rtx ();
3789 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3790 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3791 compute_mode, label1);
3792 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3793 quotient, 0, OPTAB_LIB_WIDEN);
3794 if (tem != quotient)
3795 emit_move_insn (quotient, tem);
3796 emit_jump_insn (gen_jump (label5));
3797 emit_barrier ();
3798 emit_label (label1);
3799 expand_dec (adjusted_op0, const1_rtx);
3800 emit_jump_insn (gen_jump (label4));
3801 emit_barrier ();
3802 emit_label (label2);
3803 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3804 compute_mode, label3);
3805 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3806 quotient, 0, OPTAB_LIB_WIDEN);
3807 if (tem != quotient)
3808 emit_move_insn (quotient, tem);
3809 emit_jump_insn (gen_jump (label5));
3810 emit_barrier ();
3811 emit_label (label3);
3812 expand_inc (adjusted_op0, const1_rtx);
3813 emit_label (label4);
3814 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3815 quotient, 0, OPTAB_LIB_WIDEN);
3816 if (tem != quotient)
3817 emit_move_insn (quotient, tem);
3818 expand_inc (quotient, const1_rtx);
3819 emit_label (label5);
3820 }
3821 }
3822 break;
3823
3824 case EXACT_DIV_EXPR:
3825 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3826 {
3827 HOST_WIDE_INT d = INTVAL (op1);
3828 unsigned HOST_WIDE_INT ml;
3829 int pre_shift;
3830 rtx t1;
3831
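	  /* Added worked example, not in the original source: an exact
	     division shifts out the powers of 2 in d and multiplies by the
	     inverse of the remaining odd factor modulo 2^size.  E.g. in a
	     32-bit mode with d == 6: pre_shift == 1 and the inverse of 3
	     mod 2^32 is 0xAAAAAAAB, so x == 18 gives
	     (18 >> 1) * 0xAAAAAAAB == 9 * 0xAAAAAAAB == 3 (mod 2^32).  */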
3832 pre_shift = floor_log2 (d & -d);
3833 ml = invert_mod2n (d >> pre_shift, size);
3834 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3835 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3836 quotient = expand_mult (compute_mode, t1,
3837 GEN_INT (trunc_int_for_mode
3838 (ml, compute_mode)),
3839 NULL_RTX, 0);
3840
3841 insn = get_last_insn ();
3842 set_unique_reg_note (insn,
3843 REG_EQUAL,
3844 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3845 compute_mode,
3846 op0, op1));
3847 }
3848 break;
3849
3850 case ROUND_DIV_EXPR:
3851 case ROUND_MOD_EXPR:
3852 if (unsignedp)
3853 {
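	  /* Added note, not in the original source: the sequence below
	     rounds to nearest by comparing the remainder with (y - 1) / 2:

	       if (r > (y - 1) / 2)
		 q += 1, r -= y;

	     so for even y an exact half rounds upward.  */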
3854 rtx tem;
3855 rtx label;
3856 label = gen_label_rtx ();
3857 quotient = gen_reg_rtx (compute_mode);
3858 remainder = gen_reg_rtx (compute_mode);
3859 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3860 {
3861 rtx tem;
3862 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3863 quotient, 1, OPTAB_LIB_WIDEN);
3864 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3865 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3866 remainder, 1, OPTAB_LIB_WIDEN);
3867 }
3868 tem = plus_constant (op1, -1);
3869 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3870 build_int_2 (1, 0), NULL_RTX, 1);
3871 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3872 expand_inc (quotient, const1_rtx);
3873 expand_dec (remainder, op1);
3874 emit_label (label);
3875 }
3876 else
3877 {
3878 rtx abs_rem, abs_op1, tem, mask;
3879 rtx label;
3880 label = gen_label_rtx ();
3881 quotient = gen_reg_rtx (compute_mode);
3882 remainder = gen_reg_rtx (compute_mode);
3883 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3884 {
3885 rtx tem;
3886 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3887 quotient, 0, OPTAB_LIB_WIDEN);
3888 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3889 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3890 remainder, 0, OPTAB_LIB_WIDEN);
3891 }
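	  /* Added note, not in the original source: MASK below is the sign
	     of x ^ y, i.e. -1 when the quotient is negative and 0
	     otherwise.  Thus (1 ^ mask) - mask is +1 or -1 and
	     (y ^ mask) - mask is +y or -y, so when 2 * |r| >= |y| the
	     quotient is moved one step away from zero and the remainder is
	     corrected by |y| with the matching sign.  */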
3892 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3893 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3894 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3895 build_int_2 (1, 0), NULL_RTX, 1);
3896 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3897 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3898 NULL_RTX, 0, OPTAB_WIDEN);
3899 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3900 build_int_2 (size - 1, 0), NULL_RTX, 0);
3901 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3902 NULL_RTX, 0, OPTAB_WIDEN);
3903 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3904 NULL_RTX, 0, OPTAB_WIDEN);
3905 expand_inc (quotient, tem);
3906 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3907 NULL_RTX, 0, OPTAB_WIDEN);
3908 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3909 NULL_RTX, 0, OPTAB_WIDEN);
3910 expand_dec (remainder, tem);
3911 emit_label (label);
3912 }
3913 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3914
3915 default:
3916 abort ();
3917 }
3918
3919 if (quotient == 0)
3920 {
3921 if (target && GET_MODE (target) != compute_mode)
3922 target = 0;
3923
3924 if (rem_flag)
3925 {
3926 /* Try to produce the remainder without producing the quotient.
3927 If we seem to have a divmod pattern that does not require widening,
3928 	     don't try widening here.  We should really have a WIDEN argument
3929 to expand_twoval_binop, since what we'd really like to do here is
3930 1) try a mod insn in compute_mode
3931 2) try a divmod insn in compute_mode
3932 3) try a div insn in compute_mode and multiply-subtract to get
3933 remainder
3934 4) try the same things with widening allowed. */
3935 remainder
3936 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3937 op0, op1, target,
3938 unsignedp,
3939 ((optab2->handlers[(int) compute_mode].insn_code
3940 != CODE_FOR_nothing)
3941 ? OPTAB_DIRECT : OPTAB_WIDEN));
3942 if (remainder == 0)
3943 {
3944 /* No luck there. Can we do remainder and divide at once
3945 without a library call? */
3946 remainder = gen_reg_rtx (compute_mode);
3947 if (! expand_twoval_binop ((unsignedp
3948 ? udivmod_optab
3949 : sdivmod_optab),
3950 op0, op1,
3951 NULL_RTX, remainder, unsignedp))
3952 remainder = 0;
3953 }
3954
3955 if (remainder)
3956 return gen_lowpart (mode, remainder);
3957 }
3958
3959 /* Produce the quotient. Try a quotient insn, but not a library call.
3960 If we have a divmod in this mode, use it in preference to widening
3961 the div (for this test we assume it will not fail). Note that optab2
3962 	 is set to whichever of the two optabs the call below will use.  */
3963 quotient
3964 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3965 op0, op1, rem_flag ? NULL_RTX : target,
3966 unsignedp,
3967 ((optab2->handlers[(int) compute_mode].insn_code
3968 != CODE_FOR_nothing)
3969 ? OPTAB_DIRECT : OPTAB_WIDEN));
3970
3971 if (quotient == 0)
3972 {
3973 /* No luck there. Try a quotient-and-remainder insn,
3974 keeping the quotient alone. */
3975 quotient = gen_reg_rtx (compute_mode);
3976 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3977 op0, op1,
3978 quotient, NULL_RTX, unsignedp))
3979 {
3980 quotient = 0;
3981 if (! rem_flag)
3982 /* Still no luck. If we are not computing the remainder,
3983 use a library call for the quotient. */
3984 quotient = sign_expand_binop (compute_mode,
3985 udiv_optab, sdiv_optab,
3986 op0, op1, target,
3987 unsignedp, OPTAB_LIB_WIDEN);
3988 }
3989 }
3990 }
3991
3992 if (rem_flag)
3993 {
3994 if (target && GET_MODE (target) != compute_mode)
3995 target = 0;
3996
3997 if (quotient == 0)
3998 /* No divide instruction either. Use library for remainder. */
3999 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4000 op0, op1, target,
4001 unsignedp, OPTAB_LIB_WIDEN);
4002 else
4003 {
4004 /* We divided. Now finish doing X - Y * (X / Y). */
4005 remainder = expand_mult (compute_mode, quotient, op1,
4006 NULL_RTX, unsignedp);
4007 remainder = expand_binop (compute_mode, sub_optab, op0,
4008 remainder, target, unsignedp,
4009 OPTAB_LIB_WIDEN);
4010 }
4011 }
4012
4013 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4014 }
4015 \f
4016 /* Return a tree node with data type TYPE, describing the value of X.
4017    Usually this is an RTL_EXPR, if there is no obviously better choice.
4018    X may be an expression; however, we only support those expressions
4019    generated by loop.c.  */
4020
4021 tree
4022 make_tree (type, x)
4023 tree type;
4024 rtx x;
4025 {
4026 tree t;
4027
4028 switch (GET_CODE (x))
4029 {
4030 case CONST_INT:
4031 t = build_int_2 (INTVAL (x),
4032 (TREE_UNSIGNED (type)
4033 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4034 || INTVAL (x) >= 0 ? 0 : -1);
4035 TREE_TYPE (t) = type;
4036 return t;
4037
4038 case CONST_DOUBLE:
4039 if (GET_MODE (x) == VOIDmode)
4040 {
4041 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4042 TREE_TYPE (t) = type;
4043 }
4044 else
4045 {
4046 REAL_VALUE_TYPE d;
4047
4048 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4049 t = build_real (type, d);
4050 }
4051
4052 return t;
4053
4054 case PLUS:
4055 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4056 make_tree (type, XEXP (x, 1))));
4057
4058 case MINUS:
4059 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4060 make_tree (type, XEXP (x, 1))));
4061
4062 case NEG:
4063 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4064
4065 case MULT:
4066 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4067 make_tree (type, XEXP (x, 1))));
4068
4069 case ASHIFT:
4070 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4071 make_tree (type, XEXP (x, 1))));
4072
4073 case LSHIFTRT:
4074 return fold (convert (type,
4075 build (RSHIFT_EXPR, unsigned_type (type),
4076 make_tree (unsigned_type (type),
4077 XEXP (x, 0)),
4078 make_tree (type, XEXP (x, 1)))));
4079
4080 case ASHIFTRT:
4081 return fold (convert (type,
4082 build (RSHIFT_EXPR, signed_type (type),
4083 make_tree (signed_type (type), XEXP (x, 0)),
4084 make_tree (type, XEXP (x, 1)))));
4085
4086 case DIV:
4087 if (TREE_CODE (type) != REAL_TYPE)
4088 t = signed_type (type);
4089 else
4090 t = type;
4091
4092 return fold (convert (type,
4093 build (TRUNC_DIV_EXPR, t,
4094 make_tree (t, XEXP (x, 0)),
4095 make_tree (t, XEXP (x, 1)))));
4096 case UDIV:
4097 t = unsigned_type (type);
4098 return fold (convert (type,
4099 build (TRUNC_DIV_EXPR, t,
4100 make_tree (t, XEXP (x, 0)),
4101 make_tree (t, XEXP (x, 1)))));
4102 default:
4103 t = make_node (RTL_EXPR);
4104 TREE_TYPE (t) = type;
4105
4106 #ifdef POINTERS_EXTEND_UNSIGNED
4107 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4108 ptr_mode. So convert. */
4109 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4110 x = convert_memory_address (TYPE_MODE (type), x);
4111 #endif
4112
4113 RTL_EXPR_RTL (t) = x;
4114 /* There are no insns to be output
4115 when this rtl_expr is used. */
4116 RTL_EXPR_SEQUENCE (t) = 0;
4117 return t;
4118 }
4119 }
4120
4121 /* Return an rtx representing the value of X * MULT + ADD.
4122 TARGET is a suggestion for where to store the result (an rtx).
4123 MODE is the machine mode for the computation.
4124    X and MULT must have mode MODE.  ADD may have a different mode
4125    (an ADD of VOIDmode is treated as having mode MODE).
4126 UNSIGNEDP is non-zero to do unsigned multiplication.
4127 This may emit insns. */
4128
4129 rtx
4130 expand_mult_add (x, target, mult, add, mode, unsignedp)
4131 rtx x, target, mult, add;
4132 enum machine_mode mode;
4133 int unsignedp;
4134 {
4135 tree type = type_for_mode (mode, unsignedp);
4136 tree add_type = (GET_MODE (add) == VOIDmode
4137 ? type : type_for_mode (GET_MODE (add), unsignedp));
4138 tree result = fold (build (PLUS_EXPR, type,
4139 fold (build (MULT_EXPR, type,
4140 make_tree (type, x),
4141 make_tree (type, mult))),
4142 make_tree (add_type, add)));
4143
4144 return expand_expr (result, target, VOIDmode, 0);
4145 }
4146 \f
4147 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4148 and returning TARGET.
4149
4150 If TARGET is 0, a pseudo-register or constant is returned. */
4151
4152 rtx
4153 expand_and (op0, op1, target)
4154 rtx op0, op1, target;
4155 {
4156 enum machine_mode mode = VOIDmode;
4157 rtx tem;
4158
4159 if (GET_MODE (op0) != VOIDmode)
4160 mode = GET_MODE (op0);
4161 else if (GET_MODE (op1) != VOIDmode)
4162 mode = GET_MODE (op1);
4163
4164 if (mode != VOIDmode)
4165 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4166 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4167 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4168 else
4169 abort ();
4170
4171 if (target == 0)
4172 target = tem;
4173 else if (tem != target)
4174 emit_move_insn (target, tem);
4175 return target;
4176 }
4177 \f
4178 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4179 and storing in TARGET. Normally return TARGET.
4180 Return 0 if that cannot be done.
4181
4182 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4183 it is VOIDmode, they cannot both be CONST_INT.
4184
4185 UNSIGNEDP is for the case where we have to widen the operands
4186 to perform the operation. It says to use zero-extension.
4187
4188 NORMALIZEP is 1 if we should convert the result to be either zero
4189    or one.  NORMALIZEP is -1 if we should convert the result to be
4190 either zero or -1. If NORMALIZEP is zero, the result will be left
4191 "raw" out of the scc insn. */
4192
4193 rtx
4194 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4195 rtx target;
4196 enum rtx_code code;
4197 rtx op0, op1;
4198 enum machine_mode mode;
4199 int unsignedp;
4200 int normalizep;
4201 {
4202 rtx subtarget;
4203 enum insn_code icode;
4204 enum machine_mode compare_mode;
4205 enum machine_mode target_mode = GET_MODE (target);
4206 rtx tem;
4207 rtx last = get_last_insn ();
4208 rtx pattern, comparison;
4209
4210 if (unsignedp)
4211 code = unsigned_condition (code);
4212
4213 /* If one operand is constant, make it the second one. Only do this
4214 if the other operand is not constant as well. */
4215
4216 if (swap_commutative_operands_p (op0, op1))
4217 {
4218 tem = op0;
4219 op0 = op1;
4220 op1 = tem;
4221 code = swap_condition (code);
4222 }
4223
4224 if (mode == VOIDmode)
4225 mode = GET_MODE (op0);
4226
4227 /* For some comparisons with 1 and -1, we can convert this to
4228 comparisons with zero. This will often produce more opportunities for
4229 store-flag insns. */
4230
4231 switch (code)
4232 {
4233 case LT:
4234 if (op1 == const1_rtx)
4235 op1 = const0_rtx, code = LE;
4236 break;
4237 case LE:
4238 if (op1 == constm1_rtx)
4239 op1 = const0_rtx, code = LT;
4240 break;
4241 case GE:
4242 if (op1 == const1_rtx)
4243 op1 = const0_rtx, code = GT;
4244 break;
4245 case GT:
4246 if (op1 == constm1_rtx)
4247 op1 = const0_rtx, code = GE;
4248 break;
4249 case GEU:
4250 if (op1 == const1_rtx)
4251 op1 = const0_rtx, code = NE;
4252 break;
4253 case LTU:
4254 if (op1 == const1_rtx)
4255 op1 = const0_rtx, code = EQ;
4256 break;
4257 default:
4258 break;
4259 }
4260
4261 /* If we are comparing a double-word integer with zero, we can convert
4262 the comparison into one involving a single word. */
4263 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4264 && GET_MODE_CLASS (mode) == MODE_INT
4265 && op1 == const0_rtx)
4266 {
4267 if (code == EQ || code == NE)
4268 {
4269 /* Do a logical OR of the two words and compare the result. */
4270 rtx op0h = gen_highpart (word_mode, op0);
4271 rtx op0l = gen_lowpart (word_mode, op0);
4272 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4273 NULL_RTX, unsignedp, OPTAB_DIRECT);
4274 if (op0both != 0)
4275 return emit_store_flag (target, code, op0both, op1, word_mode,
4276 unsignedp, normalizep);
4277 }
4278 else if (code == LT || code == GE)
4279 	/* If testing the sign bit, we can just test the high word.  */
4280 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4281 op1, word_mode, unsignedp, normalizep);
4282 }
4283
4284 /* From now on, we won't change CODE, so set ICODE now. */
4285 icode = setcc_gen_code[(int) code];
4286
4287 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4288 complement of A (for GE) and shifting the sign bit to the low bit. */
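  /* Added illustration, not in the original source, for a 32-bit mode and
     a 0/1 result:

       (A < 0)   ==  (unsigned) A >> 31
       (A >= 0)  ==  (unsigned) ~A >> 31

     while an arithmetic right shift by 31 yields the -1/0 form.  */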
4289 if (op1 == const0_rtx && (code == LT || code == GE)
4290 && GET_MODE_CLASS (mode) == MODE_INT
4291 && (normalizep || STORE_FLAG_VALUE == 1
4292 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4293 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4294 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4295 {
4296 subtarget = target;
4297
4298 /* If the result is to be wider than OP0, it is best to convert it
4299 first. If it is to be narrower, it is *incorrect* to convert it
4300 first. */
4301 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4302 {
4303 op0 = protect_from_queue (op0, 0);
4304 op0 = convert_modes (target_mode, mode, op0, 0);
4305 mode = target_mode;
4306 }
4307
4308 if (target_mode != mode)
4309 subtarget = 0;
4310
4311 if (code == GE)
4312 op0 = expand_unop (mode, one_cmpl_optab, op0,
4313 ((STORE_FLAG_VALUE == 1 || normalizep)
4314 ? 0 : subtarget), 0);
4315
4316 if (STORE_FLAG_VALUE == 1 || normalizep)
4317 /* If we are supposed to produce a 0/1 value, we want to do
4318 a logical shift from the sign bit to the low-order bit; for
4319 a -1/0 value, we do an arithmetic shift. */
4320 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4321 size_int (GET_MODE_BITSIZE (mode) - 1),
4322 subtarget, normalizep != -1);
4323
4324 if (mode != target_mode)
4325 op0 = convert_modes (target_mode, mode, op0, 0);
4326
4327 return op0;
4328 }
4329
4330 if (icode != CODE_FOR_nothing)
4331 {
4332 insn_operand_predicate_fn pred;
4333
4334 /* We think we may be able to do this with a scc insn. Emit the
4335 comparison and then the scc insn.
4336
4337 compare_from_rtx may call emit_queue, which would be deleted below
4338 if the scc insn fails. So call it ourselves before setting LAST.
4339 Likewise for do_pending_stack_adjust. */
4340
4341 emit_queue ();
4342 do_pending_stack_adjust ();
4343 last = get_last_insn ();
4344
4345 comparison
4346 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4347 if (GET_CODE (comparison) == CONST_INT)
4348 return (comparison == const0_rtx ? const0_rtx
4349 : normalizep == 1 ? const1_rtx
4350 : normalizep == -1 ? constm1_rtx
4351 : const_true_rtx);
4352
4353 /* The code of COMPARISON may not match CODE if compare_from_rtx
4354 decided to swap its operands and reverse the original code.
4355
4356 We know that compare_from_rtx returns either a CONST_INT or
4357 a new comparison code, so it is safe to just extract the
4358 code from COMPARISON. */
4359 code = GET_CODE (comparison);
4360
4361 /* Get a reference to the target in the proper mode for this insn. */
4362 compare_mode = insn_data[(int) icode].operand[0].mode;
4363 subtarget = target;
4364 pred = insn_data[(int) icode].operand[0].predicate;
4365 if (preserve_subexpressions_p ()
4366 || ! (*pred) (subtarget, compare_mode))
4367 subtarget = gen_reg_rtx (compare_mode);
4368
4369 pattern = GEN_FCN (icode) (subtarget);
4370 if (pattern)
4371 {
4372 emit_insn (pattern);
4373
4374 /* If we are converting to a wider mode, first convert to
4375 TARGET_MODE, then normalize. This produces better combining
4376 opportunities on machines that have a SIGN_EXTRACT when we are
4377 testing a single bit. This mostly benefits the 68k.
4378
4379 If STORE_FLAG_VALUE does not have the sign bit set when
4380 interpreted in COMPARE_MODE, we can do this conversion as
4381 unsigned, which is usually more efficient. */
4382 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4383 {
4384 convert_move (target, subtarget,
4385 (GET_MODE_BITSIZE (compare_mode)
4386 <= HOST_BITS_PER_WIDE_INT)
4387 && 0 == (STORE_FLAG_VALUE
4388 & ((HOST_WIDE_INT) 1
4389 << (GET_MODE_BITSIZE (compare_mode) -1))));
4390 op0 = target;
4391 compare_mode = target_mode;
4392 }
4393 else
4394 op0 = subtarget;
4395
4396 /* If we want to keep subexpressions around, don't reuse our
4397 last target. */
4398
4399 if (preserve_subexpressions_p ())
4400 subtarget = 0;
4401
4402 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4403 we don't have to do anything. */
4404 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4405 ;
4406 /* STORE_FLAG_VALUE might be the most negative number, so write
4407 	     the comparison this way to avoid a compile-time warning.  */
4408 else if (- normalizep == STORE_FLAG_VALUE)
4409 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4410
4411 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4412 makes it hard to use a value of just the sign bit due to
4413 ANSI integer constant typing rules. */
4414 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4415 && (STORE_FLAG_VALUE
4416 & ((HOST_WIDE_INT) 1
4417 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4418 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4419 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4420 subtarget, normalizep == 1);
4421 else if (STORE_FLAG_VALUE & 1)
4422 {
4423 op0 = expand_and (op0, const1_rtx, subtarget);
4424 if (normalizep == -1)
4425 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4426 }
4427 else
4428 abort ();
4429
4430 /* If we were converting to a smaller mode, do the
4431 conversion now. */
4432 if (target_mode != compare_mode)
4433 {
4434 convert_move (target, op0, 0);
4435 return target;
4436 }
4437 else
4438 return op0;
4439 }
4440 }
4441
4442 delete_insns_since (last);
4443
4444   /* If expensive optimizations are enabled, use a different pseudo register
4445      for each insn, instead of reusing the same one.  This leads to better CSE,
4446      but slows down the compiler, since there are more pseudos.  */
4447 subtarget = (!flag_expensive_optimizations
4448 && (target_mode == mode)) ? target : NULL_RTX;
4449
4450 /* If we reached here, we can't do this with a scc insn. However, there
4451 are some comparisons that can be done directly. For example, if
4452 this is an equality comparison of integers, we can try to exclusive-or
4453 (or subtract) the two operands and use a recursive call to try the
4454 comparison with zero. Don't do any of these cases if branches are
4455 very cheap. */
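  /* Added note, not in the original source: this uses
     (a == b) == ((a ^ b) == 0), and likewise for !=, falling back to
     a - b when no XOR sequence can be found.  */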
4456
4457 if (BRANCH_COST > 0
4458 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4459 && op1 != const0_rtx)
4460 {
4461 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4462 OPTAB_WIDEN);
4463
4464 if (tem == 0)
4465 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4466 OPTAB_WIDEN);
4467 if (tem != 0)
4468 tem = emit_store_flag (target, code, tem, const0_rtx,
4469 mode, unsignedp, normalizep);
4470 if (tem == 0)
4471 delete_insns_since (last);
4472 return tem;
4473 }
4474
4475 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4476 the constant zero. Reject all other comparisons at this point. Only
4477 do LE and GT if branches are expensive since they are expensive on
4478 2-operand machines. */
4479
4480 if (BRANCH_COST == 0
4481 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4482 || (code != EQ && code != NE
4483 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4484 return 0;
4485
4486 /* See what we need to return. We can only return a 1, -1, or the
4487 sign bit. */
4488
4489 if (normalizep == 0)
4490 {
4491 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4492 normalizep = STORE_FLAG_VALUE;
4493
4494 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4495 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4496 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4497 ;
4498 else
4499 return 0;
4500 }
4501
4502 /* Try to put the result of the comparison in the sign bit. Assume we can't
4503 do the necessary operation below. */
4504
4505 tem = 0;
4506
4507 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4508 the sign bit set. */
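  /* Added illustration, not in the original source, in 32 bits:
     A == 0 gives 0 | -1 (sign bit set), A == 5 gives 5 | 4 (sign bit
     clear), and any negative A keeps its sign bit set.  */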
4509
4510 if (code == LE)
4511 {
4512 /* This is destructive, so SUBTARGET can't be OP0. */
4513 if (rtx_equal_p (subtarget, op0))
4514 subtarget = 0;
4515
4516 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4517 OPTAB_WIDEN);
4518 if (tem)
4519 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4520 OPTAB_WIDEN);
4521 }
4522
4523   /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4524 number of bits in the mode of OP0, minus one. */
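  /* Added illustration, not in the original source, in 32 bits:
     A == 3 gives (3 >> 31) - 3 == -3 (sign set: A > 0 is true);
     A == 0 gives 0 - 0 == 0; A == -2 gives -1 - (-2) == 1 (sign
     clear: false).  */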
4525
4526 if (code == GT)
4527 {
4528 if (rtx_equal_p (subtarget, op0))
4529 subtarget = 0;
4530
4531 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4532 size_int (GET_MODE_BITSIZE (mode) - 1),
4533 subtarget, 0);
4534 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4535 OPTAB_WIDEN);
4536 }
4537
4538 if (code == EQ || code == NE)
4539 {
4540 /* For EQ or NE, one way to do the comparison is to apply an operation
4541 	 that converts the operand into a positive number if it is non-zero,
4542 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4543 for NE we negate. This puts the result in the sign bit. Then we
4544 normalize with a shift, if needed.
4545
4546 Two operations that can do the above actions are ABS and FFS, so try
4547 them. If that doesn't work, and MODE is smaller than a full word,
4548 we can use zero-extension to the wider mode (an unsigned conversion)
4549 as the operation. */
4550
4551 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4552 that is compensated by the subsequent overflow when subtracting
4553 one / negating. */
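      /* Added note, not in the original source: with P = ABS (A) or
	 FFS (A), P is zero iff A is zero, so the sign bit of -P computes
	 A != 0 and the sign bit of P - 1 computes A == 0; ABS (INT_MIN)
	 overflowing to INT_MIN still produces the correct sign.  */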
4554
4555 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4556 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4557 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4558 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4559 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4560 {
4561 op0 = protect_from_queue (op0, 0);
4562 tem = convert_modes (word_mode, mode, op0, 1);
4563 mode = word_mode;
4564 }
4565
4566 if (tem != 0)
4567 {
4568 if (code == EQ)
4569 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4570 0, OPTAB_WIDEN);
4571 else
4572 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4573 }
4574
4575 /* If we couldn't do it that way, for NE we can "or" the two's complement
4576 of the value with itself. For EQ, we take the one's complement of
4577 that "or", which is an extra insn, so we only handle EQ if branches
4578 are expensive. */
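      /* Added illustration, not in the original source: -A | A has the
	 sign bit set exactly when A != 0, e.g. -5 | 5 < 0 while
	 0 | 0 == 0.  */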
4579
4580 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4581 {
4582 if (rtx_equal_p (subtarget, op0))
4583 subtarget = 0;
4584
4585 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4586 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4587 OPTAB_WIDEN);
4588
4589 if (tem && code == EQ)
4590 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4591 }
4592 }
4593
4594 if (tem && normalizep)
4595 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4596 size_int (GET_MODE_BITSIZE (mode) - 1),
4597 subtarget, normalizep == 1);
4598
4599 if (tem)
4600 {
4601 if (GET_MODE (tem) != target_mode)
4602 {
4603 convert_move (target, tem, 0);
4604 tem = target;
4605 }
4606 else if (!subtarget)
4607 {
4608 emit_move_insn (target, tem);
4609 tem = target;
4610 }
4611 }
4612 else
4613 delete_insns_since (last);
4614
4615 return tem;
4616 }
4617
4618 /* Like emit_store_flag, but always succeeds. */
4619
4620 rtx
4621 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4622 rtx target;
4623 enum rtx_code code;
4624 rtx op0, op1;
4625 enum machine_mode mode;
4626 int unsignedp;
4627 int normalizep;
4628 {
4629 rtx tem, label;
4630
4631 /* First see if emit_store_flag can do the job. */
4632 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4633 if (tem != 0)
4634 return tem;
4635
4636 if (normalizep == 0)
4637 normalizep = 1;
4638
4639 /* If this failed, we have to do this with set/compare/jump/set code. */
4640
4641 if (GET_CODE (target) != REG
4642 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4643 target = gen_reg_rtx (GET_MODE (target));
4644
4645 emit_move_insn (target, const1_rtx);
4646 label = gen_label_rtx ();
4647 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4648 NULL_RTX, label);
4649
4650 emit_move_insn (target, const0_rtx);
4651 emit_label (label);
4652
4653 return target;
4654 }
4655 \f
4656 /* Perform possibly multi-word comparison and conditional jump to LABEL
4657    if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4658
4659 The algorithm is based on the code in expr.c:do_jump.
4660
4661 Note that this does not perform a general comparison. Only variants
4662    generated within expmed.c are correctly handled; others abort (but could
4663 be handled if needed). */
4664
4665 static void
4666 do_cmp_and_jump (arg1, arg2, op, mode, label)
4667 rtx arg1, arg2, label;
4668 enum rtx_code op;
4669 enum machine_mode mode;
4670 {
4671 /* If this mode is an integer too wide to compare properly,
4672 compare word by word. Rely on cse to optimize constant cases. */
4673
4674 if (GET_MODE_CLASS (mode) == MODE_INT
4675 && ! can_compare_p (op, mode, ccp_jump))
4676 {
4677 rtx label2 = gen_label_rtx ();
4678
4679 switch (op)
4680 {
4681 case LTU:
4682 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4683 break;
4684
4685 case LEU:
4686 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4687 break;
4688
4689 case LT:
4690 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4691 break;
4692
4693 case GT:
4694 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4695 break;
4696
4697 case GE:
4698 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4699 break;
4700
4701 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4702 	     those are the only equality operations we do.  */
4703 case EQ:
4704 	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4705 abort ();
4706 do_jump_by_parts_equality_rtx (arg1, label2, label);
4707 break;
4708
4709 case NE:
4710 	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4711 abort ();
4712 do_jump_by_parts_equality_rtx (arg1, label, label2);
4713 break;
4714
4715 default:
4716 abort ();
4717 }
4718
4719 emit_label (label2);
4720 }
4721 else
4722 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
4723 }