1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-codes.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "diagnostic-core.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55 #include "cselib.h"
56 #include "debug.h"
57 #include "dwarf2out.h"
58 #include "sched-int.h"
59 #include "sbitmap.h"
60 #include "fibheap.h"
61 #include "opts.h"
62
63 enum upper_128bits_state
64 {
65 unknown = 0,
66 unused,
67 used
68 };
69
70 typedef struct block_info_def
71 {
72 /* State of the upper 128bits of AVX registers at exit. */
73 enum upper_128bits_state state;
74 /* TRUE if state of the upper 128bits of AVX registers is unchanged
75 in this block. */
76 bool unchanged;
77 /* TRUE if block has been processed. */
78 bool processed;
79 /* TRUE if block has been scanned. */
80 bool scanned;
81 /* Previous state of the upper 128bits of AVX registers at entry. */
82 enum upper_128bits_state prev;
83 } *block_info;
84
85 #define BLOCK_INFO(B) ((block_info) (B)->aux)
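/* Illustrative sketch, not part of the original file: the vzeroupper pass
   below hangs one block_info_def off every basic block through the generic
   aux pointer, so per-block state is reached via BLOCK_INFO, roughly:  */
#if 0
  basic_block bb;

  alloc_aux_for_blocks (sizeof (struct block_info_def));  /* as done in
							      move_or_delete_vzeroupper */
  FOR_EACH_BB (bb)
    if (BLOCK_INFO (bb)->state == used)
      ; /* Upper 128bits of some AVX register may be live at the end of BB.  */
  free_aux_for_blocks ();
#endif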
86
87 enum call_avx256_state
88 {
89 /* Callee returns 256bit AVX register. */
90 callee_return_avx256 = -1,
91 /* Callee returns and passes 256bit AVX register. */
92 callee_return_pass_avx256,
93 /* Callee passes 256bit AVX register. */
94 callee_pass_avx256,
95 /* Callee neither returns nor passes a 256bit AVX register, or no
96 256bit AVX register is involved in the function return.  */
97 call_no_avx256,
98 /* vzeroupper intrinsic. */
99 vzeroupper_intrinsic
100 };
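/* Note added for clarity, not in the original source: these values are
   carried as the constant operand of the vzeroupper UNSPEC_VOLATILE pattern;
   move_or_delete_vzeroupper_2 below recovers them with
   INTVAL (XVECEXP (pat, 0, 0)) and compares the result against
   callee_return_avx256 and friends.  */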
101
102 /* Check if a 256bit AVX register is referenced in stores. */
103
104 static void
105 check_avx256_stores (rtx dest, const_rtx set, void *data)
106 {
107 if ((REG_P (dest)
108 && VALID_AVX256_REG_MODE (GET_MODE (dest)))
109 || (GET_CODE (set) == SET
110 && REG_P (SET_SRC (set))
111 && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
112 {
113 enum upper_128bits_state *state
114 = (enum upper_128bits_state *) data;
115 *state = used;
116 }
117 }
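/* Illustrative sketch, not part of the original file: check_avx256_stores is
   used as a note_stores callback; the scan loop below accumulates its verdict
   into a local state variable along these lines (insn stands for the insn
   currently being scanned):  */
#if 0
  enum upper_128bits_state state = unused;

  note_stores (PATTERN (insn), check_avx256_stores, &state);
  if (state == used)
    ; /* The insn writes, or copies from, a 256bit AVX register.  */
#endif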
118
119 /* Helper function for move_or_delete_vzeroupper_1. Look for vzeroupper
120 in basic block BB. Delete it if upper 128bit AVX registers are
121 unused. If it isn't deleted, move it to just before a jump or call insn.
122
123 STATE is state of the upper 128bits of AVX registers at entry. */
124
125 static void
126 move_or_delete_vzeroupper_2 (basic_block bb,
127 enum upper_128bits_state state)
128 {
129 rtx insn, bb_end;
130 rtx vzeroupper_insn = NULL_RTX;
131 rtx pat;
132 int avx256;
133 bool unchanged;
134
135 if (BLOCK_INFO (bb)->unchanged)
136 {
137 if (dump_file)
138 fprintf (dump_file, " [bb %i] unchanged: upper 128bits: %d\n",
139 bb->index, state);
140
141 BLOCK_INFO (bb)->state = state;
142 return;
143 }
144
145 if (BLOCK_INFO (bb)->scanned && BLOCK_INFO (bb)->prev == state)
146 {
147 if (dump_file)
148 fprintf (dump_file, " [bb %i] scanned: upper 128bits: %d\n",
149 bb->index, BLOCK_INFO (bb)->state);
150 return;
151 }
152
153 BLOCK_INFO (bb)->prev = state;
154
155 if (dump_file)
156 fprintf (dump_file, " [bb %i] entry: upper 128bits: %d\n",
157 bb->index, state);
158
159 unchanged = true;
160
161 /* BB_END changes when it is deleted. */
162 bb_end = BB_END (bb);
163 insn = BB_HEAD (bb);
164 while (insn != bb_end)
165 {
166 insn = NEXT_INSN (insn);
167
168 if (!NONDEBUG_INSN_P (insn))
169 continue;
170
171 /* Move vzeroupper before jump/call. */
172 if (JUMP_P (insn) || CALL_P (insn))
173 {
174 if (!vzeroupper_insn)
175 continue;
176
177 if (PREV_INSN (insn) != vzeroupper_insn)
178 {
179 if (dump_file)
180 {
181 fprintf (dump_file, "Move vzeroupper after:\n");
182 print_rtl_single (dump_file, PREV_INSN (insn));
183 fprintf (dump_file, "before:\n");
184 print_rtl_single (dump_file, insn);
185 }
186 reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
187 PREV_INSN (insn));
188 }
189 vzeroupper_insn = NULL_RTX;
190 continue;
191 }
192
193 pat = PATTERN (insn);
194
195 /* Check insn for vzeroupper intrinsic. */
196 if (GET_CODE (pat) == UNSPEC_VOLATILE
197 && XINT (pat, 1) == UNSPECV_VZEROUPPER)
198 {
199 if (dump_file)
200 {
201 /* Found vzeroupper intrinsic. */
202 fprintf (dump_file, "Found vzeroupper:\n");
203 print_rtl_single (dump_file, insn);
204 }
205 }
206 else
207 {
208 /* Check insn for vzeroall intrinsic. */
209 if (GET_CODE (pat) == PARALLEL
210 && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
211 && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
212 {
213 state = unused;
214 unchanged = false;
215
216 /* Delete pending vzeroupper insertion. */
217 if (vzeroupper_insn)
218 {
219 delete_insn (vzeroupper_insn);
220 vzeroupper_insn = NULL_RTX;
221 }
222 }
223 else if (state != used)
224 {
225 note_stores (pat, check_avx256_stores, &state);
226 if (state == used)
227 unchanged = false;
228 }
229 continue;
230 }
231
232 /* Process vzeroupper intrinsic. */
233 avx256 = INTVAL (XVECEXP (pat, 0, 0));
234
235 if (state == unused)
236 {
237 /* Since the upper 128bits are cleared, the callee cannot be passing
238 a 256bit AVX register. We only need to check whether the callee
239 returns a 256bit AVX register. */
240 if (avx256 == callee_return_avx256)
241 {
242 state = used;
243 unchanged = false;
244 }
245
246 /* Remove unnecessary vzeroupper since upper 128bits are
247 cleared. */
248 if (dump_file)
249 {
250 fprintf (dump_file, "Delete redundant vzeroupper:\n");
251 print_rtl_single (dump_file, insn);
252 }
253 delete_insn (insn);
254 }
255 else
256 {
257 /* Set state to UNUSED if callee doesn't return 256bit AVX
258 register. */
259 if (avx256 != callee_return_pass_avx256)
260 state = unused;
261
262 if (avx256 == callee_return_pass_avx256
263 || avx256 == callee_pass_avx256)
264 {
265 /* Must remove vzeroupper since callee passes in 256bit
266 AVX register. */
267 if (dump_file)
268 {
269 fprintf (dump_file, "Delete callee pass vzeroupper:\n");
270 print_rtl_single (dump_file, insn);
271 }
272 delete_insn (insn);
273 }
274 else
275 {
276 vzeroupper_insn = insn;
277 unchanged = false;
278 }
279 }
280 }
281
282 BLOCK_INFO (bb)->state = state;
283 BLOCK_INFO (bb)->unchanged = unchanged;
284 BLOCK_INFO (bb)->scanned = true;
285
286 if (dump_file)
287 fprintf (dump_file, " [bb %i] exit: %s: upper 128bits: %d\n",
288 bb->index, unchanged ? "unchanged" : "changed",
289 state);
290 }
291
292 /* Helper function for move_or_delete_vzeroupper. Process vzeroupper
293 in BLOCK and check its predecessor blocks. Treat UNKNOWN state
294 as UNUSED if UNKNOWN_IS_UNUSED is true. Return TRUE if the exit
295 state is changed. */
296
297 static bool
298 move_or_delete_vzeroupper_1 (basic_block block, bool unknown_is_unused)
299 {
300 edge e;
301 edge_iterator ei;
302 enum upper_128bits_state state, old_state, new_state;
303 bool seen_unknown;
304
305 if (dump_file)
306 fprintf (dump_file, " Process [bb %i]: status: %d\n",
307 block->index, BLOCK_INFO (block)->processed);
308
309 if (BLOCK_INFO (block)->processed)
310 return false;
311
312 state = unused;
313
314 /* Check all predecessor edges of this block. */
315 seen_unknown = false;
316 FOR_EACH_EDGE (e, ei, block->preds)
317 {
318 if (e->src == block)
319 continue;
320 switch (BLOCK_INFO (e->src)->state)
321 {
322 case unknown:
323 if (!unknown_is_unused)
324 seen_unknown = true;
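/* FALLTHRU */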
325 case unused:
326 break;
327 case used:
328 state = used;
329 goto done;
330 }
331 }
332
333 if (seen_unknown)
334 state = unknown;
335
336 done:
337 old_state = BLOCK_INFO (block)->state;
338 move_or_delete_vzeroupper_2 (block, state);
339 new_state = BLOCK_INFO (block)->state;
340
341 if (state != unknown || new_state == used)
342 BLOCK_INFO (block)->processed = true;
343
344 /* Need to rescan if the upper 128bits of AVX registers are changed
345 to USED at exit. */
346 if (new_state != old_state)
347 {
348 if (new_state == used)
349 cfun->machine->rescan_vzeroupper_p = 1;
350 return true;
351 }
352 else
353 return false;
354 }
355
356 /* Go through the instruction stream looking for vzeroupper. Delete
357 it if upper 128bit AVX registers are unused. If it isn't deleted,
358 move it to just before a jump or call insn. */
359
360 static void
361 move_or_delete_vzeroupper (void)
362 {
363 edge e;
364 edge_iterator ei;
365 basic_block bb;
366 fibheap_t worklist, pending, fibheap_swap;
367 sbitmap visited, in_worklist, in_pending, sbitmap_swap;
368 int *bb_order;
369 int *rc_order;
370 int i;
371
372 /* Set up block info for each basic block. */
373 alloc_aux_for_blocks (sizeof (struct block_info_def));
374
375 /* Process outgoing edges of entry point. */
376 if (dump_file)
377 fprintf (dump_file, "Process outgoing edges of entry point\n");
378
379 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
380 {
381 move_or_delete_vzeroupper_2 (e->dest,
382 cfun->machine->caller_pass_avx256_p
383 ? used : unused);
384 BLOCK_INFO (e->dest)->processed = true;
385 }
386
387 /* Compute the reverse completion order of a depth-first search of the CFG
388 so that the data-flow analysis runs faster. */
389 rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
390 bb_order = XNEWVEC (int, last_basic_block);
391 pre_and_rev_post_order_compute (NULL, rc_order, false);
392 for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
393 bb_order[rc_order[i]] = i;
394 free (rc_order);
395
396 worklist = fibheap_new ();
397 pending = fibheap_new ();
398 visited = sbitmap_alloc (last_basic_block);
399 in_worklist = sbitmap_alloc (last_basic_block);
400 in_pending = sbitmap_alloc (last_basic_block);
401 sbitmap_zero (in_worklist);
402
403 /* Don't check outgoing edges of entry point. */
404 sbitmap_ones (in_pending);
405 FOR_EACH_BB (bb)
406 if (BLOCK_INFO (bb)->processed)
407 RESET_BIT (in_pending, bb->index);
408 else
409 {
410 move_or_delete_vzeroupper_1 (bb, false);
411 fibheap_insert (pending, bb_order[bb->index], bb);
412 }
413
414 if (dump_file)
415 fprintf (dump_file, "Check remaining basic blocks\n");
416
417 while (!fibheap_empty (pending))
418 {
419 fibheap_swap = pending;
420 pending = worklist;
421 worklist = fibheap_swap;
422 sbitmap_swap = in_pending;
423 in_pending = in_worklist;
424 in_worklist = sbitmap_swap;
425
426 sbitmap_zero (visited);
427
428 cfun->machine->rescan_vzeroupper_p = 0;
429
430 while (!fibheap_empty (worklist))
431 {
432 bb = (basic_block) fibheap_extract_min (worklist);
433 RESET_BIT (in_worklist, bb->index);
434 gcc_assert (!TEST_BIT (visited, bb->index));
435 if (!TEST_BIT (visited, bb->index))
436 {
437 edge_iterator ei;
438
439 SET_BIT (visited, bb->index);
440
441 if (move_or_delete_vzeroupper_1 (bb, false))
442 FOR_EACH_EDGE (e, ei, bb->succs)
443 {
444 if (e->dest == EXIT_BLOCK_PTR
445 || BLOCK_INFO (e->dest)->processed)
446 continue;
447
448 if (TEST_BIT (visited, e->dest->index))
449 {
450 if (!TEST_BIT (in_pending, e->dest->index))
451 {
452 /* Send E->DEST to next round. */
453 SET_BIT (in_pending, e->dest->index);
454 fibheap_insert (pending,
455 bb_order[e->dest->index],
456 e->dest);
457 }
458 }
459 else if (!TEST_BIT (in_worklist, e->dest->index))
460 {
461 /* Add E->DEST to current round. */
462 SET_BIT (in_worklist, e->dest->index);
463 fibheap_insert (worklist, bb_order[e->dest->index],
464 e->dest);
465 }
466 }
467 }
468 }
469
470 if (!cfun->machine->rescan_vzeroupper_p)
471 break;
472 }
473
474 free (bb_order);
475 fibheap_delete (worklist);
476 fibheap_delete (pending);
477 sbitmap_free (visited);
478 sbitmap_free (in_worklist);
479 sbitmap_free (in_pending);
480
481 if (dump_file)
482 fprintf (dump_file, "Process remaining basic blocks\n");
483
484 FOR_EACH_BB (bb)
485 move_or_delete_vzeroupper_1 (bb, true);
486
487 free_aux_for_blocks ();
488 }
489
490 static rtx legitimize_dllimport_symbol (rtx, bool);
491
492 #ifndef CHECK_STACK_LIMIT
493 #define CHECK_STACK_LIMIT (-1)
494 #endif
495
496 /* Return index of given mode in mult and division cost tables. */
497 #define MODE_INDEX(mode) \
498 ((mode) == QImode ? 0 \
499 : (mode) == HImode ? 1 \
500 : (mode) == SImode ? 2 \
501 : (mode) == DImode ? 3 \
502 : 4)
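/* Illustrative sketch, not part of the original file: MODE_INDEX picks the
   QImode/HImode/SImode/DImode/"other" slot of the per-mode arrays in struct
   processor_costs, e.g. (hypothetical locals; the mult_init[] and divide[]
   field names are assumed from i386.h):  */
#if 0
  int di_mul_cost = ix86_cost->mult_init[MODE_INDEX (DImode)];	/* slot 3 */
  int xf_div_cost = ix86_cost->divide[MODE_INDEX (XFmode)];	/* slot 4, "other" */
#endif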
503
504 /* Processor costs (relative to an add) */
505 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
506 #define COSTS_N_BYTES(N) ((N) * 2)
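/* Worked example, not part of the original file: with the assumption above
   (COSTS_N_INSNS (N) == (N) * 4) and a 2 byte addition, the add entry in the
   size table below is COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), i.e. a
   2 byte add is weighted like one insn on the speed-tuning scale.  */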
507
508 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
509
510 const
511 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
512 COSTS_N_BYTES (2), /* cost of an add instruction */
513 COSTS_N_BYTES (3), /* cost of a lea instruction */
514 COSTS_N_BYTES (2), /* variable shift costs */
515 COSTS_N_BYTES (3), /* constant shift costs */
516 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
517 COSTS_N_BYTES (3), /* HI */
518 COSTS_N_BYTES (3), /* SI */
519 COSTS_N_BYTES (3), /* DI */
520 COSTS_N_BYTES (5)}, /* other */
521 0, /* cost of multiply per each bit set */
522 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
523 COSTS_N_BYTES (3), /* HI */
524 COSTS_N_BYTES (3), /* SI */
525 COSTS_N_BYTES (3), /* DI */
526 COSTS_N_BYTES (5)}, /* other */
527 COSTS_N_BYTES (3), /* cost of movsx */
528 COSTS_N_BYTES (3), /* cost of movzx */
529 0, /* "large" insn */
530 2, /* MOVE_RATIO */
531 2, /* cost for loading QImode using movzbl */
532 {2, 2, 2}, /* cost of loading integer registers
533 in QImode, HImode and SImode.
534 Relative to reg-reg move (2). */
535 {2, 2, 2}, /* cost of storing integer registers */
536 2, /* cost of reg,reg fld/fst */
537 {2, 2, 2}, /* cost of loading fp registers
538 in SFmode, DFmode and XFmode */
539 {2, 2, 2}, /* cost of storing fp registers
540 in SFmode, DFmode and XFmode */
541 3, /* cost of moving MMX register */
542 {3, 3}, /* cost of loading MMX registers
543 in SImode and DImode */
544 {3, 3}, /* cost of storing MMX registers
545 in SImode and DImode */
546 3, /* cost of moving SSE register */
547 {3, 3, 3}, /* cost of loading SSE registers
548 in SImode, DImode and TImode */
549 {3, 3, 3}, /* cost of storing SSE registers
550 in SImode, DImode and TImode */
551 3, /* MMX or SSE register to integer */
552 0, /* size of l1 cache */
553 0, /* size of l2 cache */
554 0, /* size of prefetch block */
555 0, /* number of parallel prefetches */
556 2, /* Branch cost */
557 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
558 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
559 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
560 COSTS_N_BYTES (2), /* cost of FABS instruction. */
561 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
562 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
563 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
564 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
565 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
566 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
567 1, /* scalar_stmt_cost. */
568 1, /* scalar_load_cost. */
569 1, /* scalar_store_cost. */
570 1, /* vec_stmt_cost. */
571 1, /* vec_to_scalar_cost. */
572 1, /* scalar_to_vec_cost. */
573 1, /* vec_align_load_cost. */
574 1, /* vec_unalign_load_cost. */
575 1, /* vec_store_cost. */
576 1, /* cond_taken_branch_cost. */
577 1, /* cond_not_taken_branch_cost. */
578 };
579
580 /* Processor costs (relative to an add) */
581 static const
582 struct processor_costs i386_cost = { /* 386 specific costs */
583 COSTS_N_INSNS (1), /* cost of an add instruction */
584 COSTS_N_INSNS (1), /* cost of a lea instruction */
585 COSTS_N_INSNS (3), /* variable shift costs */
586 COSTS_N_INSNS (2), /* constant shift costs */
587 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
588 COSTS_N_INSNS (6), /* HI */
589 COSTS_N_INSNS (6), /* SI */
590 COSTS_N_INSNS (6), /* DI */
591 COSTS_N_INSNS (6)}, /* other */
592 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
593 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
594 COSTS_N_INSNS (23), /* HI */
595 COSTS_N_INSNS (23), /* SI */
596 COSTS_N_INSNS (23), /* DI */
597 COSTS_N_INSNS (23)}, /* other */
598 COSTS_N_INSNS (3), /* cost of movsx */
599 COSTS_N_INSNS (2), /* cost of movzx */
600 15, /* "large" insn */
601 3, /* MOVE_RATIO */
602 4, /* cost for loading QImode using movzbl */
603 {2, 4, 2}, /* cost of loading integer registers
604 in QImode, HImode and SImode.
605 Relative to reg-reg move (2). */
606 {2, 4, 2}, /* cost of storing integer registers */
607 2, /* cost of reg,reg fld/fst */
608 {8, 8, 8}, /* cost of loading fp registers
609 in SFmode, DFmode and XFmode */
610 {8, 8, 8}, /* cost of storing fp registers
611 in SFmode, DFmode and XFmode */
612 2, /* cost of moving MMX register */
613 {4, 8}, /* cost of loading MMX registers
614 in SImode and DImode */
615 {4, 8}, /* cost of storing MMX registers
616 in SImode and DImode */
617 2, /* cost of moving SSE register */
618 {4, 8, 16}, /* cost of loading SSE registers
619 in SImode, DImode and TImode */
620 {4, 8, 16}, /* cost of storing SSE registers
621 in SImode, DImode and TImode */
622 3, /* MMX or SSE register to integer */
623 0, /* size of l1 cache */
624 0, /* size of l2 cache */
625 0, /* size of prefetch block */
626 0, /* number of parallel prefetches */
627 1, /* Branch cost */
628 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
629 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
630 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
631 COSTS_N_INSNS (22), /* cost of FABS instruction. */
632 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
633 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
634 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
635 DUMMY_STRINGOP_ALGS},
636 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
637 DUMMY_STRINGOP_ALGS},
638 1, /* scalar_stmt_cost. */
639 1, /* scalar_load_cost. */
640 1, /* scalar_store_cost. */
641 1, /* vec_stmt_cost. */
642 1, /* vec_to_scalar_cost. */
643 1, /* scalar_to_vec_cost. */
644 1, /* vec_align_load_cost. */
645 2, /* vec_unalign_load_cost. */
646 1, /* vec_store_cost. */
647 3, /* cond_taken_branch_cost. */
648 1, /* cond_not_taken_branch_cost. */
649 };
650
651 static const
652 struct processor_costs i486_cost = { /* 486 specific costs */
653 COSTS_N_INSNS (1), /* cost of an add instruction */
654 COSTS_N_INSNS (1), /* cost of a lea instruction */
655 COSTS_N_INSNS (3), /* variable shift costs */
656 COSTS_N_INSNS (2), /* constant shift costs */
657 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
658 COSTS_N_INSNS (12), /* HI */
659 COSTS_N_INSNS (12), /* SI */
660 COSTS_N_INSNS (12), /* DI */
661 COSTS_N_INSNS (12)}, /* other */
662 1, /* cost of multiply per each bit set */
663 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
664 COSTS_N_INSNS (40), /* HI */
665 COSTS_N_INSNS (40), /* SI */
666 COSTS_N_INSNS (40), /* DI */
667 COSTS_N_INSNS (40)}, /* other */
668 COSTS_N_INSNS (3), /* cost of movsx */
669 COSTS_N_INSNS (2), /* cost of movzx */
670 15, /* "large" insn */
671 3, /* MOVE_RATIO */
672 4, /* cost for loading QImode using movzbl */
673 {2, 4, 2}, /* cost of loading integer registers
674 in QImode, HImode and SImode.
675 Relative to reg-reg move (2). */
676 {2, 4, 2}, /* cost of storing integer registers */
677 2, /* cost of reg,reg fld/fst */
678 {8, 8, 8}, /* cost of loading fp registers
679 in SFmode, DFmode and XFmode */
680 {8, 8, 8}, /* cost of storing fp registers
681 in SFmode, DFmode and XFmode */
682 2, /* cost of moving MMX register */
683 {4, 8}, /* cost of loading MMX registers
684 in SImode and DImode */
685 {4, 8}, /* cost of storing MMX registers
686 in SImode and DImode */
687 2, /* cost of moving SSE register */
688 {4, 8, 16}, /* cost of loading SSE registers
689 in SImode, DImode and TImode */
690 {4, 8, 16}, /* cost of storing SSE registers
691 in SImode, DImode and TImode */
692 3, /* MMX or SSE register to integer */
693 4, /* size of l1 cache. 486 has 8kB cache
694 shared for code and data, so 4kB is
695 not really precise. */
696 4, /* size of l2 cache */
697 0, /* size of prefetch block */
698 0, /* number of parallel prefetches */
699 1, /* Branch cost */
700 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
701 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
702 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
703 COSTS_N_INSNS (3), /* cost of FABS instruction. */
704 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
705 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
706 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
707 DUMMY_STRINGOP_ALGS},
708 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
709 DUMMY_STRINGOP_ALGS},
710 1, /* scalar_stmt_cost. */
711 1, /* scalar_load_cost. */
712 1, /* scalar_store_cost. */
713 1, /* vec_stmt_cost. */
714 1, /* vec_to_scalar_cost. */
715 1, /* scalar_to_vec_cost. */
716 1, /* vec_align_load_cost. */
717 2, /* vec_unalign_load_cost. */
718 1, /* vec_store_cost. */
719 3, /* cond_taken_branch_cost. */
720 1, /* cond_not_taken_branch_cost. */
721 };
722
723 static const
724 struct processor_costs pentium_cost = {
725 COSTS_N_INSNS (1), /* cost of an add instruction */
726 COSTS_N_INSNS (1), /* cost of a lea instruction */
727 COSTS_N_INSNS (4), /* variable shift costs */
728 COSTS_N_INSNS (1), /* constant shift costs */
729 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
730 COSTS_N_INSNS (11), /* HI */
731 COSTS_N_INSNS (11), /* SI */
732 COSTS_N_INSNS (11), /* DI */
733 COSTS_N_INSNS (11)}, /* other */
734 0, /* cost of multiply per each bit set */
735 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
736 COSTS_N_INSNS (25), /* HI */
737 COSTS_N_INSNS (25), /* SI */
738 COSTS_N_INSNS (25), /* DI */
739 COSTS_N_INSNS (25)}, /* other */
740 COSTS_N_INSNS (3), /* cost of movsx */
741 COSTS_N_INSNS (2), /* cost of movzx */
742 8, /* "large" insn */
743 6, /* MOVE_RATIO */
744 6, /* cost for loading QImode using movzbl */
745 {2, 4, 2}, /* cost of loading integer registers
746 in QImode, HImode and SImode.
747 Relative to reg-reg move (2). */
748 {2, 4, 2}, /* cost of storing integer registers */
749 2, /* cost of reg,reg fld/fst */
750 {2, 2, 6}, /* cost of loading fp registers
751 in SFmode, DFmode and XFmode */
752 {4, 4, 6}, /* cost of storing fp registers
753 in SFmode, DFmode and XFmode */
754 8, /* cost of moving MMX register */
755 {8, 8}, /* cost of loading MMX registers
756 in SImode and DImode */
757 {8, 8}, /* cost of storing MMX registers
758 in SImode and DImode */
759 2, /* cost of moving SSE register */
760 {4, 8, 16}, /* cost of loading SSE registers
761 in SImode, DImode and TImode */
762 {4, 8, 16}, /* cost of storing SSE registers
763 in SImode, DImode and TImode */
764 3, /* MMX or SSE register to integer */
765 8, /* size of l1 cache. */
766 8, /* size of l2 cache */
767 0, /* size of prefetch block */
768 0, /* number of parallel prefetches */
769 2, /* Branch cost */
770 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
771 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
772 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
773 COSTS_N_INSNS (1), /* cost of FABS instruction. */
774 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
775 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
776 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
777 DUMMY_STRINGOP_ALGS},
778 {{libcall, {{-1, rep_prefix_4_byte}}},
779 DUMMY_STRINGOP_ALGS},
780 1, /* scalar_stmt_cost. */
781 1, /* scalar_load_cost. */
782 1, /* scalar_store_cost. */
783 1, /* vec_stmt_cost. */
784 1, /* vec_to_scalar_cost. */
785 1, /* scalar_to_vec_cost. */
786 1, /* vec_align_load_cost. */
787 2, /* vec_unalign_load_cost. */
788 1, /* vec_store_cost. */
789 3, /* cond_taken_branch_cost. */
790 1, /* cond_not_taken_branch_cost. */
791 };
792
793 static const
794 struct processor_costs pentiumpro_cost = {
795 COSTS_N_INSNS (1), /* cost of an add instruction */
796 COSTS_N_INSNS (1), /* cost of a lea instruction */
797 COSTS_N_INSNS (1), /* variable shift costs */
798 COSTS_N_INSNS (1), /* constant shift costs */
799 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
800 COSTS_N_INSNS (4), /* HI */
801 COSTS_N_INSNS (4), /* SI */
802 COSTS_N_INSNS (4), /* DI */
803 COSTS_N_INSNS (4)}, /* other */
804 0, /* cost of multiply per each bit set */
805 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
806 COSTS_N_INSNS (17), /* HI */
807 COSTS_N_INSNS (17), /* SI */
808 COSTS_N_INSNS (17), /* DI */
809 COSTS_N_INSNS (17)}, /* other */
810 COSTS_N_INSNS (1), /* cost of movsx */
811 COSTS_N_INSNS (1), /* cost of movzx */
812 8, /* "large" insn */
813 6, /* MOVE_RATIO */
814 2, /* cost for loading QImode using movzbl */
815 {4, 4, 4}, /* cost of loading integer registers
816 in QImode, HImode and SImode.
817 Relative to reg-reg move (2). */
818 {2, 2, 2}, /* cost of storing integer registers */
819 2, /* cost of reg,reg fld/fst */
820 {2, 2, 6}, /* cost of loading fp registers
821 in SFmode, DFmode and XFmode */
822 {4, 4, 6}, /* cost of storing fp registers
823 in SFmode, DFmode and XFmode */
824 2, /* cost of moving MMX register */
825 {2, 2}, /* cost of loading MMX registers
826 in SImode and DImode */
827 {2, 2}, /* cost of storing MMX registers
828 in SImode and DImode */
829 2, /* cost of moving SSE register */
830 {2, 2, 8}, /* cost of loading SSE registers
831 in SImode, DImode and TImode */
832 {2, 2, 8}, /* cost of storing SSE registers
833 in SImode, DImode and TImode */
834 3, /* MMX or SSE register to integer */
835 8, /* size of l1 cache. */
836 256, /* size of l2 cache */
837 32, /* size of prefetch block */
838 6, /* number of parallel prefetches */
839 2, /* Branch cost */
840 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
841 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
842 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
843 COSTS_N_INSNS (2), /* cost of FABS instruction. */
844 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
845 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
846 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
847 (we ensure the alignment). For small blocks an inline loop is still a
848 noticeable win, while for bigger blocks either rep movsl or rep movsb is
849 the way to go. Rep movsb apparently has a more expensive startup time in the CPU,
850 but after 4K the difference is down in the noise. */
851 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
852 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
853 DUMMY_STRINGOP_ALGS},
854 {{rep_prefix_4_byte, {{1024, unrolled_loop},
855 {8192, rep_prefix_4_byte}, {-1, libcall}}},
856 DUMMY_STRINGOP_ALGS},
857 1, /* scalar_stmt_cost. */
858 1, /* scalar_load_cost. */
859 1, /* scalar_store_cost. */
860 1, /* vec_stmt_cost. */
861 1, /* vec_to_scalar_cost. */
862 1, /* scalar_to_vec_cost. */
863 1, /* vec_align_load_cost. */
864 2, /* vec_unalign_load_cost. */
865 1, /* vec_store_cost. */
866 3, /* cond_taken_branch_cost. */
867 1, /* cond_not_taken_branch_cost. */
868 };
869
870 static const
871 struct processor_costs geode_cost = {
872 COSTS_N_INSNS (1), /* cost of an add instruction */
873 COSTS_N_INSNS (1), /* cost of a lea instruction */
874 COSTS_N_INSNS (2), /* variable shift costs */
875 COSTS_N_INSNS (1), /* constant shift costs */
876 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
877 COSTS_N_INSNS (4), /* HI */
878 COSTS_N_INSNS (7), /* SI */
879 COSTS_N_INSNS (7), /* DI */
880 COSTS_N_INSNS (7)}, /* other */
881 0, /* cost of multiply per each bit set */
882 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
883 COSTS_N_INSNS (23), /* HI */
884 COSTS_N_INSNS (39), /* SI */
885 COSTS_N_INSNS (39), /* DI */
886 COSTS_N_INSNS (39)}, /* other */
887 COSTS_N_INSNS (1), /* cost of movsx */
888 COSTS_N_INSNS (1), /* cost of movzx */
889 8, /* "large" insn */
890 4, /* MOVE_RATIO */
891 1, /* cost for loading QImode using movzbl */
892 {1, 1, 1}, /* cost of loading integer registers
893 in QImode, HImode and SImode.
894 Relative to reg-reg move (2). */
895 {1, 1, 1}, /* cost of storing integer registers */
896 1, /* cost of reg,reg fld/fst */
897 {1, 1, 1}, /* cost of loading fp registers
898 in SFmode, DFmode and XFmode */
899 {4, 6, 6}, /* cost of storing fp registers
900 in SFmode, DFmode and XFmode */
901
902 1, /* cost of moving MMX register */
903 {1, 1}, /* cost of loading MMX registers
904 in SImode and DImode */
905 {1, 1}, /* cost of storing MMX registers
906 in SImode and DImode */
907 1, /* cost of moving SSE register */
908 {1, 1, 1}, /* cost of loading SSE registers
909 in SImode, DImode and TImode */
910 {1, 1, 1}, /* cost of storing SSE registers
911 in SImode, DImode and TImode */
912 1, /* MMX or SSE register to integer */
913 64, /* size of l1 cache. */
914 128, /* size of l2 cache. */
915 32, /* size of prefetch block */
916 1, /* number of parallel prefetches */
917 1, /* Branch cost */
918 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
919 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
920 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
921 COSTS_N_INSNS (1), /* cost of FABS instruction. */
922 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
923 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
924 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
925 DUMMY_STRINGOP_ALGS},
926 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
927 DUMMY_STRINGOP_ALGS},
928 1, /* scalar_stmt_cost. */
929 1, /* scalar_load_cost. */
930 1, /* scalar_store_cost. */
931 1, /* vec_stmt_cost. */
932 1, /* vec_to_scalar_cost. */
933 1, /* scalar_to_vec_cost. */
934 1, /* vec_align_load_cost. */
935 2, /* vec_unalign_load_cost. */
936 1, /* vec_store_cost. */
937 3, /* cond_taken_branch_cost. */
938 1, /* cond_not_taken_branch_cost. */
939 };
940
941 static const
942 struct processor_costs k6_cost = {
943 COSTS_N_INSNS (1), /* cost of an add instruction */
944 COSTS_N_INSNS (2), /* cost of a lea instruction */
945 COSTS_N_INSNS (1), /* variable shift costs */
946 COSTS_N_INSNS (1), /* constant shift costs */
947 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
948 COSTS_N_INSNS (3), /* HI */
949 COSTS_N_INSNS (3), /* SI */
950 COSTS_N_INSNS (3), /* DI */
951 COSTS_N_INSNS (3)}, /* other */
952 0, /* cost of multiply per each bit set */
953 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
954 COSTS_N_INSNS (18), /* HI */
955 COSTS_N_INSNS (18), /* SI */
956 COSTS_N_INSNS (18), /* DI */
957 COSTS_N_INSNS (18)}, /* other */
958 COSTS_N_INSNS (2), /* cost of movsx */
959 COSTS_N_INSNS (2), /* cost of movzx */
960 8, /* "large" insn */
961 4, /* MOVE_RATIO */
962 3, /* cost for loading QImode using movzbl */
963 {4, 5, 4}, /* cost of loading integer registers
964 in QImode, HImode and SImode.
965 Relative to reg-reg move (2). */
966 {2, 3, 2}, /* cost of storing integer registers */
967 4, /* cost of reg,reg fld/fst */
968 {6, 6, 6}, /* cost of loading fp registers
969 in SFmode, DFmode and XFmode */
970 {4, 4, 4}, /* cost of storing fp registers
971 in SFmode, DFmode and XFmode */
972 2, /* cost of moving MMX register */
973 {2, 2}, /* cost of loading MMX registers
974 in SImode and DImode */
975 {2, 2}, /* cost of storing MMX registers
976 in SImode and DImode */
977 2, /* cost of moving SSE register */
978 {2, 2, 8}, /* cost of loading SSE registers
979 in SImode, DImode and TImode */
980 {2, 2, 8}, /* cost of storing SSE registers
981 in SImode, DImode and TImode */
982 6, /* MMX or SSE register to integer */
983 32, /* size of l1 cache. */
984 32, /* size of l2 cache. Some models
985 have integrated l2 cache, but
986 optimizing for k6 is not important
987 enough to worry about that. */
988 32, /* size of prefetch block */
989 1, /* number of parallel prefetches */
990 1, /* Branch cost */
991 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
992 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
993 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
994 COSTS_N_INSNS (2), /* cost of FABS instruction. */
995 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
996 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
997 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
998 DUMMY_STRINGOP_ALGS},
999 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
1000 DUMMY_STRINGOP_ALGS},
1001 1, /* scalar_stmt_cost. */
1002 1, /* scalar_load_cost. */
1003 1, /* scalar_store_cost. */
1004 1, /* vec_stmt_cost. */
1005 1, /* vec_to_scalar_cost. */
1006 1, /* scalar_to_vec_cost. */
1007 1, /* vec_align_load_cost. */
1008 2, /* vec_unalign_load_cost. */
1009 1, /* vec_store_cost. */
1010 3, /* cond_taken_branch_cost. */
1011 1, /* cond_not_taken_branch_cost. */
1012 };
1013
1014 static const
1015 struct processor_costs athlon_cost = {
1016 COSTS_N_INSNS (1), /* cost of an add instruction */
1017 COSTS_N_INSNS (2), /* cost of a lea instruction */
1018 COSTS_N_INSNS (1), /* variable shift costs */
1019 COSTS_N_INSNS (1), /* constant shift costs */
1020 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
1021 COSTS_N_INSNS (5), /* HI */
1022 COSTS_N_INSNS (5), /* SI */
1023 COSTS_N_INSNS (5), /* DI */
1024 COSTS_N_INSNS (5)}, /* other */
1025 0, /* cost of multiply per each bit set */
1026 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1027 COSTS_N_INSNS (26), /* HI */
1028 COSTS_N_INSNS (42), /* SI */
1029 COSTS_N_INSNS (74), /* DI */
1030 COSTS_N_INSNS (74)}, /* other */
1031 COSTS_N_INSNS (1), /* cost of movsx */
1032 COSTS_N_INSNS (1), /* cost of movzx */
1033 8, /* "large" insn */
1034 9, /* MOVE_RATIO */
1035 4, /* cost for loading QImode using movzbl */
1036 {3, 4, 3}, /* cost of loading integer registers
1037 in QImode, HImode and SImode.
1038 Relative to reg-reg move (2). */
1039 {3, 4, 3}, /* cost of storing integer registers */
1040 4, /* cost of reg,reg fld/fst */
1041 {4, 4, 12}, /* cost of loading fp registers
1042 in SFmode, DFmode and XFmode */
1043 {6, 6, 8}, /* cost of storing fp registers
1044 in SFmode, DFmode and XFmode */
1045 2, /* cost of moving MMX register */
1046 {4, 4}, /* cost of loading MMX registers
1047 in SImode and DImode */
1048 {4, 4}, /* cost of storing MMX registers
1049 in SImode and DImode */
1050 2, /* cost of moving SSE register */
1051 {4, 4, 6}, /* cost of loading SSE registers
1052 in SImode, DImode and TImode */
1053 {4, 4, 5}, /* cost of storing SSE registers
1054 in SImode, DImode and TImode */
1055 5, /* MMX or SSE register to integer */
1056 64, /* size of l1 cache. */
1057 256, /* size of l2 cache. */
1058 64, /* size of prefetch block */
1059 6, /* number of parallel prefetches */
1060 5, /* Branch cost */
1061 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
1062 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
1063 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
1064 COSTS_N_INSNS (2), /* cost of FABS instruction. */
1065 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
1066 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
1067 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
1068 than K8 does. Alignment becomes important after 8 bytes for memcpy and
1069 128 bytes for memset. */
1070 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
1071 DUMMY_STRINGOP_ALGS},
1072 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
1073 DUMMY_STRINGOP_ALGS},
1074 1, /* scalar_stmt_cost. */
1075 1, /* scalar_load_cost. */
1076 1, /* scalar_store_cost. */
1077 1, /* vec_stmt_cost. */
1078 1, /* vec_to_scalar_cost. */
1079 1, /* scalar_to_vec_cost. */
1080 1, /* vec_align_load_cost. */
1081 2, /* vec_unalign_load_cost. */
1082 1, /* vec_store_cost. */
1083 3, /* cond_taken_branch_cost. */
1084 1, /* cond_not_taken_branch_cost. */
1085 };
1086
1087 static const
1088 struct processor_costs k8_cost = {
1089 COSTS_N_INSNS (1), /* cost of an add instruction */
1090 COSTS_N_INSNS (2), /* cost of a lea instruction */
1091 COSTS_N_INSNS (1), /* variable shift costs */
1092 COSTS_N_INSNS (1), /* constant shift costs */
1093 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1094 COSTS_N_INSNS (4), /* HI */
1095 COSTS_N_INSNS (3), /* SI */
1096 COSTS_N_INSNS (4), /* DI */
1097 COSTS_N_INSNS (5)}, /* other */
1098 0, /* cost of multiply per each bit set */
1099 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1100 COSTS_N_INSNS (26), /* HI */
1101 COSTS_N_INSNS (42), /* SI */
1102 COSTS_N_INSNS (74), /* DI */
1103 COSTS_N_INSNS (74)}, /* other */
1104 COSTS_N_INSNS (1), /* cost of movsx */
1105 COSTS_N_INSNS (1), /* cost of movzx */
1106 8, /* "large" insn */
1107 9, /* MOVE_RATIO */
1108 4, /* cost for loading QImode using movzbl */
1109 {3, 4, 3}, /* cost of loading integer registers
1110 in QImode, HImode and SImode.
1111 Relative to reg-reg move (2). */
1112 {3, 4, 3}, /* cost of storing integer registers */
1113 4, /* cost of reg,reg fld/fst */
1114 {4, 4, 12}, /* cost of loading fp registers
1115 in SFmode, DFmode and XFmode */
1116 {6, 6, 8}, /* cost of storing fp registers
1117 in SFmode, DFmode and XFmode */
1118 2, /* cost of moving MMX register */
1119 {3, 3}, /* cost of loading MMX registers
1120 in SImode and DImode */
1121 {4, 4}, /* cost of storing MMX registers
1122 in SImode and DImode */
1123 2, /* cost of moving SSE register */
1124 {4, 3, 6}, /* cost of loading SSE registers
1125 in SImode, DImode and TImode */
1126 {4, 4, 5}, /* cost of storing SSE registers
1127 in SImode, DImode and TImode */
1128 5, /* MMX or SSE register to integer */
1129 64, /* size of l1 cache. */
1130 512, /* size of l2 cache. */
1131 64, /* size of prefetch block */
1132 /* New AMD processors never drop prefetches; if they cannot be performed
1133 immediately, they are queued. We set the number of simultaneous prefetches
1134 to a large constant to reflect this (it is probably not a good idea to
1135 leave the number of prefetches completely unlimited, as their execution
1136 also takes some time). */
1137 100, /* number of parallel prefetches */
1138 3, /* Branch cost */
1139 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
1140 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
1141 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
1142 COSTS_N_INSNS (2), /* cost of FABS instruction. */
1143 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
1144 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
1145 /* K8 has an optimized REP instruction for medium-sized blocks, but for very
1146 small blocks it is better to use a loop. For large blocks, a libcall can
1147 do non-temporal accesses and beat inline copying considerably. */
1148 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
1149 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1150 {{libcall, {{8, loop}, {24, unrolled_loop},
1151 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1152 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1153 4, /* scalar_stmt_cost. */
1154 2, /* scalar_load_cost. */
1155 2, /* scalar_store_cost. */
1156 5, /* vec_stmt_cost. */
1157 0, /* vec_to_scalar_cost. */
1158 2, /* scalar_to_vec_cost. */
1159 2, /* vec_align_load_cost. */
1160 3, /* vec_unalign_load_cost. */
1161 3, /* vec_store_cost. */
1162 3, /* cond_taken_branch_cost. */
1163 2, /* cond_not_taken_branch_cost. */
1164 };
1165
1166 struct processor_costs amdfam10_cost = {
1167 COSTS_N_INSNS (1), /* cost of an add instruction */
1168 COSTS_N_INSNS (2), /* cost of a lea instruction */
1169 COSTS_N_INSNS (1), /* variable shift costs */
1170 COSTS_N_INSNS (1), /* constant shift costs */
1171 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1172 COSTS_N_INSNS (4), /* HI */
1173 COSTS_N_INSNS (3), /* SI */
1174 COSTS_N_INSNS (4), /* DI */
1175 COSTS_N_INSNS (5)}, /* other */
1176 0, /* cost of multiply per each bit set */
1177 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
1178 COSTS_N_INSNS (35), /* HI */
1179 COSTS_N_INSNS (51), /* SI */
1180 COSTS_N_INSNS (83), /* DI */
1181 COSTS_N_INSNS (83)}, /* other */
1182 COSTS_N_INSNS (1), /* cost of movsx */
1183 COSTS_N_INSNS (1), /* cost of movzx */
1184 8, /* "large" insn */
1185 9, /* MOVE_RATIO */
1186 4, /* cost for loading QImode using movzbl */
1187 {3, 4, 3}, /* cost of loading integer registers
1188 in QImode, HImode and SImode.
1189 Relative to reg-reg move (2). */
1190 {3, 4, 3}, /* cost of storing integer registers */
1191 4, /* cost of reg,reg fld/fst */
1192 {4, 4, 12}, /* cost of loading fp registers
1193 in SFmode, DFmode and XFmode */
1194 {6, 6, 8}, /* cost of storing fp registers
1195 in SFmode, DFmode and XFmode */
1196 2, /* cost of moving MMX register */
1197 {3, 3}, /* cost of loading MMX registers
1198 in SImode and DImode */
1199 {4, 4}, /* cost of storing MMX registers
1200 in SImode and DImode */
1201 2, /* cost of moving SSE register */
1202 {4, 4, 3}, /* cost of loading SSE registers
1203 in SImode, DImode and TImode */
1204 {4, 4, 5}, /* cost of storing SSE registers
1205 in SImode, DImode and TImode */
1206 3, /* MMX or SSE register to integer */
1207 /* On K8:
1208 MOVD reg64, xmmreg Double FSTORE 4
1209 MOVD reg32, xmmreg Double FSTORE 4
1210 On AMDFAM10:
1211 MOVD reg64, xmmreg Double FADD 3
1212 1/1 1/1
1213 MOVD reg32, xmmreg Double FADD 3
1214 1/1 1/1 */
1215 64, /* size of l1 cache. */
1216 512, /* size of l2 cache. */
1217 64, /* size of prefetch block */
1218 /* New AMD processors never drop prefetches; if they cannot be performed
1219 immediately, they are queued. We set the number of simultaneous prefetches
1220 to a large constant to reflect this (it is probably not a good idea to
1221 leave the number of prefetches completely unlimited, as their execution
1222 also takes some time). */
1223 100, /* number of parallel prefetches */
1224 2, /* Branch cost */
1225 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
1226 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
1227 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
1228 COSTS_N_INSNS (2), /* cost of FABS instruction. */
1229 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
1230 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
1231
1232 /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
1233 very small blocks it is better to use a loop. For large blocks, a libcall can
1234 do non-temporal accesses and beat inline copying considerably. */
1235 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
1236 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1237 {{libcall, {{8, loop}, {24, unrolled_loop},
1238 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1239 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1240 4, /* scalar_stmt_cost. */
1241 2, /* scalar_load_cost. */
1242 2, /* scalar_store_cost. */
1243 6, /* vec_stmt_cost. */
1244 0, /* vec_to_scalar_cost. */
1245 2, /* scalar_to_vec_cost. */
1246 2, /* vec_align_load_cost. */
1247 2, /* vec_unalign_load_cost. */
1248 2, /* vec_store_cost. */
1249 2, /* cond_taken_branch_cost. */
1250 1, /* cond_not_taken_branch_cost. */
1251 };
1252
1253 struct processor_costs bdver1_cost = {
1254 COSTS_N_INSNS (1), /* cost of an add instruction */
1255 COSTS_N_INSNS (1), /* cost of a lea instruction */
1256 COSTS_N_INSNS (1), /* variable shift costs */
1257 COSTS_N_INSNS (1), /* constant shift costs */
1258 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
1259 COSTS_N_INSNS (4), /* HI */
1260 COSTS_N_INSNS (4), /* SI */
1261 COSTS_N_INSNS (6), /* DI */
1262 COSTS_N_INSNS (6)}, /* other */
1263 0, /* cost of multiply per each bit set */
1264 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
1265 COSTS_N_INSNS (35), /* HI */
1266 COSTS_N_INSNS (51), /* SI */
1267 COSTS_N_INSNS (83), /* DI */
1268 COSTS_N_INSNS (83)}, /* other */
1269 COSTS_N_INSNS (1), /* cost of movsx */
1270 COSTS_N_INSNS (1), /* cost of movzx */
1271 8, /* "large" insn */
1272 9, /* MOVE_RATIO */
1273 4, /* cost for loading QImode using movzbl */
1274 {5, 5, 4}, /* cost of loading integer registers
1275 in QImode, HImode and SImode.
1276 Relative to reg-reg move (2). */
1277 {4, 4, 4}, /* cost of storing integer registers */
1278 2, /* cost of reg,reg fld/fst */
1279 {5, 5, 12}, /* cost of loading fp registers
1280 in SFmode, DFmode and XFmode */
1281 {4, 4, 8}, /* cost of storing fp registers
1282 in SFmode, DFmode and XFmode */
1283 2, /* cost of moving MMX register */
1284 {4, 4}, /* cost of loading MMX registers
1285 in SImode and DImode */
1286 {4, 4}, /* cost of storing MMX registers
1287 in SImode and DImode */
1288 2, /* cost of moving SSE register */
1289 {4, 4, 4}, /* cost of loading SSE registers
1290 in SImode, DImode and TImode */
1291 {4, 4, 4}, /* cost of storing SSE registers
1292 in SImode, DImode and TImode */
1293 2, /* MMX or SSE register to integer */
1294 /* On K8:
1295 MOVD reg64, xmmreg Double FSTORE 4
1296 MOVD reg32, xmmreg Double FSTORE 4
1297 On AMDFAM10:
1298 MOVD reg64, xmmreg Double FADD 3
1299 1/1 1/1
1300 MOVD reg32, xmmreg Double FADD 3
1301 1/1 1/1 */
1302 16, /* size of l1 cache. */
1303 2048, /* size of l2 cache. */
1304 64, /* size of prefetch block */
1305 /* New AMD processors never drop prefetches; if they cannot be performed
1306 immediately, they are queued. We set the number of simultaneous prefetches
1307 to a large constant to reflect this (it is probably not a good idea to
1308 leave the number of prefetches completely unlimited, as their execution
1309 also takes some time). */
1310 100, /* number of parallel prefetches */
1311 2, /* Branch cost */
1312 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
1313 COSTS_N_INSNS (6), /* cost of FMUL instruction. */
1314 COSTS_N_INSNS (42), /* cost of FDIV instruction. */
1315 COSTS_N_INSNS (2), /* cost of FABS instruction. */
1316 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
1317 COSTS_N_INSNS (52), /* cost of FSQRT instruction. */
1318
1319 /* BDVER1 has an optimized REP instruction for medium-sized blocks, but for
1320 very small blocks it is better to use a loop. For large blocks, a libcall
1321 can do non-temporal accesses and beat inline copying considerably. */
1322 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
1323 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1324 {{libcall, {{8, loop}, {24, unrolled_loop},
1325 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1326 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1327 6, /* scalar_stmt_cost. */
1328 4, /* scalar_load_cost. */
1329 4, /* scalar_store_cost. */
1330 6, /* vec_stmt_cost. */
1331 0, /* vec_to_scalar_cost. */
1332 2, /* scalar_to_vec_cost. */
1333 4, /* vec_align_load_cost. */
1334 4, /* vec_unalign_load_cost. */
1335 4, /* vec_store_cost. */
1336 2, /* cond_taken_branch_cost. */
1337 1, /* cond_not_taken_branch_cost. */
1338 };
1339
1340 struct processor_costs btver1_cost = {
1341 COSTS_N_INSNS (1), /* cost of an add instruction */
1342 COSTS_N_INSNS (2), /* cost of a lea instruction */
1343 COSTS_N_INSNS (1), /* variable shift costs */
1344 COSTS_N_INSNS (1), /* constant shift costs */
1345 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1346 COSTS_N_INSNS (4), /* HI */
1347 COSTS_N_INSNS (3), /* SI */
1348 COSTS_N_INSNS (4), /* DI */
1349 COSTS_N_INSNS (5)}, /* other */
1350 0, /* cost of multiply per each bit set */
1351 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
1352 COSTS_N_INSNS (35), /* HI */
1353 COSTS_N_INSNS (51), /* SI */
1354 COSTS_N_INSNS (83), /* DI */
1355 COSTS_N_INSNS (83)}, /* other */
1356 COSTS_N_INSNS (1), /* cost of movsx */
1357 COSTS_N_INSNS (1), /* cost of movzx */
1358 8, /* "large" insn */
1359 9, /* MOVE_RATIO */
1360 4, /* cost for loading QImode using movzbl */
1361 {3, 4, 3}, /* cost of loading integer registers
1362 in QImode, HImode and SImode.
1363 Relative to reg-reg move (2). */
1364 {3, 4, 3}, /* cost of storing integer registers */
1365 4, /* cost of reg,reg fld/fst */
1366 {4, 4, 12}, /* cost of loading fp registers
1367 in SFmode, DFmode and XFmode */
1368 {6, 6, 8}, /* cost of storing fp registers
1369 in SFmode, DFmode and XFmode */
1370 2, /* cost of moving MMX register */
1371 {3, 3}, /* cost of loading MMX registers
1372 in SImode and DImode */
1373 {4, 4}, /* cost of storing MMX registers
1374 in SImode and DImode */
1375 2, /* cost of moving SSE register */
1376 {4, 4, 3}, /* cost of loading SSE registers
1377 in SImode, DImode and TImode */
1378 {4, 4, 5}, /* cost of storing SSE registers
1379 in SImode, DImode and TImode */
1380 3, /* MMX or SSE register to integer */
1381 /* On K8:
1382 MOVD reg64, xmmreg Double FSTORE 4
1383 MOVD reg32, xmmreg Double FSTORE 4
1384 On AMDFAM10:
1385 MOVD reg64, xmmreg Double FADD 3
1386 1/1 1/1
1387 MOVD reg32, xmmreg Double FADD 3
1388 1/1 1/1 */
1389 32, /* size of l1 cache. */
1390 512, /* size of l2 cache. */
1391 64, /* size of prefetch block */
1392 100, /* number of parallel prefetches */
1393 2, /* Branch cost */
1394 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
1395 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
1396 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
1397 COSTS_N_INSNS (2), /* cost of FABS instruction. */
1398 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
1399 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
1400
1401 /* BTVER1 has an optimized REP instruction for medium-sized blocks, but for
1402 very small blocks it is better to use a loop. For large blocks, a libcall can
1403 do non-temporal accesses and beat inline copying considerably. */
1404 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
1405 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1406 {{libcall, {{8, loop}, {24, unrolled_loop},
1407 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1408 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1409 4, /* scalar_stmt_cost. */
1410 2, /* scalar_load_cost. */
1411 2, /* scalar_store_cost. */
1412 6, /* vec_stmt_cost. */
1413 0, /* vec_to_scalar_cost. */
1414 2, /* scalar_to_vec_cost. */
1415 2, /* vec_align_load_cost. */
1416 2, /* vec_unalign_load_cost. */
1417 2, /* vec_store_cost. */
1418 2, /* cond_taken_branch_cost. */
1419 1, /* cond_not_taken_branch_cost. */
1420 };
1421
1422 static const
1423 struct processor_costs pentium4_cost = {
1424 COSTS_N_INSNS (1), /* cost of an add instruction */
1425 COSTS_N_INSNS (3), /* cost of a lea instruction */
1426 COSTS_N_INSNS (4), /* variable shift costs */
1427 COSTS_N_INSNS (4), /* constant shift costs */
1428 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
1429 COSTS_N_INSNS (15), /* HI */
1430 COSTS_N_INSNS (15), /* SI */
1431 COSTS_N_INSNS (15), /* DI */
1432 COSTS_N_INSNS (15)}, /* other */
1433 0, /* cost of multiply per each bit set */
1434 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
1435 COSTS_N_INSNS (56), /* HI */
1436 COSTS_N_INSNS (56), /* SI */
1437 COSTS_N_INSNS (56), /* DI */
1438 COSTS_N_INSNS (56)}, /* other */
1439 COSTS_N_INSNS (1), /* cost of movsx */
1440 COSTS_N_INSNS (1), /* cost of movzx */
1441 16, /* "large" insn */
1442 6, /* MOVE_RATIO */
1443 2, /* cost for loading QImode using movzbl */
1444 {4, 5, 4}, /* cost of loading integer registers
1445 in QImode, HImode and SImode.
1446 Relative to reg-reg move (2). */
1447 {2, 3, 2}, /* cost of storing integer registers */
1448 2, /* cost of reg,reg fld/fst */
1449 {2, 2, 6}, /* cost of loading fp registers
1450 in SFmode, DFmode and XFmode */
1451 {4, 4, 6}, /* cost of storing fp registers
1452 in SFmode, DFmode and XFmode */
1453 2, /* cost of moving MMX register */
1454 {2, 2}, /* cost of loading MMX registers
1455 in SImode and DImode */
1456 {2, 2}, /* cost of storing MMX registers
1457 in SImode and DImode */
1458 12, /* cost of moving SSE register */
1459 {12, 12, 12}, /* cost of loading SSE registers
1460 in SImode, DImode and TImode */
1461 {2, 2, 8}, /* cost of storing SSE registers
1462 in SImode, DImode and TImode */
1463 10, /* MMX or SSE register to integer */
1464 8, /* size of l1 cache. */
1465 256, /* size of l2 cache. */
1466 64, /* size of prefetch block */
1467 6, /* number of parallel prefetches */
1468 2, /* Branch cost */
1469 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
1470 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
1471 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
1472 COSTS_N_INSNS (2), /* cost of FABS instruction. */
1473 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
1474 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
1475 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
1476 DUMMY_STRINGOP_ALGS},
1477 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
1478 {-1, libcall}}},
1479 DUMMY_STRINGOP_ALGS},
1480 1, /* scalar_stmt_cost. */
1481 1, /* scalar_load_cost. */
1482 1, /* scalar_store_cost. */
1483 1, /* vec_stmt_cost. */
1484 1, /* vec_to_scalar_cost. */
1485 1, /* scalar_to_vec_cost. */
1486 1, /* vec_align_load_cost. */
1487 2, /* vec_unalign_load_cost. */
1488 1, /* vec_store_cost. */
1489 3, /* cond_taken_branch_cost. */
1490 1, /* cond_not_taken_branch_cost. */
1491 };
1492
1493 static const
1494 struct processor_costs nocona_cost = {
1495 COSTS_N_INSNS (1), /* cost of an add instruction */
1496 COSTS_N_INSNS (1), /* cost of a lea instruction */
1497 COSTS_N_INSNS (1), /* variable shift costs */
1498 COSTS_N_INSNS (1), /* constant shift costs */
1499 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
1500 COSTS_N_INSNS (10), /* HI */
1501 COSTS_N_INSNS (10), /* SI */
1502 COSTS_N_INSNS (10), /* DI */
1503 COSTS_N_INSNS (10)}, /* other */
1504 0, /* cost of multiply per each bit set */
1505 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
1506 COSTS_N_INSNS (66), /* HI */
1507 COSTS_N_INSNS (66), /* SI */
1508 COSTS_N_INSNS (66), /* DI */
1509 COSTS_N_INSNS (66)}, /* other */
1510 COSTS_N_INSNS (1), /* cost of movsx */
1511 COSTS_N_INSNS (1), /* cost of movzx */
1512 16, /* "large" insn */
1513 17, /* MOVE_RATIO */
1514 4, /* cost for loading QImode using movzbl */
1515 {4, 4, 4}, /* cost of loading integer registers
1516 in QImode, HImode and SImode.
1517 Relative to reg-reg move (2). */
1518 {4, 4, 4}, /* cost of storing integer registers */
1519 3, /* cost of reg,reg fld/fst */
1520 {12, 12, 12}, /* cost of loading fp registers
1521 in SFmode, DFmode and XFmode */
1522 {4, 4, 4}, /* cost of storing fp registers
1523 in SFmode, DFmode and XFmode */
1524 6, /* cost of moving MMX register */
1525 {12, 12}, /* cost of loading MMX registers
1526 in SImode and DImode */
1527 {12, 12}, /* cost of storing MMX registers
1528 in SImode and DImode */
1529 6, /* cost of moving SSE register */
1530 {12, 12, 12}, /* cost of loading SSE registers
1531 in SImode, DImode and TImode */
1532 {12, 12, 12}, /* cost of storing SSE registers
1533 in SImode, DImode and TImode */
1534 8, /* MMX or SSE register to integer */
1535 8, /* size of l1 cache. */
1536 1024, /* size of l2 cache. */
1537 128, /* size of prefetch block */
1538 8, /* number of parallel prefetches */
1539 1, /* Branch cost */
1540 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
1541 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1542 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
1543 COSTS_N_INSNS (3), /* cost of FABS instruction. */
1544 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
1545 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
1546 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
1547 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
1548 {100000, unrolled_loop}, {-1, libcall}}}},
1549 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
1550 {-1, libcall}}},
1551 {libcall, {{24, loop}, {64, unrolled_loop},
1552 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1553 1, /* scalar_stmt_cost. */
1554 1, /* scalar load_cost. */
1555 1, /* scalar_store_cost. */
1556 1, /* vec_stmt_cost. */
1557 1, /* vec_to_scalar_cost. */
1558 1, /* scalar_to_vec_cost. */
1559 1, /* vec_align_load_cost. */
1560 2, /* vec_unalign_load_cost. */
1561 1, /* vec_store_cost. */
1562 3, /* cond_taken_branch_cost. */
1563 1, /* cond_not_taken_branch_cost. */
1564 };
1565
1566 static const
1567 struct processor_costs atom_cost = {
1568 COSTS_N_INSNS (1), /* cost of an add instruction */
1569 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1570 COSTS_N_INSNS (1), /* variable shift costs */
1571 COSTS_N_INSNS (1), /* constant shift costs */
1572 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1573 COSTS_N_INSNS (4), /* HI */
1574 COSTS_N_INSNS (3), /* SI */
1575 COSTS_N_INSNS (4), /* DI */
1576 COSTS_N_INSNS (2)}, /* other */
1577 0, /* cost of multiply per each bit set */
1578 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1579 COSTS_N_INSNS (26), /* HI */
1580 COSTS_N_INSNS (42), /* SI */
1581 COSTS_N_INSNS (74), /* DI */
1582 COSTS_N_INSNS (74)}, /* other */
1583 COSTS_N_INSNS (1), /* cost of movsx */
1584 COSTS_N_INSNS (1), /* cost of movzx */
1585 8, /* "large" insn */
1586 17, /* MOVE_RATIO */
1587 2, /* cost for loading QImode using movzbl */
1588 {4, 4, 4}, /* cost of loading integer registers
1589 in QImode, HImode and SImode.
1590 Relative to reg-reg move (2). */
1591 {4, 4, 4}, /* cost of storing integer registers */
1592 4, /* cost of reg,reg fld/fst */
1593 {12, 12, 12}, /* cost of loading fp registers
1594 in SFmode, DFmode and XFmode */
1595 {6, 6, 8}, /* cost of storing fp registers
1596 in SFmode, DFmode and XFmode */
1597 2, /* cost of moving MMX register */
1598 {8, 8}, /* cost of loading MMX registers
1599 in SImode and DImode */
1600 {8, 8}, /* cost of storing MMX registers
1601 in SImode and DImode */
1602 2, /* cost of moving SSE register */
1603 {8, 8, 8}, /* cost of loading SSE registers
1604 in SImode, DImode and TImode */
1605 {8, 8, 8}, /* cost of storing SSE registers
1606 in SImode, DImode and TImode */
1607 5, /* MMX or SSE register to integer */
1608 32, /* size of l1 cache. */
1609 256, /* size of l2 cache. */
1610 64, /* size of prefetch block */
1611 6, /* number of parallel prefetches */
1612 3, /* Branch cost */
1613 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1614 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1615 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1616 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1617 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1618 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1619 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1620 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1621 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1622 {{libcall, {{8, loop}, {15, unrolled_loop},
1623 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1624 {libcall, {{24, loop}, {32, unrolled_loop},
1625 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1626 1, /* scalar_stmt_cost. */
1627 1, /* scalar load_cost. */
1628 1, /* scalar_store_cost. */
1629 1, /* vec_stmt_cost. */
1630 1, /* vec_to_scalar_cost. */
1631 1, /* scalar_to_vec_cost. */
1632 1, /* vec_align_load_cost. */
1633 2, /* vec_unalign_load_cost. */
1634 1, /* vec_store_cost. */
1635 3, /* cond_taken_branch_cost. */
1636 1, /* cond_not_taken_branch_cost. */
1637 };
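/* A note on the memcpy/memset descriptors in the cost tables above: each
   entry is (roughly, as declared in i386.h) a struct stringop_algs holding
   the algorithm to use when the block size is unknown, followed by
   {max, alg} pairs that pick an algorithm for known sizes up to "max",
   with max == -1 acting as the catch-all terminator.  A minimal sketch of
   how such a table would be consulted (illustrative only, not the actual
   decide_alg implementation; "pick_alg_sketch" is a hypothetical helper):

     static enum stringop_alg
     pick_alg_sketch (const struct stringop_algs *algs, HOST_WIDE_INT size)
     {
       int i;
       for (i = 0; i < MAX_STRINGOP_ALGS; i++)
         if (algs->size[i].max == -1 || size <= algs->size[i].max)
           return algs->size[i].alg;
       return libcall;
     }

   So for atom_cost a known 10-byte memcpy would use "loop", while anything
   above 8192 bytes in 64-bit mode would fall back to a libcall.  */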
1638
1639 /* Generic64 should produce code tuned for Nocona and K8. */
1640 static const
1641 struct processor_costs generic64_cost = {
1642 COSTS_N_INSNS (1), /* cost of an add instruction */
1643 /* On all chips taken into consideration, lea is 2 cycles or more. With
1644 this cost, however, our current implementation of synth_mult results in
1645 the use of unnecessary temporary registers, causing regressions on several
1646 SPECfp benchmarks. */
1647 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1648 COSTS_N_INSNS (1), /* variable shift costs */
1649 COSTS_N_INSNS (1), /* constant shift costs */
1650 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1651 COSTS_N_INSNS (4), /* HI */
1652 COSTS_N_INSNS (3), /* SI */
1653 COSTS_N_INSNS (4), /* DI */
1654 COSTS_N_INSNS (2)}, /* other */
1655 0, /* cost of multiply per each bit set */
1656 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1657 COSTS_N_INSNS (26), /* HI */
1658 COSTS_N_INSNS (42), /* SI */
1659 COSTS_N_INSNS (74), /* DI */
1660 COSTS_N_INSNS (74)}, /* other */
1661 COSTS_N_INSNS (1), /* cost of movsx */
1662 COSTS_N_INSNS (1), /* cost of movzx */
1663 8, /* "large" insn */
1664 17, /* MOVE_RATIO */
1665 4, /* cost for loading QImode using movzbl */
1666 {4, 4, 4}, /* cost of loading integer registers
1667 in QImode, HImode and SImode.
1668 Relative to reg-reg move (2). */
1669 {4, 4, 4}, /* cost of storing integer registers */
1670 4, /* cost of reg,reg fld/fst */
1671 {12, 12, 12}, /* cost of loading fp registers
1672 in SFmode, DFmode and XFmode */
1673 {6, 6, 8}, /* cost of storing fp registers
1674 in SFmode, DFmode and XFmode */
1675 2, /* cost of moving MMX register */
1676 {8, 8}, /* cost of loading MMX registers
1677 in SImode and DImode */
1678 {8, 8}, /* cost of storing MMX registers
1679 in SImode and DImode */
1680 2, /* cost of moving SSE register */
1681 {8, 8, 8}, /* cost of loading SSE registers
1682 in SImode, DImode and TImode */
1683 {8, 8, 8}, /* cost of storing SSE registers
1684 in SImode, DImode and TImode */
1685 5, /* MMX or SSE register to integer */
1686 32, /* size of l1 cache. */
1687 512, /* size of l2 cache. */
1688 64, /* size of prefetch block */
1689 6, /* number of parallel prefetches */
1690 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1691 value is increased to the perhaps more appropriate value of 5. */
1692 3, /* Branch cost */
1693 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1694 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1695 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1696 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1697 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1698 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1699 {DUMMY_STRINGOP_ALGS,
1700 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1701 {DUMMY_STRINGOP_ALGS,
1702 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1703 1, /* scalar_stmt_cost. */
1704 1, /* scalar load_cost. */
1705 1, /* scalar_store_cost. */
1706 1, /* vec_stmt_cost. */
1707 1, /* vec_to_scalar_cost. */
1708 1, /* scalar_to_vec_cost. */
1709 1, /* vec_align_load_cost. */
1710 2, /* vec_unalign_load_cost. */
1711 1, /* vec_store_cost. */
1712 3, /* cond_taken_branch_cost. */
1713 1, /* cond_not_taken_branch_cost. */
1714 };
1715
1716 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
1717 Athlon and K8. */
1718 static const
1719 struct processor_costs generic32_cost = {
1720 COSTS_N_INSNS (1), /* cost of an add instruction */
1721 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1722 COSTS_N_INSNS (1), /* variable shift costs */
1723 COSTS_N_INSNS (1), /* constant shift costs */
1724 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1725 COSTS_N_INSNS (4), /* HI */
1726 COSTS_N_INSNS (3), /* SI */
1727 COSTS_N_INSNS (4), /* DI */
1728 COSTS_N_INSNS (2)}, /* other */
1729 0, /* cost of multiply per each bit set */
1730 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1731 COSTS_N_INSNS (26), /* HI */
1732 COSTS_N_INSNS (42), /* SI */
1733 COSTS_N_INSNS (74), /* DI */
1734 COSTS_N_INSNS (74)}, /* other */
1735 COSTS_N_INSNS (1), /* cost of movsx */
1736 COSTS_N_INSNS (1), /* cost of movzx */
1737 8, /* "large" insn */
1738 17, /* MOVE_RATIO */
1739 4, /* cost for loading QImode using movzbl */
1740 {4, 4, 4}, /* cost of loading integer registers
1741 in QImode, HImode and SImode.
1742 Relative to reg-reg move (2). */
1743 {4, 4, 4}, /* cost of storing integer registers */
1744 4, /* cost of reg,reg fld/fst */
1745 {12, 12, 12}, /* cost of loading fp registers
1746 in SFmode, DFmode and XFmode */
1747 {6, 6, 8}, /* cost of storing fp registers
1748 in SFmode, DFmode and XFmode */
1749 2, /* cost of moving MMX register */
1750 {8, 8}, /* cost of loading MMX registers
1751 in SImode and DImode */
1752 {8, 8}, /* cost of storing MMX registers
1753 in SImode and DImode */
1754 2, /* cost of moving SSE register */
1755 {8, 8, 8}, /* cost of loading SSE registers
1756 in SImode, DImode and TImode */
1757 {8, 8, 8}, /* cost of storing SSE registers
1758 in SImode, DImode and TImode */
1759 5, /* MMX or SSE register to integer */
1760 32, /* size of l1 cache. */
1761 256, /* size of l2 cache. */
1762 64, /* size of prefetch block */
1763 6, /* number of parallel prefetches */
1764 3, /* Branch cost */
1765 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1766 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1767 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1768 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1769 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1770 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1771 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1772 DUMMY_STRINGOP_ALGS},
1773 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1774 DUMMY_STRINGOP_ALGS},
1775 1, /* scalar_stmt_cost. */
1776 1, /* scalar load_cost. */
1777 1, /* scalar_store_cost. */
1778 1, /* vec_stmt_cost. */
1779 1, /* vec_to_scalar_cost. */
1780 1, /* scalar_to_vec_cost. */
1781 1, /* vec_align_load_cost. */
1782 2, /* vec_unalign_load_cost. */
1783 1, /* vec_store_cost. */
1784 3, /* cond_taken_branch_cost. */
1785 1, /* cond_not_taken_branch_cost. */
1786 };
1787
1788 const struct processor_costs *ix86_cost = &pentium_cost;
1789
1790 /* Processor feature/optimization bitmasks. */
1791 #define m_386 (1<<PROCESSOR_I386)
1792 #define m_486 (1<<PROCESSOR_I486)
1793 #define m_PENT (1<<PROCESSOR_PENTIUM)
1794 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1795 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1796 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1797 #define m_CORE2_32 (1<<PROCESSOR_CORE2_32)
1798 #define m_CORE2_64 (1<<PROCESSOR_CORE2_64)
1799 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1800 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1801 #define m_COREI7 (m_COREI7_32 | m_COREI7_64)
1802 #define m_CORE2I7_32 (m_CORE2_32 | m_COREI7_32)
1803 #define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
1804 #define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
1805 #define m_ATOM (1<<PROCESSOR_ATOM)
1806
1807 #define m_GEODE (1<<PROCESSOR_GEODE)
1808 #define m_K6 (1<<PROCESSOR_K6)
1809 #define m_K6_GEODE (m_K6 | m_GEODE)
1810 #define m_K8 (1<<PROCESSOR_K8)
1811 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1812 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1813 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1814 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1815 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1816 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1 | m_BTVER1)
1817
1818 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1819 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1820
1821 /* Generic instruction choice should be a common subset of supported CPUs
1822 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1823 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
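
/* Each m_<CPU> macro above is simply (1 << PROCESSOR_<CPU>), so a tuning
   entry written as, say, "m_PENT | m_K6_GEODE" in the tables below is just
   a bitmask of the processors that a given tuning applies to.  A minimal
   sketch of a membership test (illustrative only):

     if (initial_ix86_tune_features[X86_TUNE_SCHEDULE] & (1 << PROCESSOR_K8))
       ;  // the SCHEDULE tuning is enabled when tuning for K8
*/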
1824
1825 /* Feature tests against the various tunings. */
1826 unsigned char ix86_tune_features[X86_TUNE_LAST];
1827
1828 /* Feature tests against the various tunings used to create ix86_tune_features
1829 based on the processor mask. */
1830 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1831 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1832 negatively, so enabling it for Generic64 seems like a good code-size
1833 tradeoff. We can't enable it for 32bit generic because it does not
1834 work well with PPro based chips. */
1835 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2I7_64 | m_GENERIC64,
1836
1837 /* X86_TUNE_PUSH_MEMORY */
1838 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1839 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1840
1841 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1842 m_486 | m_PENT,
1843
1844 /* X86_TUNE_UNROLL_STRLEN */
1845 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1846 | m_CORE2I7 | m_GENERIC,
1847
1848 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1849 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1850 | m_CORE2I7 | m_GENERIC,
1851
1852 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1853 on simulation results. But after P4 was made, no performance benefit
1854 was observed with branch hints; they also increase the code size.
1855 As a result, icc never generates branch hints. */
1856 0,
1857
1858 /* X86_TUNE_DOUBLE_WITH_ADD */
1859 ~m_386,
1860
1861 /* X86_TUNE_USE_SAHF */
1862 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_BTVER1
1863 | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1864
1865 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1866 partial dependencies. */
1867 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1868 | m_CORE2I7 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1869
1870 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1871 register stalls on the Generic32 compilation setting as well. However,
1872 in the current implementation partial register stalls are not eliminated
1873 very well - they can be introduced via subregs synthesized by combine
1874 and can happen in caller/callee saving sequences. Because this option
1875 pays back little on PPro based chips and conflicts with the partial reg
1876 dependencies used by Athlon/P4 based chips, it is better to leave it off
1877 for generic32 for now. */
1878 m_PPRO,
1879
1880 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1881 m_CORE2I7 | m_GENERIC,
1882
1883 /* X86_TUNE_USE_HIMODE_FIOP */
1884 m_386 | m_486 | m_K6_GEODE,
1885
1886 /* X86_TUNE_USE_SIMODE_FIOP */
1887 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2I7 | m_GENERIC),
1888
1889 /* X86_TUNE_USE_MOV0 */
1890 m_K6,
1891
1892 /* X86_TUNE_USE_CLTD */
1893 ~(m_PENT | m_ATOM | m_K6 | m_CORE2I7 | m_GENERIC),
1894
1895 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1896 m_PENT4,
1897
1898 /* X86_TUNE_SPLIT_LONG_MOVES */
1899 m_PPRO,
1900
1901 /* X86_TUNE_READ_MODIFY_WRITE */
1902 ~m_PENT,
1903
1904 /* X86_TUNE_READ_MODIFY */
1905 ~(m_PENT | m_PPRO),
1906
1907 /* X86_TUNE_PROMOTE_QIMODE */
1908 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1909 | m_CORE2I7 | m_GENERIC /* | m_PENT4 ? */,
1910
1911 /* X86_TUNE_FAST_PREFIX */
1912 ~(m_PENT | m_486 | m_386),
1913
1914 /* X86_TUNE_SINGLE_STRINGOP */
1915 m_386 | m_PENT4 | m_NOCONA,
1916
1917 /* X86_TUNE_QIMODE_MATH */
1918 ~0,
1919
1920 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1921 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1922 might be considered for Generic32 if our scheme for avoiding partial
1923 stalls was more effective. */
1924 ~m_PPRO,
1925
1926 /* X86_TUNE_PROMOTE_QI_REGS */
1927 0,
1928
1929 /* X86_TUNE_PROMOTE_HI_REGS */
1930 m_PPRO,
1931
1932 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
1933 over esp addition. */
1934 m_386 | m_486 | m_PENT | m_PPRO,
1935
1936 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
1937 over esp addition. */
1938 m_PENT,
1939
1940 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
1941 over esp subtraction. */
1942 m_386 | m_486 | m_PENT | m_K6_GEODE,
1943
1944 /* X86_TUNE_DOUBLE_PUSH. Enable if double push insn is preferred
1945 over esp subtraction. */
1946 m_PENT | m_K6_GEODE,
1947
1948 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1949 for DFmode copies */
1950 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
1951 | m_GENERIC | m_GEODE),
1952
1953 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1954 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1955
1956 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1957 conflict here between PPro/Pentium4 based chips that treat 128bit
1958 SSE registers as single units and K8 based chips that split SSE
1959 registers into two 64bit halves. This knob promotes all store destinations
1960 to be 128bit to allow register renaming on 128bit SSE units, but usually
1961 results in one extra micro-op on 64bit SSE units. Experimental results
1962 show that disabling this option on P4 brings over a 20% SPECfp regression,
1963 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1964 masked by careful scheduling of moves. */
1965 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7 | m_GENERIC
1966 | m_AMDFAM10 | m_BDVER1,
1967
1968 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1969 m_AMDFAM10 | m_BDVER1 | m_BTVER1 | m_COREI7,
1970
1971 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1972 m_BDVER1 | m_COREI7,
1973
1974 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1975 m_BDVER1,
1976
1977 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1978 are resolved on SSE register parts instead of whole registers, so we may
1979 maintain just the lower part of scalar values in the proper format, leaving
1980 the upper part undefined. */
1981 m_ATHLON_K8,
1982
1983 /* X86_TUNE_SSE_TYPELESS_STORES */
1984 m_AMD_MULTIPLE,
1985
1986 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1987 m_PPRO | m_PENT4 | m_NOCONA,
1988
1989 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1990 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1991
1992 /* X86_TUNE_PROLOGUE_USING_MOVE */
1993 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1994
1995 /* X86_TUNE_EPILOGUE_USING_MOVE */
1996 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1997
1998 /* X86_TUNE_SHIFT1 */
1999 ~m_486,
2000
2001 /* X86_TUNE_USE_FFREEP */
2002 m_AMD_MULTIPLE,
2003
2004 /* X86_TUNE_INTER_UNIT_MOVES */
2005 ~(m_AMD_MULTIPLE | m_GENERIC),
2006
2007 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
2008 ~(m_AMDFAM10 | m_BDVER1),
2009
2010 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
2011 than 4 branch instructions in the 16 byte window. */
2012 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2I7
2013 | m_GENERIC,
2014
2015 /* X86_TUNE_SCHEDULE */
2016 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2I7
2017 | m_GENERIC,
2018
2019 /* X86_TUNE_USE_BT */
2020 m_AMD_MULTIPLE | m_ATOM | m_CORE2I7 | m_GENERIC,
2021
2022 /* X86_TUNE_USE_INCDEC */
2023 ~(m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC | m_ATOM),
2024
2025 /* X86_TUNE_PAD_RETURNS */
2026 m_AMD_MULTIPLE | m_CORE2I7 | m_GENERIC,
2027
2028 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions. */
2029 m_ATOM,
2030
2031 /* X86_TUNE_EXT_80387_CONSTANTS */
2032 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
2033 | m_CORE2I7 | m_GENERIC,
2034
2035 /* X86_TUNE_SHORTEN_X87_SSE */
2036 ~m_K8,
2037
2038 /* X86_TUNE_AVOID_VECTOR_DECODE */
2039 m_K8 | m_CORE2I7_64 | m_GENERIC64,
2040
2041 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
2042 and SImode multiplies, but the 386 and 486 do HImode multiplies faster. */
2043 ~(m_386 | m_486),
2044
2045 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
2046 vector path on AMD machines. */
2047 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2048
2049 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
2050 machines. */
2051 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2052
2053 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
2054 than a MOV. */
2055 m_PENT,
2056
2057 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
2058 but one byte longer. */
2059 m_PENT,
2060
2061 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
2062 operand that cannot be represented using a modRM byte. The XOR
2063 replacement is long decoded, so this split helps here as well. */
2064 m_K6,
2065
2066 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
2067 from FP to FP. */
2068 m_AMDFAM10 | m_CORE2I7 | m_GENERIC,
2069
2070 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
2071 from integer to FP. */
2072 m_AMDFAM10,
2073
2074 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
2075 with a subsequent conditional jump instruction into a single
2076 compare-and-branch uop. */
2077 m_BDVER1,
2078
2079 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2080 will impact LEA instruction selection. */
2081 m_ATOM,
2082
2083 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
2084 instructions. */
2085 ~m_ATOM,
2086 };
2087
2088 /* Feature tests against the various architecture variations. */
2089 unsigned char ix86_arch_features[X86_ARCH_LAST];
2090
2091 /* Feature tests against the various architecture variations, used to create
2092 ix86_arch_features based on the processor mask. */
2093 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2094 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
2095 ~(m_386 | m_486 | m_PENT | m_K6),
2096
2097 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2098 ~m_386,
2099
2100 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2101 ~(m_386 | m_486),
2102
2103 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2104 ~m_386,
2105
2106 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2107 ~m_386,
2108 };
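
/* The boolean feature arrays ix86_tune_features[] and ix86_arch_features[]
   are derived from the two initial_* tables above by masking every entry
   with the bit of the selected processor.  Essentially (a sketch of the
   initialization done later in ix86_option_override_internal):

     ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   and analogously for ix86_arch_features[] using 1u << ix86_arch.  */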
2109
2110 static const unsigned int x86_accumulate_outgoing_args
2111 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
2112 | m_GENERIC;
2113
2114 static const unsigned int x86_arch_always_fancy_math_387
2115 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
2116 | m_NOCONA | m_CORE2I7 | m_GENERIC;
2117
2118 static enum stringop_alg stringop_alg = no_stringop;
2119
2120 /* If the average insn count for a single function invocation is
2121 lower than this constant, emit fast (but longer) prologue and
2122 epilogue code. */
2123 #define FAST_PROLOGUE_INSN_COUNT 20
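
/* A minimal sketch of the kind of test this constant feeds (the variable
   "insn_count" here is hypothetical; the real check lives in the frame
   layout code):

     bool use_fast_prologue_epilogue = insn_count < FAST_PROLOGUE_INSN_COUNT;
*/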
2124
2125 /* Names for the 8-bit low, 8-bit high, and 16-bit registers, respectively. */
2126 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2127 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2128 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
2129
2130 /* Array of the smallest class containing reg number REGNO, indexed by
2131 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2132
2133 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2134 {
2135 /* ax, dx, cx, bx */
2136 AREG, DREG, CREG, BREG,
2137 /* si, di, bp, sp */
2138 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2139 /* FP registers */
2140 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2141 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2142 /* arg pointer */
2143 NON_Q_REGS,
2144 /* flags, fpsr, fpcr, frame */
2145 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2146 /* SSE registers */
2147 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2148 SSE_REGS, SSE_REGS,
2149 /* MMX registers */
2150 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2151 MMX_REGS, MMX_REGS,
2152 /* REX registers */
2153 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2154 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2155 /* SSE REX registers */
2156 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2157 SSE_REGS, SSE_REGS,
2158 };
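
/* As noted above, REGNO_REG_CLASS in i386.h simply indexes this table, so
   for example (illustrative only):

     REGNO_REG_CLASS (AX_REG)  is AREG
     REGNO_REG_CLASS (SP_REG)  is NON_Q_REGS
     REGNO_REG_CLASS (FIRST_SSE_REG) is SSE_FIRST_REG
*/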
2159
2160 /* The "default" register map used in 32bit mode. */
2161
2162 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2163 {
2164 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2165 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2166 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2167 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2168 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2169 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2170 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2171 };
2172
2173 /* The "default" register map used in 64bit mode. */
2174
2175 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2176 {
2177 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2178 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2179 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2180 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2181 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2182 8,9,10,11,12,13,14,15, /* extended integer registers */
2183 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
2184 };
2185
2186 /* Define the register numbers to be used in Dwarf debugging information.
2187 The SVR4 reference port C compiler uses the following register numbers
2188 in its Dwarf output code:
2189 0 for %eax (gcc regno = 0)
2190 1 for %ecx (gcc regno = 2)
2191 2 for %edx (gcc regno = 1)
2192 3 for %ebx (gcc regno = 3)
2193 4 for %esp (gcc regno = 7)
2194 5 for %ebp (gcc regno = 6)
2195 6 for %esi (gcc regno = 4)
2196 7 for %edi (gcc regno = 5)
2197 The following three DWARF register numbers are never generated by
2198 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2199 believes these numbers have these meanings.
2200 8 for %eip (no gcc equivalent)
2201 9 for %eflags (gcc regno = 17)
2202 10 for %trapno (no gcc equivalent)
2203 It is not at all clear how we should number the FP stack registers
2204 for the x86 architecture. If the version of SDB on x86/svr4 were
2205 a bit less brain dead with respect to floating-point then we would
2206 have a precedent to follow with respect to DWARF register numbers
2207 for x86 FP registers, but the SDB on x86/svr4 is so completely
2208 broken with respect to FP registers that it is hardly worth thinking
2209 of it as something to strive for compatibility with.
2210 The version of x86/svr4 SDB I have at the moment does (partially)
2211 seem to believe that DWARF register number 11 is associated with
2212 the x86 register %st(0), but that's about all. Higher DWARF
2213 register numbers don't seem to be associated with anything in
2214 particular, and even for DWARF regno 11, SDB only seems to under-
2215 stand that it should say that a variable lives in %st(0) (when
2216 asked via an `=' command) if we said it was in DWARF regno 11,
2217 but SDB still prints garbage when asked for the value of the
2218 variable in question (via a `/' command).
2219 (Also note that the labels SDB prints for various FP stack regs
2220 when doing an `x' command are all wrong.)
2221 Note that these problems generally don't affect the native SVR4
2222 C compiler because it doesn't allow the use of -O with -g and
2223 because when it is *not* optimizing, it allocates a memory
2224 location for each floating-point variable, and the memory
2225 location is what gets described in the DWARF AT_location
2226 attribute for the variable in question.
2227 Regardless of the severe mental illness of the x86/svr4 SDB, we
2228 do something sensible here and we use the following DWARF
2229 register numbers. Note that these are all stack-top-relative
2230 numbers.
2231 11 for %st(0) (gcc regno = 8)
2232 12 for %st(1) (gcc regno = 9)
2233 13 for %st(2) (gcc regno = 10)
2234 14 for %st(3) (gcc regno = 11)
2235 15 for %st(4) (gcc regno = 12)
2236 16 for %st(5) (gcc regno = 13)
2237 17 for %st(6) (gcc regno = 14)
2238 18 for %st(7) (gcc regno = 15)
2239 */
2240 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2241 {
2242 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2243 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2244 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2245 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2246 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2247 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2248 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2249 };
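
/* Reading the table with the comment above in mind: gcc register number 2
   (%ecx) maps to DWARF register 1, the flags register (gcc regno 17) maps
   to DWARF register 9, and %st(0) (gcc regno 8) maps to the stack-top
   relative DWARF number 11.  On SVR4-style 32-bit targets the debug output
   macros (e.g. DBX_REGISTER_NUMBER) are expected to index this table; which
   of the maps above is used for a particular configuration is decided in
   the target headers.  */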
2250
2251 /* Define parameter passing and return registers. */
2252
2253 static int const x86_64_int_parameter_registers[6] =
2254 {
2255 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2256 };
2257
2258 static int const x86_64_ms_abi_int_parameter_registers[4] =
2259 {
2260 CX_REG, DX_REG, R8_REG, R9_REG
2261 };
2262
2263 static int const x86_64_int_return_registers[4] =
2264 {
2265 AX_REG, DX_REG, DI_REG, SI_REG
2266 };
2267
2268 /* Define the structure for the machine field in struct function. */
2269
2270 struct GTY(()) stack_local_entry {
2271 unsigned short mode;
2272 unsigned short n;
2273 rtx rtl;
2274 struct stack_local_entry *next;
2275 };
2276
2277 /* Structure describing stack frame layout.
2278 Stack grows downward:
2279
2280 [arguments]
2281 <- ARG_POINTER
2282 saved pc
2283
2284 saved static chain if ix86_static_chain_on_stack
2285
2286 saved frame pointer if frame_pointer_needed
2287 <- HARD_FRAME_POINTER
2288 [saved regs]
2289 <- regs_save_offset
2290 [padding0]
2291
2292 [saved SSE regs]
2293 <- sse_regs_save_offset
2294 [padding1] |
2295 | <- FRAME_POINTER
2296 [va_arg registers] |
2297 |
2298 [frame] |
2299 |
2300 [padding2] | = to_allocate
2301 <- STACK_POINTER
2302 */
2303 struct ix86_frame
2304 {
2305 int nsseregs;
2306 int nregs;
2307 int va_arg_size;
2308 int red_zone_size;
2309 int outgoing_arguments_size;
2310 HOST_WIDE_INT frame;
2311
2312 /* The offsets relative to ARG_POINTER. */
2313 HOST_WIDE_INT frame_pointer_offset;
2314 HOST_WIDE_INT hard_frame_pointer_offset;
2315 HOST_WIDE_INT stack_pointer_offset;
2316 HOST_WIDE_INT hfp_save_offset;
2317 HOST_WIDE_INT reg_save_offset;
2318 HOST_WIDE_INT sse_reg_save_offset;
2319
2320 /* When save_regs_using_mov is set, emit prologue using
2321 move instead of push instructions. */
2322 bool save_regs_using_mov;
2323 };
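
/* All of the offsets above are measured from ARG_POINTER at the top of the
   diagram, growing toward STACK_POINTER, so e.g. reg_save_offset <=
   sse_reg_save_offset <= stack_pointer_offset once the frame is laid out.
   A rough, illustrative reading of the diagram (not the actual prologue
   code): the area still to be allocated after the register saves is

     to_allocate = frame.stack_pointer_offset - frame.sse_reg_save_offset;
*/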
2324
2325 /* Code model option. */
2326 enum cmodel ix86_cmodel;
2327 /* Asm dialect. */
2328 enum asm_dialect ix86_asm_dialect = ASM_ATT;
2329 /* TLS dialects. */
2330 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
2331
2332 /* Which unit we are generating floating point math for. */
2333 enum fpmath_unit ix86_fpmath;
2334
2335 /* Which cpu are we scheduling for. */
2336 enum attr_cpu ix86_schedule;
2337
2338 /* Which cpu are we optimizing for. */
2339 enum processor_type ix86_tune;
2340
2341 /* Which instruction set architecture to use. */
2342 enum processor_type ix86_arch;
2343
2344 /* True if the SSE prefetch instruction is not a NOOP. */
2345 int x86_prefetch_sse;
2346
2347 /* ix86_regparm_string as a number */
2348 static int ix86_regparm;
2349
2350 /* -mstackrealign option */
2351 static const char ix86_force_align_arg_pointer_string[]
2352 = "force_align_arg_pointer";
2353
2354 static rtx (*ix86_gen_leave) (void);
2355 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2356 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2357 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2358 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2359 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2360 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2361 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2362 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2363 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2364
2365 /* Preferred alignment for stack boundary in bits. */
2366 unsigned int ix86_preferred_stack_boundary;
2367
2368 /* Alignment for incoming stack boundary in bits specified at
2369 command line. */
2370 static unsigned int ix86_user_incoming_stack_boundary;
2371
2372 /* Default alignment for incoming stack boundary in bits. */
2373 static unsigned int ix86_default_incoming_stack_boundary;
2374
2375 /* Alignment for incoming stack boundary in bits. */
2376 unsigned int ix86_incoming_stack_boundary;
2377
2378 /* The abi used by target. */
2379 enum calling_abi ix86_abi;
2380
2381 /* Values 1-5: see jump.c */
2382 int ix86_branch_cost;
2383
2384 /* Calling abi specific va_list type nodes. */
2385 static GTY(()) tree sysv_va_list_type_node;
2386 static GTY(()) tree ms_va_list_type_node;
2387
2388 /* Variables which are this size or smaller are put in the data/bss
2389 or ldata/lbss sections. */
2390
2391 int ix86_section_threshold = 65536;
2392
2393 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2394 char internal_label_prefix[16];
2395 int internal_label_prefix_len;
2396
2397 /* Fence to use after loop using movnt. */
2398 tree x86_mfence;
2399
2400 /* Register class used for passing a given 64bit part of the argument.
2401 These represent classes as documented by the psABI, except for the SSESF
2402 and SSEDF classes, which are basically the SSE class, just that gcc will
2403 use SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
2404
2405 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
2406 whenever possible (the upper half does contain padding). */
2407 enum x86_64_reg_class
2408 {
2409 X86_64_NO_CLASS,
2410 X86_64_INTEGER_CLASS,
2411 X86_64_INTEGERSI_CLASS,
2412 X86_64_SSE_CLASS,
2413 X86_64_SSESF_CLASS,
2414 X86_64_SSEDF_CLASS,
2415 X86_64_SSEUP_CLASS,
2416 X86_64_X87_CLASS,
2417 X86_64_X87UP_CLASS,
2418 X86_64_COMPLEX_X87_CLASS,
2419 X86_64_MEMORY_CLASS
2420 };
2421
2422 #define MAX_CLASSES 4
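
/* A short worked example of the classification above (per the x86-64
   psABI; illustrative only): for

     struct example { double d; long l; };

   the first eightbyte (d) is classified as X86_64_SSEDF_CLASS and the
   second (l) as X86_64_INTEGER_CLASS, so when passed by value the struct
   is split across one SSE register and one general-purpose register.
   MAX_CLASSES is 4 because classification looks at no more than four
   eightbytes (32 bytes); anything larger goes to memory.  */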
2423
2424 /* Table of constants used by fldpi, fldln2, etc.... */
2425 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2426 static bool ext_80387_constants_init = 0;
2427
2428 \f
2429 static struct machine_function * ix86_init_machine_status (void);
2430 static rtx ix86_function_value (const_tree, const_tree, bool);
2431 static bool ix86_function_value_regno_p (const unsigned int);
2432 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2433 const_tree);
2434 static rtx ix86_static_chain (const_tree, bool);
2435 static int ix86_function_regparm (const_tree, const_tree);
2436 static void ix86_compute_frame_layout (struct ix86_frame *);
2437 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2438 rtx, rtx, int);
2439 static void ix86_add_new_builtins (int);
2440 static rtx ix86_expand_vec_perm_builtin (tree);
2441 static tree ix86_canonical_va_list_type (tree);
2442 static void predict_jump (int);
2443 static unsigned int split_stack_prologue_scratch_regno (void);
2444 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2445
2446 enum ix86_function_specific_strings
2447 {
2448 IX86_FUNCTION_SPECIFIC_ARCH,
2449 IX86_FUNCTION_SPECIFIC_TUNE,
2450 IX86_FUNCTION_SPECIFIC_FPMATH,
2451 IX86_FUNCTION_SPECIFIC_MAX
2452 };
2453
2454 static char *ix86_target_string (int, int, const char *, const char *,
2455 const char *, bool);
2456 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2457 static void ix86_function_specific_save (struct cl_target_option *);
2458 static void ix86_function_specific_restore (struct cl_target_option *);
2459 static void ix86_function_specific_print (FILE *, int,
2460 struct cl_target_option *);
2461 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2462 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2463 static bool ix86_can_inline_p (tree, tree);
2464 static void ix86_set_current_function (tree);
2465 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2466
2467 static enum calling_abi ix86_function_abi (const_tree);
2468
2469 \f
2470 #ifndef SUBTARGET32_DEFAULT_CPU
2471 #define SUBTARGET32_DEFAULT_CPU "i386"
2472 #endif
2473
2474 /* The svr4 ABI for the i386 says that records and unions are returned
2475 in memory. */
2476 #ifndef DEFAULT_PCC_STRUCT_RETURN
2477 #define DEFAULT_PCC_STRUCT_RETURN 1
2478 #endif
2479
2480 /* Whether -mtune= or -march= were specified */
2481 static int ix86_tune_defaulted;
2482 static int ix86_arch_specified;
2483
2484 /* Define a set of ISAs which are available when a given ISA is
2485 enabled. MMX and SSE ISAs are handled separately. */
2486
2487 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2488 #define OPTION_MASK_ISA_3DNOW_SET \
2489 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2490
2491 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2492 #define OPTION_MASK_ISA_SSE2_SET \
2493 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2494 #define OPTION_MASK_ISA_SSE3_SET \
2495 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2496 #define OPTION_MASK_ISA_SSSE3_SET \
2497 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2498 #define OPTION_MASK_ISA_SSE4_1_SET \
2499 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2500 #define OPTION_MASK_ISA_SSE4_2_SET \
2501 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2502 #define OPTION_MASK_ISA_AVX_SET \
2503 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2504 #define OPTION_MASK_ISA_FMA_SET \
2505 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2506
2507 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
2508 as -msse4.2. */
2509 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2510
2511 #define OPTION_MASK_ISA_SSE4A_SET \
2512 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2513 #define OPTION_MASK_ISA_FMA4_SET \
2514 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2515 | OPTION_MASK_ISA_AVX_SET)
2516 #define OPTION_MASK_ISA_XOP_SET \
2517 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2518 #define OPTION_MASK_ISA_LWP_SET \
2519 OPTION_MASK_ISA_LWP
2520
2521 /* AES and PCLMUL need SSE2 because they use xmm registers */
2522 #define OPTION_MASK_ISA_AES_SET \
2523 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2524 #define OPTION_MASK_ISA_PCLMUL_SET \
2525 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2526
2527 #define OPTION_MASK_ISA_ABM_SET \
2528 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2529
2530 #define OPTION_MASK_ISA_BMI_SET OPTION_MASK_ISA_BMI
2531 #define OPTION_MASK_ISA_TBM_SET OPTION_MASK_ISA_TBM
2532 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2533 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2534 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2535 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2536 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
2537
2538 #define OPTION_MASK_ISA_FSGSBASE_SET OPTION_MASK_ISA_FSGSBASE
2539 #define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
2540 #define OPTION_MASK_ISA_F16C_SET \
2541 (OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
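
/* The *_SET masks above chain, so a single option can switch on everything
   it depends on.  For instance (expanding the definitions; illustrative):

     OPTION_MASK_ISA_SSE4_2_SET
       == OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1
        | OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
        | OPTION_MASK_ISA_SSE2  | OPTION_MASK_ISA_SSE

   which is what ix86_handle_option ORs into x_ix86_isa_flags for -msse4.2.  */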
2542
2543 /* Define a set of ISAs which aren't available when a given ISA is
2544 disabled. MMX and SSE ISAs are handled separately. */
2545
2546 #define OPTION_MASK_ISA_MMX_UNSET \
2547 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2548 #define OPTION_MASK_ISA_3DNOW_UNSET \
2549 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2550 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2551
2552 #define OPTION_MASK_ISA_SSE_UNSET \
2553 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2554 #define OPTION_MASK_ISA_SSE2_UNSET \
2555 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2556 #define OPTION_MASK_ISA_SSE3_UNSET \
2557 (OPTION_MASK_ISA_SSE3 \
2558 | OPTION_MASK_ISA_SSSE3_UNSET \
2559 | OPTION_MASK_ISA_SSE4A_UNSET )
2560 #define OPTION_MASK_ISA_SSSE3_UNSET \
2561 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2562 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2563 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2564 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2565 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2566 #define OPTION_MASK_ISA_AVX_UNSET \
2567 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2568 | OPTION_MASK_ISA_FMA4_UNSET | OPTION_MASK_ISA_F16C_UNSET)
2569 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2570
2571 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2572 as -mno-sse4.1. */
2573 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2574
2575 #define OPTION_MASK_ISA_SSE4A_UNSET \
2576 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2577
2578 #define OPTION_MASK_ISA_FMA4_UNSET \
2579 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2580 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2581 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2582
2583 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2584 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2585 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2586 #define OPTION_MASK_ISA_BMI_UNSET OPTION_MASK_ISA_BMI
2587 #define OPTION_MASK_ISA_TBM_UNSET OPTION_MASK_ISA_TBM
2588 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2589 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2590 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2591 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2592 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2593
2594 #define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
2595 #define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
2596 #define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
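
/* The *_UNSET masks mirror the *_SET masks in the opposite direction:
   turning an ISA off also clears everything that depends on it, because
   ix86_handle_option applies them with &= ~mask.  For example, -mno-sse2
   clears SSE2 and, through the chained definitions above, SSE3, SSSE3,
   SSE4.1, SSE4.2, SSE4A, AVX, FMA, FMA4, XOP and F16C as well
   (illustrative expansion of OPTION_MASK_ISA_SSE2_UNSET).  */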
2597
2598 /* Vectorization library interface and handlers. */
2599 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2600
2601 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2602 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2603
2604 /* Processor target table, indexed by processor number */
2605 struct ptt
2606 {
2607 const struct processor_costs *cost; /* Processor costs */
2608 const int align_loop; /* Default alignments. */
2609 const int align_loop_max_skip;
2610 const int align_jump;
2611 const int align_jump_max_skip;
2612 const int align_func;
2613 };
2614
2615 static const struct ptt processor_target_table[PROCESSOR_max] =
2616 {
2617 {&i386_cost, 4, 3, 4, 3, 4},
2618 {&i486_cost, 16, 15, 16, 15, 16},
2619 {&pentium_cost, 16, 7, 16, 7, 16},
2620 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2621 {&geode_cost, 0, 0, 0, 0, 0},
2622 {&k6_cost, 32, 7, 32, 7, 32},
2623 {&athlon_cost, 16, 7, 16, 7, 16},
2624 {&pentium4_cost, 0, 0, 0, 0, 0},
2625 {&k8_cost, 16, 7, 16, 7, 16},
2626 {&nocona_cost, 0, 0, 0, 0, 0},
2627 /* Core 2 32-bit. */
2628 {&generic32_cost, 16, 10, 16, 10, 16},
2629 /* Core 2 64-bit. */
2630 {&generic64_cost, 16, 10, 16, 10, 16},
2631 /* Core i7 32-bit. */
2632 {&generic32_cost, 16, 10, 16, 10, 16},
2633 /* Core i7 64-bit. */
2634 {&generic64_cost, 16, 10, 16, 10, 16},
2635 {&generic32_cost, 16, 7, 16, 7, 16},
2636 {&generic64_cost, 16, 10, 16, 10, 16},
2637 {&amdfam10_cost, 32, 24, 32, 7, 32},
2638 {&bdver1_cost, 32, 24, 32, 7, 32},
2639 {&btver1_cost, 32, 24, 32, 7, 32},
2640 {&atom_cost, 16, 7, 16, 7, 16}
2641 };
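
/* A minimal sketch of how this table is consulted once ix86_tune is known
   (essentially what ix86_option_override_internal does; shown here only as
   an illustration):

     ix86_cost = processor_target_table[ix86_tune].cost;
     align_loops = processor_target_table[ix86_tune].align_loop;
     align_jumps = processor_target_table[ix86_tune].align_jump;
     align_functions = processor_target_table[ix86_tune].align_func;
*/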
2642
2643 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2644 {
2645 "generic",
2646 "i386",
2647 "i486",
2648 "pentium",
2649 "pentium-mmx",
2650 "pentiumpro",
2651 "pentium2",
2652 "pentium3",
2653 "pentium4",
2654 "pentium-m",
2655 "prescott",
2656 "nocona",
2657 "core2",
2658 "corei7",
2659 "atom",
2660 "geode",
2661 "k6",
2662 "k6-2",
2663 "k6-3",
2664 "athlon",
2665 "athlon-4",
2666 "k8",
2667 "amdfam10",
2668 "bdver1",
2669 "btver1"
2670 };
2671 \f
2672 /* Return true if a red-zone is in use. */
2673
2674 static inline bool
2675 ix86_using_red_zone (void)
2676 {
2677 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
2678 }
2679
2680 /* Implement TARGET_HANDLE_OPTION. */
2681
2682 static bool
2683 ix86_handle_option (struct gcc_options *opts,
2684 struct gcc_options *opts_set ATTRIBUTE_UNUSED,
2685 const struct cl_decoded_option *decoded,
2686 location_t loc ATTRIBUTE_UNUSED)
2687 {
2688 size_t code = decoded->opt_index;
2689 int value = decoded->value;
2690
2691 switch (code)
2692 {
2693 case OPT_mmmx:
2694 if (value)
2695 {
2696 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2697 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2698 }
2699 else
2700 {
2701 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2702 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2703 }
2704 return true;
2705
2706 case OPT_m3dnow:
2707 if (value)
2708 {
2709 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2710 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2711 }
2712 else
2713 {
2714 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2715 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2716 }
2717 return true;
2718
2719 case OPT_m3dnowa:
2720 return false;
2721
2722 case OPT_msse:
2723 if (value)
2724 {
2725 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2726 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2727 }
2728 else
2729 {
2730 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2731 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2732 }
2733 return true;
2734
2735 case OPT_msse2:
2736 if (value)
2737 {
2738 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2739 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2740 }
2741 else
2742 {
2743 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2744 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2745 }
2746 return true;
2747
2748 case OPT_msse3:
2749 if (value)
2750 {
2751 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2752 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2753 }
2754 else
2755 {
2756 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2757 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2758 }
2759 return true;
2760
2761 case OPT_mssse3:
2762 if (value)
2763 {
2764 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2765 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2766 }
2767 else
2768 {
2769 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2770 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2771 }
2772 return true;
2773
2774 case OPT_msse4_1:
2775 if (value)
2776 {
2777 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2778 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2779 }
2780 else
2781 {
2782 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2783 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2784 }
2785 return true;
2786
2787 case OPT_msse4_2:
2788 if (value)
2789 {
2790 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2791 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2792 }
2793 else
2794 {
2795 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2796 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2797 }
2798 return true;
2799
2800 case OPT_mavx:
2801 if (value)
2802 {
2803 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2804 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2805 }
2806 else
2807 {
2808 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2809 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2810 }
2811 return true;
2812
2813 case OPT_mfma:
2814 if (value)
2815 {
2816 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2817 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2818 }
2819 else
2820 {
2821 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2822 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2823 }
2824 return true;
2825
2826 case OPT_msse4:
2827 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2828 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2829 return true;
2830
2831 case OPT_mno_sse4:
2832 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2833 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2834 return true;
2835
2836 case OPT_msse4a:
2837 if (value)
2838 {
2839 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2840 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2841 }
2842 else
2843 {
2844 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2845 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2846 }
2847 return true;
2848
2849 case OPT_mfma4:
2850 if (value)
2851 {
2852 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2853 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2854 }
2855 else
2856 {
2857 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2858 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2859 }
2860 return true;
2861
2862 case OPT_mxop:
2863 if (value)
2864 {
2865 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2866 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2867 }
2868 else
2869 {
2870 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2871 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2872 }
2873 return true;
2874
2875 case OPT_mlwp:
2876 if (value)
2877 {
2878 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2879 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2880 }
2881 else
2882 {
2883 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2884 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2885 }
2886 return true;
2887
2888 case OPT_mabm:
2889 if (value)
2890 {
2891 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2892 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2893 }
2894 else
2895 {
2896 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2897 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2898 }
2899 return true;
2900
2901 case OPT_mbmi:
2902 if (value)
2903 {
2904 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_BMI_SET;
2905 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_SET;
2906 }
2907 else
2908 {
2909 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_BMI_UNSET;
2910 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_UNSET;
2911 }
2912 return true;
2913
2914 case OPT_mtbm:
2915 if (value)
2916 {
2917 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_TBM_SET;
2918 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_SET;
2919 }
2920 else
2921 {
2922 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_TBM_UNSET;
2923 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_UNSET;
2924 }
2925 return true;
2926
2927 case OPT_mpopcnt:
2928 if (value)
2929 {
2930 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2931 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2932 }
2933 else
2934 {
2935 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2936 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2937 }
2938 return true;
2939
2940 case OPT_msahf:
2941 if (value)
2942 {
2943 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2944 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2945 }
2946 else
2947 {
2948 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2949 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2950 }
2951 return true;
2952
2953 case OPT_mcx16:
2954 if (value)
2955 {
2956 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2957 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2958 }
2959 else
2960 {
2961 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2962 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2963 }
2964 return true;
2965
2966 case OPT_mmovbe:
2967 if (value)
2968 {
2969 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2970 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2971 }
2972 else
2973 {
2974 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2975 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2976 }
2977 return true;
2978
2979 case OPT_mcrc32:
2980 if (value)
2981 {
2982 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2983 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2984 }
2985 else
2986 {
2987 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2988 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2989 }
2990 return true;
2991
2992 case OPT_maes:
2993 if (value)
2994 {
2995 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2996 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2997 }
2998 else
2999 {
3000 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
3001 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
3002 }
3003 return true;
3004
3005 case OPT_mpclmul:
3006 if (value)
3007 {
3008 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
3009 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
3010 }
3011 else
3012 {
3013 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
3014 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
3015 }
3016 return true;
3017
3018 case OPT_mfsgsbase:
3019 if (value)
3020 {
3021 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE_SET;
3022 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_SET;
3023 }
3024 else
3025 {
3026 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FSGSBASE_UNSET;
3027 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_UNSET;
3028 }
3029 return true;
3030
3031 case OPT_mrdrnd:
3032 if (value)
3033 {
3034 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_RDRND_SET;
3035 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_SET;
3036 }
3037 else
3038 {
3039 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_RDRND_UNSET;
3040 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_UNSET;
3041 }
3042 return true;
3043
3044 case OPT_mf16c:
3045 if (value)
3046 {
3047 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_F16C_SET;
3048 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_SET;
3049 }
3050 else
3051 {
3052 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_F16C_UNSET;
3053 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_UNSET;
3054 }
3055 return true;
3056
3057 default:
3058 return true;
3059 }
3060 }
3061 \f
3062 /* Return a string that documents the current -m options. The caller is
3063 responsible for freeing the string. */
3064
3065 static char *
3066 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
3067 const char *fpmath, bool add_nl_p)
3068 {
3069 struct ix86_target_opts
3070 {
3071 const char *option; /* option string */
3072 int mask; /* isa mask options */
3073 };
3074
3075 /* This table is ordered so that options like -msse4.2, which imply
3076 preceding options, are matched first. */
3077 static struct ix86_target_opts isa_opts[] =
3078 {
3079 { "-m64", OPTION_MASK_ISA_64BIT },
3080 { "-mfma4", OPTION_MASK_ISA_FMA4 },
3081 { "-mfma", OPTION_MASK_ISA_FMA },
3082 { "-mxop", OPTION_MASK_ISA_XOP },
3083 { "-mlwp", OPTION_MASK_ISA_LWP },
3084 { "-msse4a", OPTION_MASK_ISA_SSE4A },
3085 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
3086 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
3087 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
3088 { "-msse3", OPTION_MASK_ISA_SSE3 },
3089 { "-msse2", OPTION_MASK_ISA_SSE2 },
3090 { "-msse", OPTION_MASK_ISA_SSE },
3091 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
3092 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
3093 { "-mmmx", OPTION_MASK_ISA_MMX },
3094 { "-mabm", OPTION_MASK_ISA_ABM },
3095 { "-mbmi", OPTION_MASK_ISA_BMI },
3096 { "-mtbm", OPTION_MASK_ISA_TBM },
3097 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
3098 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
3099 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
3100 { "-maes", OPTION_MASK_ISA_AES },
3101 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
3102 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
3103 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
3104 { "-mf16c", OPTION_MASK_ISA_F16C },
3105 };
3106
3107 /* Flag options. */
3108 static struct ix86_target_opts flag_opts[] =
3109 {
3110 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
3111 { "-m80387", MASK_80387 },
3112 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
3113 { "-malign-double", MASK_ALIGN_DOUBLE },
3114 { "-mcld", MASK_CLD },
3115 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
3116 { "-mieee-fp", MASK_IEEE_FP },
3117 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
3118 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
3119 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
3120 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
3121 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
3122 { "-mno-push-args", MASK_NO_PUSH_ARGS },
3123 { "-mno-red-zone", MASK_NO_RED_ZONE },
3124 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
3125 { "-mrecip", MASK_RECIP },
3126 { "-mrtd", MASK_RTD },
3127 { "-msseregparm", MASK_SSEREGPARM },
3128 { "-mstack-arg-probe", MASK_STACK_PROBE },
3129 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
3130 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
3131 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
3132 { "-mvzeroupper", MASK_VZEROUPPER },
3133 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD},
3134 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE},
3135 };
3136
3137 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
3138
3139 char isa_other[40];
3140 char target_other[40];
3141 unsigned num = 0;
3142 unsigned i, j;
3143 char *ret;
3144 char *ptr;
3145 size_t len;
3146 size_t line_len;
3147 size_t sep_len;
3148
3149 memset (opts, '\0', sizeof (opts));
3150
3151 /* Add -march= option. */
3152 if (arch)
3153 {
3154 opts[num][0] = "-march=";
3155 opts[num++][1] = arch;
3156 }
3157
3158 /* Add -mtune= option. */
3159 if (tune)
3160 {
3161 opts[num][0] = "-mtune=";
3162 opts[num++][1] = tune;
3163 }
3164
3165 /* Pick out the isa options. */
3166 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
3167 {
3168 if ((isa & isa_opts[i].mask) != 0)
3169 {
3170 opts[num++][0] = isa_opts[i].option;
3171 isa &= ~ isa_opts[i].mask;
3172 }
3173 }
3174
3175 if (isa && add_nl_p)
3176 {
3177 opts[num++][0] = isa_other;
3178 sprintf (isa_other, "(other isa: %#x)", isa);
3179 }
3180
3181 /* Add flag options. */
3182 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
3183 {
3184 if ((flags & flag_opts[i].mask) != 0)
3185 {
3186 opts[num++][0] = flag_opts[i].option;
3187 flags &= ~ flag_opts[i].mask;
3188 }
3189 }
3190
3191 if (flags && add_nl_p)
3192 {
3193 opts[num++][0] = target_other;
3194 sprintf (target_other, "(other flags: %#x)", flags);
3195 }
3196
3197 /* Add -fpmath= option. */
3198 if (fpmath)
3199 {
3200 opts[num][0] = "-mfpmath=";
3201 opts[num++][1] = fpmath;
3202 }
3203
3204 /* Any options? */
3205 if (num == 0)
3206 return NULL;
3207
3208 gcc_assert (num < ARRAY_SIZE (opts));
3209
3210 /* Size the string. */
3211 len = 0;
3212 sep_len = (add_nl_p) ? 3 : 1;
3213 for (i = 0; i < num; i++)
3214 {
3215 len += sep_len;
3216 for (j = 0; j < 2; j++)
3217 if (opts[i][j])
3218 len += strlen (opts[i][j]);
3219 }
3220
3221 /* Build the string. */
3222 ret = ptr = (char *) xmalloc (len);
3223 line_len = 0;
3224
3225 for (i = 0; i < num; i++)
3226 {
3227 size_t len2[2];
3228
3229 for (j = 0; j < 2; j++)
3230 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
3231
3232 if (i != 0)
3233 {
3234 *ptr++ = ' ';
3235 line_len++;
3236
3237 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
3238 {
3239 *ptr++ = '\\';
3240 *ptr++ = '\n';
3241 line_len = 0;
3242 }
3243 }
3244
3245 for (j = 0; j < 2; j++)
3246 if (opts[i][j])
3247 {
3248 memcpy (ptr, opts[i][j], len2[j]);
3249 ptr += len2[j];
3250 line_len += len2[j];
3251 }
3252 }
3253
3254 *ptr = '\0';
3255 gcc_assert (ret + len >= ptr);
3256
3257 return ret;
3258 }
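
/* Example (illustrative only; not part of the original sources): with the
   SSE and SSE2 isa bits set, flags 0, arch "core2", tune "generic" and
   fpmath "sse", this function would build a string along the lines of

     "-march=core2 -mtune=generic -msse2 -msse -mfpmath=sse"

   -msse2 precedes -msse because isa_opts is scanned in table order, and
   -mfpmath= is always appended last.  */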
3259
3260 /* Return TRUE if software prefetching is beneficial for the
3261 given CPU. */
3262
3263 static bool
3264 software_prefetching_beneficial_p (void)
3265 {
3266 switch (ix86_tune)
3267 {
3268 case PROCESSOR_GEODE:
3269 case PROCESSOR_K6:
3270 case PROCESSOR_ATHLON:
3271 case PROCESSOR_K8:
3272 case PROCESSOR_AMDFAM10:
3273 case PROCESSOR_BTVER1:
3274 return true;
3275
3276 default:
3277 return false;
3278 }
3279 }
3280
3281 /* Return true if profiling code should be emitted before the
3282 prologue, and false otherwise.
3283 Note: for x86 with "hotfix" (hot patching) this case is diagnosed with sorry(). */
3284 static bool
3285 ix86_profile_before_prologue (void)
3286 {
3287 return flag_fentry != 0;
3288 }
3289
3290 /* Function that is callable from the debugger to print the current
3291 options. */
3292 void
3293 ix86_debug_options (void)
3294 {
3295 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
3296 ix86_arch_string, ix86_tune_string,
3297 ix86_fpmath_string, true);
3298
3299 if (opts)
3300 {
3301 fprintf (stderr, "%s\n\n", opts);
3302 free (opts);
3303 }
3304 else
3305 fputs ("<no options>\n\n", stderr);
3306
3307 return;
3308 }
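
/* Usage sketch (an assumption about the debugging workflow, not taken from
   the original sources): from a debugger attached to cc1 the current -m
   options can be dumped with, e.g.

     (gdb) call ix86_debug_options ()

   which prints the reconstructed option string to stderr.  */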
3309 \f
3310 /* Override various settings based on options. If MAIN_ARGS_P, the
3311 options are from the command line, otherwise they are from
3312 attributes. */
3313
3314 static void
3315 ix86_option_override_internal (bool main_args_p)
3316 {
3317 int i;
3318 unsigned int ix86_arch_mask, ix86_tune_mask;
3319 const bool ix86_tune_specified = (ix86_tune_string != NULL);
3320 const char *prefix;
3321 const char *suffix;
3322 const char *sw;
3323
3324 /* Comes from final.c -- no real reason to change it. */
3325 #define MAX_CODE_ALIGN 16
3326
3327 enum pta_flags
3328 {
3329 PTA_SSE = 1 << 0,
3330 PTA_SSE2 = 1 << 1,
3331 PTA_SSE3 = 1 << 2,
3332 PTA_MMX = 1 << 3,
3333 PTA_PREFETCH_SSE = 1 << 4,
3334 PTA_3DNOW = 1 << 5,
3335 PTA_3DNOW_A = 1 << 6,
3336 PTA_64BIT = 1 << 7,
3337 PTA_SSSE3 = 1 << 8,
3338 PTA_CX16 = 1 << 9,
3339 PTA_POPCNT = 1 << 10,
3340 PTA_ABM = 1 << 11,
3341 PTA_SSE4A = 1 << 12,
3342 PTA_NO_SAHF = 1 << 13,
3343 PTA_SSE4_1 = 1 << 14,
3344 PTA_SSE4_2 = 1 << 15,
3345 PTA_AES = 1 << 16,
3346 PTA_PCLMUL = 1 << 17,
3347 PTA_AVX = 1 << 18,
3348 PTA_FMA = 1 << 19,
3349 PTA_MOVBE = 1 << 20,
3350 PTA_FMA4 = 1 << 21,
3351 PTA_XOP = 1 << 22,
3352 PTA_LWP = 1 << 23,
3353 PTA_FSGSBASE = 1 << 24,
3354 PTA_RDRND = 1 << 25,
3355 PTA_F16C = 1 << 26,
3356 PTA_BMI = 1 << 27,
3357 PTA_TBM = 1 << 28
3358 /* if this reaches 32, need to widen struct pta flags below */
3359 };
3360
3361 static struct pta
3362 {
3363 const char *const name; /* processor name or nickname. */
3364 const enum processor_type processor;
3365 const enum attr_cpu schedule;
3366 const unsigned /*enum pta_flags*/ flags;
3367 }
3368 const processor_alias_table[] =
3369 {
3370 {"i386", PROCESSOR_I386, CPU_NONE, 0},
3371 {"i486", PROCESSOR_I486, CPU_NONE, 0},
3372 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3373 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3374 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
3375 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
3376 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3377 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3378 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
3379 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3380 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3381 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
3382 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3383 PTA_MMX | PTA_SSE},
3384 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3385 PTA_MMX | PTA_SSE},
3386 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3387 PTA_MMX | PTA_SSE | PTA_SSE2},
3388 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
3389 PTA_MMX | PTA_SSE | PTA_SSE2},
3390 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
3391 PTA_MMX | PTA_SSE | PTA_SSE2},
3392 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
3393 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
3394 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
3395 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3396 | PTA_CX16 | PTA_NO_SAHF},
3397 {"core2", PROCESSOR_CORE2_64, CPU_CORE2,
3398 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3399 | PTA_SSSE3 | PTA_CX16},
3400 {"corei7", PROCESSOR_COREI7_64, CPU_COREI7,
3401 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3402 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
3403 {"corei7-avx", PROCESSOR_COREI7_64, CPU_COREI7,
3404 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3405 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
3406 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL},
3407 {"atom", PROCESSOR_ATOM, CPU_ATOM,
3408 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3409 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
3410 {"geode", PROCESSOR_GEODE, CPU_GEODE,
3411 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3412 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
3413 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3414 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3415 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
3416 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3417 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
3418 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3419 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
3420 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3421 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
3422 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3423 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
3424 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3425 {"x86-64", PROCESSOR_K8, CPU_K8,
3426 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
3427 {"k8", PROCESSOR_K8, CPU_K8,
3428 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3429 | PTA_SSE2 | PTA_NO_SAHF},
3430 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3431 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3432 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3433 {"opteron", PROCESSOR_K8, CPU_K8,
3434 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3435 | PTA_SSE2 | PTA_NO_SAHF},
3436 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3437 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3438 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3439 {"athlon64", PROCESSOR_K8, CPU_K8,
3440 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3441 | PTA_SSE2 | PTA_NO_SAHF},
3442 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3443 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3444 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3445 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3446 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3447 | PTA_SSE2 | PTA_NO_SAHF},
3448 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3449 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3450 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3451 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3452 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3453 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3454 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3455 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3456 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3457 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
3458 | PTA_XOP | PTA_LWP},
3459 {"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
3460 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3461 | PTA_SSSE3 | PTA_SSE4A | PTA_ABM | PTA_CX16},
3462 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3463 0 /* flags are only used for -march switch. */ },
3464 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3465 PTA_64BIT /* flags are only used for -march switch. */ },
3466 };
3467
3468 int const pta_size = ARRAY_SIZE (processor_alias_table);
3469
3470 /* Set up prefix/suffix so the error messages refer to either the command
3471 line argument, or the attribute(target). */
3472 if (main_args_p)
3473 {
3474 prefix = "-m";
3475 suffix = "";
3476 sw = "switch";
3477 }
3478 else
3479 {
3480 prefix = "option(\"";
3481 suffix = "\")";
3482 sw = "attribute";
3483 }
3484
3485 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3486 SUBTARGET_OVERRIDE_OPTIONS;
3487 #endif
3488
3489 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3490 SUBSUBTARGET_OVERRIDE_OPTIONS;
3491 #endif
3492
3493 /* -fPIC is the default for 64-bit Darwin (Mach-O). */
3494 if (TARGET_MACHO && TARGET_64BIT)
3495 flag_pic = 2;
3496
3497 /* Need to check -mtune=generic first. */
3498 if (ix86_tune_string)
3499 {
3500 if (!strcmp (ix86_tune_string, "generic")
3501 || !strcmp (ix86_tune_string, "i686")
3502 /* As special support for cross compilers we read -mtune=native
3503 as -mtune=generic. With native compilers we won't see the
3504 -mtune=native, as it was changed by the driver. */
3505 || !strcmp (ix86_tune_string, "native"))
3506 {
3507 if (TARGET_64BIT)
3508 ix86_tune_string = "generic64";
3509 else
3510 ix86_tune_string = "generic32";
3511 }
3512 /* If this call is for setting the option attribute, allow the
3513 generic32/generic64 that was previously set. */
3514 else if (!main_args_p
3515 && (!strcmp (ix86_tune_string, "generic32")
3516 || !strcmp (ix86_tune_string, "generic64")))
3517 ;
3518 else if (!strncmp (ix86_tune_string, "generic", 7))
3519 error ("bad value (%s) for %stune=%s %s",
3520 ix86_tune_string, prefix, suffix, sw);
3521 else if (!strcmp (ix86_tune_string, "x86-64"))
3522 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3523 "%stune=k8%s or %stune=generic%s instead as appropriate",
3524 prefix, suffix, prefix, suffix, prefix, suffix);
3525 }
3526 else
3527 {
3528 if (ix86_arch_string)
3529 ix86_tune_string = ix86_arch_string;
3530 if (!ix86_tune_string)
3531 {
3532 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3533 ix86_tune_defaulted = 1;
3534 }
3535
3536 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3537 need to use a sensible tune option. */
3538 if (!strcmp (ix86_tune_string, "generic")
3539 || !strcmp (ix86_tune_string, "x86-64")
3540 || !strcmp (ix86_tune_string, "i686"))
3541 {
3542 if (TARGET_64BIT)
3543 ix86_tune_string = "generic64";
3544 else
3545 ix86_tune_string = "generic32";
3546 }
3547 }
3548
3549 if (ix86_stringop_string)
3550 {
3551 if (!strcmp (ix86_stringop_string, "rep_byte"))
3552 stringop_alg = rep_prefix_1_byte;
3553 else if (!strcmp (ix86_stringop_string, "libcall"))
3554 stringop_alg = libcall;
3555 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
3556 stringop_alg = rep_prefix_4_byte;
3557 else if (!strcmp (ix86_stringop_string, "rep_8byte")
3558 && TARGET_64BIT)
3559 /* rep; movq isn't available in 32-bit code. */
3560 stringop_alg = rep_prefix_8_byte;
3561 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3562 stringop_alg = loop_1_byte;
3563 else if (!strcmp (ix86_stringop_string, "loop"))
3564 stringop_alg = loop;
3565 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3566 stringop_alg = unrolled_loop;
3567 else
3568 error ("bad value (%s) for %sstringop-strategy=%s %s",
3569 ix86_stringop_string, prefix, suffix, sw);
3570 }
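
  /* Note (added for clarity): "-mstringop-strategy=rep_8byte" is accepted
     only together with TARGET_64BIT; in 32-bit mode it falls through to the
     error branch above, because rep movsq cannot be used there.  */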
3571
3572 if (!ix86_arch_string)
3573 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3574 else
3575 ix86_arch_specified = 1;
3576
3577 /* Validate -mabi= value. */
3578 if (ix86_abi_string)
3579 {
3580 if (strcmp (ix86_abi_string, "sysv") == 0)
3581 ix86_abi = SYSV_ABI;
3582 else if (strcmp (ix86_abi_string, "ms") == 0)
3583 ix86_abi = MS_ABI;
3584 else
3585 error ("unknown ABI (%s) for %sabi=%s %s",
3586 ix86_abi_string, prefix, suffix, sw);
3587 }
3588 else
3589 ix86_abi = DEFAULT_ABI;
3590
3591 if (ix86_cmodel_string != 0)
3592 {
3593 if (!strcmp (ix86_cmodel_string, "small"))
3594 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3595 else if (!strcmp (ix86_cmodel_string, "medium"))
3596 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3597 else if (!strcmp (ix86_cmodel_string, "large"))
3598 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3599 else if (flag_pic)
3600 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3601 else if (!strcmp (ix86_cmodel_string, "32"))
3602 ix86_cmodel = CM_32;
3603 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3604 ix86_cmodel = CM_KERNEL;
3605 else
3606 error ("bad value (%s) for %scmodel=%s %s",
3607 ix86_cmodel_string, prefix, suffix, sw);
3608 }
3609 else
3610 {
3611 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3612 use of rip-relative addressing. This eliminates fixups that
3613 would otherwise be needed if this object is to be placed in a
3614 DLL, and is essentially just as efficient as direct addressing. */
3615 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3616 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3617 else if (TARGET_64BIT)
3618 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3619 else
3620 ix86_cmodel = CM_32;
3621 }
3622 if (ix86_asm_string != 0)
3623 {
3624 if (! TARGET_MACHO
3625 && !strcmp (ix86_asm_string, "intel"))
3626 ix86_asm_dialect = ASM_INTEL;
3627 else if (!strcmp (ix86_asm_string, "att"))
3628 ix86_asm_dialect = ASM_ATT;
3629 else
3630 error ("bad value (%s) for %sasm=%s %s",
3631 ix86_asm_string, prefix, suffix, sw);
3632 }
3633 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3634 error ("code model %qs not supported in the %s bit mode",
3635 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3636 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3637 sorry ("%i-bit mode not compiled in",
3638 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3639
3640 for (i = 0; i < pta_size; i++)
3641 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3642 {
3643 ix86_schedule = processor_alias_table[i].schedule;
3644 ix86_arch = processor_alias_table[i].processor;
3645 /* Default cpu tuning to the architecture. */
3646 ix86_tune = ix86_arch;
3647
3648 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3649 error ("CPU you selected does not support x86-64 "
3650 "instruction set");
3651
3652 if (processor_alias_table[i].flags & PTA_MMX
3653 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3654 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3655 if (processor_alias_table[i].flags & PTA_3DNOW
3656 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3657 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3658 if (processor_alias_table[i].flags & PTA_3DNOW_A
3659 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3660 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3661 if (processor_alias_table[i].flags & PTA_SSE
3662 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3663 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3664 if (processor_alias_table[i].flags & PTA_SSE2
3665 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3666 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3667 if (processor_alias_table[i].flags & PTA_SSE3
3668 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3669 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3670 if (processor_alias_table[i].flags & PTA_SSSE3
3671 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3672 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3673 if (processor_alias_table[i].flags & PTA_SSE4_1
3674 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3675 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3676 if (processor_alias_table[i].flags & PTA_SSE4_2
3677 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3678 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3679 if (processor_alias_table[i].flags & PTA_AVX
3680 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3681 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3682 if (processor_alias_table[i].flags & PTA_FMA
3683 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3684 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3685 if (processor_alias_table[i].flags & PTA_SSE4A
3686 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3687 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3688 if (processor_alias_table[i].flags & PTA_FMA4
3689 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3690 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3691 if (processor_alias_table[i].flags & PTA_XOP
3692 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3693 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3694 if (processor_alias_table[i].flags & PTA_LWP
3695 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3696 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3697 if (processor_alias_table[i].flags & PTA_ABM
3698 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3699 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3700 if (processor_alias_table[i].flags & PTA_BMI
3701 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3702 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3703 if (processor_alias_table[i].flags & PTA_TBM
3704 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3705 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3706 if (processor_alias_table[i].flags & PTA_CX16
3707 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3708 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3709 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3710 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3711 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3712 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3713 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3714 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3715 if (processor_alias_table[i].flags & PTA_MOVBE
3716 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3717 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3718 if (processor_alias_table[i].flags & PTA_AES
3719 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3720 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3721 if (processor_alias_table[i].flags & PTA_PCLMUL
3722 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3723 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3724 if (processor_alias_table[i].flags & PTA_FSGSBASE
3725 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3726 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3727 if (processor_alias_table[i].flags & PTA_RDRND
3728 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3729 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3730 if (processor_alias_table[i].flags & PTA_F16C
3731 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3732 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3733 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3734 x86_prefetch_sse = true;
3735
3736 break;
3737 }
3738
3739 if (!strcmp (ix86_arch_string, "generic"))
3740 error ("generic CPU can be used only for %stune=%s %s",
3741 prefix, suffix, sw);
3742 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3743 error ("bad value (%s) for %sarch=%s %s",
3744 ix86_arch_string, prefix, suffix, sw);
3745
3746 ix86_arch_mask = 1u << ix86_arch;
3747 for (i = 0; i < X86_ARCH_LAST; ++i)
3748 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3749
3750 for (i = 0; i < pta_size; i++)
3751 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3752 {
3753 ix86_schedule = processor_alias_table[i].schedule;
3754 ix86_tune = processor_alias_table[i].processor;
3755 if (TARGET_64BIT)
3756 {
3757 if (!(processor_alias_table[i].flags & PTA_64BIT))
3758 {
3759 if (ix86_tune_defaulted)
3760 {
3761 ix86_tune_string = "x86-64";
3762 for (i = 0; i < pta_size; i++)
3763 if (! strcmp (ix86_tune_string,
3764 processor_alias_table[i].name))
3765 break;
3766 ix86_schedule = processor_alias_table[i].schedule;
3767 ix86_tune = processor_alias_table[i].processor;
3768 }
3769 else
3770 error ("CPU you selected does not support x86-64 "
3771 "instruction set");
3772 }
3773 }
3774 else
3775 {
3776 /* Adjust tuning when compiling for 32-bit ABI. */
3777 switch (ix86_tune)
3778 {
3779 case PROCESSOR_GENERIC64:
3780 ix86_tune = PROCESSOR_GENERIC32;
3781 ix86_schedule = CPU_PENTIUMPRO;
3782 break;
3783
3784 case PROCESSOR_CORE2_64:
3785 ix86_tune = PROCESSOR_CORE2_32;
3786 break;
3787
3788 case PROCESSOR_COREI7_64:
3789 ix86_tune = PROCESSOR_COREI7_32;
3790 break;
3791
3792 default:
3793 break;
3794 }
3795 }
3796 /* Intel CPUs have always interpreted SSE prefetch instructions as
3797 NOPs; so, we can enable SSE prefetch instructions even when
3798 -mtune (rather than -march) points us to a processor that has them.
3799 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3800 higher processors. */
3801 if (TARGET_CMOVE
3802 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3803 x86_prefetch_sse = true;
3804 break;
3805 }
3806
3807 if (ix86_tune_specified && i == pta_size)
3808 error ("bad value (%s) for %stune=%s %s",
3809 ix86_tune_string, prefix, suffix, sw);
3810
3811 ix86_tune_mask = 1u << ix86_tune;
3812 for (i = 0; i < X86_TUNE_LAST; ++i)
3813 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3814
3815 #ifndef USE_IX86_FRAME_POINTER
3816 #define USE_IX86_FRAME_POINTER 0
3817 #endif
3818
3819 #ifndef USE_X86_64_FRAME_POINTER
3820 #define USE_X86_64_FRAME_POINTER 0
3821 #endif
3822
3823 /* Set the default values for switches whose default depends on TARGET_64BIT
3824 in case they weren't overwritten by command line options. */
3825 if (TARGET_64BIT)
3826 {
3827 if (optimize > 1 && !global_options_set.x_flag_zee)
3828 flag_zee = 1;
3829 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3830 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3831 if (flag_asynchronous_unwind_tables == 2)
3832 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3833 if (flag_pcc_struct_return == 2)
3834 flag_pcc_struct_return = 0;
3835 }
3836 else
3837 {
3838 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3839 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3840 if (flag_asynchronous_unwind_tables == 2)
3841 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3842 if (flag_pcc_struct_return == 2)
3843 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3844 }
3845
3846 if (optimize_size)
3847 ix86_cost = &ix86_size_cost;
3848 else
3849 ix86_cost = processor_target_table[ix86_tune].cost;
3850
3851 /* Arrange to set up i386_stack_locals for all functions. */
3852 init_machine_status = ix86_init_machine_status;
3853
3854 /* Validate -mregparm= value. */
3855 if (ix86_regparm_string)
3856 {
3857 if (TARGET_64BIT)
3858 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3859 i = atoi (ix86_regparm_string);
3860 if (i < 0 || i > REGPARM_MAX)
3861 error ("%sregparm=%d%s is not between 0 and %d",
3862 prefix, i, suffix, REGPARM_MAX);
3863 else
3864 ix86_regparm = i;
3865 }
3866 if (TARGET_64BIT)
3867 ix86_regparm = REGPARM_MAX;
3868
3869 /* If the user has provided any of the -malign-* options,
3870 warn and use that value only if -falign-* is not set.
3871 Remove this code in GCC 3.2 or later. */
3872 if (ix86_align_loops_string)
3873 {
3874 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3875 prefix, suffix, suffix);
3876 if (align_loops == 0)
3877 {
3878 i = atoi (ix86_align_loops_string);
3879 if (i < 0 || i > MAX_CODE_ALIGN)
3880 error ("%salign-loops=%d%s is not between 0 and %d",
3881 prefix, i, suffix, MAX_CODE_ALIGN);
3882 else
3883 align_loops = 1 << i;
3884 }
3885 }
3886
3887 if (ix86_align_jumps_string)
3888 {
3889 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3890 prefix, suffix, suffix);
3891 if (align_jumps == 0)
3892 {
3893 i = atoi (ix86_align_jumps_string);
3894 if (i < 0 || i > MAX_CODE_ALIGN)
3895 error ("%salign-loops=%d%s is not between 0 and %d",
3896 prefix, i, suffix, MAX_CODE_ALIGN);
3897 else
3898 align_jumps = 1 << i;
3899 }
3900 }
3901
3902 if (ix86_align_funcs_string)
3903 {
3904 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3905 prefix, suffix, suffix);
3906 if (align_functions == 0)
3907 {
3908 i = atoi (ix86_align_funcs_string);
3909 if (i < 0 || i > MAX_CODE_ALIGN)
3910 error ("%salign-loops=%d%s is not between 0 and %d",
3911 prefix, i, suffix, MAX_CODE_ALIGN);
3912 else
3913 align_functions = 1 << i;
3914 }
3915 }
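
  /* Note (added for clarity): the -malign-* values are the log2 of the
     requested alignment, e.g. "-malign-functions=4" above yields
     align_functions = 1 << 4 = 16 bytes.  */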
3916
3917 /* Default align_* from the processor table. */
3918 if (align_loops == 0)
3919 {
3920 align_loops = processor_target_table[ix86_tune].align_loop;
3921 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3922 }
3923 if (align_jumps == 0)
3924 {
3925 align_jumps = processor_target_table[ix86_tune].align_jump;
3926 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3927 }
3928 if (align_functions == 0)
3929 {
3930 align_functions = processor_target_table[ix86_tune].align_func;
3931 }
3932
3933 /* Validate -mbranch-cost= value, or provide default. */
3934 ix86_branch_cost = ix86_cost->branch_cost;
3935 if (ix86_branch_cost_string)
3936 {
3937 i = atoi (ix86_branch_cost_string);
3938 if (i < 0 || i > 5)
3939 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3940 else
3941 ix86_branch_cost = i;
3942 }
3943 if (ix86_section_threshold_string)
3944 {
3945 i = atoi (ix86_section_threshold_string);
3946 if (i < 0)
3947 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3948 else
3949 ix86_section_threshold = i;
3950 }
3951
3952 if (ix86_tls_dialect_string)
3953 {
3954 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3955 ix86_tls_dialect = TLS_DIALECT_GNU;
3956 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3957 ix86_tls_dialect = TLS_DIALECT_GNU2;
3958 else
3959 error ("bad value (%s) for %stls-dialect=%s %s",
3960 ix86_tls_dialect_string, prefix, suffix, sw);
3961 }
3962
3963 if (ix87_precision_string)
3964 {
3965 i = atoi (ix87_precision_string);
3966 if (i != 32 && i != 64 && i != 80)
3967 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3968 }
3969
3970 if (TARGET_64BIT)
3971 {
3972 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3973
3974 /* Enable by default the SSE and MMX builtins. Do allow the user to
3975 explicitly disable any of these. In particular, disabling SSE and
3976 MMX for kernel code is extremely useful. */
3977 if (!ix86_arch_specified)
3978 ix86_isa_flags
3979 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3980 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3981
3982 if (TARGET_RTD)
3983 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3984 }
3985 else
3986 {
3987 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3988
3989 if (!ix86_arch_specified)
3990 ix86_isa_flags
3991 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3992
3993 /* The i386 ABI does not specify a red zone. It still makes sense to use one
3994 when the programmer takes care to keep the stack from being destroyed. */
3995 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3996 target_flags |= MASK_NO_RED_ZONE;
3997 }
3998
3999 /* Keep nonleaf frame pointers. */
4000 if (flag_omit_frame_pointer)
4001 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
4002 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
4003 flag_omit_frame_pointer = 1;
4004
4005 /* If we're doing fast math, we don't care about comparison order
4006 wrt NaNs. This lets us use a shorter comparison sequence. */
4007 if (flag_finite_math_only)
4008 target_flags &= ~MASK_IEEE_FP;
4009
4010 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
4011 since the insns won't need emulation. */
4012 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
4013 target_flags &= ~MASK_NO_FANCY_MATH_387;
4014
4015 /* Likewise, if the target doesn't have a 387, or we've specified
4016 software floating point, don't use 387 inline intrinsics. */
4017 if (!TARGET_80387)
4018 target_flags |= MASK_NO_FANCY_MATH_387;
4019
4020 /* Turn on MMX builtins for -msse. */
4021 if (TARGET_SSE)
4022 {
4023 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
4024 x86_prefetch_sse = true;
4025 }
4026
4027 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
4028 if (TARGET_SSE4_2 || TARGET_ABM)
4029 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
4030
4031 /* Validate -mpreferred-stack-boundary= value or default it to
4032 PREFERRED_STACK_BOUNDARY_DEFAULT. */
4033 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
4034 if (ix86_preferred_stack_boundary_string)
4035 {
4036 int min = (TARGET_64BIT ? 4 : 2);
4037 int max = (TARGET_SEH ? 4 : 12);
4038
4039 i = atoi (ix86_preferred_stack_boundary_string);
4040 if (i < min || i > max)
4041 {
4042 if (min == max)
4043 error ("%spreferred-stack-boundary%s is not supported "
4044 "for this target", prefix, suffix);
4045 else
4046 error ("%spreferred-stack-boundary=%d%s is not between %d and %d",
4047 prefix, i, suffix, min, max);
4048 }
4049 else
4050 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
4051 }
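
  /* Note (added for clarity): the option value is the log2 of the byte
     alignment, e.g. "-mpreferred-stack-boundary=4" yields
     (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. a 16-byte aligned stack
     (BITS_PER_UNIT is 8 here).  */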
4052
4053 /* Set the default value for -mstackrealign. */
4054 if (ix86_force_align_arg_pointer == -1)
4055 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
4056
4057 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
4058
4059 /* Validate -mincoming-stack-boundary= value or default it to
4060 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
4061 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
4062 if (ix86_incoming_stack_boundary_string)
4063 {
4064 i = atoi (ix86_incoming_stack_boundary_string);
4065 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
4066 error ("-mincoming-stack-boundary=%d is not between %d and 12",
4067 i, TARGET_64BIT ? 4 : 2);
4068 else
4069 {
4070 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
4071 ix86_incoming_stack_boundary
4072 = ix86_user_incoming_stack_boundary;
4073 }
4074 }
4075
4076 /* Accept -msseregparm only if at least SSE support is enabled. */
4077 if (TARGET_SSEREGPARM
4078 && ! TARGET_SSE)
4079 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
4080
4081 ix86_fpmath = TARGET_FPMATH_DEFAULT;
4082 if (ix86_fpmath_string != 0)
4083 {
4084 if (! strcmp (ix86_fpmath_string, "387"))
4085 ix86_fpmath = FPMATH_387;
4086 else if (! strcmp (ix86_fpmath_string, "sse"))
4087 {
4088 if (!TARGET_SSE)
4089 {
4090 warning (0, "SSE instruction set disabled, using 387 arithmetics");
4091 ix86_fpmath = FPMATH_387;
4092 }
4093 else
4094 ix86_fpmath = FPMATH_SSE;
4095 }
4096 else if (! strcmp (ix86_fpmath_string, "387,sse")
4097 || ! strcmp (ix86_fpmath_string, "387+sse")
4098 || ! strcmp (ix86_fpmath_string, "sse,387")
4099 || ! strcmp (ix86_fpmath_string, "sse+387")
4100 || ! strcmp (ix86_fpmath_string, "both"))
4101 {
4102 if (!TARGET_SSE)
4103 {
4104 warning (0, "SSE instruction set disabled, using 387 arithmetics");
4105 ix86_fpmath = FPMATH_387;
4106 }
4107 else if (!TARGET_80387)
4108 {
4109 warning (0, "387 instruction set disabled, using SSE arithmetics");
4110 ix86_fpmath = FPMATH_SSE;
4111 }
4112 else
4113 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4114 }
4115 else
4116 error ("bad value (%s) for %sfpmath=%s %s",
4117 ix86_fpmath_string, prefix, suffix, sw);
4118 }
4119
4120 /* If the i387 is disabled, then do not return values in it. */
4121 if (!TARGET_80387)
4122 target_flags &= ~MASK_FLOAT_RETURNS;
4123
4124 /* Use an external vectorized library when vectorizing intrinsics. */
4125 if (ix86_veclibabi_string)
4126 {
4127 if (strcmp (ix86_veclibabi_string, "svml") == 0)
4128 ix86_veclib_handler = ix86_veclibabi_svml;
4129 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
4130 ix86_veclib_handler = ix86_veclibabi_acml;
4131 else
4132 error ("unknown vectorization library ABI type (%s) for "
4133 "%sveclibabi=%s %s", ix86_veclibabi_string,
4134 prefix, suffix, sw);
4135 }
4136
4137 if ((!USE_IX86_FRAME_POINTER
4138 || (x86_accumulate_outgoing_args & ix86_tune_mask))
4139 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4140 && !optimize_size)
4141 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4142
4143 /* ??? Unwind info is not correct around the CFG unless either a frame
4144 pointer is present or M_A_O_A is set. Fixing this requires rewriting
4145 unwind info generation to be aware of the CFG and propagating states
4146 around edges. */
4147 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
4148 || flag_exceptions || flag_non_call_exceptions)
4149 && flag_omit_frame_pointer
4150 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4151 {
4152 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4153 warning (0, "unwind tables currently require either a frame pointer "
4154 "or %saccumulate-outgoing-args%s for correctness",
4155 prefix, suffix);
4156 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4157 }
4158
4159 /* If stack probes are required, the space used for large function
4160 arguments on the stack must also be probed, so enable
4161 -maccumulate-outgoing-args so this happens in the prologue. */
4162 if (TARGET_STACK_PROBE
4163 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4164 {
4165 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4166 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
4167 "for correctness", prefix, suffix);
4168 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4169 }
4170
4171 /* For sane SSE instruction set generation we need the fcomi instruction.
4172 It is safe to enable all CMOVE instructions. */
4173 if (TARGET_SSE)
4174 TARGET_CMOVE = 1;
4175
4176 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
4177 {
4178 char *p;
4179 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
4180 p = strchr (internal_label_prefix, 'X');
4181 internal_label_prefix_len = p - internal_label_prefix;
4182 *p = '\0';
4183 }
4184
4185 /* When the scheduling description is not available, disable the scheduler pass
4186 so it won't slow down the compilation and make x87 code slower. */
4187 if (!TARGET_SCHEDULE)
4188 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
4189
4190 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4191 ix86_cost->simultaneous_prefetches,
4192 global_options.x_param_values,
4193 global_options_set.x_param_values);
4194 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
4195 global_options.x_param_values,
4196 global_options_set.x_param_values);
4197 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
4198 global_options.x_param_values,
4199 global_options_set.x_param_values);
4200 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
4201 global_options.x_param_values,
4202 global_options_set.x_param_values);
4203
4204 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
4205 if (flag_prefetch_loop_arrays < 0
4206 && HAVE_prefetch
4207 && optimize >= 3
4208 && software_prefetching_beneficial_p ())
4209 flag_prefetch_loop_arrays = 1;
4210
4211 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
4212 can be optimized to ap = __builtin_next_arg (0). */
4213 if (!TARGET_64BIT && !flag_split_stack)
4214 targetm.expand_builtin_va_start = NULL;
4215
4216 if (TARGET_64BIT)
4217 {
4218 ix86_gen_leave = gen_leave_rex64;
4219 ix86_gen_add3 = gen_adddi3;
4220 ix86_gen_sub3 = gen_subdi3;
4221 ix86_gen_sub3_carry = gen_subdi3_carry;
4222 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
4223 ix86_gen_monitor = gen_sse3_monitor64;
4224 ix86_gen_andsp = gen_anddi3;
4225 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
4226 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
4227 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
4228 }
4229 else
4230 {
4231 ix86_gen_leave = gen_leave;
4232 ix86_gen_add3 = gen_addsi3;
4233 ix86_gen_sub3 = gen_subsi3;
4234 ix86_gen_sub3_carry = gen_subsi3_carry;
4235 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
4236 ix86_gen_monitor = gen_sse3_monitor;
4237 ix86_gen_andsp = gen_andsi3;
4238 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
4239 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
4240 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
4241 }
4242
4243 #ifdef USE_IX86_CLD
4244 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
4245 if (!TARGET_64BIT)
4246 target_flags |= MASK_CLD & ~target_flags_explicit;
4247 #endif
4248
4249 if (!TARGET_64BIT && flag_pic)
4250 {
4251 if (flag_fentry > 0)
4252 sorry ("-mfentry isn%'t supported for 32-bit in combination "
4253 "with -fpic");
4254 flag_fentry = 0;
4255 }
4256 else if (TARGET_SEH)
4257 {
4258 if (flag_fentry == 0)
4259 sorry ("-mno-fentry isn%'t compatible with SEH");
4260 flag_fentry = 1;
4261 }
4262 else if (flag_fentry < 0)
4263 {
4264 #if defined(PROFILE_BEFORE_PROLOGUE)
4265 flag_fentry = 1;
4266 #else
4267 flag_fentry = 0;
4268 #endif
4269 }
4270
4271 /* Save the initial options in case the user uses function-specific options. */
4272 if (main_args_p)
4273 target_option_default_node = target_option_current_node
4274 = build_target_option_node ();
4275
4276 if (TARGET_AVX)
4277 {
4278 /* When not optimizing for size, enable the vzeroupper optimization for
4279 TARGET_AVX with -fexpensive-optimizations and split 32-byte
4280 AVX unaligned load/store. */
4281 if (!optimize_size)
4282 {
4283 if (flag_expensive_optimizations
4284 && !(target_flags_explicit & MASK_VZEROUPPER))
4285 target_flags |= MASK_VZEROUPPER;
4286 if (!(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
4287 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
4288 if (!(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
4289 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
4290 }
4291 }
4292 else
4293 {
4294 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
4295 target_flags &= ~MASK_VZEROUPPER;
4296 }
4297 }
4298
4299 /* Return TRUE if VAL is passed in a register in a 256-bit AVX mode. */
4300
4301 static bool
4302 function_pass_avx256_p (const_rtx val)
4303 {
4304 if (!val)
4305 return false;
4306
4307 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
4308 return true;
4309
4310 if (GET_CODE (val) == PARALLEL)
4311 {
4312 int i;
4313 rtx r;
4314
4315 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
4316 {
4317 r = XVECEXP (val, 0, i);
4318 if (GET_CODE (r) == EXPR_LIST
4319 && XEXP (r, 0)
4320 && REG_P (XEXP (r, 0))
4321 && (GET_MODE (XEXP (r, 0)) == OImode
4322 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
4323 return true;
4324 }
4325 }
4326
4327 return false;
4328 }
4329
4330 /* Implement the TARGET_OPTION_OVERRIDE hook. */
4331
4332 static void
4333 ix86_option_override (void)
4334 {
4335 ix86_option_override_internal (true);
4336 }
4337
4338 /* Update register usage after having seen the compiler flags. */
4339
4340 static void
4341 ix86_conditional_register_usage (void)
4342 {
4343 int i;
4344 unsigned int j;
4345
4346 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4347 {
4348 if (fixed_regs[i] > 1)
4349 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
4350 if (call_used_regs[i] > 1)
4351 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
4352 }
4353
4354 /* The PIC register, if it exists, is fixed. */
4355 j = PIC_OFFSET_TABLE_REGNUM;
4356 if (j != INVALID_REGNUM)
4357 fixed_regs[j] = call_used_regs[j] = 1;
4358
4359 /* The 64-bit MS_ABI changes the set of call-used registers. */
4360 if (TARGET_64BIT_MS_ABI)
4361 {
4362 call_used_regs[SI_REG] = 0;
4363 call_used_regs[DI_REG] = 0;
4364 call_used_regs[XMM6_REG] = 0;
4365 call_used_regs[XMM7_REG] = 0;
4366 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4367 call_used_regs[i] = 0;
4368 }
4369
4370 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
4371 other call-clobbered regs for 64-bit. */
4372 if (TARGET_64BIT)
4373 {
4374 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
4375
4376 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4377 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
4378 && call_used_regs[i])
4379 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
4380 }
4381
4382 /* If MMX is disabled, squash the registers. */
4383 if (! TARGET_MMX)
4384 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4385 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
4386 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4387
4388 /* If SSE is disabled, squash the registers. */
4389 if (! TARGET_SSE)
4390 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4391 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
4392 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4393
4394 /* If the FPU is disabled, squash the registers. */
4395 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
4396 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4397 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
4398 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4399
4400 /* If 32-bit, squash the 64-bit registers. */
4401 if (! TARGET_64BIT)
4402 {
4403 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
4404 reg_names[i] = "";
4405 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4406 reg_names[i] = "";
4407 }
4408 }
4409
4410 \f
4411 /* Save the current options */
4412
4413 static void
4414 ix86_function_specific_save (struct cl_target_option *ptr)
4415 {
4416 ptr->arch = ix86_arch;
4417 ptr->schedule = ix86_schedule;
4418 ptr->tune = ix86_tune;
4419 ptr->fpmath = ix86_fpmath;
4420 ptr->branch_cost = ix86_branch_cost;
4421 ptr->tune_defaulted = ix86_tune_defaulted;
4422 ptr->arch_specified = ix86_arch_specified;
4423 ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4424 ptr->ix86_target_flags_explicit = target_flags_explicit;
4425
4426 /* The fields are char but the variables are not; make sure the
4427 values fit in the fields. */
4428 gcc_assert (ptr->arch == ix86_arch);
4429 gcc_assert (ptr->schedule == ix86_schedule);
4430 gcc_assert (ptr->tune == ix86_tune);
4431 gcc_assert (ptr->fpmath == ix86_fpmath);
4432 gcc_assert (ptr->branch_cost == ix86_branch_cost);
4433 }
4434
4435 /* Restore the current options */
4436
4437 static void
4438 ix86_function_specific_restore (struct cl_target_option *ptr)
4439 {
4440 enum processor_type old_tune = ix86_tune;
4441 enum processor_type old_arch = ix86_arch;
4442 unsigned int ix86_arch_mask, ix86_tune_mask;
4443 int i;
4444
4445 ix86_arch = (enum processor_type) ptr->arch;
4446 ix86_schedule = (enum attr_cpu) ptr->schedule;
4447 ix86_tune = (enum processor_type) ptr->tune;
4448 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
4449 ix86_branch_cost = ptr->branch_cost;
4450 ix86_tune_defaulted = ptr->tune_defaulted;
4451 ix86_arch_specified = ptr->arch_specified;
4452 ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
4453 target_flags_explicit = ptr->ix86_target_flags_explicit;
4454
4455 /* Recreate the arch feature tests if the arch changed */
4456 if (old_arch != ix86_arch)
4457 {
4458 ix86_arch_mask = 1u << ix86_arch;
4459 for (i = 0; i < X86_ARCH_LAST; ++i)
4460 ix86_arch_features[i]
4461 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4462 }
4463
4464 /* Recreate the tune optimization tests */
4465 if (old_tune != ix86_tune)
4466 {
4467 ix86_tune_mask = 1u << ix86_tune;
4468 for (i = 0; i < X86_TUNE_LAST; ++i)
4469 ix86_tune_features[i]
4470 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4471 }
4472 }
4473
4474 /* Print the current options */
4475
4476 static void
4477 ix86_function_specific_print (FILE *file, int indent,
4478 struct cl_target_option *ptr)
4479 {
4480 char *target_string
4481 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4482 NULL, NULL, NULL, false);
4483
4484 fprintf (file, "%*sarch = %d (%s)\n",
4485 indent, "",
4486 ptr->arch,
4487 ((ptr->arch < TARGET_CPU_DEFAULT_max)
4488 ? cpu_names[ptr->arch]
4489 : "<unknown>"));
4490
4491 fprintf (file, "%*stune = %d (%s)\n",
4492 indent, "",
4493 ptr->tune,
4494 ((ptr->tune < TARGET_CPU_DEFAULT_max)
4495 ? cpu_names[ptr->tune]
4496 : "<unknown>"));
4497
4498 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
4499 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
4500 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
4501 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4502
4503 if (target_string)
4504 {
4505 fprintf (file, "%*s%s\n", indent, "", target_string);
4506 free (target_string);
4507 }
4508 }
4509
4510 \f
4511 /* Inner function to process the attribute((target(...))), take an argument and
4512 set the current options from the argument. If we have a list, recursively go
4513 over the list. */
4514
4515 static bool
4516 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
4517 {
4518 char *next_optstr;
4519 bool ret = true;
4520
4521 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4522 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4523 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4524 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
4525
4526 enum ix86_opt_type
4527 {
4528 ix86_opt_unknown,
4529 ix86_opt_yes,
4530 ix86_opt_no,
4531 ix86_opt_str,
4532 ix86_opt_isa
4533 };
4534
4535 static const struct
4536 {
4537 const char *string;
4538 size_t len;
4539 enum ix86_opt_type type;
4540 int opt;
4541 int mask;
4542 } attrs[] = {
4543 /* isa options */
4544 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4545 IX86_ATTR_ISA ("abm", OPT_mabm),
4546 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4547 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4548 IX86_ATTR_ISA ("aes", OPT_maes),
4549 IX86_ATTR_ISA ("avx", OPT_mavx),
4550 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4551 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4552 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4553 IX86_ATTR_ISA ("sse", OPT_msse),
4554 IX86_ATTR_ISA ("sse2", OPT_msse2),
4555 IX86_ATTR_ISA ("sse3", OPT_msse3),
4556 IX86_ATTR_ISA ("sse4", OPT_msse4),
4557 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4558 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4559 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4560 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4561 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4562 IX86_ATTR_ISA ("xop", OPT_mxop),
4563 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4564 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4565 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4566 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4567
4568 /* string options */
4569 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4570 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
4571 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4572
4573 /* flag options */
4574 IX86_ATTR_YES ("cld",
4575 OPT_mcld,
4576 MASK_CLD),
4577
4578 IX86_ATTR_NO ("fancy-math-387",
4579 OPT_mfancy_math_387,
4580 MASK_NO_FANCY_MATH_387),
4581
4582 IX86_ATTR_YES ("ieee-fp",
4583 OPT_mieee_fp,
4584 MASK_IEEE_FP),
4585
4586 IX86_ATTR_YES ("inline-all-stringops",
4587 OPT_minline_all_stringops,
4588 MASK_INLINE_ALL_STRINGOPS),
4589
4590 IX86_ATTR_YES ("inline-stringops-dynamically",
4591 OPT_minline_stringops_dynamically,
4592 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4593
4594 IX86_ATTR_NO ("align-stringops",
4595 OPT_mno_align_stringops,
4596 MASK_NO_ALIGN_STRINGOPS),
4597
4598 IX86_ATTR_YES ("recip",
4599 OPT_mrecip,
4600 MASK_RECIP),
4601
4602 };
4603
4604 /* If this is a list, recurse to get the options. */
4605 if (TREE_CODE (args) == TREE_LIST)
4606 {
4607 bool ret = true;
4608
4609 for (; args; args = TREE_CHAIN (args))
4610 if (TREE_VALUE (args)
4611 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
4612 ret = false;
4613
4614 return ret;
4615 }
4616
4617 else if (TREE_CODE (args) != STRING_CST)
4618 gcc_unreachable ();
4619
4620 /* Handle multiple arguments separated by commas. */
4621 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4622
4623 while (next_optstr && *next_optstr != '\0')
4624 {
4625 char *p = next_optstr;
4626 char *orig_p = p;
4627 char *comma = strchr (next_optstr, ',');
4628 const char *opt_string;
4629 size_t len, opt_len;
4630 int opt;
4631 bool opt_set_p;
4632 char ch;
4633 unsigned i;
4634 enum ix86_opt_type type = ix86_opt_unknown;
4635 int mask = 0;
4636
4637 if (comma)
4638 {
4639 *comma = '\0';
4640 len = comma - next_optstr;
4641 next_optstr = comma + 1;
4642 }
4643 else
4644 {
4645 len = strlen (p);
4646 next_optstr = NULL;
4647 }
4648
4649 /* Recognize no-xxx. */
4650 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4651 {
4652 opt_set_p = false;
4653 p += 3;
4654 len -= 3;
4655 }
4656 else
4657 opt_set_p = true;
4658
4659 /* Find the option. */
4660 ch = *p;
4661 opt = N_OPTS;
4662 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4663 {
4664 type = attrs[i].type;
4665 opt_len = attrs[i].len;
4666 if (ch == attrs[i].string[0]
4667 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
4668 && memcmp (p, attrs[i].string, opt_len) == 0)
4669 {
4670 opt = attrs[i].opt;
4671 mask = attrs[i].mask;
4672 opt_string = attrs[i].string;
4673 break;
4674 }
4675 }
4676
4677 /* Process the option. */
4678 if (opt == N_OPTS)
4679 {
4680 error ("attribute(target(\"%s\")) is unknown", orig_p);
4681 ret = false;
4682 }
4683
4684 else if (type == ix86_opt_isa)
4685 {
4686 struct cl_decoded_option decoded;
4687
4688 generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
4689 ix86_handle_option (&global_options, &global_options_set,
4690 &decoded, input_location);
4691 }
4692
4693 else if (type == ix86_opt_yes || type == ix86_opt_no)
4694 {
4695 if (type == ix86_opt_no)
4696 opt_set_p = !opt_set_p;
4697
4698 if (opt_set_p)
4699 target_flags |= mask;
4700 else
4701 target_flags &= ~mask;
4702 }
4703
4704 else if (type == ix86_opt_str)
4705 {
4706 if (p_strings[opt])
4707 {
4708 error ("option(\"%s\") was already specified", opt_string);
4709 ret = false;
4710 }
4711 else
4712 p_strings[opt] = xstrdup (p + opt_len);
4713 }
4714
4715 else
4716 gcc_unreachable ();
4717 }
4718
4719 return ret;
4720 }
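
/* Example (illustrative only; the declaration below is hypothetical): the
   strings parsed above are the comma-separated bodies of
   attribute((target("..."))), e.g.

     __attribute__((target("sse4.2,no-fancy-math-387,arch=core2")))
     void foo (void);

   where "sse4.2" is an isa option, the "no-" prefix negates a flag option
   such as "fancy-math-387", and "arch=" is a string option from the attrs
   table above.  */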
4721
4722 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4723
4724 tree
4725 ix86_valid_target_attribute_tree (tree args)
4726 {
4727 const char *orig_arch_string = ix86_arch_string;
4728 const char *orig_tune_string = ix86_tune_string;
4729 const char *orig_fpmath_string = ix86_fpmath_string;
4730 int orig_tune_defaulted = ix86_tune_defaulted;
4731 int orig_arch_specified = ix86_arch_specified;
4732 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
4733 tree t = NULL_TREE;
4734 int i;
4735 struct cl_target_option *def
4736 = TREE_TARGET_OPTION (target_option_default_node);
4737
4738 /* Process each of the options on the chain. */
4739 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
4740 return NULL_TREE;
4741
4742 /* If the changed options are different from the default, rerun
4743 ix86_option_override_internal, and then save the options away.
4744 The string options are attribute options, and will be undone
4745 when we copy the save structure. */
4746 if (ix86_isa_flags != def->x_ix86_isa_flags
4747 || target_flags != def->x_target_flags
4748 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4749 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4750 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4751 {
4752 /* If we are using the default tune= or arch=, undo the string assigned,
4753 and use the default. */
4754 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4755 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4756 else if (!orig_arch_specified)
4757 ix86_arch_string = NULL;
4758
4759 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4760 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4761 else if (orig_tune_defaulted)
4762 ix86_tune_string = NULL;
4763
4764 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4765 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4766 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4767 else if (!TARGET_64BIT && TARGET_SSE)
4768 ix86_fpmath_string = "sse,387";
4769
4770 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4771 ix86_option_override_internal (false);
4772
4773 /* Add any builtin functions with the new isa if any. */
4774 ix86_add_new_builtins (ix86_isa_flags);
4775
4776 /* Save the current options unless we are validating options for
4777 #pragma. */
4778 t = build_target_option_node ();
4779
4780 ix86_arch_string = orig_arch_string;
4781 ix86_tune_string = orig_tune_string;
4782 ix86_fpmath_string = orig_fpmath_string;
4783
4784 /* Free up memory allocated to hold the strings */
4785 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4786 free (option_strings[i]);
4787 }
4788
4789 return t;
4790 }
4791
4792 /* Hook to validate attribute((target("string"))). */
4793
4794 static bool
4795 ix86_valid_target_attribute_p (tree fndecl,
4796 tree ARG_UNUSED (name),
4797 tree args,
4798 int ARG_UNUSED (flags))
4799 {
4800 struct cl_target_option cur_target;
4801 bool ret = true;
4802 tree old_optimize = build_optimization_node ();
4803 tree new_target, new_optimize;
4804 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4805
4806 /* If the function changed the optimization levels as well as setting target
4807 options, start with the optimizations specified. */
4808 if (func_optimize && func_optimize != old_optimize)
4809 cl_optimization_restore (&global_options,
4810 TREE_OPTIMIZATION (func_optimize));
4811
4812 /* The target attributes may also change some optimization flags, so update
4813 the optimization options if necessary. */
4814 cl_target_option_save (&cur_target, &global_options);
4815 new_target = ix86_valid_target_attribute_tree (args);
4816 new_optimize = build_optimization_node ();
4817
4818 if (!new_target)
4819 ret = false;
4820
4821 else if (fndecl)
4822 {
4823 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4824
4825 if (old_optimize != new_optimize)
4826 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4827 }
4828
4829 cl_target_option_restore (&global_options, &cur_target);
4830
4831 if (old_optimize != new_optimize)
4832 cl_optimization_restore (&global_options,
4833 TREE_OPTIMIZATION (old_optimize));
4834
4835 return ret;
4836 }
4837
4838 \f
4839 /* Hook to determine if one function can safely inline another. */
4840
4841 static bool
4842 ix86_can_inline_p (tree caller, tree callee)
4843 {
4844 bool ret = false;
4845 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4846 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4847
4848 /* If callee has no option attributes, then it is ok to inline. */
4849 if (!callee_tree)
4850 ret = true;
4851
4852 /* If the caller has no option attributes but the callee does, then it is not ok to
4853 inline. */
4854 else if (!caller_tree)
4855 ret = false;
4856
4857 else
4858 {
4859 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4860 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4861
4862 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
4863 can inline an SSE2 function but an SSE2 function can't inline an SSE4
4864 function. */
4865 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4866 != callee_opts->x_ix86_isa_flags)
4867 ret = false;
4868
4869 /* See if we have the same non-isa options. */
4870 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4871 ret = false;
4872
4873 /* See if arch, tune, etc. are the same. */
4874 else if (caller_opts->arch != callee_opts->arch)
4875 ret = false;
4876
4877 else if (caller_opts->tune != callee_opts->tune)
4878 ret = false;
4879
4880 else if (caller_opts->fpmath != callee_opts->fpmath)
4881 ret = false;
4882
4883 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4884 ret = false;
4885
4886 else
4887 ret = true;
4888 }
4889
4890 return ret;
4891 }
4892
4893 \f
4894 /* Remember the last target of ix86_set_current_function. */
4895 static GTY(()) tree ix86_previous_fndecl;
4896
4897 /* Establish appropriate back-end context for processing the function
4898 FNDECL. The argument might be NULL to indicate processing at top
4899 level, outside of any function scope. */
4900 static void
4901 ix86_set_current_function (tree fndecl)
4902 {
4903 /* Only change the context if the function changes. This hook is called
4904 several times in the course of compiling a function, and we don't want to
4905 slow things down too much or call target_reinit when it isn't safe. */
4906 if (fndecl && fndecl != ix86_previous_fndecl)
4907 {
4908 tree old_tree = (ix86_previous_fndecl
4909 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4910 : NULL_TREE);
4911
4912 tree new_tree = (fndecl
4913 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4914 : NULL_TREE);
4915
4916 ix86_previous_fndecl = fndecl;
4917 if (old_tree == new_tree)
4918 ;
4919
4920 else if (new_tree)
4921 {
4922 cl_target_option_restore (&global_options,
4923 TREE_TARGET_OPTION (new_tree));
4924 target_reinit ();
4925 }
4926
4927 else if (old_tree)
4928 {
4929 struct cl_target_option *def
4930 = TREE_TARGET_OPTION (target_option_current_node);
4931
4932 cl_target_option_restore (&global_options, def);
4933 target_reinit ();
4934 }
4935 }
4936 }
4937
4938 \f
4939 /* Return true if this goes in large data/bss. */
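/* For instance (illustration only; the name is arbitrary), with
   -mcmodel=medium a definition like

     static char big_buffer[1 << 20];

   exceeds ix86_section_threshold (-mlarge-data-threshold) and is
   treated as large data, ending up in .lbss rather than .bss.  */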
4940
4941 static bool
4942 ix86_in_large_data_p (tree exp)
4943 {
4944 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4945 return false;
4946
4947 /* Functions are never large data. */
4948 if (TREE_CODE (exp) == FUNCTION_DECL)
4949 return false;
4950
4951 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4952 {
4953 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4954 if (strcmp (section, ".ldata") == 0
4955 || strcmp (section, ".lbss") == 0)
4956 return true;
4957 return false;
4958 }
4959 else
4960 {
4961 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4962
4963 /* If this is an incomplete type with size 0, then we can't put it
4964 in data because it might be too big when completed. */
4965 if (!size || size > ix86_section_threshold)
4966 return true;
4967 }
4968
4969 return false;
4970 }
4971
4972 /* Switch to the appropriate section for output of DECL.
4973 DECL is either a `VAR_DECL' node or a constant of some sort.
4974 RELOC indicates whether forming the initial value of DECL requires
4975 link-time relocations. */
4976
4977 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4978 ATTRIBUTE_UNUSED;
4979
4980 static section *
4981 x86_64_elf_select_section (tree decl, int reloc,
4982 unsigned HOST_WIDE_INT align)
4983 {
4984 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4985 && ix86_in_large_data_p (decl))
4986 {
4987 const char *sname = NULL;
4988 unsigned int flags = SECTION_WRITE;
4989 switch (categorize_decl_for_section (decl, reloc))
4990 {
4991 case SECCAT_DATA:
4992 sname = ".ldata";
4993 break;
4994 case SECCAT_DATA_REL:
4995 sname = ".ldata.rel";
4996 break;
4997 case SECCAT_DATA_REL_LOCAL:
4998 sname = ".ldata.rel.local";
4999 break;
5000 case SECCAT_DATA_REL_RO:
5001 sname = ".ldata.rel.ro";
5002 break;
5003 case SECCAT_DATA_REL_RO_LOCAL:
5004 sname = ".ldata.rel.ro.local";
5005 break;
5006 case SECCAT_BSS:
5007 sname = ".lbss";
5008 flags |= SECTION_BSS;
5009 break;
5010 case SECCAT_RODATA:
5011 case SECCAT_RODATA_MERGE_STR:
5012 case SECCAT_RODATA_MERGE_STR_INIT:
5013 case SECCAT_RODATA_MERGE_CONST:
5014 sname = ".lrodata";
5015 flags = 0;
5016 break;
5017 case SECCAT_SRODATA:
5018 case SECCAT_SDATA:
5019 case SECCAT_SBSS:
5020 gcc_unreachable ();
5021 case SECCAT_TEXT:
5022 case SECCAT_TDATA:
5023 case SECCAT_TBSS:
5024 /* We don't split these for the medium model. Place them into
5025 default sections and hope for the best. */
5026 break;
5027 }
5028 if (sname)
5029 {
5030 /* We might get called with string constants, but get_named_section
5031 doesn't like them as they are not DECLs. Also, we need to set
5032 flags in that case. */
5033 if (!DECL_P (decl))
5034 return get_section (sname, flags, NULL);
5035 return get_named_section (decl, sname, reloc);
5036 }
5037 }
5038 return default_elf_select_section (decl, reloc, align);
5039 }
5040
5041 /* Build up a unique section name, expressed as a
5042 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
5043 RELOC indicates whether the initial value of DECL requires
5044 link-time relocations. */
5045
5046 static void ATTRIBUTE_UNUSED
5047 x86_64_elf_unique_section (tree decl, int reloc)
5048 {
5049 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5050 && ix86_in_large_data_p (decl))
5051 {
5052 const char *prefix = NULL;
5053 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
5054 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
5055
5056 switch (categorize_decl_for_section (decl, reloc))
5057 {
5058 case SECCAT_DATA:
5059 case SECCAT_DATA_REL:
5060 case SECCAT_DATA_REL_LOCAL:
5061 case SECCAT_DATA_REL_RO:
5062 case SECCAT_DATA_REL_RO_LOCAL:
5063 prefix = one_only ? ".ld" : ".ldata";
5064 break;
5065 case SECCAT_BSS:
5066 prefix = one_only ? ".lb" : ".lbss";
5067 break;
5068 case SECCAT_RODATA:
5069 case SECCAT_RODATA_MERGE_STR:
5070 case SECCAT_RODATA_MERGE_STR_INIT:
5071 case SECCAT_RODATA_MERGE_CONST:
5072 prefix = one_only ? ".lr" : ".lrodata";
5073 break;
5074 case SECCAT_SRODATA:
5075 case SECCAT_SDATA:
5076 case SECCAT_SBSS:
5077 gcc_unreachable ();
5078 case SECCAT_TEXT:
5079 case SECCAT_TDATA:
5080 case SECCAT_TBSS:
5081 /* We don't split these for the medium model. Place them into
5082 default sections and hope for the best. */
5083 break;
5084 }
5085 if (prefix)
5086 {
5087 const char *name, *linkonce;
5088 char *string;
5089
5090 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
5091 name = targetm.strip_name_encoding (name);
5092
5093 /* If we're using one_only, then there needs to be a .gnu.linkonce
5094 prefix to the section name. */
5095 linkonce = one_only ? ".gnu.linkonce" : "";
5096
5097 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
5098
5099 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
5100 return;
5101 }
5102 }
5103 default_unique_section (decl, reloc);
5104 }
5105
5106 #ifdef COMMON_ASM_OP
5107 /* This says how to output assembler code to declare an
5108 uninitialized external linkage data object.
5109
5110 For the x86-64 medium model we need to use the .largecomm directive for
5111 large objects. */
5112 void
5113 x86_elf_aligned_common (FILE *file,
5114 const char *name, unsigned HOST_WIDE_INT size,
5115 int align)
5116 {
5117 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5118 && size > (unsigned int)ix86_section_threshold)
5119 fputs (".largecomm\t", file);
5120 else
5121 fputs (COMMON_ASM_OP, file);
5122 assemble_name (file, name);
5123 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5124 size, align / BITS_PER_UNIT);
5125 }
5126 #endif
5127
5128 /* Utility function for targets to use in implementing
5129 ASM_OUTPUT_ALIGNED_BSS. */
5130
5131 void
5132 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
5133 const char *name, unsigned HOST_WIDE_INT size,
5134 int align)
5135 {
5136 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5137 && size > (unsigned int)ix86_section_threshold)
5138 switch_to_section (get_named_section (decl, ".lbss", 0));
5139 else
5140 switch_to_section (bss_section);
5141 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
5142 #ifdef ASM_DECLARE_OBJECT_NAME
5143 last_assemble_variable_decl = decl;
5144 ASM_DECLARE_OBJECT_NAME (file, name, decl);
5145 #else
5146 /* Standard thing is just output label for the object. */
5147 ASM_OUTPUT_LABEL (file, name);
5148 #endif /* ASM_DECLARE_OBJECT_NAME */
5149 ASM_OUTPUT_SKIP (file, size ? size : 1);
5150 }
5151 \f
5152 static const struct default_options ix86_option_optimization_table[] =
5153 {
5154 /* Turn off -fschedule-insns by default. It tends to make the
5155 problem with not enough registers even worse. */
5156 #ifdef INSN_SCHEDULING
5157 { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
5158 #endif
5159
5160 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
5161 SUBTARGET_OPTIMIZATION_OPTIONS,
5162 #endif
5163 { OPT_LEVELS_NONE, 0, NULL, 0 }
5164 };
5165
5166 /* Implement TARGET_OPTION_INIT_STRUCT. */
5167
5168 static void
5169 ix86_option_init_struct (struct gcc_options *opts)
5170 {
5171 if (TARGET_MACHO)
5172 /* The Darwin libraries never set errno, so we might as well
5173 avoid calling them when that's the only reason we would. */
5174 opts->x_flag_errno_math = 0;
5175
5176 opts->x_flag_pcc_struct_return = 2;
5177 opts->x_flag_asynchronous_unwind_tables = 2;
5178 opts->x_flag_vect_cost_model = 1;
5179 }
5180
5181 /* Decide whether we must probe the stack before any space allocation
5182 on this target. It's essentially TARGET_STACK_PROBE except when
5183 -fstack-check causes the stack to be already probed differently. */
5184
5185 bool
5186 ix86_target_stack_probe (void)
5187 {
5188 /* Do not probe the stack twice if static stack checking is enabled. */
5189 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5190 return false;
5191
5192 return TARGET_STACK_PROBE;
5193 }
5194 \f
5195 /* Decide whether we can make a sibling call to a function. DECL is the
5196 declaration of the function being targeted by the call and EXP is the
5197 CALL_EXPR representing the call. */
5198
5199 static bool
5200 ix86_function_ok_for_sibcall (tree decl, tree exp)
5201 {
5202 tree type, decl_or_type;
5203 rtx a, b;
5204
5205 /* If we are generating position-independent code, we cannot sibcall
5206 optimize any indirect call, or a direct call to a global function,
5207 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
5208 if (!TARGET_MACHO
5209 && !TARGET_64BIT
5210 && flag_pic
5211 && (!decl || !targetm.binds_local_p (decl)))
5212 return false;
5213
5214 /* If we need to align the outgoing stack, then sibcalling would
5215 unalign the stack, which may break the called function. */
5216 if (ix86_minimum_incoming_stack_boundary (true)
5217 < PREFERRED_STACK_BOUNDARY)
5218 return false;
5219
5220 if (decl)
5221 {
5222 decl_or_type = decl;
5223 type = TREE_TYPE (decl);
5224 }
5225 else
5226 {
5227 /* We're looking at the CALL_EXPR; we need the type of the function. */
5228 type = CALL_EXPR_FN (exp); /* pointer expression */
5229 type = TREE_TYPE (type); /* pointer type */
5230 type = TREE_TYPE (type); /* function type */
5231 decl_or_type = type;
5232 }
5233
5234 /* Check that the return value locations are the same. Like
5235 if we are returning floats on the 80387 register stack, we cannot
5236 make a sibcall from a function that doesn't return a float to a
5237 function that does or, conversely, from a function that does return
5238 a float to a function that doesn't; the necessary stack adjustment
5239 would not be executed. This is also the place we notice
5240 differences in the return value ABI. Note that it is ok for one
5241 of the functions to have void return type as long as the return
5242 value of the other is passed in a register. */
5243 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
5244 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
5245 cfun->decl, false);
5246 if (STACK_REG_P (a) || STACK_REG_P (b))
5247 {
5248 if (!rtx_equal_p (a, b))
5249 return false;
5250 }
5251 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
5252 {
5253 /* Disable sibcall if we need to generate vzeroupper after
5254 callee returns. */
5255 if (TARGET_VZEROUPPER
5256 && cfun->machine->callee_return_avx256_p
5257 && !cfun->machine->caller_return_avx256_p)
5258 return false;
5259 }
5260 else if (!rtx_equal_p (a, b))
5261 return false;
5262
5263 if (TARGET_64BIT)
5264 {
5265 /* The SYSV ABI has more call-clobbered registers;
5266 disallow sibcalls from MS to SYSV. */
5267 if (cfun->machine->call_abi == MS_ABI
5268 && ix86_function_type_abi (type) == SYSV_ABI)
5269 return false;
5270 }
5271 else
5272 {
5273 /* If this call is indirect, we'll need to be able to use a
5274 call-clobbered register for the address of the target function.
5275 Make sure that all such registers are not used for passing
5276 parameters. Note that DLLIMPORT functions are indirect. */
5277 if (!decl
5278 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
5279 {
5280 if (ix86_function_regparm (type, NULL) >= 3)
5281 {
5282 /* ??? Need to count the actual number of registers to be used,
5283 not the possible number of registers. Fix later. */
5284 return false;
5285 }
5286 }
5287 }
5288
5289 /* Otherwise okay. That also includes certain types of indirect calls. */
5290 return true;
5291 }
5292
5293 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
5294 and "sseregparm" calling convention attributes;
5295 arguments as in struct attribute_spec.handler. */
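/* Typical uses of these attributes (illustration only; the declared
   names are arbitrary):

     int __attribute__ ((regparm (3))) f (int, int, int);
     int __attribute__ ((fastcall)) g (int, int);
     int __attribute__ ((stdcall)) h (int);

   Invalid combinations, e.g. fastcall together with regparm, are
   diagnosed by the checks below.  */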
5296
5297 static tree
5298 ix86_handle_cconv_attribute (tree *node, tree name,
5299 tree args,
5300 int flags ATTRIBUTE_UNUSED,
5301 bool *no_add_attrs)
5302 {
5303 if (TREE_CODE (*node) != FUNCTION_TYPE
5304 && TREE_CODE (*node) != METHOD_TYPE
5305 && TREE_CODE (*node) != FIELD_DECL
5306 && TREE_CODE (*node) != TYPE_DECL)
5307 {
5308 warning (OPT_Wattributes, "%qE attribute only applies to functions",
5309 name);
5310 *no_add_attrs = true;
5311 return NULL_TREE;
5312 }
5313
5314 /* Can combine regparm with all attributes but fastcall and thiscall. */
5315 if (is_attribute_p ("regparm", name))
5316 {
5317 tree cst;
5318
5319 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5320 {
5321 error ("fastcall and regparm attributes are not compatible");
5322 }
5323
5324 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5325 {
5326 error ("regparm and thiscall attributes are not compatible");
5327 }
5328
5329 cst = TREE_VALUE (args);
5330 if (TREE_CODE (cst) != INTEGER_CST)
5331 {
5332 warning (OPT_Wattributes,
5333 "%qE attribute requires an integer constant argument",
5334 name);
5335 *no_add_attrs = true;
5336 }
5337 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
5338 {
5339 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
5340 name, REGPARM_MAX);
5341 *no_add_attrs = true;
5342 }
5343
5344 return NULL_TREE;
5345 }
5346
5347 if (TARGET_64BIT)
5348 {
5349 /* Do not warn when emulating the MS ABI. */
5350 if ((TREE_CODE (*node) != FUNCTION_TYPE
5351 && TREE_CODE (*node) != METHOD_TYPE)
5352 || ix86_function_type_abi (*node) != MS_ABI)
5353 warning (OPT_Wattributes, "%qE attribute ignored",
5354 name);
5355 *no_add_attrs = true;
5356 return NULL_TREE;
5357 }
5358
5359 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
5360 if (is_attribute_p ("fastcall", name))
5361 {
5362 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5363 {
5364 error ("fastcall and cdecl attributes are not compatible");
5365 }
5366 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5367 {
5368 error ("fastcall and stdcall attributes are not compatible");
5369 }
5370 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
5371 {
5372 error ("fastcall and regparm attributes are not compatible");
5373 }
5374 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5375 {
5376 error ("fastcall and thiscall attributes are not compatible");
5377 }
5378 }
5379
5380 /* Can combine stdcall with fastcall (redundant), regparm and
5381 sseregparm. */
5382 else if (is_attribute_p ("stdcall", name))
5383 {
5384 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5385 {
5386 error ("stdcall and cdecl attributes are not compatible");
5387 }
5388 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5389 {
5390 error ("stdcall and fastcall attributes are not compatible");
5391 }
5392 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5393 {
5394 error ("stdcall and thiscall attributes are not compatible");
5395 }
5396 }
5397
5398 /* Can combine cdecl with regparm and sseregparm. */
5399 else if (is_attribute_p ("cdecl", name))
5400 {
5401 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5402 {
5403 error ("stdcall and cdecl attributes are not compatible");
5404 }
5405 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5406 {
5407 error ("fastcall and cdecl attributes are not compatible");
5408 }
5409 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5410 {
5411 error ("cdecl and thiscall attributes are not compatible");
5412 }
5413 }
5414 else if (is_attribute_p ("thiscall", name))
5415 {
5416 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
5417 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
5418 name);
5419 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5420 {
5421 error ("stdcall and thiscall attributes are not compatible");
5422 }
5423 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5424 {
5425 error ("fastcall and thiscall attributes are not compatible");
5426 }
5427 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5428 {
5429 error ("cdecl and thiscall attributes are not compatible");
5430 }
5431 }
5432
5433 /* Can combine sseregparm with all attributes. */
5434
5435 return NULL_TREE;
5436 }
5437
5438 /* This function determines the calling convention from TYPE. */
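/* For example (illustration only), for a 32-bit prototype declared as

     int __attribute__ ((stdcall)) f (int);

   this returns IX86_CALLCVT_STDCALL; with -mrtd and no attributes, a
   non-stdarg prototype likewise yields IX86_CALLCVT_STDCALL.  */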
5439
5440 unsigned int
5441 ix86_get_callcvt (const_tree type)
5442 {
5443 unsigned int ret = 0;
5444 bool is_stdarg;
5445 tree attrs;
5446
5447 if (TARGET_64BIT)
5448 return IX86_CALLCVT_CDECL;
5449
5450 attrs = TYPE_ATTRIBUTES (type);
5451 if (attrs != NULL_TREE)
5452 {
5453 if (lookup_attribute ("cdecl", attrs))
5454 ret |= IX86_CALLCVT_CDECL;
5455 else if (lookup_attribute ("stdcall", attrs))
5456 ret |= IX86_CALLCVT_STDCALL;
5457 else if (lookup_attribute ("fastcall", attrs))
5458 ret |= IX86_CALLCVT_FASTCALL;
5459 else if (lookup_attribute ("thiscall", attrs))
5460 ret |= IX86_CALLCVT_THISCALL;
5461
5462 /* Regparm isn't allowed for thiscall and fastcall. */
5463 if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
5464 {
5465 if (lookup_attribute ("regparm", attrs))
5466 ret |= IX86_CALLCVT_REGPARM;
5467 if (lookup_attribute ("sseregparm", attrs))
5468 ret |= IX86_CALLCVT_SSEREGPARM;
5469 }
5470
5471 if (IX86_BASE_CALLCVT(ret) != 0)
5472 return ret;
5473 }
5474
5475 is_stdarg = stdarg_p (type);
5476 if (TARGET_RTD && !is_stdarg)
5477 return IX86_CALLCVT_STDCALL | ret;
5478
5479 if (ret != 0
5480 || is_stdarg
5481 || TREE_CODE (type) != METHOD_TYPE
5482 || ix86_function_type_abi (type) != MS_ABI)
5483 return IX86_CALLCVT_CDECL | ret;
5484
5485 return IX86_CALLCVT_THISCALL;
5486 }
5487
5488 /* Return 0 if the attributes for two types are incompatible, 1 if they
5489 are compatible, and 2 if they are nearly compatible (which causes a
5490 warning to be generated). */
5491
5492 static int
5493 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5494 {
5495 unsigned int ccvt1, ccvt2;
5496
5497 if (TREE_CODE (type1) != FUNCTION_TYPE
5498 && TREE_CODE (type1) != METHOD_TYPE)
5499 return 1;
5500
5501 ccvt1 = ix86_get_callcvt (type1);
5502 ccvt2 = ix86_get_callcvt (type2);
5503 if (ccvt1 != ccvt2)
5504 return 0;
5505 if (ix86_function_regparm (type1, NULL)
5506 != ix86_function_regparm (type2, NULL))
5507 return 0;
5508
5509 return 1;
5510 }
5511 \f
5512 /* Return the regparm value for a function with the indicated TYPE and DECL.
5513 DECL may be NULL when calling a function indirectly
5514 or considering a libcall. */
5515
5516 static int
5517 ix86_function_regparm (const_tree type, const_tree decl)
5518 {
5519 tree attr;
5520 int regparm;
5521 unsigned int ccvt;
5522
5523 if (TARGET_64BIT)
5524 return (ix86_function_type_abi (type) == SYSV_ABI
5525 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5526 ccvt = ix86_get_callcvt (type);
5527 regparm = ix86_regparm;
5528
5529 if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
5530 {
5531 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5532 if (attr)
5533 {
5534 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5535 return regparm;
5536 }
5537 }
5538 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
5539 return 2;
5540 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
5541 return 1;
5542
5543 /* Use register calling convention for local functions when possible. */
5544 if (decl
5545 && TREE_CODE (decl) == FUNCTION_DECL
5546 && optimize
5547 && !(profile_flag && !flag_fentry))
5548 {
5549 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5550 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5551 if (i && i->local && i->can_change_signature)
5552 {
5553 int local_regparm, globals = 0, regno;
5554
5555 /* Make sure no regparm register is taken by a
5556 fixed register variable. */
5557 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5558 if (fixed_regs[local_regparm])
5559 break;
5560
5561 /* We don't want to use regparm(3) for nested functions as
5562 these use a static chain pointer in the third argument. */
5563 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5564 local_regparm = 2;
5565
5566 /* In 32-bit mode save a register for the split stack. */
5567 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5568 local_regparm = 2;
5569
5570 /* Each fixed register usage increases register pressure,
5571 so fewer registers should be used for argument passing.
5572 This functionality can be overridden by an explicit
5573 regparm value. */
5574 for (regno = 0; regno <= DI_REG; regno++)
5575 if (fixed_regs[regno])
5576 globals++;
5577
5578 local_regparm
5579 = globals < local_regparm ? local_regparm - globals : 0;
5580
5581 if (local_regparm > regparm)
5582 regparm = local_regparm;
5583 }
5584 }
5585
5586 return regparm;
5587 }
5588
5589 /* Return 1 or 2 if we can pass up to SSE_REGPARM_MAX SFmode (1), or both
5590 SFmode and DFmode (2), arguments in SSE registers for a function with the
5591 indicated TYPE and DECL. DECL may be NULL when calling a function
5592 indirectly or considering a libcall. Otherwise return 0. */
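/* E.g. (illustration only; the name is arbitrary)

     double __attribute__ ((sseregparm)) f (double x, float y);

   lets X and Y be passed in SSE registers on ia32 when SSE2 is
   enabled; without SSE enabled, an error is issued instead.  */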
5593
5594 static int
5595 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5596 {
5597 gcc_assert (!TARGET_64BIT);
5598
5599 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5600 by the sseregparm attribute. */
5601 if (TARGET_SSEREGPARM
5602 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5603 {
5604 if (!TARGET_SSE)
5605 {
5606 if (warn)
5607 {
5608 if (decl)
5609 error ("calling %qD with attribute sseregparm without "
5610 "SSE/SSE2 enabled", decl);
5611 else
5612 error ("calling %qT with attribute sseregparm without "
5613 "SSE/SSE2 enabled", type);
5614 }
5615 return 0;
5616 }
5617
5618 return 2;
5619 }
5620
5621 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5622 (and DFmode for SSE2) arguments in SSE registers. */
5623 if (decl && TARGET_SSE_MATH && optimize
5624 && !(profile_flag && !flag_fentry))
5625 {
5626 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5627 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
5628 if (i && i->local && i->can_change_signature)
5629 return TARGET_SSE2 ? 2 : 1;
5630 }
5631
5632 return 0;
5633 }
5634
5635 /* Return true if EAX is live at the start of the function. Used by
5636 ix86_expand_prologue to determine if we need special help before
5637 calling allocate_stack_worker. */
5638
5639 static bool
5640 ix86_eax_live_at_start_p (void)
5641 {
5642 /* Cheat. Don't bother working forward from ix86_function_regparm
5643 to the function type to whether an actual argument is located in
5644 eax. Instead just look at cfg info, which is still close enough
5645 to correct at this point. This gives false positives for broken
5646 functions that might use uninitialized data that happens to be
5647 allocated in eax, but who cares? */
5648 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5649 }
5650
5651 static bool
5652 ix86_keep_aggregate_return_pointer (tree fntype)
5653 {
5654 tree attr;
5655
5656 if (!TARGET_64BIT)
5657 {
5658 attr = lookup_attribute ("callee_pop_aggregate_return",
5659 TYPE_ATTRIBUTES (fntype));
5660 if (attr)
5661 return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);
5662
5663 /* For the 32-bit MS ABI the default is to keep the aggregate
5664 return pointer. */
5665 if (ix86_function_type_abi (fntype) == MS_ABI)
5666 return true;
5667 }
5668 return KEEP_AGGREGATE_RETURN_POINTER != 0;
5669 }
5670
5671 /* Value is the number of bytes of arguments automatically
5672 popped when returning from a subroutine call.
5673 FUNDECL is the declaration node of the function (as a tree),
5674 FUNTYPE is the data type of the function (as a tree),
5675 or for a library call it is an identifier node for the subroutine name.
5676 SIZE is the number of bytes of arguments passed on the stack.
5677
5678 On the 80386, the RTD insn may be used to pop them if the number
5679 of args is fixed, but if the number is variable then the caller
5680 must pop them all. RTD can't be used for library calls now
5681 because the library is compiled with the Unix compiler.
5682 Use of RTD is a selectable option, since it is incompatible with
5683 standard Unix calling sequences. If the option is not selected,
5684 the caller must always pop the args.
5685
5686 The attribute stdcall is equivalent to RTD on a per module basis. */
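/* For example (illustration only; the name is arbitrary), for

     void __attribute__ ((stdcall)) f (int a, int b);

   SIZE is 8 on ia32, the callee pops its own arguments, and this
   function returns 8; a plain cdecl function returns 0 and the caller
   pops the arguments.  */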
5687
5688 static int
5689 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5690 {
5691 unsigned int ccvt;
5692
5693 /* None of the 64-bit ABIs pop arguments. */
5694 if (TARGET_64BIT)
5695 return 0;
5696
5697 ccvt = ix86_get_callcvt (funtype);
5698
5699 if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
5700 | IX86_CALLCVT_THISCALL)) != 0
5701 && ! stdarg_p (funtype))
5702 return size;
5703
5704 /* Lose any fake structure return argument if it is passed on the stack. */
5705 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5706 && !ix86_keep_aggregate_return_pointer (funtype))
5707 {
5708 int nregs = ix86_function_regparm (funtype, fundecl);
5709 if (nregs == 0)
5710 return GET_MODE_SIZE (Pmode);
5711 }
5712
5713 return 0;
5714 }
5715 \f
5716 /* Argument support functions. */
5717
5718 /* Return true when register may be used to pass function parameters. */
5719 bool
5720 ix86_function_arg_regno_p (int regno)
5721 {
5722 int i;
5723 const int *parm_regs;
5724
5725 if (!TARGET_64BIT)
5726 {
5727 if (TARGET_MACHO)
5728 return (regno < REGPARM_MAX
5729 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5730 else
5731 return (regno < REGPARM_MAX
5732 || (TARGET_MMX && MMX_REGNO_P (regno)
5733 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5734 || (TARGET_SSE && SSE_REGNO_P (regno)
5735 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5736 }
5737
5738 if (TARGET_MACHO)
5739 {
5740 if (SSE_REGNO_P (regno) && TARGET_SSE)
5741 return true;
5742 }
5743 else
5744 {
5745 if (TARGET_SSE && SSE_REGNO_P (regno)
5746 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5747 return true;
5748 }
5749
5750 /* TODO: The function should depend on the current function's ABI, but
5751 builtins.c would need updating then. Therefore we use the
5752 default ABI. */
5753
5754 /* RAX is used as hidden argument to va_arg functions. */
5755 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5756 return true;
5757
5758 if (ix86_abi == MS_ABI)
5759 parm_regs = x86_64_ms_abi_int_parameter_registers;
5760 else
5761 parm_regs = x86_64_int_parameter_registers;
5762 for (i = 0; i < (ix86_abi == MS_ABI
5763 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5764 if (regno == parm_regs[i])
5765 return true;
5766 return false;
5767 }
5768
5769 /* Return true if we do not know how to pass TYPE solely in registers. */
5770
5771 static bool
5772 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5773 {
5774 if (must_pass_in_stack_var_size_or_pad (mode, type))
5775 return true;
5776
5777 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5778 The layout_type routine is crafty and tries to trick us into passing
5779 currently unsupported vector types on the stack by using TImode. */
5780 return (!TARGET_64BIT && mode == TImode
5781 && type && TREE_CODE (type) != VECTOR_TYPE);
5782 }
5783
5784 /* Return the size, in bytes, of the area reserved for arguments passed
5785 in registers for the function represented by FNDECL, depending on the
5786 ABI used. */
5787 int
5788 ix86_reg_parm_stack_space (const_tree fndecl)
5789 {
5790 enum calling_abi call_abi = SYSV_ABI;
5791 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5792 call_abi = ix86_function_abi (fndecl);
5793 else
5794 call_abi = ix86_function_type_abi (fndecl);
5795 if (TARGET_64BIT && call_abi == MS_ABI)
5796 return 32;
5797 return 0;
5798 }
5799
5800 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
5801 call abi used. */
5802 enum calling_abi
5803 ix86_function_type_abi (const_tree fntype)
5804 {
5805 if (fntype != NULL_TREE && TYPE_ATTRIBUTES (fntype) != NULL_TREE)
5806 {
5807 enum calling_abi abi = ix86_abi;
5808 if (abi == SYSV_ABI)
5809 {
5810 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5811 abi = MS_ABI;
5812 }
5813 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5814 abi = SYSV_ABI;
5815 return abi;
5816 }
5817 return ix86_abi;
5818 }
5819
5820 static bool
5821 ix86_function_ms_hook_prologue (const_tree fn)
5822 {
5823 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5824 {
5825 if (decl_function_context (fn) != NULL_TREE)
5826 error_at (DECL_SOURCE_LOCATION (fn),
5827 "ms_hook_prologue is not compatible with nested function");
5828 else
5829 return true;
5830 }
5831 return false;
5832 }
5833
5834 static enum calling_abi
5835 ix86_function_abi (const_tree fndecl)
5836 {
5837 if (! fndecl)
5838 return ix86_abi;
5839 return ix86_function_type_abi (TREE_TYPE (fndecl));
5840 }
5841
5842 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
5843 call abi used. */
5844 enum calling_abi
5845 ix86_cfun_abi (void)
5846 {
5847 if (! cfun)
5848 return ix86_abi;
5849 return cfun->machine->call_abi;
5850 }
5851
5852 /* Write the extra assembler code needed to declare a function properly. */
5853
5854 void
5855 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5856 tree decl)
5857 {
5858 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5859
5860 if (is_ms_hook)
5861 {
5862 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5863 unsigned int filler_cc = 0xcccccccc;
5864
5865 for (i = 0; i < filler_count; i += 4)
5866 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5867 }
5868
5869 #ifdef SUBTARGET_ASM_UNWIND_INIT
5870 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5871 #endif
5872
5873 ASM_OUTPUT_LABEL (asm_out_file, fname);
5874
5875 /* Output magic byte marker, if hot-patch attribute is set. */
5876 if (is_ms_hook)
5877 {
5878 if (TARGET_64BIT)
5879 {
5880 /* leaq [%rsp + 0], %rsp */
5881 asm_fprintf (asm_out_file, ASM_BYTE
5882 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5883 }
5884 else
5885 {
5886 /* movl.s %edi, %edi
5887 push %ebp
5888 movl.s %esp, %ebp */
5889 asm_fprintf (asm_out_file, ASM_BYTE
5890 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5891 }
5892 }
5893 }
5894
5895 /* regclass.c */
5896 extern void init_regs (void);
5897
5898 /* Implementation of call abi switching target hook. Specific to FNDECL
5899 the specific call register sets are set. See also
5900 ix86_conditional_register_usage for more details. */
5901 void
5902 ix86_call_abi_override (const_tree fndecl)
5903 {
5904 if (fndecl == NULL_TREE)
5905 cfun->machine->call_abi = ix86_abi;
5906 else
5907 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5908 }
5909
5910 /* The 64-bit MS and SYSV ABIs have different sets of call-used registers.
5911 Avoid expensive re-initialization of init_regs each time we switch function
5912 context, since it is needed only during RTL expansion. */
5913 static void
5914 ix86_maybe_switch_abi (void)
5915 {
5916 if (TARGET_64BIT &&
5917 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5918 reinit_regs ();
5919 }
5920
5921 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5922 for a call to a function whose data type is FNTYPE.
5923 For a library call, FNTYPE is 0. */
5924
5925 void
5926 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5927 tree fntype, /* tree ptr for function decl */
5928 rtx libname, /* SYMBOL_REF of library name or 0 */
5929 tree fndecl,
5930 int caller)
5931 {
5932 struct cgraph_local_info *i;
5933 tree fnret_type;
5934
5935 memset (cum, 0, sizeof (*cum));
5936
5937 /* Initialize for the current callee. */
5938 if (caller)
5939 {
5940 cfun->machine->callee_pass_avx256_p = false;
5941 cfun->machine->callee_return_avx256_p = false;
5942 }
5943
5944 if (fndecl)
5945 {
5946 i = cgraph_local_info (fndecl);
5947 cum->call_abi = ix86_function_abi (fndecl);
5948 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5949 }
5950 else
5951 {
5952 i = NULL;
5953 cum->call_abi = ix86_function_type_abi (fntype);
5954 if (fntype)
5955 fnret_type = TREE_TYPE (fntype);
5956 else
5957 fnret_type = NULL;
5958 }
5959
5960 if (TARGET_VZEROUPPER && fnret_type)
5961 {
5962 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5963 false);
5964 if (function_pass_avx256_p (fnret_value))
5965 {
5966 /* The return value of this function uses 256bit AVX modes. */
5967 if (caller)
5968 cfun->machine->callee_return_avx256_p = true;
5969 else
5970 cfun->machine->caller_return_avx256_p = true;
5971 }
5972 }
5973
5974 cum->caller = caller;
5975
5976 /* Set up the number of registers to use for passing arguments. */
5977
5978 if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5979 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5980 "or subtarget optimization implying it");
5981 cum->nregs = ix86_regparm;
5982 if (TARGET_64BIT)
5983 {
5984 cum->nregs = (cum->call_abi == SYSV_ABI
5985 ? X86_64_REGPARM_MAX
5986 : X86_64_MS_REGPARM_MAX);
5987 }
5988 if (TARGET_SSE)
5989 {
5990 cum->sse_nregs = SSE_REGPARM_MAX;
5991 if (TARGET_64BIT)
5992 {
5993 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5994 ? X86_64_SSE_REGPARM_MAX
5995 : X86_64_MS_SSE_REGPARM_MAX);
5996 }
5997 }
5998 if (TARGET_MMX)
5999 cum->mmx_nregs = MMX_REGPARM_MAX;
6000 cum->warn_avx = true;
6001 cum->warn_sse = true;
6002 cum->warn_mmx = true;
6003
6004 /* Because the type might mismatch between caller and callee, we need to
6005 use the actual type of the function for local calls.
6006 FIXME: cgraph_analyze can be told to actually record if the function uses
6007 va_start, so for local functions maybe_vaarg can be made more aggressive,
6008 helping K&R code.
6009 FIXME: once the type system is fixed, we won't need this code anymore. */
6010 if (i && i->local && i->can_change_signature)
6011 fntype = TREE_TYPE (fndecl);
6012 cum->maybe_vaarg = (fntype
6013 ? (!prototype_p (fntype) || stdarg_p (fntype))
6014 : !libname);
6015
6016 if (!TARGET_64BIT)
6017 {
6018 /* If there are variable arguments, then we won't pass anything
6019 in registers in 32-bit mode. */
6020 if (stdarg_p (fntype))
6021 {
6022 cum->nregs = 0;
6023 cum->sse_nregs = 0;
6024 cum->mmx_nregs = 0;
6025 cum->warn_avx = 0;
6026 cum->warn_sse = 0;
6027 cum->warn_mmx = 0;
6028 return;
6029 }
6030
6031 /* Use ecx and edx registers if function has fastcall attribute,
6032 else look for regparm information. */
6033 if (fntype)
6034 {
6035 unsigned int ccvt = ix86_get_callcvt (fntype);
6036 if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
6037 {
6038 cum->nregs = 1;
6039 cum->fastcall = 1; /* Same first register as in fastcall. */
6040 }
6041 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
6042 {
6043 cum->nregs = 2;
6044 cum->fastcall = 1;
6045 }
6046 else
6047 cum->nregs = ix86_function_regparm (fntype, fndecl);
6048 }
6049
6050 /* Set up the number of SSE registers used for passing SFmode
6051 and DFmode arguments. Warn for mismatching ABI. */
6052 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
6053 }
6054 }
6055
6056 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
6057 But in the case of vector types, it is some vector mode.
6058
6059 When we have only some of our vector isa extensions enabled, then there
6060 are some modes for which vector_mode_supported_p is false. For these
6061 modes, the generic vector support in gcc will choose some non-vector mode
6062 in order to implement the type. By computing the natural mode, we'll
6063 select the proper ABI location for the operand and not depend on whatever
6064 the middle-end decides to do with these vector types.
6065
6066 The middle-end can't deal with vector types > 16 bytes. In this
6067 case, we return the original mode and warn about the ABI change if CUM
6068 isn't NULL. */
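/* For instance (illustration only; the typedef is arbitrary), given

     typedef short v4hi __attribute__ ((vector_size (8)));

   when MMX is disabled TYPE_MODE may be a non-vector 8-byte mode, but
   the natural mode computed here is still V4HImode, so the argument
   keeps its proper ABI location.  */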
6069
6070 static enum machine_mode
6071 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
6072 {
6073 enum machine_mode mode = TYPE_MODE (type);
6074
6075 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
6076 {
6077 HOST_WIDE_INT size = int_size_in_bytes (type);
6078 if ((size == 8 || size == 16 || size == 32)
6079 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
6080 && TYPE_VECTOR_SUBPARTS (type) > 1)
6081 {
6082 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
6083
6084 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6085 mode = MIN_MODE_VECTOR_FLOAT;
6086 else
6087 mode = MIN_MODE_VECTOR_INT;
6088
6089 /* Get the mode which has this inner mode and number of units. */
6090 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
6091 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
6092 && GET_MODE_INNER (mode) == innermode)
6093 {
6094 if (size == 32 && !TARGET_AVX)
6095 {
6096 static bool warnedavx;
6097
6098 if (cum
6099 && !warnedavx
6100 && cum->warn_avx)
6101 {
6102 warnedavx = true;
6103 warning (0, "AVX vector argument without AVX "
6104 "enabled changes the ABI");
6105 }
6106 return TYPE_MODE (type);
6107 }
6108 else
6109 return mode;
6110 }
6111
6112 gcc_unreachable ();
6113 }
6114 }
6115
6116 return mode;
6117 }
6118
6119 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
6120 this may not agree with the mode that the type system has chosen for the
6121 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
6122 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
6123
6124 static rtx
6125 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
6126 unsigned int regno)
6127 {
6128 rtx tmp;
6129
6130 if (orig_mode != BLKmode)
6131 tmp = gen_rtx_REG (orig_mode, regno);
6132 else
6133 {
6134 tmp = gen_rtx_REG (mode, regno);
6135 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
6136 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
6137 }
6138
6139 return tmp;
6140 }
6141
6142 /* x86-64 register passing implementation. See the x86-64 ABI for details.
6143 The goal of this code is to classify each eightbyte (8-byte chunk) of an
6144 incoming argument by register class and assign registers accordingly. */
6145
6146 /* Return the union class of CLASS1 and CLASS2.
6147 See the x86-64 PS ABI for details. */
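/* E.g. (illustration only) for

     union u { int i; float f; };

   the first eightbyte is classified X86_64_INTEGERSI_CLASS for the int
   member and X86_64_SSESF_CLASS for the float member; rule #4 merges
   these to X86_64_INTEGERSI_CLASS.  */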
6148
6149 static enum x86_64_reg_class
6150 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
6151 {
6152 /* Rule #1: If both classes are equal, this is the resulting class. */
6153 if (class1 == class2)
6154 return class1;
6155
6156 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
6157 the other class. */
6158 if (class1 == X86_64_NO_CLASS)
6159 return class2;
6160 if (class2 == X86_64_NO_CLASS)
6161 return class1;
6162
6163 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
6164 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
6165 return X86_64_MEMORY_CLASS;
6166
6167 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
6168 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
6169 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
6170 return X86_64_INTEGERSI_CLASS;
6171 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
6172 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
6173 return X86_64_INTEGER_CLASS;
6174
6175 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
6176 MEMORY is used. */
6177 if (class1 == X86_64_X87_CLASS
6178 || class1 == X86_64_X87UP_CLASS
6179 || class1 == X86_64_COMPLEX_X87_CLASS
6180 || class2 == X86_64_X87_CLASS
6181 || class2 == X86_64_X87UP_CLASS
6182 || class2 == X86_64_COMPLEX_X87_CLASS)
6183 return X86_64_MEMORY_CLASS;
6184
6185 /* Rule #6: Otherwise class SSE is used. */
6186 return X86_64_SSE_CLASS;
6187 }
6188
6189 /* Classify the argument of type TYPE and mode MODE.
6190 CLASSES will be filled by the register class used to pass each word
6191 of the operand. The number of words is returned. In case the parameter
6192 should be passed in memory, 0 is returned. As a special case for zero
6193 sized containers, classes[0] will be NO_CLASS and 1 is returned.
6194
6195 BIT_OFFSET is used internally for handling records; it specifies the
6196 offset in bits modulo 256 to avoid overflow cases.
6197
6198 See the x86-64 PS ABI for details.
6199 */
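/* A worked example (illustration only; the struct is arbitrary): for

     struct s { double d; long l; };

   the first eightbyte is classified X86_64_SSEDF_CLASS and the second
   X86_64_INTEGER_CLASS, so the argument is passed in one SSE register
   and one general-purpose register.  */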
6200
6201 static int
6202 classify_argument (enum machine_mode mode, const_tree type,
6203 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
6204 {
6205 HOST_WIDE_INT bytes =
6206 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6207 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6208
6209 /* Variable sized entities are always passed/returned in memory. */
6210 if (bytes < 0)
6211 return 0;
6212
6213 if (mode != VOIDmode
6214 && targetm.calls.must_pass_in_stack (mode, type))
6215 return 0;
6216
6217 if (type && AGGREGATE_TYPE_P (type))
6218 {
6219 int i;
6220 tree field;
6221 enum x86_64_reg_class subclasses[MAX_CLASSES];
6222
6223 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
6224 if (bytes > 32)
6225 return 0;
6226
6227 for (i = 0; i < words; i++)
6228 classes[i] = X86_64_NO_CLASS;
6229
6230 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
6231 signal the memory class, so handle them as a special case. */
6232 if (!words)
6233 {
6234 classes[0] = X86_64_NO_CLASS;
6235 return 1;
6236 }
6237
6238 /* Classify each field of record and merge classes. */
6239 switch (TREE_CODE (type))
6240 {
6241 case RECORD_TYPE:
6242 /* And now merge the fields of structure. */
6243 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6244 {
6245 if (TREE_CODE (field) == FIELD_DECL)
6246 {
6247 int num;
6248
6249 if (TREE_TYPE (field) == error_mark_node)
6250 continue;
6251
6252 /* Bitfields are always classified as integer. Handle them
6253 early, since later code would consider them to be
6254 misaligned integers. */
6255 if (DECL_BIT_FIELD (field))
6256 {
6257 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6258 i < ((int_bit_position (field) + (bit_offset % 64))
6259 + tree_low_cst (DECL_SIZE (field), 0)
6260 + 63) / 8 / 8; i++)
6261 classes[i] =
6262 merge_classes (X86_64_INTEGER_CLASS,
6263 classes[i]);
6264 }
6265 else
6266 {
6267 int pos;
6268
6269 type = TREE_TYPE (field);
6270
6271 /* Flexible array member is ignored. */
6272 if (TYPE_MODE (type) == BLKmode
6273 && TREE_CODE (type) == ARRAY_TYPE
6274 && TYPE_SIZE (type) == NULL_TREE
6275 && TYPE_DOMAIN (type) != NULL_TREE
6276 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
6277 == NULL_TREE))
6278 {
6279 static bool warned;
6280
6281 if (!warned && warn_psabi)
6282 {
6283 warned = true;
6284 inform (input_location,
6285 "the ABI of passing struct with"
6286 " a flexible array member has"
6287 " changed in GCC 4.4");
6288 }
6289 continue;
6290 }
6291 num = classify_argument (TYPE_MODE (type), type,
6292 subclasses,
6293 (int_bit_position (field)
6294 + bit_offset) % 256);
6295 if (!num)
6296 return 0;
6297 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6298 for (i = 0; i < num && (i + pos) < words; i++)
6299 classes[i + pos] =
6300 merge_classes (subclasses[i], classes[i + pos]);
6301 }
6302 }
6303 }
6304 break;
6305
6306 case ARRAY_TYPE:
6307 /* Arrays are handled as small records. */
6308 {
6309 int num;
6310 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
6311 TREE_TYPE (type), subclasses, bit_offset);
6312 if (!num)
6313 return 0;
6314
6315 /* The partial classes are now full classes. */
6316 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
6317 subclasses[0] = X86_64_SSE_CLASS;
6318 if (subclasses[0] == X86_64_INTEGERSI_CLASS
6319 && !((bit_offset % 64) == 0 && bytes == 4))
6320 subclasses[0] = X86_64_INTEGER_CLASS;
6321
6322 for (i = 0; i < words; i++)
6323 classes[i] = subclasses[i % num];
6324
6325 break;
6326 }
6327 case UNION_TYPE:
6328 case QUAL_UNION_TYPE:
6329 /* Unions are similar to RECORD_TYPE but offset is always 0. */
6331 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6332 {
6333 if (TREE_CODE (field) == FIELD_DECL)
6334 {
6335 int num;
6336
6337 if (TREE_TYPE (field) == error_mark_node)
6338 continue;
6339
6340 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
6341 TREE_TYPE (field), subclasses,
6342 bit_offset);
6343 if (!num)
6344 return 0;
6345 for (i = 0; i < num; i++)
6346 classes[i] = merge_classes (subclasses[i], classes[i]);
6347 }
6348 }
6349 break;
6350
6351 default:
6352 gcc_unreachable ();
6353 }
6354
6355 if (words > 2)
6356 {
6357 /* When the size is > 16 bytes, if the first eightbyte isn't
6358 X86_64_SSE_CLASS or any of the others isn't
6359 X86_64_SSEUP_CLASS, everything should be passed in
6360 memory. */
6361 if (classes[0] != X86_64_SSE_CLASS)
6362 return 0;
6363
6364 for (i = 1; i < words; i++)
6365 if (classes[i] != X86_64_SSEUP_CLASS)
6366 return 0;
6367 }
6368
6369 /* Final merger cleanup. */
6370 for (i = 0; i < words; i++)
6371 {
6372 /* If one class is MEMORY, everything should be passed in
6373 memory. */
6374 if (classes[i] == X86_64_MEMORY_CLASS)
6375 return 0;
6376
6377 /* X86_64_SSEUP_CLASS should always be preceded by
6378 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
6379 if (classes[i] == X86_64_SSEUP_CLASS
6380 && classes[i - 1] != X86_64_SSE_CLASS
6381 && classes[i - 1] != X86_64_SSEUP_CLASS)
6382 {
6383 /* The first one should never be X86_64_SSEUP_CLASS. */
6384 gcc_assert (i != 0);
6385 classes[i] = X86_64_SSE_CLASS;
6386 }
6387
6388 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
6389 everything should be passed in memory. */
6390 if (classes[i] == X86_64_X87UP_CLASS
6391 && (classes[i - 1] != X86_64_X87_CLASS))
6392 {
6393 static bool warned;
6394
6395 /* The first one should never be X86_64_X87UP_CLASS. */
6396 gcc_assert (i != 0);
6397 if (!warned && warn_psabi)
6398 {
6399 warned = true;
6400 inform (input_location,
6401 "the ABI of passing union with long double"
6402 " has changed in GCC 4.4");
6403 }
6404 return 0;
6405 }
6406 }
6407 return words;
6408 }
6409
6410 /* Compute alignment needed. We align all types to natural boundaries with
6411 exception of XFmode that is aligned to 64bits. */
6412 if (mode != VOIDmode && mode != BLKmode)
6413 {
6414 int mode_alignment = GET_MODE_BITSIZE (mode);
6415
6416 if (mode == XFmode)
6417 mode_alignment = 128;
6418 else if (mode == XCmode)
6419 mode_alignment = 256;
6420 if (COMPLEX_MODE_P (mode))
6421 mode_alignment /= 2;
6422 /* Misaligned fields are always returned in memory. */
6423 if (bit_offset % mode_alignment)
6424 return 0;
6425 }
6426
6427 /* for V1xx modes, just use the base mode */
6428 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
6429 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
6430 mode = GET_MODE_INNER (mode);
6431
6432 /* Classification of atomic types. */
6433 switch (mode)
6434 {
6435 case SDmode:
6436 case DDmode:
6437 classes[0] = X86_64_SSE_CLASS;
6438 return 1;
6439 case TDmode:
6440 classes[0] = X86_64_SSE_CLASS;
6441 classes[1] = X86_64_SSEUP_CLASS;
6442 return 2;
6443 case DImode:
6444 case SImode:
6445 case HImode:
6446 case QImode:
6447 case CSImode:
6448 case CHImode:
6449 case CQImode:
6450 {
6451 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
6452
6453 if (size <= 32)
6454 {
6455 classes[0] = X86_64_INTEGERSI_CLASS;
6456 return 1;
6457 }
6458 else if (size <= 64)
6459 {
6460 classes[0] = X86_64_INTEGER_CLASS;
6461 return 1;
6462 }
6463 else if (size <= 64+32)
6464 {
6465 classes[0] = X86_64_INTEGER_CLASS;
6466 classes[1] = X86_64_INTEGERSI_CLASS;
6467 return 2;
6468 }
6469 else if (size <= 64+64)
6470 {
6471 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6472 return 2;
6473 }
6474 else
6475 gcc_unreachable ();
6476 }
6477 case CDImode:
6478 case TImode:
6479 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6480 return 2;
6481 case COImode:
6482 case OImode:
6483 /* OImode shouldn't be used directly. */
6484 gcc_unreachable ();
6485 case CTImode:
6486 return 0;
6487 case SFmode:
6488 if (!(bit_offset % 64))
6489 classes[0] = X86_64_SSESF_CLASS;
6490 else
6491 classes[0] = X86_64_SSE_CLASS;
6492 return 1;
6493 case DFmode:
6494 classes[0] = X86_64_SSEDF_CLASS;
6495 return 1;
6496 case XFmode:
6497 classes[0] = X86_64_X87_CLASS;
6498 classes[1] = X86_64_X87UP_CLASS;
6499 return 2;
6500 case TFmode:
6501 classes[0] = X86_64_SSE_CLASS;
6502 classes[1] = X86_64_SSEUP_CLASS;
6503 return 2;
6504 case SCmode:
6505 classes[0] = X86_64_SSE_CLASS;
6506 if (!(bit_offset % 64))
6507 return 1;
6508 else
6509 {
6510 static bool warned;
6511
6512 if (!warned && warn_psabi)
6513 {
6514 warned = true;
6515 inform (input_location,
6516 "the ABI of passing structure with complex float"
6517 " member has changed in GCC 4.4");
6518 }
6519 classes[1] = X86_64_SSESF_CLASS;
6520 return 2;
6521 }
6522 case DCmode:
6523 classes[0] = X86_64_SSEDF_CLASS;
6524 classes[1] = X86_64_SSEDF_CLASS;
6525 return 2;
6526 case XCmode:
6527 classes[0] = X86_64_COMPLEX_X87_CLASS;
6528 return 1;
6529 case TCmode:
6530 /* This mode is larger than 16 bytes. */
6531 return 0;
6532 case V8SFmode:
6533 case V8SImode:
6534 case V32QImode:
6535 case V16HImode:
6536 case V4DFmode:
6537 case V4DImode:
6538 classes[0] = X86_64_SSE_CLASS;
6539 classes[1] = X86_64_SSEUP_CLASS;
6540 classes[2] = X86_64_SSEUP_CLASS;
6541 classes[3] = X86_64_SSEUP_CLASS;
6542 return 4;
6543 case V4SFmode:
6544 case V4SImode:
6545 case V16QImode:
6546 case V8HImode:
6547 case V2DFmode:
6548 case V2DImode:
6549 classes[0] = X86_64_SSE_CLASS;
6550 classes[1] = X86_64_SSEUP_CLASS;
6551 return 2;
6552 case V1TImode:
6553 case V1DImode:
6554 case V2SFmode:
6555 case V2SImode:
6556 case V4HImode:
6557 case V8QImode:
6558 classes[0] = X86_64_SSE_CLASS;
6559 return 1;
6560 case BLKmode:
6561 case VOIDmode:
6562 return 0;
6563 default:
6564 gcc_assert (VECTOR_MODE_P (mode));
6565
6566 if (bytes > 16)
6567 return 0;
6568
6569 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6570
6571 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6572 classes[0] = X86_64_INTEGERSI_CLASS;
6573 else
6574 classes[0] = X86_64_INTEGER_CLASS;
6575 classes[1] = X86_64_INTEGER_CLASS;
6576 return 1 + (bytes > 8);
6577 }
6578 }
6579
6580 /* Examine the argument and set the number of registers required in each
6581 class. Return 0 iff the parameter should be passed in memory. */
6582 static int
6583 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6584 int *int_nregs, int *sse_nregs)
6585 {
6586 enum x86_64_reg_class regclass[MAX_CLASSES];
6587 int n = classify_argument (mode, type, regclass, 0);
6588
6589 *int_nregs = 0;
6590 *sse_nregs = 0;
6591 if (!n)
6592 return 0;
6593 for (n--; n >= 0; n--)
6594 switch (regclass[n])
6595 {
6596 case X86_64_INTEGER_CLASS:
6597 case X86_64_INTEGERSI_CLASS:
6598 (*int_nregs)++;
6599 break;
6600 case X86_64_SSE_CLASS:
6601 case X86_64_SSESF_CLASS:
6602 case X86_64_SSEDF_CLASS:
6603 (*sse_nregs)++;
6604 break;
6605 case X86_64_NO_CLASS:
6606 case X86_64_SSEUP_CLASS:
6607 break;
6608 case X86_64_X87_CLASS:
6609 case X86_64_X87UP_CLASS:
6610 if (!in_return)
6611 return 0;
6612 break;
6613 case X86_64_COMPLEX_X87_CLASS:
6614 return in_return ? 2 : 0;
6615 case X86_64_MEMORY_CLASS:
6616 gcc_unreachable ();
6617 }
6618 return 1;
6619 }
6620
6621 /* Construct container for the argument used by GCC interface. See
6622 FUNCTION_ARG for the detailed description. */
6623
6624 static rtx
6625 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6626 const_tree type, int in_return, int nintregs, int nsseregs,
6627 const int *intreg, int sse_regno)
6628 {
6629 /* The following variables hold the static issued_error state. */
6630 static bool issued_sse_arg_error;
6631 static bool issued_sse_ret_error;
6632 static bool issued_x87_ret_error;
6633
6634 enum machine_mode tmpmode;
6635 int bytes =
6636 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6637 enum x86_64_reg_class regclass[MAX_CLASSES];
6638 int n;
6639 int i;
6640 int nexps = 0;
6641 int needed_sseregs, needed_intregs;
6642 rtx exp[MAX_CLASSES];
6643 rtx ret;
6644
6645 n = classify_argument (mode, type, regclass, 0);
6646 if (!n)
6647 return NULL;
6648 if (!examine_argument (mode, type, in_return, &needed_intregs,
6649 &needed_sseregs))
6650 return NULL;
6651 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6652 return NULL;
6653
6654 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6655 some less clueful developer tries to use floating-point anyway. */
6656 if (needed_sseregs && !TARGET_SSE)
6657 {
6658 if (in_return)
6659 {
6660 if (!issued_sse_ret_error)
6661 {
6662 error ("SSE register return with SSE disabled");
6663 issued_sse_ret_error = true;
6664 }
6665 }
6666 else if (!issued_sse_arg_error)
6667 {
6668 error ("SSE register argument with SSE disabled");
6669 issued_sse_arg_error = true;
6670 }
6671 return NULL;
6672 }
6673
6674 /* Likewise, error if the ABI requires us to return values in the
6675 x87 registers and the user specified -mno-80387. */
6676 if (!TARGET_80387 && in_return)
6677 for (i = 0; i < n; i++)
6678 if (regclass[i] == X86_64_X87_CLASS
6679 || regclass[i] == X86_64_X87UP_CLASS
6680 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6681 {
6682 if (!issued_x87_ret_error)
6683 {
6684 error ("x87 register return with x87 disabled");
6685 issued_x87_ret_error = true;
6686 }
6687 return NULL;
6688 }
6689
6690 /* First construct the simple cases. Avoid SCmode, since we want to use
6691 a single register to pass this type. */
6692 if (n == 1 && mode != SCmode)
6693 switch (regclass[0])
6694 {
6695 case X86_64_INTEGER_CLASS:
6696 case X86_64_INTEGERSI_CLASS:
6697 return gen_rtx_REG (mode, intreg[0]);
6698 case X86_64_SSE_CLASS:
6699 case X86_64_SSESF_CLASS:
6700 case X86_64_SSEDF_CLASS:
6701 if (mode != BLKmode)
6702 return gen_reg_or_parallel (mode, orig_mode,
6703 SSE_REGNO (sse_regno));
6704 break;
6705 case X86_64_X87_CLASS:
6706 case X86_64_COMPLEX_X87_CLASS:
6707 return gen_rtx_REG (mode, FIRST_STACK_REG);
6708 case X86_64_NO_CLASS:
6709 /* Zero sized array, struct or class. */
6710 return NULL;
6711 default:
6712 gcc_unreachable ();
6713 }
6714 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6715 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6716 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6717 if (n == 4
6718 && regclass[0] == X86_64_SSE_CLASS
6719 && regclass[1] == X86_64_SSEUP_CLASS
6720 && regclass[2] == X86_64_SSEUP_CLASS
6721 && regclass[3] == X86_64_SSEUP_CLASS
6722 && mode != BLKmode)
6723 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6724
6725 if (n == 2
6726 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6727 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6728 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6729 && regclass[1] == X86_64_INTEGER_CLASS
6730 && (mode == CDImode || mode == TImode || mode == TFmode)
6731 && intreg[0] + 1 == intreg[1])
6732 return gen_rtx_REG (mode, intreg[0]);
6733
6734 /* Otherwise figure out the entries of the PARALLEL. */
6735 for (i = 0; i < n; i++)
6736 {
6737 int pos;
6738
6739 switch (regclass[i])
6740 {
6741 case X86_64_NO_CLASS:
6742 break;
6743 case X86_64_INTEGER_CLASS:
6744 case X86_64_INTEGERSI_CLASS:
6745 /* Merge TImodes on aligned occasions here too. */
6746 if (i * 8 + 8 > bytes)
6747 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6748 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
6749 tmpmode = SImode;
6750 else
6751 tmpmode = DImode;
6752 /* We've requested 24 bytes we don't have mode for. Use DImode. */
6753 if (tmpmode == BLKmode)
6754 tmpmode = DImode;
6755 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6756 gen_rtx_REG (tmpmode, *intreg),
6757 GEN_INT (i*8));
6758 intreg++;
6759 break;
6760 case X86_64_SSESF_CLASS:
6761 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6762 gen_rtx_REG (SFmode,
6763 SSE_REGNO (sse_regno)),
6764 GEN_INT (i*8));
6765 sse_regno++;
6766 break;
6767 case X86_64_SSEDF_CLASS:
6768 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6769 gen_rtx_REG (DFmode,
6770 SSE_REGNO (sse_regno)),
6771 GEN_INT (i*8));
6772 sse_regno++;
6773 break;
6774 case X86_64_SSE_CLASS:
6775 pos = i;
6776 switch (n)
6777 {
6778 case 1:
6779 tmpmode = DImode;
6780 break;
6781 case 2:
6782 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6783 {
6784 tmpmode = TImode;
6785 i++;
6786 }
6787 else
6788 tmpmode = DImode;
6789 break;
6790 case 4:
6791 gcc_assert (i == 0
6792 && regclass[1] == X86_64_SSEUP_CLASS
6793 && regclass[2] == X86_64_SSEUP_CLASS
6794 && regclass[3] == X86_64_SSEUP_CLASS);
6795 tmpmode = OImode;
6796 i += 3;
6797 break;
6798 default:
6799 gcc_unreachable ();
6800 }
6801 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6802 gen_rtx_REG (tmpmode,
6803 SSE_REGNO (sse_regno)),
6804 GEN_INT (pos*8));
6805 sse_regno++;
6806 break;
6807 default:
6808 gcc_unreachable ();
6809 }
6810 }
6811
6812 /* Empty aligned struct, union or class. */
6813 if (nexps == 0)
6814 return NULL;
6815
6816 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6817 for (i = 0; i < nexps; i++)
6818 XVECEXP (ret, 0, i) = exp [i];
6819 return ret;
6820 }
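
/* Illustrative sketch of the classification handled above, assuming the
   SysV x86-64 ABI and a caller-visible type such as

       struct s { double d; long l; };

   classify_argument splits the 16 bytes into two eightbytes,
   X86_64_SSEDF_CLASS and X86_64_INTEGER_CLASS, and the PARALLEL built
   here is roughly

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di)   (const_int 8))])

   i.e. the first eightbyte travels in an SSE register and the second
   in a general-purpose register.  */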
6821
6822 /* Update the data in CUM to advance over an argument of mode MODE
6823 and data type TYPE. (TYPE is null for libcalls where that information
6824 may not be available.) */
6825
6826 static void
6827 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6828 const_tree type, HOST_WIDE_INT bytes,
6829 HOST_WIDE_INT words)
6830 {
6831 switch (mode)
6832 {
6833 default:
6834 break;
6835
6836 case BLKmode:
6837 if (bytes < 0)
6838 break;
6839 /* FALLTHRU */
6840
6841 case DImode:
6842 case SImode:
6843 case HImode:
6844 case QImode:
6845 cum->words += words;
6846 cum->nregs -= words;
6847 cum->regno += words;
6848
6849 if (cum->nregs <= 0)
6850 {
6851 cum->nregs = 0;
6852 cum->regno = 0;
6853 }
6854 break;
6855
6856 case OImode:
6857 /* OImode shouldn't be used directly. */
6858 gcc_unreachable ();
6859
6860 case DFmode:
6861 if (cum->float_in_sse < 2)
6862 break;
6863 case SFmode:
6864 if (cum->float_in_sse < 1)
6865 break;
6866 /* FALLTHRU */
6867
6868 case V8SFmode:
6869 case V8SImode:
6870 case V32QImode:
6871 case V16HImode:
6872 case V4DFmode:
6873 case V4DImode:
6874 case TImode:
6875 case V16QImode:
6876 case V8HImode:
6877 case V4SImode:
6878 case V2DImode:
6879 case V4SFmode:
6880 case V2DFmode:
6881 if (!type || !AGGREGATE_TYPE_P (type))
6882 {
6883 cum->sse_words += words;
6884 cum->sse_nregs -= 1;
6885 cum->sse_regno += 1;
6886 if (cum->sse_nregs <= 0)
6887 {
6888 cum->sse_nregs = 0;
6889 cum->sse_regno = 0;
6890 }
6891 }
6892 break;
6893
6894 case V8QImode:
6895 case V4HImode:
6896 case V2SImode:
6897 case V2SFmode:
6898 case V1TImode:
6899 case V1DImode:
6900 if (!type || !AGGREGATE_TYPE_P (type))
6901 {
6902 cum->mmx_words += words;
6903 cum->mmx_nregs -= 1;
6904 cum->mmx_regno += 1;
6905 if (cum->mmx_nregs <= 0)
6906 {
6907 cum->mmx_nregs = 0;
6908 cum->mmx_regno = 0;
6909 }
6910 }
6911 break;
6912 }
6913 }
6914
6915 static void
6916 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6917 const_tree type, HOST_WIDE_INT words, bool named)
6918 {
6919 int int_nregs, sse_nregs;
6920
6921 /* Unnamed 256bit vector mode parameters are passed on stack. */
6922 if (!named && VALID_AVX256_REG_MODE (mode))
6923 return;
6924
6925 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6926 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6927 {
6928 cum->nregs -= int_nregs;
6929 cum->sse_nregs -= sse_nregs;
6930 cum->regno += int_nregs;
6931 cum->sse_regno += sse_nregs;
6932 }
6933 else
6934 {
6935 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6936 cum->words = (cum->words + align - 1) & ~(align - 1);
6937 cum->words += words;
6938 }
6939 }
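
/* A minimal worked example of the round-up above: with a 16-byte
   argument boundary, align == 128 / BITS_PER_WORD == 2 words, so
   cum->words == 3 becomes (3 + 2 - 1) & ~(2 - 1) == 4, i.e. the
   running word count is padded to the next even word before the
   argument's own words are added.  */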
6940
6941 static void
6942 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6943 HOST_WIDE_INT words)
6944 {
6945 /* Otherwise, this should be passed indirectly. */
6946 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6947
6948 cum->words += words;
6949 if (cum->nregs > 0)
6950 {
6951 cum->nregs -= 1;
6952 cum->regno += 1;
6953 }
6954 }
6955
6956 /* Update the data in CUM to advance over an argument of mode MODE and
6957 data type TYPE. (TYPE is null for libcalls where that information
6958 may not be available.) */
6959
6960 static void
6961 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6962 const_tree type, bool named)
6963 {
6964 HOST_WIDE_INT bytes, words;
6965
6966 if (mode == BLKmode)
6967 bytes = int_size_in_bytes (type);
6968 else
6969 bytes = GET_MODE_SIZE (mode);
6970 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6971
6972 if (type)
6973 mode = type_natural_mode (type, NULL);
6974
6975 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6976 function_arg_advance_ms_64 (cum, bytes, words);
6977 else if (TARGET_64BIT)
6978 function_arg_advance_64 (cum, mode, type, words, named);
6979 else
6980 function_arg_advance_32 (cum, mode, type, bytes, words);
6981 }
6982
6983 /* Define where to put the arguments to a function.
6984 Value is zero to push the argument on the stack,
6985 or a hard register in which to store the argument.
6986
6987 MODE is the argument's machine mode.
6988 TYPE is the data type of the argument (as a tree).
6989 This is null for libcalls where that information may
6990 not be available.
6991 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6992 the preceding args and about the function being called.
6993 NAMED is nonzero if this argument is a named parameter
6994 (otherwise it is an extra parameter matching an ellipsis). */
6995
6996 static rtx
6997 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6998 enum machine_mode orig_mode, const_tree type,
6999 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
7000 {
7001 static bool warnedsse, warnedmmx;
7002
7003 /* Avoid the AL settings for the Unix64 ABI. */
7004 if (mode == VOIDmode)
7005 return constm1_rtx;
7006
7007 switch (mode)
7008 {
7009 default:
7010 break;
7011
7012 case BLKmode:
7013 if (bytes < 0)
7014 break;
7015 /* FALLTHRU */
7016 case DImode:
7017 case SImode:
7018 case HImode:
7019 case QImode:
7020 if (words <= cum->nregs)
7021 {
7022 int regno = cum->regno;
7023
7024 /* Fastcall allocates the first two DWORD (SImode) or
7025 smaller arguments to ECX and EDX if the argument
7026 isn't an aggregate type. */
7027 if (cum->fastcall)
7028 {
7029 if (mode == BLKmode
7030 || mode == DImode
7031 || (type && AGGREGATE_TYPE_P (type)))
7032 break;
7033
7034 /* ECX not EAX is the first allocated register. */
7035 if (regno == AX_REG)
7036 regno = CX_REG;
7037 }
7038 return gen_rtx_REG (mode, regno);
7039 }
7040 break;
7041
7042 case DFmode:
7043 if (cum->float_in_sse < 2)
7044 break;
7045 case SFmode:
7046 if (cum->float_in_sse < 1)
7047 break;
7048 /* FALLTHRU */
7049 case TImode:
7050 /* In 32bit, we pass TImode in xmm registers. */
7051 case V16QImode:
7052 case V8HImode:
7053 case V4SImode:
7054 case V2DImode:
7055 case V4SFmode:
7056 case V2DFmode:
7057 if (!type || !AGGREGATE_TYPE_P (type))
7058 {
7059 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
7060 {
7061 warnedsse = true;
7062 warning (0, "SSE vector argument without SSE enabled "
7063 "changes the ABI");
7064 }
7065 if (cum->sse_nregs)
7066 return gen_reg_or_parallel (mode, orig_mode,
7067 cum->sse_regno + FIRST_SSE_REG);
7068 }
7069 break;
7070
7071 case OImode:
7072 /* OImode shouldn't be used directly. */
7073 gcc_unreachable ();
7074
7075 case V8SFmode:
7076 case V8SImode:
7077 case V32QImode:
7078 case V16HImode:
7079 case V4DFmode:
7080 case V4DImode:
7081 if (!type || !AGGREGATE_TYPE_P (type))
7082 {
7083 if (cum->sse_nregs)
7084 return gen_reg_or_parallel (mode, orig_mode,
7085 cum->sse_regno + FIRST_SSE_REG);
7086 }
7087 break;
7088
7089 case V8QImode:
7090 case V4HImode:
7091 case V2SImode:
7092 case V2SFmode:
7093 case V1TImode:
7094 case V1DImode:
7095 if (!type || !AGGREGATE_TYPE_P (type))
7096 {
7097 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
7098 {
7099 warnedmmx = true;
7100 warning (0, "MMX vector argument without MMX enabled "
7101 "changes the ABI");
7102 }
7103 if (cum->mmx_nregs)
7104 return gen_reg_or_parallel (mode, orig_mode,
7105 cum->mmx_regno + FIRST_MMX_REG);
7106 }
7107 break;
7108 }
7109
7110 return NULL_RTX;
7111 }
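
/* For illustration, with the fastcall convention handled above a
   declaration along the lines of

       int __attribute__((fastcall)) f (int a, int b, int c);

   passes A in %ecx and B in %edx (the first allocation is redirected
   from %eax to %ecx), while C no longer fits in the two available
   registers and is pushed on the stack.  */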
7112
7113 static rtx
7114 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7115 enum machine_mode orig_mode, const_tree type, bool named)
7116 {
7117 /* Handle a hidden AL argument containing the number of registers
7118 for varargs x86-64 functions. */
7119 if (mode == VOIDmode)
7120 return GEN_INT (cum->maybe_vaarg
7121 ? (cum->sse_nregs < 0
7122 ? X86_64_SSE_REGPARM_MAX
7123 : cum->sse_regno)
7124 : -1);
7125
7126 switch (mode)
7127 {
7128 default:
7129 break;
7130
7131 case V8SFmode:
7132 case V8SImode:
7133 case V32QImode:
7134 case V16HImode:
7135 case V4DFmode:
7136 case V4DImode:
7137 /* Unnamed 256bit vector mode parameters are passed on stack. */
7138 if (!named)
7139 return NULL;
7140 break;
7141 }
7142
7143 return construct_container (mode, orig_mode, type, 0, cum->nregs,
7144 cum->sse_nregs,
7145 &x86_64_int_parameter_registers [cum->regno],
7146 cum->sse_regno);
7147 }
7148
7149 static rtx
7150 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7151 enum machine_mode orig_mode, bool named,
7152 HOST_WIDE_INT bytes)
7153 {
7154 unsigned int regno;
7155
7156 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
7157 We use the value -2 to specify that the current function call is MS ABI. */
7158 if (mode == VOIDmode)
7159 return GEN_INT (-2);
7160
7161 /* If we've run out of registers, it goes on the stack. */
7162 if (cum->nregs == 0)
7163 return NULL_RTX;
7164
7165 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
7166
7167 /* Only floating point modes are passed in anything but integer regs. */
7168 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
7169 {
7170 if (named)
7171 regno = cum->regno + FIRST_SSE_REG;
7172 else
7173 {
7174 rtx t1, t2;
7175
7176 /* Unnamed floating parameters are passed in both the
7177 SSE and integer registers. */
7178 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
7179 t2 = gen_rtx_REG (mode, regno);
7180 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
7181 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
7182 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
7183 }
7184 }
7185 /* Handle aggregated types passed in register. */
7186 if (orig_mode == BLKmode)
7187 {
7188 if (bytes > 0 && bytes <= 8)
7189 mode = (bytes > 4 ? DImode : SImode);
7190 if (mode == BLKmode)
7191 mode = DImode;
7192 }
7193
7194 return gen_reg_or_parallel (mode, orig_mode, regno);
7195 }
7196
7197 /* Return where to put the arguments to a function.
7198 Return zero to push the argument on the stack, or a hard register in which to store the argument.
7199
7200 MODE is the argument's machine mode. TYPE is the data type of the
7201 argument. It is null for libcalls where that information may not be
7202 available. CUM gives information about the preceding args and about
7203 the function being called. NAMED is nonzero if this argument is a
7204 named parameter (otherwise it is an extra parameter matching an
7205 ellipsis). */
7206
7207 static rtx
7208 ix86_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
7209 const_tree type, bool named)
7210 {
7211 enum machine_mode mode = omode;
7212 HOST_WIDE_INT bytes, words;
7213 rtx arg;
7214
7215 if (mode == BLKmode)
7216 bytes = int_size_in_bytes (type);
7217 else
7218 bytes = GET_MODE_SIZE (mode);
7219 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7220
7221 /* To simplify the code below, represent vector types with a vector mode
7222 even if MMX/SSE are not active. */
7223 if (type && TREE_CODE (type) == VECTOR_TYPE)
7224 mode = type_natural_mode (type, cum);
7225
7226 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7227 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
7228 else if (TARGET_64BIT)
7229 arg = function_arg_64 (cum, mode, omode, type, named);
7230 else
7231 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
7232
7233 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
7234 {
7235 /* This argument uses 256bit AVX modes. */
7236 if (cum->caller)
7237 cfun->machine->callee_pass_avx256_p = true;
7238 else
7239 cfun->machine->caller_pass_avx256_p = true;
7240 }
7241
7242 return arg;
7243 }
7244
7245 /* A C expression that indicates when an argument must be passed by
7246 reference. If nonzero for an argument, a copy of that argument is
7247 made in memory and a pointer to the argument is passed instead of
7248 the argument itself. The pointer is passed in whatever way is
7249 appropriate for passing a pointer to that type. */
7250
7251 static bool
7252 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7253 enum machine_mode mode ATTRIBUTE_UNUSED,
7254 const_tree type, bool named ATTRIBUTE_UNUSED)
7255 {
7256 /* See Windows x64 Software Convention. */
7257 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7258 {
7259 int msize = (int) GET_MODE_SIZE (mode);
7260 if (type)
7261 {
7262 /* Arrays are passed by reference. */
7263 if (TREE_CODE (type) == ARRAY_TYPE)
7264 return true;
7265
7266 if (AGGREGATE_TYPE_P (type))
7267 {
7268 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
7269 are passed by reference. */
7270 msize = int_size_in_bytes (type);
7271 }
7272 }
7273
7274 /* __m128 is passed by reference. */
7275 switch (msize) {
7276 case 1: case 2: case 4: case 8:
7277 break;
7278 default:
7279 return true;
7280 }
7281 }
7282 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
7283 return 1;
7284
7285 return 0;
7286 }
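
/* Rough examples of the Windows x64 rule above: a struct of 8 bytes
   is passed by value in a single integer register, while a 12-byte
   struct, a 16-byte __m128 value, or any array type is replaced by a
   pointer to a caller-made copy.  */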
7287
7288 /* Return true when TYPE should be 128bit aligned for 32bit argument
7289 passing ABI. XXX: This function is obsolete and is only used for
7290 checking psABI compatibility with previous versions of GCC. */
7291
7292 static bool
7293 ix86_compat_aligned_value_p (const_tree type)
7294 {
7295 enum machine_mode mode = TYPE_MODE (type);
7296 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
7297 || mode == TDmode
7298 || mode == TFmode
7299 || mode == TCmode)
7300 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
7301 return true;
7302 if (TYPE_ALIGN (type) < 128)
7303 return false;
7304
7305 if (AGGREGATE_TYPE_P (type))
7306 {
7307 /* Walk the aggregates recursively. */
7308 switch (TREE_CODE (type))
7309 {
7310 case RECORD_TYPE:
7311 case UNION_TYPE:
7312 case QUAL_UNION_TYPE:
7313 {
7314 tree field;
7315
7316 /* Walk all the structure fields. */
7317 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
7318 {
7319 if (TREE_CODE (field) == FIELD_DECL
7320 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
7321 return true;
7322 }
7323 break;
7324 }
7325
7326 case ARRAY_TYPE:
7327 /* Just for use if some languages pass arrays by value. */
7328 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
7329 return true;
7330 break;
7331
7332 default:
7333 gcc_unreachable ();
7334 }
7335 }
7336 return false;
7337 }
7338
7339 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
7340 XXX: This function is obsolete and is only used for checking psABI
7341 compatibility with previous versions of GCC. */
7342
7343 static unsigned int
7344 ix86_compat_function_arg_boundary (enum machine_mode mode,
7345 const_tree type, unsigned int align)
7346 {
7347 /* In 32bit, only _Decimal128 and __float128 are aligned to their
7348 natural boundaries. */
7349 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
7350 {
7351 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
7352 make an exception for SSE modes since these require 128bit
7353 alignment.
7354
7355 The handling here differs from field_alignment. ICC aligns MMX
7356 arguments to 4 byte boundaries, while structure fields are aligned
7357 to 8 byte boundaries. */
7358 if (!type)
7359 {
7360 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
7361 align = PARM_BOUNDARY;
7362 }
7363 else
7364 {
7365 if (!ix86_compat_aligned_value_p (type))
7366 align = PARM_BOUNDARY;
7367 }
7368 }
7369 if (align > BIGGEST_ALIGNMENT)
7370 align = BIGGEST_ALIGNMENT;
7371 return align;
7372 }
7373
7374 /* Return true when TYPE should be 128bit aligned for 32bit argument
7375 passing ABI. */
7376
7377 static bool
7378 ix86_contains_aligned_value_p (const_tree type)
7379 {
7380 enum machine_mode mode = TYPE_MODE (type);
7381
7382 if (mode == XFmode || mode == XCmode)
7383 return false;
7384
7385 if (TYPE_ALIGN (type) < 128)
7386 return false;
7387
7388 if (AGGREGATE_TYPE_P (type))
7389 {
7390 /* Walk the aggregates recursively. */
7391 switch (TREE_CODE (type))
7392 {
7393 case RECORD_TYPE:
7394 case UNION_TYPE:
7395 case QUAL_UNION_TYPE:
7396 {
7397 tree field;
7398
7399 /* Walk all the structure fields. */
7400 for (field = TYPE_FIELDS (type);
7401 field;
7402 field = DECL_CHAIN (field))
7403 {
7404 if (TREE_CODE (field) == FIELD_DECL
7405 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
7406 return true;
7407 }
7408 break;
7409 }
7410
7411 case ARRAY_TYPE:
7412 /* Just for use if some languages pass arrays by value. */
7413 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
7414 return true;
7415 break;
7416
7417 default:
7418 gcc_unreachable ();
7419 }
7420 }
7421 else
7422 return TYPE_ALIGN (type) >= 128;
7423
7424 return false;
7425 }
7426
7427 /* Gives the alignment boundary, in bits, of an argument with the
7428 specified mode and type. */
7429
7430 static unsigned int
7431 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
7432 {
7433 unsigned int align;
7434 if (type)
7435 {
7436 /* Since the main variant type is used for the call, convert the
7437 type to its main variant. */
7438 type = TYPE_MAIN_VARIANT (type);
7439 align = TYPE_ALIGN (type);
7440 }
7441 else
7442 align = GET_MODE_ALIGNMENT (mode);
7443 if (align < PARM_BOUNDARY)
7444 align = PARM_BOUNDARY;
7445 else
7446 {
7447 static bool warned;
7448 unsigned int saved_align = align;
7449
7450 if (!TARGET_64BIT)
7451 {
7452 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
7453 if (!type)
7454 {
7455 if (mode == XFmode || mode == XCmode)
7456 align = PARM_BOUNDARY;
7457 }
7458 else if (!ix86_contains_aligned_value_p (type))
7459 align = PARM_BOUNDARY;
7460
7461 if (align < 128)
7462 align = PARM_BOUNDARY;
7463 }
7464
7465 if (warn_psabi
7466 && !warned
7467 && align != ix86_compat_function_arg_boundary (mode, type,
7468 saved_align))
7469 {
7470 warned = true;
7471 inform (input_location,
7472 "The ABI for passing parameters with %d-byte"
7473 " alignment has changed in GCC 4.6",
7474 align / BITS_PER_UNIT);
7475 }
7476 }
7477
7478 return align;
7479 }
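
/* Two illustrative cases of the 32-bit rules above: a plain double
   (64-bit alignment, no 128-bit aligned member) is passed at
   PARM_BOUNDARY, i.e. 4-byte aligned, whereas a __m128 argument keeps
   its 128-bit boundary because ix86_contains_aligned_value_p accepts
   it.  */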
7480
7481 /* Return true if N is a possible register number of function value. */
7482
7483 static bool
7484 ix86_function_value_regno_p (const unsigned int regno)
7485 {
7486 switch (regno)
7487 {
7488 case 0:
7489 return true;
7490
7491 case FIRST_FLOAT_REG:
7492 /* TODO: The function should depend on current function ABI but
7493 builtins.c would need updating then. Therefore we use the
7494 default ABI. */
7495 if (TARGET_64BIT && ix86_abi == MS_ABI)
7496 return false;
7497 return TARGET_FLOAT_RETURNS_IN_80387;
7498
7499 case FIRST_SSE_REG:
7500 return TARGET_SSE;
7501
7502 case FIRST_MMX_REG:
7503 if (TARGET_MACHO || TARGET_64BIT)
7504 return false;
7505 return TARGET_MMX;
7506 }
7507
7508 return false;
7509 }
7510
7511 /* Define how to find the value returned by a function.
7512 VALTYPE is the data type of the value (as a tree).
7513 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7514 otherwise, FUNC is 0. */
7515
7516 static rtx
7517 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7518 const_tree fntype, const_tree fn)
7519 {
7520 unsigned int regno;
7521
7522 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7523 we normally prevent this case when mmx is not available. However
7524 some ABIs may require the result to be returned like DImode. */
7525 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7526 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
7527
7528 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7529 we prevent this case when sse is not available. However some ABIs
7530 may require the result to be returned like integer TImode. */
7531 else if (mode == TImode
7532 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7533 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
7534
7535 /* 32-byte vector modes in %ymm0. */
7536 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7537 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
7538
7539 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7540 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7541 regno = FIRST_FLOAT_REG;
7542 else
7543 /* Most things go in %eax. */
7544 regno = AX_REG;
7545
7546 /* Override FP return register with %xmm0 for local functions when
7547 SSE math is enabled or for functions with sseregparm attribute. */
7548 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7549 {
7550 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7551 if ((sse_level >= 1 && mode == SFmode)
7552 || (sse_level == 2 && mode == DFmode))
7553 regno = FIRST_SSE_REG;
7554 }
7555
7556 /* OImode shouldn't be used directly. */
7557 gcc_assert (mode != OImode);
7558
7559 return gen_rtx_REG (orig_mode, regno);
7560 }
7561
7562 static rtx
7563 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7564 const_tree valtype)
7565 {
7566 rtx ret;
7567
7568 /* Handle libcalls, which don't provide a type node. */
7569 if (valtype == NULL)
7570 {
7571 switch (mode)
7572 {
7573 case SFmode:
7574 case SCmode:
7575 case DFmode:
7576 case DCmode:
7577 case TFmode:
7578 case SDmode:
7579 case DDmode:
7580 case TDmode:
7581 return gen_rtx_REG (mode, FIRST_SSE_REG);
7582 case XFmode:
7583 case XCmode:
7584 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
7585 case TCmode:
7586 return NULL;
7587 default:
7588 return gen_rtx_REG (mode, AX_REG);
7589 }
7590 }
7591
7592 ret = construct_container (mode, orig_mode, valtype, 1,
7593 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7594 x86_64_int_return_registers, 0);
7595
7596 /* For zero sized structures, construct_container returns NULL, but we
7597 need to keep the rest of the compiler happy by returning a meaningful value. */
7598 if (!ret)
7599 ret = gen_rtx_REG (orig_mode, AX_REG);
7600
7601 return ret;
7602 }
7603
7604 static rtx
7605 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7606 {
7607 unsigned int regno = AX_REG;
7608
7609 if (TARGET_SSE)
7610 {
7611 switch (GET_MODE_SIZE (mode))
7612 {
7613 case 16:
7614 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7615 && !COMPLEX_MODE_P (mode))
7616 regno = FIRST_SSE_REG;
7617 break;
7618 case 8:
7619 case 4:
7620 if (mode == SFmode || mode == DFmode)
7621 regno = FIRST_SSE_REG;
7622 break;
7623 default:
7624 break;
7625 }
7626 }
7627 return gen_rtx_REG (orig_mode, regno);
7628 }
7629
7630 static rtx
7631 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7632 enum machine_mode orig_mode, enum machine_mode mode)
7633 {
7634 const_tree fn, fntype;
7635
7636 fn = NULL_TREE;
7637 if (fntype_or_decl && DECL_P (fntype_or_decl))
7638 fn = fntype_or_decl;
7639 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7640
7641 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7642 return function_value_ms_64 (orig_mode, mode);
7643 else if (TARGET_64BIT)
7644 return function_value_64 (orig_mode, mode, valtype);
7645 else
7646 return function_value_32 (orig_mode, mode, fntype, fn);
7647 }
7648
7649 static rtx
7650 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7651 bool outgoing ATTRIBUTE_UNUSED)
7652 {
7653 enum machine_mode mode, orig_mode;
7654
7655 orig_mode = TYPE_MODE (valtype);
7656 mode = type_natural_mode (valtype, NULL);
7657 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7658 }
7659
7660 rtx
7661 ix86_libcall_value (enum machine_mode mode)
7662 {
7663 return ix86_function_value_1 (NULL, NULL, mode, mode);
7664 }
7665
7666 /* Return true iff type is returned in memory. */
7667
7668 static bool ATTRIBUTE_UNUSED
7669 return_in_memory_32 (const_tree type, enum machine_mode mode)
7670 {
7671 HOST_WIDE_INT size;
7672
7673 if (mode == BLKmode)
7674 return true;
7675
7676 size = int_size_in_bytes (type);
7677
7678 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7679 return false;
7680
7681 if (VECTOR_MODE_P (mode) || mode == TImode)
7682 {
7683 /* User-created vectors small enough to fit in EAX. */
7684 if (size < 8)
7685 return false;
7686
7687 /* MMX/3dNow values are returned in MM0,
7688 except when it doesn't exist or the ABI prescribes otherwise. */
7689 if (size == 8)
7690 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7691
7692 /* SSE values are returned in XMM0, except when it doesn't exist. */
7693 if (size == 16)
7694 return !TARGET_SSE;
7695
7696 /* AVX values are returned in YMM0, except when it doesn't exist. */
7697 if (size == 32)
7698 return !TARGET_AVX;
7699 }
7700
7701 if (mode == XFmode)
7702 return false;
7703
7704 if (size > 12)
7705 return true;
7706
7707 /* OImode shouldn't be used directly. */
7708 gcc_assert (mode != OImode);
7709
7710 return false;
7711 }
7712
7713 static bool ATTRIBUTE_UNUSED
7714 return_in_memory_64 (const_tree type, enum machine_mode mode)
7715 {
7716 int needed_intregs, needed_sseregs;
7717 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7718 }
7719
7720 static bool ATTRIBUTE_UNUSED
7721 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7722 {
7723 HOST_WIDE_INT size = int_size_in_bytes (type);
7724
7725 /* __m128 is returned in xmm0. */
7726 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7727 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
7728 return false;
7729
7730 /* Otherwise, the size must be exactly 1, 2, 4, or 8 bytes. */
7731 return size != 1 && size != 2 && size != 4 && size != 8;
7732 }
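
/* Examples of the MS-ABI rule above: aggregates of 1, 2, 4 or 8 bytes
   come back in %rax (scalar floats in %xmm0), a 16-byte __m128 comes
   back in %xmm0, and any other size (e.g. a 12-byte struct) is
   returned in memory.  */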
7733
7734 static bool
7735 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7736 {
7737 #ifdef SUBTARGET_RETURN_IN_MEMORY
7738 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7739 #else
7740 const enum machine_mode mode = type_natural_mode (type, NULL);
7741
7742 if (TARGET_64BIT)
7743 {
7744 if (ix86_function_type_abi (fntype) == MS_ABI)
7745 return return_in_memory_ms_64 (type, mode);
7746 else
7747 return return_in_memory_64 (type, mode);
7748 }
7749 else
7750 return return_in_memory_32 (type, mode);
7751 #endif
7752 }
7753
7754 /* When returning SSE vector types, we have a choice of either
7755 (1) being abi incompatible with a -march switch, or
7756 (2) generating an error.
7757 Given no good solution, I think the safest thing is one warning.
7758 The user won't be able to use -Werror, but....
7759
7760 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7761 called in response to actually generating a caller or callee that
7762 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7763 via aggregate_value_p for general type probing from tree-ssa. */
7764
7765 static rtx
7766 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7767 {
7768 static bool warnedsse, warnedmmx;
7769
7770 if (!TARGET_64BIT && type)
7771 {
7772 /* Look at the return type of the function, not the function type. */
7773 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7774
7775 if (!TARGET_SSE && !warnedsse)
7776 {
7777 if (mode == TImode
7778 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7779 {
7780 warnedsse = true;
7781 warning (0, "SSE vector return without SSE enabled "
7782 "changes the ABI");
7783 }
7784 }
7785
7786 if (!TARGET_MMX && !warnedmmx)
7787 {
7788 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7789 {
7790 warnedmmx = true;
7791 warning (0, "MMX vector return without MMX enabled "
7792 "changes the ABI");
7793 }
7794 }
7795 }
7796
7797 return NULL;
7798 }
7799
7800 \f
7801 /* Create the va_list data type. */
7802
7803 /* Returns the calling-convention-specific va_list data type.
7804 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
7805
7806 static tree
7807 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7808 {
7809 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
7810
7811 /* For i386 we use plain pointer to argument area. */
7812 if (!TARGET_64BIT || abi == MS_ABI)
7813 return build_pointer_type (char_type_node);
7814
7815 record = lang_hooks.types.make_type (RECORD_TYPE);
7816 type_decl = build_decl (BUILTINS_LOCATION,
7817 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7818
7819 f_gpr = build_decl (BUILTINS_LOCATION,
7820 FIELD_DECL, get_identifier ("gp_offset"),
7821 unsigned_type_node);
7822 f_fpr = build_decl (BUILTINS_LOCATION,
7823 FIELD_DECL, get_identifier ("fp_offset"),
7824 unsigned_type_node);
7825 f_ovf = build_decl (BUILTINS_LOCATION,
7826 FIELD_DECL, get_identifier ("overflow_arg_area"),
7827 ptr_type_node);
7828 f_sav = build_decl (BUILTINS_LOCATION,
7829 FIELD_DECL, get_identifier ("reg_save_area"),
7830 ptr_type_node);
7831
7832 va_list_gpr_counter_field = f_gpr;
7833 va_list_fpr_counter_field = f_fpr;
7834
7835 DECL_FIELD_CONTEXT (f_gpr) = record;
7836 DECL_FIELD_CONTEXT (f_fpr) = record;
7837 DECL_FIELD_CONTEXT (f_ovf) = record;
7838 DECL_FIELD_CONTEXT (f_sav) = record;
7839
7840 TYPE_STUB_DECL (record) = type_decl;
7841 TYPE_NAME (record) = type_decl;
7842 TYPE_FIELDS (record) = f_gpr;
7843 DECL_CHAIN (f_gpr) = f_fpr;
7844 DECL_CHAIN (f_fpr) = f_ovf;
7845 DECL_CHAIN (f_ovf) = f_sav;
7846
7847 layout_type (record);
7848
7849 /* The correct type is an array type of one element. */
7850 return build_array_type (record, build_index_type (size_zero_node));
7851 }
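
/* The record built above corresponds roughly to the C declaration
   mandated by the SysV x86-64 psABI:

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } va_list[1];
*/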
7852
7853 /* Set up the builtin va_list data type and, for 64-bit, the additional
7854 calling-convention-specific va_list data types. */
7855
7856 static tree
7857 ix86_build_builtin_va_list (void)
7858 {
7859 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7860
7861 /* Initialize abi specific va_list builtin types. */
7862 if (TARGET_64BIT)
7863 {
7864 tree t;
7865 if (ix86_abi == MS_ABI)
7866 {
7867 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7868 if (TREE_CODE (t) != RECORD_TYPE)
7869 t = build_variant_type_copy (t);
7870 sysv_va_list_type_node = t;
7871 }
7872 else
7873 {
7874 t = ret;
7875 if (TREE_CODE (t) != RECORD_TYPE)
7876 t = build_variant_type_copy (t);
7877 sysv_va_list_type_node = t;
7878 }
7879 if (ix86_abi != MS_ABI)
7880 {
7881 t = ix86_build_builtin_va_list_abi (MS_ABI);
7882 if (TREE_CODE (t) != RECORD_TYPE)
7883 t = build_variant_type_copy (t);
7884 ms_va_list_type_node = t;
7885 }
7886 else
7887 {
7888 t = ret;
7889 if (TREE_CODE (t) != RECORD_TYPE)
7890 t = build_variant_type_copy (t);
7891 ms_va_list_type_node = t;
7892 }
7893 }
7894
7895 return ret;
7896 }
7897
7898 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7899
7900 static void
7901 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7902 {
7903 rtx save_area, mem;
7904 alias_set_type set;
7905 int i, max;
7906
7907 /* GPR size of varargs save area. */
7908 if (cfun->va_list_gpr_size)
7909 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7910 else
7911 ix86_varargs_gpr_size = 0;
7912
7913 /* FPR size of varargs save area. We don't need it if we don't pass
7914 anything in SSE registers. */
7915 if (TARGET_SSE && cfun->va_list_fpr_size)
7916 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7917 else
7918 ix86_varargs_fpr_size = 0;
7919
7920 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7921 return;
7922
7923 save_area = frame_pointer_rtx;
7924 set = get_varargs_alias_set ();
7925
7926 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7927 if (max > X86_64_REGPARM_MAX)
7928 max = X86_64_REGPARM_MAX;
7929
7930 for (i = cum->regno; i < max; i++)
7931 {
7932 mem = gen_rtx_MEM (Pmode,
7933 plus_constant (save_area, i * UNITS_PER_WORD));
7934 MEM_NOTRAP_P (mem) = 1;
7935 set_mem_alias_set (mem, set);
7936 emit_move_insn (mem, gen_rtx_REG (Pmode,
7937 x86_64_int_parameter_registers[i]));
7938 }
7939
7940 if (ix86_varargs_fpr_size)
7941 {
7942 enum machine_mode smode;
7943 rtx label, test;
7944
7945 /* Now emit code to save SSE registers. The AX parameter contains number
7946 of SSE parameter registers used to call this function, though all we
7947 actually check here is the zero/non-zero status. */
7948
7949 label = gen_label_rtx ();
7950 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7951 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7952 label));
7953
7954 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7955 we used movdqa (i.e. TImode) instead? Perhaps even better would
7956 be if we could determine the real mode of the data, via a hook
7957 into pass_stdarg. Ignore all that for now. */
7958 smode = V4SFmode;
7959 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7960 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7961
7962 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7963 if (max > X86_64_SSE_REGPARM_MAX)
7964 max = X86_64_SSE_REGPARM_MAX;
7965
7966 for (i = cum->sse_regno; i < max; ++i)
7967 {
7968 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7969 mem = gen_rtx_MEM (smode, mem);
7970 MEM_NOTRAP_P (mem) = 1;
7971 set_mem_alias_set (mem, set);
7972 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7973
7974 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
7975 }
7976
7977 emit_label (label);
7978 }
7979 }
7980
7981 static void
7982 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7983 {
7984 alias_set_type set = get_varargs_alias_set ();
7985 int i;
7986
7987 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7988 {
7989 rtx reg, mem;
7990
7991 mem = gen_rtx_MEM (Pmode,
7992 plus_constant (virtual_incoming_args_rtx,
7993 i * UNITS_PER_WORD));
7994 MEM_NOTRAP_P (mem) = 1;
7995 set_mem_alias_set (mem, set);
7996
7997 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7998 emit_move_insn (mem, reg);
7999 }
8000 }
8001
8002 static void
8003 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8004 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8005 int no_rtl)
8006 {
8007 CUMULATIVE_ARGS next_cum;
8008 tree fntype;
8009
8010 /* This argument doesn't appear to be used anymore, which is good,
8011 because the old code here didn't suppress rtl generation. */
8012 gcc_assert (!no_rtl);
8013
8014 if (!TARGET_64BIT)
8015 return;
8016
8017 fntype = TREE_TYPE (current_function_decl);
8018
8019 /* For varargs, we do not want to skip the dummy va_dcl argument.
8020 For stdargs, we do want to skip the last named argument. */
8021 next_cum = *cum;
8022 if (stdarg_p (fntype))
8023 ix86_function_arg_advance (&next_cum, mode, type, true);
8024
8025 if (cum->call_abi == MS_ABI)
8026 setup_incoming_varargs_ms_64 (&next_cum);
8027 else
8028 setup_incoming_varargs_64 (&next_cum);
8029 }
8030
8031 /* Checks whether TYPE is the char * kind of va_list. */
8032
8033 static bool
8034 is_va_list_char_pointer (tree type)
8035 {
8036 tree canonic;
8037
8038 /* For 32-bit it is always true. */
8039 if (!TARGET_64BIT)
8040 return true;
8041 canonic = ix86_canonical_va_list_type (type);
8042 return (canonic == ms_va_list_type_node
8043 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
8044 }
8045
8046 /* Implement va_start. */
8047
8048 static void
8049 ix86_va_start (tree valist, rtx nextarg)
8050 {
8051 HOST_WIDE_INT words, n_gpr, n_fpr;
8052 tree f_gpr, f_fpr, f_ovf, f_sav;
8053 tree gpr, fpr, ovf, sav, t;
8054 tree type;
8055 rtx ovf_rtx;
8056
8057 if (flag_split_stack
8058 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8059 {
8060 unsigned int scratch_regno;
8061
8062 /* When we are splitting the stack, we can't refer to the stack
8063 arguments using internal_arg_pointer, because they may be on
8064 the old stack. The split stack prologue will arrange to
8065 leave a pointer to the old stack arguments in a scratch
8066 register, which we here copy to a pseudo-register. The split
8067 stack prologue can't set the pseudo-register directly because
8068 it (the prologue) runs before any registers have been saved. */
8069
8070 scratch_regno = split_stack_prologue_scratch_regno ();
8071 if (scratch_regno != INVALID_REGNUM)
8072 {
8073 rtx reg, seq;
8074
8075 reg = gen_reg_rtx (Pmode);
8076 cfun->machine->split_stack_varargs_pointer = reg;
8077
8078 start_sequence ();
8079 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
8080 seq = get_insns ();
8081 end_sequence ();
8082
8083 push_topmost_sequence ();
8084 emit_insn_after (seq, entry_of_function ());
8085 pop_topmost_sequence ();
8086 }
8087 }
8088
8089 /* Only 64bit target needs something special. */
8090 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8091 {
8092 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8093 std_expand_builtin_va_start (valist, nextarg);
8094 else
8095 {
8096 rtx va_r, next;
8097
8098 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
8099 next = expand_binop (ptr_mode, add_optab,
8100 cfun->machine->split_stack_varargs_pointer,
8101 crtl->args.arg_offset_rtx,
8102 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8103 convert_move (va_r, next, 0);
8104 }
8105 return;
8106 }
8107
8108 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8109 f_fpr = DECL_CHAIN (f_gpr);
8110 f_ovf = DECL_CHAIN (f_fpr);
8111 f_sav = DECL_CHAIN (f_ovf);
8112
8113 valist = build_simple_mem_ref (valist);
8114 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
8115 /* The following should be folded into the MEM_REF offset. */
8116 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
8117 f_gpr, NULL_TREE);
8118 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8119 f_fpr, NULL_TREE);
8120 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8121 f_ovf, NULL_TREE);
8122 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
8123 f_sav, NULL_TREE);
8124
8125 /* Count number of gp and fp argument registers used. */
8126 words = crtl->args.info.words;
8127 n_gpr = crtl->args.info.regno;
8128 n_fpr = crtl->args.info.sse_regno;
8129
8130 if (cfun->va_list_gpr_size)
8131 {
8132 type = TREE_TYPE (gpr);
8133 t = build2 (MODIFY_EXPR, type,
8134 gpr, build_int_cst (type, n_gpr * 8));
8135 TREE_SIDE_EFFECTS (t) = 1;
8136 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8137 }
8138
8139 if (TARGET_SSE && cfun->va_list_fpr_size)
8140 {
8141 type = TREE_TYPE (fpr);
8142 t = build2 (MODIFY_EXPR, type, fpr,
8143 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
8144 TREE_SIDE_EFFECTS (t) = 1;
8145 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8146 }
8147
8148 /* Find the overflow area. */
8149 type = TREE_TYPE (ovf);
8150 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8151 ovf_rtx = crtl->args.internal_arg_pointer;
8152 else
8153 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
8154 t = make_tree (type, ovf_rtx);
8155 if (words != 0)
8156 t = build2 (POINTER_PLUS_EXPR, type, t,
8157 size_int (words * UNITS_PER_WORD));
8158 t = build2 (MODIFY_EXPR, type, ovf, t);
8159 TREE_SIDE_EFFECTS (t) = 1;
8160 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8161
8162 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
8163 {
8164 /* Find the register save area.
8165 The function prologue saves it right above the stack frame. */
8166 type = TREE_TYPE (sav);
8167 t = make_tree (type, frame_pointer_rtx);
8168 if (!ix86_varargs_gpr_size)
8169 t = build2 (POINTER_PLUS_EXPR, type, t,
8170 size_int (-8 * X86_64_REGPARM_MAX));
8171 t = build2 (MODIFY_EXPR, type, sav, t);
8172 TREE_SIDE_EFFECTS (t) = 1;
8173 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8174 }
8175 }
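
/* Sketch of the register save area layout that the assignments above
   assume (SysV x86-64): the six general-purpose argument registers are
   spilled at offsets 0 .. 47 (8 bytes each) and the eight SSE argument
   registers follow at offsets 48 + 16 * i.  Hence gp_offset starts at
   n_gpr * 8 and fp_offset at 48 + n_fpr * 16, matching the
   8 * X86_64_REGPARM_MAX term used above.  */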
8176
8177 /* Implement va_arg. */
8178
8179 static tree
8180 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8181 gimple_seq *post_p)
8182 {
8183 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
8184 tree f_gpr, f_fpr, f_ovf, f_sav;
8185 tree gpr, fpr, ovf, sav, t;
8186 int size, rsize;
8187 tree lab_false, lab_over = NULL_TREE;
8188 tree addr, t2;
8189 rtx container;
8190 int indirect_p = 0;
8191 tree ptrtype;
8192 enum machine_mode nat_mode;
8193 unsigned int arg_boundary;
8194
8195 /* Only 64bit target needs something special. */
8196 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8197 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
8198
8199 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8200 f_fpr = DECL_CHAIN (f_gpr);
8201 f_ovf = DECL_CHAIN (f_fpr);
8202 f_sav = DECL_CHAIN (f_ovf);
8203
8204 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
8205 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
8206 valist = build_va_arg_indirect_ref (valist);
8207 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8208 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8209 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8210
8211 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
8212 if (indirect_p)
8213 type = build_pointer_type (type);
8214 size = int_size_in_bytes (type);
8215 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8216
8217 nat_mode = type_natural_mode (type, NULL);
8218 switch (nat_mode)
8219 {
8220 case V8SFmode:
8221 case V8SImode:
8222 case V32QImode:
8223 case V16HImode:
8224 case V4DFmode:
8225 case V4DImode:
8226 /* Unnamed 256bit vector mode parameters are passed on stack. */
8227 if (!TARGET_64BIT_MS_ABI)
8228 {
8229 container = NULL;
8230 break;
8231 }
8232
8233 default:
8234 container = construct_container (nat_mode, TYPE_MODE (type),
8235 type, 0, X86_64_REGPARM_MAX,
8236 X86_64_SSE_REGPARM_MAX, intreg,
8237 0);
8238 break;
8239 }
8240
8241 /* Pull the value out of the saved registers. */
8242
8243 addr = create_tmp_var (ptr_type_node, "addr");
8244
8245 if (container)
8246 {
8247 int needed_intregs, needed_sseregs;
8248 bool need_temp;
8249 tree int_addr, sse_addr;
8250
8251 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8252 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8253
8254 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
8255
8256 need_temp = (!REG_P (container)
8257 && ((needed_intregs && TYPE_ALIGN (type) > 64)
8258 || TYPE_ALIGN (type) > 128));
8259
8260 /* In case we are passing a structure, verify that it is a consecutive
8261 block in the register save area. If not, we need to do moves. */
8262 if (!need_temp && !REG_P (container))
8263 {
8264 /* Verify that all registers are strictly consecutive */
8265 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
8266 {
8267 int i;
8268
8269 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8270 {
8271 rtx slot = XVECEXP (container, 0, i);
8272 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
8273 || INTVAL (XEXP (slot, 1)) != i * 16)
8274 need_temp = 1;
8275 }
8276 }
8277 else
8278 {
8279 int i;
8280
8281 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8282 {
8283 rtx slot = XVECEXP (container, 0, i);
8284 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
8285 || INTVAL (XEXP (slot, 1)) != i * 8)
8286 need_temp = 1;
8287 }
8288 }
8289 }
8290 if (!need_temp)
8291 {
8292 int_addr = addr;
8293 sse_addr = addr;
8294 }
8295 else
8296 {
8297 int_addr = create_tmp_var (ptr_type_node, "int_addr");
8298 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
8299 }
8300
8301 /* First ensure that we fit completely in registers. */
8302 if (needed_intregs)
8303 {
8304 t = build_int_cst (TREE_TYPE (gpr),
8305 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
8306 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
8307 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8308 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8309 gimplify_and_add (t, pre_p);
8310 }
8311 if (needed_sseregs)
8312 {
8313 t = build_int_cst (TREE_TYPE (fpr),
8314 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
8315 + X86_64_REGPARM_MAX * 8);
8316 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
8317 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8318 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8319 gimplify_and_add (t, pre_p);
8320 }
8321
8322 /* Compute index to start of area used for integer regs. */
8323 if (needed_intregs)
8324 {
8325 /* int_addr = gpr + sav; */
8326 t = fold_convert (sizetype, gpr);
8327 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8328 gimplify_assign (int_addr, t, pre_p);
8329 }
8330 if (needed_sseregs)
8331 {
8332 /* sse_addr = fpr + sav; */
8333 t = fold_convert (sizetype, fpr);
8334 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8335 gimplify_assign (sse_addr, t, pre_p);
8336 }
8337 if (need_temp)
8338 {
8339 int i, prev_size = 0;
8340 tree temp = create_tmp_var (type, "va_arg_tmp");
8341
8342 /* addr = &temp; */
8343 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
8344 gimplify_assign (addr, t, pre_p);
8345
8346 for (i = 0; i < XVECLEN (container, 0); i++)
8347 {
8348 rtx slot = XVECEXP (container, 0, i);
8349 rtx reg = XEXP (slot, 0);
8350 enum machine_mode mode = GET_MODE (reg);
8351 tree piece_type;
8352 tree addr_type;
8353 tree daddr_type;
8354 tree src_addr, src;
8355 int src_offset;
8356 tree dest_addr, dest;
8357 int cur_size = GET_MODE_SIZE (mode);
8358
8359 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
8360 prev_size = INTVAL (XEXP (slot, 1));
8361 if (prev_size + cur_size > size)
8362 {
8363 cur_size = size - prev_size;
8364 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
8365 if (mode == BLKmode)
8366 mode = QImode;
8367 }
8368 piece_type = lang_hooks.types.type_for_mode (mode, 1);
8369 if (mode == GET_MODE (reg))
8370 addr_type = build_pointer_type (piece_type);
8371 else
8372 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8373 true);
8374 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8375 true);
8376
8377 if (SSE_REGNO_P (REGNO (reg)))
8378 {
8379 src_addr = sse_addr;
8380 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
8381 }
8382 else
8383 {
8384 src_addr = int_addr;
8385 src_offset = REGNO (reg) * 8;
8386 }
8387 src_addr = fold_convert (addr_type, src_addr);
8388 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
8389 size_int (src_offset));
8390
8391 dest_addr = fold_convert (daddr_type, addr);
8392 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
8393 size_int (prev_size));
8394 if (cur_size == GET_MODE_SIZE (mode))
8395 {
8396 src = build_va_arg_indirect_ref (src_addr);
8397 dest = build_va_arg_indirect_ref (dest_addr);
8398
8399 gimplify_assign (dest, src, pre_p);
8400 }
8401 else
8402 {
8403 tree copy
8404 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
8405 3, dest_addr, src_addr,
8406 size_int (cur_size));
8407 gimplify_and_add (copy, pre_p);
8408 }
8409 prev_size += cur_size;
8410 }
8411 }
8412
8413 if (needed_intregs)
8414 {
8415 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
8416 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
8417 gimplify_assign (gpr, t, pre_p);
8418 }
8419
8420 if (needed_sseregs)
8421 {
8422 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
8423 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
8424 gimplify_assign (fpr, t, pre_p);
8425 }
8426
8427 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8428
8429 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8430 }
8431
8432 /* ... otherwise out of the overflow area. */
8433
8434 /* When the caller aligns a parameter on the stack, an alignment
8435 beyond MAX_SUPPORTED_STACK_ALIGNMENT is clamped to
8436 MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee here with the
8437 caller. */
8438 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
8439 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
8440 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
8441
8442 /* Care for on-stack alignment if needed. */
8443 if (arg_boundary <= 64 || size == 0)
8444 t = ovf;
8445 else
8446 {
8447 HOST_WIDE_INT align = arg_boundary / 8;
8448 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
8449 size_int (align - 1));
8450 t = fold_convert (sizetype, t);
8451 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8452 size_int (-align));
8453 t = fold_convert (TREE_TYPE (ovf), t);
8454 }
8455
8456 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8457 gimplify_assign (addr, t, pre_p);
8458
8459 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
8460 size_int (rsize * UNITS_PER_WORD));
8461 gimplify_assign (unshare_expr (ovf), t, pre_p);
8462
8463 if (container)
8464 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8465
8466 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8467 addr = fold_convert (ptrtype, addr);
8468
8469 if (indirect_p)
8470 addr = build_va_arg_indirect_ref (addr);
8471 return build_va_arg_indirect_ref (addr);
8472 }
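
/* Worked example of the overflow-area rounding above: for a 16-byte
   aligned argument, align == 16 and the computation
   (ovf + 15) & -16 bumps e.g. an overflow pointer of 0x...28 up to
   0x...30 before the argument is fetched from the stack.  */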
8473 \f
8474 /* Return true if OPNUM's MEM should be matched
8475 in movabs* patterns. */
8476
8477 bool
8478 ix86_check_movabs (rtx insn, int opnum)
8479 {
8480 rtx set, mem;
8481
8482 set = PATTERN (insn);
8483 if (GET_CODE (set) == PARALLEL)
8484 set = XVECEXP (set, 0, 0);
8485 gcc_assert (GET_CODE (set) == SET);
8486 mem = XEXP (set, opnum);
8487 while (GET_CODE (mem) == SUBREG)
8488 mem = SUBREG_REG (mem);
8489 gcc_assert (MEM_P (mem));
8490 return volatile_ok || !MEM_VOLATILE_P (mem);
8491 }
8492 \f
8493 /* Initialize the table of extra 80387 mathematical constants. */
8494
8495 static void
8496 init_ext_80387_constants (void)
8497 {
8498 static const char * cst[5] =
8499 {
8500 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8501 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8502 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8503 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8504 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8505 };
8506 int i;
8507
8508 for (i = 0; i < 5; i++)
8509 {
8510 real_from_string (&ext_80387_constants_table[i], cst[i]);
8511 /* Ensure each constant is rounded to XFmode precision. */
8512 real_convert (&ext_80387_constants_table[i],
8513 XFmode, &ext_80387_constants_table[i]);
8514 }
8515
8516 ext_80387_constants_init = 1;
8517 }
8518
8519 /* Return non-zero if the constant is something that
8520 can be loaded with a special instruction. */
8521
8522 int
8523 standard_80387_constant_p (rtx x)
8524 {
8525 enum machine_mode mode = GET_MODE (x);
8526
8527 REAL_VALUE_TYPE r;
8528
8529 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8530 return -1;
8531
8532 if (x == CONST0_RTX (mode))
8533 return 1;
8534 if (x == CONST1_RTX (mode))
8535 return 2;
8536
8537 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8538
8539 /* For XFmode constants, try to find a special 80387 instruction when
8540 optimizing for size or on those CPUs that benefit from them. */
8541 if (mode == XFmode
8542 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8543 {
8544 int i;
8545
8546 if (! ext_80387_constants_init)
8547 init_ext_80387_constants ();
8548
8549 for (i = 0; i < 5; i++)
8550 if (real_identical (&r, &ext_80387_constants_table[i]))
8551 return i + 3;
8552 }
8553
8554 /* Load of the constant -0.0 or -1.0 will be split as
8555 fldz;fchs or fld1;fchs sequence. */
8556 if (real_isnegzero (&r))
8557 return 8;
8558 if (real_identical (&r, &dconstm1))
8559 return 9;
8560
8561 return 0;
8562 }
8563
8564 /* Return the opcode of the special instruction to be used to load
8565 the constant X. */
8566
8567 const char *
8568 standard_80387_constant_opcode (rtx x)
8569 {
8570 switch (standard_80387_constant_p (x))
8571 {
8572 case 1:
8573 return "fldz";
8574 case 2:
8575 return "fld1";
8576 case 3:
8577 return "fldlg2";
8578 case 4:
8579 return "fldln2";
8580 case 5:
8581 return "fldl2e";
8582 case 6:
8583 return "fldl2t";
8584 case 7:
8585 return "fldpi";
8586 case 8:
8587 case 9:
8588 return "#";
8589 default:
8590 gcc_unreachable ();
8591 }
8592 }
8593
8594 /* Return the CONST_DOUBLE representing the 80387 constant that is
8595 loaded by the specified special instruction. The argument IDX
8596 matches the return value from standard_80387_constant_p. */
8597
8598 rtx
8599 standard_80387_constant_rtx (int idx)
8600 {
8601 int i;
8602
8603 if (! ext_80387_constants_init)
8604 init_ext_80387_constants ();
8605
8606 switch (idx)
8607 {
8608 case 3:
8609 case 4:
8610 case 5:
8611 case 6:
8612 case 7:
8613 i = idx - 3;
8614 break;
8615
8616 default:
8617 gcc_unreachable ();
8618 }
8619
8620 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8621 XFmode);
8622 }
8623
8624 /* Return 1 if X is all 0s and 2 if x is all 1s
8625 in supported SSE vector mode. */
8626
8627 int
8628 standard_sse_constant_p (rtx x)
8629 {
8630 enum machine_mode mode = GET_MODE (x);
8631
8632 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8633 return 1;
8634 if (vector_all_ones_operand (x, mode))
8635 switch (mode)
8636 {
8637 case V16QImode:
8638 case V8HImode:
8639 case V4SImode:
8640 case V2DImode:
8641 if (TARGET_SSE2)
8642 return 2;
8643 default:
8644 break;
8645 }
8646
8647 return 0;
8648 }
8649
8650 /* Return the opcode of the special instruction to be used to load
8651 the constant X. */
8652
8653 const char *
8654 standard_sse_constant_opcode (rtx insn, rtx x)
8655 {
8656 switch (standard_sse_constant_p (x))
8657 {
8658 case 1:
8659 switch (get_attr_mode (insn))
8660 {
8661 case MODE_V4SF:
8662 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8663 case MODE_V2DF:
8664 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8665 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8666 else
8667 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
8668 case MODE_TI:
8669 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8670 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8671 else
8672 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
8673 case MODE_V8SF:
8674 return "vxorps\t%x0, %x0, %x0";
8675 case MODE_V4DF:
8676 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8677 return "vxorps\t%x0, %x0, %x0";
8678 else
8679 return "vxorpd\t%x0, %x0, %x0";
8680 case MODE_OI:
8681 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8682 return "vxorps\t%x0, %x0, %x0";
8683 else
8684 return "vpxor\t%x0, %x0, %x0";
8685 default:
8686 break;
8687 }
8688 case 2:
8689 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
8690 default:
8691 break;
8692 }
8693 gcc_unreachable ();
8694 }
8695
8696 /* Returns true if OP contains a symbol reference */
8697
8698 bool
8699 symbolic_reference_mentioned_p (rtx op)
8700 {
8701 const char *fmt;
8702 int i;
8703
8704 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8705 return true;
8706
8707 fmt = GET_RTX_FORMAT (GET_CODE (op));
8708 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8709 {
8710 if (fmt[i] == 'E')
8711 {
8712 int j;
8713
8714 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8715 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8716 return true;
8717 }
8718
8719 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8720 return true;
8721 }
8722
8723 return false;
8724 }
8725
8726 /* Return true if it is appropriate to emit `ret' instructions in the
8727 body of a function. Do this only if the epilogue is simple, needing a
8728 couple of insns. Prior to reloading, we can't tell how many registers
8729 must be saved, so return false then. Return false if there is no frame
8730 marker to de-allocate. */
8731
8732 bool
8733 ix86_can_use_return_insn_p (void)
8734 {
8735 struct ix86_frame frame;
8736
8737 if (! reload_completed || frame_pointer_needed)
8738 return 0;
8739
8740 /* Don't allow more than 32k pop, since that's all we can do
8741 with one instruction. */
8742 if (crtl->args.pops_args && crtl->args.size >= 32768)
8743 return 0;
8744
8745 ix86_compute_frame_layout (&frame);
8746 return (frame.stack_pointer_offset == UNITS_PER_WORD
8747 && (frame.nregs + frame.nsseregs) == 0);
8748 }
8749 \f
8750 /* Value should be nonzero if functions must have frame pointers.
8751 Zero means the frame pointer need not be set up (and parms may
8752 be accessed via the stack pointer) in functions that seem suitable. */
8753
8754 static bool
8755 ix86_frame_pointer_required (void)
8756 {
8757 /* If we accessed previous frames, then the generated code expects
8758 to be able to access the saved ebp value in our frame. */
8759 if (cfun->machine->accesses_prev_frame)
8760 return true;
8761
8762   /* Several x86 OSes need a frame pointer for other reasons,
8763 usually pertaining to setjmp. */
8764 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8765 return true;
8766
8767 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8768 turns off the frame pointer by default. Turn it back on now if
8769 we've not got a leaf function. */
8770 if (TARGET_OMIT_LEAF_FRAME_POINTER
8771 && (!current_function_is_leaf
8772 || ix86_current_function_calls_tls_descriptor))
8773 return true;
8774
8775 if (crtl->profile && !flag_fentry)
8776 return true;
8777
8778 return false;
8779 }
8780
8781 /* Record that the current function accesses previous call frames. */
8782
8783 void
8784 ix86_setup_frame_addresses (void)
8785 {
8786 cfun->machine->accesses_prev_frame = 1;
8787 }
8788 \f
8789 #ifndef USE_HIDDEN_LINKONCE
8790 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
8791 # define USE_HIDDEN_LINKONCE 1
8792 # else
8793 # define USE_HIDDEN_LINKONCE 0
8794 # endif
8795 #endif
8796
8797 static int pic_labels_used;
8798
8799 /* Fills in the label name that should be used for a pc thunk for
8800 the given register. */
8801
8802 static void
8803 get_pc_thunk_name (char name[32], unsigned int regno)
8804 {
8805 gcc_assert (!TARGET_64BIT);
8806
8807 if (USE_HIDDEN_LINKONCE)
8808 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
8809 else
8810 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
8811 }
8812
8813
8814 /* This function emits the pc thunk functions used with -fpic: each one
8815    loads its register with the return address of the caller and returns.  */
8816
8817 static void
8818 ix86_code_end (void)
8819 {
8820 rtx xops[2];
8821 int regno;
8822
8823 for (regno = AX_REG; regno <= SP_REG; regno++)
8824 {
8825 char name[32];
8826 tree decl;
8827
8828 if (!(pic_labels_used & (1 << regno)))
8829 continue;
8830
8831 get_pc_thunk_name (name, regno);
8832
8833 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8834 get_identifier (name),
8835 build_function_type (void_type_node, void_list_node));
8836 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8837 NULL_TREE, void_type_node);
8838 TREE_PUBLIC (decl) = 1;
8839 TREE_STATIC (decl) = 1;
8840
8841 #if TARGET_MACHO
8842 if (TARGET_MACHO)
8843 {
8844 switch_to_section (darwin_sections[text_coal_section]);
8845 fputs ("\t.weak_definition\t", asm_out_file);
8846 assemble_name (asm_out_file, name);
8847 fputs ("\n\t.private_extern\t", asm_out_file);
8848 assemble_name (asm_out_file, name);
8849 putc ('\n', asm_out_file);
8850 ASM_OUTPUT_LABEL (asm_out_file, name);
8851 DECL_WEAK (decl) = 1;
8852 }
8853 else
8854 #endif
8855 if (USE_HIDDEN_LINKONCE)
8856 {
8857 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8858
8859 targetm.asm_out.unique_section (decl, 0);
8860 switch_to_section (get_named_section (decl, NULL, 0));
8861
8862 targetm.asm_out.globalize_label (asm_out_file, name);
8863 fputs ("\t.hidden\t", asm_out_file);
8864 assemble_name (asm_out_file, name);
8865 putc ('\n', asm_out_file);
8866 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8867 }
8868 else
8869 {
8870 switch_to_section (text_section);
8871 ASM_OUTPUT_LABEL (asm_out_file, name);
8872 }
8873
8874 DECL_INITIAL (decl) = make_node (BLOCK);
8875 current_function_decl = decl;
8876 init_function_start (decl);
8877 first_function_block_is_cold = false;
8878 /* Make sure unwind info is emitted for the thunk if needed. */
8879 final_start_function (emit_barrier (), asm_out_file, 1);
8880
8881       /* Pad the stack-to-IP move out to 4 instructions; two NOPs count
8882 	 as one instruction, so emit 8 NOPs.  */
8883 if (TARGET_PAD_SHORT_FUNCTION)
8884 {
8885 int i = 8;
8886
8887 while (i--)
8888 fputs ("\tnop\n", asm_out_file);
8889 }
8890
8891 xops[0] = gen_rtx_REG (Pmode, regno);
8892 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8893 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8894 fputs ("\tret\n", asm_out_file);
8895 final_end_function ();
8896 init_insn_lengths ();
8897 free_after_compilation (cfun);
8898 set_cfun (NULL);
8899 current_function_decl = NULL;
8900 }
8901
8902 if (flag_split_stack)
8903 file_end_indicate_split_stack ();
8904 }
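
/* For reference (an approximate sketch, assuming USE_HIDDEN_LINKONCE),
   the thunk emitted above for %ebx looks like:

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret

   i.e. it copies the caller's return address into the register and
   returns, possibly preceded by NOP padding when
   TARGET_PAD_SHORT_FUNCTION is set.  */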
8905
8906 /* Emit code for the SET_GOT patterns. */
8907
8908 const char *
8909 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8910 {
8911 rtx xops[3];
8912
8913 xops[0] = dest;
8914
8915 if (TARGET_VXWORKS_RTP && flag_pic)
8916 {
8917 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8918 xops[2] = gen_rtx_MEM (Pmode,
8919 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8920 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8921
8922 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8923 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8924 an unadorned address. */
8925 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8926 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8927 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8928 return "";
8929 }
8930
8931 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8932
8933 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
8934 {
8935 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8936
8937 if (!flag_pic)
8938 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8939 else
8940 {
8941 output_asm_insn ("call\t%a2", xops);
8942 #ifdef DWARF2_UNWIND_INFO
8943 /* The call to next label acts as a push. */
8944 if (dwarf2out_do_frame ())
8945 {
8946 rtx insn;
8947 start_sequence ();
8948 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8949 gen_rtx_PLUS (Pmode,
8950 stack_pointer_rtx,
8951 GEN_INT (-4))));
8952 RTX_FRAME_RELATED_P (insn) = 1;
8953 dwarf2out_frame_debug (insn, true);
8954 end_sequence ();
8955 }
8956 #endif
8957 }
8958
8959 #if TARGET_MACHO
8960 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8961 is what will be referenced by the Mach-O PIC subsystem. */
8962 if (!label)
8963 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8964 #endif
8965
8966 targetm.asm_out.internal_label (asm_out_file, "L",
8967 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8968
8969 if (flag_pic)
8970 {
8971 output_asm_insn ("pop%z0\t%0", xops);
8972 #ifdef DWARF2_UNWIND_INFO
8973 /* The pop is a pop and clobbers dest, but doesn't restore it
8974 for unwind info purposes. */
8975 if (dwarf2out_do_frame ())
8976 {
8977 rtx insn;
8978 start_sequence ();
8979 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
8980 dwarf2out_frame_debug (insn, true);
8981 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8982 gen_rtx_PLUS (Pmode,
8983 stack_pointer_rtx,
8984 GEN_INT (4))));
8985 RTX_FRAME_RELATED_P (insn) = 1;
8986 dwarf2out_frame_debug (insn, true);
8987 end_sequence ();
8988 }
8989 #endif
8990 }
8991 }
8992 else
8993 {
8994 char name[32];
8995 get_pc_thunk_name (name, REGNO (dest));
8996 pic_labels_used |= 1 << REGNO (dest);
8997
8998 #ifdef DWARF2_UNWIND_INFO
8999 /* Ensure all queued register saves are flushed before the
9000 call. */
9001 if (dwarf2out_do_frame ())
9002 dwarf2out_flush_queued_reg_saves ();
9003 #endif
9004 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
9005 xops[2] = gen_rtx_MEM (QImode, xops[2]);
9006 output_asm_insn ("call\t%X2", xops);
9007 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
9008 is what will be referenced by the Mach-O PIC subsystem. */
9009 #if TARGET_MACHO
9010 if (!label)
9011 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
9012 else
9013 targetm.asm_out.internal_label (asm_out_file, "L",
9014 CODE_LABEL_NUMBER (label));
9015 #endif
9016 }
9017
9018 if (TARGET_MACHO)
9019 return "";
9020
9021 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
9022 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
9023 else
9024 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
9025
9026 return "";
9027 }
9028
9029 /* Generate a "push" pattern for input ARG.  */
9030
9031 static rtx
9032 gen_push (rtx arg)
9033 {
9034 struct machine_function *m = cfun->machine;
9035
9036 if (m->fs.cfa_reg == stack_pointer_rtx)
9037 m->fs.cfa_offset += UNITS_PER_WORD;
9038 m->fs.sp_offset += UNITS_PER_WORD;
9039
9040 return gen_rtx_SET (VOIDmode,
9041 gen_rtx_MEM (Pmode,
9042 gen_rtx_PRE_DEC (Pmode,
9043 stack_pointer_rtx)),
9044 arg);
9045 }
9046
9047 /* Generate a "pop" pattern for input ARG.  */
9048
9049 static rtx
9050 gen_pop (rtx arg)
9051 {
9052 return gen_rtx_SET (VOIDmode,
9053 arg,
9054 gen_rtx_MEM (Pmode,
9055 gen_rtx_POST_INC (Pmode,
9056 stack_pointer_rtx)));
9057 }
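
/* As a sketch, gen_push (reg) above builds RTL equivalent to
	(set (mem (pre_dec (reg sp))) (reg))
   in Pmode, and additionally bumps the tracked CFA/SP offsets in
   cfun->machine->fs, while gen_pop builds the matching post_inc load
   without touching that state.  */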
9058
9059 /* Return the number of an unused call-clobbered register that is available
9060    for the entire function, or INVALID_REGNUM if there is none.  */
9061
9062 static unsigned int
9063 ix86_select_alt_pic_regnum (void)
9064 {
9065 if (current_function_is_leaf
9066 && !crtl->profile
9067 && !ix86_current_function_calls_tls_descriptor)
9068 {
9069 int i, drap;
9070 /* Can't use the same register for both PIC and DRAP. */
9071 if (crtl->drap_reg)
9072 drap = REGNO (crtl->drap_reg);
9073 else
9074 drap = -1;
9075 for (i = 2; i >= 0; --i)
9076 if (i != drap && !df_regs_ever_live_p (i))
9077 return i;
9078 }
9079
9080 return INVALID_REGNUM;
9081 }
9082
9083 /* Return 1 if we need to save REGNO. */
9084 static int
9085 ix86_save_reg (unsigned int regno, int maybe_eh_return)
9086 {
9087 if (pic_offset_table_rtx
9088 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
9089 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
9090 || crtl->profile
9091 || crtl->calls_eh_return
9092 || crtl->uses_const_pool))
9093 {
9094 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
9095 return 0;
9096 return 1;
9097 }
9098
9099 if (crtl->calls_eh_return && maybe_eh_return)
9100 {
9101 unsigned i;
9102 for (i = 0; ; i++)
9103 {
9104 unsigned test = EH_RETURN_DATA_REGNO (i);
9105 if (test == INVALID_REGNUM)
9106 break;
9107 if (test == regno)
9108 return 1;
9109 }
9110 }
9111
9112 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
9113 return 1;
9114
9115 return (df_regs_ever_live_p (regno)
9116 && !call_used_regs[regno]
9117 && !fixed_regs[regno]
9118 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
9119 }
9120
9121 /* Return the number of saved general purpose registers.  */
9122
9123 static int
9124 ix86_nsaved_regs (void)
9125 {
9126 int nregs = 0;
9127 int regno;
9128
9129 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9130 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9131 nregs ++;
9132 return nregs;
9133 }
9134
9135 /* Return the number of saved SSE registers.  */
9136
9137 static int
9138 ix86_nsaved_sseregs (void)
9139 {
9140 int nregs = 0;
9141 int regno;
9142
9143 if (!TARGET_64BIT_MS_ABI)
9144 return 0;
9145 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9146 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9147 nregs ++;
9148 return nregs;
9149 }
9150
9151 /* Given FROM and TO register numbers, say whether this elimination is
9152 allowed. If stack alignment is needed, we can only replace argument
9153 pointer with hard frame pointer, or replace frame pointer with stack
9154 pointer. Otherwise, frame pointer elimination is automatically
9155 handled and all other eliminations are valid. */
9156
9157 static bool
9158 ix86_can_eliminate (const int from, const int to)
9159 {
9160 if (stack_realign_fp)
9161 return ((from == ARG_POINTER_REGNUM
9162 && to == HARD_FRAME_POINTER_REGNUM)
9163 || (from == FRAME_POINTER_REGNUM
9164 && to == STACK_POINTER_REGNUM));
9165 else
9166 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
9167 }
9168
9169 /* Return the offset between two registers, one to be eliminated, and the other
9170 its replacement, at the start of a routine. */
9171
9172 HOST_WIDE_INT
9173 ix86_initial_elimination_offset (int from, int to)
9174 {
9175 struct ix86_frame frame;
9176 ix86_compute_frame_layout (&frame);
9177
9178 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9179 return frame.hard_frame_pointer_offset;
9180 else if (from == FRAME_POINTER_REGNUM
9181 && to == HARD_FRAME_POINTER_REGNUM)
9182 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
9183 else
9184 {
9185 gcc_assert (to == STACK_POINTER_REGNUM);
9186
9187 if (from == ARG_POINTER_REGNUM)
9188 return frame.stack_pointer_offset;
9189
9190 gcc_assert (from == FRAME_POINTER_REGNUM);
9191 return frame.stack_pointer_offset - frame.frame_pointer_offset;
9192 }
9193 }
9194
9195 /* In a dynamically-aligned function, we can't know the offset from
9196 stack pointer to frame pointer, so we must ensure that setjmp
9197 eliminates fp against the hard fp (%ebp) rather than trying to
9198 index from %esp up to the top of the frame across a gap that is
9199 of unknown (at compile-time) size. */
9200 static rtx
9201 ix86_builtin_setjmp_frame_value (void)
9202 {
9203 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
9204 }
9205
9206 /* On the x86 -fsplit-stack and -fstack-protector both use the same
9207    field in the TCB, so they cannot be used together.  */
9208
9209 static bool
9210 ix86_supports_split_stack (bool report ATTRIBUTE_UNUSED,
9211 struct gcc_options *opts ATTRIBUTE_UNUSED)
9212 {
9213 bool ret = true;
9214
9215 #ifndef TARGET_THREAD_SPLIT_STACK_OFFSET
9216 if (report)
9217 error ("%<-fsplit-stack%> currently only supported on GNU/Linux");
9218 ret = false;
9219 #else
9220 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
9221 {
9222 if (report)
9223 error ("%<-fsplit-stack%> requires "
9224 "assembler support for CFI directives");
9225 ret = false;
9226 }
9227 #endif
9228
9229 return ret;
9230 }
9231
9232 /* When using -fsplit-stack, the allocation routines set a field in
9233 the TCB to the bottom of the stack plus this much space, measured
9234 in bytes. */
9235
9236 #define SPLIT_STACK_AVAILABLE 256
9237
9238 /* Fill structure FRAME with information about the frame of the current function.  */
9239
9240 static void
9241 ix86_compute_frame_layout (struct ix86_frame *frame)
9242 {
9243 unsigned int stack_alignment_needed;
9244 HOST_WIDE_INT offset;
9245 unsigned int preferred_alignment;
9246 HOST_WIDE_INT size = get_frame_size ();
9247 HOST_WIDE_INT to_allocate;
9248
9249 frame->nregs = ix86_nsaved_regs ();
9250 frame->nsseregs = ix86_nsaved_sseregs ();
9251
9252 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
9253 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
9254
9255   /* The 64-bit MS ABI seems to require the stack to be 16-byte aligned at
9256      all times, except in function prologues and in leaf functions.  */
9257 if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
9258 && (!current_function_is_leaf || cfun->calls_alloca != 0
9259 || ix86_current_function_calls_tls_descriptor))
9260 {
9261 preferred_alignment = 16;
9262 stack_alignment_needed = 16;
9263 crtl->preferred_stack_boundary = 128;
9264 crtl->stack_alignment_needed = 128;
9265 }
9266
9267 gcc_assert (!size || stack_alignment_needed);
9268 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
9269 gcc_assert (preferred_alignment <= stack_alignment_needed);
9270
9271 /* For SEH we have to limit the amount of code movement into the prologue.
9272 At present we do this via a BLOCKAGE, at which point there's very little
9273 scheduling that can be done, which means that there's very little point
9274 in doing anything except PUSHs. */
9275 if (TARGET_SEH)
9276 cfun->machine->use_fast_prologue_epilogue = false;
9277
9278   /* During reload iterations the number of saved registers can change.
9279      Recompute the value as needed.  Do not recompute when the number of
9280      registers did not change, as reload calls this function multiple times
9281      and does not expect the decision to change within a single iteration.  */
9282 else if (!optimize_function_for_size_p (cfun)
9283 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
9284 {
9285 int count = frame->nregs;
9286 struct cgraph_node *node = cgraph_get_node (current_function_decl);
9287
9288 cfun->machine->use_fast_prologue_epilogue_nregs = count;
9289
9290       /* The fast prologue uses moves instead of pushes to save registers.
9291 	 This is significantly longer, but also executes faster, as modern
9292 	 hardware can execute the moves in parallel but cannot do so for push/pop.
9293
9294 	 Be careful about choosing which prologue to emit: when the function
9295 	 takes many instructions to execute, we may as well use the slow version,
9296 	 and likewise when the function is known to be outside a hot spot (known
9297 	 only with profile feedback).  Weight the size of the function by the
9298 	 number of registers to save, as it is cheap to use one or two push
9299 	 instructions but very slow to use many of them.  */
9300 if (count)
9301 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
9302 if (node->frequency < NODE_FREQUENCY_NORMAL
9303 || (flag_branch_probabilities
9304 && node->frequency < NODE_FREQUENCY_HOT))
9305 cfun->machine->use_fast_prologue_epilogue = false;
9306 else
9307 cfun->machine->use_fast_prologue_epilogue
9308 = !expensive_function_p (count);
9309 }
9310 if (TARGET_PROLOGUE_USING_MOVE
9311 && cfun->machine->use_fast_prologue_epilogue)
9312 frame->save_regs_using_mov = true;
9313 else
9314 frame->save_regs_using_mov = false;
9315
9316 /* If static stack checking is enabled and done with probes, the registers
9317 need to be saved before allocating the frame. */
9318 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
9319 frame->save_regs_using_mov = false;
9320
9321 /* Skip return address. */
9322 offset = UNITS_PER_WORD;
9323
9324 /* Skip pushed static chain. */
9325 if (ix86_static_chain_on_stack)
9326 offset += UNITS_PER_WORD;
9327
9328 /* Skip saved base pointer. */
9329 if (frame_pointer_needed)
9330 offset += UNITS_PER_WORD;
9331 frame->hfp_save_offset = offset;
9332
9333 /* The traditional frame pointer location is at the top of the frame. */
9334 frame->hard_frame_pointer_offset = offset;
9335
9336 /* Register save area */
9337 offset += frame->nregs * UNITS_PER_WORD;
9338 frame->reg_save_offset = offset;
9339
9340 /* Align and set SSE register save area. */
9341 if (frame->nsseregs)
9342 {
9343 /* The only ABI that has saved SSE registers (Win64) also has a
9344 16-byte aligned default stack, and thus we don't need to be
9345 within the re-aligned local stack frame to save them. */
9346 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
9347 offset = (offset + 16 - 1) & -16;
9348 offset += frame->nsseregs * 16;
9349 }
9350 frame->sse_reg_save_offset = offset;
9351
9352 /* The re-aligned stack starts here. Values before this point are not
9353 directly comparable with values below this point. In order to make
9354 sure that no value happens to be the same before and after, force
9355 the alignment computation below to add a non-zero value. */
9356 if (stack_realign_fp)
9357 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
9358
9359 /* Va-arg area */
9360 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
9361 offset += frame->va_arg_size;
9362
9363 /* Align start of frame for local function. */
9364 if (stack_realign_fp
9365 || offset != frame->sse_reg_save_offset
9366 || size != 0
9367 || !current_function_is_leaf
9368 || cfun->calls_alloca
9369 || ix86_current_function_calls_tls_descriptor)
9370 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
9371
9372 /* Frame pointer points here. */
9373 frame->frame_pointer_offset = offset;
9374
9375 offset += size;
9376
9377   /* Add the outgoing arguments area.  It can be skipped if we eliminated
9378      all the function calls as dead code.
9379      Skipping is however impossible when the function calls alloca, since
9380      the alloca expander assumes that the last crtl->outgoing_args_size
9381      bytes of the stack frame are unused.  */
9382 if (ACCUMULATE_OUTGOING_ARGS
9383 && (!current_function_is_leaf || cfun->calls_alloca
9384 || ix86_current_function_calls_tls_descriptor))
9385 {
9386 offset += crtl->outgoing_args_size;
9387 frame->outgoing_arguments_size = crtl->outgoing_args_size;
9388 }
9389 else
9390 frame->outgoing_arguments_size = 0;
9391
9392 /* Align stack boundary. Only needed if we're calling another function
9393 or using alloca. */
9394 if (!current_function_is_leaf || cfun->calls_alloca
9395 || ix86_current_function_calls_tls_descriptor)
9396 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
9397
9398 /* We've reached end of stack frame. */
9399 frame->stack_pointer_offset = offset;
9400
9401 /* Size prologue needs to allocate. */
9402 to_allocate = offset - frame->sse_reg_save_offset;
9403
9404 if ((!to_allocate && frame->nregs <= 1)
9405 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
9406 frame->save_regs_using_mov = false;
9407
9408 if (ix86_using_red_zone ()
9409 && current_function_sp_is_unchanging
9410 && current_function_is_leaf
9411 && !ix86_current_function_calls_tls_descriptor)
9412 {
9413 frame->red_zone_size = to_allocate;
9414 if (frame->save_regs_using_mov)
9415 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
9416 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
9417 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
9418 }
9419 else
9420 frame->red_zone_size = 0;
9421 frame->stack_pointer_offset -= frame->red_zone_size;
9422
9423 /* The SEH frame pointer location is near the bottom of the frame.
9424 This is enforced by the fact that the difference between the
9425 stack pointer and the frame pointer is limited to 240 bytes in
9426 the unwind data structure. */
9427 if (TARGET_SEH)
9428 {
9429 HOST_WIDE_INT diff;
9430
9431 /* If we can leave the frame pointer where it is, do so. */
9432 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
9433 if (diff > 240 || (diff & 15) != 0)
9434 {
9435 /* Ideally we'd determine what portion of the local stack frame
9436 (within the constraint of the lowest 240) is most heavily used.
9437 But without that complication, simply bias the frame pointer
9438 by 128 bytes so as to maximize the amount of the local stack
9439 frame that is addressable with 8-bit offsets. */
9440 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
9441 }
9442 }
9443 }
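
/* A rough picture of the layout computed above, from higher toward lower
   addresses (a hand-drawn sketch only; bracketed areas may be empty and
   each offset names the boundary at or just below the area it marks):

	return address
	[pushed static chain]
	[saved frame pointer]		<- hard_frame_pointer_offset
	integer register save area	<- reg_save_offset
	[SSE register save area]	<- sse_reg_save_offset
	[va-arg save area]
	local variables			<- frame_pointer_offset (top)
	[outgoing arguments]
					<- stack_pointer_offset  */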
9444
9445 /* This is semi-inlined memory_address_length, but simplified
9446 since we know that we're always dealing with reg+offset, and
9447 to avoid having to create and discard all that rtl. */
9448
9449 static inline int
9450 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
9451 {
9452 int len = 4;
9453
9454 if (offset == 0)
9455 {
9456 /* EBP and R13 cannot be encoded without an offset. */
9457 len = (regno == BP_REG || regno == R13_REG);
9458 }
9459 else if (IN_RANGE (offset, -128, 127))
9460 len = 1;
9461
9462 /* ESP and R12 must be encoded with a SIB byte. */
9463 if (regno == SP_REG || regno == R12_REG)
9464 len++;
9465
9466 return len;
9467 }
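
/* Illustrative cases of the rules above (a sketch): (%ebp) still needs a
   zero disp8, so BP_REG with offset 0 gives length 1; 8(%esp) needs a
   disp8 plus a SIB byte, giving 2; a base with an offset outside
   [-128, 127] costs 4 bytes, or 5 for %esp/%r12.  */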
9468
9469 /* Return an RTX that points to CFA_OFFSET within the stack frame.
9470 The valid base registers are taken from CFUN->MACHINE->FS. */
9471
9472 static rtx
9473 choose_baseaddr (HOST_WIDE_INT cfa_offset)
9474 {
9475 const struct machine_function *m = cfun->machine;
9476 rtx base_reg = NULL;
9477 HOST_WIDE_INT base_offset = 0;
9478
9479 if (m->use_fast_prologue_epilogue)
9480 {
9481       /* Choose the base register most likely to allow the most scheduling
9482 	 opportunities.  Generally FP is valid throughout the function,
9483 	 while DRAP must be reloaded within the epilogue.  But prefer either
9484 	 over SP, whose addressing needs a larger encoding (a SIB byte).  */
9485
9486 if (m->fs.fp_valid)
9487 {
9488 base_reg = hard_frame_pointer_rtx;
9489 base_offset = m->fs.fp_offset - cfa_offset;
9490 }
9491 else if (m->fs.drap_valid)
9492 {
9493 base_reg = crtl->drap_reg;
9494 base_offset = 0 - cfa_offset;
9495 }
9496 else if (m->fs.sp_valid)
9497 {
9498 base_reg = stack_pointer_rtx;
9499 base_offset = m->fs.sp_offset - cfa_offset;
9500 }
9501 }
9502 else
9503 {
9504 HOST_WIDE_INT toffset;
9505 int len = 16, tlen;
9506
9507 /* Choose the base register with the smallest address encoding.
9508 With a tie, choose FP > DRAP > SP. */
9509 if (m->fs.sp_valid)
9510 {
9511 base_reg = stack_pointer_rtx;
9512 base_offset = m->fs.sp_offset - cfa_offset;
9513 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
9514 }
9515 if (m->fs.drap_valid)
9516 {
9517 toffset = 0 - cfa_offset;
9518 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
9519 if (tlen <= len)
9520 {
9521 base_reg = crtl->drap_reg;
9522 base_offset = toffset;
9523 len = tlen;
9524 }
9525 }
9526 if (m->fs.fp_valid)
9527 {
9528 toffset = m->fs.fp_offset - cfa_offset;
9529 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
9530 if (tlen <= len)
9531 {
9532 base_reg = hard_frame_pointer_rtx;
9533 base_offset = toffset;
9534 len = tlen;
9535 }
9536 }
9537 }
9538 gcc_assert (base_reg != NULL);
9539
9540 return plus_constant (base_reg, base_offset);
9541 }
9542
9543 /* Emit code to save registers in the prologue. */
9544
9545 static void
9546 ix86_emit_save_regs (void)
9547 {
9548 unsigned int regno;
9549 rtx insn;
9550
9551 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
9552 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9553 {
9554 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
9555 RTX_FRAME_RELATED_P (insn) = 1;
9556 }
9557 }
9558
9559 /* Emit a single register save at CFA - CFA_OFFSET. */
9560
9561 static void
9562 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
9563 HOST_WIDE_INT cfa_offset)
9564 {
9565 struct machine_function *m = cfun->machine;
9566 rtx reg = gen_rtx_REG (mode, regno);
9567 rtx mem, addr, base, insn;
9568
9569 addr = choose_baseaddr (cfa_offset);
9570 mem = gen_frame_mem (mode, addr);
9571
9572 /* For SSE saves, we need to indicate the 128-bit alignment. */
9573 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9574
9575 insn = emit_move_insn (mem, reg);
9576 RTX_FRAME_RELATED_P (insn) = 1;
9577
9578 base = addr;
9579 if (GET_CODE (base) == PLUS)
9580 base = XEXP (base, 0);
9581 gcc_checking_assert (REG_P (base));
9582
9583 /* When saving registers into a re-aligned local stack frame, avoid
9584 any tricky guessing by dwarf2out. */
9585 if (m->fs.realigned)
9586 {
9587 gcc_checking_assert (stack_realign_drap);
9588
9589 if (regno == REGNO (crtl->drap_reg))
9590 {
9591 /* A bit of a hack. We force the DRAP register to be saved in
9592 the re-aligned stack frame, which provides us with a copy
9593 of the CFA that will last past the prologue. Install it. */
9594 gcc_checking_assert (cfun->machine->fs.fp_valid);
9595 addr = plus_constant (hard_frame_pointer_rtx,
9596 cfun->machine->fs.fp_offset - cfa_offset);
9597 mem = gen_rtx_MEM (mode, addr);
9598 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9599 }
9600 else
9601 {
9602 /* The frame pointer is a stable reference within the
9603 aligned frame. Use it. */
9604 gcc_checking_assert (cfun->machine->fs.fp_valid);
9605 addr = plus_constant (hard_frame_pointer_rtx,
9606 cfun->machine->fs.fp_offset - cfa_offset);
9607 mem = gen_rtx_MEM (mode, addr);
9608 add_reg_note (insn, REG_CFA_EXPRESSION,
9609 gen_rtx_SET (VOIDmode, mem, reg));
9610 }
9611 }
9612
9613 /* The memory may not be relative to the current CFA register,
9614 which means that we may need to generate a new pattern for
9615 use by the unwind info. */
9616 else if (base != m->fs.cfa_reg)
9617 {
9618 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9619 mem = gen_rtx_MEM (mode, addr);
9620 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9621 }
9622 }
9623
9624 /* Emit code to save registers using MOV insns.
9625 First register is stored at CFA - CFA_OFFSET. */
9626 static void
9627 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9628 {
9629 unsigned int regno;
9630
9631 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9632 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9633 {
9634 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9635 cfa_offset -= UNITS_PER_WORD;
9636 }
9637 }
9638
9639 /* Emit code to save SSE registers using MOV insns.
9640 First register is stored at CFA - CFA_OFFSET. */
9641 static void
9642 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9643 {
9644 unsigned int regno;
9645
9646 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9647 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9648 {
9649 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9650 cfa_offset -= 16;
9651 }
9652 }
9653
9654 static GTY(()) rtx queued_cfa_restores;
9655
9656 /* Add a REG_CFA_RESTORE note for REG to INSN, or queue it until the next
9657    stack manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
9658    Don't add the note if the previously saved value will be left untouched
9659    within the stack red zone until return, as unwinders can find the same
9660    value in the register and on the stack.  */
9661
9662 static void
9663 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9664 {
9665 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
9666 return;
9667
9668 if (insn)
9669 {
9670 add_reg_note (insn, REG_CFA_RESTORE, reg);
9671 RTX_FRAME_RELATED_P (insn) = 1;
9672 }
9673 else
9674 queued_cfa_restores
9675 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9676 }
9677
9678 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
9679
9680 static void
9681 ix86_add_queued_cfa_restore_notes (rtx insn)
9682 {
9683 rtx last;
9684 if (!queued_cfa_restores)
9685 return;
9686 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9687 ;
9688 XEXP (last, 1) = REG_NOTES (insn);
9689 REG_NOTES (insn) = queued_cfa_restores;
9690 queued_cfa_restores = NULL_RTX;
9691 RTX_FRAME_RELATED_P (insn) = 1;
9692 }
9693
9694 /* Expand a prologue or epilogue stack adjustment.
9695    The pattern exists to put a dependency on all ebp-based memory accesses.
9696    STYLE should be negative if instructions should be marked as frame
9697    related, zero if the %r11 register is live and cannot be freely used,
9698    and positive otherwise.  */
9699
9700 static void
9701 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9702 int style, bool set_cfa)
9703 {
9704 struct machine_function *m = cfun->machine;
9705 rtx insn;
9706 bool add_frame_related_expr = false;
9707
9708 if (! TARGET_64BIT)
9709 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9710 else if (x86_64_immediate_operand (offset, DImode))
9711 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9712 else
9713 {
9714 rtx tmp;
9715 /* r11 is used by indirect sibcall return as well, set before the
9716 epilogue and used after the epilogue. */
9717 if (style)
9718 tmp = gen_rtx_REG (DImode, R11_REG);
9719 else
9720 {
9721 gcc_assert (src != hard_frame_pointer_rtx
9722 && dest != hard_frame_pointer_rtx);
9723 tmp = hard_frame_pointer_rtx;
9724 }
9725 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9726 if (style < 0)
9727 add_frame_related_expr = true;
9728
9729 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9730 }
9731
9732 insn = emit_insn (insn);
9733 if (style >= 0)
9734 ix86_add_queued_cfa_restore_notes (insn);
9735
9736 if (set_cfa)
9737 {
9738 rtx r;
9739
9740 gcc_assert (m->fs.cfa_reg == src);
9741 m->fs.cfa_offset += INTVAL (offset);
9742 m->fs.cfa_reg = dest;
9743
9744 r = gen_rtx_PLUS (Pmode, src, offset);
9745 r = gen_rtx_SET (VOIDmode, dest, r);
9746 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9747 RTX_FRAME_RELATED_P (insn) = 1;
9748 }
9749 else if (style < 0)
9750 {
9751 RTX_FRAME_RELATED_P (insn) = 1;
9752 if (add_frame_related_expr)
9753 {
9754 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9755 r = gen_rtx_SET (VOIDmode, dest, r);
9756 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9757 }
9758 }
9759
9760 if (dest == stack_pointer_rtx)
9761 {
9762 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9763 bool valid = m->fs.sp_valid;
9764
9765 if (src == hard_frame_pointer_rtx)
9766 {
9767 valid = m->fs.fp_valid;
9768 ooffset = m->fs.fp_offset;
9769 }
9770 else if (src == crtl->drap_reg)
9771 {
9772 valid = m->fs.drap_valid;
9773 ooffset = 0;
9774 }
9775 else
9776 {
9777 /* Else there are two possibilities: SP itself, which we set
9778 	     up as the default above, or EH_RETURN_STACKADJ_RTX, which is
9779 	     taken care of by hand along the eh_return path.  */
9780 gcc_checking_assert (src == stack_pointer_rtx
9781 || offset == const0_rtx);
9782 }
9783
9784 m->fs.sp_offset = ooffset - INTVAL (offset);
9785 m->fs.sp_valid = valid;
9786 }
9787 }
9788
9789 /* Find an available register to be used as the dynamic realign argument
9790    pointer register.  Such a register will be written in the prologue and
9791    used at the beginning of the body, so it must not be
9792 	1. a parameter passing register.
9793 	2. the GOT pointer.
9794    We reuse the static-chain register if it is available.  Otherwise we
9795    use DI for i386 and R13 for x86-64; we chose R13 since it has a
9796    shorter encoding.
9797
9798    Return: the regno of the chosen register.  */
9799
9800 static unsigned int
9801 find_drap_reg (void)
9802 {
9803 tree decl = cfun->decl;
9804
9805 if (TARGET_64BIT)
9806 {
9807       /* Use R13 for a nested function or a function that needs a static
9808 	 chain.  Since a function with a tail call may use any caller-saved
9809 	 register in the epilogue, the DRAP must not use a caller-saved
9810 	 register in that case.  */
9811 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9812 return R13_REG;
9813
9814 return R10_REG;
9815 }
9816 else
9817 {
9818       /* Use DI for a nested function or a function that needs a static
9819 	 chain.  Since a function with a tail call may use any caller-saved
9820 	 register in the epilogue, the DRAP must not use a caller-saved
9821 	 register in that case.  */
9822 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9823 return DI_REG;
9824
9825 /* Reuse static chain register if it isn't used for parameter
9826 passing. */
9827 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
9828 {
9829 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
9830 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
9831 return CX_REG;
9832 }
9833 return DI_REG;
9834 }
9835 }
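
/* For illustration: an ordinary 64-bit function gets %r10 as the DRAP,
   a 64-bit nested function (or one that emits tail calls) gets %r13,
   and a plain 32-bit function with at most two register arguments and
   no fastcall/thiscall convention reuses %ecx, falling back to %edi
   otherwise.  */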
9836
9837 /* Return minimum incoming stack alignment. */
9838
9839 static unsigned int
9840 ix86_minimum_incoming_stack_boundary (bool sibcall)
9841 {
9842 unsigned int incoming_stack_boundary;
9843
9844 /* Prefer the one specified at command line. */
9845 if (ix86_user_incoming_stack_boundary)
9846 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9847   /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
9848      if -mstackrealign is used, this is not a sibcall check, and the
9849      estimated stack alignment is 128 bits.  */
9850 else if (!sibcall
9851 && !TARGET_64BIT
9852 && ix86_force_align_arg_pointer
9853 && crtl->stack_alignment_estimated == 128)
9854 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9855 else
9856 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9857
9858 /* Incoming stack alignment can be changed on individual functions
9859 via force_align_arg_pointer attribute. We use the smallest
9860 incoming stack boundary. */
9861 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9862 && lookup_attribute (ix86_force_align_arg_pointer_string,
9863 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9864 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9865
9866 /* The incoming stack frame has to be aligned at least at
9867 parm_stack_boundary. */
9868 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9869 incoming_stack_boundary = crtl->parm_stack_boundary;
9870
9871   /* The stack at the entry of main is aligned by the runtime.  We use the
9872 smallest incoming stack boundary. */
9873 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9874 && DECL_NAME (current_function_decl)
9875 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9876 && DECL_FILE_SCOPE_P (current_function_decl))
9877 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9878
9879 return incoming_stack_boundary;
9880 }
9881
9882 /* Update incoming stack boundary and estimated stack alignment. */
9883
9884 static void
9885 ix86_update_stack_boundary (void)
9886 {
9887 ix86_incoming_stack_boundary
9888 = ix86_minimum_incoming_stack_boundary (false);
9889
9890   /* An x86_64 vararg function needs 16-byte stack alignment for its register save
9891 area. */
9892 if (TARGET_64BIT
9893 && cfun->stdarg
9894 && crtl->stack_alignment_estimated < 128)
9895 crtl->stack_alignment_estimated = 128;
9896 }
9897
9898 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9899 needed or an rtx for DRAP otherwise. */
9900
9901 static rtx
9902 ix86_get_drap_rtx (void)
9903 {
9904 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9905 crtl->need_drap = true;
9906
9907 if (stack_realign_drap)
9908 {
9909       /* Assign DRAP to vDRAP and return vDRAP.  */
9910 unsigned int regno = find_drap_reg ();
9911 rtx drap_vreg;
9912 rtx arg_ptr;
9913 rtx seq, insn;
9914
9915 arg_ptr = gen_rtx_REG (Pmode, regno);
9916 crtl->drap_reg = arg_ptr;
9917
9918 start_sequence ();
9919 drap_vreg = copy_to_reg (arg_ptr);
9920 seq = get_insns ();
9921 end_sequence ();
9922
9923 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9924 if (!optimize)
9925 {
9926 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9927 RTX_FRAME_RELATED_P (insn) = 1;
9928 }
9929 return drap_vreg;
9930 }
9931 else
9932 return NULL;
9933 }
9934
9935 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9936
9937 static rtx
9938 ix86_internal_arg_pointer (void)
9939 {
9940 return virtual_incoming_args_rtx;
9941 }
9942
9943 struct scratch_reg {
9944 rtx reg;
9945 bool saved;
9946 };
9947
9948 /* Return a short-lived scratch register for use on function entry.
9949 In 32-bit mode, it is valid only after the registers are saved
9950 in the prologue. This register must be released by means of
9951 release_scratch_register_on_entry once it is dead. */
9952
9953 static void
9954 get_scratch_register_on_entry (struct scratch_reg *sr)
9955 {
9956 int regno;
9957
9958 sr->saved = false;
9959
9960 if (TARGET_64BIT)
9961 {
9962 /* We always use R11 in 64-bit mode. */
9963 regno = R11_REG;
9964 }
9965 else
9966 {
9967 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9968 bool fastcall_p
9969 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9970 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9971 int regparm = ix86_function_regparm (fntype, decl);
9972 int drap_regno
9973 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9974
9975 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9976 for the static chain register. */
9977 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9978 && drap_regno != AX_REG)
9979 regno = AX_REG;
9980 else if (regparm < 2 && drap_regno != DX_REG)
9981 regno = DX_REG;
9982 /* ecx is the static chain register. */
9983 else if (regparm < 3 && !fastcall_p && !static_chain_p
9984 && drap_regno != CX_REG)
9985 regno = CX_REG;
9986 else if (ix86_save_reg (BX_REG, true))
9987 regno = BX_REG;
9988 /* esi is the static chain register. */
9989 else if (!(regparm == 3 && static_chain_p)
9990 && ix86_save_reg (SI_REG, true))
9991 regno = SI_REG;
9992 else if (ix86_save_reg (DI_REG, true))
9993 regno = DI_REG;
9994 else
9995 {
9996 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
9997 sr->saved = true;
9998 }
9999 }
10000
10001 sr->reg = gen_rtx_REG (Pmode, regno);
10002 if (sr->saved)
10003 {
10004 rtx insn = emit_insn (gen_push (sr->reg));
10005 RTX_FRAME_RELATED_P (insn) = 1;
10006 }
10007 }
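
/* For example (illustration only): in 64-bit mode the scratch register is
   always %r11; in a plain 32-bit function with no register arguments and
   no DRAP in %eax it is %eax, and only when every candidate is unavailable
   is a register pushed here and popped again on release.  */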
10008
10009 /* Release a scratch register obtained from the preceding function. */
10010
10011 static void
10012 release_scratch_register_on_entry (struct scratch_reg *sr)
10013 {
10014 if (sr->saved)
10015 {
10016 rtx x, insn = emit_insn (gen_pop (sr->reg));
10017
10018 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
10019 RTX_FRAME_RELATED_P (insn) = 1;
10020 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
10021 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10022 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
10023 }
10024 }
10025
10026 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
10027
10028 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
10029
10030 static void
10031 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
10032 {
10033 /* We skip the probe for the first interval + a small dope of 4 words and
10034 probe that many bytes past the specified size to maintain a protection
10035      area at the bottom of the stack.  */
10036 const int dope = 4 * UNITS_PER_WORD;
10037 rtx size_rtx = GEN_INT (size), last;
10038
10039 /* See if we have a constant small number of probes to generate. If so,
10040 that's the easy case. The run-time loop is made up of 11 insns in the
10041 generic case while the compile-time loop is made up of 3+2*(n-1) insns
10042 for n # of intervals. */
10043 if (size <= 5 * PROBE_INTERVAL)
10044 {
10045 HOST_WIDE_INT i, adjust;
10046 bool first_probe = true;
10047
10048 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
10049 values of N from 1 until it exceeds SIZE. If only one probe is
10050 needed, this will not generate any code. Then adjust and probe
10051 to PROBE_INTERVAL + SIZE. */
10052 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10053 {
10054 if (first_probe)
10055 {
10056 adjust = 2 * PROBE_INTERVAL + dope;
10057 first_probe = false;
10058 }
10059 else
10060 adjust = PROBE_INTERVAL;
10061
10062 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10063 plus_constant (stack_pointer_rtx, -adjust)));
10064 emit_stack_probe (stack_pointer_rtx);
10065 }
10066
10067 if (first_probe)
10068 adjust = size + PROBE_INTERVAL + dope;
10069 else
10070 adjust = size + PROBE_INTERVAL - i;
10071
10072 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10073 plus_constant (stack_pointer_rtx, -adjust)));
10074 emit_stack_probe (stack_pointer_rtx);
10075
10076 /* Adjust back to account for the additional first interval. */
10077 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10078 plus_constant (stack_pointer_rtx,
10079 PROBE_INTERVAL + dope)));
10080 }
10081
10082 /* Otherwise, do the same as above, but in a loop. Note that we must be
10083 extra careful with variables wrapping around because we might be at
10084 the very top (or the very bottom) of the address space and we have
10085 to be able to handle this case properly; in particular, we use an
10086 equality test for the loop condition. */
10087 else
10088 {
10089 HOST_WIDE_INT rounded_size;
10090 struct scratch_reg sr;
10091
10092 get_scratch_register_on_entry (&sr);
10093
10094
10095 /* Step 1: round SIZE to the previous multiple of the interval. */
10096
10097 rounded_size = size & -PROBE_INTERVAL;
10098
10099
10100 /* Step 2: compute initial and final value of the loop counter. */
10101
10102 /* SP = SP_0 + PROBE_INTERVAL. */
10103 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10104 plus_constant (stack_pointer_rtx,
10105 - (PROBE_INTERVAL + dope))));
10106
10107 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
10108 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
10109 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
10110 gen_rtx_PLUS (Pmode, sr.reg,
10111 stack_pointer_rtx)));
10112
10113
10114 /* Step 3: the loop
10115
10116 while (SP != LAST_ADDR)
10117 {
10118 SP = SP + PROBE_INTERVAL
10119 probe at SP
10120 }
10121
10122 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
10123 values of N from 1 until it is equal to ROUNDED_SIZE. */
10124
10125 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
10126
10127
10128 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
10129 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
10130
10131 if (size != rounded_size)
10132 {
10133 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10134 plus_constant (stack_pointer_rtx,
10135 rounded_size - size)));
10136 emit_stack_probe (stack_pointer_rtx);
10137 }
10138
10139 /* Adjust back to account for the additional first interval. */
10140 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10141 plus_constant (stack_pointer_rtx,
10142 PROBE_INTERVAL + dope)));
10143
10144 release_scratch_register_on_entry (&sr);
10145 }
10146
10147 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
10148
10149 /* Even if the stack pointer isn't the CFA register, we need to correctly
10150 describe the adjustments made to it, in particular differentiate the
10151 frame-related ones from the frame-unrelated ones. */
10152 if (size > 0)
10153 {
10154 rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
10155 XVECEXP (expr, 0, 0)
10156 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10157 plus_constant (stack_pointer_rtx, -size));
10158 XVECEXP (expr, 0, 1)
10159 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10160 plus_constant (stack_pointer_rtx,
10161 PROBE_INTERVAL + dope + size));
10162 add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
10163 RTX_FRAME_RELATED_P (last) = 1;
10164
10165 cfun->machine->fs.sp_offset += size;
10166 }
10167
10168 /* Make sure nothing is scheduled before we are done. */
10169 emit_insn (gen_blockage ());
10170 }
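
/* As an illustrative example (assuming PROBE_INTERVAL == 4096, 64-bit
   words, and an or-based probe), a request of SIZE == 8192 expands
   roughly to:

	sub	$0x2020, %rsp	; 2*PROBE_INTERVAL + dope
	or	$0, (%rsp)
	sub	$0x1000, %rsp
	or	$0, (%rsp)
	add	$0x1020, %rsp	; give back PROBE_INTERVAL + dope

   leaving the stack pointer exactly 8192 bytes lower than on entry.  */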
10171
10172 /* Output code that adjusts the stack pointer down to REG while probing it.  */
10173
10174 const char *
10175 output_adjust_stack_and_probe (rtx reg)
10176 {
10177 static int labelno = 0;
10178 char loop_lab[32], end_lab[32];
10179 rtx xops[2];
10180
10181 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10182 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10183
10184 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10185
10186 /* Jump to END_LAB if SP == LAST_ADDR. */
10187 xops[0] = stack_pointer_rtx;
10188 xops[1] = reg;
10189 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10190 fputs ("\tje\t", asm_out_file);
10191 assemble_name_raw (asm_out_file, end_lab);
10192 fputc ('\n', asm_out_file);
10193
10194 /* SP = SP + PROBE_INTERVAL. */
10195 xops[1] = GEN_INT (PROBE_INTERVAL);
10196 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10197
10198 /* Probe at SP. */
10199 xops[1] = const0_rtx;
10200 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
10201
10202 fprintf (asm_out_file, "\tjmp\t");
10203 assemble_name_raw (asm_out_file, loop_lab);
10204 fputc ('\n', asm_out_file);
10205
10206 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10207
10208 return "";
10209 }
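
/* The loop emitted above looks roughly like this (illustrative 32-bit
   AT&T syntax, with %eax standing in for the LAST_ADDR register):

	.LPSRL0:
		cmpl	%eax, %esp
		je	.LPSRE0
		subl	$4096, %esp
		orl	$0, (%esp)
		jmp	.LPSRL0
	.LPSRE0:
   */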
10210
10211 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
10212 inclusive. These are offsets from the current stack pointer. */
10213
10214 static void
10215 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
10216 {
10217 /* See if we have a constant small number of probes to generate. If so,
10218 that's the easy case. The run-time loop is made up of 7 insns in the
10219 generic case while the compile-time loop is made up of n insns for n #
10220 of intervals. */
10221 if (size <= 7 * PROBE_INTERVAL)
10222 {
10223 HOST_WIDE_INT i;
10224
10225 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
10226 it exceeds SIZE. If only one probe is needed, this will not
10227 generate any code. Then probe at FIRST + SIZE. */
10228 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10229 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
10230
10231 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
10232 }
10233
10234 /* Otherwise, do the same as above, but in a loop. Note that we must be
10235 extra careful with variables wrapping around because we might be at
10236 the very top (or the very bottom) of the address space and we have
10237 to be able to handle this case properly; in particular, we use an
10238 equality test for the loop condition. */
10239 else
10240 {
10241 HOST_WIDE_INT rounded_size, last;
10242 struct scratch_reg sr;
10243
10244 get_scratch_register_on_entry (&sr);
10245
10246
10247 /* Step 1: round SIZE to the previous multiple of the interval. */
10248
10249 rounded_size = size & -PROBE_INTERVAL;
10250
10251
10252 /* Step 2: compute initial and final value of the loop counter. */
10253
10254 /* TEST_OFFSET = FIRST. */
10255 emit_move_insn (sr.reg, GEN_INT (-first));
10256
10257 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
10258 last = first + rounded_size;
10259
10260
10261 /* Step 3: the loop
10262
10263 while (TEST_ADDR != LAST_ADDR)
10264 {
10265 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
10266 probe at TEST_ADDR
10267 }
10268
10269 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
10270 until it is equal to ROUNDED_SIZE. */
10271
10272 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
10273
10274
10275 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
10276 that SIZE is equal to ROUNDED_SIZE. */
10277
10278 if (size != rounded_size)
10279 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
10280 stack_pointer_rtx,
10281 sr.reg),
10282 rounded_size - size));
10283
10284 release_scratch_register_on_entry (&sr);
10285 }
10286
10287 /* Make sure nothing is scheduled before we are done. */
10288 emit_insn (gen_blockage ());
10289 }
10290
10291 /* Probe a range of stack addresses from REG to END, inclusive. These are
10292 offsets from the current stack pointer. */
10293
10294 const char *
10295 output_probe_stack_range (rtx reg, rtx end)
10296 {
10297 static int labelno = 0;
10298 char loop_lab[32], end_lab[32];
10299 rtx xops[3];
10300
10301 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10302 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10303
10304 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10305
10306 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
10307 xops[0] = reg;
10308 xops[1] = end;
10309 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10310 fputs ("\tje\t", asm_out_file);
10311 assemble_name_raw (asm_out_file, end_lab);
10312 fputc ('\n', asm_out_file);
10313
10314 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
10315 xops[1] = GEN_INT (PROBE_INTERVAL);
10316 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10317
10318 /* Probe at TEST_ADDR. */
10319 xops[0] = stack_pointer_rtx;
10320 xops[1] = reg;
10321 xops[2] = const0_rtx;
10322 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
10323
10324 fprintf (asm_out_file, "\tjmp\t");
10325 assemble_name_raw (asm_out_file, loop_lab);
10326 fputc ('\n', asm_out_file);
10327
10328 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10329
10330 return "";
10331 }
10332
10333 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
10334 to be generated in correct form. */
10335 static void
10336 ix86_finalize_stack_realign_flags (void)
10337 {
10338   /* Check whether stack realignment is really needed after reload, and
10339      store the result in cfun.  */
10340 unsigned int incoming_stack_boundary
10341 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
10342 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
10343 unsigned int stack_realign = (incoming_stack_boundary
10344 < (current_function_is_leaf
10345 ? crtl->max_used_stack_slot_alignment
10346 : crtl->stack_alignment_needed));
10347
10348 if (crtl->stack_realign_finalized)
10349 {
10350       /* After stack_realign_needed is finalized, we can no longer
10351 	 change it.  */
10352 gcc_assert (crtl->stack_realign_needed == stack_realign);
10353 }
10354 else
10355 {
10356 crtl->stack_realign_needed = stack_realign;
10357 crtl->stack_realign_finalized = true;
10358 }
10359 }
10360
10361 /* Expand the prologue into a bunch of separate insns. */
10362
10363 void
10364 ix86_expand_prologue (void)
10365 {
10366 struct machine_function *m = cfun->machine;
10367 rtx insn, t;
10368 bool pic_reg_used;
10369 struct ix86_frame frame;
10370 HOST_WIDE_INT allocate;
10371 bool int_registers_saved;
10372
10373 ix86_finalize_stack_realign_flags ();
10374
10375 /* DRAP should not coexist with stack_realign_fp */
10376 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
10377
10378 memset (&m->fs, 0, sizeof (m->fs));
10379
10380 /* Initialize CFA state for before the prologue. */
10381 m->fs.cfa_reg = stack_pointer_rtx;
10382 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
10383
10384 /* Track SP offset to the CFA. We continue tracking this after we've
10385 swapped the CFA register away from SP. In the case of re-alignment
10386      this is fudged; we're interested in offsets within the local frame.  */
10387 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10388 m->fs.sp_valid = true;
10389
10390 ix86_compute_frame_layout (&frame);
10391
10392 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
10393 {
10394 /* We should have already generated an error for any use of
10395 ms_hook on a nested function. */
10396 gcc_checking_assert (!ix86_static_chain_on_stack);
10397
10398       /* Check whether profiling is active and we should use the
10399 	 profiling-before-prologue variant.  If so, issue a sorry.  */
10400 if (crtl->profile && flag_fentry != 0)
10401 sorry ("ms_hook_prologue attribute isn%'t compatible "
10402 "with -mfentry for 32-bit");
10403
10404 /* In ix86_asm_output_function_label we emitted:
10405 8b ff movl.s %edi,%edi
10406 55 push %ebp
10407 8b ec movl.s %esp,%ebp
10408
10409 This matches the hookable function prologue in Win32 API
10410 functions in Microsoft Windows XP Service Pack 2 and newer.
10411 Wine uses this to enable Windows apps to hook the Win32 API
10412 functions provided by Wine.
10413
10414 What that means is that we've already set up the frame pointer. */
10415
10416 if (frame_pointer_needed
10417 && !(crtl->drap_reg && crtl->stack_realign_needed))
10418 {
10419 rtx push, mov;
10420
10421 /* We've decided to use the frame pointer already set up.
10422 Describe this to the unwinder by pretending that both
10423 push and mov insns happen right here.
10424
10425 Putting the unwind info here at the end of the ms_hook
10426 is done so that we can make absolutely certain we get
10427 the required byte sequence at the start of the function,
10428 rather than relying on an assembler that can produce
10429 the exact encoding required.
10430
10431 However it does mean (in the unpatched case) that we have
10432 a 1 insn window where the asynchronous unwind info is
10433 incorrect. However, if we placed the unwind info at
10434 its correct location we would have incorrect unwind info
10435 in the patched case. Which is probably all moot since
10436 I don't expect Wine generates dwarf2 unwind info for the
10437 system libraries that use this feature. */
10438
10439 insn = emit_insn (gen_blockage ());
10440
10441 push = gen_push (hard_frame_pointer_rtx);
10442 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
10443 stack_pointer_rtx);
10444 RTX_FRAME_RELATED_P (push) = 1;
10445 RTX_FRAME_RELATED_P (mov) = 1;
10446
10447 RTX_FRAME_RELATED_P (insn) = 1;
10448 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10449 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
10450
10451 /* Note that gen_push incremented m->fs.cfa_offset, even
10452 though we didn't emit the push insn here. */
10453 m->fs.cfa_reg = hard_frame_pointer_rtx;
10454 m->fs.fp_offset = m->fs.cfa_offset;
10455 m->fs.fp_valid = true;
10456 }
10457 else
10458 {
10459 /* The frame pointer is not needed so pop %ebp again.
10460 This leaves us with a pristine state. */
10461 emit_insn (gen_pop (hard_frame_pointer_rtx));
10462 }
10463 }
10464
10465 /* The first insn of a function that accepts its static chain on the
10466 stack is to push the register that would be filled in by a direct
10467 call. This insn will be skipped by the trampoline. */
10468 else if (ix86_static_chain_on_stack)
10469 {
10470 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
10471 emit_insn (gen_blockage ());
10472
10473 /* We don't want to interpret this push insn as a register save,
10474 only as a stack adjustment. The real copy of the register as
10475 a save will be done later, if needed. */
10476 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
10477 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
10478 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
10479 RTX_FRAME_RELATED_P (insn) = 1;
10480 }
10481
10482 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
10483 DRAP is needed and stack realignment is really needed after reload. */
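/* A rough sketch of the stack after this block, assuming the DRAP
   register is call-saved (slot order and padding are illustrative only):

       incoming stack arguments
       return address              <- still reachable at (drap - UNITS_PER_WORD)
       [pushed DRAP register]
       ... alignment padding ...
       copy of the return address  <- new aligned stack top, so the realigned
                                      frame looks like a fresh function entry

   The DRAP register itself is loaded with the incoming argument pointer
   (the CFA) before the stack pointer is aligned. */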
10484 if (stack_realign_drap)
10485 {
10486 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10487
10488 /* Only need to push the parameter pointer reg if it is call-saved. */
10489 if (!call_used_regs[REGNO (crtl->drap_reg)])
10490 {
10491 /* Push arg pointer reg */
10492 insn = emit_insn (gen_push (crtl->drap_reg));
10493 RTX_FRAME_RELATED_P (insn) = 1;
10494 }
10495
10496 /* Grab the argument pointer. */
10497 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
10498 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10499 RTX_FRAME_RELATED_P (insn) = 1;
10500 m->fs.cfa_reg = crtl->drap_reg;
10501 m->fs.cfa_offset = 0;
10502
10503 /* Align the stack. */
10504 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10505 stack_pointer_rtx,
10506 GEN_INT (-align_bytes)));
10507 RTX_FRAME_RELATED_P (insn) = 1;
10508
10509 /* Replicate the return address on the stack so that return
10510 address can be reached via (argp - 1) slot. This is needed
10511 to implement macro RETURN_ADDR_RTX and intrinsic function
10512 expand_builtin_return_addr etc. */
10513 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
10514 t = gen_frame_mem (Pmode, t);
10515 insn = emit_insn (gen_push (t));
10516 RTX_FRAME_RELATED_P (insn) = 1;
10517
10518 /* For the purposes of frame and register save area addressing,
10519 we've started over with a new frame. */
10520 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10521 m->fs.realigned = true;
10522 }
10523
10524 if (frame_pointer_needed && !m->fs.fp_valid)
10525 {
10526 /* Note: AT&T enter does NOT have reversed args. Enter is probably
10527 slower on all targets. Also sdb doesn't like it. */
10528 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
10529 RTX_FRAME_RELATED_P (insn) = 1;
10530
10531 if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
10532 {
10533 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10534 RTX_FRAME_RELATED_P (insn) = 1;
10535
10536 if (m->fs.cfa_reg == stack_pointer_rtx)
10537 m->fs.cfa_reg = hard_frame_pointer_rtx;
10538 m->fs.fp_offset = m->fs.sp_offset;
10539 m->fs.fp_valid = true;
10540 }
10541 }
10542
10543 int_registers_saved = (frame.nregs == 0);
10544
10545 if (!int_registers_saved)
10546 {
10547 /* If saving registers via PUSH, do so now. */
10548 if (!frame.save_regs_using_mov)
10549 {
10550 ix86_emit_save_regs ();
10551 int_registers_saved = true;
10552 gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
10553 }
10554
10555 /* When using the red zone we may start saving registers before allocating
10556 the stack frame, saving one cycle of the prologue. However, avoid
10557 doing this if we have to probe the stack; at least on x86_64 the
10558 stack probe can turn into a call that clobbers a red zone location. */
10559 else if (ix86_using_red_zone ()
10560 && (! TARGET_STACK_PROBE
10561 || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
10562 {
10563 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10564 int_registers_saved = true;
10565 }
10566 }
10567
10568 if (stack_realign_fp)
10569 {
10570 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10571 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
10572
10573 /* The computation of the size of the re-aligned stack frame means
10574 that we must allocate the size of the register save area before
10575 performing the actual alignment. Otherwise we cannot guarantee
10576 that there's enough storage above the realignment point. */
10577 if (m->fs.sp_offset != frame.sse_reg_save_offset)
10578 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10579 GEN_INT (m->fs.sp_offset
10580 - frame.sse_reg_save_offset),
10581 -1, false);
10582
10583 /* Align the stack. */
10584 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10585 stack_pointer_rtx,
10586 GEN_INT (-align_bytes)));
10587
10588 /* For the purposes of register save area addressing, the stack
10589 pointer is no longer valid. As for the value of sp_offset,
10590 see ix86_compute_frame_layout, which we need to match in order
10591 to pass verification of stack_pointer_offset at the end. */
10592 m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
10593 m->fs.sp_valid = false;
10594 }
10595
10596 allocate = frame.stack_pointer_offset - m->fs.sp_offset;
10597
10598 if (flag_stack_usage)
10599 {
10600 /* We start to count from ARG_POINTER. */
10601 HOST_WIDE_INT stack_size = frame.stack_pointer_offset;
10602
10603 /* If it was realigned, take into account the fake frame. */
10604 if (stack_realign_drap)
10605 {
10606 if (ix86_static_chain_on_stack)
10607 stack_size += UNITS_PER_WORD;
10608
10609 if (!call_used_regs[REGNO (crtl->drap_reg)])
10610 stack_size += UNITS_PER_WORD;
10611
10612 /* This over-estimates by 1 minimal-stack-alignment-unit but
10613 mitigates that by counting in the new return address slot. */
10614 current_function_dynamic_stack_size
10615 += crtl->stack_alignment_needed / BITS_PER_UNIT;
10616 }
10617
10618 current_function_static_stack_size = stack_size;
10619 }
10620
10621 /* The stack has already been decremented by the instruction calling us
10622 so we need to probe unconditionally to preserve the protection area. */
10623 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
10624 {
10625 /* We expect the registers to be saved when probes are used. */
10626 gcc_assert (int_registers_saved);
10627
10628 if (STACK_CHECK_MOVING_SP)
10629 {
10630 ix86_adjust_stack_and_probe (allocate);
10631 allocate = 0;
10632 }
10633 else
10634 {
10635 HOST_WIDE_INT size = allocate;
10636
10637 if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
10638 size = 0x80000000 - STACK_CHECK_PROTECT - 1;
10639
10640 if (TARGET_STACK_PROBE)
10641 ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
10642 else
10643 ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
10644 }
10645 }
10646
10647 if (allocate == 0)
10648 ;
10649 else if (!ix86_target_stack_probe ()
10650 || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
10651 {
10652 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10653 GEN_INT (-allocate), -1,
10654 m->fs.cfa_reg == stack_pointer_rtx);
10655 }
10656 else
10657 {
10658 rtx eax = gen_rtx_REG (Pmode, AX_REG);
10659 rtx r10 = NULL;
10660 rtx (*adjust_stack_insn)(rtx, rtx, rtx);
10661
10662 bool eax_live = false;
10663 bool r10_live = false;
10664
10665 if (TARGET_64BIT)
10666 r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
10667 if (!TARGET_64BIT_MS_ABI)
10668 eax_live = ix86_eax_live_at_start_p ();
10669
10670 if (eax_live)
10671 {
10672 emit_insn (gen_push (eax));
10673 allocate -= UNITS_PER_WORD;
10674 }
10675 if (r10_live)
10676 {
10677 r10 = gen_rtx_REG (Pmode, R10_REG);
10678 emit_insn (gen_push (r10));
10679 allocate -= UNITS_PER_WORD;
10680 }
10681
10682 emit_move_insn (eax, GEN_INT (allocate));
10683 emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
10684
10685 /* Use the fact that AX still contains ALLOCATE. */
10686 adjust_stack_insn = (TARGET_64BIT
10687 ? gen_pro_epilogue_adjust_stack_di_sub
10688 : gen_pro_epilogue_adjust_stack_si_sub);
10689
10690 insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
10691 stack_pointer_rtx, eax));
10692
10693 /* Note that SEH directives need to continue tracking the stack
10694 pointer even after the frame pointer has been set up. */
10695 if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
10696 {
10697 if (m->fs.cfa_reg == stack_pointer_rtx)
10698 m->fs.cfa_offset += allocate;
10699
10700 RTX_FRAME_RELATED_P (insn) = 1;
10701 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10702 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10703 plus_constant (stack_pointer_rtx,
10704 -allocate)));
10705 }
10706 m->fs.sp_offset += allocate;
10707
10708 if (r10_live && eax_live)
10709 {
10710 t = choose_baseaddr (m->fs.sp_offset - allocate);
10711 emit_move_insn (r10, gen_frame_mem (Pmode, t));
10712 t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
10713 emit_move_insn (eax, gen_frame_mem (Pmode, t));
10714 }
10715 else if (eax_live || r10_live)
10716 {
10717 t = choose_baseaddr (m->fs.sp_offset - allocate);
10718 emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
10719 }
10720 }
10721 gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
10722
10723 /* If we haven't already set up the frame pointer, do so now. */
10724 if (frame_pointer_needed && !m->fs.fp_valid)
10725 {
10726 insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
10727 GEN_INT (frame.stack_pointer_offset
10728 - frame.hard_frame_pointer_offset));
10729 insn = emit_insn (insn);
10730 RTX_FRAME_RELATED_P (insn) = 1;
10731 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
10732
10733 if (m->fs.cfa_reg == stack_pointer_rtx)
10734 m->fs.cfa_reg = hard_frame_pointer_rtx;
10735 m->fs.fp_offset = frame.hard_frame_pointer_offset;
10736 m->fs.fp_valid = true;
10737 }
10738
10739 if (!int_registers_saved)
10740 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10741 if (frame.nsseregs)
10742 ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
10743
10744 pic_reg_used = false;
10745 if (pic_offset_table_rtx
10746 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
10747 || crtl->profile))
10748 {
10749 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
10750
10751 if (alt_pic_reg_used != INVALID_REGNUM)
10752 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
10753
10754 pic_reg_used = true;
10755 }
10756
10757 if (pic_reg_used)
10758 {
10759 if (TARGET_64BIT)
10760 {
10761 if (ix86_cmodel == CM_LARGE_PIC)
10762 {
10763 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
10764 rtx label = gen_label_rtx ();
10765 emit_label (label);
10766 LABEL_PRESERVE_P (label) = 1;
10767 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
10768 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
10769 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
10770 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
10771 pic_offset_table_rtx, tmp_reg));
10772 }
10773 else
10774 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
10775 }
10776 else
10777 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
10778 }
10779
10780 /* In the pic_reg_used case, make sure that the GOT load isn't deleted
10781 when mcount needs it. A blockage to avoid call movement across the
10782 mcount call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
10783 note. */
10784 if (crtl->profile && !flag_fentry && pic_reg_used)
10785 emit_insn (gen_prologue_use (pic_offset_table_rtx));
10786
10787 if (crtl->drap_reg && !crtl->stack_realign_needed)
10788 {
10789 /* vDRAP is set up, but after reload it turns out stack realignment
10790 isn't necessary; here we emit the prologue to set up DRAP
10791 without the stack realignment adjustment. */
10792 t = choose_baseaddr (0);
10793 emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10794 }
10795
10796 /* Prevent instructions from being scheduled into the register save push
10797 sequence when access to the redzone area is done through the frame pointer.
10798 The offset between the frame pointer and the stack pointer is calculated
10799 relative to the value of the stack pointer at the end of the function
10800 prologue, and moving instructions that access the redzone area via the frame
10801 pointer inside the push sequence violates this assumption. */
10802 if (frame_pointer_needed && frame.red_zone_size)
10803 emit_insn (gen_memory_blockage ());
10804
10805 /* Emit cld instruction if stringops are used in the function. */
10806 if (TARGET_CLD && ix86_current_function_needs_cld)
10807 emit_insn (gen_cld ());
10808
10809 /* SEH requires that the prologue end within 256 bytes of the start of
10810 the function. Prevent instruction schedules that would extend that. */
10811 if (TARGET_SEH)
10812 emit_insn (gen_blockage ());
10813 }
10814
10815 /* Emit code to restore REG using a POP insn. */
10816
10817 static void
10818 ix86_emit_restore_reg_using_pop (rtx reg)
10819 {
10820 struct machine_function *m = cfun->machine;
10821 rtx insn = emit_insn (gen_pop (reg));
10822
10823 ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
10824 m->fs.sp_offset -= UNITS_PER_WORD;
10825
10826 if (m->fs.cfa_reg == crtl->drap_reg
10827 && REGNO (reg) == REGNO (crtl->drap_reg))
10828 {
10829 /* Previously we'd represented the CFA as an expression
10830 like *(%ebp - 8). We've just popped that value from
10831 the stack, which means we need to reset the CFA to
10832 the drap register. This will remain until we restore
10833 the stack pointer. */
10834 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10835 RTX_FRAME_RELATED_P (insn) = 1;
10836
10837 /* This means that the DRAP register is valid for addressing too. */
10838 m->fs.drap_valid = true;
10839 return;
10840 }
10841
10842 if (m->fs.cfa_reg == stack_pointer_rtx)
10843 {
10844 rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
10845 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10846 add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
10847 RTX_FRAME_RELATED_P (insn) = 1;
10848
10849 m->fs.cfa_offset -= UNITS_PER_WORD;
10850 }
10851
10852 /* When the frame pointer is the CFA, and we pop it, we are
10853 swapping back to the stack pointer as the CFA. This happens
10854 for stack frames that don't allocate other data, so we assume
10855 the stack pointer is now pointing at the return address, i.e.
10856 the function entry state, which makes the offset be 1 word. */
10857 if (reg == hard_frame_pointer_rtx)
10858 {
10859 m->fs.fp_valid = false;
10860 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10861 {
10862 m->fs.cfa_reg = stack_pointer_rtx;
10863 m->fs.cfa_offset -= UNITS_PER_WORD;
10864
10865 add_reg_note (insn, REG_CFA_DEF_CFA,
10866 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10867 GEN_INT (m->fs.cfa_offset)));
10868 RTX_FRAME_RELATED_P (insn) = 1;
10869 }
10870 }
10871 }
10872
10873 /* Emit code to restore saved registers using POP insns. */
10874
10875 static void
10876 ix86_emit_restore_regs_using_pop (void)
10877 {
10878 unsigned int regno;
10879
10880 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10881 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
10882 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
10883 }
10884
10885 /* Emit code and notes for the LEAVE instruction. */
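/* leave is equivalent to "mov %ebp, %esp; pop %ebp" (or the %rbp/%rsp
   forms in 64-bit mode), so afterwards the stack pointer points just
   above the slot where the frame pointer was saved; the frame-state
   updates below record exactly that. */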
10886
10887 static void
10888 ix86_emit_leave (void)
10889 {
10890 struct machine_function *m = cfun->machine;
10891 rtx insn = emit_insn (ix86_gen_leave ());
10892
10893 ix86_add_queued_cfa_restore_notes (insn);
10894
10895 gcc_assert (m->fs.fp_valid);
10896 m->fs.sp_valid = true;
10897 m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
10898 m->fs.fp_valid = false;
10899
10900 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10901 {
10902 m->fs.cfa_reg = stack_pointer_rtx;
10903 m->fs.cfa_offset = m->fs.sp_offset;
10904
10905 add_reg_note (insn, REG_CFA_DEF_CFA,
10906 plus_constant (stack_pointer_rtx, m->fs.sp_offset));
10907 RTX_FRAME_RELATED_P (insn) = 1;
10908 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
10909 m->fs.fp_offset);
10910 }
10911 }
10912
10913 /* Emit code to restore saved registers using MOV insns.
10914 First register is restored from CFA - CFA_OFFSET. */
10915 static void
10916 ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
10917 int maybe_eh_return)
10918 {
10919 struct machine_function *m = cfun->machine;
10920 unsigned int regno;
10921
10922 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10923 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10924 {
10925 rtx reg = gen_rtx_REG (Pmode, regno);
10926 rtx insn, mem;
10927
10928 mem = choose_baseaddr (cfa_offset);
10929 mem = gen_frame_mem (Pmode, mem);
10930 insn = emit_move_insn (reg, mem);
10931
10932 if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
10933 {
10934 /* Previously we'd represented the CFA as an expression
10935 like *(%ebp - 8). We've just reloaded that value from
10936 the stack, which means we need to reset the CFA to
10937 the drap register. This will remain until we restore
10938 the stack pointer. */
10939 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10940 RTX_FRAME_RELATED_P (insn) = 1;
10941
10942 /* This means that the DRAP register is valid for addressing. */
10943 m->fs.drap_valid = true;
10944 }
10945 else
10946 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10947
10948 cfa_offset -= UNITS_PER_WORD;
10949 }
10950 }
10951
10952 /* Emit code to restore saved SSE registers using MOV insns.
10953 The first register is restored from CFA - CFA_OFFSET. */
10954 static void
10955 ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
10956 int maybe_eh_return)
10957 {
10958 unsigned int regno;
10959
10960 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10961 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10962 {
10963 rtx reg = gen_rtx_REG (V4SFmode, regno);
10964 rtx mem;
10965
10966 mem = choose_baseaddr (cfa_offset);
10967 mem = gen_rtx_MEM (V4SFmode, mem);
10968 set_mem_align (mem, 128);
10969 emit_move_insn (reg, mem);
10970
10971 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10972
10973 cfa_offset -= 16;
10974 }
10975 }
10976
10977 /* Restore function stack, frame, and registers. */
10978
10979 void
10980 ix86_expand_epilogue (int style)
10981 {
10982 struct machine_function *m = cfun->machine;
10983 struct machine_frame_state frame_state_save = m->fs;
10984 struct ix86_frame frame;
10985 bool restore_regs_via_mov;
10986 bool using_drap;
10987
10988 ix86_finalize_stack_realign_flags ();
10989 ix86_compute_frame_layout (&frame);
10990
10991 m->fs.sp_valid = (!frame_pointer_needed
10992 || (current_function_sp_is_unchanging
10993 && !stack_realign_fp));
10994 gcc_assert (!m->fs.sp_valid
10995 || m->fs.sp_offset == frame.stack_pointer_offset);
10996
10997 /* The FP must be valid if the frame pointer is present. */
10998 gcc_assert (frame_pointer_needed == m->fs.fp_valid);
10999 gcc_assert (!m->fs.fp_valid
11000 || m->fs.fp_offset == frame.hard_frame_pointer_offset);
11001
11002 /* We must have *some* valid pointer to the stack frame. */
11003 gcc_assert (m->fs.sp_valid || m->fs.fp_valid);
11004
11005 /* The DRAP is never valid at this point. */
11006 gcc_assert (!m->fs.drap_valid);
11007
11008 /* See the comment about red zone and frame
11009 pointer usage in ix86_expand_prologue. */
11010 if (frame_pointer_needed && frame.red_zone_size)
11011 emit_insn (gen_memory_blockage ());
11012
11013 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
11014 gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);
11015
11016 /* Determine the CFA offset of the end of the red-zone. */
11017 m->fs.red_zone_offset = 0;
11018 if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
11019 {
11020 /* The red-zone begins below the return address. */
11021 m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;
11022
11023 /* When the register save area is in the aligned portion of
11024 the stack, determine the maximum runtime displacement that
11025 matches up with the aligned frame. */
11026 if (stack_realign_drap)
11027 m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
11028 + UNITS_PER_WORD);
11029 }
11030
11031 /* Special care must be taken for the normal return case of a function
11032 using eh_return: the eax and edx registers are marked as saved, but
11033 not restored along this path. Adjust the save location to match. */
11034 if (crtl->calls_eh_return && style != 2)
11035 frame.reg_save_offset -= 2 * UNITS_PER_WORD;
11036
11037 /* EH_RETURN requires the use of moves to function properly. */
11038 if (crtl->calls_eh_return)
11039 restore_regs_via_mov = true;
11040 /* SEH requires the use of pops to identify the epilogue. */
11041 else if (TARGET_SEH)
11042 restore_regs_via_mov = false;
11043 /* If we're only restoring one register and sp is not valid, then
11044 use a move instruction to restore the register, since it's
11045 less work than reloading sp and popping the register. */
11046 else if (!m->fs.sp_valid && frame.nregs <= 1)
11047 restore_regs_via_mov = true;
11048 else if (TARGET_EPILOGUE_USING_MOVE
11049 && cfun->machine->use_fast_prologue_epilogue
11050 && (frame.nregs > 1
11051 || m->fs.sp_offset != frame.reg_save_offset))
11052 restore_regs_via_mov = true;
11053 else if (frame_pointer_needed
11054 && !frame.nregs
11055 && m->fs.sp_offset != frame.reg_save_offset)
11056 restore_regs_via_mov = true;
11057 else if (frame_pointer_needed
11058 && TARGET_USE_LEAVE
11059 && cfun->machine->use_fast_prologue_epilogue
11060 && frame.nregs == 1)
11061 restore_regs_via_mov = true;
11062 else
11063 restore_regs_via_mov = false;
11064
11065 if (restore_regs_via_mov || frame.nsseregs)
11066 {
11067 /* Ensure that the entire register save area is addressable via
11068 the stack pointer, if we will restore via sp. */
11069 if (TARGET_64BIT
11070 && m->fs.sp_offset > 0x7fffffff
11071 && !(m->fs.fp_valid || m->fs.drap_valid)
11072 && (frame.nsseregs + frame.nregs) != 0)
11073 {
11074 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11075 GEN_INT (m->fs.sp_offset
11076 - frame.sse_reg_save_offset),
11077 style,
11078 m->fs.cfa_reg == stack_pointer_rtx);
11079 }
11080 }
11081
11082 /* If there are any SSE registers to restore, then we have to do it
11083 via moves, since there's obviously no pop for SSE regs. */
11084 if (frame.nsseregs)
11085 ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
11086 style == 2);
11087
11088 if (restore_regs_via_mov)
11089 {
11090 rtx t;
11091
11092 if (frame.nregs)
11093 ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);
11094
11095 /* eh_return epilogues need %ecx added to the stack pointer. */
11096 if (style == 2)
11097 {
11098 rtx insn, sa = EH_RETURN_STACKADJ_RTX;
11099
11100 /* Stack align doesn't work with eh_return. */
11101 gcc_assert (!stack_realign_drap);
11102 /* Neither do regparm nested functions. */
11103 gcc_assert (!ix86_static_chain_on_stack);
11104
11105 if (frame_pointer_needed)
11106 {
11107 t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
11108 t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
11109 emit_insn (gen_rtx_SET (VOIDmode, sa, t));
11110
11111 t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
11112 insn = emit_move_insn (hard_frame_pointer_rtx, t);
11113
11114 /* Note that we use SA as a temporary CFA, as the return
11115 address is at the proper place relative to it. We
11116 pretend this happens at the FP restore insn because
11117 prior to this insn the FP would be stored at the wrong
11118 offset relative to SA, and after this insn we have no
11119 other reasonable register to use for the CFA. We don't
11120 bother resetting the CFA to the SP for the duration of
11121 the return insn. */
11122 add_reg_note (insn, REG_CFA_DEF_CFA,
11123 plus_constant (sa, UNITS_PER_WORD));
11124 ix86_add_queued_cfa_restore_notes (insn);
11125 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
11126 RTX_FRAME_RELATED_P (insn) = 1;
11127
11128 m->fs.cfa_reg = sa;
11129 m->fs.cfa_offset = UNITS_PER_WORD;
11130 m->fs.fp_valid = false;
11131
11132 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
11133 const0_rtx, style, false);
11134 }
11135 else
11136 {
11137 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
11138 t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
11139 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
11140 ix86_add_queued_cfa_restore_notes (insn);
11141
11142 gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
11143 if (m->fs.cfa_offset != UNITS_PER_WORD)
11144 {
11145 m->fs.cfa_offset = UNITS_PER_WORD;
11146 add_reg_note (insn, REG_CFA_DEF_CFA,
11147 plus_constant (stack_pointer_rtx,
11148 UNITS_PER_WORD));
11149 RTX_FRAME_RELATED_P (insn) = 1;
11150 }
11151 }
11152 m->fs.sp_offset = UNITS_PER_WORD;
11153 m->fs.sp_valid = true;
11154 }
11155 }
11156 else
11157 {
11158 /* SEH requires that the function end with (1) a stack adjustment
11159 if necessary, (2) a sequence of pops, and (3) a return or
11160 jump instruction. Prevent insns from the function body from
11161 being scheduled into this sequence. */
11162 if (TARGET_SEH)
11163 {
11164 /* Prevent a catch region from being adjacent to the standard
11165 epilogue sequence. Unfortunately, neither crtl->uses_eh_lsda nor
11166 several other flags that would be interesting to test are
11167 set up yet. */
11168 if (flag_non_call_exceptions)
11169 emit_insn (gen_nops (const1_rtx));
11170 else
11171 emit_insn (gen_blockage ());
11172 }
11173
11174 /* First step is to deallocate the stack frame so that we can
11175 pop the registers. */
11176 if (!m->fs.sp_valid)
11177 {
11178 pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
11179 GEN_INT (m->fs.fp_offset
11180 - frame.reg_save_offset),
11181 style, false);
11182 }
11183 else if (m->fs.sp_offset != frame.reg_save_offset)
11184 {
11185 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11186 GEN_INT (m->fs.sp_offset
11187 - frame.reg_save_offset),
11188 style,
11189 m->fs.cfa_reg == stack_pointer_rtx);
11190 }
11191
11192 ix86_emit_restore_regs_using_pop ();
11193 }
11194
11195 /* If we used a frame pointer and haven't already got rid of it,
11196 then do so now. */
11197 if (m->fs.fp_valid)
11198 {
11199 /* If the stack pointer is valid and pointing at the frame
11200 pointer store address, then we only need a pop. */
11201 if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
11202 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
11203 /* Leave results in shorter dependency chains on CPUs that are
11204 able to grok it fast. */
11205 else if (TARGET_USE_LEAVE
11206 || optimize_function_for_size_p (cfun)
11207 || !cfun->machine->use_fast_prologue_epilogue)
11208 ix86_emit_leave ();
11209 else
11210 {
11211 pro_epilogue_adjust_stack (stack_pointer_rtx,
11212 hard_frame_pointer_rtx,
11213 const0_rtx, style, !using_drap);
11214 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
11215 }
11216 }
11217
11218 if (using_drap)
11219 {
11220 int param_ptr_offset = UNITS_PER_WORD;
11221 rtx insn;
11222
11223 gcc_assert (stack_realign_drap);
11224
11225 if (ix86_static_chain_on_stack)
11226 param_ptr_offset += UNITS_PER_WORD;
11227 if (!call_used_regs[REGNO (crtl->drap_reg)])
11228 param_ptr_offset += UNITS_PER_WORD;
11229
11230 insn = emit_insn (gen_rtx_SET
11231 (VOIDmode, stack_pointer_rtx,
11232 gen_rtx_PLUS (Pmode,
11233 crtl->drap_reg,
11234 GEN_INT (-param_ptr_offset))));
11235 m->fs.cfa_reg = stack_pointer_rtx;
11236 m->fs.cfa_offset = param_ptr_offset;
11237 m->fs.sp_offset = param_ptr_offset;
11238 m->fs.realigned = false;
11239
11240 add_reg_note (insn, REG_CFA_DEF_CFA,
11241 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11242 GEN_INT (param_ptr_offset)));
11243 RTX_FRAME_RELATED_P (insn) = 1;
11244
11245 if (!call_used_regs[REGNO (crtl->drap_reg)])
11246 ix86_emit_restore_reg_using_pop (crtl->drap_reg);
11247 }
11248
11249 /* At this point the stack pointer must be valid, and we must have
11250 restored all of the registers. We may not have deallocated the
11251 entire stack frame. We've delayed this until now because it may
11252 be possible to merge the local stack deallocation with the
11253 deallocation forced by ix86_static_chain_on_stack. */
11254 gcc_assert (m->fs.sp_valid);
11255 gcc_assert (!m->fs.fp_valid);
11256 gcc_assert (!m->fs.realigned);
11257 if (m->fs.sp_offset != UNITS_PER_WORD)
11258 {
11259 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11260 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
11261 style, true);
11262 }
11263
11264 /* Sibcall epilogues don't want a return instruction. */
11265 if (style == 0)
11266 {
11267 m->fs = frame_state_save;
11268 return;
11269 }
11270
11271 /* Emit vzeroupper if needed. */
11272 if (TARGET_VZEROUPPER
11273 && !TREE_THIS_VOLATILE (cfun->decl)
11274 && !cfun->machine->caller_return_avx256_p)
11275 emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));
11276
11277 if (crtl->args.pops_args && crtl->args.size)
11278 {
11279 rtx popc = GEN_INT (crtl->args.pops_args);
11280
11281 /* i386 can only pop 64K bytes. If asked to pop more, pop return
11282 address, do explicit add, and jump indirectly to the caller. */
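/* The sequence emitted below for that case is essentially:

       popl %ecx          ; pop the return address
       addl $N, %esp      ; pop the N bytes of arguments
       jmp  *%ecx         ; return to the caller

   ("ret $imm" only takes a 16-bit immediate, hence the 64K limit). */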
11283
11284 if (crtl->args.pops_args >= 65536)
11285 {
11286 rtx ecx = gen_rtx_REG (SImode, CX_REG);
11287 rtx insn;
11288
11289 /* There is no "pascal" calling convention in any 64bit ABI. */
11290 gcc_assert (!TARGET_64BIT);
11291
11292 insn = emit_insn (gen_pop (ecx));
11293 m->fs.cfa_offset -= UNITS_PER_WORD;
11294 m->fs.sp_offset -= UNITS_PER_WORD;
11295
11296 add_reg_note (insn, REG_CFA_ADJUST_CFA,
11297 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
11298 add_reg_note (insn, REG_CFA_REGISTER,
11299 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
11300 RTX_FRAME_RELATED_P (insn) = 1;
11301
11302 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11303 popc, -1, true);
11304 emit_jump_insn (gen_return_indirect_internal (ecx));
11305 }
11306 else
11307 emit_jump_insn (gen_return_pop_internal (popc));
11308 }
11309 else
11310 emit_jump_insn (gen_return_internal ());
11311
11312 /* Restore the state back to the state from the prologue,
11313 so that it's correct for the next epilogue. */
11314 m->fs = frame_state_save;
11315 }
11316
11317 /* Reset from the function's potential modifications. */
11318
11319 static void
11320 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
11321 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
11322 {
11323 if (pic_offset_table_rtx)
11324 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
11325 #if TARGET_MACHO
11326 /* Mach-O doesn't support labels at the end of objects, so if
11327 it looks like we might want one, insert a NOP. */
11328 {
11329 rtx insn = get_last_insn ();
11330 while (insn
11331 && NOTE_P (insn)
11332 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
11333 insn = PREV_INSN (insn);
11334 if (insn
11335 && (LABEL_P (insn)
11336 || (NOTE_P (insn)
11337 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
11338 fputs ("\tnop\n", file);
11339 }
11340 #endif
11341
11342 }
11343
11344 /* Return a scratch register to use in the split stack prologue. The
11345 split stack prologue is used for -fsplit-stack. It is the first
11346 instructions in the function, even before the regular prologue.
11347 The scratch register can be any caller-saved register which is not
11348 used for parameters or for the static chain. */
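/* In practice the choice below is:
     64-bit:                              %r11
     32-bit fastcall (no static chain):   %eax
     32-bit, regparm < 3, no chain:       %ecx
     32-bit, regparm < 2, with chain:     %edx
   anything else is rejected with a sorry (). */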
11349
11350 static unsigned int
11351 split_stack_prologue_scratch_regno (void)
11352 {
11353 if (TARGET_64BIT)
11354 return R11_REG;
11355 else
11356 {
11357 bool is_fastcall;
11358 int regparm;
11359
11360 is_fastcall = (lookup_attribute ("fastcall",
11361 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
11362 != NULL);
11363 regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);
11364
11365 if (is_fastcall)
11366 {
11367 if (DECL_STATIC_CHAIN (cfun->decl))
11368 {
11369 sorry ("-fsplit-stack does not support fastcall with "
11370 "nested function");
11371 return INVALID_REGNUM;
11372 }
11373 return AX_REG;
11374 }
11375 else if (regparm < 3)
11376 {
11377 if (!DECL_STATIC_CHAIN (cfun->decl))
11378 return CX_REG;
11379 else
11380 {
11381 if (regparm >= 2)
11382 {
11383 sorry ("-fsplit-stack does not support 2 register "
11384 "parameters for a nested function");
11385 return INVALID_REGNUM;
11386 }
11387 return DX_REG;
11388 }
11389 }
11390 else
11391 {
11392 /* FIXME: We could make this work by pushing a register
11393 around the addition and comparison. */
11394 sorry ("-fsplit-stack does not support 3 register parameters");
11395 return INVALID_REGNUM;
11396 }
11397 }
11398 }
11399
11400 /* A SYMBOL_REF for the function which allocates new stack space for
11401 -fsplit-stack. */
11402
11403 static GTY(()) rtx split_stack_fn;
11404
11405 /* A SYMBOL_REF for the variant of the more-stack function used with
11406 the large code model. */
11407
11408 static GTY(()) rtx split_stack_fn_large;
11409
11410 /* Handle -fsplit-stack. These are the first instructions in the
11411 function, even before the regular prologue. */
11412
11413 void
11414 ix86_expand_split_stack_prologue (void)
11415 {
11416 struct ix86_frame frame;
11417 HOST_WIDE_INT allocate;
11418 unsigned HOST_WIDE_INT args_size;
11419 rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
11420 rtx scratch_reg = NULL_RTX;
11421 rtx varargs_label = NULL_RTX;
11422 rtx fn;
11423
11424 gcc_assert (flag_split_stack && reload_completed);
11425
11426 ix86_finalize_stack_realign_flags ();
11427 ix86_compute_frame_layout (&frame);
11428 allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
11429
11430 /* This is the label we will branch to if we have enough stack
11431 space. We expect the basic block reordering pass to reverse this
11432 branch if optimizing, so that we branch in the unlikely case. */
11433 label = gen_label_rtx ();
11434
11435 /* We need to compare the stack pointer minus the frame size with
11436 the stack boundary in the TCB. The stack boundary always gives
11437 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
11438 can compare directly. Otherwise we need to do an addition. */
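/* Roughly, the comparison emitted below looks like this (registers and the
   TCB slot are illustrative; the UNSPEC_STACK_CHECK memory reference
   becomes the %fs/%gs-relative stack-boundary load):

       lea  -FRAME(%rsp), %r11    # only when FRAME >= SPLIT_STACK_AVAILABLE
       cmp  %fs:<boundary>, %r11  # otherwise %rsp is compared directly
       jae  .Lenough              # enough stack: skip the __morestack call
 */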
11439
11440 limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
11441 UNSPEC_STACK_CHECK);
11442 limit = gen_rtx_CONST (Pmode, limit);
11443 limit = gen_rtx_MEM (Pmode, limit);
11444 if (allocate < SPLIT_STACK_AVAILABLE)
11445 current = stack_pointer_rtx;
11446 else
11447 {
11448 unsigned int scratch_regno;
11449 rtx offset;
11450
11451 /* We need a scratch register to hold the stack pointer minus
11452 the required frame size. Since this is the very start of the
11453 function, the scratch register can be any caller-saved
11454 register which is not used for parameters. */
11455 offset = GEN_INT (- allocate);
11456 scratch_regno = split_stack_prologue_scratch_regno ();
11457 if (scratch_regno == INVALID_REGNUM)
11458 return;
11459 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11460 if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
11461 {
11462 /* We don't use ix86_gen_add3 in this case because it will
11463 want to split to lea, but when not optimizing the insn
11464 will not be split after this point. */
11465 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11466 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11467 offset)));
11468 }
11469 else
11470 {
11471 emit_move_insn (scratch_reg, offset);
11472 emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
11473 stack_pointer_rtx));
11474 }
11475 current = scratch_reg;
11476 }
11477
11478 ix86_expand_branch (GEU, current, limit, label);
11479 jump_insn = get_last_insn ();
11480 JUMP_LABEL (jump_insn) = label;
11481
11482 /* Mark the jump as very likely to be taken. */
11483 add_reg_note (jump_insn, REG_BR_PROB,
11484 GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
11485
11486 if (split_stack_fn == NULL_RTX)
11487 split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11488 fn = split_stack_fn;
11489
11490 /* Get more stack space. We pass in the desired stack space and the
11491 size of the arguments to copy to the new stack. In 32-bit mode
11492 we push the parameters; __morestack will return on a new stack
11493 anyhow. In 64-bit mode we pass the parameters in r10 and
11494 r11. */
11495 allocate_rtx = GEN_INT (allocate);
11496 args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
11497 call_fusage = NULL_RTX;
11498 if (TARGET_64BIT)
11499 {
11500 rtx reg10, reg11;
11501
11502 reg10 = gen_rtx_REG (Pmode, R10_REG);
11503 reg11 = gen_rtx_REG (Pmode, R11_REG);
11504
11505 /* If this function uses a static chain, it will be in %r10.
11506 Preserve it across the call to __morestack. */
11507 if (DECL_STATIC_CHAIN (cfun->decl))
11508 {
11509 rtx rax;
11510
11511 rax = gen_rtx_REG (Pmode, AX_REG);
11512 emit_move_insn (rax, reg10);
11513 use_reg (&call_fusage, rax);
11514 }
11515
11516 if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
11517 {
11518 HOST_WIDE_INT argval;
11519
11520 /* When using the large model we need to load the address
11521 into a register, and we've run out of registers. So we
11522 switch to a different calling convention, and we call a
11523 different function: __morestack_large_model. We pass the
11524 argument size in the upper 32 bits of r10 and pass the
11525 frame size in the lower 32 bits. */
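/* For example, args_size == 0x20 and allocate == 0x100 yield the
   value 0x0000002000000100 loaded into %r10 below. */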
11526 gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
11527 gcc_assert ((args_size & 0xffffffff) == args_size);
11528
11529 if (split_stack_fn_large == NULL_RTX)
11530 split_stack_fn_large =
11531 gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");
11532
11533 if (ix86_cmodel == CM_LARGE_PIC)
11534 {
11535 rtx label, x;
11536
11537 label = gen_label_rtx ();
11538 emit_label (label);
11539 LABEL_PRESERVE_P (label) = 1;
11540 emit_insn (gen_set_rip_rex64 (reg10, label));
11541 emit_insn (gen_set_got_offset_rex64 (reg11, label));
11542 emit_insn (gen_adddi3 (reg10, reg10, reg11));
11543 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
11544 UNSPEC_GOT);
11545 x = gen_rtx_CONST (Pmode, x);
11546 emit_move_insn (reg11, x);
11547 x = gen_rtx_PLUS (Pmode, reg10, reg11);
11548 x = gen_const_mem (Pmode, x);
11549 emit_move_insn (reg11, x);
11550 }
11551 else
11552 emit_move_insn (reg11, split_stack_fn_large);
11553
11554 fn = reg11;
11555
11556 argval = ((args_size << 16) << 16) + allocate;
11557 emit_move_insn (reg10, GEN_INT (argval));
11558 }
11559 else
11560 {
11561 emit_move_insn (reg10, allocate_rtx);
11562 emit_move_insn (reg11, GEN_INT (args_size));
11563 use_reg (&call_fusage, reg11);
11564 }
11565
11566 use_reg (&call_fusage, reg10);
11567 }
11568 else
11569 {
11570 emit_insn (gen_push (GEN_INT (args_size)));
11571 emit_insn (gen_push (allocate_rtx));
11572 }
11573 call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
11574 GEN_INT (UNITS_PER_WORD), constm1_rtx,
11575 NULL_RTX, 0);
11576 add_function_usage_to (call_insn, call_fusage);
11577
11578 /* In order to make call/return prediction work right, we now need
11579 to execute a return instruction. See
11580 libgcc/config/i386/morestack.S for the details on how this works.
11581
11582 For flow purposes gcc must not see this as a return
11583 instruction--we need control flow to continue at the subsequent
11584 label. Therefore, we use an unspec. */
11585 gcc_assert (crtl->args.pops_args < 65536);
11586 emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));
11587
11588 /* If we are in 64-bit mode and this function uses a static chain,
11589 we saved %r10 in %rax before calling __morestack. */
11590 if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
11591 emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
11592 gen_rtx_REG (Pmode, AX_REG));
11593
11594 /* If this function calls va_start, we need to store a pointer to
11595 the arguments on the old stack, because they may not have been
11596 all copied to the new stack. At this point the old stack can be
11597 found at the frame pointer value used by __morestack, because
11598 __morestack has set that up before calling back to us. Here we
11599 store that pointer in a scratch register, and in
11600 ix86_expand_prologue we store the scratch register in a stack
11601 slot. */
11602 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11603 {
11604 unsigned int scratch_regno;
11605 rtx frame_reg;
11606 int words;
11607
11608 scratch_regno = split_stack_prologue_scratch_regno ();
11609 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11610 frame_reg = gen_rtx_REG (Pmode, BP_REG);
11611
11612 /* 64-bit:
11613 fp -> old fp value
11614 return address within this function
11615 return address of caller of this function
11616 stack arguments
11617 So we add three words to get to the stack arguments.
11618
11619 32-bit:
11620 fp -> old fp value
11621 return address within this function
11622 first argument to __morestack
11623 second argument to __morestack
11624 return address of caller of this function
11625 stack arguments
11626 So we add five words to get to the stack arguments.
11627 */
11628 words = TARGET_64BIT ? 3 : 5;
11629 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11630 gen_rtx_PLUS (Pmode, frame_reg,
11631 GEN_INT (words * UNITS_PER_WORD))));
11632
11633 varargs_label = gen_label_rtx ();
11634 emit_jump_insn (gen_jump (varargs_label));
11635 JUMP_LABEL (get_last_insn ()) = varargs_label;
11636
11637 emit_barrier ();
11638 }
11639
11640 emit_label (label);
11641 LABEL_NUSES (label) = 1;
11642
11643 /* If this function calls va_start, we now have to set the scratch
11644 register for the case where we do not call __morestack. In this
11645 case we need to set it based on the stack pointer. */
11646 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11647 {
11648 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11649 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11650 GEN_INT (UNITS_PER_WORD))));
11651
11652 emit_label (varargs_label);
11653 LABEL_NUSES (varargs_label) = 1;
11654 }
11655 }
11656
11657 /* We may have to tell the dataflow pass that the split stack prologue
11658 is initializing a scratch register. */
11659
11660 static void
11661 ix86_live_on_entry (bitmap regs)
11662 {
11663 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11664 {
11665 gcc_assert (flag_split_stack);
11666 bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
11667 }
11668 }
11669 \f
11670 /* Extract the parts of an RTL expression that is a valid memory address
11671 for an instruction. Return 0 if the structure of the address is
11672 grossly off. Return -1 if the address contains ASHIFT, so it is not
11673 strictly valid, but is still used for computing the length of a lea instruction. */
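/* For example (register names are illustrative), the RTL

     (plus (plus (mult (reg B) (const_int 4)) (reg A)) (const_int 12))

   decomposes into base = A, index = B, scale = 4, disp = 12, which is
   the x86 effective address 12(%eax,%ebx,4) when A is %eax and B is %ebx. */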
11674
11675 int
11676 ix86_decompose_address (rtx addr, struct ix86_address *out)
11677 {
11678 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
11679 rtx base_reg, index_reg;
11680 HOST_WIDE_INT scale = 1;
11681 rtx scale_rtx = NULL_RTX;
11682 rtx tmp;
11683 int retval = 1;
11684 enum ix86_address_seg seg = SEG_DEFAULT;
11685
11686 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
11687 base = addr;
11688 else if (GET_CODE (addr) == PLUS)
11689 {
11690 rtx addends[4], op;
11691 int n = 0, i;
11692
11693 op = addr;
11694 do
11695 {
11696 if (n >= 4)
11697 return 0;
11698 addends[n++] = XEXP (op, 1);
11699 op = XEXP (op, 0);
11700 }
11701 while (GET_CODE (op) == PLUS);
11702 if (n >= 4)
11703 return 0;
11704 addends[n] = op;
11705
11706 for (i = n; i >= 0; --i)
11707 {
11708 op = addends[i];
11709 switch (GET_CODE (op))
11710 {
11711 case MULT:
11712 if (index)
11713 return 0;
11714 index = XEXP (op, 0);
11715 scale_rtx = XEXP (op, 1);
11716 break;
11717
11718 case ASHIFT:
11719 if (index)
11720 return 0;
11721 index = XEXP (op, 0);
11722 tmp = XEXP (op, 1);
11723 if (!CONST_INT_P (tmp))
11724 return 0;
11725 scale = INTVAL (tmp);
11726 if ((unsigned HOST_WIDE_INT) scale > 3)
11727 return 0;
11728 scale = 1 << scale;
11729 break;
11730
11731 case UNSPEC:
11732 if (XINT (op, 1) == UNSPEC_TP
11733 && TARGET_TLS_DIRECT_SEG_REFS
11734 && seg == SEG_DEFAULT)
11735 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
11736 else
11737 return 0;
11738 break;
11739
11740 case REG:
11741 case SUBREG:
11742 if (!base)
11743 base = op;
11744 else if (!index)
11745 index = op;
11746 else
11747 return 0;
11748 break;
11749
11750 case CONST:
11751 case CONST_INT:
11752 case SYMBOL_REF:
11753 case LABEL_REF:
11754 if (disp)
11755 return 0;
11756 disp = op;
11757 break;
11758
11759 default:
11760 return 0;
11761 }
11762 }
11763 }
11764 else if (GET_CODE (addr) == MULT)
11765 {
11766 index = XEXP (addr, 0); /* index*scale */
11767 scale_rtx = XEXP (addr, 1);
11768 }
11769 else if (GET_CODE (addr) == ASHIFT)
11770 {
11771 /* We're called for lea too, which implements ashift on occasion. */
11772 index = XEXP (addr, 0);
11773 tmp = XEXP (addr, 1);
11774 if (!CONST_INT_P (tmp))
11775 return 0;
11776 scale = INTVAL (tmp);
11777 if ((unsigned HOST_WIDE_INT) scale > 3)
11778 return 0;
11779 scale = 1 << scale;
11780 retval = -1;
11781 }
11782 else
11783 disp = addr; /* displacement */
11784
11785 /* Extract the integral value of scale. */
11786 if (scale_rtx)
11787 {
11788 if (!CONST_INT_P (scale_rtx))
11789 return 0;
11790 scale = INTVAL (scale_rtx);
11791 }
11792
11793 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
11794 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
11795
11796 /* Avoid useless 0 displacement. */
11797 if (disp == const0_rtx && (base || index))
11798 disp = NULL_RTX;
11799
11800 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
11801 if (base_reg && index_reg && scale == 1
11802 && (index_reg == arg_pointer_rtx
11803 || index_reg == frame_pointer_rtx
11804 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
11805 {
11806 rtx tmp;
11807 tmp = base, base = index, index = tmp;
11808 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
11809 }
11810
11811 /* Special case: %ebp cannot be encoded as a base without a displacement.
11812 Similarly %r13. */
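/* In the ModR/M byte, mod=00 with r/m=101 means "disp32, no base"
   (RIP-relative in 64-bit mode), so a plain (%ebp) or (%r13) cannot be
   encoded; at least a zero displacement is required, e.g. 0(%ebp). */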
11813 if (!disp
11814 && base_reg
11815 && (base_reg == hard_frame_pointer_rtx
11816 || base_reg == frame_pointer_rtx
11817 || base_reg == arg_pointer_rtx
11818 || (REG_P (base_reg)
11819 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
11820 || REGNO (base_reg) == R13_REG))))
11821 disp = const0_rtx;
11822
11823 /* Special case: on K6, [%esi] makes the instruction vector decoded.
11824 Avoid this by transforming to [%esi+0].
11825 Reload calls address legitimization without cfun defined, so we need
11826 to test cfun for being non-NULL. */
11827 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
11828 && base_reg && !index_reg && !disp
11829 && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
11830 disp = const0_rtx;
11831
11832 /* Special case: encode reg+reg instead of reg*2. */
11833 if (!base && index && scale == 2)
11834 base = index, base_reg = index_reg, scale = 1;
11835
11836 /* Special case: scaling cannot be encoded without base or displacement. */
11837 if (!base && !disp && index && scale != 1)
11838 disp = const0_rtx;
11839
11840 out->base = base;
11841 out->index = index;
11842 out->disp = disp;
11843 out->scale = scale;
11844 out->seg = seg;
11845
11846 return retval;
11847 }
11848 \f
11849 /* Return the cost of the memory address x.
11850 For i386, it is better to use a complex address than to let gcc copy
11851 the address into a reg and make a new pseudo. But not if the address
11852 requires two regs - that would mean more pseudos with longer
11853 lifetimes. */
11854 static int
11855 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
11856 {
11857 struct ix86_address parts;
11858 int cost = 1;
11859 int ok = ix86_decompose_address (x, &parts);
11860
11861 gcc_assert (ok);
11862
11863 if (parts.base && GET_CODE (parts.base) == SUBREG)
11864 parts.base = SUBREG_REG (parts.base);
11865 if (parts.index && GET_CODE (parts.index) == SUBREG)
11866 parts.index = SUBREG_REG (parts.index);
11867
11868 /* Attempt to minimize number of registers in the address. */
11869 if ((parts.base
11870 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
11871 || (parts.index
11872 && (!REG_P (parts.index)
11873 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
11874 cost++;
11875
11876 if (parts.base
11877 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
11878 && parts.index
11879 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
11880 && parts.base != parts.index)
11881 cost++;
11882
11883 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
11884 since its predecode logic can't detect the length of instructions
11885 and it degenerates to vector decoding. Increase the cost of such
11886 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
11887 to split such addresses or even refuse such addresses at all.
11888
11889 The following addressing modes are affected:
11890 [base+scale*index]
11891 [scale*index+disp]
11892 [base+index]
11893
11894 The first and last case may be avoidable by explicitly coding the zero in
11895 the memory address, but I don't have an AMD-K6 machine handy to check this
11896 theory. */
11897
11898 if (TARGET_K6
11899 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
11900 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
11901 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
11902 cost += 10;
11903
11904 return cost;
11905 }
11906 \f
11907 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
11908 this is used to form addresses to local data when -fPIC is in
11909 use. */
11910
11911 static bool
11912 darwin_local_data_pic (rtx disp)
11913 {
11914 return (GET_CODE (disp) == UNSPEC
11915 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
11916 }
11917
11918 /* Determine if a given RTX is a valid constant. We already know this
11919 satisfies CONSTANT_P. */
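/* In short, the cases below reject TLS symbols, DLLIMPORT symbols,
   non-zero TImode CONST_DOUBLEs in 32-bit mode and vector constants
   that are not standard SSE constants; everything else is left to
   the move patterns. */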
11920
11921 static bool
11922 ix86_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
11923 {
11924 switch (GET_CODE (x))
11925 {
11926 case CONST:
11927 x = XEXP (x, 0);
11928
11929 if (GET_CODE (x) == PLUS)
11930 {
11931 if (!CONST_INT_P (XEXP (x, 1)))
11932 return false;
11933 x = XEXP (x, 0);
11934 }
11935
11936 if (TARGET_MACHO && darwin_local_data_pic (x))
11937 return true;
11938
11939 /* Only some unspecs are valid as "constants". */
11940 if (GET_CODE (x) == UNSPEC)
11941 switch (XINT (x, 1))
11942 {
11943 case UNSPEC_GOT:
11944 case UNSPEC_GOTOFF:
11945 case UNSPEC_PLTOFF:
11946 return TARGET_64BIT;
11947 case UNSPEC_TPOFF:
11948 case UNSPEC_NTPOFF:
11949 x = XVECEXP (x, 0, 0);
11950 return (GET_CODE (x) == SYMBOL_REF
11951 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11952 case UNSPEC_DTPOFF:
11953 x = XVECEXP (x, 0, 0);
11954 return (GET_CODE (x) == SYMBOL_REF
11955 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
11956 default:
11957 return false;
11958 }
11959
11960 /* We must have drilled down to a symbol. */
11961 if (GET_CODE (x) == LABEL_REF)
11962 return true;
11963 if (GET_CODE (x) != SYMBOL_REF)
11964 return false;
11965 /* FALLTHRU */
11966
11967 case SYMBOL_REF:
11968 /* TLS symbols are never valid. */
11969 if (SYMBOL_REF_TLS_MODEL (x))
11970 return false;
11971
11972 /* DLLIMPORT symbols are never valid. */
11973 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11974 && SYMBOL_REF_DLLIMPORT_P (x))
11975 return false;
11976
11977 #if TARGET_MACHO
11978 /* mdynamic-no-pic */
11979 if (MACHO_DYNAMIC_NO_PIC_P)
11980 return machopic_symbol_defined_p (x);
11981 #endif
11982 break;
11983
11984 case CONST_DOUBLE:
11985 if (GET_MODE (x) == TImode
11986 && x != CONST0_RTX (TImode)
11987 && !TARGET_64BIT)
11988 return false;
11989 break;
11990
11991 case CONST_VECTOR:
11992 if (!standard_sse_constant_p (x))
11993 return false;
11994
11995 default:
11996 break;
11997 }
11998
11999 /* Otherwise we handle everything else in the move patterns. */
12000 return true;
12001 }
12002
12003 /* Determine if it's legal to put X into the constant pool. This
12004 is not possible for the address of thread-local symbols, which
12005 is checked above. */
12006
12007 static bool
12008 ix86_cannot_force_const_mem (enum machine_mode mode, rtx x)
12009 {
12010 /* We can always put integral constants and vectors in memory. */
12011 switch (GET_CODE (x))
12012 {
12013 case CONST_INT:
12014 case CONST_DOUBLE:
12015 case CONST_VECTOR:
12016 return false;
12017
12018 default:
12019 break;
12020 }
12021 return !ix86_legitimate_constant_p (mode, x);
12022 }
12023
12024
12025 /* Nonzero if the constant value X is a legitimate general operand
12026 when generating PIC code. It is given that flag_pic is on and
12027 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
12028
12029 bool
12030 legitimate_pic_operand_p (rtx x)
12031 {
12032 rtx inner;
12033
12034 switch (GET_CODE (x))
12035 {
12036 case CONST:
12037 inner = XEXP (x, 0);
12038 if (GET_CODE (inner) == PLUS
12039 && CONST_INT_P (XEXP (inner, 1)))
12040 inner = XEXP (inner, 0);
12041
12042 /* Only some unspecs are valid as "constants". */
12043 if (GET_CODE (inner) == UNSPEC)
12044 switch (XINT (inner, 1))
12045 {
12046 case UNSPEC_GOT:
12047 case UNSPEC_GOTOFF:
12048 case UNSPEC_PLTOFF:
12049 return TARGET_64BIT;
12050 case UNSPEC_TPOFF:
12051 x = XVECEXP (inner, 0, 0);
12052 return (GET_CODE (x) == SYMBOL_REF
12053 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
12054 case UNSPEC_MACHOPIC_OFFSET:
12055 return legitimate_pic_address_disp_p (x);
12056 default:
12057 return false;
12058 }
12059 /* FALLTHRU */
12060
12061 case SYMBOL_REF:
12062 case LABEL_REF:
12063 return legitimate_pic_address_disp_p (x);
12064
12065 default:
12066 return true;
12067 }
12068 }
12069
12070 /* Determine if a given CONST RTX is a valid memory displacement
12071 in PIC mode. */
12072
12073 bool
12074 legitimate_pic_address_disp_p (rtx disp)
12075 {
12076 bool saw_plus;
12077
12078 /* In 64bit mode we can allow direct addresses of symbols and labels
12079 when they are not dynamic symbols. */
12080 if (TARGET_64BIT)
12081 {
12082 rtx op0 = disp, op1;
12083
12084 switch (GET_CODE (disp))
12085 {
12086 case LABEL_REF:
12087 return true;
12088
12089 case CONST:
12090 if (GET_CODE (XEXP (disp, 0)) != PLUS)
12091 break;
12092 op0 = XEXP (XEXP (disp, 0), 0);
12093 op1 = XEXP (XEXP (disp, 0), 1);
12094 if (!CONST_INT_P (op1)
12095 || INTVAL (op1) >= 16*1024*1024
12096 || INTVAL (op1) < -16*1024*1024)
12097 break;
12098 if (GET_CODE (op0) == LABEL_REF)
12099 return true;
12100 if (GET_CODE (op0) != SYMBOL_REF)
12101 break;
12102 /* FALLTHRU */
12103
12104 case SYMBOL_REF:
12105 /* TLS references should always be enclosed in UNSPEC. */
12106 if (SYMBOL_REF_TLS_MODEL (op0))
12107 return false;
12108 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
12109 && ix86_cmodel != CM_LARGE_PIC)
12110 return true;
12111 break;
12112
12113 default:
12114 break;
12115 }
12116 }
12117 if (GET_CODE (disp) != CONST)
12118 return false;
12119 disp = XEXP (disp, 0);
12120
12121 if (TARGET_64BIT)
12122 {
12123 /* It is unsafe to allow PLUS expressions here; they could exceed the
12124 allowed distance of GOT table references. We should not need them anyway. */
12125 if (GET_CODE (disp) != UNSPEC
12126 || (XINT (disp, 1) != UNSPEC_GOTPCREL
12127 && XINT (disp, 1) != UNSPEC_GOTOFF
12128 && XINT (disp, 1) != UNSPEC_PCREL
12129 && XINT (disp, 1) != UNSPEC_PLTOFF))
12130 return false;
12131
12132 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
12133 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
12134 return false;
12135 return true;
12136 }
12137
12138 saw_plus = false;
12139 if (GET_CODE (disp) == PLUS)
12140 {
12141 if (!CONST_INT_P (XEXP (disp, 1)))
12142 return false;
12143 disp = XEXP (disp, 0);
12144 saw_plus = true;
12145 }
12146
12147 if (TARGET_MACHO && darwin_local_data_pic (disp))
12148 return true;
12149
12150 if (GET_CODE (disp) != UNSPEC)
12151 return false;
12152
12153 switch (XINT (disp, 1))
12154 {
12155 case UNSPEC_GOT:
12156 if (saw_plus)
12157 return false;
12158 /* We need to check for both symbols and labels because VxWorks loads
12159 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
12160 details. */
12161 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
12162 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
12163 case UNSPEC_GOTOFF:
12164 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
12165 While the ABI also specifies a 32bit relocation, we don't produce it in
12166 the small PIC model at all. */
12167 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
12168 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
12169 && !TARGET_64BIT)
12170 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
12171 return false;
12172 case UNSPEC_GOTTPOFF:
12173 case UNSPEC_GOTNTPOFF:
12174 case UNSPEC_INDNTPOFF:
12175 if (saw_plus)
12176 return false;
12177 disp = XVECEXP (disp, 0, 0);
12178 return (GET_CODE (disp) == SYMBOL_REF
12179 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
12180 case UNSPEC_NTPOFF:
12181 disp = XVECEXP (disp, 0, 0);
12182 return (GET_CODE (disp) == SYMBOL_REF
12183 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
12184 case UNSPEC_DTPOFF:
12185 disp = XVECEXP (disp, 0, 0);
12186 return (GET_CODE (disp) == SYMBOL_REF
12187 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
12188 }
12189
12190 return false;
12191 }
12192
12193 /* Recognizes RTL expressions that are valid memory addresses for an
12194 instruction. The MODE argument is the machine mode for the MEM
12195 expression that wants to use this address.
12196
12197 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
12198 convert common non-canonical forms to canonical form so that they will
12199 be recognized. */
12200
12201 static bool
12202 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
12203 rtx addr, bool strict)
12204 {
12205 struct ix86_address parts;
12206 rtx base, index, disp;
12207 HOST_WIDE_INT scale;
12208
12209 if (ix86_decompose_address (addr, &parts) <= 0)
12210 /* Decomposition failed. */
12211 return false;
12212
12213 base = parts.base;
12214 index = parts.index;
12215 disp = parts.disp;
12216 scale = parts.scale;
12217
12218 /* Validate base register.
12219
12220 Don't allow SUBREGs that span more than a word here; they can lead to spill
12221 failures when the base is one word out of a two-word structure, which is
12222 represented internally as a DImode int. */
12223
12224 if (base)
12225 {
12226 rtx reg;
12227
12228 if (REG_P (base))
12229 reg = base;
12230 else if (GET_CODE (base) == SUBREG
12231 && REG_P (SUBREG_REG (base))
12232 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
12233 <= UNITS_PER_WORD)
12234 reg = SUBREG_REG (base);
12235 else
12236 /* Base is not a register. */
12237 return false;
12238
12239 if (GET_MODE (base) != Pmode)
12240 /* Base is not in Pmode. */
12241 return false;
12242
12243 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
12244 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
12245 /* Base is not valid. */
12246 return false;
12247 }
12248
12249 /* Validate index register.
12250
12251 Don't allow SUBREGs that span more than a word here -- same as above. */
12252
12253 if (index)
12254 {
12255 rtx reg;
12256
12257 if (REG_P (index))
12258 reg = index;
12259 else if (GET_CODE (index) == SUBREG
12260 && REG_P (SUBREG_REG (index))
12261 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
12262 <= UNITS_PER_WORD)
12263 reg = SUBREG_REG (index);
12264 else
12265 /* Index is not a register. */
12266 return false;
12267
12268 if (GET_MODE (index) != Pmode)
12269 /* Index is not in Pmode. */
12270 return false;
12271
12272 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
12273 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
12274 /* Index is not valid. */
12275 return false;
12276 }
12277
12278 /* Validate scale factor. */
12279 if (scale != 1)
12280 {
12281 if (!index)
12282 /* Scale without index. */
12283 return false;
12284
12285 if (scale != 2 && scale != 4 && scale != 8)
12286 /* Scale is not a valid multiplier. */
12287 return false;
12288 }
12289
12290 /* Validate displacement. */
12291 if (disp)
12292 {
12293 if (GET_CODE (disp) == CONST
12294 && GET_CODE (XEXP (disp, 0)) == UNSPEC
12295 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
12296 switch (XINT (XEXP (disp, 0), 1))
12297 {
12298 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
12299 used. While the ABI also specifies 32bit relocations, we don't produce
12300 them at all and use IP-relative addressing instead. */
12301 case UNSPEC_GOT:
12302 case UNSPEC_GOTOFF:
12303 gcc_assert (flag_pic);
12304 if (!TARGET_64BIT)
12305 goto is_legitimate_pic;
12306
12307 /* 64bit address unspec. */
12308 return false;
12309
12310 case UNSPEC_GOTPCREL:
12311 case UNSPEC_PCREL:
12312 gcc_assert (flag_pic);
12313 goto is_legitimate_pic;
12314
12315 case UNSPEC_GOTTPOFF:
12316 case UNSPEC_GOTNTPOFF:
12317 case UNSPEC_INDNTPOFF:
12318 case UNSPEC_NTPOFF:
12319 case UNSPEC_DTPOFF:
12320 break;
12321
12322 case UNSPEC_STACK_CHECK:
12323 gcc_assert (flag_split_stack);
12324 break;
12325
12326 default:
12327 /* Invalid address unspec. */
12328 return false;
12329 }
12330
12331 else if (SYMBOLIC_CONST (disp)
12332 && (flag_pic
12333 || (TARGET_MACHO
12334 #if TARGET_MACHO
12335 && MACHOPIC_INDIRECT
12336 && !machopic_operand_p (disp)
12337 #endif
12338 )))
12339 {
12340
12341 is_legitimate_pic:
12342 if (TARGET_64BIT && (index || base))
12343 {
12344 /* foo@dtpoff(%rX) is ok. */
12345 if (GET_CODE (disp) != CONST
12346 || GET_CODE (XEXP (disp, 0)) != PLUS
12347 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
12348 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
12349 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
12350 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
12351 /* Non-constant pic memory reference. */
12352 return false;
12353 }
12354 else if ((!TARGET_MACHO || flag_pic)
12355 && ! legitimate_pic_address_disp_p (disp))
12356 /* Displacement is an invalid pic construct. */
12357 return false;
12358 #if TARGET_MACHO
12359 else if (MACHO_DYNAMIC_NO_PIC_P
12360 && !ix86_legitimate_constant_p (Pmode, disp))
12361 /* Displacement must be referenced via non_lazy_pointer. */
12362 return false;
12363 #endif
12364
12365 /* This code used to verify that a symbolic pic displacement
12366 includes the pic_offset_table_rtx register.
12367
12368 While this is a good idea, unfortunately these constructs may
12369 be created by the "adds using lea" optimization for incorrect
12370 code like:
12371
12372 int a;
12373 int foo(int i)
12374 {
12375 return *(&a+i);
12376 }
12377
12378 This code is nonsensical, but results in addressing the
12379 GOT table with a pic_offset_table_rtx base. We can't
12380 just refuse it easily, since it gets matched by the
12381 "addsi3" pattern, which later gets split to an lea when the
12382 output register differs from the input. While this
12383 could be handled by a separate addsi pattern for this case
12384 that never results in an lea, disabling this test seems to be
12385 the easier and correct fix for the crash. */
12386 }
12387 else if (GET_CODE (disp) != LABEL_REF
12388 && !CONST_INT_P (disp)
12389 && (GET_CODE (disp) != CONST
12390 || !ix86_legitimate_constant_p (Pmode, disp))
12391 && (GET_CODE (disp) != SYMBOL_REF
12392 || !ix86_legitimate_constant_p (Pmode, disp)))
12393 /* Displacement is not constant. */
12394 return false;
12395 else if (TARGET_64BIT
12396 && !x86_64_immediate_operand (disp, VOIDmode))
12397 /* Displacement is out of range. */
12398 return false;
12399 }
12400
12401 /* Everything looks valid. */
12402 return true;
12403 }
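
/* A minimal sketch of the address forms validated above, assuming 32bit
   mode and no PIC displacement:

     (plus:SI (reg:SI bx)
              (plus:SI (mult:SI (reg:SI cx) (const_int 4))
                       (const_int 12)))       i.e. 12(%ebx,%ecx,4)

   The same address is rejected if the scale is not 1, 2, 4 or 8, if the
   base or index is not in Pmode, or if either is a SUBREG wider than a
   word.  */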
12404
12405 /* Determine if a given RTX is a valid constant address. */
12406
12407 bool
12408 constant_address_p (rtx x)
12409 {
12410 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
12411 }
12412 \f
12413 /* Return a unique alias set for the GOT. */
12414
12415 static alias_set_type
12416 ix86_GOT_alias_set (void)
12417 {
12418 static alias_set_type set = -1;
12419 if (set == -1)
12420 set = new_alias_set ();
12421 return set;
12422 }
12423
12424 /* Return a legitimate reference for ORIG (an address) using the
12425 register REG. If REG is 0, a new pseudo is generated.
12426
12427 There are two types of references that must be handled:
12428
12429 1. Global data references must load the address from the GOT, via
12430 the PIC reg. An insn is emitted to do this load, and the reg is
12431 returned.
12432
12433 2. Static data references, constant pool addresses, and code labels
12434 compute the address as an offset from the GOT, whose base is in
12435 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
12436 differentiate them from global data objects. The returned
12437 address is the PIC reg + an unspec constant.
12438
12439 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
12440 reg also appears in the address. */
12441
12442 static rtx
12443 legitimize_pic_address (rtx orig, rtx reg)
12444 {
12445 rtx addr = orig;
12446 rtx new_rtx = orig;
12447 rtx base;
12448
12449 #if TARGET_MACHO
12450 if (TARGET_MACHO && !TARGET_64BIT)
12451 {
12452 if (reg == 0)
12453 reg = gen_reg_rtx (Pmode);
12454 /* Use the generic Mach-O PIC machinery. */
12455 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
12456 }
12457 #endif
12458
12459 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
12460 new_rtx = addr;
12461 else if (TARGET_64BIT
12462 && ix86_cmodel != CM_SMALL_PIC
12463 && gotoff_operand (addr, Pmode))
12464 {
12465 rtx tmpreg;
12466 /* This symbol may be referenced via a displacement from the PIC
12467 base address (@GOTOFF). */
12468
12469 if (reload_in_progress)
12470 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12471 if (GET_CODE (addr) == CONST)
12472 addr = XEXP (addr, 0);
12473 if (GET_CODE (addr) == PLUS)
12474 {
12475 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12476 UNSPEC_GOTOFF);
12477 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12478 }
12479 else
12480 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12481 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12482 if (!reg)
12483 tmpreg = gen_reg_rtx (Pmode);
12484 else
12485 tmpreg = reg;
12486 emit_move_insn (tmpreg, new_rtx);
12487
12488 if (reg != 0)
12489 {
12490 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
12491 tmpreg, 1, OPTAB_DIRECT);
12492 new_rtx = reg;
12493 }
12494 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
12495 }
12496 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
12497 {
12498 /* This symbol may be referenced via a displacement from the PIC
12499 base address (@GOTOFF). */
12500
12501 if (reload_in_progress)
12502 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12503 if (GET_CODE (addr) == CONST)
12504 addr = XEXP (addr, 0);
12505 if (GET_CODE (addr) == PLUS)
12506 {
12507 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12508 UNSPEC_GOTOFF);
12509 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12510 }
12511 else
12512 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12513 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12514 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12515
12516 if (reg != 0)
12517 {
12518 emit_move_insn (reg, new_rtx);
12519 new_rtx = reg;
12520 }
12521 }
12522 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
12523 /* We can't use @GOTOFF for text labels on VxWorks;
12524 see gotoff_operand. */
12525 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12526 {
12527 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12528 {
12529 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12530 return legitimize_dllimport_symbol (addr, true);
12531 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12532 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12533 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12534 {
12535 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12536 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
12537 }
12538 }
12539
12540 /* For x64 PE-COFF there is no GOT table, so we use the address
12541 directly. */
12542 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12543 {
12544 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12545 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12546
12547 if (reg == 0)
12548 reg = gen_reg_rtx (Pmode);
12549 emit_move_insn (reg, new_rtx);
12550 new_rtx = reg;
12551 }
12552 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12553 {
12554 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12555 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12556 new_rtx = gen_const_mem (Pmode, new_rtx);
12557 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12558
12559 if (reg == 0)
12560 reg = gen_reg_rtx (Pmode);
12561 /* Use gen_movsi directly; otherwise the address is loaded
12562 into a register for CSE. We don't want to CSE these addresses;
12563 instead we CSE the addresses loaded from the GOT table, so skip this. */
12564 emit_insn (gen_movsi (reg, new_rtx));
12565 new_rtx = reg;
12566 }
12567 else
12568 {
12569 /* This symbol must be referenced via a load from the
12570 Global Offset Table (@GOT). */
12571
12572 if (reload_in_progress)
12573 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12574 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12575 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12576 if (TARGET_64BIT)
12577 new_rtx = force_reg (Pmode, new_rtx);
12578 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12579 new_rtx = gen_const_mem (Pmode, new_rtx);
12580 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12581
12582 if (reg == 0)
12583 reg = gen_reg_rtx (Pmode);
12584 emit_move_insn (reg, new_rtx);
12585 new_rtx = reg;
12586 }
12587 }
12588 else
12589 {
12590 if (CONST_INT_P (addr)
12591 && !x86_64_immediate_operand (addr, VOIDmode))
12592 {
12593 if (reg)
12594 {
12595 emit_move_insn (reg, addr);
12596 new_rtx = reg;
12597 }
12598 else
12599 new_rtx = force_reg (Pmode, addr);
12600 }
12601 else if (GET_CODE (addr) == CONST)
12602 {
12603 addr = XEXP (addr, 0);
12604
12605 /* We must match what we generated before. Assume the only
12606 unspecs that can get here are ours. Not that we could do
12607 anything with them anyway.... */
12608 if (GET_CODE (addr) == UNSPEC
12609 || (GET_CODE (addr) == PLUS
12610 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12611 return orig;
12612 gcc_assert (GET_CODE (addr) == PLUS);
12613 }
12614 if (GET_CODE (addr) == PLUS)
12615 {
12616 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12617
12618 /* Check first to see if this is a constant offset from a @GOTOFF
12619 symbol reference. */
12620 if (gotoff_operand (op0, Pmode)
12621 && CONST_INT_P (op1))
12622 {
12623 if (!TARGET_64BIT)
12624 {
12625 if (reload_in_progress)
12626 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12627 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12628 UNSPEC_GOTOFF);
12629 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12630 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12631 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12632
12633 if (reg != 0)
12634 {
12635 emit_move_insn (reg, new_rtx);
12636 new_rtx = reg;
12637 }
12638 }
12639 else
12640 {
12641 if (INTVAL (op1) < -16*1024*1024
12642 || INTVAL (op1) >= 16*1024*1024)
12643 {
12644 if (!x86_64_immediate_operand (op1, Pmode))
12645 op1 = force_reg (Pmode, op1);
12646 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12647 }
12648 }
12649 }
12650 else
12651 {
12652 base = legitimize_pic_address (XEXP (addr, 0), reg);
12653 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12654 base == reg ? NULL_RTX : reg);
12655
12656 if (CONST_INT_P (new_rtx))
12657 new_rtx = plus_constant (base, INTVAL (new_rtx));
12658 else
12659 {
12660 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12661 {
12662 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12663 new_rtx = XEXP (new_rtx, 1);
12664 }
12665 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
12666 }
12667 }
12668 }
12669 }
12670 return new_rtx;
12671 }
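
/* Sketch of the two 32bit transformations described above (register
   numbers and mnemonics are illustrative):

     global "foo":  (mem (plus pic_reg (const (unspec [foo] UNSPEC_GOT))))
                      -> movl foo@GOT(%ebx), %reg
     local "bar":   (plus pic_reg (const (unspec [bar] UNSPEC_GOTOFF)))
                      -> leal bar@GOTOFF(%ebx), %reg

   In 64bit small-PIC code the global case is instead loaded through
   (const (unspec [foo] UNSPEC_GOTPCREL)), i.e. foo@GOTPCREL(%rip).  */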
12672 \f
12673 /* Load the thread pointer. If TO_REG is true, force it into a register. */
12674
12675 static rtx
12676 get_thread_pointer (int to_reg)
12677 {
12678 rtx tp, reg, insn;
12679
12680 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12681 if (!to_reg)
12682 return tp;
12683
12684 reg = gen_reg_rtx (Pmode);
12685 insn = gen_rtx_SET (VOIDmode, reg, tp);
12686 insn = emit_insn (insn);
12687
12688 return reg;
12689 }
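
/* The UNSPEC_TP built above ends up as a segment-relative reference;
   typically %gs:0 on 32bit GNU/Linux and %fs:0 on 64bit, so forcing it
   into a register is roughly "movl %gs:0, %eax" (a sketch; the segment
   actually used depends on the target configuration).  */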
12690
12691 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12692 false if we expect this to be used for a memory address and true if
12693 we expect to load the address into a register. */
12694
12695 static rtx
12696 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
12697 {
12698 rtx dest, base, off, pic, tp;
12699 int type;
12700
12701 switch (model)
12702 {
12703 case TLS_MODEL_GLOBAL_DYNAMIC:
12704 dest = gen_reg_rtx (Pmode);
12705 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12706
12707 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12708 {
12709 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12710
12711 start_sequence ();
12712 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
12713 insns = get_insns ();
12714 end_sequence ();
12715
12716 RTL_CONST_CALL_P (insns) = 1;
12717 emit_libcall_block (insns, dest, rax, x);
12718 }
12719 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12720 emit_insn (gen_tls_global_dynamic_64 (dest, x));
12721 else
12722 emit_insn (gen_tls_global_dynamic_32 (dest, x));
12723
12724 if (TARGET_GNU2_TLS)
12725 {
12726 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12727
12728 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12729 }
12730 break;
12731
12732 case TLS_MODEL_LOCAL_DYNAMIC:
12733 base = gen_reg_rtx (Pmode);
12734 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12735
12736 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12737 {
12738 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
12739
12740 start_sequence ();
12741 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
12742 insns = get_insns ();
12743 end_sequence ();
12744
12745 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
12746 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
12747 RTL_CONST_CALL_P (insns) = 1;
12748 emit_libcall_block (insns, base, rax, note);
12749 }
12750 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12751 emit_insn (gen_tls_local_dynamic_base_64 (base));
12752 else
12753 emit_insn (gen_tls_local_dynamic_base_32 (base));
12754
12755 if (TARGET_GNU2_TLS)
12756 {
12757 rtx x = ix86_tls_module_base ();
12758
12759 set_unique_reg_note (get_last_insn (), REG_EQUIV,
12760 gen_rtx_MINUS (Pmode, x, tp));
12761 }
12762
12763 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12764 off = gen_rtx_CONST (Pmode, off);
12765
12766 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12767
12768 if (TARGET_GNU2_TLS)
12769 {
12770 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12771
12772 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12773 }
12774
12775 break;
12776
12777 case TLS_MODEL_INITIAL_EXEC:
12778 if (TARGET_64BIT)
12779 {
12780 if (TARGET_SUN_TLS)
12781 {
12782 /* The Sun linker took the AMD64 TLS spec literally
12783 and can only handle %rax as the destination of the
12784 initial-exec code sequence. */
12785
12786 dest = gen_reg_rtx (Pmode);
12787 emit_insn (gen_tls_initial_exec_64_sun (dest, x));
12788 return dest;
12789 }
12790
12791 pic = NULL;
12792 type = UNSPEC_GOTNTPOFF;
12793 }
12794 else if (flag_pic)
12795 {
12796 if (reload_in_progress)
12797 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12798 pic = pic_offset_table_rtx;
12799 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12800 }
12801 else if (!TARGET_ANY_GNU_TLS)
12802 {
12803 pic = gen_reg_rtx (Pmode);
12804 emit_insn (gen_set_got (pic));
12805 type = UNSPEC_GOTTPOFF;
12806 }
12807 else
12808 {
12809 pic = NULL;
12810 type = UNSPEC_INDNTPOFF;
12811 }
12812
12813 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12814 off = gen_rtx_CONST (Pmode, off);
12815 if (pic)
12816 off = gen_rtx_PLUS (Pmode, pic, off);
12817 off = gen_const_mem (Pmode, off);
12818 set_mem_alias_set (off, ix86_GOT_alias_set ());
12819
12820 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12821 {
12822 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12823 off = force_reg (Pmode, off);
12824 return gen_rtx_PLUS (Pmode, base, off);
12825 }
12826 else
12827 {
12828 base = get_thread_pointer (true);
12829 dest = gen_reg_rtx (Pmode);
12830 emit_insn (gen_subsi3 (dest, base, off));
12831 }
12832 break;
12833
12834 case TLS_MODEL_LOCAL_EXEC:
12835 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12836 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12837 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12838 off = gen_rtx_CONST (Pmode, off);
12839
12840 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12841 {
12842 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12843 return gen_rtx_PLUS (Pmode, base, off);
12844 }
12845 else
12846 {
12847 base = get_thread_pointer (true);
12848 dest = gen_reg_rtx (Pmode);
12849 emit_insn (gen_subsi3 (dest, base, off));
12850 }
12851 break;
12852
12853 default:
12854 gcc_unreachable ();
12855 }
12856
12857 return dest;
12858 }
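
/* Rough sketch of what the models above expand to on 64bit ELF targets
   (register choice, exact relocations and scheduling are illustrative,
   not taken verbatim from the patterns used here):

     global-dynamic:  call to __tls_get_addr, result for "x" in %rax
     local-dynamic:   one __tls_get_addr call for the module base, then
                      lea x@dtpoff(%rax), %reg
     initial-exec:    mov x@gottpoff(%rip), %reg ; add %fs:0, %reg
     local-exec:      mov %fs:0, %reg ; lea x@tpoff(%reg), %reg

   32bit code uses %gs and the @ntpoff/@gotntpoff forms printed by
   output_pic_addr_const below.  */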
12859
12860 /* Create or return the unique __imp_DECL dllimport symbol corresponding
12861 to symbol DECL. */
12862
12863 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12864 htab_t dllimport_map;
12865
12866 static tree
12867 get_dllimport_decl (tree decl)
12868 {
12869 struct tree_map *h, in;
12870 void **loc;
12871 const char *name;
12872 const char *prefix;
12873 size_t namelen, prefixlen;
12874 char *imp_name;
12875 tree to;
12876 rtx rtl;
12877
12878 if (!dllimport_map)
12879 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12880
12881 in.hash = htab_hash_pointer (decl);
12882 in.base.from = decl;
12883 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12884 h = (struct tree_map *) *loc;
12885 if (h)
12886 return h->to;
12887
12888 *loc = h = ggc_alloc_tree_map ();
12889 h->hash = in.hash;
12890 h->base.from = decl;
12891 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12892 VAR_DECL, NULL, ptr_type_node);
12893 DECL_ARTIFICIAL (to) = 1;
12894 DECL_IGNORED_P (to) = 1;
12895 DECL_EXTERNAL (to) = 1;
12896 TREE_READONLY (to) = 1;
12897
12898 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12899 name = targetm.strip_name_encoding (name);
12900 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12901 ? "*__imp_" : "*__imp__";
12902 namelen = strlen (name);
12903 prefixlen = strlen (prefix);
12904 imp_name = (char *) alloca (namelen + prefixlen + 1);
12905 memcpy (imp_name, prefix, prefixlen);
12906 memcpy (imp_name + prefixlen, name, namelen + 1);
12907
12908 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12909 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12910 SET_SYMBOL_REF_DECL (rtl, to);
12911 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12912
12913 rtl = gen_const_mem (Pmode, rtl);
12914 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12915
12916 SET_DECL_RTL (to, rtl);
12917 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
12918
12919 return to;
12920 }
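
/* Example of the mapping above (a sketch; the exact prefix depends on
   FASTCALL_PREFIX and user_label_prefix): a dllimported symbol "foo"
   gets an artificial read-only VAR_DECL whose DECL_RTL is

     (mem (symbol_ref "*__imp__foo"))

   so every use of "foo" becomes a load through the import-table slot
   that the linker fills in.  */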
12921
12922 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
12923 true if we require the result be a register. */
12924
12925 static rtx
12926 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12927 {
12928 tree imp_decl;
12929 rtx x;
12930
12931 gcc_assert (SYMBOL_REF_DECL (symbol));
12932 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12933
12934 x = DECL_RTL (imp_decl);
12935 if (want_reg)
12936 x = force_reg (Pmode, x);
12937 return x;
12938 }
12939
12940 /* Try machine-dependent ways of modifying an illegitimate address
12941 to be legitimate. If we find one, return the new, valid address.
12942 This macro is used in only one place: `memory_address' in explow.c.
12943
12944 OLDX is the address as it was before break_out_memory_refs was called.
12945 In some cases it is useful to look at this to decide what needs to be done.
12946
12947 It is always safe for this macro to do nothing. It exists to recognize
12948 opportunities to optimize the output.
12949
12950 For the 80386, we handle X+REG by loading X into a register R and
12951 using R+REG. R will go in a general reg and indexing will be used.
12952 However, if REG is a broken-out memory address or multiplication,
12953 nothing needs to be done because REG can certainly go in a general reg.
12954
12955 When -fpic is used, special handling is needed for symbolic references.
12956 See comments by legitimize_pic_address in i386.c for details. */
12957
12958 static rtx
12959 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
12960 enum machine_mode mode)
12961 {
12962 int changed = 0;
12963 unsigned log;
12964
12965 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
12966 if (log)
12967 return legitimize_tls_address (x, (enum tls_model) log, false);
12968 if (GET_CODE (x) == CONST
12969 && GET_CODE (XEXP (x, 0)) == PLUS
12970 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12971 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
12972 {
12973 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
12974 (enum tls_model) log, false);
12975 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12976 }
12977
12978 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12979 {
12980 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
12981 return legitimize_dllimport_symbol (x, true);
12982 if (GET_CODE (x) == CONST
12983 && GET_CODE (XEXP (x, 0)) == PLUS
12984 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12985 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
12986 {
12987 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
12988 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12989 }
12990 }
12991
12992 if (flag_pic && SYMBOLIC_CONST (x))
12993 return legitimize_pic_address (x, 0);
12994
12995 #if TARGET_MACHO
12996 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
12997 return machopic_indirect_data_reference (x, 0);
12998 #endif
12999
13000 /* Canonicalize shifts by 0, 1, 2, 3 into a multiply. */
13001 if (GET_CODE (x) == ASHIFT
13002 && CONST_INT_P (XEXP (x, 1))
13003 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
13004 {
13005 changed = 1;
13006 log = INTVAL (XEXP (x, 1));
13007 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
13008 GEN_INT (1 << log));
13009 }
13010
13011 if (GET_CODE (x) == PLUS)
13012 {
13013 /* Canonicalize shifts by 0, 1, 2, 3 into a multiply. */
13014
13015 if (GET_CODE (XEXP (x, 0)) == ASHIFT
13016 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
13017 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
13018 {
13019 changed = 1;
13020 log = INTVAL (XEXP (XEXP (x, 0), 1));
13021 XEXP (x, 0) = gen_rtx_MULT (Pmode,
13022 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
13023 GEN_INT (1 << log));
13024 }
13025
13026 if (GET_CODE (XEXP (x, 1)) == ASHIFT
13027 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
13028 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
13029 {
13030 changed = 1;
13031 log = INTVAL (XEXP (XEXP (x, 1), 1));
13032 XEXP (x, 1) = gen_rtx_MULT (Pmode,
13033 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
13034 GEN_INT (1 << log));
13035 }
13036
13037 /* Put multiply first if it isn't already. */
13038 if (GET_CODE (XEXP (x, 1)) == MULT)
13039 {
13040 rtx tmp = XEXP (x, 0);
13041 XEXP (x, 0) = XEXP (x, 1);
13042 XEXP (x, 1) = tmp;
13043 changed = 1;
13044 }
13045
13046 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
13047 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
13048 created by virtual register instantiation, register elimination, and
13049 similar optimizations. */
13050 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
13051 {
13052 changed = 1;
13053 x = gen_rtx_PLUS (Pmode,
13054 gen_rtx_PLUS (Pmode, XEXP (x, 0),
13055 XEXP (XEXP (x, 1), 0)),
13056 XEXP (XEXP (x, 1), 1));
13057 }
13058
13059 /* Canonicalize
13060 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
13061 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
13062 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
13063 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
13064 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
13065 && CONSTANT_P (XEXP (x, 1)))
13066 {
13067 rtx constant;
13068 rtx other = NULL_RTX;
13069
13070 if (CONST_INT_P (XEXP (x, 1)))
13071 {
13072 constant = XEXP (x, 1);
13073 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
13074 }
13075 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
13076 {
13077 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
13078 other = XEXP (x, 1);
13079 }
13080 else
13081 constant = 0;
13082
13083 if (constant)
13084 {
13085 changed = 1;
13086 x = gen_rtx_PLUS (Pmode,
13087 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
13088 XEXP (XEXP (XEXP (x, 0), 1), 0)),
13089 plus_constant (other, INTVAL (constant)));
13090 }
13091 }
13092
13093 if (changed && ix86_legitimate_address_p (mode, x, false))
13094 return x;
13095
13096 if (GET_CODE (XEXP (x, 0)) == MULT)
13097 {
13098 changed = 1;
13099 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
13100 }
13101
13102 if (GET_CODE (XEXP (x, 1)) == MULT)
13103 {
13104 changed = 1;
13105 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
13106 }
13107
13108 if (changed
13109 && REG_P (XEXP (x, 1))
13110 && REG_P (XEXP (x, 0)))
13111 return x;
13112
13113 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
13114 {
13115 changed = 1;
13116 x = legitimize_pic_address (x, 0);
13117 }
13118
13119 if (changed && ix86_legitimate_address_p (mode, x, false))
13120 return x;
13121
13122 if (REG_P (XEXP (x, 0)))
13123 {
13124 rtx temp = gen_reg_rtx (Pmode);
13125 rtx val = force_operand (XEXP (x, 1), temp);
13126 if (val != temp)
13127 emit_move_insn (temp, val);
13128
13129 XEXP (x, 1) = temp;
13130 return x;
13131 }
13132
13133 else if (REG_P (XEXP (x, 1)))
13134 {
13135 rtx temp = gen_reg_rtx (Pmode);
13136 rtx val = force_operand (XEXP (x, 0), temp);
13137 if (val != temp)
13138 emit_move_insn (temp, val);
13139
13140 XEXP (x, 0) = temp;
13141 return x;
13142 }
13143 }
13144
13145 return x;
13146 }
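
/* A small example of the canonicalizations performed above (assumed RTL):

     (plus (reg B) (ashift (reg I) (const_int 2)))
       -> (plus (mult (reg I) (const_int 4)) (reg B))

   which then matches the base + index*scale form accepted by
   ix86_legitimate_address_p, e.g. (%ebx,%ecx,4) in AT&T syntax.  */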
13147 \f
13148 /* Print an integer constant expression in assembler syntax. Addition
13149 and subtraction are the only arithmetic that may appear in these
13150 expressions. FILE is the stdio stream to write to, X is the rtx, and
13151 CODE is the operand print code from the output string. */
13152
13153 static void
13154 output_pic_addr_const (FILE *file, rtx x, int code)
13155 {
13156 char buf[256];
13157
13158 switch (GET_CODE (x))
13159 {
13160 case PC:
13161 gcc_assert (flag_pic);
13162 putc ('.', file);
13163 break;
13164
13165 case SYMBOL_REF:
13166 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
13167 output_addr_const (file, x);
13168 else
13169 {
13170 const char *name = XSTR (x, 0);
13171
13172 /* Mark the decl as referenced so that cgraph will
13173 output the function. */
13174 if (SYMBOL_REF_DECL (x))
13175 mark_decl_referenced (SYMBOL_REF_DECL (x));
13176
13177 #if TARGET_MACHO
13178 if (MACHOPIC_INDIRECT
13179 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
13180 name = machopic_indirection_name (x, /*stub_p=*/true);
13181 #endif
13182 assemble_name (file, name);
13183 }
13184 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
13185 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
13186 fputs ("@PLT", file);
13187 break;
13188
13189 case LABEL_REF:
13190 x = XEXP (x, 0);
13191 /* FALLTHRU */
13192 case CODE_LABEL:
13193 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
13194 assemble_name (asm_out_file, buf);
13195 break;
13196
13197 case CONST_INT:
13198 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
13199 break;
13200
13201 case CONST:
13202 /* This used to output parentheses around the expression,
13203 but that does not work on the 386 (either ATT or BSD assembler). */
13204 output_pic_addr_const (file, XEXP (x, 0), code);
13205 break;
13206
13207 case CONST_DOUBLE:
13208 if (GET_MODE (x) == VOIDmode)
13209 {
13210 /* We can use %d if the number is <32 bits and positive. */
13211 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
13212 fprintf (file, "0x%lx%08lx",
13213 (unsigned long) CONST_DOUBLE_HIGH (x),
13214 (unsigned long) CONST_DOUBLE_LOW (x));
13215 else
13216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
13217 }
13218 else
13219 /* We can't handle floating point constants;
13220 TARGET_PRINT_OPERAND must handle them. */
13221 output_operand_lossage ("floating constant misused");
13222 break;
13223
13224 case PLUS:
13225 /* Some assemblers need integer constants to appear first. */
13226 if (CONST_INT_P (XEXP (x, 0)))
13227 {
13228 output_pic_addr_const (file, XEXP (x, 0), code);
13229 putc ('+', file);
13230 output_pic_addr_const (file, XEXP (x, 1), code);
13231 }
13232 else
13233 {
13234 gcc_assert (CONST_INT_P (XEXP (x, 1)));
13235 output_pic_addr_const (file, XEXP (x, 1), code);
13236 putc ('+', file);
13237 output_pic_addr_const (file, XEXP (x, 0), code);
13238 }
13239 break;
13240
13241 case MINUS:
13242 if (!TARGET_MACHO)
13243 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
13244 output_pic_addr_const (file, XEXP (x, 0), code);
13245 putc ('-', file);
13246 output_pic_addr_const (file, XEXP (x, 1), code);
13247 if (!TARGET_MACHO)
13248 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
13249 break;
13250
13251 case UNSPEC:
13252 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
13253 {
13254 bool f = i386_asm_output_addr_const_extra (file, x);
13255 gcc_assert (f);
13256 break;
13257 }
13258
13259 gcc_assert (XVECLEN (x, 0) == 1);
13260 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
13261 switch (XINT (x, 1))
13262 {
13263 case UNSPEC_GOT:
13264 fputs ("@GOT", file);
13265 break;
13266 case UNSPEC_GOTOFF:
13267 fputs ("@GOTOFF", file);
13268 break;
13269 case UNSPEC_PLTOFF:
13270 fputs ("@PLTOFF", file);
13271 break;
13272 case UNSPEC_PCREL:
13273 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13274 "(%rip)" : "[rip]", file);
13275 break;
13276 case UNSPEC_GOTPCREL:
13277 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13278 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
13279 break;
13280 case UNSPEC_GOTTPOFF:
13281 /* FIXME: This might be @TPOFF in Sun ld too. */
13282 fputs ("@gottpoff", file);
13283 break;
13284 case UNSPEC_TPOFF:
13285 fputs ("@tpoff", file);
13286 break;
13287 case UNSPEC_NTPOFF:
13288 if (TARGET_64BIT)
13289 fputs ("@tpoff", file);
13290 else
13291 fputs ("@ntpoff", file);
13292 break;
13293 case UNSPEC_DTPOFF:
13294 fputs ("@dtpoff", file);
13295 break;
13296 case UNSPEC_GOTNTPOFF:
13297 if (TARGET_64BIT)
13298 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13299 "@gottpoff(%rip)": "@gottpoff[rip]", file);
13300 else
13301 fputs ("@gotntpoff", file);
13302 break;
13303 case UNSPEC_INDNTPOFF:
13304 fputs ("@indntpoff", file);
13305 break;
13306 #if TARGET_MACHO
13307 case UNSPEC_MACHOPIC_OFFSET:
13308 putc ('-', file);
13309 machopic_output_function_base_name (file);
13310 break;
13311 #endif
13312 default:
13313 output_operand_lossage ("invalid UNSPEC as operand");
13314 break;
13315 }
13316 break;
13317
13318 default:
13319 output_operand_lossage ("invalid expression as operand");
13320 }
13321 }
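
/* For example, (const (plus (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)
   (const_int 4))) is printed as "4+foo@GOTOFF": the integer constant
   comes first, as some assemblers require, and the UNSPEC supplies the
   "@GOTOFF" suffix.  */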
13322
13323 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
13324 We need to emit DTP-relative relocations. */
13325
13326 static void ATTRIBUTE_UNUSED
13327 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
13328 {
13329 fputs (ASM_LONG, file);
13330 output_addr_const (file, x);
13331 fputs ("@dtpoff", file);
13332 switch (size)
13333 {
13334 case 4:
13335 break;
13336 case 8:
13337 fputs (", 0", file);
13338 break;
13339 default:
13340 gcc_unreachable ();
13341 }
13342 }
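
/* For a TLS variable "foo" this emits, assuming ASM_LONG is "\t.long\t":

     size 4:   .long foo@dtpoff
     size 8:   .long foo@dtpoff, 0

   i.e. the 64bit case pads the upper word with zero.  */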
13343
13344 /* Return true if X is a representation of the PIC register. This copes
13345 with calls from ix86_find_base_term, where the register might have
13346 been replaced by a cselib value. */
13347
13348 static bool
13349 ix86_pic_register_p (rtx x)
13350 {
13351 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
13352 return (pic_offset_table_rtx
13353 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
13354 else
13355 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
13356 }
13357
13358 /* Helper function for ix86_delegitimize_address.
13359 Attempt to delegitimize TLS local-exec accesses. */
13360
13361 static rtx
13362 ix86_delegitimize_tls_address (rtx orig_x)
13363 {
13364 rtx x = orig_x, unspec;
13365 struct ix86_address addr;
13366
13367 if (!TARGET_TLS_DIRECT_SEG_REFS)
13368 return orig_x;
13369 if (MEM_P (x))
13370 x = XEXP (x, 0);
13371 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
13372 return orig_x;
13373 if (ix86_decompose_address (x, &addr) == 0
13374 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
13375 || addr.disp == NULL_RTX
13376 || GET_CODE (addr.disp) != CONST)
13377 return orig_x;
13378 unspec = XEXP (addr.disp, 0);
13379 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
13380 unspec = XEXP (unspec, 0);
13381 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
13382 return orig_x;
13383 x = XVECEXP (unspec, 0, 0);
13384 gcc_assert (GET_CODE (x) == SYMBOL_REF);
13385 if (unspec != XEXP (addr.disp, 0))
13386 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
13387 if (addr.index)
13388 {
13389 rtx idx = addr.index;
13390 if (addr.scale != 1)
13391 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
13392 x = gen_rtx_PLUS (Pmode, idx, x);
13393 }
13394 if (addr.base)
13395 x = gen_rtx_PLUS (Pmode, addr.base, x);
13396 if (MEM_P (orig_x))
13397 x = replace_equiv_address_nv (orig_x, x);
13398 return x;
13399 }
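
/* Sketch of the inverse mapping above: a local-exec access whose address
   decomposes to segment FS/GS with a (const (unspec [foo] UNSPEC_NTPOFF))
   displacement -- e.g. %gs:foo@ntpoff(%eax) -- is rewritten back to plain
   "foo" plus the original base/index, which is what the debug output
   wants to see.  */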
13400
13401 /* In the name of slightly smaller debug output, and to cater to
13402 general assembler lossage, recognize PIC+GOTOFF and turn it back
13403 into a direct symbol reference.
13404
13405 On Darwin, this is necessary to avoid a crash, because Darwin
13406 has a different PIC label for each routine but the DWARF debugging
13407 information is not associated with any particular routine, so it's
13408 necessary to remove references to the PIC label from RTL stored by
13409 the DWARF output code. */
13410
13411 static rtx
13412 ix86_delegitimize_address (rtx x)
13413 {
13414 rtx orig_x = delegitimize_mem_from_attrs (x);
13415 /* addend is NULL or some rtx if x is something+GOTOFF where
13416 something doesn't include the PIC register. */
13417 rtx addend = NULL_RTX;
13418 /* reg_addend is NULL or a multiple of some register. */
13419 rtx reg_addend = NULL_RTX;
13420 /* const_addend is NULL or a const_int. */
13421 rtx const_addend = NULL_RTX;
13422 /* This is the result, or NULL. */
13423 rtx result = NULL_RTX;
13424
13425 x = orig_x;
13426
13427 if (MEM_P (x))
13428 x = XEXP (x, 0);
13429
13430 if (TARGET_64BIT)
13431 {
13432 if (GET_CODE (x) != CONST
13433 || GET_CODE (XEXP (x, 0)) != UNSPEC
13434 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
13435 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
13436 || !MEM_P (orig_x))
13437 return ix86_delegitimize_tls_address (orig_x);
13438 x = XVECEXP (XEXP (x, 0), 0, 0);
13439 if (GET_MODE (orig_x) != Pmode)
13440 {
13441 x = simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
13442 if (x == NULL_RTX)
13443 return orig_x;
13444 }
13445 return x;
13446 }
13447
13448 if (GET_CODE (x) != PLUS
13449 || GET_CODE (XEXP (x, 1)) != CONST)
13450 return ix86_delegitimize_tls_address (orig_x);
13451
13452 if (ix86_pic_register_p (XEXP (x, 0)))
13453 /* %ebx + GOT/GOTOFF */
13454 ;
13455 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13456 {
13457 /* %ebx + %reg * scale + GOT/GOTOFF */
13458 reg_addend = XEXP (x, 0);
13459 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13460 reg_addend = XEXP (reg_addend, 1);
13461 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13462 reg_addend = XEXP (reg_addend, 0);
13463 else
13464 {
13465 reg_addend = NULL_RTX;
13466 addend = XEXP (x, 0);
13467 }
13468 }
13469 else
13470 addend = XEXP (x, 0);
13471
13472 x = XEXP (XEXP (x, 1), 0);
13473 if (GET_CODE (x) == PLUS
13474 && CONST_INT_P (XEXP (x, 1)))
13475 {
13476 const_addend = XEXP (x, 1);
13477 x = XEXP (x, 0);
13478 }
13479
13480 if (GET_CODE (x) == UNSPEC
13481 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13482 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13483 result = XVECEXP (x, 0, 0);
13484
13485 if (TARGET_MACHO && darwin_local_data_pic (x)
13486 && !MEM_P (orig_x))
13487 result = XVECEXP (x, 0, 0);
13488
13489 if (! result)
13490 return ix86_delegitimize_tls_address (orig_x);
13491
13492 if (const_addend)
13493 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13494 if (reg_addend)
13495 result = gen_rtx_PLUS (Pmode, reg_addend, result);
13496 if (addend)
13497 {
13498 /* If the rest of original X doesn't involve the PIC register, add
13499 addend and subtract pic_offset_table_rtx. This can happen e.g.
13500 for code like:
13501 leal (%ebx, %ecx, 4), %ecx
13502 ...
13503 movl foo@GOTOFF(%ecx), %edx
13504 in which case we return (%ecx - %ebx) + foo. */
13505 if (pic_offset_table_rtx)
13506 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13507 pic_offset_table_rtx),
13508 result);
13509 else
13510 return orig_x;
13511 }
13512 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13513 {
13514 result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13515 if (result == NULL_RTX)
13516 return orig_x;
13517 }
13518 return result;
13519 }
13520
13521 /* If X is a machine specific address (i.e. a symbol or label being
13522 referenced as a displacement from the GOT implemented using an
13523 UNSPEC), then return the base term. Otherwise return X. */
13524
13525 rtx
13526 ix86_find_base_term (rtx x)
13527 {
13528 rtx term;
13529
13530 if (TARGET_64BIT)
13531 {
13532 if (GET_CODE (x) != CONST)
13533 return x;
13534 term = XEXP (x, 0);
13535 if (GET_CODE (term) == PLUS
13536 && (CONST_INT_P (XEXP (term, 1))
13537 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13538 term = XEXP (term, 0);
13539 if (GET_CODE (term) != UNSPEC
13540 || (XINT (term, 1) != UNSPEC_GOTPCREL
13541 && XINT (term, 1) != UNSPEC_PCREL))
13542 return x;
13543
13544 return XVECEXP (term, 0, 0);
13545 }
13546
13547 return ix86_delegitimize_address (x);
13548 }
13549 \f
13550 static void
13551 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13552 int fp, FILE *file)
13553 {
13554 const char *suffix;
13555
13556 if (mode == CCFPmode || mode == CCFPUmode)
13557 {
13558 code = ix86_fp_compare_code_to_integer (code);
13559 mode = CCmode;
13560 }
13561 if (reverse)
13562 code = reverse_condition (code);
13563
13564 switch (code)
13565 {
13566 case EQ:
13567 switch (mode)
13568 {
13569 case CCAmode:
13570 suffix = "a";
13571 break;
13572
13573 case CCCmode:
13574 suffix = "c";
13575 break;
13576
13577 case CCOmode:
13578 suffix = "o";
13579 break;
13580
13581 case CCSmode:
13582 suffix = "s";
13583 break;
13584
13585 default:
13586 suffix = "e";
13587 }
13588 break;
13589 case NE:
13590 switch (mode)
13591 {
13592 case CCAmode:
13593 suffix = "na";
13594 break;
13595
13596 case CCCmode:
13597 suffix = "nc";
13598 break;
13599
13600 case CCOmode:
13601 suffix = "no";
13602 break;
13603
13604 case CCSmode:
13605 suffix = "ns";
13606 break;
13607
13608 default:
13609 suffix = "ne";
13610 }
13611 break;
13612 case GT:
13613 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13614 suffix = "g";
13615 break;
13616 case GTU:
13617 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13618 Those same assemblers have the same but opposite lossage on cmov. */
13619 if (mode == CCmode)
13620 suffix = fp ? "nbe" : "a";
13621 else if (mode == CCCmode)
13622 suffix = "b";
13623 else
13624 gcc_unreachable ();
13625 break;
13626 case LT:
13627 switch (mode)
13628 {
13629 case CCNOmode:
13630 case CCGOCmode:
13631 suffix = "s";
13632 break;
13633
13634 case CCmode:
13635 case CCGCmode:
13636 suffix = "l";
13637 break;
13638
13639 default:
13640 gcc_unreachable ();
13641 }
13642 break;
13643 case LTU:
13644 gcc_assert (mode == CCmode || mode == CCCmode);
13645 suffix = "b";
13646 break;
13647 case GE:
13648 switch (mode)
13649 {
13650 case CCNOmode:
13651 case CCGOCmode:
13652 suffix = "ns";
13653 break;
13654
13655 case CCmode:
13656 case CCGCmode:
13657 suffix = "ge";
13658 break;
13659
13660 default:
13661 gcc_unreachable ();
13662 }
13663 break;
13664 case GEU:
13665 /* ??? As above. */
13666 gcc_assert (mode == CCmode || mode == CCCmode);
13667 suffix = fp ? "nb" : "ae";
13668 break;
13669 case LE:
13670 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13671 suffix = "le";
13672 break;
13673 case LEU:
13674 /* ??? As above. */
13675 if (mode == CCmode)
13676 suffix = "be";
13677 else if (mode == CCCmode)
13678 suffix = fp ? "nb" : "ae";
13679 else
13680 gcc_unreachable ();
13681 break;
13682 case UNORDERED:
13683 suffix = fp ? "u" : "p";
13684 break;
13685 case ORDERED:
13686 suffix = fp ? "nu" : "np";
13687 break;
13688 default:
13689 gcc_unreachable ();
13690 }
13691 fputs (suffix, file);
13692 }
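
/* A few example mappings produced above (AT&T condition suffixes):

     (GT,  CCGCmode)  -> "g"
     (GTU, CCmode)    -> "a"  ("nbe" when FP is nonzero)
     (LTU, CCCmode)   -> "b"
     (EQ,  CCmode)    -> "e"

   With REVERSE nonzero the condition is inverted first, so GT prints
   "le" instead.  */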
13693
13694 /* Print the name of register X to FILE based on its machine mode and number.
13695 If CODE is 'w', pretend the mode is HImode.
13696 If CODE is 'b', pretend the mode is QImode.
13697 If CODE is 'k', pretend the mode is SImode.
13698 If CODE is 'q', pretend the mode is DImode.
13699 If CODE is 'x', pretend the mode is V4SFmode.
13700 If CODE is 't', pretend the mode is V8SFmode.
13701 If CODE is 'h', pretend the reg is the 'high' byte register.
13702 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack operand.
13703 If CODE is 'd', duplicate the operand for AVX instruction.
13704 */
13705
13706 void
13707 print_reg (rtx x, int code, FILE *file)
13708 {
13709 const char *reg;
13710 bool duplicated = code == 'd' && TARGET_AVX;
13711
13712 gcc_assert (x == pc_rtx
13713 || (REGNO (x) != ARG_POINTER_REGNUM
13714 && REGNO (x) != FRAME_POINTER_REGNUM
13715 && REGNO (x) != FLAGS_REG
13716 && REGNO (x) != FPSR_REG
13717 && REGNO (x) != FPCR_REG));
13718
13719 if (ASSEMBLER_DIALECT == ASM_ATT)
13720 putc ('%', file);
13721
13722 if (x == pc_rtx)
13723 {
13724 gcc_assert (TARGET_64BIT);
13725 fputs ("rip", file);
13726 return;
13727 }
13728
13729 if (code == 'w' || MMX_REG_P (x))
13730 code = 2;
13731 else if (code == 'b')
13732 code = 1;
13733 else if (code == 'k')
13734 code = 4;
13735 else if (code == 'q')
13736 code = 8;
13737 else if (code == 'y')
13738 code = 3;
13739 else if (code == 'h')
13740 code = 0;
13741 else if (code == 'x')
13742 code = 16;
13743 else if (code == 't')
13744 code = 32;
13745 else
13746 code = GET_MODE_SIZE (GET_MODE (x));
13747
13748 /* Irritatingly, AMD extended registers use a different naming convention
13749 from the normal registers. */
13750 if (REX_INT_REG_P (x))
13751 {
13752 gcc_assert (TARGET_64BIT);
13753 switch (code)
13754 {
13755 case 0:
13756 error ("extended registers have no high halves");
13757 break;
13758 case 1:
13759 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13760 break;
13761 case 2:
13762 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13763 break;
13764 case 4:
13765 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13766 break;
13767 case 8:
13768 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13769 break;
13770 default:
13771 error ("unsupported operand size for extended register");
13772 break;
13773 }
13774 return;
13775 }
13776
13777 reg = NULL;
13778 switch (code)
13779 {
13780 case 3:
13781 if (STACK_TOP_P (x))
13782 {
13783 reg = "st(0)";
13784 break;
13785 }
13786 /* FALLTHRU */
13787 case 8:
13788 case 4:
13789 case 12:
13790 if (! ANY_FP_REG_P (x))
13791 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13792 /* FALLTHRU */
13793 case 16:
13794 case 2:
13795 normal:
13796 reg = hi_reg_name[REGNO (x)];
13797 break;
13798 case 1:
13799 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13800 goto normal;
13801 reg = qi_reg_name[REGNO (x)];
13802 break;
13803 case 0:
13804 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13805 goto normal;
13806 reg = qi_high_reg_name[REGNO (x)];
13807 break;
13808 case 32:
13809 if (SSE_REG_P (x))
13810 {
13811 gcc_assert (!duplicated);
13812 putc ('y', file);
13813 fputs (hi_reg_name[REGNO (x)] + 1, file);
13814 return;
13815 }
13816 break;
13817 default:
13818 gcc_unreachable ();
13819 }
13820
13821 fputs (reg, file);
13822 if (duplicated)
13823 {
13824 if (ASSEMBLER_DIALECT == ASM_ATT)
13825 fprintf (file, ", %%%s", reg);
13826 else
13827 fprintf (file, ", %s", reg);
13828 }
13829 }
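
/* Examples of the effect of CODE in the AT&T dialect (the register-name
   tables are assumed to hold the usual names): for %eax, 'b' prints
   "%al", 'w' "%ax", 'k' "%eax", 'q' "%rax" and 'h' "%ah"; for the AMD
   extended register r8, 'k' prints "%r8d" and 'b' "%r8b".  With 'd' and
   AVX enabled the name is printed twice, e.g. "%xmm1, %xmm1".  */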
13830
13831 /* Locate some local-dynamic symbol still in use by this function
13832 so that we can print its name in some tls_local_dynamic_base
13833 pattern. */
13834
13835 static int
13836 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13837 {
13838 rtx x = *px;
13839
13840 if (GET_CODE (x) == SYMBOL_REF
13841 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13842 {
13843 cfun->machine->some_ld_name = XSTR (x, 0);
13844 return 1;
13845 }
13846
13847 return 0;
13848 }
13849
13850 static const char *
13851 get_some_local_dynamic_name (void)
13852 {
13853 rtx insn;
13854
13855 if (cfun->machine->some_ld_name)
13856 return cfun->machine->some_ld_name;
13857
13858 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13859 if (NONDEBUG_INSN_P (insn)
13860 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13861 return cfun->machine->some_ld_name;
13862
13863 return NULL;
13864 }
13865
13866 /* Meaning of CODE:
13867 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13868 C -- print opcode suffix for set/cmov insn.
13869 c -- like C, but print reversed condition
13870 F,f -- likewise, but for floating-point.
13871 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13872 otherwise nothing
13873 R -- print the prefix for register names.
13874 z -- print the opcode suffix for the size of the current operand.
13875 Z -- likewise, with special suffixes for x87 instructions.
13876 * -- print a star (in certain assembler syntax)
13877 A -- print an absolute memory reference.
13878 w -- print the operand as if it's a "word" (HImode) even if it isn't.
13879 s -- print a shift double count, followed by the assembler's argument
13880 delimiter.
13881 b -- print the QImode name of the register for the indicated operand.
13882 %b0 would print %al if operands[0] is reg 0.
13883 w -- likewise, print the HImode name of the register.
13884 k -- likewise, print the SImode name of the register.
13885 q -- likewise, print the DImode name of the register.
13886 x -- likewise, print the V4SFmode name of the register.
13887 t -- likewise, print the V8SFmode name of the register.
13888 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13889 y -- print "st(0)" instead of "st" as a register.
13890 d -- print duplicated register operand for AVX instruction.
13891 D -- print condition for SSE cmp instruction.
13892 P -- if PIC, print an @PLT suffix.
13893 X -- don't print any sort of PIC '@' suffix for a symbol.
13894 & -- print some in-use local-dynamic symbol name.
13895 H -- print a memory address offset by 8; used for sse high-parts
13896 Y -- print condition for XOP pcom* instruction.
13897 + -- print a branch hint as 'cs' or 'ds' prefix
13898 ; -- print a semicolon (after prefixes due to bug in older gas).
13899 @ -- print a segment register of thread base pointer load
13900 */
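
/* For instance, in a hypothetical output template "add%z0\t%1, %0" the
   "%z0" expands to "l" for an SImode operand 0 and "q" for DImode, "%b1"
   prints the QImode name of operand 1 (e.g. "%al"), and "%+" before a
   jump may emit the "cs ; " / "ds ; " branch-hint prefixes handled
   below.  */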
13901
13902 void
13903 ix86_print_operand (FILE *file, rtx x, int code)
13904 {
13905 if (code)
13906 {
13907 switch (code)
13908 {
13909 case '*':
13910 if (ASSEMBLER_DIALECT == ASM_ATT)
13911 putc ('*', file);
13912 return;
13913
13914 case '&':
13915 {
13916 const char *name = get_some_local_dynamic_name ();
13917 if (name == NULL)
13918 output_operand_lossage ("'%%&' used without any "
13919 "local dynamic TLS references");
13920 else
13921 assemble_name (file, name);
13922 return;
13923 }
13924
13925 case 'A':
13926 switch (ASSEMBLER_DIALECT)
13927 {
13928 case ASM_ATT:
13929 putc ('*', file);
13930 break;
13931
13932 case ASM_INTEL:
13933 /* Intel syntax. For absolute addresses, registers should not
13934 be surrounded by brackets. */
13935 if (!REG_P (x))
13936 {
13937 putc ('[', file);
13938 ix86_print_operand (file, x, 0);
13939 putc (']', file);
13940 return;
13941 }
13942 break;
13943
13944 default:
13945 gcc_unreachable ();
13946 }
13947
13948 ix86_print_operand (file, x, 0);
13949 return;
13950
13951
13952 case 'L':
13953 if (ASSEMBLER_DIALECT == ASM_ATT)
13954 putc ('l', file);
13955 return;
13956
13957 case 'W':
13958 if (ASSEMBLER_DIALECT == ASM_ATT)
13959 putc ('w', file);
13960 return;
13961
13962 case 'B':
13963 if (ASSEMBLER_DIALECT == ASM_ATT)
13964 putc ('b', file);
13965 return;
13966
13967 case 'Q':
13968 if (ASSEMBLER_DIALECT == ASM_ATT)
13969 putc ('l', file);
13970 return;
13971
13972 case 'S':
13973 if (ASSEMBLER_DIALECT == ASM_ATT)
13974 putc ('s', file);
13975 return;
13976
13977 case 'T':
13978 if (ASSEMBLER_DIALECT == ASM_ATT)
13979 putc ('t', file);
13980 return;
13981
13982 case 'z':
13983 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13984 {
13985 /* Opcodes don't get size suffixes when using Intel syntax. */
13986 if (ASSEMBLER_DIALECT == ASM_INTEL)
13987 return;
13988
13989 switch (GET_MODE_SIZE (GET_MODE (x)))
13990 {
13991 case 1:
13992 putc ('b', file);
13993 return;
13994
13995 case 2:
13996 putc ('w', file);
13997 return;
13998
13999 case 4:
14000 putc ('l', file);
14001 return;
14002
14003 case 8:
14004 putc ('q', file);
14005 return;
14006
14007 default:
14008 output_operand_lossage
14009 ("invalid operand size for operand code '%c'", code);
14010 return;
14011 }
14012 }
14013
14014 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14015 warning
14016 (0, "non-integer operand used with operand code '%c'", code);
14017 /* FALLTHRU */
14018
14019 case 'Z':
14020 /* 387 opcodes don't get size suffixes when using Intel syntax. */
14021 if (ASSEMBLER_DIALECT == ASM_INTEL)
14022 return;
14023
14024 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14025 {
14026 switch (GET_MODE_SIZE (GET_MODE (x)))
14027 {
14028 case 2:
14029 #ifdef HAVE_AS_IX86_FILDS
14030 putc ('s', file);
14031 #endif
14032 return;
14033
14034 case 4:
14035 putc ('l', file);
14036 return;
14037
14038 case 8:
14039 #ifdef HAVE_AS_IX86_FILDQ
14040 putc ('q', file);
14041 #else
14042 fputs ("ll", file);
14043 #endif
14044 return;
14045
14046 default:
14047 break;
14048 }
14049 }
14050 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14051 {
14052 /* 387 opcodes don't get size suffixes
14053 if the operands are registers. */
14054 if (STACK_REG_P (x))
14055 return;
14056
14057 switch (GET_MODE_SIZE (GET_MODE (x)))
14058 {
14059 case 4:
14060 putc ('s', file);
14061 return;
14062
14063 case 8:
14064 putc ('l', file);
14065 return;
14066
14067 case 12:
14068 case 16:
14069 putc ('t', file);
14070 return;
14071
14072 default:
14073 break;
14074 }
14075 }
14076 else
14077 {
14078 output_operand_lossage
14079 ("invalid operand type used with operand code '%c'", code);
14080 return;
14081 }
14082
14083 output_operand_lossage
14084 ("invalid operand size for operand code '%c'", code);
14085 return;
14086
14087 case 'd':
14088 case 'b':
14089 case 'w':
14090 case 'k':
14091 case 'q':
14092 case 'h':
14093 case 't':
14094 case 'y':
14095 case 'x':
14096 case 'X':
14097 case 'P':
14098 break;
14099
14100 case 's':
14101 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
14102 {
14103 ix86_print_operand (file, x, 0);
14104 fputs (", ", file);
14105 }
14106 return;
14107
14108 case 'D':
14109 /* Little bit of braindamage here. The SSE compare instructions
14110 use completely different names for the comparisons than the
14111 fp conditional moves do. */
14112 if (TARGET_AVX)
14113 {
14114 switch (GET_CODE (x))
14115 {
14116 case EQ:
14117 fputs ("eq", file);
14118 break;
14119 case UNEQ:
14120 fputs ("eq_us", file);
14121 break;
14122 case LT:
14123 fputs ("lt", file);
14124 break;
14125 case UNLT:
14126 fputs ("nge", file);
14127 break;
14128 case LE:
14129 fputs ("le", file);
14130 break;
14131 case UNLE:
14132 fputs ("ngt", file);
14133 break;
14134 case UNORDERED:
14135 fputs ("unord", file);
14136 break;
14137 case NE:
14138 fputs ("neq", file);
14139 break;
14140 case LTGT:
14141 fputs ("neq_oq", file);
14142 break;
14143 case GE:
14144 fputs ("ge", file);
14145 break;
14146 case UNGE:
14147 fputs ("nlt", file);
14148 break;
14149 case GT:
14150 fputs ("gt", file);
14151 break;
14152 case UNGT:
14153 fputs ("nle", file);
14154 break;
14155 case ORDERED:
14156 fputs ("ord", file);
14157 break;
14158 default:
14159 output_operand_lossage ("operand is not a condition code, "
14160 "invalid operand code 'D'");
14161 return;
14162 }
14163 }
14164 else
14165 {
14166 switch (GET_CODE (x))
14167 {
14168 case EQ:
14169 case UNEQ:
14170 fputs ("eq", file);
14171 break;
14172 case LT:
14173 case UNLT:
14174 fputs ("lt", file);
14175 break;
14176 case LE:
14177 case UNLE:
14178 fputs ("le", file);
14179 break;
14180 case UNORDERED:
14181 fputs ("unord", file);
14182 break;
14183 case NE:
14184 case LTGT:
14185 fputs ("neq", file);
14186 break;
14187 case UNGE:
14188 case GE:
14189 fputs ("nlt", file);
14190 break;
14191 case UNGT:
14192 case GT:
14193 fputs ("nle", file);
14194 break;
14195 case ORDERED:
14196 fputs ("ord", file);
14197 break;
14198 default:
14199 output_operand_lossage ("operand is not a condition code, "
14200 "invalid operand code 'D'");
14201 return;
14202 }
14203 }
14204 return;
14205 case 'O':
14206 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14207 if (ASSEMBLER_DIALECT == ASM_ATT)
14208 {
14209 switch (GET_MODE (x))
14210 {
14211 case HImode: putc ('w', file); break;
14212 case SImode:
14213 case SFmode: putc ('l', file); break;
14214 case DImode:
14215 case DFmode: putc ('q', file); break;
14216 default: gcc_unreachable ();
14217 }
14218 putc ('.', file);
14219 }
14220 #endif
14221 return;
14222 case 'C':
14223 if (!COMPARISON_P (x))
14224 {
14225 output_operand_lossage ("operand is neither a constant nor a "
14226 "condition code, invalid operand code "
14227 "'C'");
14228 return;
14229 }
14230 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
14231 return;
14232 case 'F':
14233 if (!COMPARISON_P (x))
14234 {
14235 output_operand_lossage ("operand is neither a constant nor a "
14236 "condition code, invalid operand code "
14237 "'F'");
14238 return;
14239 }
14240 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14241 if (ASSEMBLER_DIALECT == ASM_ATT)
14242 putc ('.', file);
14243 #endif
14244 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
14245 return;
14246
14247 /* Like above, but reverse the condition. */
14248 case 'c':
14249 /* Check to see if argument to %c is really a constant
14250 and not a condition code which needs to be reversed. */
14251 if (!COMPARISON_P (x))
14252 {
14253 output_operand_lossage ("operand is neither a constant nor a "
14254 "condition code, invalid operand "
14255 "code 'c'");
14256 return;
14257 }
14258 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
14259 return;
14260 case 'f':
14261 if (!COMPARISON_P (x))
14262 {
14263 output_operand_lossage ("operand is neither a constant nor a "
14264 "condition code, invalid operand "
14265 "code 'f'");
14266 return;
14267 }
14268 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14269 if (ASSEMBLER_DIALECT == ASM_ATT)
14270 putc ('.', file);
14271 #endif
14272 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
14273 return;
14274
14275 case 'H':
14276 /* It doesn't actually matter what mode we use here, as we're
14277 only going to use this for printing. */
14278 x = adjust_address_nv (x, DImode, 8);
14279 break;
14280
14281 case '+':
14282 {
14283 rtx x;
14284
14285 if (!optimize
14286 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
14287 return;
14288
14289 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
14290 if (x)
14291 {
14292 int pred_val = INTVAL (XEXP (x, 0));
14293
14294 if (pred_val < REG_BR_PROB_BASE * 45 / 100
14295 || pred_val > REG_BR_PROB_BASE * 55 / 100)
14296 {
14297 int taken = pred_val > REG_BR_PROB_BASE / 2;
14298 int cputaken = final_forward_branch_p (current_output_insn) == 0;
14299
14300 /* Emit hints only in the case where the default branch
14301 prediction heuristics would fail. */
14302 if (taken != cputaken)
14303 {
14304 /* We use 3e (DS) prefix for taken branches and
14305 2e (CS) prefix for not taken branches. */
14306 if (taken)
14307 fputs ("ds ; ", file);
14308 else
14309 fputs ("cs ; ", file);
14310 }
14311 }
14312 }
14313 return;
14314 }
14315
14316 case 'Y':
14317 switch (GET_CODE (x))
14318 {
14319 case NE:
14320 fputs ("neq", file);
14321 break;
14322 case EQ:
14323 fputs ("eq", file);
14324 break;
14325 case GE:
14326 case GEU:
14327 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14328 break;
14329 case GT:
14330 case GTU:
14331 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14332 break;
14333 case LE:
14334 case LEU:
14335 fputs ("le", file);
14336 break;
14337 case LT:
14338 case LTU:
14339 fputs ("lt", file);
14340 break;
14341 case UNORDERED:
14342 fputs ("unord", file);
14343 break;
14344 case ORDERED:
14345 fputs ("ord", file);
14346 break;
14347 case UNEQ:
14348 fputs ("ueq", file);
14349 break;
14350 case UNGE:
14351 fputs ("nlt", file);
14352 break;
14353 case UNGT:
14354 fputs ("nle", file);
14355 break;
14356 case UNLE:
14357 fputs ("ule", file);
14358 break;
14359 case UNLT:
14360 fputs ("ult", file);
14361 break;
14362 case LTGT:
14363 fputs ("une", file);
14364 break;
14365 default:
14366 output_operand_lossage ("operand is not a condition code, "
14367 "invalid operand code 'Y'");
14368 return;
14369 }
14370 return;
14371
14372 case ';':
14373 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14374 putc (';', file);
14375 #endif
14376 return;
14377
14378 case '@':
14379 if (ASSEMBLER_DIALECT == ASM_ATT)
14380 putc ('%', file);
14381
14382 /* The kernel uses a different segment register for performance
14383 reasons; a system call would not have to trash the userspace
14384 segment register, which would be expensive. */
14385 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14386 fputs ("fs", file);
14387 else
14388 fputs ("gs", file);
14389 return;
14390
14391 default:
14392 output_operand_lossage ("invalid operand code '%c'", code);
14393 }
14394 }
14395
14396 if (REG_P (x))
14397 print_reg (x, code, file);
14398
14399 else if (MEM_P (x))
14400 {
14401 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
14402 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14403 && GET_MODE (x) != BLKmode)
14404 {
14405 const char * size;
14406 switch (GET_MODE_SIZE (GET_MODE (x)))
14407 {
14408 case 1: size = "BYTE"; break;
14409 case 2: size = "WORD"; break;
14410 case 4: size = "DWORD"; break;
14411 case 8: size = "QWORD"; break;
14412 case 12: size = "TBYTE"; break;
14413 case 16:
14414 if (GET_MODE (x) == XFmode)
14415 size = "TBYTE";
14416 else
14417 size = "XMMWORD";
14418 break;
14419 case 32: size = "YMMWORD"; break;
14420 default:
14421 gcc_unreachable ();
14422 }
14423
14424 /* Check for explicit size override (codes 'b', 'w' and 'k') */
14425 if (code == 'b')
14426 size = "BYTE";
14427 else if (code == 'w')
14428 size = "WORD";
14429 else if (code == 'k')
14430 size = "DWORD";
14431
14432 fputs (size, file);
14433 fputs (" PTR ", file);
14434 }
14435
14436 x = XEXP (x, 0);
14437 /* Avoid (%rip) for call operands. */
14438 if (CONSTANT_ADDRESS_P (x) && code == 'P'
14439 && !CONST_INT_P (x))
14440 output_addr_const (file, x);
14441 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14442 output_operand_lossage ("invalid constraints for operand");
14443 else
14444 output_address (x);
14445 }
14446
14447 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14448 {
14449 REAL_VALUE_TYPE r;
14450 long l;
14451
14452 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14453 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14454
14455 if (ASSEMBLER_DIALECT == ASM_ATT)
14456 putc ('$', file);
14457 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14458 if (code == 'q')
14459 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14460 else
14461 fprintf (file, "0x%08x", (unsigned int) l);
14462 }
14463
14464 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14465 {
14466 REAL_VALUE_TYPE r;
14467 long l[2];
14468
14469 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14470 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14471
14472 if (ASSEMBLER_DIALECT == ASM_ATT)
14473 putc ('$', file);
14474 fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14475 }
14476
14477 /* These float cases don't actually occur as immediate operands. */
14478 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14479 {
14480 char dstr[30];
14481
14482 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14483 fputs (dstr, file);
14484 }
14485
14486 else
14487 {
14488 /* We have patterns that allow zero sets of memory, for instance.
14489 In 64-bit mode, we should probably support all 8-byte vectors,
14490 since we can in fact encode that into an immediate. */
14491 if (GET_CODE (x) == CONST_VECTOR)
14492 {
14493 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14494 x = const0_rtx;
14495 }
14496
14497 if (code != 'P')
14498 {
14499 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14500 {
14501 if (ASSEMBLER_DIALECT == ASM_ATT)
14502 putc ('$', file);
14503 }
14504 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14505 || GET_CODE (x) == LABEL_REF)
14506 {
14507 if (ASSEMBLER_DIALECT == ASM_ATT)
14508 putc ('$', file);
14509 else
14510 fputs ("OFFSET FLAT:", file);
14511 }
14512 }
14513 if (CONST_INT_P (x))
14514 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14515 else if (flag_pic || MACHOPIC_INDIRECT)
14516 output_pic_addr_const (file, x, code);
14517 else
14518 output_addr_const (file, x);
14519 }
14520 }
14521
14522 static bool
14523 ix86_print_operand_punct_valid_p (unsigned char code)
14524 {
14525 return (code == '@' || code == '*' || code == '+'
14526 || code == '&' || code == ';');
14527 }
14528 \f
14529 /* Print a memory operand whose address is ADDR. */
14530
14531 static void
14532 ix86_print_operand_address (FILE *file, rtx addr)
14533 {
14534 struct ix86_address parts;
14535 rtx base, index, disp;
14536 int scale;
14537 int ok = ix86_decompose_address (addr, &parts);
14538
14539 gcc_assert (ok);
14540
14541 base = parts.base;
14542 index = parts.index;
14543 disp = parts.disp;
14544 scale = parts.scale;
14545
14546 switch (parts.seg)
14547 {
14548 case SEG_DEFAULT:
14549 break;
14550 case SEG_FS:
14551 case SEG_GS:
14552 if (ASSEMBLER_DIALECT == ASM_ATT)
14553 putc ('%', file);
14554 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14555 break;
14556 default:
14557 gcc_unreachable ();
14558 }
14559
14560 /* Use one byte shorter RIP relative addressing for 64bit mode. */
14561 if (TARGET_64BIT && !base && !index)
14562 {
14563 rtx symbol = disp;
14564
14565 if (GET_CODE (disp) == CONST
14566 && GET_CODE (XEXP (disp, 0)) == PLUS
14567 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14568 symbol = XEXP (XEXP (disp, 0), 0);
14569
14570 if (GET_CODE (symbol) == LABEL_REF
14571 || (GET_CODE (symbol) == SYMBOL_REF
14572 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14573 base = pc_rtx;
14574 }
14575 if (!base && !index)
14576 {
14577 /* A displacement-only address requires special attention. */
14578
14579 if (CONST_INT_P (disp))
14580 {
14581 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14582 fputs ("ds:", file);
14583 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14584 }
14585 else if (flag_pic)
14586 output_pic_addr_const (file, disp, 0);
14587 else
14588 output_addr_const (file, disp);
14589 }
14590 else
14591 {
14592 if (ASSEMBLER_DIALECT == ASM_ATT)
14593 {
14594 if (disp)
14595 {
14596 if (flag_pic)
14597 output_pic_addr_const (file, disp, 0);
14598 else if (GET_CODE (disp) == LABEL_REF)
14599 output_asm_label (disp);
14600 else
14601 output_addr_const (file, disp);
14602 }
14603
14604 putc ('(', file);
14605 if (base)
14606 print_reg (base, 0, file);
14607 if (index)
14608 {
14609 putc (',', file);
14610 print_reg (index, 0, file);
14611 if (scale != 1)
14612 fprintf (file, ",%d", scale);
14613 }
14614 putc (')', file);
14615 }
14616 else
14617 {
14618 rtx offset = NULL_RTX;
14619
14620 if (disp)
14621 {
14622 /* Pull out the offset of a symbol; print any symbol itself. */
14623 if (GET_CODE (disp) == CONST
14624 && GET_CODE (XEXP (disp, 0)) == PLUS
14625 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14626 {
14627 offset = XEXP (XEXP (disp, 0), 1);
14628 disp = gen_rtx_CONST (VOIDmode,
14629 XEXP (XEXP (disp, 0), 0));
14630 }
14631
14632 if (flag_pic)
14633 output_pic_addr_const (file, disp, 0);
14634 else if (GET_CODE (disp) == LABEL_REF)
14635 output_asm_label (disp);
14636 else if (CONST_INT_P (disp))
14637 offset = disp;
14638 else
14639 output_addr_const (file, disp);
14640 }
14641
14642 putc ('[', file);
14643 if (base)
14644 {
14645 print_reg (base, 0, file);
14646 if (offset)
14647 {
14648 if (INTVAL (offset) >= 0)
14649 putc ('+', file);
14650 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14651 }
14652 }
14653 else if (offset)
14654 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14655 else
14656 putc ('0', file);
14657
14658 if (index)
14659 {
14660 putc ('+', file);
14661 print_reg (index, 0, file);
14662 if (scale != 1)
14663 fprintf (file, "*%d", scale);
14664 }
14665 putc (']', file);
14666 }
14667 }
14668 }
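/* Illustrative sketch (example operand values, not taken from any particular
   caller): for an address with base %ebx, index %esi, scale 4 and a constant
   displacement of 16, the two dialects above would print roughly

       AT&T:   16(%ebx,%esi,4)
       Intel:  [ebx+16+esi*4]

   since the Intel branch folds an integer displacement into the base-register
   term before appending the scaled index.  */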
14669
14670 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14671
14672 static bool
14673 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14674 {
14675 rtx op;
14676
14677 if (GET_CODE (x) != UNSPEC)
14678 return false;
14679
14680 op = XVECEXP (x, 0, 0);
14681 switch (XINT (x, 1))
14682 {
14683 case UNSPEC_GOTTPOFF:
14684 output_addr_const (file, op);
14685 /* FIXME: This might be @TPOFF in Sun ld. */
14686 fputs ("@gottpoff", file);
14687 break;
14688 case UNSPEC_TPOFF:
14689 output_addr_const (file, op);
14690 fputs ("@tpoff", file);
14691 break;
14692 case UNSPEC_NTPOFF:
14693 output_addr_const (file, op);
14694 if (TARGET_64BIT)
14695 fputs ("@tpoff", file);
14696 else
14697 fputs ("@ntpoff", file);
14698 break;
14699 case UNSPEC_DTPOFF:
14700 output_addr_const (file, op);
14701 fputs ("@dtpoff", file);
14702 break;
14703 case UNSPEC_GOTNTPOFF:
14704 output_addr_const (file, op);
14705 if (TARGET_64BIT)
14706 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14707 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14708 else
14709 fputs ("@gotntpoff", file);
14710 break;
14711 case UNSPEC_INDNTPOFF:
14712 output_addr_const (file, op);
14713 fputs ("@indntpoff", file);
14714 break;
14715 #if TARGET_MACHO
14716 case UNSPEC_MACHOPIC_OFFSET:
14717 output_addr_const (file, op);
14718 putc ('-', file);
14719 machopic_output_function_base_name (file);
14720 break;
14721 #endif
14722
14723 case UNSPEC_STACK_CHECK:
14724 {
14725 int offset;
14726
14727 gcc_assert (flag_split_stack);
14728
14729 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14730 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14731 #else
14732 gcc_unreachable ();
14733 #endif
14734
14735 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14736 }
14737 break;
14738
14739 default:
14740 return false;
14741 }
14742
14743 return true;
14744 }
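/* Illustrative sketch (hypothetical symbol name): for a thread-local symbol
   "tls_var", UNSPEC_NTPOFF prints "tls_var@tpoff" on 64-bit targets and
   "tls_var@ntpoff" otherwise, while UNSPEC_GOTNTPOFF on a 64-bit target
   prints "tls_var@gottpoff(%rip)" in AT&T syntax, per the cases above.  */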
14745 \f
14746 /* Split one or more double-mode RTL references into pairs of half-mode
14747 references. The RTL can be REG, offsettable MEM, integer constant, or
14748 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14749 split and "num" is its length. lo_half and hi_half are output arrays
14750 that parallel "operands". */
14751
14752 void
14753 split_double_mode (enum machine_mode mode, rtx operands[],
14754 int num, rtx lo_half[], rtx hi_half[])
14755 {
14756 enum machine_mode half_mode;
14757 unsigned int byte;
14758
14759 switch (mode)
14760 {
14761 case TImode:
14762 half_mode = DImode;
14763 break;
14764 case DImode:
14765 half_mode = SImode;
14766 break;
14767 default:
14768 gcc_unreachable ();
14769 }
14770
14771 byte = GET_MODE_SIZE (half_mode);
14772
14773 while (num--)
14774 {
14775 rtx op = operands[num];
14776
14777 /* simplify_subreg refuses to split volatile memory addresses,
14778 but we still have to handle them. */
14779 if (MEM_P (op))
14780 {
14781 lo_half[num] = adjust_address (op, half_mode, 0);
14782 hi_half[num] = adjust_address (op, half_mode, byte);
14783 }
14784 else
14785 {
14786 lo_half[num] = simplify_gen_subreg (half_mode, op,
14787 GET_MODE (op) == VOIDmode
14788 ? mode : GET_MODE (op), 0);
14789 hi_half[num] = simplify_gen_subreg (half_mode, op,
14790 GET_MODE (op) == VOIDmode
14791 ? mode : GET_MODE (op), byte);
14792 }
14793 }
14794 }
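/* Illustrative sketch (example operands): splitting a DImode memory operand
   produces two SImode memory references at byte offsets 0 and 4 via
   adjust_address, while splitting a DImode register or constant goes through
   simplify_gen_subreg, yielding the low and high SImode halves at the same
   byte offsets.  */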
14795 \f
14796 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14797 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14798 is the expression of the binary operation. The output may either be
14799 emitted here, or returned to the caller, like all output_* functions.
14800
14801 There is no guarantee that the operands are the same mode, as they
14802 might be within FLOAT or FLOAT_EXTEND expressions. */
14803
14804 #ifndef SYSV386_COMPAT
14805 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14806 wants to fix the assemblers because that causes incompatibility
14807 with gcc. No-one wants to fix gcc because that causes
14808 incompatibility with assemblers... You can use the option of
14809 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14810 #define SYSV386_COMPAT 1
14811 #endif
14812
14813 const char *
14814 output_387_binary_op (rtx insn, rtx *operands)
14815 {
14816 static char buf[40];
14817 const char *p;
14818 const char *ssep;
14819 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14820
14821 #ifdef ENABLE_CHECKING
14822 /* Even if we do not want to check the inputs, this documents the input
14823 constraints, which helps in understanding the following code. */
14824 if (STACK_REG_P (operands[0])
14825 && ((REG_P (operands[1])
14826 && REGNO (operands[0]) == REGNO (operands[1])
14827 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14828 || (REG_P (operands[2])
14829 && REGNO (operands[0]) == REGNO (operands[2])
14830 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14831 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14832 ; /* ok */
14833 else
14834 gcc_assert (is_sse);
14835 #endif
14836
14837 switch (GET_CODE (operands[3]))
14838 {
14839 case PLUS:
14840 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14841 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14842 p = "fiadd";
14843 else
14844 p = "fadd";
14845 ssep = "vadd";
14846 break;
14847
14848 case MINUS:
14849 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14850 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14851 p = "fisub";
14852 else
14853 p = "fsub";
14854 ssep = "vsub";
14855 break;
14856
14857 case MULT:
14858 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14859 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14860 p = "fimul";
14861 else
14862 p = "fmul";
14863 ssep = "vmul";
14864 break;
14865
14866 case DIV:
14867 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14868 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14869 p = "fidiv";
14870 else
14871 p = "fdiv";
14872 ssep = "vdiv";
14873 break;
14874
14875 default:
14876 gcc_unreachable ();
14877 }
14878
14879 if (is_sse)
14880 {
14881 if (TARGET_AVX)
14882 {
14883 strcpy (buf, ssep);
14884 if (GET_MODE (operands[0]) == SFmode)
14885 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14886 else
14887 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14888 }
14889 else
14890 {
14891 strcpy (buf, ssep + 1);
14892 if (GET_MODE (operands[0]) == SFmode)
14893 strcat (buf, "ss\t{%2, %0|%0, %2}");
14894 else
14895 strcat (buf, "sd\t{%2, %0|%0, %2}");
14896 }
14897 return buf;
14898 }
14899 strcpy (buf, p);
14900
14901 switch (GET_CODE (operands[3]))
14902 {
14903 case MULT:
14904 case PLUS:
14905 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14906 {
14907 rtx temp = operands[2];
14908 operands[2] = operands[1];
14909 operands[1] = temp;
14910 }
14911
14912 /* We know operands[0] == operands[1]. */
14913
14914 if (MEM_P (operands[2]))
14915 {
14916 p = "%Z2\t%2";
14917 break;
14918 }
14919
14920 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14921 {
14922 if (STACK_TOP_P (operands[0]))
14923 /* How is it that we are storing to a dead operand[2]?
14924 Well, presumably operands[1] is dead too. We can't
14925 store the result to st(0) as st(0) gets popped on this
14926 instruction. Instead store to operands[2] (which I
14927 think has to be st(1)). st(1) will be popped later.
14928 gcc <= 2.8.1 didn't have this check and generated
14929 assembly code that the Unixware assembler rejected. */
14930 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14931 else
14932 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14933 break;
14934 }
14935
14936 if (STACK_TOP_P (operands[0]))
14937 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14938 else
14939 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14940 break;
14941
14942 case MINUS:
14943 case DIV:
14944 if (MEM_P (operands[1]))
14945 {
14946 p = "r%Z1\t%1";
14947 break;
14948 }
14949
14950 if (MEM_P (operands[2]))
14951 {
14952 p = "%Z2\t%2";
14953 break;
14954 }
14955
14956 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14957 {
14958 #if SYSV386_COMPAT
14959 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
14960 derived assemblers, confusingly reverse the direction of
14961 the operation for fsub{r} and fdiv{r} when the
14962 destination register is not st(0). The Intel assembler
14963 doesn't have this brain damage. Read !SYSV386_COMPAT to
14964 figure out what the hardware really does. */
14965 if (STACK_TOP_P (operands[0]))
14966 p = "{p\t%0, %2|rp\t%2, %0}";
14967 else
14968 p = "{rp\t%2, %0|p\t%0, %2}";
14969 #else
14970 if (STACK_TOP_P (operands[0]))
14971 /* As above for fmul/fadd, we can't store to st(0). */
14972 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14973 else
14974 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14975 #endif
14976 break;
14977 }
14978
14979 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
14980 {
14981 #if SYSV386_COMPAT
14982 if (STACK_TOP_P (operands[0]))
14983 p = "{rp\t%0, %1|p\t%1, %0}";
14984 else
14985 p = "{p\t%1, %0|rp\t%0, %1}";
14986 #else
14987 if (STACK_TOP_P (operands[0]))
14988 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
14989 else
14990 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
14991 #endif
14992 break;
14993 }
14994
14995 if (STACK_TOP_P (operands[0]))
14996 {
14997 if (STACK_TOP_P (operands[1]))
14998 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14999 else
15000 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
15001 break;
15002 }
15003 else if (STACK_TOP_P (operands[1]))
15004 {
15005 #if SYSV386_COMPAT
15006 p = "{\t%1, %0|r\t%0, %1}";
15007 #else
15008 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
15009 #endif
15010 }
15011 else
15012 {
15013 #if SYSV386_COMPAT
15014 p = "{r\t%2, %0|\t%0, %2}";
15015 #else
15016 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
15017 #endif
15018 }
15019 break;
15020
15021 default:
15022 gcc_unreachable ();
15023 }
15024
15025 strcat (buf, p);
15026 return buf;
15027 }
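/* Illustrative sketch: for an SFmode addition whose operands are all SSE
   registers, the template returned above is
   "vaddss\t{%2, %1, %0|%0, %1, %2}" when AVX is enabled and
   "addss\t{%2, %0|%0, %2}" otherwise; the x87 paths instead append a variant
   suffix and operand pair to "fadd"/"fsub"/"fmul"/"fdiv".  */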
15028
15029 /* Return needed mode for entity in optimize_mode_switching pass. */
15030
15031 int
15032 ix86_mode_needed (int entity, rtx insn)
15033 {
15034 enum attr_i387_cw mode;
15035
15036 /* The mode UNINITIALIZED is used to store the control word after a
15037 function call or ASM pattern. The mode ANY specifies that the function
15038 has no requirements on the control word and makes no changes in the
15039 bits we are interested in. */
15040
15041 if (CALL_P (insn)
15042 || (NONJUMP_INSN_P (insn)
15043 && (asm_noperands (PATTERN (insn)) >= 0
15044 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
15045 return I387_CW_UNINITIALIZED;
15046
15047 if (recog_memoized (insn) < 0)
15048 return I387_CW_ANY;
15049
15050 mode = get_attr_i387_cw (insn);
15051
15052 switch (entity)
15053 {
15054 case I387_TRUNC:
15055 if (mode == I387_CW_TRUNC)
15056 return mode;
15057 break;
15058
15059 case I387_FLOOR:
15060 if (mode == I387_CW_FLOOR)
15061 return mode;
15062 break;
15063
15064 case I387_CEIL:
15065 if (mode == I387_CW_CEIL)
15066 return mode;
15067 break;
15068
15069 case I387_MASK_PM:
15070 if (mode == I387_CW_MASK_PM)
15071 return mode;
15072 break;
15073
15074 default:
15075 gcc_unreachable ();
15076 }
15077
15078 return I387_CW_ANY;
15079 }
15080
15081 /* Output code to initialize control word copies used by trunc?f?i and
15082 rounding patterns. CURRENT_MODE is set to current control word,
15083 while NEW_MODE is set to new control word. */
15084
15085 void
15086 emit_i387_cw_initialization (int mode)
15087 {
15088 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
15089 rtx new_mode;
15090
15091 enum ix86_stack_slot slot;
15092
15093 rtx reg = gen_reg_rtx (HImode);
15094
15095 emit_insn (gen_x86_fnstcw_1 (stored_mode));
15096 emit_move_insn (reg, copy_rtx (stored_mode));
15097
15098 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
15099 || optimize_function_for_size_p (cfun))
15100 {
15101 switch (mode)
15102 {
15103 case I387_CW_TRUNC:
15104 /* round toward zero (truncate) */
15105 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
15106 slot = SLOT_CW_TRUNC;
15107 break;
15108
15109 case I387_CW_FLOOR:
15110 /* round down toward -oo */
15111 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15112 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
15113 slot = SLOT_CW_FLOOR;
15114 break;
15115
15116 case I387_CW_CEIL:
15117 /* round up toward +oo */
15118 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15119 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
15120 slot = SLOT_CW_CEIL;
15121 break;
15122
15123 case I387_CW_MASK_PM:
15124 /* mask precision exception for nearbyint() */
15125 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15126 slot = SLOT_CW_MASK_PM;
15127 break;
15128
15129 default:
15130 gcc_unreachable ();
15131 }
15132 }
15133 else
15134 {
15135 switch (mode)
15136 {
15137 case I387_CW_TRUNC:
15138 /* round toward zero (truncate) */
15139 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
15140 slot = SLOT_CW_TRUNC;
15141 break;
15142
15143 case I387_CW_FLOOR:
15144 /* round down toward -oo */
15145 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
15146 slot = SLOT_CW_FLOOR;
15147 break;
15148
15149 case I387_CW_CEIL:
15150 /* round up toward +oo */
15151 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
15152 slot = SLOT_CW_CEIL;
15153 break;
15154
15155 case I387_CW_MASK_PM:
15156 /* mask precision exception for nearbyint() */
15157 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15158 slot = SLOT_CW_MASK_PM;
15159 break;
15160
15161 default:
15162 gcc_unreachable ();
15163 }
15164 }
15165
15166 gcc_assert (slot < MAX_386_STACK_LOCALS);
15167
15168 new_mode = assign_386_stack_local (HImode, slot);
15169 emit_move_insn (new_mode, reg);
15170 }
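/* Note on the constants used above (x87 control word layout): bits 10 and 11
   form the rounding-control field, so 0x0400 selects round-down, 0x0800
   round-up and 0x0c00 (both bits) truncation, while bit 5 (0x0020) is the
   precision-exception mask used for nearbyint.  */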
15171
15172 /* Output code for INSN to convert a float to a signed int. OPERANDS
15173 are the insn operands. The output may be [HSD]Imode and the input
15174 operand may be [SDX]Fmode. */
15175
15176 const char *
15177 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
15178 {
15179 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15180 int dimode_p = GET_MODE (operands[0]) == DImode;
15181 int round_mode = get_attr_i387_cw (insn);
15182
15183 /* Jump through a hoop or two for DImode, since the hardware has no
15184 non-popping instruction. We used to do this a different way, but
15185 that was somewhat fragile and broke with post-reload splitters. */
15186 if ((dimode_p || fisttp) && !stack_top_dies)
15187 output_asm_insn ("fld\t%y1", operands);
15188
15189 gcc_assert (STACK_TOP_P (operands[1]));
15190 gcc_assert (MEM_P (operands[0]));
15191 gcc_assert (GET_MODE (operands[1]) != TFmode);
15192
15193 if (fisttp)
15194 output_asm_insn ("fisttp%Z0\t%0", operands);
15195 else
15196 {
15197 if (round_mode != I387_CW_ANY)
15198 output_asm_insn ("fldcw\t%3", operands);
15199 if (stack_top_dies || dimode_p)
15200 output_asm_insn ("fistp%Z0\t%0", operands);
15201 else
15202 output_asm_insn ("fist%Z0\t%0", operands);
15203 if (round_mode != I387_CW_ANY)
15204 output_asm_insn ("fldcw\t%2", operands);
15205 }
15206
15207 return "";
15208 }
15209
15210 /* Output code for x87 ffreep insn. The OPNO argument, which may only
15211 have the values zero or one, indicates the ffreep insn's operand
15212 from the OPERANDS array. */
15213
15214 static const char *
15215 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
15216 {
15217 if (TARGET_USE_FFREEP)
15218 #ifdef HAVE_AS_IX86_FFREEP
15219 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
15220 #else
15221 {
15222 static char retval[32];
15223 int regno = REGNO (operands[opno]);
15224
15225 gcc_assert (FP_REGNO_P (regno));
15226
15227 regno -= FIRST_STACK_REG;
15228
15229 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
15230 return retval;
15231 }
15232 #endif
15233
15234 return opno ? "fstp\t%y1" : "fstp\t%y0";
15235 }
15236
15237
15238 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
15239 should be used. UNORDERED_P is true when fucom should be used. */
15240
15241 const char *
15242 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
15243 {
15244 int stack_top_dies;
15245 rtx cmp_op0, cmp_op1;
15246 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
15247
15248 if (eflags_p)
15249 {
15250 cmp_op0 = operands[0];
15251 cmp_op1 = operands[1];
15252 }
15253 else
15254 {
15255 cmp_op0 = operands[1];
15256 cmp_op1 = operands[2];
15257 }
15258
15259 if (is_sse)
15260 {
15261 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
15262 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
15263 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
15264 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
15265
15266 if (GET_MODE (operands[0]) == SFmode)
15267 if (unordered_p)
15268 return &ucomiss[TARGET_AVX ? 0 : 1];
15269 else
15270 return &comiss[TARGET_AVX ? 0 : 1];
15271 else
15272 if (unordered_p)
15273 return &ucomisd[TARGET_AVX ? 0 : 1];
15274 else
15275 return &comisd[TARGET_AVX ? 0 : 1];
15276 }
15277
15278 gcc_assert (STACK_TOP_P (cmp_op0));
15279
15280 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15281
15282 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
15283 {
15284 if (stack_top_dies)
15285 {
15286 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
15287 return output_387_ffreep (operands, 1);
15288 }
15289 else
15290 return "ftst\n\tfnstsw\t%0";
15291 }
15292
15293 if (STACK_REG_P (cmp_op1)
15294 && stack_top_dies
15295 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
15296 && REGNO (cmp_op1) != FIRST_STACK_REG)
15297 {
15298 /* If the top of the 387 stack dies, and the other operand
15299 is also a stack register that dies, then this must be an
15300 `fcompp' float compare. */
15301
15302 if (eflags_p)
15303 {
15304 /* There is no double popping fcomi variant. Fortunately,
15305 eflags is immune from the fstp's cc clobbering. */
15306 if (unordered_p)
15307 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
15308 else
15309 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
15310 return output_387_ffreep (operands, 0);
15311 }
15312 else
15313 {
15314 if (unordered_p)
15315 return "fucompp\n\tfnstsw\t%0";
15316 else
15317 return "fcompp\n\tfnstsw\t%0";
15318 }
15319 }
15320 else
15321 {
15322 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
15323
15324 static const char * const alt[16] =
15325 {
15326 "fcom%Z2\t%y2\n\tfnstsw\t%0",
15327 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15328 "fucom%Z2\t%y2\n\tfnstsw\t%0",
15329 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15330
15331 "ficom%Z2\t%y2\n\tfnstsw\t%0",
15332 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15333 NULL,
15334 NULL,
15335
15336 "fcomi\t{%y1, %0|%0, %y1}",
15337 "fcomip\t{%y1, %0|%0, %y1}",
15338 "fucomi\t{%y1, %0|%0, %y1}",
15339 "fucomip\t{%y1, %0|%0, %y1}",
15340
15341 NULL,
15342 NULL,
15343 NULL,
15344 NULL
15345 };
15346
15347 int mask;
15348 const char *ret;
15349
15350 mask = eflags_p << 3;
15351 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15352 mask |= unordered_p << 1;
15353 mask |= stack_top_dies;
15354
15355 gcc_assert (mask < 16);
15356 ret = alt[mask];
15357 gcc_assert (ret);
15358
15359 return ret;
15360 }
15361 }
15362
15363 void
15364 ix86_output_addr_vec_elt (FILE *file, int value)
15365 {
15366 const char *directive = ASM_LONG;
15367
15368 #ifdef ASM_QUAD
15369 if (TARGET_64BIT)
15370 directive = ASM_QUAD;
15371 #else
15372 gcc_assert (!TARGET_64BIT);
15373 #endif
15374
15375 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
15376 }
15377
15378 void
15379 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15380 {
15381 const char *directive = ASM_LONG;
15382
15383 #ifdef ASM_QUAD
15384 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15385 directive = ASM_QUAD;
15386 #else
15387 gcc_assert (!TARGET_64BIT);
15388 #endif
15389 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
15390 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15391 fprintf (file, "%s%s%d-%s%d\n",
15392 directive, LPREFIX, value, LPREFIX, rel);
15393 else if (HAVE_AS_GOTOFF_IN_DATA)
15394 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15395 #if TARGET_MACHO
15396 else if (TARGET_MACHO)
15397 {
15398 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15399 machopic_output_function_base_name (file);
15400 putc ('\n', file);
15401 }
15402 #endif
15403 else
15404 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15405 GOT_SYMBOL_NAME, LPREFIX, value);
15406 }
15407 \f
15408 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
15409 for the target. */
15410
15411 void
15412 ix86_expand_clear (rtx dest)
15413 {
15414 rtx tmp;
15415
15416 /* We play register width games, which are only valid after reload. */
15417 gcc_assert (reload_completed);
15418
15419 /* Avoid HImode and its attendant prefix byte. */
15420 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15421 dest = gen_rtx_REG (SImode, REGNO (dest));
15422 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15423
15424 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15425 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15426 {
15427 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15428 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
15429 }
15430
15431 emit_insn (tmp);
15432 }
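/* Illustrative sketch: clearing %eax normally emits "xorl %eax, %eax"
   together with a FLAGS_REG clobber; only when TARGET_USE_MOV0 is set and
   the insn is being optimized for size is the set emitted without the
   clobber, allowing a plain "movl $0, %eax".  */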
15433
15434 /* X is an unchanging MEM. If it is a constant pool reference, return
15435 the constant pool rtx, else NULL. */
15436
15437 rtx
15438 maybe_get_pool_constant (rtx x)
15439 {
15440 x = ix86_delegitimize_address (XEXP (x, 0));
15441
15442 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15443 return get_pool_constant (x);
15444
15445 return NULL_RTX;
15446 }
15447
15448 void
15449 ix86_expand_move (enum machine_mode mode, rtx operands[])
15450 {
15451 rtx op0, op1;
15452 enum tls_model model;
15453
15454 op0 = operands[0];
15455 op1 = operands[1];
15456
15457 if (GET_CODE (op1) == SYMBOL_REF)
15458 {
15459 model = SYMBOL_REF_TLS_MODEL (op1);
15460 if (model)
15461 {
15462 op1 = legitimize_tls_address (op1, model, true);
15463 op1 = force_operand (op1, op0);
15464 if (op1 == op0)
15465 return;
15466 }
15467 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15468 && SYMBOL_REF_DLLIMPORT_P (op1))
15469 op1 = legitimize_dllimport_symbol (op1, false);
15470 }
15471 else if (GET_CODE (op1) == CONST
15472 && GET_CODE (XEXP (op1, 0)) == PLUS
15473 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15474 {
15475 rtx addend = XEXP (XEXP (op1, 0), 1);
15476 rtx symbol = XEXP (XEXP (op1, 0), 0);
15477 rtx tmp = NULL;
15478
15479 model = SYMBOL_REF_TLS_MODEL (symbol);
15480 if (model)
15481 tmp = legitimize_tls_address (symbol, model, true);
15482 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15483 && SYMBOL_REF_DLLIMPORT_P (symbol))
15484 tmp = legitimize_dllimport_symbol (symbol, true);
15485
15486 if (tmp)
15487 {
15488 tmp = force_operand (tmp, NULL);
15489 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15490 op0, 1, OPTAB_DIRECT);
15491 if (tmp == op0)
15492 return;
15493 }
15494 }
15495
15496 if ((flag_pic || MACHOPIC_INDIRECT)
15497 && mode == Pmode && symbolic_operand (op1, Pmode))
15498 {
15499 if (TARGET_MACHO && !TARGET_64BIT)
15500 {
15501 #if TARGET_MACHO
15502 /* dynamic-no-pic */
15503 if (MACHOPIC_INDIRECT)
15504 {
15505 rtx temp = ((reload_in_progress
15506 || ((op0 && REG_P (op0))
15507 && mode == Pmode))
15508 ? op0 : gen_reg_rtx (Pmode));
15509 op1 = machopic_indirect_data_reference (op1, temp);
15510 if (MACHOPIC_PURE)
15511 op1 = machopic_legitimize_pic_address (op1, mode,
15512 temp == op1 ? 0 : temp);
15513 }
15514 if (op0 != op1 && GET_CODE (op0) != MEM)
15515 {
15516 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15517 emit_insn (insn);
15518 return;
15519 }
15520 if (GET_CODE (op0) == MEM)
15521 op1 = force_reg (Pmode, op1);
15522 else
15523 {
15524 rtx temp = op0;
15525 if (GET_CODE (temp) != REG)
15526 temp = gen_reg_rtx (Pmode);
15527 temp = legitimize_pic_address (op1, temp);
15528 if (temp == op0)
15529 return;
15530 op1 = temp;
15531 }
15532 /* dynamic-no-pic */
15533 #endif
15534 }
15535 else
15536 {
15537 if (MEM_P (op0))
15538 op1 = force_reg (Pmode, op1);
15539 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
15540 {
15541 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15542 op1 = legitimize_pic_address (op1, reg);
15543 if (op0 == op1)
15544 return;
15545 }
15546 }
15547 }
15548 else
15549 {
15550 if (MEM_P (op0)
15551 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15552 || !push_operand (op0, mode))
15553 && MEM_P (op1))
15554 op1 = force_reg (mode, op1);
15555
15556 if (push_operand (op0, mode)
15557 && ! general_no_elim_operand (op1, mode))
15558 op1 = copy_to_mode_reg (mode, op1);
15559
15560 /* Force large constants in 64bit compilation into registers
15561 to get them CSEed. */
15562 if (can_create_pseudo_p ()
15563 && (mode == DImode) && TARGET_64BIT
15564 && immediate_operand (op1, mode)
15565 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15566 && !register_operand (op0, mode)
15567 && optimize)
15568 op1 = copy_to_mode_reg (mode, op1);
15569
15570 if (can_create_pseudo_p ()
15571 && FLOAT_MODE_P (mode)
15572 && GET_CODE (op1) == CONST_DOUBLE)
15573 {
15574 /* If we are loading a floating point constant to a register,
15575 force the value to memory now, since we'll get better code
15576 out the back end. */
15577
15578 op1 = validize_mem (force_const_mem (mode, op1));
15579 if (!register_operand (op0, mode))
15580 {
15581 rtx temp = gen_reg_rtx (mode);
15582 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15583 emit_move_insn (op0, temp);
15584 return;
15585 }
15586 }
15587 }
15588
15589 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15590 }
15591
15592 void
15593 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15594 {
15595 rtx op0 = operands[0], op1 = operands[1];
15596 unsigned int align = GET_MODE_ALIGNMENT (mode);
15597
15598 /* Force constants other than zero into memory. We do not know how
15599 the instructions used to build constants modify the upper 64 bits
15600 of the register; once we have that information we may be able
15601 to handle some of them more efficiently. */
15602 if (can_create_pseudo_p ()
15603 && register_operand (op0, mode)
15604 && (CONSTANT_P (op1)
15605 || (GET_CODE (op1) == SUBREG
15606 && CONSTANT_P (SUBREG_REG (op1))))
15607 && !standard_sse_constant_p (op1))
15608 op1 = validize_mem (force_const_mem (mode, op1));
15609
15610 /* We need to check memory alignment for SSE modes since attributes
15611 can make operands unaligned. */
15612 if (can_create_pseudo_p ()
15613 && SSE_REG_MODE_P (mode)
15614 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15615 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15616 {
15617 rtx tmp[2];
15618
15619 /* ix86_expand_vector_move_misalign() does not like constants ... */
15620 if (CONSTANT_P (op1)
15621 || (GET_CODE (op1) == SUBREG
15622 && CONSTANT_P (SUBREG_REG (op1))))
15623 op1 = validize_mem (force_const_mem (mode, op1));
15624
15625 /* ... nor both arguments in memory. */
15626 if (!register_operand (op0, mode)
15627 && !register_operand (op1, mode))
15628 op1 = force_reg (mode, op1);
15629
15630 tmp[0] = op0; tmp[1] = op1;
15631 ix86_expand_vector_move_misalign (mode, tmp);
15632 return;
15633 }
15634
15635 /* Make operand1 a register if it isn't already. */
15636 if (can_create_pseudo_p ()
15637 && !register_operand (op0, mode)
15638 && !register_operand (op1, mode))
15639 {
15640 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15641 return;
15642 }
15643
15644 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15645 }
15646
15647 /* Split 32-byte AVX unaligned load and store if needed. */
15648
15649 static void
15650 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
15651 {
15652 rtx m;
15653 rtx (*extract) (rtx, rtx, rtx);
15654 rtx (*move_unaligned) (rtx, rtx);
15655 enum machine_mode mode;
15656
15657 switch (GET_MODE (op0))
15658 {
15659 default:
15660 gcc_unreachable ();
15661 case V32QImode:
15662 extract = gen_avx_vextractf128v32qi;
15663 move_unaligned = gen_avx_movdqu256;
15664 mode = V16QImode;
15665 break;
15666 case V8SFmode:
15667 extract = gen_avx_vextractf128v8sf;
15668 move_unaligned = gen_avx_movups256;
15669 mode = V4SFmode;
15670 break;
15671 case V4DFmode:
15672 extract = gen_avx_vextractf128v4df;
15673 move_unaligned = gen_avx_movupd256;
15674 mode = V2DFmode;
15675 break;
15676 }
15677
15678 if (MEM_P (op1) && TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
15679 {
15680 rtx r = gen_reg_rtx (mode);
15681 m = adjust_address (op1, mode, 0);
15682 emit_move_insn (r, m);
15683 m = adjust_address (op1, mode, 16);
15684 r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
15685 emit_move_insn (op0, r);
15686 }
15687 else if (MEM_P (op0) && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
15688 {
15689 m = adjust_address (op0, mode, 0);
15690 emit_insn (extract (m, op1, const0_rtx));
15691 m = adjust_address (op0, mode, 16);
15692 emit_insn (extract (m, op1, const1_rtx));
15693 }
15694 else
15695 emit_insn (move_unaligned (op0, op1));
15696 }
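/* Illustrative sketch: with TARGET_AVX256_SPLIT_UNALIGNED_LOAD, a misaligned
   32-byte V8SFmode load becomes two 16-byte loads whose results are combined
   with a VEC_CONCAT (typically a vmovups plus a vinsertf128); with
   TARGET_AVX256_SPLIT_UNALIGNED_STORE, a store becomes two vextractf128
   stores at offsets 0 and 16.  Otherwise a single unaligned 256-bit move is
   emitted.  */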
15697
15698 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15699 straight to ix86_expand_vector_move. */
15700 /* Code generation for scalar reg-reg moves of single and double precision data:
15701 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
15702 movaps reg, reg
15703 else
15704 movss reg, reg
15705 if (x86_sse_partial_reg_dependency == true)
15706 movapd reg, reg
15707 else
15708 movsd reg, reg
15709
15710 Code generation for scalar loads of double precision data:
15711 if (x86_sse_split_regs == true)
15712 movlpd mem, reg (gas syntax)
15713 else
15714 movsd mem, reg
15715
15716 Code generation for unaligned packed loads of single precision data
15717 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
15718 if (x86_sse_unaligned_move_optimal)
15719 movups mem, reg
15720
15721 if (x86_sse_partial_reg_dependency == true)
15722 {
15723 xorps reg, reg
15724 movlps mem, reg
15725 movhps mem+8, reg
15726 }
15727 else
15728 {
15729 movlps mem, reg
15730 movhps mem+8, reg
15731 }
15732
15733 Code generation for unaligned packed loads of double precision data
15734 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
15735 if (x86_sse_unaligned_move_optimal)
15736 movupd mem, reg
15737
15738 if (x86_sse_split_regs == true)
15739 {
15740 movlpd mem, reg
15741 movhpd mem+8, reg
15742 }
15743 else
15744 {
15745 movsd mem, reg
15746 movhpd mem+8, reg
15747 }
15748 */
15749
15750 void
15751 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15752 {
15753 rtx op0, op1, m;
15754
15755 op0 = operands[0];
15756 op1 = operands[1];
15757
15758 if (TARGET_AVX)
15759 {
15760 switch (GET_MODE_CLASS (mode))
15761 {
15762 case MODE_VECTOR_INT:
15763 case MODE_INT:
15764 switch (GET_MODE_SIZE (mode))
15765 {
15766 case 16:
15767 /* If we're optimizing for size, movups is the smallest. */
15768 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15769 {
15770 op0 = gen_lowpart (V4SFmode, op0);
15771 op1 = gen_lowpart (V4SFmode, op1);
15772 emit_insn (gen_sse_movups (op0, op1));
15773 return;
15774 }
15775 op0 = gen_lowpart (V16QImode, op0);
15776 op1 = gen_lowpart (V16QImode, op1);
15777 emit_insn (gen_sse2_movdqu (op0, op1));
15778 break;
15779 case 32:
15780 op0 = gen_lowpart (V32QImode, op0);
15781 op1 = gen_lowpart (V32QImode, op1);
15782 ix86_avx256_split_vector_move_misalign (op0, op1);
15783 break;
15784 default:
15785 gcc_unreachable ();
15786 }
15787 break;
15788 case MODE_VECTOR_FLOAT:
15789 op0 = gen_lowpart (mode, op0);
15790 op1 = gen_lowpart (mode, op1);
15791
15792 switch (mode)
15793 {
15794 case V4SFmode:
15795 emit_insn (gen_sse_movups (op0, op1));
15796 break;
15797 case V8SFmode:
15798 ix86_avx256_split_vector_move_misalign (op0, op1);
15799 break;
15800 case V2DFmode:
15801 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15802 {
15803 op0 = gen_lowpart (V4SFmode, op0);
15804 op1 = gen_lowpart (V4SFmode, op1);
15805 emit_insn (gen_sse_movups (op0, op1));
15806 return;
15807 }
15808 emit_insn (gen_sse2_movupd (op0, op1));
15809 break;
15810 case V4DFmode:
15811 ix86_avx256_split_vector_move_misalign (op0, op1);
15812 break;
15813 default:
15814 gcc_unreachable ();
15815 }
15816 break;
15817
15818 default:
15819 gcc_unreachable ();
15820 }
15821
15822 return;
15823 }
15824
15825 if (MEM_P (op1))
15826 {
15827 /* If we're optimizing for size, movups is the smallest. */
15828 if (optimize_insn_for_size_p ()
15829 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15830 {
15831 op0 = gen_lowpart (V4SFmode, op0);
15832 op1 = gen_lowpart (V4SFmode, op1);
15833 emit_insn (gen_sse_movups (op0, op1));
15834 return;
15835 }
15836
15837 /* ??? If we have typed data, then it would appear that using
15838 movdqu is the only way to get unaligned data loaded with
15839 integer type. */
15840 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15841 {
15842 op0 = gen_lowpart (V16QImode, op0);
15843 op1 = gen_lowpart (V16QImode, op1);
15844 emit_insn (gen_sse2_movdqu (op0, op1));
15845 return;
15846 }
15847
15848 if (TARGET_SSE2 && mode == V2DFmode)
15849 {
15850 rtx zero;
15851
15852 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15853 {
15854 op0 = gen_lowpart (V2DFmode, op0);
15855 op1 = gen_lowpart (V2DFmode, op1);
15856 emit_insn (gen_sse2_movupd (op0, op1));
15857 return;
15858 }
15859
15860 /* When SSE registers are split into halves, we can avoid
15861 writing to the top half twice. */
15862 if (TARGET_SSE_SPLIT_REGS)
15863 {
15864 emit_clobber (op0);
15865 zero = op0;
15866 }
15867 else
15868 {
15869 /* ??? Not sure about the best option for the Intel chips.
15870 The following would seem to satisfy; the register is
15871 entirely cleared, breaking the dependency chain. We
15872 then store to the upper half, with a dependency depth
15873 of one. A rumor has it that Intel recommends two movsd
15874 followed by an unpacklpd, but this is unconfirmed. And
15875 given that the dependency depth of the unpacklpd would
15876 still be one, I'm not sure why this would be better. */
15877 zero = CONST0_RTX (V2DFmode);
15878 }
15879
15880 m = adjust_address (op1, DFmode, 0);
15881 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15882 m = adjust_address (op1, DFmode, 8);
15883 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15884 }
15885 else
15886 {
15887 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15888 {
15889 op0 = gen_lowpart (V4SFmode, op0);
15890 op1 = gen_lowpart (V4SFmode, op1);
15891 emit_insn (gen_sse_movups (op0, op1));
15892 return;
15893 }
15894
15895 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15896 emit_move_insn (op0, CONST0_RTX (mode));
15897 else
15898 emit_clobber (op0);
15899
15900 if (mode != V4SFmode)
15901 op0 = gen_lowpart (V4SFmode, op0);
15902 m = adjust_address (op1, V2SFmode, 0);
15903 emit_insn (gen_sse_loadlps (op0, op0, m));
15904 m = adjust_address (op1, V2SFmode, 8);
15905 emit_insn (gen_sse_loadhps (op0, op0, m));
15906 }
15907 }
15908 else if (MEM_P (op0))
15909 {
15910 /* If we're optimizing for size, movups is the smallest. */
15911 if (optimize_insn_for_size_p ()
15912 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15913 {
15914 op0 = gen_lowpart (V4SFmode, op0);
15915 op1 = gen_lowpart (V4SFmode, op1);
15916 emit_insn (gen_sse_movups (op0, op1));
15917 return;
15918 }
15919
15920 /* ??? Similar to the above, only less clear because of the
15921 so-called "typeless stores". */
15922 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15923 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15924 {
15925 op0 = gen_lowpart (V16QImode, op0);
15926 op1 = gen_lowpart (V16QImode, op1);
15927 emit_insn (gen_sse2_movdqu (op0, op1));
15928 return;
15929 }
15930
15931 if (TARGET_SSE2 && mode == V2DFmode)
15932 {
15933 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15934 {
15935 op0 = gen_lowpart (V2DFmode, op0);
15936 op1 = gen_lowpart (V2DFmode, op1);
15937 emit_insn (gen_sse2_movupd (op0, op1));
15938 }
15939 else
15940 {
15941 m = adjust_address (op0, DFmode, 0);
15942 emit_insn (gen_sse2_storelpd (m, op1));
15943 m = adjust_address (op0, DFmode, 8);
15944 emit_insn (gen_sse2_storehpd (m, op1));
15945 }
15946 }
15947 else
15948 {
15949 if (mode != V4SFmode)
15950 op1 = gen_lowpart (V4SFmode, op1);
15951
15952 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15953 {
15954 op0 = gen_lowpart (V4SFmode, op0);
15955 emit_insn (gen_sse_movups (op0, op1));
15956 }
15957 else
15958 {
15959 m = adjust_address (op0, V2SFmode, 0);
15960 emit_insn (gen_sse_storelps (m, op1));
15961 m = adjust_address (op0, V2SFmode, 8);
15962 emit_insn (gen_sse_storehps (m, op1));
15963 }
15964 }
15965 }
15966 else
15967 gcc_unreachable ();
15968 }
15969
15970 /* Expand a push in MODE. This is some mode for which we do not support
15971 proper push instructions, at least from the registers that we expect
15972 the value to live in. */
15973
15974 void
15975 ix86_expand_push (enum machine_mode mode, rtx x)
15976 {
15977 rtx tmp;
15978
15979 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
15980 GEN_INT (-GET_MODE_SIZE (mode)),
15981 stack_pointer_rtx, 1, OPTAB_DIRECT);
15982 if (tmp != stack_pointer_rtx)
15983 emit_move_insn (stack_pointer_rtx, tmp);
15984
15985 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
15986
15987 /* When we push an operand onto the stack, it has to be aligned at least
15988 at the function argument boundary. However, since we don't have
15989 the argument type, we can't determine the actual argument
15990 boundary. */
15991 emit_move_insn (tmp, x);
15992 }
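/* Illustrative sketch (example mode): pushing a TImode value this way expands
   to a stack-pointer adjustment followed by an ordinary store, roughly
   "sub $16, %rsp" and then a move into (mem:TI (reg sp)), rather than a real
   push instruction.  */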
15993
15994 /* Helper function of ix86_fixup_binary_operands to canonicalize
15995 operand order. Returns true if the operands should be swapped. */
15996
15997 static bool
15998 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
15999 rtx operands[])
16000 {
16001 rtx dst = operands[0];
16002 rtx src1 = operands[1];
16003 rtx src2 = operands[2];
16004
16005 /* If the operation is not commutative, we can't do anything. */
16006 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
16007 return false;
16008
16009 /* Highest priority is that src1 should match dst. */
16010 if (rtx_equal_p (dst, src1))
16011 return false;
16012 if (rtx_equal_p (dst, src2))
16013 return true;
16014
16015 /* Next highest priority is that immediate constants come second. */
16016 if (immediate_operand (src2, mode))
16017 return false;
16018 if (immediate_operand (src1, mode))
16019 return true;
16020
16021 /* Lowest priority is that memory references should come second. */
16022 if (MEM_P (src2))
16023 return false;
16024 if (MEM_P (src1))
16025 return true;
16026
16027 return false;
16028 }
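/* Illustrative sketch: for a commutative PLUS where operands[0] and
   operands[2] name the same register, the function above returns true, so
   the caller swaps src1 and src2 and the two-address constraint (destination
   matching the first source) can be satisfied without an extra copy.  */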
16029
16030
16031 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
16032 destination to use for the operation. If different from the true
16033 destination in operands[0], a copy operation will be required. */
16034
16035 rtx
16036 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
16037 rtx operands[])
16038 {
16039 rtx dst = operands[0];
16040 rtx src1 = operands[1];
16041 rtx src2 = operands[2];
16042
16043 /* Canonicalize operand order. */
16044 if (ix86_swap_binary_operands_p (code, mode, operands))
16045 {
16046 rtx temp;
16047
16048 /* It is invalid to swap operands of different modes. */
16049 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
16050
16051 temp = src1;
16052 src1 = src2;
16053 src2 = temp;
16054 }
16055
16056 /* Both source operands cannot be in memory. */
16057 if (MEM_P (src1) && MEM_P (src2))
16058 {
16059 /* Optimization: Only read from memory once. */
16060 if (rtx_equal_p (src1, src2))
16061 {
16062 src2 = force_reg (mode, src2);
16063 src1 = src2;
16064 }
16065 else
16066 src2 = force_reg (mode, src2);
16067 }
16068
16069 /* If the destination is memory, and we do not have matching source
16070 operands, do things in registers. */
16071 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16072 dst = gen_reg_rtx (mode);
16073
16074 /* Source 1 cannot be a constant. */
16075 if (CONSTANT_P (src1))
16076 src1 = force_reg (mode, src1);
16077
16078 /* Source 1 cannot be a non-matching memory. */
16079 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16080 src1 = force_reg (mode, src1);
16081
16082 operands[1] = src1;
16083 operands[2] = src2;
16084 return dst;
16085 }
16086
16087 /* Similarly, but assume that the destination has already been
16088 set up properly. */
16089
16090 void
16091 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
16092 enum machine_mode mode, rtx operands[])
16093 {
16094 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
16095 gcc_assert (dst == operands[0]);
16096 }
16097
16098 /* Attempt to expand a binary operator. Make the expansion closer to the
16099 actual machine than just general_operand, which would allow 3 separate
16100 memory references (one output, two input) in a single insn. */
16101
16102 void
16103 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
16104 rtx operands[])
16105 {
16106 rtx src1, src2, dst, op, clob;
16107
16108 dst = ix86_fixup_binary_operands (code, mode, operands);
16109 src1 = operands[1];
16110 src2 = operands[2];
16111
16112 /* Emit the instruction. */
16113
16114 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
16115 if (reload_in_progress)
16116 {
16117 /* Reload doesn't know about the flags register, and doesn't know that
16118 it doesn't want to clobber it. We can only do this with PLUS. */
16119 gcc_assert (code == PLUS);
16120 emit_insn (op);
16121 }
16122 else if (reload_completed
16123 && code == PLUS
16124 && !rtx_equal_p (dst, src1))
16125 {
16126 /* This is going to be an LEA; avoid splitting it later. */
16127 emit_insn (op);
16128 }
16129 else
16130 {
16131 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16132 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16133 }
16134
16135 /* Fix up the destination if needed. */
16136 if (dst != operands[0])
16137 emit_move_insn (operands[0], dst);
16138 }
16139
16140 /* Return TRUE or FALSE depending on whether the binary operator meets the
16141 appropriate constraints. */
16142
16143 bool
16144 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
16145 rtx operands[3])
16146 {
16147 rtx dst = operands[0];
16148 rtx src1 = operands[1];
16149 rtx src2 = operands[2];
16150
16151 /* Both source operands cannot be in memory. */
16152 if (MEM_P (src1) && MEM_P (src2))
16153 return false;
16154
16155 /* Canonicalize operand order for commutative operators. */
16156 if (ix86_swap_binary_operands_p (code, mode, operands))
16157 {
16158 rtx temp = src1;
16159 src1 = src2;
16160 src2 = temp;
16161 }
16162
16163 /* If the destination is memory, we must have a matching source operand. */
16164 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16165 return false;
16166
16167 /* Source 1 cannot be a constant. */
16168 if (CONSTANT_P (src1))
16169 return false;
16170
16171 /* Source 1 cannot be a non-matching memory. */
16172 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16173 {
16174 /* Support "andhi/andsi/anddi" as a zero-extending move. */
16175 return (code == AND
16176 && (mode == HImode
16177 || mode == SImode
16178 || (TARGET_64BIT && mode == DImode))
16179 && CONST_INT_P (src2)
16180 && (INTVAL (src2) == 0xff
16181 || INTVAL (src2) == 0xffff));
16182 }
16183
16184 return true;
16185 }
16186
16187 /* Attempt to expand a unary operator. Make the expansion closer to the
16188 actual machine than just general_operand, which would allow 2 separate
16189 memory references (one output, one input) in a single insn. */
16190
16191 void
16192 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
16193 rtx operands[])
16194 {
16195 int matching_memory;
16196 rtx src, dst, op, clob;
16197
16198 dst = operands[0];
16199 src = operands[1];
16200
16201 /* If the destination is memory, and we do not have matching source
16202 operands, do things in registers. */
16203 matching_memory = 0;
16204 if (MEM_P (dst))
16205 {
16206 if (rtx_equal_p (dst, src))
16207 matching_memory = 1;
16208 else
16209 dst = gen_reg_rtx (mode);
16210 }
16211
16212 /* When source operand is memory, destination must match. */
16213 if (MEM_P (src) && !matching_memory)
16214 src = force_reg (mode, src);
16215
16216 /* Emit the instruction. */
16217
16218 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
16219 if (reload_in_progress || code == NOT)
16220 {
16221 /* Reload doesn't know about the flags register, and doesn't know that
16222 it doesn't want to clobber it. */
16223 gcc_assert (code == NOT);
16224 emit_insn (op);
16225 }
16226 else
16227 {
16228 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16229 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16230 }
16231
16232 /* Fix up the destination if needed. */
16233 if (dst != operands[0])
16234 emit_move_insn (operands[0], dst);
16235 }
16236
16237 /* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
16238 divisor are within the range [0-255]. */
16239
16240 void
16241 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
16242 bool signed_p)
16243 {
16244 rtx end_label, qimode_label;
16245 rtx insn, div, mod;
16246 rtx scratch, tmp0, tmp1, tmp2;
16247 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
16248 rtx (*gen_zero_extend) (rtx, rtx);
16249 rtx (*gen_test_ccno_1) (rtx, rtx);
16250
16251 switch (mode)
16252 {
16253 case SImode:
16254 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
16255 gen_test_ccno_1 = gen_testsi_ccno_1;
16256 gen_zero_extend = gen_zero_extendqisi2;
16257 break;
16258 case DImode:
16259 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
16260 gen_test_ccno_1 = gen_testdi_ccno_1;
16261 gen_zero_extend = gen_zero_extendqidi2;
16262 break;
16263 default:
16264 gcc_unreachable ();
16265 }
16266
16267 end_label = gen_label_rtx ();
16268 qimode_label = gen_label_rtx ();
16269
16270 scratch = gen_reg_rtx (mode);
16271
16272   /* Use 8bit unsigned divmod if dividend and divisor are within
16273 the range [0-255]. */
16274 emit_move_insn (scratch, operands[2]);
16275 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
16276 scratch, 1, OPTAB_DIRECT);
16277 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
16278 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
16279 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
16280 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
16281 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
16282 pc_rtx);
16283 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
16284 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16285 JUMP_LABEL (insn) = qimode_label;
16286
16287   /* Generate original signed/unsigned divmod.  */
16288 div = gen_divmod4_1 (operands[0], operands[1],
16289 operands[2], operands[3]);
16290 emit_insn (div);
16291
16292 /* Branch to the end. */
16293 emit_jump_insn (gen_jump (end_label));
16294 emit_barrier ();
16295
16296 /* Generate 8bit unsigned divide. */
16297 emit_label (qimode_label);
16298 /* Don't use operands[0] for result of 8bit divide since not all
16299 registers support QImode ZERO_EXTRACT. */
16300 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
16301 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
16302 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
16303 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
16304
16305 if (signed_p)
16306 {
16307 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
16308 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
16309 }
16310 else
16311 {
16312 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
16313 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
16314 }
16315
16316 /* Extract remainder from AH. */
16317 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
16318 if (REG_P (operands[1]))
16319 insn = emit_move_insn (operands[1], tmp1);
16320 else
16321 {
16322 /* Need a new scratch register since the old one has result
16323 of 8bit divide. */
16324 scratch = gen_reg_rtx (mode);
16325 emit_move_insn (scratch, tmp1);
16326 insn = emit_move_insn (operands[1], scratch);
16327 }
16328 set_unique_reg_note (insn, REG_EQUAL, mod);
16329
16330 /* Zero extend quotient from AL. */
16331 tmp1 = gen_lowpart (QImode, tmp0);
16332 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
16333 set_unique_reg_note (insn, REG_EQUAL, div);
16334
16335 emit_label (end_label);
16336 }
16337
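/* For illustration, the split above produces code along these lines for an
   unsigned SImode division (a rough sketch of the generated assembly, not a
   verbatim dump; register names are placeholders):

       movl    dividend, scratch
       orl     divisor, scratch
       testl   $-0x100, scratch          # any bits above bit 7 set?
       je      .Lqimode
       xorl    %edx, %edx                # full-width divide
       divl    divisor                   # EAX = quotient, EDX = remainder
       jmp     .Lend
   .Lqimode:
       divb    divisor_byte              # AX / divisor8: AL = quot, AH = rem
       movzbl  %al, quotient             # zero extend quotient from AL
                                         # remainder is extracted from AH
   .Lend:                                                                   */
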
16338 #define LEA_SEARCH_THRESHOLD 12
16339
16340 /* Search backward for a non-agu definition of register number REGNO1
16341    or register number REGNO2 in INSN's basic block until we either
16342    1. pass LEA_SEARCH_THRESHOLD instructions, or
16343    2. reach the BB boundary, or
16344    3. reach an agu definition.
16345    Returns the distance between the non-agu definition point and INSN.
16346    If there is no definition point, returns -1.  */
16347
16348 static int
16349 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
16350 rtx insn)
16351 {
16352 basic_block bb = BLOCK_FOR_INSN (insn);
16353 int distance = 0;
16354 df_ref *def_rec;
16355 enum attr_type insn_type;
16356
16357 if (insn != BB_HEAD (bb))
16358 {
16359 rtx prev = PREV_INSN (insn);
16360 while (prev && distance < LEA_SEARCH_THRESHOLD)
16361 {
16362 if (NONDEBUG_INSN_P (prev))
16363 {
16364 distance++;
16365 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16366 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16367 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16368 && (regno1 == DF_REF_REGNO (*def_rec)
16369 || regno2 == DF_REF_REGNO (*def_rec)))
16370 {
16371 insn_type = get_attr_type (prev);
16372 if (insn_type != TYPE_LEA)
16373 goto done;
16374 }
16375 }
16376 if (prev == BB_HEAD (bb))
16377 break;
16378 prev = PREV_INSN (prev);
16379 }
16380 }
16381
16382 if (distance < LEA_SEARCH_THRESHOLD)
16383 {
16384 edge e;
16385 edge_iterator ei;
16386 bool simple_loop = false;
16387
16388 FOR_EACH_EDGE (e, ei, bb->preds)
16389 if (e->src == bb)
16390 {
16391 simple_loop = true;
16392 break;
16393 }
16394
16395 if (simple_loop)
16396 {
16397 rtx prev = BB_END (bb);
16398 while (prev
16399 && prev != insn
16400 && distance < LEA_SEARCH_THRESHOLD)
16401 {
16402 if (NONDEBUG_INSN_P (prev))
16403 {
16404 distance++;
16405 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16406 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16407 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16408 && (regno1 == DF_REF_REGNO (*def_rec)
16409 || regno2 == DF_REF_REGNO (*def_rec)))
16410 {
16411 insn_type = get_attr_type (prev);
16412 if (insn_type != TYPE_LEA)
16413 goto done;
16414 }
16415 }
16416 prev = PREV_INSN (prev);
16417 }
16418 }
16419 }
16420
16421 distance = -1;
16422
16423 done:
16424 /* get_attr_type may modify recog data. We want to make sure
16425 that recog data is valid for instruction INSN, on which
16426 distance_non_agu_define is called. INSN is unchanged here. */
16427 extract_insn_cached (insn);
16428 return distance;
16429 }
16430
16431 /* Return the distance between INSN and the next insn that uses
16432    register number REGNO0 in a memory address.  Return -1 if no such
16433    use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set.  */
16434
16435 static int
16436 distance_agu_use (unsigned int regno0, rtx insn)
16437 {
16438 basic_block bb = BLOCK_FOR_INSN (insn);
16439 int distance = 0;
16440 df_ref *def_rec;
16441 df_ref *use_rec;
16442
16443 if (insn != BB_END (bb))
16444 {
16445 rtx next = NEXT_INSN (insn);
16446 while (next && distance < LEA_SEARCH_THRESHOLD)
16447 {
16448 if (NONDEBUG_INSN_P (next))
16449 {
16450 distance++;
16451
16452 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16453 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16454 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16455 && regno0 == DF_REF_REGNO (*use_rec))
16456 {
16457 /* Return DISTANCE if OP0 is used in memory
16458 address in NEXT. */
16459 return distance;
16460 }
16461
16462 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16463 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16464 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16465 && regno0 == DF_REF_REGNO (*def_rec))
16466 {
16467 /* Return -1 if OP0 is set in NEXT. */
16468 return -1;
16469 }
16470 }
16471 if (next == BB_END (bb))
16472 break;
16473 next = NEXT_INSN (next);
16474 }
16475 }
16476
16477 if (distance < LEA_SEARCH_THRESHOLD)
16478 {
16479 edge e;
16480 edge_iterator ei;
16481 bool simple_loop = false;
16482
16483 FOR_EACH_EDGE (e, ei, bb->succs)
16484 if (e->dest == bb)
16485 {
16486 simple_loop = true;
16487 break;
16488 }
16489
16490 if (simple_loop)
16491 {
16492 rtx next = BB_HEAD (bb);
16493 while (next
16494 && next != insn
16495 && distance < LEA_SEARCH_THRESHOLD)
16496 {
16497 if (NONDEBUG_INSN_P (next))
16498 {
16499 distance++;
16500
16501 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16502 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16503 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16504 && regno0 == DF_REF_REGNO (*use_rec))
16505 {
16506 /* Return DISTANCE if OP0 is used in memory
16507 address in NEXT. */
16508 return distance;
16509 }
16510
16511 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16512 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16513 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16514 && regno0 == DF_REF_REGNO (*def_rec))
16515 {
16516 /* Return -1 if OP0 is set in NEXT. */
16517 return -1;
16518 }
16519
16520 }
16521 next = NEXT_INSN (next);
16522 }
16523 }
16524 }
16525
16526 return -1;
16527 }
16528
16529 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
16530    there is a choice between LEA and ADD:
16531    Negative value: ADD is preferred over LEA
16532    Zero: neutral
16533    Positive value: LEA is preferred over ADD.  */
16534 #define IX86_LEA_PRIORITY 2
16535
16536 /* Return true if it is ok to optimize an ADD operation to an LEA
16537    operation to avoid flag register consumption.  For most processors,
16538    ADD is faster than LEA.  For processors like Atom, if the destination
16539    register of the LEA holds an actual address which will be used soon,
16540    LEA is better; otherwise ADD is better.  */
16541
16542 bool
16543 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16544 {
16545 unsigned int regno0 = true_regnum (operands[0]);
16546 unsigned int regno1 = true_regnum (operands[1]);
16547 unsigned int regno2 = true_regnum (operands[2]);
16548
16549   /* If a = b + c, (a!=b && a!=c), we must use the lea form.  */
16550 if (regno0 != regno1 && regno0 != regno2)
16551 return true;
16552
16553 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16554 return false;
16555 else
16556 {
16557 int dist_define, dist_use;
16558
16559 /* Return false if REGNO0 isn't used in memory address. */
16560 dist_use = distance_agu_use (regno0, insn);
16561 if (dist_use <= 0)
16562 return false;
16563
16564 dist_define = distance_non_agu_define (regno1, regno2, insn);
16565 if (dist_define <= 0)
16566 return true;
16567
16568      /* If this insn has both a backward non-agu dependence and a forward
16569         agu dependence, the one with the shorter distance takes effect.  */
16570 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
16571 return false;
16572
16573 return true;
16574 }
16575 }
16576
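/* Condensed, the decision above amounts to the following (an illustrative
   sketch, assuming TARGET_OPT_AGU, not optimizing for size, and a
   destination that overlaps one of the sources):

     use LEA  iff  dist_use > 0
                   && (dist_define <= 0
                       || dist_define + IX86_LEA_PRIORITY >= dist_use)

   i.e. prefer LEA when its result feeds an address generation soon enough
   that avoiding the flag-setting ADD pays off.  */
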
16577 /* Return true if destination reg of SET_BODY is shift count of
16578 USE_BODY. */
16579
16580 static bool
16581 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16582 {
16583 rtx set_dest;
16584 rtx shift_rtx;
16585 int i;
16586
16587 /* Retrieve destination of SET_BODY. */
16588 switch (GET_CODE (set_body))
16589 {
16590 case SET:
16591 set_dest = SET_DEST (set_body);
16592 if (!set_dest || !REG_P (set_dest))
16593 return false;
16594 break;
16595 case PARALLEL:
16596 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16597 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16598 use_body))
16599 return true;
16600 default:
16601 return false;
16602 break;
16603 }
16604
16605 /* Retrieve shift count of USE_BODY. */
16606 switch (GET_CODE (use_body))
16607 {
16608 case SET:
16609 shift_rtx = XEXP (use_body, 1);
16610 break;
16611 case PARALLEL:
16612 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16613 if (ix86_dep_by_shift_count_body (set_body,
16614 XVECEXP (use_body, 0, i)))
16615 return true;
16616 default:
16617 return false;
16618 break;
16619 }
16620
16621 if (shift_rtx
16622 && (GET_CODE (shift_rtx) == ASHIFT
16623 || GET_CODE (shift_rtx) == LSHIFTRT
16624 || GET_CODE (shift_rtx) == ASHIFTRT
16625 || GET_CODE (shift_rtx) == ROTATE
16626 || GET_CODE (shift_rtx) == ROTATERT))
16627 {
16628 rtx shift_count = XEXP (shift_rtx, 1);
16629
16630 /* Return true if shift count is dest of SET_BODY. */
16631 if (REG_P (shift_count)
16632 && true_regnum (set_dest) == true_regnum (shift_count))
16633 return true;
16634 }
16635
16636 return false;
16637 }
16638
16639 /* Return true if destination reg of SET_INSN is shift count of
16640 USE_INSN. */
16641
16642 bool
16643 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16644 {
16645 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16646 PATTERN (use_insn));
16647 }
16648
16649 /* Return TRUE or FALSE depending on whether the unary operator meets the
16650 appropriate constraints. */
16651
16652 bool
16653 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16654 enum machine_mode mode ATTRIBUTE_UNUSED,
16655 rtx operands[2] ATTRIBUTE_UNUSED)
16656 {
16657   /* If one of the operands is memory, source and destination must match.  */
16658 if ((MEM_P (operands[0])
16659 || MEM_P (operands[1]))
16660 && ! rtx_equal_p (operands[0], operands[1]))
16661 return false;
16662 return true;
16663 }
16664
16665 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16666 are ok, keeping in mind the possible movddup alternative. */
16667
16668 bool
16669 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16670 {
16671 if (MEM_P (operands[0]))
16672 return rtx_equal_p (operands[0], operands[1 + high]);
16673 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16674 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16675 return true;
16676 }
16677
16678 /* Post-reload splitter for converting an SF or DFmode value in an
16679 SSE register into an unsigned SImode. */
16680
16681 void
16682 ix86_split_convert_uns_si_sse (rtx operands[])
16683 {
16684 enum machine_mode vecmode;
16685 rtx value, large, zero_or_two31, input, two31, x;
16686
16687 large = operands[1];
16688 zero_or_two31 = operands[2];
16689 input = operands[3];
16690 two31 = operands[4];
16691 vecmode = GET_MODE (large);
16692 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16693
16694 /* Load up the value into the low element. We must ensure that the other
16695 elements are valid floats -- zero is the easiest such value. */
16696 if (MEM_P (input))
16697 {
16698 if (vecmode == V4SFmode)
16699 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16700 else
16701 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16702 }
16703 else
16704 {
16705 input = gen_rtx_REG (vecmode, REGNO (input));
16706 emit_move_insn (value, CONST0_RTX (vecmode));
16707 if (vecmode == V4SFmode)
16708 emit_insn (gen_sse_movss (value, value, input));
16709 else
16710 emit_insn (gen_sse2_movsd (value, value, input));
16711 }
16712
16713 emit_move_insn (large, two31);
16714 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16715
16716 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16717 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16718
16719 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16720 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16721
16722 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16723 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16724
16725 large = gen_rtx_REG (V4SImode, REGNO (large));
16726 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16727
16728 x = gen_rtx_REG (V4SImode, REGNO (value));
16729 if (vecmode == V4SFmode)
16730 emit_insn (gen_sse2_cvttps2dq (x, value));
16731 else
16732 emit_insn (gen_sse2_cvttpd2dq (x, value));
16733 value = x;
16734
16735 emit_insn (gen_xorv4si3 (value, value, large));
16736 }
16737
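/* Scalar sketch of the transformation implemented above (illustrative only,
   relying on the truncating semantics of cvttps2dq / cvttpd2dq):

     if (x >= 0x1p31)
       result = (unsigned int) (int) (x - 0x1p31) ^ 0x80000000u;
     else
       result = (unsigned int) (int) x;

   The compare builds an all-ones mask, the AND selects either 0 or 2**31
   to subtract, and the final XOR adds 2**31 back into the integer.  */
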
16738 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16739 Expects the 64-bit DImode to be supplied in a pair of integral
16740 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16741 -mfpmath=sse, !optimize_size only. */
16742
16743 void
16744 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
16745 {
16746 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
16747 rtx int_xmm, fp_xmm;
16748 rtx biases, exponents;
16749 rtx x;
16750
16751 int_xmm = gen_reg_rtx (V4SImode);
16752 if (TARGET_INTER_UNIT_MOVES)
16753 emit_insn (gen_movdi_to_sse (int_xmm, input));
16754 else if (TARGET_SSE_SPLIT_REGS)
16755 {
16756 emit_clobber (int_xmm);
16757 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
16758 }
16759 else
16760 {
16761 x = gen_reg_rtx (V2DImode);
16762 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
16763 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
16764 }
16765
16766 x = gen_rtx_CONST_VECTOR (V4SImode,
16767 gen_rtvec (4, GEN_INT (0x43300000UL),
16768 GEN_INT (0x45300000UL),
16769 const0_rtx, const0_rtx));
16770 exponents = validize_mem (force_const_mem (V4SImode, x));
16771
16772 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
16773 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
16774
16775 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
16776 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
16777 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
16778 (0x1.0p84 + double(fp_value_hi_xmm)).
16779 Note these exponents differ by 32. */
16780
16781 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
16782
16783 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
16784 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
16785 real_ldexp (&bias_lo_rvt, &dconst1, 52);
16786 real_ldexp (&bias_hi_rvt, &dconst1, 84);
16787 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
16788 x = const_double_from_real_value (bias_hi_rvt, DFmode);
16789 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
16790 biases = validize_mem (force_const_mem (V2DFmode, biases));
16791 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
16792
16793 /* Add the upper and lower DFmode values together. */
16794 if (TARGET_SSE3)
16795 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
16796 else
16797 {
16798 x = copy_to_mode_reg (V2DFmode, fp_xmm);
16799 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
16800 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
16801 }
16802
16803 ix86_expand_vector_extract (false, target, fp_xmm, 0);
16804 }
16805
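/* Scalar sketch of the exponent-bias trick used above (illustrative only;
   the union and variable names are placeholders, not GCC interfaces):

     union { double d; unsigned long long u; } lo, hi;
     lo.u = ((unsigned long long) 0x43300000 << 32) | (x & 0xffffffffULL);
     hi.u = ((unsigned long long) 0x45300000 << 32) | (x >> 32);
     result = (hi.d - 0x1.0p84) + (lo.d - 0x1.0p52);

   lo.d is exactly 2**52 + low32 (x) and hi.d is exactly 2**84
   + 2**32 * high32 (x), so once the biases are removed the final addition
   performs the single rounding of the true value of x.  */
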
16806 /* Not used, but eases macroization of patterns. */
16807 void
16808 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
16809 rtx input ATTRIBUTE_UNUSED)
16810 {
16811 gcc_unreachable ();
16812 }
16813
16814 /* Convert an unsigned SImode value into a DFmode. Only currently used
16815 for SSE, but applicable anywhere. */
16816
16817 void
16818 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
16819 {
16820 REAL_VALUE_TYPE TWO31r;
16821 rtx x, fp;
16822
16823 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
16824 NULL, 1, OPTAB_DIRECT);
16825
16826 fp = gen_reg_rtx (DFmode);
16827 emit_insn (gen_floatsidf2 (fp, x));
16828
16829 real_ldexp (&TWO31r, &dconst1, 31);
16830 x = const_double_from_real_value (TWO31r, DFmode);
16831
16832 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
16833 if (x != target)
16834 emit_move_insn (target, x);
16835 }
16836
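/* Equivalent scalar formulation of the conversion above (a sketch):

     result = (double) (int) (u - 0x80000000u) + 0x1p31;

   Subtracting 2**31 maps the unsigned range onto the signed range, the
   signed SImode -> DFmode conversion is exact, and adding 2**31.0 back
   restores the original value without any rounding.  */
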
16837 /* Convert a signed DImode value into a DFmode. Only used for SSE in
16838 32-bit mode; otherwise we have a direct convert instruction. */
16839
16840 void
16841 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
16842 {
16843 REAL_VALUE_TYPE TWO32r;
16844 rtx fp_lo, fp_hi, x;
16845
16846 fp_lo = gen_reg_rtx (DFmode);
16847 fp_hi = gen_reg_rtx (DFmode);
16848
16849 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
16850
16851 real_ldexp (&TWO32r, &dconst1, 32);
16852 x = const_double_from_real_value (TWO32r, DFmode);
16853 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
16854
16855 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
16856
16857 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
16858 0, OPTAB_DIRECT);
16859 if (x != target)
16860 emit_move_insn (target, x);
16861 }
16862
16863 /* Convert an unsigned SImode value into a SFmode, using only SSE.
16864 For x86_32, -mfpmath=sse, !optimize_size only. */
16865 void
16866 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
16867 {
16868 REAL_VALUE_TYPE ONE16r;
16869 rtx fp_hi, fp_lo, int_hi, int_lo, x;
16870
16871 real_ldexp (&ONE16r, &dconst1, 16);
16872 x = const_double_from_real_value (ONE16r, SFmode);
16873 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
16874 NULL, 0, OPTAB_DIRECT);
16875 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
16876 NULL, 0, OPTAB_DIRECT);
16877 fp_hi = gen_reg_rtx (SFmode);
16878 fp_lo = gen_reg_rtx (SFmode);
16879 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
16880 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
16881 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
16882 0, OPTAB_DIRECT);
16883 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
16884 0, OPTAB_DIRECT);
16885 if (!rtx_equal_p (target, fp_hi))
16886 emit_move_insn (target, fp_hi);
16887 }
16888
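/* Scalar sketch of the SImode -> SFmode scheme above:

     result = (float) (u >> 16) * 0x1p16f + (float) (u & 0xffff);

   Each 16-bit half converts exactly and the multiply by 2**16 is exact,
   so the only rounding happens in the final addition, which is the same
   single rounding a direct unsigned conversion would perform.  */
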
16889 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
16890 then replicate the value for all elements of the vector
16891 register. */
16892
16893 rtx
16894 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
16895 {
16896 rtvec v;
16897 switch (mode)
16898 {
16899 case V4SImode:
16900 gcc_assert (vect);
16901 v = gen_rtvec (4, value, value, value, value);
16902 return gen_rtx_CONST_VECTOR (V4SImode, v);
16903
16904 case V2DImode:
16905 gcc_assert (vect);
16906 v = gen_rtvec (2, value, value);
16907 return gen_rtx_CONST_VECTOR (V2DImode, v);
16908
16909 case V8SFmode:
16910 if (vect)
16911 v = gen_rtvec (8, value, value, value, value,
16912 value, value, value, value);
16913 else
16914 v = gen_rtvec (8, value, CONST0_RTX (SFmode),
16915 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16916 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16917 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16918 return gen_rtx_CONST_VECTOR (V8SFmode, v);
16919
16920 case V4SFmode:
16921 if (vect)
16922 v = gen_rtvec (4, value, value, value, value);
16923 else
16924 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
16925 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16926 return gen_rtx_CONST_VECTOR (V4SFmode, v);
16927
16928 case V4DFmode:
16929 if (vect)
16930 v = gen_rtvec (4, value, value, value, value);
16931 else
16932 v = gen_rtvec (4, value, CONST0_RTX (DFmode),
16933 CONST0_RTX (DFmode), CONST0_RTX (DFmode));
16934 return gen_rtx_CONST_VECTOR (V4DFmode, v);
16935
16936 case V2DFmode:
16937 if (vect)
16938 v = gen_rtvec (2, value, value);
16939 else
16940 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
16941 return gen_rtx_CONST_VECTOR (V2DFmode, v);
16942
16943 default:
16944 gcc_unreachable ();
16945 }
16946 }
16947
16948 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
16949 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
16950 for an SSE register. If VECT is true, then replicate the mask for
16951 all elements of the vector register. If INVERT is true, then create
16952 a mask excluding the sign bit. */
16953
16954 rtx
16955 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
16956 {
16957 enum machine_mode vec_mode, imode;
16958 HOST_WIDE_INT hi, lo;
16959 int shift = 63;
16960 rtx v;
16961 rtx mask;
16962
16963 /* Find the sign bit, sign extended to 2*HWI. */
16964 switch (mode)
16965 {
16966 case V4SImode:
16967 case V8SFmode:
16968 case V4SFmode:
16969 vec_mode = mode;
16970 mode = GET_MODE_INNER (mode);
16971 imode = SImode;
16972 lo = 0x80000000, hi = lo < 0;
16973 break;
16974
16975 case V2DImode:
16976 case V4DFmode:
16977 case V2DFmode:
16978 vec_mode = mode;
16979 mode = GET_MODE_INNER (mode);
16980 imode = DImode;
16981 if (HOST_BITS_PER_WIDE_INT >= 64)
16982 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
16983 else
16984 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
16985 break;
16986
16987 case TImode:
16988 case TFmode:
16989 vec_mode = VOIDmode;
16990 if (HOST_BITS_PER_WIDE_INT >= 64)
16991 {
16992 imode = TImode;
16993 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
16994 }
16995 else
16996 {
16997 rtvec vec;
16998
16999 imode = DImode;
17000 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17001
17002 if (invert)
17003 {
17004 lo = ~lo, hi = ~hi;
17005 v = constm1_rtx;
17006 }
17007 else
17008 v = const0_rtx;
17009
17010 mask = immed_double_const (lo, hi, imode);
17011
17012 vec = gen_rtvec (2, v, mask);
17013 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
17014 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
17015
17016 return v;
17017 }
17018 break;
17019
17020 default:
17021 gcc_unreachable ();
17022 }
17023
17024 if (invert)
17025 lo = ~lo, hi = ~hi;
17026
17027 /* Force this value into the low part of a fp vector constant. */
17028 mask = immed_double_const (lo, hi, imode);
17029 mask = gen_lowpart (mode, mask);
17030
17031 if (vec_mode == VOIDmode)
17032 return force_reg (mode, mask);
17033
17034 v = ix86_build_const_vector (vec_mode, vect, mask);
17035 return force_reg (vec_mode, v);
17036 }
17037
17038 /* Generate code for floating point ABS or NEG. */
17039
17040 void
17041 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
17042 rtx operands[])
17043 {
17044 rtx mask, set, dst, src;
17045 bool use_sse = false;
17046 bool vector_mode = VECTOR_MODE_P (mode);
17047 enum machine_mode vmode = mode;
17048
17049 if (vector_mode)
17050 use_sse = true;
17051 else if (mode == TFmode)
17052 use_sse = true;
17053 else if (TARGET_SSE_MATH)
17054 {
17055 use_sse = SSE_FLOAT_MODE_P (mode);
17056 if (mode == SFmode)
17057 vmode = V4SFmode;
17058 else if (mode == DFmode)
17059 vmode = V2DFmode;
17060 }
17061
17062 /* NEG and ABS performed with SSE use bitwise mask operations.
17063 Create the appropriate mask now. */
17064 if (use_sse)
17065 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
17066 else
17067 mask = NULL_RTX;
17068
17069 dst = operands[0];
17070 src = operands[1];
17071
17072 set = gen_rtx_fmt_e (code, mode, src);
17073 set = gen_rtx_SET (VOIDmode, dst, set);
17074
17075 if (mask)
17076 {
17077 rtx use, clob;
17078 rtvec par;
17079
17080 use = gen_rtx_USE (VOIDmode, mask);
17081 if (vector_mode)
17082 par = gen_rtvec (2, set, use);
17083 else
17084 {
17085 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
17086 par = gen_rtvec (3, set, use, clob);
17087 }
17088 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
17089 }
17090 else
17091 emit_insn (set);
17092 }
17093
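/* With SSE the PARALLEL emitted above ultimately becomes a bitwise
   operation with the mask built by ix86_build_signbit_mask; roughly
   (a sketch for SFmode):

     NEG:  dst = src ^ { 0x80000000, ... }    flip the sign bit  (xorps)
     ABS:  dst = src & { 0x7fffffff, ... }    clear the sign bit (andps)

   which is why ABS asks for the inverted mask.  */
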
17094 /* Expand a copysign operation. Special case operand 0 being a constant. */
17095
17096 void
17097 ix86_expand_copysign (rtx operands[])
17098 {
17099 enum machine_mode mode, vmode;
17100 rtx dest, op0, op1, mask, nmask;
17101
17102 dest = operands[0];
17103 op0 = operands[1];
17104 op1 = operands[2];
17105
17106 mode = GET_MODE (dest);
17107
17108 if (mode == SFmode)
17109 vmode = V4SFmode;
17110 else if (mode == DFmode)
17111 vmode = V2DFmode;
17112 else
17113 vmode = mode;
17114
17115 if (GET_CODE (op0) == CONST_DOUBLE)
17116 {
17117 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
17118
17119 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
17120 op0 = simplify_unary_operation (ABS, mode, op0, mode);
17121
17122 if (mode == SFmode || mode == DFmode)
17123 {
17124 if (op0 == CONST0_RTX (mode))
17125 op0 = CONST0_RTX (vmode);
17126 else
17127 {
17128 rtx v = ix86_build_const_vector (vmode, false, op0);
17129
17130 op0 = force_reg (vmode, v);
17131 }
17132 }
17133 else if (op0 != CONST0_RTX (mode))
17134 op0 = force_reg (mode, op0);
17135
17136 mask = ix86_build_signbit_mask (vmode, 0, 0);
17137
17138 if (mode == SFmode)
17139 copysign_insn = gen_copysignsf3_const;
17140 else if (mode == DFmode)
17141 copysign_insn = gen_copysigndf3_const;
17142 else
17143 copysign_insn = gen_copysigntf3_const;
17144
17145 emit_insn (copysign_insn (dest, op0, op1, mask));
17146 }
17147 else
17148 {
17149 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
17150
17151 nmask = ix86_build_signbit_mask (vmode, 0, 1);
17152 mask = ix86_build_signbit_mask (vmode, 0, 0);
17153
17154 if (mode == SFmode)
17155 copysign_insn = gen_copysignsf3_var;
17156 else if (mode == DFmode)
17157 copysign_insn = gen_copysigndf3_var;
17158 else
17159 copysign_insn = gen_copysigntf3_var;
17160
17161 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
17162 }
17163 }
17164
17165 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
17166 be a constant, and so has already been expanded into a vector constant. */
17167
17168 void
17169 ix86_split_copysign_const (rtx operands[])
17170 {
17171 enum machine_mode mode, vmode;
17172 rtx dest, op0, mask, x;
17173
17174 dest = operands[0];
17175 op0 = operands[1];
17176 mask = operands[3];
17177
17178 mode = GET_MODE (dest);
17179 vmode = GET_MODE (mask);
17180
17181 dest = simplify_gen_subreg (vmode, dest, mode, 0);
17182 x = gen_rtx_AND (vmode, dest, mask);
17183 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17184
17185 if (op0 != CONST0_RTX (vmode))
17186 {
17187 x = gen_rtx_IOR (vmode, dest, op0);
17188 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17189 }
17190 }
17191
17192 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
17193 so we have to do two masks. */
17194
17195 void
17196 ix86_split_copysign_var (rtx operands[])
17197 {
17198 enum machine_mode mode, vmode;
17199 rtx dest, scratch, op0, op1, mask, nmask, x;
17200
17201 dest = operands[0];
17202 scratch = operands[1];
17203 op0 = operands[2];
17204 op1 = operands[3];
17205 nmask = operands[4];
17206 mask = operands[5];
17207
17208 mode = GET_MODE (dest);
17209 vmode = GET_MODE (mask);
17210
17211 if (rtx_equal_p (op0, op1))
17212 {
17213 /* Shouldn't happen often (it's useless, obviously), but when it does
17214 we'd generate incorrect code if we continue below. */
17215 emit_move_insn (dest, op0);
17216 return;
17217 }
17218
17219 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
17220 {
17221 gcc_assert (REGNO (op1) == REGNO (scratch));
17222
17223 x = gen_rtx_AND (vmode, scratch, mask);
17224 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17225
17226 dest = mask;
17227 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17228 x = gen_rtx_NOT (vmode, dest);
17229 x = gen_rtx_AND (vmode, x, op0);
17230 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17231 }
17232 else
17233 {
17234 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
17235 {
17236 x = gen_rtx_AND (vmode, scratch, mask);
17237 }
17238 else /* alternative 2,4 */
17239 {
17240 gcc_assert (REGNO (mask) == REGNO (scratch));
17241 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
17242 x = gen_rtx_AND (vmode, scratch, op1);
17243 }
17244 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17245
17246 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
17247 {
17248 dest = simplify_gen_subreg (vmode, op0, mode, 0);
17249 x = gen_rtx_AND (vmode, dest, nmask);
17250 }
17251 else /* alternative 3,4 */
17252 {
17253 gcc_assert (REGNO (nmask) == REGNO (dest));
17254 dest = nmask;
17255 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17256 x = gen_rtx_AND (vmode, dest, op0);
17257 }
17258 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17259 }
17260
17261 x = gen_rtx_IOR (vmode, dest, scratch);
17262 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17263 }
17264
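/* Both copysign splitters above implement the usual bit-level identity
   (a sketch; MASK is the sign-bit mask and NMASK its complement):

     copysign (x, y) = (x & NMASK) | (y & MASK)

   The constant variant knows |x| at compile time, so only the AND with
   MASK and the final IOR remain; the variable variant needs both ANDs,
   hence the extra scratch register and the NMASK operand.  */
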
17265 /* Return TRUE or FALSE depending on whether the first SET in INSN
17266 has source and destination with matching CC modes, and that the
17267 CC mode is at least as constrained as REQ_MODE. */
17268
17269 bool
17270 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
17271 {
17272 rtx set;
17273 enum machine_mode set_mode;
17274
17275 set = PATTERN (insn);
17276 if (GET_CODE (set) == PARALLEL)
17277 set = XVECEXP (set, 0, 0);
17278 gcc_assert (GET_CODE (set) == SET);
17279 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
17280
17281 set_mode = GET_MODE (SET_DEST (set));
17282 switch (set_mode)
17283 {
17284 case CCNOmode:
17285 if (req_mode != CCNOmode
17286 && (req_mode != CCmode
17287 || XEXP (SET_SRC (set), 1) != const0_rtx))
17288 return false;
17289 break;
17290 case CCmode:
17291 if (req_mode == CCGCmode)
17292 return false;
17293 /* FALLTHRU */
17294 case CCGCmode:
17295 if (req_mode == CCGOCmode || req_mode == CCNOmode)
17296 return false;
17297 /* FALLTHRU */
17298 case CCGOCmode:
17299 if (req_mode == CCZmode)
17300 return false;
17301 /* FALLTHRU */
17302 case CCAmode:
17303 case CCCmode:
17304 case CCOmode:
17305 case CCSmode:
17306 case CCZmode:
17307 break;
17308
17309 default:
17310 gcc_unreachable ();
17311 }
17312
17313 return GET_MODE (SET_SRC (set)) == set_mode;
17314 }
17315
17316 /* Generate insn patterns to do an integer compare of OPERANDS. */
17317
17318 static rtx
17319 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
17320 {
17321 enum machine_mode cmpmode;
17322 rtx tmp, flags;
17323
17324 cmpmode = SELECT_CC_MODE (code, op0, op1);
17325 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
17326
17327 /* This is very simple, but making the interface the same as in the
17328 FP case makes the rest of the code easier. */
17329 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
17330 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
17331
17332 /* Return the test that should be put into the flags user, i.e.
17333 the bcc, scc, or cmov instruction. */
17334 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
17335 }
17336
17337 /* Figure out whether to use ordered or unordered fp comparisons.
17338 Return the appropriate mode to use. */
17339
17340 enum machine_mode
17341 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
17342 {
17343 /* ??? In order to make all comparisons reversible, we do all comparisons
17344 non-trapping when compiling for IEEE. Once gcc is able to distinguish
17345    all forms of trapping and nontrapping comparisons, we can make inequality
17346    comparisons trapping again, since that results in better code when using
17347    FCOM based compares.  */
17348 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
17349 }
17350
17351 enum machine_mode
17352 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
17353 {
17354 enum machine_mode mode = GET_MODE (op0);
17355
17356 if (SCALAR_FLOAT_MODE_P (mode))
17357 {
17358 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
17359 return ix86_fp_compare_mode (code);
17360 }
17361
17362 switch (code)
17363 {
17364 /* Only zero flag is needed. */
17365 case EQ: /* ZF=0 */
17366 case NE: /* ZF!=0 */
17367 return CCZmode;
17368 /* Codes needing carry flag. */
17369 case GEU: /* CF=0 */
17370 case LTU: /* CF=1 */
17371 /* Detect overflow checks. They need just the carry flag. */
17372 if (GET_CODE (op0) == PLUS
17373 && rtx_equal_p (op1, XEXP (op0, 0)))
17374 return CCCmode;
17375 else
17376 return CCmode;
17377 case GTU: /* CF=0 & ZF=0 */
17378 case LEU: /* CF=1 | ZF=1 */
17379 /* Detect overflow checks. They need just the carry flag. */
17380 if (GET_CODE (op0) == MINUS
17381 && rtx_equal_p (op1, XEXP (op0, 0)))
17382 return CCCmode;
17383 else
17384 return CCmode;
17385 /* Codes possibly doable only with sign flag when
17386 comparing against zero. */
17387 case GE: /* SF=OF or SF=0 */
17388 case LT: /* SF<>OF or SF=1 */
17389 if (op1 == const0_rtx)
17390 return CCGOCmode;
17391 else
17392 /* For other cases Carry flag is not required. */
17393 return CCGCmode;
17394 /* Codes doable only with sign flag when comparing
17395 against zero, but we miss jump instruction for it
17396 so we need to use relational tests against overflow
17397 that thus needs to be zero. */
17398 case GT: /* ZF=0 & SF=OF */
17399 case LE: /* ZF=1 | SF<>OF */
17400 if (op1 == const0_rtx)
17401 return CCNOmode;
17402 else
17403 return CCGCmode;
17404     /* The strcmp pattern does (use flags), and combine may ask us for the
17405        proper mode.  */
17406 case USE:
17407 return CCmode;
17408 default:
17409 gcc_unreachable ();
17410 }
17411 }
17412
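/* As an example of the CCCmode cases above, an unsigned overflow check
   such as

     unsigned int sum = a + b;
     if (sum < a)
       handle_overflow ();       (handle_overflow is just a placeholder)

   compares a PLUS against one of its own operands, so only the carry flag
   is needed and the addition itself can set it (add; jc) without a
   separate compare instruction.  */
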
17413 /* Return the fixed registers used for condition codes. */
17414
17415 static bool
17416 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17417 {
17418 *p1 = FLAGS_REG;
17419 *p2 = FPSR_REG;
17420 return true;
17421 }
17422
17423 /* If two condition code modes are compatible, return a condition code
17424 mode which is compatible with both. Otherwise, return
17425 VOIDmode. */
17426
17427 static enum machine_mode
17428 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
17429 {
17430 if (m1 == m2)
17431 return m1;
17432
17433 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
17434 return VOIDmode;
17435
17436 if ((m1 == CCGCmode && m2 == CCGOCmode)
17437 || (m1 == CCGOCmode && m2 == CCGCmode))
17438 return CCGCmode;
17439
17440 switch (m1)
17441 {
17442 default:
17443 gcc_unreachable ();
17444
17445 case CCmode:
17446 case CCGCmode:
17447 case CCGOCmode:
17448 case CCNOmode:
17449 case CCAmode:
17450 case CCCmode:
17451 case CCOmode:
17452 case CCSmode:
17453 case CCZmode:
17454 switch (m2)
17455 {
17456 default:
17457 return VOIDmode;
17458
17459 case CCmode:
17460 case CCGCmode:
17461 case CCGOCmode:
17462 case CCNOmode:
17463 case CCAmode:
17464 case CCCmode:
17465 case CCOmode:
17466 case CCSmode:
17467 case CCZmode:
17468 return CCmode;
17469 }
17470
17471 case CCFPmode:
17472 case CCFPUmode:
17473 /* These are only compatible with themselves, which we already
17474 checked above. */
17475 return VOIDmode;
17476 }
17477 }
17478
17479
17480 /* Return a comparison we can do that is equivalent to
17481    swap_condition (code), apart possibly from orderedness.
17482    But never change orderedness if TARGET_IEEE_FP, returning
17483    UNKNOWN in that case if necessary.  */
17484
17485 static enum rtx_code
17486 ix86_fp_swap_condition (enum rtx_code code)
17487 {
17488 switch (code)
17489 {
17490 case GT: /* GTU - CF=0 & ZF=0 */
17491 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17492 case GE: /* GEU - CF=0 */
17493 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17494 case UNLT: /* LTU - CF=1 */
17495 return TARGET_IEEE_FP ? UNKNOWN : GT;
17496 case UNLE: /* LEU - CF=1 | ZF=1 */
17497 return TARGET_IEEE_FP ? UNKNOWN : GE;
17498 default:
17499 return swap_condition (code);
17500 }
17501 }
17502
17503 /* Return the cost of comparison CODE using the best strategy for performance.
17504    All of the following functions use the number of instructions as a cost metric.
17505    In the future this should be tweaked to compute bytes for optimize_size and
17506    take into account the performance of various instructions on various CPUs.  */
17507
17508 static int
17509 ix86_fp_comparison_cost (enum rtx_code code)
17510 {
17511 int arith_cost;
17512
17513 /* The cost of code using bit-twiddling on %ah. */
17514 switch (code)
17515 {
17516 case UNLE:
17517 case UNLT:
17518 case LTGT:
17519 case GT:
17520 case GE:
17521 case UNORDERED:
17522 case ORDERED:
17523 case UNEQ:
17524 arith_cost = 4;
17525 break;
17526 case LT:
17527 case NE:
17528 case EQ:
17529 case UNGE:
17530 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17531 break;
17532 case LE:
17533 case UNGT:
17534 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17535 break;
17536 default:
17537 gcc_unreachable ();
17538 }
17539
17540 switch (ix86_fp_comparison_strategy (code))
17541 {
17542 case IX86_FPCMP_COMI:
17543 return arith_cost > 4 ? 3 : 2;
17544 case IX86_FPCMP_SAHF:
17545 return arith_cost > 4 ? 4 : 3;
17546 default:
17547 return arith_cost;
17548 }
17549 }
17550
17551 /* Return the strategy to use for floating-point comparisons.  We assume that
17552    fcomi is always preferable where available, since that is also true when
17553    looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */
17554
17555 enum ix86_fpcmp_strategy
17556 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17557 {
17558 /* Do fcomi/sahf based test when profitable. */
17559
17560 if (TARGET_CMOVE)
17561 return IX86_FPCMP_COMI;
17562
17563 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17564 return IX86_FPCMP_SAHF;
17565
17566 return IX86_FPCMP_ARITH;
17567 }
17568
17569 /* Swap, force into registers, or otherwise massage the two operands
17570 to a fp comparison. The operands are updated in place; the new
17571 comparison code is returned. */
17572
17573 static enum rtx_code
17574 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17575 {
17576 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17577 rtx op0 = *pop0, op1 = *pop1;
17578 enum machine_mode op_mode = GET_MODE (op0);
17579 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
17580
17581 /* All of the unordered compare instructions only work on registers.
17582 The same is true of the fcomi compare instructions. The XFmode
17583 compare instructions require registers except when comparing
17584 against zero or when converting operand 1 from fixed point to
17585 floating point. */
17586
17587 if (!is_sse
17588 && (fpcmp_mode == CCFPUmode
17589 || (op_mode == XFmode
17590 && ! (standard_80387_constant_p (op0) == 1
17591 || standard_80387_constant_p (op1) == 1)
17592 && GET_CODE (op1) != FLOAT)
17593 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17594 {
17595 op0 = force_reg (op_mode, op0);
17596 op1 = force_reg (op_mode, op1);
17597 }
17598 else
17599 {
17600 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17601          things around if that appears profitable; otherwise force op0
17602 into a register. */
17603
17604 if (standard_80387_constant_p (op0) == 0
17605 || (MEM_P (op0)
17606 && ! (standard_80387_constant_p (op1) == 0
17607 || MEM_P (op1))))
17608 {
17609 enum rtx_code new_code = ix86_fp_swap_condition (code);
17610 if (new_code != UNKNOWN)
17611 {
17612 rtx tmp;
17613 tmp = op0, op0 = op1, op1 = tmp;
17614 code = new_code;
17615 }
17616 }
17617
17618 if (!REG_P (op0))
17619 op0 = force_reg (op_mode, op0);
17620
17621 if (CONSTANT_P (op1))
17622 {
17623 int tmp = standard_80387_constant_p (op1);
17624 if (tmp == 0)
17625 op1 = validize_mem (force_const_mem (op_mode, op1));
17626 else if (tmp == 1)
17627 {
17628 if (TARGET_CMOVE)
17629 op1 = force_reg (op_mode, op1);
17630 }
17631 else
17632 op1 = force_reg (op_mode, op1);
17633 }
17634 }
17635
17636 /* Try to rearrange the comparison to make it cheaper. */
17637 if (ix86_fp_comparison_cost (code)
17638 > ix86_fp_comparison_cost (swap_condition (code))
17639 && (REG_P (op1) || can_create_pseudo_p ()))
17640 {
17641 rtx tmp;
17642 tmp = op0, op0 = op1, op1 = tmp;
17643 code = swap_condition (code);
17644 if (!REG_P (op0))
17645 op0 = force_reg (op_mode, op0);
17646 }
17647
17648 *pop0 = op0;
17649 *pop1 = op1;
17650 return code;
17651 }
17652
17653 /* Convert comparison codes we use to represent FP comparison to integer
17654 code that will result in proper branch. Return UNKNOWN if no such code
17655 is available. */
17656
17657 enum rtx_code
17658 ix86_fp_compare_code_to_integer (enum rtx_code code)
17659 {
17660 switch (code)
17661 {
17662 case GT:
17663 return GTU;
17664 case GE:
17665 return GEU;
17666 case ORDERED:
17667 case UNORDERED:
17668 return code;
17669 break;
17670 case UNEQ:
17671 return EQ;
17672 break;
17673 case UNLT:
17674 return LTU;
17675 break;
17676 case UNLE:
17677 return LEU;
17678 break;
17679 case LTGT:
17680 return NE;
17681 break;
17682 default:
17683 return UNKNOWN;
17684 }
17685 }
17686
17687 /* Generate insn patterns to do a floating point compare of OPERANDS. */
17688
17689 static rtx
17690 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
17691 {
17692 enum machine_mode fpcmp_mode, intcmp_mode;
17693 rtx tmp, tmp2;
17694
17695 fpcmp_mode = ix86_fp_compare_mode (code);
17696 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
17697
17698 /* Do fcomi/sahf based test when profitable. */
17699 switch (ix86_fp_comparison_strategy (code))
17700 {
17701 case IX86_FPCMP_COMI:
17702 intcmp_mode = fpcmp_mode;
17703 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17704 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17705 tmp);
17706 emit_insn (tmp);
17707 break;
17708
17709 case IX86_FPCMP_SAHF:
17710 intcmp_mode = fpcmp_mode;
17711 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17712 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17713 tmp);
17714
17715 if (!scratch)
17716 scratch = gen_reg_rtx (HImode);
17717 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
17718 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
17719 break;
17720
17721 case IX86_FPCMP_ARITH:
17722 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
17723 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17724 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
17725 if (!scratch)
17726 scratch = gen_reg_rtx (HImode);
17727 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
17728
17729 /* In the unordered case, we have to check C2 for NaN's, which
17730 doesn't happen to work out to anything nice combination-wise.
17731 So do some bit twiddling on the value we've got in AH to come
17732 up with an appropriate set of condition codes. */
17733
17734 intcmp_mode = CCNOmode;
17735 switch (code)
17736 {
17737 case GT:
17738 case UNGT:
17739 if (code == GT || !TARGET_IEEE_FP)
17740 {
17741 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17742 code = EQ;
17743 }
17744 else
17745 {
17746 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17747 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17748 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
17749 intcmp_mode = CCmode;
17750 code = GEU;
17751 }
17752 break;
17753 case LT:
17754 case UNLT:
17755 if (code == LT && TARGET_IEEE_FP)
17756 {
17757 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17758 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
17759 intcmp_mode = CCmode;
17760 code = EQ;
17761 }
17762 else
17763 {
17764 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
17765 code = NE;
17766 }
17767 break;
17768 case GE:
17769 case UNGE:
17770 if (code == GE || !TARGET_IEEE_FP)
17771 {
17772 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
17773 code = EQ;
17774 }
17775 else
17776 {
17777 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17778 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
17779 code = NE;
17780 }
17781 break;
17782 case LE:
17783 case UNLE:
17784 if (code == LE && TARGET_IEEE_FP)
17785 {
17786 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17787 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17788 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17789 intcmp_mode = CCmode;
17790 code = LTU;
17791 }
17792 else
17793 {
17794 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17795 code = NE;
17796 }
17797 break;
17798 case EQ:
17799 case UNEQ:
17800 if (code == EQ && TARGET_IEEE_FP)
17801 {
17802 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17803 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17804 intcmp_mode = CCmode;
17805 code = EQ;
17806 }
17807 else
17808 {
17809 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17810 code = NE;
17811 }
17812 break;
17813 case NE:
17814 case LTGT:
17815 if (code == NE && TARGET_IEEE_FP)
17816 {
17817 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17818 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
17819 GEN_INT (0x40)));
17820 code = NE;
17821 }
17822 else
17823 {
17824 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17825 code = EQ;
17826 }
17827 break;
17828
17829 case UNORDERED:
17830 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17831 code = NE;
17832 break;
17833 case ORDERED:
17834 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17835 code = EQ;
17836 break;
17837
17838 default:
17839 gcc_unreachable ();
17840 }
17841 break;
17842
17843 default:
17844 gcc_unreachable();
17845 }
17846
17847 /* Return the test that should be put into the flags user, i.e.
17848 the bcc, scc, or cmov instruction. */
17849 return gen_rtx_fmt_ee (code, VOIDmode,
17850 gen_rtx_REG (intcmp_mode, FLAGS_REG),
17851 const0_rtx);
17852 }
17853
17854 static rtx
17855 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
17856 {
17857 rtx ret;
17858
17859 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
17860 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
17861
17862 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
17863 {
17864 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
17865 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
17866 }
17867 else
17868 ret = ix86_expand_int_compare (code, op0, op1);
17869
17870 return ret;
17871 }
17872
17873 void
17874 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
17875 {
17876 enum machine_mode mode = GET_MODE (op0);
17877 rtx tmp;
17878
17879 switch (mode)
17880 {
17881 case SFmode:
17882 case DFmode:
17883 case XFmode:
17884 case QImode:
17885 case HImode:
17886 case SImode:
17887 simple:
17888 tmp = ix86_expand_compare (code, op0, op1);
17889 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17890 gen_rtx_LABEL_REF (VOIDmode, label),
17891 pc_rtx);
17892 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17893 return;
17894
17895 case DImode:
17896 if (TARGET_64BIT)
17897 goto simple;
17898 case TImode:
17899 /* Expand DImode branch into multiple compare+branch. */
17900 {
17901 rtx lo[2], hi[2], label2;
17902 enum rtx_code code1, code2, code3;
17903 enum machine_mode submode;
17904
17905 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
17906 {
17907 tmp = op0, op0 = op1, op1 = tmp;
17908 code = swap_condition (code);
17909 }
17910
17911 split_double_mode (mode, &op0, 1, lo+0, hi+0);
17912 split_double_mode (mode, &op1, 1, lo+1, hi+1);
17913
17914 submode = mode == DImode ? SImode : DImode;
17915
17916 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
17917 avoid two branches. This costs one extra insn, so disable when
17918 optimizing for size. */
17919
17920 if ((code == EQ || code == NE)
17921 && (!optimize_insn_for_size_p ()
17922 || hi[1] == const0_rtx || lo[1] == const0_rtx))
17923 {
17924 rtx xor0, xor1;
17925
17926 xor1 = hi[0];
17927 if (hi[1] != const0_rtx)
17928 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
17929 NULL_RTX, 0, OPTAB_WIDEN);
17930
17931 xor0 = lo[0];
17932 if (lo[1] != const0_rtx)
17933 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
17934 NULL_RTX, 0, OPTAB_WIDEN);
17935
17936 tmp = expand_binop (submode, ior_optab, xor1, xor0,
17937 NULL_RTX, 0, OPTAB_WIDEN);
17938
17939 ix86_expand_branch (code, tmp, const0_rtx, label);
17940 return;
17941 }
17942
17943 /* Otherwise, if we are doing less-than or greater-or-equal-than,
17944 op1 is a constant and the low word is zero, then we can just
17945 examine the high word. Similarly for low word -1 and
17946 less-or-equal-than or greater-than. */
17947
17948 if (CONST_INT_P (hi[1]))
17949 switch (code)
17950 {
17951 case LT: case LTU: case GE: case GEU:
17952 if (lo[1] == const0_rtx)
17953 {
17954 ix86_expand_branch (code, hi[0], hi[1], label);
17955 return;
17956 }
17957 break;
17958 case LE: case LEU: case GT: case GTU:
17959 if (lo[1] == constm1_rtx)
17960 {
17961 ix86_expand_branch (code, hi[0], hi[1], label);
17962 return;
17963 }
17964 break;
17965 default:
17966 break;
17967 }
17968
17969 /* Otherwise, we need two or three jumps. */
17970
17971 label2 = gen_label_rtx ();
17972
17973 code1 = code;
17974 code2 = swap_condition (code);
17975 code3 = unsigned_condition (code);
17976
17977 switch (code)
17978 {
17979 case LT: case GT: case LTU: case GTU:
17980 break;
17981
17982 case LE: code1 = LT; code2 = GT; break;
17983 case GE: code1 = GT; code2 = LT; break;
17984 case LEU: code1 = LTU; code2 = GTU; break;
17985 case GEU: code1 = GTU; code2 = LTU; break;
17986
17987 case EQ: code1 = UNKNOWN; code2 = NE; break;
17988 case NE: code2 = UNKNOWN; break;
17989
17990 default:
17991 gcc_unreachable ();
17992 }
17993
17994 /*
17995 * a < b =>
17996 * if (hi(a) < hi(b)) goto true;
17997 * if (hi(a) > hi(b)) goto false;
17998 * if (lo(a) < lo(b)) goto true;
17999 * false:
18000 */
18001
18002 if (code1 != UNKNOWN)
18003 ix86_expand_branch (code1, hi[0], hi[1], label);
18004 if (code2 != UNKNOWN)
18005 ix86_expand_branch (code2, hi[0], hi[1], label2);
18006
18007 ix86_expand_branch (code3, lo[0], lo[1], label);
18008
18009 if (code2 != UNKNOWN)
18010 emit_label (label2);
18011 return;
18012 }
18013
18014 default:
18015 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
18016 goto simple;
18017 }
18018 }
18019
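/* On 32-bit targets an unsigned DImode "a < b" branch therefore expands
   roughly as follows (a sketch, not a verbatim dump):

       cmpl    hi(b), hi(a)
       jb      .Ltaken           # hi(a) < hi(b): result known
       ja      .Lfallthrough     # hi(a) > hi(b): result known
       cmpl    lo(b), lo(a)
       jb      .Ltaken           # high words equal, decide on the low words
   .Lfallthrough:                                                          */
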
18020 /* Split branch based on floating point condition. */
18021 void
18022 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
18023 rtx target1, rtx target2, rtx tmp, rtx pushed)
18024 {
18025 rtx condition;
18026 rtx i;
18027
18028 if (target2 != pc_rtx)
18029 {
18030 rtx tmp = target2;
18031 code = reverse_condition_maybe_unordered (code);
18032 target2 = target1;
18033 target1 = tmp;
18034 }
18035
18036 condition = ix86_expand_fp_compare (code, op1, op2,
18037 tmp);
18038
18039 /* Remove pushed operand from stack. */
18040 if (pushed)
18041 ix86_free_from_memory (GET_MODE (pushed));
18042
18043 i = emit_jump_insn (gen_rtx_SET
18044 (VOIDmode, pc_rtx,
18045 gen_rtx_IF_THEN_ELSE (VOIDmode,
18046 condition, target1, target2)));
18047 if (split_branch_probability >= 0)
18048 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
18049 }
18050
18051 void
18052 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
18053 {
18054 rtx ret;
18055
18056 gcc_assert (GET_MODE (dest) == QImode);
18057
18058 ret = ix86_expand_compare (code, op0, op1);
18059 PUT_MODE (ret, QImode);
18060 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
18061 }
18062
18063 /* Expand comparison setting or clearing carry flag. Return true when
18064 successful and set pop for the operation. */
18065 static bool
18066 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
18067 {
18068 enum machine_mode mode =
18069 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
18070
18071   /* Do not handle double-mode compares that go through the special path.  */
18072 if (mode == (TARGET_64BIT ? TImode : DImode))
18073 return false;
18074
18075 if (SCALAR_FLOAT_MODE_P (mode))
18076 {
18077 rtx compare_op, compare_seq;
18078
18079 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
18080
18081       /* Shortcut:  the following common codes never translate
18082          into carry-flag compares.  */
18083 if (code == EQ || code == NE || code == UNEQ || code == LTGT
18084 || code == ORDERED || code == UNORDERED)
18085 return false;
18086
18087       /* These comparisons require the zero flag; swap operands so they don't.  */
18088 if ((code == GT || code == UNLE || code == LE || code == UNGT)
18089 && !TARGET_IEEE_FP)
18090 {
18091 rtx tmp = op0;
18092 op0 = op1;
18093 op1 = tmp;
18094 code = swap_condition (code);
18095 }
18096
18097 /* Try to expand the comparison and verify that we end up with
18098          a carry-flag-based comparison.  This fails only when we decide
18099          to expand the comparison using arithmetic, which is not a
18100          common scenario.  */
18101 start_sequence ();
18102 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
18103 compare_seq = get_insns ();
18104 end_sequence ();
18105
18106 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
18107 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
18108 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
18109 else
18110 code = GET_CODE (compare_op);
18111
18112 if (code != LTU && code != GEU)
18113 return false;
18114
18115 emit_insn (compare_seq);
18116 *pop = compare_op;
18117 return true;
18118 }
18119
18120 if (!INTEGRAL_MODE_P (mode))
18121 return false;
18122
18123 switch (code)
18124 {
18125 case LTU:
18126 case GEU:
18127 break;
18128
18129 /* Convert a==0 into (unsigned)a<1. */
18130 case EQ:
18131 case NE:
18132 if (op1 != const0_rtx)
18133 return false;
18134 op1 = const1_rtx;
18135 code = (code == EQ ? LTU : GEU);
18136 break;
18137
18138 /* Convert a>b into b<a or a>=b-1. */
18139 case GTU:
18140 case LEU:
18141 if (CONST_INT_P (op1))
18142 {
18143 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
18144           /* Bail out on overflow.  We can still swap the operands, but that
18145              would force loading of the constant into a register.  */
18146 if (op1 == const0_rtx
18147 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
18148 return false;
18149 code = (code == GTU ? GEU : LTU);
18150 }
18151 else
18152 {
18153 rtx tmp = op1;
18154 op1 = op0;
18155 op0 = tmp;
18156 code = (code == GTU ? LTU : GEU);
18157 }
18158 break;
18159
18160 /* Convert a>=0 into (unsigned)a<0x80000000. */
18161 case LT:
18162 case GE:
18163 if (mode == DImode || op1 != const0_rtx)
18164 return false;
18165 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18166 code = (code == LT ? GEU : LTU);
18167 break;
18168 case LE:
18169 case GT:
18170 if (mode == DImode || op1 != constm1_rtx)
18171 return false;
18172 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18173 code = (code == LE ? GEU : LTU);
18174 break;
18175
18176 default:
18177 return false;
18178 }
18179   /* Swapping operands may cause a constant to appear as the first operand.  */
18180 if (!nonimmediate_operand (op0, VOIDmode))
18181 {
18182 if (!can_create_pseudo_p ())
18183 return false;
18184 op0 = force_reg (mode, op0);
18185 }
18186 *pop = ix86_expand_compare (code, op0, op1);
18187 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
18188 return true;
18189 }
18190
18191 bool
18192 ix86_expand_int_movcc (rtx operands[])
18193 {
18194 enum rtx_code code = GET_CODE (operands[1]), compare_code;
18195 rtx compare_seq, compare_op;
18196 enum machine_mode mode = GET_MODE (operands[0]);
18197 bool sign_bit_compare_p = false;
18198 rtx op0 = XEXP (operands[1], 0);
18199 rtx op1 = XEXP (operands[1], 1);
18200
18201 start_sequence ();
18202 compare_op = ix86_expand_compare (code, op0, op1);
18203 compare_seq = get_insns ();
18204 end_sequence ();
18205
18206 compare_code = GET_CODE (compare_op);
18207
18208 if ((op1 == const0_rtx && (code == GE || code == LT))
18209 || (op1 == constm1_rtx && (code == GT || code == LE)))
18210 sign_bit_compare_p = true;
18211
18212 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
18213 HImode insns, we'd be swallowed in word prefix ops. */
18214
18215 if ((mode != HImode || TARGET_FAST_PREFIX)
18216 && (mode != (TARGET_64BIT ? TImode : DImode))
18217 && CONST_INT_P (operands[2])
18218 && CONST_INT_P (operands[3]))
18219 {
18220 rtx out = operands[0];
18221 HOST_WIDE_INT ct = INTVAL (operands[2]);
18222 HOST_WIDE_INT cf = INTVAL (operands[3]);
18223 HOST_WIDE_INT diff;
18224
18225 diff = ct - cf;
18226       /* Sign bit compares are better done using shifts than by using
18227 	 sbb.  */
18228 if (sign_bit_compare_p
18229 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
18230 {
18231 /* Detect overlap between destination and compare sources. */
18232 rtx tmp = out;
18233
18234 if (!sign_bit_compare_p)
18235 {
18236 rtx flags;
18237 bool fpcmp = false;
18238
18239 compare_code = GET_CODE (compare_op);
18240
18241 flags = XEXP (compare_op, 0);
18242
18243 if (GET_MODE (flags) == CCFPmode
18244 || GET_MODE (flags) == CCFPUmode)
18245 {
18246 fpcmp = true;
18247 compare_code
18248 = ix86_fp_compare_code_to_integer (compare_code);
18249 }
18250
18251 	      /* To simplify the rest of the code, restrict to the GEU case.  */
18252 if (compare_code == LTU)
18253 {
18254 HOST_WIDE_INT tmp = ct;
18255 ct = cf;
18256 cf = tmp;
18257 compare_code = reverse_condition (compare_code);
18258 code = reverse_condition (code);
18259 }
18260 else
18261 {
18262 if (fpcmp)
18263 PUT_CODE (compare_op,
18264 reverse_condition_maybe_unordered
18265 (GET_CODE (compare_op)));
18266 else
18267 PUT_CODE (compare_op,
18268 reverse_condition (GET_CODE (compare_op)));
18269 }
18270 diff = ct - cf;
18271
18272 if (reg_overlap_mentioned_p (out, op0)
18273 || reg_overlap_mentioned_p (out, op1))
18274 tmp = gen_reg_rtx (mode);
18275
18276 if (mode == DImode)
18277 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
18278 else
18279 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
18280 flags, compare_op));
18281 }
18282 else
18283 {
18284 if (code == GT || code == GE)
18285 code = reverse_condition (code);
18286 else
18287 {
18288 HOST_WIDE_INT tmp = ct;
18289 ct = cf;
18290 cf = tmp;
18291 diff = ct - cf;
18292 }
18293 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
18294 }
18295
18296 if (diff == 1)
18297 {
18298 /*
18299 * cmpl op0,op1
18300 * sbbl dest,dest
18301 * [addl dest, ct]
18302 *
18303 * Size 5 - 8.
18304 */
18305 if (ct)
18306 tmp = expand_simple_binop (mode, PLUS,
18307 tmp, GEN_INT (ct),
18308 copy_rtx (tmp), 1, OPTAB_DIRECT);
18309 }
18310 else if (cf == -1)
18311 {
18312 /*
18313 * cmpl op0,op1
18314 * sbbl dest,dest
18315 * orl $ct, dest
18316 *
18317 * Size 8.
18318 */
18319 tmp = expand_simple_binop (mode, IOR,
18320 tmp, GEN_INT (ct),
18321 copy_rtx (tmp), 1, OPTAB_DIRECT);
18322 }
18323 else if (diff == -1 && ct)
18324 {
18325 /*
18326 * cmpl op0,op1
18327 * sbbl dest,dest
18328 * notl dest
18329 * [addl dest, cf]
18330 *
18331 * Size 8 - 11.
18332 */
18333 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18334 if (cf)
18335 tmp = expand_simple_binop (mode, PLUS,
18336 copy_rtx (tmp), GEN_INT (cf),
18337 copy_rtx (tmp), 1, OPTAB_DIRECT);
18338 }
18339 else
18340 {
18341 /*
18342 * cmpl op0,op1
18343 * sbbl dest,dest
18344 * [notl dest]
18345 * andl cf - ct, dest
18346 * [addl dest, ct]
18347 *
18348 * Size 8 - 11.
18349 */
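	      /*
	       * For example, with ct == 5 and cf == 2 the cmp/sbb pair leaves
	       * dest equal to -1 or 0; "andl $-3, dest" (cf - ct == -3) then
	       * yields -3 or 0, and "addl $5, dest" yields 2 or 5.
	       */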
18350
18351 if (cf == 0)
18352 {
18353 cf = ct;
18354 ct = 0;
18355 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18356 }
18357
18358 tmp = expand_simple_binop (mode, AND,
18359 copy_rtx (tmp),
18360 gen_int_mode (cf - ct, mode),
18361 copy_rtx (tmp), 1, OPTAB_DIRECT);
18362 if (ct)
18363 tmp = expand_simple_binop (mode, PLUS,
18364 copy_rtx (tmp), GEN_INT (ct),
18365 copy_rtx (tmp), 1, OPTAB_DIRECT);
18366 }
18367
18368 if (!rtx_equal_p (tmp, out))
18369 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
18370
18371 return true;
18372 }
18373
18374 if (diff < 0)
18375 {
18376 enum machine_mode cmp_mode = GET_MODE (op0);
18377
18378 HOST_WIDE_INT tmp;
18379 tmp = ct, ct = cf, cf = tmp;
18380 diff = -diff;
18381
18382 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18383 {
18384 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18385
18386 	      /* We may be reversing an unordered compare to a normal compare,
18387 		 which is not valid in general (we may convert a non-trapping
18388 		 condition to a trapping one); however, on i386 we currently
18389 		 emit all comparisons unordered.  */
18390 compare_code = reverse_condition_maybe_unordered (compare_code);
18391 code = reverse_condition_maybe_unordered (code);
18392 }
18393 else
18394 {
18395 compare_code = reverse_condition (compare_code);
18396 code = reverse_condition (code);
18397 }
18398 }
18399
18400 compare_code = UNKNOWN;
18401 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
18402 && CONST_INT_P (op1))
18403 {
18404 if (op1 == const0_rtx
18405 && (code == LT || code == GE))
18406 compare_code = code;
18407 else if (op1 == constm1_rtx)
18408 {
18409 if (code == LE)
18410 compare_code = LT;
18411 else if (code == GT)
18412 compare_code = GE;
18413 }
18414 }
18415
18416 /* Optimize dest = (op0 < 0) ? -1 : cf. */
18417 if (compare_code != UNKNOWN
18418 && GET_MODE (op0) == GET_MODE (out)
18419 && (cf == -1 || ct == -1))
18420 {
18421 /* If lea code below could be used, only optimize
18422 if it results in a 2 insn sequence. */
18423
18424 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
18425 || diff == 3 || diff == 5 || diff == 9)
18426 || (compare_code == LT && ct == -1)
18427 || (compare_code == GE && cf == -1))
18428 {
18429 /*
18430 * notl op1 (if necessary)
18431 * sarl $31, op1
18432 * orl cf, op1
18433 */
18434 if (ct != -1)
18435 {
18436 cf = ct;
18437 ct = -1;
18438 code = reverse_condition (code);
18439 }
18440
18441 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18442
18443 out = expand_simple_binop (mode, IOR,
18444 out, GEN_INT (cf),
18445 out, 1, OPTAB_DIRECT);
18446 if (out != operands[0])
18447 emit_move_insn (operands[0], out);
18448
18449 return true;
18450 }
18451 }
18452
18453
18454 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
18455 || diff == 3 || diff == 5 || diff == 9)
18456 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
18457 && (mode != DImode
18458 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
18459 {
18460 /*
18461 * xorl dest,dest
18462 * cmpl op1,op2
18463 * setcc dest
18464 * lea cf(dest*(ct-cf)),dest
18465 *
18466 * Size 14.
18467 *
18468 * This also catches the degenerate setcc-only case.
18469 */
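	  /*
	   * For example, with ct == 8 and cf == 3 (diff == 5) this emits
	   *	setcc dest
	   *	leal 3(dest,dest,4), dest
	   * i.e. dest = dest * 5 + 3, giving 3 or 8.
	   */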
18470
18471 rtx tmp;
18472 int nops;
18473
18474 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18475
18476 nops = 0;
18477 	  /* On x86_64 the lea instruction operates on Pmode, so we need
18478 	     to do the arithmetic in the proper mode to match.  */
18479 if (diff == 1)
18480 tmp = copy_rtx (out);
18481 else
18482 {
18483 rtx out1;
18484 out1 = copy_rtx (out);
18485 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
18486 nops++;
18487 if (diff & 1)
18488 {
18489 tmp = gen_rtx_PLUS (mode, tmp, out1);
18490 nops++;
18491 }
18492 }
18493 if (cf != 0)
18494 {
18495 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
18496 nops++;
18497 }
18498 if (!rtx_equal_p (tmp, out))
18499 {
18500 if (nops == 1)
18501 out = force_operand (tmp, copy_rtx (out));
18502 else
18503 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
18504 }
18505 if (!rtx_equal_p (out, operands[0]))
18506 emit_move_insn (operands[0], copy_rtx (out));
18507
18508 return true;
18509 }
18510
18511 /*
18512 * General case: Jumpful:
18513 * xorl dest,dest cmpl op1, op2
18514 * cmpl op1, op2 movl ct, dest
18515 * setcc dest jcc 1f
18516 * decl dest movl cf, dest
18517 * andl (cf-ct),dest 1:
18518 * addl ct,dest
18519 *
18520 * Size 20. Size 14.
18521 *
18522 * This is reasonably steep, but branch mispredict costs are
18523 * high on modern cpus, so consider failing only if optimizing
18524 * for space.
18525 */
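      /*
       * For example, with ct == 7 and cf == 3 the branchless sequence is
       *	setcc dest		; dest = 0 or 1
       *	decl dest		; dest = -1 or 0
       *	andl $-4, dest		; cf - ct == -4
       *	addl $7, dest		; dest = 3 or 7
       */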
18526
18527 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18528 && BRANCH_COST (optimize_insn_for_speed_p (),
18529 false) >= 2)
18530 {
18531 if (cf == 0)
18532 {
18533 enum machine_mode cmp_mode = GET_MODE (op0);
18534
18535 cf = ct;
18536 ct = 0;
18537
18538 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18539 {
18540 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18541
18542 	      /* We may be reversing an unordered compare to a normal compare,
18543 		 which is not valid in general (we may convert a non-trapping
18544 		 condition to a trapping one); however, on i386 we currently
18545 		 emit all comparisons unordered.  */
18546 code = reverse_condition_maybe_unordered (code);
18547 }
18548 else
18549 {
18550 code = reverse_condition (code);
18551 if (compare_code != UNKNOWN)
18552 compare_code = reverse_condition (compare_code);
18553 }
18554 }
18555
18556 if (compare_code != UNKNOWN)
18557 {
18558 /* notl op1 (if needed)
18559 sarl $31, op1
18560 andl (cf-ct), op1
18561 addl ct, op1
18562
18563 For x < 0 (resp. x <= -1) there will be no notl,
18564 so if possible swap the constants to get rid of the
18565 complement.
18566 True/false will be -1/0 while code below (store flag
18567 followed by decrement) is 0/-1, so the constants need
18568 to be exchanged once more. */
18569
18570 if (compare_code == GE || !cf)
18571 {
18572 code = reverse_condition (code);
18573 compare_code = LT;
18574 }
18575 else
18576 {
18577 HOST_WIDE_INT tmp = cf;
18578 cf = ct;
18579 ct = tmp;
18580 }
18581
18582 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18583 }
18584 else
18585 {
18586 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18587
18588 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
18589 constm1_rtx,
18590 copy_rtx (out), 1, OPTAB_DIRECT);
18591 }
18592
18593 out = expand_simple_binop (mode, AND, copy_rtx (out),
18594 gen_int_mode (cf - ct, mode),
18595 copy_rtx (out), 1, OPTAB_DIRECT);
18596 if (ct)
18597 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
18598 copy_rtx (out), 1, OPTAB_DIRECT);
18599 if (!rtx_equal_p (out, operands[0]))
18600 emit_move_insn (operands[0], copy_rtx (out));
18601
18602 return true;
18603 }
18604 }
18605
18606 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18607 {
18608       /* Try a few more things with specific constants and a variable.  */
18609
18610 optab op;
18611 rtx var, orig_out, out, tmp;
18612
18613 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
18614 return false;
18615
18616 /* If one of the two operands is an interesting constant, load a
18617 constant with the above and mask it in with a logical operation. */
18618
18619 if (CONST_INT_P (operands[2]))
18620 {
18621 var = operands[3];
18622 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
18623 operands[3] = constm1_rtx, op = and_optab;
18624 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
18625 operands[3] = const0_rtx, op = ior_optab;
18626 else
18627 return false;
18628 }
18629 else if (CONST_INT_P (operands[3]))
18630 {
18631 var = operands[2];
18632 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
18633 operands[2] = constm1_rtx, op = and_optab;
18634 	  else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
18635 operands[2] = const0_rtx, op = ior_optab;
18636 else
18637 return false;
18638 }
18639 else
18640 return false;
18641
18642 orig_out = operands[0];
18643 tmp = gen_reg_rtx (mode);
18644 operands[0] = tmp;
18645
18646 /* Recurse to get the constant loaded. */
18647 if (ix86_expand_int_movcc (operands) == 0)
18648 return false;
18649
18650 /* Mask in the interesting variable. */
18651 out = expand_binop (mode, op, var, tmp, orig_out, 0,
18652 OPTAB_WIDEN);
18653 if (!rtx_equal_p (out, orig_out))
18654 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
18655
18656 return true;
18657 }
18658
18659 /*
18660 * For comparison with above,
18661 *
18662 * movl cf,dest
18663 * movl ct,tmp
18664 * cmpl op1,op2
18665 * cmovcc tmp,dest
18666 *
18667 * Size 15.
18668 */
18669
18670 if (! nonimmediate_operand (operands[2], mode))
18671 operands[2] = force_reg (mode, operands[2]);
18672 if (! nonimmediate_operand (operands[3], mode))
18673 operands[3] = force_reg (mode, operands[3]);
18674
18675 if (! register_operand (operands[2], VOIDmode)
18676 && (mode == QImode
18677 || ! register_operand (operands[3], VOIDmode)))
18678 operands[2] = force_reg (mode, operands[2]);
18679
18680 if (mode == QImode
18681 && ! register_operand (operands[3], VOIDmode))
18682 operands[3] = force_reg (mode, operands[3]);
18683
18684 emit_insn (compare_seq);
18685 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18686 gen_rtx_IF_THEN_ELSE (mode,
18687 compare_op, operands[2],
18688 operands[3])));
18689 return true;
18690 }
18691
18692 /* Swap, force into registers, or otherwise massage the two operands
18693 to an sse comparison with a mask result. Thus we differ a bit from
18694 ix86_prepare_fp_compare_args which expects to produce a flags result.
18695
18696 The DEST operand exists to help determine whether to commute commutative
18697 operators. The POP0/POP1 operands are updated in place. The new
18698 comparison code is returned, or UNKNOWN if not implementable. */
18699
18700 static enum rtx_code
18701 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
18702 rtx *pop0, rtx *pop1)
18703 {
18704 rtx tmp;
18705
18706 switch (code)
18707 {
18708 case LTGT:
18709 case UNEQ:
18710 /* We have no LTGT as an operator. We could implement it with
18711 NE & ORDERED, but this requires an extra temporary. It's
18712 not clear that it's worth it. */
18713 return UNKNOWN;
18714
18715 case LT:
18716 case LE:
18717 case UNGT:
18718 case UNGE:
18719 /* These are supported directly. */
18720 break;
18721
18722 case EQ:
18723 case NE:
18724 case UNORDERED:
18725 case ORDERED:
18726 /* For commutative operators, try to canonicalize the destination
18727 operand to be first in the comparison - this helps reload to
18728 avoid extra moves. */
18729 if (!dest || !rtx_equal_p (dest, *pop1))
18730 break;
18731 /* FALLTHRU */
18732
18733 case GE:
18734 case GT:
18735 case UNLE:
18736 case UNLT:
18737 /* These are not supported directly. Swap the comparison operands
18738 to transform into something that is supported. */
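      /* For instance, "a > b" is rewritten as "b < a" and "a unle b" as
	 "b unge a", both of which map onto the directly supported compares.  */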
18739 tmp = *pop0;
18740 *pop0 = *pop1;
18741 *pop1 = tmp;
18742 code = swap_condition (code);
18743 break;
18744
18745 default:
18746 gcc_unreachable ();
18747 }
18748
18749 return code;
18750 }
18751
18752 /* Detect conditional moves that exactly match min/max operational
18753 semantics. Note that this is IEEE safe, as long as we don't
18754 interchange the operands.
18755
18756 Returns FALSE if this conditional move doesn't match a MIN/MAX,
18757 and TRUE if the operation is successful and instructions are emitted. */
18758
18759 static bool
18760 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
18761 rtx cmp_op1, rtx if_true, rtx if_false)
18762 {
18763 enum machine_mode mode;
18764 bool is_min;
18765 rtx tmp;
18766
18767 if (code == LT)
18768 ;
18769 else if (code == UNGE)
18770 {
18771 tmp = if_true;
18772 if_true = if_false;
18773 if_false = tmp;
18774 }
18775 else
18776 return false;
18777
18778 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
18779 is_min = true;
18780 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
18781 is_min = false;
18782 else
18783 return false;
18784
18785 mode = GET_MODE (dest);
18786
18787 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
18788 but MODE may be a vector mode and thus not appropriate. */
18789 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
18790 {
18791 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
18792 rtvec v;
18793
18794 if_true = force_reg (mode, if_true);
18795 v = gen_rtvec (2, if_true, if_false);
18796 tmp = gen_rtx_UNSPEC (mode, v, u);
18797 }
18798 else
18799 {
18800 code = is_min ? SMIN : SMAX;
18801 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
18802 }
18803
18804 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
18805 return true;
18806 }
18807
18808 /* Expand an sse vector comparison. Return the register with the result. */
18809
18810 static rtx
18811 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
18812 rtx op_true, rtx op_false)
18813 {
18814 enum machine_mode mode = GET_MODE (dest);
18815 rtx x;
18816
18817 cmp_op0 = force_reg (mode, cmp_op0);
18818 if (!nonimmediate_operand (cmp_op1, mode))
18819 cmp_op1 = force_reg (mode, cmp_op1);
18820
18821 if (optimize
18822 || reg_overlap_mentioned_p (dest, op_true)
18823 || reg_overlap_mentioned_p (dest, op_false))
18824 dest = gen_reg_rtx (mode);
18825
18826 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
18827 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18828
18829 return dest;
18830 }
18831
18832 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
18833 operations. This is used for both scalar and vector conditional moves. */
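/* In each case below the net effect is
   dest = (CMP & OP_TRUE) | (~CMP & OP_FALSE),
   assuming CMP is an elementwise mask of all-one or all-zero bits.  */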
18834
18835 static void
18836 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
18837 {
18838 enum machine_mode mode = GET_MODE (dest);
18839 rtx t2, t3, x;
18840
18841 if (op_false == CONST0_RTX (mode))
18842 {
18843 op_true = force_reg (mode, op_true);
18844 x = gen_rtx_AND (mode, cmp, op_true);
18845 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18846 }
18847 else if (op_true == CONST0_RTX (mode))
18848 {
18849 op_false = force_reg (mode, op_false);
18850 x = gen_rtx_NOT (mode, cmp);
18851 x = gen_rtx_AND (mode, x, op_false);
18852 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18853 }
18854 else if (TARGET_XOP)
18855 {
18856 rtx pcmov = gen_rtx_SET (mode, dest,
18857 gen_rtx_IF_THEN_ELSE (mode, cmp,
18858 op_true,
18859 op_false));
18860 emit_insn (pcmov);
18861 }
18862 else
18863 {
18864 op_true = force_reg (mode, op_true);
18865 op_false = force_reg (mode, op_false);
18866
18867 t2 = gen_reg_rtx (mode);
18868 if (optimize)
18869 t3 = gen_reg_rtx (mode);
18870 else
18871 t3 = dest;
18872
18873 x = gen_rtx_AND (mode, op_true, cmp);
18874 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
18875
18876 x = gen_rtx_NOT (mode, cmp);
18877 x = gen_rtx_AND (mode, x, op_false);
18878 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
18879
18880 x = gen_rtx_IOR (mode, t3, t2);
18881 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18882 }
18883 }
18884
18885 /* Expand a floating-point conditional move. Return true if successful. */
18886
18887 bool
18888 ix86_expand_fp_movcc (rtx operands[])
18889 {
18890 enum machine_mode mode = GET_MODE (operands[0]);
18891 enum rtx_code code = GET_CODE (operands[1]);
18892 rtx tmp, compare_op;
18893 rtx op0 = XEXP (operands[1], 0);
18894 rtx op1 = XEXP (operands[1], 1);
18895
18896 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
18897 {
18898 enum machine_mode cmode;
18899
18900 /* Since we've no cmove for sse registers, don't force bad register
18901 allocation just to gain access to it. Deny movcc when the
18902 comparison mode doesn't match the move mode. */
18903 cmode = GET_MODE (op0);
18904 if (cmode == VOIDmode)
18905 cmode = GET_MODE (op1);
18906 if (cmode != mode)
18907 return false;
18908
18909 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
18910 if (code == UNKNOWN)
18911 return false;
18912
18913 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
18914 operands[2], operands[3]))
18915 return true;
18916
18917 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
18918 operands[2], operands[3]);
18919 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
18920 return true;
18921 }
18922
18923 /* The floating point conditional move instructions don't directly
18924 support conditions resulting from a signed integer comparison. */
18925
18926 compare_op = ix86_expand_compare (code, op0, op1);
18927 if (!fcmov_comparison_operator (compare_op, VOIDmode))
18928 {
18929 tmp = gen_reg_rtx (QImode);
18930 ix86_expand_setcc (tmp, code, op0, op1);
18931
18932 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
18933 }
18934
18935 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18936 gen_rtx_IF_THEN_ELSE (mode, compare_op,
18937 operands[2], operands[3])));
18938
18939 return true;
18940 }
18941
18942 /* Expand a floating-point vector conditional move; a vcond operation
18943 rather than a movcc operation. */
18944
18945 bool
18946 ix86_expand_fp_vcond (rtx operands[])
18947 {
18948 enum rtx_code code = GET_CODE (operands[3]);
18949 rtx cmp;
18950
18951 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
18952 &operands[4], &operands[5]);
18953 if (code == UNKNOWN)
18954 return false;
18955
18956 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
18957 operands[5], operands[1], operands[2]))
18958 return true;
18959
18960 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
18961 operands[1], operands[2]);
18962 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
18963 return true;
18964 }
18965
18966 /* Expand a signed/unsigned integral vector conditional move. */
18967
18968 bool
18969 ix86_expand_int_vcond (rtx operands[])
18970 {
18971 enum machine_mode mode = GET_MODE (operands[0]);
18972 enum rtx_code code = GET_CODE (operands[3]);
18973 bool negate = false;
18974 rtx x, cop0, cop1;
18975
18976 cop0 = operands[4];
18977 cop1 = operands[5];
18978
18979 /* XOP supports all of the comparisons on all vector int types. */
18980 if (!TARGET_XOP)
18981 {
18982 /* Canonicalize the comparison to EQ, GT, GTU. */
18983 switch (code)
18984 {
18985 case EQ:
18986 case GT:
18987 case GTU:
18988 break;
18989
18990 case NE:
18991 case LE:
18992 case LEU:
18993 code = reverse_condition (code);
18994 negate = true;
18995 break;
18996
18997 case GE:
18998 case GEU:
18999 code = reverse_condition (code);
19000 negate = true;
19001 /* FALLTHRU */
19002
19003 case LT:
19004 case LTU:
19005 code = swap_condition (code);
19006 x = cop0, cop0 = cop1, cop1 = x;
19007 break;
19008
19009 default:
19010 gcc_unreachable ();
19011 }
19012
19013 /* Only SSE4.1/SSE4.2 supports V2DImode. */
19014 if (mode == V2DImode)
19015 {
19016 switch (code)
19017 {
19018 case EQ:
19019 /* SSE4.1 supports EQ. */
19020 if (!TARGET_SSE4_1)
19021 return false;
19022 break;
19023
19024 case GT:
19025 case GTU:
19026 /* SSE4.2 supports GT/GTU. */
19027 if (!TARGET_SSE4_2)
19028 return false;
19029 break;
19030
19031 default:
19032 gcc_unreachable ();
19033 }
19034 }
19035
19036 /* Unsigned parallel compare is not supported by the hardware.
19037 Play some tricks to turn this into a signed comparison
19038 against 0. */
19039 if (code == GTU)
19040 {
19041 cop0 = force_reg (mode, cop0);
19042
19043 switch (mode)
19044 {
19045 case V4SImode:
19046 case V2DImode:
19047 {
19048 rtx t1, t2, mask;
19049 rtx (*gen_sub3) (rtx, rtx, rtx);
19050
19051 /* Subtract (-(INT MAX) - 1) from both operands to make
19052 them signed. */
19053 mask = ix86_build_signbit_mask (mode, true, false);
19054 gen_sub3 = (mode == V4SImode
19055 ? gen_subv4si3 : gen_subv2di3);
19056 t1 = gen_reg_rtx (mode);
19057 emit_insn (gen_sub3 (t1, cop0, mask));
19058
19059 t2 = gen_reg_rtx (mode);
19060 emit_insn (gen_sub3 (t2, cop1, mask));
19061
19062 cop0 = t1;
19063 cop1 = t2;
19064 code = GT;
19065 }
19066 break;
19067
19068 case V16QImode:
19069 case V8HImode:
19070 /* Perform a parallel unsigned saturating subtraction. */
19071 x = gen_reg_rtx (mode);
19072 emit_insn (gen_rtx_SET (VOIDmode, x,
19073 gen_rtx_US_MINUS (mode, cop0, cop1)));
19074
19075 cop0 = x;
19076 cop1 = CONST0_RTX (mode);
19077 code = EQ;
19078 negate = !negate;
19079 break;
19080
19081 default:
19082 gcc_unreachable ();
19083 }
19084 }
19085 }
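      /* Roughly, for unsigned SImode/DImode elements the GTU case above
	 computes a > b as (a - 0x80000000) > (b - 0x80000000) using a signed
	 compare, and for bytes/words as "saturating (a - b) != 0" via an EQ
	 compare against zero combined with the negate flag.  */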
19086
19087 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
19088 operands[1+negate], operands[2-negate]);
19089
19090 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
19091 operands[2-negate]);
19092 return true;
19093 }
19094
19095 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
19096 true if we should do zero extension, else sign extension. HIGH_P is
19097 true if we want the N/2 high elements, else the low elements. */
19098
19099 void
19100 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19101 {
19102 enum machine_mode imode = GET_MODE (operands[1]);
19103 rtx tmp, dest;
19104
19105 if (TARGET_SSE4_1)
19106 {
19107 rtx (*unpack)(rtx, rtx);
19108
19109 switch (imode)
19110 {
19111 case V16QImode:
19112 if (unsigned_p)
19113 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
19114 else
19115 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
19116 break;
19117 case V8HImode:
19118 if (unsigned_p)
19119 unpack = gen_sse4_1_zero_extendv4hiv4si2;
19120 else
19121 unpack = gen_sse4_1_sign_extendv4hiv4si2;
19122 break;
19123 case V4SImode:
19124 if (unsigned_p)
19125 unpack = gen_sse4_1_zero_extendv2siv2di2;
19126 else
19127 unpack = gen_sse4_1_sign_extendv2siv2di2;
19128 break;
19129 default:
19130 gcc_unreachable ();
19131 }
19132
19133 if (high_p)
19134 {
19135 /* Shift higher 8 bytes to lower 8 bytes. */
19136 tmp = gen_reg_rtx (imode);
19137 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, tmp),
19138 gen_lowpart (V1TImode, operands[1]),
19139 GEN_INT (64)));
19140 }
19141 else
19142 tmp = operands[1];
19143
19144 emit_insn (unpack (operands[0], tmp));
19145 }
19146 else
19147 {
19148 rtx (*unpack)(rtx, rtx, rtx);
19149
19150 switch (imode)
19151 {
19152 case V16QImode:
19153 if (high_p)
19154 unpack = gen_vec_interleave_highv16qi;
19155 else
19156 unpack = gen_vec_interleave_lowv16qi;
19157 break;
19158 case V8HImode:
19159 if (high_p)
19160 unpack = gen_vec_interleave_highv8hi;
19161 else
19162 unpack = gen_vec_interleave_lowv8hi;
19163 break;
19164 case V4SImode:
19165 if (high_p)
19166 unpack = gen_vec_interleave_highv4si;
19167 else
19168 unpack = gen_vec_interleave_lowv4si;
19169 break;
19170 default:
19171 gcc_unreachable ();
19172 }
19173
19174 dest = gen_lowpart (imode, operands[0]);
19175
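      /* For zero extension, interleave with a zero vector; for sign
	 extension, interleave with a per-element sign mask computed as
	 0 > operands[1].  */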
19176 if (unsigned_p)
19177 tmp = force_reg (imode, CONST0_RTX (imode));
19178 else
19179 tmp = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
19180 operands[1], pc_rtx, pc_rtx);
19181
19182 emit_insn (unpack (dest, operands[1], tmp));
19183 }
19184 }
19185
19186 /* Expand conditional increment or decrement using adc/sbb instructions.
19187    The default case using setcc followed by the conditional move can be
19188    done by generic code.  */
19189 bool
19190 ix86_expand_int_addcc (rtx operands[])
19191 {
19192 enum rtx_code code = GET_CODE (operands[1]);
19193 rtx flags;
19194 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
19195 rtx compare_op;
19196 rtx val = const0_rtx;
19197 bool fpcmp = false;
19198 enum machine_mode mode;
19199 rtx op0 = XEXP (operands[1], 0);
19200 rtx op1 = XEXP (operands[1], 1);
19201
19202 if (operands[3] != const1_rtx
19203 && operands[3] != constm1_rtx)
19204 return false;
19205 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
19206 return false;
19207 code = GET_CODE (compare_op);
19208
19209 flags = XEXP (compare_op, 0);
19210
19211 if (GET_MODE (flags) == CCFPmode
19212 || GET_MODE (flags) == CCFPUmode)
19213 {
19214 fpcmp = true;
19215 code = ix86_fp_compare_code_to_integer (code);
19216 }
19217
19218 if (code != LTU)
19219 {
19220 val = constm1_rtx;
19221 if (fpcmp)
19222 PUT_CODE (compare_op,
19223 reverse_condition_maybe_unordered
19224 (GET_CODE (compare_op)));
19225 else
19226 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
19227 }
19228
19229 mode = GET_MODE (operands[0]);
19230
19231 /* Construct either adc or sbb insn. */
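  /* The comparison now leaves its result in the carry flag (code is LTU or
     GEU).  Below, either a sub-with-borrow or an add-with-carry pattern is
     chosen so that operands[0] becomes operands[2] plus or minus one exactly
     when the original condition holds; VAL (0 or -1) compensates for the
     reversal applied above.  */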
19232 if ((code == LTU) == (operands[3] == constm1_rtx))
19233 {
19234 switch (mode)
19235 {
19236 case QImode:
19237 insn = gen_subqi3_carry;
19238 break;
19239 case HImode:
19240 insn = gen_subhi3_carry;
19241 break;
19242 case SImode:
19243 insn = gen_subsi3_carry;
19244 break;
19245 case DImode:
19246 insn = gen_subdi3_carry;
19247 break;
19248 default:
19249 gcc_unreachable ();
19250 }
19251 }
19252 else
19253 {
19254 switch (mode)
19255 {
19256 case QImode:
19257 insn = gen_addqi3_carry;
19258 break;
19259 case HImode:
19260 insn = gen_addhi3_carry;
19261 break;
19262 case SImode:
19263 insn = gen_addsi3_carry;
19264 break;
19265 case DImode:
19266 insn = gen_adddi3_carry;
19267 break;
19268 default:
19269 gcc_unreachable ();
19270 }
19271 }
19272 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
19273
19274 return true;
19275 }
19276
19277
19278 /* Split operands 0 and 1 into half-mode parts.  Similar to split_double_mode,
19279    but works for floating point parameters and non-offsettable memories.
19280    For pushes, it returns just stack offsets; the values will be saved
19281    in the right order.  At most four parts are generated.  */
19282
19283 static int
19284 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
19285 {
19286 int size;
19287
19288 if (!TARGET_64BIT)
19289 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
19290 else
19291 size = (GET_MODE_SIZE (mode) + 4) / 8;
19292
19293 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
19294 gcc_assert (size >= 2 && size <= 4);
19295
19296   /* Optimize constant pool references to immediates.  This is used by fp
19297      moves, which force all constants to memory to allow combining.  */
19298 if (MEM_P (operand) && MEM_READONLY_P (operand))
19299 {
19300 rtx tmp = maybe_get_pool_constant (operand);
19301 if (tmp)
19302 operand = tmp;
19303 }
19304
19305 if (MEM_P (operand) && !offsettable_memref_p (operand))
19306 {
19307       /* The only non-offsettable memories we handle are pushes.  */
19308 int ok = push_operand (operand, VOIDmode);
19309
19310 gcc_assert (ok);
19311
19312 operand = copy_rtx (operand);
19313 PUT_MODE (operand, Pmode);
19314 parts[0] = parts[1] = parts[2] = parts[3] = operand;
19315 return size;
19316 }
19317
19318 if (GET_CODE (operand) == CONST_VECTOR)
19319 {
19320 enum machine_mode imode = int_mode_for_mode (mode);
19321 /* Caution: if we looked through a constant pool memory above,
19322 the operand may actually have a different mode now. That's
19323 ok, since we want to pun this all the way back to an integer. */
19324 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
19325 gcc_assert (operand != NULL);
19326 mode = imode;
19327 }
19328
19329 if (!TARGET_64BIT)
19330 {
19331 if (mode == DImode)
19332 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19333 else
19334 {
19335 int i;
19336
19337 if (REG_P (operand))
19338 {
19339 gcc_assert (reload_completed);
19340 for (i = 0; i < size; i++)
19341 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
19342 }
19343 else if (offsettable_memref_p (operand))
19344 {
19345 operand = adjust_address (operand, SImode, 0);
19346 parts[0] = operand;
19347 for (i = 1; i < size; i++)
19348 parts[i] = adjust_address (operand, SImode, 4 * i);
19349 }
19350 else if (GET_CODE (operand) == CONST_DOUBLE)
19351 {
19352 REAL_VALUE_TYPE r;
19353 long l[4];
19354
19355 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19356 switch (mode)
19357 {
19358 case TFmode:
19359 real_to_target (l, &r, mode);
19360 parts[3] = gen_int_mode (l[3], SImode);
19361 parts[2] = gen_int_mode (l[2], SImode);
19362 break;
19363 case XFmode:
19364 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
19365 parts[2] = gen_int_mode (l[2], SImode);
19366 break;
19367 case DFmode:
19368 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
19369 break;
19370 default:
19371 gcc_unreachable ();
19372 }
19373 parts[1] = gen_int_mode (l[1], SImode);
19374 parts[0] = gen_int_mode (l[0], SImode);
19375 }
19376 else
19377 gcc_unreachable ();
19378 }
19379 }
19380 else
19381 {
19382 if (mode == TImode)
19383 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19384 if (mode == XFmode || mode == TFmode)
19385 {
19386 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
19387 if (REG_P (operand))
19388 {
19389 gcc_assert (reload_completed);
19390 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
19391 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
19392 }
19393 else if (offsettable_memref_p (operand))
19394 {
19395 operand = adjust_address (operand, DImode, 0);
19396 parts[0] = operand;
19397 parts[1] = adjust_address (operand, upper_mode, 8);
19398 }
19399 else if (GET_CODE (operand) == CONST_DOUBLE)
19400 {
19401 REAL_VALUE_TYPE r;
19402 long l[4];
19403
19404 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19405 real_to_target (l, &r, mode);
19406
19407 /* Do not use shift by 32 to avoid warning on 32bit systems. */
19408 if (HOST_BITS_PER_WIDE_INT >= 64)
19409 parts[0]
19410 = gen_int_mode
19411 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
19412 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
19413 DImode);
19414 else
19415 parts[0] = immed_double_const (l[0], l[1], DImode);
19416
19417 if (upper_mode == SImode)
19418 parts[1] = gen_int_mode (l[2], SImode);
19419 else if (HOST_BITS_PER_WIDE_INT >= 64)
19420 parts[1]
19421 = gen_int_mode
19422 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
19423 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
19424 DImode);
19425 else
19426 parts[1] = immed_double_const (l[2], l[3], DImode);
19427 }
19428 else
19429 gcc_unreachable ();
19430 }
19431 }
19432
19433 return size;
19434 }
19435
19436 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
19437    All required insns are emitted here.  Operands 2-5 receive the
19438    destination parts in the correct order; operands 6-9 receive the
19439    corresponding source parts.  */
19440
19441 void
19442 ix86_split_long_move (rtx operands[])
19443 {
19444 rtx part[2][4];
19445 int nparts, i, j;
19446 int push = 0;
19447 int collisions = 0;
19448 enum machine_mode mode = GET_MODE (operands[0]);
19449 bool collisionparts[4];
19450
19451   /* The DFmode expanders may ask us to move a double.
19452      For a 64-bit target this is a single move.  By hiding that fact
19453      here we simplify the i386.md splitters.  */
19454 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
19455 {
19456 /* Optimize constant pool reference to immediates. This is used by
19457 fp moves, that force all constants to memory to allow combining. */
19458
19459 if (MEM_P (operands[1])
19460 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
19461 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
19462 operands[1] = get_pool_constant (XEXP (operands[1], 0));
19463 if (push_operand (operands[0], VOIDmode))
19464 {
19465 operands[0] = copy_rtx (operands[0]);
19466 PUT_MODE (operands[0], Pmode);
19467 }
19468 else
19469 operands[0] = gen_lowpart (DImode, operands[0]);
19470 operands[1] = gen_lowpart (DImode, operands[1]);
19471 emit_move_insn (operands[0], operands[1]);
19472 return;
19473 }
19474
19475 /* The only non-offsettable memory we handle is push. */
19476 if (push_operand (operands[0], VOIDmode))
19477 push = 1;
19478 else
19479 gcc_assert (!MEM_P (operands[0])
19480 || offsettable_memref_p (operands[0]));
19481
19482 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
19483 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
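  /* part[0][] now holds the pieces of the destination (operands[0]) and
     part[1][] the pieces of the source (operands[1]).  */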
19484
19485   /* When emitting a push, take care of source operands on the stack.  */
19486 if (push && MEM_P (operands[1])
19487 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
19488 {
19489 rtx src_base = XEXP (part[1][nparts - 1], 0);
19490
19491 /* Compensate for the stack decrement by 4. */
19492 if (!TARGET_64BIT && nparts == 3
19493 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
19494 src_base = plus_constant (src_base, 4);
19495
19496 /* src_base refers to the stack pointer and is
19497 automatically decreased by emitted push. */
19498 for (i = 0; i < nparts; i++)
19499 part[1][i] = change_address (part[1][i],
19500 GET_MODE (part[1][i]), src_base);
19501 }
19502
19503   /* We need to do the copy in the right order in case an address register
19504      of the source overlaps the destination.  */
19505 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
19506 {
19507 rtx tmp;
19508
19509 for (i = 0; i < nparts; i++)
19510 {
19511 collisionparts[i]
19512 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
19513 if (collisionparts[i])
19514 collisions++;
19515 }
19516
19517 /* Collision in the middle part can be handled by reordering. */
19518 if (collisions == 1 && nparts == 3 && collisionparts [1])
19519 {
19520 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19521 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19522 }
19523 else if (collisions == 1
19524 && nparts == 4
19525 && (collisionparts [1] || collisionparts [2]))
19526 {
19527 if (collisionparts [1])
19528 {
19529 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19530 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19531 }
19532 else
19533 {
19534 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
19535 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
19536 }
19537 }
19538
19539 /* If there are more collisions, we can't handle it by reordering.
19540 Do an lea to the last part and use only one colliding move. */
19541 else if (collisions > 1)
19542 {
19543 rtx base;
19544
19545 collisions = 1;
19546
19547 base = part[0][nparts - 1];
19548
19549 /* Handle the case when the last part isn't valid for lea.
19550 Happens in 64-bit mode storing the 12-byte XFmode. */
19551 if (GET_MODE (base) != Pmode)
19552 base = gen_rtx_REG (Pmode, REGNO (base));
19553
19554 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
19555 part[1][0] = replace_equiv_address (part[1][0], base);
19556 for (i = 1; i < nparts; i++)
19557 {
19558 tmp = plus_constant (base, UNITS_PER_WORD * i);
19559 part[1][i] = replace_equiv_address (part[1][i], tmp);
19560 }
19561 }
19562 }
19563
19564 if (push)
19565 {
19566 if (!TARGET_64BIT)
19567 {
19568 if (nparts == 3)
19569 {
19570 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
19571 emit_insn (gen_addsi3 (stack_pointer_rtx,
19572 stack_pointer_rtx, GEN_INT (-4)));
19573 emit_move_insn (part[0][2], part[1][2]);
19574 }
19575 else if (nparts == 4)
19576 {
19577 emit_move_insn (part[0][3], part[1][3]);
19578 emit_move_insn (part[0][2], part[1][2]);
19579 }
19580 }
19581 else
19582 {
19583 	  /* In 64-bit mode we don't have a 32-bit push available.  If this is
19584 	     a register, that is OK - we will just use the larger counterpart.
19585 	     We also retype the memory - this comes from an attempt to avoid a
19586 	     REX prefix when moving the second half of a TFmode value.  */
19587 if (GET_MODE (part[1][1]) == SImode)
19588 {
19589 switch (GET_CODE (part[1][1]))
19590 {
19591 case MEM:
19592 part[1][1] = adjust_address (part[1][1], DImode, 0);
19593 break;
19594
19595 case REG:
19596 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
19597 break;
19598
19599 default:
19600 gcc_unreachable ();
19601 }
19602
19603 if (GET_MODE (part[1][0]) == SImode)
19604 part[1][0] = part[1][1];
19605 }
19606 }
19607 emit_move_insn (part[0][1], part[1][1]);
19608 emit_move_insn (part[0][0], part[1][0]);
19609 return;
19610 }
19611
19612   /* Choose the correct order so as not to overwrite the source before it is copied.  */
19613 if ((REG_P (part[0][0])
19614 && REG_P (part[1][1])
19615 && (REGNO (part[0][0]) == REGNO (part[1][1])
19616 || (nparts == 3
19617 && REGNO (part[0][0]) == REGNO (part[1][2]))
19618 || (nparts == 4
19619 && REGNO (part[0][0]) == REGNO (part[1][3]))))
19620 || (collisions > 0
19621 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
19622 {
19623 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
19624 {
19625 operands[2 + i] = part[0][j];
19626 operands[6 + i] = part[1][j];
19627 }
19628 }
19629 else
19630 {
19631 for (i = 0; i < nparts; i++)
19632 {
19633 operands[2 + i] = part[0][i];
19634 operands[6 + i] = part[1][i];
19635 }
19636 }
19637
19638 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
19639 if (optimize_insn_for_size_p ())
19640 {
19641 for (j = 0; j < nparts - 1; j++)
19642 if (CONST_INT_P (operands[6 + j])
19643 && operands[6 + j] != const0_rtx
19644 && REG_P (operands[2 + j]))
19645 for (i = j; i < nparts - 1; i++)
19646 if (CONST_INT_P (operands[7 + i])
19647 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
19648 operands[7 + i] = operands[2 + j];
19649 }
19650
19651 for (i = 0; i < nparts; i++)
19652 emit_move_insn (operands[2 + i], operands[6 + i]);
19653
19654 return;
19655 }
19656
19657 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
19658 left shift by a constant, either using a single shift or
19659 a sequence of add instructions. */
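/* For example, a left shift by 2 may be emitted as two add insns
   (operand += operand, twice) when two adds are no more costly than a
   single shift by a constant and we are not optimizing for size.  */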
19660
19661 static void
19662 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
19663 {
19664 rtx (*insn)(rtx, rtx, rtx);
19665
19666 if (count == 1
19667 || (count * ix86_cost->add <= ix86_cost->shift_const
19668 && !optimize_insn_for_size_p ()))
19669 {
19670 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
19671 while (count-- > 0)
19672 emit_insn (insn (operand, operand, operand));
19673 }
19674 else
19675 {
19676 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19677 emit_insn (insn (operand, operand, GEN_INT (count)));
19678 }
19679 }
19680
19681 void
19682 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
19683 {
19684 rtx (*gen_ashl3)(rtx, rtx, rtx);
19685 rtx (*gen_shld)(rtx, rtx, rtx);
19686 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19687
19688 rtx low[2], high[2];
19689 int count;
19690
19691 if (CONST_INT_P (operands[2]))
19692 {
19693 split_double_mode (mode, operands, 2, low, high);
19694 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19695
19696 if (count >= half_width)
19697 {
19698 emit_move_insn (high[0], low[1]);
19699 emit_move_insn (low[0], const0_rtx);
19700
19701 if (count > half_width)
19702 ix86_expand_ashl_const (high[0], count - half_width, mode);
19703 }
19704 else
19705 {
19706 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19707
19708 if (!rtx_equal_p (operands[0], operands[1]))
19709 emit_move_insn (operands[0], operands[1]);
19710
19711 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
19712 ix86_expand_ashl_const (low[0], count, mode);
19713 }
19714 return;
19715 }
19716
19717 split_double_mode (mode, operands, 1, low, high);
19718
19719 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19720
19721 if (operands[1] == const1_rtx)
19722 {
19723       /* Assuming we've chosen QImode-capable registers, 1 << N
19724 	 can be done with two 32/64-bit shifts, no branches, no cmoves.  */
19725 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
19726 {
19727 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
19728
19729 ix86_expand_clear (low[0]);
19730 ix86_expand_clear (high[0]);
19731 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
19732
19733 d = gen_lowpart (QImode, low[0]);
19734 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19735 s = gen_rtx_EQ (QImode, flags, const0_rtx);
19736 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19737
19738 d = gen_lowpart (QImode, high[0]);
19739 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19740 s = gen_rtx_NE (QImode, flags, const0_rtx);
19741 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19742 }
19743
19744 /* Otherwise, we can get the same results by manually performing
19745 a bit extract operation on bit 5/6, and then performing the two
19746 shifts. The two methods of getting 0/1 into low/high are exactly
19747 the same size. Avoiding the shift in the bit extract case helps
19748 pentium4 a bit; no one else seems to care much either way. */
19749 else
19750 {
19751 enum machine_mode half_mode;
19752 rtx (*gen_lshr3)(rtx, rtx, rtx);
19753 rtx (*gen_and3)(rtx, rtx, rtx);
19754 rtx (*gen_xor3)(rtx, rtx, rtx);
19755 HOST_WIDE_INT bits;
19756 rtx x;
19757
19758 if (mode == DImode)
19759 {
19760 half_mode = SImode;
19761 gen_lshr3 = gen_lshrsi3;
19762 gen_and3 = gen_andsi3;
19763 gen_xor3 = gen_xorsi3;
19764 bits = 5;
19765 }
19766 else
19767 {
19768 half_mode = DImode;
19769 gen_lshr3 = gen_lshrdi3;
19770 gen_and3 = gen_anddi3;
19771 gen_xor3 = gen_xordi3;
19772 bits = 6;
19773 }
19774
19775 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
19776 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
19777 else
19778 x = gen_lowpart (half_mode, operands[2]);
19779 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
19780
19781 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
19782 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
19783 emit_move_insn (low[0], high[0]);
19784 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
19785 }
19786
19787 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19788 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
19789 return;
19790 }
19791
19792 if (operands[1] == constm1_rtx)
19793 {
19794 /* For -1 << N, we can avoid the shld instruction, because we
19795 know that we're shifting 0...31/63 ones into a -1. */
19796 emit_move_insn (low[0], constm1_rtx);
19797 if (optimize_insn_for_size_p ())
19798 emit_move_insn (high[0], low[0]);
19799 else
19800 emit_move_insn (high[0], constm1_rtx);
19801 }
19802 else
19803 {
19804 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19805
19806 if (!rtx_equal_p (operands[0], operands[1]))
19807 emit_move_insn (operands[0], operands[1]);
19808
19809 split_double_mode (mode, operands, 1, low, high);
19810 emit_insn (gen_shld (high[0], low[0], operands[2]));
19811 }
19812
19813 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19814
19815 if (TARGET_CMOVE && scratch)
19816 {
19817 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19818 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19819
19820 ix86_expand_clear (scratch);
19821 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
19822 }
19823 else
19824 {
19825 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19826 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19827
19828 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
19829 }
19830 }
19831
19832 void
19833 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
19834 {
19835 rtx (*gen_ashr3)(rtx, rtx, rtx)
19836 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
19837 rtx (*gen_shrd)(rtx, rtx, rtx);
19838 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19839
19840 rtx low[2], high[2];
19841 int count;
19842
19843 if (CONST_INT_P (operands[2]))
19844 {
19845 split_double_mode (mode, operands, 2, low, high);
19846 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19847
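      /* A shift by the full width minus one just replicates the sign bit
	 into every bit of both halves, so one arithmetic shift of the high
	 half plus moves suffices.  */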
19848 if (count == GET_MODE_BITSIZE (mode) - 1)
19849 {
19850 emit_move_insn (high[0], high[1]);
19851 emit_insn (gen_ashr3 (high[0], high[0],
19852 GEN_INT (half_width - 1)));
19853 emit_move_insn (low[0], high[0]);
19854
19855 }
19856 else if (count >= half_width)
19857 {
19858 emit_move_insn (low[0], high[1]);
19859 emit_move_insn (high[0], low[0]);
19860 emit_insn (gen_ashr3 (high[0], high[0],
19861 GEN_INT (half_width - 1)));
19862
19863 if (count > half_width)
19864 emit_insn (gen_ashr3 (low[0], low[0],
19865 GEN_INT (count - half_width)));
19866 }
19867 else
19868 {
19869 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19870
19871 if (!rtx_equal_p (operands[0], operands[1]))
19872 emit_move_insn (operands[0], operands[1]);
19873
19874 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19875 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
19876 }
19877 }
19878 else
19879 {
19880 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19881
19882 if (!rtx_equal_p (operands[0], operands[1]))
19883 emit_move_insn (operands[0], operands[1]);
19884
19885 split_double_mode (mode, operands, 1, low, high);
19886
19887 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19888 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
19889
19890 if (TARGET_CMOVE && scratch)
19891 {
19892 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19893 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19894
19895 emit_move_insn (scratch, high[0]);
19896 emit_insn (gen_ashr3 (scratch, scratch,
19897 GEN_INT (half_width - 1)));
19898 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19899 scratch));
19900 }
19901 else
19902 {
19903 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
19904 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
19905
19906 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
19907 }
19908 }
19909 }
19910
19911 void
19912 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
19913 {
19914 rtx (*gen_lshr3)(rtx, rtx, rtx)
19915 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
19916 rtx (*gen_shrd)(rtx, rtx, rtx);
19917 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19918
19919 rtx low[2], high[2];
19920 int count;
19921
19922 if (CONST_INT_P (operands[2]))
19923 {
19924 split_double_mode (mode, operands, 2, low, high);
19925 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19926
19927 if (count >= half_width)
19928 {
19929 emit_move_insn (low[0], high[1]);
19930 ix86_expand_clear (high[0]);
19931
19932 if (count > half_width)
19933 emit_insn (gen_lshr3 (low[0], low[0],
19934 GEN_INT (count - half_width)));
19935 }
19936 else
19937 {
19938 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19939
19940 if (!rtx_equal_p (operands[0], operands[1]))
19941 emit_move_insn (operands[0], operands[1]);
19942
19943 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19944 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
19945 }
19946 }
19947 else
19948 {
19949 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19950
19951 if (!rtx_equal_p (operands[0], operands[1]))
19952 emit_move_insn (operands[0], operands[1]);
19953
19954 split_double_mode (mode, operands, 1, low, high);
19955
19956 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19957 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
19958
19959 if (TARGET_CMOVE && scratch)
19960 {
19961 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19962 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19963
19964 ix86_expand_clear (scratch);
19965 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19966 scratch));
19967 }
19968 else
19969 {
19970 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19971 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19972
19973 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
19974 }
19975 }
19976 }
19977
19978 /* Predict the just-emitted jump instruction to be taken with probability PROB.  */
19979 static void
19980 predict_jump (int prob)
19981 {
19982 rtx insn = get_last_insn ();
19983 gcc_assert (JUMP_P (insn));
19984 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
19985 }
19986
19987 /* Helper function for the string operations below.  Test whether VARIABLE
19988    is aligned to VALUE bytes.  If so, jump to the returned label.  */
19989 static rtx
19990 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
19991 {
19992 rtx label = gen_label_rtx ();
19993 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
19994 if (GET_MODE (variable) == DImode)
19995 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
19996 else
19997 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
19998 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
19999 1, label);
20000 if (epilogue)
20001 predict_jump (REG_BR_PROB_BASE * 50 / 100);
20002 else
20003 predict_jump (REG_BR_PROB_BASE * 90 / 100);
20004 return label;
20005 }
20006
20007 /* Decrease COUNTREG by VALUE.  */
20008 static void
20009 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
20010 {
20011 rtx (*gen_add)(rtx, rtx, rtx)
20012 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
20013
20014 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
20015 }
20016
20017 /* Zero-extend a possibly-SImode EXP to a Pmode register.  */
20018 rtx
20019 ix86_zero_extend_to_Pmode (rtx exp)
20020 {
20021 rtx r;
20022 if (GET_MODE (exp) == VOIDmode)
20023 return force_reg (Pmode, exp);
20024 if (GET_MODE (exp) == Pmode)
20025 return copy_to_mode_reg (Pmode, exp);
20026 r = gen_reg_rtx (Pmode);
20027 emit_insn (gen_zero_extendsidi2 (r, exp));
20028 return r;
20029 }
20030
20031 /* Divide COUNTREG by SCALE. */
20032 static rtx
20033 scale_counter (rtx countreg, int scale)
20034 {
20035 rtx sc;
20036
20037 if (scale == 1)
20038 return countreg;
20039 if (CONST_INT_P (countreg))
20040 return GEN_INT (INTVAL (countreg) / scale);
20041 gcc_assert (REG_P (countreg));
20042
20043 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
20044 GEN_INT (exact_log2 (scale)),
20045 NULL, 1, OPTAB_DIRECT);
20046 return sc;
20047 }
20048
20049 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
20050 DImode for constant loop counts. */
20051
20052 static enum machine_mode
20053 counter_mode (rtx count_exp)
20054 {
20055 if (GET_MODE (count_exp) != VOIDmode)
20056 return GET_MODE (count_exp);
20057 if (!CONST_INT_P (count_exp))
20058 return Pmode;
20059 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
20060 return DImode;
20061 return SImode;
20062 }
20063
20064 /* When SRCPTR is non-NULL, output a simple loop that moves the memory
20065    pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
20066    the overall size is COUNT, specified in bytes.  When SRCPTR is NULL, output
20067    the equivalent loop that sets memory to VALUE (expected to be in MODE).
20068 
20069    The size is rounded down to a whole number of chunks moved at once.
20070    SRCMEM and DESTMEM provide the MEM rtx to feed proper aliasing info.  */
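/* Roughly, the emitted code has the shape (chunk == GET_MODE_SIZE (mode)):

	size = count & ~(chunk * unroll - 1);
	iter = 0;
     top:
	copy (or store VALUE into) UNROLL chunks at dest + iter [, src + iter];
	iter += chunk * unroll;
	if (iter < size) goto top;
	destptr += iter;  srcptr += iter;
     out:

   This is only a sketch; the branch probabilities and the early exit for
   byte-sized chunks are handled below.  */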
20071
20072
20073 static void
20074 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
20075 rtx destptr, rtx srcptr, rtx value,
20076 rtx count, enum machine_mode mode, int unroll,
20077 int expected_size)
20078 {
20079 rtx out_label, top_label, iter, tmp;
20080 enum machine_mode iter_mode = counter_mode (count);
20081 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
20082 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
20083 rtx size;
20084 rtx x_addr;
20085 rtx y_addr;
20086 int i;
20087
20088 top_label = gen_label_rtx ();
20089 out_label = gen_label_rtx ();
20090 iter = gen_reg_rtx (iter_mode);
20091
20092 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
20093 NULL, 1, OPTAB_DIRECT);
20094 /* Those two should combine. */
20095 if (piece_size == const1_rtx)
20096 {
20097 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
20098 true, out_label);
20099 predict_jump (REG_BR_PROB_BASE * 10 / 100);
20100 }
20101 emit_move_insn (iter, const0_rtx);
20102
20103 emit_label (top_label);
20104
20105 tmp = convert_modes (Pmode, iter_mode, iter, true);
20106 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
20107 destmem = change_address (destmem, mode, x_addr);
20108
20109 if (srcmem)
20110 {
20111 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
20112 srcmem = change_address (srcmem, mode, y_addr);
20113
20114 	  /* When unrolling for chips that reorder memory reads and writes,
20115 	     we can save registers by using a single temporary.
20116 	     Also, using 4 temporaries is overkill in 32-bit mode.  */
20117 if (!TARGET_64BIT && 0)
20118 {
20119 for (i = 0; i < unroll; i++)
20120 {
20121 if (i)
20122 {
20123 destmem =
20124 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20125 srcmem =
20126 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20127 }
20128 emit_move_insn (destmem, srcmem);
20129 }
20130 }
20131 else
20132 {
20133 rtx tmpreg[4];
20134 gcc_assert (unroll <= 4);
20135 for (i = 0; i < unroll; i++)
20136 {
20137 tmpreg[i] = gen_reg_rtx (mode);
20138 if (i)
20139 {
20140 srcmem =
20141 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20142 }
20143 emit_move_insn (tmpreg[i], srcmem);
20144 }
20145 for (i = 0; i < unroll; i++)
20146 {
20147 if (i)
20148 {
20149 destmem =
20150 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20151 }
20152 emit_move_insn (destmem, tmpreg[i]);
20153 }
20154 }
20155 }
20156 else
20157 for (i = 0; i < unroll; i++)
20158 {
20159 if (i)
20160 destmem =
20161 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20162 emit_move_insn (destmem, value);
20163 }
20164
20165 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
20166 true, OPTAB_LIB_WIDEN);
20167 if (tmp != iter)
20168 emit_move_insn (iter, tmp);
20169
20170 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
20171 true, top_label);
20172 if (expected_size != -1)
20173 {
20174 expected_size /= GET_MODE_SIZE (mode) * unroll;
20175 if (expected_size == 0)
20176 predict_jump (0);
20177 else if (expected_size > REG_BR_PROB_BASE)
20178 predict_jump (REG_BR_PROB_BASE - 1);
20179 else
20180 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
20181 }
20182 else
20183 predict_jump (REG_BR_PROB_BASE * 80 / 100);
20184 iter = ix86_zero_extend_to_Pmode (iter);
20185 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
20186 true, OPTAB_LIB_WIDEN);
20187 if (tmp != destptr)
20188 emit_move_insn (destptr, tmp);
20189 if (srcptr)
20190 {
20191 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
20192 true, OPTAB_LIB_WIDEN);
20193 if (tmp != srcptr)
20194 emit_move_insn (srcptr, tmp);
20195 }
20196 emit_label (out_label);
20197 }
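/* Illustrative sketch (not part of GCC): the shape of the loop emitted
   above, written as plain C for the memcpy case with UNROLL == 1.  The
   per-chunk loads/stores, the unrolling and the branch prediction hints
   are omitted; all names here are hypothetical.  */
#if 0
static void
example_copy_loop (char *dest, const char *src, unsigned long count,
                   unsigned long piece_size)
{
  /* Round the size down to a whole number of pieces; the remainder is
     left for the epilogue, as the expander does with PIECE_SIZE_MASK.  */
  unsigned long size = count & ~(piece_size - 1);
  unsigned long iter;

  for (iter = 0; iter < size; iter += piece_size)
    __builtin_memcpy (dest + iter, src + iter, piece_size);

  /* Both pointers are then advanced by the number of bytes processed,
     matching the final PLUS on DESTPTR/SRCPTR above.  */
}
#endif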
20198
20199 /* Output "rep; mov" instruction.
20200 Arguments have the same meaning as for the previous function. */
20201 static void
20202 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
20203 rtx destptr, rtx srcptr,
20204 rtx count,
20205 enum machine_mode mode)
20206 {
20207 rtx destexp;
20208 rtx srcexp;
20209 rtx countreg;
20210
20211 /* If the size is known and a multiple of 4, it is shorter to use rep movsl. */
20212 if (mode == QImode && CONST_INT_P (count)
20213 && !(INTVAL (count) & 3))
20214 mode = SImode;
20215
20216 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20217 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20218 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
20219 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
20220 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20221 if (mode != QImode)
20222 {
20223 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20224 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20225 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20226 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
20227 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20228 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
20229 }
20230 else
20231 {
20232 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20233 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
20234 }
20235 if (CONST_INT_P (count))
20236 {
20237 count = GEN_INT (INTVAL (count)
20238 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20239 destmem = shallow_copy_rtx (destmem);
20240 srcmem = shallow_copy_rtx (srcmem);
20241 set_mem_size (destmem, count);
20242 set_mem_size (srcmem, count);
20243 }
20244 else
20245 {
20246 if (MEM_SIZE (destmem))
20247 set_mem_size (destmem, NULL_RTX);
20248 if (MEM_SIZE (srcmem))
20249 set_mem_size (srcmem, NULL_RTX);
20250 }
20251 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
20252 destexp, srcexp));
20253 }
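/* Illustrative sketch (not part of GCC): the effect of the "rep mov"
   emitted above for MODE == SImode, written as plain C.  The DESTEXP and
   SRCEXP expressions built above describe the final pointer values; the
   names here are hypothetical.  */
#if 0
static void
example_rep_movsl (unsigned int **destp, const unsigned int **srcp,
                   unsigned long count_bytes)
{
  unsigned long n = count_bytes >> 2;	/* scale_counter: bytes -> dwords.  */
  while (n--)
    *(*destp)++ = *(*srcp)++;		/* one "movsl" step; ecx counts down.  */
  /* On exit both pointers have advanced by count_bytes & ~3 bytes.  */
}
#endif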
20254
20255 /* Output "rep; stos" instruction.
20256 Arguments have the same meaning as for the previous function. */
20257 static void
20258 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
20259 rtx count, enum machine_mode mode,
20260 rtx orig_value)
20261 {
20262 rtx destexp;
20263 rtx countreg;
20264
20265 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20266 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20267 value = force_reg (mode, gen_lowpart (mode, value));
20268 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20269 if (mode != QImode)
20270 {
20271 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20272 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20273 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20274 }
20275 else
20276 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20277 if (orig_value == const0_rtx && CONST_INT_P (count))
20278 {
20279 count = GEN_INT (INTVAL (count)
20280 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20281 destmem = shallow_copy_rtx (destmem);
20282 set_mem_size (destmem, count);
20283 }
20284 else if (MEM_SIZE (destmem))
20285 set_mem_size (destmem, NULL_RTX);
20286 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
20287 }
20288
20289 static void
20290 emit_strmov (rtx destmem, rtx srcmem,
20291 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
20292 {
20293 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
20294 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
20295 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20296 }
20297
20298 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
20299 static void
20300 expand_movmem_epilogue (rtx destmem, rtx srcmem,
20301 rtx destptr, rtx srcptr, rtx count, int max_size)
20302 {
20303 rtx src, dest;
20304 if (CONST_INT_P (count))
20305 {
20306 HOST_WIDE_INT countval = INTVAL (count);
20307 int offset = 0;
20308
20309 if ((countval & 0x10) && max_size > 16)
20310 {
20311 if (TARGET_64BIT)
20312 {
20313 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20314 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
20315 }
20316 else
20317 gcc_unreachable ();
20318 offset += 16;
20319 }
20320 if ((countval & 0x08) && max_size > 8)
20321 {
20322 if (TARGET_64BIT)
20323 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20324 else
20325 {
20326 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20327 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
20328 }
20329 offset += 8;
20330 }
20331 if ((countval & 0x04) && max_size > 4)
20332 {
20333 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20334 offset += 4;
20335 }
20336 if ((countval & 0x02) && max_size > 2)
20337 {
20338 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
20339 offset += 2;
20340 }
20341 if ((countval & 0x01) && max_size > 1)
20342 {
20343 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
20344 offset += 1;
20345 }
20346 return;
20347 }
20348 if (max_size > 8)
20349 {
20350 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
20351 count, 1, OPTAB_DIRECT);
20352 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
20353 count, QImode, 1, 4);
20354 return;
20355 }
20356
20357 /* When single-operand stringops are available (TARGET_SINGLE_STRINGOP), we
20358 can cheaply advance the dest and src pointers. Otherwise we save code size
20359 by maintaining an offset (zero is readily available from the preceding rep
20360 operation) and using x86 addressing modes. */
20361 if (TARGET_SINGLE_STRINGOP)
20362 {
20363 if (max_size > 4)
20364 {
20365 rtx label = ix86_expand_aligntest (count, 4, true);
20366 src = change_address (srcmem, SImode, srcptr);
20367 dest = change_address (destmem, SImode, destptr);
20368 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20369 emit_label (label);
20370 LABEL_NUSES (label) = 1;
20371 }
20372 if (max_size > 2)
20373 {
20374 rtx label = ix86_expand_aligntest (count, 2, true);
20375 src = change_address (srcmem, HImode, srcptr);
20376 dest = change_address (destmem, HImode, destptr);
20377 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20378 emit_label (label);
20379 LABEL_NUSES (label) = 1;
20380 }
20381 if (max_size > 1)
20382 {
20383 rtx label = ix86_expand_aligntest (count, 1, true);
20384 src = change_address (srcmem, QImode, srcptr);
20385 dest = change_address (destmem, QImode, destptr);
20386 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20387 emit_label (label);
20388 LABEL_NUSES (label) = 1;
20389 }
20390 }
20391 else
20392 {
20393 rtx offset = force_reg (Pmode, const0_rtx);
20394 rtx tmp;
20395
20396 if (max_size > 4)
20397 {
20398 rtx label = ix86_expand_aligntest (count, 4, true);
20399 src = change_address (srcmem, SImode, srcptr);
20400 dest = change_address (destmem, SImode, destptr);
20401 emit_move_insn (dest, src);
20402 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
20403 true, OPTAB_LIB_WIDEN);
20404 if (tmp != offset)
20405 emit_move_insn (offset, tmp);
20406 emit_label (label);
20407 LABEL_NUSES (label) = 1;
20408 }
20409 if (max_size > 2)
20410 {
20411 rtx label = ix86_expand_aligntest (count, 2, true);
20412 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20413 src = change_address (srcmem, HImode, tmp);
20414 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20415 dest = change_address (destmem, HImode, tmp);
20416 emit_move_insn (dest, src);
20417 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
20418 true, OPTAB_LIB_WIDEN);
20419 if (tmp != offset)
20420 emit_move_insn (offset, tmp);
20421 emit_label (label);
20422 LABEL_NUSES (label) = 1;
20423 }
20424 if (max_size > 1)
20425 {
20426 rtx label = ix86_expand_aligntest (count, 1, true);
20427 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20428 src = change_address (srcmem, QImode, tmp);
20429 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20430 dest = change_address (destmem, QImode, tmp);
20431 emit_move_insn (dest, src);
20432 emit_label (label);
20433 LABEL_NUSES (label) = 1;
20434 }
20435 }
20436 }
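/* Worked example (illustrative, not part of GCC): for a constant count the
   epilogue above decomposes the low bits of the remainder into at most one
   move per power of two.  On a 64-bit target with countval == 23 (10111b)
   and max_size == 32 this emits one 16-byte copy (two DImode moves), then a
   4-byte, a 2-byte and a 1-byte copy at increasing offsets.  A host-side
   sketch of that decomposition, with hypothetical names:  */
#if 0
static void
example_epilogue_decompose (unsigned long countval)
{
  unsigned long offset = 0, bit;
  for (bit = 16; bit >= 1; bit >>= 1)
    if (countval & bit)
      {
	/* Emit one move of BIT bytes at OFFSET here.  */
	offset += bit;
      }
  /* OFFSET now equals countval & 31, the number of bytes handled.  */
}
#endif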
20437
20438 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20439 static void
20440 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
20441 rtx count, int max_size)
20442 {
20443 count =
20444 expand_simple_binop (counter_mode (count), AND, count,
20445 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
20446 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
20447 gen_lowpart (QImode, value), count, QImode,
20448 1, max_size / 2);
20449 }
20450
20451 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20452 static void
20453 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
20454 {
20455 rtx dest;
20456
20457 if (CONST_INT_P (count))
20458 {
20459 HOST_WIDE_INT countval = INTVAL (count);
20460 int offset = 0;
20461
20462 if ((countval & 0x10) && max_size > 16)
20463 {
20464 if (TARGET_64BIT)
20465 {
20466 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20467 emit_insn (gen_strset (destptr, dest, value));
20468 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
20469 emit_insn (gen_strset (destptr, dest, value));
20470 }
20471 else
20472 gcc_unreachable ();
20473 offset += 16;
20474 }
20475 if ((countval & 0x08) && max_size > 8)
20476 {
20477 if (TARGET_64BIT)
20478 {
20479 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20480 emit_insn (gen_strset (destptr, dest, value));
20481 }
20482 else
20483 {
20484 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20485 emit_insn (gen_strset (destptr, dest, value));
20486 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
20487 emit_insn (gen_strset (destptr, dest, value));
20488 }
20489 offset += 8;
20490 }
20491 if ((countval & 0x04) && max_size > 4)
20492 {
20493 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20494 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20495 offset += 4;
20496 }
20497 if ((countval & 0x02) && max_size > 2)
20498 {
20499 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
20500 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20501 offset += 2;
20502 }
20503 if ((countval & 0x01) && max_size > 1)
20504 {
20505 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
20506 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20507 offset += 1;
20508 }
20509 return;
20510 }
20511 if (max_size > 32)
20512 {
20513 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
20514 return;
20515 }
20516 if (max_size > 16)
20517 {
20518 rtx label = ix86_expand_aligntest (count, 16, true);
20519 if (TARGET_64BIT)
20520 {
20521 dest = change_address (destmem, DImode, destptr);
20522 emit_insn (gen_strset (destptr, dest, value));
20523 emit_insn (gen_strset (destptr, dest, value));
20524 }
20525 else
20526 {
20527 dest = change_address (destmem, SImode, destptr);
20528 emit_insn (gen_strset (destptr, dest, value));
20529 emit_insn (gen_strset (destptr, dest, value));
20530 emit_insn (gen_strset (destptr, dest, value));
20531 emit_insn (gen_strset (destptr, dest, value));
20532 }
20533 emit_label (label);
20534 LABEL_NUSES (label) = 1;
20535 }
20536 if (max_size > 8)
20537 {
20538 rtx label = ix86_expand_aligntest (count, 8, true);
20539 if (TARGET_64BIT)
20540 {
20541 dest = change_address (destmem, DImode, destptr);
20542 emit_insn (gen_strset (destptr, dest, value));
20543 }
20544 else
20545 {
20546 dest = change_address (destmem, SImode, destptr);
20547 emit_insn (gen_strset (destptr, dest, value));
20548 emit_insn (gen_strset (destptr, dest, value));
20549 }
20550 emit_label (label);
20551 LABEL_NUSES (label) = 1;
20552 }
20553 if (max_size > 4)
20554 {
20555 rtx label = ix86_expand_aligntest (count, 4, true);
20556 dest = change_address (destmem, SImode, destptr);
20557 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20558 emit_label (label);
20559 LABEL_NUSES (label) = 1;
20560 }
20561 if (max_size > 2)
20562 {
20563 rtx label = ix86_expand_aligntest (count, 2, true);
20564 dest = change_address (destmem, HImode, destptr);
20565 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20566 emit_label (label);
20567 LABEL_NUSES (label) = 1;
20568 }
20569 if (max_size > 1)
20570 {
20571 rtx label = ix86_expand_aligntest (count, 1, true);
20572 dest = change_address (destmem, QImode, destptr);
20573 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20574 emit_label (label);
20575 LABEL_NUSES (label) = 1;
20576 }
20577 }
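/* Illustrative sketch (not part of GCC): when COUNT is not constant, the
   epilogue above becomes a jump tree that tests one bit of COUNT at a time
   (ix86_expand_aligntest).  Plain C equivalent for a 32-bit target with
   max_size == 16; the stores auto-increment the pointer just as gen_strset
   does, and the names are hypothetical.  */
#if 0
static void
example_setmem_tail (unsigned char *p, unsigned long count, unsigned char byte)
{
  if (count & 8)
    { __builtin_memset (p, byte, 8); p += 8; }
  if (count & 4)
    { __builtin_memset (p, byte, 4); p += 4; }
  if (count & 2)
    { __builtin_memset (p, byte, 2); p += 2; }
  if (count & 1)
    *p++ = byte;
}
#endif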
20578
20579 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
20580 to DESIRED_ALIGNMENT. */
20581 static void
20582 expand_movmem_prologue (rtx destmem, rtx srcmem,
20583 rtx destptr, rtx srcptr, rtx count,
20584 int align, int desired_alignment)
20585 {
20586 if (align <= 1 && desired_alignment > 1)
20587 {
20588 rtx label = ix86_expand_aligntest (destptr, 1, false);
20589 srcmem = change_address (srcmem, QImode, srcptr);
20590 destmem = change_address (destmem, QImode, destptr);
20591 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20592 ix86_adjust_counter (count, 1);
20593 emit_label (label);
20594 LABEL_NUSES (label) = 1;
20595 }
20596 if (align <= 2 && desired_alignment > 2)
20597 {
20598 rtx label = ix86_expand_aligntest (destptr, 2, false);
20599 srcmem = change_address (srcmem, HImode, srcptr);
20600 destmem = change_address (destmem, HImode, destptr);
20601 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20602 ix86_adjust_counter (count, 2);
20603 emit_label (label);
20604 LABEL_NUSES (label) = 1;
20605 }
20606 if (align <= 4 && desired_alignment > 4)
20607 {
20608 rtx label = ix86_expand_aligntest (destptr, 4, false);
20609 srcmem = change_address (srcmem, SImode, srcptr);
20610 destmem = change_address (destmem, SImode, destptr);
20611 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20612 ix86_adjust_counter (count, 4);
20613 emit_label (label);
20614 LABEL_NUSES (label) = 1;
20615 }
20616 gcc_assert (desired_alignment <= 8);
20617 }
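/* Illustrative sketch (not part of GCC): the run-time alignment prologue
   above, as plain C for desired_alignment == 8.  At most one 1-byte, one
   2-byte and one 4-byte copy are made and COUNT is reduced accordingly; the
   names are hypothetical.  */
#if 0
static void
example_align_prologue (char **destp, const char **srcp, unsigned long *count)
{
  if ((unsigned long) *destp & 1)
    { **destp = **srcp; *destp += 1; *srcp += 1; *count -= 1; }
  if ((unsigned long) *destp & 2)
    { __builtin_memcpy (*destp, *srcp, 2); *destp += 2; *srcp += 2; *count -= 2; }
  if ((unsigned long) *destp & 4)
    { __builtin_memcpy (*destp, *srcp, 4); *destp += 4; *srcp += 4; *count -= 4; }
  /* *destp is now 8-byte aligned, provided the block was long enough,
     which the prologue guard in ix86_expand_movmem ensures.  */
}
#endif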
20618
20619 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
20620 ALIGN_BYTES is how many bytes need to be copied. */
20621 static rtx
20622 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
20623 int desired_align, int align_bytes)
20624 {
20625 rtx src = *srcp;
20626 rtx src_size, dst_size;
20627 int off = 0;
20628 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
20629 if (src_align_bytes >= 0)
20630 src_align_bytes = desired_align - src_align_bytes;
20631 src_size = MEM_SIZE (src);
20632 dst_size = MEM_SIZE (dst);
20633 if (align_bytes & 1)
20634 {
20635 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20636 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
20637 off = 1;
20638 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20639 }
20640 if (align_bytes & 2)
20641 {
20642 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20643 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
20644 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20645 set_mem_align (dst, 2 * BITS_PER_UNIT);
20646 if (src_align_bytes >= 0
20647 && (src_align_bytes & 1) == (align_bytes & 1)
20648 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
20649 set_mem_align (src, 2 * BITS_PER_UNIT);
20650 off = 2;
20651 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20652 }
20653 if (align_bytes & 4)
20654 {
20655 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20656 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
20657 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20658 set_mem_align (dst, 4 * BITS_PER_UNIT);
20659 if (src_align_bytes >= 0)
20660 {
20661 unsigned int src_align = 0;
20662 if ((src_align_bytes & 3) == (align_bytes & 3))
20663 src_align = 4;
20664 else if ((src_align_bytes & 1) == (align_bytes & 1))
20665 src_align = 2;
20666 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20667 set_mem_align (src, src_align * BITS_PER_UNIT);
20668 }
20669 off = 4;
20670 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20671 }
20672 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20673 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
20674 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20675 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20676 if (src_align_bytes >= 0)
20677 {
20678 unsigned int src_align = 0;
20679 if ((src_align_bytes & 7) == (align_bytes & 7))
20680 src_align = 8;
20681 else if ((src_align_bytes & 3) == (align_bytes & 3))
20682 src_align = 4;
20683 else if ((src_align_bytes & 1) == (align_bytes & 1))
20684 src_align = 2;
20685 if (src_align > (unsigned int) desired_align)
20686 src_align = desired_align;
20687 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20688 set_mem_align (src, src_align * BITS_PER_UNIT);
20689 }
20690 if (dst_size)
20691 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20692 if (src_size)
20693 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
20694 *srcp = src;
20695 return dst;
20696 }
20697
20698 /* Store enough bytes at DEST to align DEST, known to be aligned by ALIGN,
20699 to DESIRED_ALIGNMENT. */
20700 static void
20701 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
20702 int align, int desired_alignment)
20703 {
20704 if (align <= 1 && desired_alignment > 1)
20705 {
20706 rtx label = ix86_expand_aligntest (destptr, 1, false);
20707 destmem = change_address (destmem, QImode, destptr);
20708 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
20709 ix86_adjust_counter (count, 1);
20710 emit_label (label);
20711 LABEL_NUSES (label) = 1;
20712 }
20713 if (align <= 2 && desired_alignment > 2)
20714 {
20715 rtx label = ix86_expand_aligntest (destptr, 2, false);
20716 destmem = change_address (destmem, HImode, destptr);
20717 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
20718 ix86_adjust_counter (count, 2);
20719 emit_label (label);
20720 LABEL_NUSES (label) = 1;
20721 }
20722 if (align <= 4 && desired_alignment > 4)
20723 {
20724 rtx label = ix86_expand_aligntest (destptr, 4, false);
20725 destmem = change_address (destmem, SImode, destptr);
20726 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
20727 ix86_adjust_counter (count, 4);
20728 emit_label (label);
20729 LABEL_NUSES (label) = 1;
20730 }
20731 gcc_assert (desired_alignment <= 8);
20732 }
20733
20734 /* Store enough bytes at DST to align DST, known to be aligned by ALIGN, to
20735 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
20736 static rtx
20737 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
20738 int desired_align, int align_bytes)
20739 {
20740 int off = 0;
20741 rtx dst_size = MEM_SIZE (dst);
20742 if (align_bytes & 1)
20743 {
20744 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20745 off = 1;
20746 emit_insn (gen_strset (destreg, dst,
20747 gen_lowpart (QImode, value)));
20748 }
20749 if (align_bytes & 2)
20750 {
20751 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20752 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20753 set_mem_align (dst, 2 * BITS_PER_UNIT);
20754 off = 2;
20755 emit_insn (gen_strset (destreg, dst,
20756 gen_lowpart (HImode, value)));
20757 }
20758 if (align_bytes & 4)
20759 {
20760 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20761 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20762 set_mem_align (dst, 4 * BITS_PER_UNIT);
20763 off = 4;
20764 emit_insn (gen_strset (destreg, dst,
20765 gen_lowpart (SImode, value)));
20766 }
20767 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20768 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20769 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20770 if (dst_size)
20771 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20772 return dst;
20773 }
20774
20775 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
20776 static enum stringop_alg
20777 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
20778 int *dynamic_check)
20779 {
20780 const struct stringop_algs * algs;
20781 bool optimize_for_speed;
20782 /* Algorithms using the rep prefix want at least edi and ecx;
20783 additionally, memset wants eax and memcpy wants esi. Don't
20784 consider such algorithms if the user has appropriated those
20785 registers for their own purposes. */
20786 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
20787 || (memset
20788 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
20789
20790 #define ALG_USABLE_P(alg) (rep_prefix_usable \
20791 || (alg != rep_prefix_1_byte \
20792 && alg != rep_prefix_4_byte \
20793 && alg != rep_prefix_8_byte))
20794 const struct processor_costs *cost;
20795
20796 /* Even if the string operation call is cold, we still might spend a lot
20797 of time processing large blocks. */
20798 if (optimize_function_for_size_p (cfun)
20799 || (optimize_insn_for_size_p ()
20800 && expected_size != -1 && expected_size < 256))
20801 optimize_for_speed = false;
20802 else
20803 optimize_for_speed = true;
20804
20805 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
20806
20807 *dynamic_check = -1;
20808 if (memset)
20809 algs = &cost->memset[TARGET_64BIT != 0];
20810 else
20811 algs = &cost->memcpy[TARGET_64BIT != 0];
20812 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
20813 return stringop_alg;
20814 /* rep; movq or rep; movl is the smallest variant. */
20815 else if (!optimize_for_speed)
20816 {
20817 if (!count || (count & 3))
20818 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
20819 else
20820 return rep_prefix_usable ? rep_prefix_4_byte : loop;
20821 }
20822 /* Very tiny blocks are best handled via the loop; REP is expensive
20823 to set up. */
20824 else if (expected_size != -1 && expected_size < 4)
20825 return loop_1_byte;
20826 else if (expected_size != -1)
20827 {
20828 unsigned int i;
20829 enum stringop_alg alg = libcall;
20830 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20831 {
20832 /* We get here if the algorithms that were not libcall-based
20833 were rep-prefix based and we are unable to use rep prefixes
20834 based on global register usage. Break out of the loop and
20835 use the heuristic below. */
20836 if (algs->size[i].max == 0)
20837 break;
20838 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
20839 {
20840 enum stringop_alg candidate = algs->size[i].alg;
20841
20842 if (candidate != libcall && ALG_USABLE_P (candidate))
20843 alg = candidate;
20844 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
20845 last non-libcall inline algorithm. */
20846 if (TARGET_INLINE_ALL_STRINGOPS)
20847 {
20848 /* When the current size is best to be copied by a libcall,
20849 but we are still forced to inline, run the heuristic below
20850 that will pick code for medium sized blocks. */
20851 if (alg != libcall)
20852 return alg;
20853 break;
20854 }
20855 else if (ALG_USABLE_P (candidate))
20856 return candidate;
20857 }
20858 }
20859 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
20860 }
20861 /* When asked to inline the call anyway, try to pick a meaningful choice.
20862 We look for the maximal size of block that is faster to copy by hand and
20863 take blocks of at most that size, guessing that the average size will
20864 be roughly half of the block.
20865
20866 If this turns out to be bad, we might simply specify the preferred
20867 choice in ix86_costs. */
20868 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20869 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
20870 {
20871 int max = -1;
20872 enum stringop_alg alg;
20873 int i;
20874 bool any_alg_usable_p = true;
20875
20876 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20877 {
20878 enum stringop_alg candidate = algs->size[i].alg;
20879 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
20880
20881 if (candidate != libcall && candidate
20882 && ALG_USABLE_P (candidate))
20883 max = algs->size[i].max;
20884 }
20885 /* If there aren't any usable algorithms, then recursing on
20886 smaller sizes isn't going to find anything. Just return the
20887 simple byte-at-a-time copy loop. */
20888 if (!any_alg_usable_p)
20889 {
20890 /* Pick something reasonable. */
20891 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20892 *dynamic_check = 128;
20893 return loop_1_byte;
20894 }
20895 if (max == -1)
20896 max = 4096;
20897 alg = decide_alg (count, max / 2, memset, dynamic_check);
20898 gcc_assert (*dynamic_check == -1);
20899 gcc_assert (alg != libcall);
20900 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20901 *dynamic_check = max;
20902 return alg;
20903 }
20904 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
20905 #undef ALG_USABLE_P
20906 }
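/* Illustrative sketch (not part of GCC): how the per-size table above is
   consulted when EXPECTED_SIZE is known.  The table contents here are made
   up purely for illustration; only the lookup rule (first entry whose MAX
   covers the expected size, with -1 meaning "no limit") mirrors the loop
   above.  */
#if 0
struct example_entry { int max; const char *alg; };

static const char *
example_pick_alg (int expected_size)
{
  static const struct example_entry table[] = {
    { 16, "loop_1_byte" }, { 128, "loop" }, { -1, "rep_prefix_4_byte" }
  };
  unsigned int i;

  for (i = 0; i < sizeof table / sizeof table[0]; i++)
    if (table[i].max >= expected_size || table[i].max == -1)
      return table[i].alg;
  return "libcall";
}
#endif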
20907
20908 /* Decide on alignment. We know that the operand is already aligned to ALIGN
20909 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
20910 static int
20911 decide_alignment (int align,
20912 enum stringop_alg alg,
20913 int expected_size)
20914 {
20915 int desired_align = 0;
20916 switch (alg)
20917 {
20918 case no_stringop:
20919 gcc_unreachable ();
20920 case loop:
20921 case unrolled_loop:
20922 desired_align = GET_MODE_SIZE (Pmode);
20923 break;
20924 case rep_prefix_8_byte:
20925 desired_align = 8;
20926 break;
20927 case rep_prefix_4_byte:
20928 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
20929 copying a whole cache line at once. */
20930 if (TARGET_PENTIUMPRO)
20931 desired_align = 8;
20932 else
20933 desired_align = 4;
20934 break;
20935 case rep_prefix_1_byte:
20936 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
20937 copying a whole cache line at once. */
20938 if (TARGET_PENTIUMPRO)
20939 desired_align = 8;
20940 else
20941 desired_align = 1;
20942 break;
20943 case loop_1_byte:
20944 desired_align = 1;
20945 break;
20946 case libcall:
20947 return 0;
20948 }
20949
20950 if (optimize_size)
20951 desired_align = 1;
20952 if (desired_align < align)
20953 desired_align = align;
20954 if (expected_size != -1 && expected_size < 4)
20955 desired_align = align;
20956 return desired_align;
20957 }
20958
20959 /* Return the smallest power of 2 greater than VAL. */
20960 static int
20961 smallest_pow2_greater_than (int val)
20962 {
20963 int ret = 1;
20964 while (ret <= val)
20965 ret <<= 1;
20966 return ret;
20967 }
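/* Worked examples (illustrative, not part of GCC):
   smallest_pow2_greater_than (0) == 1, (5) == 8, (7) == 8, (8) == 16.
   The result is strictly greater than VAL, which is why the callers below
   pass SIZE - 1: a size that is already a power of two then maps to
   itself.  */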
20968
20969 /* Expand string move (memcpy) operation. Use i386 string operations
20970 when profitable. expand_setmem contains similar code. The code
20971 depends upon architecture, block size and alignment, but always has
20972 the same overall structure:
20973
20974 1) Prologue guard: Conditional that jumps up to epilogues for small
20975 blocks that can be handled by epilogue alone. This is faster
20976 but also needed for correctness, since the prologue assumes the block
20977 is larger than the desired alignment.
20978
20979 An optional dynamic check for size and a libcall for large
20980 blocks are emitted here too, with -minline-stringops-dynamically.
20981
20982 2) Prologue: copy first few bytes in order to get destination
20983 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less
20984 than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
20985 copied. We emit either a jump tree on power of two sized
20986 blocks, or a byte loop.
20987
20988 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
20989 with specified algorithm.
20990
20991 4) Epilogue: code copying tail of the block that is too small to be
20992 handled by main body (or up to size guarded by prologue guard). */
20993
20994 bool
20995 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
20996 rtx expected_align_exp, rtx expected_size_exp)
20997 {
20998 rtx destreg;
20999 rtx srcreg;
21000 rtx label = NULL;
21001 rtx tmp;
21002 rtx jump_around_label = NULL;
21003 HOST_WIDE_INT align = 1;
21004 unsigned HOST_WIDE_INT count = 0;
21005 HOST_WIDE_INT expected_size = -1;
21006 int size_needed = 0, epilogue_size_needed;
21007 int desired_align = 0, align_bytes = 0;
21008 enum stringop_alg alg;
21009 int dynamic_check;
21010 bool need_zero_guard = false;
21011
21012 if (CONST_INT_P (align_exp))
21013 align = INTVAL (align_exp);
21014 /* i386 can do misaligned access at a reasonably increased cost. */
21015 if (CONST_INT_P (expected_align_exp)
21016 && INTVAL (expected_align_exp) > align)
21017 align = INTVAL (expected_align_exp);
21018 /* ALIGN is the minimum of destination and source alignment, but we care here
21019 just about destination alignment. */
21020 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
21021 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
21022
21023 if (CONST_INT_P (count_exp))
21024 count = expected_size = INTVAL (count_exp);
21025 if (CONST_INT_P (expected_size_exp) && count == 0)
21026 expected_size = INTVAL (expected_size_exp);
21027
21028 /* Make sure we don't need to care about overflow later on. */
21029 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21030 return false;
21031
21032 /* Step 0: Decide on preferred algorithm, desired alignment and
21033 size of chunks to be copied by main loop. */
21034
21035 alg = decide_alg (count, expected_size, false, &dynamic_check);
21036 desired_align = decide_alignment (align, alg, expected_size);
21037
21038 if (!TARGET_ALIGN_STRINGOPS)
21039 align = desired_align;
21040
21041 if (alg == libcall)
21042 return false;
21043 gcc_assert (alg != no_stringop);
21044 if (!count)
21045 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
21046 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21047 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
21048 switch (alg)
21049 {
21050 case libcall:
21051 case no_stringop:
21052 gcc_unreachable ();
21053 case loop:
21054 need_zero_guard = true;
21055 size_needed = GET_MODE_SIZE (Pmode);
21056 break;
21057 case unrolled_loop:
21058 need_zero_guard = true;
21059 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
21060 break;
21061 case rep_prefix_8_byte:
21062 size_needed = 8;
21063 break;
21064 case rep_prefix_4_byte:
21065 size_needed = 4;
21066 break;
21067 case rep_prefix_1_byte:
21068 size_needed = 1;
21069 break;
21070 case loop_1_byte:
21071 need_zero_guard = true;
21072 size_needed = 1;
21073 break;
21074 }
21075
21076 epilogue_size_needed = size_needed;
21077
21078 /* Step 1: Prologue guard. */
21079
21080 /* Alignment code needs count to be in register. */
21081 if (CONST_INT_P (count_exp) && desired_align > align)
21082 {
21083 if (INTVAL (count_exp) > desired_align
21084 && INTVAL (count_exp) > size_needed)
21085 {
21086 align_bytes
21087 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21088 if (align_bytes <= 0)
21089 align_bytes = 0;
21090 else
21091 align_bytes = desired_align - align_bytes;
21092 }
21093 if (align_bytes == 0)
21094 count_exp = force_reg (counter_mode (count_exp), count_exp);
21095 }
21096 gcc_assert (desired_align >= 1 && align >= 1);
21097
21098 /* Ensure that alignment prologue won't copy past end of block. */
21099 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21100 {
21101 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21102 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21103 Make sure it is a power of 2. */
21104 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21105
21106 if (count)
21107 {
21108 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21109 {
21110 /* If main algorithm works on QImode, no epilogue is needed.
21111 For small sizes just don't align anything. */
21112 if (size_needed == 1)
21113 desired_align = align;
21114 else
21115 goto epilogue;
21116 }
21117 }
21118 else
21119 {
21120 label = gen_label_rtx ();
21121 emit_cmp_and_jump_insns (count_exp,
21122 GEN_INT (epilogue_size_needed),
21123 LTU, 0, counter_mode (count_exp), 1, label);
21124 if (expected_size == -1 || expected_size < epilogue_size_needed)
21125 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21126 else
21127 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21128 }
21129 }
21130
21131 /* Emit code to decide at runtime whether a library call or inline code
21132 should be used. */
21133 if (dynamic_check != -1)
21134 {
21135 if (CONST_INT_P (count_exp))
21136 {
21137 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
21138 {
21139 emit_block_move_via_libcall (dst, src, count_exp, false);
21140 count_exp = const0_rtx;
21141 goto epilogue;
21142 }
21143 }
21144 else
21145 {
21146 rtx hot_label = gen_label_rtx ();
21147 jump_around_label = gen_label_rtx ();
21148 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21149 LEU, 0, GET_MODE (count_exp), 1, hot_label);
21150 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21151 emit_block_move_via_libcall (dst, src, count_exp, false);
21152 emit_jump (jump_around_label);
21153 emit_label (hot_label);
21154 }
21155 }
21156
21157 /* Step 2: Alignment prologue. */
21158
21159 if (desired_align > align)
21160 {
21161 if (align_bytes == 0)
21162 {
21163 /* Except for the first move in the epilogue, we no longer know
21164 the constant offset in the aliasing info. It doesn't seem worth
21165 the pain to maintain it for the first move, so throw away
21166 the info early. */
21167 src = change_address (src, BLKmode, srcreg);
21168 dst = change_address (dst, BLKmode, destreg);
21169 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
21170 desired_align);
21171 }
21172 else
21173 {
21174 /* If we know how many bytes need to be stored before dst is
21175 sufficiently aligned, maintain aliasing info accurately. */
21176 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
21177 desired_align, align_bytes);
21178 count_exp = plus_constant (count_exp, -align_bytes);
21179 count -= align_bytes;
21180 }
21181 if (need_zero_guard
21182 && (count < (unsigned HOST_WIDE_INT) size_needed
21183 || (align_bytes == 0
21184 && count < ((unsigned HOST_WIDE_INT) size_needed
21185 + desired_align - align))))
21186 {
21187 /* It is possible that we copied enough so the main loop will not
21188 execute. */
21189 gcc_assert (size_needed > 1);
21190 if (label == NULL_RTX)
21191 label = gen_label_rtx ();
21192 emit_cmp_and_jump_insns (count_exp,
21193 GEN_INT (size_needed),
21194 LTU, 0, counter_mode (count_exp), 1, label);
21195 if (expected_size == -1
21196 || expected_size < (desired_align - align) / 2 + size_needed)
21197 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21198 else
21199 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21200 }
21201 }
21202 if (label && size_needed == 1)
21203 {
21204 emit_label (label);
21205 LABEL_NUSES (label) = 1;
21206 label = NULL;
21207 epilogue_size_needed = 1;
21208 }
21209 else if (label == NULL_RTX)
21210 epilogue_size_needed = size_needed;
21211
21212 /* Step 3: Main loop. */
21213
21214 switch (alg)
21215 {
21216 case libcall:
21217 case no_stringop:
21218 gcc_unreachable ();
21219 case loop_1_byte:
21220 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21221 count_exp, QImode, 1, expected_size);
21222 break;
21223 case loop:
21224 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21225 count_exp, Pmode, 1, expected_size);
21226 break;
21227 case unrolled_loop:
21228 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
21229 registers for 4 temporaries anyway. */
21230 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21231 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
21232 expected_size);
21233 break;
21234 case rep_prefix_8_byte:
21235 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21236 DImode);
21237 break;
21238 case rep_prefix_4_byte:
21239 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21240 SImode);
21241 break;
21242 case rep_prefix_1_byte:
21243 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21244 QImode);
21245 break;
21246 }
21247 /* Properly adjust the offsets of the src and dest memory for aliasing. */
21248 if (CONST_INT_P (count_exp))
21249 {
21250 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
21251 (count / size_needed) * size_needed);
21252 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21253 (count / size_needed) * size_needed);
21254 }
21255 else
21256 {
21257 src = change_address (src, BLKmode, srcreg);
21258 dst = change_address (dst, BLKmode, destreg);
21259 }
21260
21261 /* Step 4: Epilogue to copy the remaining bytes. */
21262 epilogue:
21263 if (label)
21264 {
21265 /* When the main loop is done, COUNT_EXP might hold the original count,
21266 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21267 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21268 bytes. Compensate if needed. */
21269
21270 if (size_needed < epilogue_size_needed)
21271 {
21272 tmp =
21273 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21274 GEN_INT (size_needed - 1), count_exp, 1,
21275 OPTAB_DIRECT);
21276 if (tmp != count_exp)
21277 emit_move_insn (count_exp, tmp);
21278 }
21279 emit_label (label);
21280 LABEL_NUSES (label) = 1;
21281 }
21282
21283 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21284 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
21285 epilogue_size_needed);
21286 if (jump_around_label)
21287 emit_label (jump_around_label);
21288 return true;
21289 }
21290
21291 /* Helper function for memset. For the QImode value 0xXY produce
21292 0xXYXYXYXY of the width specified by MODE. This is essentially
21293 a * 0x01010101, but we can do slightly better than
21294 synth_mult by unwinding the sequence by hand on CPUs with
21295 slow multiply. */
21296 static rtx
21297 promote_duplicated_reg (enum machine_mode mode, rtx val)
21298 {
21299 enum machine_mode valmode = GET_MODE (val);
21300 rtx tmp;
21301 int nops = mode == DImode ? 3 : 2;
21302
21303 gcc_assert (mode == SImode || mode == DImode);
21304 if (val == const0_rtx)
21305 return copy_to_mode_reg (mode, const0_rtx);
21306 if (CONST_INT_P (val))
21307 {
21308 HOST_WIDE_INT v = INTVAL (val) & 255;
21309
21310 v |= v << 8;
21311 v |= v << 16;
21312 if (mode == DImode)
21313 v |= (v << 16) << 16;
21314 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
21315 }
21316
21317 if (valmode == VOIDmode)
21318 valmode = QImode;
21319 if (valmode != QImode)
21320 val = gen_lowpart (QImode, val);
21321 if (mode == QImode)
21322 return val;
21323 if (!TARGET_PARTIAL_REG_STALL)
21324 nops--;
21325 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
21326 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
21327 <= (ix86_cost->shift_const + ix86_cost->add) * nops
21328 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
21329 {
21330 rtx reg = convert_modes (mode, QImode, val, true);
21331 tmp = promote_duplicated_reg (mode, const1_rtx);
21332 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
21333 OPTAB_DIRECT);
21334 }
21335 else
21336 {
21337 rtx reg = convert_modes (mode, QImode, val, true);
21338
21339 if (!TARGET_PARTIAL_REG_STALL)
21340 if (mode == SImode)
21341 emit_insn (gen_movsi_insv_1 (reg, reg));
21342 else
21343 emit_insn (gen_movdi_insv_1 (reg, reg));
21344 else
21345 {
21346 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
21347 NULL, 1, OPTAB_DIRECT);
21348 reg =
21349 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21350 }
21351 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
21352 NULL, 1, OPTAB_DIRECT);
21353 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21354 if (mode == SImode)
21355 return reg;
21356 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
21357 NULL, 1, OPTAB_DIRECT);
21358 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21359 return reg;
21360 }
21361 }
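/* Worked example (illustrative, not part of GCC): for the QImode value 0xAB
   the promotion above yields 0xABABABAB in SImode, either as
   0xAB * 0x01010101 on CPUs with a fast multiply or via the shift/or
   sequence of the else branch.  Host-side sketch with a hypothetical name: */
#if 0
static unsigned int
example_promote_byte (unsigned char b)
{
  unsigned int v = b;
  v |= v << 8;			/* 0x0000ABAB */
  v |= v << 16;			/* 0xABABABAB */
  return v;			/* equals b * 0x01010101U */
}
#endif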
21362
21363 /* Duplicate the value VAL using promote_duplicated_reg into the maximal size
21364 that will be needed by the main loop copying SIZE_NEEDED chunks and by the
21365 prologue getting alignment from ALIGN to DESIRED_ALIGN. */
21366 static rtx
21367 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
21368 {
21369 rtx promoted_val;
21370
21371 if (TARGET_64BIT
21372 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
21373 promoted_val = promote_duplicated_reg (DImode, val);
21374 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
21375 promoted_val = promote_duplicated_reg (SImode, val);
21376 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
21377 promoted_val = promote_duplicated_reg (HImode, val);
21378 else
21379 promoted_val = val;
21380
21381 return promoted_val;
21382 }
21383
21384 /* Expand string clear operation (bzero). Use i386 string operations when
21385 profitable. See expand_movmem comment for explanation of individual
21386 steps performed. */
21387 bool
21388 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
21389 rtx expected_align_exp, rtx expected_size_exp)
21390 {
21391 rtx destreg;
21392 rtx label = NULL;
21393 rtx tmp;
21394 rtx jump_around_label = NULL;
21395 HOST_WIDE_INT align = 1;
21396 unsigned HOST_WIDE_INT count = 0;
21397 HOST_WIDE_INT expected_size = -1;
21398 int size_needed = 0, epilogue_size_needed;
21399 int desired_align = 0, align_bytes = 0;
21400 enum stringop_alg alg;
21401 rtx promoted_val = NULL;
21402 bool force_loopy_epilogue = false;
21403 int dynamic_check;
21404 bool need_zero_guard = false;
21405
21406 if (CONST_INT_P (align_exp))
21407 align = INTVAL (align_exp);
21408 /* i386 can do misaligned access at a reasonably increased cost. */
21409 if (CONST_INT_P (expected_align_exp)
21410 && INTVAL (expected_align_exp) > align)
21411 align = INTVAL (expected_align_exp);
21412 if (CONST_INT_P (count_exp))
21413 count = expected_size = INTVAL (count_exp);
21414 if (CONST_INT_P (expected_size_exp) && count == 0)
21415 expected_size = INTVAL (expected_size_exp);
21416
21417 /* Make sure we don't need to care about overflow later on. */
21418 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21419 return false;
21420
21421 /* Step 0: Decide on preferred algorithm, desired alignment and
21422 size of chunks to be copied by main loop. */
21423
21424 alg = decide_alg (count, expected_size, true, &dynamic_check);
21425 desired_align = decide_alignment (align, alg, expected_size);
21426
21427 if (!TARGET_ALIGN_STRINGOPS)
21428 align = desired_align;
21429
21430 if (alg == libcall)
21431 return false;
21432 gcc_assert (alg != no_stringop);
21433 if (!count)
21434 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
21435 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21436 switch (alg)
21437 {
21438 case libcall:
21439 case no_stringop:
21440 gcc_unreachable ();
21441 case loop:
21442 need_zero_guard = true;
21443 size_needed = GET_MODE_SIZE (Pmode);
21444 break;
21445 case unrolled_loop:
21446 need_zero_guard = true;
21447 size_needed = GET_MODE_SIZE (Pmode) * 4;
21448 break;
21449 case rep_prefix_8_byte:
21450 size_needed = 8;
21451 break;
21452 case rep_prefix_4_byte:
21453 size_needed = 4;
21454 break;
21455 case rep_prefix_1_byte:
21456 size_needed = 1;
21457 break;
21458 case loop_1_byte:
21459 need_zero_guard = true;
21460 size_needed = 1;
21461 break;
21462 }
21463 epilogue_size_needed = size_needed;
21464
21465 /* Step 1: Prologue guard. */
21466
21467 /* Alignment code needs count to be in register. */
21468 if (CONST_INT_P (count_exp) && desired_align > align)
21469 {
21470 if (INTVAL (count_exp) > desired_align
21471 && INTVAL (count_exp) > size_needed)
21472 {
21473 align_bytes
21474 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21475 if (align_bytes <= 0)
21476 align_bytes = 0;
21477 else
21478 align_bytes = desired_align - align_bytes;
21479 }
21480 if (align_bytes == 0)
21481 {
21482 enum machine_mode mode = SImode;
21483 if (TARGET_64BIT && (count & ~0xffffffff))
21484 mode = DImode;
21485 count_exp = force_reg (mode, count_exp);
21486 }
21487 }
21488 /* Do the cheap promotion to allow better CSE across the
21489 main loop and epilogue (i.e. one load of the big constant in
21490 front of all the code). */
21491 if (CONST_INT_P (val_exp))
21492 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21493 desired_align, align);
21494 /* Ensure that alignment prologue won't copy past end of block. */
21495 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21496 {
21497 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21498 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21499 Make sure it is a power of 2. */
21500 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21501
21502 /* To improve performance of small blocks, we jump around the VAL
21503 promoting code. This means that if the promoted VAL is not constant,
21504 we might not use it in the epilogue and have to use the byte
21505 loop variant. */
21506 if (epilogue_size_needed > 2 && !promoted_val)
21507 force_loopy_epilogue = true;
21508 if (count)
21509 {
21510 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21511 {
21512 /* If main algorithm works on QImode, no epilogue is needed.
21513 For small sizes just don't align anything. */
21514 if (size_needed == 1)
21515 desired_align = align;
21516 else
21517 goto epilogue;
21518 }
21519 }
21520 else
21521 {
21522 label = gen_label_rtx ();
21523 emit_cmp_and_jump_insns (count_exp,
21524 GEN_INT (epilogue_size_needed),
21525 LTU, 0, counter_mode (count_exp), 1, label);
21526 if (expected_size == -1 || expected_size <= epilogue_size_needed)
21527 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21528 else
21529 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21530 }
21531 }
21532 if (dynamic_check != -1)
21533 {
21534 rtx hot_label = gen_label_rtx ();
21535 jump_around_label = gen_label_rtx ();
21536 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21537 LEU, 0, counter_mode (count_exp), 1, hot_label);
21538 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21539 set_storage_via_libcall (dst, count_exp, val_exp, false);
21540 emit_jump (jump_around_label);
21541 emit_label (hot_label);
21542 }
21543
21544 /* Step 2: Alignment prologue. */
21545
21546 /* Do the expensive promotion once we branched off the small blocks. */
21547 if (!promoted_val)
21548 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21549 desired_align, align);
21550 gcc_assert (desired_align >= 1 && align >= 1);
21551
21552 if (desired_align > align)
21553 {
21554 if (align_bytes == 0)
21555 {
21556 /* Except for the first move in the epilogue, we no longer know
21557 the constant offset in the aliasing info. It doesn't seem worth
21558 the pain to maintain it for the first move, so throw away
21559 the info early. */
21560 dst = change_address (dst, BLKmode, destreg);
21561 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
21562 desired_align);
21563 }
21564 else
21565 {
21566 /* If we know how many bytes need to be stored before dst is
21567 sufficiently aligned, maintain aliasing info accurately. */
21568 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
21569 desired_align, align_bytes);
21570 count_exp = plus_constant (count_exp, -align_bytes);
21571 count -= align_bytes;
21572 }
21573 if (need_zero_guard
21574 && (count < (unsigned HOST_WIDE_INT) size_needed
21575 || (align_bytes == 0
21576 && count < ((unsigned HOST_WIDE_INT) size_needed
21577 + desired_align - align))))
21578 {
21579 /* It is possible that we copied enough so the main loop will not
21580 execute. */
21581 gcc_assert (size_needed > 1);
21582 if (label == NULL_RTX)
21583 label = gen_label_rtx ();
21584 emit_cmp_and_jump_insns (count_exp,
21585 GEN_INT (size_needed),
21586 LTU, 0, counter_mode (count_exp), 1, label);
21587 if (expected_size == -1
21588 || expected_size < (desired_align - align) / 2 + size_needed)
21589 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21590 else
21591 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21592 }
21593 }
21594 if (label && size_needed == 1)
21595 {
21596 emit_label (label);
21597 LABEL_NUSES (label) = 1;
21598 label = NULL;
21599 promoted_val = val_exp;
21600 epilogue_size_needed = 1;
21601 }
21602 else if (label == NULL_RTX)
21603 epilogue_size_needed = size_needed;
21604
21605 /* Step 3: Main loop. */
21606
21607 switch (alg)
21608 {
21609 case libcall:
21610 case no_stringop:
21611 gcc_unreachable ();
21612 case loop_1_byte:
21613 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21614 count_exp, QImode, 1, expected_size);
21615 break;
21616 case loop:
21617 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21618 count_exp, Pmode, 1, expected_size);
21619 break;
21620 case unrolled_loop:
21621 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21622 count_exp, Pmode, 4, expected_size);
21623 break;
21624 case rep_prefix_8_byte:
21625 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21626 DImode, val_exp);
21627 break;
21628 case rep_prefix_4_byte:
21629 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21630 SImode, val_exp);
21631 break;
21632 case rep_prefix_1_byte:
21633 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21634 QImode, val_exp);
21635 break;
21636 }
21637 /* Properly adjust the offset of the dest memory for aliasing. */
21638 if (CONST_INT_P (count_exp))
21639 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21640 (count / size_needed) * size_needed);
21641 else
21642 dst = change_address (dst, BLKmode, destreg);
21643
21644 /* Step 4: Epilogue to copy the remaining bytes. */
21645
21646 if (label)
21647 {
21648 /* When the main loop is done, COUNT_EXP might hold the original count,
21649 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21650 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21651 bytes. Compensate if needed. */
21652
21653 if (size_needed < epilogue_size_needed)
21654 {
21655 tmp =
21656 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21657 GEN_INT (size_needed - 1), count_exp, 1,
21658 OPTAB_DIRECT);
21659 if (tmp != count_exp)
21660 emit_move_insn (count_exp, tmp);
21661 }
21662 emit_label (label);
21663 LABEL_NUSES (label) = 1;
21664 }
21665 epilogue:
21666 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21667 {
21668 if (force_loopy_epilogue)
21669 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
21670 epilogue_size_needed);
21671 else
21672 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
21673 epilogue_size_needed);
21674 }
21675 if (jump_around_label)
21676 emit_label (jump_around_label);
21677 return true;
21678 }
21679
21680 /* Expand the appropriate insns for doing strlen if not just doing
21681 repnz; scasb
21682
21683 out = result, initialized with the start address
21684 align_rtx = alignment of the address.
21685 scratch = scratch register, initialized with the start address when
21686 not aligned, otherwise undefined
21687
21688 This is just the body. It needs the initializations mentioned above and
21689 some address computing at the end. These things are done in i386.md. */
21690
21691 static void
21692 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
21693 {
21694 int align;
21695 rtx tmp;
21696 rtx align_2_label = NULL_RTX;
21697 rtx align_3_label = NULL_RTX;
21698 rtx align_4_label = gen_label_rtx ();
21699 rtx end_0_label = gen_label_rtx ();
21700 rtx mem;
21701 rtx tmpreg = gen_reg_rtx (SImode);
21702 rtx scratch = gen_reg_rtx (SImode);
21703 rtx cmp;
21704
21705 align = 0;
21706 if (CONST_INT_P (align_rtx))
21707 align = INTVAL (align_rtx);
21708
21709 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
21710
21711 /* Is there a known alignment and is it less than 4? */
21712 if (align < 4)
21713 {
21714 rtx scratch1 = gen_reg_rtx (Pmode);
21715 emit_move_insn (scratch1, out);
21716 /* Is there a known alignment and is it not 2? */
21717 if (align != 2)
21718 {
21719 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
21720 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
21721
21722 /* Leave just the 3 lower bits. */
21723 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
21724 NULL_RTX, 0, OPTAB_WIDEN);
21725
21726 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21727 Pmode, 1, align_4_label);
21728 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
21729 Pmode, 1, align_2_label);
21730 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
21731 Pmode, 1, align_3_label);
21732 }
21733 else
21734 {
21735 /* Since the alignment is 2, we have to check 2 or 0 bytes;
21736 check if it is aligned to 4 bytes. */
21737
21738 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
21739 NULL_RTX, 0, OPTAB_WIDEN);
21740
21741 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21742 Pmode, 1, align_4_label);
21743 }
21744
21745 mem = change_address (src, QImode, out);
21746
21747 /* Now compare the bytes. */
21748
21749 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
21750 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
21751 QImode, 1, end_0_label);
21752
21753 /* Increment the address. */
21754 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21755
21756 /* Not needed with an alignment of 2. */
21757 if (align != 2)
21758 {
21759 emit_label (align_2_label);
21760
21761 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21762 end_0_label);
21763
21764 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21765
21766 emit_label (align_3_label);
21767 }
21768
21769 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21770 end_0_label);
21771
21772 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21773 }
21774
21775 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
21776 align this loop; it only makes the program larger and does not
21777 speed it up. */
21778 emit_label (align_4_label);
21779
21780 mem = change_address (src, SImode, out);
21781 emit_move_insn (scratch, mem);
21782 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
21783
21784 /* This formula yields a nonzero result iff one of the bytes is zero.
21785 This saves three branches inside the loop and many cycles. */
21786
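/* Worked example (illustrative, not part of GCC): the value computed below is
   (scratch - 0x01010101) & ~scratch & 0x80808080.  For scratch = 0x41414141
   ("AAAA") this is 0x40404040 & 0xBEBEBEBE & 0x80808080 == 0, so the loop
   continues.  For scratch = 0x41410041 (a zero in byte 1) the subtraction
   borrows through that byte: 0x403FFF40 & 0xBEBEFFBE & 0x80808080 ==
   0x00008000, which is nonzero, so we fall through to the byte fixup.  */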
21787 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
21788 emit_insn (gen_one_cmplsi2 (scratch, scratch));
21789 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
21790 emit_insn (gen_andsi3 (tmpreg, tmpreg,
21791 gen_int_mode (0x80808080, SImode)));
21792 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
21793 align_4_label);
21794
21795 if (TARGET_CMOVE)
21796 {
21797 rtx reg = gen_reg_rtx (SImode);
21798 rtx reg2 = gen_reg_rtx (Pmode);
21799 emit_move_insn (reg, tmpreg);
21800 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
21801
21802 /* If zero is not in the first two bytes, move two bytes forward. */
21803 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21804 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21805 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21806 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
21807 gen_rtx_IF_THEN_ELSE (SImode, tmp,
21808 reg,
21809 tmpreg)));
21810 /* Emit lea manually to avoid clobbering of flags. */
21811 emit_insn (gen_rtx_SET (SImode, reg2,
21812 gen_rtx_PLUS (Pmode, out, const2_rtx)));
21813
21814 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21815 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21816 emit_insn (gen_rtx_SET (VOIDmode, out,
21817 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
21818 reg2,
21819 out)));
21820 }
21821 else
21822 {
21823 rtx end_2_label = gen_label_rtx ();
21824 /* Is zero in the first two bytes? */
21825
21826 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21827 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21828 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
21829 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21830 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
21831 pc_rtx);
21832 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21833 JUMP_LABEL (tmp) = end_2_label;
21834
21835 /* Not in the first two. Move two bytes forward. */
21836 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
21837 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
21838
21839 emit_label (end_2_label);
21840
21841 }
21842
21843 /* Avoid a branch when fixing up the byte position. */
21844 tmpreg = gen_lowpart (QImode, tmpreg);
21845 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
21846 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
21847 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
21848 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
21849
21850 emit_label (end_0_label);
21851 }
21852
21853 /* Expand strlen. */
21854
21855 bool
21856 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
21857 {
21858 rtx addr, scratch1, scratch2, scratch3, scratch4;
21859
21860 /* The generic case of the strlen expander is long. Avoid expanding
21861 it unless TARGET_INLINE_ALL_STRINGOPS. */
21862
21863 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21864 && !TARGET_INLINE_ALL_STRINGOPS
21865 && !optimize_insn_for_size_p ()
21866 && (!CONST_INT_P (align) || INTVAL (align) < 4))
21867 return false;
21868
21869 addr = force_reg (Pmode, XEXP (src, 0));
21870 scratch1 = gen_reg_rtx (Pmode);
21871
21872 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21873 && !optimize_insn_for_size_p ())
21874 {
21875 /* It seems that some optimizers do not combine a call like
21876 foo(strlen(bar), strlen(bar));
21877 when the move and the subtraction are done here; the length is
21878 computed just once only when these instructions are emitted inside
21879 output_strlen_unroll(). But since &bar[strlen(bar)] is often used,
21880 and doing it here needs one fewer register for the lifetime of
21881 output_strlen_unroll(), this is better. */
21882
21883 emit_move_insn (out, addr);
21884
21885 ix86_expand_strlensi_unroll_1 (out, src, align);
21886
21887 /* strlensi_unroll_1 returns the address of the zero at the end of
21888 the string, like memchr(), so compute the length by subtracting
21889 the start address. */
21890 emit_insn (ix86_gen_sub3 (out, out, addr));
21891 }
21892 else
21893 {
21894 rtx unspec;
21895
21896 /* Can't use this if the user has appropriated eax, ecx, or edi. */
21897 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
21898 return false;
21899
21900 scratch2 = gen_reg_rtx (Pmode);
21901 scratch3 = gen_reg_rtx (Pmode);
21902 scratch4 = force_reg (Pmode, constm1_rtx);
21903
21904 emit_move_insn (scratch3, addr);
21905 eoschar = force_reg (QImode, eoschar);
21906
21907 src = replace_equiv_address_nv (src, scratch3);
21908
21909 /* If .md starts supporting :P, this can be done in .md. */
21910 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
21911 scratch4), UNSPEC_SCAS);
21912 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
21913 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
21914 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
21915 }
21916 return true;
21917 }
21918
21919 /* For a given symbol (function), construct code to compute the address of its
21920 PLT entry in the large x86-64 PIC model. */
21921 rtx
21922 construct_plt_address (rtx symbol)
21923 {
21924 rtx tmp = gen_reg_rtx (Pmode);
21925 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
21926
21927 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
21928 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
21929
21930 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
21931 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
21932 return tmp;
21933 }
21934
21935 rtx
21936 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
21937 rtx callarg2,
21938 rtx pop, int sibcall)
21939 {
21940 rtx use = NULL, call;
21941
21942 if (pop == const0_rtx)
21943 pop = NULL;
21944 gcc_assert (!TARGET_64BIT || !pop);
21945
21946 if (TARGET_MACHO && !TARGET_64BIT)
21947 {
21948 #if TARGET_MACHO
21949 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
21950 fnaddr = machopic_indirect_call_target (fnaddr);
21951 #endif
21952 }
21953 else
21954 {
21955 /* Static functions and indirect calls don't need the pic register. */
21956 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
21957 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21958 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
21959 use_reg (&use, pic_offset_table_rtx);
21960 }
21961
21962 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
21963 {
21964 rtx al = gen_rtx_REG (QImode, AX_REG);
21965 emit_move_insn (al, callarg2);
21966 use_reg (&use, al);
21967 }
21968
21969 if (ix86_cmodel == CM_LARGE_PIC
21970 && MEM_P (fnaddr)
21971 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21972 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
21973 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
21974 else if (sibcall
21975 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
21976 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
21977 {
21978 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
21979 fnaddr = gen_rtx_MEM (QImode, fnaddr);
21980 }
21981
21982 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
21983 if (retval)
21984 call = gen_rtx_SET (VOIDmode, retval, call);
21985 if (pop)
21986 {
21987 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
21988 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
21989 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
21990 }
21991 if (TARGET_64BIT_MS_ABI
21992 && (!callarg2 || INTVAL (callarg2) != -2))
21993 {
21994 /* We need to represent that SI and DI registers are clobbered
21995 by SYSV calls. */
21996 static int clobbered_registers[] = {
21997 XMM6_REG, XMM7_REG, XMM8_REG,
21998 XMM9_REG, XMM10_REG, XMM11_REG,
21999 XMM12_REG, XMM13_REG, XMM14_REG,
22000 XMM15_REG, SI_REG, DI_REG
22001 };
22002 unsigned int i;
22003 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
22004 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
22005 UNSPEC_MS_TO_SYSV_CALL);
22006
22007 vec[0] = call;
22008 vec[1] = unspec;
22009 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
22010 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
22011 ? TImode : DImode,
22012 gen_rtx_REG
22013 (SSE_REGNO_P (clobbered_registers[i])
22014 ? TImode : DImode,
22015 clobbered_registers[i]));
22016
22017 call = gen_rtx_PARALLEL (VOIDmode,
22018 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
22019 + 2, vec));
22020 }
22021
22022 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
22023 if (TARGET_VZEROUPPER)
22024 {
22025 rtx unspec;
22026 int avx256;
22027
22028 if (cfun->machine->callee_pass_avx256_p)
22029 {
22030 if (cfun->machine->callee_return_avx256_p)
22031 avx256 = callee_return_pass_avx256;
22032 else
22033 avx256 = callee_pass_avx256;
22034 }
22035 else if (cfun->machine->callee_return_avx256_p)
22036 avx256 = callee_return_avx256;
22037 else
22038 avx256 = call_no_avx256;
22039
22040 if (reload_completed)
22041 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
22042 else
22043 {
22044 unspec = gen_rtx_UNSPEC (VOIDmode,
22045 gen_rtvec (1, GEN_INT (avx256)),
22046 UNSPEC_CALL_NEEDS_VZEROUPPER);
22047 call = gen_rtx_PARALLEL (VOIDmode,
22048 gen_rtvec (2, call, unspec));
22049 }
22050 }
22051
22052 call = emit_call_insn (call);
22053 if (use)
22054 CALL_INSN_FUNCTION_USAGE (call) = use;
22055
22056 return call;
22057 }
22058
22059 void
22060 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
22061 {
22062 rtx call = XVECEXP (PATTERN (insn), 0, 0);
22063 emit_insn (gen_avx_vzeroupper (vzeroupper));
22064 emit_call_insn (call);
22065 }
22066
22067 /* Output the assembly for a call instruction. */
22068
22069 const char *
22070 ix86_output_call_insn (rtx insn, rtx call_op, int addr_op)
22071 {
22072 bool direct_p = constant_call_address_operand (call_op, Pmode);
22073 bool seh_nop_p = false;
22074
22075 gcc_assert (addr_op == 0 || addr_op == 1);
22076
22077 if (SIBLING_CALL_P (insn))
22078 {
22079 if (direct_p)
22080 return addr_op ? "jmp\t%P1" : "jmp\t%P0";
22081 /* SEH epilogue detection requires the indirect branch case
22082 to include REX.W. */
22083 else if (TARGET_SEH)
22084 return addr_op ? "rex.W jmp %A1" : "rex.W jmp %A0";
22085 else
22086 return addr_op ? "jmp\t%A1" : "jmp\t%A0";
22087 }
22088
22089 /* SEH unwinding can require an extra nop to be emitted in several
22090 circumstances. Determine if we have one of those. */
22091 if (TARGET_SEH)
22092 {
22093 rtx i;
22094
22095 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
22096 {
22097 /* If we get to another real insn, we don't need the nop. */
22098 if (INSN_P (i))
22099 break;
22100
22101 /* If we get to the epilogue note, prevent a catch region from
22102 being adjacent to the standard epilogue sequence. If non-
22103 call-exceptions, we'll have done this during epilogue emission. */
22104 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
22105 && !flag_non_call_exceptions
22106 && !can_throw_internal (insn))
22107 {
22108 seh_nop_p = true;
22109 break;
22110 }
22111 }
22112
22113 /* If we didn't find a real insn following the call, prevent the
22114 unwinder from looking into the next function. */
22115 if (i == NULL)
22116 seh_nop_p = true;
22117 }
22118
22119 if (direct_p)
22120 {
22121 if (seh_nop_p)
22122 return addr_op ? "call\t%P1\n\tnop" : "call\t%P0\n\tnop";
22123 else
22124 return addr_op ? "call\t%P1" : "call\t%P0";
22125 }
22126 else
22127 {
22128 if (seh_nop_p)
22129 return addr_op ? "call\t%A1\n\tnop" : "call\t%A0\n\tnop";
22130 else
22131 return addr_op ? "call\t%A1" : "call\t%A0";
22132 }
22133 }
22134 \f
22135 /* Clear stack slot assignments remembered from previous functions.
22136 This is called from INIT_EXPANDERS once before RTL is emitted for each
22137 function. */
22138
22139 static struct machine_function *
22140 ix86_init_machine_status (void)
22141 {
22142 struct machine_function *f;
22143
22144 f = ggc_alloc_cleared_machine_function ();
22145 f->use_fast_prologue_epilogue_nregs = -1;
22146 f->tls_descriptor_call_expanded_p = 0;
22147 f->call_abi = ix86_abi;
22148
22149 return f;
22150 }
22151
22152 /* Return a MEM corresponding to a stack slot with mode MODE.
22153 Allocate a new slot if necessary.
22154
22155 The RTL for a function can have several slots available: N is
22156 which slot to use. */
22157
22158 rtx
22159 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
22160 {
22161 struct stack_local_entry *s;
22162
22163 gcc_assert (n < MAX_386_STACK_LOCALS);
22164
22165 /* Virtual slot is valid only before vregs are instantiated. */
22166 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
22167
22168 for (s = ix86_stack_locals; s; s = s->next)
22169 if (s->mode == mode && s->n == n)
22170 return copy_rtx (s->rtl);
22171
22172 s = ggc_alloc_stack_local_entry ();
22173 s->n = n;
22174 s->mode = mode;
22175 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
22176
22177 s->next = ix86_stack_locals;
22178 ix86_stack_locals = s;
22179 return s->rtl;
22180 }
22181
22182 /* Construct the SYMBOL_REF for the tls_get_addr function. */
22183
22184 static GTY(()) rtx ix86_tls_symbol;
22185 rtx
22186 ix86_tls_get_addr (void)
22187 {
22188
22189 if (!ix86_tls_symbol)
22190 {
22191 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
22192 (TARGET_ANY_GNU_TLS
22193 && !TARGET_64BIT)
22194 ? "___tls_get_addr"
22195 : "__tls_get_addr");
22196 }
22197
22198 return ix86_tls_symbol;
22199 }
22200
22201 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
22202
22203 static GTY(()) rtx ix86_tls_module_base_symbol;
22204 rtx
22205 ix86_tls_module_base (void)
22206 {
22207
22208 if (!ix86_tls_module_base_symbol)
22209 {
22210 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
22211 "_TLS_MODULE_BASE_");
22212 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
22213 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
22214 }
22215
22216 return ix86_tls_module_base_symbol;
22217 }
22218 \f
22219 /* Calculate the length of the memory address in the instruction
22220 encoding. Does not include the one-byte modrm, opcode, or prefix. */
22221
22222 int
22223 memory_address_length (rtx addr)
22224 {
22225 struct ix86_address parts;
22226 rtx base, index, disp;
22227 int len;
22228 int ok;
22229
22230 if (GET_CODE (addr) == PRE_DEC
22231 || GET_CODE (addr) == POST_INC
22232 || GET_CODE (addr) == PRE_MODIFY
22233 || GET_CODE (addr) == POST_MODIFY)
22234 return 0;
22235
22236 ok = ix86_decompose_address (addr, &parts);
22237 gcc_assert (ok);
22238
22239 if (parts.base && GET_CODE (parts.base) == SUBREG)
22240 parts.base = SUBREG_REG (parts.base);
22241 if (parts.index && GET_CODE (parts.index) == SUBREG)
22242 parts.index = SUBREG_REG (parts.index);
22243
22244 base = parts.base;
22245 index = parts.index;
22246 disp = parts.disp;
22247 len = 0;
22248
22249 /* Rule of thumb:
22250 - esp as the base always wants an index,
22251 - ebp as the base always wants a displacement,
22252 - r12 as the base always wants an index,
22253 - r13 as the base always wants a displacement. */
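/* Illustrative examples (not from the original comment): a plain (%eax)
   base needs no extra bytes beyond the modrm byte; (%esp) needs a SIB
   byte; and (%ebp) cannot be encoded with mod 00, so it needs at least
   a one-byte displacement.  r12 and r13 behave like esp and ebp because
   they share the same low three bits of the register number.  */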
22254
22255 /* Register Indirect. */
22256 if (base && !index && !disp)
22257 {
22258 /* esp (for its index) and ebp (for its displacement) need
22259 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
22260 code. */
22261 if (REG_P (addr)
22262 && (addr == arg_pointer_rtx
22263 || addr == frame_pointer_rtx
22264 || REGNO (addr) == SP_REG
22265 || REGNO (addr) == BP_REG
22266 || REGNO (addr) == R12_REG
22267 || REGNO (addr) == R13_REG))
22268 len = 1;
22269 }
22270
22271 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
22272 is not disp32 but disp32(%rip), so for plain disp32 a
22273 SIB byte is needed, unless print_operand_address
22274 optimizes it into disp32(%rip) or (%rip) is implied
22275 by an UNSPEC. */
22276 else if (disp && !base && !index)
22277 {
22278 len = 4;
22279 if (TARGET_64BIT)
22280 {
22281 rtx symbol = disp;
22282
22283 if (GET_CODE (disp) == CONST)
22284 symbol = XEXP (disp, 0);
22285 if (GET_CODE (symbol) == PLUS
22286 && CONST_INT_P (XEXP (symbol, 1)))
22287 symbol = XEXP (symbol, 0);
22288
22289 if (GET_CODE (symbol) != LABEL_REF
22290 && (GET_CODE (symbol) != SYMBOL_REF
22291 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
22292 && (GET_CODE (symbol) != UNSPEC
22293 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
22294 && XINT (symbol, 1) != UNSPEC_PCREL
22295 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
22296 len += 1;
22297 }
22298 }
22299
22300 else
22301 {
22302 /* Find the length of the displacement constant. */
22303 if (disp)
22304 {
22305 if (base && satisfies_constraint_K (disp))
22306 len = 1;
22307 else
22308 len = 4;
22309 }
22310 /* ebp always wants a displacement. Similarly r13. */
22311 else if (base && REG_P (base)
22312 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
22313 len = 1;
22314
22315 /* An index requires the two-byte modrm form.... */
22316 if (index
22317 /* ...like esp (or r12), which always wants an index. */
22318 || base == arg_pointer_rtx
22319 || base == frame_pointer_rtx
22320 || (base && REG_P (base)
22321 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
22322 len += 1;
22323 }
22324
22325 switch (parts.seg)
22326 {
22327 case SEG_FS:
22328 case SEG_GS:
22329 len += 1;
22330 break;
22331 default:
22332 break;
22333 }
22334
22335 return len;
22336 }
22337
22338 /* Compute the default value for the "length_immediate" attribute. When
22339 SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
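/* For example (illustrative): with SHORTFORM set, "addl $3, %eax" has an
   imm8 alternative and gets length 1, while "addl $1000, %eax" needs a
   full 4-byte immediate.  */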
22340 int
22341 ix86_attr_length_immediate_default (rtx insn, int shortform)
22342 {
22343 int len = 0;
22344 int i;
22345 extract_insn_cached (insn);
22346 for (i = recog_data.n_operands - 1; i >= 0; --i)
22347 if (CONSTANT_P (recog_data.operand[i]))
22348 {
22349 enum attr_mode mode = get_attr_mode (insn);
22350
22351 gcc_assert (!len);
22352 if (shortform && CONST_INT_P (recog_data.operand[i]))
22353 {
22354 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
22355 switch (mode)
22356 {
22357 case MODE_QI:
22358 len = 1;
22359 continue;
22360 case MODE_HI:
22361 ival = trunc_int_for_mode (ival, HImode);
22362 break;
22363 case MODE_SI:
22364 ival = trunc_int_for_mode (ival, SImode);
22365 break;
22366 default:
22367 break;
22368 }
22369 if (IN_RANGE (ival, -128, 127))
22370 {
22371 len = 1;
22372 continue;
22373 }
22374 }
22375 switch (mode)
22376 {
22377 case MODE_QI:
22378 len = 1;
22379 break;
22380 case MODE_HI:
22381 len = 2;
22382 break;
22383 case MODE_SI:
22384 len = 4;
22385 break;
22386 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
22387 case MODE_DI:
22388 len = 4;
22389 break;
22390 default:
22391 fatal_insn ("unknown insn mode", insn);
22392 }
22393 }
22394 return len;
22395 }
22396 /* Compute the default value for the "length_address" attribute. */
22397 int
22398 ix86_attr_length_address_default (rtx insn)
22399 {
22400 int i;
22401
22402 if (get_attr_type (insn) == TYPE_LEA)
22403 {
22404 rtx set = PATTERN (insn), addr;
22405
22406 if (GET_CODE (set) == PARALLEL)
22407 set = XVECEXP (set, 0, 0);
22408
22409 gcc_assert (GET_CODE (set) == SET);
22410
22411 addr = SET_SRC (set);
22412 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
22413 {
22414 if (GET_CODE (addr) == ZERO_EXTEND)
22415 addr = XEXP (addr, 0);
22416 if (GET_CODE (addr) == SUBREG)
22417 addr = SUBREG_REG (addr);
22418 }
22419
22420 return memory_address_length (addr);
22421 }
22422
22423 extract_insn_cached (insn);
22424 for (i = recog_data.n_operands - 1; i >= 0; --i)
22425 if (MEM_P (recog_data.operand[i]))
22426 {
22427 constrain_operands_cached (reload_completed);
22428 if (which_alternative != -1)
22429 {
22430 const char *constraints = recog_data.constraints[i];
22431 int alt = which_alternative;
22432
22433 while (*constraints == '=' || *constraints == '+')
22434 constraints++;
22435 while (alt-- > 0)
22436 while (*constraints++ != ',')
22437 ;
22438 /* Skip ignored operands. */
22439 if (*constraints == 'X')
22440 continue;
22441 }
22442 return memory_address_length (XEXP (recog_data.operand[i], 0));
22443 }
22444 return 0;
22445 }
22446
22447 /* Compute the default value for the "length_vex" attribute. It includes
22448 the 2- or 3-byte VEX prefix and 1 opcode byte. */
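/* Background note (assumption based on the VEX encoding, not from the
   original comment): the 2-byte form (0xc5) can encode only the 0f
   opcode map and provides no equivalents of REX.W, REX.X or REX.B, so
   instructions needing VEX.W, extended index/base registers, or the
   0f38/0f3a maps must use the 3-byte form (0xc4).  */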
22449
22450 int
22451 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
22452 int has_vex_w)
22453 {
22454 int i;
22455
22456 /* Only the 0f opcode map can use the 2-byte VEX prefix, and the VEX.W
22457 bit requires the 3-byte VEX prefix. */
22458 if (!has_0f_opcode || has_vex_w)
22459 return 3 + 1;
22460
22461 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
22462 if (!TARGET_64BIT)
22463 return 2 + 1;
22464
22465 extract_insn_cached (insn);
22466
22467 for (i = recog_data.n_operands - 1; i >= 0; --i)
22468 if (REG_P (recog_data.operand[i]))
22469 {
22470 /* The REX.W bit requires the 3-byte VEX prefix. */
22471 if (GET_MODE (recog_data.operand[i]) == DImode
22472 && GENERAL_REG_P (recog_data.operand[i]))
22473 return 3 + 1;
22474 }
22475 else
22476 {
22477 /* The REX.X or REX.B bits require the 3-byte VEX prefix. */
22478 if (MEM_P (recog_data.operand[i])
22479 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
22480 return 3 + 1;
22481 }
22482
22483 return 2 + 1;
22484 }
22485 \f
22486 /* Return the maximum number of instructions a cpu can issue. */
22487
22488 static int
22489 ix86_issue_rate (void)
22490 {
22491 switch (ix86_tune)
22492 {
22493 case PROCESSOR_PENTIUM:
22494 case PROCESSOR_ATOM:
22495 case PROCESSOR_K6:
22496 return 2;
22497
22498 case PROCESSOR_PENTIUMPRO:
22499 case PROCESSOR_PENTIUM4:
22500 case PROCESSOR_CORE2_32:
22501 case PROCESSOR_CORE2_64:
22502 case PROCESSOR_COREI7_32:
22503 case PROCESSOR_COREI7_64:
22504 case PROCESSOR_ATHLON:
22505 case PROCESSOR_K8:
22506 case PROCESSOR_AMDFAM10:
22507 case PROCESSOR_NOCONA:
22508 case PROCESSOR_GENERIC32:
22509 case PROCESSOR_GENERIC64:
22510 case PROCESSOR_BDVER1:
22511 case PROCESSOR_BTVER1:
22512 return 3;
22513
22514 default:
22515 return 1;
22516 }
22517 }
22518
22519 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
22520 by DEP_INSN and nothing set by DEP_INSN. */
22521
22522 static int
22523 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
22524 {
22525 rtx set, set2;
22526
22527 /* Simplify the test for uninteresting insns. */
22528 if (insn_type != TYPE_SETCC
22529 && insn_type != TYPE_ICMOV
22530 && insn_type != TYPE_FCMOV
22531 && insn_type != TYPE_IBR)
22532 return 0;
22533
22534 if ((set = single_set (dep_insn)) != 0)
22535 {
22536 set = SET_DEST (set);
22537 set2 = NULL_RTX;
22538 }
22539 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
22540 && XVECLEN (PATTERN (dep_insn), 0) == 2
22541 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
22542 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
22543 {
22544 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
22545 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
22546 }
22547 else
22548 return 0;
22549
22550 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
22551 return 0;
22552
22553 /* This test is true if the dependent insn reads the flags but
22554 not any other potentially set register. */
22555 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
22556 return 0;
22557
22558 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
22559 return 0;
22560
22561 return 1;
22562 }
22563
22564 /* Return true iff USE_INSN has a memory address with operands set by
22565 SET_INSN. */
22566
22567 bool
22568 ix86_agi_dependent (rtx set_insn, rtx use_insn)
22569 {
22570 int i;
22571 extract_insn_cached (use_insn);
22572 for (i = recog_data.n_operands - 1; i >= 0; --i)
22573 if (MEM_P (recog_data.operand[i]))
22574 {
22575 rtx addr = XEXP (recog_data.operand[i], 0);
22576 return modified_in_p (addr, set_insn) != 0;
22577 }
22578 return false;
22579 }
22580
22581 static int
22582 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22583 {
22584 enum attr_type insn_type, dep_insn_type;
22585 enum attr_memory memory;
22586 rtx set, set2;
22587 int dep_insn_code_number;
22588
22589 /* Anti and output dependencies have zero cost on all CPUs. */
22590 if (REG_NOTE_KIND (link) != 0)
22591 return 0;
22592
22593 dep_insn_code_number = recog_memoized (dep_insn);
22594
22595 /* If we can't recognize the insns, we can't really do anything. */
22596 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
22597 return cost;
22598
22599 insn_type = get_attr_type (insn);
22600 dep_insn_type = get_attr_type (dep_insn);
22601
22602 switch (ix86_tune)
22603 {
22604 case PROCESSOR_PENTIUM:
22605 /* Address Generation Interlock adds a cycle of latency. */
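/* Illustrative example (based on documented Pentium behaviour): a load
   such as "movl (%ebx), %eax" issued immediately after an instruction
   that writes %ebx pays one extra cycle while the address is formed.  */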
22606 if (insn_type == TYPE_LEA)
22607 {
22608 rtx addr = PATTERN (insn);
22609
22610 if (GET_CODE (addr) == PARALLEL)
22611 addr = XVECEXP (addr, 0, 0);
22612
22613 gcc_assert (GET_CODE (addr) == SET);
22614
22615 addr = SET_SRC (addr);
22616 if (modified_in_p (addr, dep_insn))
22617 cost += 1;
22618 }
22619 else if (ix86_agi_dependent (dep_insn, insn))
22620 cost += 1;
22621
22622 /* ??? Compares pair with jump/setcc. */
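/* Illustrative example: "cmpl %ebx, %eax" followed by "je .L1" -- the
   jump reads only the flags the compare set, so ix86_flags_dependent
   returns true and the dependence cost is dropped to 0, modelling the
   Pentium's ability to pair a compare with the following jump/setcc.  */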
22623 if (ix86_flags_dependent (insn, dep_insn, insn_type))
22624 cost = 0;
22625
22626 /* Floating point stores require the value to be ready one cycle earlier. */
22627 if (insn_type == TYPE_FMOV
22628 && get_attr_memory (insn) == MEMORY_STORE
22629 && !ix86_agi_dependent (dep_insn, insn))
22630 cost += 1;
22631 break;
22632
22633 case PROCESSOR_PENTIUMPRO:
22634 memory = get_attr_memory (insn);
22635
22636 /* INT->FP conversion is expensive. */
22637 if (get_attr_fp_int_src (dep_insn))
22638 cost += 5;
22639
22640 /* There is one cycle extra latency between an FP op and a store. */
22641 if (insn_type == TYPE_FMOV
22642 && (set = single_set (dep_insn)) != NULL_RTX
22643 && (set2 = single_set (insn)) != NULL_RTX
22644 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
22645 && MEM_P (SET_DEST (set2)))
22646 cost += 1;
22647
22648 /* Show the ability of the reorder buffer to hide the latency of a load
22649 by executing it in parallel with the previous instruction, in case the
22650 previous instruction is not needed to compute the address. */
22651 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22652 && !ix86_agi_dependent (dep_insn, insn))
22653 {
22654 /* Claim that moves take one cycle, as the core can issue one load
22655 at a time and the next load can start a cycle later. */
22656 if (dep_insn_type == TYPE_IMOV
22657 || dep_insn_type == TYPE_FMOV)
22658 cost = 1;
22659 else if (cost > 1)
22660 cost--;
22661 }
22662 break;
22663
22664 case PROCESSOR_K6:
22665 memory = get_attr_memory (insn);
22666
22667 /* The esp dependency is resolved before the instruction is really
22668 finished. */
22669 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
22670 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
22671 return 1;
22672
22673 /* INT->FP conversion is expensive. */
22674 if (get_attr_fp_int_src (dep_insn))
22675 cost += 5;
22676
22677 /* Show the ability of the reorder buffer to hide the latency of a load
22678 by executing it in parallel with the previous instruction, in case the
22679 previous instruction is not needed to compute the address. */
22680 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22681 && !ix86_agi_dependent (dep_insn, insn))
22682 {
22683 /* Claim that moves take one cycle, as the core can issue one load
22684 at a time and the next load can start a cycle later. */
22685 if (dep_insn_type == TYPE_IMOV
22686 || dep_insn_type == TYPE_FMOV)
22687 cost = 1;
22688 else if (cost > 2)
22689 cost -= 2;
22690 else
22691 cost = 1;
22692 }
22693 break;
22694
22695 case PROCESSOR_ATHLON:
22696 case PROCESSOR_K8:
22697 case PROCESSOR_AMDFAM10:
22698 case PROCESSOR_BDVER1:
22699 case PROCESSOR_BTVER1:
22700 case PROCESSOR_ATOM:
22701 case PROCESSOR_GENERIC32:
22702 case PROCESSOR_GENERIC64:
22703 memory = get_attr_memory (insn);
22704
22705 /* Show the ability of the reorder buffer to hide the latency of a load
22706 by executing it in parallel with the previous instruction, in case the
22707 previous instruction is not needed to compute the address. */
22708 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22709 && !ix86_agi_dependent (dep_insn, insn))
22710 {
22711 enum attr_unit unit = get_attr_unit (insn);
22712 int loadcost = 3;
22713
22714 /* Because of the difference between the length of integer and
22715 floating unit pipeline preparation stages, the memory operands
22716 for floating point are cheaper.
22717
22718 ??? For Athlon the difference is most probably 2. */
22719 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
22720 loadcost = 3;
22721 else
22722 loadcost = TARGET_ATHLON ? 2 : 0;
22723
22724 if (cost >= loadcost)
22725 cost -= loadcost;
22726 else
22727 cost = 0;
22728 }
22729
22730 default:
22731 break;
22732 }
22733
22734 return cost;
22735 }
22736
22737 /* How many alternative schedules to try. This should be as wide as the
22738 scheduling freedom in the DFA, but no wider. Making this value too
22739 large results in extra work for the scheduler. */
22740
22741 static int
22742 ia32_multipass_dfa_lookahead (void)
22743 {
22744 switch (ix86_tune)
22745 {
22746 case PROCESSOR_PENTIUM:
22747 return 2;
22748
22749 case PROCESSOR_PENTIUMPRO:
22750 case PROCESSOR_K6:
22751 return 1;
22752
22753 case PROCESSOR_CORE2_32:
22754 case PROCESSOR_CORE2_64:
22755 case PROCESSOR_COREI7_32:
22756 case PROCESSOR_COREI7_64:
22757 /* Generally, we want haifa-sched:max_issue() to look ahead as far
22758 as the number of instructions that can be executed in one cycle, i.e.,
22759 issue_rate. I wonder why tuning for many CPUs does not do this. */
22760 return ix86_issue_rate ();
22761
22762 default:
22763 return 0;
22764 }
22765 }
22766
22767 \f
22768
22769 /* Model the decoder of Core 2/i7.
22770 The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
22771 track the instruction fetch block boundaries and make sure that long
22772 (9+ byte) instructions are assigned to decoder D0. */
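/* Worked example (illustrative): with a 16-byte fetch block and at most
   6 insns per block, ready insns of 7, 6 and 5 bytes are accepted as
   7+6 within one block; the 5-byte insn no longer fits (7+6+5 > 16) and
   is filtered out until the next cycle.  */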
22773
22774 /* Maximum length of an insn that can be handled by
22775 a secondary decoder unit. '8' for Core 2/i7. */
22776 static int core2i7_secondary_decoder_max_insn_size;
22777
22778 /* Ifetch block size, i.e., number of bytes decoder reads per cycle.
22779 '16' for Core 2/i7. */
22780 static int core2i7_ifetch_block_size;
22781
22782 /* Maximum number of instructions decoder can handle per cycle.
22783 '6' for Core 2/i7. */
22784 static int core2i7_ifetch_block_max_insns;
22785
22786 typedef struct ix86_first_cycle_multipass_data_ *
22787 ix86_first_cycle_multipass_data_t;
22788 typedef const struct ix86_first_cycle_multipass_data_ *
22789 const_ix86_first_cycle_multipass_data_t;
22790
22791 /* A variable to store target state across calls to max_issue within
22792 one cycle. */
22793 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
22794 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
22795
22796 /* Initialize DATA. */
22797 static void
22798 core2i7_first_cycle_multipass_init (void *_data)
22799 {
22800 ix86_first_cycle_multipass_data_t data
22801 = (ix86_first_cycle_multipass_data_t) _data;
22802
22803 data->ifetch_block_len = 0;
22804 data->ifetch_block_n_insns = 0;
22805 data->ready_try_change = NULL;
22806 data->ready_try_change_size = 0;
22807 }
22808
22809 /* Advancing the cycle; reset ifetch block counts. */
22810 static void
22811 core2i7_dfa_post_advance_cycle (void)
22812 {
22813 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
22814
22815 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22816
22817 data->ifetch_block_len = 0;
22818 data->ifetch_block_n_insns = 0;
22819 }
22820
22821 static int min_insn_size (rtx);
22822
22823 /* Filter out insns from ready_try that the core will not be able to issue
22824 on the current cycle due to decoder restrictions. */
22825 static void
22826 core2i7_first_cycle_multipass_filter_ready_try
22827 (const_ix86_first_cycle_multipass_data_t data,
22828 char *ready_try, int n_ready, bool first_cycle_insn_p)
22829 {
22830 while (n_ready--)
22831 {
22832 rtx insn;
22833 int insn_size;
22834
22835 if (ready_try[n_ready])
22836 continue;
22837
22838 insn = get_ready_element (n_ready);
22839 insn_size = min_insn_size (insn);
22840
22841 if (/* If this insn is too long for a secondary decoder ... */
22842 (!first_cycle_insn_p
22843 && insn_size > core2i7_secondary_decoder_max_insn_size)
22844 /* ... or it would not fit into the ifetch block ... */
22845 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
22846 /* ... or the decoder is full already ... */
22847 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
22848 /* ... mask the insn out. */
22849 {
22850 ready_try[n_ready] = 1;
22851
22852 if (data->ready_try_change)
22853 SET_BIT (data->ready_try_change, n_ready);
22854 }
22855 }
22856 }
22857
22858 /* Prepare for a new round of multipass lookahead scheduling. */
22859 static void
22860 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
22861 bool first_cycle_insn_p)
22862 {
22863 ix86_first_cycle_multipass_data_t data
22864 = (ix86_first_cycle_multipass_data_t) _data;
22865 const_ix86_first_cycle_multipass_data_t prev_data
22866 = ix86_first_cycle_multipass_data;
22867
22868 /* Restore the state from the end of the previous round. */
22869 data->ifetch_block_len = prev_data->ifetch_block_len;
22870 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
22871
22872 /* Filter instructions that cannot be issued on current cycle due to
22873 decoder restrictions. */
22874 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22875 first_cycle_insn_p);
22876 }
22877
22878 /* INSN is being issued in current solution. Account for its impact on
22879 the decoder model. */
22880 static void
22881 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
22882 rtx insn, const void *_prev_data)
22883 {
22884 ix86_first_cycle_multipass_data_t data
22885 = (ix86_first_cycle_multipass_data_t) _data;
22886 const_ix86_first_cycle_multipass_data_t prev_data
22887 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
22888
22889 int insn_size = min_insn_size (insn);
22890
22891 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
22892 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
22893 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
22894 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22895
22896 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
22897 if (!data->ready_try_change)
22898 {
22899 data->ready_try_change = sbitmap_alloc (n_ready);
22900 data->ready_try_change_size = n_ready;
22901 }
22902 else if (data->ready_try_change_size < n_ready)
22903 {
22904 data->ready_try_change = sbitmap_resize (data->ready_try_change,
22905 n_ready, 0);
22906 data->ready_try_change_size = n_ready;
22907 }
22908 sbitmap_zero (data->ready_try_change);
22909
22910 /* Filter out insns from ready_try that the core will not be able to issue
22911 on the current cycle due to decoder restrictions. */
22912 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22913 false);
22914 }
22915
22916 /* Revert the effect on ready_try. */
22917 static void
22918 core2i7_first_cycle_multipass_backtrack (const void *_data,
22919 char *ready_try,
22920 int n_ready ATTRIBUTE_UNUSED)
22921 {
22922 const_ix86_first_cycle_multipass_data_t data
22923 = (const_ix86_first_cycle_multipass_data_t) _data;
22924 unsigned int i = 0;
22925 sbitmap_iterator sbi;
22926
22927 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
22928 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
22929 {
22930 ready_try[i] = 0;
22931 }
22932 }
22933
22934 /* Save the result of multipass lookahead scheduling for the next round. */
22935 static void
22936 core2i7_first_cycle_multipass_end (const void *_data)
22937 {
22938 const_ix86_first_cycle_multipass_data_t data
22939 = (const_ix86_first_cycle_multipass_data_t) _data;
22940 ix86_first_cycle_multipass_data_t next_data
22941 = ix86_first_cycle_multipass_data;
22942
22943 if (data != NULL)
22944 {
22945 next_data->ifetch_block_len = data->ifetch_block_len;
22946 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
22947 }
22948 }
22949
22950 /* Deallocate target data. */
22951 static void
22952 core2i7_first_cycle_multipass_fini (void *_data)
22953 {
22954 ix86_first_cycle_multipass_data_t data
22955 = (ix86_first_cycle_multipass_data_t) _data;
22956
22957 if (data->ready_try_change)
22958 {
22959 sbitmap_free (data->ready_try_change);
22960 data->ready_try_change = NULL;
22961 data->ready_try_change_size = 0;
22962 }
22963 }
22964
22965 /* Prepare for scheduling pass. */
22966 static void
22967 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
22968 int verbose ATTRIBUTE_UNUSED,
22969 int max_uid ATTRIBUTE_UNUSED)
22970 {
22971 /* Install scheduling hooks for current CPU. Some of these hooks are used
22972 in time-critical parts of the scheduler, so we only set them up when
22973 they are actually used. */
22974 switch (ix86_tune)
22975 {
22976 case PROCESSOR_CORE2_32:
22977 case PROCESSOR_CORE2_64:
22978 case PROCESSOR_COREI7_32:
22979 case PROCESSOR_COREI7_64:
22980 targetm.sched.dfa_post_advance_cycle
22981 = core2i7_dfa_post_advance_cycle;
22982 targetm.sched.first_cycle_multipass_init
22983 = core2i7_first_cycle_multipass_init;
22984 targetm.sched.first_cycle_multipass_begin
22985 = core2i7_first_cycle_multipass_begin;
22986 targetm.sched.first_cycle_multipass_issue
22987 = core2i7_first_cycle_multipass_issue;
22988 targetm.sched.first_cycle_multipass_backtrack
22989 = core2i7_first_cycle_multipass_backtrack;
22990 targetm.sched.first_cycle_multipass_end
22991 = core2i7_first_cycle_multipass_end;
22992 targetm.sched.first_cycle_multipass_fini
22993 = core2i7_first_cycle_multipass_fini;
22994
22995 /* Set decoder parameters. */
22996 core2i7_secondary_decoder_max_insn_size = 8;
22997 core2i7_ifetch_block_size = 16;
22998 core2i7_ifetch_block_max_insns = 6;
22999 break;
23000
23001 default:
23002 targetm.sched.dfa_post_advance_cycle = NULL;
23003 targetm.sched.first_cycle_multipass_init = NULL;
23004 targetm.sched.first_cycle_multipass_begin = NULL;
23005 targetm.sched.first_cycle_multipass_issue = NULL;
23006 targetm.sched.first_cycle_multipass_backtrack = NULL;
23007 targetm.sched.first_cycle_multipass_end = NULL;
23008 targetm.sched.first_cycle_multipass_fini = NULL;
23009 break;
23010 }
23011 }
23012
23013 \f
23014 /* Compute the alignment given to a constant that is being placed in memory.
23015 EXP is the constant and ALIGN is the alignment that the object would
23016 ordinarily have.
23017 The value of this function is used instead of that alignment to align
23018 the object. */
23019
23020 int
23021 ix86_constant_alignment (tree exp, int align)
23022 {
23023 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
23024 || TREE_CODE (exp) == INTEGER_CST)
23025 {
23026 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
23027 return 64;
23028 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
23029 return 128;
23030 }
23031 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
23032 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
23033 return BITS_PER_WORD;
23034
23035 return align;
23036 }
23037
23038 /* Compute the alignment for a static variable.
23039 TYPE is the data type, and ALIGN is the alignment that
23040 the object would ordinarily have. The value of this function is used
23041 instead of that alignment to align the object. */
23042
23043 int
23044 ix86_data_alignment (tree type, int align)
23045 {
23046 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
23047
23048 if (AGGREGATE_TYPE_P (type)
23049 && TYPE_SIZE (type)
23050 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23051 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
23052 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
23053 && align < max_align)
23054 align = max_align;
23055
23056 /* The x86-64 ABI requires arrays larger than 16 bytes to be aligned
23057 to a 16-byte boundary. */
23058 if (TARGET_64BIT)
23059 {
23060 if (AGGREGATE_TYPE_P (type)
23061 && TYPE_SIZE (type)
23062 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23063 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
23064 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23065 return 128;
23066 }
23067
23068 if (TREE_CODE (type) == ARRAY_TYPE)
23069 {
23070 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23071 return 64;
23072 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23073 return 128;
23074 }
23075 else if (TREE_CODE (type) == COMPLEX_TYPE)
23076 {
23077
23078 if (TYPE_MODE (type) == DCmode && align < 64)
23079 return 64;
23080 if ((TYPE_MODE (type) == XCmode
23081 || TYPE_MODE (type) == TCmode) && align < 128)
23082 return 128;
23083 }
23084 else if ((TREE_CODE (type) == RECORD_TYPE
23085 || TREE_CODE (type) == UNION_TYPE
23086 || TREE_CODE (type) == QUAL_UNION_TYPE)
23087 && TYPE_FIELDS (type))
23088 {
23089 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23090 return 64;
23091 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23092 return 128;
23093 }
23094 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23095 || TREE_CODE (type) == INTEGER_TYPE)
23096 {
23097 if (TYPE_MODE (type) == DFmode && align < 64)
23098 return 64;
23099 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23100 return 128;
23101 }
23102
23103 return align;
23104 }
23105
23106 /* Compute the alignment for a local variable or a stack slot. EXP is
23107 the data type or decl itself, MODE is the widest mode available and
23108 ALIGN is the alignment that the object would ordinarily have. The
23109 value of this macro is used instead of that alignment to align the
23110 object. */
23111
23112 unsigned int
23113 ix86_local_alignment (tree exp, enum machine_mode mode,
23114 unsigned int align)
23115 {
23116 tree type, decl;
23117
23118 if (exp && DECL_P (exp))
23119 {
23120 type = TREE_TYPE (exp);
23121 decl = exp;
23122 }
23123 else
23124 {
23125 type = exp;
23126 decl = NULL;
23127 }
23128
23129 /* Don't do dynamic stack realignment for long long objects with
23130 -mpreferred-stack-boundary=2. */
23131 if (!TARGET_64BIT
23132 && align == 64
23133 && ix86_preferred_stack_boundary < 64
23134 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
23135 && (!type || !TYPE_USER_ALIGN (type))
23136 && (!decl || !DECL_USER_ALIGN (decl)))
23137 align = 32;
23138
23139 /* If TYPE is NULL, we are allocating a stack slot for caller-save
23140 register in MODE. We will return the largest alignment of XF
23141 and DF. */
23142 if (!type)
23143 {
23144 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
23145 align = GET_MODE_ALIGNMENT (DFmode);
23146 return align;
23147 }
23148
23149 /* The x86-64 ABI requires arrays larger than 16 bytes to be aligned
23150 to a 16-byte boundary. The exact wording is:
23151
23152 An array uses the same alignment as its elements, except that a local or
23153 global array variable of length at least 16 bytes or
23154 a C99 variable-length array variable always has alignment of at least 16 bytes.
23155
23156 This was added to allow the use of aligned SSE instructions on arrays. The
23157 rule is meant for static storage (where the compiler cannot do the analysis
23158 by itself). We follow it for automatic variables only when convenient:
23159 we fully control everything in the function being compiled, and functions
23160 from other units cannot rely on the alignment.
23161
23162 Exclude the va_list type. It is a common case of a local array where
23163 we cannot benefit from the alignment. */
23164 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
23165 && TARGET_SSE)
23166 {
23167 if (AGGREGATE_TYPE_P (type)
23168 && (va_list_type_node == NULL_TREE
23169 || (TYPE_MAIN_VARIANT (type)
23170 != TYPE_MAIN_VARIANT (va_list_type_node)))
23171 && TYPE_SIZE (type)
23172 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23173 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
23174 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23175 return 128;
23176 }
23177 if (TREE_CODE (type) == ARRAY_TYPE)
23178 {
23179 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23180 return 64;
23181 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23182 return 128;
23183 }
23184 else if (TREE_CODE (type) == COMPLEX_TYPE)
23185 {
23186 if (TYPE_MODE (type) == DCmode && align < 64)
23187 return 64;
23188 if ((TYPE_MODE (type) == XCmode
23189 || TYPE_MODE (type) == TCmode) && align < 128)
23190 return 128;
23191 }
23192 else if ((TREE_CODE (type) == RECORD_TYPE
23193 || TREE_CODE (type) == UNION_TYPE
23194 || TREE_CODE (type) == QUAL_UNION_TYPE)
23195 && TYPE_FIELDS (type))
23196 {
23197 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23198 return 64;
23199 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23200 return 128;
23201 }
23202 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23203 || TREE_CODE (type) == INTEGER_TYPE)
23204 {
23205
23206 if (TYPE_MODE (type) == DFmode && align < 64)
23207 return 64;
23208 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23209 return 128;
23210 }
23211 return align;
23212 }
23213
23214 /* Compute the minimum required alignment for dynamic stack realignment
23215 purposes for a local variable, parameter or a stack slot. EXP is
23216 the data type or decl itself, MODE is its mode and ALIGN is the
23217 alignment that the object would ordinarily have. */
23218
23219 unsigned int
23220 ix86_minimum_alignment (tree exp, enum machine_mode mode,
23221 unsigned int align)
23222 {
23223 tree type, decl;
23224
23225 if (exp && DECL_P (exp))
23226 {
23227 type = TREE_TYPE (exp);
23228 decl = exp;
23229 }
23230 else
23231 {
23232 type = exp;
23233 decl = NULL;
23234 }
23235
23236 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
23237 return align;
23238
23239 /* Don't do dynamic stack realignment for long long objects with
23240 -mpreferred-stack-boundary=2. */
23241 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
23242 && (!type || !TYPE_USER_ALIGN (type))
23243 && (!decl || !DECL_USER_ALIGN (decl)))
23244 return 32;
23245
23246 return align;
23247 }
23248 \f
23249 /* Find a location for the static chain incoming to a nested function.
23250 This is a register, unless all free registers are used by arguments. */
23251
23252 static rtx
23253 ix86_static_chain (const_tree fndecl, bool incoming_p)
23254 {
23255 unsigned regno;
23256
23257 if (!DECL_STATIC_CHAIN (fndecl))
23258 return NULL;
23259
23260 if (TARGET_64BIT)
23261 {
23262 /* We always use R10 in 64-bit mode. */
23263 regno = R10_REG;
23264 }
23265 else
23266 {
23267 tree fntype;
23268 unsigned int ccvt;
23269
23270 /* By default in 32-bit mode we use ECX to pass the static chain. */
23271 regno = CX_REG;
23272
23273 fntype = TREE_TYPE (fndecl);
23274 ccvt = ix86_get_callcvt (fntype);
23275 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
23276 {
23277 /* Fastcall functions use ecx/edx for arguments, which leaves
23278 us with EAX for the static chain.
23279 Thiscall functions use ecx for arguments, which also
23280 leaves us with EAX for the static chain. */
23281 regno = AX_REG;
23282 }
23283 else if (ix86_function_regparm (fntype, fndecl) == 3)
23284 {
23285 /* For regparm 3, we have no free call-clobbered registers in
23286 which to store the static chain. In order to implement this,
23287 we have the trampoline push the static chain to the stack.
23288 However, we can't push a value below the return address when
23289 we call the nested function directly, so we have to use an
23290 alternate entry point. For this we use ESI, and have the
23291 alternate entry point push ESI, so that things appear the
23292 same once we're executing the nested function. */
23293 if (incoming_p)
23294 {
23295 if (fndecl == current_function_decl)
23296 ix86_static_chain_on_stack = true;
23297 return gen_frame_mem (SImode,
23298 plus_constant (arg_pointer_rtx, -8));
23299 }
23300 regno = SI_REG;
23301 }
23302 }
23303
23304 return gen_rtx_REG (Pmode, regno);
23305 }
23306
23307 /* Emit RTL insns to initialize the variable parts of a trampoline.
23308 FNDECL is the decl of the target address; M_TRAMP is a MEM for
23309 the trampoline, and CHAIN_VALUE is an RTX for the static chain
23310 to be passed to the target function. */
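/* Sketch of the emitted trampolines, reconstructed from the constants
   used below (for illustration only):
     32-bit:  b9/b8/68 <chain32>   mov $chain, %ecx/%eax  or  push $chain
              e9 <rel32>           jmp <function>
     64-bit:  49 bb <imm64>        movabs $fn, %r11  (or 41 bb <imm32>)
              49 ba <imm64>        movabs $chain, %r10
              49 ff e3 90          jmp *%r11; nop  */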
23311
23312 static void
23313 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
23314 {
23315 rtx mem, fnaddr;
23316
23317 fnaddr = XEXP (DECL_RTL (fndecl), 0);
23318
23319 if (!TARGET_64BIT)
23320 {
23321 rtx disp, chain;
23322 int opcode;
23323
23324 /* Depending on the static chain location, either load a register
23325 with a constant, or push the constant to the stack. All of the
23326 instructions are the same size. */
23327 chain = ix86_static_chain (fndecl, true);
23328 if (REG_P (chain))
23329 {
23330 if (REGNO (chain) == CX_REG)
23331 opcode = 0xb9;
23332 else if (REGNO (chain) == AX_REG)
23333 opcode = 0xb8;
23334 else
23335 gcc_unreachable ();
23336 }
23337 else
23338 opcode = 0x68;
23339
23340 mem = adjust_address (m_tramp, QImode, 0);
23341 emit_move_insn (mem, gen_int_mode (opcode, QImode));
23342
23343 mem = adjust_address (m_tramp, SImode, 1);
23344 emit_move_insn (mem, chain_value);
23345
23346 /* Compute offset from the end of the jmp to the target function.
23347 In the case in which the trampoline stores the static chain on
23348 the stack, we need to skip the first insn which pushes the
23349 (call-saved) register static chain; this push is 1 byte. */
23350 disp = expand_binop (SImode, sub_optab, fnaddr,
23351 plus_constant (XEXP (m_tramp, 0),
23352 MEM_P (chain) ? 9 : 10),
23353 NULL_RTX, 1, OPTAB_DIRECT);
23354
23355 mem = adjust_address (m_tramp, QImode, 5);
23356 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
23357
23358 mem = adjust_address (m_tramp, SImode, 6);
23359 emit_move_insn (mem, disp);
23360 }
23361 else
23362 {
23363 int offset = 0;
23364
23365 /* Load the function address to r11. Try to load address using
23366 the shorter movl instead of movabs. We may want to support
23367 movq for kernel mode, but kernel does not use trampolines at
23368 the moment. */
23369 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
23370 {
23371 fnaddr = copy_to_mode_reg (DImode, fnaddr);
23372
23373 mem = adjust_address (m_tramp, HImode, offset);
23374 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
23375
23376 mem = adjust_address (m_tramp, SImode, offset + 2);
23377 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
23378 offset += 6;
23379 }
23380 else
23381 {
23382 mem = adjust_address (m_tramp, HImode, offset);
23383 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
23384
23385 mem = adjust_address (m_tramp, DImode, offset + 2);
23386 emit_move_insn (mem, fnaddr);
23387 offset += 10;
23388 }
23389
23390 /* Load static chain using movabs to r10. */
23391 mem = adjust_address (m_tramp, HImode, offset);
23392 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
23393
23394 mem = adjust_address (m_tramp, DImode, offset + 2);
23395 emit_move_insn (mem, chain_value);
23396 offset += 10;
23397
23398 /* Jump to r11; the last (unused) byte is a nop, only there to
23399 pad the write out to a single 32-bit store. */
23400 mem = adjust_address (m_tramp, SImode, offset);
23401 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
23402 offset += 4;
23403
23404 gcc_assert (offset <= TRAMPOLINE_SIZE);
23405 }
23406
23407 #ifdef ENABLE_EXECUTE_STACK
23408 #ifdef CHECK_EXECUTE_STACK_ENABLED
23409 if (CHECK_EXECUTE_STACK_ENABLED)
23410 #endif
23411 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
23412 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
23413 #endif
23414 }
23415 \f
23416 /* The following file contains several enumerations and data structures
23417 built from the definitions in i386-builtin-types.def. */
23418
23419 #include "i386-builtin-types.inc"
23420
23421 /* Table for the ix86 builtin non-function types. */
23422 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
23423
23424 /* Retrieve an element from the above table, building some of
23425 the types lazily. */
23426
23427 static tree
23428 ix86_get_builtin_type (enum ix86_builtin_type tcode)
23429 {
23430 unsigned int index;
23431 tree type, itype;
23432
23433 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
23434
23435 type = ix86_builtin_type_tab[(int) tcode];
23436 if (type != NULL)
23437 return type;
23438
23439 gcc_assert (tcode > IX86_BT_LAST_PRIM);
23440 if (tcode <= IX86_BT_LAST_VECT)
23441 {
23442 enum machine_mode mode;
23443
23444 index = tcode - IX86_BT_LAST_PRIM - 1;
23445 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
23446 mode = ix86_builtin_type_vect_mode[index];
23447
23448 type = build_vector_type_for_mode (itype, mode);
23449 }
23450 else
23451 {
23452 int quals;
23453
23454 index = tcode - IX86_BT_LAST_VECT - 1;
23455 if (tcode <= IX86_BT_LAST_PTR)
23456 quals = TYPE_UNQUALIFIED;
23457 else
23458 quals = TYPE_QUAL_CONST;
23459
23460 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
23461 if (quals != TYPE_UNQUALIFIED)
23462 itype = build_qualified_type (itype, quals);
23463
23464 type = build_pointer_type (itype);
23465 }
23466
23467 ix86_builtin_type_tab[(int) tcode] = type;
23468 return type;
23469 }
23470
23471 /* Table for the ix86 builtin function types. */
23472 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
23473
23474 /* Retrieve an element from the above table, building some of
23475 the types lazily. */
23476
23477 static tree
23478 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
23479 {
23480 tree type;
23481
23482 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
23483
23484 type = ix86_builtin_func_type_tab[(int) tcode];
23485 if (type != NULL)
23486 return type;
23487
23488 if (tcode <= IX86_BT_LAST_FUNC)
23489 {
23490 unsigned start = ix86_builtin_func_start[(int) tcode];
23491 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
23492 tree rtype, atype, args = void_list_node;
23493 unsigned i;
23494
23495 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
23496 for (i = after - 1; i > start; --i)
23497 {
23498 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
23499 args = tree_cons (NULL, atype, args);
23500 }
23501
23502 type = build_function_type (rtype, args);
23503 }
23504 else
23505 {
23506 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
23507 enum ix86_builtin_func_type icode;
23508
23509 icode = ix86_builtin_func_alias_base[index];
23510 type = ix86_get_builtin_func_type (icode);
23511 }
23512
23513 ix86_builtin_func_type_tab[(int) tcode] = type;
23514 return type;
23515 }
23516
23517
23518 /* Codes for all the SSE/MMX builtins. */
23519 enum ix86_builtins
23520 {
23521 IX86_BUILTIN_ADDPS,
23522 IX86_BUILTIN_ADDSS,
23523 IX86_BUILTIN_DIVPS,
23524 IX86_BUILTIN_DIVSS,
23525 IX86_BUILTIN_MULPS,
23526 IX86_BUILTIN_MULSS,
23527 IX86_BUILTIN_SUBPS,
23528 IX86_BUILTIN_SUBSS,
23529
23530 IX86_BUILTIN_CMPEQPS,
23531 IX86_BUILTIN_CMPLTPS,
23532 IX86_BUILTIN_CMPLEPS,
23533 IX86_BUILTIN_CMPGTPS,
23534 IX86_BUILTIN_CMPGEPS,
23535 IX86_BUILTIN_CMPNEQPS,
23536 IX86_BUILTIN_CMPNLTPS,
23537 IX86_BUILTIN_CMPNLEPS,
23538 IX86_BUILTIN_CMPNGTPS,
23539 IX86_BUILTIN_CMPNGEPS,
23540 IX86_BUILTIN_CMPORDPS,
23541 IX86_BUILTIN_CMPUNORDPS,
23542 IX86_BUILTIN_CMPEQSS,
23543 IX86_BUILTIN_CMPLTSS,
23544 IX86_BUILTIN_CMPLESS,
23545 IX86_BUILTIN_CMPNEQSS,
23546 IX86_BUILTIN_CMPNLTSS,
23547 IX86_BUILTIN_CMPNLESS,
23548 IX86_BUILTIN_CMPNGTSS,
23549 IX86_BUILTIN_CMPNGESS,
23550 IX86_BUILTIN_CMPORDSS,
23551 IX86_BUILTIN_CMPUNORDSS,
23552
23553 IX86_BUILTIN_COMIEQSS,
23554 IX86_BUILTIN_COMILTSS,
23555 IX86_BUILTIN_COMILESS,
23556 IX86_BUILTIN_COMIGTSS,
23557 IX86_BUILTIN_COMIGESS,
23558 IX86_BUILTIN_COMINEQSS,
23559 IX86_BUILTIN_UCOMIEQSS,
23560 IX86_BUILTIN_UCOMILTSS,
23561 IX86_BUILTIN_UCOMILESS,
23562 IX86_BUILTIN_UCOMIGTSS,
23563 IX86_BUILTIN_UCOMIGESS,
23564 IX86_BUILTIN_UCOMINEQSS,
23565
23566 IX86_BUILTIN_CVTPI2PS,
23567 IX86_BUILTIN_CVTPS2PI,
23568 IX86_BUILTIN_CVTSI2SS,
23569 IX86_BUILTIN_CVTSI642SS,
23570 IX86_BUILTIN_CVTSS2SI,
23571 IX86_BUILTIN_CVTSS2SI64,
23572 IX86_BUILTIN_CVTTPS2PI,
23573 IX86_BUILTIN_CVTTSS2SI,
23574 IX86_BUILTIN_CVTTSS2SI64,
23575
23576 IX86_BUILTIN_MAXPS,
23577 IX86_BUILTIN_MAXSS,
23578 IX86_BUILTIN_MINPS,
23579 IX86_BUILTIN_MINSS,
23580
23581 IX86_BUILTIN_LOADUPS,
23582 IX86_BUILTIN_STOREUPS,
23583 IX86_BUILTIN_MOVSS,
23584
23585 IX86_BUILTIN_MOVHLPS,
23586 IX86_BUILTIN_MOVLHPS,
23587 IX86_BUILTIN_LOADHPS,
23588 IX86_BUILTIN_LOADLPS,
23589 IX86_BUILTIN_STOREHPS,
23590 IX86_BUILTIN_STORELPS,
23591
23592 IX86_BUILTIN_MASKMOVQ,
23593 IX86_BUILTIN_MOVMSKPS,
23594 IX86_BUILTIN_PMOVMSKB,
23595
23596 IX86_BUILTIN_MOVNTPS,
23597 IX86_BUILTIN_MOVNTQ,
23598
23599 IX86_BUILTIN_LOADDQU,
23600 IX86_BUILTIN_STOREDQU,
23601
23602 IX86_BUILTIN_PACKSSWB,
23603 IX86_BUILTIN_PACKSSDW,
23604 IX86_BUILTIN_PACKUSWB,
23605
23606 IX86_BUILTIN_PADDB,
23607 IX86_BUILTIN_PADDW,
23608 IX86_BUILTIN_PADDD,
23609 IX86_BUILTIN_PADDQ,
23610 IX86_BUILTIN_PADDSB,
23611 IX86_BUILTIN_PADDSW,
23612 IX86_BUILTIN_PADDUSB,
23613 IX86_BUILTIN_PADDUSW,
23614 IX86_BUILTIN_PSUBB,
23615 IX86_BUILTIN_PSUBW,
23616 IX86_BUILTIN_PSUBD,
23617 IX86_BUILTIN_PSUBQ,
23618 IX86_BUILTIN_PSUBSB,
23619 IX86_BUILTIN_PSUBSW,
23620 IX86_BUILTIN_PSUBUSB,
23621 IX86_BUILTIN_PSUBUSW,
23622
23623 IX86_BUILTIN_PAND,
23624 IX86_BUILTIN_PANDN,
23625 IX86_BUILTIN_POR,
23626 IX86_BUILTIN_PXOR,
23627
23628 IX86_BUILTIN_PAVGB,
23629 IX86_BUILTIN_PAVGW,
23630
23631 IX86_BUILTIN_PCMPEQB,
23632 IX86_BUILTIN_PCMPEQW,
23633 IX86_BUILTIN_PCMPEQD,
23634 IX86_BUILTIN_PCMPGTB,
23635 IX86_BUILTIN_PCMPGTW,
23636 IX86_BUILTIN_PCMPGTD,
23637
23638 IX86_BUILTIN_PMADDWD,
23639
23640 IX86_BUILTIN_PMAXSW,
23641 IX86_BUILTIN_PMAXUB,
23642 IX86_BUILTIN_PMINSW,
23643 IX86_BUILTIN_PMINUB,
23644
23645 IX86_BUILTIN_PMULHUW,
23646 IX86_BUILTIN_PMULHW,
23647 IX86_BUILTIN_PMULLW,
23648
23649 IX86_BUILTIN_PSADBW,
23650 IX86_BUILTIN_PSHUFW,
23651
23652 IX86_BUILTIN_PSLLW,
23653 IX86_BUILTIN_PSLLD,
23654 IX86_BUILTIN_PSLLQ,
23655 IX86_BUILTIN_PSRAW,
23656 IX86_BUILTIN_PSRAD,
23657 IX86_BUILTIN_PSRLW,
23658 IX86_BUILTIN_PSRLD,
23659 IX86_BUILTIN_PSRLQ,
23660 IX86_BUILTIN_PSLLWI,
23661 IX86_BUILTIN_PSLLDI,
23662 IX86_BUILTIN_PSLLQI,
23663 IX86_BUILTIN_PSRAWI,
23664 IX86_BUILTIN_PSRADI,
23665 IX86_BUILTIN_PSRLWI,
23666 IX86_BUILTIN_PSRLDI,
23667 IX86_BUILTIN_PSRLQI,
23668
23669 IX86_BUILTIN_PUNPCKHBW,
23670 IX86_BUILTIN_PUNPCKHWD,
23671 IX86_BUILTIN_PUNPCKHDQ,
23672 IX86_BUILTIN_PUNPCKLBW,
23673 IX86_BUILTIN_PUNPCKLWD,
23674 IX86_BUILTIN_PUNPCKLDQ,
23675
23676 IX86_BUILTIN_SHUFPS,
23677
23678 IX86_BUILTIN_RCPPS,
23679 IX86_BUILTIN_RCPSS,
23680 IX86_BUILTIN_RSQRTPS,
23681 IX86_BUILTIN_RSQRTPS_NR,
23682 IX86_BUILTIN_RSQRTSS,
23683 IX86_BUILTIN_RSQRTF,
23684 IX86_BUILTIN_SQRTPS,
23685 IX86_BUILTIN_SQRTPS_NR,
23686 IX86_BUILTIN_SQRTSS,
23687
23688 IX86_BUILTIN_UNPCKHPS,
23689 IX86_BUILTIN_UNPCKLPS,
23690
23691 IX86_BUILTIN_ANDPS,
23692 IX86_BUILTIN_ANDNPS,
23693 IX86_BUILTIN_ORPS,
23694 IX86_BUILTIN_XORPS,
23695
23696 IX86_BUILTIN_EMMS,
23697 IX86_BUILTIN_LDMXCSR,
23698 IX86_BUILTIN_STMXCSR,
23699 IX86_BUILTIN_SFENCE,
23700
23701 /* 3DNow! Original */
23702 IX86_BUILTIN_FEMMS,
23703 IX86_BUILTIN_PAVGUSB,
23704 IX86_BUILTIN_PF2ID,
23705 IX86_BUILTIN_PFACC,
23706 IX86_BUILTIN_PFADD,
23707 IX86_BUILTIN_PFCMPEQ,
23708 IX86_BUILTIN_PFCMPGE,
23709 IX86_BUILTIN_PFCMPGT,
23710 IX86_BUILTIN_PFMAX,
23711 IX86_BUILTIN_PFMIN,
23712 IX86_BUILTIN_PFMUL,
23713 IX86_BUILTIN_PFRCP,
23714 IX86_BUILTIN_PFRCPIT1,
23715 IX86_BUILTIN_PFRCPIT2,
23716 IX86_BUILTIN_PFRSQIT1,
23717 IX86_BUILTIN_PFRSQRT,
23718 IX86_BUILTIN_PFSUB,
23719 IX86_BUILTIN_PFSUBR,
23720 IX86_BUILTIN_PI2FD,
23721 IX86_BUILTIN_PMULHRW,
23722
23723 /* 3DNow! Athlon Extensions */
23724 IX86_BUILTIN_PF2IW,
23725 IX86_BUILTIN_PFNACC,
23726 IX86_BUILTIN_PFPNACC,
23727 IX86_BUILTIN_PI2FW,
23728 IX86_BUILTIN_PSWAPDSI,
23729 IX86_BUILTIN_PSWAPDSF,
23730
23731 /* SSE2 */
23732 IX86_BUILTIN_ADDPD,
23733 IX86_BUILTIN_ADDSD,
23734 IX86_BUILTIN_DIVPD,
23735 IX86_BUILTIN_DIVSD,
23736 IX86_BUILTIN_MULPD,
23737 IX86_BUILTIN_MULSD,
23738 IX86_BUILTIN_SUBPD,
23739 IX86_BUILTIN_SUBSD,
23740
23741 IX86_BUILTIN_CMPEQPD,
23742 IX86_BUILTIN_CMPLTPD,
23743 IX86_BUILTIN_CMPLEPD,
23744 IX86_BUILTIN_CMPGTPD,
23745 IX86_BUILTIN_CMPGEPD,
23746 IX86_BUILTIN_CMPNEQPD,
23747 IX86_BUILTIN_CMPNLTPD,
23748 IX86_BUILTIN_CMPNLEPD,
23749 IX86_BUILTIN_CMPNGTPD,
23750 IX86_BUILTIN_CMPNGEPD,
23751 IX86_BUILTIN_CMPORDPD,
23752 IX86_BUILTIN_CMPUNORDPD,
23753 IX86_BUILTIN_CMPEQSD,
23754 IX86_BUILTIN_CMPLTSD,
23755 IX86_BUILTIN_CMPLESD,
23756 IX86_BUILTIN_CMPNEQSD,
23757 IX86_BUILTIN_CMPNLTSD,
23758 IX86_BUILTIN_CMPNLESD,
23759 IX86_BUILTIN_CMPORDSD,
23760 IX86_BUILTIN_CMPUNORDSD,
23761
23762 IX86_BUILTIN_COMIEQSD,
23763 IX86_BUILTIN_COMILTSD,
23764 IX86_BUILTIN_COMILESD,
23765 IX86_BUILTIN_COMIGTSD,
23766 IX86_BUILTIN_COMIGESD,
23767 IX86_BUILTIN_COMINEQSD,
23768 IX86_BUILTIN_UCOMIEQSD,
23769 IX86_BUILTIN_UCOMILTSD,
23770 IX86_BUILTIN_UCOMILESD,
23771 IX86_BUILTIN_UCOMIGTSD,
23772 IX86_BUILTIN_UCOMIGESD,
23773 IX86_BUILTIN_UCOMINEQSD,
23774
23775 IX86_BUILTIN_MAXPD,
23776 IX86_BUILTIN_MAXSD,
23777 IX86_BUILTIN_MINPD,
23778 IX86_BUILTIN_MINSD,
23779
23780 IX86_BUILTIN_ANDPD,
23781 IX86_BUILTIN_ANDNPD,
23782 IX86_BUILTIN_ORPD,
23783 IX86_BUILTIN_XORPD,
23784
23785 IX86_BUILTIN_SQRTPD,
23786 IX86_BUILTIN_SQRTSD,
23787
23788 IX86_BUILTIN_UNPCKHPD,
23789 IX86_BUILTIN_UNPCKLPD,
23790
23791 IX86_BUILTIN_SHUFPD,
23792
23793 IX86_BUILTIN_LOADUPD,
23794 IX86_BUILTIN_STOREUPD,
23795 IX86_BUILTIN_MOVSD,
23796
23797 IX86_BUILTIN_LOADHPD,
23798 IX86_BUILTIN_LOADLPD,
23799
23800 IX86_BUILTIN_CVTDQ2PD,
23801 IX86_BUILTIN_CVTDQ2PS,
23802
23803 IX86_BUILTIN_CVTPD2DQ,
23804 IX86_BUILTIN_CVTPD2PI,
23805 IX86_BUILTIN_CVTPD2PS,
23806 IX86_BUILTIN_CVTTPD2DQ,
23807 IX86_BUILTIN_CVTTPD2PI,
23808
23809 IX86_BUILTIN_CVTPI2PD,
23810 IX86_BUILTIN_CVTSI2SD,
23811 IX86_BUILTIN_CVTSI642SD,
23812
23813 IX86_BUILTIN_CVTSD2SI,
23814 IX86_BUILTIN_CVTSD2SI64,
23815 IX86_BUILTIN_CVTSD2SS,
23816 IX86_BUILTIN_CVTSS2SD,
23817 IX86_BUILTIN_CVTTSD2SI,
23818 IX86_BUILTIN_CVTTSD2SI64,
23819
23820 IX86_BUILTIN_CVTPS2DQ,
23821 IX86_BUILTIN_CVTPS2PD,
23822 IX86_BUILTIN_CVTTPS2DQ,
23823
23824 IX86_BUILTIN_MOVNTI,
23825 IX86_BUILTIN_MOVNTPD,
23826 IX86_BUILTIN_MOVNTDQ,
23827
23828 IX86_BUILTIN_MOVQ128,
23829
23830 /* SSE2 MMX */
23831 IX86_BUILTIN_MASKMOVDQU,
23832 IX86_BUILTIN_MOVMSKPD,
23833 IX86_BUILTIN_PMOVMSKB128,
23834
23835 IX86_BUILTIN_PACKSSWB128,
23836 IX86_BUILTIN_PACKSSDW128,
23837 IX86_BUILTIN_PACKUSWB128,
23838
23839 IX86_BUILTIN_PADDB128,
23840 IX86_BUILTIN_PADDW128,
23841 IX86_BUILTIN_PADDD128,
23842 IX86_BUILTIN_PADDQ128,
23843 IX86_BUILTIN_PADDSB128,
23844 IX86_BUILTIN_PADDSW128,
23845 IX86_BUILTIN_PADDUSB128,
23846 IX86_BUILTIN_PADDUSW128,
23847 IX86_BUILTIN_PSUBB128,
23848 IX86_BUILTIN_PSUBW128,
23849 IX86_BUILTIN_PSUBD128,
23850 IX86_BUILTIN_PSUBQ128,
23851 IX86_BUILTIN_PSUBSB128,
23852 IX86_BUILTIN_PSUBSW128,
23853 IX86_BUILTIN_PSUBUSB128,
23854 IX86_BUILTIN_PSUBUSW128,
23855
23856 IX86_BUILTIN_PAND128,
23857 IX86_BUILTIN_PANDN128,
23858 IX86_BUILTIN_POR128,
23859 IX86_BUILTIN_PXOR128,
23860
23861 IX86_BUILTIN_PAVGB128,
23862 IX86_BUILTIN_PAVGW128,
23863
23864 IX86_BUILTIN_PCMPEQB128,
23865 IX86_BUILTIN_PCMPEQW128,
23866 IX86_BUILTIN_PCMPEQD128,
23867 IX86_BUILTIN_PCMPGTB128,
23868 IX86_BUILTIN_PCMPGTW128,
23869 IX86_BUILTIN_PCMPGTD128,
23870
23871 IX86_BUILTIN_PMADDWD128,
23872
23873 IX86_BUILTIN_PMAXSW128,
23874 IX86_BUILTIN_PMAXUB128,
23875 IX86_BUILTIN_PMINSW128,
23876 IX86_BUILTIN_PMINUB128,
23877
23878 IX86_BUILTIN_PMULUDQ,
23879 IX86_BUILTIN_PMULUDQ128,
23880 IX86_BUILTIN_PMULHUW128,
23881 IX86_BUILTIN_PMULHW128,
23882 IX86_BUILTIN_PMULLW128,
23883
23884 IX86_BUILTIN_PSADBW128,
23885 IX86_BUILTIN_PSHUFHW,
23886 IX86_BUILTIN_PSHUFLW,
23887 IX86_BUILTIN_PSHUFD,
23888
23889 IX86_BUILTIN_PSLLDQI128,
23890 IX86_BUILTIN_PSLLWI128,
23891 IX86_BUILTIN_PSLLDI128,
23892 IX86_BUILTIN_PSLLQI128,
23893 IX86_BUILTIN_PSRAWI128,
23894 IX86_BUILTIN_PSRADI128,
23895 IX86_BUILTIN_PSRLDQI128,
23896 IX86_BUILTIN_PSRLWI128,
23897 IX86_BUILTIN_PSRLDI128,
23898 IX86_BUILTIN_PSRLQI128,
23899
23900 IX86_BUILTIN_PSLLDQ128,
23901 IX86_BUILTIN_PSLLW128,
23902 IX86_BUILTIN_PSLLD128,
23903 IX86_BUILTIN_PSLLQ128,
23904 IX86_BUILTIN_PSRAW128,
23905 IX86_BUILTIN_PSRAD128,
23906 IX86_BUILTIN_PSRLW128,
23907 IX86_BUILTIN_PSRLD128,
23908 IX86_BUILTIN_PSRLQ128,
23909
23910 IX86_BUILTIN_PUNPCKHBW128,
23911 IX86_BUILTIN_PUNPCKHWD128,
23912 IX86_BUILTIN_PUNPCKHDQ128,
23913 IX86_BUILTIN_PUNPCKHQDQ128,
23914 IX86_BUILTIN_PUNPCKLBW128,
23915 IX86_BUILTIN_PUNPCKLWD128,
23916 IX86_BUILTIN_PUNPCKLDQ128,
23917 IX86_BUILTIN_PUNPCKLQDQ128,
23918
23919 IX86_BUILTIN_CLFLUSH,
23920 IX86_BUILTIN_MFENCE,
23921 IX86_BUILTIN_LFENCE,
23922
23923 IX86_BUILTIN_BSRSI,
23924 IX86_BUILTIN_BSRDI,
23925 IX86_BUILTIN_RDPMC,
23926 IX86_BUILTIN_RDTSC,
23927 IX86_BUILTIN_RDTSCP,
23928 IX86_BUILTIN_ROLQI,
23929 IX86_BUILTIN_ROLHI,
23930 IX86_BUILTIN_RORQI,
23931 IX86_BUILTIN_RORHI,
23932
23933 /* SSE3. */
23934 IX86_BUILTIN_ADDSUBPS,
23935 IX86_BUILTIN_HADDPS,
23936 IX86_BUILTIN_HSUBPS,
23937 IX86_BUILTIN_MOVSHDUP,
23938 IX86_BUILTIN_MOVSLDUP,
23939 IX86_BUILTIN_ADDSUBPD,
23940 IX86_BUILTIN_HADDPD,
23941 IX86_BUILTIN_HSUBPD,
23942 IX86_BUILTIN_LDDQU,
23943
23944 IX86_BUILTIN_MONITOR,
23945 IX86_BUILTIN_MWAIT,
23946
23947 /* SSSE3. */
23948 IX86_BUILTIN_PHADDW,
23949 IX86_BUILTIN_PHADDD,
23950 IX86_BUILTIN_PHADDSW,
23951 IX86_BUILTIN_PHSUBW,
23952 IX86_BUILTIN_PHSUBD,
23953 IX86_BUILTIN_PHSUBSW,
23954 IX86_BUILTIN_PMADDUBSW,
23955 IX86_BUILTIN_PMULHRSW,
23956 IX86_BUILTIN_PSHUFB,
23957 IX86_BUILTIN_PSIGNB,
23958 IX86_BUILTIN_PSIGNW,
23959 IX86_BUILTIN_PSIGND,
23960 IX86_BUILTIN_PALIGNR,
23961 IX86_BUILTIN_PABSB,
23962 IX86_BUILTIN_PABSW,
23963 IX86_BUILTIN_PABSD,
23964
23965 IX86_BUILTIN_PHADDW128,
23966 IX86_BUILTIN_PHADDD128,
23967 IX86_BUILTIN_PHADDSW128,
23968 IX86_BUILTIN_PHSUBW128,
23969 IX86_BUILTIN_PHSUBD128,
23970 IX86_BUILTIN_PHSUBSW128,
23971 IX86_BUILTIN_PMADDUBSW128,
23972 IX86_BUILTIN_PMULHRSW128,
23973 IX86_BUILTIN_PSHUFB128,
23974 IX86_BUILTIN_PSIGNB128,
23975 IX86_BUILTIN_PSIGNW128,
23976 IX86_BUILTIN_PSIGND128,
23977 IX86_BUILTIN_PALIGNR128,
23978 IX86_BUILTIN_PABSB128,
23979 IX86_BUILTIN_PABSW128,
23980 IX86_BUILTIN_PABSD128,
23981
23982 /* AMDFAM10 - SSE4A New Instructions. */
23983 IX86_BUILTIN_MOVNTSD,
23984 IX86_BUILTIN_MOVNTSS,
23985 IX86_BUILTIN_EXTRQI,
23986 IX86_BUILTIN_EXTRQ,
23987 IX86_BUILTIN_INSERTQI,
23988 IX86_BUILTIN_INSERTQ,
23989
23990 /* SSE4.1. */
23991 IX86_BUILTIN_BLENDPD,
23992 IX86_BUILTIN_BLENDPS,
23993 IX86_BUILTIN_BLENDVPD,
23994 IX86_BUILTIN_BLENDVPS,
23995 IX86_BUILTIN_PBLENDVB128,
23996 IX86_BUILTIN_PBLENDW128,
23997
23998 IX86_BUILTIN_DPPD,
23999 IX86_BUILTIN_DPPS,
24000
24001 IX86_BUILTIN_INSERTPS128,
24002
24003 IX86_BUILTIN_MOVNTDQA,
24004 IX86_BUILTIN_MPSADBW128,
24005 IX86_BUILTIN_PACKUSDW128,
24006 IX86_BUILTIN_PCMPEQQ,
24007 IX86_BUILTIN_PHMINPOSUW128,
24008
24009 IX86_BUILTIN_PMAXSB128,
24010 IX86_BUILTIN_PMAXSD128,
24011 IX86_BUILTIN_PMAXUD128,
24012 IX86_BUILTIN_PMAXUW128,
24013
24014 IX86_BUILTIN_PMINSB128,
24015 IX86_BUILTIN_PMINSD128,
24016 IX86_BUILTIN_PMINUD128,
24017 IX86_BUILTIN_PMINUW128,
24018
24019 IX86_BUILTIN_PMOVSXBW128,
24020 IX86_BUILTIN_PMOVSXBD128,
24021 IX86_BUILTIN_PMOVSXBQ128,
24022 IX86_BUILTIN_PMOVSXWD128,
24023 IX86_BUILTIN_PMOVSXWQ128,
24024 IX86_BUILTIN_PMOVSXDQ128,
24025
24026 IX86_BUILTIN_PMOVZXBW128,
24027 IX86_BUILTIN_PMOVZXBD128,
24028 IX86_BUILTIN_PMOVZXBQ128,
24029 IX86_BUILTIN_PMOVZXWD128,
24030 IX86_BUILTIN_PMOVZXWQ128,
24031 IX86_BUILTIN_PMOVZXDQ128,
24032
24033 IX86_BUILTIN_PMULDQ128,
24034 IX86_BUILTIN_PMULLD128,
24035
24036 IX86_BUILTIN_ROUNDPD,
24037 IX86_BUILTIN_ROUNDPS,
24038 IX86_BUILTIN_ROUNDSD,
24039 IX86_BUILTIN_ROUNDSS,
24040
24041 IX86_BUILTIN_FLOORPD,
24042 IX86_BUILTIN_CEILPD,
24043 IX86_BUILTIN_TRUNCPD,
24044 IX86_BUILTIN_RINTPD,
24045 IX86_BUILTIN_FLOORPS,
24046 IX86_BUILTIN_CEILPS,
24047 IX86_BUILTIN_TRUNCPS,
24048 IX86_BUILTIN_RINTPS,
24049
24050 IX86_BUILTIN_PTESTZ,
24051 IX86_BUILTIN_PTESTC,
24052 IX86_BUILTIN_PTESTNZC,
24053
24054 IX86_BUILTIN_VEC_INIT_V2SI,
24055 IX86_BUILTIN_VEC_INIT_V4HI,
24056 IX86_BUILTIN_VEC_INIT_V8QI,
24057 IX86_BUILTIN_VEC_EXT_V2DF,
24058 IX86_BUILTIN_VEC_EXT_V2DI,
24059 IX86_BUILTIN_VEC_EXT_V4SF,
24060 IX86_BUILTIN_VEC_EXT_V4SI,
24061 IX86_BUILTIN_VEC_EXT_V8HI,
24062 IX86_BUILTIN_VEC_EXT_V2SI,
24063 IX86_BUILTIN_VEC_EXT_V4HI,
24064 IX86_BUILTIN_VEC_EXT_V16QI,
24065 IX86_BUILTIN_VEC_SET_V2DI,
24066 IX86_BUILTIN_VEC_SET_V4SF,
24067 IX86_BUILTIN_VEC_SET_V4SI,
24068 IX86_BUILTIN_VEC_SET_V8HI,
24069 IX86_BUILTIN_VEC_SET_V4HI,
24070 IX86_BUILTIN_VEC_SET_V16QI,
24071
24072 IX86_BUILTIN_VEC_PACK_SFIX,
24073
24074 /* SSE4.2. */
24075 IX86_BUILTIN_CRC32QI,
24076 IX86_BUILTIN_CRC32HI,
24077 IX86_BUILTIN_CRC32SI,
24078 IX86_BUILTIN_CRC32DI,
24079
24080 IX86_BUILTIN_PCMPESTRI128,
24081 IX86_BUILTIN_PCMPESTRM128,
24082 IX86_BUILTIN_PCMPESTRA128,
24083 IX86_BUILTIN_PCMPESTRC128,
24084 IX86_BUILTIN_PCMPESTRO128,
24085 IX86_BUILTIN_PCMPESTRS128,
24086 IX86_BUILTIN_PCMPESTRZ128,
24087 IX86_BUILTIN_PCMPISTRI128,
24088 IX86_BUILTIN_PCMPISTRM128,
24089 IX86_BUILTIN_PCMPISTRA128,
24090 IX86_BUILTIN_PCMPISTRC128,
24091 IX86_BUILTIN_PCMPISTRO128,
24092 IX86_BUILTIN_PCMPISTRS128,
24093 IX86_BUILTIN_PCMPISTRZ128,
24094
24095 IX86_BUILTIN_PCMPGTQ,
24096
24097 /* AES instructions */
24098 IX86_BUILTIN_AESENC128,
24099 IX86_BUILTIN_AESENCLAST128,
24100 IX86_BUILTIN_AESDEC128,
24101 IX86_BUILTIN_AESDECLAST128,
24102 IX86_BUILTIN_AESIMC128,
24103 IX86_BUILTIN_AESKEYGENASSIST128,
24104
24105 /* PCLMUL instruction */
24106 IX86_BUILTIN_PCLMULQDQ128,
24107
24108 /* AVX */
24109 IX86_BUILTIN_ADDPD256,
24110 IX86_BUILTIN_ADDPS256,
24111 IX86_BUILTIN_ADDSUBPD256,
24112 IX86_BUILTIN_ADDSUBPS256,
24113 IX86_BUILTIN_ANDPD256,
24114 IX86_BUILTIN_ANDPS256,
24115 IX86_BUILTIN_ANDNPD256,
24116 IX86_BUILTIN_ANDNPS256,
24117 IX86_BUILTIN_BLENDPD256,
24118 IX86_BUILTIN_BLENDPS256,
24119 IX86_BUILTIN_BLENDVPD256,
24120 IX86_BUILTIN_BLENDVPS256,
24121 IX86_BUILTIN_DIVPD256,
24122 IX86_BUILTIN_DIVPS256,
24123 IX86_BUILTIN_DPPS256,
24124 IX86_BUILTIN_HADDPD256,
24125 IX86_BUILTIN_HADDPS256,
24126 IX86_BUILTIN_HSUBPD256,
24127 IX86_BUILTIN_HSUBPS256,
24128 IX86_BUILTIN_MAXPD256,
24129 IX86_BUILTIN_MAXPS256,
24130 IX86_BUILTIN_MINPD256,
24131 IX86_BUILTIN_MINPS256,
24132 IX86_BUILTIN_MULPD256,
24133 IX86_BUILTIN_MULPS256,
24134 IX86_BUILTIN_ORPD256,
24135 IX86_BUILTIN_ORPS256,
24136 IX86_BUILTIN_SHUFPD256,
24137 IX86_BUILTIN_SHUFPS256,
24138 IX86_BUILTIN_SUBPD256,
24139 IX86_BUILTIN_SUBPS256,
24140 IX86_BUILTIN_XORPD256,
24141 IX86_BUILTIN_XORPS256,
24142 IX86_BUILTIN_CMPSD,
24143 IX86_BUILTIN_CMPSS,
24144 IX86_BUILTIN_CMPPD,
24145 IX86_BUILTIN_CMPPS,
24146 IX86_BUILTIN_CMPPD256,
24147 IX86_BUILTIN_CMPPS256,
24148 IX86_BUILTIN_CVTDQ2PD256,
24149 IX86_BUILTIN_CVTDQ2PS256,
24150 IX86_BUILTIN_CVTPD2PS256,
24151 IX86_BUILTIN_CVTPS2DQ256,
24152 IX86_BUILTIN_CVTPS2PD256,
24153 IX86_BUILTIN_CVTTPD2DQ256,
24154 IX86_BUILTIN_CVTPD2DQ256,
24155 IX86_BUILTIN_CVTTPS2DQ256,
24156 IX86_BUILTIN_EXTRACTF128PD256,
24157 IX86_BUILTIN_EXTRACTF128PS256,
24158 IX86_BUILTIN_EXTRACTF128SI256,
24159 IX86_BUILTIN_VZEROALL,
24160 IX86_BUILTIN_VZEROUPPER,
24161 IX86_BUILTIN_VPERMILVARPD,
24162 IX86_BUILTIN_VPERMILVARPS,
24163 IX86_BUILTIN_VPERMILVARPD256,
24164 IX86_BUILTIN_VPERMILVARPS256,
24165 IX86_BUILTIN_VPERMILPD,
24166 IX86_BUILTIN_VPERMILPS,
24167 IX86_BUILTIN_VPERMILPD256,
24168 IX86_BUILTIN_VPERMILPS256,
24169 IX86_BUILTIN_VPERMIL2PD,
24170 IX86_BUILTIN_VPERMIL2PS,
24171 IX86_BUILTIN_VPERMIL2PD256,
24172 IX86_BUILTIN_VPERMIL2PS256,
24173 IX86_BUILTIN_VPERM2F128PD256,
24174 IX86_BUILTIN_VPERM2F128PS256,
24175 IX86_BUILTIN_VPERM2F128SI256,
24176 IX86_BUILTIN_VBROADCASTSS,
24177 IX86_BUILTIN_VBROADCASTSD256,
24178 IX86_BUILTIN_VBROADCASTSS256,
24179 IX86_BUILTIN_VBROADCASTPD256,
24180 IX86_BUILTIN_VBROADCASTPS256,
24181 IX86_BUILTIN_VINSERTF128PD256,
24182 IX86_BUILTIN_VINSERTF128PS256,
24183 IX86_BUILTIN_VINSERTF128SI256,
24184 IX86_BUILTIN_LOADUPD256,
24185 IX86_BUILTIN_LOADUPS256,
24186 IX86_BUILTIN_STOREUPD256,
24187 IX86_BUILTIN_STOREUPS256,
24188 IX86_BUILTIN_LDDQU256,
24189 IX86_BUILTIN_MOVNTDQ256,
24190 IX86_BUILTIN_MOVNTPD256,
24191 IX86_BUILTIN_MOVNTPS256,
24192 IX86_BUILTIN_LOADDQU256,
24193 IX86_BUILTIN_STOREDQU256,
24194 IX86_BUILTIN_MASKLOADPD,
24195 IX86_BUILTIN_MASKLOADPS,
24196 IX86_BUILTIN_MASKSTOREPD,
24197 IX86_BUILTIN_MASKSTOREPS,
24198 IX86_BUILTIN_MASKLOADPD256,
24199 IX86_BUILTIN_MASKLOADPS256,
24200 IX86_BUILTIN_MASKSTOREPD256,
24201 IX86_BUILTIN_MASKSTOREPS256,
24202 IX86_BUILTIN_MOVSHDUP256,
24203 IX86_BUILTIN_MOVSLDUP256,
24204 IX86_BUILTIN_MOVDDUP256,
24205
24206 IX86_BUILTIN_SQRTPD256,
24207 IX86_BUILTIN_SQRTPS256,
24208 IX86_BUILTIN_SQRTPS_NR256,
24209 IX86_BUILTIN_RSQRTPS256,
24210 IX86_BUILTIN_RSQRTPS_NR256,
24211
24212 IX86_BUILTIN_RCPPS256,
24213
24214 IX86_BUILTIN_ROUNDPD256,
24215 IX86_BUILTIN_ROUNDPS256,
24216
24217 IX86_BUILTIN_FLOORPD256,
24218 IX86_BUILTIN_CEILPD256,
24219 IX86_BUILTIN_TRUNCPD256,
24220 IX86_BUILTIN_RINTPD256,
24221 IX86_BUILTIN_FLOORPS256,
24222 IX86_BUILTIN_CEILPS256,
24223 IX86_BUILTIN_TRUNCPS256,
24224 IX86_BUILTIN_RINTPS256,
24225
24226 IX86_BUILTIN_UNPCKHPD256,
24227 IX86_BUILTIN_UNPCKLPD256,
24228 IX86_BUILTIN_UNPCKHPS256,
24229 IX86_BUILTIN_UNPCKLPS256,
24230
24231 IX86_BUILTIN_SI256_SI,
24232 IX86_BUILTIN_PS256_PS,
24233 IX86_BUILTIN_PD256_PD,
24234 IX86_BUILTIN_SI_SI256,
24235 IX86_BUILTIN_PS_PS256,
24236 IX86_BUILTIN_PD_PD256,
24237
24238 IX86_BUILTIN_VTESTZPD,
24239 IX86_BUILTIN_VTESTCPD,
24240 IX86_BUILTIN_VTESTNZCPD,
24241 IX86_BUILTIN_VTESTZPS,
24242 IX86_BUILTIN_VTESTCPS,
24243 IX86_BUILTIN_VTESTNZCPS,
24244 IX86_BUILTIN_VTESTZPD256,
24245 IX86_BUILTIN_VTESTCPD256,
24246 IX86_BUILTIN_VTESTNZCPD256,
24247 IX86_BUILTIN_VTESTZPS256,
24248 IX86_BUILTIN_VTESTCPS256,
24249 IX86_BUILTIN_VTESTNZCPS256,
24250 IX86_BUILTIN_PTESTZ256,
24251 IX86_BUILTIN_PTESTC256,
24252 IX86_BUILTIN_PTESTNZC256,
24253
24254 IX86_BUILTIN_MOVMSKPD256,
24255 IX86_BUILTIN_MOVMSKPS256,
24256
24257 /* TFmode support builtins. */
24258 IX86_BUILTIN_INFQ,
24259 IX86_BUILTIN_HUGE_VALQ,
24260 IX86_BUILTIN_FABSQ,
24261 IX86_BUILTIN_COPYSIGNQ,
24262
24263 /* Vectorizer support builtins. */
24264 IX86_BUILTIN_CPYSGNPS,
24265 IX86_BUILTIN_CPYSGNPD,
24266 IX86_BUILTIN_CPYSGNPS256,
24267 IX86_BUILTIN_CPYSGNPD256,
24268
24269 IX86_BUILTIN_CVTUDQ2PS,
24270
24271 IX86_BUILTIN_VEC_PERM_V2DF,
24272 IX86_BUILTIN_VEC_PERM_V4SF,
24273 IX86_BUILTIN_VEC_PERM_V2DI,
24274 IX86_BUILTIN_VEC_PERM_V4SI,
24275 IX86_BUILTIN_VEC_PERM_V8HI,
24276 IX86_BUILTIN_VEC_PERM_V16QI,
24277 IX86_BUILTIN_VEC_PERM_V2DI_U,
24278 IX86_BUILTIN_VEC_PERM_V4SI_U,
24279 IX86_BUILTIN_VEC_PERM_V8HI_U,
24280 IX86_BUILTIN_VEC_PERM_V16QI_U,
24281 IX86_BUILTIN_VEC_PERM_V4DF,
24282 IX86_BUILTIN_VEC_PERM_V8SF,
24283
24284 /* FMA4 and XOP instructions. */
24285 IX86_BUILTIN_VFMADDSS,
24286 IX86_BUILTIN_VFMADDSD,
24287 IX86_BUILTIN_VFMADDPS,
24288 IX86_BUILTIN_VFMADDPD,
24289 IX86_BUILTIN_VFMADDPS256,
24290 IX86_BUILTIN_VFMADDPD256,
24291 IX86_BUILTIN_VFMADDSUBPS,
24292 IX86_BUILTIN_VFMADDSUBPD,
24293 IX86_BUILTIN_VFMADDSUBPS256,
24294 IX86_BUILTIN_VFMADDSUBPD256,
24295
24296 IX86_BUILTIN_VPCMOV,
24297 IX86_BUILTIN_VPCMOV_V2DI,
24298 IX86_BUILTIN_VPCMOV_V4SI,
24299 IX86_BUILTIN_VPCMOV_V8HI,
24300 IX86_BUILTIN_VPCMOV_V16QI,
24301 IX86_BUILTIN_VPCMOV_V4SF,
24302 IX86_BUILTIN_VPCMOV_V2DF,
24303 IX86_BUILTIN_VPCMOV256,
24304 IX86_BUILTIN_VPCMOV_V4DI256,
24305 IX86_BUILTIN_VPCMOV_V8SI256,
24306 IX86_BUILTIN_VPCMOV_V16HI256,
24307 IX86_BUILTIN_VPCMOV_V32QI256,
24308 IX86_BUILTIN_VPCMOV_V8SF256,
24309 IX86_BUILTIN_VPCMOV_V4DF256,
24310
24311 IX86_BUILTIN_VPPERM,
24312
24313 IX86_BUILTIN_VPMACSSWW,
24314 IX86_BUILTIN_VPMACSWW,
24315 IX86_BUILTIN_VPMACSSWD,
24316 IX86_BUILTIN_VPMACSWD,
24317 IX86_BUILTIN_VPMACSSDD,
24318 IX86_BUILTIN_VPMACSDD,
24319 IX86_BUILTIN_VPMACSSDQL,
24320 IX86_BUILTIN_VPMACSSDQH,
24321 IX86_BUILTIN_VPMACSDQL,
24322 IX86_BUILTIN_VPMACSDQH,
24323 IX86_BUILTIN_VPMADCSSWD,
24324 IX86_BUILTIN_VPMADCSWD,
24325
24326 IX86_BUILTIN_VPHADDBW,
24327 IX86_BUILTIN_VPHADDBD,
24328 IX86_BUILTIN_VPHADDBQ,
24329 IX86_BUILTIN_VPHADDWD,
24330 IX86_BUILTIN_VPHADDWQ,
24331 IX86_BUILTIN_VPHADDDQ,
24332 IX86_BUILTIN_VPHADDUBW,
24333 IX86_BUILTIN_VPHADDUBD,
24334 IX86_BUILTIN_VPHADDUBQ,
24335 IX86_BUILTIN_VPHADDUWD,
24336 IX86_BUILTIN_VPHADDUWQ,
24337 IX86_BUILTIN_VPHADDUDQ,
24338 IX86_BUILTIN_VPHSUBBW,
24339 IX86_BUILTIN_VPHSUBWD,
24340 IX86_BUILTIN_VPHSUBDQ,
24341
24342 IX86_BUILTIN_VPROTB,
24343 IX86_BUILTIN_VPROTW,
24344 IX86_BUILTIN_VPROTD,
24345 IX86_BUILTIN_VPROTQ,
24346 IX86_BUILTIN_VPROTB_IMM,
24347 IX86_BUILTIN_VPROTW_IMM,
24348 IX86_BUILTIN_VPROTD_IMM,
24349 IX86_BUILTIN_VPROTQ_IMM,
24350
24351 IX86_BUILTIN_VPSHLB,
24352 IX86_BUILTIN_VPSHLW,
24353 IX86_BUILTIN_VPSHLD,
24354 IX86_BUILTIN_VPSHLQ,
24355 IX86_BUILTIN_VPSHAB,
24356 IX86_BUILTIN_VPSHAW,
24357 IX86_BUILTIN_VPSHAD,
24358 IX86_BUILTIN_VPSHAQ,
24359
24360 IX86_BUILTIN_VFRCZSS,
24361 IX86_BUILTIN_VFRCZSD,
24362 IX86_BUILTIN_VFRCZPS,
24363 IX86_BUILTIN_VFRCZPD,
24364 IX86_BUILTIN_VFRCZPS256,
24365 IX86_BUILTIN_VFRCZPD256,
24366
24367 IX86_BUILTIN_VPCOMEQUB,
24368 IX86_BUILTIN_VPCOMNEUB,
24369 IX86_BUILTIN_VPCOMLTUB,
24370 IX86_BUILTIN_VPCOMLEUB,
24371 IX86_BUILTIN_VPCOMGTUB,
24372 IX86_BUILTIN_VPCOMGEUB,
24373 IX86_BUILTIN_VPCOMFALSEUB,
24374 IX86_BUILTIN_VPCOMTRUEUB,
24375
24376 IX86_BUILTIN_VPCOMEQUW,
24377 IX86_BUILTIN_VPCOMNEUW,
24378 IX86_BUILTIN_VPCOMLTUW,
24379 IX86_BUILTIN_VPCOMLEUW,
24380 IX86_BUILTIN_VPCOMGTUW,
24381 IX86_BUILTIN_VPCOMGEUW,
24382 IX86_BUILTIN_VPCOMFALSEUW,
24383 IX86_BUILTIN_VPCOMTRUEUW,
24384
24385 IX86_BUILTIN_VPCOMEQUD,
24386 IX86_BUILTIN_VPCOMNEUD,
24387 IX86_BUILTIN_VPCOMLTUD,
24388 IX86_BUILTIN_VPCOMLEUD,
24389 IX86_BUILTIN_VPCOMGTUD,
24390 IX86_BUILTIN_VPCOMGEUD,
24391 IX86_BUILTIN_VPCOMFALSEUD,
24392 IX86_BUILTIN_VPCOMTRUEUD,
24393
24394 IX86_BUILTIN_VPCOMEQUQ,
24395 IX86_BUILTIN_VPCOMNEUQ,
24396 IX86_BUILTIN_VPCOMLTUQ,
24397 IX86_BUILTIN_VPCOMLEUQ,
24398 IX86_BUILTIN_VPCOMGTUQ,
24399 IX86_BUILTIN_VPCOMGEUQ,
24400 IX86_BUILTIN_VPCOMFALSEUQ,
24401 IX86_BUILTIN_VPCOMTRUEUQ,
24402
24403 IX86_BUILTIN_VPCOMEQB,
24404 IX86_BUILTIN_VPCOMNEB,
24405 IX86_BUILTIN_VPCOMLTB,
24406 IX86_BUILTIN_VPCOMLEB,
24407 IX86_BUILTIN_VPCOMGTB,
24408 IX86_BUILTIN_VPCOMGEB,
24409 IX86_BUILTIN_VPCOMFALSEB,
24410 IX86_BUILTIN_VPCOMTRUEB,
24411
24412 IX86_BUILTIN_VPCOMEQW,
24413 IX86_BUILTIN_VPCOMNEW,
24414 IX86_BUILTIN_VPCOMLTW,
24415 IX86_BUILTIN_VPCOMLEW,
24416 IX86_BUILTIN_VPCOMGTW,
24417 IX86_BUILTIN_VPCOMGEW,
24418 IX86_BUILTIN_VPCOMFALSEW,
24419 IX86_BUILTIN_VPCOMTRUEW,
24420
24421 IX86_BUILTIN_VPCOMEQD,
24422 IX86_BUILTIN_VPCOMNED,
24423 IX86_BUILTIN_VPCOMLTD,
24424 IX86_BUILTIN_VPCOMLED,
24425 IX86_BUILTIN_VPCOMGTD,
24426 IX86_BUILTIN_VPCOMGED,
24427 IX86_BUILTIN_VPCOMFALSED,
24428 IX86_BUILTIN_VPCOMTRUED,
24429
24430 IX86_BUILTIN_VPCOMEQQ,
24431 IX86_BUILTIN_VPCOMNEQ,
24432 IX86_BUILTIN_VPCOMLTQ,
24433 IX86_BUILTIN_VPCOMLEQ,
24434 IX86_BUILTIN_VPCOMGTQ,
24435 IX86_BUILTIN_VPCOMGEQ,
24436 IX86_BUILTIN_VPCOMFALSEQ,
24437 IX86_BUILTIN_VPCOMTRUEQ,
24438
24439 /* LWP instructions. */
24440 IX86_BUILTIN_LLWPCB,
24441 IX86_BUILTIN_SLWPCB,
24442 IX86_BUILTIN_LWPVAL32,
24443 IX86_BUILTIN_LWPVAL64,
24444 IX86_BUILTIN_LWPINS32,
24445 IX86_BUILTIN_LWPINS64,
24446
24447 IX86_BUILTIN_CLZS,
24448
24449 /* BMI instructions. */
24450 IX86_BUILTIN_BEXTR32,
24451 IX86_BUILTIN_BEXTR64,
24452 IX86_BUILTIN_CTZS,
24453
24454 /* TBM instructions. */
24455 IX86_BUILTIN_BEXTRI32,
24456 IX86_BUILTIN_BEXTRI64,
24457
24459 /* FSGSBASE instructions. */
24460 IX86_BUILTIN_RDFSBASE32,
24461 IX86_BUILTIN_RDFSBASE64,
24462 IX86_BUILTIN_RDGSBASE32,
24463 IX86_BUILTIN_RDGSBASE64,
24464 IX86_BUILTIN_WRFSBASE32,
24465 IX86_BUILTIN_WRFSBASE64,
24466 IX86_BUILTIN_WRGSBASE32,
24467 IX86_BUILTIN_WRGSBASE64,
24468
24469 /* RDRND instructions. */
24470 IX86_BUILTIN_RDRAND16_STEP,
24471 IX86_BUILTIN_RDRAND32_STEP,
24472 IX86_BUILTIN_RDRAND64_STEP,
24473
24474 /* F16C instructions. */
24475 IX86_BUILTIN_CVTPH2PS,
24476 IX86_BUILTIN_CVTPH2PS256,
24477 IX86_BUILTIN_CVTPS2PH,
24478 IX86_BUILTIN_CVTPS2PH256,
24479
24480 /* CFString built-in for Darwin. */
24481 IX86_BUILTIN_CFSTRING,
24482
24483 IX86_BUILTIN_MAX
24484 };
24485
24486 /* Table for the ix86 builtin decls. */
24487 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
24488
24489 /* Table of all of the builtin functions that are possible with different ISAs
24490 but are waiting to be built until a function is declared to use that
24491 ISA. */
24492 struct builtin_isa {
24493 const char *name; /* function name */
24494 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
24495 int isa; /* isa_flags this builtin is defined for */
24496 bool const_p; /* true if the declaration is constant */
24497 bool set_and_not_built_p; /* true if the builtin was deferred: recorded but not yet built */
24498 };
24499
24500 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
24501
24502
24503 /* Add an ix86 target builtin function with CODE, NAME and TCODE.  Save MASK,
24504 the isa_flags this builtin requires, in the ix86_builtins_isa array.  Store the
24505 function decl in the ix86_builtins array.  Return the function decl, or
24506 NULL_TREE if the builtin was not added.
24507
24508 If the front end has a special hook for builtin functions, delay adding
24509 builtin functions that aren't in the current ISA until the ISA is changed
24510 with function-specific optimization. Doing so can save about 300K for the
24511 default compiler. When the builtin is expanded, check at that time whether
24512 it is valid.
24513
24514 If the front end doesn't have a special hook, record all builtins, even
24515 those not in the current ISA, in case the user uses function-specific
24516 options for a different ISA, so that we don't get scope errors if a
24517 builtin is added in the middle of a function scope. */
24518
24519 static inline tree
24520 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
24521 enum ix86_builtins code)
24522 {
24523 tree decl = NULL_TREE;
24524
24525 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
24526 {
24527 ix86_builtins_isa[(int) code].isa = mask;
24528
24529 mask &= ~OPTION_MASK_ISA_64BIT;
24530 if (mask == 0
24531 || (mask & ix86_isa_flags) != 0
24532 || (lang_hooks.builtin_function
24533 == lang_hooks.builtin_function_ext_scope))
24534
24535 {
24536 tree type = ix86_get_builtin_func_type (tcode);
24537 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
24538 NULL, NULL_TREE);
24539 ix86_builtins[(int) code] = decl;
24540 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
24541 }
24542 else
24543 {
24544 ix86_builtins[(int) code] = NULL_TREE;
24545 ix86_builtins_isa[(int) code].tcode = tcode;
24546 ix86_builtins_isa[(int) code].name = name;
24547 ix86_builtins_isa[(int) code].const_p = false;
24548 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
24549 }
24550 }
24551
24552 return decl;
24553 }
24554
24555 /* Like def_builtin, but also marks the function decl "const". */
24556
24557 static inline tree
24558 def_builtin_const (int mask, const char *name,
24559 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
24560 {
24561 tree decl = def_builtin (mask, name, tcode, code);
24562 if (decl)
24563 TREE_READONLY (decl) = 1;
24564 else
24565 ix86_builtins_isa[(int) code].const_p = true;
24566
24567 return decl;
24568 }
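/* For illustration (an editor's sketch, not a call site copied from this
   file): a typical registration through the helpers above, using a name,
   type code and builtin code that appear in the tables below, would look
   roughly like

     def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
                        V4SF_FTYPE_V4SF_V4SF, IX86_BUILTIN_ADDPS);

   If SSE is not enabled in the current isa_flags and the front end provides
   the ext-scope hook, this merely records the request in ix86_builtins_isa;
   the decl is built later by ix86_add_new_builtins.  */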
24569
24570 /* Add any new builtin functions for a given ISA that may not have been
24571 declared. This saves a bit of space compared to adding all of the
24572 declarations to the tree whether or not they are used. */
24573
24574 static void
24575 ix86_add_new_builtins (int isa)
24576 {
24577 int i;
24578
24579 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
24580 {
24581 if ((ix86_builtins_isa[i].isa & isa) != 0
24582 && ix86_builtins_isa[i].set_and_not_built_p)
24583 {
24584 tree decl, type;
24585
24586 /* Don't define the builtin again. */
24587 ix86_builtins_isa[i].set_and_not_built_p = false;
24588
24589 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
24590 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
24591 type, i, BUILT_IN_MD, NULL,
24592 NULL_TREE);
24593
24594 ix86_builtins[i] = decl;
24595 if (ix86_builtins_isa[i].const_p)
24596 TREE_READONLY (decl) = 1;
24597 }
24598 }
24599 }
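/* For illustration (an editor's sketch of the intended use, not original
   code): the deferred path above matters for function-specific ISAs.  When
   a unit is compiled without AVX but contains something like

     __attribute__ ((target ("avx"))) void f (void) { ... }

   the AVX builtins are at first only recorded in ix86_builtins_isa; once the
   target attribute switches isa_flags to include OPTION_MASK_ISA_AVX, the
   pending decls can be materialized here via add_builtin_function_ext_scope.  */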
24600
24601 /* Bits for builtin_description.flag. */
24602
24603 /* Set when we don't support the comparison natively, and should
24604 swap the comparison operands in order to support it. */
24605 #define BUILTIN_DESC_SWAP_OPERANDS 1
24606
24607 struct builtin_description
24608 {
24609 const unsigned int mask;
24610 const enum insn_code icode;
24611 const char *const name;
24612 const enum ix86_builtins code;
24613 const enum rtx_code comparison;
24614 const int flag;
24615 };
24616
24617 static const struct builtin_description bdesc_comi[] =
24618 {
24619 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
24620 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
24621 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
24622 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
24623 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
24624 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
24625 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
24626 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
24627 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
24628 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
24629 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
24630 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
24631 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
24632 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
24633 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
24634 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
24635 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
24636 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
24637 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
24638 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
24639 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
24640 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
24641 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
24642 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
24643 };
24644
24645 static const struct builtin_description bdesc_pcmpestr[] =
24646 {
24647 /* SSE4.2 */
24648 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
24649 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
24650 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
24651 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
24652 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
24653 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
24654 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
24655 };
24656
24657 static const struct builtin_description bdesc_pcmpistr[] =
24658 {
24659 /* SSE4.2 */
24660 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
24661 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
24662 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
24663 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
24664 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
24665 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
24666 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
24667 };
24668
24669 /* Special builtins with variable number of arguments. */
24670 static const struct builtin_description bdesc_special_args[] =
24671 {
24672 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
24673 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
24674
24675 /* MMX */
24676 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
24677
24678 /* 3DNow! */
24679 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
24680
24681 /* SSE */
24682 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24683 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24684 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
24685
24686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
24687 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
24688 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
24689 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
24690
24691 /* SSE or 3DNow!A */
24692 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24693 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
24694
24695 /* SSE2 */
24696 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24697 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24698 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24699 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
24700 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24701 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
24702 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
24703 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
24704 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
24705
24706 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
24707 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
24708
24709 /* SSE3 */
24710 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
24711
24712 /* SSE4.1 */
24713 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
24714
24715 /* SSE4A */
24716 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24717 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24718
24719 /* AVX */
24720 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
24721 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
24722
24723 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
24724 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
24725 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
24726 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
24727 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
24728
24729 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
24730 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
24731 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
24732 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
24733 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
24734 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
24735 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
24736
24737 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
24738 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
24739 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
24740
24741 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
24742 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
24743 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
24744 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
24745 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
24746 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
24747 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
24748 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },
24749
24750 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
24751 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
24752 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
24753 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
24754 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
24755 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
24756
24757 /* FSGSBASE */
24758 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
24759 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
24760 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
24761 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
24762 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
24763 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
24764 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
24765 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
24766 };
24767
24768 /* Builtins with variable number of arguments. */
24769 static const struct builtin_description bdesc_args[] =
24770 {
24771 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
24772 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
24773 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
24774 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24775 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24776 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24777 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24778
24779 /* MMX */
24780 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24781 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24782 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24783 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24784 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24785 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24786
24787 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24788 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24789 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24790 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24791 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24792 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24793 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24794 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24795
24796 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24797 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24798
24799 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24800 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24801 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24802 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24803
24804 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24805 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24806 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24807 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24808 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24809 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24810
24811 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24812 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24813 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24814 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24815 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24816 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24817
24818 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24819 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
24820 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24821
24822 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
24823
24824 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24825 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24826 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24827 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24828 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24829 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24830
24831 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24832 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24833 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24834 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24835 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24836 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24837
24838 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24839 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24840 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24841 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24842
24843 /* 3DNow! */
24844 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24845 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24846 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24847 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24848
24849 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24850 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24851 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24852 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24853 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24854 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24855 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24856 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24857 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24858 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24859 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24860 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24861 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24862 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24863 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24864
24865 /* 3DNow!A */
24866 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24867 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24868 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
24869 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24870 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24871 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24872
24873 /* SSE */
24874 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
24875 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24876 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24877 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24878 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24879 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24880 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24881 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24882 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24883 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24884 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24885 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24886
24887 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
24888
24889 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24890 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24891 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24892 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24893 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24894 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24895 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24896 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24897
24898 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24899 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24900 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24901 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24902 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24903 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24904 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24905 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24906 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24907 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24908 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
24909 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24910 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24911 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24912 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24913 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24914 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24915 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24916 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24917 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24918 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24919 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24920
24921 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24922 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24923 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24924 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24925
24926 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24927 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24928 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24929 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24930
24931 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24932
24933 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24934 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24935 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24936 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24937 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24938
24939 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
24940 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
24941 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
24942
24943 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
24944
24945 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24946 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24947 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24948
24949 /* SSE MMX or 3DNow!A */
24950 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24951 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24952 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24953
24954 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24955 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24956 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24957 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24958
24959 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
24960 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
24961
24962 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
24963
24964 /* SSE2 */
24965 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
24966
24967 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
24968 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
24969 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
24970 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
24971 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
24972 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
24973 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
24974 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
24975 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
24976 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
24977 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
24978 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
24979
24980 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
24981 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
24982 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
24983 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
24984 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
24985 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
24986
24987 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
24988 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
24989 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
24990 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
24991 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
24992
24993 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
24994
24995 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
24996 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
24997 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
24998 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
24999
25000 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25001 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
25002 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25003
25004 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25005 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25006 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25007 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25008 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25009 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25010 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25011 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25012
25013 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
25014 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
25015 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
25016 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25017 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
25018 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25019 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
25020 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
25021 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
25022 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25023 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25024 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25025 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
25026 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
25027 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
25028 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25029 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
25030 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
25031 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
25032 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25033
25034 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25035 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25036 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25037 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25038
25039 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25040 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25041 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25042 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25043
25044 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25045
25046 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25047 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25048 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25049
25050 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
25051
25052 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25053 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25054 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25055 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25056 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25057 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25058 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25059 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25060
25061 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25062 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25063 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25064 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25065 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25066 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25067 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25068 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25069
25070 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25071 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25072
25073 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25074 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25075 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25076 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25077
25078 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25079 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25080
25081 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25082 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25083 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25084 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25085 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25086 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25087
25088 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25089 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25090 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25091 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25092
25093 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25094 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25095 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25096 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25097 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25098 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25099 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25100 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25101
25102 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
25103 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
25104 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
25105
25106 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25107 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
25108
25109 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
25110 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
25111
25112 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
25113
25114 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
25115 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
25116 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
25117 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
25118
25119 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
25120 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
25121 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
25122 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
25123 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
25124 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
25125 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
25126
25127 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
25128 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
25129 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
25130 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
25131 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
25132 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
25133 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
25134
25135 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
25136 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
25137 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
25138 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
25139
25140 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
25141 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
25142 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
25143
25144 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
25145
25146 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
25147 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
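  /* Note: entries with a 0 name field (the two above and the AES/PCLMUL
     ones below) register no "__builtin_ia32_*" identifier from this table;
     presumably the corresponding builtins are declared elsewhere and these
     records are only consulted when they are expanded.  */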
25148
25149 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
25150
25151 /* SSE2 MMX */
25152 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
25153 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
25154
25155 /* SSE3 */
25156 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
25157 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
25158
25159 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25160 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25161 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25162 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25163 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
25164 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25165
25166 /* SSSE3 */
25167 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
25168 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
25169 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
25170 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
25171 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
25172 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
25173
25174 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25175 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25176 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25177 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
25178 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25179 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25180 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25181 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25182 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25183 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
25184 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25185 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25186 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
25187 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
25188 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25189 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25190 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25191 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25192 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25193 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
25194 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25195 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
25196 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25197 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
25198
25199 /* SSSE3. */
25200 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
25201 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
25202
25203 /* SSE4.1 */
25204 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25205 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25206 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
25207 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
25208 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25209 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25210 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25211 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
25212 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
25213 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
25214
25215 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
25216 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
25217 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
25218 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
25219 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
25220 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
25221 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
25222 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
25223 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
25224 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
25225 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
25226 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
25227 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
25228
25229 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
25230 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25231 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25232 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25233 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25234 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25235 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
25236 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25237 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25238 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
25239 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
25240 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
25241
25242 /* SSE4.1 */
25243 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
25244 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
25245 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25246 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25247
25248 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
25249 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
25250 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
25251 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },
25252
25253 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
25254 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
25255 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
25256 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },
25257
25258 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
25259 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
25260 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
25261
25262 /* SSE4.2 */
25263 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25264 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
25265 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
25266 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
25267 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
25268
25269 /* SSE4A */
25270 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
25271 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
25272 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
25273 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25274
25275 /* AES */
25276 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
25277 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
25278
25279 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25280 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25281 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25282 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
25283
25284 /* PCLMUL */
25285 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
25286
25287 /* AVX */
25288 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25289 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25290 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25291 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25292 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25293 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25294 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25295 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25296 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25297 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25298 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25299 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25300 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25301 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25302 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25303 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25304 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25305 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25306 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25307 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25308 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25309 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25310 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25311 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25312 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25313 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25314
25315 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
25316 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
25317 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
25318 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
25319
25320 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25321 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25322 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
25323 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
25324 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25325 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25326 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25327 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25328 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25329 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
25330 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
25331 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25332 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25333 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
25334 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
25335 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
25336 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
25337 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
25338 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
25339 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
25340 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
25341 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
25342 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
25343 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
25344 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
25345 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
25346 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
25347 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
25348 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
25349 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
25350 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
25351 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
25352 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
25353 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
25354
25355 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25356 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25357 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
25358
25359 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
25360 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25361 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25362 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25363 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25364
25365 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
25366
25367 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
25368 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
25369
25370 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
25371 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
25372 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
25373 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },
25374
25375 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
25376 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
25377 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
25378 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },
25379
25380 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25381 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25382 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25383 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25384
25385 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
25386 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
25387 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
25388 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
25389 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
25390 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
25391
25392 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
25393 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
25394 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
25395 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
25396 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
25397 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
25398 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
25399 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
25400 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
25401 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
25402 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
25403 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
25404 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
25405 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
25406 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
25407
25408 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
25409 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
25410
25411 { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
25412 { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
25413
25414 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
25415
25416 /* BMI */
25417 { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
25418 { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
25419 { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
25420
25421 /* TBM */
25422 { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
25423 { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
25424
25425 /* F16C */
25426 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
25427 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
25428 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
25429 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
25430 };
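/* A rough sketch of how one entry in the table above is consumed (see
   ix86_init_mmx_sse_builtins below): the first field gates registration on
   the ISA mask, the insn code selects the RTL pattern, the string becomes
   the user-visible builtin name, and the final field encodes the prototype.
   For example the entry

     { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256",
       IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF }

   makes __builtin_ia32_sqrtpd256 available under AVX, expanding through the
   sqrtv4df2 pattern with a V4DF (V4DF) prototype.  */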
25431
25432 /* FMA4 and XOP. */
25433 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
25434 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
25435 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
25436 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
25437 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
25438 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
25439 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
25440 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
25441 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
25442 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
25443 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
25444 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
25445 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
25446 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
25447 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
25448 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
25449 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
25450 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
25451 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
25452 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
25453 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
25454 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
25455 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
25456 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
25457 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
25458 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
25459 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
25460 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
25461 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
25462 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
25463 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
25464 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
25465 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
25466 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
25467 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
25468 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
25469 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
25470 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
25471 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
25472 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
25473 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
25474 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
25475 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
25476 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
25477 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
25478 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
25479 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
25480 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
25481 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
25482 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
25483 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
25484 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
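/* A rough guide to the MULTI_ARG_* names above: the digit is the number of
   arguments, the element-type letters follow the usual machine-mode naming
   (QI/HI/SI/DI for 8/16/32/64-bit integers, SF/DF for float/double), a
   trailing "2" marks the 256-bit variant (e.g. MULTI_ARG_3_SF2 is
   V8SF_FTYPE_V8SF_V8SF_V8SF), and the _IMM, _CMP and _TF suffixes mean the
   builtin additionally takes an immediate, a comparison code or a pcom
   true/false code, supplied from the table's comparison field.  Names with
   two element types, such as MULTI_ARG_1_HI_SI (V4SI_FTYPE_V8HI), mark
   widening operations.  */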
25485
25486 static const struct builtin_description bdesc_multi_arg[] =
25487 {
25488 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
25489 "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
25490 UNKNOWN, (int)MULTI_ARG_3_SF },
25491 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
25492 "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
25493 UNKNOWN, (int)MULTI_ARG_3_DF },
25494
25495 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
25496 "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
25497 UNKNOWN, (int)MULTI_ARG_3_SF },
25498 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
25499 "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
25500 UNKNOWN, (int)MULTI_ARG_3_DF },
25501 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
25502 "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
25503 UNKNOWN, (int)MULTI_ARG_3_SF2 },
25504 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
25505 "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
25506 UNKNOWN, (int)MULTI_ARG_3_DF2 },
25507
25508 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
25509 "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
25510 UNKNOWN, (int)MULTI_ARG_3_SF },
25511 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
25512 "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
25513 UNKNOWN, (int)MULTI_ARG_3_DF },
25514 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
25515 "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
25516 UNKNOWN, (int)MULTI_ARG_3_SF2 },
25517 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
25518 "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
25519 UNKNOWN, (int)MULTI_ARG_3_DF2 },
25520
25521 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
25522 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
25523 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
25524 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
25525 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
25526 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
25527 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
25528
25529 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
25530 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
25531 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
25532 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
25533 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
25534 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
25535 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
25536
25537 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
25538
25539 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
25540 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
25541 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25542 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25543 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
25544 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
25545 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25546 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25547 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25548 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25549 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25550 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25551
25552 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25553 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
25554 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
25555 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
25556 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
25557 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
25558 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
25559 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
25560 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25561 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
25562 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
25563 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
25564 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25565 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
25566 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
25567 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
25568
25569 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
25570 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
25571 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
25572 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
25573 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
25574 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
25575
25576 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25577 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
25578 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
25579 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25580 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
25581 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25582 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25583 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
25584 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
25585 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25586 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
25587 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25588 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25589 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25590 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25591
25592 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
25593 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25594 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25595 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
25596 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
25597 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
25598 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
25599
25600 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
25601 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25602 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25603 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
25604 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
25605 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
25606 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
25607
25608 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
25609 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25610 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25611 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
25612 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
25613 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
25614 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
25615
25616 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25617 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25618 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25619 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
25620 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
25621 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
25622 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
25623
25624 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
25625 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25626 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25627 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
25628 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
25629 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
25630 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
25631
25632 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
25633 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25634 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25635 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
25636 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
25637 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
25638 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
25639
25640 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
25641 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25642 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25643 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
25644 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
25645 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
25646 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
25647
25648 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25650 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25651 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
25652 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
25653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
25654 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
25655
25656 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25658 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25659 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25660 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25662 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25664
25665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25666 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25668 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25669 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25670 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25672 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25673
25674 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
25675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
25676 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
25677 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
25678
25679 };
25680
25681 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
25682    not in the current target ISA, so that the user can compile particular
25683    modules with target-specific options that differ from the command-line
25684    options. */
25685 static void
25686 ix86_init_mmx_sse_builtins (void)
25687 {
25688 const struct builtin_description * d;
25689 enum ix86_builtin_func_type ftype;
25690 size_t i;
25691
25692 /* Add all special builtins with variable number of operands. */
25693 for (i = 0, d = bdesc_special_args;
25694 i < ARRAY_SIZE (bdesc_special_args);
25695 i++, d++)
25696 {
25697 if (d->name == 0)
25698 continue;
25699
25700 ftype = (enum ix86_builtin_func_type) d->flag;
25701 def_builtin (d->mask, d->name, ftype, d->code);
25702 }
25703
25704 /* Add all builtins with variable number of operands. */
25705 for (i = 0, d = bdesc_args;
25706 i < ARRAY_SIZE (bdesc_args);
25707 i++, d++)
25708 {
25709 if (d->name == 0)
25710 continue;
25711
25712 ftype = (enum ix86_builtin_func_type) d->flag;
25713 def_builtin_const (d->mask, d->name, ftype, d->code);
25714 }
25715
25716 /* pcmpestr[im] insns. */
25717 for (i = 0, d = bdesc_pcmpestr;
25718 i < ARRAY_SIZE (bdesc_pcmpestr);
25719 i++, d++)
25720 {
25721 if (d->code == IX86_BUILTIN_PCMPESTRM128)
25722 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
25723 else
25724 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
25725 def_builtin_const (d->mask, d->name, ftype, d->code);
25726 }
25727
25728 /* pcmpistr[im] insns. */
25729 for (i = 0, d = bdesc_pcmpistr;
25730 i < ARRAY_SIZE (bdesc_pcmpistr);
25731 i++, d++)
25732 {
25733 if (d->code == IX86_BUILTIN_PCMPISTRM128)
25734 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
25735 else
25736 ftype = INT_FTYPE_V16QI_V16QI_INT;
25737 def_builtin_const (d->mask, d->name, ftype, d->code);
25738 }
25739
25740 /* comi/ucomi insns. */
25741 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25742 {
25743 if (d->mask == OPTION_MASK_ISA_SSE2)
25744 ftype = INT_FTYPE_V2DF_V2DF;
25745 else
25746 ftype = INT_FTYPE_V4SF_V4SF;
25747 def_builtin_const (d->mask, d->name, ftype, d->code);
25748 }
25749
25750 /* SSE */
25751 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
25752 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
25753 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
25754 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
25755
25756 /* SSE or 3DNow!A */
25757 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25758 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
25759 IX86_BUILTIN_MASKMOVQ);
25760
25761 /* SSE2 */
25762 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
25763 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
25764
25765 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
25766 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
25767 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
25768 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
25769
25770 /* SSE3. */
25771 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
25772 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
25773 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
25774 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
25775
25776 /* AES */
25777 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
25778 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
25779 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
25780 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
25781 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
25782 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
25783 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
25784 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
25785 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
25786 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
25787 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
25788 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
25789
25790 /* PCLMUL */
25791 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
25792 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
25793
25794 /* RDRND */
25795 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
25796 INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
25797 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
25798 INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
25799 def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
25800 "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
25801 IX86_BUILTIN_RDRAND64_STEP);
25802
25803 /* MMX access to the vec_init patterns. */
25804 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
25805 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
25806
25807 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
25808 V4HI_FTYPE_HI_HI_HI_HI,
25809 IX86_BUILTIN_VEC_INIT_V4HI);
25810
25811 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
25812 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
25813 IX86_BUILTIN_VEC_INIT_V8QI);
25814
25815 /* Access to the vec_extract patterns. */
25816 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
25817 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
25818 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
25819 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
25820 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
25821 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
25822 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
25823 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
25824 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
25825 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
25826
25827 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25828 "__builtin_ia32_vec_ext_v4hi",
25829 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
25830
25831 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
25832 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
25833
25834 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
25835 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
25836
25837 /* Access to the vec_set patterns. */
25838 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
25839 "__builtin_ia32_vec_set_v2di",
25840 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
25841
25842 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
25843 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
25844
25845 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
25846 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
25847
25848 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
25849 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
25850
25851 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25852 "__builtin_ia32_vec_set_v4hi",
25853 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
25854
25855 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
25856 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
25857
25858 /* Add FMA4 and XOP multi-arg builtin instructions.  */
25859 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25860 {
25861 if (d->name == 0)
25862 continue;
25863
25864 ftype = (enum ix86_builtin_func_type) d->flag;
25865 def_builtin_const (d->mask, d->name, ftype, d->code);
25866 }
25867 }
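/* Hypothetical illustration of the per-module override mentioned in the
   comment before ix86_init_mmx_sse_builtins: with the target attribute a
   single function can opt into AVX even when the command line does not
   enable it, e.g.

     typedef double v4df __attribute__ ((vector_size (32)));

     __attribute__ ((target ("avx")))
     v4df f (v4df x) { return __builtin_ia32_sqrtpd256 (x); }  */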
25868
25869 /* Internal helper for ix86_init_builtins. */
25870
25871 static void
25872 ix86_init_builtins_va_builtins_abi (void)
25873 {
25874 tree ms_va_ref, sysv_va_ref;
25875 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
25876 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
25877 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
25878 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
25879
25880 if (!TARGET_64BIT)
25881 return;
25882 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
25883 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
25884 ms_va_ref = build_reference_type (ms_va_list_type_node);
25885 sysv_va_ref =
25886 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
25887
25888 fnvoid_va_end_ms =
25889 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25890 fnvoid_va_start_ms =
25891 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25892 fnvoid_va_end_sysv =
25893 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
25894 fnvoid_va_start_sysv =
25895 build_varargs_function_type_list (void_type_node, sysv_va_ref,
25896 NULL_TREE);
25897 fnvoid_va_copy_ms =
25898 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
25899 NULL_TREE);
25900 fnvoid_va_copy_sysv =
25901 build_function_type_list (void_type_node, sysv_va_ref,
25902 sysv_va_ref, NULL_TREE);
25903
25904 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
25905 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
25906 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
25907 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
25908 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
25909 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
25910 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
25911 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25912 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
25913 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25914 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
25915 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25916 }
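/* Minimal usage sketch for the builtins registered above (hypothetical user
   code; assumes the ms_abi attribute and the __builtin_ms_va_list type,
   which are only available on 64-bit targets):

     void __attribute__ ((ms_abi))
     f (int n, ...)
     {
       __builtin_ms_va_list ap;
       __builtin_ms_va_start (ap, n);
       ...
       __builtin_ms_va_end (ap);
     }  */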
25917
25918 static void
25919 ix86_init_builtin_types (void)
25920 {
25921 tree float128_type_node, float80_type_node;
25922
25923 /* The __float80 type. */
25924 float80_type_node = long_double_type_node;
25925 if (TYPE_MODE (float80_type_node) != XFmode)
25926 {
25927 /* The __float80 type. */
25928 float80_type_node = make_node (REAL_TYPE);
25929
25930 TYPE_PRECISION (float80_type_node) = 80;
25931 layout_type (float80_type_node);
25932 }
25933 lang_hooks.types.register_builtin_type (float80_type_node, "__float80");
25934
25935 /* The __float128 type. */
25936 float128_type_node = make_node (REAL_TYPE);
25937 TYPE_PRECISION (float128_type_node) = 128;
25938 layout_type (float128_type_node);
25939 lang_hooks.types.register_builtin_type (float128_type_node, "__float128");
25940
25941 /* This macro is built by i386-builtin-types.awk. */
25942 DEFINE_BUILTIN_PRIMITIVE_TYPES;
25943 }
25944
25945 static void
25946 ix86_init_builtins (void)
25947 {
25948 tree t;
25949
25950 ix86_init_builtin_types ();
25951
25952 /* TFmode support builtins. */
25953 def_builtin_const (0, "__builtin_infq",
25954 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
25955 def_builtin_const (0, "__builtin_huge_valq",
25956 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
25957
25958 /* We will expand them to a normal call if SSE2 isn't available, since
25959    they are used by libgcc. */
25960 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
25961 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
25962 BUILT_IN_MD, "__fabstf2", NULL_TREE);
25963 TREE_READONLY (t) = 1;
25964 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
25965
25966 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
25967 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
25968 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
25969 TREE_READONLY (t) = 1;
25970 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
25971
25972 ix86_init_mmx_sse_builtins ();
25973
25974 if (TARGET_64BIT)
25975 ix86_init_builtins_va_builtins_abi ();
25976
25977 #ifdef SUBTARGET_INIT_BUILTINS
25978 SUBTARGET_INIT_BUILTINS;
25979 #endif
25980 }
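/* Example use of the TFmode builtins registered above (user code):

     __float128 x = __builtin_infq ();
     __float128 y = __builtin_fabsq (-x);
     __float128 z = __builtin_copysignq (y, (__float128) -1.0);

   When they cannot be expanded inline (no SSE2), __builtin_fabsq and
   __builtin_copysignq become calls to __fabstf2 and __copysigntf3.  */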
25981
25982 /* Return the ix86 builtin for CODE. */
25983
25984 static tree
25985 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
25986 {
25987 if (code >= IX86_BUILTIN_MAX)
25988 return error_mark_node;
25989
25990 return ix86_builtins[code];
25991 }
25992
25993 /* Errors in the source file can cause expand_expr to return const0_rtx
25994 where we expect a vector. To avoid crashing, use one of the vector
25995 clear instructions. */
25996 static rtx
25997 safe_vector_operand (rtx x, enum machine_mode mode)
25998 {
25999 if (x == const0_rtx)
26000 x = CONST0_RTX (mode);
26001 return x;
26002 }
26003
26004 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
26005
26006 static rtx
26007 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
26008 {
26009 rtx pat;
26010 tree arg0 = CALL_EXPR_ARG (exp, 0);
26011 tree arg1 = CALL_EXPR_ARG (exp, 1);
26012 rtx op0 = expand_normal (arg0);
26013 rtx op1 = expand_normal (arg1);
26014 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26015 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
26016 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
26017
26018 if (VECTOR_MODE_P (mode0))
26019 op0 = safe_vector_operand (op0, mode0);
26020 if (VECTOR_MODE_P (mode1))
26021 op1 = safe_vector_operand (op1, mode1);
26022
26023 if (optimize || !target
26024 || GET_MODE (target) != tmode
26025 || !insn_data[icode].operand[0].predicate (target, tmode))
26026 target = gen_reg_rtx (tmode);
26027
26028 if (GET_MODE (op1) == SImode && mode1 == TImode)
26029 {
26030 rtx x = gen_reg_rtx (V4SImode);
26031 emit_insn (gen_sse2_loadd (x, op1));
26032 op1 = gen_lowpart (TImode, x);
26033 }
26034
26035 if (!insn_data[icode].operand[1].predicate (op0, mode0))
26036 op0 = copy_to_mode_reg (mode0, op0);
26037 if (!insn_data[icode].operand[2].predicate (op1, mode1))
26038 op1 = copy_to_mode_reg (mode1, op1);
26039
26040 pat = GEN_FCN (icode) (target, op0, op1);
26041 if (! pat)
26042 return 0;
26043
26044 emit_insn (pat);
26045
26046 return target;
26047 }
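/* Note on the SImode/TImode fix-up in ix86_expand_binop_builtin above: some
   insn patterns declare their second operand as TImode even though the
   builtin's argument is a 32-bit integer; in that case the value is first
   loaded into a V4SI register with sse2_loadd and then reinterpreted as
   TImode via gen_lowpart.  */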
26048
26049 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
26050
26051 static rtx
26052 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
26053 enum ix86_builtin_func_type m_type,
26054 enum rtx_code sub_code)
26055 {
26056 rtx pat;
26057 int i;
26058 int nargs;
26059 bool comparison_p = false;
26060 bool tf_p = false;
26061 bool last_arg_constant = false;
26062 int num_memory = 0;
26063 struct {
26064 rtx op;
26065 enum machine_mode mode;
26066 } args[4];
26067
26068 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26069
26070 switch (m_type)
26071 {
26072 case MULTI_ARG_4_DF2_DI_I:
26073 case MULTI_ARG_4_DF2_DI_I1:
26074 case MULTI_ARG_4_SF2_SI_I:
26075 case MULTI_ARG_4_SF2_SI_I1:
26076 nargs = 4;
26077 last_arg_constant = true;
26078 break;
26079
26080 case MULTI_ARG_3_SF:
26081 case MULTI_ARG_3_DF:
26082 case MULTI_ARG_3_SF2:
26083 case MULTI_ARG_3_DF2:
26084 case MULTI_ARG_3_DI:
26085 case MULTI_ARG_3_SI:
26086 case MULTI_ARG_3_SI_DI:
26087 case MULTI_ARG_3_HI:
26088 case MULTI_ARG_3_HI_SI:
26089 case MULTI_ARG_3_QI:
26090 case MULTI_ARG_3_DI2:
26091 case MULTI_ARG_3_SI2:
26092 case MULTI_ARG_3_HI2:
26093 case MULTI_ARG_3_QI2:
26094 nargs = 3;
26095 break;
26096
26097 case MULTI_ARG_2_SF:
26098 case MULTI_ARG_2_DF:
26099 case MULTI_ARG_2_DI:
26100 case MULTI_ARG_2_SI:
26101 case MULTI_ARG_2_HI:
26102 case MULTI_ARG_2_QI:
26103 nargs = 2;
26104 break;
26105
26106 case MULTI_ARG_2_DI_IMM:
26107 case MULTI_ARG_2_SI_IMM:
26108 case MULTI_ARG_2_HI_IMM:
26109 case MULTI_ARG_2_QI_IMM:
26110 nargs = 2;
26111 last_arg_constant = true;
26112 break;
26113
26114 case MULTI_ARG_1_SF:
26115 case MULTI_ARG_1_DF:
26116 case MULTI_ARG_1_SF2:
26117 case MULTI_ARG_1_DF2:
26118 case MULTI_ARG_1_DI:
26119 case MULTI_ARG_1_SI:
26120 case MULTI_ARG_1_HI:
26121 case MULTI_ARG_1_QI:
26122 case MULTI_ARG_1_SI_DI:
26123 case MULTI_ARG_1_HI_DI:
26124 case MULTI_ARG_1_HI_SI:
26125 case MULTI_ARG_1_QI_DI:
26126 case MULTI_ARG_1_QI_SI:
26127 case MULTI_ARG_1_QI_HI:
26128 nargs = 1;
26129 break;
26130
26131 case MULTI_ARG_2_DI_CMP:
26132 case MULTI_ARG_2_SI_CMP:
26133 case MULTI_ARG_2_HI_CMP:
26134 case MULTI_ARG_2_QI_CMP:
26135 nargs = 2;
26136 comparison_p = true;
26137 break;
26138
26139 case MULTI_ARG_2_SF_TF:
26140 case MULTI_ARG_2_DF_TF:
26141 case MULTI_ARG_2_DI_TF:
26142 case MULTI_ARG_2_SI_TF:
26143 case MULTI_ARG_2_HI_TF:
26144 case MULTI_ARG_2_QI_TF:
26145 nargs = 2;
26146 tf_p = true;
26147 break;
26148
26149 default:
26150 gcc_unreachable ();
26151 }
26152
26153 if (optimize || !target
26154 || GET_MODE (target) != tmode
26155 || !insn_data[icode].operand[0].predicate (target, tmode))
26156 target = gen_reg_rtx (tmode);
26157
26158 gcc_assert (nargs <= 4);
26159
26160 for (i = 0; i < nargs; i++)
26161 {
26162 tree arg = CALL_EXPR_ARG (exp, i);
26163 rtx op = expand_normal (arg);
26164 int adjust = (comparison_p) ? 1 : 0;
26165 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
26166
26167 if (last_arg_constant && i == nargs-1)
26168 {
26169 if (!CONST_INT_P (op))
26170 {
26171 error ("last argument must be an immediate");
26172 return gen_reg_rtx (tmode);
26173 }
26174 }
26175 else
26176 {
26177 if (VECTOR_MODE_P (mode))
26178 op = safe_vector_operand (op, mode);
26179
26180 /* If we aren't optimizing, only allow one memory operand to be
26181 generated. */
26182 if (memory_operand (op, mode))
26183 num_memory++;
26184
26185 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
26186
26187 if (optimize
26188 || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
26189 || num_memory > 1)
26190 op = force_reg (mode, op);
26191 }
26192
26193 args[i].op = op;
26194 args[i].mode = mode;
26195 }
26196
26197 switch (nargs)
26198 {
26199 case 1:
26200 pat = GEN_FCN (icode) (target, args[0].op);
26201 break;
26202
26203 case 2:
26204 if (tf_p)
26205 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
26206 GEN_INT ((int)sub_code));
26207 else if (! comparison_p)
26208 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
26209 else
26210 {
26211 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
26212 args[0].op,
26213 args[1].op);
26214
26215 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
26216 }
26217 break;
26218
26219 case 3:
26220 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
26221 break;
26222
26223 case 4:
26224 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
26225 break;
26226
26227 default:
26228 gcc_unreachable ();
26229 }
26230
26231 if (! pat)
26232 return 0;
26233
26234 emit_insn (pat);
26235 return target;
26236 }
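/* Sketch of the comparison_p path in ix86_expand_multi_arg_builtin: for a
   vpcom builtin such as __builtin_ia32_vpcomltb (sub_code LT, type
   MULTI_ARG_2_QI_CMP), the expander first builds (lt:V16QI op0 op1) and
   then emits

     pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);

   so the comparison code reaches the xop_maskcmpv16qi3 pattern as an
   explicit rtx operand rather than as an immediate.  */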
26237
26238 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
26239 insns with vec_merge. */
26240
26241 static rtx
26242 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
26243 rtx target)
26244 {
26245 rtx pat;
26246 tree arg0 = CALL_EXPR_ARG (exp, 0);
26247 rtx op1, op0 = expand_normal (arg0);
26248 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26249 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
26250
26251 if (optimize || !target
26252 || GET_MODE (target) != tmode
26253 || !insn_data[icode].operand[0].predicate (target, tmode))
26254 target = gen_reg_rtx (tmode);
26255
26256 if (VECTOR_MODE_P (mode0))
26257 op0 = safe_vector_operand (op0, mode0);
26258
26259 if ((optimize && !register_operand (op0, mode0))
26260 || !insn_data[icode].operand[1].predicate (op0, mode0))
26261 op0 = copy_to_mode_reg (mode0, op0);
26262
26263 op1 = op0;
26264 if (!insn_data[icode].operand[2].predicate (op1, mode0))
26265 op1 = copy_to_mode_reg (mode0, op1);
26266
26267 pat = GEN_FCN (icode) (target, op0, op1);
26268 if (! pat)
26269 return 0;
26270 emit_insn (pat);
26271 return target;
26272 }
26273
26274 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
26275
26276 static rtx
26277 ix86_expand_sse_compare (const struct builtin_description *d,
26278 tree exp, rtx target, bool swap)
26279 {
26280 rtx pat;
26281 tree arg0 = CALL_EXPR_ARG (exp, 0);
26282 tree arg1 = CALL_EXPR_ARG (exp, 1);
26283 rtx op0 = expand_normal (arg0);
26284 rtx op1 = expand_normal (arg1);
26285 rtx op2;
26286 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26287 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26288 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
26289 enum rtx_code comparison = d->comparison;
26290
26291 if (VECTOR_MODE_P (mode0))
26292 op0 = safe_vector_operand (op0, mode0);
26293 if (VECTOR_MODE_P (mode1))
26294 op1 = safe_vector_operand (op1, mode1);
26295
26296 /* Swap operands if we have a comparison that isn't available in
26297 hardware. */
26298 if (swap)
26299 {
26300 rtx tmp = gen_reg_rtx (mode1);
26301 emit_move_insn (tmp, op1);
26302 op1 = op0;
26303 op0 = tmp;
26304 }
26305
26306 if (optimize || !target
26307 || GET_MODE (target) != tmode
26308 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26309 target = gen_reg_rtx (tmode);
26310
26311 if ((optimize && !register_operand (op0, mode0))
26312 || !insn_data[d->icode].operand[1].predicate (op0, mode0))
26313 op0 = copy_to_mode_reg (mode0, op0);
26314 if ((optimize && !register_operand (op1, mode1))
26315 || !insn_data[d->icode].operand[2].predicate (op1, mode1))
26316 op1 = copy_to_mode_reg (mode1, op1);
26317
26318 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
26319 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
26320 if (! pat)
26321 return 0;
26322 emit_insn (pat);
26323 return target;
26324 }
26325
26326 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
26327
26328 static rtx
26329 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
26330 rtx target)
26331 {
26332 rtx pat;
26333 tree arg0 = CALL_EXPR_ARG (exp, 0);
26334 tree arg1 = CALL_EXPR_ARG (exp, 1);
26335 rtx op0 = expand_normal (arg0);
26336 rtx op1 = expand_normal (arg1);
26337 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26338 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26339 enum rtx_code comparison = d->comparison;
26340
26341 if (VECTOR_MODE_P (mode0))
26342 op0 = safe_vector_operand (op0, mode0);
26343 if (VECTOR_MODE_P (mode1))
26344 op1 = safe_vector_operand (op1, mode1);
26345
26346 /* Swap operands if we have a comparison that isn't available in
26347 hardware. */
26348 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
26349 {
26350 rtx tmp = op1;
26351 op1 = op0;
26352 op0 = tmp;
26353 }
26354
26355 target = gen_reg_rtx (SImode);
26356 emit_move_insn (target, const0_rtx);
26357 target = gen_rtx_SUBREG (QImode, target, 0);
26358
26359 if ((optimize && !register_operand (op0, mode0))
26360 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26361 op0 = copy_to_mode_reg (mode0, op0);
26362 if ((optimize && !register_operand (op1, mode1))
26363 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26364 op1 = copy_to_mode_reg (mode1, op1);
26365
26366 pat = GEN_FCN (d->icode) (op0, op1);
26367 if (! pat)
26368 return 0;
26369 emit_insn (pat);
26370 emit_insn (gen_rtx_SET (VOIDmode,
26371 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26372 gen_rtx_fmt_ee (comparison, QImode,
26373 SET_DEST (pat),
26374 const0_rtx)));
26375
26376 return SUBREG_REG (target);
26377 }
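/* Result materialization in ix86_expand_sse_comi above: a fresh SImode
   pseudo is zeroed, the comi insn sets the flags, and a STRICT_LOW_PART
   store of (comparison:QI flags 0) fills in the low byte, leaving the
   SImode value as 0 or 1.  The ptest expander below reuses the same
   idiom.  */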
26378
26379 /* Subroutine of ix86_expand_args_builtin to take care of round insns. */
26380
26381 static rtx
26382 ix86_expand_sse_round (const struct builtin_description *d, tree exp,
26383 rtx target)
26384 {
26385 rtx pat;
26386 tree arg0 = CALL_EXPR_ARG (exp, 0);
26387 rtx op1, op0 = expand_normal (arg0);
26388 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26389 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26390
26391 if (optimize || target == 0
26392 || GET_MODE (target) != tmode
26393 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26394 target = gen_reg_rtx (tmode);
26395
26396 if (VECTOR_MODE_P (mode0))
26397 op0 = safe_vector_operand (op0, mode0);
26398
26399 if ((optimize && !register_operand (op0, mode0))
26400 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26401 op0 = copy_to_mode_reg (mode0, op0);
26402
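/* For the round builtins the comparison field of the descriptor holds
   the rounding-mode immediate rather than a comparison code.  */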
26403 op1 = GEN_INT (d->comparison);
26404
26405 pat = GEN_FCN (d->icode) (target, op0, op1);
26406 if (! pat)
26407 return 0;
26408 emit_insn (pat);
26409 return target;
26410 }
26411
26412 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
26413
26414 static rtx
26415 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
26416 rtx target)
26417 {
26418 rtx pat;
26419 tree arg0 = CALL_EXPR_ARG (exp, 0);
26420 tree arg1 = CALL_EXPR_ARG (exp, 1);
26421 rtx op0 = expand_normal (arg0);
26422 rtx op1 = expand_normal (arg1);
26423 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26424 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26425 enum rtx_code comparison = d->comparison;
26426
26427 if (VECTOR_MODE_P (mode0))
26428 op0 = safe_vector_operand (op0, mode0);
26429 if (VECTOR_MODE_P (mode1))
26430 op1 = safe_vector_operand (op1, mode1);
26431
26432 target = gen_reg_rtx (SImode);
26433 emit_move_insn (target, const0_rtx);
26434 target = gen_rtx_SUBREG (QImode, target, 0);
26435
26436 if ((optimize && !register_operand (op0, mode0))
26437 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26438 op0 = copy_to_mode_reg (mode0, op0);
26439 if ((optimize && !register_operand (op1, mode1))
26440 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26441 op1 = copy_to_mode_reg (mode1, op1);
26442
26443 pat = GEN_FCN (d->icode) (op0, op1);
26444 if (! pat)
26445 return 0;
26446 emit_insn (pat);
26447 emit_insn (gen_rtx_SET (VOIDmode,
26448 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26449 gen_rtx_fmt_ee (comparison, QImode,
26450 SET_DEST (pat),
26451 const0_rtx)));
26452
26453 return SUBREG_REG (target);
26454 }
26455
26456 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
26457
26458 static rtx
26459 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
26460 tree exp, rtx target)
26461 {
26462 rtx pat;
26463 tree arg0 = CALL_EXPR_ARG (exp, 0);
26464 tree arg1 = CALL_EXPR_ARG (exp, 1);
26465 tree arg2 = CALL_EXPR_ARG (exp, 2);
26466 tree arg3 = CALL_EXPR_ARG (exp, 3);
26467 tree arg4 = CALL_EXPR_ARG (exp, 4);
26468 rtx scratch0, scratch1;
26469 rtx op0 = expand_normal (arg0);
26470 rtx op1 = expand_normal (arg1);
26471 rtx op2 = expand_normal (arg2);
26472 rtx op3 = expand_normal (arg3);
26473 rtx op4 = expand_normal (arg4);
26474 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
26475
26476 tmode0 = insn_data[d->icode].operand[0].mode;
26477 tmode1 = insn_data[d->icode].operand[1].mode;
26478 modev2 = insn_data[d->icode].operand[2].mode;
26479 modei3 = insn_data[d->icode].operand[3].mode;
26480 modev4 = insn_data[d->icode].operand[4].mode;
26481 modei5 = insn_data[d->icode].operand[5].mode;
26482 modeimm = insn_data[d->icode].operand[6].mode;
26483
26484 if (VECTOR_MODE_P (modev2))
26485 op0 = safe_vector_operand (op0, modev2);
26486 if (VECTOR_MODE_P (modev4))
26487 op2 = safe_vector_operand (op2, modev4);
26488
26489 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26490 op0 = copy_to_mode_reg (modev2, op0);
26491 if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
26492 op1 = copy_to_mode_reg (modei3, op1);
26493 if ((optimize && !register_operand (op2, modev4))
26494 || !insn_data[d->icode].operand[4].predicate (op2, modev4))
26495 op2 = copy_to_mode_reg (modev4, op2);
26496 if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
26497 op3 = copy_to_mode_reg (modei5, op3);
26498
26499 if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
26500 {
26501 error ("the fifth argument must be an 8-bit immediate");
26502 return const0_rtx;
26503 }
26504
26505 if (d->code == IX86_BUILTIN_PCMPESTRI128)
26506 {
26507 if (optimize || !target
26508 || GET_MODE (target) != tmode0
26509 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26510 target = gen_reg_rtx (tmode0);
26511
26512 scratch1 = gen_reg_rtx (tmode1);
26513
26514 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
26515 }
26516 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
26517 {
26518 if (optimize || !target
26519 || GET_MODE (target) != tmode1
26520 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26521 target = gen_reg_rtx (tmode1);
26522
26523 scratch0 = gen_reg_rtx (tmode0);
26524
26525 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
26526 }
26527 else
26528 {
26529 gcc_assert (d->flag);
26530
26531 scratch0 = gen_reg_rtx (tmode0);
26532 scratch1 = gen_reg_rtx (tmode1);
26533
26534 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
26535 }
26536
26537 if (! pat)
26538 return 0;
26539
26540 emit_insn (pat);
26541
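/* A nonzero flag field is, by assumption here, the CC mode used to read
   the EFLAGS bit (carry, zero, ...) that this builtin variant returns.  */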
26542 if (d->flag)
26543 {
26544 target = gen_reg_rtx (SImode);
26545 emit_move_insn (target, const0_rtx);
26546 target = gen_rtx_SUBREG (QImode, target, 0);
26547
26548 emit_insn
26549 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26550 gen_rtx_fmt_ee (EQ, QImode,
26551 gen_rtx_REG ((enum machine_mode) d->flag,
26552 FLAGS_REG),
26553 const0_rtx)));
26554 return SUBREG_REG (target);
26555 }
26556 else
26557 return target;
26558 }
26559
26560
26561 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
26562
26563 static rtx
26564 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
26565 tree exp, rtx target)
26566 {
26567 rtx pat;
26568 tree arg0 = CALL_EXPR_ARG (exp, 0);
26569 tree arg1 = CALL_EXPR_ARG (exp, 1);
26570 tree arg2 = CALL_EXPR_ARG (exp, 2);
26571 rtx scratch0, scratch1;
26572 rtx op0 = expand_normal (arg0);
26573 rtx op1 = expand_normal (arg1);
26574 rtx op2 = expand_normal (arg2);
26575 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
26576
26577 tmode0 = insn_data[d->icode].operand[0].mode;
26578 tmode1 = insn_data[d->icode].operand[1].mode;
26579 modev2 = insn_data[d->icode].operand[2].mode;
26580 modev3 = insn_data[d->icode].operand[3].mode;
26581 modeimm = insn_data[d->icode].operand[4].mode;
26582
26583 if (VECTOR_MODE_P (modev2))
26584 op0 = safe_vector_operand (op0, modev2);
26585 if (VECTOR_MODE_P (modev3))
26586 op1 = safe_vector_operand (op1, modev3);
26587
26588 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26589 op0 = copy_to_mode_reg (modev2, op0);
26590 if ((optimize && !register_operand (op1, modev3))
26591 || !insn_data[d->icode].operand[3].predicate (op1, modev3))
26592 op1 = copy_to_mode_reg (modev3, op1);
26593
26594 if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
26595 {
26596 error ("the third argument must be an 8-bit immediate");
26597 return const0_rtx;
26598 }
26599
26600 if (d->code == IX86_BUILTIN_PCMPISTRI128)
26601 {
26602 if (optimize || !target
26603 || GET_MODE (target) != tmode0
26604 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26605 target = gen_reg_rtx (tmode0);
26606
26607 scratch1 = gen_reg_rtx (tmode1);
26608
26609 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
26610 }
26611 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
26612 {
26613 if (optimize || !target
26614 || GET_MODE (target) != tmode1
26615 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26616 target = gen_reg_rtx (tmode1);
26617
26618 scratch0 = gen_reg_rtx (tmode0);
26619
26620 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
26621 }
26622 else
26623 {
26624 gcc_assert (d->flag);
26625
26626 scratch0 = gen_reg_rtx (tmode0);
26627 scratch1 = gen_reg_rtx (tmode1);
26628
26629 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
26630 }
26631
26632 if (! pat)
26633 return 0;
26634
26635 emit_insn (pat);
26636
26637 if (d->flag)
26638 {
26639 target = gen_reg_rtx (SImode);
26640 emit_move_insn (target, const0_rtx);
26641 target = gen_rtx_SUBREG (QImode, target, 0);
26642
26643 emit_insn
26644 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26645 gen_rtx_fmt_ee (EQ, QImode,
26646 gen_rtx_REG ((enum machine_mode) d->flag,
26647 FLAGS_REG),
26648 const0_rtx)));
26649 return SUBREG_REG (target);
26650 }
26651 else
26652 return target;
26653 }
26654
26655 /* Subroutine of ix86_expand_builtin to take care of insns with
26656 variable number of operands. */
26657
26658 static rtx
26659 ix86_expand_args_builtin (const struct builtin_description *d,
26660 tree exp, rtx target)
26661 {
26662 rtx pat, real_target;
26663 unsigned int i, nargs;
26664 unsigned int nargs_constant = 0;
26665 int num_memory = 0;
26666 struct
26667 {
26668 rtx op;
26669 enum machine_mode mode;
26670 } args[4];
26671 bool last_arg_count = false;
26672 enum insn_code icode = d->icode;
26673 const struct insn_data_d *insn_p = &insn_data[icode];
26674 enum machine_mode tmode = insn_p->operand[0].mode;
26675 enum machine_mode rmode = VOIDmode;
26676 bool swap = false;
26677 enum rtx_code comparison = d->comparison;
26678
26679 switch ((enum ix86_builtin_func_type) d->flag)
26680 {
26681 case V2DF_FTYPE_V2DF_ROUND:
26682 case V4DF_FTYPE_V4DF_ROUND:
26683 case V4SF_FTYPE_V4SF_ROUND:
26684 case V8SF_FTYPE_V8SF_ROUND:
26685 return ix86_expand_sse_round (d, exp, target);
26686 case INT_FTYPE_V8SF_V8SF_PTEST:
26687 case INT_FTYPE_V4DI_V4DI_PTEST:
26688 case INT_FTYPE_V4DF_V4DF_PTEST:
26689 case INT_FTYPE_V4SF_V4SF_PTEST:
26690 case INT_FTYPE_V2DI_V2DI_PTEST:
26691 case INT_FTYPE_V2DF_V2DF_PTEST:
26692 return ix86_expand_sse_ptest (d, exp, target);
26693 case FLOAT128_FTYPE_FLOAT128:
26694 case FLOAT_FTYPE_FLOAT:
26695 case INT_FTYPE_INT:
26696 case UINT64_FTYPE_INT:
26697 case UINT16_FTYPE_UINT16:
26698 case INT64_FTYPE_INT64:
26699 case INT64_FTYPE_V4SF:
26700 case INT64_FTYPE_V2DF:
26701 case INT_FTYPE_V16QI:
26702 case INT_FTYPE_V8QI:
26703 case INT_FTYPE_V8SF:
26704 case INT_FTYPE_V4DF:
26705 case INT_FTYPE_V4SF:
26706 case INT_FTYPE_V2DF:
26707 case V16QI_FTYPE_V16QI:
26708 case V8SI_FTYPE_V8SF:
26709 case V8SI_FTYPE_V4SI:
26710 case V8HI_FTYPE_V8HI:
26711 case V8HI_FTYPE_V16QI:
26712 case V8QI_FTYPE_V8QI:
26713 case V8SF_FTYPE_V8SF:
26714 case V8SF_FTYPE_V8SI:
26715 case V8SF_FTYPE_V4SF:
26716 case V8SF_FTYPE_V8HI:
26717 case V4SI_FTYPE_V4SI:
26718 case V4SI_FTYPE_V16QI:
26719 case V4SI_FTYPE_V4SF:
26720 case V4SI_FTYPE_V8SI:
26721 case V4SI_FTYPE_V8HI:
26722 case V4SI_FTYPE_V4DF:
26723 case V4SI_FTYPE_V2DF:
26724 case V4HI_FTYPE_V4HI:
26725 case V4DF_FTYPE_V4DF:
26726 case V4DF_FTYPE_V4SI:
26727 case V4DF_FTYPE_V4SF:
26728 case V4DF_FTYPE_V2DF:
26729 case V4SF_FTYPE_V4SF:
26730 case V4SF_FTYPE_V4SI:
26731 case V4SF_FTYPE_V8SF:
26732 case V4SF_FTYPE_V4DF:
26733 case V4SF_FTYPE_V8HI:
26734 case V4SF_FTYPE_V2DF:
26735 case V2DI_FTYPE_V2DI:
26736 case V2DI_FTYPE_V16QI:
26737 case V2DI_FTYPE_V8HI:
26738 case V2DI_FTYPE_V4SI:
26739 case V2DF_FTYPE_V2DF:
26740 case V2DF_FTYPE_V4SI:
26741 case V2DF_FTYPE_V4DF:
26742 case V2DF_FTYPE_V4SF:
26743 case V2DF_FTYPE_V2SI:
26744 case V2SI_FTYPE_V2SI:
26745 case V2SI_FTYPE_V4SF:
26746 case V2SI_FTYPE_V2SF:
26747 case V2SI_FTYPE_V2DF:
26748 case V2SF_FTYPE_V2SF:
26749 case V2SF_FTYPE_V2SI:
26750 nargs = 1;
26751 break;
26752 case V4SF_FTYPE_V4SF_VEC_MERGE:
26753 case V2DF_FTYPE_V2DF_VEC_MERGE:
26754 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
26755 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
26756 case V16QI_FTYPE_V16QI_V16QI:
26757 case V16QI_FTYPE_V8HI_V8HI:
26758 case V8QI_FTYPE_V8QI_V8QI:
26759 case V8QI_FTYPE_V4HI_V4HI:
26760 case V8HI_FTYPE_V8HI_V8HI:
26761 case V8HI_FTYPE_V16QI_V16QI:
26762 case V8HI_FTYPE_V4SI_V4SI:
26763 case V8SF_FTYPE_V8SF_V8SF:
26764 case V8SF_FTYPE_V8SF_V8SI:
26765 case V4SI_FTYPE_V4SI_V4SI:
26766 case V4SI_FTYPE_V8HI_V8HI:
26767 case V4SI_FTYPE_V4SF_V4SF:
26768 case V4SI_FTYPE_V2DF_V2DF:
26769 case V4HI_FTYPE_V4HI_V4HI:
26770 case V4HI_FTYPE_V8QI_V8QI:
26771 case V4HI_FTYPE_V2SI_V2SI:
26772 case V4DF_FTYPE_V4DF_V4DF:
26773 case V4DF_FTYPE_V4DF_V4DI:
26774 case V4SF_FTYPE_V4SF_V4SF:
26775 case V4SF_FTYPE_V4SF_V4SI:
26776 case V4SF_FTYPE_V4SF_V2SI:
26777 case V4SF_FTYPE_V4SF_V2DF:
26778 case V4SF_FTYPE_V4SF_DI:
26779 case V4SF_FTYPE_V4SF_SI:
26780 case V2DI_FTYPE_V2DI_V2DI:
26781 case V2DI_FTYPE_V16QI_V16QI:
26782 case V2DI_FTYPE_V4SI_V4SI:
26783 case V2DI_FTYPE_V2DI_V16QI:
26784 case V2DI_FTYPE_V2DF_V2DF:
26785 case V2SI_FTYPE_V2SI_V2SI:
26786 case V2SI_FTYPE_V4HI_V4HI:
26787 case V2SI_FTYPE_V2SF_V2SF:
26788 case V2DF_FTYPE_V2DF_V2DF:
26789 case V2DF_FTYPE_V2DF_V4SF:
26790 case V2DF_FTYPE_V2DF_V2DI:
26791 case V2DF_FTYPE_V2DF_DI:
26792 case V2DF_FTYPE_V2DF_SI:
26793 case V2SF_FTYPE_V2SF_V2SF:
26794 case V1DI_FTYPE_V1DI_V1DI:
26795 case V1DI_FTYPE_V8QI_V8QI:
26796 case V1DI_FTYPE_V2SI_V2SI:
26797 if (comparison == UNKNOWN)
26798 return ix86_expand_binop_builtin (icode, exp, target);
26799 nargs = 2;
26800 break;
26801 case V4SF_FTYPE_V4SF_V4SF_SWAP:
26802 case V2DF_FTYPE_V2DF_V2DF_SWAP:
26803 gcc_assert (comparison != UNKNOWN);
26804 nargs = 2;
26805 swap = true;
26806 break;
26807 case V8HI_FTYPE_V8HI_V8HI_COUNT:
26808 case V8HI_FTYPE_V8HI_SI_COUNT:
26809 case V4SI_FTYPE_V4SI_V4SI_COUNT:
26810 case V4SI_FTYPE_V4SI_SI_COUNT:
26811 case V4HI_FTYPE_V4HI_V4HI_COUNT:
26812 case V4HI_FTYPE_V4HI_SI_COUNT:
26813 case V2DI_FTYPE_V2DI_V2DI_COUNT:
26814 case V2DI_FTYPE_V2DI_SI_COUNT:
26815 case V2SI_FTYPE_V2SI_V2SI_COUNT:
26816 case V2SI_FTYPE_V2SI_SI_COUNT:
26817 case V1DI_FTYPE_V1DI_V1DI_COUNT:
26818 case V1DI_FTYPE_V1DI_SI_COUNT:
26819 nargs = 2;
26820 last_arg_count = true;
26821 break;
26822 case UINT64_FTYPE_UINT64_UINT64:
26823 case UINT_FTYPE_UINT_UINT:
26824 case UINT_FTYPE_UINT_USHORT:
26825 case UINT_FTYPE_UINT_UCHAR:
26826 case UINT16_FTYPE_UINT16_INT:
26827 case UINT8_FTYPE_UINT8_INT:
26828 nargs = 2;
26829 break;
26830 case V2DI_FTYPE_V2DI_INT_CONVERT:
26831 nargs = 2;
26832 rmode = V1TImode;
26833 nargs_constant = 1;
26834 break;
26835 case V8HI_FTYPE_V8HI_INT:
26836 case V8HI_FTYPE_V8SF_INT:
26837 case V8HI_FTYPE_V4SF_INT:
26838 case V8SF_FTYPE_V8SF_INT:
26839 case V4SI_FTYPE_V4SI_INT:
26840 case V4SI_FTYPE_V8SI_INT:
26841 case V4HI_FTYPE_V4HI_INT:
26842 case V4DF_FTYPE_V4DF_INT:
26843 case V4SF_FTYPE_V4SF_INT:
26844 case V4SF_FTYPE_V8SF_INT:
26845 case V2DI_FTYPE_V2DI_INT:
26846 case V2DF_FTYPE_V2DF_INT:
26847 case V2DF_FTYPE_V4DF_INT:
26848 nargs = 2;
26849 nargs_constant = 1;
26850 break;
26851 case V16QI_FTYPE_V16QI_V16QI_V16QI:
26852 case V8SF_FTYPE_V8SF_V8SF_V8SF:
26853 case V4DF_FTYPE_V4DF_V4DF_V4DF:
26854 case V4SF_FTYPE_V4SF_V4SF_V4SF:
26855 case V2DF_FTYPE_V2DF_V2DF_V2DF:
26856 nargs = 3;
26857 break;
26858 case V16QI_FTYPE_V16QI_V16QI_INT:
26859 case V8HI_FTYPE_V8HI_V8HI_INT:
26860 case V8SI_FTYPE_V8SI_V8SI_INT:
26861 case V8SI_FTYPE_V8SI_V4SI_INT:
26862 case V8SF_FTYPE_V8SF_V8SF_INT:
26863 case V8SF_FTYPE_V8SF_V4SF_INT:
26864 case V4SI_FTYPE_V4SI_V4SI_INT:
26865 case V4DF_FTYPE_V4DF_V4DF_INT:
26866 case V4DF_FTYPE_V4DF_V2DF_INT:
26867 case V4SF_FTYPE_V4SF_V4SF_INT:
26868 case V2DI_FTYPE_V2DI_V2DI_INT:
26869 case V2DF_FTYPE_V2DF_V2DF_INT:
26870 nargs = 3;
26871 nargs_constant = 1;
26872 break;
26873 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
26874 nargs = 3;
26875 rmode = V2DImode;
26876 nargs_constant = 1;
26877 break;
26878 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
26879 nargs = 3;
26880 rmode = DImode;
26881 nargs_constant = 1;
26882 break;
26883 case V2DI_FTYPE_V2DI_UINT_UINT:
26884 nargs = 3;
26885 nargs_constant = 2;
26886 break;
26887 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
26888 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
26889 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
26890 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
26891 nargs = 4;
26892 nargs_constant = 1;
26893 break;
26894 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
26895 nargs = 4;
26896 nargs_constant = 2;
26897 break;
26898 default:
26899 gcc_unreachable ();
26900 }
26901
26902 gcc_assert (nargs <= ARRAY_SIZE (args));
26903
26904 if (comparison != UNKNOWN)
26905 {
26906 gcc_assert (nargs == 2);
26907 return ix86_expand_sse_compare (d, exp, target, swap);
26908 }
26909
26910 if (rmode == VOIDmode || rmode == tmode)
26911 {
26912 if (optimize
26913 || target == 0
26914 || GET_MODE (target) != tmode
26915 || !insn_p->operand[0].predicate (target, tmode))
26916 target = gen_reg_rtx (tmode);
26917 real_target = target;
26918 }
26919 else
26920 {
26921 target = gen_reg_rtx (rmode);
26922 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
26923 }
26924
26925 for (i = 0; i < nargs; i++)
26926 {
26927 tree arg = CALL_EXPR_ARG (exp, i);
26928 rtx op = expand_normal (arg);
26929 enum machine_mode mode = insn_p->operand[i + 1].mode;
26930 bool match = insn_p->operand[i + 1].predicate (op, mode);
26931
26932 if (last_arg_count && (i + 1) == nargs)
26933 {
26934 /* SIMD shift insns take either an 8-bit immediate or a
26935 register as the count, but the builtin functions take an
26936 int.  If the count doesn't match, put it in a register.  */
26937 if (!match)
26938 {
26939 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
26940 if (!insn_p->operand[i + 1].predicate (op, mode))
26941 op = copy_to_reg (op);
26942 }
26943 }
26944 else if ((nargs - i) <= nargs_constant)
26945 {
26946 if (!match)
26947 switch (icode)
26948 {
26949 case CODE_FOR_sse4_1_roundpd:
26950 case CODE_FOR_sse4_1_roundps:
26951 case CODE_FOR_sse4_1_roundsd:
26952 case CODE_FOR_sse4_1_roundss:
26953 case CODE_FOR_sse4_1_blendps:
26954 case CODE_FOR_avx_blendpd256:
26955 case CODE_FOR_avx_vpermilv4df:
26956 case CODE_FOR_avx_roundpd256:
26957 case CODE_FOR_avx_roundps256:
26958 error ("the last argument must be a 4-bit immediate");
26959 return const0_rtx;
26960
26961 case CODE_FOR_sse4_1_blendpd:
26962 case CODE_FOR_avx_vpermilv2df:
26963 case CODE_FOR_xop_vpermil2v2df3:
26964 case CODE_FOR_xop_vpermil2v4sf3:
26965 case CODE_FOR_xop_vpermil2v4df3:
26966 case CODE_FOR_xop_vpermil2v8sf3:
26967 error ("the last argument must be a 2-bit immediate");
26968 return const0_rtx;
26969
26970 case CODE_FOR_avx_vextractf128v4df:
26971 case CODE_FOR_avx_vextractf128v8sf:
26972 case CODE_FOR_avx_vextractf128v8si:
26973 case CODE_FOR_avx_vinsertf128v4df:
26974 case CODE_FOR_avx_vinsertf128v8sf:
26975 case CODE_FOR_avx_vinsertf128v8si:
26976 error ("the last argument must be a 1-bit immediate");
26977 return const0_rtx;
26978
26979 case CODE_FOR_avx_vmcmpv2df3:
26980 case CODE_FOR_avx_vmcmpv4sf3:
26981 case CODE_FOR_avx_cmpv2df3:
26982 case CODE_FOR_avx_cmpv4sf3:
26983 case CODE_FOR_avx_cmpv4df3:
26984 case CODE_FOR_avx_cmpv8sf3:
26985 error ("the last argument must be a 5-bit immediate");
26986 return const0_rtx;
26987
26988 default:
26989 switch (nargs_constant)
26990 {
26991 case 2:
26992 if ((nargs - i) == nargs_constant)
26993 {
26994 error ("the next to last argument must be an 8-bit immediate");
26995 break;
26996 }
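/* FALLTHRU */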
26997 case 1:
26998 error ("the last argument must be an 8-bit immediate");
26999 break;
27000 default:
27001 gcc_unreachable ();
27002 }
27003 return const0_rtx;
27004 }
27005 }
27006 else
27007 {
27008 if (VECTOR_MODE_P (mode))
27009 op = safe_vector_operand (op, mode);
27010
27011 /* If we aren't optimizing, only allow one memory operand to
27012 be generated. */
27013 if (memory_operand (op, mode))
27014 num_memory++;
27015
27016 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
27017 {
27018 if (optimize || !match || num_memory > 1)
27019 op = copy_to_mode_reg (mode, op);
27020 }
27021 else
27022 {
27023 op = copy_to_reg (op);
27024 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
27025 }
27026 }
27027
27028 args[i].op = op;
27029 args[i].mode = mode;
27030 }
27031
27032 switch (nargs)
27033 {
27034 case 1:
27035 pat = GEN_FCN (icode) (real_target, args[0].op);
27036 break;
27037 case 2:
27038 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
27039 break;
27040 case 3:
27041 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
27042 args[2].op);
27043 break;
27044 case 4:
27045 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
27046 args[2].op, args[3].op);
27047 break;
27048 default:
27049 gcc_unreachable ();
27050 }
27051
27052 if (! pat)
27053 return 0;
27054
27055 emit_insn (pat);
27056 return target;
27057 }
27058
27059 /* Subroutine of ix86_expand_builtin to take care of special insns
27060 with variable number of operands. */
27061
27062 static rtx
27063 ix86_expand_special_args_builtin (const struct builtin_description *d,
27064 tree exp, rtx target)
27065 {
27066 tree arg;
27067 rtx pat, op;
27068 unsigned int i, nargs, arg_adjust, memory;
27069 struct
27070 {
27071 rtx op;
27072 enum machine_mode mode;
27073 } args[3];
27074 enum insn_code icode = d->icode;
27075 bool last_arg_constant = false;
27076 const struct insn_data_d *insn_p = &insn_data[icode];
27077 enum machine_mode tmode = insn_p->operand[0].mode;
27078 enum { load, store } klass;
27079
27080 switch ((enum ix86_builtin_func_type) d->flag)
27081 {
27082 case VOID_FTYPE_VOID:
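/* For the vzeroupper intrinsic, the pattern operand marks an explicit
   intrinsic use so the vzeroupper optimization pass can tell it apart
   from ordinary calls (an assumption about how the pass reads it).  */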
27083 if (icode == CODE_FOR_avx_vzeroupper)
27084 target = GEN_INT (vzeroupper_intrinsic);
27085 emit_insn (GEN_FCN (icode) (target));
27086 return 0;
27087 case VOID_FTYPE_UINT64:
27088 case VOID_FTYPE_UNSIGNED:
27089 nargs = 0;
27090 klass = store;
27091 memory = 0;
27092 break;
27094 case UINT64_FTYPE_VOID:
27095 case UNSIGNED_FTYPE_VOID:
27096 nargs = 0;
27097 klass = load;
27098 memory = 0;
27099 break;
27100 case UINT64_FTYPE_PUNSIGNED:
27101 case V2DI_FTYPE_PV2DI:
27102 case V32QI_FTYPE_PCCHAR:
27103 case V16QI_FTYPE_PCCHAR:
27104 case V8SF_FTYPE_PCV4SF:
27105 case V8SF_FTYPE_PCFLOAT:
27106 case V4SF_FTYPE_PCFLOAT:
27107 case V4DF_FTYPE_PCV2DF:
27108 case V4DF_FTYPE_PCDOUBLE:
27109 case V2DF_FTYPE_PCDOUBLE:
27110 case VOID_FTYPE_PVOID:
27111 nargs = 1;
27112 klass = load;
27113 memory = 0;
27114 break;
27115 case VOID_FTYPE_PV2SF_V4SF:
27116 case VOID_FTYPE_PV4DI_V4DI:
27117 case VOID_FTYPE_PV2DI_V2DI:
27118 case VOID_FTYPE_PCHAR_V32QI:
27119 case VOID_FTYPE_PCHAR_V16QI:
27120 case VOID_FTYPE_PFLOAT_V8SF:
27121 case VOID_FTYPE_PFLOAT_V4SF:
27122 case VOID_FTYPE_PDOUBLE_V4DF:
27123 case VOID_FTYPE_PDOUBLE_V2DF:
27124 case VOID_FTYPE_PULONGLONG_ULONGLONG:
27125 case VOID_FTYPE_PINT_INT:
27126 nargs = 1;
27127 klass = store;
27128 /* Reserve memory operand for target. */
27129 memory = ARRAY_SIZE (args);
27130 break;
27131 case V4SF_FTYPE_V4SF_PCV2SF:
27132 case V2DF_FTYPE_V2DF_PCDOUBLE:
27133 nargs = 2;
27134 klass = load;
27135 memory = 1;
27136 break;
27137 case V8SF_FTYPE_PCV8SF_V8SI:
27138 case V4DF_FTYPE_PCV4DF_V4DI:
27139 case V4SF_FTYPE_PCV4SF_V4SI:
27140 case V2DF_FTYPE_PCV2DF_V2DI:
27141 nargs = 2;
27142 klass = load;
27143 memory = 0;
27144 break;
27145 case VOID_FTYPE_PV8SF_V8SI_V8SF:
27146 case VOID_FTYPE_PV4DF_V4DI_V4DF:
27147 case VOID_FTYPE_PV4SF_V4SI_V4SF:
27148 case VOID_FTYPE_PV2DF_V2DI_V2DF:
27149 nargs = 2;
27150 klass = store;
27151 /* Reserve memory operand for target. */
27152 memory = ARRAY_SIZE (args);
27153 break;
27154 case VOID_FTYPE_UINT_UINT_UINT:
27155 case VOID_FTYPE_UINT64_UINT_UINT:
27156 case UCHAR_FTYPE_UINT_UINT_UINT:
27157 case UCHAR_FTYPE_UINT64_UINT_UINT:
27158 nargs = 3;
27159 klass = load;
27160 memory = ARRAY_SIZE (args);
27161 last_arg_constant = true;
27162 break;
27163 default:
27164 gcc_unreachable ();
27165 }
27166
27167 gcc_assert (nargs <= ARRAY_SIZE (args));
27168
27169 if (klass == store)
27170 {
27171 arg = CALL_EXPR_ARG (exp, 0);
27172 op = expand_normal (arg);
27173 gcc_assert (target == 0);
27174 if (memory)
27175 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
27176 else
27177 target = force_reg (tmode, op);
27178 arg_adjust = 1;
27179 }
27180 else
27181 {
27182 arg_adjust = 0;
27183 if (optimize
27184 || target == 0
27185 || GET_MODE (target) != tmode
27186 || !insn_p->operand[0].predicate (target, tmode))
27187 target = gen_reg_rtx (tmode);
27188 }
27189
27190 for (i = 0; i < nargs; i++)
27191 {
27192 enum machine_mode mode = insn_p->operand[i + 1].mode;
27193 bool match;
27194
27195 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
27196 op = expand_normal (arg);
27197 match = insn_p->operand[i + 1].predicate (op, mode);
27198
27199 if (last_arg_constant && (i + 1) == nargs)
27200 {
27201 if (!match)
27202 {
27203 if (icode == CODE_FOR_lwp_lwpvalsi3
27204 || icode == CODE_FOR_lwp_lwpinssi3
27205 || icode == CODE_FOR_lwp_lwpvaldi3
27206 || icode == CODE_FOR_lwp_lwpinsdi3)
27207 error ("the last argument must be a 32-bit immediate");
27208 else
27209 error ("the last argument must be an 8-bit immediate");
27210 return const0_rtx;
27211 }
27212 }
27213 else
27214 {
27215 if (i == memory)
27216 {
27217 /* This must be the memory operand. */
27218 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
27219 gcc_assert (GET_MODE (op) == mode
27220 || GET_MODE (op) == VOIDmode);
27221 }
27222 else
27223 {
27224 /* This must be a register.  */
27225 if (VECTOR_MODE_P (mode))
27226 op = safe_vector_operand (op, mode);
27227
27228 gcc_assert (GET_MODE (op) == mode
27229 || GET_MODE (op) == VOIDmode);
27230 op = copy_to_mode_reg (mode, op);
27231 }
27232 }
27233
27234 args[i].op = op;
27235 args[i].mode = mode;
27236 }
27237
27238 switch (nargs)
27239 {
27240 case 0:
27241 pat = GEN_FCN (icode) (target);
27242 break;
27243 case 1:
27244 pat = GEN_FCN (icode) (target, args[0].op);
27245 break;
27246 case 2:
27247 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
27248 break;
27249 case 3:
27250 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
27251 break;
27252 default:
27253 gcc_unreachable ();
27254 }
27255
27256 if (! pat)
27257 return 0;
27258 emit_insn (pat);
27259 return klass == store ? 0 : target;
27260 }
27261
27262 /* Return the integer constant in ARG. Constrain it to be in the range
27263 of the subparts of VEC_TYPE; issue an error if not. */
27264
27265 static int
27266 get_element_number (tree vec_type, tree arg)
27267 {
27268 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
27269
27270 if (!host_integerp (arg, 1)
27271 || (elt = tree_low_cst (arg, 1), elt > max))
27272 {
27273 error ("selector must be an integer constant in the range 0..%wi", max);
27274 return 0;
27275 }
27276
27277 return elt;
27278 }
27279
27280 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27281 ix86_expand_vector_init. We DO have language-level syntax for this, in
27282 the form of (type){ init-list }. Except that since we can't place emms
27283 instructions from inside the compiler, we can't allow the use of MMX
27284 registers unless the user explicitly asks for it. So we do *not* define
27285 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
27286 we have builtins invoked by mmintrin.h that give us license to emit
27287 these sorts of instructions. */
27288
27289 static rtx
27290 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
27291 {
27292 enum machine_mode tmode = TYPE_MODE (type);
27293 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
27294 int i, n_elt = GET_MODE_NUNITS (tmode);
27295 rtvec v = rtvec_alloc (n_elt);
27296
27297 gcc_assert (VECTOR_MODE_P (tmode));
27298 gcc_assert (call_expr_nargs (exp) == n_elt);
27299
27300 for (i = 0; i < n_elt; ++i)
27301 {
27302 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
27303 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
27304 }
27305
27306 if (!target || !register_operand (target, tmode))
27307 target = gen_reg_rtx (tmode);
27308
27309 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
27310 return target;
27311 }
27312
27313 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27314 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
27315 had a language-level syntax for referencing vector elements. */
27316
27317 static rtx
27318 ix86_expand_vec_ext_builtin (tree exp, rtx target)
27319 {
27320 enum machine_mode tmode, mode0;
27321 tree arg0, arg1;
27322 int elt;
27323 rtx op0;
27324
27325 arg0 = CALL_EXPR_ARG (exp, 0);
27326 arg1 = CALL_EXPR_ARG (exp, 1);
27327
27328 op0 = expand_normal (arg0);
27329 elt = get_element_number (TREE_TYPE (arg0), arg1);
27330
27331 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27332 mode0 = TYPE_MODE (TREE_TYPE (arg0));
27333 gcc_assert (VECTOR_MODE_P (mode0));
27334
27335 op0 = force_reg (mode0, op0);
27336
27337 if (optimize || !target || !register_operand (target, tmode))
27338 target = gen_reg_rtx (tmode);
27339
27340 ix86_expand_vector_extract (true, target, op0, elt);
27341
27342 return target;
27343 }
27344
27345 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27346 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
27347 a language-level syntax for referencing vector elements. */
27348
27349 static rtx
27350 ix86_expand_vec_set_builtin (tree exp)
27351 {
27352 enum machine_mode tmode, mode1;
27353 tree arg0, arg1, arg2;
27354 int elt;
27355 rtx op0, op1, target;
27356
27357 arg0 = CALL_EXPR_ARG (exp, 0);
27358 arg1 = CALL_EXPR_ARG (exp, 1);
27359 arg2 = CALL_EXPR_ARG (exp, 2);
27360
27361 tmode = TYPE_MODE (TREE_TYPE (arg0));
27362 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27363 gcc_assert (VECTOR_MODE_P (tmode));
27364
27365 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
27366 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
27367 elt = get_element_number (TREE_TYPE (arg0), arg2);
27368
27369 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
27370 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
27371
27372 op0 = force_reg (tmode, op0);
27373 op1 = force_reg (mode1, op1);
27374
27375 /* OP0 is the source of these builtin functions and shouldn't be
27376 modified. Create a copy, use it and return it as target. */
27377 target = gen_reg_rtx (tmode);
27378 emit_move_insn (target, op0);
27379 ix86_expand_vector_set (true, target, op1, elt);
27380
27381 return target;
27382 }
27383
27384 /* Expand an expression EXP that calls a built-in function,
27385 with result going to TARGET if that's convenient
27386 (and in mode MODE if that's convenient).
27387 SUBTARGET may be used as the target for computing one of EXP's operands.
27388 IGNORE is nonzero if the value is to be ignored. */
27389
27390 static rtx
27391 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
27392 enum machine_mode mode ATTRIBUTE_UNUSED,
27393 int ignore ATTRIBUTE_UNUSED)
27394 {
27395 const struct builtin_description *d;
27396 size_t i;
27397 enum insn_code icode;
27398 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
27399 tree arg0, arg1, arg2;
27400 rtx op0, op1, op2, pat;
27401 enum machine_mode mode0, mode1, mode2;
27402 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
27403
27404 /* Determine whether the builtin function is available under the current ISA.
27405 Originally the builtin was not created if it wasn't applicable to the
27406 current ISA based on the command line switches. With function specific
27407 options, we need to check in the context of the function making the call
27408 whether it is supported. */
27409 if (ix86_builtins_isa[fcode].isa
27410 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
27411 {
27412 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
27413 NULL, NULL, false);
27414
27415 if (!opts)
27416 error ("%qE needs unknown isa option", fndecl);
27417 else
27418 {
27419 gcc_assert (opts != NULL);
27420 error ("%qE needs isa option %s", fndecl, opts);
27421 free (opts);
27422 }
27423 return const0_rtx;
27424 }
27425
27426 switch (fcode)
27427 {
27428 case IX86_BUILTIN_MASKMOVQ:
27429 case IX86_BUILTIN_MASKMOVDQU:
27430 icode = (fcode == IX86_BUILTIN_MASKMOVQ
27431 ? CODE_FOR_mmx_maskmovq
27432 : CODE_FOR_sse2_maskmovdqu);
27433 /* Note the arg order is different from the operand order. */
27434 arg1 = CALL_EXPR_ARG (exp, 0);
27435 arg2 = CALL_EXPR_ARG (exp, 1);
27436 arg0 = CALL_EXPR_ARG (exp, 2);
27437 op0 = expand_normal (arg0);
27438 op1 = expand_normal (arg1);
27439 op2 = expand_normal (arg2);
27440 mode0 = insn_data[icode].operand[0].mode;
27441 mode1 = insn_data[icode].operand[1].mode;
27442 mode2 = insn_data[icode].operand[2].mode;
27443
27444 op0 = force_reg (Pmode, op0);
27445 op0 = gen_rtx_MEM (mode1, op0);
27446
27447 if (!insn_data[icode].operand[0].predicate (op0, mode0))
27448 op0 = copy_to_mode_reg (mode0, op0);
27449 if (!insn_data[icode].operand[1].predicate (op1, mode1))
27450 op1 = copy_to_mode_reg (mode1, op1);
27451 if (!insn_data[icode].operand[2].predicate (op2, mode2))
27452 op2 = copy_to_mode_reg (mode2, op2);
27453 pat = GEN_FCN (icode) (op0, op1, op2);
27454 if (! pat)
27455 return 0;
27456 emit_insn (pat);
27457 return 0;
27458
27459 case IX86_BUILTIN_LDMXCSR:
27460 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
27461 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27462 emit_move_insn (target, op0);
27463 emit_insn (gen_sse_ldmxcsr (target));
27464 return 0;
27465
27466 case IX86_BUILTIN_STMXCSR:
27467 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27468 emit_insn (gen_sse_stmxcsr (target));
27469 return copy_to_mode_reg (SImode, target);
27470
27471 case IX86_BUILTIN_CLFLUSH:
27472 arg0 = CALL_EXPR_ARG (exp, 0);
27473 op0 = expand_normal (arg0);
27474 icode = CODE_FOR_sse2_clflush;
27475 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27476 op0 = copy_to_mode_reg (Pmode, op0);
27477
27478 emit_insn (gen_sse2_clflush (op0));
27479 return 0;
27480
27481 case IX86_BUILTIN_MONITOR:
27482 arg0 = CALL_EXPR_ARG (exp, 0);
27483 arg1 = CALL_EXPR_ARG (exp, 1);
27484 arg2 = CALL_EXPR_ARG (exp, 2);
27485 op0 = expand_normal (arg0);
27486 op1 = expand_normal (arg1);
27487 op2 = expand_normal (arg2);
27488 if (!REG_P (op0))
27489 op0 = copy_to_mode_reg (Pmode, op0);
27490 if (!REG_P (op1))
27491 op1 = copy_to_mode_reg (SImode, op1);
27492 if (!REG_P (op2))
27493 op2 = copy_to_mode_reg (SImode, op2);
27494 emit_insn (ix86_gen_monitor (op0, op1, op2));
27495 return 0;
27496
27497 case IX86_BUILTIN_MWAIT:
27498 arg0 = CALL_EXPR_ARG (exp, 0);
27499 arg1 = CALL_EXPR_ARG (exp, 1);
27500 op0 = expand_normal (arg0);
27501 op1 = expand_normal (arg1);
27502 if (!REG_P (op0))
27503 op0 = copy_to_mode_reg (SImode, op0);
27504 if (!REG_P (op1))
27505 op1 = copy_to_mode_reg (SImode, op1);
27506 emit_insn (gen_sse3_mwait (op0, op1));
27507 return 0;
27508
27509 case IX86_BUILTIN_VEC_INIT_V2SI:
27510 case IX86_BUILTIN_VEC_INIT_V4HI:
27511 case IX86_BUILTIN_VEC_INIT_V8QI:
27512 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
27513
27514 case IX86_BUILTIN_VEC_EXT_V2DF:
27515 case IX86_BUILTIN_VEC_EXT_V2DI:
27516 case IX86_BUILTIN_VEC_EXT_V4SF:
27517 case IX86_BUILTIN_VEC_EXT_V4SI:
27518 case IX86_BUILTIN_VEC_EXT_V8HI:
27519 case IX86_BUILTIN_VEC_EXT_V2SI:
27520 case IX86_BUILTIN_VEC_EXT_V4HI:
27521 case IX86_BUILTIN_VEC_EXT_V16QI:
27522 return ix86_expand_vec_ext_builtin (exp, target);
27523
27524 case IX86_BUILTIN_VEC_SET_V2DI:
27525 case IX86_BUILTIN_VEC_SET_V4SF:
27526 case IX86_BUILTIN_VEC_SET_V4SI:
27527 case IX86_BUILTIN_VEC_SET_V8HI:
27528 case IX86_BUILTIN_VEC_SET_V4HI:
27529 case IX86_BUILTIN_VEC_SET_V16QI:
27530 return ix86_expand_vec_set_builtin (exp);
27531
27532 case IX86_BUILTIN_VEC_PERM_V2DF:
27533 case IX86_BUILTIN_VEC_PERM_V4SF:
27534 case IX86_BUILTIN_VEC_PERM_V2DI:
27535 case IX86_BUILTIN_VEC_PERM_V4SI:
27536 case IX86_BUILTIN_VEC_PERM_V8HI:
27537 case IX86_BUILTIN_VEC_PERM_V16QI:
27538 case IX86_BUILTIN_VEC_PERM_V2DI_U:
27539 case IX86_BUILTIN_VEC_PERM_V4SI_U:
27540 case IX86_BUILTIN_VEC_PERM_V8HI_U:
27541 case IX86_BUILTIN_VEC_PERM_V16QI_U:
27542 case IX86_BUILTIN_VEC_PERM_V4DF:
27543 case IX86_BUILTIN_VEC_PERM_V8SF:
27544 return ix86_expand_vec_perm_builtin (exp);
27545
27546 case IX86_BUILTIN_INFQ:
27547 case IX86_BUILTIN_HUGE_VALQ:
27548 {
27549 REAL_VALUE_TYPE inf;
27550 rtx tmp;
27551
27552 real_inf (&inf);
27553 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
27554
27555 tmp = validize_mem (force_const_mem (mode, tmp));
27556
27557 if (target == 0)
27558 target = gen_reg_rtx (mode);
27559
27560 emit_move_insn (target, tmp);
27561 return target;
27562 }
27563
27564 case IX86_BUILTIN_LLWPCB:
27565 arg0 = CALL_EXPR_ARG (exp, 0);
27566 op0 = expand_normal (arg0);
27567 icode = CODE_FOR_lwp_llwpcb;
27568 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27569 op0 = copy_to_mode_reg (Pmode, op0);
27570 emit_insn (gen_lwp_llwpcb (op0));
27571 return 0;
27572
27573 case IX86_BUILTIN_SLWPCB:
27574 icode = CODE_FOR_lwp_slwpcb;
27575 if (!target
27576 || !insn_data[icode].operand[0].predicate (target, Pmode))
27577 target = gen_reg_rtx (Pmode);
27578 emit_insn (gen_lwp_slwpcb (target));
27579 return target;
27580
27581 case IX86_BUILTIN_BEXTRI32:
27582 case IX86_BUILTIN_BEXTRI64:
27583 arg0 = CALL_EXPR_ARG (exp, 0);
27584 arg1 = CALL_EXPR_ARG (exp, 1);
27585 op0 = expand_normal (arg0);
27586 op1 = expand_normal (arg1);
27587 icode = (fcode == IX86_BUILTIN_BEXTRI32
27588 ? CODE_FOR_tbm_bextri_si
27589 : CODE_FOR_tbm_bextri_di);
27590 if (!CONST_INT_P (op1))
27591 {
27592 error ("last argument must be an immediate");
27593 return const0_rtx;
27594 }
27595 else
27596 {
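/* The BEXTR control value packs the starting bit position in bits
   [7:0] and the field length in bits [15:8].  */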
27597 unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
27598 unsigned char lsb_index = INTVAL (op1) & 0xFF;
27599 op1 = GEN_INT (length);
27600 op2 = GEN_INT (lsb_index);
27601 pat = GEN_FCN (icode) (target, op0, op1, op2);
27602 if (pat)
27603 emit_insn (pat);
27604 return target;
27605 }
27606
27607 case IX86_BUILTIN_RDRAND16_STEP:
27608 icode = CODE_FOR_rdrandhi_1;
27609 mode0 = HImode;
27610 goto rdrand_step;
27611
27612 case IX86_BUILTIN_RDRAND32_STEP:
27613 icode = CODE_FOR_rdrandsi_1;
27614 mode0 = SImode;
27615 goto rdrand_step;
27616
27617 case IX86_BUILTIN_RDRAND64_STEP:
27618 icode = CODE_FOR_rdranddi_1;
27619 mode0 = DImode;
27620
27621 rdrand_step:
27622 op0 = gen_reg_rtx (mode0);
27623 emit_insn (GEN_FCN (icode) (op0));
27624
27625 op1 = gen_reg_rtx (SImode);
27626 emit_move_insn (op1, CONST1_RTX (SImode));
27627
27628 /* Emit SImode conditional move. */
27629 if (mode0 == HImode)
27630 {
27631 op2 = gen_reg_rtx (SImode);
27632 emit_insn (gen_zero_extendhisi2 (op2, op0));
27633 }
27634 else if (mode0 == SImode)
27635 op2 = op0;
27636 else
27637 op2 = gen_rtx_SUBREG (SImode, op0, 0);
27638
27639 pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
27640 const0_rtx);
27641 emit_insn (gen_rtx_SET (VOIDmode, op1,
27642 gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
27643 emit_move_insn (target, op1);
27644
27645 arg0 = CALL_EXPR_ARG (exp, 0);
27646 op1 = expand_normal (arg0);
27647 if (!address_operand (op1, VOIDmode))
27648 op1 = copy_addr_to_reg (op1);
27649 emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
27650 return target;
27651
27652 default:
27653 break;
27654 }
27655
27656 for (i = 0, d = bdesc_special_args;
27657 i < ARRAY_SIZE (bdesc_special_args);
27658 i++, d++)
27659 if (d->code == fcode)
27660 return ix86_expand_special_args_builtin (d, exp, target);
27661
27662 for (i = 0, d = bdesc_args;
27663 i < ARRAY_SIZE (bdesc_args);
27664 i++, d++)
27665 if (d->code == fcode)
27666 switch (fcode)
27667 {
27668 case IX86_BUILTIN_FABSQ:
27669 case IX86_BUILTIN_COPYSIGNQ:
27670 if (!TARGET_SSE2)
27671 /* Emit a normal call if SSE2 isn't available. */
27672 return expand_call (exp, target, ignore);
27673 default:
27674 return ix86_expand_args_builtin (d, exp, target);
27675 }
27676
27677 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
27678 if (d->code == fcode)
27679 return ix86_expand_sse_comi (d, exp, target);
27680
27681 for (i = 0, d = bdesc_pcmpestr;
27682 i < ARRAY_SIZE (bdesc_pcmpestr);
27683 i++, d++)
27684 if (d->code == fcode)
27685 return ix86_expand_sse_pcmpestr (d, exp, target);
27686
27687 for (i = 0, d = bdesc_pcmpistr;
27688 i < ARRAY_SIZE (bdesc_pcmpistr);
27689 i++, d++)
27690 if (d->code == fcode)
27691 return ix86_expand_sse_pcmpistr (d, exp, target);
27692
27693 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
27694 if (d->code == fcode)
27695 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
27696 (enum ix86_builtin_func_type)
27697 d->flag, d->comparison);
27698
27699 gcc_unreachable ();
27700 }
27701
27702 /* Returns a function decl for a vectorized version of the builtin function
27703 FNDECL with output vector type TYPE_OUT and input vector type TYPE_IN,
27704 or NULL_TREE if it is not available.  */
27705
27706 static tree
27707 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
27708 tree type_in)
27709 {
27710 enum machine_mode in_mode, out_mode;
27711 int in_n, out_n;
27712 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
27713
27714 if (TREE_CODE (type_out) != VECTOR_TYPE
27715 || TREE_CODE (type_in) != VECTOR_TYPE
27716 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
27717 return NULL_TREE;
27718
27719 out_mode = TYPE_MODE (TREE_TYPE (type_out));
27720 out_n = TYPE_VECTOR_SUBPARTS (type_out);
27721 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27722 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27723
27724 switch (fn)
27725 {
27726 case BUILT_IN_SQRT:
27727 if (out_mode == DFmode && in_mode == DFmode)
27728 {
27729 if (out_n == 2 && in_n == 2)
27730 return ix86_builtins[IX86_BUILTIN_SQRTPD];
27731 else if (out_n == 4 && in_n == 4)
27732 return ix86_builtins[IX86_BUILTIN_SQRTPD256];
27733 }
27734 break;
27735
27736 case BUILT_IN_SQRTF:
27737 if (out_mode == SFmode && in_mode == SFmode)
27738 {
27739 if (out_n == 4 && in_n == 4)
27740 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
27741 else if (out_n == 8 && in_n == 8)
27742 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
27743 }
27744 break;
27745
27746 case BUILT_IN_LRINT:
27747 if (out_mode == SImode && out_n == 4
27748 && in_mode == DFmode && in_n == 2)
27749 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
27750 break;
27751
27752 case BUILT_IN_LRINTF:
27753 if (out_mode == SImode && in_mode == SFmode)
27754 {
27755 if (out_n == 4 && in_n == 4)
27756 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
27757 else if (out_n == 8 && in_n == 8)
27758 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
27759 }
27760 break;
27761
27762 case BUILT_IN_COPYSIGN:
27763 if (out_mode == DFmode && in_mode == DFmode)
27764 {
27765 if (out_n == 2 && in_n == 2)
27766 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
27767 else if (out_n == 4 && in_n == 4)
27768 return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
27769 }
27770 break;
27771
27772 case BUILT_IN_COPYSIGNF:
27773 if (out_mode == SFmode && in_mode == SFmode)
27774 {
27775 if (out_n == 4 && in_n == 4)
27776 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
27777 else if (out_n == 8 && in_n == 8)
27778 return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
27779 }
27780 break;
27781
27782 case BUILT_IN_FLOOR:
27783 /* The round insn does not trap on denormals. */
27784 if (flag_trapping_math || !TARGET_ROUND)
27785 break;
27786
27787 if (out_mode == DFmode && in_mode == DFmode)
27788 {
27789 if (out_n == 2 && in_n == 2)
27790 return ix86_builtins[IX86_BUILTIN_FLOORPD];
27791 else if (out_n == 4 && in_n == 4)
27792 return ix86_builtins[IX86_BUILTIN_FLOORPD256];
27793 }
27794 break;
27795
27796 case BUILT_IN_FLOORF:
27797 /* The round insn does not trap on denormals. */
27798 if (flag_trapping_math || !TARGET_ROUND)
27799 break;
27800
27801 if (out_mode == SFmode && in_mode == SFmode)
27802 {
27803 if (out_n == 4 && in_n == 4)
27804 return ix86_builtins[IX86_BUILTIN_FLOORPS];
27805 else if (out_n == 8 && in_n == 8)
27806 return ix86_builtins[IX86_BUILTIN_FLOORPS256];
27807 }
27808 break;
27809
27810 case BUILT_IN_CEIL:
27811 /* The round insn does not trap on denormals. */
27812 if (flag_trapping_math || !TARGET_ROUND)
27813 break;
27814
27815 if (out_mode == DFmode && in_mode == DFmode)
27816 {
27817 if (out_n == 2 && in_n == 2)
27818 return ix86_builtins[IX86_BUILTIN_CEILPD];
27819 else if (out_n == 4 && in_n == 4)
27820 return ix86_builtins[IX86_BUILTIN_CEILPD256];
27821 }
27822 break;
27823
27824 case BUILT_IN_CEILF:
27825 /* The round insn does not trap on denormals. */
27826 if (flag_trapping_math || !TARGET_ROUND)
27827 break;
27828
27829 if (out_mode == SFmode && in_mode == SFmode)
27830 {
27831 if (out_n == 4 && in_n == 4)
27832 return ix86_builtins[IX86_BUILTIN_CEILPS];
27833 else if (out_n == 8 && in_n == 8)
27834 return ix86_builtins[IX86_BUILTIN_CEILPS256];
27835 }
27836 break;
27837
27838 case BUILT_IN_TRUNC:
27839 /* The round insn does not trap on denormals. */
27840 if (flag_trapping_math || !TARGET_ROUND)
27841 break;
27842
27843 if (out_mode == DFmode && in_mode == DFmode)
27844 {
27845 if (out_n == 2 && in_n == 2)
27846 return ix86_builtins[IX86_BUILTIN_TRUNCPD];
27847 else if (out_n == 4 && in_n == 4)
27848 return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
27849 }
27850 break;
27851
27852 case BUILT_IN_TRUNCF:
27853 /* The round insn does not trap on denormals. */
27854 if (flag_trapping_math || !TARGET_ROUND)
27855 break;
27856
27857 if (out_mode == SFmode && in_mode == SFmode)
27858 {
27859 if (out_n == 4 && in_n == 4)
27860 return ix86_builtins[IX86_BUILTIN_TRUNCPS];
27861 else if (out_n == 8 && in_n == 8)
27862 return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
27863 }
27864 break;
27865
27866 case BUILT_IN_RINT:
27867 /* The round insn does not trap on denormals. */
27868 if (flag_trapping_math || !TARGET_ROUND)
27869 break;
27870
27871 if (out_mode == DFmode && in_mode == DFmode)
27872 {
27873 if (out_n == 2 && in_n == 2)
27874 return ix86_builtins[IX86_BUILTIN_RINTPD];
27875 else if (out_n == 4 && in_n == 4)
27876 return ix86_builtins[IX86_BUILTIN_RINTPD256];
27877 }
27878 break;
27879
27880 case BUILT_IN_RINTF:
27881 /* The round insn does not trap on denormals. */
27882 if (flag_trapping_math || !TARGET_ROUND)
27883 break;
27884
27885 if (out_mode == SFmode && in_mode == SFmode)
27886 {
27887 if (out_n == 4 && in_n == 4)
27888 return ix86_builtins[IX86_BUILTIN_RINTPS];
27889 else if (out_n == 8 && in_n == 8)
27890 return ix86_builtins[IX86_BUILTIN_RINTPS256];
27891 }
27892 break;
27893
27894 case BUILT_IN_FMA:
27895 if (out_mode == DFmode && in_mode == DFmode)
27896 {
27897 if (out_n == 2 && in_n == 2)
27898 return ix86_builtins[IX86_BUILTIN_VFMADDPD];
27899 if (out_n == 4 && in_n == 4)
27900 return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
27901 }
27902 break;
27903
27904 case BUILT_IN_FMAF:
27905 if (out_mode == SFmode && in_mode == SFmode)
27906 {
27907 if (out_n == 4 && in_n == 4)
27908 return ix86_builtins[IX86_BUILTIN_VFMADDPS];
27909 if (out_n == 8 && in_n == 8)
27910 return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
27911 }
27912 break;
27913
27914 default:
27915 break;
27916 }
27917
27918 /* Dispatch to a handler for a vectorization library. */
27919 if (ix86_veclib_handler)
27920 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
27921 type_in);
27922
27923 return NULL_TREE;
27924 }
27925
27926 /* Handler for an SVML-style interface to
27927 a library with vectorized intrinsics. */
27928
27929 static tree
27930 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
27931 {
27932 char name[20];
27933 tree fntype, new_fndecl, args;
27934 unsigned arity;
27935 const char *bname;
27936 enum machine_mode el_mode, in_mode;
27937 int n, in_n;
27938
27939 /* SVML is suitable for unsafe math only.  */
27940 if (!flag_unsafe_math_optimizations)
27941 return NULL_TREE;
27942
27943 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27944 n = TYPE_VECTOR_SUBPARTS (type_out);
27945 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27946 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27947 if (el_mode != in_mode
27948 || n != in_n)
27949 return NULL_TREE;
27950
27951 switch (fn)
27952 {
27953 case BUILT_IN_EXP:
27954 case BUILT_IN_LOG:
27955 case BUILT_IN_LOG10:
27956 case BUILT_IN_POW:
27957 case BUILT_IN_TANH:
27958 case BUILT_IN_TAN:
27959 case BUILT_IN_ATAN:
27960 case BUILT_IN_ATAN2:
27961 case BUILT_IN_ATANH:
27962 case BUILT_IN_CBRT:
27963 case BUILT_IN_SINH:
27964 case BUILT_IN_SIN:
27965 case BUILT_IN_ASINH:
27966 case BUILT_IN_ASIN:
27967 case BUILT_IN_COSH:
27968 case BUILT_IN_COS:
27969 case BUILT_IN_ACOSH:
27970 case BUILT_IN_ACOS:
27971 if (el_mode != DFmode || n != 2)
27972 return NULL_TREE;
27973 break;
27974
27975 case BUILT_IN_EXPF:
27976 case BUILT_IN_LOGF:
27977 case BUILT_IN_LOG10F:
27978 case BUILT_IN_POWF:
27979 case BUILT_IN_TANHF:
27980 case BUILT_IN_TANF:
27981 case BUILT_IN_ATANF:
27982 case BUILT_IN_ATAN2F:
27983 case BUILT_IN_ATANHF:
27984 case BUILT_IN_CBRTF:
27985 case BUILT_IN_SINHF:
27986 case BUILT_IN_SINF:
27987 case BUILT_IN_ASINHF:
27988 case BUILT_IN_ASINF:
27989 case BUILT_IN_COSHF:
27990 case BUILT_IN_COSF:
27991 case BUILT_IN_ACOSHF:
27992 case BUILT_IN_ACOSF:
27993 if (el_mode != SFmode || n != 4)
27994 return NULL_TREE;
27995 break;
27996
27997 default:
27998 return NULL_TREE;
27999 }
28000
28001 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28002
28003 if (fn == BUILT_IN_LOGF)
28004 strcpy (name, "vmlsLn4");
28005 else if (fn == BUILT_IN_LOG)
28006 strcpy (name, "vmldLn2");
28007 else if (n == 4)
28008 {
28009 sprintf (name, "vmls%s", bname+10);
28010 name[strlen (name)-1] = '4';
28011 }
28012 else
28013 sprintf (name, "vmld%s2", bname+10);
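/* For example, BUILT_IN_SINF produces "vmlssin4" here and becomes
   "vmlsSin4" after the uppercase fixup below; BUILT_IN_SIN likewise
   becomes "vmldSin2".  */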
28014
28015 /* Convert to uppercase. */
28016 name[4] &= ~0x20;
28017
28018 arity = 0;
28019 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
28020 args = TREE_CHAIN (args))
28021 arity++;
28022
28023 if (arity == 1)
28024 fntype = build_function_type_list (type_out, type_in, NULL);
28025 else
28026 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28027
28028 /* Build a function declaration for the vectorized function. */
28029 new_fndecl = build_decl (BUILTINS_LOCATION,
28030 FUNCTION_DECL, get_identifier (name), fntype);
28031 TREE_PUBLIC (new_fndecl) = 1;
28032 DECL_EXTERNAL (new_fndecl) = 1;
28033 DECL_IS_NOVOPS (new_fndecl) = 1;
28034 TREE_READONLY (new_fndecl) = 1;
28035
28036 return new_fndecl;
28037 }
28038
28039 /* Handler for an ACML-style interface to
28040 a library with vectorized intrinsics. */
28041
28042 static tree
28043 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
28044 {
28045 char name[20] = "__vr.._";
28046 tree fntype, new_fndecl, args;
28047 unsigned arity;
28048 const char *bname;
28049 enum machine_mode el_mode, in_mode;
28050 int n, in_n;
28051
28052 /* ACML is 64-bit only and suitable for unsafe math only, as it
28053 does not correctly support parts of IEEE semantics that require
28054 full precision, such as denormals.  */
28055 if (!TARGET_64BIT
28056 || !flag_unsafe_math_optimizations)
28057 return NULL_TREE;
28058
28059 el_mode = TYPE_MODE (TREE_TYPE (type_out));
28060 n = TYPE_VECTOR_SUBPARTS (type_out);
28061 in_mode = TYPE_MODE (TREE_TYPE (type_in));
28062 in_n = TYPE_VECTOR_SUBPARTS (type_in);
28063 if (el_mode != in_mode
28064 || n != in_n)
28065 return NULL_TREE;
28066
28067 switch (fn)
28068 {
28069 case BUILT_IN_SIN:
28070 case BUILT_IN_COS:
28071 case BUILT_IN_EXP:
28072 case BUILT_IN_LOG:
28073 case BUILT_IN_LOG2:
28074 case BUILT_IN_LOG10:
28075 name[4] = 'd';
28076 name[5] = '2';
28077 if (el_mode != DFmode
28078 || n != 2)
28079 return NULL_TREE;
28080 break;
28081
28082 case BUILT_IN_SINF:
28083 case BUILT_IN_COSF:
28084 case BUILT_IN_EXPF:
28085 case BUILT_IN_POWF:
28086 case BUILT_IN_LOGF:
28087 case BUILT_IN_LOG2F:
28088 case BUILT_IN_LOG10F:
28089 name[4] = 's';
28090 name[5] = '4';
28091 if (el_mode != SFmode
28092 || n != 4)
28093 return NULL_TREE;
28094 break;
28095
28096 default:
28097 return NULL_TREE;
28098 }
28099
28100 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28101 sprintf (name + 7, "%s", bname+10);
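/* For example, BUILT_IN_SIN yields "__vrd2_sin" here and
   BUILT_IN_SINF yields "__vrs4_sinf".  */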
28102
28103 arity = 0;
28104 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
28105 args = TREE_CHAIN (args))
28106 arity++;
28107
28108 if (arity == 1)
28109 fntype = build_function_type_list (type_out, type_in, NULL);
28110 else
28111 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28112
28113 /* Build a function declaration for the vectorized function. */
28114 new_fndecl = build_decl (BUILTINS_LOCATION,
28115 FUNCTION_DECL, get_identifier (name), fntype);
28116 TREE_PUBLIC (new_fndecl) = 1;
28117 DECL_EXTERNAL (new_fndecl) = 1;
28118 DECL_IS_NOVOPS (new_fndecl) = 1;
28119 TREE_READONLY (new_fndecl) = 1;
28120
28121 return new_fndecl;
28122 }
28123
28124
28125 /* Returns a decl of a function that implements conversion of an integer vector
28126 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
28127 are the types involved when converting according to CODE.
28128 Return NULL_TREE if it is not available. */
28129
28130 static tree
28131 ix86_vectorize_builtin_conversion (unsigned int code,
28132 tree dest_type, tree src_type)
28133 {
28134 if (! TARGET_SSE2)
28135 return NULL_TREE;
28136
28137 switch (code)
28138 {
28139 case FLOAT_EXPR:
28140 switch (TYPE_MODE (src_type))
28141 {
28142 case V4SImode:
28143 switch (TYPE_MODE (dest_type))
28144 {
28145 case V4SFmode:
28146 return (TYPE_UNSIGNED (src_type)
28147 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
28148 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
28149 case V4DFmode:
28150 return (TYPE_UNSIGNED (src_type)
28151 ? NULL_TREE
28152 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
28153 default:
28154 return NULL_TREE;
28155 }
28156 break;
28157 case V8SImode:
28158 switch (TYPE_MODE (dest_type))
28159 {
28160 case V8SFmode:
28161 return (TYPE_UNSIGNED (src_type)
28162 ? NULL_TREE
28163 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS256]);
28164 default:
28165 return NULL_TREE;
28166 }
28167 break;
28168 default:
28169 return NULL_TREE;
28170 }
28171
28172 case FIX_TRUNC_EXPR:
28173 switch (TYPE_MODE (dest_type))
28174 {
28175 case V4SImode:
28176 switch (TYPE_MODE (src_type))
28177 {
28178 case V4SFmode:
28179 return (TYPE_UNSIGNED (dest_type)
28180 ? NULL_TREE
28181 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
28182 case V4DFmode:
28183 return (TYPE_UNSIGNED (dest_type)
28184 ? NULL_TREE
28185 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
28186 default:
28187 return NULL_TREE;
28188 }
28189 break;
28190
28191 case V8SImode:
28192 switch (TYPE_MODE (src_type))
28193 {
28194 case V8SFmode:
28195 return (TYPE_UNSIGNED (dest_type)
28196 ? NULL_TREE
28197 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
28198 default:
28199 return NULL_TREE;
28200 }
28201 break;
28202
28203 default:
28204 return NULL_TREE;
28205 }
28206
28207 default:
28208 return NULL_TREE;
28209 }
28210
28211 return NULL_TREE;
28212 }
28213
28214 /* Returns a code for a target-specific builtin that implements
28215 reciprocal of the function, or NULL_TREE if not available. */
28216
28217 static tree
28218 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
28219 bool sqrt ATTRIBUTE_UNUSED)
28220 {
28221 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
28222 && flag_finite_math_only && !flag_trapping_math
28223 && flag_unsafe_math_optimizations))
28224 return NULL_TREE;
28225
28226 if (md_fn)
28227 /* Machine dependent builtins. */
28228 switch (fn)
28229 {
28230 /* Vectorized version of sqrt to rsqrt conversion. */
28231 case IX86_BUILTIN_SQRTPS_NR:
28232 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
28233
28234 case IX86_BUILTIN_SQRTPS_NR256:
28235 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];
28236
28237 default:
28238 return NULL_TREE;
28239 }
28240 else
28241 /* Normal builtins. */
28242 switch (fn)
28243 {
28244 /* Sqrt to rsqrt conversion. */
28245 case BUILT_IN_SQRTF:
28246 return ix86_builtins[IX86_BUILTIN_RSQRTF];
28247
28248 default:
28249 return NULL_TREE;
28250 }
28251 }
28252 \f
28253 /* Helper for avx_vpermilps256_operand et al. This is also used by
28254 the expansion functions to turn the parallel back into a mask.
28255 The return value is 0 for no match and the imm8+1 for a match. */
28256
28257 int
28258 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
28259 {
28260 unsigned i, nelt = GET_MODE_NUNITS (mode);
28261 unsigned mask = 0;
28262 unsigned char ipar[8];
28263
28264 if (XVECLEN (par, 0) != (int) nelt)
28265 return 0;
28266
28267 /* Validate that all of the elements are constants, and not totally
28268 out of range. Copy the data into an integral array to make the
28269 subsequent checks easier. */
28270 for (i = 0; i < nelt; ++i)
28271 {
28272 rtx er = XVECEXP (par, 0, i);
28273 unsigned HOST_WIDE_INT ei;
28274
28275 if (!CONST_INT_P (er))
28276 return 0;
28277 ei = INTVAL (er);
28278 if (ei >= nelt)
28279 return 0;
28280 ipar[i] = ei;
28281 }
28282
28283 switch (mode)
28284 {
28285 case V4DFmode:
28286 /* In the 256-bit DFmode case, we can only move elements within
28287 a 128-bit lane. */
28288 for (i = 0; i < 2; ++i)
28289 {
28290 if (ipar[i] >= 2)
28291 return 0;
28292 mask |= ipar[i] << i;
28293 }
28294 for (i = 2; i < 4; ++i)
28295 {
28296 if (ipar[i] < 2)
28297 return 0;
28298 mask |= (ipar[i] - 2) << i;
28299 }
28300 break;
28301
28302 case V8SFmode:
28303 /* In the 256-bit SFmode case, we have full freedom of movement
28304 within the low 128-bit lane, but the high 128-bit lane must
28305 mirror the exact same pattern. */
28306 for (i = 0; i < 4; ++i)
28307 if (ipar[i] + 4 != ipar[i + 4])
28308 return 0;
28309 nelt = 4;
28310 /* FALLTHRU */
28311
28312 case V2DFmode:
28313 case V4SFmode:
28314 /* In the 128-bit case, we've full freedom in the placement of
28315 the elements from the source operand. */
28316 for (i = 0; i < nelt; ++i)
28317 mask |= ipar[i] << (i * (nelt / 2));
28318 break;
28319
28320 default:
28321 gcc_unreachable ();
28322 }
28323
28324 /* Make sure success has a non-zero value by adding one. */
28325 return mask + 1;
28326 }
28327
28328 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
28329 the expansion functions to turn the parallel back into a mask.
28330 The return value is 0 for no match and the imm8+1 for a match. */
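/* Worked example (editorial sketch, not part of the original source): for
   V8SFmode, nelt is 8 and nelt2 is 4, so each half of the parallel must
   select four consecutive elements starting at a multiple of four.  A
   hypothetical parallel [4 5 6 7 0 1 2 3] gives e = 1 for the low half and
   e = 0 for the high half, so mask = 1 | 0<<4 = 0x01 and the function
   returns 0x02 (mask + 1) - the single-operand lane-swap form of
   vperm2f128.  */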
28331
28332 int
28333 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
28334 {
28335 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
28336 unsigned mask = 0;
28337 unsigned char ipar[8];
28338
28339 if (XVECLEN (par, 0) != (int) nelt)
28340 return 0;
28341
28342 /* Validate that all of the elements are constants, and not totally
28343 out of range. Copy the data into an integral array to make the
28344 subsequent checks easier. */
28345 for (i = 0; i < nelt; ++i)
28346 {
28347 rtx er = XVECEXP (par, 0, i);
28348 unsigned HOST_WIDE_INT ei;
28349
28350 if (!CONST_INT_P (er))
28351 return 0;
28352 ei = INTVAL (er);
28353 if (ei >= 2 * nelt)
28354 return 0;
28355 ipar[i] = ei;
28356 }
28357
28358   /* Validate that each half of the permute selects consecutive elements.  */
28359 for (i = 0; i < nelt2 - 1; ++i)
28360 if (ipar[i] + 1 != ipar[i + 1])
28361 return 0;
28362 for (i = nelt2; i < nelt - 1; ++i)
28363 if (ipar[i] + 1 != ipar[i + 1])
28364 return 0;
28365
28366 /* Reconstruct the mask. */
28367 for (i = 0; i < 2; ++i)
28368 {
28369 unsigned e = ipar[i * nelt2];
28370 if (e % nelt2)
28371 return 0;
28372 e /= nelt2;
28373 mask |= e << (i * 4);
28374 }
28375
28376 /* Make sure success has a non-zero value by adding one. */
28377 return mask + 1;
28378 }
28379 \f
28380
28381 /* Store OPERAND to memory after reload is completed.  This means
28382 that we can't easily use assign_stack_local. */
28383 rtx
28384 ix86_force_to_memory (enum machine_mode mode, rtx operand)
28385 {
28386 rtx result;
28387
28388 gcc_assert (reload_completed);
28389 if (ix86_using_red_zone ())
28390 {
28391 result = gen_rtx_MEM (mode,
28392 gen_rtx_PLUS (Pmode,
28393 stack_pointer_rtx,
28394 GEN_INT (-RED_ZONE_SIZE)));
28395 emit_move_insn (result, operand);
28396 }
28397 else if (TARGET_64BIT)
28398 {
28399 switch (mode)
28400 {
28401 case HImode:
28402 case SImode:
28403 operand = gen_lowpart (DImode, operand);
28404 /* FALLTHRU */
28405 case DImode:
28406 emit_insn (
28407 gen_rtx_SET (VOIDmode,
28408 gen_rtx_MEM (DImode,
28409 gen_rtx_PRE_DEC (DImode,
28410 stack_pointer_rtx)),
28411 operand));
28412 break;
28413 default:
28414 gcc_unreachable ();
28415 }
28416 result = gen_rtx_MEM (mode, stack_pointer_rtx);
28417 }
28418 else
28419 {
28420 switch (mode)
28421 {
28422 case DImode:
28423 {
28424 rtx operands[2];
28425 split_double_mode (mode, &operand, 1, operands, operands + 1);
28426 emit_insn (
28427 gen_rtx_SET (VOIDmode,
28428 gen_rtx_MEM (SImode,
28429 gen_rtx_PRE_DEC (Pmode,
28430 stack_pointer_rtx)),
28431 operands[1]));
28432 emit_insn (
28433 gen_rtx_SET (VOIDmode,
28434 gen_rtx_MEM (SImode,
28435 gen_rtx_PRE_DEC (Pmode,
28436 stack_pointer_rtx)),
28437 operands[0]));
28438 }
28439 break;
28440 case HImode:
28441 /* Store HImodes as SImodes. */
28442 operand = gen_lowpart (SImode, operand);
28443 /* FALLTHRU */
28444 case SImode:
28445 emit_insn (
28446 gen_rtx_SET (VOIDmode,
28447 gen_rtx_MEM (GET_MODE (operand),
28448 gen_rtx_PRE_DEC (SImode,
28449 stack_pointer_rtx)),
28450 operand));
28451 break;
28452 default:
28453 gcc_unreachable ();
28454 }
28455 result = gen_rtx_MEM (mode, stack_pointer_rtx);
28456 }
28457 return result;
28458 }
28459
28460 /* Free the operand from memory.  */
28461 void
28462 ix86_free_from_memory (enum machine_mode mode)
28463 {
28464 if (!ix86_using_red_zone ())
28465 {
28466 int size;
28467
28468 if (mode == DImode || TARGET_64BIT)
28469 size = 8;
28470 else
28471 size = 4;
28472 /* Use LEA to deallocate stack space. In peephole2 it will be converted
28473          to a pop or add instruction if registers are available.  */
28474 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
28475 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
28476 GEN_INT (size))));
28477 }
28478 }
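/* Editorial usage sketch (assumed pairing, not from the original source):
   the two helpers above are intended to be used together after reload,
   e.g.

     rtx mem = ix86_force_to_memory (SImode, operand);
     ... emit insns that use MEM as a memory operand ...
     ix86_free_from_memory (SImode);

   When the red zone is usable, ix86_force_to_memory stores below the stack
   pointer without adjusting it and ix86_free_from_memory emits nothing.  */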
28479
28480 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
28481
28482 Put float CONST_DOUBLE in the constant pool instead of fp regs.
28483 QImode must go into class Q_REGS.
28484 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
28485 movdf to do mem-to-mem moves through integer regs. */
28486
28487 static reg_class_t
28488 ix86_preferred_reload_class (rtx x, reg_class_t regclass)
28489 {
28490 enum machine_mode mode = GET_MODE (x);
28491
28492 /* We're only allowed to return a subclass of CLASS. Many of the
28493 following checks fail for NO_REGS, so eliminate that early. */
28494 if (regclass == NO_REGS)
28495 return NO_REGS;
28496
28497 /* All classes can load zeros. */
28498 if (x == CONST0_RTX (mode))
28499 return regclass;
28500
28501 /* Force constants into memory if we are loading a (nonzero) constant into
28502 an MMX or SSE register. This is because there are no MMX/SSE instructions
28503 to load from a constant. */
28504 if (CONSTANT_P (x)
28505 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
28506 return NO_REGS;
28507
28508 /* Prefer SSE regs only, if we can use them for math. */
28509 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
28510 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
28511
28512 /* Floating-point constants need more complex checks. */
28513 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
28514 {
28515 /* General regs can load everything. */
28516 if (reg_class_subset_p (regclass, GENERAL_REGS))
28517 return regclass;
28518
28519 /* Floats can load 0 and 1 plus some others. Note that we eliminated
28520 zero above. We only want to wind up preferring 80387 registers if
28521 we plan on doing computation with them. */
28522 if (TARGET_80387
28523 && standard_80387_constant_p (x))
28524 {
28525 /* Limit class to non-sse. */
28526 if (regclass == FLOAT_SSE_REGS)
28527 return FLOAT_REGS;
28528 if (regclass == FP_TOP_SSE_REGS)
28529 return FP_TOP_REG;
28530 if (regclass == FP_SECOND_SSE_REGS)
28531 return FP_SECOND_REG;
28532 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
28533 return regclass;
28534 }
28535
28536 return NO_REGS;
28537 }
28538
28539 /* Generally when we see PLUS here, it's the function invariant
28540      (plus soft-fp const_int), which can only be computed into general
28541 regs. */
28542 if (GET_CODE (x) == PLUS)
28543 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
28544
28545 /* QImode constants are easy to load, but non-constant QImode data
28546 must go into Q_REGS. */
28547 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
28548 {
28549 if (reg_class_subset_p (regclass, Q_REGS))
28550 return regclass;
28551 if (reg_class_subset_p (Q_REGS, regclass))
28552 return Q_REGS;
28553 return NO_REGS;
28554 }
28555
28556 return regclass;
28557 }
28558
28559 /* Discourage putting floating-point values in SSE registers unless
28560 SSE math is being used, and likewise for the 387 registers. */
28561 static reg_class_t
28562 ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
28563 {
28564 enum machine_mode mode = GET_MODE (x);
28565
28566 /* Restrict the output reload class to the register bank that we are doing
28567 math on. If we would like not to return a subset of CLASS, reject this
28568 alternative: if reload cannot do this, it will still use its choice. */
28569 mode = GET_MODE (x);
28570 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
28571 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
28572
28573 if (X87_FLOAT_MODE_P (mode))
28574 {
28575 if (regclass == FP_TOP_SSE_REGS)
28576 return FP_TOP_REG;
28577 else if (regclass == FP_SECOND_SSE_REGS)
28578 return FP_SECOND_REG;
28579 else
28580 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
28581 }
28582
28583 return regclass;
28584 }
28585
28586 static reg_class_t
28587 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
28588 enum machine_mode mode,
28589 secondary_reload_info *sri ATTRIBUTE_UNUSED)
28590 {
28591 /* QImode spills from non-QI registers require
28592 intermediate register on 32bit targets. */
28593 if (!TARGET_64BIT
28594 && !in_p && mode == QImode
28595 && (rclass == GENERAL_REGS
28596 || rclass == LEGACY_REGS
28597 || rclass == INDEX_REGS))
28598 {
28599 int regno;
28600
28601 if (REG_P (x))
28602 regno = REGNO (x);
28603 else
28604 regno = -1;
28605
28606 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
28607 regno = true_regnum (x);
28608
28609 /* Return Q_REGS if the operand is in memory. */
28610 if (regno == -1)
28611 return Q_REGS;
28612 }
28613
28614 /* This condition handles corner case where an expression involving
28615 pointers gets vectorized. We're trying to use the address of a
28616 stack slot as a vector initializer.
28617
28618 (set (reg:V2DI 74 [ vect_cst_.2 ])
28619 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
28620
28621 Eventually frame gets turned into sp+offset like this:
28622
28623 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28624 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28625 (const_int 392 [0x188]))))
28626
28627 That later gets turned into:
28628
28629 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28630 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28631 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
28632
28633 We'll have the following reload recorded:
28634
28635 Reload 0: reload_in (DI) =
28636 (plus:DI (reg/f:DI 7 sp)
28637 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
28638 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28639 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
28640 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
28641 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28642 reload_reg_rtx: (reg:V2DI 22 xmm1)
28643
28644 Which isn't going to work since SSE instructions can't handle scalar
28645 additions. Returning GENERAL_REGS forces the addition into integer
28646 register and reload can handle subsequent reloads without problems. */
28647
28648 if (in_p && GET_CODE (x) == PLUS
28649 && SSE_CLASS_P (rclass)
28650 && SCALAR_INT_MODE_P (mode))
28651 return GENERAL_REGS;
28652
28653 return NO_REGS;
28654 }
28655
28656 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
28657
28658 static bool
28659 ix86_class_likely_spilled_p (reg_class_t rclass)
28660 {
28661 switch (rclass)
28662 {
28663 case AREG:
28664 case DREG:
28665 case CREG:
28666 case BREG:
28667 case AD_REGS:
28668 case SIREG:
28669 case DIREG:
28670 case SSE_FIRST_REG:
28671 case FP_TOP_REG:
28672 case FP_SECOND_REG:
28673 return true;
28674
28675 default:
28676 break;
28677 }
28678
28679 return false;
28680 }
28681
28682 /* If we are copying between general and FP registers, we need a memory
28683 location. The same is true for SSE and MMX registers.
28684
28685 To optimize register_move_cost performance, allow inline variant.
28686
28687    The macro can't work reliably when one of the CLASSES is a class containing
28688    registers from multiple units (SSE, MMX, integer).  We avoid this by never
28689    combining those units in a single alternative in the machine description.
28690 Ensure that this constraint holds to avoid unexpected surprises.
28691
28692 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
28693 enforce these sanity checks. */
28694
28695 static inline bool
28696 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28697 enum machine_mode mode, int strict)
28698 {
28699 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
28700 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
28701 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
28702 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
28703 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
28704 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
28705 {
28706 gcc_assert (!strict);
28707 return true;
28708 }
28709
28710 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
28711 return true;
28712
28713 /* ??? This is a lie. We do have moves between mmx/general, and for
28714 mmx/sse2. But by saying we need secondary memory we discourage the
28715 register allocator from using the mmx registers unless needed. */
28716 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
28717 return true;
28718
28719 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28720 {
28721 /* SSE1 doesn't have any direct moves from other classes. */
28722 if (!TARGET_SSE2)
28723 return true;
28724
28725 /* If the target says that inter-unit moves are more expensive
28726 than moving through memory, then don't generate them. */
28727 if (!TARGET_INTER_UNIT_MOVES)
28728 return true;
28729
28730 /* Between SSE and general, we have moves no larger than word size. */
28731 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
28732 return true;
28733 }
28734
28735 return false;
28736 }
28737
28738 bool
28739 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28740 enum machine_mode mode, int strict)
28741 {
28742 return inline_secondary_memory_needed (class1, class2, mode, strict);
28743 }
28744
28745 /* Return true if the registers in CLASS cannot represent the change from
28746 modes FROM to TO. */
28747
28748 bool
28749 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
28750 enum reg_class regclass)
28751 {
28752 if (from == to)
28753 return false;
28754
28755 /* x87 registers can't do subreg at all, as all values are reformatted
28756 to extended precision. */
28757 if (MAYBE_FLOAT_CLASS_P (regclass))
28758 return true;
28759
28760 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
28761 {
28762 /* Vector registers do not support QI or HImode loads. If we don't
28763 disallow a change to these modes, reload will assume it's ok to
28764 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
28765 the vec_dupv4hi pattern. */
28766 if (GET_MODE_SIZE (from) < 4)
28767 return true;
28768
28769 /* Vector registers do not support subreg with nonzero offsets, which
28770 are otherwise valid for integer registers. Since we can't see
28771 whether we have a nonzero offset from here, prohibit all
28772 nonparadoxical subregs changing size. */
28773 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
28774 return true;
28775 }
28776
28777 return false;
28778 }
28779
28780 /* Return the cost of moving data of mode M between a
28781 register and memory. A value of 2 is the default; this cost is
28782 relative to those in `REGISTER_MOVE_COST'.
28783
28784 This function is used extensively by register_move_cost that is used to
28785 build tables at startup. Make it inline in this case.
28786 When IN is 2, return maximum of in and out move cost.
28787
28788 If moving between registers and memory is more expensive than
28789 between two registers, you should define this macro to express the
28790 relative cost.
28791
28792    Also model the increased cost of moving QImode registers in
28793    non-Q_REGS classes.
28794 */
28795 static inline int
28796 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
28797 int in)
28798 {
28799 int cost;
28800 if (FLOAT_CLASS_P (regclass))
28801 {
28802 int index;
28803 switch (mode)
28804 {
28805 case SFmode:
28806 index = 0;
28807 break;
28808 case DFmode:
28809 index = 1;
28810 break;
28811 case XFmode:
28812 index = 2;
28813 break;
28814 default:
28815 return 100;
28816 }
28817 if (in == 2)
28818 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
28819 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
28820 }
28821 if (SSE_CLASS_P (regclass))
28822 {
28823 int index;
28824 switch (GET_MODE_SIZE (mode))
28825 {
28826 case 4:
28827 index = 0;
28828 break;
28829 case 8:
28830 index = 1;
28831 break;
28832 case 16:
28833 index = 2;
28834 break;
28835 default:
28836 return 100;
28837 }
28838 if (in == 2)
28839 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
28840 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
28841 }
28842 if (MMX_CLASS_P (regclass))
28843 {
28844 int index;
28845 switch (GET_MODE_SIZE (mode))
28846 {
28847 case 4:
28848 index = 0;
28849 break;
28850 case 8:
28851 index = 1;
28852 break;
28853 default:
28854 return 100;
28855 }
28856       if (in == 2)
28857 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
28858 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
28859 }
28860 switch (GET_MODE_SIZE (mode))
28861 {
28862 case 1:
28863 if (Q_CLASS_P (regclass) || TARGET_64BIT)
28864 {
28865 if (!in)
28866 return ix86_cost->int_store[0];
28867 if (TARGET_PARTIAL_REG_DEPENDENCY
28868 && optimize_function_for_speed_p (cfun))
28869 cost = ix86_cost->movzbl_load;
28870 else
28871 cost = ix86_cost->int_load[0];
28872 if (in == 2)
28873 return MAX (cost, ix86_cost->int_store[0]);
28874 return cost;
28875 }
28876 else
28877 {
28878 if (in == 2)
28879 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
28880 if (in)
28881 return ix86_cost->movzbl_load;
28882 else
28883 return ix86_cost->int_store[0] + 4;
28884 }
28885 break;
28886 case 2:
28887 if (in == 2)
28888 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
28889 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
28890 default:
28891 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
28892 if (mode == TFmode)
28893 mode = XFmode;
28894 if (in == 2)
28895 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
28896 else if (in)
28897 cost = ix86_cost->int_load[2];
28898 else
28899 cost = ix86_cost->int_store[2];
28900 return (cost * (((int) GET_MODE_SIZE (mode)
28901 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
28902 }
28903 }
28904
28905 static int
28906 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
28907 bool in)
28908 {
28909 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
28910 }
28911
28912
28913 /* Return the cost of moving data from a register in class CLASS1 to
28914 one in class CLASS2.
28915
28916 It is not required that the cost always equal 2 when FROM is the same as TO;
28917 on some machines it is expensive to move between registers if they are not
28918 general registers. */
28919
28920 static int
28921 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
28922 reg_class_t class2_i)
28923 {
28924 enum reg_class class1 = (enum reg_class) class1_i;
28925 enum reg_class class2 = (enum reg_class) class2_i;
28926
28927 /* In case we require secondary memory, compute cost of the store followed
28928 by load. In order to avoid bad register allocation choices, we need
28929 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
28930
28931 if (inline_secondary_memory_needed (class1, class2, mode, 0))
28932 {
28933 int cost = 1;
28934
28935 cost += inline_memory_move_cost (mode, class1, 2);
28936 cost += inline_memory_move_cost (mode, class2, 2);
28937
28938       /* In the case of copying from a general purpose register we may emit
28939          multiple stores followed by a single load, causing a memory size
28940          mismatch stall.  Count this as an arbitrarily high cost of 20.  */
28941 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
28942 cost += 20;
28943
28944 /* In the case of FP/MMX moves, the registers actually overlap, and we
28945 have to switch modes in order to treat them differently. */
28946 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
28947 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
28948 cost += 20;
28949
28950 return cost;
28951 }
28952
28953 /* Moves between SSE/MMX and integer unit are expensive. */
28954 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
28955 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28956
28957 /* ??? By keeping returned value relatively high, we limit the number
28958 of moves between integer and MMX/SSE registers for all targets.
28959 Additionally, high value prevents problem with x86_modes_tieable_p(),
28960 where integer modes in MMX/SSE registers are not tieable
28961 because of missing QImode and HImode moves to, from or between
28962 MMX/SSE registers. */
28963 return MAX (8, ix86_cost->mmxsse_to_integer);
28964
28965 if (MAYBE_FLOAT_CLASS_P (class1))
28966 return ix86_cost->fp_move;
28967 if (MAYBE_SSE_CLASS_P (class1))
28968 return ix86_cost->sse_move;
28969 if (MAYBE_MMX_CLASS_P (class1))
28970 return ix86_cost->mmx_move;
28971 return 2;
28972 }
28973
28974 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
28975
28976 bool
28977 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
28978 {
28979 /* Flags and only flags can only hold CCmode values. */
28980 if (CC_REGNO_P (regno))
28981 return GET_MODE_CLASS (mode) == MODE_CC;
28982 if (GET_MODE_CLASS (mode) == MODE_CC
28983 || GET_MODE_CLASS (mode) == MODE_RANDOM
28984 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
28985 return 0;
28986 if (FP_REGNO_P (regno))
28987 return VALID_FP_MODE_P (mode);
28988 if (SSE_REGNO_P (regno))
28989 {
28990 /* We implement the move patterns for all vector modes into and
28991 out of SSE registers, even when no operation instructions
28992 are available. OImode move is available only when AVX is
28993 enabled. */
28994 return ((TARGET_AVX && mode == OImode)
28995 || VALID_AVX256_REG_MODE (mode)
28996 || VALID_SSE_REG_MODE (mode)
28997 || VALID_SSE2_REG_MODE (mode)
28998 || VALID_MMX_REG_MODE (mode)
28999 || VALID_MMX_REG_MODE_3DNOW (mode));
29000 }
29001 if (MMX_REGNO_P (regno))
29002 {
29003 /* We implement the move patterns for 3DNOW modes even in MMX mode,
29004 so if the register is available at all, then we can move data of
29005 the given mode into or out of it. */
29006 return (VALID_MMX_REG_MODE (mode)
29007 || VALID_MMX_REG_MODE_3DNOW (mode));
29008 }
29009
29010 if (mode == QImode)
29011 {
29012 /* Take care for QImode values - they can be in non-QI regs,
29013 but then they do cause partial register stalls. */
29014 if (regno <= BX_REG || TARGET_64BIT)
29015 return 1;
29016 if (!TARGET_PARTIAL_REG_STALL)
29017 return 1;
29018 return reload_in_progress || reload_completed;
29019 }
29020 /* We handle both integer and floats in the general purpose registers. */
29021 else if (VALID_INT_MODE_P (mode))
29022 return 1;
29023 else if (VALID_FP_MODE_P (mode))
29024 return 1;
29025 else if (VALID_DFP_MODE_P (mode))
29026 return 1;
29027 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
29028 on to use that value in smaller contexts, this can easily force a
29029 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
29030 supporting DImode, allow it. */
29031 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
29032 return 1;
29033
29034 return 0;
29035 }
29036
29037 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
29038 tieable integer mode. */
29039
29040 static bool
29041 ix86_tieable_integer_mode_p (enum machine_mode mode)
29042 {
29043 switch (mode)
29044 {
29045 case HImode:
29046 case SImode:
29047 return true;
29048
29049 case QImode:
29050 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
29051
29052 case DImode:
29053 return TARGET_64BIT;
29054
29055 default:
29056 return false;
29057 }
29058 }
29059
29060 /* Return true if MODE1 is accessible in a register that can hold MODE2
29061 without copying. That is, all register classes that can hold MODE2
29062 can also hold MODE1. */
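/* Editorial examples derived from the checks below: SFmode is tieable with
   both DFmode and XFmode, SImode is tieable with DImode only on 64-bit
   targets, and two 16-byte vector modes are tieable with each other
   whenever SSE registers accept both of them.  */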
29063
29064 bool
29065 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
29066 {
29067 if (mode1 == mode2)
29068 return true;
29069
29070 if (ix86_tieable_integer_mode_p (mode1)
29071 && ix86_tieable_integer_mode_p (mode2))
29072 return true;
29073
29074 /* MODE2 being XFmode implies fp stack or general regs, which means we
29075 can tie any smaller floating point modes to it. Note that we do not
29076 tie this with TFmode. */
29077 if (mode2 == XFmode)
29078 return mode1 == SFmode || mode1 == DFmode;
29079
29080 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
29081 that we can tie it with SFmode. */
29082 if (mode2 == DFmode)
29083 return mode1 == SFmode;
29084
29085 /* If MODE2 is only appropriate for an SSE register, then tie with
29086 any other mode acceptable to SSE registers. */
29087 if (GET_MODE_SIZE (mode2) == 16
29088 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
29089 return (GET_MODE_SIZE (mode1) == 16
29090 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
29091
29092 /* If MODE2 is appropriate for an MMX register, then tie
29093 with any other mode acceptable to MMX registers. */
29094 if (GET_MODE_SIZE (mode2) == 8
29095 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
29096 return (GET_MODE_SIZE (mode1) == 8
29097 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
29098
29099 return false;
29100 }
29101
29102 /* Compute a (partial) cost for rtx X. Return true if the complete
29103 cost has been computed, and false if subexpressions should be
29104 scanned. In either case, *TOTAL contains the cost result. */
29105
29106 static bool
29107 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
29108 {
29109 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
29110 enum machine_mode mode = GET_MODE (x);
29111 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
29112
29113 switch (code)
29114 {
29115 case CONST_INT:
29116 case CONST:
29117 case LABEL_REF:
29118 case SYMBOL_REF:
29119 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
29120 *total = 3;
29121 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
29122 *total = 2;
29123 else if (flag_pic && SYMBOLIC_CONST (x)
29124 && (!TARGET_64BIT
29125                                       || (GET_CODE (x) != LABEL_REF
29126 && (GET_CODE (x) != SYMBOL_REF
29127 || !SYMBOL_REF_LOCAL_P (x)))))
29128 *total = 1;
29129 else
29130 *total = 0;
29131 return true;
29132
29133 case CONST_DOUBLE:
29134 if (mode == VOIDmode)
29135 *total = 0;
29136 else
29137 switch (standard_80387_constant_p (x))
29138 {
29139 case 1: /* 0.0 */
29140 *total = 1;
29141 break;
29142 default: /* Other constants */
29143 *total = 2;
29144 break;
29145 case 0:
29146 case -1:
29147 /* Start with (MEM (SYMBOL_REF)), since that's where
29148 it'll probably end up. Add a penalty for size. */
29149 *total = (COSTS_N_INSNS (1)
29150 + (flag_pic != 0 && !TARGET_64BIT)
29151 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
29152 break;
29153 }
29154 return true;
29155
29156 case ZERO_EXTEND:
29157       /* The zero extension is often completely free on x86_64, so make
29158 it as cheap as possible. */
29159 if (TARGET_64BIT && mode == DImode
29160 && GET_MODE (XEXP (x, 0)) == SImode)
29161 *total = 1;
29162 else if (TARGET_ZERO_EXTEND_WITH_AND)
29163 *total = cost->add;
29164 else
29165 *total = cost->movzx;
29166 return false;
29167
29168 case SIGN_EXTEND:
29169 *total = cost->movsx;
29170 return false;
29171
29172 case ASHIFT:
29173 if (CONST_INT_P (XEXP (x, 1))
29174 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
29175 {
29176 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
29177 if (value == 1)
29178 {
29179 *total = cost->add;
29180 return false;
29181 }
29182 if ((value == 2 || value == 3)
29183 && cost->lea <= cost->shift_const)
29184 {
29185 *total = cost->lea;
29186 return false;
29187 }
29188 }
29189 /* FALLTHRU */
29190
29191 case ROTATE:
29192 case ASHIFTRT:
29193 case LSHIFTRT:
29194 case ROTATERT:
29195 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
29196 {
29197 if (CONST_INT_P (XEXP (x, 1)))
29198 {
29199 if (INTVAL (XEXP (x, 1)) > 32)
29200 *total = cost->shift_const + COSTS_N_INSNS (2);
29201 else
29202 *total = cost->shift_const * 2;
29203 }
29204 else
29205 {
29206 if (GET_CODE (XEXP (x, 1)) == AND)
29207 *total = cost->shift_var * 2;
29208 else
29209 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
29210 }
29211 }
29212 else
29213 {
29214 if (CONST_INT_P (XEXP (x, 1)))
29215 *total = cost->shift_const;
29216 else
29217 *total = cost->shift_var;
29218 }
29219 return false;
29220
29221 case FMA:
29222 {
29223 rtx sub;
29224
29225 gcc_assert (FLOAT_MODE_P (mode));
29226 gcc_assert (TARGET_FMA || TARGET_FMA4);
29227
29228 /* ??? SSE scalar/vector cost should be used here. */
29229 /* ??? Bald assumption that fma has the same cost as fmul. */
29230 *total = cost->fmul;
29231 *total += rtx_cost (XEXP (x, 1), FMA, speed);
29232
29233 /* Negate in op0 or op2 is free: FMS, FNMA, FNMS. */
29234 sub = XEXP (x, 0);
29235 if (GET_CODE (sub) == NEG)
29236         sub = XEXP (sub, 0);
29237 *total += rtx_cost (sub, FMA, speed);
29238
29239 sub = XEXP (x, 2);
29240 if (GET_CODE (sub) == NEG)
29241         sub = XEXP (sub, 0);
29242 *total += rtx_cost (sub, FMA, speed);
29243 return true;
29244 }
29245
29246 case MULT:
29247 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29248 {
29249 /* ??? SSE scalar cost should be used here. */
29250 *total = cost->fmul;
29251 return false;
29252 }
29253 else if (X87_FLOAT_MODE_P (mode))
29254 {
29255 *total = cost->fmul;
29256 return false;
29257 }
29258 else if (FLOAT_MODE_P (mode))
29259 {
29260 /* ??? SSE vector cost should be used here. */
29261 *total = cost->fmul;
29262 return false;
29263 }
29264 else
29265 {
29266 rtx op0 = XEXP (x, 0);
29267 rtx op1 = XEXP (x, 1);
29268 int nbits;
29269 if (CONST_INT_P (XEXP (x, 1)))
29270 {
29271 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
29272 for (nbits = 0; value != 0; value &= value - 1)
29273 nbits++;
29274 }
29275 else
29276 /* This is arbitrary. */
29277 nbits = 7;
29278
29279 /* Compute costs correctly for widening multiplication. */
29280 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
29281 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
29282 == GET_MODE_SIZE (mode))
29283 {
29284 int is_mulwiden = 0;
29285 enum machine_mode inner_mode = GET_MODE (op0);
29286
29287 if (GET_CODE (op0) == GET_CODE (op1))
29288 is_mulwiden = 1, op1 = XEXP (op1, 0);
29289 else if (CONST_INT_P (op1))
29290 {
29291 if (GET_CODE (op0) == SIGN_EXTEND)
29292 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
29293 == INTVAL (op1);
29294 else
29295 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
29296 }
29297
29298 if (is_mulwiden)
29299 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
29300 }
29301
29302 *total = (cost->mult_init[MODE_INDEX (mode)]
29303 + nbits * cost->mult_bit
29304 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
29305
29306 return true;
29307 }
29308
29309 case DIV:
29310 case UDIV:
29311 case MOD:
29312 case UMOD:
29313 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29314 /* ??? SSE cost should be used here. */
29315 *total = cost->fdiv;
29316 else if (X87_FLOAT_MODE_P (mode))
29317 *total = cost->fdiv;
29318 else if (FLOAT_MODE_P (mode))
29319 /* ??? SSE vector cost should be used here. */
29320 *total = cost->fdiv;
29321 else
29322 *total = cost->divide[MODE_INDEX (mode)];
29323 return false;
29324
29325 case PLUS:
29326 if (GET_MODE_CLASS (mode) == MODE_INT
29327 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
29328 {
29329 if (GET_CODE (XEXP (x, 0)) == PLUS
29330 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
29331 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
29332 && CONSTANT_P (XEXP (x, 1)))
29333 {
29334 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
29335 if (val == 2 || val == 4 || val == 8)
29336 {
29337 *total = cost->lea;
29338 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29339 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
29340 outer_code, speed);
29341 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29342 return true;
29343 }
29344 }
29345 else if (GET_CODE (XEXP (x, 0)) == MULT
29346 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
29347 {
29348 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
29349 if (val == 2 || val == 4 || val == 8)
29350 {
29351 *total = cost->lea;
29352 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29353 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29354 return true;
29355 }
29356 }
29357 else if (GET_CODE (XEXP (x, 0)) == PLUS)
29358 {
29359 *total = cost->lea;
29360 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29361 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29362 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29363 return true;
29364 }
29365 }
29366 /* FALLTHRU */
29367
29368 case MINUS:
29369 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29370 {
29371 /* ??? SSE cost should be used here. */
29372 *total = cost->fadd;
29373 return false;
29374 }
29375 else if (X87_FLOAT_MODE_P (mode))
29376 {
29377 *total = cost->fadd;
29378 return false;
29379 }
29380 else if (FLOAT_MODE_P (mode))
29381 {
29382 /* ??? SSE vector cost should be used here. */
29383 *total = cost->fadd;
29384 return false;
29385 }
29386 /* FALLTHRU */
29387
29388 case AND:
29389 case IOR:
29390 case XOR:
29391 if (!TARGET_64BIT && mode == DImode)
29392 {
29393 *total = (cost->add * 2
29394 + (rtx_cost (XEXP (x, 0), outer_code, speed)
29395 << (GET_MODE (XEXP (x, 0)) != DImode))
29396 + (rtx_cost (XEXP (x, 1), outer_code, speed)
29397 << (GET_MODE (XEXP (x, 1)) != DImode)));
29398 return true;
29399 }
29400 /* FALLTHRU */
29401
29402 case NEG:
29403 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29404 {
29405 /* ??? SSE cost should be used here. */
29406 *total = cost->fchs;
29407 return false;
29408 }
29409 else if (X87_FLOAT_MODE_P (mode))
29410 {
29411 *total = cost->fchs;
29412 return false;
29413 }
29414 else if (FLOAT_MODE_P (mode))
29415 {
29416 /* ??? SSE vector cost should be used here. */
29417 *total = cost->fchs;
29418 return false;
29419 }
29420 /* FALLTHRU */
29421
29422 case NOT:
29423 if (!TARGET_64BIT && mode == DImode)
29424 *total = cost->add * 2;
29425 else
29426 *total = cost->add;
29427 return false;
29428
29429 case COMPARE:
29430 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
29431 && XEXP (XEXP (x, 0), 1) == const1_rtx
29432 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
29433 && XEXP (x, 1) == const0_rtx)
29434 {
29435 /* This kind of construct is implemented using test[bwl].
29436 Treat it as if we had an AND. */
29437 *total = (cost->add
29438 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
29439 + rtx_cost (const1_rtx, outer_code, speed));
29440 return true;
29441 }
29442 return false;
29443
29444 case FLOAT_EXTEND:
29445 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
29446 *total = 0;
29447 return false;
29448
29449 case ABS:
29450 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29451 /* ??? SSE cost should be used here. */
29452 *total = cost->fabs;
29453 else if (X87_FLOAT_MODE_P (mode))
29454 *total = cost->fabs;
29455 else if (FLOAT_MODE_P (mode))
29456 /* ??? SSE vector cost should be used here. */
29457 *total = cost->fabs;
29458 return false;
29459
29460 case SQRT:
29461 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29462 /* ??? SSE cost should be used here. */
29463 *total = cost->fsqrt;
29464 else if (X87_FLOAT_MODE_P (mode))
29465 *total = cost->fsqrt;
29466 else if (FLOAT_MODE_P (mode))
29467 /* ??? SSE vector cost should be used here. */
29468 *total = cost->fsqrt;
29469 return false;
29470
29471 case UNSPEC:
29472 if (XINT (x, 1) == UNSPEC_TP)
29473 *total = 0;
29474 return false;
29475
29476 case VEC_SELECT:
29477 case VEC_CONCAT:
29478 case VEC_MERGE:
29479 case VEC_DUPLICATE:
29480       /* ??? Assume all of these vector manipulation patterns are
29481          recognizable, in which case they all pretty much have the
29482          same cost.  */
29483 *total = COSTS_N_INSNS (1);
29484 return true;
29485
29486 default:
29487 return false;
29488 }
29489 }
29490
29491 #if TARGET_MACHO
29492
29493 static int current_machopic_label_num;
29494
29495 /* Given a symbol name and its associated stub, write out the
29496 definition of the stub. */
29497
29498 void
29499 machopic_output_stub (FILE *file, const char *symb, const char *stub)
29500 {
29501 unsigned int length;
29502 char *binder_name, *symbol_name, lazy_ptr_name[32];
29503 int label = ++current_machopic_label_num;
29504
29505 /* For 64-bit we shouldn't get here. */
29506 gcc_assert (!TARGET_64BIT);
29507
29508 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29509 symb = targetm.strip_name_encoding (symb);
29510
29511 length = strlen (stub);
29512 binder_name = XALLOCAVEC (char, length + 32);
29513 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
29514
29515 length = strlen (symb);
29516 symbol_name = XALLOCAVEC (char, length + 32);
29517 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29518
29519 sprintf (lazy_ptr_name, "L%d$lz", label);
29520
29521 if (MACHOPIC_ATT_STUB)
29522 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
29523 else if (MACHOPIC_PURE)
29524 {
29525 if (TARGET_DEEP_BRANCH_PREDICTION)
29526 switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
29527 else
29528 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
29529 }
29530 else
29531 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
29532
29533 fprintf (file, "%s:\n", stub);
29534 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29535
29536 if (MACHOPIC_ATT_STUB)
29537 {
29538 fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
29539 }
29540 else if (MACHOPIC_PURE)
29541 {
29542 /* PIC stub. */
29543 if (TARGET_DEEP_BRANCH_PREDICTION)
29544 {
29545 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29546 rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
29547 output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
29548 fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n", label, lazy_ptr_name, label);
29549 }
29550 else
29551 {
29552 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %eax". */
29553 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%ecx\n", label, label);
29554 fprintf (file, "\tmovl %s-LPC$%d(%%ecx),%%ecx\n", lazy_ptr_name, label);
29555 }
29556 fprintf (file, "\tjmp\t*%%ecx\n");
29557 }
29558 else
29559 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
29560
29561 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
29562 it needs no stub-binding-helper. */
29563 if (MACHOPIC_ATT_STUB)
29564 return;
29565
29566 fprintf (file, "%s:\n", binder_name);
29567
29568 if (MACHOPIC_PURE)
29569 {
29570 fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
29571 fprintf (file, "\tpushl\t%%ecx\n");
29572 }
29573 else
29574 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
29575
29576 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
29577
29578 /* N.B. Keep the correspondence of these
29579 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
29580 old-pic/new-pic/non-pic stubs; altering this will break
29581 compatibility with existing dylibs. */
29582 if (MACHOPIC_PURE)
29583 {
29584 /* PIC stubs. */
29585 if (TARGET_DEEP_BRANCH_PREDICTION)
29586 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29587 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
29588 else
29589 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ebx". */
29590 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
29591 }
29592 else
29593 /* 16-byte -mdynamic-no-pic stub. */
29594 switch_to_section(darwin_sections[machopic_lazy_symbol_ptr3_section]);
29595
29596 fprintf (file, "%s:\n", lazy_ptr_name);
29597 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29598 fprintf (file, ASM_LONG "%s\n", binder_name);
29599 }
29600 #endif /* TARGET_MACHO */
29601
29602 /* Order the registers for register allocator. */
29603
29604 void
29605 x86_order_regs_for_local_alloc (void)
29606 {
29607 int pos = 0;
29608 int i;
29609
29610 /* First allocate the local general purpose registers. */
29611 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29612 if (GENERAL_REGNO_P (i) && call_used_regs[i])
29613 reg_alloc_order [pos++] = i;
29614
29615 /* Global general purpose registers. */
29616 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29617 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
29618 reg_alloc_order [pos++] = i;
29619
29620 /* x87 registers come first in case we are doing FP math
29621 using them. */
29622 if (!TARGET_SSE_MATH)
29623 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29624 reg_alloc_order [pos++] = i;
29625
29626 /* SSE registers. */
29627 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
29628 reg_alloc_order [pos++] = i;
29629 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
29630 reg_alloc_order [pos++] = i;
29631
29632 /* x87 registers. */
29633 if (TARGET_SSE_MATH)
29634 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29635 reg_alloc_order [pos++] = i;
29636
29637 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
29638 reg_alloc_order [pos++] = i;
29639
29640   /* Initialize the rest of the array, as we do not allocate some registers
29641 at all. */
29642 while (pos < FIRST_PSEUDO_REGISTER)
29643 reg_alloc_order [pos++] = 0;
29644 }
29645
29646 /* Handle a "callee_pop_aggregate_return" attribute; arguments as
29647    in struct attribute_spec.handler.  */
29648 static tree
29649 ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
29650 tree args,
29651 int flags ATTRIBUTE_UNUSED,
29652 bool *no_add_attrs)
29653 {
29654 if (TREE_CODE (*node) != FUNCTION_TYPE
29655 && TREE_CODE (*node) != METHOD_TYPE
29656 && TREE_CODE (*node) != FIELD_DECL
29657 && TREE_CODE (*node) != TYPE_DECL)
29658 {
29659 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29660 name);
29661 *no_add_attrs = true;
29662 return NULL_TREE;
29663 }
29664 if (TARGET_64BIT)
29665 {
29666 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
29667 name);
29668 *no_add_attrs = true;
29669 return NULL_TREE;
29670 }
29671 if (is_attribute_p ("callee_pop_aggregate_return", name))
29672 {
29673 tree cst;
29674
29675 cst = TREE_VALUE (args);
29676 if (TREE_CODE (cst) != INTEGER_CST)
29677 {
29678 warning (OPT_Wattributes,
29679 "%qE attribute requires an integer constant argument",
29680 name);
29681 *no_add_attrs = true;
29682 }
29683 else if (compare_tree_int (cst, 0) != 0
29684 && compare_tree_int (cst, 1) != 0)
29685 {
29686 warning (OPT_Wattributes,
29687 "argument to %qE attribute is neither zero, nor one",
29688 name);
29689 *no_add_attrs = true;
29690 }
29691
29692 return NULL_TREE;
29693 }
29694
29695 return NULL_TREE;
29696 }
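/* Editorial usage sketch (hypothetical declaration): the attribute takes a
   single integer constant that must be 0 or 1, e.g.

     struct big ret_in_callee (void)
       __attribute__ ((callee_pop_aggregate_return (1)));  */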
29697
29698 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
29699 struct attribute_spec.handler. */
29700 static tree
29701 ix86_handle_abi_attribute (tree *node, tree name,
29702 tree args ATTRIBUTE_UNUSED,
29703 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29704 {
29705 if (TREE_CODE (*node) != FUNCTION_TYPE
29706 && TREE_CODE (*node) != METHOD_TYPE
29707 && TREE_CODE (*node) != FIELD_DECL
29708 && TREE_CODE (*node) != TYPE_DECL)
29709 {
29710 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29711 name);
29712 *no_add_attrs = true;
29713 return NULL_TREE;
29714 }
29715 if (!TARGET_64BIT)
29716 {
29717 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
29718 name);
29719 *no_add_attrs = true;
29720 return NULL_TREE;
29721 }
29722
29723 /* Can combine regparm with all attributes but fastcall. */
29724 if (is_attribute_p ("ms_abi", name))
29725 {
29726 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
29727 {
29728 error ("ms_abi and sysv_abi attributes are not compatible");
29729 }
29730
29731 return NULL_TREE;
29732 }
29733 else if (is_attribute_p ("sysv_abi", name))
29734 {
29735 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
29736 {
29737 error ("ms_abi and sysv_abi attributes are not compatible");
29738 }
29739
29740 return NULL_TREE;
29741 }
29742
29743 return NULL_TREE;
29744 }
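/* Editorial usage sketch (hypothetical declarations): on 64-bit targets a
   function type may select one calling convention or the other, but never
   both, e.g.

     void ms_call (int) __attribute__ ((ms_abi));
     void sysv_call (int) __attribute__ ((sysv_abi));  */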
29745
29746 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29747 struct attribute_spec.handler. */
29748 static tree
29749 ix86_handle_struct_attribute (tree *node, tree name,
29750 tree args ATTRIBUTE_UNUSED,
29751 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29752 {
29753 tree *type = NULL;
29754 if (DECL_P (*node))
29755 {
29756 if (TREE_CODE (*node) == TYPE_DECL)
29757 type = &TREE_TYPE (*node);
29758 }
29759 else
29760 type = node;
29761
29762 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29763 || TREE_CODE (*type) == UNION_TYPE)))
29764 {
29765 warning (OPT_Wattributes, "%qE attribute ignored",
29766 name);
29767 *no_add_attrs = true;
29768 }
29769
29770 else if ((is_attribute_p ("ms_struct", name)
29771 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29772 || ((is_attribute_p ("gcc_struct", name)
29773 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29774 {
29775 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29776 name);
29777 *no_add_attrs = true;
29778 }
29779
29780 return NULL_TREE;
29781 }
29782
29783 static tree
29784 ix86_handle_fndecl_attribute (tree *node, tree name,
29785 tree args ATTRIBUTE_UNUSED,
29786 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29787 {
29788 if (TREE_CODE (*node) != FUNCTION_DECL)
29789 {
29790 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29791 name);
29792 *no_add_attrs = true;
29793 }
29794 return NULL_TREE;
29795 }
29796
29797 static bool
29798 ix86_ms_bitfield_layout_p (const_tree record_type)
29799 {
29800 return ((TARGET_MS_BITFIELD_LAYOUT
29801 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29802 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
29803 }
29804
29805 /* Returns an expression indicating where the this parameter is
29806 located on entry to the FUNCTION. */
29807
29808 static rtx
29809 x86_this_parameter (tree function)
29810 {
29811 tree type = TREE_TYPE (function);
29812 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
29813 int nregs;
29814
29815 if (TARGET_64BIT)
29816 {
29817 const int *parm_regs;
29818
29819 if (ix86_function_type_abi (type) == MS_ABI)
29820 parm_regs = x86_64_ms_abi_int_parameter_registers;
29821 else
29822 parm_regs = x86_64_int_parameter_registers;
29823 return gen_rtx_REG (DImode, parm_regs[aggr]);
29824 }
29825
29826 nregs = ix86_function_regparm (type, function);
29827
29828 if (nregs > 0 && !stdarg_p (type))
29829 {
29830 int regno;
29831 unsigned int ccvt = ix86_get_callcvt (type);
29832
29833 if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
29834 regno = aggr ? DX_REG : CX_REG;
29835 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
29836 {
29837 regno = CX_REG;
29838 if (aggr)
29839 return gen_rtx_MEM (SImode,
29840 plus_constant (stack_pointer_rtx, 4));
29841 }
29842 else
29843 {
29844 regno = AX_REG;
29845 if (aggr)
29846 {
29847 regno = DX_REG;
29848 if (nregs == 1)
29849 return gen_rtx_MEM (SImode,
29850 plus_constant (stack_pointer_rtx, 4));
29851 }
29852 }
29853 return gen_rtx_REG (SImode, regno);
29854 }
29855
29856 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
29857 }
29858
29859 /* Determine whether x86_output_mi_thunk can succeed. */
29860
29861 static bool
29862 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
29863 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
29864 HOST_WIDE_INT vcall_offset, const_tree function)
29865 {
29866 /* 64-bit can handle anything. */
29867 if (TARGET_64BIT)
29868 return true;
29869
29870 /* For 32-bit, everything's fine if we have one free register. */
29871 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
29872 return true;
29873
29874 /* Need a free register for vcall_offset. */
29875 if (vcall_offset)
29876 return false;
29877
29878 /* Need a free register for GOT references. */
29879 if (flag_pic && !targetm.binds_local_p (function))
29880 return false;
29881
29882 /* Otherwise ok. */
29883 return true;
29884 }
29885
29886 /* Output the assembler code for a thunk function. THUNK_DECL is the
29887 declaration for the thunk function itself, FUNCTION is the decl for
29888 the target function. DELTA is an immediate constant offset to be
29889 added to THIS. If VCALL_OFFSET is nonzero, the word at
29890 *(*this + vcall_offset) should be added to THIS. */
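/* Editorial pseudo-C sketch of the thunk's effect (not the code actually
   emitted; names are illustrative):

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(intptr_t *) (*(char **) this + VCALL_OFFSET);
     goto FUNCTION;  */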
29891
29892 static void
29893 x86_output_mi_thunk (FILE *file,
29894 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
29895 HOST_WIDE_INT vcall_offset, tree function)
29896 {
29897 rtx xops[3];
29898 rtx this_param = x86_this_parameter (function);
29899 rtx this_reg, tmp;
29900
29901 /* Make sure unwind info is emitted for the thunk if needed. */
29902 final_start_function (emit_barrier (), file, 1);
29903
29904 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
29905 pull it in now and let DELTA benefit. */
29906 if (REG_P (this_param))
29907 this_reg = this_param;
29908 else if (vcall_offset)
29909 {
29910 /* Put the this parameter into %eax. */
29911 xops[0] = this_param;
29912 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
29913 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29914 }
29915 else
29916 this_reg = NULL_RTX;
29917
29918 /* Adjust the this parameter by a fixed constant. */
29919 if (delta)
29920 {
29921 xops[0] = GEN_INT (delta);
29922 xops[1] = this_reg ? this_reg : this_param;
29923 if (TARGET_64BIT)
29924 {
29925 if (!x86_64_general_operand (xops[0], DImode))
29926 {
29927 tmp = gen_rtx_REG (DImode, R10_REG);
29928 xops[1] = tmp;
29929 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
29930 xops[0] = tmp;
29931 xops[1] = this_param;
29932 }
29933 if (x86_maybe_negate_const_int (&xops[0], DImode))
29934 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
29935 else
29936 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
29937 }
29938 else if (x86_maybe_negate_const_int (&xops[0], SImode))
29939 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
29940 else
29941 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
29942 }
29943
29944 /* Adjust the this parameter by a value stored in the vtable. */
29945 if (vcall_offset)
29946 {
29947 if (TARGET_64BIT)
29948 tmp = gen_rtx_REG (DImode, R10_REG);
29949 else
29950 {
29951 int tmp_regno = CX_REG;
29952 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (function));
29953 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
29954 tmp_regno = AX_REG;
29955 tmp = gen_rtx_REG (SImode, tmp_regno);
29956 }
29957
29958 xops[0] = gen_rtx_MEM (Pmode, this_reg);
29959 xops[1] = tmp;
29960 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29961
29962 /* Adjust the this parameter. */
29963 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
29964 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
29965 {
29966 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
29967 xops[0] = GEN_INT (vcall_offset);
29968 xops[1] = tmp2;
29969 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
29970 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
29971 }
29972 xops[1] = this_reg;
29973 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
29974 }
29975
29976 /* If necessary, drop THIS back to its stack slot. */
29977 if (this_reg && this_reg != this_param)
29978 {
29979 xops[0] = this_reg;
29980 xops[1] = this_param;
29981 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29982 }
29983
29984 xops[0] = XEXP (DECL_RTL (function), 0);
29985 if (TARGET_64BIT)
29986 {
29987 if (!flag_pic || targetm.binds_local_p (function)
29988 || DEFAULT_ABI == MS_ABI)
29989 output_asm_insn ("jmp\t%P0", xops);
29990 /* All thunks should be in the same object as their target,
29991 and thus binds_local_p should be true. */
29992 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
29993 gcc_unreachable ();
29994 else
29995 {
29996 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
29997 tmp = gen_rtx_CONST (Pmode, tmp);
29998 tmp = gen_rtx_MEM (QImode, tmp);
29999 xops[0] = tmp;
30000 output_asm_insn ("jmp\t%A0", xops);
30001 }
30002 }
30003 else
30004 {
30005 if (!flag_pic || targetm.binds_local_p (function))
30006 output_asm_insn ("jmp\t%P0", xops);
30007 else
30008 #if TARGET_MACHO
30009 if (TARGET_MACHO)
30010 {
30011 rtx sym_ref = XEXP (DECL_RTL (function), 0);
30012 if (TARGET_MACHO_BRANCH_ISLANDS)
30013 sym_ref = (gen_rtx_SYMBOL_REF
30014 (Pmode,
30015 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
30016 tmp = gen_rtx_MEM (QImode, sym_ref);
30017 xops[0] = tmp;
30018 output_asm_insn ("jmp\t%0", xops);
30019 }
30020 else
30021 #endif /* TARGET_MACHO */
30022 {
30023 tmp = gen_rtx_REG (SImode, CX_REG);
30024 output_set_got (tmp, NULL_RTX);
30025
30026 xops[1] = tmp;
30027 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
30028 output_asm_insn ("jmp\t{*}%1", xops);
30029 }
30030 }
30031 final_end_function ();
30032 }
30033
30034 static void
30035 x86_file_start (void)
30036 {
30037 default_file_start ();
30038 #if TARGET_MACHO
30039 darwin_file_start ();
30040 #endif
30041 if (X86_FILE_START_VERSION_DIRECTIVE)
30042 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
30043 if (X86_FILE_START_FLTUSED)
30044 fputs ("\t.global\t__fltused\n", asm_out_file);
30045 if (ix86_asm_dialect == ASM_INTEL)
30046 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
30047 }
30048
30049 int
30050 x86_field_alignment (tree field, int computed)
30051 {
30052 enum machine_mode mode;
30053 tree type = TREE_TYPE (field);
30054
30055 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
30056 return computed;
30057 mode = TYPE_MODE (strip_array_types (type));
30058 if (mode == DFmode || mode == DCmode
30059 || GET_MODE_CLASS (mode) == MODE_INT
30060 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
30061 return MIN (32, computed);
30062 return computed;
30063 }
30064
30065 /* Output assembler code to FILE to increment profiler label # LABELNO
30066 for profiling a function entry. */
30067 void
30068 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
30069 {
30070 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
30071 : MCOUNT_NAME);
30072
30073 if (TARGET_64BIT)
30074 {
30075 #ifndef NO_PROFILE_COUNTERS
30076 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
30077 #endif
30078
30079 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
30080 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
30081 else
30082 fprintf (file, "\tcall\t%s\n", mcount_name);
30083 }
30084 else if (flag_pic)
30085 {
30086 #ifndef NO_PROFILE_COUNTERS
30087 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
30088 LPREFIX, labelno);
30089 #endif
30090 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
30091 }
30092 else
30093 {
30094 #ifndef NO_PROFILE_COUNTERS
30095 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
30096 LPREFIX, labelno);
30097 #endif
30098 fprintf (file, "\tcall\t%s\n", mcount_name);
30099 }
30100 }
30101
30102 /* We don't have exact information about the insn sizes, but we may assume
30103 quite safely that we are informed about all 1 byte insns and memory
30104 address sizes. This is enough to eliminate unnecessary padding in
30105 99% of cases. */
30106
30107 static int
30108 min_insn_size (rtx insn)
30109 {
30110 int l = 0, len;
30111
30112 if (!INSN_P (insn) || !active_insn_p (insn))
30113 return 0;
30114
30115   /* Discard the alignments we have emitted, and jump table data.  */
30116 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
30117 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
30118 return 0;
30119 if (JUMP_TABLE_DATA_P (insn))
30120 return 0;
30121
30122 /* Important case - calls are always 5 bytes.
30123      It is common to have many calls in a row.  */
30124 if (CALL_P (insn)
30125 && symbolic_reference_mentioned_p (PATTERN (insn))
30126 && !SIBLING_CALL_P (insn))
30127 return 5;
30128 len = get_attr_length (insn);
30129 if (len <= 1)
30130 return 1;
30131
30132 /* For normal instructions we rely on get_attr_length being exact,
30133 with a few exceptions. */
30134 if (!JUMP_P (insn))
30135 {
30136 enum attr_type type = get_attr_type (insn);
30137
30138 switch (type)
30139 {
30140 case TYPE_MULTI:
30141 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
30142 || asm_noperands (PATTERN (insn)) >= 0)
30143 return 0;
30144 break;
30145 case TYPE_OTHER:
30146 case TYPE_FCMP:
30147 break;
30148 default:
30149 /* Otherwise trust get_attr_length. */
30150 return len;
30151 }
30152
30153 l = get_attr_length_address (insn);
30154 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
30155 l = 4;
30156 }
30157 if (l)
30158 return 1+l;
30159 else
30160 return 2;
30161 }
30162
30163 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30164
30165 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
30166    16 byte window.  */
30167
30168 static void
30169 ix86_avoid_jump_mispredicts (void)
30170 {
30171 rtx insn, start = get_insns ();
30172 int nbytes = 0, njumps = 0;
30173 int isjump = 0;
30174
30175 /* Look for all minimal intervals of instructions containing 4 jumps.
30176 The intervals are bounded by START and INSN. NBYTES is the total
30177 size of instructions in the interval including INSN and not including
30178      START.  When NBYTES is smaller than 16 bytes, it is possible that
30179      the end of START and INSN fall within the same 16 byte page.
30180
30181      The smallest page offset at which INSN can start occurs when START
30182      ends at offset 0; INSN then starts at offset NBYTES - sizeof (INSN).
30183      We add a p2align of the 16 byte window with max skip 15 - NBYTES + sizeof (INSN).
30184 */
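   /* Editor's illustration of the padding computed below: if the interval
      ending at a jump INSN would let a fourth jump share the 16 byte
      window, and NBYTES is, say, 11 with INSN being 2 bytes long, then
      gen_pad is emitted with padsize = 15 - 11 + 2 = 6, i.e. a p2align of
      the 16 byte window with max skip 6, which in the worst case (START
      ending at offset 0) pushes INSN into the next window.  */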
30185 for (insn = start; insn; insn = NEXT_INSN (insn))
30186 {
30187 int min_size;
30188
30189 if (LABEL_P (insn))
30190 {
30191 int align = label_to_alignment (insn);
30192 int max_skip = label_to_max_skip (insn);
30193
30194 if (max_skip > 15)
30195 max_skip = 15;
30196 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
30197 already in the current 16 byte page, because otherwise
30198 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
30199 bytes to reach 16 byte boundary. */
30200 if (align <= 0
30201 || (align <= 3 && max_skip != (1 << align) - 1))
30202 max_skip = 0;
30203 if (dump_file)
30204 fprintf (dump_file, "Label %i with max_skip %i\n",
30205 INSN_UID (insn), max_skip);
30206 if (max_skip)
30207 {
30208 while (nbytes + max_skip >= 16)
30209 {
30210 start = NEXT_INSN (start);
30211 if ((JUMP_P (start)
30212 && GET_CODE (PATTERN (start)) != ADDR_VEC
30213 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30214 || CALL_P (start))
30215 njumps--, isjump = 1;
30216 else
30217 isjump = 0;
30218 nbytes -= min_insn_size (start);
30219 }
30220 }
30221 continue;
30222 }
30223
30224 min_size = min_insn_size (insn);
30225 nbytes += min_size;
30226 if (dump_file)
30227 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
30228 INSN_UID (insn), min_size);
30229 if ((JUMP_P (insn)
30230 && GET_CODE (PATTERN (insn)) != ADDR_VEC
30231 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
30232 || CALL_P (insn))
30233 njumps++;
30234 else
30235 continue;
30236
30237 while (njumps > 3)
30238 {
30239 start = NEXT_INSN (start);
30240 if ((JUMP_P (start)
30241 && GET_CODE (PATTERN (start)) != ADDR_VEC
30242 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30243 || CALL_P (start))
30244 njumps--, isjump = 1;
30245 else
30246 isjump = 0;
30247 nbytes -= min_insn_size (start);
30248 }
30249 gcc_assert (njumps >= 0);
30250 if (dump_file)
30251 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
30252 INSN_UID (start), INSN_UID (insn), nbytes);
30253
30254 if (njumps == 3 && isjump && nbytes < 16)
30255 {
30256 int padsize = 15 - nbytes + min_insn_size (insn);
30257
30258 if (dump_file)
30259 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
30260 INSN_UID (insn), padsize);
30261 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
30262 }
30263 }
30264 }
30265 #endif
30266
30267 /* AMD Athlon works faster when RET is not the destination of a
30268    conditional jump or directly preceded by another jump instruction.
30269    We avoid the penalty by inserting a NOP just before the RET
30270    instruction in such cases.  */
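/* Editor's illustration: the problematic shape rewritten here is a return
   reached straight from a branch, e.g.

	jne	.L5
	ret

   Because the RET directly follows a jump, the pass replaces it with the
   longer internal return form to avoid the mispredict penalty.  */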
30271 static void
30272 ix86_pad_returns (void)
30273 {
30274 edge e;
30275 edge_iterator ei;
30276
30277 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30278 {
30279 basic_block bb = e->src;
30280 rtx ret = BB_END (bb);
30281 rtx prev;
30282 bool replace = false;
30283
30284 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
30285 || optimize_bb_for_size_p (bb))
30286 continue;
30287 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
30288 if (active_insn_p (prev) || LABEL_P (prev))
30289 break;
30290 if (prev && LABEL_P (prev))
30291 {
30292 edge e;
30293 edge_iterator ei;
30294
30295 FOR_EACH_EDGE (e, ei, bb->preds)
30296 if (EDGE_FREQUENCY (e) && e->src->index >= 0
30297 && !(e->flags & EDGE_FALLTHRU))
30298 replace = true;
30299 }
30300 if (!replace)
30301 {
30302 prev = prev_active_insn (ret);
30303 if (prev
30304 && ((JUMP_P (prev) && any_condjump_p (prev))
30305 || CALL_P (prev)))
30306 replace = true;
30307 /* Empty functions get branch mispredict even when
30308 the jump destination is not visible to us. */
30309 if (!prev && !optimize_function_for_size_p (cfun))
30310 replace = true;
30311 }
30312 if (replace)
30313 {
30314 emit_jump_insn_before (gen_return_internal_long (), ret);
30315 delete_insn (ret);
30316 }
30317 }
30318 }
30319
30320 /* Count the minimum number of instructions in BB. Return 4 if the
30321 number of instructions >= 4. */
30322
30323 static int
30324 ix86_count_insn_bb (basic_block bb)
30325 {
30326 rtx insn;
30327 int insn_count = 0;
30328
30329 /* Count number of instructions in this block. Return 4 if the number
30330 of instructions >= 4. */
30331 FOR_BB_INSNS (bb, insn)
30332 {
30333       /* This only happens in exit blocks.  */
30334 if (JUMP_P (insn)
30335 && GET_CODE (PATTERN (insn)) == RETURN)
30336 break;
30337
30338 if (NONDEBUG_INSN_P (insn)
30339 && GET_CODE (PATTERN (insn)) != USE
30340 && GET_CODE (PATTERN (insn)) != CLOBBER)
30341 {
30342 insn_count++;
30343 if (insn_count >= 4)
30344 return insn_count;
30345 }
30346 }
30347
30348 return insn_count;
30349 }
30350
30351
30352 /* Count the minimum number of instructions in code path in BB.
30353 Return 4 if the number of instructions >= 4. */
30354
30355 static int
30356 ix86_count_insn (basic_block bb)
30357 {
30358 edge e;
30359 edge_iterator ei;
30360 int min_prev_count;
30361
30362 /* Only bother counting instructions along paths with no
30363 more than 2 basic blocks between entry and exit. Given
30364 that BB has an edge to exit, determine if a predecessor
30365 of BB has an edge from entry. If so, compute the number
30366 of instructions in the predecessor block. If there
30367 happen to be multiple such blocks, compute the minimum. */
30368 min_prev_count = 4;
30369 FOR_EACH_EDGE (e, ei, bb->preds)
30370 {
30371 edge prev_e;
30372 edge_iterator prev_ei;
30373
30374 if (e->src == ENTRY_BLOCK_PTR)
30375 {
30376 min_prev_count = 0;
30377 break;
30378 }
30379 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
30380 {
30381 if (prev_e->src == ENTRY_BLOCK_PTR)
30382 {
30383 int count = ix86_count_insn_bb (e->src);
30384 if (count < min_prev_count)
30385 min_prev_count = count;
30386 break;
30387 }
30388 }
30389 }
30390
30391 if (min_prev_count < 4)
30392 min_prev_count += ix86_count_insn_bb (bb);
30393
30394 return min_prev_count;
30395 }
30396
30397 /* Pad short functions to 4 instructions.  */
30398
30399 static void
30400 ix86_pad_short_function (void)
30401 {
30402 edge e;
30403 edge_iterator ei;
30404
30405 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30406 {
30407 rtx ret = BB_END (e->src);
30408 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
30409 {
30410 int insn_count = ix86_count_insn (e->src);
30411
30412 /* Pad short function. */
30413 if (insn_count < 4)
30414 {
30415 rtx insn = ret;
30416
30417 /* Find epilogue. */
30418 while (insn
30419 && (!NOTE_P (insn)
30420 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
30421 insn = PREV_INSN (insn);
30422
30423 if (!insn)
30424 insn = ret;
30425
30426 /* Two NOPs count as one instruction. */
30427 insn_count = 2 * (4 - insn_count);
30428 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
30429 }
30430 }
30431 }
30432 }
30433
30434 /* Implement machine specific optimizations.  We implement padding of returns
30435    for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window.  */
30436 static void
30437 ix86_reorg (void)
30438 {
30439 /* We are freeing block_for_insn in the toplev to keep compatibility
30440 with old MDEP_REORGS that are not CFG based. Recompute it now. */
30441 compute_bb_for_insn ();
30442
30443 if (optimize && optimize_function_for_speed_p (cfun))
30444 {
30445 if (TARGET_PAD_SHORT_FUNCTION)
30446 ix86_pad_short_function ();
30447 else if (TARGET_PAD_RETURNS)
30448 ix86_pad_returns ();
30449 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30450 if (TARGET_FOUR_JUMP_LIMIT)
30451 ix86_avoid_jump_mispredicts ();
30452 #endif
30453 }
30454
30455 /* Run the vzeroupper optimization if needed. */
30456 if (TARGET_VZEROUPPER)
30457 move_or_delete_vzeroupper ();
30458 }
30459
30460 /* Return nonzero when a QImode register that must be represented via a
30461    REX prefix is used.  */
30462 bool
30463 x86_extended_QIreg_mentioned_p (rtx insn)
30464 {
30465 int i;
30466 extract_insn_cached (insn);
30467 for (i = 0; i < recog_data.n_operands; i++)
30468 if (REG_P (recog_data.operand[i])
30469 && REGNO (recog_data.operand[i]) > BX_REG)
30470 return true;
30471 return false;
30472 }
30473
30474 /* Return nonzero when P points to a register encoded via a REX prefix.
30475 Called via for_each_rtx. */
30476 static int
30477 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
30478 {
30479 unsigned int regno;
30480 if (!REG_P (*p))
30481 return 0;
30482 regno = REGNO (*p);
30483 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
30484 }
30485
30486 /* Return true when INSN mentions a register that must be encoded using a
30487    REX prefix.  */
30488 bool
30489 x86_extended_reg_mentioned_p (rtx insn)
30490 {
30491 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
30492 extended_reg_mentioned_1, NULL);
30493 }
30494
30495 /* If profitable, negate (without causing overflow) integer constant
30496 of mode MODE at location LOC. Return true in this case. */
30497 bool
30498 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
30499 {
30500 HOST_WIDE_INT val;
30501
30502 if (!CONST_INT_P (*loc))
30503 return false;
30504
30505 switch (mode)
30506 {
30507 case DImode:
30508 /* DImode x86_64 constants must fit in 32 bits. */
30509 gcc_assert (x86_64_immediate_operand (*loc, mode));
30510
30511 mode = SImode;
30512 break;
30513
30514 case SImode:
30515 case HImode:
30516 case QImode:
30517 break;
30518
30519 default:
30520 gcc_unreachable ();
30521 }
30522
30523 /* Avoid overflows. */
30524 if (mode_signbit_p (mode, *loc))
30525 return false;
30526
30527 val = INTVAL (*loc);
30528
30529 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
30530 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
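  /* Editor's note (illustrative): the exception exists because signed 8 bit
     immediates cover [-128, 127].  `addl $128,%eax' needs a 32 bit
     immediate, whereas the negated form `subl $-128,%eax' fits in a single
     byte, so 128 is negated while -128 is left alone.  */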
30531 if ((val < 0 && val != -128)
30532 || val == 128)
30533 {
30534 *loc = GEN_INT (-val);
30535 return true;
30536 }
30537
30538 return false;
30539 }
30540
30541 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
30542 optabs would emit if we didn't have TFmode patterns. */
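/* Editor's sketch of the expansion below in C terms (purely illustrative;
   u stands for the unsigned input).  Nonnegative values take a plain
   signed convert; negative ones are halved with the shifted-out bit or-ed
   back in (so rounding stays correct), converted, and then doubled:

     if ((long) u >= 0)
       result = (double) (long) u;
     else
       {
         unsigned long half = (u >> 1) | (u & 1);
         result = 2.0 * (double) (long) half;
       }  */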
30543
30544 void
30545 x86_emit_floatuns (rtx operands[2])
30546 {
30547 rtx neglab, donelab, i0, i1, f0, in, out;
30548 enum machine_mode mode, inmode;
30549
30550 inmode = GET_MODE (operands[1]);
30551 gcc_assert (inmode == SImode || inmode == DImode);
30552
30553 out = operands[0];
30554 in = force_reg (inmode, operands[1]);
30555 mode = GET_MODE (out);
30556 neglab = gen_label_rtx ();
30557 donelab = gen_label_rtx ();
30558 f0 = gen_reg_rtx (mode);
30559
30560 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
30561
30562 expand_float (out, in, 0);
30563
30564 emit_jump_insn (gen_jump (donelab));
30565 emit_barrier ();
30566
30567 emit_label (neglab);
30568
30569 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
30570 1, OPTAB_DIRECT);
30571 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
30572 1, OPTAB_DIRECT);
30573 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
30574
30575 expand_float (f0, i0, 0);
30576
30577 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
30578
30579 emit_label (donelab);
30580 }
30581 \f
30582 /* AVX does not support 32-byte integer vector operations,
30583 thus the longest vector we are faced with is V16QImode. */
30584 #define MAX_VECT_LEN 16
30585
30586 struct expand_vec_perm_d
30587 {
30588 rtx target, op0, op1;
30589 unsigned char perm[MAX_VECT_LEN];
30590 enum machine_mode vmode;
30591 unsigned char nelt;
30592 bool testing_p;
30593 };
30594
30595 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
30596 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
30597
30598 /* Get a vector mode of the same size as the original but with elements
30599 twice as wide. This is only guaranteed to apply to integral vectors. */
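/* Editor's example: V16QImode maps to V8HImode, V8HImode to V4SImode, and
   so on; the total size is unchanged while the elements are twice as wide
   and half as many.  */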
30600
30601 static inline enum machine_mode
30602 get_mode_wider_vector (enum machine_mode o)
30603 {
30604 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
30605 enum machine_mode n = GET_MODE_WIDER_MODE (o);
30606 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
30607 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
30608 return n;
30609 }
30610
30611 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30612 with all elements equal to VAR. Return true if successful. */
30613
30614 static bool
30615 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
30616 rtx target, rtx val)
30617 {
30618 bool ok;
30619
30620 switch (mode)
30621 {
30622 case V2SImode:
30623 case V2SFmode:
30624 if (!mmx_ok)
30625 return false;
30626 /* FALLTHRU */
30627
30628 case V4DFmode:
30629 case V4DImode:
30630 case V8SFmode:
30631 case V8SImode:
30632 case V2DFmode:
30633 case V2DImode:
30634 case V4SFmode:
30635 case V4SImode:
30636 {
30637 rtx insn, dup;
30638
30639 /* First attempt to recognize VAL as-is. */
30640 dup = gen_rtx_VEC_DUPLICATE (mode, val);
30641 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
30642 if (recog_memoized (insn) < 0)
30643 {
30644 rtx seq;
30645 /* If that fails, force VAL into a register. */
30646
30647 start_sequence ();
30648 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
30649 seq = get_insns ();
30650 end_sequence ();
30651 if (seq)
30652 emit_insn_before (seq, insn);
30653
30654 ok = recog_memoized (insn) >= 0;
30655 gcc_assert (ok);
30656 }
30657 }
30658 return true;
30659
30660 case V4HImode:
30661 if (!mmx_ok)
30662 return false;
30663 if (TARGET_SSE || TARGET_3DNOW_A)
30664 {
30665 rtx x;
30666
30667 val = gen_lowpart (SImode, val);
30668 x = gen_rtx_TRUNCATE (HImode, val);
30669 x = gen_rtx_VEC_DUPLICATE (mode, x);
30670 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30671 return true;
30672 }
30673 goto widen;
30674
30675 case V8QImode:
30676 if (!mmx_ok)
30677 return false;
30678 goto widen;
30679
30680 case V8HImode:
30681 if (TARGET_SSE2)
30682 {
30683 struct expand_vec_perm_d dperm;
30684 rtx tmp1, tmp2;
30685
30686 permute:
30687 memset (&dperm, 0, sizeof (dperm));
30688 dperm.target = target;
30689 dperm.vmode = mode;
30690 dperm.nelt = GET_MODE_NUNITS (mode);
30691 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
30692
30693 /* Extend to SImode using a paradoxical SUBREG. */
30694 tmp1 = gen_reg_rtx (SImode);
30695 emit_move_insn (tmp1, gen_lowpart (SImode, val));
30696
30697 /* Insert the SImode value as low element of a V4SImode vector. */
30698 tmp2 = gen_lowpart (V4SImode, dperm.op0);
30699 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
30700
30701 ok = (expand_vec_perm_1 (&dperm)
30702 || expand_vec_perm_broadcast_1 (&dperm));
30703 gcc_assert (ok);
30704 return ok;
30705 }
30706 goto widen;
30707
30708 case V16QImode:
30709 if (TARGET_SSE2)
30710 goto permute;
30711 goto widen;
30712
30713 widen:
30714 /* Replicate the value once into the next wider mode and recurse. */
30715 {
30716 enum machine_mode smode, wsmode, wvmode;
30717 rtx x;
30718
30719 smode = GET_MODE_INNER (mode);
30720 wvmode = get_mode_wider_vector (mode);
30721 wsmode = GET_MODE_INNER (wvmode);
30722
30723 val = convert_modes (wsmode, smode, val, true);
30724 x = expand_simple_binop (wsmode, ASHIFT, val,
30725 GEN_INT (GET_MODE_BITSIZE (smode)),
30726 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30727 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
30728
30729 x = gen_lowpart (wvmode, target);
30730 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
30731 gcc_assert (ok);
30732 return ok;
30733 }
30734
30735 case V16HImode:
30736 case V32QImode:
30737 {
30738 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
30739 rtx x = gen_reg_rtx (hvmode);
30740
30741 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
30742 gcc_assert (ok);
30743
30744 x = gen_rtx_VEC_CONCAT (mode, x, x);
30745 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30746 }
30747 return true;
30748
30749 default:
30750 return false;
30751 }
30752 }
30753
30754 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30755 whose ONE_VAR element is VAR, and other elements are zero. Return true
30756 if successful. */
30757
30758 static bool
30759 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
30760 rtx target, rtx var, int one_var)
30761 {
30762 enum machine_mode vsimode;
30763 rtx new_target;
30764 rtx x, tmp;
30765 bool use_vector_set = false;
30766
30767 switch (mode)
30768 {
30769 case V2DImode:
30770 /* For SSE4.1, we normally use vector set. But if the second
30771 element is zero and inter-unit moves are OK, we use movq
30772 instead. */
30773 use_vector_set = (TARGET_64BIT
30774 && TARGET_SSE4_1
30775 && !(TARGET_INTER_UNIT_MOVES
30776 && one_var == 0));
30777 break;
30778 case V16QImode:
30779 case V4SImode:
30780 case V4SFmode:
30781 use_vector_set = TARGET_SSE4_1;
30782 break;
30783 case V8HImode:
30784 use_vector_set = TARGET_SSE2;
30785 break;
30786 case V4HImode:
30787 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
30788 break;
30789 case V32QImode:
30790 case V16HImode:
30791 case V8SImode:
30792 case V8SFmode:
30793 case V4DFmode:
30794 use_vector_set = TARGET_AVX;
30795 break;
30796 case V4DImode:
30797 /* Use ix86_expand_vector_set in 64bit mode only. */
30798 use_vector_set = TARGET_AVX && TARGET_64BIT;
30799 break;
30800 default:
30801 break;
30802 }
30803
30804 if (use_vector_set)
30805 {
30806 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
30807 var = force_reg (GET_MODE_INNER (mode), var);
30808 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30809 return true;
30810 }
30811
30812 switch (mode)
30813 {
30814 case V2SFmode:
30815 case V2SImode:
30816 if (!mmx_ok)
30817 return false;
30818 /* FALLTHRU */
30819
30820 case V2DFmode:
30821 case V2DImode:
30822 if (one_var != 0)
30823 return false;
30824 var = force_reg (GET_MODE_INNER (mode), var);
30825 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
30826 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30827 return true;
30828
30829 case V4SFmode:
30830 case V4SImode:
30831 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
30832 new_target = gen_reg_rtx (mode);
30833 else
30834 new_target = target;
30835 var = force_reg (GET_MODE_INNER (mode), var);
30836 x = gen_rtx_VEC_DUPLICATE (mode, var);
30837 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
30838 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
30839 if (one_var != 0)
30840 {
30841 /* We need to shuffle the value to the correct position, so
30842 create a new pseudo to store the intermediate result. */
30843
30844 /* With SSE2, we can use the integer shuffle insns. */
30845 if (mode != V4SFmode && TARGET_SSE2)
30846 {
30847 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
30848 const1_rtx,
30849 GEN_INT (one_var == 1 ? 0 : 1),
30850 GEN_INT (one_var == 2 ? 0 : 1),
30851 GEN_INT (one_var == 3 ? 0 : 1)));
30852 if (target != new_target)
30853 emit_move_insn (target, new_target);
30854 return true;
30855 }
30856
30857 /* Otherwise convert the intermediate result to V4SFmode and
30858 use the SSE1 shuffle instructions. */
30859 if (mode != V4SFmode)
30860 {
30861 tmp = gen_reg_rtx (V4SFmode);
30862 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
30863 }
30864 else
30865 tmp = new_target;
30866
30867 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
30868 const1_rtx,
30869 GEN_INT (one_var == 1 ? 0 : 1),
30870 GEN_INT (one_var == 2 ? 0+4 : 1+4),
30871 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
30872
30873 if (mode != V4SFmode)
30874 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
30875 else if (tmp != target)
30876 emit_move_insn (target, tmp);
30877 }
30878 else if (target != new_target)
30879 emit_move_insn (target, new_target);
30880 return true;
30881
30882 case V8HImode:
30883 case V16QImode:
30884 vsimode = V4SImode;
30885 goto widen;
30886 case V4HImode:
30887 case V8QImode:
30888 if (!mmx_ok)
30889 return false;
30890 vsimode = V2SImode;
30891 goto widen;
30892 widen:
30893 if (one_var != 0)
30894 return false;
30895
30896 /* Zero extend the variable element to SImode and recurse. */
30897 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
30898
30899 x = gen_reg_rtx (vsimode);
30900 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
30901 var, one_var))
30902 gcc_unreachable ();
30903
30904 emit_move_insn (target, gen_lowpart (mode, x));
30905 return true;
30906
30907 default:
30908 return false;
30909 }
30910 }
30911
30912 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30913 consisting of the values in VALS. It is known that all elements
30914 except ONE_VAR are constants. Return true if successful. */
30915
30916 static bool
30917 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
30918 rtx target, rtx vals, int one_var)
30919 {
30920 rtx var = XVECEXP (vals, 0, one_var);
30921 enum machine_mode wmode;
30922 rtx const_vec, x;
30923
30924 const_vec = copy_rtx (vals);
30925 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
30926 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
30927
30928 switch (mode)
30929 {
30930 case V2DFmode:
30931 case V2DImode:
30932 case V2SFmode:
30933 case V2SImode:
30934 /* For the two element vectors, it's just as easy to use
30935 the general case. */
30936 return false;
30937
30938 case V4DImode:
30939 /* Use ix86_expand_vector_set in 64bit mode only. */
30940 if (!TARGET_64BIT)
30941 return false;
30942 case V4DFmode:
30943 case V8SFmode:
30944 case V8SImode:
30945 case V16HImode:
30946 case V32QImode:
30947 case V4SFmode:
30948 case V4SImode:
30949 case V8HImode:
30950 case V4HImode:
30951 break;
30952
30953 case V16QImode:
30954 if (TARGET_SSE4_1)
30955 break;
30956 wmode = V8HImode;
30957 goto widen;
30958 case V8QImode:
30959 wmode = V4HImode;
30960 goto widen;
30961 widen:
30962 /* There's no way to set one QImode entry easily. Combine
30963 the variable value with its adjacent constant value, and
30964 promote to an HImode set. */
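      /* Editor's example (hypothetical): to set byte 5 of a V16QImode
	 vector, the variable byte is widened to HImode, shifted into the
	 high half (one_var is odd), or-ed with constant byte 4, and the
	 combined value is stored into element 2 (one_var >> 1) of the
	 V8HImode view of the vector.  */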
30965 x = XVECEXP (vals, 0, one_var ^ 1);
30966 if (one_var & 1)
30967 {
30968 var = convert_modes (HImode, QImode, var, true);
30969 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
30970 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30971 x = GEN_INT (INTVAL (x) & 0xff);
30972 }
30973 else
30974 {
30975 var = convert_modes (HImode, QImode, var, true);
30976 x = gen_int_mode (INTVAL (x) << 8, HImode);
30977 }
30978 if (x != const0_rtx)
30979 var = expand_simple_binop (HImode, IOR, var, x, var,
30980 1, OPTAB_LIB_WIDEN);
30981
30982 x = gen_reg_rtx (wmode);
30983 emit_move_insn (x, gen_lowpart (wmode, const_vec));
30984 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
30985
30986 emit_move_insn (target, gen_lowpart (mode, x));
30987 return true;
30988
30989 default:
30990 return false;
30991 }
30992
30993 emit_move_insn (target, const_vec);
30994 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30995 return true;
30996 }
30997
30998 /* A subroutine of ix86_expand_vector_init_general. Use vector
30999 concatenate to handle the most general case: all values variable,
31000 and none identical. */
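/* Editor's illustration (hypothetical V8SFmode case, n == 8): the eight
   SFmode values a..h are first combined pairwise into four V2SF vectors
   {a,b} {c,d} {e,f} {g,h}, those into two V4SF vectors {a,b,c,d} and
   {e,f,g,h}, and a final VEC_CONCAT yields the V8SF result.  The recursion
   below implements this halving scheme, processing the inputs backward
   (see the FIXME about PR 36222).  */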
31001
31002 static void
31003 ix86_expand_vector_init_concat (enum machine_mode mode,
31004 rtx target, rtx *ops, int n)
31005 {
31006 enum machine_mode cmode, hmode = VOIDmode;
31007 rtx first[8], second[4];
31008 rtvec v;
31009 int i, j;
31010
31011 switch (n)
31012 {
31013 case 2:
31014 switch (mode)
31015 {
31016 case V8SImode:
31017 cmode = V4SImode;
31018 break;
31019 case V8SFmode:
31020 cmode = V4SFmode;
31021 break;
31022 case V4DImode:
31023 cmode = V2DImode;
31024 break;
31025 case V4DFmode:
31026 cmode = V2DFmode;
31027 break;
31028 case V4SImode:
31029 cmode = V2SImode;
31030 break;
31031 case V4SFmode:
31032 cmode = V2SFmode;
31033 break;
31034 case V2DImode:
31035 cmode = DImode;
31036 break;
31037 case V2SImode:
31038 cmode = SImode;
31039 break;
31040 case V2DFmode:
31041 cmode = DFmode;
31042 break;
31043 case V2SFmode:
31044 cmode = SFmode;
31045 break;
31046 default:
31047 gcc_unreachable ();
31048 }
31049
31050 if (!register_operand (ops[1], cmode))
31051 ops[1] = force_reg (cmode, ops[1]);
31052 if (!register_operand (ops[0], cmode))
31053 ops[0] = force_reg (cmode, ops[0]);
31054 emit_insn (gen_rtx_SET (VOIDmode, target,
31055 gen_rtx_VEC_CONCAT (mode, ops[0],
31056 ops[1])));
31057 break;
31058
31059 case 4:
31060 switch (mode)
31061 {
31062 case V4DImode:
31063 cmode = V2DImode;
31064 break;
31065 case V4DFmode:
31066 cmode = V2DFmode;
31067 break;
31068 case V4SImode:
31069 cmode = V2SImode;
31070 break;
31071 case V4SFmode:
31072 cmode = V2SFmode;
31073 break;
31074 default:
31075 gcc_unreachable ();
31076 }
31077 goto half;
31078
31079 case 8:
31080 switch (mode)
31081 {
31082 case V8SImode:
31083 cmode = V2SImode;
31084 hmode = V4SImode;
31085 break;
31086 case V8SFmode:
31087 cmode = V2SFmode;
31088 hmode = V4SFmode;
31089 break;
31090 default:
31091 gcc_unreachable ();
31092 }
31093 goto half;
31094
31095 half:
31096 /* FIXME: We process inputs backward to help RA. PR 36222. */
31097 i = n - 1;
31098 j = (n >> 1) - 1;
31099 for (; i > 0; i -= 2, j--)
31100 {
31101 first[j] = gen_reg_rtx (cmode);
31102 v = gen_rtvec (2, ops[i - 1], ops[i]);
31103 ix86_expand_vector_init (false, first[j],
31104 gen_rtx_PARALLEL (cmode, v));
31105 }
31106
31107 n >>= 1;
31108 if (n > 2)
31109 {
31110 gcc_assert (hmode != VOIDmode);
31111 for (i = j = 0; i < n; i += 2, j++)
31112 {
31113 second[j] = gen_reg_rtx (hmode);
31114 ix86_expand_vector_init_concat (hmode, second [j],
31115 &first [i], 2);
31116 }
31117 n >>= 1;
31118 ix86_expand_vector_init_concat (mode, target, second, n);
31119 }
31120 else
31121 ix86_expand_vector_init_concat (mode, target, first, n);
31122 break;
31123
31124 default:
31125 gcc_unreachable ();
31126 }
31127 }
31128
31129 /* A subroutine of ix86_expand_vector_init_general. Use vector
31130 interleave to handle the most general case: all values variable,
31131 and none identical. */
31132
31133 static void
31134 ix86_expand_vector_init_interleave (enum machine_mode mode,
31135 rtx target, rtx *ops, int n)
31136 {
31137 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
31138 int i, j;
31139 rtx op0, op1;
31140 rtx (*gen_load_even) (rtx, rtx, rtx);
31141 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
31142 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
31143
31144 switch (mode)
31145 {
31146 case V8HImode:
31147 gen_load_even = gen_vec_setv8hi;
31148 gen_interleave_first_low = gen_vec_interleave_lowv4si;
31149 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31150 inner_mode = HImode;
31151 first_imode = V4SImode;
31152 second_imode = V2DImode;
31153 third_imode = VOIDmode;
31154 break;
31155 case V16QImode:
31156 gen_load_even = gen_vec_setv16qi;
31157 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
31158 gen_interleave_second_low = gen_vec_interleave_lowv4si;
31159 inner_mode = QImode;
31160 first_imode = V8HImode;
31161 second_imode = V4SImode;
31162 third_imode = V2DImode;
31163 break;
31164 default:
31165 gcc_unreachable ();
31166 }
31167
31168 for (i = 0; i < n; i++)
31169 {
31170       /* Extend the odd element to SImode using a paradoxical SUBREG.  */
31171 op0 = gen_reg_rtx (SImode);
31172 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
31173
31174 /* Insert the SImode value as low element of V4SImode vector. */
31175 op1 = gen_reg_rtx (V4SImode);
31176 op0 = gen_rtx_VEC_MERGE (V4SImode,
31177 gen_rtx_VEC_DUPLICATE (V4SImode,
31178 op0),
31179 CONST0_RTX (V4SImode),
31180 const1_rtx);
31181 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
31182
31183       /* Cast the V4SImode vector back to a vector in the original mode.  */
31184 op0 = gen_reg_rtx (mode);
31185 emit_move_insn (op0, gen_lowpart (mode, op1));
31186
31187       /* Load even elements into the second position.  */
31188 emit_insn (gen_load_even (op0,
31189 force_reg (inner_mode,
31190 ops [i + i + 1]),
31191 const1_rtx));
31192
31193 /* Cast vector to FIRST_IMODE vector. */
31194 ops[i] = gen_reg_rtx (first_imode);
31195 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
31196 }
31197
31198 /* Interleave low FIRST_IMODE vectors. */
31199 for (i = j = 0; i < n; i += 2, j++)
31200 {
31201 op0 = gen_reg_rtx (first_imode);
31202 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
31203
31204 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
31205 ops[j] = gen_reg_rtx (second_imode);
31206 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
31207 }
31208
31209 /* Interleave low SECOND_IMODE vectors. */
31210 switch (second_imode)
31211 {
31212 case V4SImode:
31213 for (i = j = 0; i < n / 2; i += 2, j++)
31214 {
31215 op0 = gen_reg_rtx (second_imode);
31216 emit_insn (gen_interleave_second_low (op0, ops[i],
31217 ops[i + 1]));
31218
31219 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
31220 vector. */
31221 ops[j] = gen_reg_rtx (third_imode);
31222 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
31223 }
31224 second_imode = V2DImode;
31225 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31226 /* FALLTHRU */
31227
31228 case V2DImode:
31229 op0 = gen_reg_rtx (second_imode);
31230 emit_insn (gen_interleave_second_low (op0, ops[0],
31231 ops[1]));
31232
31233       /* Cast the SECOND_IMODE vector back to a vector in the original
31234	  mode.  */
31235 emit_insn (gen_rtx_SET (VOIDmode, target,
31236 gen_lowpart (mode, op0)));
31237 break;
31238
31239 default:
31240 gcc_unreachable ();
31241 }
31242 }
31243
31244 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
31245 all values variable, and none identical. */
31246
31247 static void
31248 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
31249 rtx target, rtx vals)
31250 {
31251 rtx ops[32], op0, op1;
31252 enum machine_mode half_mode = VOIDmode;
31253 int n, i;
31254
31255 switch (mode)
31256 {
31257 case V2SFmode:
31258 case V2SImode:
31259 if (!mmx_ok && !TARGET_SSE)
31260 break;
31261 /* FALLTHRU */
31262
31263 case V8SFmode:
31264 case V8SImode:
31265 case V4DFmode:
31266 case V4DImode:
31267 case V4SFmode:
31268 case V4SImode:
31269 case V2DFmode:
31270 case V2DImode:
31271 n = GET_MODE_NUNITS (mode);
31272 for (i = 0; i < n; i++)
31273 ops[i] = XVECEXP (vals, 0, i);
31274 ix86_expand_vector_init_concat (mode, target, ops, n);
31275 return;
31276
31277 case V32QImode:
31278 half_mode = V16QImode;
31279 goto half;
31280
31281 case V16HImode:
31282 half_mode = V8HImode;
31283 goto half;
31284
31285 half:
31286 n = GET_MODE_NUNITS (mode);
31287 for (i = 0; i < n; i++)
31288 ops[i] = XVECEXP (vals, 0, i);
31289 op0 = gen_reg_rtx (half_mode);
31290 op1 = gen_reg_rtx (half_mode);
31291 ix86_expand_vector_init_interleave (half_mode, op0, ops,
31292 n >> 2);
31293 ix86_expand_vector_init_interleave (half_mode, op1,
31294 &ops [n >> 1], n >> 2);
31295 emit_insn (gen_rtx_SET (VOIDmode, target,
31296 gen_rtx_VEC_CONCAT (mode, op0, op1)));
31297 return;
31298
31299 case V16QImode:
31300 if (!TARGET_SSE4_1)
31301 break;
31302 /* FALLTHRU */
31303
31304 case V8HImode:
31305 if (!TARGET_SSE2)
31306 break;
31307
31308 /* Don't use ix86_expand_vector_init_interleave if we can't
31309 move from GPR to SSE register directly. */
31310 if (!TARGET_INTER_UNIT_MOVES)
31311 break;
31312
31313 n = GET_MODE_NUNITS (mode);
31314 for (i = 0; i < n; i++)
31315 ops[i] = XVECEXP (vals, 0, i);
31316 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
31317 return;
31318
31319 case V4HImode:
31320 case V8QImode:
31321 break;
31322
31323 default:
31324 gcc_unreachable ();
31325 }
31326
31327 {
31328 int i, j, n_elts, n_words, n_elt_per_word;
31329 enum machine_mode inner_mode;
31330 rtx words[4], shift;
31331
31332 inner_mode = GET_MODE_INNER (mode);
31333 n_elts = GET_MODE_NUNITS (mode);
31334 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
31335 n_elt_per_word = n_elts / n_words;
31336 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
31337
31338 for (i = 0; i < n_words; ++i)
31339 {
31340 rtx word = NULL_RTX;
31341
31342 for (j = 0; j < n_elt_per_word; ++j)
31343 {
31344 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
31345 elt = convert_modes (word_mode, inner_mode, elt, true);
31346
31347 if (j == 0)
31348 word = elt;
31349 else
31350 {
31351 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
31352 word, 1, OPTAB_LIB_WIDEN);
31353 word = expand_simple_binop (word_mode, IOR, word, elt,
31354 word, 1, OPTAB_LIB_WIDEN);
31355 }
31356 }
31357
31358 words[i] = word;
31359 }
31360
31361 if (n_words == 1)
31362 emit_move_insn (target, gen_lowpart (mode, words[0]));
31363 else if (n_words == 2)
31364 {
31365 rtx tmp = gen_reg_rtx (mode);
31366 emit_clobber (tmp);
31367 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
31368 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
31369 emit_move_insn (target, tmp);
31370 }
31371 else if (n_words == 4)
31372 {
31373 rtx tmp = gen_reg_rtx (V4SImode);
31374 gcc_assert (word_mode == SImode);
31375 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
31376 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
31377 emit_move_insn (target, gen_lowpart (mode, tmp));
31378 }
31379 else
31380 gcc_unreachable ();
31381 }
31382 }
31383
31384 /* Initialize vector TARGET via VALS. Suppress the use of MMX
31385 instructions unless MMX_OK is true. */
31386
31387 void
31388 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
31389 {
31390 enum machine_mode mode = GET_MODE (target);
31391 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31392 int n_elts = GET_MODE_NUNITS (mode);
31393 int n_var = 0, one_var = -1;
31394 bool all_same = true, all_const_zero = true;
31395 int i;
31396 rtx x;
31397
31398 for (i = 0; i < n_elts; ++i)
31399 {
31400 x = XVECEXP (vals, 0, i);
31401 if (!(CONST_INT_P (x)
31402 || GET_CODE (x) == CONST_DOUBLE
31403 || GET_CODE (x) == CONST_FIXED))
31404 n_var++, one_var = i;
31405 else if (x != CONST0_RTX (inner_mode))
31406 all_const_zero = false;
31407 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
31408 all_same = false;
31409 }
31410
31411 /* Constants are best loaded from the constant pool. */
31412 if (n_var == 0)
31413 {
31414 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
31415 return;
31416 }
31417
31418 /* If all values are identical, broadcast the value. */
31419 if (all_same
31420 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
31421 XVECEXP (vals, 0, 0)))
31422 return;
31423
31424 /* Values where only one field is non-constant are best loaded from
31425 the pool and overwritten via move later. */
31426 if (n_var == 1)
31427 {
31428 if (all_const_zero
31429 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
31430 XVECEXP (vals, 0, one_var),
31431 one_var))
31432 return;
31433
31434 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
31435 return;
31436 }
31437
31438 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
31439 }
31440
31441 void
31442 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
31443 {
31444 enum machine_mode mode = GET_MODE (target);
31445 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31446 enum machine_mode half_mode;
31447 bool use_vec_merge = false;
31448 rtx tmp;
31449 static rtx (*gen_extract[6][2]) (rtx, rtx)
31450 = {
31451 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
31452 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
31453 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
31454 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
31455 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
31456 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
31457 };
31458 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
31459 = {
31460 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
31461 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
31462 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
31463 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
31464 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
31465 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
31466 };
31467 int i, j, n;
31468
31469 switch (mode)
31470 {
31471 case V2SFmode:
31472 case V2SImode:
31473 if (mmx_ok)
31474 {
31475 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31476 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
31477 if (elt == 0)
31478 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31479 else
31480 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31481 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31482 return;
31483 }
31484 break;
31485
31486 case V2DImode:
31487 use_vec_merge = TARGET_SSE4_1;
31488 if (use_vec_merge)
31489 break;
31490
31491 case V2DFmode:
31492 {
31493 rtx op0, op1;
31494
31495 /* For the two element vectors, we implement a VEC_CONCAT with
31496 the extraction of the other element. */
31497
31498 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
31499 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
31500
31501 if (elt == 0)
31502 op0 = val, op1 = tmp;
31503 else
31504 op0 = tmp, op1 = val;
31505
31506 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
31507 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31508 }
31509 return;
31510
31511 case V4SFmode:
31512 use_vec_merge = TARGET_SSE4_1;
31513 if (use_vec_merge)
31514 break;
31515
31516 switch (elt)
31517 {
31518 case 0:
31519 use_vec_merge = true;
31520 break;
31521
31522 case 1:
31523 /* tmp = target = A B C D */
31524 tmp = copy_to_reg (target);
31525 /* target = A A B B */
31526 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
31527 /* target = X A B B */
31528 ix86_expand_vector_set (false, target, val, 0);
31529 /* target = A X C D */
31530 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31531 const1_rtx, const0_rtx,
31532 GEN_INT (2+4), GEN_INT (3+4)));
31533 return;
31534
31535 case 2:
31536 /* tmp = target = A B C D */
31537 tmp = copy_to_reg (target);
31538 /* tmp = X B C D */
31539 ix86_expand_vector_set (false, tmp, val, 0);
31540 /* target = A B X D */
31541 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31542 const0_rtx, const1_rtx,
31543 GEN_INT (0+4), GEN_INT (3+4)));
31544 return;
31545
31546 case 3:
31547 /* tmp = target = A B C D */
31548 tmp = copy_to_reg (target);
31549 /* tmp = X B C D */
31550 ix86_expand_vector_set (false, tmp, val, 0);
31551 	  /* target = A B C X */
31552 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31553 const0_rtx, const1_rtx,
31554 GEN_INT (2+4), GEN_INT (0+4)));
31555 return;
31556
31557 default:
31558 gcc_unreachable ();
31559 }
31560 break;
31561
31562 case V4SImode:
31563 use_vec_merge = TARGET_SSE4_1;
31564 if (use_vec_merge)
31565 break;
31566
31567 /* Element 0 handled by vec_merge below. */
31568 if (elt == 0)
31569 {
31570 use_vec_merge = true;
31571 break;
31572 }
31573
31574 if (TARGET_SSE2)
31575 {
31576 /* With SSE2, use integer shuffles to swap element 0 and ELT,
31577 store into element 0, then shuffle them back. */
31578
31579 rtx order[4];
31580
31581 order[0] = GEN_INT (elt);
31582 order[1] = const1_rtx;
31583 order[2] = const2_rtx;
31584 order[3] = GEN_INT (3);
31585 order[elt] = const0_rtx;
31586
31587 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31588 order[1], order[2], order[3]));
31589
31590 ix86_expand_vector_set (false, target, val, 0);
31591
31592 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31593 order[1], order[2], order[3]));
31594 }
31595 else
31596 {
31597 /* For SSE1, we have to reuse the V4SF code. */
31598 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
31599 gen_lowpart (SFmode, val), elt);
31600 }
31601 return;
31602
31603 case V8HImode:
31604 use_vec_merge = TARGET_SSE2;
31605 break;
31606 case V4HImode:
31607 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31608 break;
31609
31610 case V16QImode:
31611 use_vec_merge = TARGET_SSE4_1;
31612 break;
31613
31614 case V8QImode:
31615 break;
31616
31617 case V32QImode:
31618 half_mode = V16QImode;
31619 j = 0;
31620 n = 16;
31621 goto half;
31622
31623 case V16HImode:
31624 half_mode = V8HImode;
31625 j = 1;
31626 n = 8;
31627 goto half;
31628
31629 case V8SImode:
31630 half_mode = V4SImode;
31631 j = 2;
31632 n = 4;
31633 goto half;
31634
31635 case V4DImode:
31636 half_mode = V2DImode;
31637 j = 3;
31638 n = 2;
31639 goto half;
31640
31641 case V8SFmode:
31642 half_mode = V4SFmode;
31643 j = 4;
31644 n = 4;
31645 goto half;
31646
31647 case V4DFmode:
31648 half_mode = V2DFmode;
31649 j = 5;
31650 n = 2;
31651 goto half;
31652
31653 half:
31654 /* Compute offset. */
31655 i = elt / n;
31656 elt %= n;
31657
31658 gcc_assert (i <= 1);
31659
31660 /* Extract the half. */
31661 tmp = gen_reg_rtx (half_mode);
31662 emit_insn (gen_extract[j][i] (tmp, target));
31663
31664 /* Put val in tmp at elt. */
31665 ix86_expand_vector_set (false, tmp, val, elt);
31666
31667 /* Put it back. */
31668 emit_insn (gen_insert[j][i] (target, target, tmp));
31669 return;
31670
31671 default:
31672 break;
31673 }
31674
31675 if (use_vec_merge)
31676 {
31677 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
31678 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
31679 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31680 }
31681 else
31682 {
31683 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31684
31685 emit_move_insn (mem, target);
31686
31687 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31688 emit_move_insn (tmp, val);
31689
31690 emit_move_insn (target, mem);
31691 }
31692 }
31693
31694 void
31695 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
31696 {
31697 enum machine_mode mode = GET_MODE (vec);
31698 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31699 bool use_vec_extr = false;
31700 rtx tmp;
31701
31702 switch (mode)
31703 {
31704 case V2SImode:
31705 case V2SFmode:
31706 if (!mmx_ok)
31707 break;
31708 /* FALLTHRU */
31709
31710 case V2DFmode:
31711 case V2DImode:
31712 use_vec_extr = true;
31713 break;
31714
31715 case V4SFmode:
31716 use_vec_extr = TARGET_SSE4_1;
31717 if (use_vec_extr)
31718 break;
31719
31720 switch (elt)
31721 {
31722 case 0:
31723 tmp = vec;
31724 break;
31725
31726 case 1:
31727 case 3:
31728 tmp = gen_reg_rtx (mode);
31729 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
31730 GEN_INT (elt), GEN_INT (elt),
31731 GEN_INT (elt+4), GEN_INT (elt+4)));
31732 break;
31733
31734 case 2:
31735 tmp = gen_reg_rtx (mode);
31736 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
31737 break;
31738
31739 default:
31740 gcc_unreachable ();
31741 }
31742 vec = tmp;
31743 use_vec_extr = true;
31744 elt = 0;
31745 break;
31746
31747 case V4SImode:
31748 use_vec_extr = TARGET_SSE4_1;
31749 if (use_vec_extr)
31750 break;
31751
31752 if (TARGET_SSE2)
31753 {
31754 switch (elt)
31755 {
31756 case 0:
31757 tmp = vec;
31758 break;
31759
31760 case 1:
31761 case 3:
31762 tmp = gen_reg_rtx (mode);
31763 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
31764 GEN_INT (elt), GEN_INT (elt),
31765 GEN_INT (elt), GEN_INT (elt)));
31766 break;
31767
31768 case 2:
31769 tmp = gen_reg_rtx (mode);
31770 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
31771 break;
31772
31773 default:
31774 gcc_unreachable ();
31775 }
31776 vec = tmp;
31777 use_vec_extr = true;
31778 elt = 0;
31779 }
31780 else
31781 {
31782 /* For SSE1, we have to reuse the V4SF code. */
31783 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
31784 gen_lowpart (V4SFmode, vec), elt);
31785 return;
31786 }
31787 break;
31788
31789 case V8HImode:
31790 use_vec_extr = TARGET_SSE2;
31791 break;
31792 case V4HImode:
31793 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31794 break;
31795
31796 case V16QImode:
31797 use_vec_extr = TARGET_SSE4_1;
31798 break;
31799
31800 case V8QImode:
31801 /* ??? Could extract the appropriate HImode element and shift. */
31802 default:
31803 break;
31804 }
31805
31806 if (use_vec_extr)
31807 {
31808 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
31809 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
31810
31811 /* Let the rtl optimizers know about the zero extension performed. */
31812 if (inner_mode == QImode || inner_mode == HImode)
31813 {
31814 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
31815 target = gen_lowpart (SImode, target);
31816 }
31817
31818 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31819 }
31820 else
31821 {
31822 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31823
31824 emit_move_insn (mem, vec);
31825
31826 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31827 emit_move_insn (target, tmp);
31828 }
31829 }
31830
31831 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
31832 pattern to reduce; DEST is the destination; IN is the input vector. */
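/* Editor's sketch of the data flow below, writing IN as {a, b, c, d} and
   FN as a binary operation f (assumed associative and commutative, e.g.
   smax):

     tmp1 = movhlps (in, in)  gives { c, d, c, d }
     tmp2 = f (tmp1, in)      has f(a,c) and f(b,d) in its low two elements
     tmp3 = broadcast of tmp2 element 1, i.e. f(b,d)
     dest = f (tmp2, tmp3), so element 0 of DEST holds f(f(a,c), f(b,d)),
     the reduction over all four input elements.  */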
31833
31834 void
31835 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
31836 {
31837 rtx tmp1, tmp2, tmp3;
31838
31839 tmp1 = gen_reg_rtx (V4SFmode);
31840 tmp2 = gen_reg_rtx (V4SFmode);
31841 tmp3 = gen_reg_rtx (V4SFmode);
31842
31843 emit_insn (gen_sse_movhlps (tmp1, in, in));
31844 emit_insn (fn (tmp2, tmp1, in));
31845
31846 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
31847 const1_rtx, const1_rtx,
31848 GEN_INT (1+4), GEN_INT (1+4)));
31849 emit_insn (fn (dest, tmp2, tmp3));
31850 }
31851 \f
31852 /* Target hook for scalar_mode_supported_p. */
31853 static bool
31854 ix86_scalar_mode_supported_p (enum machine_mode mode)
31855 {
31856 if (DECIMAL_FLOAT_MODE_P (mode))
31857 return default_decimal_float_supported_p ();
31858 else if (mode == TFmode)
31859 return true;
31860 else
31861 return default_scalar_mode_supported_p (mode);
31862 }
31863
31864 /* Implements target hook vector_mode_supported_p. */
31865 static bool
31866 ix86_vector_mode_supported_p (enum machine_mode mode)
31867 {
31868 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
31869 return true;
31870 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
31871 return true;
31872 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
31873 return true;
31874 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
31875 return true;
31876 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
31877 return true;
31878 return false;
31879 }
31880
31881 /* Target hook for c_mode_for_suffix. */
31882 static enum machine_mode
31883 ix86_c_mode_for_suffix (char suffix)
31884 {
31885 if (suffix == 'q')
31886 return TFmode;
31887 if (suffix == 'w')
31888 return XFmode;
31889
31890 return VOIDmode;
31891 }
31892
31893 /* Worker function for TARGET_MD_ASM_CLOBBERS.
31894
31895 We do this in the new i386 backend to maintain source compatibility
31896 with the old cc0-based compiler. */
31897
31898 static tree
31899 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
31900 tree inputs ATTRIBUTE_UNUSED,
31901 tree clobbers)
31902 {
31903 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
31904 clobbers);
31905 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
31906 clobbers);
31907 return clobbers;
31908 }
31909
31910 /* Implements the targetm.asm.encode_section_info target hook.  This
31911    is not used by NetWare.  */
31912
31913 static void ATTRIBUTE_UNUSED
31914 ix86_encode_section_info (tree decl, rtx rtl, int first)
31915 {
31916 default_encode_section_info (decl, rtl, first);
31917
31918 if (TREE_CODE (decl) == VAR_DECL
31919 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
31920 && ix86_in_large_data_p (decl))
31921 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
31922 }
31923
31924 /* Worker function for REVERSE_CONDITION. */
31925
31926 enum rtx_code
31927 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
31928 {
31929 return (mode != CCFPmode && mode != CCFPUmode
31930 ? reverse_condition (code)
31931 : reverse_condition_maybe_unordered (code));
31932 }
31933
31934 /* Output code to perform an x87 FP register move, from OPERANDS[1]
31935 to OPERANDS[0]. */
31936
31937 const char *
31938 output_387_reg_move (rtx insn, rtx *operands)
31939 {
31940 if (REG_P (operands[0]))
31941 {
31942 if (REG_P (operands[1])
31943 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31944 {
31945 if (REGNO (operands[0]) == FIRST_STACK_REG)
31946 return output_387_ffreep (operands, 0);
31947 return "fstp\t%y0";
31948 }
31949 if (STACK_TOP_P (operands[0]))
31950 return "fld%Z1\t%y1";
31951 return "fst\t%y0";
31952 }
31953 else if (MEM_P (operands[0]))
31954 {
31955 gcc_assert (REG_P (operands[1]));
31956 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31957 return "fstp%Z0\t%y0";
31958 else
31959 {
31960 /* There is no non-popping store to memory for XFmode.
31961 So if we need one, follow the store with a load. */
31962 if (GET_MODE (operands[0]) == XFmode)
31963 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
31964 else
31965 return "fst%Z0\t%y0";
31966 }
31967 }
31968 else
31969 gcc_unreachable();
31970 }
31971
31972 /* Output code to perform a conditional jump to LABEL, if C2 flag in
31973 FP status register is set. */
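/* Editor's note: C2 is bit 10 of the x87 status word.  After fnstsw stores
   the status word in %ax, C2 is bit 2 of %ah, which is why the non-SAHF
   path below tests the high byte against 0x04; the SAHF path copies %ah
   into the flags instead and branches on the resulting unordered (parity)
   condition.  */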
31974
31975 void
31976 ix86_emit_fp_unordered_jump (rtx label)
31977 {
31978 rtx reg = gen_reg_rtx (HImode);
31979 rtx temp;
31980
31981 emit_insn (gen_x86_fnstsw_1 (reg));
31982
31983 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
31984 {
31985 emit_insn (gen_x86_sahf_1 (reg));
31986
31987 temp = gen_rtx_REG (CCmode, FLAGS_REG);
31988 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
31989 }
31990 else
31991 {
31992 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
31993
31994 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
31995 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
31996 }
31997
31998 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
31999 gen_rtx_LABEL_REF (VOIDmode, label),
32000 pc_rtx);
32001 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
32002
32003 emit_jump_insn (temp);
32004 predict_jump (REG_BR_PROB_BASE * 10 / 100);
32005 }
32006
32007 /* Output code to perform a log1p XFmode calculation. */
32008
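/* Editor's note on the expansion below: fyl2xp1 computes y * log2(x + 1),
   but is only specified for |x| < 1 - sqrt(2)/2 (about 0.2928...), which is
   the constant tested against.  For small |op1| the result is computed as
   ln(2) * log2(1 + op1) via fyl2xp1; otherwise 1 + op1 is formed explicitly
   and fyl2x is used.  Both give log1p (op1) = ln (1 + op1).  */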
32009 void ix86_emit_i387_log1p (rtx op0, rtx op1)
32010 {
32011 rtx label1 = gen_label_rtx ();
32012 rtx label2 = gen_label_rtx ();
32013
32014 rtx tmp = gen_reg_rtx (XFmode);
32015 rtx tmp2 = gen_reg_rtx (XFmode);
32016 rtx test;
32017
32018 emit_insn (gen_absxf2 (tmp, op1));
32019 test = gen_rtx_GE (VOIDmode, tmp,
32020 CONST_DOUBLE_FROM_REAL_VALUE (
32021 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
32022 XFmode));
32023 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
32024
32025 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32026 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
32027 emit_jump (label2);
32028
32029 emit_label (label1);
32030 emit_move_insn (tmp, CONST1_RTX (XFmode));
32031 emit_insn (gen_addxf3 (tmp, op1, tmp));
32032 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32033 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
32034
32035 emit_label (label2);
32036 }
32037
32038 /* Output code to perform a Newton-Raphson approximation of a single precision
32039 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
32040
32041 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
32042 {
32043 rtx x0, x1, e0, e1;
32044
32045 x0 = gen_reg_rtx (mode);
32046 e0 = gen_reg_rtx (mode);
32047 e1 = gen_reg_rtx (mode);
32048 x1 = gen_reg_rtx (mode);
32049
32050 /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp (b))) */
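   /* Editor's note: this is one Newton-Raphson step for the reciprocal.
      With x0 = rcp(b), an approximation of 1/b, the refined value is
	x1 = x0 * (2 - b * x0) = 2*x0 - b*x0*x0,
      which is exactly the (rcp(b) + rcp(b)) - (b * rcp(b) * rcp(b)) term
      above; the quotient is then approximated as a * x1.  */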
32051
32052 /* x0 = rcp(b) estimate */
32053 emit_insn (gen_rtx_SET (VOIDmode, x0,
32054 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
32055 UNSPEC_RCP)));
32056 /* e0 = x0 * b */
32057 emit_insn (gen_rtx_SET (VOIDmode, e0,
32058 gen_rtx_MULT (mode, x0, b)));
32059
32060 /* e0 = x0 * e0 */
32061 emit_insn (gen_rtx_SET (VOIDmode, e0,
32062 gen_rtx_MULT (mode, x0, e0)));
32063
32064 /* e1 = x0 + x0 */
32065 emit_insn (gen_rtx_SET (VOIDmode, e1,
32066 gen_rtx_PLUS (mode, x0, x0)));
32067
32068 /* x1 = e1 - e0 */
32069 emit_insn (gen_rtx_SET (VOIDmode, x1,
32070 gen_rtx_MINUS (mode, e1, e0)));
32071
32072 /* res = a * x1 */
32073 emit_insn (gen_rtx_SET (VOIDmode, res,
32074 gen_rtx_MULT (mode, a, x1)));
32075 }
32076
32077 /* Output code to perform a Newton-Raphson approximation of a
32078 single precision floating point [reciprocal] square root. */
32079
32080 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
32081 bool recip)
32082 {
32083 rtx x0, e0, e1, e2, e3, mthree, mhalf;
32084 REAL_VALUE_TYPE r;
32085
32086 x0 = gen_reg_rtx (mode);
32087 e0 = gen_reg_rtx (mode);
32088 e1 = gen_reg_rtx (mode);
32089 e2 = gen_reg_rtx (mode);
32090 e3 = gen_reg_rtx (mode);
32091
32092 real_from_integer (&r, VOIDmode, -3, -1, 0);
32093 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32094
32095 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
32096 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32097
32098 if (VECTOR_MODE_P (mode))
32099 {
32100 mthree = ix86_build_const_vector (mode, true, mthree);
32101 mhalf = ix86_build_const_vector (mode, true, mhalf);
32102 }
32103
32104 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
32105 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
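  /* Editor's note: both formulas are one Newton-Raphson step for the
     reciprocal square root.  With x0 = rsqrtss(a), an approximation of
     1/sqrt(a),
	x1 = 0.5 * x0 * (3 - a * x0 * x0) = -0.5 * x0 * (a * x0 * x0 - 3),
     and sqrt(a) is recovered as a * x1.  This matches the code below:
     e0 = a*x0, e1 = a*x0*x0, e2 = e1 - 3, and e3 = -0.5 * x0 (or
     -0.5 * e0 = -0.5 * a * x0 for the sqrt case).  */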
32106
32107 /* x0 = rsqrt(a) estimate */
32108 emit_insn (gen_rtx_SET (VOIDmode, x0,
32109 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
32110 UNSPEC_RSQRT)));
32111
32112   /* If a == 0.0, filter out the infinite rsqrt estimate to prevent NaN for sqrt(0.0).  */
32113 if (!recip)
32114 {
32115 rtx zero, mask;
32116
32117 zero = gen_reg_rtx (mode);
32118 mask = gen_reg_rtx (mode);
32119
32120 zero = force_reg (mode, CONST0_RTX(mode));
32121 emit_insn (gen_rtx_SET (VOIDmode, mask,
32122 gen_rtx_NE (mode, zero, a)));
32123
32124 emit_insn (gen_rtx_SET (VOIDmode, x0,
32125 gen_rtx_AND (mode, x0, mask)));
32126 }
32127
32128 /* e0 = x0 * a */
32129 emit_insn (gen_rtx_SET (VOIDmode, e0,
32130 gen_rtx_MULT (mode, x0, a)));
32131 /* e1 = e0 * x0 */
32132 emit_insn (gen_rtx_SET (VOIDmode, e1,
32133 gen_rtx_MULT (mode, e0, x0)));
32134
32135 /* e2 = e1 - 3. */
32136 mthree = force_reg (mode, mthree);
32137 emit_insn (gen_rtx_SET (VOIDmode, e2,
32138 gen_rtx_PLUS (mode, e1, mthree)));
32139
32140 mhalf = force_reg (mode, mhalf);
32141 if (recip)
32142 /* e3 = -.5 * x0 */
32143 emit_insn (gen_rtx_SET (VOIDmode, e3,
32144 gen_rtx_MULT (mode, x0, mhalf)));
32145 else
32146 /* e3 = -.5 * e0 */
32147 emit_insn (gen_rtx_SET (VOIDmode, e3,
32148 gen_rtx_MULT (mode, e0, mhalf)));
32149 /* ret = e2 * e3 */
32150 emit_insn (gen_rtx_SET (VOIDmode, res,
32151 gen_rtx_MULT (mode, e2, e3)));
32152 }
32153
32154 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
32155
32156 static void ATTRIBUTE_UNUSED
32157 i386_solaris_elf_named_section (const char *name, unsigned int flags,
32158 tree decl)
32159 {
32160 /* With Binutils 2.15, the "@unwind" marker must be specified on
32161 every occurrence of the ".eh_frame" section, not just the first
32162 one. */
32163 if (TARGET_64BIT
32164 && strcmp (name, ".eh_frame") == 0)
32165 {
32166 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
32167 flags & SECTION_WRITE ? "aw" : "a");
32168 return;
32169 }
32170 default_elf_asm_named_section (name, flags, decl);
32171 }
32172
32173 /* Return the mangling of TYPE if it is an extended fundamental type. */
32174
32175 static const char *
32176 ix86_mangle_type (const_tree type)
32177 {
32178 type = TYPE_MAIN_VARIANT (type);
32179
32180 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32181 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32182 return NULL;
32183
32184 switch (TYPE_MODE (type))
32185 {
32186 case TFmode:
32187 /* __float128 is "g". */
32188 return "g";
32189 case XFmode:
32190 /* "long double" or __float80 is "e". */
32191 return "e";
32192 default:
32193 return NULL;
32194 }
32195 }
32196
32197 /* For 32-bit code we can save PIC register setup by using
32198 __stack_chk_fail_local hidden function instead of calling
32199    __stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
32200 register, so it is better to call __stack_chk_fail directly. */
32201
32202 static tree
32203 ix86_stack_protect_fail (void)
32204 {
32205 return TARGET_64BIT
32206 ? default_external_stack_protect_fail ()
32207 : default_hidden_stack_protect_fail ();
32208 }
32209
32210 /* Select a format to encode pointers in exception handling data. CODE
32211 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
32212 true if the symbol may be affected by dynamic relocations.
32213
32214 ??? All x86 object file formats are capable of representing this.
32215 After all, the relocation needed is the same as for the call insn.
32216 Whether or not a particular assembler allows us to enter such, I
32217 guess we'll have to see. */
32218 int
32219 asm_preferred_eh_data_format (int code, int global)
32220 {
32221 if (flag_pic)
32222 {
32223 int type = DW_EH_PE_sdata8;
32224 if (!TARGET_64BIT
32225 || ix86_cmodel == CM_SMALL_PIC
32226 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
32227 type = DW_EH_PE_sdata4;
32228 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
32229 }
32230 if (ix86_cmodel == CM_SMALL
32231 || (ix86_cmodel == CM_MEDIUM && code))
32232 return DW_EH_PE_udata4;
32233 return DW_EH_PE_absptr;
32234 }
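/* For illustration (derived from the logic above, not part of the
   original code): with -fPIC on a 32-bit target a global symbol is
   encoded as DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4 and
   a local one as DW_EH_PE_pcrel | DW_EH_PE_sdata4, while non-PIC
   small-model code uses DW_EH_PE_udata4 and larger models generally
   fall back to DW_EH_PE_absptr.  */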
32235 \f
32236 /* Expand copysign from SIGN to the positive value ABS_VALUE
32237 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
32238 the sign-bit. */
32239 static void
32240 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
32241 {
32242 enum machine_mode mode = GET_MODE (sign);
32243 rtx sgn = gen_reg_rtx (mode);
32244 if (mask == NULL_RTX)
32245 {
32246 enum machine_mode vmode;
32247
32248 if (mode == SFmode)
32249 vmode = V4SFmode;
32250 else if (mode == DFmode)
32251 vmode = V2DFmode;
32252 else
32253 vmode = mode;
32254
32255 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
32256 if (!VECTOR_MODE_P (mode))
32257 {
32258 /* We need to generate a scalar mode mask in this case. */
32259 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32260 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32261 mask = gen_reg_rtx (mode);
32262 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32263 }
32264 }
32265 else
32266 mask = gen_rtx_NOT (mode, mask);
32267 emit_insn (gen_rtx_SET (VOIDmode, sgn,
32268 gen_rtx_AND (mode, mask, sign)));
32269 emit_insn (gen_rtx_SET (VOIDmode, result,
32270 gen_rtx_IOR (mode, abs_value, sgn)));
32271 }
32272
32273 /* Expand fabs (OP0) and return a new rtx that holds the result. The
32274 mask for masking out the sign-bit is stored in *SMASK, if that is
32275 non-null. */
32276 static rtx
32277 ix86_expand_sse_fabs (rtx op0, rtx *smask)
32278 {
32279 enum machine_mode vmode, mode = GET_MODE (op0);
32280 rtx xa, mask;
32281
32282 xa = gen_reg_rtx (mode);
32283 if (mode == SFmode)
32284 vmode = V4SFmode;
32285 else if (mode == DFmode)
32286 vmode = V2DFmode;
32287 else
32288 vmode = mode;
32289 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
32290 if (!VECTOR_MODE_P (mode))
32291 {
32292 /* We need to generate a scalar mode mask in this case. */
32293 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32294 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32295 mask = gen_reg_rtx (mode);
32296 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32297 }
32298 emit_insn (gen_rtx_SET (VOIDmode, xa,
32299 gen_rtx_AND (mode, op0, mask)));
32300
32301 if (smask)
32302 *smask = mask;
32303
32304 return xa;
32305 }
32306
32307 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
32308 swapping the operands if SWAP_OPERANDS is true. The expanded
32309 code is a forward jump to a newly created label in case the
32310 comparison is true. The generated label rtx is returned. */
32311 static rtx
32312 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
32313 bool swap_operands)
32314 {
32315 rtx label, tmp;
32316
32317 if (swap_operands)
32318 {
32319 tmp = op0;
32320 op0 = op1;
32321 op1 = tmp;
32322 }
32323
32324 label = gen_label_rtx ();
32325 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
32326 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32327 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
32328 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
32329 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
32330 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
32331 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
32332 JUMP_LABEL (tmp) = label;
32333
32334 return label;
32335 }
32336
32337 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
32338 using comparison code CODE. Operands are swapped for the comparison if
32339 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
32340 static rtx
32341 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
32342 bool swap_operands)
32343 {
32344 rtx (*insn)(rtx, rtx, rtx, rtx);
32345 enum machine_mode mode = GET_MODE (op0);
32346 rtx mask = gen_reg_rtx (mode);
32347
32348 if (swap_operands)
32349 {
32350 rtx tmp = op0;
32351 op0 = op1;
32352 op1 = tmp;
32353 }
32354
32355 insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;
32356
32357 emit_insn (insn (mask, op0, op1,
32358 gen_rtx_fmt_ee (code, mode, op0, op1)));
32359 return mask;
32360 }
32361
32362 /* Generate and return a rtx of mode MODE for 2**n where n is the number
32363 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
32364 static rtx
32365 ix86_gen_TWO52 (enum machine_mode mode)
32366 {
32367 REAL_VALUE_TYPE TWO52r;
32368 rtx TWO52;
32369
32370 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
32371 TWO52 = const_double_from_real_value (TWO52r, mode);
32372 TWO52 = force_reg (mode, TWO52);
32373
32374 return TWO52;
32375 }
32376
32377 /* Expand SSE sequence for computing lround from OP1 storing
32378 into OP0. */
32379 void
32380 ix86_expand_lround (rtx op0, rtx op1)
32381 {
32382 /* C code for the stuff we're doing below:
32383 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
32384 return (long)tmp;
32385 */
32386 enum machine_mode mode = GET_MODE (op1);
32387 const struct real_format *fmt;
32388 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32389 rtx adj;
32390
32391 /* load nextafter (0.5, 0.0) */
32392 fmt = REAL_MODE_FORMAT (mode);
32393 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32394 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
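  /* Illustrative note (not in the original code): for SFmode the
     precision p is 24, so half_minus_pred_half is 2**-25 and pred_half
     is 0.5 - 2**-25, i.e. nextafterf (0.5f, 0.0f); for DFmode (p == 53)
     it is 0.5 - 2**-54.  */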
32395
32396 /* adj = copysign (0.5, op1) */
32397 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
32398 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
32399
32400 /* adj = op1 + adj */
32401 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
32402
32403 /* op0 = (imode)adj */
32404 expand_fix (op0, adj, 0);
32405 }
32406
32407 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
32408    into OP0.  */
32409 void
32410 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
32411 {
32412 /* C code for the stuff we're doing below (for do_floor):
32413 xi = (long)op1;
32414 xi -= (double)xi > op1 ? 1 : 0;
32415 return xi;
32416 */
32417 enum machine_mode fmode = GET_MODE (op1);
32418 enum machine_mode imode = GET_MODE (op0);
32419 rtx ireg, freg, label, tmp;
32420
32421 /* reg = (long)op1 */
32422 ireg = gen_reg_rtx (imode);
32423 expand_fix (ireg, op1, 0);
32424
32425 /* freg = (double)reg */
32426 freg = gen_reg_rtx (fmode);
32427 expand_float (freg, ireg, 0);
32428
32429 /* ireg = (freg > op1) ? ireg - 1 : ireg */
32430 label = ix86_expand_sse_compare_and_jump (UNLE,
32431 freg, op1, !do_floor);
32432 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
32433 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
32434 emit_move_insn (ireg, tmp);
32435
32436 emit_label (label);
32437 LABEL_NUSES (label) = 1;
32438
32439 emit_move_insn (op0, ireg);
32440 }
32441
32442 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
32443 result in OPERAND0. */
32444 void
32445 ix86_expand_rint (rtx operand0, rtx operand1)
32446 {
32447 /* C code for the stuff we're doing below:
32448 xa = fabs (operand1);
32449 if (!isless (xa, 2**52))
32450 return operand1;
32451 xa = xa + 2**52 - 2**52;
32452 return copysign (xa, operand1);
32453 */
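  /* Note (illustrative, not part of the original comment): the
     "xa + 2**52 - 2**52" step works because for |xa| < 2**52 the sum
     lands where adjacent doubles are spaced exactly 1.0 apart
     (2**23 plays the same role for SFmode), so e.g. 3.3 + 2**52 rounds
     to 2**52 + 3.0 under the current rounding mode and subtracting
     2**52 again leaves 3.0.  */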
32454 enum machine_mode mode = GET_MODE (operand0);
32455 rtx res, xa, label, TWO52, mask;
32456
32457 res = gen_reg_rtx (mode);
32458 emit_move_insn (res, operand1);
32459
32460 /* xa = abs (operand1) */
32461 xa = ix86_expand_sse_fabs (res, &mask);
32462
32463 /* if (!isless (xa, TWO52)) goto label; */
32464 TWO52 = ix86_gen_TWO52 (mode);
32465 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32466
32467 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32468 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32469
32470 ix86_sse_copysign_to_positive (res, xa, res, mask);
32471
32472 emit_label (label);
32473 LABEL_NUSES (label) = 1;
32474
32475 emit_move_insn (operand0, res);
32476 }
32477
32478 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
32479 into OPERAND0. */
32480 void
32481 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
32482 {
32483 /* C code for the stuff we expand below.
32484 double xa = fabs (x), x2;
32485 if (!isless (xa, TWO52))
32486 return x;
32487 xa = xa + TWO52 - TWO52;
32488 x2 = copysign (xa, x);
32489 Compensate. Floor:
32490 if (x2 > x)
32491 x2 -= 1;
32492 Compensate. Ceil:
32493 if (x2 < x)
32494 x2 -= -1;
32495 return x2;
32496 */
32497 enum machine_mode mode = GET_MODE (operand0);
32498 rtx xa, TWO52, tmp, label, one, res, mask;
32499
32500 TWO52 = ix86_gen_TWO52 (mode);
32501
32502 /* Temporary for holding the result, initialized to the input
32503 operand to ease control flow. */
32504 res = gen_reg_rtx (mode);
32505 emit_move_insn (res, operand1);
32506
32507 /* xa = abs (operand1) */
32508 xa = ix86_expand_sse_fabs (res, &mask);
32509
32510 /* if (!isless (xa, TWO52)) goto label; */
32511 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32512
32513 /* xa = xa + TWO52 - TWO52; */
32514 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32515 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32516
32517 /* xa = copysign (xa, operand1) */
32518 ix86_sse_copysign_to_positive (xa, xa, res, mask);
32519
32520 /* generate 1.0 or -1.0 */
32521 one = force_reg (mode,
32522 const_double_from_real_value (do_floor
32523 ? dconst1 : dconstm1, mode));
32524
32525 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32526 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32527 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32528 gen_rtx_AND (mode, one, tmp)));
32529 /* We always need to subtract here to preserve signed zero. */
32530 tmp = expand_simple_binop (mode, MINUS,
32531 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32532 emit_move_insn (res, tmp);
32533
32534 emit_label (label);
32535 LABEL_NUSES (label) = 1;
32536
32537 emit_move_insn (operand0, res);
32538 }
32539
32540 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
32541 into OPERAND0. */
32542 void
32543 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
32544 {
32545 /* C code for the stuff we expand below.
32546 double xa = fabs (x), x2;
32547 if (!isless (xa, TWO52))
32548 return x;
32549 x2 = (double)(long)x;
32550 Compensate. Floor:
32551 if (x2 > x)
32552 x2 -= 1;
32553 Compensate. Ceil:
32554 if (x2 < x)
32555 x2 += 1;
32556 if (HONOR_SIGNED_ZEROS (mode))
32557 return copysign (x2, x);
32558 return x2;
32559 */
32560 enum machine_mode mode = GET_MODE (operand0);
32561 rtx xa, xi, TWO52, tmp, label, one, res, mask;
32562
32563 TWO52 = ix86_gen_TWO52 (mode);
32564
32565 /* Temporary for holding the result, initialized to the input
32566 operand to ease control flow. */
32567 res = gen_reg_rtx (mode);
32568 emit_move_insn (res, operand1);
32569
32570 /* xa = abs (operand1) */
32571 xa = ix86_expand_sse_fabs (res, &mask);
32572
32573 /* if (!isless (xa, TWO52)) goto label; */
32574 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32575
32576 /* xa = (double)(long)x */
32577 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32578 expand_fix (xi, res, 0);
32579 expand_float (xa, xi, 0);
32580
32581 /* generate 1.0 */
32582 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32583
32584 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32585 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32586 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32587 gen_rtx_AND (mode, one, tmp)));
32588 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
32589 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32590 emit_move_insn (res, tmp);
32591
32592 if (HONOR_SIGNED_ZEROS (mode))
32593 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32594
32595 emit_label (label);
32596 LABEL_NUSES (label) = 1;
32597
32598 emit_move_insn (operand0, res);
32599 }
32600
32601 /* Expand SSE sequence for computing round from OPERAND1 storing
32602    into OPERAND0.  This sequence works without relying on DImode truncation
32603    via cvttsd2siq, which is only available on 64-bit targets.  */
32604 void
32605 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
32606 {
32607 /* C code for the stuff we expand below.
32608 double xa = fabs (x), xa2, x2;
32609 if (!isless (xa, TWO52))
32610 return x;
32611 Using the absolute value and copying back sign makes
32612 -0.0 -> -0.0 correct.
32613 xa2 = xa + TWO52 - TWO52;
32614 Compensate.
32615 dxa = xa2 - xa;
32616 if (dxa <= -0.5)
32617 xa2 += 1;
32618 else if (dxa > 0.5)
32619 xa2 -= 1;
32620 x2 = copysign (xa2, x);
32621 return x2;
32622 */
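  /* Worked example (illustrative, not part of the original comment):
     for x = 2.5 the step xa2 = 2.5 + 2**52 - 2**52 yields 2.0 under
     round-to-nearest-even, so dxa = -0.5 and the "dxa <= -0.5"
     compensation bumps xa2 back to 3.0, giving the
     round-half-away-from-zero behaviour that round () requires.  */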
32623 enum machine_mode mode = GET_MODE (operand0);
32624 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
32625
32626 TWO52 = ix86_gen_TWO52 (mode);
32627
32628 /* Temporary for holding the result, initialized to the input
32629 operand to ease control flow. */
32630 res = gen_reg_rtx (mode);
32631 emit_move_insn (res, operand1);
32632
32633 /* xa = abs (operand1) */
32634 xa = ix86_expand_sse_fabs (res, &mask);
32635
32636 /* if (!isless (xa, TWO52)) goto label; */
32637 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32638
32639 /* xa2 = xa + TWO52 - TWO52; */
32640 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32641 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
32642
32643 /* dxa = xa2 - xa; */
32644 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
32645
32646 /* generate 0.5, 1.0 and -0.5 */
32647 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
32648 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
32649 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
32650 0, OPTAB_DIRECT);
32651
32652 /* Compensate. */
32653 tmp = gen_reg_rtx (mode);
32654 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
32655 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
32656 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32657 gen_rtx_AND (mode, one, tmp)));
32658 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32659 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
32660 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
32661 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32662 gen_rtx_AND (mode, one, tmp)));
32663 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32664
32665 /* res = copysign (xa2, operand1) */
32666 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
32667
32668 emit_label (label);
32669 LABEL_NUSES (label) = 1;
32670
32671 emit_move_insn (operand0, res);
32672 }
32673
32674 /* Expand SSE sequence for computing trunc from OPERAND1 storing
32675 into OPERAND0. */
32676 void
32677 ix86_expand_trunc (rtx operand0, rtx operand1)
32678 {
32679 /* C code for SSE variant we expand below.
32680 double xa = fabs (x), x2;
32681 if (!isless (xa, TWO52))
32682 return x;
32683 x2 = (double)(long)x;
32684 if (HONOR_SIGNED_ZEROS (mode))
32685 return copysign (x2, x);
32686 return x2;
32687 */
32688 enum machine_mode mode = GET_MODE (operand0);
32689 rtx xa, xi, TWO52, label, res, mask;
32690
32691 TWO52 = ix86_gen_TWO52 (mode);
32692
32693 /* Temporary for holding the result, initialized to the input
32694 operand to ease control flow. */
32695 res = gen_reg_rtx (mode);
32696 emit_move_insn (res, operand1);
32697
32698 /* xa = abs (operand1) */
32699 xa = ix86_expand_sse_fabs (res, &mask);
32700
32701 /* if (!isless (xa, TWO52)) goto label; */
32702 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32703
32704 /* x = (double)(long)x */
32705 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32706 expand_fix (xi, res, 0);
32707 expand_float (res, xi, 0);
32708
32709 if (HONOR_SIGNED_ZEROS (mode))
32710 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32711
32712 emit_label (label);
32713 LABEL_NUSES (label) = 1;
32714
32715 emit_move_insn (operand0, res);
32716 }
32717
32718 /* Expand SSE sequence for computing trunc from OPERAND1 storing
32719    into OPERAND0.  This variant avoids DImode truncation, so it also
	 works on 32-bit targets.  */
32720 void
32721 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
32722 {
32723 enum machine_mode mode = GET_MODE (operand0);
32724 rtx xa, mask, TWO52, label, one, res, smask, tmp;
32725
32726 /* C code for SSE variant we expand below.
32727 double xa = fabs (x), x2;
32728 if (!isless (xa, TWO52))
32729 return x;
32730 xa2 = xa + TWO52 - TWO52;
32731 Compensate:
32732 if (xa2 > xa)
32733 xa2 -= 1.0;
32734 x2 = copysign (xa2, x);
32735 return x2;
32736 */
32737
32738 TWO52 = ix86_gen_TWO52 (mode);
32739
32740 /* Temporary for holding the result, initialized to the input
32741 operand to ease control flow. */
32742 res = gen_reg_rtx (mode);
32743 emit_move_insn (res, operand1);
32744
32745 /* xa = abs (operand1) */
32746 xa = ix86_expand_sse_fabs (res, &smask);
32747
32748 /* if (!isless (xa, TWO52)) goto label; */
32749 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32750
32751 /* res = xa + TWO52 - TWO52; */
32752 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32753 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
32754 emit_move_insn (res, tmp);
32755
32756 /* generate 1.0 */
32757 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32758
32759 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
32760 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
32761 emit_insn (gen_rtx_SET (VOIDmode, mask,
32762 gen_rtx_AND (mode, mask, one)));
32763 tmp = expand_simple_binop (mode, MINUS,
32764 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
32765 emit_move_insn (res, tmp);
32766
32767 /* res = copysign (res, operand1) */
32768 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
32769
32770 emit_label (label);
32771 LABEL_NUSES (label) = 1;
32772
32773 emit_move_insn (operand0, res);
32774 }
32775
32776 /* Expand SSE sequence for computing round from OPERAND1 storing
32777 into OPERAND0. */
32778 void
32779 ix86_expand_round (rtx operand0, rtx operand1)
32780 {
32781 /* C code for the stuff we're doing below:
32782 double xa = fabs (x);
32783 if (!isless (xa, TWO52))
32784 return x;
32785 xa = (double)(long)(xa + nextafter (0.5, 0.0));
32786 return copysign (xa, x);
32787 */
32788 enum machine_mode mode = GET_MODE (operand0);
32789 rtx res, TWO52, xa, label, xi, half, mask;
32790 const struct real_format *fmt;
32791 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32792
32793 /* Temporary for holding the result, initialized to the input
32794 operand to ease control flow. */
32795 res = gen_reg_rtx (mode);
32796 emit_move_insn (res, operand1);
32797
32798 TWO52 = ix86_gen_TWO52 (mode);
32799 xa = ix86_expand_sse_fabs (res, &mask);
32800 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32801
32802 /* load nextafter (0.5, 0.0) */
32803 fmt = REAL_MODE_FORMAT (mode);
32804 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32805 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
32806
32807 /* xa = xa + 0.5 */
32808 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
32809 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
32810
32811 /* xa = (double)(int64_t)xa */
32812 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32813 expand_fix (xi, xa, 0);
32814 expand_float (xa, xi, 0);
32815
32816 /* res = copysign (xa, operand1) */
32817 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
32818
32819 emit_label (label);
32820 LABEL_NUSES (label) = 1;
32821
32822 emit_move_insn (operand0, res);
32823 }
32824 \f
32825
32826 /* Table of valid machine attributes. */
32827 static const struct attribute_spec ix86_attribute_table[] =
32828 {
32829 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
32830 affects_type_identity } */
32831 /* Stdcall attribute says callee is responsible for popping arguments
32832 if they are not variable. */
32833 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32834 true },
32835 /* Fastcall attribute says callee is responsible for popping arguments
32836 if they are not variable. */
32837 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32838 true },
32839 /* Thiscall attribute says callee is responsible for popping arguments
32840 if they are not variable. */
32841 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32842 true },
32843 /* Cdecl attribute says the callee is a normal C declaration */
32844 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32845 true },
32846 /* Regparm attribute specifies how many integer arguments are to be
32847 passed in registers. */
32848 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute,
32849 true },
32850 /* Sseregparm attribute says we are using x86_64 calling conventions
32851 for FP arguments. */
32852 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32853 true },
32854 /* force_align_arg_pointer says this function realigns the stack at entry. */
32855 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
32856 false, true, true, ix86_handle_cconv_attribute, false },
32857 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32858 { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
32859 { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
32860 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute,
32861 false },
32862 #endif
32863 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32864 false },
32865 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32866 false },
32867 #ifdef SUBTARGET_ATTRIBUTE_TABLE
32868 SUBTARGET_ATTRIBUTE_TABLE,
32869 #endif
32870 /* ms_abi and sysv_abi calling convention function attributes. */
32871 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32872 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32873 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
32874 false },
32875 { "callee_pop_aggregate_return", 1, 1, false, true, true,
32876 ix86_handle_callee_pop_aggregate_return, true },
32877 /* End element. */
32878 { NULL, 0, 0, false, false, false, NULL, false }
32879 };
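/* Illustrative examples (not part of this table) of how some of these
   attributes are spelled in user code:

     int  __attribute__((regparm (3))) f (int a, int b, int c);
     void __attribute__((fastcall)) g (int x);
     void __attribute__((ms_abi)) h (void);
     struct __attribute__((ms_struct)) s { char c; int i; };
*/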
32880
32881 /* Implement targetm.vectorize.builtin_vectorization_cost. */
32882 static int
32883 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
32884 tree vectype ATTRIBUTE_UNUSED,
32885 int misalign ATTRIBUTE_UNUSED)
32886 {
32887 switch (type_of_cost)
32888 {
32889 case scalar_stmt:
32890 return ix86_cost->scalar_stmt_cost;
32891
32892 case scalar_load:
32893 return ix86_cost->scalar_load_cost;
32894
32895 case scalar_store:
32896 return ix86_cost->scalar_store_cost;
32897
32898 case vector_stmt:
32899 return ix86_cost->vec_stmt_cost;
32900
32901 case vector_load:
32902 return ix86_cost->vec_align_load_cost;
32903
32904 case vector_store:
32905 return ix86_cost->vec_store_cost;
32906
32907 case vec_to_scalar:
32908 return ix86_cost->vec_to_scalar_cost;
32909
32910 case scalar_to_vec:
32911 return ix86_cost->scalar_to_vec_cost;
32912
32913 case unaligned_load:
32914 case unaligned_store:
32915 return ix86_cost->vec_unalign_load_cost;
32916
32917 case cond_branch_taken:
32918 return ix86_cost->cond_taken_branch_cost;
32919
32920 case cond_branch_not_taken:
32921 return ix86_cost->cond_not_taken_branch_cost;
32922
32923 case vec_perm:
32924 return 1;
32925
32926 default:
32927 gcc_unreachable ();
32928 }
32929 }
32930
32931
32932 /* Implement targetm.vectorize.builtin_vec_perm. */
32933
32934 static tree
32935 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
32936 {
32937 tree itype = TREE_TYPE (vec_type);
32938 bool u = TYPE_UNSIGNED (itype);
32939 enum machine_mode vmode = TYPE_MODE (vec_type);
32940 enum ix86_builtins fcode;
32941 bool ok = TARGET_SSE2;
32942
32943 switch (vmode)
32944 {
32945 case V4DFmode:
32946 ok = TARGET_AVX;
32947 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
32948 goto get_di;
32949 case V2DFmode:
32950 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
32951 get_di:
32952 itype = ix86_get_builtin_type (IX86_BT_DI);
32953 break;
32954
32955 case V8SFmode:
32956 ok = TARGET_AVX;
32957 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
32958 goto get_si;
32959 case V4SFmode:
32960 ok = TARGET_SSE;
32961 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
32962 get_si:
32963 itype = ix86_get_builtin_type (IX86_BT_SI);
32964 break;
32965
32966 case V2DImode:
32967 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
32968 break;
32969 case V4SImode:
32970 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
32971 break;
32972 case V8HImode:
32973 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
32974 break;
32975 case V16QImode:
32976 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
32977 break;
32978 default:
32979 ok = false;
32980 break;
32981 }
32982
32983 if (!ok)
32984 return NULL_TREE;
32985
32986 *mask_type = itype;
32987 return ix86_builtins[(int) fcode];
32988 }
32989
32990 /* Return a vector mode with twice as many elements as VMODE. */
32991 /* ??? Consider moving this to a table generated by genmodes.c. */
32992
32993 static enum machine_mode
32994 doublesize_vector_mode (enum machine_mode vmode)
32995 {
32996 switch (vmode)
32997 {
32998 case V2SFmode: return V4SFmode;
32999 case V1DImode: return V2DImode;
33000 case V2SImode: return V4SImode;
33001 case V4HImode: return V8HImode;
33002 case V8QImode: return V16QImode;
33003
33004 case V2DFmode: return V4DFmode;
33005 case V4SFmode: return V8SFmode;
33006 case V2DImode: return V4DImode;
33007 case V4SImode: return V8SImode;
33008 case V8HImode: return V16HImode;
33009 case V16QImode: return V32QImode;
33010
33011 case V4DFmode: return V8DFmode;
33012 case V8SFmode: return V16SFmode;
33013 case V4DImode: return V8DImode;
33014 case V8SImode: return V16SImode;
33015 case V16HImode: return V32HImode;
33016 case V32QImode: return V64QImode;
33017
33018 default:
33019 gcc_unreachable ();
33020 }
33021 }
33022
33023 /* Construct (set target (vec_select op0 (parallel perm))) and
33024 return true if that's a valid instruction in the active ISA. */
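/* For example (illustrative only), with a V4SFmode TARGET and
   PERM = { 2, 3, 0, 1 } this emits

     (set (reg:V4SF target)
	  (vec_select:V4SF (reg:V4SF op0)
			   (parallel [(const_int 2) (const_int 3)
				      (const_int 0) (const_int 1)])))

   which succeeds only if some pattern in sse.md matches it.  */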
33025
33026 static bool
33027 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
33028 {
33029 rtx rperm[MAX_VECT_LEN], x;
33030 unsigned i;
33031
33032 for (i = 0; i < nelt; ++i)
33033 rperm[i] = GEN_INT (perm[i]);
33034
33035 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
33036 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
33037 x = gen_rtx_SET (VOIDmode, target, x);
33038
33039 x = emit_insn (x);
33040 if (recog_memoized (x) < 0)
33041 {
33042 remove_insn (x);
33043 return false;
33044 }
33045 return true;
33046 }
33047
33048 /* Similar, but generate a vec_concat from op0 and op1 as well. */
33049
33050 static bool
33051 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
33052 const unsigned char *perm, unsigned nelt)
33053 {
33054 enum machine_mode v2mode;
33055 rtx x;
33056
33057 v2mode = doublesize_vector_mode (GET_MODE (op0));
33058 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
33059 return expand_vselect (target, x, perm, nelt);
33060 }
33061
33062 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33063 in terms of blendp[sd] / pblendw / pblendvb. */
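/* Illustrative example (not part of the original comment): for
   V4SFmode with perm = { 0, 5, 2, 7 } every element stays in its lane,
   and the immediate built below is mask = 0b1010, i.e. blendps takes
   op1 in lanes 1 and 3 and op0 elsewhere.  */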
33064
33065 static bool
33066 expand_vec_perm_blend (struct expand_vec_perm_d *d)
33067 {
33068 enum machine_mode vmode = d->vmode;
33069 unsigned i, mask, nelt = d->nelt;
33070 rtx target, op0, op1, x;
33071
33072 if (!TARGET_SSE4_1 || d->op0 == d->op1)
33073 return false;
33074 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
33075 return false;
33076
33077 /* This is a blend, not a permute. Elements must stay in their
33078 respective lanes. */
33079 for (i = 0; i < nelt; ++i)
33080 {
33081 unsigned e = d->perm[i];
33082 if (!(e == i || e == i + nelt))
33083 return false;
33084 }
33085
33086 if (d->testing_p)
33087 return true;
33088
33089 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
33090 decision should be extracted elsewhere, so that we only try that
33091 sequence once all budget==3 options have been tried. */
33092
33093 /* For bytes, see if bytes move in pairs so we can use pblendw with
33094 an immediate argument, rather than pblendvb with a vector argument. */
33095 if (vmode == V16QImode)
33096 {
33097 bool pblendw_ok = true;
33098 for (i = 0; i < 16 && pblendw_ok; i += 2)
33099 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
33100
33101 if (!pblendw_ok)
33102 {
33103 rtx rperm[16], vperm;
33104
33105 for (i = 0; i < nelt; ++i)
33106 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
33107
33108 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33109 vperm = force_reg (V16QImode, vperm);
33110
33111 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
33112 return true;
33113 }
33114 }
33115
33116 target = d->target;
33117 op0 = d->op0;
33118 op1 = d->op1;
33119 mask = 0;
33120
33121 switch (vmode)
33122 {
33123 case V4DFmode:
33124 case V8SFmode:
33125 case V2DFmode:
33126 case V4SFmode:
33127 case V8HImode:
33128 for (i = 0; i < nelt; ++i)
33129 mask |= (d->perm[i] >= nelt) << i;
33130 break;
33131
33132 case V2DImode:
33133 for (i = 0; i < 2; ++i)
33134 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
33135 goto do_subreg;
33136
33137 case V4SImode:
33138 for (i = 0; i < 4; ++i)
33139 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
33140 goto do_subreg;
33141
33142 case V16QImode:
33143 for (i = 0; i < 8; ++i)
33144 mask |= (d->perm[i * 2] >= 16) << i;
33145
33146 do_subreg:
33147 vmode = V8HImode;
33148 target = gen_lowpart (vmode, target);
33149 op0 = gen_lowpart (vmode, op0);
33150 op1 = gen_lowpart (vmode, op1);
33151 break;
33152
33153 default:
33154 gcc_unreachable ();
33155 }
33156
33157 /* This matches five different patterns with the different modes. */
33158 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
33159 x = gen_rtx_SET (VOIDmode, target, x);
33160 emit_insn (x);
33161
33162 return true;
33163 }
33164
33165 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33166 in terms of the variable form of vpermilps.
33167
33168 Note that we will have already failed the immediate input vpermilps,
33169 which requires that the high and low part shuffle be identical; the
33170 variable form doesn't require that. */
33171
33172 static bool
33173 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
33174 {
33175 rtx rperm[8], vperm;
33176 unsigned i;
33177
33178 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
33179 return false;
33180
33181 /* We can only permute within the 128-bit lane. */
33182 for (i = 0; i < 8; ++i)
33183 {
33184 unsigned e = d->perm[i];
33185 if (i < 4 ? e >= 4 : e < 4)
33186 return false;
33187 }
33188
33189 if (d->testing_p)
33190 return true;
33191
33192 for (i = 0; i < 8; ++i)
33193 {
33194 unsigned e = d->perm[i];
33195
33196 /* Within each 128-bit lane, the elements of op0 are numbered
33197 from 0 and the elements of op1 are numbered from 4. */
33198 if (e >= 8 + 4)
33199 e -= 8;
33200 else if (e >= 4)
33201 e -= 4;
33202
33203 rperm[i] = GEN_INT (e);
33204 }
33205
33206 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
33207 vperm = force_reg (V8SImode, vperm);
33208 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
33209
33210 return true;
33211 }
33212
33213 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33214 in terms of pshufb or vpperm. */
33215
33216 static bool
33217 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
33218 {
33219 unsigned i, nelt, eltsz;
33220 rtx rperm[16], vperm, target, op0, op1;
33221
33222 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
33223 return false;
33224 if (GET_MODE_SIZE (d->vmode) != 16)
33225 return false;
33226
33227 if (d->testing_p)
33228 return true;
33229
33230 nelt = d->nelt;
33231 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33232
33233 for (i = 0; i < nelt; ++i)
33234 {
33235 unsigned j, e = d->perm[i];
33236 for (j = 0; j < eltsz; ++j)
33237 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
33238 }
33239
33240 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33241 vperm = force_reg (V16QImode, vperm);
33242
33243 target = gen_lowpart (V16QImode, d->target);
33244 op0 = gen_lowpart (V16QImode, d->op0);
33245 if (d->op0 == d->op1)
33246 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
33247 else
33248 {
33249 op1 = gen_lowpart (V16QImode, d->op1);
33250 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
33251 }
33252
33253 return true;
33254 }
33255
33256 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
33257 in a single instruction. */
33258
33259 static bool
33260 expand_vec_perm_1 (struct expand_vec_perm_d *d)
33261 {
33262 unsigned i, nelt = d->nelt;
33263 unsigned char perm2[MAX_VECT_LEN];
33264
33265 /* Check plain VEC_SELECT first, because AVX has instructions that could
33266 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
33267 input where SEL+CONCAT may not. */
33268 if (d->op0 == d->op1)
33269 {
33270 int mask = nelt - 1;
33271
33272 for (i = 0; i < nelt; i++)
33273 perm2[i] = d->perm[i] & mask;
33274
33275 if (expand_vselect (d->target, d->op0, perm2, nelt))
33276 return true;
33277
33278 /* There are plenty of patterns in sse.md that are written for
33279 SEL+CONCAT and are not replicated for a single op. Perhaps
33280 that should be changed, to avoid the nastiness here. */
33281
33282 /* Recognize interleave style patterns, which means incrementing
33283 every other permutation operand. */
33284 for (i = 0; i < nelt; i += 2)
33285 {
33286 perm2[i] = d->perm[i] & mask;
33287 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
33288 }
33289 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
33290 return true;
33291
33292 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
33293 if (nelt >= 4)
33294 {
33295 for (i = 0; i < nelt; i += 4)
33296 {
33297 perm2[i + 0] = d->perm[i + 0] & mask;
33298 perm2[i + 1] = d->perm[i + 1] & mask;
33299 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
33300 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
33301 }
33302
33303 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
33304 return true;
33305 }
33306 }
33307
33308 /* Finally, try the fully general two operand permute. */
33309 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
33310 return true;
33311
33312 /* Recognize interleave style patterns with reversed operands. */
33313 if (d->op0 != d->op1)
33314 {
33315 for (i = 0; i < nelt; ++i)
33316 {
33317 unsigned e = d->perm[i];
33318 if (e >= nelt)
33319 e -= nelt;
33320 else
33321 e += nelt;
33322 perm2[i] = e;
33323 }
33324
33325 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
33326 return true;
33327 }
33328
33329 /* Try the SSE4.1 blend variable merge instructions. */
33330 if (expand_vec_perm_blend (d))
33331 return true;
33332
33333 /* Try one of the AVX vpermil variable permutations. */
33334 if (expand_vec_perm_vpermil (d))
33335 return true;
33336
33337 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
33338 if (expand_vec_perm_pshufb (d))
33339 return true;
33340
33341 return false;
33342 }
33343
33344 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33345 in terms of a pair of pshuflw + pshufhw instructions. */
33346
33347 static bool
33348 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
33349 {
33350 unsigned char perm2[MAX_VECT_LEN];
33351 unsigned i;
33352 bool ok;
33353
33354 if (d->vmode != V8HImode || d->op0 != d->op1)
33355 return false;
33356
33357 /* The two permutations only operate in 64-bit lanes. */
33358 for (i = 0; i < 4; ++i)
33359 if (d->perm[i] >= 4)
33360 return false;
33361 for (i = 4; i < 8; ++i)
33362 if (d->perm[i] < 4)
33363 return false;
33364
33365 if (d->testing_p)
33366 return true;
33367
33368 /* Emit the pshuflw. */
33369 memcpy (perm2, d->perm, 4);
33370 for (i = 4; i < 8; ++i)
33371 perm2[i] = i;
33372 ok = expand_vselect (d->target, d->op0, perm2, 8);
33373 gcc_assert (ok);
33374
33375 /* Emit the pshufhw. */
33376 memcpy (perm2 + 4, d->perm + 4, 4);
33377 for (i = 0; i < 4; ++i)
33378 perm2[i] = i;
33379 ok = expand_vselect (d->target, d->target, perm2, 8);
33380 gcc_assert (ok);
33381
33382 return true;
33383 }
33384
33385 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33386 the permutation using the SSSE3 palignr instruction. This succeeds
33387 when all of the elements in PERM fit within one vector and we merely
33388 need to shift them down so that a single vector permutation has a
33389 chance to succeed. */
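/* Illustrative example (not part of the original comment): for V4SImode
   with perm = { 3, 4, 5, 6 } we have min == 3, so palignr shifts the
   op1:op0 pair down by three elements; the residual permutation becomes
   { 0, 1, 2, 3 }, which is already in order, so no further shuffle is
   needed.  */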
33390
33391 static bool
33392 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
33393 {
33394 unsigned i, nelt = d->nelt;
33395 unsigned min, max;
33396 bool in_order, ok;
33397 rtx shift;
33398
33399 /* Even with AVX, palignr only operates on 128-bit vectors. */
33400 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33401 return false;
33402
33403 min = nelt, max = 0;
33404 for (i = 0; i < nelt; ++i)
33405 {
33406 unsigned e = d->perm[i];
33407 if (e < min)
33408 min = e;
33409 if (e > max)
33410 max = e;
33411 }
33412 if (min == 0 || max - min >= nelt)
33413 return false;
33414
33415 /* Given that we have SSSE3, we know we'll be able to implement the
33416 single operand permutation after the palignr with pshufb. */
33417 if (d->testing_p)
33418 return true;
33419
33420 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
33421 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
33422 gen_lowpart (TImode, d->op1),
33423 gen_lowpart (TImode, d->op0), shift));
33424
33425 d->op0 = d->op1 = d->target;
33426
33427 in_order = true;
33428 for (i = 0; i < nelt; ++i)
33429 {
33430 unsigned e = d->perm[i] - min;
33431 if (e != i)
33432 in_order = false;
33433 d->perm[i] = e;
33434 }
33435
33436 /* Test for the degenerate case where the alignment by itself
33437 produces the desired permutation. */
33438 if (in_order)
33439 return true;
33440
33441 ok = expand_vec_perm_1 (d);
33442 gcc_assert (ok);
33443
33444 return ok;
33445 }
33446
33447 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33448 a two vector permutation into a single vector permutation by using
33449 an interleave operation to merge the vectors. */
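/* Illustrative example (not part of the original comment): for V4SImode
   with perm = { 1, 5, 0, 4 } all elements come from the low halves, so
   dremap becomes the interleave-low permutation { 0, 4, 1, 5 } producing
   { a0, b0, a1, b1 }, and dfinal reduces to the single-operand
   permutation { 2, 3, 0, 1 } of that intermediate result.  */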
33450
33451 static bool
33452 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
33453 {
33454 struct expand_vec_perm_d dremap, dfinal;
33455 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
33456 unsigned contents, h1, h2, h3, h4;
33457 unsigned char remap[2 * MAX_VECT_LEN];
33458 rtx seq;
33459 bool ok;
33460
33461 if (d->op0 == d->op1)
33462 return false;
33463
33464 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
33465 lanes. We can use similar techniques with the vperm2f128 instruction,
33466 but it requires slightly different logic. */
33467 if (GET_MODE_SIZE (d->vmode) != 16)
33468 return false;
33469
33470 /* Examine from whence the elements come. */
33471 contents = 0;
33472 for (i = 0; i < nelt; ++i)
33473 contents |= 1u << d->perm[i];
33474
33475 /* Split the two input vectors into 4 halves. */
33476 h1 = (1u << nelt2) - 1;
33477 h2 = h1 << nelt2;
33478 h3 = h2 << nelt2;
33479 h4 = h3 << nelt2;
33480
33481 memset (remap, 0xff, sizeof (remap));
33482 dremap = *d;
33483
33484   /* If the elements all come from the low halves, use interleave low;
33485      similarly for interleave high.  If the elements come from mis-matched
33486      halves, we can use shufps for V4SF/V4SI or do a DImode shuffle.  */
33487 if ((contents & (h1 | h3)) == contents)
33488 {
33489 for (i = 0; i < nelt2; ++i)
33490 {
33491 remap[i] = i * 2;
33492 remap[i + nelt] = i * 2 + 1;
33493 dremap.perm[i * 2] = i;
33494 dremap.perm[i * 2 + 1] = i + nelt;
33495 }
33496 }
33497 else if ((contents & (h2 | h4)) == contents)
33498 {
33499 for (i = 0; i < nelt2; ++i)
33500 {
33501 remap[i + nelt2] = i * 2;
33502 remap[i + nelt + nelt2] = i * 2 + 1;
33503 dremap.perm[i * 2] = i + nelt2;
33504 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
33505 }
33506 }
33507 else if ((contents & (h1 | h4)) == contents)
33508 {
33509 for (i = 0; i < nelt2; ++i)
33510 {
33511 remap[i] = i;
33512 remap[i + nelt + nelt2] = i + nelt2;
33513 dremap.perm[i] = i;
33514 dremap.perm[i + nelt2] = i + nelt + nelt2;
33515 }
33516 if (nelt != 4)
33517 {
33518 dremap.vmode = V2DImode;
33519 dremap.nelt = 2;
33520 dremap.perm[0] = 0;
33521 dremap.perm[1] = 3;
33522 }
33523 }
33524 else if ((contents & (h2 | h3)) == contents)
33525 {
33526 for (i = 0; i < nelt2; ++i)
33527 {
33528 remap[i + nelt2] = i;
33529 remap[i + nelt] = i + nelt2;
33530 dremap.perm[i] = i + nelt2;
33531 dremap.perm[i + nelt2] = i + nelt;
33532 }
33533 if (nelt != 4)
33534 {
33535 dremap.vmode = V2DImode;
33536 dremap.nelt = 2;
33537 dremap.perm[0] = 1;
33538 dremap.perm[1] = 2;
33539 }
33540 }
33541 else
33542 return false;
33543
33544 /* Use the remapping array set up above to move the elements from their
33545 swizzled locations into their final destinations. */
33546 dfinal = *d;
33547 for (i = 0; i < nelt; ++i)
33548 {
33549 unsigned e = remap[d->perm[i]];
33550 gcc_assert (e < nelt);
33551 dfinal.perm[i] = e;
33552 }
33553 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
33554 dfinal.op1 = dfinal.op0;
33555 dremap.target = dfinal.op0;
33556
33557 /* Test if the final remap can be done with a single insn. For V4SFmode or
33558 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
33559 start_sequence ();
33560 ok = expand_vec_perm_1 (&dfinal);
33561 seq = get_insns ();
33562 end_sequence ();
33563
33564 if (!ok)
33565 return false;
33566
33567 if (dremap.vmode != dfinal.vmode)
33568 {
33569 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
33570 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
33571 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
33572 }
33573
33574 ok = expand_vec_perm_1 (&dremap);
33575 gcc_assert (ok);
33576
33577 emit_insn (seq);
33578 return true;
33579 }
33580
33581 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
33582 permutation with two pshufb insns and an ior. We should have already
33583 failed all two instruction sequences. */
33584
33585 static bool
33586 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
33587 {
33588 rtx rperm[2][16], vperm, l, h, op, m128;
33589 unsigned int i, nelt, eltsz;
33590
33591 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33592 return false;
33593 gcc_assert (d->op0 != d->op1);
33594
33595 nelt = d->nelt;
33596 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33597
33598 /* Generate two permutation masks. If the required element is within
33599 the given vector it is shuffled into the proper lane. If the required
33600 element is in the other vector, force a zero into the lane by setting
33601 bit 7 in the permutation mask. */
33602 m128 = GEN_INT (-128);
33603 for (i = 0; i < nelt; ++i)
33604 {
33605 unsigned j, e = d->perm[i];
33606 unsigned which = (e >= nelt);
33607 if (e >= nelt)
33608 e -= nelt;
33609
33610 for (j = 0; j < eltsz; ++j)
33611 {
33612 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
33613 rperm[1-which][i*eltsz + j] = m128;
33614 }
33615 }
33616
33617 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
33618 vperm = force_reg (V16QImode, vperm);
33619
33620 l = gen_reg_rtx (V16QImode);
33621 op = gen_lowpart (V16QImode, d->op0);
33622 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
33623
33624 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
33625 vperm = force_reg (V16QImode, vperm);
33626
33627 h = gen_reg_rtx (V16QImode);
33628 op = gen_lowpart (V16QImode, d->op1);
33629 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
33630
33631 op = gen_lowpart (V16QImode, d->target);
33632 emit_insn (gen_iorv16qi3 (op, l, h));
33633
33634 return true;
33635 }
33636
33637 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
33638 and extract-odd permutations. */
33639
33640 static bool
33641 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
33642 {
33643 rtx t1, t2, t3;
33644
33645 switch (d->vmode)
33646 {
33647 case V4DFmode:
33648 t1 = gen_reg_rtx (V4DFmode);
33649 t2 = gen_reg_rtx (V4DFmode);
33650
33651 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
33652 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
33653 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
33654
33655 /* Now an unpck[lh]pd will produce the result required. */
33656 if (odd)
33657 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
33658 else
33659 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
33660 emit_insn (t3);
33661 break;
33662
33663 case V8SFmode:
33664 {
33665 int mask = odd ? 0xdd : 0x88;
33666
33667 t1 = gen_reg_rtx (V8SFmode);
33668 t2 = gen_reg_rtx (V8SFmode);
33669 t3 = gen_reg_rtx (V8SFmode);
33670
33671 /* Shuffle within the 128-bit lanes to produce:
33672 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
33673 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
33674 GEN_INT (mask)));
33675
33676 /* Shuffle the lanes around to produce:
33677 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
33678 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
33679 GEN_INT (0x3)));
33680
33681 /* Shuffle within the 128-bit lanes to produce:
33682 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
33683 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
33684
33685 /* Shuffle within the 128-bit lanes to produce:
33686 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
33687 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
33688
33689 /* Shuffle the lanes around to produce:
33690 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
33691 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
33692 GEN_INT (0x20)));
33693 }
33694 break;
33695
33696 case V2DFmode:
33697 case V4SFmode:
33698 case V2DImode:
33699 case V4SImode:
33700 /* These are always directly implementable by expand_vec_perm_1. */
33701 gcc_unreachable ();
33702
33703 case V8HImode:
33704 if (TARGET_SSSE3)
33705 return expand_vec_perm_pshufb2 (d);
33706 else
33707 {
33708 /* We need 2*log2(N)-1 operations to achieve odd/even
33709 with interleave. */
33710 t1 = gen_reg_rtx (V8HImode);
33711 t2 = gen_reg_rtx (V8HImode);
33712 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
33713 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
33714 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
33715 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
33716 if (odd)
33717 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
33718 else
33719 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
33720 emit_insn (t3);
33721 }
33722 break;
33723
33724 case V16QImode:
33725 if (TARGET_SSSE3)
33726 return expand_vec_perm_pshufb2 (d);
33727 else
33728 {
33729 t1 = gen_reg_rtx (V16QImode);
33730 t2 = gen_reg_rtx (V16QImode);
33731 t3 = gen_reg_rtx (V16QImode);
33732 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
33733 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
33734 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
33735 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
33736 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
33737 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
33738 if (odd)
33739 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
33740 else
33741 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
33742 emit_insn (t3);
33743 }
33744 break;
33745
33746 default:
33747 gcc_unreachable ();
33748 }
33749
33750 return true;
33751 }
33752
33753 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33754 extract-even and extract-odd permutations. */
33755
33756 static bool
33757 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
33758 {
33759 unsigned i, odd, nelt = d->nelt;
33760
33761 odd = d->perm[0];
33762 if (odd != 0 && odd != 1)
33763 return false;
33764
33765 for (i = 1; i < nelt; ++i)
33766 if (d->perm[i] != 2 * i + odd)
33767 return false;
33768
33769 return expand_vec_perm_even_odd_1 (d, odd);
33770 }
33771
33772 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
33773 permutations. We assume that expand_vec_perm_1 has already failed. */
33774
33775 static bool
33776 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
33777 {
33778 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
33779 enum machine_mode vmode = d->vmode;
33780 unsigned char perm2[4];
33781 rtx op0 = d->op0;
33782 bool ok;
33783
33784 switch (vmode)
33785 {
33786 case V4DFmode:
33787 case V8SFmode:
33788 /* These are special-cased in sse.md so that we can optionally
33789 use the vbroadcast instruction. They expand to two insns
33790 if the input happens to be in a register. */
33791 gcc_unreachable ();
33792
33793 case V2DFmode:
33794 case V2DImode:
33795 case V4SFmode:
33796 case V4SImode:
33797 /* These are always implementable using standard shuffle patterns. */
33798 gcc_unreachable ();
33799
33800 case V8HImode:
33801 case V16QImode:
33802 /* These can be implemented via interleave. We save one insn by
33803 stopping once we have promoted to V4SImode and then use pshufd. */
33804 do
33805 {
33806 optab otab = vec_interleave_low_optab;
33807
33808 if (elt >= nelt2)
33809 {
33810 otab = vec_interleave_high_optab;
33811 elt -= nelt2;
33812 }
33813 nelt2 /= 2;
33814
33815 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
33816 vmode = get_mode_wider_vector (vmode);
33817 op0 = gen_lowpart (vmode, op0);
33818 }
33819 while (vmode != V4SImode);
33820
33821 memset (perm2, elt, 4);
33822 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
33823 gcc_assert (ok);
33824 return true;
33825
33826 default:
33827 gcc_unreachable ();
33828 }
33829 }
33830
33831 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33832 broadcast permutations. */
33833
33834 static bool
33835 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
33836 {
33837 unsigned i, elt, nelt = d->nelt;
33838
33839 if (d->op0 != d->op1)
33840 return false;
33841
33842 elt = d->perm[0];
33843 for (i = 1; i < nelt; ++i)
33844 if (d->perm[i] != elt)
33845 return false;
33846
33847 return expand_vec_perm_broadcast_1 (d);
33848 }
33849
33850 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
33851 With all of the interface bits taken care of, perform the expansion
33852 in D and return true on success. */
33853
33854 static bool
33855 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
33856 {
33857 /* Try a single instruction expansion. */
33858 if (expand_vec_perm_1 (d))
33859 return true;
33860
33861 /* Try sequences of two instructions. */
33862
33863 if (expand_vec_perm_pshuflw_pshufhw (d))
33864 return true;
33865
33866 if (expand_vec_perm_palignr (d))
33867 return true;
33868
33869 if (expand_vec_perm_interleave2 (d))
33870 return true;
33871
33872 if (expand_vec_perm_broadcast (d))
33873 return true;
33874
33875 /* Try sequences of three instructions. */
33876
33877 if (expand_vec_perm_pshufb2 (d))
33878 return true;
33879
33880 /* ??? Look for narrow permutations whose element orderings would
33881 allow the promotion to a wider mode. */
33882
33883 /* ??? Look for sequences of interleave or a wider permute that place
33884 the data into the correct lanes for a half-vector shuffle like
33885 pshuf[lh]w or vpermilps. */
33886
33887 /* ??? Look for sequences of interleave that produce the desired results.
33888 The combinatorics of punpck[lh] get pretty ugly... */
33889
33890 if (expand_vec_perm_even_odd (d))
33891 return true;
33892
33893 return false;
33894 }
33895
33896 /* Extract the values from the vector CST into the permutation array in D.
33897 Return 0 on error, 1 if all values from the permutation come from the
33898 first vector, 2 if all values from the second vector, and 3 otherwise. */
33899
33900 static int
33901 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
33902 {
33903 tree list = TREE_VECTOR_CST_ELTS (cst);
33904 unsigned i, nelt = d->nelt;
33905 int ret = 0;
33906
33907 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
33908 {
33909 unsigned HOST_WIDE_INT e;
33910
33911 if (!host_integerp (TREE_VALUE (list), 1))
33912 return 0;
33913 e = tree_low_cst (TREE_VALUE (list), 1);
33914 if (e >= 2 * nelt)
33915 return 0;
33916
33917 ret |= (e < nelt ? 1 : 2);
33918 d->perm[i] = e;
33919 }
33920 gcc_assert (list == NULL);
33921
33922   /* If all elements come from the second vector, fold them into the first.  */
33923 if (ret == 2)
33924 for (i = 0; i < nelt; ++i)
33925 d->perm[i] -= nelt;
33926
33927 return ret;
33928 }
33929
33930 static rtx
33931 ix86_expand_vec_perm_builtin (tree exp)
33932 {
33933 struct expand_vec_perm_d d;
33934 tree arg0, arg1, arg2;
33935
33936 arg0 = CALL_EXPR_ARG (exp, 0);
33937 arg1 = CALL_EXPR_ARG (exp, 1);
33938 arg2 = CALL_EXPR_ARG (exp, 2);
33939
33940 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
33941 d.nelt = GET_MODE_NUNITS (d.vmode);
33942 d.testing_p = false;
33943 gcc_assert (VECTOR_MODE_P (d.vmode));
33944
33945 if (TREE_CODE (arg2) != VECTOR_CST)
33946 {
33947 error_at (EXPR_LOCATION (exp),
33948 "vector permutation requires vector constant");
33949 goto exit_error;
33950 }
33951
33952 switch (extract_vec_perm_cst (&d, arg2))
33953 {
33954 default:
33955 gcc_unreachable();
33956
33957 case 0:
33958 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
33959 goto exit_error;
33960
33961 case 3:
33962 if (!operand_equal_p (arg0, arg1, 0))
33963 {
33964 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33965 d.op0 = force_reg (d.vmode, d.op0);
33966 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33967 d.op1 = force_reg (d.vmode, d.op1);
33968 break;
33969 }
33970
33971 /* The elements of PERM do not suggest that only the first operand
33972 is used, but both operands are identical. Allow easier matching
33973 of the permutation by folding the permutation into the single
33974 input vector. */
33975 {
33976 unsigned i, nelt = d.nelt;
33977 for (i = 0; i < nelt; ++i)
33978 if (d.perm[i] >= nelt)
33979 d.perm[i] -= nelt;
33980 }
33981 /* FALLTHRU */
33982
33983 case 1:
33984 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33985 d.op0 = force_reg (d.vmode, d.op0);
33986 d.op1 = d.op0;
33987 break;
33988
33989 case 2:
33990 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33991 d.op0 = force_reg (d.vmode, d.op0);
33992 d.op1 = d.op0;
33993 break;
33994 }
33995
33996 d.target = gen_reg_rtx (d.vmode);
33997 if (ix86_expand_vec_perm_builtin_1 (&d))
33998 return d.target;
33999
34000 /* For compiler generated permutations, we should never get here, because
34001 the compiler should also be checking the ok hook. But since this is a
34002 builtin that the user has access to, so don't abort. */
34003 switch (d.nelt)
34004 {
34005 case 2:
34006 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
34007 break;
34008 case 4:
34009 sorry ("vector permutation (%d %d %d %d)",
34010 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
34011 break;
34012 case 8:
34013 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
34014 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
34015 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
34016 break;
34017 case 16:
34018 sorry ("vector permutation "
34019 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
34020 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
34021 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
34022 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
34023 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
34024 break;
34025 default:
34026 gcc_unreachable ();
34027 }
34028 exit_error:
34029 return CONST0_RTX (d.vmode);
34030 }
34031
34032 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
34033
34034 static bool
34035 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
34036 {
34037 struct expand_vec_perm_d d;
34038 int vec_mask;
34039 bool ret, one_vec;
34040
34041 d.vmode = TYPE_MODE (vec_type);
34042 d.nelt = GET_MODE_NUNITS (d.vmode);
34043 d.testing_p = true;
34044
34045 /* Given sufficient ISA support we can just return true here
34046 for selected vector modes. */
34047 if (GET_MODE_SIZE (d.vmode) == 16)
34048 {
34049 /* All implementable with a single vpperm insn. */
34050 if (TARGET_XOP)
34051 return true;
34052 /* All implementable with 2 pshufb + 1 ior. */
34053 if (TARGET_SSSE3)
34054 return true;
34055 /* All implementable with shufpd or unpck[lh]pd. */
34056 if (d.nelt == 2)
34057 return true;
34058 }
34059
34060 vec_mask = extract_vec_perm_cst (&d, mask);
34061
34062 /* This hook cannot be called in response to something that the
34063 user does (unlike the builtin expander), so we shouldn't ever see
34064 an error generated from the extract. */
34065 gcc_assert (vec_mask > 0 && vec_mask <= 3);
34066 one_vec = (vec_mask != 3);
34067
34068 /* Implementable with shufps or pshufd. */
34069 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
34070 return true;
34071
34072 /* Otherwise we have to go through the motions and see if we can
34073 figure out how to generate the requested permutation. */
34074 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
34075 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
34076 if (!one_vec)
34077 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
34078
34079 start_sequence ();
34080 ret = ix86_expand_vec_perm_builtin_1 (&d);
34081 end_sequence ();
34082
34083 return ret;
34084 }
34085
34086 void
34087 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
34088 {
34089 struct expand_vec_perm_d d;
34090 unsigned i, nelt;
34091
34092 d.target = targ;
34093 d.op0 = op0;
34094 d.op1 = op1;
34095 d.vmode = GET_MODE (targ);
34096 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
34097 d.testing_p = false;
34098
34099 for (i = 0; i < nelt; ++i)
34100 d.perm[i] = i * 2 + odd;
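  /* For example, with V4SImode operands and ODD == 1 this builds the
     permutation { 1, 3, 5, 7 }, selecting the odd-numbered elements of
     the concatenation OP0:OP1; with ODD == 0 it selects { 0, 2, 4, 6 }.  */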
34101
34102 /* We'll either be able to implement the permutation directly... */
34103 if (expand_vec_perm_1 (&d))
34104 return;
34105
34106 /* ... or we use the special-case patterns. */
34107 expand_vec_perm_even_odd_1 (&d, odd);
34108 }
34109
34110 /* Expand an insert into a vector register through pinsr insn.
34111 Return true if successful. */
34112
34113 bool
34114 ix86_expand_pinsr (rtx *operands)
34115 {
34116 rtx dst = operands[0];
34117 rtx src = operands[3];
34118
34119 unsigned int size = INTVAL (operands[1]);
34120 unsigned int pos = INTVAL (operands[2]);
34121
34122 if (GET_CODE (dst) == SUBREG)
34123 {
34124 pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
34125 dst = SUBREG_REG (dst);
34126 }
34127
34128 if (GET_CODE (src) == SUBREG)
34129 src = SUBREG_REG (src);
34130
34131 switch (GET_MODE (dst))
34132 {
34133 case V16QImode:
34134 case V8HImode:
34135 case V4SImode:
34136 case V2DImode:
34137 {
34138 enum machine_mode srcmode, dstmode;
34139 rtx (*pinsr)(rtx, rtx, rtx, rtx);
34140
34141 srcmode = mode_for_size (size, MODE_INT, 0);
34142
34143 switch (srcmode)
34144 {
34145 case QImode:
34146 if (!TARGET_SSE4_1)
34147 return false;
34148 dstmode = V16QImode;
34149 pinsr = gen_sse4_1_pinsrb;
34150 break;
34151
34152 case HImode:
34153 if (!TARGET_SSE2)
34154 return false;
34155 dstmode = V8HImode;
34156 pinsr = gen_sse2_pinsrw;
34157 break;
34158
34159 case SImode:
34160 if (!TARGET_SSE4_1)
34161 return false;
34162 dstmode = V4SImode;
34163 pinsr = gen_sse4_1_pinsrd;
34164 break;
34165
34166 case DImode:
34167 gcc_assert (TARGET_64BIT);
34168 if (!TARGET_SSE4_1)
34169 return false;
34170 dstmode = V2DImode;
34171 pinsr = gen_sse4_1_pinsrq;
34172 break;
34173
34174 default:
34175 return false;
34176 }
34177
34178 dst = gen_lowpart (dstmode, dst);
34179 src = gen_lowpart (srcmode, src);
34180
34181 pos /= size;
34182
34183 emit_insn (pinsr (dst, dst, src, GEN_INT (1 << pos)));
34184 return true;
34185 }
34186
34187 default:
34188 return false;
34189 }
34190 }
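/* As an example, inserting a 16-bit value at bit position 32 of a V8HImode
   destination arrives here with operands[1] == 16 and operands[2] == 32;
   srcmode is HImode, pos / size yields element index 2, and the insn
   emitted is pinsrw with the selector GEN_INT (1 << 2).  */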
34191 \f
34192 /* This function returns the calling ABI specific va_list type node,
34193 i.e. the FNDECL specific va_list type. */
34194
34195 static tree
34196 ix86_fn_abi_va_list (tree fndecl)
34197 {
34198 if (!TARGET_64BIT)
34199 return va_list_type_node;
34200 gcc_assert (fndecl != NULL_TREE);
34201
34202 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
34203 return ms_va_list_type_node;
34204 else
34205 return sysv_va_list_type_node;
34206 }
34207
34208 /* Returns the canonical va_list type specified by TYPE. If there
34209 is no valid TYPE provided, it returns NULL_TREE. */
34210
34211 static tree
34212 ix86_canonical_va_list_type (tree type)
34213 {
34214 tree wtype, htype;
34215
34216 /* Resolve references and pointers to va_list type. */
34217 if (TREE_CODE (type) == MEM_REF)
34218 type = TREE_TYPE (type);
34219 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
34220 type = TREE_TYPE (type);
34221 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
34222 type = TREE_TYPE (type);
34223
34224 if (TARGET_64BIT && va_list_type_node != NULL_TREE)
34225 {
34226 wtype = va_list_type_node;
34227 gcc_assert (wtype != NULL_TREE);
34228 htype = type;
34229 if (TREE_CODE (wtype) == ARRAY_TYPE)
34230 {
34231 /* If va_list is an array type, the argument may have decayed
34232 to a pointer type, e.g. by being passed to another function.
34233 In that case, unwrap both types so that we can compare the
34234 underlying records. */
34235 if (TREE_CODE (htype) == ARRAY_TYPE
34236 || POINTER_TYPE_P (htype))
34237 {
34238 wtype = TREE_TYPE (wtype);
34239 htype = TREE_TYPE (htype);
34240 }
34241 }
34242 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34243 return va_list_type_node;
34244 wtype = sysv_va_list_type_node;
34245 gcc_assert (wtype != NULL_TREE);
34246 htype = type;
34247 if (TREE_CODE (wtype) == ARRAY_TYPE)
34248 {
34249 /* If va_list is an array type, the argument may have decayed
34250 to a pointer type, e.g. by being passed to another function.
34251 In that case, unwrap both types so that we can compare the
34252 underlying records. */
34253 if (TREE_CODE (htype) == ARRAY_TYPE
34254 || POINTER_TYPE_P (htype))
34255 {
34256 wtype = TREE_TYPE (wtype);
34257 htype = TREE_TYPE (htype);
34258 }
34259 }
34260 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34261 return sysv_va_list_type_node;
34262 wtype = ms_va_list_type_node;
34263 gcc_assert (wtype != NULL_TREE);
34264 htype = type;
34265 if (TREE_CODE (wtype) == ARRAY_TYPE)
34266 {
34267 /* If va_list is an array type, the argument may have decayed
34268 to a pointer type, e.g. by being passed to another function.
34269 In that case, unwrap both types so that we can compare the
34270 underlying records. */
34271 if (TREE_CODE (htype) == ARRAY_TYPE
34272 || POINTER_TYPE_P (htype))
34273 {
34274 wtype = TREE_TYPE (wtype);
34275 htype = TREE_TYPE (htype);
34276 }
34277 }
34278 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34279 return ms_va_list_type_node;
34280 return NULL_TREE;
34281 }
34282 return std_canonical_va_list_type (type);
34283 }
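/* For instance, on a 64-bit target an argument originally declared as
   __builtin_sysv_va_list (an array type) may have decayed to a pointer
   when passed to another function; the code above unwraps both types and
   compares the underlying records, so sysv_va_list_type_node is still
   returned.  */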
34284
34285 /* Iterate through the target-specific builtin types for va_list.
34286 IDX denotes the iterator, *PTREE is set to the result type of
34287 the va_list builtin, and *PNAME to its internal type.
34288 Returns zero if there is no element for this index, otherwise
34289 IDX should be increased upon the next call.
34290 Note, do not iterate a base builtin's name like __builtin_va_list.
34291 Used from c_common_nodes_and_builtins. */
34292
34293 static int
34294 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
34295 {
34296 if (TARGET_64BIT)
34297 {
34298 switch (idx)
34299 {
34300 default:
34301 break;
34302
34303 case 0:
34304 *ptree = ms_va_list_type_node;
34305 *pname = "__builtin_ms_va_list";
34306 return 1;
34307
34308 case 1:
34309 *ptree = sysv_va_list_type_node;
34310 *pname = "__builtin_sysv_va_list";
34311 return 1;
34312 }
34313 }
34314
34315 return 0;
34316 }
34317
34318 #undef TARGET_SCHED_DISPATCH
34319 #define TARGET_SCHED_DISPATCH has_dispatch
34320 #undef TARGET_SCHED_DISPATCH_DO
34321 #define TARGET_SCHED_DISPATCH_DO do_dispatch
34322
34323 /* The size of the dispatch window is the total number of bytes of
34324 object code allowed in a window. */
34325 #define DISPATCH_WINDOW_SIZE 16
34326
34327 /* Number of dispatch windows considered for scheduling. */
34328 #define MAX_DISPATCH_WINDOWS 3
34329
34330 /* Maximum number of instructions in a window. */
34331 #define MAX_INSN 4
34332
34333 /* Maximum number of immediate operands in a window. */
34334 #define MAX_IMM 4
34335
34336 /* Maximum number of immediate bits allowed in a window. */
34337 #define MAX_IMM_SIZE 128
34338
34339 /* Maximum number of 32 bit immediates allowed in a window. */
34340 #define MAX_IMM_32 4
34341
34342 /* Maximum number of 64 bit immediates allowed in a window. */
34343 #define MAX_IMM_64 2
34344
34345 /* Maximum total of loads or prefetches allowed in a window. */
34346 #define MAX_LOAD 2
34347
34348 /* Maximum total of stores allowed in a window. */
34349 #define MAX_STORE 1
34350
34351 #undef BIG
34352 #define BIG 100
34353
34354
34355 /* Dispatch groups. Instructions that affect the mix in a dispatch window. */
34356 enum dispatch_group {
34357 disp_no_group = 0,
34358 disp_load,
34359 disp_store,
34360 disp_load_store,
34361 disp_prefetch,
34362 disp_imm,
34363 disp_imm_32,
34364 disp_imm_64,
34365 disp_branch,
34366 disp_cmp,
34367 disp_jcc,
34368 disp_last
34369 };
34370
34371 /* Number of allowable groups in a dispatch window. It is an array
34372 indexed by the dispatch_group enum. 100 is used as a big number,
34373 because the number of these kinds of operations does not have any
34374 effect in a dispatch window, but we need them for other reasons in
34375 the table. */
34376 static unsigned int num_allowable_groups[disp_last] = {
34377 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
34378 };
34379
34380 char group_name[disp_last + 1][16] = {
34381 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
34382 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
34383 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
34384 };
34385
34386 /* Instruction path. */
34387 enum insn_path {
34388 no_path = 0,
34389 path_single, /* Single micro op. */
34390 path_double, /* Double micro op. */
34391 path_multi, /* Instructions with more than 2 micro ops. */
34392 last_path
34393 };
34394
34395 /* sched_insn_info defines a window to the instructions scheduled in
34396 the basic block. It contains a pointer to the insn_info table and
34397 the instruction scheduled.
34398
34399 Windows are allocated for each basic block and are linked
34400 together. */
34401 typedef struct sched_insn_info_s {
34402 rtx insn;
34403 enum dispatch_group group;
34404 enum insn_path path;
34405 int byte_len;
34406 int imm_bytes;
34407 } sched_insn_info;
34408
34409 /* Linked list of dispatch windows. This is a two-way list of
34410 dispatch windows of a basic block. It contains information about
34411 the number of uops in the window and the total number of
34412 instructions and of bytes in the object code for this dispatch
34413 window. */
34414 typedef struct dispatch_windows_s {
34415 int num_insn; /* Number of insn in the window. */
34416 int num_uops; /* Number of uops in the window. */
34417 int window_size; /* Number of bytes in the window. */
34418 int window_num; /* Window number, either 0 or 1. */
34419 int num_imm; /* Number of immediates in an insn. */
34420 int num_imm_32; /* Number of 32 bit immediates in an insn. */
34421 int num_imm_64; /* Number of 64 bit immediates in an insn. */
34422 int imm_size; /* Total immediates in the window. */
34423 int num_loads; /* Total memory loads in the window. */
34424 int num_stores; /* Total memory stores in the window. */
34425 int violation; /* Violation exists in window. */
34426 sched_insn_info *window; /* Pointer to the window. */
34427 struct dispatch_windows_s *next;
34428 struct dispatch_windows_s *prev;
34429 } dispatch_windows;
34430
34431 /* Immediate values used in an insn. */
34432 typedef struct imm_info_s
34433 {
34434 int imm;
34435 int imm32;
34436 int imm64;
34437 } imm_info;
34438
34439 static dispatch_windows *dispatch_window_list;
34440 static dispatch_windows *dispatch_window_list1;
34441
34442 /* Get dispatch group of insn. */
34443
34444 static enum dispatch_group
34445 get_mem_group (rtx insn)
34446 {
34447 enum attr_memory memory;
34448
34449 if (INSN_CODE (insn) < 0)
34450 return disp_no_group;
34451 memory = get_attr_memory (insn);
34452 if (memory == MEMORY_STORE)
34453 return disp_store;
34454
34455 if (memory == MEMORY_LOAD)
34456 return disp_load;
34457
34458 if (memory == MEMORY_BOTH)
34459 return disp_load_store;
34460
34461 return disp_no_group;
34462 }
34463
34464 /* Return true if insn is a compare instruction. */
34465
34466 static bool
34467 is_cmp (rtx insn)
34468 {
34469 enum attr_type type;
34470
34471 type = get_attr_type (insn);
34472 return (type == TYPE_TEST
34473 || type == TYPE_ICMP
34474 || type == TYPE_FCMP
34475 || GET_CODE (PATTERN (insn)) == COMPARE);
34476 }
34477
34478 /* Return true if a dispatch violation was encountered. */
34479
34480 static bool
34481 dispatch_violation (void)
34482 {
34483 if (dispatch_window_list->next)
34484 return dispatch_window_list->next->violation;
34485 return dispatch_window_list->violation;
34486 }
34487
34488 /* Return true if insn is a branch instruction. */
34489
34490 static bool
34491 is_branch (rtx insn)
34492 {
34493 return (CALL_P (insn) || JUMP_P (insn));
34494 }
34495
34496 /* Return true if insn is a prefetch instruction. */
34497
34498 static bool
34499 is_prefetch (rtx insn)
34500 {
34501 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
34502 }
34503
34504 /* This function initializes a dispatch window and the list container holding a
34505 pointer to the window. */
34506
34507 static void
34508 init_window (int window_num)
34509 {
34510 int i;
34511 dispatch_windows *new_list;
34512
34513 if (window_num == 0)
34514 new_list = dispatch_window_list;
34515 else
34516 new_list = dispatch_window_list1;
34517
34518 new_list->num_insn = 0;
34519 new_list->num_uops = 0;
34520 new_list->window_size = 0;
34521 new_list->next = NULL;
34522 new_list->prev = NULL;
34523 new_list->window_num = window_num;
34524 new_list->num_imm = 0;
34525 new_list->num_imm_32 = 0;
34526 new_list->num_imm_64 = 0;
34527 new_list->imm_size = 0;
34528 new_list->num_loads = 0;
34529 new_list->num_stores = 0;
34530 new_list->violation = false;
34531
34532 for (i = 0; i < MAX_INSN; i++)
34533 {
34534 new_list->window[i].insn = NULL;
34535 new_list->window[i].group = disp_no_group;
34536 new_list->window[i].path = no_path;
34537 new_list->window[i].byte_len = 0;
34538 new_list->window[i].imm_bytes = 0;
34539 }
34540 return;
34541 }
34542
34543 /* This function allocates and initializes a dispatch window and the
34544 list container holding a pointer to the window. */
34545
34546 static dispatch_windows *
34547 allocate_window (void)
34548 {
34549 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
34550 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
34551
34552 return new_list;
34553 }
34554
34555 /* This routine initializes the dispatch scheduling information. It
34556 initiates building dispatch scheduler tables and constructs the
34557 first dispatch window. */
34558
34559 static void
34560 init_dispatch_sched (void)
34561 {
34562 /* Allocate a dispatch list and a window. */
34563 dispatch_window_list = allocate_window ();
34564 dispatch_window_list1 = allocate_window ();
34565 init_window (0);
34566 init_window (1);
34567 }
34568
34569 /* This function returns true if a branch is detected. End of a basic block
34570 does not have to be a branch, but here we assume only branches end a
34571 window. */
34572
34573 static bool
34574 is_end_basic_block (enum dispatch_group group)
34575 {
34576 return group == disp_branch;
34577 }
34578
34579 /* This function is called when the end of window processing is reached. */
34580
34581 static void
34582 process_end_window (void)
34583 {
34584 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
34585 if (dispatch_window_list->next)
34586 {
34587 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
34588 gcc_assert (dispatch_window_list->window_size
34589 + dispatch_window_list1->window_size <= 48);
34590 init_window (1);
34591 }
34592 init_window (0);
34593 }
34594
34595 /* Allocates a new dispatch window and adds it to WINDOW_LIST.
34596 WINDOW_NUM is either 0 or 1. A maximum of two windows are generated
34597 for 48 bytes of instructions. Note that these windows are not dispatch
34598 windows whose size is DISPATCH_WINDOW_SIZE. */
34599
34600 static dispatch_windows *
34601 allocate_next_window (int window_num)
34602 {
34603 if (window_num == 0)
34604 {
34605 if (dispatch_window_list->next)
34606 init_window (1);
34607 init_window (0);
34608 return dispatch_window_list;
34609 }
34610
34611 dispatch_window_list->next = dispatch_window_list1;
34612 dispatch_window_list1->prev = dispatch_window_list;
34613
34614 return dispatch_window_list1;
34615 }
34616
34617 /* Increment the number of immediate operands of an instruction. */
34618
34619 static int
34620 find_constant_1 (rtx *in_rtx, imm_info *imm_values)
34621 {
34622 if (*in_rtx == 0)
34623 return 0;
34624
34625 switch (GET_CODE (*in_rtx))
34626 {
34627 case CONST:
34628 case SYMBOL_REF:
34629 case CONST_INT:
34630 (imm_values->imm)++;
34631 if (x86_64_immediate_operand (*in_rtx, SImode))
34632 (imm_values->imm32)++;
34633 else
34634 (imm_values->imm64)++;
34635 break;
34636
34637 case CONST_DOUBLE:
34638 (imm_values->imm)++;
34639 (imm_values->imm64)++;
34640 break;
34641
34642 case CODE_LABEL:
34643 if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
34644 {
34645 (imm_values->imm)++;
34646 (imm_values->imm32)++;
34647 }
34648 break;
34649
34650 default:
34651 break;
34652 }
34653
34654 return 0;
34655 }
34656
34657 /* Compute number of immediate operands of an instruction. */
34658
34659 static void
34660 find_constant (rtx in_rtx, imm_info *imm_values)
34661 {
34662 for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
34663 (rtx_function) find_constant_1, (void *) imm_values);
34664 }
34665
34666 /* Return the total size of the immediate operands of an instruction along
34667 with the number of corresponding immediate operands. It initializes its
34668 parameters to zero before calling FIND_CONSTANT.
34669 INSN is the input instruction. IMM is the total number of immediates.
34670 IMM32 is the number of 32 bit immediates. IMM64 is the number of 64
34671 bit immediates. */
34672
34673 static int
34674 get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
34675 {
34676 imm_info imm_values = {0, 0, 0};
34677
34678 find_constant (insn, &imm_values);
34679 *imm = imm_values.imm;
34680 *imm32 = imm_values.imm32;
34681 *imm64 = imm_values.imm64;
34682 return imm_values.imm32 * 4 + imm_values.imm64 * 8;
34683 }
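/* For example, an instruction with one 32-bit and one 64-bit immediate
   operand yields *IMM == 2, *IMM32 == 1, *IMM64 == 1 and a return value
   of 4 + 8 == 12 bytes.  */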
34684
34685 /* This function indicates whether an instruction has any immediate
34686 operands. */
34687
34688 static bool
34689 has_immediate (rtx insn)
34690 {
34691 int num_imm_operand;
34692 int num_imm32_operand;
34693 int num_imm64_operand;
34694
34695 if (insn)
34696 return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34697 &num_imm64_operand);
34698 return false;
34699 }
34700
34701 /* Return the single, double or multi decoding path of an instruction. */
34702
34703 static enum insn_path
34704 get_insn_path (rtx insn)
34705 {
34706 enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);
34707
34708 if ((int)path == 0)
34709 return path_single;
34710
34711 if ((int)path == 1)
34712 return path_double;
34713
34714 return path_multi;
34715 }
34716
34717 /* Return insn dispatch group. */
34718
34719 static enum dispatch_group
34720 get_insn_group (rtx insn)
34721 {
34722 enum dispatch_group group = get_mem_group (insn);
34723 if (group)
34724 return group;
34725
34726 if (is_branch (insn))
34727 return disp_branch;
34728
34729 if (is_cmp (insn))
34730 return disp_cmp;
34731
34732 if (has_immediate (insn))
34733 return disp_imm;
34734
34735 if (is_prefetch (insn))
34736 return disp_prefetch;
34737
34738 return disp_no_group;
34739 }
34740
34741 /* Count number of GROUP restricted instructions in a dispatch
34742 window WINDOW_LIST. */
34743
34744 static int
34745 count_num_restricted (rtx insn, dispatch_windows *window_list)
34746 {
34747 enum dispatch_group group = get_insn_group (insn);
34748 int imm_size;
34749 int num_imm_operand;
34750 int num_imm32_operand;
34751 int num_imm64_operand;
34752
34753 if (group == disp_no_group)
34754 return 0;
34755
34756 if (group == disp_imm)
34757 {
34758 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34759 &num_imm64_operand);
34760 if (window_list->imm_size + imm_size > MAX_IMM_SIZE
34761 || num_imm_operand + window_list->num_imm > MAX_IMM
34762 || (num_imm32_operand > 0
34763 && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
34764 || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
34765 || (num_imm64_operand > 0
34766 && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
34767 || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
34768 || (window_list->imm_size + imm_size == MAX_IMM_SIZE
34769 && num_imm64_operand > 0
34770 && ((window_list->num_imm_64 > 0
34771 && window_list->num_insn >= 2)
34772 || window_list->num_insn >= 3)))
34773 return BIG;
34774
34775 return 1;
34776 }
34777
34778 if ((group == disp_load_store
34779 && (window_list->num_loads >= MAX_LOAD
34780 || window_list->num_stores >= MAX_STORE))
34781 || ((group == disp_load
34782 || group == disp_prefetch)
34783 && window_list->num_loads >= MAX_LOAD)
34784 || (group == disp_store
34785 && window_list->num_stores >= MAX_STORE))
34786 return BIG;
34787
34788 return 1;
34789 }
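/* For example, an insn carrying a single 64-bit immediate is counted as
   BIG when the window already holds MAX_IMM_64 (2) 64-bit immediates,
   since num_imm_64 + 1 would exceed that limit and the insn cannot be
   dispatched in this window.  */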
34790
34791 /* This function returns true if insn satisfies dispatch rules on the
34792 last window scheduled. */
34793
34794 static bool
34795 fits_dispatch_window (rtx insn)
34796 {
34797 dispatch_windows *window_list = dispatch_window_list;
34798 dispatch_windows *window_list_next = dispatch_window_list->next;
34799 unsigned int num_restrict;
34800 enum dispatch_group group = get_insn_group (insn);
34801 enum insn_path path = get_insn_path (insn);
34802 int sum;
34803
34804 /* Make disp_cmp and disp_jcc get scheduled at the latest. These
34805 instructions should be given the lowest priority in the
34806 scheduling process in the Haifa scheduler to make sure they will be
34807 scheduled in the same dispatch window as the reference to them. */
34808 if (group == disp_jcc || group == disp_cmp)
34809 return false;
34810
34811 /* Check nonrestricted. */
34812 if (group == disp_no_group || group == disp_branch)
34813 return true;
34814
34815 /* Get last dispatch window. */
34816 if (window_list_next)
34817 window_list = window_list_next;
34818
34819 if (window_list->window_num == 1)
34820 {
34821 sum = window_list->prev->window_size + window_list->window_size;
34822
34823 if (sum == 32
34824 || (min_insn_size (insn) + sum) >= 48)
34825 /* Window 1 is full. Go for next window. */
34826 return true;
34827 }
34828
34829 num_restrict = count_num_restricted (insn, window_list);
34830
34831 if (num_restrict > num_allowable_groups[group])
34832 return false;
34833
34834 /* See if it fits in the first window. */
34835 if (window_list->window_num == 0)
34836 {
34837 /* The first window should have only single and double path
34838 uops. */
34839 if (path == path_double
34840 && (window_list->num_uops + 2) > MAX_INSN)
34841 return false;
34842 else if (path != path_single)
34843 return false;
34844 }
34845 return true;
34846 }
34847
34848 /* Add an instruction INSN with NUM_UOPS micro-operations to the
34849 dispatch window WINDOW_LIST. */
34850
34851 static void
34852 add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
34853 {
34854 int byte_len = min_insn_size (insn);
34855 int num_insn = window_list->num_insn;
34856 int imm_size;
34857 sched_insn_info *window = window_list->window;
34858 enum dispatch_group group = get_insn_group (insn);
34859 enum insn_path path = get_insn_path (insn);
34860 int num_imm_operand;
34861 int num_imm32_operand;
34862 int num_imm64_operand;
34863
34864 if (!window_list->violation && group != disp_cmp
34865 && !fits_dispatch_window (insn))
34866 window_list->violation = true;
34867
34868 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34869 &num_imm64_operand);
34870
34871 /* Initialize window with new instruction. */
34872 window[num_insn].insn = insn;
34873 window[num_insn].byte_len = byte_len;
34874 window[num_insn].group = group;
34875 window[num_insn].path = path;
34876 window[num_insn].imm_bytes = imm_size;
34877
34878 window_list->window_size += byte_len;
34879 window_list->num_insn = num_insn + 1;
34880 window_list->num_uops = window_list->num_uops + num_uops;
34881 window_list->imm_size += imm_size;
34882 window_list->num_imm += num_imm_operand;
34883 window_list->num_imm_32 += num_imm32_operand;
34884 window_list->num_imm_64 += num_imm64_operand;
34885
34886 if (group == disp_store)
34887 window_list->num_stores += 1;
34888 else if (group == disp_load
34889 || group == disp_prefetch)
34890 window_list->num_loads += 1;
34891 else if (group == disp_load_store)
34892 {
34893 window_list->num_stores += 1;
34894 window_list->num_loads += 1;
34895 }
34896 }
34897
34898 /* Adds a scheduled instruction, INSN, to the current dispatch window.
34899 If the total bytes of instructions or the number of instructions in
34900 the window exceed the allowable limit, it allocates a new window. */
34901
34902 static void
34903 add_to_dispatch_window (rtx insn)
34904 {
34905 int byte_len;
34906 dispatch_windows *window_list;
34907 dispatch_windows *next_list;
34908 dispatch_windows *window0_list;
34909 enum insn_path path;
34910 enum dispatch_group insn_group;
34911 bool insn_fits;
34912 int num_insn;
34913 int num_uops;
34914 int window_num;
34915 int insn_num_uops;
34916 int sum;
34917
34918 if (INSN_CODE (insn) < 0)
34919 return;
34920
34921 byte_len = min_insn_size (insn);
34922 window_list = dispatch_window_list;
34923 next_list = window_list->next;
34924 path = get_insn_path (insn);
34925 insn_group = get_insn_group (insn);
34926
34927 /* Get the last dispatch window. */
34928 if (next_list)
34929 window_list = dispatch_window_list->next;
34930
34931 if (path == path_single)
34932 insn_num_uops = 1;
34933 else if (path == path_double)
34934 insn_num_uops = 2;
34935 else
34936 insn_num_uops = (int) path;
34937
34938 /* If the current window is full, get a new window.
34939 Window number zero is full if MAX_INSN uops are scheduled in it.
34940 Window number one is full if window zero's bytes plus window
34941 one's bytes total 32, if adding the bytes of the new instruction
34942 pushes the total to 48 or more, or if it already has MAX_INSN
34943 instructions in it. */
34944 num_insn = window_list->num_insn;
34945 num_uops = window_list->num_uops;
34946 window_num = window_list->window_num;
34947 insn_fits = fits_dispatch_window (insn);
34948
34949 if (num_insn >= MAX_INSN
34950 || num_uops + insn_num_uops > MAX_INSN
34951 || !(insn_fits))
34952 {
34953 window_num = ~window_num & 1;
34954 window_list = allocate_next_window (window_num);
34955 }
34956
34957 if (window_num == 0)
34958 {
34959 add_insn_window (insn, window_list, insn_num_uops);
34960 if (window_list->num_insn >= MAX_INSN
34961 && insn_group == disp_branch)
34962 {
34963 process_end_window ();
34964 return;
34965 }
34966 }
34967 else if (window_num == 1)
34968 {
34969 window0_list = window_list->prev;
34970 sum = window0_list->window_size + window_list->window_size;
34971 if (sum == 32
34972 || (byte_len + sum) >= 48)
34973 {
34974 process_end_window ();
34975 window_list = dispatch_window_list;
34976 }
34977
34978 add_insn_window (insn, window_list, insn_num_uops);
34979 }
34980 else
34981 gcc_unreachable ();
34982
34983 if (is_end_basic_block (insn_group))
34984 {
34985 /* End of basic block is reached; do end-basic-block processing. */
34986 process_end_window ();
34987 return;
34988 }
34989 }
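/* To illustrate the flow above: while window zero holds fewer than MAX_INSN
   uops, instructions are added to it; once it is full, or the insn does not
   fit, window_num is toggled and window one is used.  When the two windows
   together reach 32 bytes, or adding the new instruction would bring them to
   48 bytes or more, process_end_window resets both windows and a fresh pair
   is started.  */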
34990
34991 /* Print the dispatch window, WINDOW_NUM, to FILE. */
34992
34993 DEBUG_FUNCTION static void
34994 debug_dispatch_window_file (FILE *file, int window_num)
34995 {
34996 dispatch_windows *list;
34997 int i;
34998
34999 if (window_num == 0)
35000 list = dispatch_window_list;
35001 else
35002 list = dispatch_window_list1;
35003
35004 fprintf (file, "Window #%d:\n", list->window_num);
35005 fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
35006 list->num_insn, list->num_uops, list->window_size);
35007 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
35008 list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);
35009
35010 fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
35011 list->num_stores);
35012 fprintf (file, " insn info:\n");
35013
35014 for (i = 0; i < MAX_INSN; i++)
35015 {
35016 if (!list->window[i].insn)
35017 break;
35018 fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
35019 i, group_name[list->window[i].group],
35020 i, (void *)list->window[i].insn,
35021 i, list->window[i].path,
35022 i, list->window[i].byte_len,
35023 i, list->window[i].imm_bytes);
35024 }
35025 }
35026
35027 /* Print to stdout a dispatch window. */
35028
35029 DEBUG_FUNCTION void
35030 debug_dispatch_window (int window_num)
35031 {
35032 debug_dispatch_window_file (stdout, window_num);
35033 }
35034
35035 /* Print INSN dispatch information to FILE. */
35036
35037 DEBUG_FUNCTION static void
35038 debug_insn_dispatch_info_file (FILE *file, rtx insn)
35039 {
35040 int byte_len;
35041 enum insn_path path;
35042 enum dispatch_group group;
35043 int imm_size;
35044 int num_imm_operand;
35045 int num_imm32_operand;
35046 int num_imm64_operand;
35047
35048 if (INSN_CODE (insn) < 0)
35049 return;
35050
35051 byte_len = min_insn_size (insn);
35052 path = get_insn_path (insn);
35053 group = get_insn_group (insn);
35054 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
35055 &num_imm64_operand);
35056
35057 fprintf (file, " insn info:\n");
35058 fprintf (file, " group = %s, path = %d, byte_len = %d\n",
35059 group_name[group], path, byte_len);
35060 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
35061 num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
35062 }
35063
35064 /* Print to stdout the status of the ready list with respect to
35065 dispatch windows. */
35066
35067 DEBUG_FUNCTION void
35068 debug_ready_dispatch (void)
35069 {
35070 int i;
35071 int no_ready = number_in_ready ();
35072
35073 fprintf (stdout, "Number of ready: %d\n", no_ready);
35074
35075 for (i = 0; i < no_ready; i++)
35076 debug_insn_dispatch_info_file (stdout, get_ready_element (i));
35077 }
35078
35079 /* This routine is the driver of the dispatch scheduler. */
35080
35081 static void
35082 do_dispatch (rtx insn, int mode)
35083 {
35084 if (mode == DISPATCH_INIT)
35085 init_dispatch_sched ();
35086 else if (mode == ADD_TO_DISPATCH_WINDOW)
35087 add_to_dispatch_window (insn);
35088 }
35089
35090 /* Return TRUE if Dispatch Scheduling is supported. */
35091
35092 static bool
35093 has_dispatch (rtx insn, int action)
35094 {
35095 if (ix86_tune == PROCESSOR_BDVER1 && flag_dispatch_scheduler)
35096 switch (action)
35097 {
35098 default:
35099 return false;
35100
35101 case IS_DISPATCH_ON:
35102 return true;
35103 break;
35104
35105 case IS_CMP:
35106 return is_cmp (insn);
35107
35108 case DISPATCH_VIOLATION:
35109 return dispatch_violation ();
35110
35111 case FITS_DISPATCH_WINDOW:
35112 return fits_dispatch_window (insn);
35113 }
35114
35115 return false;
35116 }
35117
35118 /* ??? No autovectorization into MMX or 3DNOW until we can reliably
35119 place emms and femms instructions. */
35120
35121 static enum machine_mode
35122 ix86_preferred_simd_mode (enum machine_mode mode)
35123 {
35124 /* Disable double precision vectorizer if needed. */
35125 if (mode == DFmode && !TARGET_VECTORIZE_DOUBLE)
35126 return word_mode;
35127
35128 if (!TARGET_AVX && !TARGET_SSE)
35129 return word_mode;
35130
35131 switch (mode)
35132 {
35133 case SFmode:
35134 return (TARGET_AVX && !flag_prefer_avx128) ? V8SFmode : V4SFmode;
35135 case DFmode:
35136 return (TARGET_AVX && !flag_prefer_avx128) ? V4DFmode : V2DFmode;
35137 case DImode:
35138 return V2DImode;
35139 case SImode:
35140 return V4SImode;
35141 case HImode:
35142 return V8HImode;
35143 case QImode:
35144 return V16QImode;
35145
35146 default:;
35147 }
35148
35149 return word_mode;
35150 }
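/* For example, for SFmode this prefers V8SFmode when AVX is enabled and
   flag_prefer_avx128 is not set, and V4SFmode otherwise; for DFmode the
   scalar word_mode is returned when TARGET_VECTORIZE_DOUBLE is off, which
   effectively disables double-precision vectorization.  */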
35151
35152 /* If AVX is enabled then try vectorizing with both 256bit and 128bit
35153 vectors. */
35154
35155 static unsigned int
35156 ix86_autovectorize_vector_sizes (void)
35157 {
35158 return TARGET_AVX ? 32 | 16 : 0;
35159 }
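/* With AVX enabled this returns 32 | 16, advertising both 32-byte and
   16-byte vector sizes to the vectorizer; otherwise it returns 0, which
   presumably leaves only the size of the preferred SIMD mode to be tried.  */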
35160
35161 /* Initialize the GCC target structure. */
35162 #undef TARGET_RETURN_IN_MEMORY
35163 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
35164
35165 #undef TARGET_LEGITIMIZE_ADDRESS
35166 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
35167
35168 #undef TARGET_ATTRIBUTE_TABLE
35169 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
35170 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
35171 # undef TARGET_MERGE_DECL_ATTRIBUTES
35172 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
35173 #endif
35174
35175 #undef TARGET_COMP_TYPE_ATTRIBUTES
35176 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
35177
35178 #undef TARGET_INIT_BUILTINS
35179 #define TARGET_INIT_BUILTINS ix86_init_builtins
35180 #undef TARGET_BUILTIN_DECL
35181 #define TARGET_BUILTIN_DECL ix86_builtin_decl
35182 #undef TARGET_EXPAND_BUILTIN
35183 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
35184
35185 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
35186 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
35187 ix86_builtin_vectorized_function
35188
35189 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
35190 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
35191
35192 #undef TARGET_BUILTIN_RECIPROCAL
35193 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
35194
35195 #undef TARGET_ASM_FUNCTION_EPILOGUE
35196 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
35197
35198 #undef TARGET_ENCODE_SECTION_INFO
35199 #ifndef SUBTARGET_ENCODE_SECTION_INFO
35200 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
35201 #else
35202 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
35203 #endif
35204
35205 #undef TARGET_ASM_OPEN_PAREN
35206 #define TARGET_ASM_OPEN_PAREN ""
35207 #undef TARGET_ASM_CLOSE_PAREN
35208 #define TARGET_ASM_CLOSE_PAREN ""
35209
35210 #undef TARGET_ASM_BYTE_OP
35211 #define TARGET_ASM_BYTE_OP ASM_BYTE
35212
35213 #undef TARGET_ASM_ALIGNED_HI_OP
35214 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
35215 #undef TARGET_ASM_ALIGNED_SI_OP
35216 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
35217 #ifdef ASM_QUAD
35218 #undef TARGET_ASM_ALIGNED_DI_OP
35219 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
35220 #endif
35221
35222 #undef TARGET_PROFILE_BEFORE_PROLOGUE
35223 #define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue
35224
35225 #undef TARGET_ASM_UNALIGNED_HI_OP
35226 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
35227 #undef TARGET_ASM_UNALIGNED_SI_OP
35228 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
35229 #undef TARGET_ASM_UNALIGNED_DI_OP
35230 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
35231
35232 #undef TARGET_PRINT_OPERAND
35233 #define TARGET_PRINT_OPERAND ix86_print_operand
35234 #undef TARGET_PRINT_OPERAND_ADDRESS
35235 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
35236 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
35237 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
35238 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
35239 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
35240
35241 #undef TARGET_SCHED_INIT_GLOBAL
35242 #define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
35243 #undef TARGET_SCHED_ADJUST_COST
35244 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
35245 #undef TARGET_SCHED_ISSUE_RATE
35246 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
35247 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
35248 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
35249 ia32_multipass_dfa_lookahead
35250
35251 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
35252 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
35253
35254 #ifdef HAVE_AS_TLS
35255 #undef TARGET_HAVE_TLS
35256 #define TARGET_HAVE_TLS true
35257 #endif
35258 #undef TARGET_CANNOT_FORCE_CONST_MEM
35259 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
35260 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
35261 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
35262
35263 #undef TARGET_DELEGITIMIZE_ADDRESS
35264 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
35265
35266 #undef TARGET_MS_BITFIELD_LAYOUT_P
35267 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
35268
35269 #if TARGET_MACHO
35270 #undef TARGET_BINDS_LOCAL_P
35271 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
35272 #endif
35273 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
35274 #undef TARGET_BINDS_LOCAL_P
35275 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
35276 #endif
35277
35278 #undef TARGET_ASM_OUTPUT_MI_THUNK
35279 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
35280 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
35281 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
35282
35283 #undef TARGET_ASM_FILE_START
35284 #define TARGET_ASM_FILE_START x86_file_start
35285
35286 #undef TARGET_DEFAULT_TARGET_FLAGS
35287 #define TARGET_DEFAULT_TARGET_FLAGS \
35288 (TARGET_DEFAULT \
35289 | TARGET_SUBTARGET_DEFAULT \
35290 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
35291
35292 #undef TARGET_HANDLE_OPTION
35293 #define TARGET_HANDLE_OPTION ix86_handle_option
35294
35295 #undef TARGET_OPTION_OVERRIDE
35296 #define TARGET_OPTION_OVERRIDE ix86_option_override
35297 #undef TARGET_OPTION_OPTIMIZATION_TABLE
35298 #define TARGET_OPTION_OPTIMIZATION_TABLE ix86_option_optimization_table
35299 #undef TARGET_OPTION_INIT_STRUCT
35300 #define TARGET_OPTION_INIT_STRUCT ix86_option_init_struct
35301
35302 #undef TARGET_REGISTER_MOVE_COST
35303 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
35304 #undef TARGET_MEMORY_MOVE_COST
35305 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
35306 #undef TARGET_RTX_COSTS
35307 #define TARGET_RTX_COSTS ix86_rtx_costs
35308 #undef TARGET_ADDRESS_COST
35309 #define TARGET_ADDRESS_COST ix86_address_cost
35310
35311 #undef TARGET_FIXED_CONDITION_CODE_REGS
35312 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
35313 #undef TARGET_CC_MODES_COMPATIBLE
35314 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
35315
35316 #undef TARGET_MACHINE_DEPENDENT_REORG
35317 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
35318
35319 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
35320 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
35321
35322 #undef TARGET_BUILD_BUILTIN_VA_LIST
35323 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
35324
35325 #undef TARGET_ENUM_VA_LIST_P
35326 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
35327
35328 #undef TARGET_FN_ABI_VA_LIST
35329 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
35330
35331 #undef TARGET_CANONICAL_VA_LIST_TYPE
35332 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
35333
35334 #undef TARGET_EXPAND_BUILTIN_VA_START
35335 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
35336
35337 #undef TARGET_MD_ASM_CLOBBERS
35338 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
35339
35340 #undef TARGET_PROMOTE_PROTOTYPES
35341 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
35342 #undef TARGET_STRUCT_VALUE_RTX
35343 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
35344 #undef TARGET_SETUP_INCOMING_VARARGS
35345 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
35346 #undef TARGET_MUST_PASS_IN_STACK
35347 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
35348 #undef TARGET_FUNCTION_ARG_ADVANCE
35349 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
35350 #undef TARGET_FUNCTION_ARG
35351 #define TARGET_FUNCTION_ARG ix86_function_arg
35352 #undef TARGET_FUNCTION_ARG_BOUNDARY
35353 #define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
35354 #undef TARGET_PASS_BY_REFERENCE
35355 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
35356 #undef TARGET_INTERNAL_ARG_POINTER
35357 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
35358 #undef TARGET_UPDATE_STACK_BOUNDARY
35359 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
35360 #undef TARGET_GET_DRAP_RTX
35361 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
35362 #undef TARGET_STRICT_ARGUMENT_NAMING
35363 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
35364 #undef TARGET_STATIC_CHAIN
35365 #define TARGET_STATIC_CHAIN ix86_static_chain
35366 #undef TARGET_TRAMPOLINE_INIT
35367 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
35368 #undef TARGET_RETURN_POPS_ARGS
35369 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
35370
35371 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
35372 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
35373
35374 #undef TARGET_SCALAR_MODE_SUPPORTED_P
35375 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
35376
35377 #undef TARGET_VECTOR_MODE_SUPPORTED_P
35378 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
35379
35380 #undef TARGET_C_MODE_FOR_SUFFIX
35381 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
35382
35383 #ifdef HAVE_AS_TLS
35384 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
35385 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
35386 #endif
35387
35388 #ifdef SUBTARGET_INSERT_ATTRIBUTES
35389 #undef TARGET_INSERT_ATTRIBUTES
35390 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
35391 #endif
35392
35393 #undef TARGET_MANGLE_TYPE
35394 #define TARGET_MANGLE_TYPE ix86_mangle_type
35395
35396 #undef TARGET_STACK_PROTECT_FAIL
35397 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
35398
35399 #undef TARGET_SUPPORTS_SPLIT_STACK
35400 #define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack
35401
35402 #undef TARGET_FUNCTION_VALUE
35403 #define TARGET_FUNCTION_VALUE ix86_function_value
35404
35405 #undef TARGET_FUNCTION_VALUE_REGNO_P
35406 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
35407
35408 #undef TARGET_SECONDARY_RELOAD
35409 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
35410
35411 #undef TARGET_PREFERRED_RELOAD_CLASS
35412 #define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
35413 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
35414 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
35415 #undef TARGET_CLASS_LIKELY_SPILLED_P
35416 #define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p
35417
35418 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
35419 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
35420 ix86_builtin_vectorization_cost
35421 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
35422 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
35423 ix86_vectorize_builtin_vec_perm
35424 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
35425 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
35426 ix86_vectorize_builtin_vec_perm_ok
35427 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
35428 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
35429 ix86_preferred_simd_mode
35430 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
35431 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
35432 ix86_autovectorize_vector_sizes
35433
35434 #undef TARGET_SET_CURRENT_FUNCTION
35435 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
35436
35437 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
35438 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
35439
35440 #undef TARGET_OPTION_SAVE
35441 #define TARGET_OPTION_SAVE ix86_function_specific_save
35442
35443 #undef TARGET_OPTION_RESTORE
35444 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
35445
35446 #undef TARGET_OPTION_PRINT
35447 #define TARGET_OPTION_PRINT ix86_function_specific_print
35448
35449 #undef TARGET_CAN_INLINE_P
35450 #define TARGET_CAN_INLINE_P ix86_can_inline_p
35451
35452 #undef TARGET_EXPAND_TO_RTL_HOOK
35453 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
35454
35455 #undef TARGET_LEGITIMATE_ADDRESS_P
35456 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
35457
35458 #undef TARGET_LEGITIMATE_CONSTANT_P
35459 #define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p
35460
35461 #undef TARGET_FRAME_POINTER_REQUIRED
35462 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
35463
35464 #undef TARGET_CAN_ELIMINATE
35465 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
35466
35467 #undef TARGET_EXTRA_LIVE_ON_ENTRY
35468 #define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry
35469
35470 #undef TARGET_ASM_CODE_END
35471 #define TARGET_ASM_CODE_END ix86_code_end
35472
35473 #undef TARGET_CONDITIONAL_REGISTER_USAGE
35474 #define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage
35475
35476 #if TARGET_MACHO
35477 #undef TARGET_INIT_LIBFUNCS
35478 #define TARGET_INIT_LIBFUNCS darwin_rename_builtins
35479 #endif
35480
35481 struct gcc_target targetm = TARGET_INITIALIZER;
35482 \f
35483 #include "gt-i386.h"