1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-codes.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "toplev.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "langhooks.h"
48 #include "cgraph.h"
49 #include "gimple.h"
50 #include "dwarf2.h"
51 #include "df.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54 #include "cselib.h"
55 #include "debug.h"
56 #include "dwarf2out.h"
57
58 static rtx legitimize_dllimport_symbol (rtx, bool);
59
60 #ifndef CHECK_STACK_LIMIT
61 #define CHECK_STACK_LIMIT (-1)
62 #endif
63
64 /* Return index of given mode in mult and division cost tables. */
65 #define MODE_INDEX(mode) \
66 ((mode) == QImode ? 0 \
67 : (mode) == HImode ? 1 \
68 : (mode) == SImode ? 2 \
69 : (mode) == DImode ? 3 \
70 : 4)
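/* Illustrative note (an editor's sketch, not part of the cost tables
   themselves): MODE_INDEX (QImode) == 0, MODE_INDEX (HImode) == 1,
   MODE_INDEX (SImode) == 2, MODE_INDEX (DImode) == 3, and anything else
   maps to 4 ("other").  So, for example, the SImode multiply start-up cost
   of the active tuning is ix86_cost->mult_init[MODE_INDEX (SImode)], i.e.
   element 2 of the array (assuming the mult_init field name declared for
   struct processor_costs in i386.h).  */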
71
72 /* Processor costs (relative to an add) */
73 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
74 #define COSTS_N_BYTES(N) ((N) * 2)
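/* Worked example of the assumption above: with COSTS_N_INSNS (N) == (N) * 4
   and an add being 2 bytes, COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so a
   2-byte add in the size tables costs the same as one generic insn in the
   speed tables.  This keeps ix86_size_cost directly comparable with the
   per-processor speed cost tables below.  */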
75
76 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
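/* How the stringop tables below are read (a sketch; see struct stringop_algs
   in i386.h for the authoritative layout): the first member is the algorithm
   used when the block size is unknown at compile time, followed by a list of
   {max, alg} pairs meaning "use ALG for known sizes up to MAX bytes"; a MAX
   of -1 terminates the list and covers all larger sizes.  Each processor
   supplies two such tables for memcpy and two for memset, one per 32-bit and
   64-bit code; DUMMY_STRINGOP_ALGS (always libcall) fills the slot for the
   variant a given tuning never uses.  For example,
     {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}, DUMMY_STRINGOP_ALGS}
   roughly means "use rep movsb for every size" in the first slot and leaves
   the other slot as a placeholder.  */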
77
78 const
79 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
80 COSTS_N_BYTES (2), /* cost of an add instruction */
81 COSTS_N_BYTES (3), /* cost of a lea instruction */
82 COSTS_N_BYTES (2), /* variable shift costs */
83 COSTS_N_BYTES (3), /* constant shift costs */
84 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
85 COSTS_N_BYTES (3), /* HI */
86 COSTS_N_BYTES (3), /* SI */
87 COSTS_N_BYTES (3), /* DI */
88 COSTS_N_BYTES (5)}, /* other */
89 0, /* cost of multiply per each bit set */
90 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
91 COSTS_N_BYTES (3), /* HI */
92 COSTS_N_BYTES (3), /* SI */
93 COSTS_N_BYTES (3), /* DI */
94 COSTS_N_BYTES (5)}, /* other */
95 COSTS_N_BYTES (3), /* cost of movsx */
96 COSTS_N_BYTES (3), /* cost of movzx */
97 0, /* "large" insn */
98 2, /* MOVE_RATIO */
99 2, /* cost for loading QImode using movzbl */
100 {2, 2, 2}, /* cost of loading integer registers
101 in QImode, HImode and SImode.
102 Relative to reg-reg move (2). */
103 {2, 2, 2}, /* cost of storing integer registers */
104 2, /* cost of reg,reg fld/fst */
105 {2, 2, 2}, /* cost of loading fp registers
106 in SFmode, DFmode and XFmode */
107 {2, 2, 2}, /* cost of storing fp registers
108 in SFmode, DFmode and XFmode */
109 3, /* cost of moving MMX register */
110 {3, 3}, /* cost of loading MMX registers
111 in SImode and DImode */
112 {3, 3}, /* cost of storing MMX registers
113 in SImode and DImode */
114 3, /* cost of moving SSE register */
115 {3, 3, 3}, /* cost of loading SSE registers
116 in SImode, DImode and TImode */
117 {3, 3, 3}, /* cost of storing SSE registers
118 in SImode, DImode and TImode */
119 3, /* MMX or SSE register to integer */
120 0, /* size of l1 cache */
121 0, /* size of l2 cache */
122 0, /* size of prefetch block */
123 0, /* number of parallel prefetches */
124 2, /* Branch cost */
125 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
126 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
127 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
128 COSTS_N_BYTES (2), /* cost of FABS instruction. */
129 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
130 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
131 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
132 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
133 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
134 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
135 1, /* scalar_stmt_cost. */
136 1, /* scalar load_cost. */
137 1, /* scalar_store_cost. */
138 1, /* vec_stmt_cost. */
139 1, /* vec_to_scalar_cost. */
140 1, /* scalar_to_vec_cost. */
141 1, /* vec_align_load_cost. */
142 1, /* vec_unalign_load_cost. */
143 1, /* vec_store_cost. */
144 1, /* cond_taken_branch_cost. */
145 1, /* cond_not_taken_branch_cost. */
146 };
147
148 /* Processor costs (relative to an add) */
149 static const
150 struct processor_costs i386_cost = { /* 386 specific costs */
151 COSTS_N_INSNS (1), /* cost of an add instruction */
152 COSTS_N_INSNS (1), /* cost of a lea instruction */
153 COSTS_N_INSNS (3), /* variable shift costs */
154 COSTS_N_INSNS (2), /* constant shift costs */
155 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
156 COSTS_N_INSNS (6), /* HI */
157 COSTS_N_INSNS (6), /* SI */
158 COSTS_N_INSNS (6), /* DI */
159 COSTS_N_INSNS (6)}, /* other */
160 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
161 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
162 COSTS_N_INSNS (23), /* HI */
163 COSTS_N_INSNS (23), /* SI */
164 COSTS_N_INSNS (23), /* DI */
165 COSTS_N_INSNS (23)}, /* other */
166 COSTS_N_INSNS (3), /* cost of movsx */
167 COSTS_N_INSNS (2), /* cost of movzx */
168 15, /* "large" insn */
169 3, /* MOVE_RATIO */
170 4, /* cost for loading QImode using movzbl */
171 {2, 4, 2}, /* cost of loading integer registers
172 in QImode, HImode and SImode.
173 Relative to reg-reg move (2). */
174 {2, 4, 2}, /* cost of storing integer registers */
175 2, /* cost of reg,reg fld/fst */
176 {8, 8, 8}, /* cost of loading fp registers
177 in SFmode, DFmode and XFmode */
178 {8, 8, 8}, /* cost of storing fp registers
179 in SFmode, DFmode and XFmode */
180 2, /* cost of moving MMX register */
181 {4, 8}, /* cost of loading MMX registers
182 in SImode and DImode */
183 {4, 8}, /* cost of storing MMX registers
184 in SImode and DImode */
185 2, /* cost of moving SSE register */
186 {4, 8, 16}, /* cost of loading SSE registers
187 in SImode, DImode and TImode */
188 {4, 8, 16}, /* cost of storing SSE registers
189 in SImode, DImode and TImode */
190 3, /* MMX or SSE register to integer */
191 0, /* size of l1 cache */
192 0, /* size of l2 cache */
193 0, /* size of prefetch block */
194 0, /* number of parallel prefetches */
195 1, /* Branch cost */
196 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
197 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
198 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
199 COSTS_N_INSNS (22), /* cost of FABS instruction. */
200 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
201 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
202 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
203 DUMMY_STRINGOP_ALGS},
204 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
205 DUMMY_STRINGOP_ALGS},
206 1, /* scalar_stmt_cost. */
207 1, /* scalar load_cost. */
208 1, /* scalar_store_cost. */
209 1, /* vec_stmt_cost. */
210 1, /* vec_to_scalar_cost. */
211 1, /* scalar_to_vec_cost. */
212 1, /* vec_align_load_cost. */
213 2, /* vec_unalign_load_cost. */
214 1, /* vec_store_cost. */
215 3, /* cond_taken_branch_cost. */
216 1, /* cond_not_taken_branch_cost. */
217 };
218
219 static const
220 struct processor_costs i486_cost = { /* 486 specific costs */
221 COSTS_N_INSNS (1), /* cost of an add instruction */
222 COSTS_N_INSNS (1), /* cost of a lea instruction */
223 COSTS_N_INSNS (3), /* variable shift costs */
224 COSTS_N_INSNS (2), /* constant shift costs */
225 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
226 COSTS_N_INSNS (12), /* HI */
227 COSTS_N_INSNS (12), /* SI */
228 COSTS_N_INSNS (12), /* DI */
229 COSTS_N_INSNS (12)}, /* other */
230 1, /* cost of multiply per each bit set */
231 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
232 COSTS_N_INSNS (40), /* HI */
233 COSTS_N_INSNS (40), /* SI */
234 COSTS_N_INSNS (40), /* DI */
235 COSTS_N_INSNS (40)}, /* other */
236 COSTS_N_INSNS (3), /* cost of movsx */
237 COSTS_N_INSNS (2), /* cost of movzx */
238 15, /* "large" insn */
239 3, /* MOVE_RATIO */
240 4, /* cost for loading QImode using movzbl */
241 {2, 4, 2}, /* cost of loading integer registers
242 in QImode, HImode and SImode.
243 Relative to reg-reg move (2). */
244 {2, 4, 2}, /* cost of storing integer registers */
245 2, /* cost of reg,reg fld/fst */
246 {8, 8, 8}, /* cost of loading fp registers
247 in SFmode, DFmode and XFmode */
248 {8, 8, 8}, /* cost of storing fp registers
249 in SFmode, DFmode and XFmode */
250 2, /* cost of moving MMX register */
251 {4, 8}, /* cost of loading MMX registers
252 in SImode and DImode */
253 {4, 8}, /* cost of storing MMX registers
254 in SImode and DImode */
255 2, /* cost of moving SSE register */
256 {4, 8, 16}, /* cost of loading SSE registers
257 in SImode, DImode and TImode */
258 {4, 8, 16}, /* cost of storing SSE registers
259 in SImode, DImode and TImode */
260 3, /* MMX or SSE register to integer */
261 4, /* size of l1 cache. 486 has 8kB cache
262 shared for code and data, so 4kB is
263 not really precise. */
264 4, /* size of l2 cache */
265 0, /* size of prefetch block */
266 0, /* number of parallel prefetches */
267 1, /* Branch cost */
268 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
269 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
270 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
271 COSTS_N_INSNS (3), /* cost of FABS instruction. */
272 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
273 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
274 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
275 DUMMY_STRINGOP_ALGS},
276 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
277 DUMMY_STRINGOP_ALGS},
278 1, /* scalar_stmt_cost. */
279 1, /* scalar load_cost. */
280 1, /* scalar_store_cost. */
281 1, /* vec_stmt_cost. */
282 1, /* vec_to_scalar_cost. */
283 1, /* scalar_to_vec_cost. */
284 1, /* vec_align_load_cost. */
285 2, /* vec_unalign_load_cost. */
286 1, /* vec_store_cost. */
287 3, /* cond_taken_branch_cost. */
288 1, /* cond_not_taken_branch_cost. */
289 };
290
291 static const
292 struct processor_costs pentium_cost = {
293 COSTS_N_INSNS (1), /* cost of an add instruction */
294 COSTS_N_INSNS (1), /* cost of a lea instruction */
295 COSTS_N_INSNS (4), /* variable shift costs */
296 COSTS_N_INSNS (1), /* constant shift costs */
297 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
298 COSTS_N_INSNS (11), /* HI */
299 COSTS_N_INSNS (11), /* SI */
300 COSTS_N_INSNS (11), /* DI */
301 COSTS_N_INSNS (11)}, /* other */
302 0, /* cost of multiply per each bit set */
303 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
304 COSTS_N_INSNS (25), /* HI */
305 COSTS_N_INSNS (25), /* SI */
306 COSTS_N_INSNS (25), /* DI */
307 COSTS_N_INSNS (25)}, /* other */
308 COSTS_N_INSNS (3), /* cost of movsx */
309 COSTS_N_INSNS (2), /* cost of movzx */
310 8, /* "large" insn */
311 6, /* MOVE_RATIO */
312 6, /* cost for loading QImode using movzbl */
313 {2, 4, 2}, /* cost of loading integer registers
314 in QImode, HImode and SImode.
315 Relative to reg-reg move (2). */
316 {2, 4, 2}, /* cost of storing integer registers */
317 2, /* cost of reg,reg fld/fst */
318 {2, 2, 6}, /* cost of loading fp registers
319 in SFmode, DFmode and XFmode */
320 {4, 4, 6}, /* cost of storing fp registers
321 in SFmode, DFmode and XFmode */
322 8, /* cost of moving MMX register */
323 {8, 8}, /* cost of loading MMX registers
324 in SImode and DImode */
325 {8, 8}, /* cost of storing MMX registers
326 in SImode and DImode */
327 2, /* cost of moving SSE register */
328 {4, 8, 16}, /* cost of loading SSE registers
329 in SImode, DImode and TImode */
330 {4, 8, 16}, /* cost of storing SSE registers
331 in SImode, DImode and TImode */
332 3, /* MMX or SSE register to integer */
333 8, /* size of l1 cache. */
334 8, /* size of l2 cache */
335 0, /* size of prefetch block */
336 0, /* number of parallel prefetches */
337 2, /* Branch cost */
338 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
339 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
340 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
341 COSTS_N_INSNS (1), /* cost of FABS instruction. */
342 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
343 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
344 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
345 DUMMY_STRINGOP_ALGS},
346 {{libcall, {{-1, rep_prefix_4_byte}}},
347 DUMMY_STRINGOP_ALGS},
348 1, /* scalar_stmt_cost. */
349 1, /* scalar load_cost. */
350 1, /* scalar_store_cost. */
351 1, /* vec_stmt_cost. */
352 1, /* vec_to_scalar_cost. */
353 1, /* scalar_to_vec_cost. */
354 1, /* vec_align_load_cost. */
355 2, /* vec_unalign_load_cost. */
356 1, /* vec_store_cost. */
357 3, /* cond_taken_branch_cost. */
358 1, /* cond_not_taken_branch_cost. */
359 };
360
361 static const
362 struct processor_costs pentiumpro_cost = {
363 COSTS_N_INSNS (1), /* cost of an add instruction */
364 COSTS_N_INSNS (1), /* cost of a lea instruction */
365 COSTS_N_INSNS (1), /* variable shift costs */
366 COSTS_N_INSNS (1), /* constant shift costs */
367 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
368 COSTS_N_INSNS (4), /* HI */
369 COSTS_N_INSNS (4), /* SI */
370 COSTS_N_INSNS (4), /* DI */
371 COSTS_N_INSNS (4)}, /* other */
372 0, /* cost of multiply per each bit set */
373 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
374 COSTS_N_INSNS (17), /* HI */
375 COSTS_N_INSNS (17), /* SI */
376 COSTS_N_INSNS (17), /* DI */
377 COSTS_N_INSNS (17)}, /* other */
378 COSTS_N_INSNS (1), /* cost of movsx */
379 COSTS_N_INSNS (1), /* cost of movzx */
380 8, /* "large" insn */
381 6, /* MOVE_RATIO */
382 2, /* cost for loading QImode using movzbl */
383 {4, 4, 4}, /* cost of loading integer registers
384 in QImode, HImode and SImode.
385 Relative to reg-reg move (2). */
386 {2, 2, 2}, /* cost of storing integer registers */
387 2, /* cost of reg,reg fld/fst */
388 {2, 2, 6}, /* cost of loading fp registers
389 in SFmode, DFmode and XFmode */
390 {4, 4, 6}, /* cost of storing fp registers
391 in SFmode, DFmode and XFmode */
392 2, /* cost of moving MMX register */
393 {2, 2}, /* cost of loading MMX registers
394 in SImode and DImode */
395 {2, 2}, /* cost of storing MMX registers
396 in SImode and DImode */
397 2, /* cost of moving SSE register */
398 {2, 2, 8}, /* cost of loading SSE registers
399 in SImode, DImode and TImode */
400 {2, 2, 8}, /* cost of storing SSE registers
401 in SImode, DImode and TImode */
402 3, /* MMX or SSE register to integer */
403 8, /* size of l1 cache. */
404 256, /* size of l2 cache */
405 32, /* size of prefetch block */
406 6, /* number of parallel prefetches */
407 2, /* Branch cost */
408 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
409 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
410 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
411 COSTS_N_INSNS (2), /* cost of FABS instruction. */
412 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
413 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
 414 /* PentiumPro has optimized rep instructions for blocks aligned to 8 bytes (we ensure
 415    the alignment).  For small blocks an inline loop is still a noticeable win; for bigger
 416    blocks either rep movsl or rep movsb is the way to go.  Rep movsb apparently has a
 417    higher startup cost in the CPU, but after 4K the difference is down in the noise.
 418  */
419 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
420 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
421 DUMMY_STRINGOP_ALGS},
422 {{rep_prefix_4_byte, {{1024, unrolled_loop},
423 {8192, rep_prefix_4_byte}, {-1, libcall}}},
424 DUMMY_STRINGOP_ALGS},
425 1, /* scalar_stmt_cost. */
426 1, /* scalar load_cost. */
427 1, /* scalar_store_cost. */
428 1, /* vec_stmt_cost. */
429 1, /* vec_to_scalar_cost. */
430 1, /* scalar_to_vec_cost. */
431 1, /* vec_align_load_cost. */
432 2, /* vec_unalign_load_cost. */
433 1, /* vec_store_cost. */
434 3, /* cond_taken_branch_cost. */
435 1, /* cond_not_taken_branch_cost. */
436 };
437
438 static const
439 struct processor_costs geode_cost = {
440 COSTS_N_INSNS (1), /* cost of an add instruction */
441 COSTS_N_INSNS (1), /* cost of a lea instruction */
442 COSTS_N_INSNS (2), /* variable shift costs */
443 COSTS_N_INSNS (1), /* constant shift costs */
444 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
445 COSTS_N_INSNS (4), /* HI */
446 COSTS_N_INSNS (7), /* SI */
447 COSTS_N_INSNS (7), /* DI */
448 COSTS_N_INSNS (7)}, /* other */
449 0, /* cost of multiply per each bit set */
450 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
451 COSTS_N_INSNS (23), /* HI */
452 COSTS_N_INSNS (39), /* SI */
453 COSTS_N_INSNS (39), /* DI */
454 COSTS_N_INSNS (39)}, /* other */
455 COSTS_N_INSNS (1), /* cost of movsx */
456 COSTS_N_INSNS (1), /* cost of movzx */
457 8, /* "large" insn */
458 4, /* MOVE_RATIO */
459 1, /* cost for loading QImode using movzbl */
460 {1, 1, 1}, /* cost of loading integer registers
461 in QImode, HImode and SImode.
462 Relative to reg-reg move (2). */
463 {1, 1, 1}, /* cost of storing integer registers */
464 1, /* cost of reg,reg fld/fst */
465 {1, 1, 1}, /* cost of loading fp registers
466 in SFmode, DFmode and XFmode */
467 {4, 6, 6}, /* cost of storing fp registers
468 in SFmode, DFmode and XFmode */
469
470 1, /* cost of moving MMX register */
471 {1, 1}, /* cost of loading MMX registers
472 in SImode and DImode */
473 {1, 1}, /* cost of storing MMX registers
474 in SImode and DImode */
475 1, /* cost of moving SSE register */
476 {1, 1, 1}, /* cost of loading SSE registers
477 in SImode, DImode and TImode */
478 {1, 1, 1}, /* cost of storing SSE registers
479 in SImode, DImode and TImode */
480 1, /* MMX or SSE register to integer */
481 64, /* size of l1 cache. */
482 128, /* size of l2 cache. */
483 32, /* size of prefetch block */
484 1, /* number of parallel prefetches */
485 1, /* Branch cost */
486 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
487 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
488 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
489 COSTS_N_INSNS (1), /* cost of FABS instruction. */
490 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
491 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
492 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
493 DUMMY_STRINGOP_ALGS},
494 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
495 DUMMY_STRINGOP_ALGS},
496 1, /* scalar_stmt_cost. */
497 1, /* scalar load_cost. */
498 1, /* scalar_store_cost. */
499 1, /* vec_stmt_cost. */
500 1, /* vec_to_scalar_cost. */
501 1, /* scalar_to_vec_cost. */
502 1, /* vec_align_load_cost. */
503 2, /* vec_unalign_load_cost. */
504 1, /* vec_store_cost. */
505 3, /* cond_taken_branch_cost. */
506 1, /* cond_not_taken_branch_cost. */
507 };
508
509 static const
510 struct processor_costs k6_cost = {
511 COSTS_N_INSNS (1), /* cost of an add instruction */
512 COSTS_N_INSNS (2), /* cost of a lea instruction */
513 COSTS_N_INSNS (1), /* variable shift costs */
514 COSTS_N_INSNS (1), /* constant shift costs */
515 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
516 COSTS_N_INSNS (3), /* HI */
517 COSTS_N_INSNS (3), /* SI */
518 COSTS_N_INSNS (3), /* DI */
519 COSTS_N_INSNS (3)}, /* other */
520 0, /* cost of multiply per each bit set */
521 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
522 COSTS_N_INSNS (18), /* HI */
523 COSTS_N_INSNS (18), /* SI */
524 COSTS_N_INSNS (18), /* DI */
525 COSTS_N_INSNS (18)}, /* other */
526 COSTS_N_INSNS (2), /* cost of movsx */
527 COSTS_N_INSNS (2), /* cost of movzx */
528 8, /* "large" insn */
529 4, /* MOVE_RATIO */
530 3, /* cost for loading QImode using movzbl */
531 {4, 5, 4}, /* cost of loading integer registers
532 in QImode, HImode and SImode.
533 Relative to reg-reg move (2). */
534 {2, 3, 2}, /* cost of storing integer registers */
535 4, /* cost of reg,reg fld/fst */
536 {6, 6, 6}, /* cost of loading fp registers
537 in SFmode, DFmode and XFmode */
538 {4, 4, 4}, /* cost of storing fp registers
539 in SFmode, DFmode and XFmode */
540 2, /* cost of moving MMX register */
541 {2, 2}, /* cost of loading MMX registers
542 in SImode and DImode */
543 {2, 2}, /* cost of storing MMX registers
544 in SImode and DImode */
545 2, /* cost of moving SSE register */
546 {2, 2, 8}, /* cost of loading SSE registers
547 in SImode, DImode and TImode */
548 {2, 2, 8}, /* cost of storing SSE registers
549 in SImode, DImode and TImode */
550 6, /* MMX or SSE register to integer */
551 32, /* size of l1 cache. */
552 32, /* size of l2 cache. Some models
553 have integrated l2 cache, but
554 optimizing for k6 is not important
555 enough to worry about that. */
556 32, /* size of prefetch block */
557 1, /* number of parallel prefetches */
558 1, /* Branch cost */
559 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
560 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
561 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
562 COSTS_N_INSNS (2), /* cost of FABS instruction. */
563 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
564 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
565 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
566 DUMMY_STRINGOP_ALGS},
567 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
568 DUMMY_STRINGOP_ALGS},
569 1, /* scalar_stmt_cost. */
570 1, /* scalar load_cost. */
571 1, /* scalar_store_cost. */
572 1, /* vec_stmt_cost. */
573 1, /* vec_to_scalar_cost. */
574 1, /* scalar_to_vec_cost. */
575 1, /* vec_align_load_cost. */
576 2, /* vec_unalign_load_cost. */
577 1, /* vec_store_cost. */
578 3, /* cond_taken_branch_cost. */
579 1, /* cond_not_taken_branch_cost. */
580 };
581
582 static const
583 struct processor_costs athlon_cost = {
584 COSTS_N_INSNS (1), /* cost of an add instruction */
585 COSTS_N_INSNS (2), /* cost of a lea instruction */
586 COSTS_N_INSNS (1), /* variable shift costs */
587 COSTS_N_INSNS (1), /* constant shift costs */
588 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
589 COSTS_N_INSNS (5), /* HI */
590 COSTS_N_INSNS (5), /* SI */
591 COSTS_N_INSNS (5), /* DI */
592 COSTS_N_INSNS (5)}, /* other */
593 0, /* cost of multiply per each bit set */
594 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
595 COSTS_N_INSNS (26), /* HI */
596 COSTS_N_INSNS (42), /* SI */
597 COSTS_N_INSNS (74), /* DI */
598 COSTS_N_INSNS (74)}, /* other */
599 COSTS_N_INSNS (1), /* cost of movsx */
600 COSTS_N_INSNS (1), /* cost of movzx */
601 8, /* "large" insn */
602 9, /* MOVE_RATIO */
603 4, /* cost for loading QImode using movzbl */
604 {3, 4, 3}, /* cost of loading integer registers
605 in QImode, HImode and SImode.
606 Relative to reg-reg move (2). */
607 {3, 4, 3}, /* cost of storing integer registers */
608 4, /* cost of reg,reg fld/fst */
609 {4, 4, 12}, /* cost of loading fp registers
610 in SFmode, DFmode and XFmode */
611 {6, 6, 8}, /* cost of storing fp registers
612 in SFmode, DFmode and XFmode */
613 2, /* cost of moving MMX register */
614 {4, 4}, /* cost of loading MMX registers
615 in SImode and DImode */
616 {4, 4}, /* cost of storing MMX registers
617 in SImode and DImode */
618 2, /* cost of moving SSE register */
619 {4, 4, 6}, /* cost of loading SSE registers
620 in SImode, DImode and TImode */
621 {4, 4, 5}, /* cost of storing SSE registers
622 in SImode, DImode and TImode */
623 5, /* MMX or SSE register to integer */
624 64, /* size of l1 cache. */
625 256, /* size of l2 cache. */
626 64, /* size of prefetch block */
627 6, /* number of parallel prefetches */
628 5, /* Branch cost */
629 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
630 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
631 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
632 COSTS_N_INSNS (2), /* cost of FABS instruction. */
633 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
634 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 635   /* For some reason, Athlon handles the REP prefix (relative to loops) better
 636      than K8 does.  Alignment becomes important after 8 bytes for memcpy and
 637      128 bytes for memset.  */
638 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
639 DUMMY_STRINGOP_ALGS},
640 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
641 DUMMY_STRINGOP_ALGS},
642 1, /* scalar_stmt_cost. */
643 1, /* scalar load_cost. */
644 1, /* scalar_store_cost. */
645 1, /* vec_stmt_cost. */
646 1, /* vec_to_scalar_cost. */
647 1, /* scalar_to_vec_cost. */
648 1, /* vec_align_load_cost. */
649 2, /* vec_unalign_load_cost. */
650 1, /* vec_store_cost. */
651 3, /* cond_taken_branch_cost. */
652 1, /* cond_not_taken_branch_cost. */
653 };
654
655 static const
656 struct processor_costs k8_cost = {
657 COSTS_N_INSNS (1), /* cost of an add instruction */
658 COSTS_N_INSNS (2), /* cost of a lea instruction */
659 COSTS_N_INSNS (1), /* variable shift costs */
660 COSTS_N_INSNS (1), /* constant shift costs */
661 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
662 COSTS_N_INSNS (4), /* HI */
663 COSTS_N_INSNS (3), /* SI */
664 COSTS_N_INSNS (4), /* DI */
665 COSTS_N_INSNS (5)}, /* other */
666 0, /* cost of multiply per each bit set */
667 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
668 COSTS_N_INSNS (26), /* HI */
669 COSTS_N_INSNS (42), /* SI */
670 COSTS_N_INSNS (74), /* DI */
671 COSTS_N_INSNS (74)}, /* other */
672 COSTS_N_INSNS (1), /* cost of movsx */
673 COSTS_N_INSNS (1), /* cost of movzx */
674 8, /* "large" insn */
675 9, /* MOVE_RATIO */
676 4, /* cost for loading QImode using movzbl */
677 {3, 4, 3}, /* cost of loading integer registers
678 in QImode, HImode and SImode.
679 Relative to reg-reg move (2). */
680 {3, 4, 3}, /* cost of storing integer registers */
681 4, /* cost of reg,reg fld/fst */
682 {4, 4, 12}, /* cost of loading fp registers
683 in SFmode, DFmode and XFmode */
684 {6, 6, 8}, /* cost of storing fp registers
685 in SFmode, DFmode and XFmode */
686 2, /* cost of moving MMX register */
687 {3, 3}, /* cost of loading MMX registers
688 in SImode and DImode */
689 {4, 4}, /* cost of storing MMX registers
690 in SImode and DImode */
691 2, /* cost of moving SSE register */
692 {4, 3, 6}, /* cost of loading SSE registers
693 in SImode, DImode and TImode */
694 {4, 4, 5}, /* cost of storing SSE registers
695 in SImode, DImode and TImode */
696 5, /* MMX or SSE register to integer */
697 64, /* size of l1 cache. */
698 512, /* size of l2 cache. */
699 64, /* size of prefetch block */
 700   /* New AMD processors never drop prefetches; if they cannot be performed
 701      immediately, they are queued.  We set the number of simultaneous prefetches
 702      to a large constant to reflect this (it is probably not a good idea to leave
 703      the number of prefetches completely unlimited, as their execution also takes
 704      some time).  */
705 100, /* number of parallel prefetches */
706 3, /* Branch cost */
707 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
708 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
709 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
710 COSTS_N_INSNS (2), /* cost of FABS instruction. */
711 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
712 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 713   /* K8 has an optimized REP instruction for medium-sized blocks, but for very
 714      small blocks it is better to use a loop.  For large blocks, a libcall can use
 715      nontemporal accesses and beat inline code considerably.  */
716 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
718 {{libcall, {{8, loop}, {24, unrolled_loop},
719 {2048, rep_prefix_4_byte}, {-1, libcall}}},
720 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
721 4, /* scalar_stmt_cost. */
722 2, /* scalar load_cost. */
723 2, /* scalar_store_cost. */
724 5, /* vec_stmt_cost. */
725 0, /* vec_to_scalar_cost. */
726 2, /* scalar_to_vec_cost. */
727 2, /* vec_align_load_cost. */
728 3, /* vec_unalign_load_cost. */
729 3, /* vec_store_cost. */
730 3, /* cond_taken_branch_cost. */
731 2, /* cond_not_taken_branch_cost. */
732 };
733
734 struct processor_costs amdfam10_cost = {
735 COSTS_N_INSNS (1), /* cost of an add instruction */
736 COSTS_N_INSNS (2), /* cost of a lea instruction */
737 COSTS_N_INSNS (1), /* variable shift costs */
738 COSTS_N_INSNS (1), /* constant shift costs */
739 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
740 COSTS_N_INSNS (4), /* HI */
741 COSTS_N_INSNS (3), /* SI */
742 COSTS_N_INSNS (4), /* DI */
743 COSTS_N_INSNS (5)}, /* other */
744 0, /* cost of multiply per each bit set */
745 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
746 COSTS_N_INSNS (35), /* HI */
747 COSTS_N_INSNS (51), /* SI */
748 COSTS_N_INSNS (83), /* DI */
749 COSTS_N_INSNS (83)}, /* other */
750 COSTS_N_INSNS (1), /* cost of movsx */
751 COSTS_N_INSNS (1), /* cost of movzx */
752 8, /* "large" insn */
753 9, /* MOVE_RATIO */
754 4, /* cost for loading QImode using movzbl */
755 {3, 4, 3}, /* cost of loading integer registers
756 in QImode, HImode and SImode.
757 Relative to reg-reg move (2). */
758 {3, 4, 3}, /* cost of storing integer registers */
759 4, /* cost of reg,reg fld/fst */
760 {4, 4, 12}, /* cost of loading fp registers
761 in SFmode, DFmode and XFmode */
762 {6, 6, 8}, /* cost of storing fp registers
763 in SFmode, DFmode and XFmode */
764 2, /* cost of moving MMX register */
765 {3, 3}, /* cost of loading MMX registers
766 in SImode and DImode */
767 {4, 4}, /* cost of storing MMX registers
768 in SImode and DImode */
769 2, /* cost of moving SSE register */
770 {4, 4, 3}, /* cost of loading SSE registers
771 in SImode, DImode and TImode */
772 {4, 4, 5}, /* cost of storing SSE registers
773 in SImode, DImode and TImode */
774 3, /* MMX or SSE register to integer */
775 /* On K8
776 MOVD reg64, xmmreg Double FSTORE 4
777 MOVD reg32, xmmreg Double FSTORE 4
778 On AMDFAM10
779 MOVD reg64, xmmreg Double FADD 3
780 1/1 1/1
781 MOVD reg32, xmmreg Double FADD 3
782 1/1 1/1 */
783 64, /* size of l1 cache. */
784 512, /* size of l2 cache. */
785 64, /* size of prefetch block */
 786   /* New AMD processors never drop prefetches; if they cannot be performed
 787      immediately, they are queued.  We set the number of simultaneous prefetches
 788      to a large constant to reflect this (it is probably not a good idea to leave
 789      the number of prefetches completely unlimited, as their execution also takes
 790      some time).  */
791 100, /* number of parallel prefetches */
792 2, /* Branch cost */
793 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
794 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
795 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
796 COSTS_N_INSNS (2), /* cost of FABS instruction. */
797 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
798 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
799
 800   /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
 801      very small blocks it is better to use a loop.  For large blocks, a libcall
 802      can use nontemporal accesses and beat inline code considerably.  */
803 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
804 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
805 {{libcall, {{8, loop}, {24, unrolled_loop},
806 {2048, rep_prefix_4_byte}, {-1, libcall}}},
807 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
808 4, /* scalar_stmt_cost. */
809 2, /* scalar load_cost. */
810 2, /* scalar_store_cost. */
811 6, /* vec_stmt_cost. */
812 0, /* vec_to_scalar_cost. */
813 2, /* scalar_to_vec_cost. */
814 2, /* vec_align_load_cost. */
815 2, /* vec_unalign_load_cost. */
816 2, /* vec_store_cost. */
817 2, /* cond_taken_branch_cost. */
818 1, /* cond_not_taken_branch_cost. */
819 };
820
821 struct processor_costs bdver1_cost = {
822 COSTS_N_INSNS (1), /* cost of an add instruction */
823 COSTS_N_INSNS (2), /* cost of a lea instruction */
824 COSTS_N_INSNS (1), /* variable shift costs */
825 COSTS_N_INSNS (1), /* constant shift costs */
826 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
827 COSTS_N_INSNS (4), /* HI */
828 COSTS_N_INSNS (3), /* SI */
829 COSTS_N_INSNS (4), /* DI */
830 COSTS_N_INSNS (5)}, /* other */
831 0, /* cost of multiply per each bit set */
832 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
833 COSTS_N_INSNS (35), /* HI */
834 COSTS_N_INSNS (51), /* SI */
835 COSTS_N_INSNS (83), /* DI */
836 COSTS_N_INSNS (83)}, /* other */
837 COSTS_N_INSNS (1), /* cost of movsx */
838 COSTS_N_INSNS (1), /* cost of movzx */
839 8, /* "large" insn */
840 9, /* MOVE_RATIO */
841 4, /* cost for loading QImode using movzbl */
842 {3, 4, 3}, /* cost of loading integer registers
843 in QImode, HImode and SImode.
844 Relative to reg-reg move (2). */
845 {3, 4, 3}, /* cost of storing integer registers */
846 4, /* cost of reg,reg fld/fst */
847 {4, 4, 12}, /* cost of loading fp registers
848 in SFmode, DFmode and XFmode */
849 {6, 6, 8}, /* cost of storing fp registers
850 in SFmode, DFmode and XFmode */
851 2, /* cost of moving MMX register */
852 {3, 3}, /* cost of loading MMX registers
853 in SImode and DImode */
854 {4, 4}, /* cost of storing MMX registers
855 in SImode and DImode */
856 2, /* cost of moving SSE register */
857 {4, 4, 3}, /* cost of loading SSE registers
858 in SImode, DImode and TImode */
859 {4, 4, 5}, /* cost of storing SSE registers
860 in SImode, DImode and TImode */
861 3, /* MMX or SSE register to integer */
862 /* On K8
863 MOVD reg64, xmmreg Double FSTORE 4
864 MOVD reg32, xmmreg Double FSTORE 4
865 On AMDFAM10
866 MOVD reg64, xmmreg Double FADD 3
867 1/1 1/1
868 MOVD reg32, xmmreg Double FADD 3
869 1/1 1/1 */
870 64, /* size of l1 cache. */
871 1024, /* size of l2 cache. */
872 64, /* size of prefetch block */
 873   /* New AMD processors never drop prefetches; if they cannot be performed
 874      immediately, they are queued.  We set the number of simultaneous prefetches
 875      to a large constant to reflect this (it is probably not a good idea to leave
 876      the number of prefetches completely unlimited, as their execution also takes
 877      some time).  */
878 100, /* number of parallel prefetches */
879 2, /* Branch cost */
880 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
881 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
882 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
883 COSTS_N_INSNS (2), /* cost of FABS instruction. */
884 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
885 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
886
 887   /* BDVER1 has an optimized REP instruction for medium-sized blocks, but for
 888      very small blocks it is better to use a loop.  For large blocks, a libcall
 889      can use nontemporal accesses and beat inline code considerably.  */
890 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
891 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
892 {{libcall, {{8, loop}, {24, unrolled_loop},
893 {2048, rep_prefix_4_byte}, {-1, libcall}}},
894 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
895 4, /* scalar_stmt_cost. */
896 2, /* scalar load_cost. */
897 2, /* scalar_store_cost. */
898 6, /* vec_stmt_cost. */
899 0, /* vec_to_scalar_cost. */
900 2, /* scalar_to_vec_cost. */
901 2, /* vec_align_load_cost. */
902 2, /* vec_unalign_load_cost. */
903 2, /* vec_store_cost. */
904 2, /* cond_taken_branch_cost. */
905 1, /* cond_not_taken_branch_cost. */
906 };
907
908 static const
909 struct processor_costs pentium4_cost = {
910 COSTS_N_INSNS (1), /* cost of an add instruction */
911 COSTS_N_INSNS (3), /* cost of a lea instruction */
912 COSTS_N_INSNS (4), /* variable shift costs */
913 COSTS_N_INSNS (4), /* constant shift costs */
914 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
915 COSTS_N_INSNS (15), /* HI */
916 COSTS_N_INSNS (15), /* SI */
917 COSTS_N_INSNS (15), /* DI */
918 COSTS_N_INSNS (15)}, /* other */
919 0, /* cost of multiply per each bit set */
920 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
921 COSTS_N_INSNS (56), /* HI */
922 COSTS_N_INSNS (56), /* SI */
923 COSTS_N_INSNS (56), /* DI */
924 COSTS_N_INSNS (56)}, /* other */
925 COSTS_N_INSNS (1), /* cost of movsx */
926 COSTS_N_INSNS (1), /* cost of movzx */
927 16, /* "large" insn */
928 6, /* MOVE_RATIO */
929 2, /* cost for loading QImode using movzbl */
930 {4, 5, 4}, /* cost of loading integer registers
931 in QImode, HImode and SImode.
932 Relative to reg-reg move (2). */
933 {2, 3, 2}, /* cost of storing integer registers */
934 2, /* cost of reg,reg fld/fst */
935 {2, 2, 6}, /* cost of loading fp registers
936 in SFmode, DFmode and XFmode */
937 {4, 4, 6}, /* cost of storing fp registers
938 in SFmode, DFmode and XFmode */
939 2, /* cost of moving MMX register */
940 {2, 2}, /* cost of loading MMX registers
941 in SImode and DImode */
942 {2, 2}, /* cost of storing MMX registers
943 in SImode and DImode */
944 12, /* cost of moving SSE register */
945 {12, 12, 12}, /* cost of loading SSE registers
946 in SImode, DImode and TImode */
947 {2, 2, 8}, /* cost of storing SSE registers
948 in SImode, DImode and TImode */
949 10, /* MMX or SSE register to integer */
950 8, /* size of l1 cache. */
951 256, /* size of l2 cache. */
952 64, /* size of prefetch block */
953 6, /* number of parallel prefetches */
954 2, /* Branch cost */
955 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
956 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
957 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
958 COSTS_N_INSNS (2), /* cost of FABS instruction. */
959 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
960 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
961 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
962 DUMMY_STRINGOP_ALGS},
963 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
964 {-1, libcall}}},
965 DUMMY_STRINGOP_ALGS},
966 1, /* scalar_stmt_cost. */
967 1, /* scalar load_cost. */
968 1, /* scalar_store_cost. */
969 1, /* vec_stmt_cost. */
970 1, /* vec_to_scalar_cost. */
971 1, /* scalar_to_vec_cost. */
972 1, /* vec_align_load_cost. */
973 2, /* vec_unalign_load_cost. */
974 1, /* vec_store_cost. */
975 3, /* cond_taken_branch_cost. */
976 1, /* cond_not_taken_branch_cost. */
977 };
978
979 static const
980 struct processor_costs nocona_cost = {
981 COSTS_N_INSNS (1), /* cost of an add instruction */
982 COSTS_N_INSNS (1), /* cost of a lea instruction */
983 COSTS_N_INSNS (1), /* variable shift costs */
984 COSTS_N_INSNS (1), /* constant shift costs */
985 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
986 COSTS_N_INSNS (10), /* HI */
987 COSTS_N_INSNS (10), /* SI */
988 COSTS_N_INSNS (10), /* DI */
989 COSTS_N_INSNS (10)}, /* other */
990 0, /* cost of multiply per each bit set */
991 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
992 COSTS_N_INSNS (66), /* HI */
993 COSTS_N_INSNS (66), /* SI */
994 COSTS_N_INSNS (66), /* DI */
995 COSTS_N_INSNS (66)}, /* other */
996 COSTS_N_INSNS (1), /* cost of movsx */
997 COSTS_N_INSNS (1), /* cost of movzx */
998 16, /* "large" insn */
999 17, /* MOVE_RATIO */
1000 4, /* cost for loading QImode using movzbl */
1001 {4, 4, 4}, /* cost of loading integer registers
1002 in QImode, HImode and SImode.
1003 Relative to reg-reg move (2). */
1004 {4, 4, 4}, /* cost of storing integer registers */
1005 3, /* cost of reg,reg fld/fst */
1006 {12, 12, 12}, /* cost of loading fp registers
1007 in SFmode, DFmode and XFmode */
1008 {4, 4, 4}, /* cost of storing fp registers
1009 in SFmode, DFmode and XFmode */
1010 6, /* cost of moving MMX register */
1011 {12, 12}, /* cost of loading MMX registers
1012 in SImode and DImode */
1013 {12, 12}, /* cost of storing MMX registers
1014 in SImode and DImode */
1015 6, /* cost of moving SSE register */
1016 {12, 12, 12}, /* cost of loading SSE registers
1017 in SImode, DImode and TImode */
1018 {12, 12, 12}, /* cost of storing SSE registers
1019 in SImode, DImode and TImode */
1020 8, /* MMX or SSE register to integer */
1021 8, /* size of l1 cache. */
1022 1024, /* size of l2 cache. */
1023 128, /* size of prefetch block */
1024 8, /* number of parallel prefetches */
1025 1, /* Branch cost */
1026 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
1027 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1028 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
1029 COSTS_N_INSNS (3), /* cost of FABS instruction. */
1030 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
1031 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
1032 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
1033 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
1034 {100000, unrolled_loop}, {-1, libcall}}}},
1035 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
1036 {-1, libcall}}},
1037 {libcall, {{24, loop}, {64, unrolled_loop},
1038 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1039 1, /* scalar_stmt_cost. */
1040 1, /* scalar load_cost. */
1041 1, /* scalar_store_cost. */
1042 1, /* vec_stmt_cost. */
1043 1, /* vec_to_scalar_cost. */
1044 1, /* scalar_to_vec_cost. */
1045 1, /* vec_align_load_cost. */
1046 2, /* vec_unalign_load_cost. */
1047 1, /* vec_store_cost. */
1048 3, /* cond_taken_branch_cost. */
1049 1, /* cond_not_taken_branch_cost. */
1050 };
1051
1052 static const
1053 struct processor_costs core2_cost = {
1054 COSTS_N_INSNS (1), /* cost of an add instruction */
1055 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1056 COSTS_N_INSNS (1), /* variable shift costs */
1057 COSTS_N_INSNS (1), /* constant shift costs */
1058 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1059 COSTS_N_INSNS (3), /* HI */
1060 COSTS_N_INSNS (3), /* SI */
1061 COSTS_N_INSNS (3), /* DI */
1062 COSTS_N_INSNS (3)}, /* other */
1063 0, /* cost of multiply per each bit set */
1064 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
1065 COSTS_N_INSNS (22), /* HI */
1066 COSTS_N_INSNS (22), /* SI */
1067 COSTS_N_INSNS (22), /* DI */
1068 COSTS_N_INSNS (22)}, /* other */
1069 COSTS_N_INSNS (1), /* cost of movsx */
1070 COSTS_N_INSNS (1), /* cost of movzx */
1071 8, /* "large" insn */
1072 16, /* MOVE_RATIO */
1073 2, /* cost for loading QImode using movzbl */
1074 {6, 6, 6}, /* cost of loading integer registers
1075 in QImode, HImode and SImode.
1076 Relative to reg-reg move (2). */
1077 {4, 4, 4}, /* cost of storing integer registers */
1078 2, /* cost of reg,reg fld/fst */
1079 {6, 6, 6}, /* cost of loading fp registers
1080 in SFmode, DFmode and XFmode */
1081 {4, 4, 4}, /* cost of storing fp registers
1082 in SFmode, DFmode and XFmode */
1083 2, /* cost of moving MMX register */
1084 {6, 6}, /* cost of loading MMX registers
1085 in SImode and DImode */
1086 {4, 4}, /* cost of storing MMX registers
1087 in SImode and DImode */
1088 2, /* cost of moving SSE register */
1089 {6, 6, 6}, /* cost of loading SSE registers
1090 in SImode, DImode and TImode */
1091 {4, 4, 4}, /* cost of storing SSE registers
1092 in SImode, DImode and TImode */
1093 2, /* MMX or SSE register to integer */
1094 32, /* size of l1 cache. */
1095 2048, /* size of l2 cache. */
1096 128, /* size of prefetch block */
1097 8, /* number of parallel prefetches */
1098 3, /* Branch cost */
1099 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1100 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1101 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1102 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1103 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1104 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1105 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1106 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1107 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1108 {{libcall, {{8, loop}, {15, unrolled_loop},
1109 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1110 {libcall, {{24, loop}, {32, unrolled_loop},
1111 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1112 1, /* scalar_stmt_cost. */
1113 1, /* scalar load_cost. */
1114 1, /* scalar_store_cost. */
1115 1, /* vec_stmt_cost. */
1116 1, /* vec_to_scalar_cost. */
1117 1, /* scalar_to_vec_cost. */
1118 1, /* vec_align_load_cost. */
1119 2, /* vec_unalign_load_cost. */
1120 1, /* vec_store_cost. */
1121 3, /* cond_taken_branch_cost. */
1122 1, /* cond_not_taken_branch_cost. */
1123 };
1124
1125 static const
1126 struct processor_costs atom_cost = {
1127 COSTS_N_INSNS (1), /* cost of an add instruction */
1128 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1129 COSTS_N_INSNS (1), /* variable shift costs */
1130 COSTS_N_INSNS (1), /* constant shift costs */
1131 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1132 COSTS_N_INSNS (4), /* HI */
1133 COSTS_N_INSNS (3), /* SI */
1134 COSTS_N_INSNS (4), /* DI */
1135 COSTS_N_INSNS (2)}, /* other */
1136 0, /* cost of multiply per each bit set */
1137 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1138 COSTS_N_INSNS (26), /* HI */
1139 COSTS_N_INSNS (42), /* SI */
1140 COSTS_N_INSNS (74), /* DI */
1141 COSTS_N_INSNS (74)}, /* other */
1142 COSTS_N_INSNS (1), /* cost of movsx */
1143 COSTS_N_INSNS (1), /* cost of movzx */
1144 8, /* "large" insn */
1145 17, /* MOVE_RATIO */
1146 2, /* cost for loading QImode using movzbl */
1147 {4, 4, 4}, /* cost of loading integer registers
1148 in QImode, HImode and SImode.
1149 Relative to reg-reg move (2). */
1150 {4, 4, 4}, /* cost of storing integer registers */
1151 4, /* cost of reg,reg fld/fst */
1152 {12, 12, 12}, /* cost of loading fp registers
1153 in SFmode, DFmode and XFmode */
1154 {6, 6, 8}, /* cost of storing fp registers
1155 in SFmode, DFmode and XFmode */
1156 2, /* cost of moving MMX register */
1157 {8, 8}, /* cost of loading MMX registers
1158 in SImode and DImode */
1159 {8, 8}, /* cost of storing MMX registers
1160 in SImode and DImode */
1161 2, /* cost of moving SSE register */
1162 {8, 8, 8}, /* cost of loading SSE registers
1163 in SImode, DImode and TImode */
1164 {8, 8, 8}, /* cost of storing SSE registers
1165 in SImode, DImode and TImode */
1166 5, /* MMX or SSE register to integer */
1167 32, /* size of l1 cache. */
1168 256, /* size of l2 cache. */
1169 64, /* size of prefetch block */
1170 6, /* number of parallel prefetches */
1171 3, /* Branch cost */
1172 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1173 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1174 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1175 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1176 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1177 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1178 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1179 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1180 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1181 {{libcall, {{8, loop}, {15, unrolled_loop},
1182 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1183 {libcall, {{24, loop}, {32, unrolled_loop},
1184 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1185 1, /* scalar_stmt_cost. */
1186 1, /* scalar load_cost. */
1187 1, /* scalar_store_cost. */
1188 1, /* vec_stmt_cost. */
1189 1, /* vec_to_scalar_cost. */
1190 1, /* scalar_to_vec_cost. */
1191 1, /* vec_align_load_cost. */
1192 2, /* vec_unalign_load_cost. */
1193 1, /* vec_store_cost. */
1194 3, /* cond_taken_branch_cost. */
1195 1, /* cond_not_taken_branch_cost. */
1196 };
1197
1198 /* Generic64 should produce code tuned for Nocona and K8. */
1199 static const
1200 struct processor_costs generic64_cost = {
1201 COSTS_N_INSNS (1), /* cost of an add instruction */
1202   /* On all chips taken into consideration, lea takes 2 cycles or more.  With
1203      that cost, however, our current implementation of synth_mult results in
1204      the use of unnecessary temporary registers, causing regressions on several
1205      SPECfp benchmarks.  */
1206 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1207 COSTS_N_INSNS (1), /* variable shift costs */
1208 COSTS_N_INSNS (1), /* constant shift costs */
1209 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1210 COSTS_N_INSNS (4), /* HI */
1211 COSTS_N_INSNS (3), /* SI */
1212 COSTS_N_INSNS (4), /* DI */
1213 COSTS_N_INSNS (2)}, /* other */
1214 0, /* cost of multiply per each bit set */
1215 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1216 COSTS_N_INSNS (26), /* HI */
1217 COSTS_N_INSNS (42), /* SI */
1218 COSTS_N_INSNS (74), /* DI */
1219 COSTS_N_INSNS (74)}, /* other */
1220 COSTS_N_INSNS (1), /* cost of movsx */
1221 COSTS_N_INSNS (1), /* cost of movzx */
1222 8, /* "large" insn */
1223 17, /* MOVE_RATIO */
1224 4, /* cost for loading QImode using movzbl */
1225 {4, 4, 4}, /* cost of loading integer registers
1226 in QImode, HImode and SImode.
1227 Relative to reg-reg move (2). */
1228 {4, 4, 4}, /* cost of storing integer registers */
1229 4, /* cost of reg,reg fld/fst */
1230 {12, 12, 12}, /* cost of loading fp registers
1231 in SFmode, DFmode and XFmode */
1232 {6, 6, 8}, /* cost of storing fp registers
1233 in SFmode, DFmode and XFmode */
1234 2, /* cost of moving MMX register */
1235 {8, 8}, /* cost of loading MMX registers
1236 in SImode and DImode */
1237 {8, 8}, /* cost of storing MMX registers
1238 in SImode and DImode */
1239 2, /* cost of moving SSE register */
1240 {8, 8, 8}, /* cost of loading SSE registers
1241 in SImode, DImode and TImode */
1242 {8, 8, 8}, /* cost of storing SSE registers
1243 in SImode, DImode and TImode */
1244 5, /* MMX or SSE register to integer */
1245 32, /* size of l1 cache. */
1246 512, /* size of l2 cache. */
1247 64, /* size of prefetch block */
1248 6, /* number of parallel prefetches */
1249   /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1250      value is increased to the perhaps more appropriate value of 5.  */
1251 3, /* Branch cost */
1252 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1253 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1254 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1255 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1256 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1257 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1258 {DUMMY_STRINGOP_ALGS,
1259 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1260 {DUMMY_STRINGOP_ALGS,
1261 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1262 1, /* scalar_stmt_cost. */
1263 1, /* scalar load_cost. */
1264 1, /* scalar_store_cost. */
1265 1, /* vec_stmt_cost. */
1266 1, /* vec_to_scalar_cost. */
1267 1, /* scalar_to_vec_cost. */
1268 1, /* vec_align_load_cost. */
1269 2, /* vec_unalign_load_cost. */
1270 1, /* vec_store_cost. */
1271 3, /* cond_taken_branch_cost. */
1272 1, /* cond_not_taken_branch_cost. */
1273 };
1274
1275 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1276 static const
1277 struct processor_costs generic32_cost = {
1278 COSTS_N_INSNS (1), /* cost of an add instruction */
1279 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1280 COSTS_N_INSNS (1), /* variable shift costs */
1281 COSTS_N_INSNS (1), /* constant shift costs */
1282 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1283 COSTS_N_INSNS (4), /* HI */
1284 COSTS_N_INSNS (3), /* SI */
1285 COSTS_N_INSNS (4), /* DI */
1286 COSTS_N_INSNS (2)}, /* other */
1287 0, /* cost of multiply per each bit set */
1288 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1289 COSTS_N_INSNS (26), /* HI */
1290 COSTS_N_INSNS (42), /* SI */
1291 COSTS_N_INSNS (74), /* DI */
1292 COSTS_N_INSNS (74)}, /* other */
1293 COSTS_N_INSNS (1), /* cost of movsx */
1294 COSTS_N_INSNS (1), /* cost of movzx */
1295 8, /* "large" insn */
1296 17, /* MOVE_RATIO */
1297 4, /* cost for loading QImode using movzbl */
1298 {4, 4, 4}, /* cost of loading integer registers
1299 in QImode, HImode and SImode.
1300 Relative to reg-reg move (2). */
1301 {4, 4, 4}, /* cost of storing integer registers */
1302 4, /* cost of reg,reg fld/fst */
1303 {12, 12, 12}, /* cost of loading fp registers
1304 in SFmode, DFmode and XFmode */
1305 {6, 6, 8}, /* cost of storing fp registers
1306 in SFmode, DFmode and XFmode */
1307 2, /* cost of moving MMX register */
1308 {8, 8}, /* cost of loading MMX registers
1309 in SImode and DImode */
1310 {8, 8}, /* cost of storing MMX registers
1311 in SImode and DImode */
1312 2, /* cost of moving SSE register */
1313 {8, 8, 8}, /* cost of loading SSE registers
1314 in SImode, DImode and TImode */
1315 {8, 8, 8}, /* cost of storing SSE registers
1316 in SImode, DImode and TImode */
1317 5, /* MMX or SSE register to integer */
1318 32, /* size of l1 cache. */
1319 256, /* size of l2 cache. */
1320 64, /* size of prefetch block */
1321 6, /* number of parallel prefetches */
1322 3, /* Branch cost */
1323 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1324 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1325 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1326 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1327 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1328 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1329 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1330 DUMMY_STRINGOP_ALGS},
1331 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1332 DUMMY_STRINGOP_ALGS},
1333 1, /* scalar_stmt_cost. */
1334 1, /* scalar load_cost. */
1335 1, /* scalar_store_cost. */
1336 1, /* vec_stmt_cost. */
1337 1, /* vec_to_scalar_cost. */
1338 1, /* scalar_to_vec_cost. */
1339 1, /* vec_align_load_cost. */
1340 2, /* vec_unalign_load_cost. */
1341 1, /* vec_store_cost. */
1342 3, /* cond_taken_branch_cost. */
1343 1, /* cond_not_taken_branch_cost. */
1344 };
1345
1346 const struct processor_costs *ix86_cost = &pentium_cost;
1347
1348 /* Processor feature/optimization bitmasks. */
1349 #define m_386 (1<<PROCESSOR_I386)
1350 #define m_486 (1<<PROCESSOR_I486)
1351 #define m_PENT (1<<PROCESSOR_PENTIUM)
1352 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1353 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1354 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1355 #define m_CORE2 (1<<PROCESSOR_CORE2)
1356 #define m_ATOM (1<<PROCESSOR_ATOM)
1357
1358 #define m_GEODE (1<<PROCESSOR_GEODE)
1359 #define m_K6 (1<<PROCESSOR_K6)
1360 #define m_K6_GEODE (m_K6 | m_GEODE)
1361 #define m_K8 (1<<PROCESSOR_K8)
1362 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1363 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1364 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1365 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1366 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)
1367
1368 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1369 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1370
1371 /* Generic instruction choice should be a common subset of the supported CPUs
1372    (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
1373 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
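/* A sketch of how these masks are consumed (variable names as used later in
   this file's option handling; shown only for orientation): each entry of
   initial_ix86_tune_features below is a bitmask of the m_* processor bits,
   and the per-feature booleans are derived from it for the CPU selected by
   -mtune, roughly:

     ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   so, e.g., X86_TUNE_ZERO_EXTEND_WITH_AND is enabled only when tuning for
   i486 or Pentium (m_486 | m_PENT).  */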
1374
1375 /* Feature tests against the various tunings. */
1376 unsigned char ix86_tune_features[X86_TUNE_LAST];
1377
1378 /* Feature tests against the various tunings used to create ix86_tune_features
1379 based on the processor mask. */
1380 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1381   /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1382      negatively, so enabling it for Generic64 seems like a good code-size
1383      tradeoff.  We can't enable it for 32-bit generic because it does not
1384      work well with PPro-based chips.  */
1385 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1386
1387 /* X86_TUNE_PUSH_MEMORY */
1388 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1389 | m_NOCONA | m_CORE2 | m_GENERIC,
1390
1391 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1392 m_486 | m_PENT,
1393
1394 /* X86_TUNE_UNROLL_STRLEN */
1395 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1396 | m_CORE2 | m_GENERIC,
1397
1398 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1399 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1400
1401   /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were added to P4 based
1402      on simulation results, but after P4 was made no performance benefit
1403      was observed from them, and they also increase code size.
1404      As a result, icc never generates branch hints.  */
1405 0,
1406
1407 /* X86_TUNE_DOUBLE_WITH_ADD */
1408 ~m_386,
1409
1410 /* X86_TUNE_USE_SAHF */
1411 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
1412 | m_NOCONA | m_CORE2 | m_GENERIC,
1413
1414 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1415 partial dependencies. */
1416 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1417 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1418
1419 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1420 register stalls on the Generic32 compilation setting as well. However,
1421 in the current implementation the partial register stalls are not eliminated
1422 very well - they can be introduced via subregs synthesized by combine
1423 and can happen in caller/callee saving sequences. Because this option
1424 pays back little on PPro based chips and is in conflict with the partial reg
1425 dependencies used by Athlon/P4 based chips, it is better to leave it off
1426 for generic32 for now. */
1427 m_PPRO,
1428
1429 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1430 m_CORE2 | m_GENERIC,
1431
1432 /* X86_TUNE_USE_HIMODE_FIOP */
1433 m_386 | m_486 | m_K6_GEODE,
1434
1435 /* X86_TUNE_USE_SIMODE_FIOP */
1436 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1437
1438 /* X86_TUNE_USE_MOV0 */
1439 m_K6,
1440
1441 /* X86_TUNE_USE_CLTD */
1442 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1443
1444 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1445 m_PENT4,
1446
1447 /* X86_TUNE_SPLIT_LONG_MOVES */
1448 m_PPRO,
1449
1450 /* X86_TUNE_READ_MODIFY_WRITE */
1451 ~m_PENT,
1452
1453 /* X86_TUNE_READ_MODIFY */
1454 ~(m_PENT | m_PPRO),
1455
1456 /* X86_TUNE_PROMOTE_QIMODE */
1457 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1458 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1459
1460 /* X86_TUNE_FAST_PREFIX */
1461 ~(m_PENT | m_486 | m_386),
1462
1463 /* X86_TUNE_SINGLE_STRINGOP */
1464 m_386 | m_PENT4 | m_NOCONA,
1465
1466 /* X86_TUNE_QIMODE_MATH */
1467 ~0,
1468
1469 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1470 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1471 might be considered for Generic32 if our scheme for avoiding partial
1472 stalls was more effective. */
1473 ~m_PPRO,
1474
1475 /* X86_TUNE_PROMOTE_QI_REGS */
1476 0,
1477
1478 /* X86_TUNE_PROMOTE_HI_REGS */
1479 m_PPRO,
1480
1481 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1482 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1483 | m_CORE2 | m_GENERIC,
1484
1485 /* X86_TUNE_ADD_ESP_8 */
1486 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1487 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1488
1489 /* X86_TUNE_SUB_ESP_4 */
1490 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1491 | m_GENERIC,
1492
1493 /* X86_TUNE_SUB_ESP_8 */
1494 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1495 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1496
1497 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1498 for DFmode copies */
1499 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1500 | m_GENERIC | m_GEODE),
1501
1502 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1503 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1504
1505 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1506 conflict here between PPro/Pentium4 based chips that treat 128bit
1507 SSE registers as single units and K8 based chips that split SSE
1508 registers into two 64bit halves. This knob promotes all store destinations
1509 to 128bit to allow register renaming on 128bit SSE units, but usually
1510 results in one extra micro-op on 64bit SSE units. Experimental results
1511 show that disabling this option on P4 brings over a 20% SPECfp regression,
1512 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1513 masked by careful scheduling of moves. */
1514 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1515 | m_AMDFAM10 | m_BDVER1,
1516
1517 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1518 m_AMDFAM10 | m_BDVER1,
1519
1520 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1521 m_BDVER1,
1522
1523 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1524 m_BDVER1,
1525
1526 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1527 are resolved on SSE register parts instead of whole registers, so we may
1528 maintain just the lower part of scalar values in the proper format, leaving
1529 the upper part undefined. */
1530 m_ATHLON_K8,
1531
1532 /* X86_TUNE_SSE_TYPELESS_STORES */
1533 m_AMD_MULTIPLE,
1534
1535 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1536 m_PPRO | m_PENT4 | m_NOCONA,
1537
1538 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1539 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1540
1541 /* X86_TUNE_PROLOGUE_USING_MOVE */
1542 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1543
1544 /* X86_TUNE_EPILOGUE_USING_MOVE */
1545 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1546
1547 /* X86_TUNE_SHIFT1 */
1548 ~m_486,
1549
1550 /* X86_TUNE_USE_FFREEP */
1551 m_AMD_MULTIPLE,
1552
1553 /* X86_TUNE_INTER_UNIT_MOVES */
1554 ~(m_AMD_MULTIPLE | m_GENERIC),
1555
1556 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1557 ~(m_AMDFAM10 | m_BDVER1),
1558
1559 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1560 than 4 branch instructions in the 16 byte window. */
1561 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1562 | m_GENERIC,
1563
1564 /* X86_TUNE_SCHEDULE */
1565 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1566 | m_GENERIC,
1567
1568 /* X86_TUNE_USE_BT */
1569 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1570
1571 /* X86_TUNE_USE_INCDEC */
1572 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1573
1574 /* X86_TUNE_PAD_RETURNS */
1575 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1576
1577 /* X86_TUNE_EXT_80387_CONSTANTS */
1578 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1579 | m_CORE2 | m_GENERIC,
1580
1581 /* X86_TUNE_SHORTEN_X87_SSE */
1582 ~m_K8,
1583
1584 /* X86_TUNE_AVOID_VECTOR_DECODE */
1585 m_K8 | m_GENERIC64,
1586
1587 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1588 and SImode multiplies, but the 386 and 486 do HImode multiplies faster. */
1589 ~(m_386 | m_486),
1590
1591 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1592 vector path on AMD machines. */
1593 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1594
1595 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1596 machines. */
1597 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1598
1599 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1600 than a MOV. */
1601 m_PENT,
1602
1603 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1604 but one byte longer. */
1605 m_PENT,
1606
1607 /* X86_TUNE_NOT_VECTORMODE: On the AMD K6, NOT is vector decoded with a memory
1608 operand that cannot be represented using a modRM byte. The XOR
1609 replacement is long decoded, so this split helps here as well. */
1610 m_K6,
1611
1612 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1613 from FP to FP. */
1614 m_AMDFAM10 | m_GENERIC,
1615
1616 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1617 from integer to FP. */
1618 m_AMDFAM10,
1619
1620 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1621 with a subsequent conditional jump instruction into a single
1622 compare-and-branch uop. */
1623 m_CORE2 | m_BDVER1,
1624
1625 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1626 will impact LEA instruction selection. */
1627 m_ATOM,
1628 };
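
/* Illustrative sketch (an editorial addition, not part of the original
   source; the helper name ix86_init_tune_features_sketch is hypothetical):
   each entry above is a bitmask over processors, and the per-feature
   booleans in ix86_tune_features are presumably derived by testing the bit
   of the processor being tuned for, mirroring the ix86_arch_features loop
   in override_options further below.  */

static void
ix86_init_tune_features_sketch (enum processor_type tune)
{
  unsigned int tune_mask = 1u << tune;
  unsigned int i;

  /* One boolean per X86_TUNE_* knob, true iff the tuned-for CPU's bit
     is set in the corresponding mask above.  */
  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & tune_mask);
}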
1629
1630 /* Feature tests against the various architecture variations. */
1631 unsigned char ix86_arch_features[X86_ARCH_LAST];
1632
1633 /* Feature tests against the various architecture variations, used to create
1634 ix86_arch_features based on the processor mask. */
1635 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1636 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1637 ~(m_386 | m_486 | m_PENT | m_K6),
1638
1639 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1640 ~m_386,
1641
1642 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1643 ~(m_386 | m_486),
1644
1645 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1646 ~m_386,
1647
1648 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1649 ~m_386,
1650 };
1651
1652 static const unsigned int x86_accumulate_outgoing_args
1653 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1654 | m_GENERIC;
1655
1656 static const unsigned int x86_arch_always_fancy_math_387
1657 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1658 | m_NOCONA | m_CORE2 | m_GENERIC;
1659
1660 static enum stringop_alg stringop_alg = no_stringop;
1661
1662 /* In case the average insn count for a single function invocation is
1663 lower than this constant, emit fast (but longer) prologue and
1664 epilogue code. */
1665 #define FAST_PROLOGUE_INSN_COUNT 20
1666
1667 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1668 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1669 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1670 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1671
1672 /* Array of the smallest class containing reg number REGNO, indexed by
1673 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1674
1675 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1676 {
1677 /* ax, dx, cx, bx */
1678 AREG, DREG, CREG, BREG,
1679 /* si, di, bp, sp */
1680 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1681 /* FP registers */
1682 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1683 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1684 /* arg pointer */
1685 NON_Q_REGS,
1686 /* flags, fpsr, fpcr, frame */
1687 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1688 /* SSE registers */
1689 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1690 SSE_REGS, SSE_REGS,
1691 /* MMX registers */
1692 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1693 MMX_REGS, MMX_REGS,
1694 /* REX registers */
1695 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1696 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1697 /* SSE REX registers */
1698 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1699 SSE_REGS, SSE_REGS,
1700 };
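
/* Illustrative note (an editorial addition): REGNO_REG_CLASS in i386.h is
   expected to reduce to a plain lookup in the table above, along the lines
   of the hypothetical sketch below, so that e.g. regclass_map[0] yields
   AREG for %eax and regclass_map[4] yields SIREG for %esi.

     #define REGNO_REG_CLASS(REGNO)  (regclass_map[(REGNO)])
*/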
1701
1702 /* The "default" register map used in 32bit mode. */
1703
1704 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1705 {
1706 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1707 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1708 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1709 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1710 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1711 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1712 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1713 };
1714
1715 /* The "default" register map used in 64bit mode. */
1716
1717 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1718 {
1719 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1720 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1721 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1722 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1723 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1724 8,9,10,11,12,13,14,15, /* extended integer registers */
1725 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1726 };
1727
1728 /* Define the register numbers to be used in Dwarf debugging information.
1729 The SVR4 reference port C compiler uses the following register numbers
1730 in its Dwarf output code:
1731 0 for %eax (gcc regno = 0)
1732 1 for %ecx (gcc regno = 2)
1733 2 for %edx (gcc regno = 1)
1734 3 for %ebx (gcc regno = 3)
1735 4 for %esp (gcc regno = 7)
1736 5 for %ebp (gcc regno = 6)
1737 6 for %esi (gcc regno = 4)
1738 7 for %edi (gcc regno = 5)
1739 The following three DWARF register numbers are never generated by
1740 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1741 believes these numbers have these meanings.
1742 8 for %eip (no gcc equivalent)
1743 9 for %eflags (gcc regno = 17)
1744 10 for %trapno (no gcc equivalent)
1745 It is not at all clear how we should number the FP stack registers
1746 for the x86 architecture. If the version of SDB on x86/svr4 were
1747 a bit less brain dead with respect to floating-point then we would
1748 have a precedent to follow with respect to DWARF register numbers
1749 for x86 FP registers, but the SDB on x86/svr4 is so completely
1750 broken with respect to FP registers that it is hardly worth thinking
1751 of it as something to strive for compatibility with.
1752 The version of x86/svr4 SDB I have at the moment does (partially)
1753 seem to believe that DWARF register number 11 is associated with
1754 the x86 register %st(0), but that's about all. Higher DWARF
1755 register numbers don't seem to be associated with anything in
1756 particular, and even for DWARF regno 11, SDB only seems to under-
1757 stand that it should say that a variable lives in %st(0) (when
1758 asked via an `=' command) if we said it was in DWARF regno 11,
1759 but SDB still prints garbage when asked for the value of the
1760 variable in question (via a `/' command).
1761 (Also note that the labels SDB prints for various FP stack regs
1762 when doing an `x' command are all wrong.)
1763 Note that these problems generally don't affect the native SVR4
1764 C compiler because it doesn't allow the use of -O with -g and
1765 because when it is *not* optimizing, it allocates a memory
1766 location for each floating-point variable, and the memory
1767 location is what gets described in the DWARF AT_location
1768 attribute for the variable in question.
1769 Regardless of the severe mental illness of the x86/svr4 SDB, we
1770 do something sensible here and we use the following DWARF
1771 register numbers. Note that these are all stack-top-relative
1772 numbers.
1773 11 for %st(0) (gcc regno = 8)
1774 12 for %st(1) (gcc regno = 9)
1775 13 for %st(2) (gcc regno = 10)
1776 14 for %st(3) (gcc regno = 11)
1777 15 for %st(4) (gcc regno = 12)
1778 16 for %st(5) (gcc regno = 13)
1779 17 for %st(6) (gcc regno = 14)
1780 18 for %st(7) (gcc regno = 15)
1781 */
1782 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1783 {
1784 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1785 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1786 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1787 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1788 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1789 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1790 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1791 };
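
/* Illustrative note (an editorial addition, derived from the table and the
   comment above): the map is indexed by the gcc register number and yields
   the DWARF register number, e.g. svr4_dbx_register_map[2] == 1 for %ecx
   and svr4_dbx_register_map[8] == 11 for %st(0); entries of -1 mark
   registers that have no DWARF number in this encoding.  */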
1792
1793 /* Test and compare insns in i386.md store the information needed to
1794 generate branch and scc insns here. */
1795
1796 rtx ix86_compare_op0 = NULL_RTX;
1797 rtx ix86_compare_op1 = NULL_RTX;
1798
1799 /* Define parameter passing and return registers. */
1800
1801 static int const x86_64_int_parameter_registers[6] =
1802 {
1803 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1804 };
1805
1806 static int const x86_64_ms_abi_int_parameter_registers[4] =
1807 {
1808 CX_REG, DX_REG, R8_REG, R9_REG
1809 };
1810
1811 static int const x86_64_int_return_registers[4] =
1812 {
1813 AX_REG, DX_REG, DI_REG, SI_REG
1814 };
1815
1816 /* Define the structure for the machine field in struct function. */
1817
1818 struct GTY(()) stack_local_entry {
1819 unsigned short mode;
1820 unsigned short n;
1821 rtx rtl;
1822 struct stack_local_entry *next;
1823 };
1824
1825 /* Structure describing stack frame layout.
1826 Stack grows downward:
1827
1828 [arguments]
1829 <- ARG_POINTER
1830 saved pc
1831
1832 saved frame pointer if frame_pointer_needed
1833 <- HARD_FRAME_POINTER
1834 [saved regs]
1835
1836 [padding0]
1837
1838 [saved SSE regs]
1839
1840 [padding1] \
1841 )
1842 [va_arg registers] (
1843 > to_allocate <- FRAME_POINTER
1844 [frame] (
1845 )
1846 [padding2] /
1847 */
1848 struct ix86_frame
1849 {
1850 int padding0;
1851 int nsseregs;
1852 int nregs;
1853 int padding1;
1854 int va_arg_size;
1855 int red_zone_size;
1856 HOST_WIDE_INT frame;
1857 int padding2;
1858 int outgoing_arguments_size;
1859
1860 HOST_WIDE_INT to_allocate;
1861 /* The offsets relative to ARG_POINTER. */
1862 HOST_WIDE_INT frame_pointer_offset;
1863 HOST_WIDE_INT hard_frame_pointer_offset;
1864 HOST_WIDE_INT stack_pointer_offset;
1865
1866 /* When save_regs_using_mov is set, emit prologue using
1867 move instead of push instructions. */
1868 bool save_regs_using_mov;
1869 };
1870
1871 /* Code model option. */
1872 enum cmodel ix86_cmodel;
1873 /* Asm dialect. */
1874 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1875 /* TLS dialects. */
1876 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1877
1878 /* Which unit we are generating floating point math for. */
1879 enum fpmath_unit ix86_fpmath;
1880
1881 /* Which cpu are we scheduling for. */
1882 enum attr_cpu ix86_schedule;
1883
1884 /* Which cpu are we optimizing for. */
1885 enum processor_type ix86_tune;
1886
1887 /* Which instruction set architecture to use. */
1888 enum processor_type ix86_arch;
1889
1890 /* True if the SSE prefetch instruction is not a NOP. */
1891 int x86_prefetch_sse;
1892
1893 /* ix86_regparm_string as a number */
1894 static int ix86_regparm;
1895
1896 /* -mstackrealign option */
1897 extern int ix86_force_align_arg_pointer;
1898 static const char ix86_force_align_arg_pointer_string[]
1899 = "force_align_arg_pointer";
1900
1901 static rtx (*ix86_gen_leave) (void);
1902 static rtx (*ix86_gen_pop1) (rtx);
1903 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1904 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1905 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1906 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1907 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1908 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1909
1910 /* Preferred alignment for stack boundary in bits. */
1911 unsigned int ix86_preferred_stack_boundary;
1912
1913 /* Alignment for incoming stack boundary in bits specified at
1914 command line. */
1915 static unsigned int ix86_user_incoming_stack_boundary;
1916
1917 /* Default alignment for incoming stack boundary in bits. */
1918 static unsigned int ix86_default_incoming_stack_boundary;
1919
1920 /* Alignment for incoming stack boundary in bits. */
1921 unsigned int ix86_incoming_stack_boundary;
1922
1923 /* The ABI used by the target. */
1924 enum calling_abi ix86_abi;
1925
1926 /* Values 1-5: see jump.c */
1927 int ix86_branch_cost;
1928
1929 /* Calling abi specific va_list type nodes. */
1930 static GTY(()) tree sysv_va_list_type_node;
1931 static GTY(()) tree ms_va_list_type_node;
1932
1933 /* Variables which are this size or smaller are put in the data/bss
1934 or ldata/lbss sections. */
1935
1936 int ix86_section_threshold = 65536;
1937
1938 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1939 char internal_label_prefix[16];
1940 int internal_label_prefix_len;
1941
1942 /* Fence to use after loop using movnt. */
1943 tree x86_mfence;
1944
1945 /* Register class used for passing a given 64bit part of the argument.
1946 These represent classes as documented by the psABI, with the exception of
1947 the SSESF and SSEDF classes, which are basically the SSE class, except that
1948 gcc will use an SF or DFmode move instead of DImode to avoid reformatting penalties.
1949
1950 Similarly, we play games with INTEGERSI_CLASS to use cheaper SImode moves
1951 whenever possible (the upper half does contain padding). */
1952 enum x86_64_reg_class
1953 {
1954 X86_64_NO_CLASS,
1955 X86_64_INTEGER_CLASS,
1956 X86_64_INTEGERSI_CLASS,
1957 X86_64_SSE_CLASS,
1958 X86_64_SSESF_CLASS,
1959 X86_64_SSEDF_CLASS,
1960 X86_64_SSEUP_CLASS,
1961 X86_64_X87_CLASS,
1962 X86_64_X87UP_CLASS,
1963 X86_64_COMPLEX_X87_CLASS,
1964 X86_64_MEMORY_CLASS
1965 };
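
/* Illustrative note (an editorial addition, based on the comment above the
   enum): a single float occupying an eightbyte is classified as
   X86_64_SSESF_CLASS rather than plain X86_64_SSE_CLASS, so the value is
   moved as SFmode instead of DImode and reformatting penalties are avoided;
   X86_64_INTEGERSI_CLASS plays the analogous role on the integer side,
   letting the cheaper SImode move be used when the upper half is padding.  */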
1966
1967 #define MAX_CLASSES 4
1968
1969 /* Table of constants used by fldpi, fldln2, etc.... */
1970 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1971 static bool ext_80387_constants_init = 0;
1972
1973 \f
1974 static struct machine_function * ix86_init_machine_status (void);
1975 static rtx ix86_function_value (const_tree, const_tree, bool);
1976 static bool ix86_function_value_regno_p (const unsigned int);
1977 static rtx ix86_static_chain (const_tree, bool);
1978 static int ix86_function_regparm (const_tree, const_tree);
1979 static void ix86_compute_frame_layout (struct ix86_frame *);
1980 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1981 rtx, rtx, int);
1982 static void ix86_add_new_builtins (int);
1983 static rtx ix86_expand_vec_perm_builtin (tree);
1984 static tree ix86_canonical_va_list_type (tree);
1985
1986 enum ix86_function_specific_strings
1987 {
1988 IX86_FUNCTION_SPECIFIC_ARCH,
1989 IX86_FUNCTION_SPECIFIC_TUNE,
1990 IX86_FUNCTION_SPECIFIC_FPMATH,
1991 IX86_FUNCTION_SPECIFIC_MAX
1992 };
1993
1994 static char *ix86_target_string (int, int, const char *, const char *,
1995 const char *, bool);
1996 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1997 static void ix86_function_specific_save (struct cl_target_option *);
1998 static void ix86_function_specific_restore (struct cl_target_option *);
1999 static void ix86_function_specific_print (FILE *, int,
2000 struct cl_target_option *);
2001 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2002 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2003 static bool ix86_can_inline_p (tree, tree);
2004 static void ix86_set_current_function (tree);
2005 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2006
2007 static enum calling_abi ix86_function_abi (const_tree);
2008
2009 \f
2010 #ifndef SUBTARGET32_DEFAULT_CPU
2011 #define SUBTARGET32_DEFAULT_CPU "i386"
2012 #endif
2013
2014 /* The svr4 ABI for the i386 says that records and unions are returned
2015 in memory. */
2016 #ifndef DEFAULT_PCC_STRUCT_RETURN
2017 #define DEFAULT_PCC_STRUCT_RETURN 1
2018 #endif
2019
2020 /* Whether -mtune= or -march= were specified */
2021 static int ix86_tune_defaulted;
2022 static int ix86_arch_specified;
2023
2024 /* Bit flags that specify the ISA we are compiling for. */
2025 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
2026
2027 /* A mask of ix86_isa_flags that includes bit X if X
2028 was set or cleared on the command line. */
2029 static int ix86_isa_flags_explicit;
2030
2031 /* Define a set of ISAs which are available when a given ISA is
2032 enabled. MMX and SSE ISAs are handled separately. */
2033
2034 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2035 #define OPTION_MASK_ISA_3DNOW_SET \
2036 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2037
2038 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2039 #define OPTION_MASK_ISA_SSE2_SET \
2040 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2041 #define OPTION_MASK_ISA_SSE3_SET \
2042 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2043 #define OPTION_MASK_ISA_SSSE3_SET \
2044 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2045 #define OPTION_MASK_ISA_SSE4_1_SET \
2046 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2047 #define OPTION_MASK_ISA_SSE4_2_SET \
2048 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2049 #define OPTION_MASK_ISA_AVX_SET \
2050 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2051 #define OPTION_MASK_ISA_FMA_SET \
2052 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2053
2054 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
2055 as -msse4.2. */
2056 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2057
2058 #define OPTION_MASK_ISA_SSE4A_SET \
2059 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2060 #define OPTION_MASK_ISA_FMA4_SET \
2061 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2062 | OPTION_MASK_ISA_AVX_SET)
2063 #define OPTION_MASK_ISA_XOP_SET \
2064 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2065 #define OPTION_MASK_ISA_LWP_SET \
2066 OPTION_MASK_ISA_LWP
2067
2068 /* AES and PCLMUL need SSE2 because they use xmm registers */
2069 #define OPTION_MASK_ISA_AES_SET \
2070 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2071 #define OPTION_MASK_ISA_PCLMUL_SET \
2072 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2073
2074 #define OPTION_MASK_ISA_ABM_SET \
2075 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2076
2077 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2078 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2079 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2080 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2081 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
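
/* Worked example (an editorial addition, obtained by expanding the macros
   above): enabling a later ISA pulls in everything it depends on, so the
   OPT_msse4_2 handler further below effectively executes

     ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1
                       | OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
                       | OPTION_MASK_ISA_SSE2  | OPTION_MASK_ISA_SSE;

   once OPTION_MASK_ISA_SSE4_2_SET is fully expanded.  */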
2082
2083 /* Define a set of ISAs which aren't available when a given ISA is
2084 disabled. MMX and SSE ISAs are handled separately. */
2085
2086 #define OPTION_MASK_ISA_MMX_UNSET \
2087 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2088 #define OPTION_MASK_ISA_3DNOW_UNSET \
2089 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2090 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2091
2092 #define OPTION_MASK_ISA_SSE_UNSET \
2093 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2094 #define OPTION_MASK_ISA_SSE2_UNSET \
2095 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2096 #define OPTION_MASK_ISA_SSE3_UNSET \
2097 (OPTION_MASK_ISA_SSE3 \
2098 | OPTION_MASK_ISA_SSSE3_UNSET \
2099 | OPTION_MASK_ISA_SSE4A_UNSET )
2100 #define OPTION_MASK_ISA_SSSE3_UNSET \
2101 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2102 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2103 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2104 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2105 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2106 #define OPTION_MASK_ISA_AVX_UNSET \
2107 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2108 | OPTION_MASK_ISA_FMA4_UNSET)
2109 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2110
2111 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2112 as -mno-sse4.1. */
2113 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2114
2115 #define OPTION_MASK_ISA_SSE4A_UNSET \
2116 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2117
2118 #define OPTION_MASK_ISA_FMA4_UNSET \
2119 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2120 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2121 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2122
2123 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2124 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2125 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2126 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2127 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2128 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2129 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2130 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2131
2132 /* Vectorization library interface and handlers. */
2133 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2134 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2135 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2136
2137 /* Processor target table, indexed by processor number */
2138 struct ptt
2139 {
2140 const struct processor_costs *cost; /* Processor costs */
2141 const int align_loop; /* Default alignments. */
2142 const int align_loop_max_skip;
2143 const int align_jump;
2144 const int align_jump_max_skip;
2145 const int align_func;
2146 };
2147
2148 static const struct ptt processor_target_table[PROCESSOR_max] =
2149 {
2150 {&i386_cost, 4, 3, 4, 3, 4},
2151 {&i486_cost, 16, 15, 16, 15, 16},
2152 {&pentium_cost, 16, 7, 16, 7, 16},
2153 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2154 {&geode_cost, 0, 0, 0, 0, 0},
2155 {&k6_cost, 32, 7, 32, 7, 32},
2156 {&athlon_cost, 16, 7, 16, 7, 16},
2157 {&pentium4_cost, 0, 0, 0, 0, 0},
2158 {&k8_cost, 16, 7, 16, 7, 16},
2159 {&nocona_cost, 0, 0, 0, 0, 0},
2160 {&core2_cost, 16, 10, 16, 10, 16},
2161 {&generic32_cost, 16, 7, 16, 7, 16},
2162 {&generic64_cost, 16, 10, 16, 10, 16},
2163 {&amdfam10_cost, 32, 24, 32, 7, 32},
2164 {&bdver1_cost, 32, 24, 32, 7, 32},
2165 {&atom_cost, 16, 7, 16, 7, 16}
2166 };
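
/* Illustrative sketch (an editorial addition; the exact consumer lives in
   override_options and is an assumption here): the table is indexed by the
   selected processor, e.g.

     const struct ptt *tune_info = &processor_target_table[ix86_tune];

   and tune_info->align_loop, tune_info->align_jump and tune_info->align_func
   supply per-CPU default alignments when none are requested explicitly.  */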
2167
2168 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2169 {
2170 "generic",
2171 "i386",
2172 "i486",
2173 "pentium",
2174 "pentium-mmx",
2175 "pentiumpro",
2176 "pentium2",
2177 "pentium3",
2178 "pentium4",
2179 "pentium-m",
2180 "prescott",
2181 "nocona",
2182 "core2",
2183 "atom",
2184 "geode",
2185 "k6",
2186 "k6-2",
2187 "k6-3",
2188 "athlon",
2189 "athlon-4",
2190 "k8",
2191 "amdfam10",
2192 "bdver1"
2193 };
2194 \f
2195 /* Implement TARGET_HANDLE_OPTION. */
2196
2197 static bool
2198 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2199 {
2200 switch (code)
2201 {
2202 case OPT_mmmx:
2203 if (value)
2204 {
2205 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2206 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2207 }
2208 else
2209 {
2210 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2211 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2212 }
2213 return true;
2214
2215 case OPT_m3dnow:
2216 if (value)
2217 {
2218 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2219 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2220 }
2221 else
2222 {
2223 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2224 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2225 }
2226 return true;
2227
2228 case OPT_m3dnowa:
2229 return false;
2230
2231 case OPT_msse:
2232 if (value)
2233 {
2234 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2236 }
2237 else
2238 {
2239 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2241 }
2242 return true;
2243
2244 case OPT_msse2:
2245 if (value)
2246 {
2247 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2248 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2249 }
2250 else
2251 {
2252 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2253 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2254 }
2255 return true;
2256
2257 case OPT_msse3:
2258 if (value)
2259 {
2260 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2261 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2262 }
2263 else
2264 {
2265 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2266 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2267 }
2268 return true;
2269
2270 case OPT_mssse3:
2271 if (value)
2272 {
2273 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2274 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2275 }
2276 else
2277 {
2278 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2279 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2280 }
2281 return true;
2282
2283 case OPT_msse4_1:
2284 if (value)
2285 {
2286 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2287 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2288 }
2289 else
2290 {
2291 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2292 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2293 }
2294 return true;
2295
2296 case OPT_msse4_2:
2297 if (value)
2298 {
2299 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2300 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2301 }
2302 else
2303 {
2304 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2305 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2306 }
2307 return true;
2308
2309 case OPT_mavx:
2310 if (value)
2311 {
2312 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2313 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2314 }
2315 else
2316 {
2317 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2318 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2319 }
2320 return true;
2321
2322 case OPT_mfma:
2323 if (value)
2324 {
2325 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2326 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2327 }
2328 else
2329 {
2330 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2331 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2332 }
2333 return true;
2334
2335 case OPT_msse4:
2336 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2337 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2338 return true;
2339
2340 case OPT_mno_sse4:
2341 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2342 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2343 return true;
2344
2345 case OPT_msse4a:
2346 if (value)
2347 {
2348 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2349 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2350 }
2351 else
2352 {
2353 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2354 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2355 }
2356 return true;
2357
2358 case OPT_mfma4:
2359 if (value)
2360 {
2361 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2362 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2363 }
2364 else
2365 {
2366 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2367 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2368 }
2369 return true;
2370
2371 case OPT_mxop:
2372 if (value)
2373 {
2374 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2375 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2376 }
2377 else
2378 {
2379 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2380 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2381 }
2382 return true;
2383
2384 case OPT_mlwp:
2385 if (value)
2386 {
2387 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2388 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2389 }
2390 else
2391 {
2392 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2393 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2394 }
2395 return true;
2396
2397 case OPT_mabm:
2398 if (value)
2399 {
2400 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2401 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2402 }
2403 else
2404 {
2405 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2406 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2407 }
2408 return true;
2409
2410 case OPT_mpopcnt:
2411 if (value)
2412 {
2413 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2414 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2415 }
2416 else
2417 {
2418 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2419 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2420 }
2421 return true;
2422
2423 case OPT_msahf:
2424 if (value)
2425 {
2426 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2427 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2428 }
2429 else
2430 {
2431 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2432 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2433 }
2434 return true;
2435
2436 case OPT_mcx16:
2437 if (value)
2438 {
2439 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2440 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2441 }
2442 else
2443 {
2444 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2445 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2446 }
2447 return true;
2448
2449 case OPT_mmovbe:
2450 if (value)
2451 {
2452 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2453 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2454 }
2455 else
2456 {
2457 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2458 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2459 }
2460 return true;
2461
2462 case OPT_mcrc32:
2463 if (value)
2464 {
2465 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2466 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2467 }
2468 else
2469 {
2470 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2471 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2472 }
2473 return true;
2474
2475 case OPT_maes:
2476 if (value)
2477 {
2478 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2479 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2480 }
2481 else
2482 {
2483 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2484 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2485 }
2486 return true;
2487
2488 case OPT_mpclmul:
2489 if (value)
2490 {
2491 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2492 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2493 }
2494 else
2495 {
2496 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2497 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2498 }
2499 return true;
2500
2501 default:
2502 return true;
2503 }
2504 }
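
/* Illustrative note (an editorial addition): the *_SET/*_UNSET pattern above
   also records every explicit request in ix86_isa_flags_explicit, which is
   what lets an explicit -mno-xxx survive -march defaults.  For example, with
   "-march=core2 -mno-ssse3" this handler clears SSSE3 (and everything that
   depends on it) and records it as explicit; the processor_alias_table loop
   in override_options below then skips re-enabling SSSE3 because of its
   "!(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3)" guard.  */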
2505 \f
2506 /* Return a string that documents the current -m options. The caller is
2507 responsible for freeing the string. */
2508
2509 static char *
2510 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2511 const char *fpmath, bool add_nl_p)
2512 {
2513 struct ix86_target_opts
2514 {
2515 const char *option; /* option string */
2516 int mask; /* isa mask options */
2517 };
2518
2519 /* This table is ordered so that options like -msse4.2 that imply
2520 preceding options are matched first. */
2521 static struct ix86_target_opts isa_opts[] =
2522 {
2523 { "-m64", OPTION_MASK_ISA_64BIT },
2524 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2525 { "-mfma", OPTION_MASK_ISA_FMA },
2526 { "-mxop", OPTION_MASK_ISA_XOP },
2527 { "-mlwp", OPTION_MASK_ISA_LWP },
2528 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2529 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2530 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2531 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2532 { "-msse3", OPTION_MASK_ISA_SSE3 },
2533 { "-msse2", OPTION_MASK_ISA_SSE2 },
2534 { "-msse", OPTION_MASK_ISA_SSE },
2535 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2536 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2537 { "-mmmx", OPTION_MASK_ISA_MMX },
2538 { "-mabm", OPTION_MASK_ISA_ABM },
2539 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2540 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2541 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2542 { "-maes", OPTION_MASK_ISA_AES },
2543 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2544 };
2545
2546 /* Flag options. */
2547 static struct ix86_target_opts flag_opts[] =
2548 {
2549 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2550 { "-m80387", MASK_80387 },
2551 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2552 { "-malign-double", MASK_ALIGN_DOUBLE },
2553 { "-mcld", MASK_CLD },
2554 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2555 { "-mieee-fp", MASK_IEEE_FP },
2556 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2557 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2558 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2559 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2560 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2561 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2562 { "-mno-red-zone", MASK_NO_RED_ZONE },
2563 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2564 { "-mrecip", MASK_RECIP },
2565 { "-mrtd", MASK_RTD },
2566 { "-msseregparm", MASK_SSEREGPARM },
2567 { "-mstack-arg-probe", MASK_STACK_PROBE },
2568 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2569 };
2570
2571 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2572
2573 char isa_other[40];
2574 char target_other[40];
2575 unsigned num = 0;
2576 unsigned i, j;
2577 char *ret;
2578 char *ptr;
2579 size_t len;
2580 size_t line_len;
2581 size_t sep_len;
2582
2583 memset (opts, '\0', sizeof (opts));
2584
2585 /* Add -march= option. */
2586 if (arch)
2587 {
2588 opts[num][0] = "-march=";
2589 opts[num++][1] = arch;
2590 }
2591
2592 /* Add -mtune= option. */
2593 if (tune)
2594 {
2595 opts[num][0] = "-mtune=";
2596 opts[num++][1] = tune;
2597 }
2598
2599 /* Pick out the options in isa options. */
2600 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2601 {
2602 if ((isa & isa_opts[i].mask) != 0)
2603 {
2604 opts[num++][0] = isa_opts[i].option;
2605 isa &= ~ isa_opts[i].mask;
2606 }
2607 }
2608
2609 if (isa && add_nl_p)
2610 {
2611 opts[num++][0] = isa_other;
2612 sprintf (isa_other, "(other isa: %#x)", isa);
2613 }
2614
2615 /* Add flag options. */
2616 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2617 {
2618 if ((flags & flag_opts[i].mask) != 0)
2619 {
2620 opts[num++][0] = flag_opts[i].option;
2621 flags &= ~ flag_opts[i].mask;
2622 }
2623 }
2624
2625 if (flags && add_nl_p)
2626 {
2627 opts[num++][0] = target_other;
2628 sprintf (target_other, "(other flags: %#x)", flags);
2629 }
2630
2631 /* Add -fpmath= option. */
2632 if (fpmath)
2633 {
2634 opts[num][0] = "-mfpmath=";
2635 opts[num++][1] = fpmath;
2636 }
2637
2638 /* Any options? */
2639 if (num == 0)
2640 return NULL;
2641
2642 gcc_assert (num < ARRAY_SIZE (opts));
2643
2644 /* Size the string. */
2645 len = 0;
2646 sep_len = (add_nl_p) ? 3 : 1;
2647 for (i = 0; i < num; i++)
2648 {
2649 len += sep_len;
2650 for (j = 0; j < 2; j++)
2651 if (opts[i][j])
2652 len += strlen (opts[i][j]);
2653 }
2654
2655 /* Build the string. */
2656 ret = ptr = (char *) xmalloc (len);
2657 line_len = 0;
2658
2659 for (i = 0; i < num; i++)
2660 {
2661 size_t len2[2];
2662
2663 for (j = 0; j < 2; j++)
2664 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2665
2666 if (i != 0)
2667 {
2668 *ptr++ = ' ';
2669 line_len++;
2670
2671 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2672 {
2673 *ptr++ = '\\';
2674 *ptr++ = '\n';
2675 line_len = 0;
2676 }
2677 }
2678
2679 for (j = 0; j < 2; j++)
2680 if (opts[i][j])
2681 {
2682 memcpy (ptr, opts[i][j], len2[j]);
2683 ptr += len2[j];
2684 line_len += len2[j];
2685 }
2686 }
2687
2688 *ptr = '\0';
2689 gcc_assert (ret + len >= ptr);
2690
2691 return ret;
2692 }
2693
2694 /* Function that is callable from the debugger to print the current
2695 options. */
2696 void
2697 ix86_debug_options (void)
2698 {
2699 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2700 ix86_arch_string, ix86_tune_string,
2701 ix86_fpmath_string, true);
2702
2703 if (opts)
2704 {
2705 fprintf (stderr, "%s\n\n", opts);
2706 free (opts);
2707 }
2708 else
2709 fputs ("<no options>\n\n", stderr);
2710
2711 return;
2712 }
2713 \f
2714 /* Sometimes certain combinations of command options do not make
2715 sense on a particular target machine. You can define a macro
2716 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2717 defined, is executed once just after all the command options have
2718 been parsed.
2719
2720 Don't use this macro to turn on various extra optimizations for
2721 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2722
2723 void
2724 override_options (bool main_args_p)
2725 {
2726 int i;
2727 unsigned int ix86_arch_mask, ix86_tune_mask;
2728 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2729 const char *prefix;
2730 const char *suffix;
2731 const char *sw;
2732
2733 /* Comes from final.c -- no real reason to change it. */
2734 #define MAX_CODE_ALIGN 16
2735
2736 enum pta_flags
2737 {
2738 PTA_SSE = 1 << 0,
2739 PTA_SSE2 = 1 << 1,
2740 PTA_SSE3 = 1 << 2,
2741 PTA_MMX = 1 << 3,
2742 PTA_PREFETCH_SSE = 1 << 4,
2743 PTA_3DNOW = 1 << 5,
2744 PTA_3DNOW_A = 1 << 6,
2745 PTA_64BIT = 1 << 7,
2746 PTA_SSSE3 = 1 << 8,
2747 PTA_CX16 = 1 << 9,
2748 PTA_POPCNT = 1 << 10,
2749 PTA_ABM = 1 << 11,
2750 PTA_SSE4A = 1 << 12,
2751 PTA_NO_SAHF = 1 << 13,
2752 PTA_SSE4_1 = 1 << 14,
2753 PTA_SSE4_2 = 1 << 15,
2754 PTA_AES = 1 << 16,
2755 PTA_PCLMUL = 1 << 17,
2756 PTA_AVX = 1 << 18,
2757 PTA_FMA = 1 << 19,
2758 PTA_MOVBE = 1 << 20,
2759 PTA_FMA4 = 1 << 21,
2760 PTA_XOP = 1 << 22,
2761 PTA_LWP = 1 << 23
2762 };
2763
2764 static struct pta
2765 {
2766 const char *const name; /* processor name or nickname. */
2767 const enum processor_type processor;
2768 const enum attr_cpu schedule;
2769 const unsigned /*enum pta_flags*/ flags;
2770 }
2771 const processor_alias_table[] =
2772 {
2773 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2774 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2775 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2776 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2777 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2778 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2779 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2780 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2781 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2782 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2783 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2784 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2785 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2786 PTA_MMX | PTA_SSE},
2787 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2788 PTA_MMX | PTA_SSE},
2789 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2790 PTA_MMX | PTA_SSE | PTA_SSE2},
2791 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2792 PTA_MMX |PTA_SSE | PTA_SSE2},
2793 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2794 PTA_MMX | PTA_SSE | PTA_SSE2},
2795 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2796 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2797 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2798 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2799 | PTA_CX16 | PTA_NO_SAHF},
2800 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2801 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2802 | PTA_SSSE3 | PTA_CX16},
2803 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2804 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2805 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2806 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2807 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2808 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2809 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2810 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2811 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2812 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2813 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2814 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2815 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2816 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2817 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2818 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2819 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2820 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2821 {"x86-64", PROCESSOR_K8, CPU_K8,
2822 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2823 {"k8", PROCESSOR_K8, CPU_K8,
2824 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2825 | PTA_SSE2 | PTA_NO_SAHF},
2826 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2827 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2828 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2829 {"opteron", PROCESSOR_K8, CPU_K8,
2830 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2831 | PTA_SSE2 | PTA_NO_SAHF},
2832 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2833 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2834 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2835 {"athlon64", PROCESSOR_K8, CPU_K8,
2836 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2837 | PTA_SSE2 | PTA_NO_SAHF},
2838 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2839 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2840 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2841 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2842 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2843 | PTA_SSE2 | PTA_NO_SAHF},
2844 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2845 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2846 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2847 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2848 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2849 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2850 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
2851 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2852 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
2853 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
2854 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
2855 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2856 0 /* flags are only used for -march switch. */ },
2857 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2858 PTA_64BIT /* flags are only used for -march switch. */ },
2859 };
2860
2861 int const pta_size = ARRAY_SIZE (processor_alias_table);
2862
2863 /* Set up prefix/suffix so the error messages refer to either the command
2864 line argument, or the attribute(target). */
2865 if (main_args_p)
2866 {
2867 prefix = "-m";
2868 suffix = "";
2869 sw = "switch";
2870 }
2871 else
2872 {
2873 prefix = "option(\"";
2874 suffix = "\")";
2875 sw = "attribute";
2876 }
2877
2878 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2879 SUBTARGET_OVERRIDE_OPTIONS;
2880 #endif
2881
2882 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2883 SUBSUBTARGET_OVERRIDE_OPTIONS;
2884 #endif
2885
2886 /* -fPIC is the default for x86_64. */
2887 if (TARGET_MACHO && TARGET_64BIT)
2888 flag_pic = 2;
2889
2890 /* Set the default values for switches whose default depends on TARGET_64BIT
2891 in case they weren't overwritten by command line options. */
2892 if (TARGET_64BIT)
2893 {
2894 /* Mach-O doesn't support omitting the frame pointer for now. */
2895 if (flag_omit_frame_pointer == 2)
2896 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2897 if (flag_asynchronous_unwind_tables == 2)
2898 flag_asynchronous_unwind_tables = 1;
2899 if (flag_pcc_struct_return == 2)
2900 flag_pcc_struct_return = 0;
2901 }
2902 else
2903 {
2904 if (flag_omit_frame_pointer == 2)
2905 flag_omit_frame_pointer = 0;
2906 if (flag_asynchronous_unwind_tables == 2)
2907 flag_asynchronous_unwind_tables = 0;
2908 if (flag_pcc_struct_return == 2)
2909 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2910 }
2911
2912 /* Need to check -mtune=generic first. */
2913 if (ix86_tune_string)
2914 {
2915 if (!strcmp (ix86_tune_string, "generic")
2916 || !strcmp (ix86_tune_string, "i686")
2917 /* As special support for cross compilers we read -mtune=native
2918 as -mtune=generic. With native compilers we won't see the
2919 -mtune=native, as it was changed by the driver. */
2920 || !strcmp (ix86_tune_string, "native"))
2921 {
2922 if (TARGET_64BIT)
2923 ix86_tune_string = "generic64";
2924 else
2925 ix86_tune_string = "generic32";
2926 }
2927 /* If this call is for setting the option attribute, allow the
2928 generic32/generic64 that was previously set. */
2929 else if (!main_args_p
2930 && (!strcmp (ix86_tune_string, "generic32")
2931 || !strcmp (ix86_tune_string, "generic64")))
2932 ;
2933 else if (!strncmp (ix86_tune_string, "generic", 7))
2934 error ("bad value (%s) for %stune=%s %s",
2935 ix86_tune_string, prefix, suffix, sw);
2936 else if (!strcmp (ix86_tune_string, "x86-64"))
2937 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2938 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2939 prefix, suffix, prefix, suffix, prefix, suffix);
2940 }
2941 else
2942 {
2943 if (ix86_arch_string)
2944 ix86_tune_string = ix86_arch_string;
2945 if (!ix86_tune_string)
2946 {
2947 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2948 ix86_tune_defaulted = 1;
2949 }
2950
2951 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2952 need to use a sensible tune option. */
2953 if (!strcmp (ix86_tune_string, "generic")
2954 || !strcmp (ix86_tune_string, "x86-64")
2955 || !strcmp (ix86_tune_string, "i686"))
2956 {
2957 if (TARGET_64BIT)
2958 ix86_tune_string = "generic64";
2959 else
2960 ix86_tune_string = "generic32";
2961 }
2962 }
2963
2964 if (ix86_stringop_string)
2965 {
2966 if (!strcmp (ix86_stringop_string, "rep_byte"))
2967 stringop_alg = rep_prefix_1_byte;
2968 else if (!strcmp (ix86_stringop_string, "libcall"))
2969 stringop_alg = libcall;
2970 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2971 stringop_alg = rep_prefix_4_byte;
2972 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2973 && TARGET_64BIT)
2974 /* rep; movq isn't available in 32-bit code. */
2975 stringop_alg = rep_prefix_8_byte;
2976 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2977 stringop_alg = loop_1_byte;
2978 else if (!strcmp (ix86_stringop_string, "loop"))
2979 stringop_alg = loop;
2980 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2981 stringop_alg = unrolled_loop;
2982 else
2983 error ("bad value (%s) for %sstringop-strategy=%s %s",
2984 ix86_stringop_string, prefix, suffix, sw);
2985 }
2986
2987 if (!ix86_arch_string)
2988 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2989 else
2990 ix86_arch_specified = 1;
2991
2992 /* Validate -mabi= value. */
2993 if (ix86_abi_string)
2994 {
2995 if (strcmp (ix86_abi_string, "sysv") == 0)
2996 ix86_abi = SYSV_ABI;
2997 else if (strcmp (ix86_abi_string, "ms") == 0)
2998 ix86_abi = MS_ABI;
2999 else
3000 error ("unknown ABI (%s) for %sabi=%s %s",
3001 ix86_abi_string, prefix, suffix, sw);
3002 }
3003 else
3004 ix86_abi = DEFAULT_ABI;
3005
3006 if (ix86_cmodel_string != 0)
3007 {
3008 if (!strcmp (ix86_cmodel_string, "small"))
3009 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3010 else if (!strcmp (ix86_cmodel_string, "medium"))
3011 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3012 else if (!strcmp (ix86_cmodel_string, "large"))
3013 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3014 else if (flag_pic)
3015 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3016 else if (!strcmp (ix86_cmodel_string, "32"))
3017 ix86_cmodel = CM_32;
3018 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3019 ix86_cmodel = CM_KERNEL;
3020 else
3021 error ("bad value (%s) for %scmodel=%s %s",
3022 ix86_cmodel_string, prefix, suffix, sw);
3023 }
3024 else
3025 {
3026 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3027 use of rip-relative addressing. This eliminates fixups that
3028 would otherwise be needed if this object is to be placed in a
3029 DLL, and is essentially just as efficient as direct addressing. */
3030 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3031 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3032 else if (TARGET_64BIT)
3033 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3034 else
3035 ix86_cmodel = CM_32;
3036 }
3037 if (ix86_asm_string != 0)
3038 {
3039 if (! TARGET_MACHO
3040 && !strcmp (ix86_asm_string, "intel"))
3041 ix86_asm_dialect = ASM_INTEL;
3042 else if (!strcmp (ix86_asm_string, "att"))
3043 ix86_asm_dialect = ASM_ATT;
3044 else
3045 error ("bad value (%s) for %sasm=%s %s",
3046 ix86_asm_string, prefix, suffix, sw);
3047 }
3048 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3049 error ("code model %qs not supported in the %s bit mode",
3050 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3051 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3052 sorry ("%i-bit mode not compiled in",
3053 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3054
3055 for (i = 0; i < pta_size; i++)
3056 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3057 {
3058 ix86_schedule = processor_alias_table[i].schedule;
3059 ix86_arch = processor_alias_table[i].processor;
3060 /* Default cpu tuning to the architecture. */
3061 ix86_tune = ix86_arch;
3062
3063 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3064 error ("CPU you selected does not support x86-64 "
3065 "instruction set");
3066
3067 if (processor_alias_table[i].flags & PTA_MMX
3068 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3069 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3070 if (processor_alias_table[i].flags & PTA_3DNOW
3071 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3072 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3073 if (processor_alias_table[i].flags & PTA_3DNOW_A
3074 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3075 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3076 if (processor_alias_table[i].flags & PTA_SSE
3077 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3078 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3079 if (processor_alias_table[i].flags & PTA_SSE2
3080 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3081 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3082 if (processor_alias_table[i].flags & PTA_SSE3
3083 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3084 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3085 if (processor_alias_table[i].flags & PTA_SSSE3
3086 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3087 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3088 if (processor_alias_table[i].flags & PTA_SSE4_1
3089 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3090 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3091 if (processor_alias_table[i].flags & PTA_SSE4_2
3092 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3093 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3094 if (processor_alias_table[i].flags & PTA_AVX
3095 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3096 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3097 if (processor_alias_table[i].flags & PTA_FMA
3098 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3099 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3100 if (processor_alias_table[i].flags & PTA_SSE4A
3101 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3102 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3103 if (processor_alias_table[i].flags & PTA_FMA4
3104 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3105 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3106 if (processor_alias_table[i].flags & PTA_XOP
3107 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3108 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3109 if (processor_alias_table[i].flags & PTA_LWP
3110 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3111 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3112 if (processor_alias_table[i].flags & PTA_ABM
3113 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3114 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3115 if (processor_alias_table[i].flags & PTA_CX16
3116 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3117 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3118 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3119 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3120 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3121 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3122 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3123 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3124 if (processor_alias_table[i].flags & PTA_MOVBE
3125 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3126 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3127 if (processor_alias_table[i].flags & PTA_AES
3128 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3129 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3130 if (processor_alias_table[i].flags & PTA_PCLMUL
3131 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3132 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3133 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3134 x86_prefetch_sse = true;
3135
3136 break;
3137 }
3138
3139 if (!strcmp (ix86_arch_string, "generic"))
3140 error ("generic CPU can be used only for %stune=%s %s",
3141 prefix, suffix, sw);
3142 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3143 error ("bad value (%s) for %sarch=%s %s",
3144 ix86_arch_string, prefix, suffix, sw);
3145
3146 ix86_arch_mask = 1u << ix86_arch;
3147 for (i = 0; i < X86_ARCH_LAST; ++i)
3148 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3149
3150 for (i = 0; i < pta_size; i++)
3151 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3152 {
3153 ix86_schedule = processor_alias_table[i].schedule;
3154 ix86_tune = processor_alias_table[i].processor;
3155 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3156 {
3157 if (ix86_tune_defaulted)
3158 {
3159 ix86_tune_string = "x86-64";
3160 for (i = 0; i < pta_size; i++)
3161 if (! strcmp (ix86_tune_string,
3162 processor_alias_table[i].name))
3163 break;
3164 ix86_schedule = processor_alias_table[i].schedule;
3165 ix86_tune = processor_alias_table[i].processor;
3166 }
3167 else
3168 error ("CPU you selected does not support x86-64 "
3169 "instruction set");
3170 }
3171 /* Intel CPUs have always interpreted SSE prefetch instructions as
3172 NOPs; so, we can enable SSE prefetch instructions even when
3173 -mtune (rather than -march) points us to a processor that has them.
3174 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3175 higher processors. */
3176 if (TARGET_CMOVE
3177 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3178 x86_prefetch_sse = true;
3179 break;
3180 }
3181
3182 if (ix86_tune_specified && i == pta_size)
3183 error ("bad value (%s) for %stune=%s %s",
3184 ix86_tune_string, prefix, suffix, sw);
3185
3186 ix86_tune_mask = 1u << ix86_tune;
3187 for (i = 0; i < X86_TUNE_LAST; ++i)
3188 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3189
3190 if (optimize_size)
3191 ix86_cost = &ix86_size_cost;
3192 else
3193 ix86_cost = processor_target_table[ix86_tune].cost;
3194
3195 /* Arrange to set up i386_stack_locals for all functions. */
3196 init_machine_status = ix86_init_machine_status;
3197
3198 /* Validate -mregparm= value. */
3199 if (ix86_regparm_string)
3200 {
3201 if (TARGET_64BIT)
3202 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3203 i = atoi (ix86_regparm_string);
3204 if (i < 0 || i > REGPARM_MAX)
3205 error ("%sregparm=%d%s is not between 0 and %d",
3206 prefix, i, suffix, REGPARM_MAX);
3207 else
3208 ix86_regparm = i;
3209 }
3210 if (TARGET_64BIT)
3211 ix86_regparm = REGPARM_MAX;
3212
3213 /* If the user has provided any of the -malign-* options,
3214 warn and use that value only if -falign-* is not set.
3215 Remove this code in GCC 3.2 or later. */
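/* The old options take the alignment as a power of two: for instance,
   -malign-loops=4 corresponds to -falign-loops=16, since align_loops
   below is set to 1 << 4 = 16 bytes. */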
3216 if (ix86_align_loops_string)
3217 {
3218 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3219 prefix, suffix, suffix);
3220 if (align_loops == 0)
3221 {
3222 i = atoi (ix86_align_loops_string);
3223 if (i < 0 || i > MAX_CODE_ALIGN)
3224 error ("%salign-loops=%d%s is not between 0 and %d",
3225 prefix, i, suffix, MAX_CODE_ALIGN);
3226 else
3227 align_loops = 1 << i;
3228 }
3229 }
3230
3231 if (ix86_align_jumps_string)
3232 {
3233 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3234 prefix, suffix, suffix);
3235 if (align_jumps == 0)
3236 {
3237 i = atoi (ix86_align_jumps_string);
3238 if (i < 0 || i > MAX_CODE_ALIGN)
3239 error ("%salign-loops=%d%s is not between 0 and %d",
3240 prefix, i, suffix, MAX_CODE_ALIGN);
3241 else
3242 align_jumps = 1 << i;
3243 }
3244 }
3245
3246 if (ix86_align_funcs_string)
3247 {
3248 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3249 prefix, suffix, suffix);
3250 if (align_functions == 0)
3251 {
3252 i = atoi (ix86_align_funcs_string);
3253 if (i < 0 || i > MAX_CODE_ALIGN)
3254 error ("%salign-loops=%d%s is not between 0 and %d",
3255 prefix, i, suffix, MAX_CODE_ALIGN);
3256 else
3257 align_functions = 1 << i;
3258 }
3259 }
3260
3261 /* Default align_* from the processor table. */
3262 if (align_loops == 0)
3263 {
3264 align_loops = processor_target_table[ix86_tune].align_loop;
3265 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3266 }
3267 if (align_jumps == 0)
3268 {
3269 align_jumps = processor_target_table[ix86_tune].align_jump;
3270 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3271 }
3272 if (align_functions == 0)
3273 {
3274 align_functions = processor_target_table[ix86_tune].align_func;
3275 }
3276
3277 /* Validate -mbranch-cost= value, or provide default. */
3278 ix86_branch_cost = ix86_cost->branch_cost;
3279 if (ix86_branch_cost_string)
3280 {
3281 i = atoi (ix86_branch_cost_string);
3282 if (i < 0 || i > 5)
3283 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3284 else
3285 ix86_branch_cost = i;
3286 }
3287 if (ix86_section_threshold_string)
3288 {
3289 i = atoi (ix86_section_threshold_string);
3290 if (i < 0)
3291 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3292 else
3293 ix86_section_threshold = i;
3294 }
3295
3296 if (ix86_tls_dialect_string)
3297 {
3298 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3299 ix86_tls_dialect = TLS_DIALECT_GNU;
3300 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3301 ix86_tls_dialect = TLS_DIALECT_GNU2;
3302 else
3303 error ("bad value (%s) for %stls-dialect=%s %s",
3304 ix86_tls_dialect_string, prefix, suffix, sw);
3305 }
3306
3307 if (ix87_precision_string)
3308 {
3309 i = atoi (ix87_precision_string);
3310 if (i != 32 && i != 64 && i != 80)
3311 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3312 }
3313
3314 if (TARGET_64BIT)
3315 {
3316 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3317
3318 /* Enable by default the SSE and MMX builtins. Do allow the user to
3319 explicitly disable any of these. In particular, disabling SSE and
3320 MMX for kernel code is extremely useful. */
3321 if (!ix86_arch_specified)
3322 ix86_isa_flags
3323 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3324 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3325
3326 if (TARGET_RTD)
3327 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3328 }
3329 else
3330 {
3331 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3332
3333 if (!ix86_arch_specified)
3334 ix86_isa_flags
3335 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3336
3337 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3338 when the programmer takes care to keep the stack from being destroyed. */
3339 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3340 target_flags |= MASK_NO_RED_ZONE;
3341 }
3342
3343 /* Keep nonleaf frame pointers. */
3344 if (flag_omit_frame_pointer)
3345 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3346 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3347 flag_omit_frame_pointer = 1;
3348
3349 /* If we're doing fast math, we don't care about comparison order
3350 wrt NaNs. This lets us use a shorter comparison sequence. */
3351 if (flag_finite_math_only)
3352 target_flags &= ~MASK_IEEE_FP;
3353
3354 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3355 since the insns won't need emulation. */
3356 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3357 target_flags &= ~MASK_NO_FANCY_MATH_387;
3358
3359 /* Likewise, if the target doesn't have a 387, or we've specified
3360 software floating point, don't use 387 inline intrinsics. */
3361 if (!TARGET_80387)
3362 target_flags |= MASK_NO_FANCY_MATH_387;
3363
3364 /* Turn on MMX builtins for -msse. */
3365 if (TARGET_SSE)
3366 {
3367 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3368 x86_prefetch_sse = true;
3369 }
3370
3371 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3372 if (TARGET_SSE4_2 || TARGET_ABM)
3373 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3374
3375 /* Validate -mpreferred-stack-boundary= value or default it to
3376 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3377 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3378 if (ix86_preferred_stack_boundary_string)
3379 {
3380 i = atoi (ix86_preferred_stack_boundary_string);
3381 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3382 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3383 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3384 else
3385 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
3386 }
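/* For example, -mpreferred-stack-boundary=4 yields
   (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. the usual 16-byte alignment
   of the stack pointer. */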
3387
3388 /* Set the default value for -mstackrealign. */
3389 if (ix86_force_align_arg_pointer == -1)
3390 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3391
3392 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3393
3394 /* Validate -mincoming-stack-boundary= value or default it to
3395 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3396 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3397 if (ix86_incoming_stack_boundary_string)
3398 {
3399 i = atoi (ix86_incoming_stack_boundary_string);
3400 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3401 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3402 i, TARGET_64BIT ? 4 : 2);
3403 else
3404 {
3405 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3406 ix86_incoming_stack_boundary
3407 = ix86_user_incoming_stack_boundary;
3408 }
3409 }
3410
3411 /* Accept -msseregparm only if at least SSE support is enabled. */
3412 if (TARGET_SSEREGPARM
3413 && ! TARGET_SSE)
3414 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3415
3416 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3417 if (ix86_fpmath_string != 0)
3418 {
3419 if (! strcmp (ix86_fpmath_string, "387"))
3420 ix86_fpmath = FPMATH_387;
3421 else if (! strcmp (ix86_fpmath_string, "sse"))
3422 {
3423 if (!TARGET_SSE)
3424 {
3425 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3426 ix86_fpmath = FPMATH_387;
3427 }
3428 else
3429 ix86_fpmath = FPMATH_SSE;
3430 }
3431 else if (! strcmp (ix86_fpmath_string, "387,sse")
3432 || ! strcmp (ix86_fpmath_string, "387+sse")
3433 || ! strcmp (ix86_fpmath_string, "sse,387")
3434 || ! strcmp (ix86_fpmath_string, "sse+387")
3435 || ! strcmp (ix86_fpmath_string, "both"))
3436 {
3437 if (!TARGET_SSE)
3438 {
3439 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3440 ix86_fpmath = FPMATH_387;
3441 }
3442 else if (!TARGET_80387)
3443 {
3444 warning (0, "387 instruction set disabled, using SSE arithmetics");
3445 ix86_fpmath = FPMATH_SSE;
3446 }
3447 else
3448 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3449 }
3450 else
3451 error ("bad value (%s) for %sfpmath=%s %s",
3452 ix86_fpmath_string, prefix, suffix, sw);
3453 }
3454
3455 /* If the i387 is disabled, then do not return values in it. */
3456 if (!TARGET_80387)
3457 target_flags &= ~MASK_FLOAT_RETURNS;
3458
3459 /* Use external vectorized library in vectorizing intrinsics. */
3460 if (ix86_veclibabi_string)
3461 {
3462 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3463 ix86_veclib_handler = ix86_veclibabi_svml;
3464 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3465 ix86_veclib_handler = ix86_veclibabi_acml;
3466 else
3467 error ("unknown vectorization library ABI type (%s) for "
3468 "%sveclibabi=%s %s", ix86_veclibabi_string,
3469 prefix, suffix, sw);
3470 }
3471
3472 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3473 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3474 && !optimize_size)
3475 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3476
3477 /* ??? Unwind info is not correct around the CFG unless either a frame
3478 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3479 unwind info generation to be aware of the CFG and propagating states
3480 around edges. */
3481 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3482 || flag_exceptions || flag_non_call_exceptions)
3483 && flag_omit_frame_pointer
3484 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3485 {
3486 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3487 warning (0, "unwind tables currently require either a frame pointer "
3488 "or %saccumulate-outgoing-args%s for correctness",
3489 prefix, suffix);
3490 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3491 }
3492
3493 /* If stack probes are required, the space used for large function
3494 arguments on the stack must also be probed, so enable
3495 -maccumulate-outgoing-args so this happens in the prologue. */
3496 if (TARGET_STACK_PROBE
3497 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3498 {
3499 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3500 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3501 "for correctness", prefix, suffix);
3502 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3503 }
3504
3505 /* For sane SSE instruction set generation we need fcomi instruction.
3506 It is safe to enable all CMOVE instructions. */
3507 if (TARGET_SSE)
3508 TARGET_CMOVE = 1;
3509
3510 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3511 {
3512 char *p;
3513 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3514 p = strchr (internal_label_prefix, 'X');
3515 internal_label_prefix_len = p - internal_label_prefix;
3516 *p = '\0';
3517 }
3518
3519 /* When no scheduling description is available, disable the scheduler pass
3520 so it won't slow down the compilation and make x87 code slower. */
3521 if (!TARGET_SCHEDULE)
3522 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3523
3524 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3525 set_param_value ("simultaneous-prefetches",
3526 ix86_cost->simultaneous_prefetches);
3527 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3528 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3529 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3530 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3531 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3532 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3533
3534 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3535 can be optimized to ap = __builtin_next_arg (0). */
3536 if (!TARGET_64BIT)
3537 targetm.expand_builtin_va_start = NULL;
3538
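/* Pick the DImode or SImode variants of the commonly used generator
   functions once, so the rest of the backend can emit word-size leave,
   pop, add, sub, monitor, etc. patterns through these pointers without
   re-checking TARGET_64BIT at every use. */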
3539 if (TARGET_64BIT)
3540 {
3541 ix86_gen_leave = gen_leave_rex64;
3542 ix86_gen_pop1 = gen_popdi1;
3543 ix86_gen_add3 = gen_adddi3;
3544 ix86_gen_sub3 = gen_subdi3;
3545 ix86_gen_sub3_carry = gen_subdi3_carry;
3546 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3547 ix86_gen_monitor = gen_sse3_monitor64;
3548 ix86_gen_andsp = gen_anddi3;
3549 }
3550 else
3551 {
3552 ix86_gen_leave = gen_leave;
3553 ix86_gen_pop1 = gen_popsi1;
3554 ix86_gen_add3 = gen_addsi3;
3555 ix86_gen_sub3 = gen_subsi3;
3556 ix86_gen_sub3_carry = gen_subsi3_carry;
3557 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3558 ix86_gen_monitor = gen_sse3_monitor;
3559 ix86_gen_andsp = gen_andsi3;
3560 }
3561
3562 #ifdef USE_IX86_CLD
3563 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3564 if (!TARGET_64BIT)
3565 target_flags |= MASK_CLD & ~target_flags_explicit;
3566 #endif
3567
3568 /* Save the initial options in case the user uses function-specific options. */
3569 if (main_args_p)
3570 target_option_default_node = target_option_current_node
3571 = build_target_option_node ();
3572 }
3573
3574 /* Update register usage after having seen the compiler flags. */
3575
3576 void
3577 ix86_conditional_register_usage (void)
3578 {
3579 int i;
3580 unsigned int j;
3581
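/* Entries greater than 1 in the FIXED_REGISTERS and CALL_USED_REGISTERS
   tables encode mode-dependent registers: 2 means fixed (or call-used)
   only in 32-bit mode, 3 only in 64-bit mode. Collapse them to plain
   0/1 now that TARGET_64BIT is known. */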
3582 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3583 {
3584 if (fixed_regs[i] > 1)
3585 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3586 if (call_used_regs[i] > 1)
3587 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3588 }
3589
3590 /* The PIC register, if it exists, is fixed. */
3591 j = PIC_OFFSET_TABLE_REGNUM;
3592 if (j != INVALID_REGNUM)
3593 fixed_regs[j] = call_used_regs[j] = 1;
3594
3595 /* The MS_ABI changes the set of call-used registers. */
3596 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3597 {
3598 call_used_regs[SI_REG] = 0;
3599 call_used_regs[DI_REG] = 0;
3600 call_used_regs[XMM6_REG] = 0;
3601 call_used_regs[XMM7_REG] = 0;
3602 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3603 call_used_regs[i] = 0;
3604 }
3605
3606 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3607 other call-clobbered regs for 64-bit. */
3608 if (TARGET_64BIT)
3609 {
3610 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3611
3612 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3613 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3614 && call_used_regs[i])
3615 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3616 }
3617
3618 /* If MMX is disabled, squash the registers. */
3619 if (! TARGET_MMX)
3620 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3621 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3622 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3623
3624 /* If SSE is disabled, squash the registers. */
3625 if (! TARGET_SSE)
3626 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3627 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3628 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3629
3630 /* If the FPU is disabled, squash the registers. */
3631 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3632 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3633 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3634 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3635
3636 /* If 32-bit, squash the 64-bit registers. */
3637 if (! TARGET_64BIT)
3638 {
3639 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3640 reg_names[i] = "";
3641 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3642 reg_names[i] = "";
3643 }
3644 }
3645
3646 \f
3647 /* Save the current options */
3648
3649 static void
3650 ix86_function_specific_save (struct cl_target_option *ptr)
3651 {
3652 ptr->arch = ix86_arch;
3653 ptr->schedule = ix86_schedule;
3654 ptr->tune = ix86_tune;
3655 ptr->fpmath = ix86_fpmath;
3656 ptr->branch_cost = ix86_branch_cost;
3657 ptr->tune_defaulted = ix86_tune_defaulted;
3658 ptr->arch_specified = ix86_arch_specified;
3659 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3660 ptr->target_flags_explicit = target_flags_explicit;
3661
3662 /* The fields are char but the variables are not; make sure the
3663 values fit in the fields. */
3664 gcc_assert (ptr->arch == ix86_arch);
3665 gcc_assert (ptr->schedule == ix86_schedule);
3666 gcc_assert (ptr->tune == ix86_tune);
3667 gcc_assert (ptr->fpmath == ix86_fpmath);
3668 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3669 }
3670
3671 /* Restore the current options */
3672
3673 static void
3674 ix86_function_specific_restore (struct cl_target_option *ptr)
3675 {
3676 enum processor_type old_tune = ix86_tune;
3677 enum processor_type old_arch = ix86_arch;
3678 unsigned int ix86_arch_mask, ix86_tune_mask;
3679 int i;
3680
3681 ix86_arch = (enum processor_type) ptr->arch;
3682 ix86_schedule = (enum attr_cpu) ptr->schedule;
3683 ix86_tune = (enum processor_type) ptr->tune;
3684 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3685 ix86_branch_cost = ptr->branch_cost;
3686 ix86_tune_defaulted = ptr->tune_defaulted;
3687 ix86_arch_specified = ptr->arch_specified;
3688 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3689 target_flags_explicit = ptr->target_flags_explicit;
3690
3691 /* Recreate the arch feature tests if the arch changed */
3692 if (old_arch != ix86_arch)
3693 {
3694 ix86_arch_mask = 1u << ix86_arch;
3695 for (i = 0; i < X86_ARCH_LAST; ++i)
3696 ix86_arch_features[i]
3697 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3698 }
3699
3700 /* Recreate the tune optimization tests */
3701 if (old_tune != ix86_tune)
3702 {
3703 ix86_tune_mask = 1u << ix86_tune;
3704 for (i = 0; i < X86_TUNE_LAST; ++i)
3705 ix86_tune_features[i]
3706 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3707 }
3708 }
3709
3710 /* Print the current options */
3711
3712 static void
3713 ix86_function_specific_print (FILE *file, int indent,
3714 struct cl_target_option *ptr)
3715 {
3716 char *target_string
3717 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3718 NULL, NULL, NULL, false);
3719
3720 fprintf (file, "%*sarch = %d (%s)\n",
3721 indent, "",
3722 ptr->arch,
3723 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3724 ? cpu_names[ptr->arch]
3725 : "<unknown>"));
3726
3727 fprintf (file, "%*stune = %d (%s)\n",
3728 indent, "",
3729 ptr->tune,
3730 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3731 ? cpu_names[ptr->tune]
3732 : "<unknown>"));
3733
3734 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3735 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3736 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3737 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3738
3739 if (target_string)
3740 {
3741 fprintf (file, "%*s%s\n", indent, "", target_string);
3742 free (target_string);
3743 }
3744 }
3745
3746 \f
3747 /* Inner function to process the attribute((target(...))), take an argument and
3748 set the current options from the argument. If we have a list, recursively go
3749 over the list. */
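/* A hypothetical example of the strings handled here (user code, not part
   of this file):
       int foo (void) __attribute__ ((target ("sse4.2,no-3dnow,arch=core2")));
   Each comma-separated item is an ISA flag, a "no-" prefixed ISA flag, a
   string option (arch=, tune= or fpmath=), or one of the flag options
   listed in the attrs[] table below. */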
3750
3751 static bool
3752 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3753 {
3754 char *next_optstr;
3755 bool ret = true;
3756
3757 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3758 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3759 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3760 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3761
3762 enum ix86_opt_type
3763 {
3764 ix86_opt_unknown,
3765 ix86_opt_yes,
3766 ix86_opt_no,
3767 ix86_opt_str,
3768 ix86_opt_isa
3769 };
3770
3771 static const struct
3772 {
3773 const char *string;
3774 size_t len;
3775 enum ix86_opt_type type;
3776 int opt;
3777 int mask;
3778 } attrs[] = {
3779 /* isa options */
3780 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3781 IX86_ATTR_ISA ("abm", OPT_mabm),
3782 IX86_ATTR_ISA ("aes", OPT_maes),
3783 IX86_ATTR_ISA ("avx", OPT_mavx),
3784 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3785 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3786 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3787 IX86_ATTR_ISA ("sse", OPT_msse),
3788 IX86_ATTR_ISA ("sse2", OPT_msse2),
3789 IX86_ATTR_ISA ("sse3", OPT_msse3),
3790 IX86_ATTR_ISA ("sse4", OPT_msse4),
3791 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3792 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3793 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3794 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3795 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3796 IX86_ATTR_ISA ("xop", OPT_mxop),
3797 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3798
3799 /* string options */
3800 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3801 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3802 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3803
3804 /* flag options */
3805 IX86_ATTR_YES ("cld",
3806 OPT_mcld,
3807 MASK_CLD),
3808
3809 IX86_ATTR_NO ("fancy-math-387",
3810 OPT_mfancy_math_387,
3811 MASK_NO_FANCY_MATH_387),
3812
3813 IX86_ATTR_YES ("ieee-fp",
3814 OPT_mieee_fp,
3815 MASK_IEEE_FP),
3816
3817 IX86_ATTR_YES ("inline-all-stringops",
3818 OPT_minline_all_stringops,
3819 MASK_INLINE_ALL_STRINGOPS),
3820
3821 IX86_ATTR_YES ("inline-stringops-dynamically",
3822 OPT_minline_stringops_dynamically,
3823 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3824
3825 IX86_ATTR_NO ("align-stringops",
3826 OPT_mno_align_stringops,
3827 MASK_NO_ALIGN_STRINGOPS),
3828
3829 IX86_ATTR_YES ("recip",
3830 OPT_mrecip,
3831 MASK_RECIP),
3832
3833 };
3834
3835 /* If this is a list, recurse to get the options. */
3836 if (TREE_CODE (args) == TREE_LIST)
3837 {
3838 bool ret = true;
3839
3840 for (; args; args = TREE_CHAIN (args))
3841 if (TREE_VALUE (args)
3842 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3843 ret = false;
3844
3845 return ret;
3846 }
3847
3848 else if (TREE_CODE (args) != STRING_CST)
3849 gcc_unreachable ();
3850
3851 /* Handle multiple arguments separated by commas. */
3852 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3853
3854 while (next_optstr && *next_optstr != '\0')
3855 {
3856 char *p = next_optstr;
3857 char *orig_p = p;
3858 char *comma = strchr (next_optstr, ',');
3859 const char *opt_string;
3860 size_t len, opt_len;
3861 int opt;
3862 bool opt_set_p;
3863 char ch;
3864 unsigned i;
3865 enum ix86_opt_type type = ix86_opt_unknown;
3866 int mask = 0;
3867
3868 if (comma)
3869 {
3870 *comma = '\0';
3871 len = comma - next_optstr;
3872 next_optstr = comma + 1;
3873 }
3874 else
3875 {
3876 len = strlen (p);
3877 next_optstr = NULL;
3878 }
3879
3880 /* Recognize no-xxx. */
3881 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3882 {
3883 opt_set_p = false;
3884 p += 3;
3885 len -= 3;
3886 }
3887 else
3888 opt_set_p = true;
3889
3890 /* Find the option. */
3891 ch = *p;
3892 opt = N_OPTS;
3893 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3894 {
3895 type = attrs[i].type;
3896 opt_len = attrs[i].len;
3897 if (ch == attrs[i].string[0]
3898 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3899 && memcmp (p, attrs[i].string, opt_len) == 0)
3900 {
3901 opt = attrs[i].opt;
3902 mask = attrs[i].mask;
3903 opt_string = attrs[i].string;
3904 break;
3905 }
3906 }
3907
3908 /* Process the option. */
3909 if (opt == N_OPTS)
3910 {
3911 error ("attribute(target(\"%s\")) is unknown", orig_p);
3912 ret = false;
3913 }
3914
3915 else if (type == ix86_opt_isa)
3916 ix86_handle_option (opt, p, opt_set_p);
3917
3918 else if (type == ix86_opt_yes || type == ix86_opt_no)
3919 {
3920 if (type == ix86_opt_no)
3921 opt_set_p = !opt_set_p;
3922
3923 if (opt_set_p)
3924 target_flags |= mask;
3925 else
3926 target_flags &= ~mask;
3927 }
3928
3929 else if (type == ix86_opt_str)
3930 {
3931 if (p_strings[opt])
3932 {
3933 error ("option(\"%s\") was already specified", opt_string);
3934 ret = false;
3935 }
3936 else
3937 p_strings[opt] = xstrdup (p + opt_len);
3938 }
3939
3940 else
3941 gcc_unreachable ();
3942 }
3943
3944 return ret;
3945 }
3946
3947 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3948
3949 tree
3950 ix86_valid_target_attribute_tree (tree args)
3951 {
3952 const char *orig_arch_string = ix86_arch_string;
3953 const char *orig_tune_string = ix86_tune_string;
3954 const char *orig_fpmath_string = ix86_fpmath_string;
3955 int orig_tune_defaulted = ix86_tune_defaulted;
3956 int orig_arch_specified = ix86_arch_specified;
3957 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3958 tree t = NULL_TREE;
3959 int i;
3960 struct cl_target_option *def
3961 = TREE_TARGET_OPTION (target_option_default_node);
3962
3963 /* Process each of the options on the chain. */
3964 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3965 return NULL_TREE;
3966
3967 /* If the changed options are different from the default, rerun override_options,
3968 and then save the options away. The string options are attribute options,
3969 and will be undone when we copy the save structure. */
3970 if (ix86_isa_flags != def->ix86_isa_flags
3971 || target_flags != def->target_flags
3972 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3973 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3974 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3975 {
3976 /* If we are using the default tune= or arch=, undo the string assigned,
3977 and use the default. */
3978 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3979 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3980 else if (!orig_arch_specified)
3981 ix86_arch_string = NULL;
3982
3983 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3984 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3985 else if (orig_tune_defaulted)
3986 ix86_tune_string = NULL;
3987
3988 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3989 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3990 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3991 else if (!TARGET_64BIT && TARGET_SSE)
3992 ix86_fpmath_string = "sse,387";
3993
3994 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3995 override_options (false);
3996
3997 /* Add any builtin functions with the new isa if any. */
3998 ix86_add_new_builtins (ix86_isa_flags);
3999
4000 /* Save the current options unless we are validating options for
4001 #pragma. */
4002 t = build_target_option_node ();
4003
4004 ix86_arch_string = orig_arch_string;
4005 ix86_tune_string = orig_tune_string;
4006 ix86_fpmath_string = orig_fpmath_string;
4007
4008 /* Free up memory allocated to hold the strings */
4009 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4010 if (option_strings[i])
4011 free (option_strings[i]);
4012 }
4013
4014 return t;
4015 }
4016
4017 /* Hook to validate attribute((target("string"))). */
4018
4019 static bool
4020 ix86_valid_target_attribute_p (tree fndecl,
4021 tree ARG_UNUSED (name),
4022 tree args,
4023 int ARG_UNUSED (flags))
4024 {
4025 struct cl_target_option cur_target;
4026 bool ret = true;
4027 tree old_optimize = build_optimization_node ();
4028 tree new_target, new_optimize;
4029 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4030
4031 /* If the function changed the optimization levels as well as setting target
4032 options, start with the optimizations specified. */
4033 if (func_optimize && func_optimize != old_optimize)
4034 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
4035
4036 /* The target attributes may also change some optimization flags, so update
4037 the optimization options if necessary. */
4038 cl_target_option_save (&cur_target);
4039 new_target = ix86_valid_target_attribute_tree (args);
4040 new_optimize = build_optimization_node ();
4041
4042 if (!new_target)
4043 ret = false;
4044
4045 else if (fndecl)
4046 {
4047 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4048
4049 if (old_optimize != new_optimize)
4050 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4051 }
4052
4053 cl_target_option_restore (&cur_target);
4054
4055 if (old_optimize != new_optimize)
4056 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
4057
4058 return ret;
4059 }
4060
4061 \f
4062 /* Hook to determine if one function can safely inline another. */
4063
4064 static bool
4065 ix86_can_inline_p (tree caller, tree callee)
4066 {
4067 bool ret = false;
4068 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4069 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4070
4071 /* If callee has no option attributes, then it is ok to inline. */
4072 if (!callee_tree)
4073 ret = true;
4074
4075 /* If caller has no option attributes, but callee does then it is not ok to
4076 inline. */
4077 else if (!caller_tree)
4078 ret = false;
4079
4080 else
4081 {
4082 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4083 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4084
4085 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
4086 function can inline an SSE2 function, but an SSE2 function can't inline
4087 an SSE4 function. */
4088 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
4089 != callee_opts->ix86_isa_flags)
4090 ret = false;
4091
4092 /* See if we have the same non-isa options. */
4093 else if (caller_opts->target_flags != callee_opts->target_flags)
4094 ret = false;
4095
4096 /* See if arch, tune, etc. are the same. */
4097 else if (caller_opts->arch != callee_opts->arch)
4098 ret = false;
4099
4100 else if (caller_opts->tune != callee_opts->tune)
4101 ret = false;
4102
4103 else if (caller_opts->fpmath != callee_opts->fpmath)
4104 ret = false;
4105
4106 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4107 ret = false;
4108
4109 else
4110 ret = true;
4111 }
4112
4113 return ret;
4114 }
4115
4116 \f
4117 /* Remember the last target of ix86_set_current_function. */
4118 static GTY(()) tree ix86_previous_fndecl;
4119
4120 /* Establish appropriate back-end context for processing the function
4121 FNDECL. The argument might be NULL to indicate processing at top
4122 level, outside of any function scope. */
4123 static void
4124 ix86_set_current_function (tree fndecl)
4125 {
4126 /* Only change the context if the function changes. This hook is called
4127 several times in the course of compiling a function, and we don't want to
4128 slow things down too much or call target_reinit when it isn't safe. */
4129 if (fndecl && fndecl != ix86_previous_fndecl)
4130 {
4131 tree old_tree = (ix86_previous_fndecl
4132 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4133 : NULL_TREE);
4134
4135 tree new_tree = (fndecl
4136 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4137 : NULL_TREE);
4138
4139 ix86_previous_fndecl = fndecl;
4140 if (old_tree == new_tree)
4141 ;
4142
4143 else if (new_tree)
4144 {
4145 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4146 target_reinit ();
4147 }
4148
4149 else if (old_tree)
4150 {
4151 struct cl_target_option *def
4152 = TREE_TARGET_OPTION (target_option_current_node);
4153
4154 cl_target_option_restore (def);
4155 target_reinit ();
4156 }
4157 }
4158 }
4159
4160 \f
4161 /* Return true if this goes in large data/bss. */
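/* Under -mcmodel=medium this covers variables placed explicitly in the
   .ldata or .lbss sections, as well as objects whose size exceeds the
   -mlarge-data-threshold value held in ix86_section_threshold. */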
4162
4163 static bool
4164 ix86_in_large_data_p (tree exp)
4165 {
4166 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4167 return false;
4168
4169 /* Functions are never large data. */
4170 if (TREE_CODE (exp) == FUNCTION_DECL)
4171 return false;
4172
4173 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4174 {
4175 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4176 if (strcmp (section, ".ldata") == 0
4177 || strcmp (section, ".lbss") == 0)
4178 return true;
4179 return false;
4180 }
4181 else
4182 {
4183 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4184
4185 /* If this is an incomplete type with size 0, then we can't put it
4186 in data because it might be too big when completed. */
4187 if (!size || size > ix86_section_threshold)
4188 return true;
4189 }
4190
4191 return false;
4192 }
4193
4194 /* Switch to the appropriate section for output of DECL.
4195 DECL is either a `VAR_DECL' node or a constant of some sort.
4196 RELOC indicates whether forming the initial value of DECL requires
4197 link-time relocations. */
4198
4199 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4200 ATTRIBUTE_UNUSED;
4201
4202 static section *
4203 x86_64_elf_select_section (tree decl, int reloc,
4204 unsigned HOST_WIDE_INT align)
4205 {
4206 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4207 && ix86_in_large_data_p (decl))
4208 {
4209 const char *sname = NULL;
4210 unsigned int flags = SECTION_WRITE;
4211 switch (categorize_decl_for_section (decl, reloc))
4212 {
4213 case SECCAT_DATA:
4214 sname = ".ldata";
4215 break;
4216 case SECCAT_DATA_REL:
4217 sname = ".ldata.rel";
4218 break;
4219 case SECCAT_DATA_REL_LOCAL:
4220 sname = ".ldata.rel.local";
4221 break;
4222 case SECCAT_DATA_REL_RO:
4223 sname = ".ldata.rel.ro";
4224 break;
4225 case SECCAT_DATA_REL_RO_LOCAL:
4226 sname = ".ldata.rel.ro.local";
4227 break;
4228 case SECCAT_BSS:
4229 sname = ".lbss";
4230 flags |= SECTION_BSS;
4231 break;
4232 case SECCAT_RODATA:
4233 case SECCAT_RODATA_MERGE_STR:
4234 case SECCAT_RODATA_MERGE_STR_INIT:
4235 case SECCAT_RODATA_MERGE_CONST:
4236 sname = ".lrodata";
4237 flags = 0;
4238 break;
4239 case SECCAT_SRODATA:
4240 case SECCAT_SDATA:
4241 case SECCAT_SBSS:
4242 gcc_unreachable ();
4243 case SECCAT_TEXT:
4244 case SECCAT_TDATA:
4245 case SECCAT_TBSS:
4246 /* We don't split these for the medium model. Place them into
4247 default sections and hope for the best. */
4248 break;
4249 case SECCAT_EMUTLS_VAR:
4250 case SECCAT_EMUTLS_TMPL:
4251 gcc_unreachable ();
4252 }
4253 if (sname)
4254 {
4255 /* We might get called with string constants, but get_named_section
4256 doesn't like them as they are not DECLs. Also, we need to set
4257 flags in that case. */
4258 if (!DECL_P (decl))
4259 return get_section (sname, flags, NULL);
4260 return get_named_section (decl, sname, reloc);
4261 }
4262 }
4263 return default_elf_select_section (decl, reloc, align);
4264 }
4265
4266 /* Build up a unique section name, expressed as a
4267 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4268 RELOC indicates whether the initial value of EXP requires
4269 link-time relocations. */
4270
4271 static void ATTRIBUTE_UNUSED
4272 x86_64_elf_unique_section (tree decl, int reloc)
4273 {
4274 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4275 && ix86_in_large_data_p (decl))
4276 {
4277 const char *prefix = NULL;
4278 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4279 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4280
4281 switch (categorize_decl_for_section (decl, reloc))
4282 {
4283 case SECCAT_DATA:
4284 case SECCAT_DATA_REL:
4285 case SECCAT_DATA_REL_LOCAL:
4286 case SECCAT_DATA_REL_RO:
4287 case SECCAT_DATA_REL_RO_LOCAL:
4288 prefix = one_only ? ".ld" : ".ldata";
4289 break;
4290 case SECCAT_BSS:
4291 prefix = one_only ? ".lb" : ".lbss";
4292 break;
4293 case SECCAT_RODATA:
4294 case SECCAT_RODATA_MERGE_STR:
4295 case SECCAT_RODATA_MERGE_STR_INIT:
4296 case SECCAT_RODATA_MERGE_CONST:
4297 prefix = one_only ? ".lr" : ".lrodata";
4298 break;
4299 case SECCAT_SRODATA:
4300 case SECCAT_SDATA:
4301 case SECCAT_SBSS:
4302 gcc_unreachable ();
4303 case SECCAT_TEXT:
4304 case SECCAT_TDATA:
4305 case SECCAT_TBSS:
4306 /* We don't split these for the medium model. Place them into
4307 default sections and hope for the best. */
4308 break;
4309 case SECCAT_EMUTLS_VAR:
4310 prefix = targetm.emutls.var_section;
4311 break;
4312 case SECCAT_EMUTLS_TMPL:
4313 prefix = targetm.emutls.tmpl_section;
4314 break;
4315 }
4316 if (prefix)
4317 {
4318 const char *name, *linkonce;
4319 char *string;
4320
4321 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4322 name = targetm.strip_name_encoding (name);
4323
4324 /* If we're using one_only, then there needs to be a .gnu.linkonce
4325 prefix to the section name. */
4326 linkonce = one_only ? ".gnu.linkonce" : "";
4327
4328 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4329
4330 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4331 return;
4332 }
4333 }
4334 default_unique_section (decl, reloc);
4335 }
4336
4337 #ifdef COMMON_ASM_OP
4338 /* This says how to output assembler code to declare an
4339 uninitialized external linkage data object.
4340
4341 For medium model x86-64 we need to use .largecomm opcode for
4342 large objects. */
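/* A qualifying object thus comes out roughly as (hypothetical name and
   numbers)
       .largecomm  big_buf,1048576,32
   i.e. name, size in bytes, alignment in bytes, while smaller objects
   keep using the regular COMMON_ASM_OP directive. */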
4343 void
4344 x86_elf_aligned_common (FILE *file,
4345 const char *name, unsigned HOST_WIDE_INT size,
4346 int align)
4347 {
4348 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4349 && size > (unsigned int)ix86_section_threshold)
4350 fputs (".largecomm\t", file);
4351 else
4352 fputs (COMMON_ASM_OP, file);
4353 assemble_name (file, name);
4354 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4355 size, align / BITS_PER_UNIT);
4356 }
4357 #endif
4358
4359 /* Utility function for targets to use in implementing
4360 ASM_OUTPUT_ALIGNED_BSS. */
4361
4362 void
4363 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4364 const char *name, unsigned HOST_WIDE_INT size,
4365 int align)
4366 {
4367 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4368 && size > (unsigned int)ix86_section_threshold)
4369 switch_to_section (get_named_section (decl, ".lbss", 0));
4370 else
4371 switch_to_section (bss_section);
4372 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4373 #ifdef ASM_DECLARE_OBJECT_NAME
4374 last_assemble_variable_decl = decl;
4375 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4376 #else
4377 /* Standard thing is just output label for the object. */
4378 ASM_OUTPUT_LABEL (file, name);
4379 #endif /* ASM_DECLARE_OBJECT_NAME */
4380 ASM_OUTPUT_SKIP (file, size ? size : 1);
4381 }
4382 \f
4383 void
4384 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4385 {
4386 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4387 make the problem with not enough registers even worse. */
4388 #ifdef INSN_SCHEDULING
4389 if (level > 1)
4390 flag_schedule_insns = 0;
4391 #endif
4392
4393 /* For -O2 and beyond, turn on -fzee for x86_64 target. */
4394 if (level > 1 && TARGET_64BIT)
4395 flag_zee = 1;
4396
4397 if (TARGET_MACHO)
4398 /* The Darwin libraries never set errno, so we might as well
4399 avoid calling them when that's the only reason we would. */
4400 flag_errno_math = 0;
4401
4402 /* The default values of these switches depend on TARGET_64BIT, which is
4403 not known at this moment. Mark these values with 2 and let the user
4404 override them. If there is no command line option specifying them,
4405 we will set the defaults in override_options. */
4406 if (optimize >= 1)
4407 flag_omit_frame_pointer = 2;
4408 flag_pcc_struct_return = 2;
4409 flag_asynchronous_unwind_tables = 2;
4410 flag_vect_cost_model = 1;
4411 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4412 SUBTARGET_OPTIMIZATION_OPTIONS;
4413 #endif
4414 }
4415 \f
4416 /* Decide whether we can make a sibling call to a function. DECL is the
4417 declaration of the function being targeted by the call and EXP is the
4418 CALL_EXPR representing the call. */
4419
4420 static bool
4421 ix86_function_ok_for_sibcall (tree decl, tree exp)
4422 {
4423 tree type, decl_or_type;
4424 rtx a, b;
4425
4426 /* If we are generating position-independent code, we cannot sibcall
4427 optimize any indirect call, or a direct call to a global function,
4428 as the PLT requires %ebx be live. */
4429 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4430 return false;
4431
4432 /* If we need to align the outgoing stack, then sibcalling would
4433 unalign the stack, which may break the called function. */
4434 if (ix86_minimum_incoming_stack_boundary (true)
4435 < PREFERRED_STACK_BOUNDARY)
4436 return false;
4437
4438 if (decl)
4439 {
4440 decl_or_type = decl;
4441 type = TREE_TYPE (decl);
4442 }
4443 else
4444 {
4445 /* We're looking at the CALL_EXPR, we need the type of the function. */
4446 type = CALL_EXPR_FN (exp); /* pointer expression */
4447 type = TREE_TYPE (type); /* pointer type */
4448 type = TREE_TYPE (type); /* function type */
4449 decl_or_type = type;
4450 }
4451
4452 /* Check that the return value locations are the same. Like
4453 if we are returning floats on the 80387 register stack, we cannot
4454 make a sibcall from a function that doesn't return a float to a
4455 function that does or, conversely, from a function that does return
4456 a float to a function that doesn't; the necessary stack adjustment
4457 would not be executed. This is also the place we notice
4458 differences in the return value ABI. Note that it is ok for one
4459 of the functions to have void return type as long as the return
4460 value of the other is passed in a register. */
4461 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4462 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4463 cfun->decl, false);
4464 if (STACK_REG_P (a) || STACK_REG_P (b))
4465 {
4466 if (!rtx_equal_p (a, b))
4467 return false;
4468 }
4469 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4470 ;
4471 else if (!rtx_equal_p (a, b))
4472 return false;
4473
4474 if (TARGET_64BIT)
4475 {
4476 /* The SYSV ABI has more call-clobbered registers;
4477 disallow sibcalls from MS to SYSV. */
4478 if (cfun->machine->call_abi == MS_ABI
4479 && ix86_function_type_abi (type) == SYSV_ABI)
4480 return false;
4481 }
4482 else
4483 {
4484 /* If this call is indirect, we'll need to be able to use a
4485 call-clobbered register for the address of the target function.
4486 Make sure that all such registers are not used for passing
4487 parameters. Note that DLLIMPORT functions are indirect. */
4488 if (!decl
4489 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4490 {
4491 if (ix86_function_regparm (type, NULL) >= 3)
4492 {
4493 /* ??? Need to count the actual number of registers to be used,
4494 not the possible number of registers. Fix later. */
4495 return false;
4496 }
4497 }
4498 }
4499
4500 /* Otherwise okay. That also includes certain types of indirect calls. */
4501 return true;
4502 }
4503
4504 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4505 and "sseregparm" calling convention attributes;
4506 arguments as in struct attribute_spec.handler. */
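/* Hypothetical declarations that would reach this handler (user code, not
   part of this file):
       int __attribute__ ((fastcall)) f (int, int);          args in ECX, EDX
       int __attribute__ ((regparm (3))) g (int, int, int);  args in EAX, EDX, ECX
   Mutually exclusive combinations such as fastcall + regparm are
   diagnosed below. */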
4507
4508 static tree
4509 ix86_handle_cconv_attribute (tree *node, tree name,
4510 tree args,
4511 int flags ATTRIBUTE_UNUSED,
4512 bool *no_add_attrs)
4513 {
4514 if (TREE_CODE (*node) != FUNCTION_TYPE
4515 && TREE_CODE (*node) != METHOD_TYPE
4516 && TREE_CODE (*node) != FIELD_DECL
4517 && TREE_CODE (*node) != TYPE_DECL)
4518 {
4519 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4520 name);
4521 *no_add_attrs = true;
4522 return NULL_TREE;
4523 }
4524
4525 /* Can combine regparm with all attributes but fastcall. */
4526 if (is_attribute_p ("regparm", name))
4527 {
4528 tree cst;
4529
4530 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4531 {
4532 error ("fastcall and regparm attributes are not compatible");
4533 }
4534
4535 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4536 {
4537 error ("regparam and thiscall attributes are not compatible");
4538 }
4539
4540 cst = TREE_VALUE (args);
4541 if (TREE_CODE (cst) != INTEGER_CST)
4542 {
4543 warning (OPT_Wattributes,
4544 "%qE attribute requires an integer constant argument",
4545 name);
4546 *no_add_attrs = true;
4547 }
4548 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4549 {
4550 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4551 name, REGPARM_MAX);
4552 *no_add_attrs = true;
4553 }
4554
4555 return NULL_TREE;
4556 }
4557
4558 if (TARGET_64BIT)
4559 {
4560 /* Do not warn when emulating the MS ABI. */
4561 if ((TREE_CODE (*node) != FUNCTION_TYPE
4562 && TREE_CODE (*node) != METHOD_TYPE)
4563 || ix86_function_type_abi (*node) != MS_ABI)
4564 warning (OPT_Wattributes, "%qE attribute ignored",
4565 name);
4566 *no_add_attrs = true;
4567 return NULL_TREE;
4568 }
4569
4570 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4571 if (is_attribute_p ("fastcall", name))
4572 {
4573 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4574 {
4575 error ("fastcall and cdecl attributes are not compatible");
4576 }
4577 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4578 {
4579 error ("fastcall and stdcall attributes are not compatible");
4580 }
4581 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4582 {
4583 error ("fastcall and regparm attributes are not compatible");
4584 }
4585 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4586 {
4587 error ("fastcall and thiscall attributes are not compatible");
4588 }
4589 }
4590
4591 /* Can combine stdcall with fastcall (redundant), regparm and
4592 sseregparm. */
4593 else if (is_attribute_p ("stdcall", name))
4594 {
4595 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4596 {
4597 error ("stdcall and cdecl attributes are not compatible");
4598 }
4599 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4600 {
4601 error ("stdcall and fastcall attributes are not compatible");
4602 }
4603 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4604 {
4605 error ("stdcall and thiscall attributes are not compatible");
4606 }
4607 }
4608
4609 /* Can combine cdecl with regparm and sseregparm. */
4610 else if (is_attribute_p ("cdecl", name))
4611 {
4612 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4613 {
4614 error ("stdcall and cdecl attributes are not compatible");
4615 }
4616 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4617 {
4618 error ("fastcall and cdecl attributes are not compatible");
4619 }
4620 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4621 {
4622 error ("cdecl and thiscall attributes are not compatible");
4623 }
4624 }
4625 else if (is_attribute_p ("thiscall", name))
4626 {
4627 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4628 warning (OPT_Wattributes, "%qE attribute is used for none class-method",
4629 name);
4630 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4631 {
4632 error ("stdcall and thiscall attributes are not compatible");
4633 }
4634 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4635 {
4636 error ("fastcall and thiscall attributes are not compatible");
4637 }
4638 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4639 {
4640 error ("cdecl and thiscall attributes are not compatible");
4641 }
4642 }
4643
4644 /* Can combine sseregparm with all attributes. */
4645
4646 return NULL_TREE;
4647 }
4648
4649 /* Return 0 if the attributes for two types are incompatible, 1 if they
4650 are compatible, and 2 if they are nearly compatible (which causes a
4651 warning to be generated). */
4652
4653 static int
4654 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4655 {
4656 /* Check for mismatch of non-default calling convention. */
4657 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4658
4659 if (TREE_CODE (type1) != FUNCTION_TYPE
4660 && TREE_CODE (type1) != METHOD_TYPE)
4661 return 1;
4662
4663 /* Check for mismatched fastcall/regparm types. */
4664 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4665 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4666 || (ix86_function_regparm (type1, NULL)
4667 != ix86_function_regparm (type2, NULL)))
4668 return 0;
4669
4670 /* Check for mismatched sseregparm types. */
4671 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4672 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4673 return 0;
4674
4675 /* Check for mismatched thiscall types. */
4676 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4677 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4678 return 0;
4679
4680 /* Check for mismatched return types (cdecl vs stdcall). */
4681 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4682 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4683 return 0;
4684
4685 return 1;
4686 }
4687 \f
4688 /* Return the regparm value for a function with the indicated TYPE and DECL.
4689 DECL may be NULL when calling function indirectly
4690 or considering a libcall. */
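/* In 32-bit mode the regparm registers are EAX, EDX and ECX, so a result
   of 2 means the first two integer arguments are passed in EAX and EDX. */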
4691
4692 static int
4693 ix86_function_regparm (const_tree type, const_tree decl)
4694 {
4695 tree attr;
4696 int regparm;
4697
4698 if (TARGET_64BIT)
4699 return (ix86_function_type_abi (type) == SYSV_ABI
4700 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4701
4702 regparm = ix86_regparm;
4703 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4704 if (attr)
4705 {
4706 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4707 return regparm;
4708 }
4709
4710 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4711 return 2;
4712
4713 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4714 return 1;
4715
4716 /* Use register calling convention for local functions when possible. */
4717 if (decl
4718 && TREE_CODE (decl) == FUNCTION_DECL
4719 && optimize
4720 && !profile_flag)
4721 {
4722 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4723 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4724 if (i && i->local)
4725 {
4726 int local_regparm, globals = 0, regno;
4727
4728 /* Make sure no regparm register is taken by a
4729 fixed register variable. */
4730 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4731 if (fixed_regs[local_regparm])
4732 break;
4733
4734 /* We don't want to use regparm(3) for nested functions as
4735 these use a static chain pointer in the third argument. */
4736 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4737 local_regparm = 2;
4738
4739 /* Each fixed register usage increases register pressure,
4740 so fewer registers should be used for argument passing.
4741 This functionality can be overridden by an explicit
4742 regparm value. */
4743 for (regno = 0; regno <= DI_REG; regno++)
4744 if (fixed_regs[regno])
4745 globals++;
4746
4747 local_regparm
4748 = globals < local_regparm ? local_regparm - globals : 0;
4749
4750 if (local_regparm > regparm)
4751 regparm = local_regparm;
4752 }
4753 }
4754
4755 return regparm;
4756 }
4757
4758 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4759 DFmode (2) arguments in SSE registers for a function with the
4760 indicated TYPE and DECL. DECL may be NULL when calling function
4761 indirectly or considering a libcall. Otherwise return 0. */
4762
4763 static int
4764 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4765 {
4766 gcc_assert (!TARGET_64BIT);
4767
4768 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4769 by the sseregparm attribute. */
4770 if (TARGET_SSEREGPARM
4771 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4772 {
4773 if (!TARGET_SSE)
4774 {
4775 if (warn)
4776 {
4777 if (decl)
4778 error ("Calling %qD with attribute sseregparm without "
4779 "SSE/SSE2 enabled", decl);
4780 else
4781 error ("Calling %qT with attribute sseregparm without "
4782 "SSE/SSE2 enabled", type);
4783 }
4784 return 0;
4785 }
4786
4787 return 2;
4788 }
4789
4790 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4791 (and DFmode for SSE2) arguments in SSE registers. */
4792 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4793 {
4794 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4795 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4796 if (i && i->local)
4797 return TARGET_SSE2 ? 2 : 1;
4798 }
4799
4800 return 0;
4801 }
4802
4803 /* Return true if EAX is live at the start of the function. Used by
4804 ix86_expand_prologue to determine if we need special help before
4805 calling allocate_stack_worker. */
4806
4807 static bool
4808 ix86_eax_live_at_start_p (void)
4809 {
4810 /* Cheat. Don't bother working forward from ix86_function_regparm
4811 to the function type to whether an actual argument is located in
4812 eax. Instead just look at cfg info, which is still close enough
4813 to correct at this point. This gives false positives for broken
4814 functions that might use uninitialized data that happens to be
4815 allocated in eax, but who cares? */
4816 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4817 }
4818
4819 /* Value is the number of bytes of arguments automatically
4820 popped when returning from a subroutine call.
4821 FUNDECL is the declaration node of the function (as a tree),
4822 FUNTYPE is the data type of the function (as a tree),
4823 or for a library call it is an identifier node for the subroutine name.
4824 SIZE is the number of bytes of arguments passed on the stack.
4825
4826 On the 80386, the RTD insn may be used to pop them if the number
4827 of args is fixed, but if the number is variable then the caller
4828 must pop them all. RTD can't be used for library calls now
4829 because the library is compiled with the Unix compiler.
4830 Use of RTD is a selectable option, since it is incompatible with
4831 standard Unix calling sequences. If the option is not selected,
4832 the caller must always pop the args.
4833
4834 The attribute stdcall is equivalent to RTD on a per module basis. */
4835
4836 int
4837 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4838 {
4839 int rtd;
4840
4841 /* None of the 64-bit ABIs pop arguments. */
4842 if (TARGET_64BIT)
4843 return 0;
4844
4845 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4846
4847 /* Cdecl functions override -mrtd, and never pop the stack. */
4848 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4849 {
4850 /* Stdcall and fastcall functions will pop the stack if not
4851 variable args. */
4852 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4853 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4854 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4855 rtd = 1;
4856
4857 if (rtd && ! stdarg_p (funtype))
4858 return size;
4859 }
4860
4861 /* Lose any fake structure return argument if it is passed on the stack. */
4862 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4863 && !KEEP_AGGREGATE_RETURN_POINTER)
4864 {
4865 int nregs = ix86_function_regparm (funtype, fundecl);
4866 if (nregs == 0)
4867 return GET_MODE_SIZE (Pmode);
4868 }
4869
4870 return 0;
4871 }
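/* Illustrative sketch (not part of GCC itself): for the 32-bit callee-pop
   conventions handled above,

       // Hypothetical user code.
       __attribute__ ((stdcall)) int sum (int a, int b);  // callee pops 8 bytes
       __attribute__ ((cdecl)) int vsum (int n, ...);     // caller pops

   a fixed-argument stdcall (or fastcall/thiscall) function returns with
   "ret $8" and ix86_return_pops_args reports SIZE, while cdecl and variadic
   functions leave the cleanup to the caller and the hook reports 0.  */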
4872 \f
4873 /* Argument support functions. */
4874
4875 /* Return true when REGNO may be used to pass function parameters. */
4876 bool
4877 ix86_function_arg_regno_p (int regno)
4878 {
4879 int i;
4880 const int *parm_regs;
4881
4882 if (!TARGET_64BIT)
4883 {
4884 if (TARGET_MACHO)
4885 return (regno < REGPARM_MAX
4886 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4887 else
4888 return (regno < REGPARM_MAX
4889 || (TARGET_MMX && MMX_REGNO_P (regno)
4890 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4891 || (TARGET_SSE && SSE_REGNO_P (regno)
4892 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4893 }
4894
4895 if (TARGET_MACHO)
4896 {
4897 if (SSE_REGNO_P (regno) && TARGET_SSE)
4898 return true;
4899 }
4900 else
4901 {
4902 if (TARGET_SSE && SSE_REGNO_P (regno)
4903 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4904 return true;
4905 }
4906
4907 /* TODO: The function should depend on current function ABI but
4908 builtins.c would need updating then. Therefore we use the
4909 default ABI. */
4910
4911 /* RAX is used as hidden argument to va_arg functions. */
4912 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4913 return true;
4914
4915 if (ix86_abi == MS_ABI)
4916 parm_regs = x86_64_ms_abi_int_parameter_registers;
4917 else
4918 parm_regs = x86_64_int_parameter_registers;
4919 for (i = 0; i < (ix86_abi == MS_ABI
4920 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4921 if (regno == parm_regs[i])
4922 return true;
4923 return false;
4924 }
4925
4926 /* Return true if we do not know how to pass TYPE solely in registers. */
4927
4928 static bool
4929 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4930 {
4931 if (must_pass_in_stack_var_size_or_pad (mode, type))
4932 return true;
4933
4934 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4935 The layout_type routine is crafty and tries to trick us into passing
4936 currently unsupported vector types on the stack by using TImode. */
4937 return (!TARGET_64BIT && mode == TImode
4938 && type && TREE_CODE (type) != VECTOR_TYPE);
4939 }
4940
4941 /* Return the size, in bytes, of the area reserved for arguments passed
4942 in registers for the function represented by FNDECL, depending on the
4943 ABI in use. */
4944 int
4945 ix86_reg_parm_stack_space (const_tree fndecl)
4946 {
4947 enum calling_abi call_abi = SYSV_ABI;
4948 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4949 call_abi = ix86_function_abi (fndecl);
4950 else
4951 call_abi = ix86_function_type_abi (fndecl);
4952 if (call_abi == MS_ABI)
4953 return 32;
4954 return 0;
4955 }
4956
4957 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
4958 call abi used. */
4959 enum calling_abi
4960 ix86_function_type_abi (const_tree fntype)
4961 {
4962 if (TARGET_64BIT && fntype != NULL)
4963 {
4964 enum calling_abi abi = ix86_abi;
4965 if (abi == SYSV_ABI)
4966 {
4967 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4968 abi = MS_ABI;
4969 }
4970 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4971 abi = SYSV_ABI;
4972 return abi;
4973 }
4974 return ix86_abi;
4975 }
4976
4977 static bool
4978 ix86_function_ms_hook_prologue (const_tree fntype)
4979 {
4980 if (!TARGET_64BIT)
4981 {
4982 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4983 {
4984 if (decl_function_context (fntype) != NULL_TREE)
4985 {
4986 error_at (DECL_SOURCE_LOCATION (fntype),
4987 "ms_hook_prologue is not compatible with nested function");
4988 }
4989
4990 return true;
4991 }
4992 }
4993 return false;
4994 }
4995
4996 static enum calling_abi
4997 ix86_function_abi (const_tree fndecl)
4998 {
4999 if (! fndecl)
5000 return ix86_abi;
5001 return ix86_function_type_abi (TREE_TYPE (fndecl));
5002 }
5003
5004 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
5005 call abi used. */
5006 enum calling_abi
5007 ix86_cfun_abi (void)
5008 {
5009 if (! cfun || ! TARGET_64BIT)
5010 return ix86_abi;
5011 return cfun->machine->call_abi;
5012 }
5013
5014 /* regclass.c */
5015 extern void init_regs (void);
5016
5017 /* Implementation of the call ABI switching target hook. Set up the call
5018 register sets specific to FNDECL. See also CONDITIONAL_REGISTER_USAGE
5019 for more details. */
5020 void
5021 ix86_call_abi_override (const_tree fndecl)
5022 {
5023 if (fndecl == NULL_TREE)
5024 cfun->machine->call_abi = ix86_abi;
5025 else
5026 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5027 }
5028
5029 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid
5030 expensive re-initialization of init_regs each time we switch function
5031 context, since this is needed only during RTL expansion. */
5032 static void
5033 ix86_maybe_switch_abi (void)
5034 {
5035 if (TARGET_64BIT &&
5036 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5037 reinit_regs ();
5038 }
5039
5040 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5041 for a call to a function whose data type is FNTYPE.
5042 For a library call, FNTYPE is 0. */
5043
5044 void
5045 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5046 tree fntype, /* tree ptr for function decl */
5047 rtx libname, /* SYMBOL_REF of library name or 0 */
5048 tree fndecl)
5049 {
5050 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
5051 memset (cum, 0, sizeof (*cum));
5052
5053 if (fndecl)
5054 cum->call_abi = ix86_function_abi (fndecl);
5055 else
5056 cum->call_abi = ix86_function_type_abi (fntype);
5057 /* Set up the number of registers to use for passing arguments. */
5058
5059 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5060 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5061 "or subtarget optimization implying it");
5062 cum->nregs = ix86_regparm;
5063 if (TARGET_64BIT)
5064 {
5065 cum->nregs = (cum->call_abi == SYSV_ABI
5066 ? X86_64_REGPARM_MAX
5067 : X86_64_MS_REGPARM_MAX);
5068 }
5069 if (TARGET_SSE)
5070 {
5071 cum->sse_nregs = SSE_REGPARM_MAX;
5072 if (TARGET_64BIT)
5073 {
5074 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5075 ? X86_64_SSE_REGPARM_MAX
5076 : X86_64_MS_SSE_REGPARM_MAX);
5077 }
5078 }
5079 if (TARGET_MMX)
5080 cum->mmx_nregs = MMX_REGPARM_MAX;
5081 cum->warn_avx = true;
5082 cum->warn_sse = true;
5083 cum->warn_mmx = true;
5084
5085 /* Because the type might mismatch between caller and callee, we need to
5086 use the actual type of the function for local calls.
5087 FIXME: cgraph_analyze can be told to record whether a function uses
5088 va_start, so for local functions maybe_vaarg could be made more
5089 aggressive, helping K&R code.
5090 FIXME: once the type system is fixed, we won't need this code anymore. */
5091 if (i && i->local)
5092 fntype = TREE_TYPE (fndecl);
5093 cum->maybe_vaarg = (fntype
5094 ? (!prototype_p (fntype) || stdarg_p (fntype))
5095 : !libname);
5096
5097 if (!TARGET_64BIT)
5098 {
5099 /* If there are variable arguments, then we won't pass anything
5100 in registers in 32-bit mode. */
5101 if (stdarg_p (fntype))
5102 {
5103 cum->nregs = 0;
5104 cum->sse_nregs = 0;
5105 cum->mmx_nregs = 0;
5106 cum->warn_avx = 0;
5107 cum->warn_sse = 0;
5108 cum->warn_mmx = 0;
5109 return;
5110 }
5111
5112 /* Use ecx and edx registers if function has fastcall attribute,
5113 else look for regparm information. */
5114 if (fntype)
5115 {
5116 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5117 {
5118 cum->nregs = 1;
5119 cum->fastcall = 1; /* Same first register as in fastcall. */
5120 }
5121 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5122 {
5123 cum->nregs = 2;
5124 cum->fastcall = 1;
5125 }
5126 else
5127 cum->nregs = ix86_function_regparm (fntype, fndecl);
5128 }
5129
5130 /* Set up the number of SSE registers used for passing SFmode
5131 and DFmode arguments. Warn for mismatching ABI. */
5132 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5133 }
5134 }
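/* Illustrative sketch (not part of GCC itself): the bookkeeping above means
   that for 32-bit code such as

       // Hypothetical user code; fastcall puts the first two DWORD-or-smaller
       // arguments in ECX and EDX, the rest on the stack.
       __attribute__ ((fastcall)) int clamp (int v, int lo, int hi);

   CUM starts with nregs == 2 and fastcall set, so V goes in ECX, LO in EDX
   and HI on the stack; thiscall starts with a single register (ECX), and a
   variadic function gets no argument registers at all in 32-bit mode.  */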
5135
5136 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5137 But in the case of vector types, it is some vector mode.
5138
5139 When we have only some of our vector isa extensions enabled, then there
5140 are some modes for which vector_mode_supported_p is false. For these
5141 modes, the generic vector support in gcc will choose some non-vector mode
5142 in order to implement the type. By computing the natural mode, we'll
5143 select the proper ABI location for the operand and not depend on whatever
5144 the middle-end decides to do with these vector types.
5145
5146 The middle-end can't deal with vector types larger than 16 bytes. In
5147 that case, we return the original mode and warn about the ABI change
5148 if CUM isn't NULL. */
5149
5150 static enum machine_mode
5151 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5152 {
5153 enum machine_mode mode = TYPE_MODE (type);
5154
5155 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5156 {
5157 HOST_WIDE_INT size = int_size_in_bytes (type);
5158 if ((size == 8 || size == 16 || size == 32)
5159 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5160 && TYPE_VECTOR_SUBPARTS (type) > 1)
5161 {
5162 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5163
5164 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5165 mode = MIN_MODE_VECTOR_FLOAT;
5166 else
5167 mode = MIN_MODE_VECTOR_INT;
5168
5169 /* Get the mode which has this inner mode and number of units. */
5170 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5171 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5172 && GET_MODE_INNER (mode) == innermode)
5173 {
5174 if (size == 32 && !TARGET_AVX)
5175 {
5176 static bool warnedavx;
5177
5178 if (cum
5179 && !warnedavx
5180 && cum->warn_avx)
5181 {
5182 warnedavx = true;
5183 warning (0, "AVX vector argument without AVX "
5184 "enabled changes the ABI");
5185 }
5186 return TYPE_MODE (type);
5187 }
5188 else
5189 return mode;
5190 }
5191
5192 gcc_unreachable ();
5193 }
5194 }
5195
5196 return mode;
5197 }
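/* Illustrative sketch (not part of GCC itself): with the generic vector
   extension a user may write

       // Hypothetical user code.
       typedef int v4si __attribute__ ((vector_size (16)));
       v4si vadd (v4si a, v4si b) { return a + b; }

   Even if -msse is not enabled and the middle-end falls back to a non-vector
   mode for v4si, type_natural_mode still reports V4SImode, so the ABI slot
   chosen for the argument does not silently change (the ABI warning itself
   is emitted elsewhere, when the argument is laid out).  */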
5198
5199 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5200 this may not agree with the mode that the type system has chosen for the
5201 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5202 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5203
5204 static rtx
5205 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5206 unsigned int regno)
5207 {
5208 rtx tmp;
5209
5210 if (orig_mode != BLKmode)
5211 tmp = gen_rtx_REG (orig_mode, regno);
5212 else
5213 {
5214 tmp = gen_rtx_REG (mode, regno);
5215 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5216 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5217 }
5218
5219 return tmp;
5220 }
5221
5222 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5223 The goal of this code is to classify each 8-byte chunk of an incoming
5224 argument by register class and assign registers accordingly. */
5225
5226 /* Return the union class of CLASS1 and CLASS2.
5227 See the x86-64 PS ABI for details. */
5228
5229 static enum x86_64_reg_class
5230 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5231 {
5232 /* Rule #1: If both classes are equal, this is the resulting class. */
5233 if (class1 == class2)
5234 return class1;
5235
5236 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5237 the other class. */
5238 if (class1 == X86_64_NO_CLASS)
5239 return class2;
5240 if (class2 == X86_64_NO_CLASS)
5241 return class1;
5242
5243 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5244 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5245 return X86_64_MEMORY_CLASS;
5246
5247 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5248 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5249 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5250 return X86_64_INTEGERSI_CLASS;
5251 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5252 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5253 return X86_64_INTEGER_CLASS;
5254
5255 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5256 MEMORY is used. */
5257 if (class1 == X86_64_X87_CLASS
5258 || class1 == X86_64_X87UP_CLASS
5259 || class1 == X86_64_COMPLEX_X87_CLASS
5260 || class2 == X86_64_X87_CLASS
5261 || class2 == X86_64_X87UP_CLASS
5262 || class2 == X86_64_COMPLEX_X87_CLASS)
5263 return X86_64_MEMORY_CLASS;
5264
5265 /* Rule #6: Otherwise class SSE is used. */
5266 return X86_64_SSE_CLASS;
5267 }
5268
5269 /* Classify the argument of type TYPE and mode MODE.
5270 CLASSES will be filled by the register class used to pass each word
5271 of the operand. The number of words is returned. In case the parameter
5272 should be passed in memory, 0 is returned. As a special case for zero
5273 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5274
5275 BIT_OFFSET is used internally for handling records and specifies the
5276 offset in bits modulo 256 to avoid overflow cases.
5277
5278 See the x86-64 PS ABI for details.
5279 */
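/* Illustrative sketch (not part of GCC itself): for a structure such as

       // Hypothetical user code; 16 bytes on x86-64.
       struct pt { double x; long tag; };

   the first eightbyte classifies as SSE (the double) and the second as
   INTEGER (the long), so the structure travels in one SSE register and one
   integer register; growing it beyond 16 bytes with anything but trailing
   SSEUP data would instead send it to memory.  */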
5280
5281 static int
5282 classify_argument (enum machine_mode mode, const_tree type,
5283 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5284 {
5285 HOST_WIDE_INT bytes =
5286 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5287 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5288
5289 /* Variable sized entities are always passed/returned in memory. */
5290 if (bytes < 0)
5291 return 0;
5292
5293 if (mode != VOIDmode
5294 && targetm.calls.must_pass_in_stack (mode, type))
5295 return 0;
5296
5297 if (type && AGGREGATE_TYPE_P (type))
5298 {
5299 int i;
5300 tree field;
5301 enum x86_64_reg_class subclasses[MAX_CLASSES];
5302
5303 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5304 if (bytes > 32)
5305 return 0;
5306
5307 for (i = 0; i < words; i++)
5308 classes[i] = X86_64_NO_CLASS;
5309
5310 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
5311 signal the memory class, so handle this as a special case. */
5312 if (!words)
5313 {
5314 classes[0] = X86_64_NO_CLASS;
5315 return 1;
5316 }
5317
5318 /* Classify each field of record and merge classes. */
5319 switch (TREE_CODE (type))
5320 {
5321 case RECORD_TYPE:
5322 /* And now merge the fields of structure. */
5323 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5324 {
5325 if (TREE_CODE (field) == FIELD_DECL)
5326 {
5327 int num;
5328
5329 if (TREE_TYPE (field) == error_mark_node)
5330 continue;
5331
5332 /* Bitfields are always classified as integer. Handle them
5333 early, since later code would consider them to be
5334 misaligned integers. */
5335 if (DECL_BIT_FIELD (field))
5336 {
5337 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5338 i < ((int_bit_position (field) + (bit_offset % 64))
5339 + tree_low_cst (DECL_SIZE (field), 0)
5340 + 63) / 8 / 8; i++)
5341 classes[i] =
5342 merge_classes (X86_64_INTEGER_CLASS,
5343 classes[i]);
5344 }
5345 else
5346 {
5347 int pos;
5348
5349 type = TREE_TYPE (field);
5350
5351 /* Flexible array member is ignored. */
5352 if (TYPE_MODE (type) == BLKmode
5353 && TREE_CODE (type) == ARRAY_TYPE
5354 && TYPE_SIZE (type) == NULL_TREE
5355 && TYPE_DOMAIN (type) != NULL_TREE
5356 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5357 == NULL_TREE))
5358 {
5359 static bool warned;
5360
5361 if (!warned && warn_psabi)
5362 {
5363 warned = true;
5364 inform (input_location,
5365 "The ABI of passing struct with"
5366 " a flexible array member has"
5367 " changed in GCC 4.4");
5368 }
5369 continue;
5370 }
5371 num = classify_argument (TYPE_MODE (type), type,
5372 subclasses,
5373 (int_bit_position (field)
5374 + bit_offset) % 256);
5375 if (!num)
5376 return 0;
5377 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5378 for (i = 0; i < num && (i + pos) < words; i++)
5379 classes[i + pos] =
5380 merge_classes (subclasses[i], classes[i + pos]);
5381 }
5382 }
5383 }
5384 break;
5385
5386 case ARRAY_TYPE:
5387 /* Arrays are handled as small records. */
5388 {
5389 int num;
5390 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5391 TREE_TYPE (type), subclasses, bit_offset);
5392 if (!num)
5393 return 0;
5394
5395 /* The partial classes are now full classes. */
5396 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5397 subclasses[0] = X86_64_SSE_CLASS;
5398 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5399 && !((bit_offset % 64) == 0 && bytes == 4))
5400 subclasses[0] = X86_64_INTEGER_CLASS;
5401
5402 for (i = 0; i < words; i++)
5403 classes[i] = subclasses[i % num];
5404
5405 break;
5406 }
5407 case UNION_TYPE:
5408 case QUAL_UNION_TYPE:
5409 /* Unions are similar to RECORD_TYPE but the offset is
5410 always 0. */
5411 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5412 {
5413 if (TREE_CODE (field) == FIELD_DECL)
5414 {
5415 int num;
5416
5417 if (TREE_TYPE (field) == error_mark_node)
5418 continue;
5419
5420 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5421 TREE_TYPE (field), subclasses,
5422 bit_offset);
5423 if (!num)
5424 return 0;
5425 for (i = 0; i < num; i++)
5426 classes[i] = merge_classes (subclasses[i], classes[i]);
5427 }
5428 }
5429 break;
5430
5431 default:
5432 gcc_unreachable ();
5433 }
5434
5435 if (words > 2)
5436 {
5437 /* When the size is larger than 16 bytes, if the first class isn't
5438 X86_64_SSE_CLASS or any of the remaining classes isn't
5439 X86_64_SSEUP_CLASS, everything should be passed in
5440 memory. */
5441 if (classes[0] != X86_64_SSE_CLASS)
5442 return 0;
5443
5444 for (i = 1; i < words; i++)
5445 if (classes[i] != X86_64_SSEUP_CLASS)
5446 return 0;
5447 }
5448
5449 /* Final merger cleanup. */
5450 for (i = 0; i < words; i++)
5451 {
5452 /* If one class is MEMORY, everything should be passed in
5453 memory. */
5454 if (classes[i] == X86_64_MEMORY_CLASS)
5455 return 0;
5456
5457 /* X86_64_SSEUP_CLASS should always be preceded by
5458 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5459 if (classes[i] == X86_64_SSEUP_CLASS
5460 && classes[i - 1] != X86_64_SSE_CLASS
5461 && classes[i - 1] != X86_64_SSEUP_CLASS)
5462 {
5463 /* The first one should never be X86_64_SSEUP_CLASS. */
5464 gcc_assert (i != 0);
5465 classes[i] = X86_64_SSE_CLASS;
5466 }
5467
5468 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5469 everything should be passed in memory. */
5470 if (classes[i] == X86_64_X87UP_CLASS
5471 && (classes[i - 1] != X86_64_X87_CLASS))
5472 {
5473 static bool warned;
5474
5475 /* The first one should never be X86_64_X87UP_CLASS. */
5476 gcc_assert (i != 0);
5477 if (!warned && warn_psabi)
5478 {
5479 warned = true;
5480 inform (input_location,
5481 "The ABI of passing union with long double"
5482 " has changed in GCC 4.4");
5483 }
5484 return 0;
5485 }
5486 }
5487 return words;
5488 }
5489
5490 /* Compute the alignment needed. We align all types to their natural
5491 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5492 if (mode != VOIDmode && mode != BLKmode)
5493 {
5494 int mode_alignment = GET_MODE_BITSIZE (mode);
5495
5496 if (mode == XFmode)
5497 mode_alignment = 128;
5498 else if (mode == XCmode)
5499 mode_alignment = 256;
5500 if (COMPLEX_MODE_P (mode))
5501 mode_alignment /= 2;
5502 /* Misaligned fields are always returned in memory. */
5503 if (bit_offset % mode_alignment)
5504 return 0;
5505 }
5506
5507 /* For V1xx modes, just use the base mode. */
5508 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5509 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5510 mode = GET_MODE_INNER (mode);
5511
5512 /* Classification of atomic types. */
5513 switch (mode)
5514 {
5515 case SDmode:
5516 case DDmode:
5517 classes[0] = X86_64_SSE_CLASS;
5518 return 1;
5519 case TDmode:
5520 classes[0] = X86_64_SSE_CLASS;
5521 classes[1] = X86_64_SSEUP_CLASS;
5522 return 2;
5523 case DImode:
5524 case SImode:
5525 case HImode:
5526 case QImode:
5527 case CSImode:
5528 case CHImode:
5529 case CQImode:
5530 {
5531 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5532
5533 if (size <= 32)
5534 {
5535 classes[0] = X86_64_INTEGERSI_CLASS;
5536 return 1;
5537 }
5538 else if (size <= 64)
5539 {
5540 classes[0] = X86_64_INTEGER_CLASS;
5541 return 1;
5542 }
5543 else if (size <= 64+32)
5544 {
5545 classes[0] = X86_64_INTEGER_CLASS;
5546 classes[1] = X86_64_INTEGERSI_CLASS;
5547 return 2;
5548 }
5549 else if (size <= 64+64)
5550 {
5551 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5552 return 2;
5553 }
5554 else
5555 gcc_unreachable ();
5556 }
5557 case CDImode:
5558 case TImode:
5559 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5560 return 2;
5561 case COImode:
5562 case OImode:
5563 /* OImode shouldn't be used directly. */
5564 gcc_unreachable ();
5565 case CTImode:
5566 return 0;
5567 case SFmode:
5568 if (!(bit_offset % 64))
5569 classes[0] = X86_64_SSESF_CLASS;
5570 else
5571 classes[0] = X86_64_SSE_CLASS;
5572 return 1;
5573 case DFmode:
5574 classes[0] = X86_64_SSEDF_CLASS;
5575 return 1;
5576 case XFmode:
5577 classes[0] = X86_64_X87_CLASS;
5578 classes[1] = X86_64_X87UP_CLASS;
5579 return 2;
5580 case TFmode:
5581 classes[0] = X86_64_SSE_CLASS;
5582 classes[1] = X86_64_SSEUP_CLASS;
5583 return 2;
5584 case SCmode:
5585 classes[0] = X86_64_SSE_CLASS;
5586 if (!(bit_offset % 64))
5587 return 1;
5588 else
5589 {
5590 static bool warned;
5591
5592 if (!warned && warn_psabi)
5593 {
5594 warned = true;
5595 inform (input_location,
5596 "The ABI of passing structure with complex float"
5597 " member has changed in GCC 4.4");
5598 }
5599 classes[1] = X86_64_SSESF_CLASS;
5600 return 2;
5601 }
5602 case DCmode:
5603 classes[0] = X86_64_SSEDF_CLASS;
5604 classes[1] = X86_64_SSEDF_CLASS;
5605 return 2;
5606 case XCmode:
5607 classes[0] = X86_64_COMPLEX_X87_CLASS;
5608 return 1;
5609 case TCmode:
5610 /* This mode is larger than 16 bytes. */
5611 return 0;
5612 case V8SFmode:
5613 case V8SImode:
5614 case V32QImode:
5615 case V16HImode:
5616 case V4DFmode:
5617 case V4DImode:
5618 classes[0] = X86_64_SSE_CLASS;
5619 classes[1] = X86_64_SSEUP_CLASS;
5620 classes[2] = X86_64_SSEUP_CLASS;
5621 classes[3] = X86_64_SSEUP_CLASS;
5622 return 4;
5623 case V4SFmode:
5624 case V4SImode:
5625 case V16QImode:
5626 case V8HImode:
5627 case V2DFmode:
5628 case V2DImode:
5629 classes[0] = X86_64_SSE_CLASS;
5630 classes[1] = X86_64_SSEUP_CLASS;
5631 return 2;
5632 case V1TImode:
5633 case V1DImode:
5634 case V2SFmode:
5635 case V2SImode:
5636 case V4HImode:
5637 case V8QImode:
5638 classes[0] = X86_64_SSE_CLASS;
5639 return 1;
5640 case BLKmode:
5641 case VOIDmode:
5642 return 0;
5643 default:
5644 gcc_assert (VECTOR_MODE_P (mode));
5645
5646 if (bytes > 16)
5647 return 0;
5648
5649 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5650
5651 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5652 classes[0] = X86_64_INTEGERSI_CLASS;
5653 else
5654 classes[0] = X86_64_INTEGER_CLASS;
5655 classes[1] = X86_64_INTEGER_CLASS;
5656 return 1 + (bytes > 8);
5657 }
5658 }
5659
5660 /* Examine the argument and set the number of registers required in each
5661 class. Return 0 iff the parameter should be passed in memory. */
5662 static int
5663 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5664 int *int_nregs, int *sse_nregs)
5665 {
5666 enum x86_64_reg_class regclass[MAX_CLASSES];
5667 int n = classify_argument (mode, type, regclass, 0);
5668
5669 *int_nregs = 0;
5670 *sse_nregs = 0;
5671 if (!n)
5672 return 0;
5673 for (n--; n >= 0; n--)
5674 switch (regclass[n])
5675 {
5676 case X86_64_INTEGER_CLASS:
5677 case X86_64_INTEGERSI_CLASS:
5678 (*int_nregs)++;
5679 break;
5680 case X86_64_SSE_CLASS:
5681 case X86_64_SSESF_CLASS:
5682 case X86_64_SSEDF_CLASS:
5683 (*sse_nregs)++;
5684 break;
5685 case X86_64_NO_CLASS:
5686 case X86_64_SSEUP_CLASS:
5687 break;
5688 case X86_64_X87_CLASS:
5689 case X86_64_X87UP_CLASS:
5690 if (!in_return)
5691 return 0;
5692 break;
5693 case X86_64_COMPLEX_X87_CLASS:
5694 return in_return ? 2 : 0;
5695 case X86_64_MEMORY_CLASS:
5696 gcc_unreachable ();
5697 }
5698 return 1;
5699 }
5700
5701 /* Construct container for the argument used by GCC interface. See
5702 FUNCTION_ARG for the detailed description. */
5703
5704 static rtx
5705 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5706 const_tree type, int in_return, int nintregs, int nsseregs,
5707 const int *intreg, int sse_regno)
5708 {
5709 /* The following variables hold the static issued_error state. */
5710 static bool issued_sse_arg_error;
5711 static bool issued_sse_ret_error;
5712 static bool issued_x87_ret_error;
5713
5714 enum machine_mode tmpmode;
5715 int bytes =
5716 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5717 enum x86_64_reg_class regclass[MAX_CLASSES];
5718 int n;
5719 int i;
5720 int nexps = 0;
5721 int needed_sseregs, needed_intregs;
5722 rtx exp[MAX_CLASSES];
5723 rtx ret;
5724
5725 n = classify_argument (mode, type, regclass, 0);
5726 if (!n)
5727 return NULL;
5728 if (!examine_argument (mode, type, in_return, &needed_intregs,
5729 &needed_sseregs))
5730 return NULL;
5731 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5732 return NULL;
5733
5734 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5735 some less clueful developer tries to use floating-point anyway. */
5736 if (needed_sseregs && !TARGET_SSE)
5737 {
5738 if (in_return)
5739 {
5740 if (!issued_sse_ret_error)
5741 {
5742 error ("SSE register return with SSE disabled");
5743 issued_sse_ret_error = true;
5744 }
5745 }
5746 else if (!issued_sse_arg_error)
5747 {
5748 error ("SSE register argument with SSE disabled");
5749 issued_sse_arg_error = true;
5750 }
5751 return NULL;
5752 }
5753
5754 /* Likewise, error if the ABI requires us to return values in the
5755 x87 registers and the user specified -mno-80387. */
5756 if (!TARGET_80387 && in_return)
5757 for (i = 0; i < n; i++)
5758 if (regclass[i] == X86_64_X87_CLASS
5759 || regclass[i] == X86_64_X87UP_CLASS
5760 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5761 {
5762 if (!issued_x87_ret_error)
5763 {
5764 error ("x87 register return with x87 disabled");
5765 issued_x87_ret_error = true;
5766 }
5767 return NULL;
5768 }
5769
5770 /* First construct simple cases. Avoid SCmode, since we want to use a
5771 single register to pass this type. */
5772 if (n == 1 && mode != SCmode)
5773 switch (regclass[0])
5774 {
5775 case X86_64_INTEGER_CLASS:
5776 case X86_64_INTEGERSI_CLASS:
5777 return gen_rtx_REG (mode, intreg[0]);
5778 case X86_64_SSE_CLASS:
5779 case X86_64_SSESF_CLASS:
5780 case X86_64_SSEDF_CLASS:
5781 if (mode != BLKmode)
5782 return gen_reg_or_parallel (mode, orig_mode,
5783 SSE_REGNO (sse_regno));
5784 break;
5785 case X86_64_X87_CLASS:
5786 case X86_64_COMPLEX_X87_CLASS:
5787 return gen_rtx_REG (mode, FIRST_STACK_REG);
5788 case X86_64_NO_CLASS:
5789 /* Zero sized array, struct or class. */
5790 return NULL;
5791 default:
5792 gcc_unreachable ();
5793 }
5794 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5795 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5796 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5797 if (n == 4
5798 && regclass[0] == X86_64_SSE_CLASS
5799 && regclass[1] == X86_64_SSEUP_CLASS
5800 && regclass[2] == X86_64_SSEUP_CLASS
5801 && regclass[3] == X86_64_SSEUP_CLASS
5802 && mode != BLKmode)
5803 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5804
5805 if (n == 2
5806 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5807 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5808 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5809 && regclass[1] == X86_64_INTEGER_CLASS
5810 && (mode == CDImode || mode == TImode || mode == TFmode)
5811 && intreg[0] + 1 == intreg[1])
5812 return gen_rtx_REG (mode, intreg[0]);
5813
5814 /* Otherwise figure out the entries of the PARALLEL. */
5815 for (i = 0; i < n; i++)
5816 {
5817 int pos;
5818
5819 switch (regclass[i])
5820 {
5821 case X86_64_NO_CLASS:
5822 break;
5823 case X86_64_INTEGER_CLASS:
5824 case X86_64_INTEGERSI_CLASS:
5825 /* Merge TImodes on aligned occasions here too. */
5826 if (i * 8 + 8 > bytes)
5827 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5828 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5829 tmpmode = SImode;
5830 else
5831 tmpmode = DImode;
5832 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5833 if (tmpmode == BLKmode)
5834 tmpmode = DImode;
5835 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5836 gen_rtx_REG (tmpmode, *intreg),
5837 GEN_INT (i*8));
5838 intreg++;
5839 break;
5840 case X86_64_SSESF_CLASS:
5841 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5842 gen_rtx_REG (SFmode,
5843 SSE_REGNO (sse_regno)),
5844 GEN_INT (i*8));
5845 sse_regno++;
5846 break;
5847 case X86_64_SSEDF_CLASS:
5848 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5849 gen_rtx_REG (DFmode,
5850 SSE_REGNO (sse_regno)),
5851 GEN_INT (i*8));
5852 sse_regno++;
5853 break;
5854 case X86_64_SSE_CLASS:
5855 pos = i;
5856 switch (n)
5857 {
5858 case 1:
5859 tmpmode = DImode;
5860 break;
5861 case 2:
5862 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5863 {
5864 tmpmode = TImode;
5865 i++;
5866 }
5867 else
5868 tmpmode = DImode;
5869 break;
5870 case 4:
5871 gcc_assert (i == 0
5872 && regclass[1] == X86_64_SSEUP_CLASS
5873 && regclass[2] == X86_64_SSEUP_CLASS
5874 && regclass[3] == X86_64_SSEUP_CLASS);
5875 tmpmode = OImode;
5876 i += 3;
5877 break;
5878 default:
5879 gcc_unreachable ();
5880 }
5881 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5882 gen_rtx_REG (tmpmode,
5883 SSE_REGNO (sse_regno)),
5884 GEN_INT (pos*8));
5885 sse_regno++;
5886 break;
5887 default:
5888 gcc_unreachable ();
5889 }
5890 }
5891
5892 /* Empty aligned struct, union or class. */
5893 if (nexps == 0)
5894 return NULL;
5895
5896 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5897 for (i = 0; i < nexps; i++)
5898 XVECEXP (ret, 0, i) = exp [i];
5899 return ret;
5900 }
5901
5902 /* Update the data in CUM to advance over an argument of mode MODE
5903 and data type TYPE. (TYPE is null for libcalls where that information
5904 may not be available.) */
5905
5906 static void
5907 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5908 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5909 {
5910 switch (mode)
5911 {
5912 default:
5913 break;
5914
5915 case BLKmode:
5916 if (bytes < 0)
5917 break;
5918 /* FALLTHRU */
5919
5920 case DImode:
5921 case SImode:
5922 case HImode:
5923 case QImode:
5924 cum->words += words;
5925 cum->nregs -= words;
5926 cum->regno += words;
5927
5928 if (cum->nregs <= 0)
5929 {
5930 cum->nregs = 0;
5931 cum->regno = 0;
5932 }
5933 break;
5934
5935 case OImode:
5936 /* OImode shouldn't be used directly. */
5937 gcc_unreachable ();
5938
5939 case DFmode:
5940 if (cum->float_in_sse < 2)
5941 break;
5942 case SFmode:
5943 if (cum->float_in_sse < 1)
5944 break;
5945 /* FALLTHRU */
5946
5947 case V8SFmode:
5948 case V8SImode:
5949 case V32QImode:
5950 case V16HImode:
5951 case V4DFmode:
5952 case V4DImode:
5953 case TImode:
5954 case V16QImode:
5955 case V8HImode:
5956 case V4SImode:
5957 case V2DImode:
5958 case V4SFmode:
5959 case V2DFmode:
5960 if (!type || !AGGREGATE_TYPE_P (type))
5961 {
5962 cum->sse_words += words;
5963 cum->sse_nregs -= 1;
5964 cum->sse_regno += 1;
5965 if (cum->sse_nregs <= 0)
5966 {
5967 cum->sse_nregs = 0;
5968 cum->sse_regno = 0;
5969 }
5970 }
5971 break;
5972
5973 case V8QImode:
5974 case V4HImode:
5975 case V2SImode:
5976 case V2SFmode:
5977 case V1TImode:
5978 case V1DImode:
5979 if (!type || !AGGREGATE_TYPE_P (type))
5980 {
5981 cum->mmx_words += words;
5982 cum->mmx_nregs -= 1;
5983 cum->mmx_regno += 1;
5984 if (cum->mmx_nregs <= 0)
5985 {
5986 cum->mmx_nregs = 0;
5987 cum->mmx_regno = 0;
5988 }
5989 }
5990 break;
5991 }
5992 }
5993
5994 static void
5995 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5996 tree type, HOST_WIDE_INT words, int named)
5997 {
5998 int int_nregs, sse_nregs;
5999
6000 /* Unnamed 256bit vector mode parameters are passed on stack. */
6001 if (!named && VALID_AVX256_REG_MODE (mode))
6002 return;
6003
6004 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
6005 cum->words += words;
6006 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6007 {
6008 cum->nregs -= int_nregs;
6009 cum->sse_nregs -= sse_nregs;
6010 cum->regno += int_nregs;
6011 cum->sse_regno += sse_nregs;
6012 }
6013 else
6014 cum->words += words;
6015 }
6016
6017 static void
6018 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6019 HOST_WIDE_INT words)
6020 {
6021 /* Otherwise, this should be passed indirectly. */
6022 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6023
6024 cum->words += words;
6025 if (cum->nregs > 0)
6026 {
6027 cum->nregs -= 1;
6028 cum->regno += 1;
6029 }
6030 }
6031
6032 void
6033 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6034 tree type, int named)
6035 {
6036 HOST_WIDE_INT bytes, words;
6037
6038 if (mode == BLKmode)
6039 bytes = int_size_in_bytes (type);
6040 else
6041 bytes = GET_MODE_SIZE (mode);
6042 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6043
6044 if (type)
6045 mode = type_natural_mode (type, NULL);
6046
6047 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6048 function_arg_advance_ms_64 (cum, bytes, words);
6049 else if (TARGET_64BIT)
6050 function_arg_advance_64 (cum, mode, type, words, named);
6051 else
6052 function_arg_advance_32 (cum, mode, type, bytes, words);
6053 }
6054
6055 /* Define where to put the arguments to a function.
6056 Value is zero to push the argument on the stack,
6057 or a hard register in which to store the argument.
6058
6059 MODE is the argument's machine mode.
6060 TYPE is the data type of the argument (as a tree).
6061 This is null for libcalls where that information may
6062 not be available.
6063 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6064 the preceding args and about the function being called.
6065 NAMED is nonzero if this argument is a named parameter
6066 (otherwise it is an extra parameter matching an ellipsis). */
6067
6068 static rtx
6069 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6070 enum machine_mode orig_mode, tree type,
6071 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6072 {
6073 static bool warnedsse, warnedmmx;
6074
6075 /* Avoid the AL settings for the Unix64 ABI. */
6076 if (mode == VOIDmode)
6077 return constm1_rtx;
6078
6079 switch (mode)
6080 {
6081 default:
6082 break;
6083
6084 case BLKmode:
6085 if (bytes < 0)
6086 break;
6087 /* FALLTHRU */
6088 case DImode:
6089 case SImode:
6090 case HImode:
6091 case QImode:
6092 if (words <= cum->nregs)
6093 {
6094 int regno = cum->regno;
6095
6096 /* Fastcall allocates the first two DWORD (SImode) or
6097 smaller arguments to ECX and EDX if they aren't
6098 aggregate types. */
6099 if (cum->fastcall)
6100 {
6101 if (mode == BLKmode
6102 || mode == DImode
6103 || (type && AGGREGATE_TYPE_P (type)))
6104 break;
6105
6106 /* ECX, not EAX, is the first allocated register. */
6107 if (regno == AX_REG)
6108 regno = CX_REG;
6109 }
6110 return gen_rtx_REG (mode, regno);
6111 }
6112 break;
6113
6114 case DFmode:
6115 if (cum->float_in_sse < 2)
6116 break;
6117 case SFmode:
6118 if (cum->float_in_sse < 1)
6119 break;
6120 /* FALLTHRU */
6121 case TImode:
6122 /* In 32bit, we pass TImode in xmm registers. */
6123 case V16QImode:
6124 case V8HImode:
6125 case V4SImode:
6126 case V2DImode:
6127 case V4SFmode:
6128 case V2DFmode:
6129 if (!type || !AGGREGATE_TYPE_P (type))
6130 {
6131 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6132 {
6133 warnedsse = true;
6134 warning (0, "SSE vector argument without SSE enabled "
6135 "changes the ABI");
6136 }
6137 if (cum->sse_nregs)
6138 return gen_reg_or_parallel (mode, orig_mode,
6139 cum->sse_regno + FIRST_SSE_REG);
6140 }
6141 break;
6142
6143 case OImode:
6144 /* OImode shouldn't be used directly. */
6145 gcc_unreachable ();
6146
6147 case V8SFmode:
6148 case V8SImode:
6149 case V32QImode:
6150 case V16HImode:
6151 case V4DFmode:
6152 case V4DImode:
6153 if (!type || !AGGREGATE_TYPE_P (type))
6154 {
6155 if (cum->sse_nregs)
6156 return gen_reg_or_parallel (mode, orig_mode,
6157 cum->sse_regno + FIRST_SSE_REG);
6158 }
6159 break;
6160
6161 case V8QImode:
6162 case V4HImode:
6163 case V2SImode:
6164 case V2SFmode:
6165 case V1TImode:
6166 case V1DImode:
6167 if (!type || !AGGREGATE_TYPE_P (type))
6168 {
6169 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6170 {
6171 warnedmmx = true;
6172 warning (0, "MMX vector argument without MMX enabled "
6173 "changes the ABI");
6174 }
6175 if (cum->mmx_nregs)
6176 return gen_reg_or_parallel (mode, orig_mode,
6177 cum->mmx_regno + FIRST_MMX_REG);
6178 }
6179 break;
6180 }
6181
6182 return NULL_RTX;
6183 }
6184
6185 static rtx
6186 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6187 enum machine_mode orig_mode, tree type, int named)
6188 {
6189 /* Handle a hidden AL argument containing number of registers
6190 for varargs x86-64 functions. */
6191 if (mode == VOIDmode)
6192 return GEN_INT (cum->maybe_vaarg
6193 ? (cum->sse_nregs < 0
6194 ? X86_64_SSE_REGPARM_MAX
6195 : cum->sse_regno)
6196 : -1);
6197
6198 switch (mode)
6199 {
6200 default:
6201 break;
6202
6203 case V8SFmode:
6204 case V8SImode:
6205 case V32QImode:
6206 case V16HImode:
6207 case V4DFmode:
6208 case V4DImode:
6209 /* Unnamed 256bit vector mode parameters are passed on stack. */
6210 if (!named)
6211 return NULL;
6212 break;
6213 }
6214
6215 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6216 cum->sse_nregs,
6217 &x86_64_int_parameter_registers [cum->regno],
6218 cum->sse_regno);
6219 }
6220
6221 static rtx
6222 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6223 enum machine_mode orig_mode, int named,
6224 HOST_WIDE_INT bytes)
6225 {
6226 unsigned int regno;
6227
6228 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6229 We use the value -2 to specify that the current function call is MS ABI. */
6230 if (mode == VOIDmode)
6231 return GEN_INT (-2);
6232
6233 /* If we've run out of registers, it goes on the stack. */
6234 if (cum->nregs == 0)
6235 return NULL_RTX;
6236
6237 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6238
6239 /* Only floating point modes are passed in anything but integer regs. */
6240 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6241 {
6242 if (named)
6243 regno = cum->regno + FIRST_SSE_REG;
6244 else
6245 {
6246 rtx t1, t2;
6247
6248 /* Unnamed floating parameters are passed in both the
6249 SSE and integer registers. */
6250 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6251 t2 = gen_rtx_REG (mode, regno);
6252 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6253 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6254 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6255 }
6256 }
6257 /* Handle aggregate types passed in registers. */
6258 if (orig_mode == BLKmode)
6259 {
6260 if (bytes > 0 && bytes <= 8)
6261 mode = (bytes > 4 ? DImode : SImode);
6262 if (mode == BLKmode)
6263 mode = DImode;
6264 }
6265
6266 return gen_reg_or_parallel (mode, orig_mode, regno);
6267 }
6268
6269 rtx
6270 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6271 tree type, int named)
6272 {
6273 enum machine_mode mode = omode;
6274 HOST_WIDE_INT bytes, words;
6275
6276 if (mode == BLKmode)
6277 bytes = int_size_in_bytes (type);
6278 else
6279 bytes = GET_MODE_SIZE (mode);
6280 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6281
6282 /* To simplify the code below, represent vector types with a vector mode
6283 even if MMX/SSE are not active. */
6284 if (type && TREE_CODE (type) == VECTOR_TYPE)
6285 mode = type_natural_mode (type, cum);
6286
6287 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6288 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6289 else if (TARGET_64BIT)
6290 return function_arg_64 (cum, mode, omode, type, named);
6291 else
6292 return function_arg_32 (cum, mode, omode, type, bytes, words);
6293 }
6294
6295 /* A C expression that indicates when an argument must be passed by
6296 reference. If nonzero for an argument, a copy of that argument is
6297 made in memory and a pointer to the argument is passed instead of
6298 the argument itself. The pointer is passed in whatever way is
6299 appropriate for passing a pointer to that type. */
6300
6301 static bool
6302 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6303 enum machine_mode mode ATTRIBUTE_UNUSED,
6304 const_tree type, bool named ATTRIBUTE_UNUSED)
6305 {
6306 /* See Windows x64 Software Convention. */
6307 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6308 {
6309 int msize = (int) GET_MODE_SIZE (mode);
6310 if (type)
6311 {
6312 /* Arrays are passed by reference. */
6313 if (TREE_CODE (type) == ARRAY_TYPE)
6314 return true;
6315
6316 if (AGGREGATE_TYPE_P (type))
6317 {
6318 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6319 are passed by reference. */
6320 msize = int_size_in_bytes (type);
6321 }
6322 }
6323
6324 /* __m128 is passed by reference. */
6325 switch (msize) {
6326 case 1: case 2: case 4: case 8:
6327 break;
6328 default:
6329 return true;
6330 }
6331 }
6332 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6333 return 1;
6334
6335 return 0;
6336 }
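/* Illustrative sketch (not part of GCC itself): under the Windows x64 rules
   checked above,

       // Hypothetical user code.
       #include <xmmintrin.h>                 // for __m128
       struct small { int a, b; };            // 8 bytes: passed by value
       struct big { int a, b, c; };           // 12 bytes: passed by reference
       void use (struct small s, struct big b, __m128 v);

   only arguments of 1, 2, 4 or 8 bytes are passed directly; BIG and the
   16-byte __m128 value V are copied to memory by the caller and a pointer
   is passed in their place.  */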
6337
6338 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6339 ABI. */
6340 static bool
6341 contains_aligned_value_p (tree type)
6342 {
6343 enum machine_mode mode = TYPE_MODE (type);
6344 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6345 || mode == TDmode
6346 || mode == TFmode
6347 || mode == TCmode)
6348 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6349 return true;
6350 if (TYPE_ALIGN (type) < 128)
6351 return false;
6352
6353 if (AGGREGATE_TYPE_P (type))
6354 {
6355 /* Walk the aggregates recursively. */
6356 switch (TREE_CODE (type))
6357 {
6358 case RECORD_TYPE:
6359 case UNION_TYPE:
6360 case QUAL_UNION_TYPE:
6361 {
6362 tree field;
6363
6364 /* Walk all the structure fields. */
6365 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6366 {
6367 if (TREE_CODE (field) == FIELD_DECL
6368 && contains_aligned_value_p (TREE_TYPE (field)))
6369 return true;
6370 }
6371 break;
6372 }
6373
6374 case ARRAY_TYPE:
6375 /* Just for use if some languages pass arrays by value. */
6376 if (contains_aligned_value_p (TREE_TYPE (type)))
6377 return true;
6378 break;
6379
6380 default:
6381 gcc_unreachable ();
6382 }
6383 }
6384 return false;
6385 }
6386
6387 /* Gives the alignment boundary, in bits, of an argument with the
6388 specified mode and type. */
6389
6390 int
6391 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6392 {
6393 int align;
6394 if (type)
6395 {
6396 /* Since the canonical type is used for the call, convert TYPE
6397 to its canonical type if needed. */
6398 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6399 type = TYPE_CANONICAL (type);
6400 align = TYPE_ALIGN (type);
6401 }
6402 else
6403 align = GET_MODE_ALIGNMENT (mode);
6404 if (align < PARM_BOUNDARY)
6405 align = PARM_BOUNDARY;
6406 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6407 natural boundaries. */
6408 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6409 {
6410 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6411 make an exception for SSE modes since these require 128bit
6412 alignment.
6413
6414 The handling here differs from field_alignment. ICC aligns MMX
6415 arguments to 4 byte boundaries, while structure fields are aligned
6416 to 8 byte boundaries. */
6417 if (!type)
6418 {
6419 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6420 align = PARM_BOUNDARY;
6421 }
6422 else
6423 {
6424 if (!contains_aligned_value_p (type))
6425 align = PARM_BOUNDARY;
6426 }
6427 }
6428 if (align > BIGGEST_ALIGNMENT)
6429 align = BIGGEST_ALIGNMENT;
6430 return align;
6431 }
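/* Illustrative sketch (not part of GCC itself): the boundary computed above
   means that for 32-bit code such as

       // Hypothetical user code; compiled with -msse.
       typedef float v4sf __attribute__ ((vector_size (16)));
       void take (int a, v4sf b);

   A is aligned to the 4-byte PARM_BOUNDARY while B keeps its 128-bit
   alignment in the argument area, as do _Decimal128 and __float128.  */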
6432
6433 /* Return true if N is a possible register number of function value. */
6434
6435 static bool
6436 ix86_function_value_regno_p (const unsigned int regno)
6437 {
6438 switch (regno)
6439 {
6440 case 0:
6441 return true;
6442
6443 case FIRST_FLOAT_REG:
6444 /* TODO: The function should depend on current function ABI but
6445 builtins.c would need updating then. Therefore we use the
6446 default ABI. */
6447 if (TARGET_64BIT && ix86_abi == MS_ABI)
6448 return false;
6449 return TARGET_FLOAT_RETURNS_IN_80387;
6450
6451 case FIRST_SSE_REG:
6452 return TARGET_SSE;
6453
6454 case FIRST_MMX_REG:
6455 if (TARGET_MACHO || TARGET_64BIT)
6456 return false;
6457 return TARGET_MMX;
6458 }
6459
6460 return false;
6461 }
6462
6463 /* Define how to find the value returned by a function.
6464 VALTYPE is the data type of the value (as a tree).
6465 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6466 otherwise, FUNC is 0. */
6467
6468 static rtx
6469 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6470 const_tree fntype, const_tree fn)
6471 {
6472 unsigned int regno;
6473
6474 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6475 we normally prevent this case when mmx is not available. However
6476 some ABIs may require the result to be returned like DImode. */
6477 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6478 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6479
6480 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6481 we prevent this case when sse is not available. However some ABIs
6482 may require the result to be returned like integer TImode. */
6483 else if (mode == TImode
6484 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6485 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6486
6487 /* 32-byte vector modes in %ymm0. */
6488 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6489 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6490
6491 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6492 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6493 regno = FIRST_FLOAT_REG;
6494 else
6495 /* Most things go in %eax. */
6496 regno = AX_REG;
6497
6498 /* Override FP return register with %xmm0 for local functions when
6499 SSE math is enabled or for functions with sseregparm attribute. */
6500 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6501 {
6502 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6503 if ((sse_level >= 1 && mode == SFmode)
6504 || (sse_level == 2 && mode == DFmode))
6505 regno = FIRST_SSE_REG;
6506 }
6507
6508 /* OImode shouldn't be used directly. */
6509 gcc_assert (mode != OImode);
6510
6511 return gen_rtx_REG (orig_mode, regno);
6512 }
6513
6514 static rtx
6515 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6516 const_tree valtype)
6517 {
6518 rtx ret;
6519
6520 /* Handle libcalls, which don't provide a type node. */
6521 if (valtype == NULL)
6522 {
6523 switch (mode)
6524 {
6525 case SFmode:
6526 case SCmode:
6527 case DFmode:
6528 case DCmode:
6529 case TFmode:
6530 case SDmode:
6531 case DDmode:
6532 case TDmode:
6533 return gen_rtx_REG (mode, FIRST_SSE_REG);
6534 case XFmode:
6535 case XCmode:
6536 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6537 case TCmode:
6538 return NULL;
6539 default:
6540 return gen_rtx_REG (mode, AX_REG);
6541 }
6542 }
6543
6544 ret = construct_container (mode, orig_mode, valtype, 1,
6545 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6546 x86_64_int_return_registers, 0);
6547
6548 /* For zero-sized structures, construct_container returns NULL, but we need
6549 to keep the rest of the compiler happy by returning a meaningful value. */
6550 if (!ret)
6551 ret = gen_rtx_REG (orig_mode, AX_REG);
6552
6553 return ret;
6554 }
6555
6556 static rtx
6557 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6558 {
6559 unsigned int regno = AX_REG;
6560
6561 if (TARGET_SSE)
6562 {
6563 switch (GET_MODE_SIZE (mode))
6564 {
6565 case 16:
6566 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6567 && !COMPLEX_MODE_P (mode))
6568 regno = FIRST_SSE_REG;
6569 break;
6570 case 8:
6571 case 4:
6572 if (mode == SFmode || mode == DFmode)
6573 regno = FIRST_SSE_REG;
6574 break;
6575 default:
6576 break;
6577 }
6578 }
6579 return gen_rtx_REG (orig_mode, regno);
6580 }
6581
6582 static rtx
6583 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6584 enum machine_mode orig_mode, enum machine_mode mode)
6585 {
6586 const_tree fn, fntype;
6587
6588 fn = NULL_TREE;
6589 if (fntype_or_decl && DECL_P (fntype_or_decl))
6590 fn = fntype_or_decl;
6591 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6592
6593 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6594 return function_value_ms_64 (orig_mode, mode);
6595 else if (TARGET_64BIT)
6596 return function_value_64 (orig_mode, mode, valtype);
6597 else
6598 return function_value_32 (orig_mode, mode, fntype, fn);
6599 }
6600
6601 static rtx
6602 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6603 bool outgoing ATTRIBUTE_UNUSED)
6604 {
6605 enum machine_mode mode, orig_mode;
6606
6607 orig_mode = TYPE_MODE (valtype);
6608 mode = type_natural_mode (valtype, NULL);
6609 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6610 }
6611
6612 rtx
6613 ix86_libcall_value (enum machine_mode mode)
6614 {
6615 return ix86_function_value_1 (NULL, NULL, mode, mode);
6616 }
6617
6618 /* Return true iff type is returned in memory. */
6619
6620 static int ATTRIBUTE_UNUSED
6621 return_in_memory_32 (const_tree type, enum machine_mode mode)
6622 {
6623 HOST_WIDE_INT size;
6624
6625 if (mode == BLKmode)
6626 return 1;
6627
6628 size = int_size_in_bytes (type);
6629
6630 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6631 return 0;
6632
6633 if (VECTOR_MODE_P (mode) || mode == TImode)
6634 {
6635 /* User-created vectors small enough to fit in EAX. */
6636 if (size < 8)
6637 return 0;
6638
6639 /* MMX/3dNow values are returned in MM0,
6640 except when it doesn't exist. */
6641 if (size == 8)
6642 return (TARGET_MMX ? 0 : 1);
6643
6644 /* SSE values are returned in XMM0, except when it doesn't exist. */
6645 if (size == 16)
6646 return (TARGET_SSE ? 0 : 1);
6647
6648 /* AVX values are returned in YMM0, except when it doesn't exist. */
6649 if (size == 32)
6650 return TARGET_AVX ? 0 : 1;
6651 }
6652
6653 if (mode == XFmode)
6654 return 0;
6655
6656 if (size > 12)
6657 return 1;
6658
6659 /* OImode shouldn't be used directly. */
6660 gcc_assert (mode != OImode);
6661
6662 return 0;
6663 }
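/* Illustrative sketch (not part of GCC itself): applying the checks above on
   a 32-bit target,

       // Hypothetical user code.
       typedef int v2si __attribute__ ((vector_size (8)));
       typedef int v4si __attribute__ ((vector_size (16)));
       v2si ret8 (void);           // memory unless MMX is enabled (MM0)
       v4si ret16 (void);          // memory unless SSE is enabled (XMM0)
       long double retld (void);   // XFmode, returned in st(0)

   anything larger than 12 bytes that is not one of the vector or XFmode
   special cases is returned through a hidden memory argument.  */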
6664
6665 static int ATTRIBUTE_UNUSED
6666 return_in_memory_64 (const_tree type, enum machine_mode mode)
6667 {
6668 int needed_intregs, needed_sseregs;
6669 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6670 }
6671
6672 static int ATTRIBUTE_UNUSED
6673 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6674 {
6675 HOST_WIDE_INT size = int_size_in_bytes (type);
6676
6677 /* __m128 is returned in xmm0. */
6678 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6679 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6680 return 0;
6681
6682 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6683 return (size != 1 && size != 2 && size != 4 && size != 8);
6684 }
6685
6686 static bool
6687 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6688 {
6689 #ifdef SUBTARGET_RETURN_IN_MEMORY
6690 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6691 #else
6692 const enum machine_mode mode = type_natural_mode (type, NULL);
6693
6694 if (TARGET_64BIT)
6695 {
6696 if (ix86_function_type_abi (fntype) == MS_ABI)
6697 return return_in_memory_ms_64 (type, mode);
6698 else
6699 return return_in_memory_64 (type, mode);
6700 }
6701 else
6702 return return_in_memory_32 (type, mode);
6703 #endif
6704 }
6705
6706 /* Return true iff TYPE is returned in memory. This version is used
6707 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6708 but differs notably in that when MMX is available, 8-byte vectors
6709 are returned in memory, rather than in MMX registers. */
6710
6711 bool
6712 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6713 {
6714 int size;
6715 enum machine_mode mode = type_natural_mode (type, NULL);
6716
6717 if (TARGET_64BIT)
6718 return return_in_memory_64 (type, mode);
6719
6720 if (mode == BLKmode)
6721 return 1;
6722
6723 size = int_size_in_bytes (type);
6724
6725 if (VECTOR_MODE_P (mode))
6726 {
6727 /* Return in memory only if MMX registers *are* available. This
6728 seems backwards, but it is consistent with the existing
6729 Solaris x86 ABI. */
6730 if (size == 8)
6731 return TARGET_MMX;
6732 if (size == 16)
6733 return !TARGET_SSE;
6734 }
6735 else if (mode == TImode)
6736 return !TARGET_SSE;
6737 else if (mode == XFmode)
6738 return 0;
6739
6740 return size > 12;
6741 }
6742
6743 /* When returning SSE vector types, we have a choice of either
6744 (1) being ABI incompatible with a -march switch, or
6745 (2) generating an error.
6746 Given no good solution, I think the safest thing is one warning.
6747 The user won't be able to use -Werror, but....
6748
6749 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6750 called in response to actually generating a caller or callee that
6751 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6752 via aggregate_value_p for general type probing from tree-ssa. */
6753
6754 static rtx
6755 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6756 {
6757 static bool warnedsse, warnedmmx;
6758
6759 if (!TARGET_64BIT && type)
6760 {
6761 /* Look at the return type of the function, not the function type. */
6762 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6763
6764 if (!TARGET_SSE && !warnedsse)
6765 {
6766 if (mode == TImode
6767 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6768 {
6769 warnedsse = true;
6770 warning (0, "SSE vector return without SSE enabled "
6771 "changes the ABI");
6772 }
6773 }
6774
6775 if (!TARGET_MMX && !warnedmmx)
6776 {
6777 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6778 {
6779 warnedmmx = true;
6780 warning (0, "MMX vector return without MMX enabled "
6781 "changes the ABI");
6782 }
6783 }
6784 }
6785
6786 return NULL;
6787 }
6788
6789 \f
6790 /* Create the va_list data type. */
6791
6792 /* Returns the calling-convention-specific va_list data type.
6793 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6794
6795 static tree
6796 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6797 {
6798 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6799
6800 /* For i386 we use plain pointer to argument area. */
6801 if (!TARGET_64BIT || abi == MS_ABI)
6802 return build_pointer_type (char_type_node);
6803
6804 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6805 type_decl = build_decl (BUILTINS_LOCATION,
6806 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6807
6808 f_gpr = build_decl (BUILTINS_LOCATION,
6809 FIELD_DECL, get_identifier ("gp_offset"),
6810 unsigned_type_node);
6811 f_fpr = build_decl (BUILTINS_LOCATION,
6812 FIELD_DECL, get_identifier ("fp_offset"),
6813 unsigned_type_node);
6814 f_ovf = build_decl (BUILTINS_LOCATION,
6815 FIELD_DECL, get_identifier ("overflow_arg_area"),
6816 ptr_type_node);
6817 f_sav = build_decl (BUILTINS_LOCATION,
6818 FIELD_DECL, get_identifier ("reg_save_area"),
6819 ptr_type_node);
6820
6821 va_list_gpr_counter_field = f_gpr;
6822 va_list_fpr_counter_field = f_fpr;
6823
6824 DECL_FIELD_CONTEXT (f_gpr) = record;
6825 DECL_FIELD_CONTEXT (f_fpr) = record;
6826 DECL_FIELD_CONTEXT (f_ovf) = record;
6827 DECL_FIELD_CONTEXT (f_sav) = record;
6828
6829 TREE_CHAIN (record) = type_decl;
6830 TYPE_NAME (record) = type_decl;
6831 TYPE_FIELDS (record) = f_gpr;
6832 TREE_CHAIN (f_gpr) = f_fpr;
6833 TREE_CHAIN (f_fpr) = f_ovf;
6834 TREE_CHAIN (f_ovf) = f_sav;
6835
6836 layout_type (record);
6837
6838 /* The correct type is an array type of one element. */
6839 return build_array_type (record, build_index_type (size_zero_node));
6840 }
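/* For reference, a sketch (illustration only, never compiled in) of what the
   record built above corresponds to in C for the 64-bit SysV ABI; the field
   names match the FIELD_DECLs created above, while example_va_list is a
   hypothetical name for the one-element array type returned.  */
#if 0
typedef struct __va_list_tag
{
  unsigned int gp_offset;	/* Byte offset of the next GP register slot
				   within reg_save_area.  */
  unsigned int fp_offset;	/* Byte offset of the next SSE register slot
				   within reg_save_area.  */
  void *overflow_arg_area;	/* Next argument passed on the stack.  */
  void *reg_save_area;		/* Base of the register save area.  */
} __va_list_tag;

/* The built type is an array type of one element of the record above.  */
typedef __va_list_tag example_va_list[1];
#endif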
6841
6842 /* Set up the builtin va_list data type and, for 64-bit, the additional
6843 calling-convention-specific va_list data types. */
6844
6845 static tree
6846 ix86_build_builtin_va_list (void)
6847 {
6848 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6849
6850 /* Initialize abi specific va_list builtin types. */
6851 if (TARGET_64BIT)
6852 {
6853 tree t;
6854 if (ix86_abi == MS_ABI)
6855 {
6856 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6857 if (TREE_CODE (t) != RECORD_TYPE)
6858 t = build_variant_type_copy (t);
6859 sysv_va_list_type_node = t;
6860 }
6861 else
6862 {
6863 t = ret;
6864 if (TREE_CODE (t) != RECORD_TYPE)
6865 t = build_variant_type_copy (t);
6866 sysv_va_list_type_node = t;
6867 }
6868 if (ix86_abi != MS_ABI)
6869 {
6870 t = ix86_build_builtin_va_list_abi (MS_ABI);
6871 if (TREE_CODE (t) != RECORD_TYPE)
6872 t = build_variant_type_copy (t);
6873 ms_va_list_type_node = t;
6874 }
6875 else
6876 {
6877 t = ret;
6878 if (TREE_CODE (t) != RECORD_TYPE)
6879 t = build_variant_type_copy (t);
6880 ms_va_list_type_node = t;
6881 }
6882 }
6883
6884 return ret;
6885 }
6886
6887 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6888
6889 static void
6890 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6891 {
6892 rtx save_area, mem;
6893 rtx label;
6894 rtx tmp_reg;
6895 rtx nsse_reg;
6896 alias_set_type set;
6897 int i;
6898
6899 /* GPR size of varargs save area. */
6900 if (cfun->va_list_gpr_size)
6901 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6902 else
6903 ix86_varargs_gpr_size = 0;
6904
6905 /* FPR size of varargs save area. We don't need it if we don't pass
6906 anything in SSE registers. */
6907 if (cum->sse_nregs && cfun->va_list_fpr_size)
6908 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6909 else
6910 ix86_varargs_fpr_size = 0;
6911
6912 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6913 return;
6914
6915 save_area = frame_pointer_rtx;
6916 set = get_varargs_alias_set ();
6917
6918 for (i = cum->regno;
6919 i < X86_64_REGPARM_MAX
6920 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6921 i++)
6922 {
6923 mem = gen_rtx_MEM (Pmode,
6924 plus_constant (save_area, i * UNITS_PER_WORD));
6925 MEM_NOTRAP_P (mem) = 1;
6926 set_mem_alias_set (mem, set);
6927 emit_move_insn (mem, gen_rtx_REG (Pmode,
6928 x86_64_int_parameter_registers[i]));
6929 }
6930
6931 if (ix86_varargs_fpr_size)
6932 {
6933 /* Now emit code to save SSE registers. The AX parameter contains the
6934 number of SSE parameter registers used to call this function. We use
6935 the sse_prologue_save insn template, which produces a computed jump
6936 across the SSE saves. We need some preparation work to get this working. */
6937
6938 label = gen_label_rtx ();
6939
6940 nsse_reg = gen_reg_rtx (Pmode);
6941 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6942
6943 /* Compute the address of the memory block we save into. We always use a
6944 pointer pointing 127 bytes past the first byte to store; this is needed
6945 to keep the instruction size limited to 4 bytes (5 bytes for AVX) by
6946 using a one-byte displacement. */
6947 tmp_reg = gen_reg_rtx (Pmode);
6948 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6949 plus_constant (save_area,
6950 ix86_varargs_gpr_size + 127)));
6951 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6952 MEM_NOTRAP_P (mem) = 1;
6953 set_mem_alias_set (mem, set);
6954 set_mem_align (mem, 64);
6955
6956 /* And finally do the dirty job! */
6957 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6958 GEN_INT (cum->sse_regno), label,
6959 gen_reg_rtx (Pmode)));
6960 }
6961 }
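/* For reference, an informal sketch (not part of the sources) of the varargs
   register save area laid out by the code above, assuming X86_64_REGPARM_MAX
   is 6 and X86_64_SSE_REGPARM_MAX is 8:

	save_area +   0 .. +  47   rdi, rsi, rdx, rcx, r8, r9  (8 bytes each)
	save_area +  48 .. + 175   xmm0 .. xmm7                (16 bytes each)

   The gp_offset and fp_offset fields of the va_list index into this block.  */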
6962
6963 static void
6964 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6965 {
6966 alias_set_type set = get_varargs_alias_set ();
6967 int i;
6968
6969 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6970 {
6971 rtx reg, mem;
6972
6973 mem = gen_rtx_MEM (Pmode,
6974 plus_constant (virtual_incoming_args_rtx,
6975 i * UNITS_PER_WORD));
6976 MEM_NOTRAP_P (mem) = 1;
6977 set_mem_alias_set (mem, set);
6978
6979 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6980 emit_move_insn (mem, reg);
6981 }
6982 }
6983
6984 static void
6985 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6986 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6987 int no_rtl)
6988 {
6989 CUMULATIVE_ARGS next_cum;
6990 tree fntype;
6991
6992 /* This argument doesn't appear to be used anymore. Which is good,
6993 because the old code here didn't suppress rtl generation. */
6994 gcc_assert (!no_rtl);
6995
6996 if (!TARGET_64BIT)
6997 return;
6998
6999 fntype = TREE_TYPE (current_function_decl);
7000
7001 /* For varargs, we do not want to skip the dummy va_dcl argument.
7002 For stdargs, we do want to skip the last named argument. */
7003 next_cum = *cum;
7004 if (stdarg_p (fntype))
7005 function_arg_advance (&next_cum, mode, type, 1);
7006
7007 if (cum->call_abi == MS_ABI)
7008 setup_incoming_varargs_ms_64 (&next_cum);
7009 else
7010 setup_incoming_varargs_64 (&next_cum);
7011 }
7012
7013 /* Check whether TYPE is a va_list of the plain char * kind. */
7014
7015 static bool
7016 is_va_list_char_pointer (tree type)
7017 {
7018 tree canonic;
7019
7020 /* For 32-bit it is always true. */
7021 if (!TARGET_64BIT)
7022 return true;
7023 canonic = ix86_canonical_va_list_type (type);
7024 return (canonic == ms_va_list_type_node
7025 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7026 }
7027
7028 /* Implement va_start. */
7029
7030 static void
7031 ix86_va_start (tree valist, rtx nextarg)
7032 {
7033 HOST_WIDE_INT words, n_gpr, n_fpr;
7034 tree f_gpr, f_fpr, f_ovf, f_sav;
7035 tree gpr, fpr, ovf, sav, t;
7036 tree type;
7037
7038 /* Only the 64-bit target needs something special. */
7039 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7040 {
7041 std_expand_builtin_va_start (valist, nextarg);
7042 return;
7043 }
7044
7045 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7046 f_fpr = TREE_CHAIN (f_gpr);
7047 f_ovf = TREE_CHAIN (f_fpr);
7048 f_sav = TREE_CHAIN (f_ovf);
7049
7050 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
7051 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
7052 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7053 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7054 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7055
7056 /* Count number of gp and fp argument registers used. */
7057 words = crtl->args.info.words;
7058 n_gpr = crtl->args.info.regno;
7059 n_fpr = crtl->args.info.sse_regno;
7060
7061 if (cfun->va_list_gpr_size)
7062 {
7063 type = TREE_TYPE (gpr);
7064 t = build2 (MODIFY_EXPR, type,
7065 gpr, build_int_cst (type, n_gpr * 8));
7066 TREE_SIDE_EFFECTS (t) = 1;
7067 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7068 }
7069
7070 if (TARGET_SSE && cfun->va_list_fpr_size)
7071 {
7072 type = TREE_TYPE (fpr);
7073 t = build2 (MODIFY_EXPR, type, fpr,
7074 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7075 TREE_SIDE_EFFECTS (t) = 1;
7076 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7077 }
7078
7079 /* Find the overflow area. */
7080 type = TREE_TYPE (ovf);
7081 t = make_tree (type, crtl->args.internal_arg_pointer);
7082 if (words != 0)
7083 t = build2 (POINTER_PLUS_EXPR, type, t,
7084 size_int (words * UNITS_PER_WORD));
7085 t = build2 (MODIFY_EXPR, type, ovf, t);
7086 TREE_SIDE_EFFECTS (t) = 1;
7087 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7088
7089 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7090 {
7091 /* Find the register save area.
7092 The function prologue saves it right above the stack frame. */
7093 type = TREE_TYPE (sav);
7094 t = make_tree (type, frame_pointer_rtx);
7095 if (!ix86_varargs_gpr_size)
7096 t = build2 (POINTER_PLUS_EXPR, type, t,
7097 size_int (-8 * X86_64_REGPARM_MAX));
7098 t = build2 (MODIFY_EXPR, type, sav, t);
7099 TREE_SIDE_EFFECTS (t) = 1;
7100 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7101 }
7102 }
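/* Roughly (an informal sketch, not part of the sources), the expansion above
   amounts to the following C-level initialization, where n_gpr and n_fpr are
   the counts of named GP and SSE register arguments and words is the size of
   the named stack arguments in words:

	ap->gp_offset = n_gpr * 8;
	ap->fp_offset = 8 * X86_64_REGPARM_MAX + n_fpr * 16;
	ap->overflow_arg_area = incoming_arg_pointer + words * UNITS_PER_WORD;
	ap->reg_save_area = register_save_block;

   Here incoming_arg_pointer and register_save_block are placeholder names for
   crtl->args.internal_arg_pointer and the save area set up by the prologue.  */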
7103
7104 /* Implement va_arg. */
7105
7106 static tree
7107 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7108 gimple_seq *post_p)
7109 {
7110 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7111 tree f_gpr, f_fpr, f_ovf, f_sav;
7112 tree gpr, fpr, ovf, sav, t;
7113 int size, rsize;
7114 tree lab_false, lab_over = NULL_TREE;
7115 tree addr, t2;
7116 rtx container;
7117 int indirect_p = 0;
7118 tree ptrtype;
7119 enum machine_mode nat_mode;
7120 unsigned int arg_boundary;
7121
7122 /* Only the 64-bit target needs something special. */
7123 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7124 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7125
7126 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7127 f_fpr = TREE_CHAIN (f_gpr);
7128 f_ovf = TREE_CHAIN (f_fpr);
7129 f_sav = TREE_CHAIN (f_ovf);
7130
7131 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7132 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7133 valist = build_va_arg_indirect_ref (valist);
7134 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7135 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7136 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7137
7138 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7139 if (indirect_p)
7140 type = build_pointer_type (type);
7141 size = int_size_in_bytes (type);
7142 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7143
7144 nat_mode = type_natural_mode (type, NULL);
7145 switch (nat_mode)
7146 {
7147 case V8SFmode:
7148 case V8SImode:
7149 case V32QImode:
7150 case V16HImode:
7151 case V4DFmode:
7152 case V4DImode:
7153 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
7154 if (ix86_cfun_abi () == SYSV_ABI)
7155 {
7156 container = NULL;
7157 break;
7158 }
7159
7160 default:
7161 container = construct_container (nat_mode, TYPE_MODE (type),
7162 type, 0, X86_64_REGPARM_MAX,
7163 X86_64_SSE_REGPARM_MAX, intreg,
7164 0);
7165 break;
7166 }
7167
7168 /* Pull the value out of the saved registers. */
7169
7170 addr = create_tmp_var (ptr_type_node, "addr");
7171
7172 if (container)
7173 {
7174 int needed_intregs, needed_sseregs;
7175 bool need_temp;
7176 tree int_addr, sse_addr;
7177
7178 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7179 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7180
7181 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7182
7183 need_temp = (!REG_P (container)
7184 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7185 || TYPE_ALIGN (type) > 128));
7186
7187 /* In case we are passing a structure, verify that it is a consecutive
7188 block in the register save area. If not, we need to do moves. */
7189 if (!need_temp && !REG_P (container))
7190 {
7191 /* Verify that all registers are strictly consecutive */
7192 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7193 {
7194 int i;
7195
7196 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7197 {
7198 rtx slot = XVECEXP (container, 0, i);
7199 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7200 || INTVAL (XEXP (slot, 1)) != i * 16)
7201 need_temp = 1;
7202 }
7203 }
7204 else
7205 {
7206 int i;
7207
7208 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7209 {
7210 rtx slot = XVECEXP (container, 0, i);
7211 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7212 || INTVAL (XEXP (slot, 1)) != i * 8)
7213 need_temp = 1;
7214 }
7215 }
7216 }
7217 if (!need_temp)
7218 {
7219 int_addr = addr;
7220 sse_addr = addr;
7221 }
7222 else
7223 {
7224 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7225 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7226 }
7227
7228 /* First ensure that we fit completely in registers. */
7229 if (needed_intregs)
7230 {
7231 t = build_int_cst (TREE_TYPE (gpr),
7232 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7233 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7234 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7235 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7236 gimplify_and_add (t, pre_p);
7237 }
7238 if (needed_sseregs)
7239 {
7240 t = build_int_cst (TREE_TYPE (fpr),
7241 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7242 + X86_64_REGPARM_MAX * 8);
7243 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7244 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7245 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7246 gimplify_and_add (t, pre_p);
7247 }
7248
7249 /* Compute index to start of area used for integer regs. */
7250 if (needed_intregs)
7251 {
7252 /* int_addr = gpr + sav; */
7253 t = fold_convert (sizetype, gpr);
7254 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7255 gimplify_assign (int_addr, t, pre_p);
7256 }
7257 if (needed_sseregs)
7258 {
7259 /* sse_addr = fpr + sav; */
7260 t = fold_convert (sizetype, fpr);
7261 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7262 gimplify_assign (sse_addr, t, pre_p);
7263 }
7264 if (need_temp)
7265 {
7266 int i;
7267 tree temp = create_tmp_var (type, "va_arg_tmp");
7268
7269 /* addr = &temp; */
7270 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7271 gimplify_assign (addr, t, pre_p);
7272
7273 for (i = 0; i < XVECLEN (container, 0); i++)
7274 {
7275 rtx slot = XVECEXP (container, 0, i);
7276 rtx reg = XEXP (slot, 0);
7277 enum machine_mode mode = GET_MODE (reg);
7278 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7279 tree addr_type = build_pointer_type (piece_type);
7280 tree daddr_type = build_pointer_type_for_mode (piece_type,
7281 ptr_mode, true);
7282 tree src_addr, src;
7283 int src_offset;
7284 tree dest_addr, dest;
7285
7286 if (SSE_REGNO_P (REGNO (reg)))
7287 {
7288 src_addr = sse_addr;
7289 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7290 }
7291 else
7292 {
7293 src_addr = int_addr;
7294 src_offset = REGNO (reg) * 8;
7295 }
7296 src_addr = fold_convert (addr_type, src_addr);
7297 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7298 size_int (src_offset));
7299 src = build_va_arg_indirect_ref (src_addr);
7300
7301 dest_addr = fold_convert (daddr_type, addr);
7302 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7303 size_int (INTVAL (XEXP (slot, 1))));
7304 dest = build_va_arg_indirect_ref (dest_addr);
7305
7306 gimplify_assign (dest, src, pre_p);
7307 }
7308 }
7309
7310 if (needed_intregs)
7311 {
7312 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7313 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7314 gimplify_assign (gpr, t, pre_p);
7315 }
7316
7317 if (needed_sseregs)
7318 {
7319 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7320 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7321 gimplify_assign (fpr, t, pre_p);
7322 }
7323
7324 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7325
7326 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7327 }
7328
7329 /* ... otherwise out of the overflow area. */
7330
7331 /* When the caller aligns a parameter on the stack, a parameter alignment
7332 beyond MAX_SUPPORTED_STACK_ALIGNMENT is clamped to
7333 MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee here with the
7334 caller. */
7335 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7336 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7337 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7338
7339 /* Care for on-stack alignment if needed. */
7340 if (arg_boundary <= 64
7341 || integer_zerop (TYPE_SIZE (type)))
7342 t = ovf;
7343 else
7344 {
7345 HOST_WIDE_INT align = arg_boundary / 8;
7346 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7347 size_int (align - 1));
7348 t = fold_convert (sizetype, t);
7349 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7350 size_int (-align));
7351 t = fold_convert (TREE_TYPE (ovf), t);
7352 if (crtl->stack_alignment_needed < arg_boundary)
7353 crtl->stack_alignment_needed = arg_boundary;
7354 }
7355 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7356 gimplify_assign (addr, t, pre_p);
7357
7358 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7359 size_int (rsize * UNITS_PER_WORD));
7360 gimplify_assign (unshare_expr (ovf), t, pre_p);
7361
7362 if (container)
7363 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7364
7365 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7366 addr = fold_convert (ptrtype, addr);
7367
7368 if (indirect_p)
7369 addr = build_va_arg_indirect_ref (addr);
7370 return build_va_arg_indirect_ref (addr);
7371 }
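/* The control flow built above mirrors the va_arg algorithm of the SysV
   AMD64 psABI.  A simplified C-level sketch (illustration only, with
   hypothetical names such as ap and align) for a type that needs only
   general-purpose registers:

	if (ap->gp_offset > 48 - needed_intregs * 8)
	  goto overflow;                                   (lab_false)
	addr = ap->reg_save_area + ap->gp_offset;
	ap->gp_offset += needed_intregs * 8;
	goto done;                                         (lab_over)
      overflow:
	addr = align (ap->overflow_arg_area, arg_boundary);
	ap->overflow_arg_area = addr + rsize * UNITS_PER_WORD;
      done:
	result = *(TYPE *) addr;

   Types needing SSE registers use fp_offset and 16-byte slots instead, and
   mixed or over-aligned aggregates go through the temporary-copy path above.  */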
7372 \f
7373 /* Return nonzero if OPNUM's MEM should be matched
7374 in movabs* patterns. */
7375
7376 int
7377 ix86_check_movabs (rtx insn, int opnum)
7378 {
7379 rtx set, mem;
7380
7381 set = PATTERN (insn);
7382 if (GET_CODE (set) == PARALLEL)
7383 set = XVECEXP (set, 0, 0);
7384 gcc_assert (GET_CODE (set) == SET);
7385 mem = XEXP (set, opnum);
7386 while (GET_CODE (mem) == SUBREG)
7387 mem = SUBREG_REG (mem);
7388 gcc_assert (MEM_P (mem));
7389 return (volatile_ok || !MEM_VOLATILE_P (mem));
7390 }
7391 \f
7392 /* Initialize the table of extra 80387 mathematical constants. */
7393
7394 static void
7395 init_ext_80387_constants (void)
7396 {
7397 static const char * cst[5] =
7398 {
7399 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7400 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7401 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7402 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7403 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7404 };
7405 int i;
7406
7407 for (i = 0; i < 5; i++)
7408 {
7409 real_from_string (&ext_80387_constants_table[i], cst[i]);
7410 /* Ensure each constant is rounded to XFmode precision. */
7411 real_convert (&ext_80387_constants_table[i],
7412 XFmode, &ext_80387_constants_table[i]);
7413 }
7414
7415 ext_80387_constants_init = 1;
7416 }
7417
7418 /* Return true if the constant is something that can be loaded with
7419 a special instruction. */
7420
7421 int
7422 standard_80387_constant_p (rtx x)
7423 {
7424 enum machine_mode mode = GET_MODE (x);
7425
7426 REAL_VALUE_TYPE r;
7427
7428 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7429 return -1;
7430
7431 if (x == CONST0_RTX (mode))
7432 return 1;
7433 if (x == CONST1_RTX (mode))
7434 return 2;
7435
7436 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7437
7438 /* For XFmode constants, try to find a special 80387 instruction when
7439 optimizing for size or on those CPUs that benefit from them. */
7440 if (mode == XFmode
7441 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7442 {
7443 int i;
7444
7445 if (! ext_80387_constants_init)
7446 init_ext_80387_constants ();
7447
7448 for (i = 0; i < 5; i++)
7449 if (real_identical (&r, &ext_80387_constants_table[i]))
7450 return i + 3;
7451 }
7452
7453 /* A load of the constant -0.0 or -1.0 will be split into an
7454 fldz;fchs or fld1;fchs sequence. */
7455 if (real_isnegzero (&r))
7456 return 8;
7457 if (real_identical (&r, &dconstm1))
7458 return 9;
7459
7460 return 0;
7461 }
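/* For reference (a summary derived from the code above and the opcode
   function below, not part of the original sources), the return values of
   standard_80387_constant_p map to instructions as follows:

	-1  not an 80387 constant	 0  no special instruction
	 1  0.0      -> fldz		 2  1.0       -> fld1
	 3  log10(2) -> fldlg2		 4  ln(2)     -> fldln2
	 5  log2(e)  -> fldl2e		 6  log2(10)  -> fldl2t
	 7  pi       -> fldpi
	 8  -0.0     -> fldz; fchs	 9  -1.0      -> fld1; fchs  */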
7462
7463 /* Return the opcode of the special instruction to be used to load
7464 the constant X. */
7465
7466 const char *
7467 standard_80387_constant_opcode (rtx x)
7468 {
7469 switch (standard_80387_constant_p (x))
7470 {
7471 case 1:
7472 return "fldz";
7473 case 2:
7474 return "fld1";
7475 case 3:
7476 return "fldlg2";
7477 case 4:
7478 return "fldln2";
7479 case 5:
7480 return "fldl2e";
7481 case 6:
7482 return "fldl2t";
7483 case 7:
7484 return "fldpi";
7485 case 8:
7486 case 9:
7487 return "#";
7488 default:
7489 gcc_unreachable ();
7490 }
7491 }
7492
7493 /* Return the CONST_DOUBLE representing the 80387 constant that is
7494 loaded by the specified special instruction. The argument IDX
7495 matches the return value from standard_80387_constant_p. */
7496
7497 rtx
7498 standard_80387_constant_rtx (int idx)
7499 {
7500 int i;
7501
7502 if (! ext_80387_constants_init)
7503 init_ext_80387_constants ();
7504
7505 switch (idx)
7506 {
7507 case 3:
7508 case 4:
7509 case 5:
7510 case 6:
7511 case 7:
7512 i = idx - 3;
7513 break;
7514
7515 default:
7516 gcc_unreachable ();
7517 }
7518
7519 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7520 XFmode);
7521 }
7522
7523 /* Return 1 if X is all 0s and 2 if X is all 1s
7524 in a supported SSE vector mode. */
7525
7526 int
7527 standard_sse_constant_p (rtx x)
7528 {
7529 enum machine_mode mode = GET_MODE (x);
7530
7531 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7532 return 1;
7533 if (vector_all_ones_operand (x, mode))
7534 switch (mode)
7535 {
7536 case V16QImode:
7537 case V8HImode:
7538 case V4SImode:
7539 case V2DImode:
7540 if (TARGET_SSE2)
7541 return 2;
7542 default:
7543 break;
7544 }
7545
7546 return 0;
7547 }
7548
7549 /* Return the opcode of the special instruction to be used to load
7550 the constant X. */
7551
7552 const char *
7553 standard_sse_constant_opcode (rtx insn, rtx x)
7554 {
7555 switch (standard_sse_constant_p (x))
7556 {
7557 case 1:
7558 switch (get_attr_mode (insn))
7559 {
7560 case MODE_V4SF:
7561 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7562 case MODE_V2DF:
7563 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7564 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7565 else
7566 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7567 case MODE_TI:
7568 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7569 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7570 else
7571 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7572 case MODE_V8SF:
7573 return "vxorps\t%x0, %x0, %x0";
7574 case MODE_V4DF:
7575 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7576 return "vxorps\t%x0, %x0, %x0";
7577 else
7578 return "vxorpd\t%x0, %x0, %x0";
7579 case MODE_OI:
7580 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7581 return "vxorps\t%x0, %x0, %x0";
7582 else
7583 return "vpxor\t%x0, %x0, %x0";
7584 default:
7585 break;
7586 }
7587 case 2:
7588 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7589 default:
7590 break;
7591 }
7592 gcc_unreachable ();
7593 }
7594
7595 /* Return 1 if OP contains a symbol reference. */
7596
7597 int
7598 symbolic_reference_mentioned_p (rtx op)
7599 {
7600 const char *fmt;
7601 int i;
7602
7603 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7604 return 1;
7605
7606 fmt = GET_RTX_FORMAT (GET_CODE (op));
7607 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7608 {
7609 if (fmt[i] == 'E')
7610 {
7611 int j;
7612
7613 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7614 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7615 return 1;
7616 }
7617
7618 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7619 return 1;
7620 }
7621
7622 return 0;
7623 }
7624
7625 /* Return 1 if it is appropriate to emit `ret' instructions in the
7626 body of a function. Do this only if the epilogue is simple, needing a
7627 couple of insns. Prior to reloading, we can't tell how many registers
7628 must be saved, so return 0 then. Return 0 if there is no frame
7629 marker to de-allocate. */
7630
7631 int
7632 ix86_can_use_return_insn_p (void)
7633 {
7634 struct ix86_frame frame;
7635
7636 if (! reload_completed || frame_pointer_needed)
7637 return 0;
7638
7639 /* Don't allow more than 32k bytes of popped args, since that's all
7640 we can do with one instruction. */
7641 if (crtl->args.pops_args
7642 && crtl->args.size >= 32768)
7643 return 0;
7644
7645 ix86_compute_frame_layout (&frame);
7646 return frame.to_allocate == 0 && frame.padding0 == 0
7647 && (frame.nregs + frame.nsseregs) == 0;
7648 }
7649 \f
7650 /* Value should be nonzero if functions must have frame pointers.
7651 Zero means the frame pointer need not be set up (and parms may
7652 be accessed via the stack pointer) in functions that seem suitable. */
7653
7654 static bool
7655 ix86_frame_pointer_required (void)
7656 {
7657 /* If we accessed previous frames, then the generated code expects
7658 to be able to access the saved ebp value in our frame. */
7659 if (cfun->machine->accesses_prev_frame)
7660 return true;
7661
7662 /* Several x86 OSes need a frame pointer for other reasons,
7663 usually pertaining to setjmp. */
7664 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7665 return true;
7666
7667 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7668 the frame pointer by default. Turn it back on now if we do not
7669 have a leaf function. */
7670 if (TARGET_OMIT_LEAF_FRAME_POINTER
7671 && (!current_function_is_leaf
7672 || ix86_current_function_calls_tls_descriptor))
7673 return true;
7674
7675 if (crtl->profile)
7676 return true;
7677
7678 return false;
7679 }
7680
7681 /* Record that the current function accesses previous call frames. */
7682
7683 void
7684 ix86_setup_frame_addresses (void)
7685 {
7686 cfun->machine->accesses_prev_frame = 1;
7687 }
7688 \f
7689 #ifndef USE_HIDDEN_LINKONCE
7690 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7691 # define USE_HIDDEN_LINKONCE 1
7692 # else
7693 # define USE_HIDDEN_LINKONCE 0
7694 # endif
7695 #endif
7696
7697 static int pic_labels_used;
7698
7699 /* Fills in the label name that should be used for a pc thunk for
7700 the given register. */
7701
7702 static void
7703 get_pc_thunk_name (char name[32], unsigned int regno)
7704 {
7705 gcc_assert (!TARGET_64BIT);
7706
7707 if (USE_HIDDEN_LINKONCE)
7708 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7709 else
7710 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7711 }
7712
7713
7714 /* This function generates the pc thunks used by -fpic code: each thunk
7715 loads its register with the return address of the caller and then returns. */
7716
7717 static void
7718 ix86_code_end (void)
7719 {
7720 rtx xops[2];
7721 int regno;
7722
7723 for (regno = 0; regno < 8; ++regno)
7724 {
7725 char name[32];
7726 tree decl;
7727
7728 if (! ((pic_labels_used >> regno) & 1))
7729 continue;
7730
7731 get_pc_thunk_name (name, regno);
7732
7733 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7734 get_identifier (name),
7735 build_function_type (void_type_node, void_list_node));
7736 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7737 NULL_TREE, void_type_node);
7738 TREE_PUBLIC (decl) = 1;
7739 TREE_STATIC (decl) = 1;
7740
7741 #if TARGET_MACHO
7742 if (TARGET_MACHO)
7743 {
7744 switch_to_section (darwin_sections[text_coal_section]);
7745 fputs ("\t.weak_definition\t", asm_out_file);
7746 assemble_name (asm_out_file, name);
7747 fputs ("\n\t.private_extern\t", asm_out_file);
7748 assemble_name (asm_out_file, name);
7749 fputs ("\n", asm_out_file);
7750 ASM_OUTPUT_LABEL (asm_out_file, name);
7751 DECL_WEAK (decl) = 1;
7752 }
7753 else
7754 #endif
7755 if (USE_HIDDEN_LINKONCE)
7756 {
7757 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7758
7759 (*targetm.asm_out.unique_section) (decl, 0);
7760 switch_to_section (get_named_section (decl, NULL, 0));
7761
7762 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7763 fputs ("\t.hidden\t", asm_out_file);
7764 assemble_name (asm_out_file, name);
7765 putc ('\n', asm_out_file);
7766 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7767 }
7768 else
7769 {
7770 switch_to_section (text_section);
7771 ASM_OUTPUT_LABEL (asm_out_file, name);
7772 }
7773
7774 DECL_INITIAL (decl) = make_node (BLOCK);
7775 current_function_decl = decl;
7776 init_function_start (decl);
7777 first_function_block_is_cold = false;
7778 /* Make sure unwind info is emitted for the thunk if needed. */
7779 final_start_function (emit_barrier (), asm_out_file, 1);
7780
7781 xops[0] = gen_rtx_REG (Pmode, regno);
7782 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7783 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7784 output_asm_insn ("ret", xops);
7785 final_end_function ();
7786 init_insn_lengths ();
7787 free_after_compilation (cfun);
7788 set_cfun (NULL);
7789 current_function_decl = NULL;
7790 }
7791 }
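/* For reference, an illustrative (not verbatim) example of the thunk emitted
   above for %ebx when USE_HIDDEN_LINKONCE is set; the surrounding section,
   .globl and .hidden decoration is produced by the code above:

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret
*/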
7792
7793 /* Emit code for the SET_GOT patterns. */
7794
7795 const char *
7796 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7797 {
7798 rtx xops[3];
7799
7800 xops[0] = dest;
7801
7802 if (TARGET_VXWORKS_RTP && flag_pic)
7803 {
7804 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7805 xops[2] = gen_rtx_MEM (Pmode,
7806 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7807 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7808
7809 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7810 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7811 an unadorned address. */
7812 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7813 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7814 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7815 return "";
7816 }
7817
7818 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7819
7820 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7821 {
7822 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7823
7824 if (!flag_pic)
7825 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7826 else
7827 {
7828 output_asm_insn ("call\t%a2", xops);
7829 #ifdef DWARF2_UNWIND_INFO
7830 /* The call to next label acts as a push. */
7831 if (dwarf2out_do_frame ())
7832 {
7833 rtx insn;
7834 start_sequence ();
7835 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7836 gen_rtx_PLUS (Pmode,
7837 stack_pointer_rtx,
7838 GEN_INT (-4))));
7839 RTX_FRAME_RELATED_P (insn) = 1;
7840 dwarf2out_frame_debug (insn, true);
7841 end_sequence ();
7842 }
7843 #endif
7844 }
7845
7846 #if TARGET_MACHO
7847 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7848 is what will be referenced by the Mach-O PIC subsystem. */
7849 if (!label)
7850 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7851 #endif
7852
7853 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7854 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7855
7856 if (flag_pic)
7857 {
7858 output_asm_insn ("pop%z0\t%0", xops);
7859 #ifdef DWARF2_UNWIND_INFO
7860 /* The pop is a pop and clobbers dest, but doesn't restore it
7861 for unwind info purposes. */
7862 if (dwarf2out_do_frame ())
7863 {
7864 rtx insn;
7865 start_sequence ();
7866 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7867 dwarf2out_frame_debug (insn, true);
7868 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7869 gen_rtx_PLUS (Pmode,
7870 stack_pointer_rtx,
7871 GEN_INT (4))));
7872 RTX_FRAME_RELATED_P (insn) = 1;
7873 dwarf2out_frame_debug (insn, true);
7874 end_sequence ();
7875 }
7876 #endif
7877 }
7878 }
7879 else
7880 {
7881 char name[32];
7882 get_pc_thunk_name (name, REGNO (dest));
7883 pic_labels_used |= 1 << REGNO (dest);
7884
7885 #ifdef DWARF2_UNWIND_INFO
7886 /* Ensure all queued register saves are flushed before the
7887 call. */
7888 if (dwarf2out_do_frame ())
7889 {
7890 rtx insn;
7891 start_sequence ();
7892 insn = emit_barrier ();
7893 end_sequence ();
7894 dwarf2out_frame_debug (insn, false);
7895 }
7896 #endif
7897 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7898 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7899 output_asm_insn ("call\t%X2", xops);
7900 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7901 is what will be referenced by the Mach-O PIC subsystem. */
7902 #if TARGET_MACHO
7903 if (!label)
7904 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7905 else
7906 targetm.asm_out.internal_label (asm_out_file, "L",
7907 CODE_LABEL_NUMBER (label));
7908 #endif
7909 }
7910
7911 if (TARGET_MACHO)
7912 return "";
7913
7914 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7915 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7916 else
7917 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7918
7919 return "";
7920 }
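/* For reference, illustrative sketches (not verbatim output) of the two main
   sequences produced above for loading the PIC register, here %ebx:

   With TARGET_DEEP_BRANCH_PREDICTION, using the thunk from ix86_code_end:

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   Without it, the call to the next instruction acts as a push of the pc:

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx
*/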
7921
7922 /* Generate a "push" pattern for input ARG. */
7923
7924 static rtx
7925 gen_push (rtx arg)
7926 {
7927 if (ix86_cfa_state->reg == stack_pointer_rtx)
7928 ix86_cfa_state->offset += UNITS_PER_WORD;
7929
7930 return gen_rtx_SET (VOIDmode,
7931 gen_rtx_MEM (Pmode,
7932 gen_rtx_PRE_DEC (Pmode,
7933 stack_pointer_rtx)),
7934 arg);
7935 }
7936
7937 /* Return >= 0 if there is an unused call-clobbered register available
7938 for the entire function. */
7939
7940 static unsigned int
7941 ix86_select_alt_pic_regnum (void)
7942 {
7943 if (current_function_is_leaf && !crtl->profile
7944 && !ix86_current_function_calls_tls_descriptor)
7945 {
7946 int i, drap;
7947 /* Can't use the same register for both PIC and DRAP. */
7948 if (crtl->drap_reg)
7949 drap = REGNO (crtl->drap_reg);
7950 else
7951 drap = -1;
7952 for (i = 2; i >= 0; --i)
7953 if (i != drap && !df_regs_ever_live_p (i))
7954 return i;
7955 }
7956
7957 return INVALID_REGNUM;
7958 }
7959
7960 /* Return 1 if we need to save REGNO. */
7961 static int
7962 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7963 {
7964 if (pic_offset_table_rtx
7965 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7966 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7967 || crtl->profile
7968 || crtl->calls_eh_return
7969 || crtl->uses_const_pool))
7970 {
7971 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7972 return 0;
7973 return 1;
7974 }
7975
7976 if (crtl->calls_eh_return && maybe_eh_return)
7977 {
7978 unsigned i;
7979 for (i = 0; ; i++)
7980 {
7981 unsigned test = EH_RETURN_DATA_REGNO (i);
7982 if (test == INVALID_REGNUM)
7983 break;
7984 if (test == regno)
7985 return 1;
7986 }
7987 }
7988
7989 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7990 return 1;
7991
7992 return (df_regs_ever_live_p (regno)
7993 && !call_used_regs[regno]
7994 && !fixed_regs[regno]
7995 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7996 }
7997
7998 /* Return the number of saved general purpose registers. */
7999
8000 static int
8001 ix86_nsaved_regs (void)
8002 {
8003 int nregs = 0;
8004 int regno;
8005
8006 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8007 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8008 nregs ++;
8009 return nregs;
8010 }
8011
8012 /* Return the number of saved SSE registers. */
8013
8014 static int
8015 ix86_nsaved_sseregs (void)
8016 {
8017 int nregs = 0;
8018 int regno;
8019
8020 if (ix86_cfun_abi () != MS_ABI)
8021 return 0;
8022 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8023 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8024 nregs ++;
8025 return nregs;
8026 }
8027
8028 /* Given FROM and TO register numbers, say whether this elimination is
8029 allowed. If stack alignment is needed, we can only replace argument
8030 pointer with hard frame pointer, or replace frame pointer with stack
8031 pointer. Otherwise, frame pointer elimination is automatically
8032 handled and all other eliminations are valid. */
8033
8034 static bool
8035 ix86_can_eliminate (const int from, const int to)
8036 {
8037 if (stack_realign_fp)
8038 return ((from == ARG_POINTER_REGNUM
8039 && to == HARD_FRAME_POINTER_REGNUM)
8040 || (from == FRAME_POINTER_REGNUM
8041 && to == STACK_POINTER_REGNUM));
8042 else
8043 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8044 }
8045
8046 /* Return the offset between two registers, one to be eliminated, and the other
8047 its replacement, at the start of a routine. */
8048
8049 HOST_WIDE_INT
8050 ix86_initial_elimination_offset (int from, int to)
8051 {
8052 struct ix86_frame frame;
8053 ix86_compute_frame_layout (&frame);
8054
8055 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8056 return frame.hard_frame_pointer_offset;
8057 else if (from == FRAME_POINTER_REGNUM
8058 && to == HARD_FRAME_POINTER_REGNUM)
8059 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8060 else
8061 {
8062 gcc_assert (to == STACK_POINTER_REGNUM);
8063
8064 if (from == ARG_POINTER_REGNUM)
8065 return frame.stack_pointer_offset;
8066
8067 gcc_assert (from == FRAME_POINTER_REGNUM);
8068 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8069 }
8070 }
8071
8072 /* In a dynamically-aligned function, we can't know the offset from
8073 stack pointer to frame pointer, so we must ensure that setjmp
8074 eliminates fp against the hard fp (%ebp) rather than trying to
8075 index from %esp up to the top of the frame across a gap that is
8076 of unknown (at compile-time) size. */
8077 static rtx
8078 ix86_builtin_setjmp_frame_value (void)
8079 {
8080 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8081 }
8082
8083 /* Fill the ix86_frame structure describing the frame of the function currently being compiled. */
8084
8085 static void
8086 ix86_compute_frame_layout (struct ix86_frame *frame)
8087 {
8088 unsigned int stack_alignment_needed;
8089 HOST_WIDE_INT offset;
8090 unsigned int preferred_alignment;
8091 HOST_WIDE_INT size = get_frame_size ();
8092
8093 frame->nregs = ix86_nsaved_regs ();
8094 frame->nsseregs = ix86_nsaved_sseregs ();
8095
8096 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8097 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8098
8099 /* The MS ABI seems to require the stack alignment to always be 16, except
8100 for function prologues. */
8101 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8102 {
8103 preferred_alignment = 16;
8104 stack_alignment_needed = 16;
8105 crtl->preferred_stack_boundary = 128;
8106 crtl->stack_alignment_needed = 128;
8107 }
8108
8109 gcc_assert (!size || stack_alignment_needed);
8110 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8111 gcc_assert (preferred_alignment <= stack_alignment_needed);
8112
8113 /* During reload iteration the number of registers saved can change.
8114 Recompute the value as needed. Do not recompute when the number of
8115 registers didn't change, as reload calls the function multiple times and
8116 does not expect the decision to change within a single iteration. */
8117 if (!optimize_function_for_size_p (cfun)
8118 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8119 {
8120 int count = frame->nregs;
8121 struct cgraph_node *node = cgraph_node (current_function_decl);
8122
8123 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8124 /* The fast prologue uses moves instead of pushes to save registers. This
8125 is significantly longer, but it also executes faster, as modern hardware
8126 can execute the moves in parallel but can't do that for push/pop.
8127
8128 Be careful about choosing which prologue to emit: when the function takes
8129 many instructions to execute, we may as well use the slow version, and
8130 likewise when the function is known to be outside a hot spot (this is
8131 known only with feedback). Weight the size of the function by the number
8132 of registers to save, as it is cheap to use one or two push instructions
8133 but very slow to use many of them. */
8134 if (count)
8135 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8136 if (node->frequency < NODE_FREQUENCY_NORMAL
8137 || (flag_branch_probabilities
8138 && node->frequency < NODE_FREQUENCY_HOT))
8139 cfun->machine->use_fast_prologue_epilogue = false;
8140 else
8141 cfun->machine->use_fast_prologue_epilogue
8142 = !expensive_function_p (count);
8143 }
8144 if (TARGET_PROLOGUE_USING_MOVE
8145 && cfun->machine->use_fast_prologue_epilogue)
8146 frame->save_regs_using_mov = true;
8147 else
8148 frame->save_regs_using_mov = false;
8149
8150 /* Skip return address. */
8151 offset = UNITS_PER_WORD;
8152
8153 /* Skip pushed static chain. */
8154 if (ix86_static_chain_on_stack)
8155 offset += UNITS_PER_WORD;
8156
8157 /* Skip saved base pointer. */
8158 if (frame_pointer_needed)
8159 offset += UNITS_PER_WORD;
8160
8161 frame->hard_frame_pointer_offset = offset;
8162
8163 /* Align the offset, because the realigned frame starts from
8164 here. */
8165 if (stack_realign_fp)
8166 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8167
8168 /* Register save area */
8169 offset += frame->nregs * UNITS_PER_WORD;
8170
8171 /* Align SSE reg save area. */
8172 if (frame->nsseregs)
8173 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8174 else
8175 frame->padding0 = 0;
8176
8177 /* SSE register save area. */
8178 offset += frame->padding0 + frame->nsseregs * 16;
8179
8180 /* Va-arg area */
8181 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8182 offset += frame->va_arg_size;
8183
8184 /* Align start of frame for local function. */
8185 frame->padding1 = ((offset + stack_alignment_needed - 1)
8186 & -stack_alignment_needed) - offset;
8187
8188 offset += frame->padding1;
8189
8190 /* Frame pointer points here. */
8191 frame->frame_pointer_offset = offset;
8192
8193 offset += size;
8194
8195 /* Add the outgoing arguments area. It can be skipped if we eliminated
8196 all the function calls as dead code.
8197 Skipping is however impossible when the function calls alloca. The
8198 alloca expander assumes that the last crtl->outgoing_args_size bytes
8199 of the stack frame are unused. */
8200 if (ACCUMULATE_OUTGOING_ARGS
8201 && (!current_function_is_leaf || cfun->calls_alloca
8202 || ix86_current_function_calls_tls_descriptor))
8203 {
8204 offset += crtl->outgoing_args_size;
8205 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8206 }
8207 else
8208 frame->outgoing_arguments_size = 0;
8209
8210 /* Align stack boundary. Only needed if we're calling another function
8211 or using alloca. */
8212 if (!current_function_is_leaf || cfun->calls_alloca
8213 || ix86_current_function_calls_tls_descriptor)
8214 frame->padding2 = ((offset + preferred_alignment - 1)
8215 & -preferred_alignment) - offset;
8216 else
8217 frame->padding2 = 0;
8218
8219 offset += frame->padding2;
8220
8221 /* We've reached end of stack frame. */
8222 frame->stack_pointer_offset = offset;
8223
8224 /* Size prologue needs to allocate. */
8225 frame->to_allocate =
8226 (size + frame->padding1 + frame->padding2
8227 + frame->outgoing_arguments_size + frame->va_arg_size);
8228
8229 if ((!frame->to_allocate && frame->nregs <= 1)
8230 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8231 frame->save_regs_using_mov = false;
8232
8233 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8234 && current_function_sp_is_unchanging
8235 && current_function_is_leaf
8236 && !ix86_current_function_calls_tls_descriptor)
8237 {
8238 frame->red_zone_size = frame->to_allocate;
8239 if (frame->save_regs_using_mov)
8240 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8241 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8242 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8243 }
8244 else
8245 frame->red_zone_size = 0;
8246 frame->to_allocate -= frame->red_zone_size;
8247 frame->stack_pointer_offset -= frame->red_zone_size;
8248 }
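/* For reference, an informal picture (not part of the sources) of the frame
   laid out above, from higher to lower stack addresses:

	incoming arguments
	return address
	[pushed static chain]
	[saved frame pointer]		<- hard_frame_pointer_offset
	saved general registers
	padding0 + saved SSE registers
	va_arg register save area
	padding1
	local variables and spills	<- frame_pointer_offset points at the top
	outgoing arguments
	padding2			<- stack_pointer_offset marks the end
	[red zone, subtracted above when usable]
*/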
8249
8250 /* Emit code to save registers in the prologue. */
8251
8252 static void
8253 ix86_emit_save_regs (void)
8254 {
8255 unsigned int regno;
8256 rtx insn;
8257
8258 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8259 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8260 {
8261 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8262 RTX_FRAME_RELATED_P (insn) = 1;
8263 }
8264 }
8265
8266 /* Emit code to save registers using MOV insns. The first register
8267 is saved at POINTER + OFFSET. */
8268 static void
8269 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8270 {
8271 unsigned int regno;
8272 rtx insn;
8273
8274 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8275 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8276 {
8277 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8278 Pmode, offset),
8279 gen_rtx_REG (Pmode, regno));
8280 RTX_FRAME_RELATED_P (insn) = 1;
8281 offset += UNITS_PER_WORD;
8282 }
8283 }
8284
8285 /* Emit code to save SSE registers using MOV insns. The first register
8286 is saved at POINTER + OFFSET. */
8287 static void
8288 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8289 {
8290 unsigned int regno;
8291 rtx insn;
8292 rtx mem;
8293
8294 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8295 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8296 {
8297 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8298 set_mem_align (mem, 128);
8299 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8300 RTX_FRAME_RELATED_P (insn) = 1;
8301 offset += 16;
8302 }
8303 }
8304
8305 static GTY(()) rtx queued_cfa_restores;
8306
8307 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
8308 manipulation insn. Don't add it if the previously
8309 saved value will be left untouched within the stack red zone till return,
8310 as unwinders can find the same value in the register and
8311 on the stack. */
8312
8313 static void
8314 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8315 {
8316 if (TARGET_RED_ZONE
8317 && !TARGET_64BIT_MS_ABI
8318 && red_offset + RED_ZONE_SIZE >= 0
8319 && crtl->args.pops_args < 65536)
8320 return;
8321
8322 if (insn)
8323 {
8324 add_reg_note (insn, REG_CFA_RESTORE, reg);
8325 RTX_FRAME_RELATED_P (insn) = 1;
8326 }
8327 else
8328 queued_cfa_restores
8329 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8330 }
8331
8332 /* Add queued REG_CFA_RESTORE notes, if any, to INSN. */
8333
8334 static void
8335 ix86_add_queued_cfa_restore_notes (rtx insn)
8336 {
8337 rtx last;
8338 if (!queued_cfa_restores)
8339 return;
8340 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8341 ;
8342 XEXP (last, 1) = REG_NOTES (insn);
8343 REG_NOTES (insn) = queued_cfa_restores;
8344 queued_cfa_restores = NULL_RTX;
8345 RTX_FRAME_RELATED_P (insn) = 1;
8346 }
8347
8348 /* Expand prologue or epilogue stack adjustment.
8349 The pattern exists to put a dependency on all ebp-based memory accesses.
8350 STYLE should be negative if instructions should be marked as frame related,
8351 zero if the %r11 register is live and cannot be freely used, and positive
8352 otherwise. */
8353
8354 static void
8355 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8356 int style, bool set_cfa)
8357 {
8358 rtx insn;
8359
8360 if (! TARGET_64BIT)
8361 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8362 else if (x86_64_immediate_operand (offset, DImode))
8363 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8364 else
8365 {
8366 rtx tmp;
8367 /* r11 is used by indirect sibcall return as well, set before the
8368 epilogue and used after the epilogue. */
8369 if (style)
8370 tmp = gen_rtx_REG (DImode, R11_REG);
8371 else
8372 {
8373 gcc_assert (src != hard_frame_pointer_rtx
8374 && dest != hard_frame_pointer_rtx);
8375 tmp = hard_frame_pointer_rtx;
8376 }
8377 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
8378 if (style < 0)
8379 RTX_FRAME_RELATED_P (insn) = 1;
8380 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, tmp,
8381 offset));
8382 }
8383
8384 if (style >= 0)
8385 ix86_add_queued_cfa_restore_notes (insn);
8386
8387 if (set_cfa)
8388 {
8389 rtx r;
8390
8391 gcc_assert (ix86_cfa_state->reg == src);
8392 ix86_cfa_state->offset += INTVAL (offset);
8393 ix86_cfa_state->reg = dest;
8394
8395 r = gen_rtx_PLUS (Pmode, src, offset);
8396 r = gen_rtx_SET (VOIDmode, dest, r);
8397 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8398 RTX_FRAME_RELATED_P (insn) = 1;
8399 }
8400 else if (style < 0)
8401 RTX_FRAME_RELATED_P (insn) = 1;
8402 }
8403
8404 /* Find an available register to be used as the dynamic realign argument
8405 pointer register. Such a register will be written in the prologue and
8406 used at the beginning of the body, so it must not be
8407 1. a parameter passing register.
8408 2. the GOT pointer.
8409 We reuse the static-chain register if it is available. Otherwise, we
8410 use DI for i386 and R13 for x86-64. We chose R13 since it has
8411 shorter encoding.
8412
8413 Return: the regno of the chosen register. */
8414
8415 static unsigned int
8416 find_drap_reg (void)
8417 {
8418 tree decl = cfun->decl;
8419
8420 if (TARGET_64BIT)
8421 {
8422 /* Use R13 for a nested function or a function needing a static chain.
8423 Since a function with a tail call may use any caller-saved
8424 register in the epilogue, DRAP must not use a caller-saved
8425 register in such a case. */
8426 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8427 return R13_REG;
8428
8429 return R10_REG;
8430 }
8431 else
8432 {
8433 /* Use DI for a nested function or a function needing a static chain.
8434 Since a function with a tail call may use any caller-saved
8435 register in the epilogue, DRAP must not use a caller-saved
8436 register in such a case. */
8437 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8438 return DI_REG;
8439
8440 /* Reuse static chain register if it isn't used for parameter
8441 passing. */
8442 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8443 && !lookup_attribute ("fastcall",
8444 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8445 && !lookup_attribute ("thiscall",
8446 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8447 return CX_REG;
8448 else
8449 return DI_REG;
8450 }
8451 }
8452
8453 /* Return minimum incoming stack alignment. */
8454
8455 static unsigned int
8456 ix86_minimum_incoming_stack_boundary (bool sibcall)
8457 {
8458 unsigned int incoming_stack_boundary;
8459
8460 /* Prefer the one specified at command line. */
8461 if (ix86_user_incoming_stack_boundary)
8462 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8463 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8464 if -mstackrealign is used, this isn't a sibcall check, and the
8465 estimated stack alignment is 128 bits. */
8466 else if (!sibcall
8467 && !TARGET_64BIT
8468 && ix86_force_align_arg_pointer
8469 && crtl->stack_alignment_estimated == 128)
8470 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8471 else
8472 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8473
8474 /* Incoming stack alignment can be changed on individual functions
8475 via force_align_arg_pointer attribute. We use the smallest
8476 incoming stack boundary. */
8477 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8478 && lookup_attribute (ix86_force_align_arg_pointer_string,
8479 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8480 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8481
8482 /* The incoming stack frame has to be aligned at least at
8483 parm_stack_boundary. */
8484 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8485 incoming_stack_boundary = crtl->parm_stack_boundary;
8486
8487 /* Stack at entrance of main is aligned by runtime. We use the
8488 smallest incoming stack boundary. */
8489 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8490 && DECL_NAME (current_function_decl)
8491 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8492 && DECL_FILE_SCOPE_P (current_function_decl))
8493 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8494
8495 return incoming_stack_boundary;
8496 }
8497
8498 /* Update incoming stack boundary and estimated stack alignment. */
8499
8500 static void
8501 ix86_update_stack_boundary (void)
8502 {
8503 ix86_incoming_stack_boundary
8504 = ix86_minimum_incoming_stack_boundary (false);
8505
8506 /* x86_64 varargs need 16-byte stack alignment for the register save
8507 area. */
8508 if (TARGET_64BIT
8509 && cfun->stdarg
8510 && crtl->stack_alignment_estimated < 128)
8511 crtl->stack_alignment_estimated = 128;
8512 }
8513
8514 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8515 needed or an rtx for DRAP otherwise. */
8516
8517 static rtx
8518 ix86_get_drap_rtx (void)
8519 {
8520 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8521 crtl->need_drap = true;
8522
8523 if (stack_realign_drap)
8524 {
8525 /* Assign DRAP to vDRAP and return vDRAP. */
8526 unsigned int regno = find_drap_reg ();
8527 rtx drap_vreg;
8528 rtx arg_ptr;
8529 rtx seq, insn;
8530
8531 arg_ptr = gen_rtx_REG (Pmode, regno);
8532 crtl->drap_reg = arg_ptr;
8533
8534 start_sequence ();
8535 drap_vreg = copy_to_reg (arg_ptr);
8536 seq = get_insns ();
8537 end_sequence ();
8538
8539 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8540 if (!optimize)
8541 {
8542 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8543 RTX_FRAME_RELATED_P (insn) = 1;
8544 }
8545 return drap_vreg;
8546 }
8547 else
8548 return NULL;
8549 }
8550
8551 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8552
8553 static rtx
8554 ix86_internal_arg_pointer (void)
8555 {
8556 return virtual_incoming_args_rtx;
8557 }
8558
8559 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8560 to be generated in correct form. */
8561 static void
8562 ix86_finalize_stack_realign_flags (void)
8563 {
8564 /* Check if stack realignment is really needed after reload, and
8565 store the result in cfun. */
8566 unsigned int incoming_stack_boundary
8567 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8568 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8569 unsigned int stack_realign = (incoming_stack_boundary
8570 < (current_function_is_leaf
8571 ? crtl->max_used_stack_slot_alignment
8572 : crtl->stack_alignment_needed));
8573
8574 if (crtl->stack_realign_finalized)
8575 {
8576 /* After stack_realign_needed is finalized, we can no longer
8577 change it. */
8578 gcc_assert (crtl->stack_realign_needed == stack_realign);
8579 }
8580 else
8581 {
8582 crtl->stack_realign_needed = stack_realign;
8583 crtl->stack_realign_finalized = true;
8584 }
8585 }
8586
8587 /* Expand the prologue into a bunch of separate insns. */
8588
8589 void
8590 ix86_expand_prologue (void)
8591 {
8592 rtx insn;
8593 bool pic_reg_used;
8594 struct ix86_frame frame;
8595 HOST_WIDE_INT allocate;
8596 int gen_frame_pointer = frame_pointer_needed;
8597
8598 ix86_finalize_stack_realign_flags ();
8599
8600 /* DRAP should not coexist with stack_realign_fp */
8601 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8602
8603 /* Initialize CFA state for before the prologue. */
8604 ix86_cfa_state->reg = stack_pointer_rtx;
8605 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8606
8607 ix86_compute_frame_layout (&frame);
8608
8609 if (ix86_function_ms_hook_prologue (current_function_decl))
8610 {
8611 rtx push, mov;
8612
8613 /* Make sure the function starts with
8614 8b ff movl.s %edi,%edi
8615 55 push %ebp
8616 8b ec movl.s %esp,%ebp
8617
8618 This matches the hookable function prologue in Win32 API
8619 functions in Microsoft Windows XP Service Pack 2 and newer.
8620 Wine uses this to enable Windows apps to hook the Win32 API
8621 functions provided by Wine. */
8622 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8623 gen_rtx_REG (SImode, DI_REG)));
8624 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8625 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8626 stack_pointer_rtx));
8627
8628 if (frame_pointer_needed && !(crtl->drap_reg
8629 && crtl->stack_realign_needed))
8630 {
8631 /* The push %ebp and movl.s %esp, %ebp already set up
8632 the frame pointer. No need to do this again. */
8633 gen_frame_pointer = 0;
8634 RTX_FRAME_RELATED_P (push) = 1;
8635 RTX_FRAME_RELATED_P (mov) = 1;
8636 if (ix86_cfa_state->reg == stack_pointer_rtx)
8637 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8638 }
8639 else
8640 /* If the frame pointer is not needed, pop %ebp again. This
8641 could be optimized for cases where ebp needs to be backed up
8642 for some other reason. If stack realignment is needed, pop
8643 the base pointer again, align the stack, and later regenerate
8644 the frame pointer setup. The frame pointer generated by the
8645 hook prologue is not aligned, so it can't be used. */
8646 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8647 }
8648
8649 /* The first insn of a function that accepts its static chain on the
8650 stack is to push the register that would be filled in by a direct
8651 call. This insn will be skipped by the trampoline. */
8652 if (ix86_static_chain_on_stack)
8653 {
8654 rtx t;
8655
8656 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8657 emit_insn (gen_blockage ());
8658
8659 /* We don't want to interpret this push insn as a register save,
8660 only as a stack adjustment. The real copy of the register as
8661 a save will be done later, if needed. */
8662 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8663 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8664 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8665 RTX_FRAME_RELATED_P (insn) = 1;
8666 }
8667
8668 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8669 DRAP is needed and stack realignment is really needed after reload. */
8670 if (crtl->drap_reg && crtl->stack_realign_needed)
8671 {
8672 rtx x, y;
8673 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8674 int param_ptr_offset = UNITS_PER_WORD;
8675
8676 if (ix86_static_chain_on_stack)
8677 param_ptr_offset += UNITS_PER_WORD;
8678 if (!call_used_regs[REGNO (crtl->drap_reg)])
8679 param_ptr_offset += UNITS_PER_WORD;
8680
8681 gcc_assert (stack_realign_drap);
8682
8683 /* Grab the argument pointer. */
8684 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8685 y = crtl->drap_reg;
8686
8687 /* Only need to push the parameter pointer reg if it is a
8688 caller-saved reg. */
8689 if (!call_used_regs[REGNO (crtl->drap_reg)])
8690 {
8691 /* Push arg pointer reg */
8692 insn = emit_insn (gen_push (y));
8693 RTX_FRAME_RELATED_P (insn) = 1;
8694 }
8695
8696 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8697 RTX_FRAME_RELATED_P (insn) = 1;
8698 ix86_cfa_state->reg = crtl->drap_reg;
8699
8700 /* Align the stack. */
8701 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8702 stack_pointer_rtx,
8703 GEN_INT (-align_bytes)));
8704 RTX_FRAME_RELATED_P (insn) = 1;
8705
8706 /* Replicate the return address on the stack so that the return
8707 address can be reached via the (argp - 1) slot. This is needed
8708 to implement the macro RETURN_ADDR_RTX and the intrinsic function
8709 expand_builtin_return_addr, etc. */
8710 x = crtl->drap_reg;
8711 x = gen_frame_mem (Pmode,
8712 plus_constant (x, -UNITS_PER_WORD));
8713 insn = emit_insn (gen_push (x));
8714 RTX_FRAME_RELATED_P (insn) = 1;
8715 }
8716
8717 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8718 slower on all targets. Also sdb doesn't like it. */
8719
8720 if (gen_frame_pointer)
8721 {
8722 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8723 RTX_FRAME_RELATED_P (insn) = 1;
8724
8725 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8726 RTX_FRAME_RELATED_P (insn) = 1;
8727
8728 if (ix86_cfa_state->reg == stack_pointer_rtx)
8729 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8730 }
8731
8732 if (stack_realign_fp)
8733 {
8734 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8735 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8736
8737 /* Align the stack. */
8738 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8739 stack_pointer_rtx,
8740 GEN_INT (-align_bytes)));
8741 RTX_FRAME_RELATED_P (insn) = 1;
8742 }
8743
8744 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8745
8746 if (!frame.save_regs_using_mov)
8747 ix86_emit_save_regs ();
8748 else
8749 allocate += frame.nregs * UNITS_PER_WORD;
8750
8751 /* When using the red zone we may start register saving before allocating
8752 the stack frame, saving one cycle of the prologue. However, avoid
8753 doing this if the stack has to be probed, since
8754 at least on x86_64 the stack probe can turn into a call that clobbers
8755 a red zone location. */
8756 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8757 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8758 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8759 && !crtl->stack_realign_needed)
8760 ? hard_frame_pointer_rtx
8761 : stack_pointer_rtx,
8762 -frame.nregs * UNITS_PER_WORD);
8763
8764 if (allocate == 0)
8765 ;
8766 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8767 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8768 GEN_INT (-allocate), -1,
8769 ix86_cfa_state->reg == stack_pointer_rtx);
8770 else
8771 {
8772 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8773 bool eax_live;
8774 rtx t;
8775
8776 if (cfun->machine->call_abi == MS_ABI)
8777 eax_live = false;
8778 else
8779 eax_live = ix86_eax_live_at_start_p ();
8780
8781 if (eax_live)
8782 {
8783 emit_insn (gen_push (eax));
8784 allocate -= UNITS_PER_WORD;
8785 }
8786
8787 emit_move_insn (eax, GEN_INT (allocate));
8788
8789 if (TARGET_64BIT)
8790 insn = gen_allocate_stack_worker_64 (eax, eax);
8791 else
8792 insn = gen_allocate_stack_worker_32 (eax, eax);
8793 insn = emit_insn (insn);
8794
8795 if (ix86_cfa_state->reg == stack_pointer_rtx)
8796 {
8797 ix86_cfa_state->offset += allocate;
8798 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8799 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8800 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8801 RTX_FRAME_RELATED_P (insn) = 1;
8802 }
8803
8804 if (eax_live)
8805 {
8806 if (frame_pointer_needed)
8807 t = plus_constant (hard_frame_pointer_rtx,
8808 allocate
8809 - frame.to_allocate
8810 - frame.nregs * UNITS_PER_WORD);
8811 else
8812 t = plus_constant (stack_pointer_rtx, allocate);
8813 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8814 }
8815 }
8816
8817 if (frame.save_regs_using_mov
8818 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8819 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8820 {
8821 if (!frame_pointer_needed
8822 || !(frame.to_allocate + frame.padding0)
8823 || crtl->stack_realign_needed)
8824 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8825 frame.to_allocate
8826 + frame.nsseregs * 16 + frame.padding0);
8827 else
8828 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8829 -frame.nregs * UNITS_PER_WORD);
8830 }
8831 if (!frame_pointer_needed
8832 || !(frame.to_allocate + frame.padding0)
8833 || crtl->stack_realign_needed)
8834 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8835 frame.to_allocate);
8836 else
8837 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8838 - frame.nregs * UNITS_PER_WORD
8839 - frame.nsseregs * 16
8840 - frame.padding0);
8841
8842 pic_reg_used = false;
8843 if (pic_offset_table_rtx
8844 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8845 || crtl->profile))
8846 {
8847 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8848
8849 if (alt_pic_reg_used != INVALID_REGNUM)
8850 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8851
8852 pic_reg_used = true;
8853 }
8854
8855 if (pic_reg_used)
8856 {
8857 if (TARGET_64BIT)
8858 {
8859 if (ix86_cmodel == CM_LARGE_PIC)
8860 {
8861 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8862 rtx label = gen_label_rtx ();
8863 emit_label (label);
8864 LABEL_PRESERVE_P (label) = 1;
8865 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8866 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8867 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8868 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8869 pic_offset_table_rtx, tmp_reg));
8870 }
8871 else
8872 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8873 }
8874 else
8875 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8876 }
8877
8878 /* In the pic_reg_used case, make sure that the got load isn't deleted
8879 when mcount needs it. Blockage to avoid call movement across mcount
8880 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8881 note. */
8882 if (crtl->profile && pic_reg_used)
8883 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8884
8885 if (crtl->drap_reg && !crtl->stack_realign_needed)
8886 {
8887 /* vDRAP was set up, but after reload it turns out stack realignment
8888 isn't necessary; here we emit prologue code to set up DRAP
8889 without the stack realignment adjustment. */
8890 rtx x;
8891 int drap_bp_offset = UNITS_PER_WORD * 2;
8892
8893 if (ix86_static_chain_on_stack)
8894 drap_bp_offset += UNITS_PER_WORD;
8895 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8896 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8897 }
8898
8899 /* Prevent instructions from being scheduled into register save push
8900 sequence when access to the redzone area is done through frame pointer.
8901 The offset between the frame pointer and the stack pointer is calculated
8902 relative to the value of the stack pointer at the end of the function
8903 prologue, and moving instructions that access redzone area via frame
8904 pointer inside push sequence violates this assumption. */
8905 if (frame_pointer_needed && frame.red_zone_size)
8906 emit_insn (gen_memory_blockage ());
8907
8908 /* Emit cld instruction if stringops are used in the function. */
8909 if (TARGET_CLD && ix86_current_function_needs_cld)
8910 emit_insn (gen_cld ());
8911 }
8912
8913 /* Emit code to restore REG using a POP insn. */
8914
8915 static void
8916 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8917 {
8918 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8919
8920 if (ix86_cfa_state->reg == crtl->drap_reg
8921 && REGNO (reg) == REGNO (crtl->drap_reg))
8922 {
8923 /* Previously we'd represented the CFA as an expression
8924 like *(%ebp - 8). We've just popped that value from
8925 the stack, which means we need to reset the CFA to
8926 the drap register. This will remain until we restore
8927 the stack pointer. */
8928 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8929 RTX_FRAME_RELATED_P (insn) = 1;
8930 return;
8931 }
8932
8933 if (ix86_cfa_state->reg == stack_pointer_rtx)
8934 {
8935 ix86_cfa_state->offset -= UNITS_PER_WORD;
8936 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8937 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8938 RTX_FRAME_RELATED_P (insn) = 1;
8939 }
8940
8941 /* When the frame pointer is the CFA, and we pop it, we are
8942 swapping back to the stack pointer as the CFA. This happens
8943 for stack frames that don't allocate other data, so we assume
8944 the stack pointer is now pointing at the return address, i.e.
8945 the function entry state, which makes the offset be 1 word. */
8946 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8947 && reg == hard_frame_pointer_rtx)
8948 {
8949 ix86_cfa_state->reg = stack_pointer_rtx;
8950 ix86_cfa_state->offset -= UNITS_PER_WORD;
8951
8952 add_reg_note (insn, REG_CFA_DEF_CFA,
8953 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8954 GEN_INT (ix86_cfa_state->offset)));
8955 RTX_FRAME_RELATED_P (insn) = 1;
8956 }
8957
8958 ix86_add_cfa_restore_note (insn, reg, red_offset);
8959 }
8960
8961 /* Emit code to restore saved registers using POP insns. */
8962
8963 static void
8964 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8965 {
8966 int regno;
8967
8968 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8969 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8970 {
8971 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8972 red_offset);
8973 red_offset += UNITS_PER_WORD;
8974 }
8975 }
8976
8977 /* Emit code and notes for the LEAVE instruction. */
8978
8979 static void
8980 ix86_emit_leave (HOST_WIDE_INT red_offset)
8981 {
8982 rtx insn = emit_insn (ix86_gen_leave ());
8983
8984 ix86_add_queued_cfa_restore_notes (insn);
8985
8986 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8987 {
8988 ix86_cfa_state->reg = stack_pointer_rtx;
8989 ix86_cfa_state->offset -= UNITS_PER_WORD;
8990
8991 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8992 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8993 RTX_FRAME_RELATED_P (insn) = 1;
8994 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8995 }
8996 }
8997
8998 /* Emit code to restore saved registers using MOV insns. First register
8999 is restored from POINTER + OFFSET. */
9000 static void
9001 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9002 HOST_WIDE_INT red_offset,
9003 int maybe_eh_return)
9004 {
9005 unsigned int regno;
9006 rtx base_address = gen_rtx_MEM (Pmode, pointer);
9007 rtx insn;
9008
9009 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9010 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9011 {
9012 rtx reg = gen_rtx_REG (Pmode, regno);
9013
9014 /* Ensure that adjust_address won't be forced to produce pointer
9015 out of range allowed by x86-64 instruction set. */
9016 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9017 {
9018 rtx r11;
9019
9020 r11 = gen_rtx_REG (DImode, R11_REG);
9021 emit_move_insn (r11, GEN_INT (offset));
9022 emit_insn (gen_adddi3 (r11, r11, pointer));
9023 base_address = gen_rtx_MEM (Pmode, r11);
9024 offset = 0;
9025 }
9026 insn = emit_move_insn (reg,
9027 adjust_address (base_address, Pmode, offset));
9028 offset += UNITS_PER_WORD;
9029
9030 if (ix86_cfa_state->reg == crtl->drap_reg
9031 && regno == REGNO (crtl->drap_reg))
9032 {
9033 /* Previously we'd represented the CFA as an expression
9034 like *(%ebp - 8). We've just popped that value from
9035 the stack, which means we need to reset the CFA to
9036 the drap register. This will remain until we restore
9037 the stack pointer. */
9038 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9039 RTX_FRAME_RELATED_P (insn) = 1;
9040 }
9041 else
9042 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9043
9044 red_offset += UNITS_PER_WORD;
9045 }
9046 }
9047
9048 /* Emit code to restore saved registers using MOV insns. First register
9049 is restored from POINTER + OFFSET. */
9050 static void
9051 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9052 HOST_WIDE_INT red_offset,
9053 int maybe_eh_return)
9054 {
9055 int regno;
9056 rtx base_address = gen_rtx_MEM (TImode, pointer);
9057 rtx mem;
9058
9059 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9060 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9061 {
9062 rtx reg = gen_rtx_REG (TImode, regno);
9063
9064 /* Ensure that adjust_address won't be forced to produce pointer
9065 out of range allowed by x86-64 instruction set. */
9066 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9067 {
9068 rtx r11;
9069
9070 r11 = gen_rtx_REG (DImode, R11_REG);
9071 emit_move_insn (r11, GEN_INT (offset));
9072 emit_insn (gen_adddi3 (r11, r11, pointer));
9073 base_address = gen_rtx_MEM (TImode, r11);
9074 offset = 0;
9075 }
9076 mem = adjust_address (base_address, TImode, offset);
9077 set_mem_align (mem, 128);
9078 emit_move_insn (reg, mem);
9079 offset += 16;
9080
9081 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9082
9083 red_offset += 16;
9084 }
9085 }
9086
9087 /* Restore function stack, frame, and registers. */
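/* For a typical frame-pointer epilogue this expands to either
"leave; ret" (or the discrete equivalent movl %ebp, %esp; popl %ebp; ret),
or to an explicit "addl $N, %esp" followed by pops of the saved
registers, depending on the heuristics below. */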
9088
9089 void
9090 ix86_expand_epilogue (int style)
9091 {
9092 int sp_valid;
9093 struct ix86_frame frame;
9094 HOST_WIDE_INT offset, red_offset;
9095 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9096 bool using_drap;
9097
9098 ix86_finalize_stack_realign_flags ();
9099
9100 /* When stack is realigned, SP must be valid. */
9101 sp_valid = (!frame_pointer_needed
9102 || current_function_sp_is_unchanging
9103 || stack_realign_fp);
9104
9105 ix86_compute_frame_layout (&frame);
9106
9107 /* See the comment about red zone and frame
9108 pointer usage in ix86_expand_prologue. */
9109 if (frame_pointer_needed && frame.red_zone_size)
9110 emit_insn (gen_memory_blockage ());
9111
9112 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9113 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9114
9115 /* Calculate start of saved registers relative to ebp. Special care
9116 must be taken for the normal return case of a function using
9117 eh_return: the eax and edx registers are marked as saved, but not
9118 restored along this path. */
9119 offset = frame.nregs;
9120 if (crtl->calls_eh_return && style != 2)
9121 offset -= 2;
9122 offset *= -UNITS_PER_WORD;
9123 offset -= frame.nsseregs * 16 + frame.padding0;
9124
9125 /* Calculate start of saved registers relative to esp on entry of the
9126 function. When realigning stack, this needs to be the most negative
9127 value possible at runtime. */
9128 red_offset = offset;
9129 if (using_drap)
9130 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9131 + UNITS_PER_WORD;
9132 else if (stack_realign_fp)
9133 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9134 - UNITS_PER_WORD;
9135 if (ix86_static_chain_on_stack)
9136 red_offset -= UNITS_PER_WORD;
9137 if (frame_pointer_needed)
9138 red_offset -= UNITS_PER_WORD;
9139
9140 /* If we're only restoring one register and sp is not valid, then
9141 use a move instruction to restore the register, since it's
9142 less work than reloading sp and popping the register.
9143 
9144 The default code results in a stack adjustment using an add/lea instruction,
9145 while this code results in a LEAVE instruction (or discrete equivalent),
9146 so it is profitable in some other cases as well, especially when there
9147 are no registers to restore. We also use this code when TARGET_USE_LEAVE
9148 and there is exactly one register to pop. This heuristic may need some
9149 tuning in the future. */
9150 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9151 || (TARGET_EPILOGUE_USING_MOVE
9152 && cfun->machine->use_fast_prologue_epilogue
9153 && ((frame.nregs + frame.nsseregs) > 1
9154 || (frame.to_allocate + frame.padding0) != 0))
9155 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9156 && (frame.to_allocate + frame.padding0) != 0)
9157 || (frame_pointer_needed && TARGET_USE_LEAVE
9158 && cfun->machine->use_fast_prologue_epilogue
9159 && (frame.nregs + frame.nsseregs) == 1)
9160 || crtl->calls_eh_return)
9161 {
9162 /* Restore registers. We can use ebp or esp to address the memory
9163 locations. If both are available, default to ebp, since offsets
9164 are known to be small. The only exception is esp pointing directly
9165 to the end of the block of saved registers, where we may simplify
9166 the addressing mode.
9167 
9168 If we are realigning the stack with bp and sp, register restores can't
9169 be addressed via bp; sp must be used instead. */
9170
9171 if (!frame_pointer_needed
9172 || (sp_valid && !(frame.to_allocate + frame.padding0))
9173 || stack_realign_fp)
9174 {
9175 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9176 frame.to_allocate, red_offset,
9177 style == 2);
9178 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9179 frame.to_allocate
9180 + frame.nsseregs * 16
9181 + frame.padding0,
9182 red_offset
9183 + frame.nsseregs * 16
9184 + frame.padding0, style == 2);
9185 }
9186 else
9187 {
9188 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9189 offset, red_offset,
9190 style == 2);
9191 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9192 offset
9193 + frame.nsseregs * 16
9194 + frame.padding0,
9195 red_offset
9196 + frame.nsseregs * 16
9197 + frame.padding0, style == 2);
9198 }
9199
9200 red_offset -= offset;
9201
9202 /* eh_return epilogues need %ecx added to the stack pointer. */
9203 if (style == 2)
9204 {
9205 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9206
9207 /* Stack align doesn't work with eh_return. */
9208 gcc_assert (!crtl->stack_realign_needed);
9209 /* Neither do regparm nested functions. */
9210 gcc_assert (!ix86_static_chain_on_stack);
9211
9212 if (frame_pointer_needed)
9213 {
9214 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9215 tmp = plus_constant (tmp, UNITS_PER_WORD);
9216 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9217
9218 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9219 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9220
9221 /* Note that we use SA as a temporary CFA, as the return
9222 address is at the proper place relative to it. We
9223 pretend this happens at the FP restore insn because
9224 prior to this insn the FP would be stored at the wrong
9225 offset relative to SA, and after this insn we have no
9226 other reasonable register to use for the CFA. We don't
9227 bother resetting the CFA to the SP for the duration of
9228 the return insn. */
9229 add_reg_note (tmp, REG_CFA_DEF_CFA,
9230 plus_constant (sa, UNITS_PER_WORD));
9231 ix86_add_queued_cfa_restore_notes (tmp);
9232 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9233 RTX_FRAME_RELATED_P (tmp) = 1;
9234 ix86_cfa_state->reg = sa;
9235 ix86_cfa_state->offset = UNITS_PER_WORD;
9236
9237 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9238 const0_rtx, style, false);
9239 }
9240 else
9241 {
9242 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9243 tmp = plus_constant (tmp, (frame.to_allocate
9244 + frame.nregs * UNITS_PER_WORD
9245 + frame.nsseregs * 16
9246 + frame.padding0));
9247 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9248 ix86_add_queued_cfa_restore_notes (tmp);
9249
9250 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9251 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9252 {
9253 ix86_cfa_state->offset = UNITS_PER_WORD;
9254 add_reg_note (tmp, REG_CFA_DEF_CFA,
9255 plus_constant (stack_pointer_rtx,
9256 UNITS_PER_WORD));
9257 RTX_FRAME_RELATED_P (tmp) = 1;
9258 }
9259 }
9260 }
9261 else if (!frame_pointer_needed)
9262 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9263 GEN_INT (frame.to_allocate
9264 + frame.nregs * UNITS_PER_WORD
9265 + frame.nsseregs * 16
9266 + frame.padding0),
9267 style, !using_drap);
9268 /* If not an i386, mov & pop is faster than "leave". */
9269 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9270 || !cfun->machine->use_fast_prologue_epilogue)
9271 ix86_emit_leave (red_offset);
9272 else
9273 {
9274 pro_epilogue_adjust_stack (stack_pointer_rtx,
9275 hard_frame_pointer_rtx,
9276 const0_rtx, style, !using_drap);
9277
9278 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9279 }
9280 }
9281 else
9282 {
9283 /* The first step is to deallocate the stack frame so that we can
9284 pop the registers.
9285 
9286 If we realign the stack with the frame pointer, then the stack pointer
9287 can't be recovered via lea $offset(%bp), %sp, because
9288 there is a padding area between bp and sp for the realignment.
9289 "add $to_allocate, %sp" must be used instead. */
9290 if (!sp_valid)
9291 {
9292 gcc_assert (frame_pointer_needed);
9293 gcc_assert (!stack_realign_fp);
9294 pro_epilogue_adjust_stack (stack_pointer_rtx,
9295 hard_frame_pointer_rtx,
9296 GEN_INT (offset), style, false);
9297 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9298 0, red_offset,
9299 style == 2);
9300 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9301 GEN_INT (frame.nsseregs * 16
9302 + frame.padding0),
9303 style, false);
9304 }
9305 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9306 {
9307 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9308 frame.to_allocate, red_offset,
9309 style == 2);
9310 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9311 GEN_INT (frame.to_allocate
9312 + frame.nsseregs * 16
9313 + frame.padding0), style,
9314 !using_drap && !frame_pointer_needed);
9315 }
9316
9317 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9318 + frame.padding0);
9319 red_offset -= offset;
9320
9321 if (frame_pointer_needed)
9322 {
9323 /* Leave results in shorter dependency chains on CPUs that are
9324 able to grok it fast. */
9325 if (TARGET_USE_LEAVE)
9326 ix86_emit_leave (red_offset);
9327 else
9328 {
9329 /* If stack realignment really happened, restoring the stack
9330 pointer from the hard frame pointer is a must, if not using
9331 leave. */
9332 if (stack_realign_fp)
9333 pro_epilogue_adjust_stack (stack_pointer_rtx,
9334 hard_frame_pointer_rtx,
9335 const0_rtx, style, !using_drap);
9336 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9337 red_offset);
9338 }
9339 }
9340 }
9341
9342 if (using_drap)
9343 {
9344 int param_ptr_offset = UNITS_PER_WORD;
9345 rtx insn;
9346
9347 gcc_assert (stack_realign_drap);
9348
9349 if (ix86_static_chain_on_stack)
9350 param_ptr_offset += UNITS_PER_WORD;
9351 if (!call_used_regs[REGNO (crtl->drap_reg)])
9352 param_ptr_offset += UNITS_PER_WORD;
9353
9354 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9355 crtl->drap_reg,
9356 GEN_INT (-param_ptr_offset)));
9357
9358 ix86_cfa_state->reg = stack_pointer_rtx;
9359 ix86_cfa_state->offset = param_ptr_offset;
9360
9361 add_reg_note (insn, REG_CFA_DEF_CFA,
9362 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9363 GEN_INT (ix86_cfa_state->offset)));
9364 RTX_FRAME_RELATED_P (insn) = 1;
9365
9366 if (!call_used_regs[REGNO (crtl->drap_reg)])
9367 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9368 }
9369
9370 /* Remove the saved static chain from the stack. The use of ECX is
9371 merely as a scratch register, not as the actual static chain. */
9372 if (ix86_static_chain_on_stack)
9373 {
9374 rtx r, insn;
9375
9376 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9377 ix86_cfa_state->offset += UNITS_PER_WORD;
9378
9379 r = gen_rtx_REG (Pmode, CX_REG);
9380 insn = emit_insn (ix86_gen_pop1 (r));
9381
9382 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9383 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9384 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9385 RTX_FRAME_RELATED_P (insn) = 1;
9386 }
9387
9388 /* Sibcall epilogues don't want a return instruction. */
9389 if (style == 0)
9390 {
9391 *ix86_cfa_state = cfa_state_save;
9392 return;
9393 }
9394
9395 if (crtl->args.pops_args && crtl->args.size)
9396 {
9397 rtx popc = GEN_INT (crtl->args.pops_args);
9398
9399 /* i386 can only pop 64K bytes. If asked to pop more, pop the return
9400 address, do an explicit add, and jump indirectly to the caller. */
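/* The "ret $imm16" instruction only takes a 16-bit immediate, hence the
64K limit. For larger amounts the sequence emitted below is roughly:
popl %ecx
addl $N, %esp
jmp *%ecx */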
9401
9402 if (crtl->args.pops_args >= 65536)
9403 {
9404 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9405 rtx insn;
9406
9407 /* There is no "pascal" calling convention in any 64bit ABI. */
9408 gcc_assert (!TARGET_64BIT);
9409
9410 insn = emit_insn (gen_popsi1 (ecx));
9411 ix86_cfa_state->offset -= UNITS_PER_WORD;
9412
9413 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9414 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9415 add_reg_note (insn, REG_CFA_REGISTER,
9416 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9417 RTX_FRAME_RELATED_P (insn) = 1;
9418
9419 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9420 popc, -1, true);
9421 emit_jump_insn (gen_return_indirect_internal (ecx));
9422 }
9423 else
9424 emit_jump_insn (gen_return_pop_internal (popc));
9425 }
9426 else
9427 emit_jump_insn (gen_return_internal ());
9428
9429 /* Restore the state back to the state from the prologue,
9430 so that it's correct for the next epilogue. */
9431 *ix86_cfa_state = cfa_state_save;
9432 }
9433
9434 /* Reset from the function's potential modifications. */
9435
9436 static void
9437 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9438 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9439 {
9440 if (pic_offset_table_rtx)
9441 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9442 #if TARGET_MACHO
9443 /* Mach-O doesn't support labels at the end of objects, so if
9444 it looks like we might want one, insert a NOP. */
9445 {
9446 rtx insn = get_last_insn ();
9447 while (insn
9448 && NOTE_P (insn)
9449 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9450 insn = PREV_INSN (insn);
9451 if (insn
9452 && (LABEL_P (insn)
9453 || (NOTE_P (insn)
9454 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9455 fputs ("\tnop\n", file);
9456 }
9457 #endif
9458
9459 }
9460 \f
9461 /* Extract the parts of an RTL expression that is a valid memory address
9462 for an instruction. Return 0 if the structure of the address is
9463 grossly off. Return -1 if the address contains ASHIFT, so it is not
9464 strictly valid, but is still used for computing the length of an lea instruction. */
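/* For example, an address such as 8(%eax,%ebx,4) decomposes into
base = %eax, index = %ebx, scale = 4, disp = 8 and seg = SEG_DEFAULT. */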
9465
9466 int
9467 ix86_decompose_address (rtx addr, struct ix86_address *out)
9468 {
9469 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9470 rtx base_reg, index_reg;
9471 HOST_WIDE_INT scale = 1;
9472 rtx scale_rtx = NULL_RTX;
9473 rtx tmp;
9474 int retval = 1;
9475 enum ix86_address_seg seg = SEG_DEFAULT;
9476
9477 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9478 base = addr;
9479 else if (GET_CODE (addr) == PLUS)
9480 {
9481 rtx addends[4], op;
9482 int n = 0, i;
9483
9484 op = addr;
9485 do
9486 {
9487 if (n >= 4)
9488 return 0;
9489 addends[n++] = XEXP (op, 1);
9490 op = XEXP (op, 0);
9491 }
9492 while (GET_CODE (op) == PLUS);
9493 if (n >= 4)
9494 return 0;
9495 addends[n] = op;
9496
9497 for (i = n; i >= 0; --i)
9498 {
9499 op = addends[i];
9500 switch (GET_CODE (op))
9501 {
9502 case MULT:
9503 if (index)
9504 return 0;
9505 index = XEXP (op, 0);
9506 scale_rtx = XEXP (op, 1);
9507 break;
9508
9509 case ASHIFT:
9510 if (index)
9511 return 0;
9512 index = XEXP (op, 0);
9513 tmp = XEXP (op, 1);
9514 if (!CONST_INT_P (tmp))
9515 return 0;
9516 scale = INTVAL (tmp);
9517 if ((unsigned HOST_WIDE_INT) scale > 3)
9518 return 0;
9519 scale = 1 << scale;
9520 break;
9521
9522 case UNSPEC:
9523 if (XINT (op, 1) == UNSPEC_TP
9524 && TARGET_TLS_DIRECT_SEG_REFS
9525 && seg == SEG_DEFAULT)
9526 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9527 else
9528 return 0;
9529 break;
9530
9531 case REG:
9532 case SUBREG:
9533 if (!base)
9534 base = op;
9535 else if (!index)
9536 index = op;
9537 else
9538 return 0;
9539 break;
9540
9541 case CONST:
9542 case CONST_INT:
9543 case SYMBOL_REF:
9544 case LABEL_REF:
9545 if (disp)
9546 return 0;
9547 disp = op;
9548 break;
9549
9550 default:
9551 return 0;
9552 }
9553 }
9554 }
9555 else if (GET_CODE (addr) == MULT)
9556 {
9557 index = XEXP (addr, 0); /* index*scale */
9558 scale_rtx = XEXP (addr, 1);
9559 }
9560 else if (GET_CODE (addr) == ASHIFT)
9561 {
9562 /* We're called for lea too, which implements ashift on occasion. */
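/* E.g. (ashift (reg) (const_int 3)) denotes reg << 3, i.e. reg * 8, which
an lea can encode as a scale of 8 even though it is not a canonical
memory address; hence the -1 return value. */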
9563 index = XEXP (addr, 0);
9564 tmp = XEXP (addr, 1);
9565 if (!CONST_INT_P (tmp))
9566 return 0;
9567 scale = INTVAL (tmp);
9568 if ((unsigned HOST_WIDE_INT) scale > 3)
9569 return 0;
9570 scale = 1 << scale;
9571 retval = -1;
9572 }
9573 else
9574 disp = addr; /* displacement */
9575
9576 /* Extract the integral value of scale. */
9577 if (scale_rtx)
9578 {
9579 if (!CONST_INT_P (scale_rtx))
9580 return 0;
9581 scale = INTVAL (scale_rtx);
9582 }
9583
9584 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9585 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9586
9587 /* Avoid useless 0 displacement. */
9588 if (disp == const0_rtx && (base || index))
9589 disp = NULL_RTX;
9590
9591 /* Allow arg pointer and stack pointer as index if there is no scaling. */
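/* In the SIB byte an index field of 100b means "no index", so the stack
pointer can never be encoded as an index register; with a scale of 1 the
two operands are interchangeable, so simply swap them. */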
9592 if (base_reg && index_reg && scale == 1
9593 && (index_reg == arg_pointer_rtx
9594 || index_reg == frame_pointer_rtx
9595 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9596 {
9597 rtx tmp;
9598 tmp = base, base = index, index = tmp;
9599 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9600 }
9601
9602 /* Special case: %ebp cannot be encoded as a base without a displacement.
9603 Similarly %r13. */
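/* A mod field of 00 with a base field of 101b is reserved for disp32 (or
RIP-relative) addressing, so e.g. (%ebp) must be emitted as 0(%ebp) with
an explicit zero displacement byte; %r13 shares the same base encoding. */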
9604 if (!disp
9605 && base_reg
9606 && (base_reg == hard_frame_pointer_rtx
9607 || base_reg == frame_pointer_rtx
9608 || base_reg == arg_pointer_rtx
9609 || (REG_P (base_reg)
9610 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9611 || REGNO (base_reg) == R13_REG))))
9612 disp = const0_rtx;
9613
9614 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9615 Avoid this by transforming to [%esi+0].
9616 Reload calls address legitimization without cfun defined, so we need
9617 to test cfun for being non-NULL. */
9618 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9619 && base_reg && !index_reg && !disp
9620 && REG_P (base_reg)
9621 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9622 disp = const0_rtx;
9623
9624 /* Special case: encode reg+reg instead of reg*2. */
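/* E.g. (,%eax,2) would need the no-base form with a 32-bit displacement,
while (%eax,%eax) encodes the same address without any displacement
bytes. */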
9625 if (!base && index && scale == 2)
9626 base = index, base_reg = index_reg, scale = 1;
9627
9628 /* Special case: scaling cannot be encoded without base or displacement. */
9629 if (!base && !disp && index && scale != 1)
9630 disp = const0_rtx;
9631
9632 out->base = base;
9633 out->index = index;
9634 out->disp = disp;
9635 out->scale = scale;
9636 out->seg = seg;
9637
9638 return retval;
9639 }
9640 \f
9641 /* Return cost of the memory address x.
9642 For i386, it is better to use a complex address than let gcc copy
9643 the address into a reg and make a new pseudo. But not if the address
9644 requires two regs - that would mean more pseudos with longer
9645 lifetimes. */
9646 static int
9647 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9648 {
9649 struct ix86_address parts;
9650 int cost = 1;
9651 int ok = ix86_decompose_address (x, &parts);
9652
9653 gcc_assert (ok);
9654
9655 if (parts.base && GET_CODE (parts.base) == SUBREG)
9656 parts.base = SUBREG_REG (parts.base);
9657 if (parts.index && GET_CODE (parts.index) == SUBREG)
9658 parts.index = SUBREG_REG (parts.index);
9659
9660 /* Attempt to minimize number of registers in the address. */
9661 if ((parts.base
9662 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9663 || (parts.index
9664 && (!REG_P (parts.index)
9665 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9666 cost++;
9667
9668 if (parts.base
9669 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9670 && parts.index
9671 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9672 && parts.base != parts.index)
9673 cost++;
9674
9675 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9676 since its predecode logic can't detect the length of instructions
9677 and it degenerates to vector decoding. Increase the cost of such
9678 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
9679 to split such addresses or even refuse such addresses at all.
9680
9681 The following addressing modes are affected:
9682 [base+scale*index]
9683 [scale*index+disp]
9684 [base+index]
9685
9686 The first and last case may be avoidable by explicitly coding the zero in the
9687 memory address, but I don't have an AMD-K6 machine handy to check this
9688 theory. */
9689
9690 if (TARGET_K6
9691 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9692 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9693 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9694 cost += 10;
9695
9696 return cost;
9697 }
9698 \f
9699 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9700 this is used to form addresses to local data when -fPIC is in
9701 use. */
9702
9703 static bool
9704 darwin_local_data_pic (rtx disp)
9705 {
9706 return (GET_CODE (disp) == UNSPEC
9707 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9708 }
9709
9710 /* Determine if a given RTX is a valid constant. We already know this
9711 satisfies CONSTANT_P. */
9712
9713 bool
9714 legitimate_constant_p (rtx x)
9715 {
9716 switch (GET_CODE (x))
9717 {
9718 case CONST:
9719 x = XEXP (x, 0);
9720
9721 if (GET_CODE (x) == PLUS)
9722 {
9723 if (!CONST_INT_P (XEXP (x, 1)))
9724 return false;
9725 x = XEXP (x, 0);
9726 }
9727
9728 if (TARGET_MACHO && darwin_local_data_pic (x))
9729 return true;
9730
9731 /* Only some unspecs are valid as "constants". */
9732 if (GET_CODE (x) == UNSPEC)
9733 switch (XINT (x, 1))
9734 {
9735 case UNSPEC_GOT:
9736 case UNSPEC_GOTOFF:
9737 case UNSPEC_PLTOFF:
9738 return TARGET_64BIT;
9739 case UNSPEC_TPOFF:
9740 case UNSPEC_NTPOFF:
9741 x = XVECEXP (x, 0, 0);
9742 return (GET_CODE (x) == SYMBOL_REF
9743 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9744 case UNSPEC_DTPOFF:
9745 x = XVECEXP (x, 0, 0);
9746 return (GET_CODE (x) == SYMBOL_REF
9747 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9748 default:
9749 return false;
9750 }
9751
9752 /* We must have drilled down to a symbol. */
9753 if (GET_CODE (x) == LABEL_REF)
9754 return true;
9755 if (GET_CODE (x) != SYMBOL_REF)
9756 return false;
9757 /* FALLTHRU */
9758
9759 case SYMBOL_REF:
9760 /* TLS symbols are never valid. */
9761 if (SYMBOL_REF_TLS_MODEL (x))
9762 return false;
9763
9764 /* DLLIMPORT symbols are never valid. */
9765 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9766 && SYMBOL_REF_DLLIMPORT_P (x))
9767 return false;
9768 break;
9769
9770 case CONST_DOUBLE:
9771 if (GET_MODE (x) == TImode
9772 && x != CONST0_RTX (TImode)
9773 && !TARGET_64BIT)
9774 return false;
9775 break;
9776
9777 case CONST_VECTOR:
9778 if (!standard_sse_constant_p (x))
9779 return false;
9780
9781 default:
9782 break;
9783 }
9784
9785 /* Otherwise we handle everything else in the move patterns. */
9786 return true;
9787 }
9788
9789 /* Determine if it's legal to put X into the constant pool. This
9790 is not possible for the address of thread-local symbols, which
9791 is checked above. */
9792
9793 static bool
9794 ix86_cannot_force_const_mem (rtx x)
9795 {
9796 /* We can always put integral constants and vectors in memory. */
9797 switch (GET_CODE (x))
9798 {
9799 case CONST_INT:
9800 case CONST_DOUBLE:
9801 case CONST_VECTOR:
9802 return false;
9803
9804 default:
9805 break;
9806 }
9807 return !legitimate_constant_p (x);
9808 }
9809
9810
9811 /* Nonzero if the constant value X is a legitimate general operand
9812 when generating PIC code. It is given that flag_pic is on and
9813 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9814
9815 bool
9816 legitimate_pic_operand_p (rtx x)
9817 {
9818 rtx inner;
9819
9820 switch (GET_CODE (x))
9821 {
9822 case CONST:
9823 inner = XEXP (x, 0);
9824 if (GET_CODE (inner) == PLUS
9825 && CONST_INT_P (XEXP (inner, 1)))
9826 inner = XEXP (inner, 0);
9827
9828 /* Only some unspecs are valid as "constants". */
9829 if (GET_CODE (inner) == UNSPEC)
9830 switch (XINT (inner, 1))
9831 {
9832 case UNSPEC_GOT:
9833 case UNSPEC_GOTOFF:
9834 case UNSPEC_PLTOFF:
9835 return TARGET_64BIT;
9836 case UNSPEC_TPOFF:
9837 x = XVECEXP (inner, 0, 0);
9838 return (GET_CODE (x) == SYMBOL_REF
9839 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9840 case UNSPEC_MACHOPIC_OFFSET:
9841 return legitimate_pic_address_disp_p (x);
9842 default:
9843 return false;
9844 }
9845 /* FALLTHRU */
9846
9847 case SYMBOL_REF:
9848 case LABEL_REF:
9849 return legitimate_pic_address_disp_p (x);
9850
9851 default:
9852 return true;
9853 }
9854 }
9855
9856 /* Determine if a given CONST RTX is a valid memory displacement
9857 in PIC mode. */
9858
9859 int
9860 legitimate_pic_address_disp_p (rtx disp)
9861 {
9862 bool saw_plus;
9863
9864 /* In 64bit mode we can allow direct addresses of symbols and labels
9865 when they are not dynamic symbols. */
9866 if (TARGET_64BIT)
9867 {
9868 rtx op0 = disp, op1;
9869
9870 switch (GET_CODE (disp))
9871 {
9872 case LABEL_REF:
9873 return true;
9874
9875 case CONST:
9876 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9877 break;
9878 op0 = XEXP (XEXP (disp, 0), 0);
9879 op1 = XEXP (XEXP (disp, 0), 1);
9880 if (!CONST_INT_P (op1)
9881 || INTVAL (op1) >= 16*1024*1024
9882 || INTVAL (op1) < -16*1024*1024)
9883 break;
9884 if (GET_CODE (op0) == LABEL_REF)
9885 return true;
9886 if (GET_CODE (op0) != SYMBOL_REF)
9887 break;
9888 /* FALLTHRU */
9889
9890 case SYMBOL_REF:
9891 /* TLS references should always be enclosed in UNSPEC. */
9892 if (SYMBOL_REF_TLS_MODEL (op0))
9893 return false;
9894 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9895 && ix86_cmodel != CM_LARGE_PIC)
9896 return true;
9897 break;
9898
9899 default:
9900 break;
9901 }
9902 }
9903 if (GET_CODE (disp) != CONST)
9904 return 0;
9905 disp = XEXP (disp, 0);
9906
9907 if (TARGET_64BIT)
9908 {
9909 /* It is unsafe to allow PLUS expressions here; this would limit the allowed
9910 distance of GOT tables. We should not need these anyway. */
9911 if (GET_CODE (disp) != UNSPEC
9912 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9913 && XINT (disp, 1) != UNSPEC_GOTOFF
9914 && XINT (disp, 1) != UNSPEC_PLTOFF))
9915 return 0;
9916
9917 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9918 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9919 return 0;
9920 return 1;
9921 }
9922
9923 saw_plus = false;
9924 if (GET_CODE (disp) == PLUS)
9925 {
9926 if (!CONST_INT_P (XEXP (disp, 1)))
9927 return 0;
9928 disp = XEXP (disp, 0);
9929 saw_plus = true;
9930 }
9931
9932 if (TARGET_MACHO && darwin_local_data_pic (disp))
9933 return 1;
9934
9935 if (GET_CODE (disp) != UNSPEC)
9936 return 0;
9937
9938 switch (XINT (disp, 1))
9939 {
9940 case UNSPEC_GOT:
9941 if (saw_plus)
9942 return false;
9943 /* We need to check for both symbols and labels because VxWorks loads
9944 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9945 details. */
9946 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9947 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9948 case UNSPEC_GOTOFF:
9949 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9950 While the ABI also specifies a 32bit relocation, we don't produce it in the
9951 small PIC model at all. */
9952 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9953 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9954 && !TARGET_64BIT)
9955 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9956 return false;
9957 case UNSPEC_GOTTPOFF:
9958 case UNSPEC_GOTNTPOFF:
9959 case UNSPEC_INDNTPOFF:
9960 if (saw_plus)
9961 return false;
9962 disp = XVECEXP (disp, 0, 0);
9963 return (GET_CODE (disp) == SYMBOL_REF
9964 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9965 case UNSPEC_NTPOFF:
9966 disp = XVECEXP (disp, 0, 0);
9967 return (GET_CODE (disp) == SYMBOL_REF
9968 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9969 case UNSPEC_DTPOFF:
9970 disp = XVECEXP (disp, 0, 0);
9971 return (GET_CODE (disp) == SYMBOL_REF
9972 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9973 }
9974
9975 return 0;
9976 }
9977
9978 /* Recognizes RTL expressions that are valid memory addresses for an
9979 instruction. The MODE argument is the machine mode for the MEM
9980 expression that wants to use this address.
9981
9982 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9983 convert common non-canonical forms to canonical form so that they will
9984 be recognized. */
9985
9986 static bool
9987 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9988 rtx addr, bool strict)
9989 {
9990 struct ix86_address parts;
9991 rtx base, index, disp;
9992 HOST_WIDE_INT scale;
9993
9994 if (ix86_decompose_address (addr, &parts) <= 0)
9995 /* Decomposition failed. */
9996 return false;
9997
9998 base = parts.base;
9999 index = parts.index;
10000 disp = parts.disp;
10001 scale = parts.scale;
10002
10003 /* Validate base register.
10004
10005 Don't allow SUBREG's that span more than a word here. It can lead to spill
10006 failures when the base is one word out of a two word structure, which is
10007 represented internally as a DImode int. */
10008
10009 if (base)
10010 {
10011 rtx reg;
10012
10013 if (REG_P (base))
10014 reg = base;
10015 else if (GET_CODE (base) == SUBREG
10016 && REG_P (SUBREG_REG (base))
10017 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
10018 <= UNITS_PER_WORD)
10019 reg = SUBREG_REG (base);
10020 else
10021 /* Base is not a register. */
10022 return false;
10023
10024 if (GET_MODE (base) != Pmode)
10025 /* Base is not in Pmode. */
10026 return false;
10027
10028 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
10029 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
10030 /* Base is not valid. */
10031 return false;
10032 }
10033
10034 /* Validate index register.
10035
10036 Don't allow SUBREG's that span more than a word here -- same as above. */
10037
10038 if (index)
10039 {
10040 rtx reg;
10041
10042 if (REG_P (index))
10043 reg = index;
10044 else if (GET_CODE (index) == SUBREG
10045 && REG_P (SUBREG_REG (index))
10046 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
10047 <= UNITS_PER_WORD)
10048 reg = SUBREG_REG (index);
10049 else
10050 /* Index is not a register. */
10051 return false;
10052
10053 if (GET_MODE (index) != Pmode)
10054 /* Index is not in Pmode. */
10055 return false;
10056
10057 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
10058 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
10059 /* Index is not valid. */
10060 return false;
10061 }
10062
10063 /* Validate scale factor. */
10064 if (scale != 1)
10065 {
10066 if (!index)
10067 /* Scale without index. */
10068 return false;
10069
10070 if (scale != 2 && scale != 4 && scale != 8)
10071 /* Scale is not a valid multiplier. */
10072 return false;
10073 }
10074
10075 /* Validate displacement. */
10076 if (disp)
10077 {
10078 if (GET_CODE (disp) == CONST
10079 && GET_CODE (XEXP (disp, 0)) == UNSPEC
10080 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
10081 switch (XINT (XEXP (disp, 0), 1))
10082 {
10083 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
10084 used. While the ABI also specifies 32bit relocations, we don't produce
10085 them at all and use IP-relative addressing instead. */
10086 case UNSPEC_GOT:
10087 case UNSPEC_GOTOFF:
10088 gcc_assert (flag_pic);
10089 if (!TARGET_64BIT)
10090 goto is_legitimate_pic;
10091
10092 /* 64bit address unspec. */
10093 return false;
10094
10095 case UNSPEC_GOTPCREL:
10096 gcc_assert (flag_pic);
10097 goto is_legitimate_pic;
10098
10099 case UNSPEC_GOTTPOFF:
10100 case UNSPEC_GOTNTPOFF:
10101 case UNSPEC_INDNTPOFF:
10102 case UNSPEC_NTPOFF:
10103 case UNSPEC_DTPOFF:
10104 break;
10105
10106 default:
10107 /* Invalid address unspec. */
10108 return false;
10109 }
10110
10111 else if (SYMBOLIC_CONST (disp)
10112 && (flag_pic
10113 || (TARGET_MACHO
10114 #if TARGET_MACHO
10115 && MACHOPIC_INDIRECT
10116 && !machopic_operand_p (disp)
10117 #endif
10118 )))
10119 {
10120
10121 is_legitimate_pic:
10122 if (TARGET_64BIT && (index || base))
10123 {
10124 /* foo@dtpoff(%rX) is ok. */
10125 if (GET_CODE (disp) != CONST
10126 || GET_CODE (XEXP (disp, 0)) != PLUS
10127 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10128 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10129 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10130 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10131 /* Non-constant pic memory reference. */
10132 return false;
10133 }
10134 else if (! legitimate_pic_address_disp_p (disp))
10135 /* Displacement is an invalid pic construct. */
10136 return false;
10137
10138 /* This code used to verify that a symbolic pic displacement
10139 includes the pic_offset_table_rtx register.
10140
10141 While this is a good idea, unfortunately these constructs may
10142 be created by "adds using lea" optimization for incorrect
10143 code like:
10144
10145 int a;
10146 int foo(int i)
10147 {
10148 return *(&a+i);
10149 }
10150
10151 This code is nonsensical, but results in addressing the
10152 GOT table with a pic_offset_table_rtx base. We can't
10153 just refuse it easily, since it gets matched by the
10154 "addsi3" pattern, which later gets split to lea in case the
10155 output register differs from the input. While this
10156 could be handled by a separate addsi pattern for this case
10157 that never results in lea, disabling this test seems to be the
10158 easier and correct fix for the crash. */
10159 }
10160 else if (GET_CODE (disp) != LABEL_REF
10161 && !CONST_INT_P (disp)
10162 && (GET_CODE (disp) != CONST
10163 || !legitimate_constant_p (disp))
10164 && (GET_CODE (disp) != SYMBOL_REF
10165 || !legitimate_constant_p (disp)))
10166 /* Displacement is not constant. */
10167 return false;
10168 else if (TARGET_64BIT
10169 && !x86_64_immediate_operand (disp, VOIDmode))
10170 /* Displacement is out of range. */
10171 return false;
10172 }
10173
10174 /* Everything looks valid. */
10175 return true;
10176 }
10177
10178 /* Determine if a given RTX is a valid constant address. */
10179
10180 bool
10181 constant_address_p (rtx x)
10182 {
10183 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10184 }
10185 \f
10186 /* Return a unique alias set for the GOT. */
10187
10188 static alias_set_type
10189 ix86_GOT_alias_set (void)
10190 {
10191 static alias_set_type set = -1;
10192 if (set == -1)
10193 set = new_alias_set ();
10194 return set;
10195 }
10196
10197 /* Return a legitimate reference for ORIG (an address) using the
10198 register REG. If REG is 0, a new pseudo is generated.
10199
10200 There are two types of references that must be handled:
10201
10202 1. Global data references must load the address from the GOT, via
10203 the PIC reg. An insn is emitted to do this load, and the reg is
10204 returned.
10205
10206 2. Static data references, constant pool addresses, and code labels
10207 compute the address as an offset from the GOT, whose base is in
10208 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10209 differentiate them from global data objects. The returned
10210 address is the PIC reg + an unspec constant.
10211
10212 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10213 reg also appears in the address. */
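/* On ia32 this typically materializes as
movl x@GOT(%ebx), %reg for global data (address loaded from the GOT), or
leal x@GOTOFF(%ebx), %reg for static/local data (offset from the GOT base),
and on x86-64 with the small PIC model as
movq x@GOTPCREL(%rip), %reg. */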
10214
10215 static rtx
10216 legitimize_pic_address (rtx orig, rtx reg)
10217 {
10218 rtx addr = orig;
10219 rtx new_rtx = orig;
10220 rtx base;
10221
10222 #if TARGET_MACHO
10223 if (TARGET_MACHO && !TARGET_64BIT)
10224 {
10225 if (reg == 0)
10226 reg = gen_reg_rtx (Pmode);
10227 /* Use the generic Mach-O PIC machinery. */
10228 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10229 }
10230 #endif
10231
10232 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10233 new_rtx = addr;
10234 else if (TARGET_64BIT
10235 && ix86_cmodel != CM_SMALL_PIC
10236 && gotoff_operand (addr, Pmode))
10237 {
10238 rtx tmpreg;
10239 /* This symbol may be referenced via a displacement from the PIC
10240 base address (@GOTOFF). */
10241
10242 if (reload_in_progress)
10243 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10244 if (GET_CODE (addr) == CONST)
10245 addr = XEXP (addr, 0);
10246 if (GET_CODE (addr) == PLUS)
10247 {
10248 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10249 UNSPEC_GOTOFF);
10250 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10251 }
10252 else
10253 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10254 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10255 if (!reg)
10256 tmpreg = gen_reg_rtx (Pmode);
10257 else
10258 tmpreg = reg;
10259 emit_move_insn (tmpreg, new_rtx);
10260
10261 if (reg != 0)
10262 {
10263 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10264 tmpreg, 1, OPTAB_DIRECT);
10265 new_rtx = reg;
10266 }
10267 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10268 }
10269 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10270 {
10271 /* This symbol may be referenced via a displacement from the PIC
10272 base address (@GOTOFF). */
10273
10274 if (reload_in_progress)
10275 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10276 if (GET_CODE (addr) == CONST)
10277 addr = XEXP (addr, 0);
10278 if (GET_CODE (addr) == PLUS)
10279 {
10280 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10281 UNSPEC_GOTOFF);
10282 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10283 }
10284 else
10285 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10286 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10287 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10288
10289 if (reg != 0)
10290 {
10291 emit_move_insn (reg, new_rtx);
10292 new_rtx = reg;
10293 }
10294 }
10295 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10296 /* We can't use @GOTOFF for text labels on VxWorks;
10297 see gotoff_operand. */
10298 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10299 {
10300 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10301 {
10302 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10303 return legitimize_dllimport_symbol (addr, true);
10304 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10305 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10306 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10307 {
10308 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10309 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10310 }
10311 }
10312
10313 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10314 {
10315 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10316 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10317 new_rtx = gen_const_mem (Pmode, new_rtx);
10318 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10319
10320 if (reg == 0)
10321 reg = gen_reg_rtx (Pmode);
10322 /* Use gen_movsi directly, otherwise the address is loaded
10323 into a register for CSE. We don't want to CSE these addresses;
10324 instead we CSE addresses from the GOT table, so skip this. */
10325 emit_insn (gen_movsi (reg, new_rtx));
10326 new_rtx = reg;
10327 }
10328 else
10329 {
10330 /* This symbol must be referenced via a load from the
10331 Global Offset Table (@GOT). */
10332
10333 if (reload_in_progress)
10334 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10335 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10336 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10337 if (TARGET_64BIT)
10338 new_rtx = force_reg (Pmode, new_rtx);
10339 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10340 new_rtx = gen_const_mem (Pmode, new_rtx);
10341 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10342
10343 if (reg == 0)
10344 reg = gen_reg_rtx (Pmode);
10345 emit_move_insn (reg, new_rtx);
10346 new_rtx = reg;
10347 }
10348 }
10349 else
10350 {
10351 if (CONST_INT_P (addr)
10352 && !x86_64_immediate_operand (addr, VOIDmode))
10353 {
10354 if (reg)
10355 {
10356 emit_move_insn (reg, addr);
10357 new_rtx = reg;
10358 }
10359 else
10360 new_rtx = force_reg (Pmode, addr);
10361 }
10362 else if (GET_CODE (addr) == CONST)
10363 {
10364 addr = XEXP (addr, 0);
10365
10366 /* We must match stuff we generate before. Assume the only
10367 unspecs that can get here are ours. Not that we could do
10368 anything with them anyway.... */
10369 if (GET_CODE (addr) == UNSPEC
10370 || (GET_CODE (addr) == PLUS
10371 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10372 return orig;
10373 gcc_assert (GET_CODE (addr) == PLUS);
10374 }
10375 if (GET_CODE (addr) == PLUS)
10376 {
10377 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10378
10379 /* Check first to see if this is a constant offset from a @GOTOFF
10380 symbol reference. */
10381 if (gotoff_operand (op0, Pmode)
10382 && CONST_INT_P (op1))
10383 {
10384 if (!TARGET_64BIT)
10385 {
10386 if (reload_in_progress)
10387 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10388 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10389 UNSPEC_GOTOFF);
10390 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10391 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10392 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10393
10394 if (reg != 0)
10395 {
10396 emit_move_insn (reg, new_rtx);
10397 new_rtx = reg;
10398 }
10399 }
10400 else
10401 {
10402 if (INTVAL (op1) < -16*1024*1024
10403 || INTVAL (op1) >= 16*1024*1024)
10404 {
10405 if (!x86_64_immediate_operand (op1, Pmode))
10406 op1 = force_reg (Pmode, op1);
10407 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10408 }
10409 }
10410 }
10411 else
10412 {
10413 base = legitimize_pic_address (XEXP (addr, 0), reg);
10414 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10415 base == reg ? NULL_RTX : reg);
10416
10417 if (CONST_INT_P (new_rtx))
10418 new_rtx = plus_constant (base, INTVAL (new_rtx));
10419 else
10420 {
10421 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10422 {
10423 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10424 new_rtx = XEXP (new_rtx, 1);
10425 }
10426 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10427 }
10428 }
10429 }
10430 }
10431 return new_rtx;
10432 }
10433 \f
10434 /* Load the thread pointer. If TO_REG is true, force it into a register. */
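/* UNSPEC_TP stands for the thread pointer, which the GNU/Linux ABIs expose
through the %gs segment on ia32 and %fs on x86-64 (matching the
SEG_GS/SEG_FS choice in ix86_decompose_address); with
TARGET_TLS_DIRECT_SEG_REFS a local-exec access can then be addressed
directly as e.g. %gs:x@ntpoff. */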
10435
10436 static rtx
10437 get_thread_pointer (int to_reg)
10438 {
10439 rtx tp, reg, insn;
10440
10441 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10442 if (!to_reg)
10443 return tp;
10444
10445 reg = gen_reg_rtx (Pmode);
10446 insn = gen_rtx_SET (VOIDmode, reg, tp);
10447 insn = emit_insn (insn);
10448
10449 return reg;
10450 }
10451
10452 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10453 false if we expect this to be used for a memory address and true if
10454 we expect to load the address into a register. */
10455
10456 static rtx
10457 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10458 {
10459 rtx dest, base, off, pic, tp;
10460 int type;
10461
10462 switch (model)
10463 {
10464 case TLS_MODEL_GLOBAL_DYNAMIC:
10465 dest = gen_reg_rtx (Pmode);
10466 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10467
10468 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10469 {
10470 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10471
10472 start_sequence ();
10473 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10474 insns = get_insns ();
10475 end_sequence ();
10476
10477 RTL_CONST_CALL_P (insns) = 1;
10478 emit_libcall_block (insns, dest, rax, x);
10479 }
10480 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10481 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10482 else
10483 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10484
10485 if (TARGET_GNU2_TLS)
10486 {
10487 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10488
10489 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10490 }
10491 break;
10492
10493 case TLS_MODEL_LOCAL_DYNAMIC:
10494 base = gen_reg_rtx (Pmode);
10495 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10496
10497 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10498 {
10499 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10500
10501 start_sequence ();
10502 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10503 insns = get_insns ();
10504 end_sequence ();
10505
10506 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10507 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10508 RTL_CONST_CALL_P (insns) = 1;
10509 emit_libcall_block (insns, base, rax, note);
10510 }
10511 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10512 emit_insn (gen_tls_local_dynamic_base_64 (base));
10513 else
10514 emit_insn (gen_tls_local_dynamic_base_32 (base));
10515
10516 if (TARGET_GNU2_TLS)
10517 {
10518 rtx x = ix86_tls_module_base ();
10519
10520 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10521 gen_rtx_MINUS (Pmode, x, tp));
10522 }
10523
10524 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10525 off = gen_rtx_CONST (Pmode, off);
10526
10527 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10528
10529 if (TARGET_GNU2_TLS)
10530 {
10531 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10532
10533 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10534 }
10535
10536 break;
10537
10538 case TLS_MODEL_INITIAL_EXEC:
10539 if (TARGET_64BIT)
10540 {
10541 pic = NULL;
10542 type = UNSPEC_GOTNTPOFF;
10543 }
10544 else if (flag_pic)
10545 {
10546 if (reload_in_progress)
10547 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10548 pic = pic_offset_table_rtx;
10549 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10550 }
10551 else if (!TARGET_ANY_GNU_TLS)
10552 {
10553 pic = gen_reg_rtx (Pmode);
10554 emit_insn (gen_set_got (pic));
10555 type = UNSPEC_GOTTPOFF;
10556 }
10557 else
10558 {
10559 pic = NULL;
10560 type = UNSPEC_INDNTPOFF;
10561 }
10562
10563 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10564 off = gen_rtx_CONST (Pmode, off);
10565 if (pic)
10566 off = gen_rtx_PLUS (Pmode, pic, off);
10567 off = gen_const_mem (Pmode, off);
10568 set_mem_alias_set (off, ix86_GOT_alias_set ());
10569
10570 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10571 {
10572 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10573 off = force_reg (Pmode, off);
10574 return gen_rtx_PLUS (Pmode, base, off);
10575 }
10576 else
10577 {
10578 base = get_thread_pointer (true);
10579 dest = gen_reg_rtx (Pmode);
10580 emit_insn (gen_subsi3 (dest, base, off));
10581 }
10582 break;
10583
10584 case TLS_MODEL_LOCAL_EXEC:
10585 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10586 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10587 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10588 off = gen_rtx_CONST (Pmode, off);
10589
10590 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10591 {
10592 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10593 return gen_rtx_PLUS (Pmode, base, off);
10594 }
10595 else
10596 {
10597 base = get_thread_pointer (true);
10598 dest = gen_reg_rtx (Pmode);
10599 emit_insn (gen_subsi3 (dest, base, off));
10600 }
10601 break;
10602
10603 default:
10604 gcc_unreachable ();
10605 }
10606
10607 return dest;
10608 }
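
/* Rough summary of the four cases handled above (illustrative only):

     global dynamic  -- call __tls_get_addr via tls_global_dynamic_32/64;
                        the address comes back in %eax / %rax.
     local dynamic   -- one call fetches the module base, then each
                        variable is addressed by a constant @dtpoff offset.
     initial exec    -- the variable's TP-relative offset is loaded from
                        the GOT (@gottpoff / @gotntpoff / @indntpoff) and
                        combined with the thread pointer.
     local exec      -- a link-time constant @tpoff / @ntpoff offset is
                        added to (or subtracted from) the thread pointer,
                        e.g. roughly   movl %gs:0, %eax
                                       leal x@ntpoff(%eax), %eax   on ia32.  */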
10609
10610 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10611 to symbol DECL. */
10612
10613 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10614 htab_t dllimport_map;
10615
10616 static tree
10617 get_dllimport_decl (tree decl)
10618 {
10619 struct tree_map *h, in;
10620 void **loc;
10621 const char *name;
10622 const char *prefix;
10623 size_t namelen, prefixlen;
10624 char *imp_name;
10625 tree to;
10626 rtx rtl;
10627
10628 if (!dllimport_map)
10629 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10630
10631 in.hash = htab_hash_pointer (decl);
10632 in.base.from = decl;
10633 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10634 h = (struct tree_map *) *loc;
10635 if (h)
10636 return h->to;
10637
10638 *loc = h = GGC_NEW (struct tree_map);
10639 h->hash = in.hash;
10640 h->base.from = decl;
10641 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10642 VAR_DECL, NULL, ptr_type_node);
10643 DECL_ARTIFICIAL (to) = 1;
10644 DECL_IGNORED_P (to) = 1;
10645 DECL_EXTERNAL (to) = 1;
10646 TREE_READONLY (to) = 1;
10647
10648 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10649 name = targetm.strip_name_encoding (name);
10650 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10651 ? "*__imp_" : "*__imp__";
10652 namelen = strlen (name);
10653 prefixlen = strlen (prefix);
10654 imp_name = (char *) alloca (namelen + prefixlen + 1);
10655 memcpy (imp_name, prefix, prefixlen);
10656 memcpy (imp_name + prefixlen, name, namelen + 1);
10657
10658 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10659 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10660 SET_SYMBOL_REF_DECL (rtl, to);
10661 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10662
10663 rtl = gen_const_mem (Pmode, rtl);
10664 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10665
10666 SET_DECL_RTL (to, rtl);
10667 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10668
10669 return to;
10670 }
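
/* For example (illustrative): for a dllimport'd symbol "foo", the decl
   built above gets the assembler name "*__imp__foo" when the target uses
   a user label prefix and "*__imp_foo" otherwise (fastcall names also
   take the single-underscore form); the leading '*' suppresses any
   further prefixing.  Its DECL_RTL is a (mem (symbol_ref "*__imp_...")),
   i.e. a load of the symbol's address from the import table.  */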
10671
10672 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10673 true if we require the result be a register. */
10674
10675 static rtx
10676 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10677 {
10678 tree imp_decl;
10679 rtx x;
10680
10681 gcc_assert (SYMBOL_REF_DECL (symbol));
10682 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10683
10684 x = DECL_RTL (imp_decl);
10685 if (want_reg)
10686 x = force_reg (Pmode, x);
10687 return x;
10688 }
10689
10690 /* Try machine-dependent ways of modifying an illegitimate address
10691 to be legitimate. If we find one, return the new, valid address.
10692 This macro is used in only one place: `memory_address' in explow.c.
10693
10694 OLDX is the address as it was before break_out_memory_refs was called.
10695 In some cases it is useful to look at this to decide what needs to be done.
10696
10697 It is always safe for this macro to do nothing. It exists to recognize
10698 opportunities to optimize the output.
10699
10700 For the 80386, we handle X+REG by loading X into a register R and
10701 using R+REG. R will go in a general reg and indexing will be used.
10702 However, if REG is a broken-out memory address or multiplication,
10703 nothing needs to be done because REG can certainly go in a general reg.
10704
10705 When -fpic is used, special handling is needed for symbolic references.
10706 See comments by legitimize_pic_address in i386.c for details. */
10707
10708 static rtx
10709 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10710 enum machine_mode mode)
10711 {
10712 int changed = 0;
10713 unsigned log;
10714
10715 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10716 if (log)
10717 return legitimize_tls_address (x, (enum tls_model) log, false);
10718 if (GET_CODE (x) == CONST
10719 && GET_CODE (XEXP (x, 0)) == PLUS
10720 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10721 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10722 {
10723 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10724 (enum tls_model) log, false);
10725 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10726 }
10727
10728 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10729 {
10730 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10731 return legitimize_dllimport_symbol (x, true);
10732 if (GET_CODE (x) == CONST
10733 && GET_CODE (XEXP (x, 0)) == PLUS
10734 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10735 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10736 {
10737 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10738 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10739 }
10740 }
10741
10742 if (flag_pic && SYMBOLIC_CONST (x))
10743 return legitimize_pic_address (x, 0);
10744
10745 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10746 if (GET_CODE (x) == ASHIFT
10747 && CONST_INT_P (XEXP (x, 1))
10748 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10749 {
10750 changed = 1;
10751 log = INTVAL (XEXP (x, 1));
10752 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10753 GEN_INT (1 << log));
10754 }
10755
10756 if (GET_CODE (x) == PLUS)
10757 {
10758 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10759
10760 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10761 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10762 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10763 {
10764 changed = 1;
10765 log = INTVAL (XEXP (XEXP (x, 0), 1));
10766 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10767 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10768 GEN_INT (1 << log));
10769 }
10770
10771 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10772 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10773 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10774 {
10775 changed = 1;
10776 log = INTVAL (XEXP (XEXP (x, 1), 1));
10777 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10778 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10779 GEN_INT (1 << log));
10780 }
10781
10782 /* Put multiply first if it isn't already. */
10783 if (GET_CODE (XEXP (x, 1)) == MULT)
10784 {
10785 rtx tmp = XEXP (x, 0);
10786 XEXP (x, 0) = XEXP (x, 1);
10787 XEXP (x, 1) = tmp;
10788 changed = 1;
10789 }
10790
10791 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10792 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10793 created by virtual register instantiation, register elimination, and
10794 similar optimizations. */
10795 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10796 {
10797 changed = 1;
10798 x = gen_rtx_PLUS (Pmode,
10799 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10800 XEXP (XEXP (x, 1), 0)),
10801 XEXP (XEXP (x, 1), 1));
10802 }
10803
10804 /* Canonicalize
10805 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10806 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10807 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10808 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10809 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10810 && CONSTANT_P (XEXP (x, 1)))
10811 {
10812 rtx constant;
10813 rtx other = NULL_RTX;
10814
10815 if (CONST_INT_P (XEXP (x, 1)))
10816 {
10817 constant = XEXP (x, 1);
10818 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10819 }
10820 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10821 {
10822 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10823 other = XEXP (x, 1);
10824 }
10825 else
10826 constant = 0;
10827
10828 if (constant)
10829 {
10830 changed = 1;
10831 x = gen_rtx_PLUS (Pmode,
10832 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10833 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10834 plus_constant (other, INTVAL (constant)));
10835 }
10836 }
10837
10838 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10839 return x;
10840
10841 if (GET_CODE (XEXP (x, 0)) == MULT)
10842 {
10843 changed = 1;
10844 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10845 }
10846
10847 if (GET_CODE (XEXP (x, 1)) == MULT)
10848 {
10849 changed = 1;
10850 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10851 }
10852
10853 if (changed
10854 && REG_P (XEXP (x, 1))
10855 && REG_P (XEXP (x, 0)))
10856 return x;
10857
10858 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10859 {
10860 changed = 1;
10861 x = legitimize_pic_address (x, 0);
10862 }
10863
10864 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10865 return x;
10866
10867 if (REG_P (XEXP (x, 0)))
10868 {
10869 rtx temp = gen_reg_rtx (Pmode);
10870 rtx val = force_operand (XEXP (x, 1), temp);
10871 if (val != temp)
10872 emit_move_insn (temp, val);
10873
10874 XEXP (x, 1) = temp;
10875 return x;
10876 }
10877
10878 else if (REG_P (XEXP (x, 1)))
10879 {
10880 rtx temp = gen_reg_rtx (Pmode);
10881 rtx val = force_operand (XEXP (x, 0), temp);
10882 if (val != temp)
10883 emit_move_insn (temp, val);
10884
10885 XEXP (x, 0) = temp;
10886 return x;
10887 }
10888 }
10889
10890 return x;
10891 }
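
/* Example of the canonicalization performed above (illustrative): an
   address such as (plus (ashift (reg A) (const_int 2)) (reg B)) is
   rewritten as (plus (mult (reg A) (const_int 4)) (reg B)), i.e. into
   the base + index*scale shape that ix86_legitimate_address_p accepts,
   which finally prints as something like (%ebx,%eax,4).  */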
10892 \f
10893 /* Print an integer constant expression in assembler syntax. Addition
10894 and subtraction are the only arithmetic that may appear in these
10895 expressions. FILE is the stdio stream to write to, X is the rtx, and
10896 CODE is the operand print code from the output string. */
10897
10898 static void
10899 output_pic_addr_const (FILE *file, rtx x, int code)
10900 {
10901 char buf[256];
10902
10903 switch (GET_CODE (x))
10904 {
10905 case PC:
10906 gcc_assert (flag_pic);
10907 putc ('.', file);
10908 break;
10909
10910 case SYMBOL_REF:
10911 if (! TARGET_MACHO || TARGET_64BIT)
10912 output_addr_const (file, x);
10913 else
10914 {
10915 const char *name = XSTR (x, 0);
10916
10917 /* Mark the decl as referenced so that cgraph will
10918 output the function. */
10919 if (SYMBOL_REF_DECL (x))
10920 mark_decl_referenced (SYMBOL_REF_DECL (x));
10921
10922 #if TARGET_MACHO
10923 if (MACHOPIC_INDIRECT
10924 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10925 name = machopic_indirection_name (x, /*stub_p=*/true);
10926 #endif
10927 assemble_name (file, name);
10928 }
10929 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10930 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10931 fputs ("@PLT", file);
10932 break;
10933
10934 case LABEL_REF:
10935 x = XEXP (x, 0);
10936 /* FALLTHRU */
10937 case CODE_LABEL:
10938 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10939 assemble_name (asm_out_file, buf);
10940 break;
10941
10942 case CONST_INT:
10943 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10944 break;
10945
10946 case CONST:
10947 /* This used to output parentheses around the expression,
10948 but that does not work on the 386 (either ATT or BSD assembler). */
10949 output_pic_addr_const (file, XEXP (x, 0), code);
10950 break;
10951
10952 case CONST_DOUBLE:
10953 if (GET_MODE (x) == VOIDmode)
10954 {
10955 /* We can use %d if the number is <32 bits and positive. */
10956 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10957 fprintf (file, "0x%lx%08lx",
10958 (unsigned long) CONST_DOUBLE_HIGH (x),
10959 (unsigned long) CONST_DOUBLE_LOW (x));
10960 else
10961 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10962 }
10963 else
10964 /* We can't handle floating point constants;
10965 TARGET_PRINT_OPERAND must handle them. */
10966 output_operand_lossage ("floating constant misused");
10967 break;
10968
10969 case PLUS:
10970 /* Some assemblers need integer constants to appear first. */
10971 if (CONST_INT_P (XEXP (x, 0)))
10972 {
10973 output_pic_addr_const (file, XEXP (x, 0), code);
10974 putc ('+', file);
10975 output_pic_addr_const (file, XEXP (x, 1), code);
10976 }
10977 else
10978 {
10979 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10980 output_pic_addr_const (file, XEXP (x, 1), code);
10981 putc ('+', file);
10982 output_pic_addr_const (file, XEXP (x, 0), code);
10983 }
10984 break;
10985
10986 case MINUS:
10987 if (!TARGET_MACHO)
10988 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10989 output_pic_addr_const (file, XEXP (x, 0), code);
10990 putc ('-', file);
10991 output_pic_addr_const (file, XEXP (x, 1), code);
10992 if (!TARGET_MACHO)
10993 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10994 break;
10995
10996 case UNSPEC:
10997 gcc_assert (XVECLEN (x, 0) == 1);
10998 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10999 switch (XINT (x, 1))
11000 {
11001 case UNSPEC_GOT:
11002 fputs ("@GOT", file);
11003 break;
11004 case UNSPEC_GOTOFF:
11005 fputs ("@GOTOFF", file);
11006 break;
11007 case UNSPEC_PLTOFF:
11008 fputs ("@PLTOFF", file);
11009 break;
11010 case UNSPEC_GOTPCREL:
11011 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11012 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
11013 break;
11014 case UNSPEC_GOTTPOFF:
11015 /* FIXME: This might be @TPOFF in Sun ld too. */
11016 fputs ("@gottpoff", file);
11017 break;
11018 case UNSPEC_TPOFF:
11019 fputs ("@tpoff", file);
11020 break;
11021 case UNSPEC_NTPOFF:
11022 if (TARGET_64BIT)
11023 fputs ("@tpoff", file);
11024 else
11025 fputs ("@ntpoff", file);
11026 break;
11027 case UNSPEC_DTPOFF:
11028 fputs ("@dtpoff", file);
11029 break;
11030 case UNSPEC_GOTNTPOFF:
11031 if (TARGET_64BIT)
11032 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11033 "@gottpoff(%rip)": "@gottpoff[rip]", file);
11034 else
11035 fputs ("@gotntpoff", file);
11036 break;
11037 case UNSPEC_INDNTPOFF:
11038 fputs ("@indntpoff", file);
11039 break;
11040 #if TARGET_MACHO
11041 case UNSPEC_MACHOPIC_OFFSET:
11042 putc ('-', file);
11043 machopic_output_function_base_name (file);
11044 break;
11045 #endif
11046 default:
11047 output_operand_lossage ("invalid UNSPEC as operand");
11048 break;
11049 }
11050 break;
11051
11052 default:
11053 output_operand_lossage ("invalid expression as operand");
11054 }
11055 }
11056
11057 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
11058 We need to emit DTP-relative relocations. */
11059
11060 static void ATTRIBUTE_UNUSED
11061 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
11062 {
11063 fputs (ASM_LONG, file);
11064 output_addr_const (file, x);
11065 fputs ("@dtpoff", file);
11066 switch (size)
11067 {
11068 case 4:
11069 break;
11070 case 8:
11071 fputs (", 0", file);
11072 break;
11073 default:
11074 gcc_unreachable ();
11075 }
11076 }
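
/* Illustratively, with ASM_LONG being ".long" on most configurations,
   this emits ".long foo@dtpoff" for SIZE == 4 and ".long foo@dtpoff, 0"
   for SIZE == 8 (the upper half of the 64-bit DTP-relative value is
   filled with zero).  */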
11077
11078 /* Return true if X is a representation of the PIC register. This copes
11079 with calls from ix86_find_base_term, where the register might have
11080 been replaced by a cselib value. */
11081
11082 static bool
11083 ix86_pic_register_p (rtx x)
11084 {
11085 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
11086 return (pic_offset_table_rtx
11087 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
11088 else
11089 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
11090 }
11091
11092 /* In the name of slightly smaller debug output, and to cater to
11093 general assembler lossage, recognize PIC+GOTOFF and turn it back
11094 into a direct symbol reference.
11095
11096 On Darwin, this is necessary to avoid a crash, because Darwin
11097 has a different PIC label for each routine but the DWARF debugging
11098 information is not associated with any particular routine, so it's
11099 necessary to remove references to the PIC label from RTL stored by
11100 the DWARF output code. */
11101
11102 static rtx
11103 ix86_delegitimize_address (rtx x)
11104 {
11105 rtx orig_x = delegitimize_mem_from_attrs (x);
11106 /* addend is NULL or some rtx if x is something+GOTOFF where
11107 something doesn't include the PIC register. */
11108 rtx addend = NULL_RTX;
11109 /* reg_addend is NULL or a multiple of some register. */
11110 rtx reg_addend = NULL_RTX;
11111 /* const_addend is NULL or a const_int. */
11112 rtx const_addend = NULL_RTX;
11113 /* This is the result, or NULL. */
11114 rtx result = NULL_RTX;
11115
11116 x = orig_x;
11117
11118 if (MEM_P (x))
11119 x = XEXP (x, 0);
11120
11121 if (TARGET_64BIT)
11122 {
11123 if (GET_CODE (x) != CONST
11124 || GET_CODE (XEXP (x, 0)) != UNSPEC
11125 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11126 || !MEM_P (orig_x))
11127 return orig_x;
11128 x = XVECEXP (XEXP (x, 0), 0, 0);
11129 if (GET_MODE (orig_x) != Pmode)
11130 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
11131 return x;
11132 }
11133
11134 if (GET_CODE (x) != PLUS
11135 || GET_CODE (XEXP (x, 1)) != CONST)
11136 return orig_x;
11137
11138 if (ix86_pic_register_p (XEXP (x, 0)))
11139 /* %ebx + GOT/GOTOFF */
11140 ;
11141 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11142 {
11143 /* %ebx + %reg * scale + GOT/GOTOFF */
11144 reg_addend = XEXP (x, 0);
11145 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11146 reg_addend = XEXP (reg_addend, 1);
11147 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11148 reg_addend = XEXP (reg_addend, 0);
11149 else
11150 {
11151 reg_addend = NULL_RTX;
11152 addend = XEXP (x, 0);
11153 }
11154 }
11155 else
11156 addend = XEXP (x, 0);
11157
11158 x = XEXP (XEXP (x, 1), 0);
11159 if (GET_CODE (x) == PLUS
11160 && CONST_INT_P (XEXP (x, 1)))
11161 {
11162 const_addend = XEXP (x, 1);
11163 x = XEXP (x, 0);
11164 }
11165
11166 if (GET_CODE (x) == UNSPEC
11167 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11168 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11169 result = XVECEXP (x, 0, 0);
11170
11171 if (TARGET_MACHO && darwin_local_data_pic (x)
11172 && !MEM_P (orig_x))
11173 result = XVECEXP (x, 0, 0);
11174
11175 if (! result)
11176 return orig_x;
11177
11178 if (const_addend)
11179 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11180 if (reg_addend)
11181 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11182 if (addend)
11183 {
11184 /* If the rest of original X doesn't involve the PIC register, add
11185 addend and subtract pic_offset_table_rtx. This can happen e.g.
11186 for code like:
11187 leal (%ebx, %ecx, 4), %ecx
11188 ...
11189 movl foo@GOTOFF(%ecx), %edx
11190 in which case we return (%ecx - %ebx) + foo. */
11191 if (pic_offset_table_rtx)
11192 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11193 pic_offset_table_rtx),
11194 result);
11195 else
11196 return orig_x;
11197 }
11198 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
11199 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
11200 return result;
11201 }
11202
11203 /* If X is a machine specific address (i.e. a symbol or label being
11204 referenced as a displacement from the GOT implemented using an
11205 UNSPEC), then return the base term. Otherwise return X. */
11206
11207 rtx
11208 ix86_find_base_term (rtx x)
11209 {
11210 rtx term;
11211
11212 if (TARGET_64BIT)
11213 {
11214 if (GET_CODE (x) != CONST)
11215 return x;
11216 term = XEXP (x, 0);
11217 if (GET_CODE (term) == PLUS
11218 && (CONST_INT_P (XEXP (term, 1))
11219 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11220 term = XEXP (term, 0);
11221 if (GET_CODE (term) != UNSPEC
11222 || XINT (term, 1) != UNSPEC_GOTPCREL)
11223 return x;
11224
11225 return XVECEXP (term, 0, 0);
11226 }
11227
11228 return ix86_delegitimize_address (x);
11229 }
11230 \f
11231 static void
11232 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11233 int fp, FILE *file)
11234 {
11235 const char *suffix;
11236
11237 if (mode == CCFPmode || mode == CCFPUmode)
11238 {
11239 code = ix86_fp_compare_code_to_integer (code);
11240 mode = CCmode;
11241 }
11242 if (reverse)
11243 code = reverse_condition (code);
11244
11245 switch (code)
11246 {
11247 case EQ:
11248 switch (mode)
11249 {
11250 case CCAmode:
11251 suffix = "a";
11252 break;
11253
11254 case CCCmode:
11255 suffix = "c";
11256 break;
11257
11258 case CCOmode:
11259 suffix = "o";
11260 break;
11261
11262 case CCSmode:
11263 suffix = "s";
11264 break;
11265
11266 default:
11267 suffix = "e";
11268 }
11269 break;
11270 case NE:
11271 switch (mode)
11272 {
11273 case CCAmode:
11274 suffix = "na";
11275 break;
11276
11277 case CCCmode:
11278 suffix = "nc";
11279 break;
11280
11281 case CCOmode:
11282 suffix = "no";
11283 break;
11284
11285 case CCSmode:
11286 suffix = "ns";
11287 break;
11288
11289 default:
11290 suffix = "ne";
11291 }
11292 break;
11293 case GT:
11294 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11295 suffix = "g";
11296 break;
11297 case GTU:
11298 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11299 Those same assemblers have the same but opposite lossage on cmov. */
11300 if (mode == CCmode)
11301 suffix = fp ? "nbe" : "a";
11302 else if (mode == CCCmode)
11303 suffix = "b";
11304 else
11305 gcc_unreachable ();
11306 break;
11307 case LT:
11308 switch (mode)
11309 {
11310 case CCNOmode:
11311 case CCGOCmode:
11312 suffix = "s";
11313 break;
11314
11315 case CCmode:
11316 case CCGCmode:
11317 suffix = "l";
11318 break;
11319
11320 default:
11321 gcc_unreachable ();
11322 }
11323 break;
11324 case LTU:
11325 gcc_assert (mode == CCmode || mode == CCCmode);
11326 suffix = "b";
11327 break;
11328 case GE:
11329 switch (mode)
11330 {
11331 case CCNOmode:
11332 case CCGOCmode:
11333 suffix = "ns";
11334 break;
11335
11336 case CCmode:
11337 case CCGCmode:
11338 suffix = "ge";
11339 break;
11340
11341 default:
11342 gcc_unreachable ();
11343 }
11344 break;
11345 case GEU:
11346 /* ??? As above. */
11347 gcc_assert (mode == CCmode || mode == CCCmode);
11348 suffix = fp ? "nb" : "ae";
11349 break;
11350 case LE:
11351 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11352 suffix = "le";
11353 break;
11354 case LEU:
11355 /* ??? As above. */
11356 if (mode == CCmode)
11357 suffix = "be";
11358 else if (mode == CCCmode)
11359 suffix = fp ? "nb" : "ae";
11360 else
11361 gcc_unreachable ();
11362 break;
11363 case UNORDERED:
11364 suffix = fp ? "u" : "p";
11365 break;
11366 case ORDERED:
11367 suffix = fp ? "nu" : "np";
11368 break;
11369 default:
11370 gcc_unreachable ();
11371 }
11372 fputs (suffix, file);
11373 }
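
/* A few sample mappings produced above (illustrative): (EQ, CCmode)
   prints "e" (as in sete/je/cmove), (GTU, CCmode) prints "a" (or "nbe"
   for the fcmov forms), (LT, CCGOCmode) prints "s" (testing just the
   sign flag), and with REVERSE set EQ prints "ne" instead.  */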
11374
11375 /* Print the name of register X to FILE based on its machine mode and number.
11376 If CODE is 'w', pretend the mode is HImode.
11377 If CODE is 'b', pretend the mode is QImode.
11378 If CODE is 'k', pretend the mode is SImode.
11379 If CODE is 'q', pretend the mode is DImode.
11380 If CODE is 'x', pretend the mode is V4SFmode.
11381 If CODE is 't', pretend the mode is V8SFmode.
11382 If CODE is 'h', pretend the reg is the 'high' byte register.
11383 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11384 If CODE is 'd', duplicate the operand for AVX instruction.
11385 */
11386
11387 void
11388 print_reg (rtx x, int code, FILE *file)
11389 {
11390 const char *reg;
11391 bool duplicated = code == 'd' && TARGET_AVX;
11392
11393 gcc_assert (x == pc_rtx
11394 || (REGNO (x) != ARG_POINTER_REGNUM
11395 && REGNO (x) != FRAME_POINTER_REGNUM
11396 && REGNO (x) != FLAGS_REG
11397 && REGNO (x) != FPSR_REG
11398 && REGNO (x) != FPCR_REG));
11399
11400 if (ASSEMBLER_DIALECT == ASM_ATT)
11401 putc ('%', file);
11402
11403 if (x == pc_rtx)
11404 {
11405 gcc_assert (TARGET_64BIT);
11406 fputs ("rip", file);
11407 return;
11408 }
11409
11410 if (code == 'w' || MMX_REG_P (x))
11411 code = 2;
11412 else if (code == 'b')
11413 code = 1;
11414 else if (code == 'k')
11415 code = 4;
11416 else if (code == 'q')
11417 code = 8;
11418 else if (code == 'y')
11419 code = 3;
11420 else if (code == 'h')
11421 code = 0;
11422 else if (code == 'x')
11423 code = 16;
11424 else if (code == 't')
11425 code = 32;
11426 else
11427 code = GET_MODE_SIZE (GET_MODE (x));
11428
11429 /* Irritatingly, AMD extended registers use a different naming convention
11430 from the normal registers. */
11431 if (REX_INT_REG_P (x))
11432 {
11433 gcc_assert (TARGET_64BIT);
11434 switch (code)
11435 {
11436 case 0:
11437 error ("extended registers have no high halves");
11438 break;
11439 case 1:
11440 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11441 break;
11442 case 2:
11443 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11444 break;
11445 case 4:
11446 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11447 break;
11448 case 8:
11449 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11450 break;
11451 default:
11452 error ("unsupported operand size for extended register");
11453 break;
11454 }
11455 return;
11456 }
11457
11458 reg = NULL;
11459 switch (code)
11460 {
11461 case 3:
11462 if (STACK_TOP_P (x))
11463 {
11464 reg = "st(0)";
11465 break;
11466 }
11467 /* FALLTHRU */
11468 case 8:
11469 case 4:
11470 case 12:
11471 if (! ANY_FP_REG_P (x))
11472 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11473 /* FALLTHRU */
11474 case 16:
11475 case 2:
11476 normal:
11477 reg = hi_reg_name[REGNO (x)];
11478 break;
11479 case 1:
11480 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11481 goto normal;
11482 reg = qi_reg_name[REGNO (x)];
11483 break;
11484 case 0:
11485 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11486 goto normal;
11487 reg = qi_high_reg_name[REGNO (x)];
11488 break;
11489 case 32:
11490 if (SSE_REG_P (x))
11491 {
11492 gcc_assert (!duplicated);
11493 putc ('y', file);
11494 fputs (hi_reg_name[REGNO (x)] + 1, file);
11495 return;
11496 }
11497 break;
11498 default:
11499 gcc_unreachable ();
11500 }
11501
11502 fputs (reg, file);
11503 if (duplicated)
11504 {
11505 if (ASSEMBLER_DIALECT == ASM_ATT)
11506 fprintf (file, ", %%%s", reg);
11507 else
11508 fprintf (file, ", %s", reg);
11509 }
11510 }
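
/* Examples (illustrative): given (reg:SI ax), code 'b' prints "al",
   'w' prints "ax", 'k' prints "eax" and 'q' prints "rax" (64-bit only);
   code 'h' prints the high byte "ah".  A REX register such as r10 with
   code 'k' prints "r10d".  In AT&T syntax each name is preceded by '%'.  */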
11511
11512 /* Locate some local-dynamic symbol still in use by this function
11513 so that we can print its name in some tls_local_dynamic_base
11514 pattern. */
11515
11516 static int
11517 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11518 {
11519 rtx x = *px;
11520
11521 if (GET_CODE (x) == SYMBOL_REF
11522 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11523 {
11524 cfun->machine->some_ld_name = XSTR (x, 0);
11525 return 1;
11526 }
11527
11528 return 0;
11529 }
11530
11531 static const char *
11532 get_some_local_dynamic_name (void)
11533 {
11534 rtx insn;
11535
11536 if (cfun->machine->some_ld_name)
11537 return cfun->machine->some_ld_name;
11538
11539 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11540 if (NONDEBUG_INSN_P (insn)
11541 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11542 return cfun->machine->some_ld_name;
11543
11544 return NULL;
11545 }
11546
11547 /* Meaning of CODE:
11548 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11549 C -- print opcode suffix for set/cmov insn.
11550 c -- like C, but print reversed condition
11551 F,f -- likewise, but for floating-point.
11552 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11553 otherwise nothing
11554 R -- print the prefix for register names.
11555 z -- print the opcode suffix for the size of the current operand.
11556 Z -- likewise, with special suffixes for x87 instructions.
11557 * -- print a star (in certain assembler syntax)
11558 A -- print an absolute memory reference.
11559 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11560 s -- print a shift double count, followed by the assembler's argument
11561 delimiter.
11562 b -- print the QImode name of the register for the indicated operand.
11563 %b0 would print %al if operands[0] is reg 0.
11564 w -- likewise, print the HImode name of the register.
11565 k -- likewise, print the SImode name of the register.
11566 q -- likewise, print the DImode name of the register.
11567 x -- likewise, print the V4SFmode name of the register.
11568 t -- likewise, print the V8SFmode name of the register.
11569 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11570 y -- print "st(0)" instead of "st" as a register.
11571 d -- print duplicated register operand for AVX instruction.
11572 D -- print condition for SSE cmp instruction.
11573 P -- if PIC, print an @PLT suffix.
11574 X -- don't print any sort of PIC '@' suffix for a symbol.
11575 & -- print some in-use local-dynamic symbol name.
11576 H -- print a memory address offset by 8; used for sse high-parts
11577 Y -- print condition for XOP pcom* instruction.
11578 + -- print a branch hint as 'cs' or 'ds' prefix
11579 ; -- print a semicolon (after prefixes due to bug in older gas).
11580 */
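
/* For example (illustrative): if operands[0] is (reg:SI ax), then in
   AT&T syntax "%k0" prints "%eax", "%b0" prints "%al", and "%z0" emits
   the size suffix 'l', so a template like "mov%z0" becomes "movl";
   Intel syntax omits both the suffix and the '%' register prefix.  */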
11581
11582 void
11583 ix86_print_operand (FILE *file, rtx x, int code)
11584 {
11585 if (code)
11586 {
11587 switch (code)
11588 {
11589 case '*':
11590 if (ASSEMBLER_DIALECT == ASM_ATT)
11591 putc ('*', file);
11592 return;
11593
11594 case '&':
11595 {
11596 const char *name = get_some_local_dynamic_name ();
11597 if (name == NULL)
11598 output_operand_lossage ("'%%&' used without any "
11599 "local dynamic TLS references");
11600 else
11601 assemble_name (file, name);
11602 return;
11603 }
11604
11605 case 'A':
11606 switch (ASSEMBLER_DIALECT)
11607 {
11608 case ASM_ATT:
11609 putc ('*', file);
11610 break;
11611
11612 case ASM_INTEL:
11613 /* Intel syntax. For absolute addresses, registers should not
11614 be surrounded by braces. */
11615 if (!REG_P (x))
11616 {
11617 putc ('[', file);
11618 ix86_print_operand (file, x, 0);
11619 putc (']', file);
11620 return;
11621 }
11622 break;
11623
11624 default:
11625 gcc_unreachable ();
11626 }
11627
11628 ix86_print_operand (file, x, 0);
11629 return;
11630
11631
11632 case 'L':
11633 if (ASSEMBLER_DIALECT == ASM_ATT)
11634 putc ('l', file);
11635 return;
11636
11637 case 'W':
11638 if (ASSEMBLER_DIALECT == ASM_ATT)
11639 putc ('w', file);
11640 return;
11641
11642 case 'B':
11643 if (ASSEMBLER_DIALECT == ASM_ATT)
11644 putc ('b', file);
11645 return;
11646
11647 case 'Q':
11648 if (ASSEMBLER_DIALECT == ASM_ATT)
11649 putc ('l', file);
11650 return;
11651
11652 case 'S':
11653 if (ASSEMBLER_DIALECT == ASM_ATT)
11654 putc ('s', file);
11655 return;
11656
11657 case 'T':
11658 if (ASSEMBLER_DIALECT == ASM_ATT)
11659 putc ('t', file);
11660 return;
11661
11662 case 'z':
11663 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11664 {
11665 /* Opcodes don't get size suffixes in Intel syntax. */
11666 if (ASSEMBLER_DIALECT == ASM_INTEL)
11667 return;
11668
11669 switch (GET_MODE_SIZE (GET_MODE (x)))
11670 {
11671 case 1:
11672 putc ('b', file);
11673 return;
11674
11675 case 2:
11676 putc ('w', file);
11677 return;
11678
11679 case 4:
11680 putc ('l', file);
11681 return;
11682
11683 case 8:
11684 putc ('q', file);
11685 return;
11686
11687 default:
11688 output_operand_lossage
11689 ("invalid operand size for operand code '%c'", code);
11690 return;
11691 }
11692 }
11693
11694 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11695 warning
11696 (0, "non-integer operand used with operand code '%c'", code);
11697 /* FALLTHRU */
11698
11699 case 'Z':
11700 /* 387 opcodes don't get size suffixes in Intel syntax. */
11701 if (ASSEMBLER_DIALECT == ASM_INTEL)
11702 return;
11703
11704 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11705 {
11706 switch (GET_MODE_SIZE (GET_MODE (x)))
11707 {
11708 case 2:
11709 #ifdef HAVE_AS_IX86_FILDS
11710 putc ('s', file);
11711 #endif
11712 return;
11713
11714 case 4:
11715 putc ('l', file);
11716 return;
11717
11718 case 8:
11719 #ifdef HAVE_AS_IX86_FILDQ
11720 putc ('q', file);
11721 #else
11722 fputs ("ll", file);
11723 #endif
11724 return;
11725
11726 default:
11727 break;
11728 }
11729 }
11730 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11731 {
11732 /* 387 opcodes don't get size suffixes
11733 if the operands are registers. */
11734 if (STACK_REG_P (x))
11735 return;
11736
11737 switch (GET_MODE_SIZE (GET_MODE (x)))
11738 {
11739 case 4:
11740 putc ('s', file);
11741 return;
11742
11743 case 8:
11744 putc ('l', file);
11745 return;
11746
11747 case 12:
11748 case 16:
11749 putc ('t', file);
11750 return;
11751
11752 default:
11753 break;
11754 }
11755 }
11756 else
11757 {
11758 output_operand_lossage
11759 ("invalid operand type used with operand code '%c'", code);
11760 return;
11761 }
11762
11763 output_operand_lossage
11764 ("invalid operand size for operand code '%c'", code);
11765 return;
11766
11767 case 'd':
11768 case 'b':
11769 case 'w':
11770 case 'k':
11771 case 'q':
11772 case 'h':
11773 case 't':
11774 case 'y':
11775 case 'x':
11776 case 'X':
11777 case 'P':
11778 break;
11779
11780 case 's':
11781 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11782 {
11783 ix86_print_operand (file, x, 0);
11784 fputs (", ", file);
11785 }
11786 return;
11787
11788 case 'D':
11789 /* A little bit of brain damage here. The SSE compare instructions
11790 use completely different names for the comparisons than the
11791 fp conditional moves do. */
11792 if (TARGET_AVX)
11793 {
11794 switch (GET_CODE (x))
11795 {
11796 case EQ:
11797 fputs ("eq", file);
11798 break;
11799 case UNEQ:
11800 fputs ("eq_us", file);
11801 break;
11802 case LT:
11803 fputs ("lt", file);
11804 break;
11805 case UNLT:
11806 fputs ("nge", file);
11807 break;
11808 case LE:
11809 fputs ("le", file);
11810 break;
11811 case UNLE:
11812 fputs ("ngt", file);
11813 break;
11814 case UNORDERED:
11815 fputs ("unord", file);
11816 break;
11817 case NE:
11818 fputs ("neq", file);
11819 break;
11820 case LTGT:
11821 fputs ("neq_oq", file);
11822 break;
11823 case GE:
11824 fputs ("ge", file);
11825 break;
11826 case UNGE:
11827 fputs ("nlt", file);
11828 break;
11829 case GT:
11830 fputs ("gt", file);
11831 break;
11832 case UNGT:
11833 fputs ("nle", file);
11834 break;
11835 case ORDERED:
11836 fputs ("ord", file);
11837 break;
11838 default:
11839 output_operand_lossage ("operand is not a condition code, "
11840 "invalid operand code 'D'");
11841 return;
11842 }
11843 }
11844 else
11845 {
11846 switch (GET_CODE (x))
11847 {
11848 case EQ:
11849 case UNEQ:
11850 fputs ("eq", file);
11851 break;
11852 case LT:
11853 case UNLT:
11854 fputs ("lt", file);
11855 break;
11856 case LE:
11857 case UNLE:
11858 fputs ("le", file);
11859 break;
11860 case UNORDERED:
11861 fputs ("unord", file);
11862 break;
11863 case NE:
11864 case LTGT:
11865 fputs ("neq", file);
11866 break;
11867 case UNGE:
11868 case GE:
11869 fputs ("nlt", file);
11870 break;
11871 case UNGT:
11872 case GT:
11873 fputs ("nle", file);
11874 break;
11875 case ORDERED:
11876 fputs ("ord", file);
11877 break;
11878 default:
11879 output_operand_lossage ("operand is not a condition code, "
11880 "invalid operand code 'D'");
11881 return;
11882 }
11883 }
11884 return;
11885 case 'O':
11886 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11887 if (ASSEMBLER_DIALECT == ASM_ATT)
11888 {
11889 switch (GET_MODE (x))
11890 {
11891 case HImode: putc ('w', file); break;
11892 case SImode:
11893 case SFmode: putc ('l', file); break;
11894 case DImode:
11895 case DFmode: putc ('q', file); break;
11896 default: gcc_unreachable ();
11897 }
11898 putc ('.', file);
11899 }
11900 #endif
11901 return;
11902 case 'C':
11903 if (!COMPARISON_P (x))
11904 {
11905 output_operand_lossage ("operand is neither a constant nor a "
11906 "condition code, invalid operand code "
11907 "'C'");
11908 return;
11909 }
11910 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11911 return;
11912 case 'F':
11913 if (!COMPARISON_P (x))
11914 {
11915 output_operand_lossage ("operand is neither a constant nor a "
11916 "condition code, invalid operand code "
11917 "'F'");
11918 return;
11919 }
11920 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11921 if (ASSEMBLER_DIALECT == ASM_ATT)
11922 putc ('.', file);
11923 #endif
11924 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11925 return;
11926
11927 /* Like above, but reverse condition */
11928 case 'c':
11929 /* Check to see if argument to %c is really a constant
11930 and not a condition code which needs to be reversed. */
11931 if (!COMPARISON_P (x))
11932 {
11933 output_operand_lossage ("operand is neither a constant nor a "
11934 "condition code, invalid operand "
11935 "code 'c'");
11936 return;
11937 }
11938 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11939 return;
11940 case 'f':
11941 if (!COMPARISON_P (x))
11942 {
11943 output_operand_lossage ("operand is neither a constant nor a "
11944 "condition code, invalid operand "
11945 "code 'f'");
11946 return;
11947 }
11948 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11949 if (ASSEMBLER_DIALECT == ASM_ATT)
11950 putc ('.', file);
11951 #endif
11952 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11953 return;
11954
11955 case 'H':
11956 /* It doesn't actually matter what mode we use here, as we're
11957 only going to use this for printing. */
11958 x = adjust_address_nv (x, DImode, 8);
11959 break;
11960
11961 case '+':
11962 {
11963 rtx x;
11964
11965 if (!optimize
11966 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11967 return;
11968
11969 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11970 if (x)
11971 {
11972 int pred_val = INTVAL (XEXP (x, 0));
11973
11974 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11975 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11976 {
11977 int taken = pred_val > REG_BR_PROB_BASE / 2;
11978 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11979
11980 /* Emit hints only where the default branch prediction
11981 heuristics would fail. */
11982 if (taken != cputaken)
11983 {
11984 /* We use 3e (DS) prefix for taken branches and
11985 2e (CS) prefix for not taken branches. */
11986 if (taken)
11987 fputs ("ds ; ", file);
11988 else
11989 fputs ("cs ; ", file);
11990 }
11991 }
11992 }
11993 return;
11994 }
11995
11996 case 'Y':
11997 switch (GET_CODE (x))
11998 {
11999 case NE:
12000 fputs ("neq", file);
12001 break;
12002 case EQ:
12003 fputs ("eq", file);
12004 break;
12005 case GE:
12006 case GEU:
12007 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
12008 break;
12009 case GT:
12010 case GTU:
12011 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
12012 break;
12013 case LE:
12014 case LEU:
12015 fputs ("le", file);
12016 break;
12017 case LT:
12018 case LTU:
12019 fputs ("lt", file);
12020 break;
12021 case UNORDERED:
12022 fputs ("unord", file);
12023 break;
12024 case ORDERED:
12025 fputs ("ord", file);
12026 break;
12027 case UNEQ:
12028 fputs ("ueq", file);
12029 break;
12030 case UNGE:
12031 fputs ("nlt", file);
12032 break;
12033 case UNGT:
12034 fputs ("nle", file);
12035 break;
12036 case UNLE:
12037 fputs ("ule", file);
12038 break;
12039 case UNLT:
12040 fputs ("ult", file);
12041 break;
12042 case LTGT:
12043 fputs ("une", file);
12044 break;
12045 default:
12046 output_operand_lossage ("operand is not a condition code, "
12047 "invalid operand code 'Y'");
12048 return;
12049 }
12050 return;
12051
12052 case ';':
12053 #if TARGET_MACHO || !HAVE_AS_IX86_REP_LOCK_PREFIX
12054 fputs (";", file);
12055 #endif
12056 return;
12057
12058 default:
12059 output_operand_lossage ("invalid operand code '%c'", code);
12060 }
12061 }
12062
12063 if (REG_P (x))
12064 print_reg (x, code, file);
12065
12066 else if (MEM_P (x))
12067 {
12068 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
12069 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
12070 && GET_MODE (x) != BLKmode)
12071 {
12072 const char * size;
12073 switch (GET_MODE_SIZE (GET_MODE (x)))
12074 {
12075 case 1: size = "BYTE"; break;
12076 case 2: size = "WORD"; break;
12077 case 4: size = "DWORD"; break;
12078 case 8: size = "QWORD"; break;
12079 case 12: size = "TBYTE"; break;
12080 case 16:
12081 if (GET_MODE (x) == XFmode)
12082 size = "TBYTE";
12083 else
12084 size = "XMMWORD";
12085 break;
12086 case 32: size = "YMMWORD"; break;
12087 default:
12088 gcc_unreachable ();
12089 }
12090
12091 /* Check for explicit size override (codes 'b', 'w' and 'k') */
12092 if (code == 'b')
12093 size = "BYTE";
12094 else if (code == 'w')
12095 size = "WORD";
12096 else if (code == 'k')
12097 size = "DWORD";
12098
12099 fputs (size, file);
12100 fputs (" PTR ", file);
12101 }
12102
12103 x = XEXP (x, 0);
12104 /* Avoid (%rip) for call operands. */
12105 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12106 && !CONST_INT_P (x))
12107 output_addr_const (file, x);
12108 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12109 output_operand_lossage ("invalid constraints for operand");
12110 else
12111 output_address (x);
12112 }
12113
12114 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12115 {
12116 REAL_VALUE_TYPE r;
12117 long l;
12118
12119 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12120 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12121
12122 if (ASSEMBLER_DIALECT == ASM_ATT)
12123 putc ('$', file);
12124 fprintf (file, "0x%08lx", (long unsigned int) l);
12125 }
12126
12127 /* These float cases don't actually occur as immediate operands. */
12128 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12129 {
12130 char dstr[30];
12131
12132 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12133 fputs (dstr, file);
12134 }
12135
12136 else if (GET_CODE (x) == CONST_DOUBLE
12137 && GET_MODE (x) == XFmode)
12138 {
12139 char dstr[30];
12140
12141 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12142 fputs (dstr, file);
12143 }
12144
12145 else
12146 {
12147 /* We have patterns that allow zero sets of memory, for instance.
12148 In 64-bit mode, we should probably support all 8-byte vectors,
12149 since we can in fact encode that into an immediate. */
12150 if (GET_CODE (x) == CONST_VECTOR)
12151 {
12152 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12153 x = const0_rtx;
12154 }
12155
12156 if (code != 'P')
12157 {
12158 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12159 {
12160 if (ASSEMBLER_DIALECT == ASM_ATT)
12161 putc ('$', file);
12162 }
12163 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12164 || GET_CODE (x) == LABEL_REF)
12165 {
12166 if (ASSEMBLER_DIALECT == ASM_ATT)
12167 putc ('$', file);
12168 else
12169 fputs ("OFFSET FLAT:", file);
12170 }
12171 }
12172 if (CONST_INT_P (x))
12173 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12174 else if (flag_pic)
12175 output_pic_addr_const (file, x, code);
12176 else
12177 output_addr_const (file, x);
12178 }
12179 }
12180
12181 static bool
12182 ix86_print_operand_punct_valid_p (unsigned char code)
12183 {
12184 return (code == '*' || code == '+' || code == '&' || code == ';');
12185 }
12186 \f
12187 /* Print a memory operand whose address is ADDR. */
12188
12189 static void
12190 ix86_print_operand_address (FILE *file, rtx addr)
12191 {
12192 struct ix86_address parts;
12193 rtx base, index, disp;
12194 int scale;
12195 int ok = ix86_decompose_address (addr, &parts);
12196
12197 gcc_assert (ok);
12198
12199 base = parts.base;
12200 index = parts.index;
12201 disp = parts.disp;
12202 scale = parts.scale;
12203
12204 switch (parts.seg)
12205 {
12206 case SEG_DEFAULT:
12207 break;
12208 case SEG_FS:
12209 case SEG_GS:
12210 if (ASSEMBLER_DIALECT == ASM_ATT)
12211 putc ('%', file);
12212 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12213 break;
12214 default:
12215 gcc_unreachable ();
12216 }
12217
12218 /* Use RIP-relative addressing in 64-bit mode; it is one byte shorter. */
12219 if (TARGET_64BIT && !base && !index)
12220 {
12221 rtx symbol = disp;
12222
12223 if (GET_CODE (disp) == CONST
12224 && GET_CODE (XEXP (disp, 0)) == PLUS
12225 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12226 symbol = XEXP (XEXP (disp, 0), 0);
12227
12228 if (GET_CODE (symbol) == LABEL_REF
12229 || (GET_CODE (symbol) == SYMBOL_REF
12230 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12231 base = pc_rtx;
12232 }
12233 if (!base && !index)
12234 {
12235 /* A displacement-only address requires special attention. */
12236
12237 if (CONST_INT_P (disp))
12238 {
12239 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12240 fputs ("ds:", file);
12241 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12242 }
12243 else if (flag_pic)
12244 output_pic_addr_const (file, disp, 0);
12245 else
12246 output_addr_const (file, disp);
12247 }
12248 else
12249 {
12250 if (ASSEMBLER_DIALECT == ASM_ATT)
12251 {
12252 if (disp)
12253 {
12254 if (flag_pic)
12255 output_pic_addr_const (file, disp, 0);
12256 else if (GET_CODE (disp) == LABEL_REF)
12257 output_asm_label (disp);
12258 else
12259 output_addr_const (file, disp);
12260 }
12261
12262 putc ('(', file);
12263 if (base)
12264 print_reg (base, 0, file);
12265 if (index)
12266 {
12267 putc (',', file);
12268 print_reg (index, 0, file);
12269 if (scale != 1)
12270 fprintf (file, ",%d", scale);
12271 }
12272 putc (')', file);
12273 }
12274 else
12275 {
12276 rtx offset = NULL_RTX;
12277
12278 if (disp)
12279 {
12280 /* Pull out the offset of a symbol; print any symbol itself. */
12281 if (GET_CODE (disp) == CONST
12282 && GET_CODE (XEXP (disp, 0)) == PLUS
12283 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12284 {
12285 offset = XEXP (XEXP (disp, 0), 1);
12286 disp = gen_rtx_CONST (VOIDmode,
12287 XEXP (XEXP (disp, 0), 0));
12288 }
12289
12290 if (flag_pic)
12291 output_pic_addr_const (file, disp, 0);
12292 else if (GET_CODE (disp) == LABEL_REF)
12293 output_asm_label (disp);
12294 else if (CONST_INT_P (disp))
12295 offset = disp;
12296 else
12297 output_addr_const (file, disp);
12298 }
12299
12300 putc ('[', file);
12301 if (base)
12302 {
12303 print_reg (base, 0, file);
12304 if (offset)
12305 {
12306 if (INTVAL (offset) >= 0)
12307 putc ('+', file);
12308 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12309 }
12310 }
12311 else if (offset)
12312 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12313 else
12314 putc ('0', file);
12315
12316 if (index)
12317 {
12318 putc ('+', file);
12319 print_reg (index, 0, file);
12320 if (scale != 1)
12321 fprintf (file, "*%d", scale);
12322 }
12323 putc (']', file);
12324 }
12325 }
12326 }
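
/* Example output (illustrative): for base %ebp, index %eax, scale 4 and
   displacement -4, the AT&T form printed above is "-4(%ebp,%eax,4)"
   while the Intel form is "[ebp-4+eax*4]"; a pure constant displacement
   prints as "ds:N" in Intel syntax and as the bare constant in AT&T
   syntax.  */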
12327
12328 bool
12329 output_addr_const_extra (FILE *file, rtx x)
12330 {
12331 rtx op;
12332
12333 if (GET_CODE (x) != UNSPEC)
12334 return false;
12335
12336 op = XVECEXP (x, 0, 0);
12337 switch (XINT (x, 1))
12338 {
12339 case UNSPEC_GOTTPOFF:
12340 output_addr_const (file, op);
12341 /* FIXME: This might be @TPOFF in Sun ld. */
12342 fputs ("@gottpoff", file);
12343 break;
12344 case UNSPEC_TPOFF:
12345 output_addr_const (file, op);
12346 fputs ("@tpoff", file);
12347 break;
12348 case UNSPEC_NTPOFF:
12349 output_addr_const (file, op);
12350 if (TARGET_64BIT)
12351 fputs ("@tpoff", file);
12352 else
12353 fputs ("@ntpoff", file);
12354 break;
12355 case UNSPEC_DTPOFF:
12356 output_addr_const (file, op);
12357 fputs ("@dtpoff", file);
12358 break;
12359 case UNSPEC_GOTNTPOFF:
12360 output_addr_const (file, op);
12361 if (TARGET_64BIT)
12362 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12363 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12364 else
12365 fputs ("@gotntpoff", file);
12366 break;
12367 case UNSPEC_INDNTPOFF:
12368 output_addr_const (file, op);
12369 fputs ("@indntpoff", file);
12370 break;
12371 #if TARGET_MACHO
12372 case UNSPEC_MACHOPIC_OFFSET:
12373 output_addr_const (file, op);
12374 putc ('-', file);
12375 machopic_output_function_base_name (file);
12376 break;
12377 #endif
12378
12379 default:
12380 return false;
12381 }
12382
12383 return true;
12384 }
12385 \f
12386 /* Split one or more DImode RTL references into pairs of SImode
12387 references. The RTL can be REG, offsettable MEM, integer constant, or
12388 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12389 split and "num" is its length. lo_half and hi_half are output arrays
12390 that parallel "operands". */
12391
12392 void
12393 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12394 {
12395 while (num--)
12396 {
12397 rtx op = operands[num];
12398
12399 /* simplify_subreg refuses to split volatile memory addresses,
12400 but we still have to handle them. */
12401 if (MEM_P (op))
12402 {
12403 lo_half[num] = adjust_address (op, SImode, 0);
12404 hi_half[num] = adjust_address (op, SImode, 4);
12405 }
12406 else
12407 {
12408 lo_half[num] = simplify_gen_subreg (SImode, op,
12409 GET_MODE (op) == VOIDmode
12410 ? DImode : GET_MODE (op), 0);
12411 hi_half[num] = simplify_gen_subreg (SImode, op,
12412 GET_MODE (op) == VOIDmode
12413 ? DImode : GET_MODE (op), 4);
12414 }
12415 }
12416 }
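
/* For instance (illustrative): a (mem:DI addr) operand is split above
   into (mem:SI addr) and (mem:SI addr+4) for the low and high halves
   respectively (little endian), while registers and constants go
   through simplify_gen_subreg at byte offsets 0 and 4.  */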
12417 /* Split one or more TImode RTL references into pairs of DImode
12418 references. The RTL can be REG, offsettable MEM, integer constant, or
12419 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12420 split and "num" is its length. lo_half and hi_half are output arrays
12421 that parallel "operands". */
12422
12423 void
12424 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12425 {
12426 while (num--)
12427 {
12428 rtx op = operands[num];
12429
12430 /* simplify_subreg refuses to split volatile memory addresses, but we
12431 still have to handle them. */
12432 if (MEM_P (op))
12433 {
12434 lo_half[num] = adjust_address (op, DImode, 0);
12435 hi_half[num] = adjust_address (op, DImode, 8);
12436 }
12437 else
12438 {
12439 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12440 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12441 }
12442 }
12443 }
12444 \f
12445 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12446 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12447 is the expression of the binary operation. The output may either be
12448 emitted here, or returned to the caller, like all output_* functions.
12449
12450 There is no guarantee that the operands are the same mode, as they
12451 might be within FLOAT or FLOAT_EXTEND expressions. */
12452
12453 #ifndef SYSV386_COMPAT
12454 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12455 wants to fix the assemblers because that causes incompatibility
12456 with gcc. No-one wants to fix gcc because that causes
12457 incompatibility with assemblers... You can use the option of
12458 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12459 #define SYSV386_COMPAT 1
12460 #endif
12461
12462 const char *
12463 output_387_binary_op (rtx insn, rtx *operands)
12464 {
12465 static char buf[40];
12466 const char *p;
12467 const char *ssep;
12468 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12469
12470 #ifdef ENABLE_CHECKING
12471 /* Even if we do not want to check the inputs, this documents the input
12472 constraints, which helps in understanding the following code. */
12473 if (STACK_REG_P (operands[0])
12474 && ((REG_P (operands[1])
12475 && REGNO (operands[0]) == REGNO (operands[1])
12476 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12477 || (REG_P (operands[2])
12478 && REGNO (operands[0]) == REGNO (operands[2])
12479 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12480 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12481 ; /* ok */
12482 else
12483 gcc_assert (is_sse);
12484 #endif
12485
12486 switch (GET_CODE (operands[3]))
12487 {
12488 case PLUS:
12489 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12490 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12491 p = "fiadd";
12492 else
12493 p = "fadd";
12494 ssep = "vadd";
12495 break;
12496
12497 case MINUS:
12498 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12499 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12500 p = "fisub";
12501 else
12502 p = "fsub";
12503 ssep = "vsub";
12504 break;
12505
12506 case MULT:
12507 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12508 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12509 p = "fimul";
12510 else
12511 p = "fmul";
12512 ssep = "vmul";
12513 break;
12514
12515 case DIV:
12516 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12517 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12518 p = "fidiv";
12519 else
12520 p = "fdiv";
12521 ssep = "vdiv";
12522 break;
12523
12524 default:
12525 gcc_unreachable ();
12526 }
12527
12528 if (is_sse)
12529 {
12530 if (TARGET_AVX)
12531 {
12532 strcpy (buf, ssep);
12533 if (GET_MODE (operands[0]) == SFmode)
12534 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12535 else
12536 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12537 }
12538 else
12539 {
12540 strcpy (buf, ssep + 1);
12541 if (GET_MODE (operands[0]) == SFmode)
12542 strcat (buf, "ss\t{%2, %0|%0, %2}");
12543 else
12544 strcat (buf, "sd\t{%2, %0|%0, %2}");
12545 }
12546 return buf;
12547 }
12548 strcpy (buf, p);
12549
12550 switch (GET_CODE (operands[3]))
12551 {
12552 case MULT:
12553 case PLUS:
12554 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12555 {
12556 rtx temp = operands[2];
12557 operands[2] = operands[1];
12558 operands[1] = temp;
12559 }
12560
12561 /* We know operands[0] == operands[1]. */
12562
12563 if (MEM_P (operands[2]))
12564 {
12565 p = "%Z2\t%2";
12566 break;
12567 }
12568
12569 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12570 {
12571 if (STACK_TOP_P (operands[0]))
12572 /* How is it that we are storing to a dead operand[2]?
12573 Well, presumably operands[1] is dead too. We can't
12574 store the result to st(0) as st(0) gets popped on this
12575 instruction. Instead store to operands[2] (which I
12576 think has to be st(1)). st(1) will be popped later.
12577 gcc <= 2.8.1 didn't have this check and generated
12578 assembly code that the Unixware assembler rejected. */
12579 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12580 else
12581 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12582 break;
12583 }
12584
12585 if (STACK_TOP_P (operands[0]))
12586 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12587 else
12588 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12589 break;
12590
12591 case MINUS:
12592 case DIV:
12593 if (MEM_P (operands[1]))
12594 {
12595 p = "r%Z1\t%1";
12596 break;
12597 }
12598
12599 if (MEM_P (operands[2]))
12600 {
12601 p = "%Z2\t%2";
12602 break;
12603 }
12604
12605 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12606 {
12607 #if SYSV386_COMPAT
12608 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12609 derived assemblers, confusingly reverse the direction of
12610 the operation for fsub{r} and fdiv{r} when the
12611 destination register is not st(0). The Intel assembler
12612 doesn't have this brain damage. Read !SYSV386_COMPAT to
12613 figure out what the hardware really does. */
12614 if (STACK_TOP_P (operands[0]))
12615 p = "{p\t%0, %2|rp\t%2, %0}";
12616 else
12617 p = "{rp\t%2, %0|p\t%0, %2}";
12618 #else
12619 if (STACK_TOP_P (operands[0]))
12620 /* As above for fmul/fadd, we can't store to st(0). */
12621 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12622 else
12623 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12624 #endif
12625 break;
12626 }
12627
12628 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12629 {
12630 #if SYSV386_COMPAT
12631 if (STACK_TOP_P (operands[0]))
12632 p = "{rp\t%0, %1|p\t%1, %0}";
12633 else
12634 p = "{p\t%1, %0|rp\t%0, %1}";
12635 #else
12636 if (STACK_TOP_P (operands[0]))
12637 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12638 else
12639 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12640 #endif
12641 break;
12642 }
12643
12644 if (STACK_TOP_P (operands[0]))
12645 {
12646 if (STACK_TOP_P (operands[1]))
12647 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12648 else
12649 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12650 break;
12651 }
12652 else if (STACK_TOP_P (operands[1]))
12653 {
12654 #if SYSV386_COMPAT
12655 p = "{\t%1, %0|r\t%0, %1}";
12656 #else
12657 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12658 #endif
12659 }
12660 else
12661 {
12662 #if SYSV386_COMPAT
12663 p = "{r\t%2, %0|\t%0, %2}";
12664 #else
12665 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12666 #endif
12667 }
12668 break;
12669
12670 default:
12671 gcc_unreachable ();
12672 }
12673
12674 strcat (buf, p);
12675 return buf;
12676 }
12677
12678 /* Return needed mode for entity in optimize_mode_switching pass. */
12679
12680 int
12681 ix86_mode_needed (int entity, rtx insn)
12682 {
12683 enum attr_i387_cw mode;
12684
12685 /* The mode UNINITIALIZED is used to store control word after a
12686 function call or ASM pattern. The mode ANY specifies that the function
12687 has no requirements on the control word and makes no changes in the
12688 bits we are interested in. */
12689
12690 if (CALL_P (insn)
12691 || (NONJUMP_INSN_P (insn)
12692 && (asm_noperands (PATTERN (insn)) >= 0
12693 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12694 return I387_CW_UNINITIALIZED;
12695
12696 if (recog_memoized (insn) < 0)
12697 return I387_CW_ANY;
12698
12699 mode = get_attr_i387_cw (insn);
12700
12701 switch (entity)
12702 {
12703 case I387_TRUNC:
12704 if (mode == I387_CW_TRUNC)
12705 return mode;
12706 break;
12707
12708 case I387_FLOOR:
12709 if (mode == I387_CW_FLOOR)
12710 return mode;
12711 break;
12712
12713 case I387_CEIL:
12714 if (mode == I387_CW_CEIL)
12715 return mode;
12716 break;
12717
12718 case I387_MASK_PM:
12719 if (mode == I387_CW_MASK_PM)
12720 return mode;
12721 break;
12722
12723 default:
12724 gcc_unreachable ();
12725 }
12726
12727 return I387_CW_ANY;
12728 }
12729
12730 /* Output code to initialize control word copies used by trunc?f?i and
12731 rounding patterns. MODE is the I387_CW_* value selecting which
12732 control word variant to prepare. */
12733
12734 void
12735 emit_i387_cw_initialization (int mode)
12736 {
12737 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12738 rtx new_mode;
12739
12740 enum ix86_stack_slot slot;
12741
12742 rtx reg = gen_reg_rtx (HImode);
12743
12744 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12745 emit_move_insn (reg, copy_rtx (stored_mode));
12746
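 /* In the x87 control word, bits 10-11 form the rounding-control field
    (00 = to nearest, 01 = down, 10 = up, 11 = toward zero) and bit 5
    (0x0020) is the precision-exception mask; the constants used below
    set those fields.  */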
12747 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12748 || optimize_function_for_size_p (cfun))
12749 {
12750 switch (mode)
12751 {
12752 case I387_CW_TRUNC:
12753 /* round toward zero (truncate) */
12754 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12755 slot = SLOT_CW_TRUNC;
12756 break;
12757
12758 case I387_CW_FLOOR:
12759 /* round down toward -oo */
12760 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12761 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12762 slot = SLOT_CW_FLOOR;
12763 break;
12764
12765 case I387_CW_CEIL:
12766 /* round up toward +oo */
12767 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12768 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12769 slot = SLOT_CW_CEIL;
12770 break;
12771
12772 case I387_CW_MASK_PM:
12773 /* mask precision exception for nearbyint() */
12774 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12775 slot = SLOT_CW_MASK_PM;
12776 break;
12777
12778 default:
12779 gcc_unreachable ();
12780 }
12781 }
12782 else
12783 {
12784 switch (mode)
12785 {
12786 case I387_CW_TRUNC:
12787 /* round toward zero (truncate) */
12788 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12789 slot = SLOT_CW_TRUNC;
12790 break;
12791
12792 case I387_CW_FLOOR:
12793 /* round down toward -oo */
12794 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12795 slot = SLOT_CW_FLOOR;
12796 break;
12797
12798 case I387_CW_CEIL:
12799 /* round up toward +oo */
12800 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12801 slot = SLOT_CW_CEIL;
12802 break;
12803
12804 case I387_CW_MASK_PM:
12805 /* mask precision exception for nearbyint() */
12806 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12807 slot = SLOT_CW_MASK_PM;
12808 break;
12809
12810 default:
12811 gcc_unreachable ();
12812 }
12813 }
12814
12815 gcc_assert (slot < MAX_386_STACK_LOCALS);
12816
12817 new_mode = assign_386_stack_local (HImode, slot);
12818 emit_move_insn (new_mode, reg);
12819 }
12820
12821 /* Output code for INSN to convert a float to a signed int. OPERANDS
12822 are the insn operands. The output may be [HSD]Imode and the input
12823 operand may be [SDX]Fmode. */
12824
12825 const char *
12826 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12827 {
12828 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12829 int dimode_p = GET_MODE (operands[0]) == DImode;
12830 int round_mode = get_attr_i387_cw (insn);
12831
12832 /* Jump through a hoop or two for DImode, since the hardware has no
12833 non-popping instruction. We used to do this a different way, but
12834 that was somewhat fragile and broke with post-reload splitters. */
12835 if ((dimode_p || fisttp) && !stack_top_dies)
12836 output_asm_insn ("fld\t%y1", operands);
12837
12838 gcc_assert (STACK_TOP_P (operands[1]));
12839 gcc_assert (MEM_P (operands[0]));
12840 gcc_assert (GET_MODE (operands[1]) != TFmode);
12841
12842 if (fisttp)
12843 output_asm_insn ("fisttp%Z0\t%0", operands);
12844 else
12845 {
12846 if (round_mode != I387_CW_ANY)
12847 output_asm_insn ("fldcw\t%3", operands);
12848 if (stack_top_dies || dimode_p)
12849 output_asm_insn ("fistp%Z0\t%0", operands);
12850 else
12851 output_asm_insn ("fist%Z0\t%0", operands);
12852 if (round_mode != I387_CW_ANY)
12853 output_asm_insn ("fldcw\t%2", operands);
12854 }
12855
12856 return "";
12857 }
12858
12859 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12860 have the values zero or one, indicates the ffreep insn's operand
12861 from the OPERANDS array. */
12862
12863 static const char *
12864 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12865 {
12866 if (TARGET_USE_FFREEP)
12867 #ifdef HAVE_AS_IX86_FFREEP
12868 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12869 #else
12870 {
12871 static char retval[32];
12872 int regno = REGNO (operands[opno]);
12873
12874 gcc_assert (FP_REGNO_P (regno));
12875
12876 regno -= FIRST_STACK_REG;
12877
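 /* This emits the raw two-byte encoding of ffreep %st(i), 0xdf 0xc0+i,
    as data for assemblers that do not know the mnemonic; on little-endian
    x86 the 16-bit value formed as 0xc<regno>df stores the two bytes in
    the right order.  */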
12878 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12879 return retval;
12880 }
12881 #endif
12882
12883 return opno ? "fstp\t%y1" : "fstp\t%y0";
12884 }
12885
12886
12887 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12888 should be used. UNORDERED_P is true when fucom should be used. */
12889
12890 const char *
12891 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12892 {
12893 int stack_top_dies;
12894 rtx cmp_op0, cmp_op1;
12895 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12896
12897 if (eflags_p)
12898 {
12899 cmp_op0 = operands[0];
12900 cmp_op1 = operands[1];
12901 }
12902 else
12903 {
12904 cmp_op0 = operands[1];
12905 cmp_op1 = operands[2];
12906 }
12907
12908 if (is_sse)
12909 {
12910 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12911 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12912 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12913 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12914
12915 if (GET_MODE (operands[0]) == SFmode)
12916 if (unordered_p)
12917 return &ucomiss[TARGET_AVX ? 0 : 1];
12918 else
12919 return &comiss[TARGET_AVX ? 0 : 1];
12920 else
12921 if (unordered_p)
12922 return &ucomisd[TARGET_AVX ? 0 : 1];
12923 else
12924 return &comisd[TARGET_AVX ? 0 : 1];
12925 }
12926
12927 gcc_assert (STACK_TOP_P (cmp_op0));
12928
12929 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12930
12931 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12932 {
12933 if (stack_top_dies)
12934 {
12935 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12936 return output_387_ffreep (operands, 1);
12937 }
12938 else
12939 return "ftst\n\tfnstsw\t%0";
12940 }
12941
12942 if (STACK_REG_P (cmp_op1)
12943 && stack_top_dies
12944 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12945 && REGNO (cmp_op1) != FIRST_STACK_REG)
12946 {
12947 /* If the top of the 387 stack dies, and the other operand is
12948 also a stack register that dies, then this must be a
12949 `fcompp' float compare. */
12950
12951 if (eflags_p)
12952 {
12953 /* There is no double popping fcomi variant. Fortunately,
12954 eflags is immune from the fstp's cc clobbering. */
12955 if (unordered_p)
12956 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12957 else
12958 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12959 return output_387_ffreep (operands, 0);
12960 }
12961 else
12962 {
12963 if (unordered_p)
12964 return "fucompp\n\tfnstsw\t%0";
12965 else
12966 return "fcompp\n\tfnstsw\t%0";
12967 }
12968 }
12969 else
12970 {
12971 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12972
12973 static const char * const alt[16] =
12974 {
12975 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12976 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12977 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12978 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12979
12980 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12981 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12982 NULL,
12983 NULL,
12984
12985 "fcomi\t{%y1, %0|%0, %y1}",
12986 "fcomip\t{%y1, %0|%0, %y1}",
12987 "fucomi\t{%y1, %0|%0, %y1}",
12988 "fucomip\t{%y1, %0|%0, %y1}",
12989
12990 NULL,
12991 NULL,
12992 NULL,
12993 NULL
12994 };
12995
12996 int mask;
12997 const char *ret;
12998
12999 mask = eflags_p << 3;
13000 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
13001 mask |= unordered_p << 1;
13002 mask |= stack_top_dies;
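 /* For example, an fcomi-style (eflags) unordered compare where the top
    of the stack dies gives mask == 8 + 2 + 1 == 11, selecting "fucomip".  */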
13003
13004 gcc_assert (mask < 16);
13005 ret = alt[mask];
13006 gcc_assert (ret);
13007
13008 return ret;
13009 }
13010 }
13011
13012 void
13013 ix86_output_addr_vec_elt (FILE *file, int value)
13014 {
13015 const char *directive = ASM_LONG;
13016
13017 #ifdef ASM_QUAD
13018 if (TARGET_64BIT)
13019 directive = ASM_QUAD;
13020 #else
13021 gcc_assert (!TARGET_64BIT);
13022 #endif
13023
13024 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
13025 }
13026
13027 void
13028 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
13029 {
13030 const char *directive = ASM_LONG;
13031
13032 #ifdef ASM_QUAD
13033 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
13034 directive = ASM_QUAD;
13035 #else
13036 gcc_assert (!TARGET_64BIT);
13037 #endif
13038 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
13039 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
13040 fprintf (file, "%s%s%d-%s%d\n",
13041 directive, LPREFIX, value, LPREFIX, rel);
13042 else if (HAVE_AS_GOTOFF_IN_DATA)
13043 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
13044 #if TARGET_MACHO
13045 else if (TARGET_MACHO)
13046 {
13047 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
13048 machopic_output_function_base_name (file);
13049 putc ('\n', file);
13050 }
13051 #endif
13052 else
13053 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
13054 GOT_SYMBOL_NAME, LPREFIX, value);
13055 }
13056 \f
13057 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
13058 for the target. */
13059
13060 void
13061 ix86_expand_clear (rtx dest)
13062 {
13063 rtx tmp;
13064
13065 /* We play register width games, which are only valid after reload. */
13066 gcc_assert (reload_completed);
13067
13068 /* Avoid HImode and its attendant prefix byte. */
13069 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
13070 dest = gen_rtx_REG (SImode, REGNO (dest));
13071 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
13072
13073 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
13074 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
13075 {
13076 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13077 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
13078 }
13079
13080 emit_insn (tmp);
13081 }
13082
13083 /* X is an unchanging MEM. If it is a constant pool reference, return
13084 the constant pool rtx, else NULL. */
13085
13086 rtx
13087 maybe_get_pool_constant (rtx x)
13088 {
13089 x = ix86_delegitimize_address (XEXP (x, 0));
13090
13091 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
13092 return get_pool_constant (x);
13093
13094 return NULL_RTX;
13095 }
13096
13097 void
13098 ix86_expand_move (enum machine_mode mode, rtx operands[])
13099 {
13100 rtx op0, op1;
13101 enum tls_model model;
13102
13103 op0 = operands[0];
13104 op1 = operands[1];
13105
13106 if (GET_CODE (op1) == SYMBOL_REF)
13107 {
13108 model = SYMBOL_REF_TLS_MODEL (op1);
13109 if (model)
13110 {
13111 op1 = legitimize_tls_address (op1, model, true);
13112 op1 = force_operand (op1, op0);
13113 if (op1 == op0)
13114 return;
13115 }
13116 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13117 && SYMBOL_REF_DLLIMPORT_P (op1))
13118 op1 = legitimize_dllimport_symbol (op1, false);
13119 }
13120 else if (GET_CODE (op1) == CONST
13121 && GET_CODE (XEXP (op1, 0)) == PLUS
13122 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13123 {
13124 rtx addend = XEXP (XEXP (op1, 0), 1);
13125 rtx symbol = XEXP (XEXP (op1, 0), 0);
13126 rtx tmp = NULL;
13127
13128 model = SYMBOL_REF_TLS_MODEL (symbol);
13129 if (model)
13130 tmp = legitimize_tls_address (symbol, model, true);
13131 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13132 && SYMBOL_REF_DLLIMPORT_P (symbol))
13133 tmp = legitimize_dllimport_symbol (symbol, true);
13134
13135 if (tmp)
13136 {
13137 tmp = force_operand (tmp, NULL);
13138 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13139 op0, 1, OPTAB_DIRECT);
13140 if (tmp == op0)
13141 return;
13142 }
13143 }
13144
13145 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13146 {
13147 if (TARGET_MACHO && !TARGET_64BIT)
13148 {
13149 #if TARGET_MACHO
13150 if (MACHOPIC_PURE)
13151 {
13152 rtx temp = ((reload_in_progress
13153 || ((op0 && REG_P (op0))
13154 && mode == Pmode))
13155 ? op0 : gen_reg_rtx (Pmode));
13156 op1 = machopic_indirect_data_reference (op1, temp);
13157 op1 = machopic_legitimize_pic_address (op1, mode,
13158 temp == op1 ? 0 : temp);
13159 }
13160 else if (MACHOPIC_INDIRECT)
13161 op1 = machopic_indirect_data_reference (op1, 0);
13162 if (op0 == op1)
13163 return;
13164 #endif
13165 }
13166 else
13167 {
13168 if (MEM_P (op0))
13169 op1 = force_reg (Pmode, op1);
13170 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13171 {
13172 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13173 op1 = legitimize_pic_address (op1, reg);
13174 if (op0 == op1)
13175 return;
13176 }
13177 }
13178 }
13179 else
13180 {
13181 if (MEM_P (op0)
13182 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13183 || !push_operand (op0, mode))
13184 && MEM_P (op1))
13185 op1 = force_reg (mode, op1);
13186
13187 if (push_operand (op0, mode)
13188 && ! general_no_elim_operand (op1, mode))
13189 op1 = copy_to_mode_reg (mode, op1);
13190
13191 /* Force large constants in 64-bit compilation into a register
13192 to get them CSEed. */
13193 if (can_create_pseudo_p ()
13194 && (mode == DImode) && TARGET_64BIT
13195 && immediate_operand (op1, mode)
13196 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13197 && !register_operand (op0, mode)
13198 && optimize)
13199 op1 = copy_to_mode_reg (mode, op1);
13200
13201 if (can_create_pseudo_p ()
13202 && FLOAT_MODE_P (mode)
13203 && GET_CODE (op1) == CONST_DOUBLE)
13204 {
13205 /* If we are loading a floating point constant to a register,
13206 force the value to memory now, since we'll get better code
13207 out of the back end. */
13208
13209 op1 = validize_mem (force_const_mem (mode, op1));
13210 if (!register_operand (op0, mode))
13211 {
13212 rtx temp = gen_reg_rtx (mode);
13213 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13214 emit_move_insn (op0, temp);
13215 return;
13216 }
13217 }
13218 }
13219
13220 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13221 }
13222
13223 void
13224 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13225 {
13226 rtx op0 = operands[0], op1 = operands[1];
13227 unsigned int align = GET_MODE_ALIGNMENT (mode);
13228
13229 /* Force constants other than zero into memory. We do not know how
13230 the instructions used to build constants modify the upper 64 bits
13231 of the register; once we have that information we may be able
13232 to handle some of them more efficiently. */
13233 if (can_create_pseudo_p ()
13234 && register_operand (op0, mode)
13235 && (CONSTANT_P (op1)
13236 || (GET_CODE (op1) == SUBREG
13237 && CONSTANT_P (SUBREG_REG (op1))))
13238 && !standard_sse_constant_p (op1))
13239 op1 = validize_mem (force_const_mem (mode, op1));
13240
13241 /* We need to check memory alignment for SSE mode since an attribute
13242 can make operands unaligned. */
13243 if (can_create_pseudo_p ()
13244 && SSE_REG_MODE_P (mode)
13245 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13246 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13247 {
13248 rtx tmp[2];
13249
13250 /* ix86_expand_vector_move_misalign() does not like constants ... */
13251 if (CONSTANT_P (op1)
13252 || (GET_CODE (op1) == SUBREG
13253 && CONSTANT_P (SUBREG_REG (op1))))
13254 op1 = validize_mem (force_const_mem (mode, op1));
13255
13256 /* ... nor both arguments in memory. */
13257 if (!register_operand (op0, mode)
13258 && !register_operand (op1, mode))
13259 op1 = force_reg (mode, op1);
13260
13261 tmp[0] = op0; tmp[1] = op1;
13262 ix86_expand_vector_move_misalign (mode, tmp);
13263 return;
13264 }
13265
13266 /* If neither operand is a register, force operand 1 into one and emit the move. */
13267 if (can_create_pseudo_p ()
13268 && !register_operand (op0, mode)
13269 && !register_operand (op1, mode))
13270 {
13271 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13272 return;
13273 }
13274
13275 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13276 }
13277
13278 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13279 straight to ix86_expand_vector_move. */
13280 /* Code generation for scalar reg-reg moves of single and double precision data:
13281 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13282 movaps reg, reg
13283 else
13284 movss reg, reg
13285 if (x86_sse_partial_reg_dependency == true)
13286 movapd reg, reg
13287 else
13288 movsd reg, reg
13289
13290 Code generation for scalar loads of double precision data:
13291 if (x86_sse_split_regs == true)
13292 movlpd mem, reg (gas syntax)
13293 else
13294 movsd mem, reg
13295
13296 Code generation for unaligned packed loads of single precision data
13297 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13298 if (x86_sse_unaligned_move_optimal)
13299 movups mem, reg
13300
13301 if (x86_sse_partial_reg_dependency == true)
13302 {
13303 xorps reg, reg
13304 movlps mem, reg
13305 movhps mem+8, reg
13306 }
13307 else
13308 {
13309 movlps mem, reg
13310 movhps mem+8, reg
13311 }
13312
13313 Code generation for unaligned packed loads of double precision data
13314 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13315 if (x86_sse_unaligned_move_optimal)
13316 movupd mem, reg
13317
13318 if (x86_sse_split_regs == true)
13319 {
13320 movlpd mem, reg
13321 movhpd mem+8, reg
13322 }
13323 else
13324 {
13325 movsd mem, reg
13326 movhpd mem+8, reg
13327 }
13328 */
13329
13330 void
13331 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13332 {
13333 rtx op0, op1, m;
13334
13335 op0 = operands[0];
13336 op1 = operands[1];
13337
13338 if (TARGET_AVX)
13339 {
13340 switch (GET_MODE_CLASS (mode))
13341 {
13342 case MODE_VECTOR_INT:
13343 case MODE_INT:
13344 switch (GET_MODE_SIZE (mode))
13345 {
13346 case 16:
13347 /* If we're optimizing for size, movups is the smallest. */
13348 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13349 {
13350 op0 = gen_lowpart (V4SFmode, op0);
13351 op1 = gen_lowpart (V4SFmode, op1);
13352 emit_insn (gen_avx_movups (op0, op1));
13353 return;
13354 }
13355 op0 = gen_lowpart (V16QImode, op0);
13356 op1 = gen_lowpart (V16QImode, op1);
13357 emit_insn (gen_avx_movdqu (op0, op1));
13358 break;
13359 case 32:
13360 op0 = gen_lowpart (V32QImode, op0);
13361 op1 = gen_lowpart (V32QImode, op1);
13362 emit_insn (gen_avx_movdqu256 (op0, op1));
13363 break;
13364 default:
13365 gcc_unreachable ();
13366 }
13367 break;
13368 case MODE_VECTOR_FLOAT:
13369 op0 = gen_lowpart (mode, op0);
13370 op1 = gen_lowpart (mode, op1);
13371
13372 switch (mode)
13373 {
13374 case V4SFmode:
13375 emit_insn (gen_avx_movups (op0, op1));
13376 break;
13377 case V8SFmode:
13378 emit_insn (gen_avx_movups256 (op0, op1));
13379 break;
13380 case V2DFmode:
13381 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13382 {
13383 op0 = gen_lowpart (V4SFmode, op0);
13384 op1 = gen_lowpart (V4SFmode, op1);
13385 emit_insn (gen_avx_movups (op0, op1));
13386 return;
13387 }
13388 emit_insn (gen_avx_movupd (op0, op1));
13389 break;
13390 case V4DFmode:
13391 emit_insn (gen_avx_movupd256 (op0, op1));
13392 break;
13393 default:
13394 gcc_unreachable ();
13395 }
13396 break;
13397
13398 default:
13399 gcc_unreachable ();
13400 }
13401
13402 return;
13403 }
13404
13405 if (MEM_P (op1))
13406 {
13407 /* If we're optimizing for size, movups is the smallest. */
13408 if (optimize_insn_for_size_p ()
13409 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13410 {
13411 op0 = gen_lowpart (V4SFmode, op0);
13412 op1 = gen_lowpart (V4SFmode, op1);
13413 emit_insn (gen_sse_movups (op0, op1));
13414 return;
13415 }
13416
13417 /* ??? If we have typed data, then it would appear that using
13418 movdqu is the only way to get unaligned data loaded with
13419 integer type. */
13420 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13421 {
13422 op0 = gen_lowpart (V16QImode, op0);
13423 op1 = gen_lowpart (V16QImode, op1);
13424 emit_insn (gen_sse2_movdqu (op0, op1));
13425 return;
13426 }
13427
13428 if (TARGET_SSE2 && mode == V2DFmode)
13429 {
13430 rtx zero;
13431
13432 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13433 {
13434 op0 = gen_lowpart (V2DFmode, op0);
13435 op1 = gen_lowpart (V2DFmode, op1);
13436 emit_insn (gen_sse2_movupd (op0, op1));
13437 return;
13438 }
13439
13440 /* When SSE registers are split into halves, we can avoid
13441 writing to the top half twice. */
13442 if (TARGET_SSE_SPLIT_REGS)
13443 {
13444 emit_clobber (op0);
13445 zero = op0;
13446 }
13447 else
13448 {
13449 /* ??? Not sure about the best option for the Intel chips.
13450 The following would seem to satisfy; the register is
13451 entirely cleared, breaking the dependency chain. We
13452 then store to the upper half, with a dependency depth
13453 of one. A rumor has it that Intel recommends two movsd
13454 followed by an unpacklpd, but this is unconfirmed. And
13455 given that the dependency depth of the unpacklpd would
13456 still be one, I'm not sure why this would be better. */
13457 zero = CONST0_RTX (V2DFmode);
13458 }
13459
13460 m = adjust_address (op1, DFmode, 0);
13461 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13462 m = adjust_address (op1, DFmode, 8);
13463 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13464 }
13465 else
13466 {
13467 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13468 {
13469 op0 = gen_lowpart (V4SFmode, op0);
13470 op1 = gen_lowpart (V4SFmode, op1);
13471 emit_insn (gen_sse_movups (op0, op1));
13472 return;
13473 }
13474
13475 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13476 emit_move_insn (op0, CONST0_RTX (mode));
13477 else
13478 emit_clobber (op0);
13479
13480 if (mode != V4SFmode)
13481 op0 = gen_lowpart (V4SFmode, op0);
13482 m = adjust_address (op1, V2SFmode, 0);
13483 emit_insn (gen_sse_loadlps (op0, op0, m));
13484 m = adjust_address (op1, V2SFmode, 8);
13485 emit_insn (gen_sse_loadhps (op0, op0, m));
13486 }
13487 }
13488 else if (MEM_P (op0))
13489 {
13490 /* If we're optimizing for size, movups is the smallest. */
13491 if (optimize_insn_for_size_p ()
13492 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13493 {
13494 op0 = gen_lowpart (V4SFmode, op0);
13495 op1 = gen_lowpart (V4SFmode, op1);
13496 emit_insn (gen_sse_movups (op0, op1));
13497 return;
13498 }
13499
13500 /* ??? Similar to above, only less clear because of the
13501 "typeless stores" issue. */
13502 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13503 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13504 {
13505 op0 = gen_lowpart (V16QImode, op0);
13506 op1 = gen_lowpart (V16QImode, op1);
13507 emit_insn (gen_sse2_movdqu (op0, op1));
13508 return;
13509 }
13510
13511 if (TARGET_SSE2 && mode == V2DFmode)
13512 {
13513 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13514 {
13515 op0 = gen_lowpart (V2DFmode, op0);
13516 op1 = gen_lowpart (V2DFmode, op1);
13517 emit_insn (gen_sse2_movupd (op0, op1));
13518 }
13519 else
13520 {
13521 m = adjust_address (op0, DFmode, 0);
13522 emit_insn (gen_sse2_storelpd (m, op1));
13523 m = adjust_address (op0, DFmode, 8);
13524 emit_insn (gen_sse2_storehpd (m, op1));
13525 }
13526 }
13527 else
13528 {
13529 if (mode != V4SFmode)
13530 op1 = gen_lowpart (V4SFmode, op1);
13531
13532 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13533 {
13534 op0 = gen_lowpart (V4SFmode, op0);
13535 emit_insn (gen_sse_movups (op0, op1));
13536 }
13537 else
13538 {
13539 m = adjust_address (op0, V2SFmode, 0);
13540 emit_insn (gen_sse_storelps (m, op1));
13541 m = adjust_address (op0, V2SFmode, 8);
13542 emit_insn (gen_sse_storehps (m, op1));
13543 }
13544 }
13545 }
13546 else
13547 gcc_unreachable ();
13548 }
13549
13550 /* Expand a push in MODE. This is some mode for which we do not support
13551 proper push instructions, at least from the registers that we expect
13552 the value to live in. */
13553
13554 void
13555 ix86_expand_push (enum machine_mode mode, rtx x)
13556 {
13557 rtx tmp;
13558
13559 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13560 GEN_INT (-GET_MODE_SIZE (mode)),
13561 stack_pointer_rtx, 1, OPTAB_DIRECT);
13562 if (tmp != stack_pointer_rtx)
13563 emit_move_insn (stack_pointer_rtx, tmp);
13564
13565 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13566
13567 /* When we push an operand onto the stack, it has to be aligned at least
13568 at the function argument boundary. However, since we don't have
13569 the argument type, we can't determine the actual argument
13570 boundary. */
13571 emit_move_insn (tmp, x);
13572 }
13573
13574 /* Helper function of ix86_fixup_binary_operands to canonicalize
13575 operand order. Returns true if the operands should be swapped. */
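/* For example, for a commutative dst = src2 + dst, swapping the sources
   makes the operand that matches the destination come first, so the
   two-address form of the instruction can be used directly.  */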
13576
13577 static bool
13578 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13579 rtx operands[])
13580 {
13581 rtx dst = operands[0];
13582 rtx src1 = operands[1];
13583 rtx src2 = operands[2];
13584
13585 /* If the operation is not commutative, we can't do anything. */
13586 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13587 return false;
13588
13589 /* Highest priority is that src1 should match dst. */
13590 if (rtx_equal_p (dst, src1))
13591 return false;
13592 if (rtx_equal_p (dst, src2))
13593 return true;
13594
13595 /* Next highest priority is that immediate constants come second. */
13596 if (immediate_operand (src2, mode))
13597 return false;
13598 if (immediate_operand (src1, mode))
13599 return true;
13600
13601 /* Lowest priority is that memory references should come second. */
13602 if (MEM_P (src2))
13603 return false;
13604 if (MEM_P (src1))
13605 return true;
13606
13607 return false;
13608 }
13609
13610
13611 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13612 destination to use for the operation. If different from the true
13613 destination in operands[0], a copy operation will be required. */
13614
13615 rtx
13616 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13617 rtx operands[])
13618 {
13619 rtx dst = operands[0];
13620 rtx src1 = operands[1];
13621 rtx src2 = operands[2];
13622
13623 /* Canonicalize operand order. */
13624 if (ix86_swap_binary_operands_p (code, mode, operands))
13625 {
13626 rtx temp;
13627
13628 /* It is invalid to swap operands of different modes. */
13629 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13630
13631 temp = src1;
13632 src1 = src2;
13633 src2 = temp;
13634 }
13635
13636 /* Both source operands cannot be in memory. */
13637 if (MEM_P (src1) && MEM_P (src2))
13638 {
13639 /* Optimization: Only read from memory once. */
13640 if (rtx_equal_p (src1, src2))
13641 {
13642 src2 = force_reg (mode, src2);
13643 src1 = src2;
13644 }
13645 else
13646 src2 = force_reg (mode, src2);
13647 }
13648
13649 /* If the destination is memory, and we do not have matching source
13650 operands, do things in registers. */
13651 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13652 dst = gen_reg_rtx (mode);
13653
13654 /* Source 1 cannot be a constant. */
13655 if (CONSTANT_P (src1))
13656 src1 = force_reg (mode, src1);
13657
13658 /* Source 1 cannot be a non-matching memory. */
13659 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13660 src1 = force_reg (mode, src1);
13661
13662 operands[1] = src1;
13663 operands[2] = src2;
13664 return dst;
13665 }
13666
13667 /* Similarly, but assume that the destination has already been
13668 set up properly. */
13669
13670 void
13671 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13672 enum machine_mode mode, rtx operands[])
13673 {
13674 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13675 gcc_assert (dst == operands[0]);
13676 }
13677
13678 /* Attempt to expand a binary operator. Make the expansion closer to the
13679 actual machine than just general_operand, which would allow 3 separate
13680 memory references (one output, two input) in a single insn. */
13681
13682 void
13683 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13684 rtx operands[])
13685 {
13686 rtx src1, src2, dst, op, clob;
13687
13688 dst = ix86_fixup_binary_operands (code, mode, operands);
13689 src1 = operands[1];
13690 src2 = operands[2];
13691
13692 /* Emit the instruction. */
13693
13694 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13695 if (reload_in_progress)
13696 {
13697 /* Reload doesn't know about the flags register, and doesn't know that
13698 it doesn't want to clobber it. We can only do this with PLUS. */
13699 gcc_assert (code == PLUS);
13700 emit_insn (op);
13701 }
13702 else
13703 {
13704 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13705 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13706 }
13707
13708 /* Fix up the destination if needed. */
13709 if (dst != operands[0])
13710 emit_move_insn (operands[0], dst);
13711 }
13712
13713 /* Return TRUE or FALSE depending on whether the binary operator meets the
13714 appropriate constraints. */
13715
13716 int
13717 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13718 rtx operands[3])
13719 {
13720 rtx dst = operands[0];
13721 rtx src1 = operands[1];
13722 rtx src2 = operands[2];
13723
13724 /* Both source operands cannot be in memory. */
13725 if (MEM_P (src1) && MEM_P (src2))
13726 return 0;
13727
13728 /* Canonicalize operand order for commutative operators. */
13729 if (ix86_swap_binary_operands_p (code, mode, operands))
13730 {
13731 rtx temp = src1;
13732 src1 = src2;
13733 src2 = temp;
13734 }
13735
13736 /* If the destination is memory, we must have a matching source operand. */
13737 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13738 return 0;
13739
13740 /* Source 1 cannot be a constant. */
13741 if (CONSTANT_P (src1))
13742 return 0;
13743
13744 /* Source 1 cannot be a non-matching memory. */
13745 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13746 return 0;
13747
13748 return 1;
13749 }
13750
13751 /* Attempt to expand a unary operator. Make the expansion closer to the
13752 actual machine than just general_operand, which would allow 2 separate
13753 memory references (one output, one input) in a single insn. */
13754
13755 void
13756 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13757 rtx operands[])
13758 {
13759 int matching_memory;
13760 rtx src, dst, op, clob;
13761
13762 dst = operands[0];
13763 src = operands[1];
13764
13765 /* If the destination is memory, and we do not have matching source
13766 operands, do things in registers. */
13767 matching_memory = 0;
13768 if (MEM_P (dst))
13769 {
13770 if (rtx_equal_p (dst, src))
13771 matching_memory = 1;
13772 else
13773 dst = gen_reg_rtx (mode);
13774 }
13775
13776 /* When source operand is memory, destination must match. */
13777 if (MEM_P (src) && !matching_memory)
13778 src = force_reg (mode, src);
13779
13780 /* Emit the instruction. */
13781
13782 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13783 if (reload_in_progress || code == NOT)
13784 {
13785 /* Reload doesn't know about the flags register, and doesn't know that
13786 it doesn't want to clobber it. */
13787 gcc_assert (code == NOT);
13788 emit_insn (op);
13789 }
13790 else
13791 {
13792 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13793 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13794 }
13795
13796 /* Fix up the destination if needed. */
13797 if (dst != operands[0])
13798 emit_move_insn (operands[0], dst);
13799 }
13800
13801 #define LEA_SEARCH_THRESHOLD 12
13802
13803 /* Search backward for a non-agu definition of register number REGNO1
13804 or register number REGNO2 in INSN's basic block, stopping when we
13805 1. have passed LEA_SEARCH_THRESHOLD instructions, or
13806 2. reach the BB boundary, or
13807 3. reach an agu definition.
13808 Returns the distance between the non-agu definition point and INSN.
13809 If no definition point, returns -1. */
13810
13811 static int
13812 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13813 rtx insn)
13814 {
13815 basic_block bb = BLOCK_FOR_INSN (insn);
13816 int distance = 0;
13817 df_ref *def_rec;
13818 enum attr_type insn_type;
13819
13820 if (insn != BB_HEAD (bb))
13821 {
13822 rtx prev = PREV_INSN (insn);
13823 while (prev && distance < LEA_SEARCH_THRESHOLD)
13824 {
13825 if (NONDEBUG_INSN_P (prev))
13826 {
13827 distance++;
13828 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13829 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13830 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13831 && (regno1 == DF_REF_REGNO (*def_rec)
13832 || regno2 == DF_REF_REGNO (*def_rec)))
13833 {
13834 insn_type = get_attr_type (prev);
13835 if (insn_type != TYPE_LEA)
13836 goto done;
13837 }
13838 }
13839 if (prev == BB_HEAD (bb))
13840 break;
13841 prev = PREV_INSN (prev);
13842 }
13843 }
13844
13845 if (distance < LEA_SEARCH_THRESHOLD)
13846 {
13847 edge e;
13848 edge_iterator ei;
13849 bool simple_loop = false;
13850
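 /* If the basic block loops directly back to itself, wrap the backward
    scan around from the end of the block so that definitions later in
    the loop body are also considered.  */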
13851 FOR_EACH_EDGE (e, ei, bb->preds)
13852 if (e->src == bb)
13853 {
13854 simple_loop = true;
13855 break;
13856 }
13857
13858 if (simple_loop)
13859 {
13860 rtx prev = BB_END (bb);
13861 while (prev
13862 && prev != insn
13863 && distance < LEA_SEARCH_THRESHOLD)
13864 {
13865 if (NONDEBUG_INSN_P (prev))
13866 {
13867 distance++;
13868 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13869 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13870 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13871 && (regno1 == DF_REF_REGNO (*def_rec)
13872 || regno2 == DF_REF_REGNO (*def_rec)))
13873 {
13874 insn_type = get_attr_type (prev);
13875 if (insn_type != TYPE_LEA)
13876 goto done;
13877 }
13878 }
13879 prev = PREV_INSN (prev);
13880 }
13881 }
13882 }
13883
13884 distance = -1;
13885
13886 done:
13887 /* get_attr_type may modify recog data. We want to make sure
13888 that recog data is valid for instruction INSN, on which
13889 distance_non_agu_define is called. INSN is unchanged here. */
13890 extract_insn_cached (insn);
13891 return distance;
13892 }
13893
13894 /* Return the distance between INSN and the next insn that uses
13895 register number REGNO0 in a memory address. Return -1 if no such
13896 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
13897
13898 static int
13899 distance_agu_use (unsigned int regno0, rtx insn)
13900 {
13901 basic_block bb = BLOCK_FOR_INSN (insn);
13902 int distance = 0;
13903 df_ref *def_rec;
13904 df_ref *use_rec;
13905
13906 if (insn != BB_END (bb))
13907 {
13908 rtx next = NEXT_INSN (insn);
13909 while (next && distance < LEA_SEARCH_THRESHOLD)
13910 {
13911 if (NONDEBUG_INSN_P (next))
13912 {
13913 distance++;
13914
13915 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13916 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13917 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13918 && regno0 == DF_REF_REGNO (*use_rec))
13919 {
13920 /* Return DISTANCE if OP0 is used in memory
13921 address in NEXT. */
13922 return distance;
13923 }
13924
13925 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13926 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13927 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13928 && regno0 == DF_REF_REGNO (*def_rec))
13929 {
13930 /* Return -1 if OP0 is set in NEXT. */
13931 return -1;
13932 }
13933 }
13934 if (next == BB_END (bb))
13935 break;
13936 next = NEXT_INSN (next);
13937 }
13938 }
13939
13940 if (distance < LEA_SEARCH_THRESHOLD)
13941 {
13942 edge e;
13943 edge_iterator ei;
13944 bool simple_loop = false;
13945
13946 FOR_EACH_EDGE (e, ei, bb->succs)
13947 if (e->dest == bb)
13948 {
13949 simple_loop = true;
13950 break;
13951 }
13952
13953 if (simple_loop)
13954 {
13955 rtx next = BB_HEAD (bb);
13956 while (next
13957 && next != insn
13958 && distance < LEA_SEARCH_THRESHOLD)
13959 {
13960 if (NONDEBUG_INSN_P (next))
13961 {
13962 distance++;
13963
13964 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13965 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13966 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13967 && regno0 == DF_REF_REGNO (*use_rec))
13968 {
13969 /* Return DISTANCE if OP0 is used in memory
13970 address in NEXT. */
13971 return distance;
13972 }
13973
13974 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13975 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13976 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13977 && regno0 == DF_REF_REGNO (*def_rec))
13978 {
13979 /* Return -1 if OP0 is set in NEXT. */
13980 return -1;
13981 }
13982
13983 }
13984 next = NEXT_INSN (next);
13985 }
13986 }
13987 }
13988
13989 return -1;
13990 }
13991
13992 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
13993 there is a dilemma of choosing between LEA and ADD:
13994 Negative value: ADD is preferred over LEA
13995 Zero: Neutral
13996 Positive value: LEA is preferred over ADD. */
13997 #define IX86_LEA_PRIORITY 2
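/* With IX86_LEA_PRIORITY == 2, once both distances below are known the LEA
   form is kept when dist_use <= dist_define + IX86_LEA_PRIORITY, i.e. the
   next address (AGU) use of the result is at most two instructions farther
   away than the nearest non-AGU definition of the inputs.  */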
13998
13999 /* Return true if it is ok to optimize an ADD operation to an LEA
14000 operation to avoid consuming the flags register. For processors
14001 like ATOM, if the destination register of the LEA holds an actual
14002 address which will be used soon, LEA is better; otherwise ADD
14003 is better. */
14004
14005 bool
14006 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14007 rtx insn, rtx operands[])
14008 {
14009 unsigned int regno0 = true_regnum (operands[0]);
14010 unsigned int regno1 = true_regnum (operands[1]);
14011 unsigned int regno2;
14012
14013 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
14014 return regno0 != regno1;
14015
14016 regno2 = true_regnum (operands[2]);
14017
14018 /* If a = b + c, (a!=b && a!=c), must use lea form. */
14019 if (regno0 != regno1 && regno0 != regno2)
14020 return true;
14021 else
14022 {
14023 int dist_define, dist_use;
14024 dist_define = distance_non_agu_define (regno1, regno2, insn);
14025 if (dist_define <= 0)
14026 return true;
14027
14028 /* If this insn has both backward non-agu dependence and forward
14029 agu dependence, the one with the shorter distance takes effect. */
14030 dist_use = distance_agu_use (regno0, insn);
14031 if (dist_use <= 0
14032 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
14033 return false;
14034
14035 return true;
14036 }
14037 }
14038
14039 /* Return true if destination reg of SET_BODY is shift count of
14040 USE_BODY. */
14041
14042 static bool
14043 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
14044 {
14045 rtx set_dest;
14046 rtx shift_rtx;
14047 int i;
14048
14049 /* Retrieve destination of SET_BODY. */
14050 switch (GET_CODE (set_body))
14051 {
14052 case SET:
14053 set_dest = SET_DEST (set_body);
14054 if (!set_dest || !REG_P (set_dest))
14055 return false;
14056 break;
14057 case PARALLEL:
14058 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
14059 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
14060 use_body))
14061 return true;
14062 default:
14063 return false;
14064 break;
14065 }
14066
14067 /* Retrieve shift count of USE_BODY. */
14068 switch (GET_CODE (use_body))
14069 {
14070 case SET:
14071 shift_rtx = XEXP (use_body, 1);
14072 break;
14073 case PARALLEL:
14074 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
14075 if (ix86_dep_by_shift_count_body (set_body,
14076 XVECEXP (use_body, 0, i)))
14077 return true;
14078 default:
14079 return false;
14080 break;
14081 }
14082
14083 if (shift_rtx
14084 && (GET_CODE (shift_rtx) == ASHIFT
14085 || GET_CODE (shift_rtx) == LSHIFTRT
14086 || GET_CODE (shift_rtx) == ASHIFTRT
14087 || GET_CODE (shift_rtx) == ROTATE
14088 || GET_CODE (shift_rtx) == ROTATERT))
14089 {
14090 rtx shift_count = XEXP (shift_rtx, 1);
14091
14092 /* Return true if shift count is dest of SET_BODY. */
14093 if (REG_P (shift_count)
14094 && true_regnum (set_dest) == true_regnum (shift_count))
14095 return true;
14096 }
14097
14098 return false;
14099 }
14100
14101 /* Return true if destination reg of SET_INSN is shift count of
14102 USE_INSN. */
14103
14104 bool
14105 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
14106 {
14107 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
14108 PATTERN (use_insn));
14109 }
14110
14111 /* Return TRUE or FALSE depending on whether the unary operator meets the
14112 appropriate constraints. */
14113
14114 int
14115 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14116 enum machine_mode mode ATTRIBUTE_UNUSED,
14117 rtx operands[2] ATTRIBUTE_UNUSED)
14118 {
14119 /* If one of operands is memory, source and destination must match. */
14120 if ((MEM_P (operands[0])
14121 || MEM_P (operands[1]))
14122 && ! rtx_equal_p (operands[0], operands[1]))
14123 return FALSE;
14124 return TRUE;
14125 }
14126
14127 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
14128 are ok, keeping in mind the possible movddup alternative. */
14129
14130 bool
14131 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
14132 {
14133 if (MEM_P (operands[0]))
14134 return rtx_equal_p (operands[0], operands[1 + high]);
14135 if (MEM_P (operands[1]) && MEM_P (operands[2]))
14136 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
14137 return true;
14138 }
14139
14140 /* Post-reload splitter for converting an SF or DFmode value in an
14141 SSE register into an unsigned SImode. */
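/* The sequence below compares the input against 2**31, conditionally
   subtracts 2**31 before the signed truncating conversion, and then flips
   the sign bit of the converted result (XOR with the comparison mask
   shifted left by 31) to add 2**31 back for inputs that were >= 2**31.  */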
14142
14143 void
14144 ix86_split_convert_uns_si_sse (rtx operands[])
14145 {
14146 enum machine_mode vecmode;
14147 rtx value, large, zero_or_two31, input, two31, x;
14148
14149 large = operands[1];
14150 zero_or_two31 = operands[2];
14151 input = operands[3];
14152 two31 = operands[4];
14153 vecmode = GET_MODE (large);
14154 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14155
14156 /* Load up the value into the low element. We must ensure that the other
14157 elements are valid floats -- zero is the easiest such value. */
14158 if (MEM_P (input))
14159 {
14160 if (vecmode == V4SFmode)
14161 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14162 else
14163 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14164 }
14165 else
14166 {
14167 input = gen_rtx_REG (vecmode, REGNO (input));
14168 emit_move_insn (value, CONST0_RTX (vecmode));
14169 if (vecmode == V4SFmode)
14170 emit_insn (gen_sse_movss (value, value, input));
14171 else
14172 emit_insn (gen_sse2_movsd (value, value, input));
14173 }
14174
14175 emit_move_insn (large, two31);
14176 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14177
14178 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14179 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14180
14181 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14182 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14183
14184 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14185 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14186
14187 large = gen_rtx_REG (V4SImode, REGNO (large));
14188 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14189
14190 x = gen_rtx_REG (V4SImode, REGNO (value));
14191 if (vecmode == V4SFmode)
14192 emit_insn (gen_sse2_cvttps2dq (x, value));
14193 else
14194 emit_insn (gen_sse2_cvttpd2dq (x, value));
14195 value = x;
14196
14197 emit_insn (gen_xorv4si3 (value, value, large));
14198 }
14199
14200 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14201 Expects the 64-bit DImode to be supplied in a pair of integral
14202 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14203 -mfpmath=sse, !optimize_size only. */
14204
14205 void
14206 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14207 {
14208 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14209 rtx int_xmm, fp_xmm;
14210 rtx biases, exponents;
14211 rtx x;
14212
14213 int_xmm = gen_reg_rtx (V4SImode);
14214 if (TARGET_INTER_UNIT_MOVES)
14215 emit_insn (gen_movdi_to_sse (int_xmm, input));
14216 else if (TARGET_SSE_SPLIT_REGS)
14217 {
14218 emit_clobber (int_xmm);
14219 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14220 }
14221 else
14222 {
14223 x = gen_reg_rtx (V2DImode);
14224 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14225 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14226 }
14227
14228 x = gen_rtx_CONST_VECTOR (V4SImode,
14229 gen_rtvec (4, GEN_INT (0x43300000UL),
14230 GEN_INT (0x45300000UL),
14231 const0_rtx, const0_rtx));
14232 exponents = validize_mem (force_const_mem (V4SImode, x));
14233
14234 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14235 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14236
14237 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14238 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14239 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14240 (0x1.0p84 + double(fp_value_hi_xmm)).
14241 Note these exponents differ by 32. */
14242
14243 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14244
14245 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14246 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14247 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14248 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14249 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14250 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14251 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14252 biases = validize_mem (force_const_mem (V2DFmode, biases));
14253 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
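 /* After the bias subtraction the low lane holds (double) lo and the high
    lane holds (double) hi * 2**32, so summing the two lanes below yields
    the full unsigned 64-bit value converted to DFmode.  */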
14254
14255 /* Add the upper and lower DFmode values together. */
14256 if (TARGET_SSE3)
14257 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14258 else
14259 {
14260 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14261 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14262 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14263 }
14264
14265 ix86_expand_vector_extract (false, target, fp_xmm, 0);
14266 }
14267
14268 /* Not used, but eases macroization of patterns. */
14269 void
14270 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14271 rtx input ATTRIBUTE_UNUSED)
14272 {
14273 gcc_unreachable ();
14274 }
14275
14276 /* Convert an unsigned SImode value into a DFmode. Only currently used
14277 for SSE, but applicable anywhere. */
14278
14279 void
14280 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14281 {
14282 REAL_VALUE_TYPE TWO31r;
14283 rtx x, fp;
14284
14285 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14286 NULL, 1, OPTAB_DIRECT);
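 /* X now holds INPUT - 2**31 reinterpreted as a signed SImode value, so
    the signed int->double conversion below is exact; adding 2**31.0 back
    afterwards recovers the original unsigned value.  */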
14287
14288 fp = gen_reg_rtx (DFmode);
14289 emit_insn (gen_floatsidf2 (fp, x));
14290
14291 real_ldexp (&TWO31r, &dconst1, 31);
14292 x = const_double_from_real_value (TWO31r, DFmode);
14293
14294 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14295 if (x != target)
14296 emit_move_insn (target, x);
14297 }
14298
14299 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14300 32-bit mode; otherwise we have a direct convert instruction. */
14301
14302 void
14303 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14304 {
14305 REAL_VALUE_TYPE TWO32r;
14306 rtx fp_lo, fp_hi, x;
14307
14308 fp_lo = gen_reg_rtx (DFmode);
14309 fp_hi = gen_reg_rtx (DFmode);
14310
14311 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14312
14313 real_ldexp (&TWO32r, &dconst1, 32);
14314 x = const_double_from_real_value (TWO32r, DFmode);
14315 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14316
14317 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14318
14319 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14320 0, OPTAB_DIRECT);
14321 if (x != target)
14322 emit_move_insn (target, x);
14323 }
14324
14325 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14326 For x86_32, -mfpmath=sse, !optimize_size only. */
14327 void
14328 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14329 {
14330 REAL_VALUE_TYPE ONE16r;
14331 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14332
14333 real_ldexp (&ONE16r, &dconst1, 16);
14334 x = const_double_from_real_value (ONE16r, SFmode);
14335 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14336 NULL, 0, OPTAB_DIRECT);
14337 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14338 NULL, 0, OPTAB_DIRECT);
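 /* The unsigned word is split into its 16-bit halves; each half converts
    exactly to SFmode, and the result is rebuilt as hi * 2**16 + lo, which
    avoids misinterpreting values >= 2**31 in a direct signed conversion.  */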
14339 fp_hi = gen_reg_rtx (SFmode);
14340 fp_lo = gen_reg_rtx (SFmode);
14341 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14342 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14343 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14344 0, OPTAB_DIRECT);
14345 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14346 0, OPTAB_DIRECT);
14347 if (!rtx_equal_p (target, fp_hi))
14348 emit_move_insn (target, fp_hi);
14349 }
14350
14351 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14352 then replicate the value for all elements of the vector
14353 register. */
14354
14355 rtx
14356 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14357 {
14358 rtvec v;
14359 switch (mode)
14360 {
14361 case SImode:
14362 gcc_assert (vect);
14363 v = gen_rtvec (4, value, value, value, value);
14364 return gen_rtx_CONST_VECTOR (V4SImode, v);
14365
14366 case DImode:
14367 gcc_assert (vect);
14368 v = gen_rtvec (2, value, value);
14369 return gen_rtx_CONST_VECTOR (V2DImode, v);
14370
14371 case SFmode:
14372 if (vect)
14373 v = gen_rtvec (4, value, value, value, value);
14374 else
14375 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14376 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14377 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14378
14379 case DFmode:
14380 if (vect)
14381 v = gen_rtvec (2, value, value);
14382 else
14383 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14384 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14385
14386 default:
14387 gcc_unreachable ();
14388 }
14389 }
14390
14391 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14392 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14393 for an SSE register. If VECT is true, then replicate the mask for
14394 all elements of the vector register. If INVERT is true, then create
14395 a mask excluding the sign bit. */
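/* For example, for DFmode with VECT set this builds the V2DF constant
   { -0.0, -0.0 } (bit pattern 0x8000000000000000 in each element); with
   INVERT set, the complementary 0x7fff... mask used for ABS is built
   instead.  */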
14396
14397 rtx
14398 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14399 {
14400 enum machine_mode vec_mode, imode;
14401 HOST_WIDE_INT hi, lo;
14402 int shift = 63;
14403 rtx v;
14404 rtx mask;
14405
14406 /* Find the sign bit, sign extended to 2*HWI. */
14407 switch (mode)
14408 {
14409 case SImode:
14410 case SFmode:
14411 imode = SImode;
14412 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14413 lo = 0x80000000, hi = lo < 0;
14414 break;
14415
14416 case DImode:
14417 case DFmode:
14418 imode = DImode;
14419 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14420 if (HOST_BITS_PER_WIDE_INT >= 64)
14421 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14422 else
14423 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14424 break;
14425
14426 case TImode:
14427 case TFmode:
14428 vec_mode = VOIDmode;
14429 if (HOST_BITS_PER_WIDE_INT >= 64)
14430 {
14431 imode = TImode;
14432 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14433 }
14434 else
14435 {
14436 rtvec vec;
14437
14438 imode = DImode;
14439 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14440
14441 if (invert)
14442 {
14443 lo = ~lo, hi = ~hi;
14444 v = constm1_rtx;
14445 }
14446 else
14447 v = const0_rtx;
14448
14449 mask = immed_double_const (lo, hi, imode);
14450
14451 vec = gen_rtvec (2, v, mask);
14452 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14453 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14454
14455 return v;
14456 }
14457 break;
14458
14459 default:
14460 gcc_unreachable ();
14461 }
14462
14463 if (invert)
14464 lo = ~lo, hi = ~hi;
14465
14466 /* Force this value into the low part of a fp vector constant. */
14467 mask = immed_double_const (lo, hi, imode);
14468 mask = gen_lowpart (mode, mask);
14469
14470 if (vec_mode == VOIDmode)
14471 return force_reg (mode, mask);
14472
14473 v = ix86_build_const_vector (mode, vect, mask);
14474 return force_reg (vec_mode, v);
14475 }
14476
14477 /* Generate code for floating point ABS or NEG. */
14478
14479 void
14480 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14481 rtx operands[])
14482 {
14483 rtx mask, set, use, clob, dst, src;
14484 bool use_sse = false;
14485 bool vector_mode = VECTOR_MODE_P (mode);
14486 enum machine_mode elt_mode = mode;
14487
14488 if (vector_mode)
14489 {
14490 elt_mode = GET_MODE_INNER (mode);
14491 use_sse = true;
14492 }
14493 else if (mode == TFmode)
14494 use_sse = true;
14495 else if (TARGET_SSE_MATH)
14496 use_sse = SSE_FLOAT_MODE_P (mode);
14497
14498 /* NEG and ABS performed with SSE use bitwise mask operations.
14499 Create the appropriate mask now. */
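 /* With SSE, NEG is implemented as an XOR with the sign-bit mask and ABS
    as an AND with its complement, which clears the sign bit.  */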
14500 if (use_sse)
14501 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14502 else
14503 mask = NULL_RTX;
14504
14505 dst = operands[0];
14506 src = operands[1];
14507
14508 if (vector_mode)
14509 {
14510 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14511 set = gen_rtx_SET (VOIDmode, dst, set);
14512 emit_insn (set);
14513 }
14514 else
14515 {
14516 set = gen_rtx_fmt_e (code, mode, src);
14517 set = gen_rtx_SET (VOIDmode, dst, set);
14518 if (mask)
14519 {
14520 use = gen_rtx_USE (VOIDmode, mask);
14521 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14522 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14523 gen_rtvec (3, set, use, clob)));
14524 }
14525 else
14526 emit_insn (set);
14527 }
14528 }
14529
14530 /* Expand a copysign operation. Special case operand 0 being a constant. */
14531
14532 void
14533 ix86_expand_copysign (rtx operands[])
14534 {
14535 enum machine_mode mode;
14536 rtx dest, op0, op1, mask, nmask;
14537
14538 dest = operands[0];
14539 op0 = operands[1];
14540 op1 = operands[2];
14541
14542 mode = GET_MODE (dest);
14543
14544 if (GET_CODE (op0) == CONST_DOUBLE)
14545 {
14546 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14547
14548 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14549 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14550
14551 if (mode == SFmode || mode == DFmode)
14552 {
14553 enum machine_mode vmode;
14554
14555 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14556
14557 if (op0 == CONST0_RTX (mode))
14558 op0 = CONST0_RTX (vmode);
14559 else
14560 {
14561 rtx v = ix86_build_const_vector (mode, false, op0);
14562
14563 op0 = force_reg (vmode, v);
14564 }
14565 }
14566 else if (op0 != CONST0_RTX (mode))
14567 op0 = force_reg (mode, op0);
14568
14569 mask = ix86_build_signbit_mask (mode, 0, 0);
14570
14571 if (mode == SFmode)
14572 copysign_insn = gen_copysignsf3_const;
14573 else if (mode == DFmode)
14574 copysign_insn = gen_copysigndf3_const;
14575 else
14576 copysign_insn = gen_copysigntf3_const;
14577
14578 emit_insn (copysign_insn (dest, op0, op1, mask));
14579 }
14580 else
14581 {
14582 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14583
14584 nmask = ix86_build_signbit_mask (mode, 0, 1);
14585 mask = ix86_build_signbit_mask (mode, 0, 0);
14586
14587 if (mode == SFmode)
14588 copysign_insn = gen_copysignsf3_var;
14589 else if (mode == DFmode)
14590 copysign_insn = gen_copysigndf3_var;
14591 else
14592 copysign_insn = gen_copysigntf3_var;
14593
14594 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14595 }
14596 }
14597
14598 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14599 be a constant, and so has already been expanded into a vector constant. */
14600
14601 void
14602 ix86_split_copysign_const (rtx operands[])
14603 {
14604 enum machine_mode mode, vmode;
14605 rtx dest, op0, mask, x;
14606
14607 dest = operands[0];
14608 op0 = operands[1];
14609 mask = operands[3];
14610
14611 mode = GET_MODE (dest);
14612 vmode = GET_MODE (mask);
14613
14614 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14615 x = gen_rtx_AND (vmode, dest, mask);
14616 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14617
14618 if (op0 != CONST0_RTX (vmode))
14619 {
14620 x = gen_rtx_IOR (vmode, dest, op0);
14621 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14622 }
14623 }
14624
14625 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14626 so we have to do two masks. */
14627
14628 void
14629 ix86_split_copysign_var (rtx operands[])
14630 {
14631 enum machine_mode mode, vmode;
14632 rtx dest, scratch, op0, op1, mask, nmask, x;
14633
14634 dest = operands[0];
14635 scratch = operands[1];
14636 op0 = operands[2];
14637 op1 = operands[3];
14638 nmask = operands[4];
14639 mask = operands[5];
14640
14641 mode = GET_MODE (dest);
14642 vmode = GET_MODE (mask);
14643
14644 if (rtx_equal_p (op0, op1))
14645 {
14646 /* Shouldn't happen often (it's useless, obviously), but when it does
14647 we'd generate incorrect code if we continue below. */
14648 emit_move_insn (dest, op0);
14649 return;
14650 }
14651
14652 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14653 {
14654 gcc_assert (REGNO (op1) == REGNO (scratch));
14655
14656 x = gen_rtx_AND (vmode, scratch, mask);
14657 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14658
14659 dest = mask;
14660 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14661 x = gen_rtx_NOT (vmode, dest);
14662 x = gen_rtx_AND (vmode, x, op0);
14663 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14664 }
14665 else
14666 {
14667 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14668 {
14669 x = gen_rtx_AND (vmode, scratch, mask);
14670 }
14671 else /* alternative 2,4 */
14672 {
14673 gcc_assert (REGNO (mask) == REGNO (scratch));
14674 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14675 x = gen_rtx_AND (vmode, scratch, op1);
14676 }
14677 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14678
14679 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14680 {
14681 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14682 x = gen_rtx_AND (vmode, dest, nmask);
14683 }
14684 else /* alternative 3,4 */
14685 {
14686 gcc_assert (REGNO (nmask) == REGNO (dest));
14687 dest = nmask;
14688 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14689 x = gen_rtx_AND (vmode, dest, op0);
14690 }
14691 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14692 }
14693
14694 x = gen_rtx_IOR (vmode, dest, scratch);
14695 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14696 }
14697
14698 /* Return TRUE or FALSE depending on whether the first SET in INSN
14699 has source and destination with matching CC modes, and that the
14700 CC mode is at least as constrained as REQ_MODE. */
14701
14702 int
14703 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14704 {
14705 rtx set;
14706 enum machine_mode set_mode;
14707
14708 set = PATTERN (insn);
14709 if (GET_CODE (set) == PARALLEL)
14710 set = XVECEXP (set, 0, 0);
14711 gcc_assert (GET_CODE (set) == SET);
14712 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14713
14714 set_mode = GET_MODE (SET_DEST (set));
14715 switch (set_mode)
14716 {
14717 case CCNOmode:
14718 if (req_mode != CCNOmode
14719 && (req_mode != CCmode
14720 || XEXP (SET_SRC (set), 1) != const0_rtx))
14721 return 0;
14722 break;
14723 case CCmode:
14724 if (req_mode == CCGCmode)
14725 return 0;
14726 /* FALLTHRU */
14727 case CCGCmode:
14728 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14729 return 0;
14730 /* FALLTHRU */
14731 case CCGOCmode:
14732 if (req_mode == CCZmode)
14733 return 0;
14734 /* FALLTHRU */
14735 case CCAmode:
14736 case CCCmode:
14737 case CCOmode:
14738 case CCSmode:
14739 case CCZmode:
14740 break;
14741
14742 default:
14743 gcc_unreachable ();
14744 }
14745
14746 return (GET_MODE (SET_SRC (set)) == set_mode);
14747 }
14748
14749 /* Generate insn patterns to do an integer compare of OPERANDS. */
14750
14751 static rtx
14752 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14753 {
14754 enum machine_mode cmpmode;
14755 rtx tmp, flags;
14756
14757 cmpmode = SELECT_CC_MODE (code, op0, op1);
14758 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14759
14760 /* This is very simple, but making the interface the same as in the
14761 FP case makes the rest of the code easier. */
14762 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14763 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14764
14765 /* Return the test that should be put into the flags user, i.e.
14766 the bcc, scc, or cmov instruction. */
14767 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14768 }
14769
14770 /* Figure out whether to use ordered or unordered fp comparisons.
14771 Return the appropriate mode to use. */
14772
14773 enum machine_mode
14774 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14775 {
14776 /* ??? In order to make all comparisons reversible, we do all comparisons
14777 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14778 between all trapping and nontrapping forms of comparisons, we can make
14779 inequality comparisons trapping again, since that results in better code
14780 when using FCOM based compares. */
14781 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14782 }
14783
14784 enum machine_mode
14785 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14786 {
14787 enum machine_mode mode = GET_MODE (op0);
14788
14789 if (SCALAR_FLOAT_MODE_P (mode))
14790 {
14791 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14792 return ix86_fp_compare_mode (code);
14793 }
14794
14795 switch (code)
14796 {
14797 /* Only zero flag is needed. */
14798 case EQ: /* ZF=0 */
14799 case NE: /* ZF!=0 */
14800 return CCZmode;
14801 /* Codes needing carry flag. */
14802 case GEU: /* CF=0 */
14803 case LTU: /* CF=1 */
14804 /* Detect overflow checks. They need just the carry flag. */
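/* E.g. (a + b) LTU a is true exactly when the addition wrapped around,
   i.e. when the hardware carry flag is set.  */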
14805 if (GET_CODE (op0) == PLUS
14806 && rtx_equal_p (op1, XEXP (op0, 0)))
14807 return CCCmode;
14808 else
14809 return CCmode;
14810 case GTU: /* CF=0 & ZF=0 */
14811 case LEU: /* CF=1 | ZF=1 */
14812 /* Detect overflow checks. They need just the carry flag. */
14813 if (GET_CODE (op0) == MINUS
14814 && rtx_equal_p (op1, XEXP (op0, 0)))
14815 return CCCmode;
14816 else
14817 return CCmode;
14818 /* Codes possibly doable only with sign flag when
14819 comparing against zero. */
14820 case GE: /* SF=OF or SF=0 */
14821 case LT: /* SF<>OF or SF=1 */
14822 if (op1 == const0_rtx)
14823 return CCGOCmode;
14824 else
14825 /* For other cases Carry flag is not required. */
14826 return CCGCmode;
14827 /* Codes doable only with sign flag when comparing
14828 against zero, but we miss jump instruction for it
14829 so we need to use relational tests against overflow
14830 that thus needs to be zero. */
14831 case GT: /* ZF=0 & SF=OF */
14832 case LE: /* ZF=1 | SF<>OF */
14833 if (op1 == const0_rtx)
14834 return CCNOmode;
14835 else
14836 return CCGCmode;
14837 /* The strcmp pattern does a (use flags), and combine may ask us for the
14838 proper mode. */
14839 case USE:
14840 return CCmode;
14841 default:
14842 gcc_unreachable ();
14843 }
14844 }
14845
14846 /* Return the fixed registers used for condition codes. */
14847
14848 static bool
14849 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14850 {
14851 *p1 = FLAGS_REG;
14852 *p2 = FPSR_REG;
14853 return true;
14854 }
14855
14856 /* If two condition code modes are compatible, return a condition code
14857 mode which is compatible with both. Otherwise, return
14858 VOIDmode. */
14859
14860 static enum machine_mode
14861 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14862 {
14863 if (m1 == m2)
14864 return m1;
14865
14866 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14867 return VOIDmode;
14868
14869 if ((m1 == CCGCmode && m2 == CCGOCmode)
14870 || (m1 == CCGOCmode && m2 == CCGCmode))
14871 return CCGCmode;
14872
14873 switch (m1)
14874 {
14875 default:
14876 gcc_unreachable ();
14877
14878 case CCmode:
14879 case CCGCmode:
14880 case CCGOCmode:
14881 case CCNOmode:
14882 case CCAmode:
14883 case CCCmode:
14884 case CCOmode:
14885 case CCSmode:
14886 case CCZmode:
14887 switch (m2)
14888 {
14889 default:
14890 return VOIDmode;
14891
14892 case CCmode:
14893 case CCGCmode:
14894 case CCGOCmode:
14895 case CCNOmode:
14896 case CCAmode:
14897 case CCCmode:
14898 case CCOmode:
14899 case CCSmode:
14900 case CCZmode:
14901 return CCmode;
14902 }
14903
14904 case CCFPmode:
14905 case CCFPUmode:
14906 /* These are only compatible with themselves, which we already
14907 checked above. */
14908 return VOIDmode;
14909 }
14910 }
14911
14912
14913 /* Return a comparison we can do that is equivalent to
14914 swap_condition (code), apart possibly from orderedness.
14915 Never change orderedness if TARGET_IEEE_FP, returning
14916 UNKNOWN in that case if necessary. */
14917
14918 static enum rtx_code
14919 ix86_fp_swap_condition (enum rtx_code code)
14920 {
14921 switch (code)
14922 {
14923 case GT: /* GTU - CF=0 & ZF=0 */
14924 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14925 case GE: /* GEU - CF=0 */
14926 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14927 case UNLT: /* LTU - CF=1 */
14928 return TARGET_IEEE_FP ? UNKNOWN : GT;
14929 case UNLE: /* LEU - CF=1 | ZF=1 */
14930 return TARGET_IEEE_FP ? UNKNOWN : GE;
14931 default:
14932 return swap_condition (code);
14933 }
14934 }
14935
14936 /* Return the cost of comparison CODE using the best strategy for performance.
14937 All of the following functions use the number of instructions as the cost metric.
14938 In the future this should be tweaked to compute bytes for optimize_size and
14939 to take into account the performance of various instructions on various CPUs. */
14940
14941 static int
14942 ix86_fp_comparison_cost (enum rtx_code code)
14943 {
14944 int arith_cost;
14945
14946 /* The cost of code using bit-twiddling on %ah. */
14947 switch (code)
14948 {
14949 case UNLE:
14950 case UNLT:
14951 case LTGT:
14952 case GT:
14953 case GE:
14954 case UNORDERED:
14955 case ORDERED:
14956 case UNEQ:
14957 arith_cost = 4;
14958 break;
14959 case LT:
14960 case NE:
14961 case EQ:
14962 case UNGE:
14963 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14964 break;
14965 case LE:
14966 case UNGT:
14967 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14968 break;
14969 default:
14970 gcc_unreachable ();
14971 }
14972
14973 switch (ix86_fp_comparison_strategy (code))
14974 {
14975 case IX86_FPCMP_COMI:
14976 return arith_cost > 4 ? 3 : 2;
14977 case IX86_FPCMP_SAHF:
14978 return arith_cost > 4 ? 4 : 3;
14979 default:
14980 return arith_cost;
14981 }
14982 }
14983
14984 /* Return the strategy to use for floating-point comparisons. We assume that
14985 fcomi is always preferable where available, since that is also true when
14986 looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14987
14988 enum ix86_fpcmp_strategy
14989 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14990 {
14991 /* Do fcomi/sahf based test when profitable. */
14992
14993 if (TARGET_CMOVE)
14994 return IX86_FPCMP_COMI;
14995
14996 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14997 return IX86_FPCMP_SAHF;
14998
14999 return IX86_FPCMP_ARITH;
15000 }
15001
15002 /* Swap, force into registers, or otherwise massage the two operands
15003 to a fp comparison. The operands are updated in place; the new
15004 comparison code is returned. */
15005
15006 static enum rtx_code
15007 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
15008 {
15009 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
15010 rtx op0 = *pop0, op1 = *pop1;
15011 enum machine_mode op_mode = GET_MODE (op0);
15012 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
15013
15014 /* All of the unordered compare instructions only work on registers.
15015 The same is true of the fcomi compare instructions. The XFmode
15016 compare instructions require registers except when comparing
15017 against zero or when converting operand 1 from fixed point to
15018 floating point. */
15019
15020 if (!is_sse
15021 && (fpcmp_mode == CCFPUmode
15022 || (op_mode == XFmode
15023 && ! (standard_80387_constant_p (op0) == 1
15024 || standard_80387_constant_p (op1) == 1)
15025 && GET_CODE (op1) != FLOAT)
15026 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
15027 {
15028 op0 = force_reg (op_mode, op0);
15029 op1 = force_reg (op_mode, op1);
15030 }
15031 else
15032 {
15033 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
15034 things around if they appear profitable, otherwise force op0
15035 into a register. */
15036
15037 if (standard_80387_constant_p (op0) == 0
15038 || (MEM_P (op0)
15039 && ! (standard_80387_constant_p (op1) == 0
15040 || MEM_P (op1))))
15041 {
15042 enum rtx_code new_code = ix86_fp_swap_condition (code);
15043 if (new_code != UNKNOWN)
15044 {
15045 rtx tmp;
15046 tmp = op0, op0 = op1, op1 = tmp;
15047 code = new_code;
15048 }
15049 }
15050
15051 if (!REG_P (op0))
15052 op0 = force_reg (op_mode, op0);
15053
15054 if (CONSTANT_P (op1))
15055 {
15056 int tmp = standard_80387_constant_p (op1);
15057 if (tmp == 0)
15058 op1 = validize_mem (force_const_mem (op_mode, op1));
15059 else if (tmp == 1)
15060 {
15061 if (TARGET_CMOVE)
15062 op1 = force_reg (op_mode, op1);
15063 }
15064 else
15065 op1 = force_reg (op_mode, op1);
15066 }
15067 }
15068
15069 /* Try to rearrange the comparison to make it cheaper. */
15070 if (ix86_fp_comparison_cost (code)
15071 > ix86_fp_comparison_cost (swap_condition (code))
15072 && (REG_P (op1) || can_create_pseudo_p ()))
15073 {
15074 rtx tmp;
15075 tmp = op0, op0 = op1, op1 = tmp;
15076 code = swap_condition (code);
15077 if (!REG_P (op0))
15078 op0 = force_reg (op_mode, op0);
15079 }
15080
15081 *pop0 = op0;
15082 *pop1 = op1;
15083 return code;
15084 }
15085
15086 /* Convert the comparison codes we use to represent FP comparisons to the
15087 integer codes that will result in proper branches. Return UNKNOWN if no
15088 such code is available. */
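/* This works because fcomi/comi (and fnstsw+sahf) leave the FP result in
   CF and ZF exactly the way an unsigned integer compare would: above sets
   neither flag, below sets CF, equal sets ZF, unordered sets both (and PF).  */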
15089
15090 enum rtx_code
15091 ix86_fp_compare_code_to_integer (enum rtx_code code)
15092 {
15093 switch (code)
15094 {
15095 case GT:
15096 return GTU;
15097 case GE:
15098 return GEU;
15099 case ORDERED:
15100 case UNORDERED:
15101 return code;
15102 break;
15103 case UNEQ:
15104 return EQ;
15105 break;
15106 case UNLT:
15107 return LTU;
15108 break;
15109 case UNLE:
15110 return LEU;
15111 break;
15112 case LTGT:
15113 return NE;
15114 break;
15115 default:
15116 return UNKNOWN;
15117 }
15118 }
15119
15120 /* Generate insn patterns to do a floating point compare of OPERANDS. */
15121
15122 static rtx
15123 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
15124 {
15125 enum machine_mode fpcmp_mode, intcmp_mode;
15126 rtx tmp, tmp2;
15127
15128 fpcmp_mode = ix86_fp_compare_mode (code);
15129 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
15130
15131 /* Do fcomi/sahf based test when profitable. */
15132 switch (ix86_fp_comparison_strategy (code))
15133 {
15134 case IX86_FPCMP_COMI:
15135 intcmp_mode = fpcmp_mode;
15136 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15137 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15138 tmp);
15139 emit_insn (tmp);
15140 break;
15141
15142 case IX86_FPCMP_SAHF:
15143 intcmp_mode = fpcmp_mode;
15144 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15145 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15146 tmp);
15147
15148 if (!scratch)
15149 scratch = gen_reg_rtx (HImode);
15150 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15151 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15152 break;
15153
15154 case IX86_FPCMP_ARITH:
15155 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15156 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15157 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15158 if (!scratch)
15159 scratch = gen_reg_rtx (HImode);
15160 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15161
15162 /* In the unordered case, we have to check C2 for NaN's, which
15163 doesn't happen to work out to anything nice combination-wise.
15164 So do some bit twiddling on the value we've got in AH to come
15165 up with an appropriate set of condition codes. */
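/* After fnstsw, %ah holds the x87 condition bits C0 (0x01), C2 (0x04)
   and C3 (0x40).  fcom sets them to: op0 > op1 -> none, op0 < op1 -> C0,
   op0 == op1 -> C3, unordered -> C0|C2|C3; hence the 0x45, 0x40, 0x05
   and 0x04 constants tested below.  */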
15166
15167 intcmp_mode = CCNOmode;
15168 switch (code)
15169 {
15170 case GT:
15171 case UNGT:
15172 if (code == GT || !TARGET_IEEE_FP)
15173 {
15174 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15175 code = EQ;
15176 }
15177 else
15178 {
15179 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15180 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15181 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15182 intcmp_mode = CCmode;
15183 code = GEU;
15184 }
15185 break;
15186 case LT:
15187 case UNLT:
15188 if (code == LT && TARGET_IEEE_FP)
15189 {
15190 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15191 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15192 intcmp_mode = CCmode;
15193 code = EQ;
15194 }
15195 else
15196 {
15197 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15198 code = NE;
15199 }
15200 break;
15201 case GE:
15202 case UNGE:
15203 if (code == GE || !TARGET_IEEE_FP)
15204 {
15205 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15206 code = EQ;
15207 }
15208 else
15209 {
15210 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15211 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15212 code = NE;
15213 }
15214 break;
15215 case LE:
15216 case UNLE:
15217 if (code == LE && TARGET_IEEE_FP)
15218 {
15219 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15220 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15221 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15222 intcmp_mode = CCmode;
15223 code = LTU;
15224 }
15225 else
15226 {
15227 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15228 code = NE;
15229 }
15230 break;
15231 case EQ:
15232 case UNEQ:
15233 if (code == EQ && TARGET_IEEE_FP)
15234 {
15235 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15236 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15237 intcmp_mode = CCmode;
15238 code = EQ;
15239 }
15240 else
15241 {
15242 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15243 code = NE;
15244 }
15245 break;
15246 case NE:
15247 case LTGT:
15248 if (code == NE && TARGET_IEEE_FP)
15249 {
15250 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15251 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15252 GEN_INT (0x40)));
15253 code = NE;
15254 }
15255 else
15256 {
15257 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15258 code = EQ;
15259 }
15260 break;
15261
15262 case UNORDERED:
15263 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15264 code = NE;
15265 break;
15266 case ORDERED:
15267 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15268 code = EQ;
15269 break;
15270
15271 default:
15272 gcc_unreachable ();
15273 }
15274 break;
15275
15276 default:
15277 gcc_unreachable();
15278 }
15279
15280 /* Return the test that should be put into the flags user, i.e.
15281 the bcc, scc, or cmov instruction. */
15282 return gen_rtx_fmt_ee (code, VOIDmode,
15283 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15284 const0_rtx);
15285 }
15286
15287 rtx
15288 ix86_expand_compare (enum rtx_code code)
15289 {
15290 rtx op0, op1, ret;
15291 op0 = ix86_compare_op0;
15292 op1 = ix86_compare_op1;
15293
15294 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15295 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15296
15297 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15298 {
15299 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15300 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15301 }
15302 else
15303 ret = ix86_expand_int_compare (code, op0, op1);
15304
15305 return ret;
15306 }
15307
15308 void
15309 ix86_expand_branch (enum rtx_code code, rtx label)
15310 {
15311 rtx tmp;
15312
15313 switch (GET_MODE (ix86_compare_op0))
15314 {
15315 case SFmode:
15316 case DFmode:
15317 case XFmode:
15318 case QImode:
15319 case HImode:
15320 case SImode:
15321 simple:
15322 tmp = ix86_expand_compare (code);
15323 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15324 gen_rtx_LABEL_REF (VOIDmode, label),
15325 pc_rtx);
15326 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15327 return;
15328
15329 case DImode:
15330 if (TARGET_64BIT)
15331 goto simple;
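/* FALLTHRU - !TARGET_64BIT DImode compares take the split path below.  */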
15332 case TImode:
15333 /* Expand DImode branch into multiple compare+branch. */
15334 {
15335 rtx lo[2], hi[2], label2;
15336 enum rtx_code code1, code2, code3;
15337 enum machine_mode submode;
15338
15339 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15340 {
15341 tmp = ix86_compare_op0;
15342 ix86_compare_op0 = ix86_compare_op1;
15343 ix86_compare_op1 = tmp;
15344 code = swap_condition (code);
15345 }
15346 if (GET_MODE (ix86_compare_op0) == DImode)
15347 {
15348 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15349 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15350 submode = SImode;
15351 }
15352 else
15353 {
15354 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15355 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15356 submode = DImode;
15357 }
15358
15359 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15360 avoid two branches. This costs one extra insn, so disable when
15361 optimizing for size. */
15362
15363 if ((code == EQ || code == NE)
15364 && (!optimize_insn_for_size_p ()
15365 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15366 {
15367 rtx xor0, xor1;
15368
15369 xor1 = hi[0];
15370 if (hi[1] != const0_rtx)
15371 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15372 NULL_RTX, 0, OPTAB_WIDEN);
15373
15374 xor0 = lo[0];
15375 if (lo[1] != const0_rtx)
15376 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15377 NULL_RTX, 0, OPTAB_WIDEN);
15378
15379 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15380 NULL_RTX, 0, OPTAB_WIDEN);
15381
15382 ix86_compare_op0 = tmp;
15383 ix86_compare_op1 = const0_rtx;
15384 ix86_expand_branch (code, label);
15385 return;
15386 }
15387
15388 /* Otherwise, if we are doing less-than or greater-or-equal-than,
15389 op1 is a constant and the low word is zero, then we can just
15390 examine the high word. Similarly for low word -1 and
15391 less-or-equal-than or greater-than. */
15392
15393 if (CONST_INT_P (hi[1]))
15394 switch (code)
15395 {
15396 case LT: case LTU: case GE: case GEU:
15397 if (lo[1] == const0_rtx)
15398 {
15399 ix86_compare_op0 = hi[0];
15400 ix86_compare_op1 = hi[1];
15401 ix86_expand_branch (code, label);
15402 return;
15403 }
15404 break;
15405 case LE: case LEU: case GT: case GTU:
15406 if (lo[1] == constm1_rtx)
15407 {
15408 ix86_compare_op0 = hi[0];
15409 ix86_compare_op1 = hi[1];
15410 ix86_expand_branch (code, label);
15411 return;
15412 }
15413 break;
15414 default:
15415 break;
15416 }
15417
15418 /* Otherwise, we need two or three jumps. */
15419
15420 label2 = gen_label_rtx ();
15421
15422 code1 = code;
15423 code2 = swap_condition (code);
15424 code3 = unsigned_condition (code);
15425
15426 switch (code)
15427 {
15428 case LT: case GT: case LTU: case GTU:
15429 break;
15430
15431 case LE: code1 = LT; code2 = GT; break;
15432 case GE: code1 = GT; code2 = LT; break;
15433 case LEU: code1 = LTU; code2 = GTU; break;
15434 case GEU: code1 = GTU; code2 = LTU; break;
15435
15436 case EQ: code1 = UNKNOWN; code2 = NE; break;
15437 case NE: code2 = UNKNOWN; break;
15438
15439 default:
15440 gcc_unreachable ();
15441 }
15442
15443 /*
15444 * a < b =>
15445 * if (hi(a) < hi(b)) goto true;
15446 * if (hi(a) > hi(b)) goto false;
15447 * if (lo(a) < lo(b)) goto true;
15448 * false:
15449 */
15450
15451 ix86_compare_op0 = hi[0];
15452 ix86_compare_op1 = hi[1];
15453
15454 if (code1 != UNKNOWN)
15455 ix86_expand_branch (code1, label);
15456 if (code2 != UNKNOWN)
15457 ix86_expand_branch (code2, label2);
15458
15459 ix86_compare_op0 = lo[0];
15460 ix86_compare_op1 = lo[1];
15461 ix86_expand_branch (code3, label);
15462
15463 if (code2 != UNKNOWN)
15464 emit_label (label2);
15465 return;
15466 }
15467
15468 default:
15469 /* If we have already emitted a compare insn, go straight to simple.
15470 In that case ix86_compare_op0 holds the flags register in a CC mode,
15471 which the assertion below verifies. */
15472 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15473 goto simple;
15474 }
15475 }
15476
15477 /* Split branch based on floating point condition. */
15478 void
15479 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15480 rtx target1, rtx target2, rtx tmp, rtx pushed)
15481 {
15482 rtx condition;
15483 rtx i;
15484
15485 if (target2 != pc_rtx)
15486 {
15487 rtx tmp = target2;
15488 code = reverse_condition_maybe_unordered (code);
15489 target2 = target1;
15490 target1 = tmp;
15491 }
15492
15493 condition = ix86_expand_fp_compare (code, op1, op2,
15494 tmp);
15495
15496 /* Remove pushed operand from stack. */
15497 if (pushed)
15498 ix86_free_from_memory (GET_MODE (pushed));
15499
15500 i = emit_jump_insn (gen_rtx_SET
15501 (VOIDmode, pc_rtx,
15502 gen_rtx_IF_THEN_ELSE (VOIDmode,
15503 condition, target1, target2)));
15504 if (split_branch_probability >= 0)
15505 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15506 }
15507
15508 void
15509 ix86_expand_setcc (enum rtx_code code, rtx dest)
15510 {
15511 rtx ret;
15512
15513 gcc_assert (GET_MODE (dest) == QImode);
15514
15515 ret = ix86_expand_compare (code);
15516 PUT_MODE (ret, QImode);
15517 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15518 }
15519
15520 /* Expand comparison setting or clearing carry flag. Return true when
15521 successful and set pop for the operation. */
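/* On success the returned LTU/GEU test lets the caller turn the carry flag
   directly into a 0/-1 mask with a single sbb (see the x86_mov*cc_0_m1 uses
   in ix86_expand_int_movcc below).  */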
15522 static bool
15523 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15524 {
15525 enum machine_mode mode =
15526 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15527
15528 /* Do not handle DImode compares that go through special path. */
15529 if (mode == (TARGET_64BIT ? TImode : DImode))
15530 return false;
15531
15532 if (SCALAR_FLOAT_MODE_P (mode))
15533 {
15534 rtx compare_op, compare_seq;
15535
15536 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15537
15538 /* Shortcut: following common codes never translate
15539 into carry flag compares. */
15540 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15541 || code == ORDERED || code == UNORDERED)
15542 return false;
15543
15544 /* These comparisons require zero flag; swap operands so they won't. */
15545 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15546 && !TARGET_IEEE_FP)
15547 {
15548 rtx tmp = op0;
15549 op0 = op1;
15550 op1 = tmp;
15551 code = swap_condition (code);
15552 }
15553
15554 /* Try to expand the comparison and verify that we end up with a
15555 carry flag based comparison. This fails only when we decide to
15556 expand the comparison using arithmetic, which is not a very
15557 common scenario. */
15558 start_sequence ();
15559 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15560 compare_seq = get_insns ();
15561 end_sequence ();
15562
15563 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15564 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15565 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15566 else
15567 code = GET_CODE (compare_op);
15568
15569 if (code != LTU && code != GEU)
15570 return false;
15571
15572 emit_insn (compare_seq);
15573 *pop = compare_op;
15574 return true;
15575 }
15576
15577 if (!INTEGRAL_MODE_P (mode))
15578 return false;
15579
15580 switch (code)
15581 {
15582 case LTU:
15583 case GEU:
15584 break;
15585
15586 /* Convert a==0 into (unsigned)a<1. */
15587 case EQ:
15588 case NE:
15589 if (op1 != const0_rtx)
15590 return false;
15591 op1 = const1_rtx;
15592 code = (code == EQ ? LTU : GEU);
15593 break;
15594
15595 /* Convert a>b into b<a or a>=b+1. */
15596 case GTU:
15597 case LEU:
15598 if (CONST_INT_P (op1))
15599 {
15600 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15601 /* Bail out on overflow. We could still swap the operands, but
15602 that would force loading of the constant into a register. */
15603 if (op1 == const0_rtx
15604 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15605 return false;
15606 code = (code == GTU ? GEU : LTU);
15607 }
15608 else
15609 {
15610 rtx tmp = op1;
15611 op1 = op0;
15612 op0 = tmp;
15613 code = (code == GTU ? LTU : GEU);
15614 }
15615 break;
15616
15617 /* Convert a>=0 into (unsigned)a<0x80000000. */
15618 case LT:
15619 case GE:
15620 if (mode == DImode || op1 != const0_rtx)
15621 return false;
15622 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15623 code = (code == LT ? GEU : LTU);
15624 break;
15625 case LE:
15626 case GT:
15627 if (mode == DImode || op1 != constm1_rtx)
15628 return false;
15629 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15630 code = (code == LE ? GEU : LTU);
15631 break;
15632
15633 default:
15634 return false;
15635 }
15636 /* Swapping operands may cause constant to appear as first operand. */
15637 if (!nonimmediate_operand (op0, VOIDmode))
15638 {
15639 if (!can_create_pseudo_p ())
15640 return false;
15641 op0 = force_reg (mode, op0);
15642 }
15643 ix86_compare_op0 = op0;
15644 ix86_compare_op1 = op1;
15645 *pop = ix86_expand_compare (code);
15646 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15647 return true;
15648 }
15649
15650 int
15651 ix86_expand_int_movcc (rtx operands[])
15652 {
15653 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15654 rtx compare_seq, compare_op;
15655 enum machine_mode mode = GET_MODE (operands[0]);
15656 bool sign_bit_compare_p = false;
15657
15658 start_sequence ();
15659 ix86_compare_op0 = XEXP (operands[1], 0);
15660 ix86_compare_op1 = XEXP (operands[1], 1);
15661 compare_op = ix86_expand_compare (code);
15662 compare_seq = get_insns ();
15663 end_sequence ();
15664
15665 compare_code = GET_CODE (compare_op);
15666
15667 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15668 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15669 sign_bit_compare_p = true;
15670
15671 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15672 HImode insns, we'd be swallowed in word prefix ops. */
15673
15674 if ((mode != HImode || TARGET_FAST_PREFIX)
15675 && (mode != (TARGET_64BIT ? TImode : DImode))
15676 && CONST_INT_P (operands[2])
15677 && CONST_INT_P (operands[3]))
15678 {
15679 rtx out = operands[0];
15680 HOST_WIDE_INT ct = INTVAL (operands[2]);
15681 HOST_WIDE_INT cf = INTVAL (operands[3]);
15682 HOST_WIDE_INT diff;
15683
15684 diff = ct - cf;
15685 /* Sign bit compares are better done using shifts than by using
15686 sbb. */
15687 if (sign_bit_compare_p
15688 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15689 ix86_compare_op1, &compare_op))
15690 {
15691 /* Detect overlap between destination and compare sources. */
15692 rtx tmp = out;
15693
15694 if (!sign_bit_compare_p)
15695 {
15696 rtx flags;
15697 bool fpcmp = false;
15698
15699 compare_code = GET_CODE (compare_op);
15700
15701 flags = XEXP (compare_op, 0);
15702
15703 if (GET_MODE (flags) == CCFPmode
15704 || GET_MODE (flags) == CCFPUmode)
15705 {
15706 fpcmp = true;
15707 compare_code
15708 = ix86_fp_compare_code_to_integer (compare_code);
15709 }
15710
15711 /* To simplify rest of code, restrict to the GEU case. */
15712 if (compare_code == LTU)
15713 {
15714 HOST_WIDE_INT tmp = ct;
15715 ct = cf;
15716 cf = tmp;
15717 compare_code = reverse_condition (compare_code);
15718 code = reverse_condition (code);
15719 }
15720 else
15721 {
15722 if (fpcmp)
15723 PUT_CODE (compare_op,
15724 reverse_condition_maybe_unordered
15725 (GET_CODE (compare_op)));
15726 else
15727 PUT_CODE (compare_op,
15728 reverse_condition (GET_CODE (compare_op)));
15729 }
15730 diff = ct - cf;
15731
15732 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15733 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15734 tmp = gen_reg_rtx (mode);
15735
15736 if (mode == DImode)
15737 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15738 else
15739 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15740 flags, compare_op));
15741 }
15742 else
15743 {
15744 if (code == GT || code == GE)
15745 code = reverse_condition (code);
15746 else
15747 {
15748 HOST_WIDE_INT tmp = ct;
15749 ct = cf;
15750 cf = tmp;
15751 diff = ct - cf;
15752 }
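/* With a last argument of -1, emit_store_flag produces the 0/-1 mask
   directly; for these sign bit tests that is typically just an
   arithmetic right shift (sarl $31).  */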
15753 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15754 ix86_compare_op1, VOIDmode, 0, -1);
15755 }
15756
15757 if (diff == 1)
15758 {
15759 /*
15760 * cmpl op0,op1
15761 * sbbl dest,dest
15762 * [addl dest, ct]
15763 *
15764 * Size 5 - 8.
15765 */
15766 if (ct)
15767 tmp = expand_simple_binop (mode, PLUS,
15768 tmp, GEN_INT (ct),
15769 copy_rtx (tmp), 1, OPTAB_DIRECT);
15770 }
15771 else if (cf == -1)
15772 {
15773 /*
15774 * cmpl op0,op1
15775 * sbbl dest,dest
15776 * orl $ct, dest
15777 *
15778 * Size 8.
15779 */
15780 tmp = expand_simple_binop (mode, IOR,
15781 tmp, GEN_INT (ct),
15782 copy_rtx (tmp), 1, OPTAB_DIRECT);
15783 }
15784 else if (diff == -1 && ct)
15785 {
15786 /*
15787 * cmpl op0,op1
15788 * sbbl dest,dest
15789 * notl dest
15790 * [addl dest, cf]
15791 *
15792 * Size 8 - 11.
15793 */
15794 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15795 if (cf)
15796 tmp = expand_simple_binop (mode, PLUS,
15797 copy_rtx (tmp), GEN_INT (cf),
15798 copy_rtx (tmp), 1, OPTAB_DIRECT);
15799 }
15800 else
15801 {
15802 /*
15803 * cmpl op0,op1
15804 * sbbl dest,dest
15805 * [notl dest]
15806 * andl cf - ct, dest
15807 * [addl dest, ct]
15808 *
15809 * Size 8 - 11.
15810 */
15811
15812 if (cf == 0)
15813 {
15814 cf = ct;
15815 ct = 0;
15816 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15817 }
15818
15819 tmp = expand_simple_binop (mode, AND,
15820 copy_rtx (tmp),
15821 gen_int_mode (cf - ct, mode),
15822 copy_rtx (tmp), 1, OPTAB_DIRECT);
15823 if (ct)
15824 tmp = expand_simple_binop (mode, PLUS,
15825 copy_rtx (tmp), GEN_INT (ct),
15826 copy_rtx (tmp), 1, OPTAB_DIRECT);
15827 }
15828
15829 if (!rtx_equal_p (tmp, out))
15830 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15831
15832 return 1; /* DONE */
15833 }
15834
15835 if (diff < 0)
15836 {
15837 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15838
15839 HOST_WIDE_INT tmp;
15840 tmp = ct, ct = cf, cf = tmp;
15841 diff = -diff;
15842
15843 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15844 {
15845 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15846
15847 /* We may be reversing unordered compare to normal compare, that
15848 is not valid in general (we may convert non-trapping condition
15849 to trapping one), however on i386 we currently emit all
15850 comparisons unordered. */
15851 compare_code = reverse_condition_maybe_unordered (compare_code);
15852 code = reverse_condition_maybe_unordered (code);
15853 }
15854 else
15855 {
15856 compare_code = reverse_condition (compare_code);
15857 code = reverse_condition (code);
15858 }
15859 }
15860
15861 compare_code = UNKNOWN;
15862 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15863 && CONST_INT_P (ix86_compare_op1))
15864 {
15865 if (ix86_compare_op1 == const0_rtx
15866 && (code == LT || code == GE))
15867 compare_code = code;
15868 else if (ix86_compare_op1 == constm1_rtx)
15869 {
15870 if (code == LE)
15871 compare_code = LT;
15872 else if (code == GT)
15873 compare_code = GE;
15874 }
15875 }
15876
15877 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15878 if (compare_code != UNKNOWN
15879 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15880 && (cf == -1 || ct == -1))
15881 {
15882 /* If lea code below could be used, only optimize
15883 if it results in a 2 insn sequence. */
15884
15885 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15886 || diff == 3 || diff == 5 || diff == 9)
15887 || (compare_code == LT && ct == -1)
15888 || (compare_code == GE && cf == -1))
15889 {
15890 /*
15891 * notl op1 (if necessary)
15892 * sarl $31, op1
15893 * orl cf, op1
15894 */
15895 if (ct != -1)
15896 {
15897 cf = ct;
15898 ct = -1;
15899 code = reverse_condition (code);
15900 }
15901
15902 out = emit_store_flag (out, code, ix86_compare_op0,
15903 ix86_compare_op1, VOIDmode, 0, -1);
15904
15905 out = expand_simple_binop (mode, IOR,
15906 out, GEN_INT (cf),
15907 out, 1, OPTAB_DIRECT);
15908 if (out != operands[0])
15909 emit_move_insn (operands[0], out);
15910
15911 return 1; /* DONE */
15912 }
15913 }
15914
15915
15916 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15917 || diff == 3 || diff == 5 || diff == 9)
15918 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15919 && (mode != DImode
15920 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15921 {
15922 /*
15923 * xorl dest,dest
15924 * cmpl op1,op2
15925 * setcc dest
15926 * lea cf(dest*(ct-cf)),dest
15927 *
15928 * Size 14.
15929 *
15930 * This also catches the degenerate setcc-only case.
15931 */
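/* For example, with ct = 5 and cf = 2 the setcc result (0 or 1) is
   scaled by diff = 3 and offset by 2, e.g. leal 2(%eax,%eax,2),
   giving 2 or 5 without a branch.  */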
15932
15933 rtx tmp;
15934 int nops;
15935
15936 out = emit_store_flag (out, code, ix86_compare_op0,
15937 ix86_compare_op1, VOIDmode, 0, 1);
15938
15939 nops = 0;
15940 /* On x86_64 the lea instruction operates on Pmode, so we need
15941 to get the arithmetic done in the proper mode to match. */
15942 if (diff == 1)
15943 tmp = copy_rtx (out);
15944 else
15945 {
15946 rtx out1;
15947 out1 = copy_rtx (out);
15948 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15949 nops++;
15950 if (diff & 1)
15951 {
15952 tmp = gen_rtx_PLUS (mode, tmp, out1);
15953 nops++;
15954 }
15955 }
15956 if (cf != 0)
15957 {
15958 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15959 nops++;
15960 }
15961 if (!rtx_equal_p (tmp, out))
15962 {
15963 if (nops == 1)
15964 out = force_operand (tmp, copy_rtx (out));
15965 else
15966 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15967 }
15968 if (!rtx_equal_p (out, operands[0]))
15969 emit_move_insn (operands[0], copy_rtx (out));
15970
15971 return 1; /* DONE */
15972 }
15973
15974 /*
15975 * General case: Jumpful:
15976 * xorl dest,dest cmpl op1, op2
15977 * cmpl op1, op2 movl ct, dest
15978 * setcc dest jcc 1f
15979 * decl dest movl cf, dest
15980 * andl (cf-ct),dest 1:
15981 * addl ct,dest
15982 *
15983 * Size 20. Size 14.
15984 *
15985 * This is reasonably steep, but branch mispredict costs are
15986 * high on modern cpus, so consider failing only if optimizing
15987 * for space.
15988 */
15989
15990 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15991 && BRANCH_COST (optimize_insn_for_speed_p (),
15992 false) >= 2)
15993 {
15994 if (cf == 0)
15995 {
15996 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15997
15998 cf = ct;
15999 ct = 0;
16000
16001 if (SCALAR_FLOAT_MODE_P (cmp_mode))
16002 {
16003 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
16004
16005 /* We may be reversing unordered compare to normal compare,
16006 that is not valid in general (we may convert non-trapping
16007 condition to trapping one), however on i386 we currently
16008 emit all comparisons unordered. */
16009 code = reverse_condition_maybe_unordered (code);
16010 }
16011 else
16012 {
16013 code = reverse_condition (code);
16014 if (compare_code != UNKNOWN)
16015 compare_code = reverse_condition (compare_code);
16016 }
16017 }
16018
16019 if (compare_code != UNKNOWN)
16020 {
16021 /* notl op1 (if needed)
16022 sarl $31, op1
16023 andl (cf-ct), op1
16024 addl ct, op1
16025
16026 For x < 0 (resp. x <= -1) there will be no notl,
16027 so if possible swap the constants to get rid of the
16028 complement.
16029 True/false will be -1/0 while code below (store flag
16030 followed by decrement) is 0/-1, so the constants need
16031 to be exchanged once more. */
16032
16033 if (compare_code == GE || !cf)
16034 {
16035 code = reverse_condition (code);
16036 compare_code = LT;
16037 }
16038 else
16039 {
16040 HOST_WIDE_INT tmp = cf;
16041 cf = ct;
16042 ct = tmp;
16043 }
16044
16045 out = emit_store_flag (out, code, ix86_compare_op0,
16046 ix86_compare_op1, VOIDmode, 0, -1);
16047 }
16048 else
16049 {
16050 out = emit_store_flag (out, code, ix86_compare_op0,
16051 ix86_compare_op1, VOIDmode, 0, 1);
16052
16053 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
16054 copy_rtx (out), 1, OPTAB_DIRECT);
16055 }
16056
16057 out = expand_simple_binop (mode, AND, copy_rtx (out),
16058 gen_int_mode (cf - ct, mode),
16059 copy_rtx (out), 1, OPTAB_DIRECT);
16060 if (ct)
16061 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
16062 copy_rtx (out), 1, OPTAB_DIRECT);
16063 if (!rtx_equal_p (out, operands[0]))
16064 emit_move_insn (operands[0], copy_rtx (out));
16065
16066 return 1; /* DONE */
16067 }
16068 }
16069
16070 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16071 {
16072 /* Try a few things more with specific constants and a variable. */
16073
16074 optab op;
16075 rtx var, orig_out, out, tmp;
16076
16077 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
16078 return 0; /* FAIL */
16079
16080 /* If one of the two operands is an interesting constant, load a
16081 constant with the above and mask it in with a logical operation. */
16082
16083 if (CONST_INT_P (operands[2]))
16084 {
16085 var = operands[3];
16086 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
16087 operands[3] = constm1_rtx, op = and_optab;
16088 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
16089 operands[3] = const0_rtx, op = ior_optab;
16090 else
16091 return 0; /* FAIL */
16092 }
16093 else if (CONST_INT_P (operands[3]))
16094 {
16095 var = operands[2];
16096 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
16097 operands[2] = constm1_rtx, op = and_optab;
16098 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
16099 operands[2] = const0_rtx, op = ior_optab;
16100 else
16101 return 0; /* FAIL */
16102 }
16103 else
16104 return 0; /* FAIL */
16105
16106 orig_out = operands[0];
16107 tmp = gen_reg_rtx (mode);
16108 operands[0] = tmp;
16109
16110 /* Recurse to get the constant loaded. */
16111 if (ix86_expand_int_movcc (operands) == 0)
16112 return 0; /* FAIL */
16113
16114 /* Mask in the interesting variable. */
16115 out = expand_binop (mode, op, var, tmp, orig_out, 0,
16116 OPTAB_WIDEN);
16117 if (!rtx_equal_p (out, orig_out))
16118 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
16119
16120 return 1; /* DONE */
16121 }
16122
16123 /*
16124 * For comparison with above,
16125 *
16126 * movl cf,dest
16127 * movl ct,tmp
16128 * cmpl op1,op2
16129 * cmovcc tmp,dest
16130 *
16131 * Size 15.
16132 */
16133
16134 if (! nonimmediate_operand (operands[2], mode))
16135 operands[2] = force_reg (mode, operands[2]);
16136 if (! nonimmediate_operand (operands[3], mode))
16137 operands[3] = force_reg (mode, operands[3]);
16138
16139 if (! register_operand (operands[2], VOIDmode)
16140 && (mode == QImode
16141 || ! register_operand (operands[3], VOIDmode)))
16142 operands[2] = force_reg (mode, operands[2]);
16143
16144 if (mode == QImode
16145 && ! register_operand (operands[3], VOIDmode))
16146 operands[3] = force_reg (mode, operands[3]);
16147
16148 emit_insn (compare_seq);
16149 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16150 gen_rtx_IF_THEN_ELSE (mode,
16151 compare_op, operands[2],
16152 operands[3])));
16153
16154 return 1; /* DONE */
16155 }
16156
16157 /* Swap, force into registers, or otherwise massage the two operands
16158 to an sse comparison with a mask result. Thus we differ a bit from
16159 ix86_prepare_fp_compare_args which expects to produce a flags result.
16160
16161 The DEST operand exists to help determine whether to commute commutative
16162 operators. The POP0/POP1 operands are updated in place. The new
16163 comparison code is returned, or UNKNOWN if not implementable. */
16164
16165 static enum rtx_code
16166 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16167 rtx *pop0, rtx *pop1)
16168 {
16169 rtx tmp;
16170
16171 switch (code)
16172 {
16173 case LTGT:
16174 case UNEQ:
16175 /* We have no LTGT as an operator. We could implement it with
16176 NE & ORDERED, but this requires an extra temporary. It's
16177 not clear that it's worth it. */
16178 return UNKNOWN;
16179
16180 case LT:
16181 case LE:
16182 case UNGT:
16183 case UNGE:
16184 /* These are supported directly. */
16185 break;
16186
16187 case EQ:
16188 case NE:
16189 case UNORDERED:
16190 case ORDERED:
16191 /* For commutative operators, try to canonicalize the destination
16192 operand to be first in the comparison - this helps reload to
16193 avoid extra moves. */
16194 if (!dest || !rtx_equal_p (dest, *pop1))
16195 break;
16196 /* FALLTHRU */
16197
16198 case GE:
16199 case GT:
16200 case UNLE:
16201 case UNLT:
16202 /* These are not supported directly. Swap the comparison operands
16203 to transform into something that is supported. */
16204 tmp = *pop0;
16205 *pop0 = *pop1;
16206 *pop1 = tmp;
16207 code = swap_condition (code);
16208 break;
16209
16210 default:
16211 gcc_unreachable ();
16212 }
16213
16214 return code;
16215 }
16216
16217 /* Detect conditional moves that exactly match min/max operational
16218 semantics. Note that this is IEEE safe, as long as we don't
16219 interchange the operands.
16220
16221 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16222 and TRUE if the operation is successful and instructions are emitted. */
16223
16224 static bool
16225 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16226 rtx cmp_op1, rtx if_true, rtx if_false)
16227 {
16228 enum machine_mode mode;
16229 bool is_min;
16230 rtx tmp;
16231
16232 if (code == LT)
16233 ;
16234 else if (code == UNGE)
16235 {
16236 tmp = if_true;
16237 if_true = if_false;
16238 if_false = tmp;
16239 }
16240 else
16241 return false;
16242
16243 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16244 is_min = true;
16245 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16246 is_min = false;
16247 else
16248 return false;
16249
16250 mode = GET_MODE (dest);
16251
16252 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16253 but MODE may be a vector mode and thus not appropriate. */
16254 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16255 {
16256 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16257 rtvec v;
16258
16259 if_true = force_reg (mode, if_true);
16260 v = gen_rtvec (2, if_true, if_false);
16261 tmp = gen_rtx_UNSPEC (mode, v, u);
16262 }
16263 else
16264 {
16265 code = is_min ? SMIN : SMAX;
16266 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16267 }
16268
16269 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16270 return true;
16271 }
16272
16273 /* Expand an sse vector comparison. Return the register with the result. */
16274
16275 static rtx
16276 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16277 rtx op_true, rtx op_false)
16278 {
16279 enum machine_mode mode = GET_MODE (dest);
16280 rtx x;
16281
16282 cmp_op0 = force_reg (mode, cmp_op0);
16283 if (!nonimmediate_operand (cmp_op1, mode))
16284 cmp_op1 = force_reg (mode, cmp_op1);
16285
16286 if (optimize
16287 || reg_overlap_mentioned_p (dest, op_true)
16288 || reg_overlap_mentioned_p (dest, op_false))
16289 dest = gen_reg_rtx (mode);
16290
16291 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16292 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16293
16294 return dest;
16295 }
16296
16297 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16298 operations. This is used for both scalar and vector conditional moves. */
16299
16300 static void
16301 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16302 {
16303 enum machine_mode mode = GET_MODE (dest);
16304 rtx t2, t3, x;
16305
16306 if (op_false == CONST0_RTX (mode))
16307 {
16308 op_true = force_reg (mode, op_true);
16309 x = gen_rtx_AND (mode, cmp, op_true);
16310 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16311 }
16312 else if (op_true == CONST0_RTX (mode))
16313 {
16314 op_false = force_reg (mode, op_false);
16315 x = gen_rtx_NOT (mode, cmp);
16316 x = gen_rtx_AND (mode, x, op_false);
16317 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16318 }
16319 else if (TARGET_XOP)
16320 {
16321 rtx pcmov = gen_rtx_SET (mode, dest,
16322 gen_rtx_IF_THEN_ELSE (mode, cmp,
16323 op_true,
16324 op_false));
16325 emit_insn (pcmov);
16326 }
16327 else
16328 {
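/* No native blend available: compute
   dest = (cmp & op_true) | (~cmp & op_false) with AND/ANDN/OR.  */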
16329 op_true = force_reg (mode, op_true);
16330 op_false = force_reg (mode, op_false);
16331
16332 t2 = gen_reg_rtx (mode);
16333 if (optimize)
16334 t3 = gen_reg_rtx (mode);
16335 else
16336 t3 = dest;
16337
16338 x = gen_rtx_AND (mode, op_true, cmp);
16339 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16340
16341 x = gen_rtx_NOT (mode, cmp);
16342 x = gen_rtx_AND (mode, x, op_false);
16343 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16344
16345 x = gen_rtx_IOR (mode, t3, t2);
16346 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16347 }
16348 }
16349
16350 /* Expand a floating-point conditional move. Return true if successful. */
16351
16352 int
16353 ix86_expand_fp_movcc (rtx operands[])
16354 {
16355 enum machine_mode mode = GET_MODE (operands[0]);
16356 enum rtx_code code = GET_CODE (operands[1]);
16357 rtx tmp, compare_op;
16358
16359 ix86_compare_op0 = XEXP (operands[1], 0);
16360 ix86_compare_op1 = XEXP (operands[1], 1);
16361 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16362 {
16363 enum machine_mode cmode;
16364
16365 /* Since we've no cmove for sse registers, don't force bad register
16366 allocation just to gain access to it. Deny movcc when the
16367 comparison mode doesn't match the move mode. */
16368 cmode = GET_MODE (ix86_compare_op0);
16369 if (cmode == VOIDmode)
16370 cmode = GET_MODE (ix86_compare_op1);
16371 if (cmode != mode)
16372 return 0;
16373
16374 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16375 &ix86_compare_op0,
16376 &ix86_compare_op1);
16377 if (code == UNKNOWN)
16378 return 0;
16379
16380 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16381 ix86_compare_op1, operands[2],
16382 operands[3]))
16383 return 1;
16384
16385 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16386 ix86_compare_op1, operands[2], operands[3]);
16387 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16388 return 1;
16389 }
16390
16391 /* The floating point conditional move instructions don't directly
16392 support conditions resulting from a signed integer comparison. */
16393
16394 compare_op = ix86_expand_compare (code);
16395 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16396 {
16397 tmp = gen_reg_rtx (QImode);
16398 ix86_expand_setcc (code, tmp);
16399 code = NE;
16400 ix86_compare_op0 = tmp;
16401 ix86_compare_op1 = const0_rtx;
16402 compare_op = ix86_expand_compare (code);
16403 }
16404
16405 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16406 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16407 operands[2], operands[3])));
16408
16409 return 1;
16410 }
16411
16412 /* Expand a floating-point vector conditional move; a vcond operation
16413 rather than a movcc operation. */
16414
16415 bool
16416 ix86_expand_fp_vcond (rtx operands[])
16417 {
16418 enum rtx_code code = GET_CODE (operands[3]);
16419 rtx cmp;
16420
16421 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16422 &operands[4], &operands[5]);
16423 if (code == UNKNOWN)
16424 return false;
16425
16426 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16427 operands[5], operands[1], operands[2]))
16428 return true;
16429
16430 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16431 operands[1], operands[2]);
16432 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16433 return true;
16434 }
16435
16436 /* Expand a signed/unsigned integral vector conditional move. */
16437
16438 bool
16439 ix86_expand_int_vcond (rtx operands[])
16440 {
16441 enum machine_mode mode = GET_MODE (operands[0]);
16442 enum rtx_code code = GET_CODE (operands[3]);
16443 bool negate = false;
16444 rtx x, cop0, cop1;
16445
16446 cop0 = operands[4];
16447 cop1 = operands[5];
16448
16449 /* XOP supports all of the comparisons on all vector int types. */
16450 if (!TARGET_XOP)
16451 {
16452 /* Canonicalize the comparison to EQ, GT, GTU. */
16453 switch (code)
16454 {
16455 case EQ:
16456 case GT:
16457 case GTU:
16458 break;
16459
16460 case NE:
16461 case LE:
16462 case LEU:
16463 code = reverse_condition (code);
16464 negate = true;
16465 break;
16466
16467 case GE:
16468 case GEU:
16469 code = reverse_condition (code);
16470 negate = true;
16471 /* FALLTHRU */
16472
16473 case LT:
16474 case LTU:
16475 code = swap_condition (code);
16476 x = cop0, cop0 = cop1, cop1 = x;
16477 break;
16478
16479 default:
16480 gcc_unreachable ();
16481 }
16482
16483 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16484 if (mode == V2DImode)
16485 {
16486 switch (code)
16487 {
16488 case EQ:
16489 /* SSE4.1 supports EQ. */
16490 if (!TARGET_SSE4_1)
16491 return false;
16492 break;
16493
16494 case GT:
16495 case GTU:
16496 /* SSE4.2 supports GT/GTU. */
16497 if (!TARGET_SSE4_2)
16498 return false;
16499 break;
16500
16501 default:
16502 gcc_unreachable ();
16503 }
16504 }
16505
16506 /* Unsigned parallel compare is not supported by the hardware.
16507 Play some tricks to turn this into a signed comparison (or an
16508 equality test against 0). */
16509 if (code == GTU)
16510 {
16511 cop0 = force_reg (mode, cop0);
16512
16513 switch (mode)
16514 {
16515 case V4SImode:
16516 case V2DImode:
16517 {
16518 rtx t1, t2, mask;
16519 rtx (*gen_sub3) (rtx, rtx, rtx);
16520
16521 /* Subtract (-(INT MAX) - 1) from both operands to make
16522 them signed. */
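/* Subtracting the sign-bit constant flips the top bit of each
   element, so the unsigned order of the originals equals the signed
   order of the biased values and a plain GT can be used.  */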
16523 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16524 true, false);
16525 gen_sub3 = (mode == V4SImode
16526 ? gen_subv4si3 : gen_subv2di3);
16527 t1 = gen_reg_rtx (mode);
16528 emit_insn (gen_sub3 (t1, cop0, mask));
16529
16530 t2 = gen_reg_rtx (mode);
16531 emit_insn (gen_sub3 (t2, cop1, mask));
16532
16533 cop0 = t1;
16534 cop1 = t2;
16535 code = GT;
16536 }
16537 break;
16538
16539 case V16QImode:
16540 case V8HImode:
16541 /* Perform a parallel unsigned saturating subtraction. */
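	      /* x >u y exactly when the saturating difference x -us y is
		 nonzero, so compare that difference against zero with EQ
		 and flip NEGATE to pick the opposite arms of the
		 conditional move.  */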
16542 x = gen_reg_rtx (mode);
16543 emit_insn (gen_rtx_SET (VOIDmode, x,
16544 gen_rtx_US_MINUS (mode, cop0, cop1)));
16545
16546 cop0 = x;
16547 cop1 = CONST0_RTX (mode);
16548 code = EQ;
16549 negate = !negate;
16550 break;
16551
16552 default:
16553 gcc_unreachable ();
16554 }
16555 }
16556 }
16557
16558 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16559 operands[1+negate], operands[2-negate]);
16560
16561 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16562 operands[2-negate]);
16563 return true;
16564 }
16565
16566 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16567 true if we should do zero extension, else sign extension. HIGH_P is
16568 true if we want the N/2 high elements, else the low elements. */
16569
16570 void
16571 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16572 {
16573 enum machine_mode imode = GET_MODE (operands[1]);
16574 rtx (*unpack)(rtx, rtx, rtx);
16575 rtx se, dest;
16576
16577 switch (imode)
16578 {
16579 case V16QImode:
16580 if (high_p)
16581 unpack = gen_vec_interleave_highv16qi;
16582 else
16583 unpack = gen_vec_interleave_lowv16qi;
16584 break;
16585 case V8HImode:
16586 if (high_p)
16587 unpack = gen_vec_interleave_highv8hi;
16588 else
16589 unpack = gen_vec_interleave_lowv8hi;
16590 break;
16591 case V4SImode:
16592 if (high_p)
16593 unpack = gen_vec_interleave_highv4si;
16594 else
16595 unpack = gen_vec_interleave_lowv4si;
16596 break;
16597 default:
16598 gcc_unreachable ();
16599 }
16600
16601 dest = gen_lowpart (imode, operands[0]);
16602
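  /* The interleave needs a second operand supplying the new high half of
     each element: zero for zero extension, or a full-width copy of each
     element's sign (the mask 0 > OP[1]) for sign extension.  */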
16603 if (unsigned_p)
16604 se = force_reg (imode, CONST0_RTX (imode));
16605 else
16606 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16607 operands[1], pc_rtx, pc_rtx);
16608
16609 emit_insn (unpack (dest, operands[1], se));
16610 }
16611
16612 /* This function performs the same task as ix86_expand_sse_unpack,
16613 but with SSE4.1 instructions. */
16614
16615 void
16616 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16617 {
16618 enum machine_mode imode = GET_MODE (operands[1]);
16619 rtx (*unpack)(rtx, rtx);
16620 rtx src, dest;
16621
16622 switch (imode)
16623 {
16624 case V16QImode:
16625 if (unsigned_p)
16626 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16627 else
16628 unpack = gen_sse4_1_extendv8qiv8hi2;
16629 break;
16630 case V8HImode:
16631 if (unsigned_p)
16632 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16633 else
16634 unpack = gen_sse4_1_extendv4hiv4si2;
16635 break;
16636 case V4SImode:
16637 if (unsigned_p)
16638 unpack = gen_sse4_1_zero_extendv2siv2di2;
16639 else
16640 unpack = gen_sse4_1_extendv2siv2di2;
16641 break;
16642 default:
16643 gcc_unreachable ();
16644 }
16645
16646 dest = operands[0];
16647 if (high_p)
16648 {
16649 /* Shift higher 8 bytes to lower 8 bytes. */
16650 src = gen_reg_rtx (imode);
16651 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16652 gen_lowpart (V1TImode, operands[1]),
16653 GEN_INT (64)));
16654 }
16655 else
16656 src = operands[1];
16657
16658 emit_insn (unpack (dest, src));
16659 }
16660
16661 /* Expand conditional increment or decrement using adc/sbb instructions.
16662 The default case using setcc followed by the conditional move can be
16663 done by generic code. */
16664 int
16665 ix86_expand_int_addcc (rtx operands[])
16666 {
16667 enum rtx_code code = GET_CODE (operands[1]);
16668 rtx flags;
16669 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16670 rtx compare_op;
16671 rtx val = const0_rtx;
16672 bool fpcmp = false;
16673 enum machine_mode mode;
16674
16675 ix86_compare_op0 = XEXP (operands[1], 0);
16676 ix86_compare_op1 = XEXP (operands[1], 1);
16677 if (operands[3] != const1_rtx
16678 && operands[3] != constm1_rtx)
16679 return 0;
16680 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16681 ix86_compare_op1, &compare_op))
16682 return 0;
16683 code = GET_CODE (compare_op);
16684
16685 flags = XEXP (compare_op, 0);
16686
16687 if (GET_MODE (flags) == CCFPmode
16688 || GET_MODE (flags) == CCFPUmode)
16689 {
16690 fpcmp = true;
16691 code = ix86_fp_compare_code_to_integer (code);
16692 }
16693
16694 if (code != LTU)
16695 {
16696 val = constm1_rtx;
16697 if (fpcmp)
16698 PUT_CODE (compare_op,
16699 reverse_condition_maybe_unordered
16700 (GET_CODE (compare_op)));
16701 else
16702 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16703 }
16704
16705 mode = GET_MODE (operands[0]);
16706
16707 /* Construct either adc or sbb insn. */
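  /* When the carry flag directly encodes the condition (code == LTU),
     adc/sbb with VAL = 0 adds or subtracts exactly the condition bit.
     Otherwise the condition was reversed above, so the carry is the
     negation of the condition and VAL = -1 compensates,
     e.g. OP2 + (-1) + !cond == OP2 - cond.  */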
16708 if ((code == LTU) == (operands[3] == constm1_rtx))
16709 {
16710 switch (mode)
16711 {
16712 case QImode:
16713 insn = gen_subqi3_carry;
16714 break;
16715 case HImode:
16716 insn = gen_subhi3_carry;
16717 break;
16718 case SImode:
16719 insn = gen_subsi3_carry;
16720 break;
16721 case DImode:
16722 insn = gen_subdi3_carry;
16723 break;
16724 default:
16725 gcc_unreachable ();
16726 }
16727 }
16728 else
16729 {
16730 switch (mode)
16731 {
16732 case QImode:
16733 insn = gen_addqi3_carry;
16734 break;
16735 case HImode:
16736 insn = gen_addhi3_carry;
16737 break;
16738 case SImode:
16739 insn = gen_addsi3_carry;
16740 break;
16741 case DImode:
16742 insn = gen_adddi3_carry;
16743 break;
16744 default:
16745 gcc_unreachable ();
16746 }
16747 }
16748 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16749
16750 return 1; /* DONE */
16751 }
16752
16753
16754 /* Split operands 0 and 1 into SImode parts.  Similar to split_di, but
16755    works for floating-point parameters and non-offsettable memories.
16756    For pushes, it returns just stack offsets; the values will be saved
16757    in the right order.  At most four parts are generated.  */
16758
16759 static int
16760 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16761 {
16762 int size;
16763
16764 if (!TARGET_64BIT)
16765 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16766 else
16767 size = (GET_MODE_SIZE (mode) + 4) / 8;
16768
16769 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16770 gcc_assert (size >= 2 && size <= 4);
16771
16772 /* Optimize constant pool references to immediates.  This is used by fp
16773 moves, which force all constants to memory to allow combining.  */
16774 if (MEM_P (operand) && MEM_READONLY_P (operand))
16775 {
16776 rtx tmp = maybe_get_pool_constant (operand);
16777 if (tmp)
16778 operand = tmp;
16779 }
16780
16781 if (MEM_P (operand) && !offsettable_memref_p (operand))
16782 {
16783 /* The only non-offsettable memories we handle are pushes.  */
16784 int ok = push_operand (operand, VOIDmode);
16785
16786 gcc_assert (ok);
16787
16788 operand = copy_rtx (operand);
16789 PUT_MODE (operand, Pmode);
16790 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16791 return size;
16792 }
16793
16794 if (GET_CODE (operand) == CONST_VECTOR)
16795 {
16796 enum machine_mode imode = int_mode_for_mode (mode);
16797 /* Caution: if we looked through a constant pool memory above,
16798 the operand may actually have a different mode now. That's
16799 ok, since we want to pun this all the way back to an integer. */
16800 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16801 gcc_assert (operand != NULL);
16802 mode = imode;
16803 }
16804
16805 if (!TARGET_64BIT)
16806 {
16807 if (mode == DImode)
16808 split_di (&operand, 1, &parts[0], &parts[1]);
16809 else
16810 {
16811 int i;
16812
16813 if (REG_P (operand))
16814 {
16815 gcc_assert (reload_completed);
16816 for (i = 0; i < size; i++)
16817 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16818 }
16819 else if (offsettable_memref_p (operand))
16820 {
16821 operand = adjust_address (operand, SImode, 0);
16822 parts[0] = operand;
16823 for (i = 1; i < size; i++)
16824 parts[i] = adjust_address (operand, SImode, 4 * i);
16825 }
16826 else if (GET_CODE (operand) == CONST_DOUBLE)
16827 {
16828 REAL_VALUE_TYPE r;
16829 long l[4];
16830
16831 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16832 switch (mode)
16833 {
16834 case TFmode:
16835 real_to_target (l, &r, mode);
16836 parts[3] = gen_int_mode (l[3], SImode);
16837 parts[2] = gen_int_mode (l[2], SImode);
16838 break;
16839 case XFmode:
16840 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16841 parts[2] = gen_int_mode (l[2], SImode);
16842 break;
16843 case DFmode:
16844 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16845 break;
16846 default:
16847 gcc_unreachable ();
16848 }
16849 parts[1] = gen_int_mode (l[1], SImode);
16850 parts[0] = gen_int_mode (l[0], SImode);
16851 }
16852 else
16853 gcc_unreachable ();
16854 }
16855 }
16856 else
16857 {
16858 if (mode == TImode)
16859 split_ti (&operand, 1, &parts[0], &parts[1]);
16860 if (mode == XFmode || mode == TFmode)
16861 {
16862 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16863 if (REG_P (operand))
16864 {
16865 gcc_assert (reload_completed);
16866 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16867 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16868 }
16869 else if (offsettable_memref_p (operand))
16870 {
16871 operand = adjust_address (operand, DImode, 0);
16872 parts[0] = operand;
16873 parts[1] = adjust_address (operand, upper_mode, 8);
16874 }
16875 else if (GET_CODE (operand) == CONST_DOUBLE)
16876 {
16877 REAL_VALUE_TYPE r;
16878 long l[4];
16879
16880 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16881 real_to_target (l, &r, mode);
16882
16883 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16884 if (HOST_BITS_PER_WIDE_INT >= 64)
16885 parts[0]
16886 = gen_int_mode
16887 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16888 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16889 DImode);
16890 else
16891 parts[0] = immed_double_const (l[0], l[1], DImode);
16892
16893 if (upper_mode == SImode)
16894 parts[1] = gen_int_mode (l[2], SImode);
16895 else if (HOST_BITS_PER_WIDE_INT >= 64)
16896 parts[1]
16897 = gen_int_mode
16898 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16899 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16900 DImode);
16901 else
16902 parts[1] = immed_double_const (l[2], l[3], DImode);
16903 }
16904 else
16905 gcc_unreachable ();
16906 }
16907 }
16908
16909 return size;
16910 }
16911
16912 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16913    The operands array is used as scratch storage: entries 2-5 receive the
16914    destination parts and entries 6-9 the corresponding source parts,
16915    in the correct order.  */
16916
16917 void
16918 ix86_split_long_move (rtx operands[])
16919 {
16920 rtx part[2][4];
16921 int nparts, i, j;
16922 int push = 0;
16923 int collisions = 0;
16924 enum machine_mode mode = GET_MODE (operands[0]);
16925 bool collisionparts[4];
16926
16927 /* The DFmode expanders may ask us to move a double.
16928    For a 64-bit target this is a single move.  By hiding that fact
16929    here we simplify the i386.md splitters.  */
16930 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16931 {
16932 /* Optimize constant pool references to immediates.  This is used by
16933 fp moves, which force all constants to memory to allow combining.  */
16934
16935 if (MEM_P (operands[1])
16936 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16937 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16938 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16939 if (push_operand (operands[0], VOIDmode))
16940 {
16941 operands[0] = copy_rtx (operands[0]);
16942 PUT_MODE (operands[0], Pmode);
16943 }
16944 else
16945 operands[0] = gen_lowpart (DImode, operands[0]);
16946 operands[1] = gen_lowpart (DImode, operands[1]);
16947 emit_move_insn (operands[0], operands[1]);
16948 return;
16949 }
16950
16951 /* The only non-offsettable memory we handle is push. */
16952 if (push_operand (operands[0], VOIDmode))
16953 push = 1;
16954 else
16955 gcc_assert (!MEM_P (operands[0])
16956 || offsettable_memref_p (operands[0]));
16957
16958 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16959 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16960
16961 /* When emitting a push, take care of source operands on the stack.  */
16962 if (push && MEM_P (operands[1])
16963 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16964 {
16965 rtx src_base = XEXP (part[1][nparts - 1], 0);
16966
16967 /* Compensate for the stack decrement by 4. */
16968 if (!TARGET_64BIT && nparts == 3
16969 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16970 src_base = plus_constant (src_base, 4);
16971
16972 /* src_base refers to the stack pointer and is
16973 automatically decreased by emitted push. */
16974 for (i = 0; i < nparts; i++)
16975 part[1][i] = change_address (part[1][i],
16976 GET_MODE (part[1][i]), src_base);
16977 }
16978
16979 /* We need to do copy in the right order in case an address register
16980 of the source overlaps the destination. */
16981 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16982 {
16983 rtx tmp;
16984
16985 for (i = 0; i < nparts; i++)
16986 {
16987 collisionparts[i]
16988 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16989 if (collisionparts[i])
16990 collisions++;
16991 }
16992
16993 /* Collision in the middle part can be handled by reordering. */
16994 if (collisions == 1 && nparts == 3 && collisionparts [1])
16995 {
16996 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16997 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16998 }
16999 else if (collisions == 1
17000 && nparts == 4
17001 && (collisionparts [1] || collisionparts [2]))
17002 {
17003 if (collisionparts [1])
17004 {
17005 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17006 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17007 }
17008 else
17009 {
17010 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
17011 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
17012 }
17013 }
17014
17015 /* If there are more collisions, we can't handle it by reordering.
17016 Do an lea to the last part and use only one colliding move. */
17017 else if (collisions > 1)
17018 {
17019 rtx base;
17020
17021 collisions = 1;
17022
17023 base = part[0][nparts - 1];
17024
17025 /* Handle the case when the last part isn't valid for lea.
17026 Happens in 64-bit mode storing the 12-byte XFmode. */
17027 if (GET_MODE (base) != Pmode)
17028 base = gen_rtx_REG (Pmode, REGNO (base));
17029
17030 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
17031 part[1][0] = replace_equiv_address (part[1][0], base);
17032 for (i = 1; i < nparts; i++)
17033 {
17034 tmp = plus_constant (base, UNITS_PER_WORD * i);
17035 part[1][i] = replace_equiv_address (part[1][i], tmp);
17036 }
17037 }
17038 }
17039
17040 if (push)
17041 {
17042 if (!TARGET_64BIT)
17043 {
17044 if (nparts == 3)
17045 {
17046 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
17047 emit_insn (gen_addsi3 (stack_pointer_rtx,
17048 stack_pointer_rtx, GEN_INT (-4)));
17049 emit_move_insn (part[0][2], part[1][2]);
17050 }
17051 else if (nparts == 4)
17052 {
17053 emit_move_insn (part[0][3], part[1][3]);
17054 emit_move_insn (part[0][2], part[1][2]);
17055 }
17056 }
17057 else
17058 {
17059 /* In 64-bit mode we don't have a 32-bit push available.  If this is a
17060 register, it is OK - we will just use the larger counterpart.  We also
17061 retype memory - this comes from an attempt to avoid the REX prefix on
17062 moving of the second half of a TFmode value.  */
17063 if (GET_MODE (part[1][1]) == SImode)
17064 {
17065 switch (GET_CODE (part[1][1]))
17066 {
17067 case MEM:
17068 part[1][1] = adjust_address (part[1][1], DImode, 0);
17069 break;
17070
17071 case REG:
17072 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
17073 break;
17074
17075 default:
17076 gcc_unreachable ();
17077 }
17078
17079 if (GET_MODE (part[1][0]) == SImode)
17080 part[1][0] = part[1][1];
17081 }
17082 }
17083 emit_move_insn (part[0][1], part[1][1]);
17084 emit_move_insn (part[0][0], part[1][0]);
17085 return;
17086 }
17087
17088 /* Choose correct order to not overwrite the source before it is copied. */
17089 if ((REG_P (part[0][0])
17090 && REG_P (part[1][1])
17091 && (REGNO (part[0][0]) == REGNO (part[1][1])
17092 || (nparts == 3
17093 && REGNO (part[0][0]) == REGNO (part[1][2]))
17094 || (nparts == 4
17095 && REGNO (part[0][0]) == REGNO (part[1][3]))))
17096 || (collisions > 0
17097 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
17098 {
17099 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
17100 {
17101 operands[2 + i] = part[0][j];
17102 operands[6 + i] = part[1][j];
17103 }
17104 }
17105 else
17106 {
17107 for (i = 0; i < nparts; i++)
17108 {
17109 operands[2 + i] = part[0][i];
17110 operands[6 + i] = part[1][i];
17111 }
17112 }
17113
17114 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
17115 if (optimize_insn_for_size_p ())
17116 {
17117 for (j = 0; j < nparts - 1; j++)
17118 if (CONST_INT_P (operands[6 + j])
17119 && operands[6 + j] != const0_rtx
17120 && REG_P (operands[2 + j]))
17121 for (i = j; i < nparts - 1; i++)
17122 if (CONST_INT_P (operands[7 + i])
17123 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
17124 operands[7 + i] = operands[2 + j];
17125 }
17126
17127 for (i = 0; i < nparts; i++)
17128 emit_move_insn (operands[2 + i], operands[6 + i]);
17129
17130 return;
17131 }
17132
17133 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
17134 left shift by a constant, either using a single shift or
17135 a sequence of add instructions. */
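/* For example, a left shift by 2 may be emitted as two self-additions when
   two adds cost less than one shift by a constant.  */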
17136
17137 static void
17138 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
17139 {
17140 if (count == 1)
17141 {
17142 emit_insn ((mode == DImode
17143 ? gen_addsi3
17144 : gen_adddi3) (operand, operand, operand));
17145 }
17146 else if (!optimize_insn_for_size_p ()
17147 && count * ix86_cost->add <= ix86_cost->shift_const)
17148 {
17149 int i;
17150 for (i = 0; i < count; i++)
17151 {
17152 emit_insn ((mode == DImode
17153 ? gen_addsi3
17154 : gen_adddi3) (operand, operand, operand));
17155 }
17156 }
17157 else
17158 emit_insn ((mode == DImode
17159 ? gen_ashlsi3
17160 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17161 }
17162
17163 void
17164 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17165 {
17166 rtx low[2], high[2];
17167 int count;
17168 const int single_width = mode == DImode ? 32 : 64;
17169
17170 if (CONST_INT_P (operands[2]))
17171 {
17172 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17173 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17174
17175 if (count >= single_width)
17176 {
17177 emit_move_insn (high[0], low[1]);
17178 emit_move_insn (low[0], const0_rtx);
17179
17180 if (count > single_width)
17181 ix86_expand_ashl_const (high[0], count - single_width, mode);
17182 }
17183 else
17184 {
17185 if (!rtx_equal_p (operands[0], operands[1]))
17186 emit_move_insn (operands[0], operands[1]);
17187 emit_insn ((mode == DImode
17188 ? gen_x86_shld
17189 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17190 ix86_expand_ashl_const (low[0], count, mode);
17191 }
17192 return;
17193 }
17194
17195 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17196
17197 if (operands[1] == const1_rtx)
17198 {
17199 /* Assuming we've chosen QImode-capable registers, then 1 << N
17200 can be done with two 32/64-bit shifts, no branches, no cmoves. */
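      /* Concretely: the test below checks the count against SINGLE_WIDTH
	 (bit 5 or 6).  The setcc pair puts 1 into the low word and 0 into
	 the high word when the count is below the word size, and the
	 reverse otherwise; the final word-sized shifts, whose count the
	 hardware masks modulo the word size, then move that single bit
	 into the requested position.  */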
17201 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17202 {
17203 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17204
17205 ix86_expand_clear (low[0]);
17206 ix86_expand_clear (high[0]);
17207 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17208
17209 d = gen_lowpart (QImode, low[0]);
17210 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17211 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17212 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17213
17214 d = gen_lowpart (QImode, high[0]);
17215 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17216 s = gen_rtx_NE (QImode, flags, const0_rtx);
17217 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17218 }
17219
17220 /* Otherwise, we can get the same results by manually performing
17221 a bit extract operation on bit 5/6, and then performing the two
17222 shifts. The two methods of getting 0/1 into low/high are exactly
17223 the same size. Avoiding the shift in the bit extract case helps
17224 pentium4 a bit; no one else seems to care much either way. */
17225 else
17226 {
17227 rtx x;
17228
17229 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17230 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17231 else
17232 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17233 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17234
17235 emit_insn ((mode == DImode
17236 ? gen_lshrsi3
17237 : gen_lshrdi3) (high[0], high[0],
17238 GEN_INT (mode == DImode ? 5 : 6)));
17239 emit_insn ((mode == DImode
17240 ? gen_andsi3
17241 : gen_anddi3) (high[0], high[0], const1_rtx));
17242 emit_move_insn (low[0], high[0]);
17243 emit_insn ((mode == DImode
17244 ? gen_xorsi3
17245 : gen_xordi3) (low[0], low[0], const1_rtx));
17246 }
17247
17248 emit_insn ((mode == DImode
17249 ? gen_ashlsi3
17250 : gen_ashldi3) (low[0], low[0], operands[2]));
17251 emit_insn ((mode == DImode
17252 ? gen_ashlsi3
17253 : gen_ashldi3) (high[0], high[0], operands[2]));
17254 return;
17255 }
17256
17257 if (operands[1] == constm1_rtx)
17258 {
17259 /* For -1 << N, we can avoid the shld instruction, because we
17260 know that we're shifting 0...31/63 ones into a -1. */
17261 emit_move_insn (low[0], constm1_rtx);
17262 if (optimize_insn_for_size_p ())
17263 emit_move_insn (high[0], low[0]);
17264 else
17265 emit_move_insn (high[0], constm1_rtx);
17266 }
17267 else
17268 {
17269 if (!rtx_equal_p (operands[0], operands[1]))
17270 emit_move_insn (operands[0], operands[1]);
17271
17272 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17273 emit_insn ((mode == DImode
17274 ? gen_x86_shld
17275 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17276 }
17277
17278 emit_insn ((mode == DImode
17279 ? gen_ashlsi3
17280 : gen_ashldi3) (low[0], low[0], operands[2]));
17281
17282 if (TARGET_CMOVE && scratch)
17283 {
17284 ix86_expand_clear (scratch);
17285 emit_insn ((mode == DImode
17286 ? gen_x86_shiftsi_adj_1
17287 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17288 scratch));
17289 }
17290 else
17291 emit_insn ((mode == DImode
17292 ? gen_x86_shiftsi_adj_2
17293 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
17294 }
17295
17296 void
17297 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17298 {
17299 rtx low[2], high[2];
17300 int count;
17301 const int single_width = mode == DImode ? 32 : 64;
17302
17303 if (CONST_INT_P (operands[2]))
17304 {
17305 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17306 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17307
17308 if (count == single_width * 2 - 1)
17309 {
17310 emit_move_insn (high[0], high[1]);
17311 emit_insn ((mode == DImode
17312 ? gen_ashrsi3
17313 : gen_ashrdi3) (high[0], high[0],
17314 GEN_INT (single_width - 1)));
17315 emit_move_insn (low[0], high[0]);
17316
17317 }
17318 else if (count >= single_width)
17319 {
17320 emit_move_insn (low[0], high[1]);
17321 emit_move_insn (high[0], low[0]);
17322 emit_insn ((mode == DImode
17323 ? gen_ashrsi3
17324 : gen_ashrdi3) (high[0], high[0],
17325 GEN_INT (single_width - 1)));
17326 if (count > single_width)
17327 emit_insn ((mode == DImode
17328 ? gen_ashrsi3
17329 : gen_ashrdi3) (low[0], low[0],
17330 GEN_INT (count - single_width)));
17331 }
17332 else
17333 {
17334 if (!rtx_equal_p (operands[0], operands[1]))
17335 emit_move_insn (operands[0], operands[1]);
17336 emit_insn ((mode == DImode
17337 ? gen_x86_shrd
17338 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17339 emit_insn ((mode == DImode
17340 ? gen_ashrsi3
17341 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17342 }
17343 }
17344 else
17345 {
17346 if (!rtx_equal_p (operands[0], operands[1]))
17347 emit_move_insn (operands[0], operands[1]);
17348
17349 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17350
17351 emit_insn ((mode == DImode
17352 ? gen_x86_shrd
17353 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17354 emit_insn ((mode == DImode
17355 ? gen_ashrsi3
17356 : gen_ashrdi3) (high[0], high[0], operands[2]));
17357
17358 if (TARGET_CMOVE && scratch)
17359 {
17360 emit_move_insn (scratch, high[0]);
17361 emit_insn ((mode == DImode
17362 ? gen_ashrsi3
17363 : gen_ashrdi3) (scratch, scratch,
17364 GEN_INT (single_width - 1)));
17365 emit_insn ((mode == DImode
17366 ? gen_x86_shiftsi_adj_1
17367 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17368 scratch));
17369 }
17370 else
17371 emit_insn ((mode == DImode
17372 ? gen_x86_shiftsi_adj_3
17373 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
17374 }
17375 }
17376
17377 void
17378 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17379 {
17380 rtx low[2], high[2];
17381 int count;
17382 const int single_width = mode == DImode ? 32 : 64;
17383
17384 if (CONST_INT_P (operands[2]))
17385 {
17386 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17387 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17388
17389 if (count >= single_width)
17390 {
17391 emit_move_insn (low[0], high[1]);
17392 ix86_expand_clear (high[0]);
17393
17394 if (count > single_width)
17395 emit_insn ((mode == DImode
17396 ? gen_lshrsi3
17397 : gen_lshrdi3) (low[0], low[0],
17398 GEN_INT (count - single_width)));
17399 }
17400 else
17401 {
17402 if (!rtx_equal_p (operands[0], operands[1]))
17403 emit_move_insn (operands[0], operands[1]);
17404 emit_insn ((mode == DImode
17405 ? gen_x86_shrd
17406 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17407 emit_insn ((mode == DImode
17408 ? gen_lshrsi3
17409 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17410 }
17411 }
17412 else
17413 {
17414 if (!rtx_equal_p (operands[0], operands[1]))
17415 emit_move_insn (operands[0], operands[1]);
17416
17417 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17418
17419 emit_insn ((mode == DImode
17420 ? gen_x86_shrd
17421 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17422 emit_insn ((mode == DImode
17423 ? gen_lshrsi3
17424 : gen_lshrdi3) (high[0], high[0], operands[2]));
17425
17426 /* Heh. By reversing the arguments, we can reuse this pattern. */
17427 if (TARGET_CMOVE && scratch)
17428 {
17429 ix86_expand_clear (scratch);
17430 emit_insn ((mode == DImode
17431 ? gen_x86_shiftsi_adj_1
17432 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17433 scratch));
17434 }
17435 else
17436 emit_insn ((mode == DImode
17437 ? gen_x86_shiftsi_adj_2
17438 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17439 }
17440 }
17441
17442 /* Predict the just-emitted jump instruction to be taken with probability PROB.  */
17443 static void
17444 predict_jump (int prob)
17445 {
17446 rtx insn = get_last_insn ();
17447 gcc_assert (JUMP_P (insn));
17448 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17449 }
17450
17451 /* Helper function for the string operations below.  Test whether VARIABLE
17452    is aligned to VALUE bytes; if so, jump to the returned label.  */
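/* The caller emits the fix-up code for the misaligned case right after this
   call and then emits the returned label, so that code is skipped whenever
   the tested bits are already clear.  */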
17453 static rtx
17454 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17455 {
17456 rtx label = gen_label_rtx ();
17457 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17458 if (GET_MODE (variable) == DImode)
17459 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17460 else
17461 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17462 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17463 1, label);
17464 if (epilogue)
17465 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17466 else
17467 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17468 return label;
17469 }
17470
17471 /* Decrement COUNTREG by VALUE.  */
17472 static void
17473 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17474 {
17475 if (GET_MODE (countreg) == DImode)
17476 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17477 else
17478 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17479 }
17480
17481 /* Zero-extend EXP, which may be in SImode, to a Pmode register.  */
17482 rtx
17483 ix86_zero_extend_to_Pmode (rtx exp)
17484 {
17485 rtx r;
17486 if (GET_MODE (exp) == VOIDmode)
17487 return force_reg (Pmode, exp);
17488 if (GET_MODE (exp) == Pmode)
17489 return copy_to_mode_reg (Pmode, exp);
17490 r = gen_reg_rtx (Pmode);
17491 emit_insn (gen_zero_extendsidi2 (r, exp));
17492 return r;
17493 }
17494
17495 /* Divide COUNTREG by SCALE. */
17496 static rtx
17497 scale_counter (rtx countreg, int scale)
17498 {
17499 rtx sc;
17500
17501 if (scale == 1)
17502 return countreg;
17503 if (CONST_INT_P (countreg))
17504 return GEN_INT (INTVAL (countreg) / scale);
17505 gcc_assert (REG_P (countreg));
17506
17507 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17508 GEN_INT (exact_log2 (scale)),
17509 NULL, 1, OPTAB_DIRECT);
17510 return sc;
17511 }
17512
17513 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17514 DImode for constant loop counts. */
17515
17516 static enum machine_mode
17517 counter_mode (rtx count_exp)
17518 {
17519 if (GET_MODE (count_exp) != VOIDmode)
17520 return GET_MODE (count_exp);
17521 if (!CONST_INT_P (count_exp))
17522 return Pmode;
17523 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17524 return DImode;
17525 return SImode;
17526 }
17527
17528 /* When SRCPTR is non-NULL, output a simple loop to move the memory pointed
17529    to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the
17530    overall size is COUNT, specified in bytes.  When SRCPTR is NULL, output
17531    the equivalent loop to set memory to VALUE (assumed to be in MODE).
17532 
17533    The size is rounded down to a whole number of chunks moved at once.
17534    SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info.  */
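/* Roughly, the emitted code has the shape:

      size = count & -piece;  iter = 0;
      do
	{
	  copy (or store) one unrolled group of piece bytes
	    at destptr + iter (from srcptr + iter);
	  iter += piece;
	}
      while (iter < size);
      destptr += iter;  srcptr += iter;

   where piece = GET_MODE_SIZE (MODE) * UNROLL.  */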
17535
17536
17537 static void
17538 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17539 rtx destptr, rtx srcptr, rtx value,
17540 rtx count, enum machine_mode mode, int unroll,
17541 int expected_size)
17542 {
17543 rtx out_label, top_label, iter, tmp;
17544 enum machine_mode iter_mode = counter_mode (count);
17545 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17546 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17547 rtx size;
17548 rtx x_addr;
17549 rtx y_addr;
17550 int i;
17551
17552 top_label = gen_label_rtx ();
17553 out_label = gen_label_rtx ();
17554 iter = gen_reg_rtx (iter_mode);
17555
17556 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17557 NULL, 1, OPTAB_DIRECT);
17558 /* Those two should combine. */
17559 if (piece_size == const1_rtx)
17560 {
17561 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17562 true, out_label);
17563 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17564 }
17565 emit_move_insn (iter, const0_rtx);
17566
17567 emit_label (top_label);
17568
17569 tmp = convert_modes (Pmode, iter_mode, iter, true);
17570 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17571 destmem = change_address (destmem, mode, x_addr);
17572
17573 if (srcmem)
17574 {
17575 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17576 srcmem = change_address (srcmem, mode, y_addr);
17577
17578 /* When unrolling for chips that reorder memory reads and writes,
17579 we can save registers by using a single temporary.
17580 Using four temporaries is also overkill in 32-bit mode. */
17581 if (!TARGET_64BIT && 0)
17582 {
17583 for (i = 0; i < unroll; i++)
17584 {
17585 if (i)
17586 {
17587 destmem =
17588 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17589 srcmem =
17590 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17591 }
17592 emit_move_insn (destmem, srcmem);
17593 }
17594 }
17595 else
17596 {
17597 rtx tmpreg[4];
17598 gcc_assert (unroll <= 4);
17599 for (i = 0; i < unroll; i++)
17600 {
17601 tmpreg[i] = gen_reg_rtx (mode);
17602 if (i)
17603 {
17604 srcmem =
17605 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17606 }
17607 emit_move_insn (tmpreg[i], srcmem);
17608 }
17609 for (i = 0; i < unroll; i++)
17610 {
17611 if (i)
17612 {
17613 destmem =
17614 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17615 }
17616 emit_move_insn (destmem, tmpreg[i]);
17617 }
17618 }
17619 }
17620 else
17621 for (i = 0; i < unroll; i++)
17622 {
17623 if (i)
17624 destmem =
17625 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17626 emit_move_insn (destmem, value);
17627 }
17628
17629 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17630 true, OPTAB_LIB_WIDEN);
17631 if (tmp != iter)
17632 emit_move_insn (iter, tmp);
17633
17634 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17635 true, top_label);
17636 if (expected_size != -1)
17637 {
17638 expected_size /= GET_MODE_SIZE (mode) * unroll;
17639 if (expected_size == 0)
17640 predict_jump (0);
17641 else if (expected_size > REG_BR_PROB_BASE)
17642 predict_jump (REG_BR_PROB_BASE - 1);
17643 else
17644 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17645 }
17646 else
17647 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17648 iter = ix86_zero_extend_to_Pmode (iter);
17649 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17650 true, OPTAB_LIB_WIDEN);
17651 if (tmp != destptr)
17652 emit_move_insn (destptr, tmp);
17653 if (srcptr)
17654 {
17655 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17656 true, OPTAB_LIB_WIDEN);
17657 if (tmp != srcptr)
17658 emit_move_insn (srcptr, tmp);
17659 }
17660 emit_label (out_label);
17661 }
17662
17663 /* Output "rep; mov" instruction.
17664 Arguments have same meaning as for previous function */
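/* DESTEXP and SRCEXP built below describe the pointer values after the copy
   (pointer plus COUNTREG scaled by the chunk size); the rep_mov pattern uses
   them to expose the pointer updates to the RTL optimizers.  */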
17665 static void
17666 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17667 rtx destptr, rtx srcptr,
17668 rtx count,
17669 enum machine_mode mode)
17670 {
17671 rtx destexp;
17672 rtx srcexp;
17673 rtx countreg;
17674
17675 /* If the size is known to be a multiple of 4, "rep movsl" is shorter than "rep movsb".  */
17676 if (mode == QImode && CONST_INT_P (count)
17677 && !(INTVAL (count) & 3))
17678 mode = SImode;
17679
17680 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17681 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17682 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17683 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17684 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17685 if (mode != QImode)
17686 {
17687 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17688 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17689 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17690 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17691 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17692 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17693 }
17694 else
17695 {
17696 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17697 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17698 }
17699 if (CONST_INT_P (count))
17700 {
17701 count = GEN_INT (INTVAL (count)
17702 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17703 destmem = shallow_copy_rtx (destmem);
17704 srcmem = shallow_copy_rtx (srcmem);
17705 set_mem_size (destmem, count);
17706 set_mem_size (srcmem, count);
17707 }
17708 else
17709 {
17710 if (MEM_SIZE (destmem))
17711 set_mem_size (destmem, NULL_RTX);
17712 if (MEM_SIZE (srcmem))
17713 set_mem_size (srcmem, NULL_RTX);
17714 }
17715 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17716 destexp, srcexp));
17717 }
17718
17719 /* Output "rep; stos" instruction.
17720 Arguments have same meaning as for previous function */
17721 static void
17722 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17723 rtx count, enum machine_mode mode,
17724 rtx orig_value)
17725 {
17726 rtx destexp;
17727 rtx countreg;
17728
17729 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17730 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17731 value = force_reg (mode, gen_lowpart (mode, value));
17732 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17733 if (mode != QImode)
17734 {
17735 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17736 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17737 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17738 }
17739 else
17740 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17741 if (orig_value == const0_rtx && CONST_INT_P (count))
17742 {
17743 count = GEN_INT (INTVAL (count)
17744 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17745 destmem = shallow_copy_rtx (destmem);
17746 set_mem_size (destmem, count);
17747 }
17748 else if (MEM_SIZE (destmem))
17749 set_mem_size (destmem, NULL_RTX);
17750 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17751 }
17752
17753 static void
17754 emit_strmov (rtx destmem, rtx srcmem,
17755 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17756 {
17757 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17758 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17759 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17760 }
17761
17762 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
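/* For a constant COUNT the remainder is handled by testing its bits: a
   16-byte chunk when bit 4 is set, then 8-, 4-, 2- and 1-byte chunks,
   mirroring the binary representation of the tail.  For a variable COUNT,
   a small loop or run-time alignment tests are emitted instead.  */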
17763 static void
17764 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17765 rtx destptr, rtx srcptr, rtx count, int max_size)
17766 {
17767 rtx src, dest;
17768 if (CONST_INT_P (count))
17769 {
17770 HOST_WIDE_INT countval = INTVAL (count);
17771 int offset = 0;
17772
17773 if ((countval & 0x10) && max_size > 16)
17774 {
17775 if (TARGET_64BIT)
17776 {
17777 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17778 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17779 }
17780 else
17781 gcc_unreachable ();
17782 offset += 16;
17783 }
17784 if ((countval & 0x08) && max_size > 8)
17785 {
17786 if (TARGET_64BIT)
17787 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17788 else
17789 {
17790 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17791 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17792 }
17793 offset += 8;
17794 }
17795 if ((countval & 0x04) && max_size > 4)
17796 {
17797 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17798 offset += 4;
17799 }
17800 if ((countval & 0x02) && max_size > 2)
17801 {
17802 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17803 offset += 2;
17804 }
17805 if ((countval & 0x01) && max_size > 1)
17806 {
17807 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17808 offset += 1;
17809 }
17810 return;
17811 }
17812 if (max_size > 8)
17813 {
17814 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17815 count, 1, OPTAB_DIRECT);
17816 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17817 count, QImode, 1, 4);
17818 return;
17819 }
17820
17821 /* When single stringop instructions are available, we can cheaply advance
17822    the dest and src pointers.  Otherwise we save code size by maintaining an
17823    offset register (zero is readily available from the preceding rep
17824    operation) and using x86 addressing modes.  */
17825 if (TARGET_SINGLE_STRINGOP)
17826 {
17827 if (max_size > 4)
17828 {
17829 rtx label = ix86_expand_aligntest (count, 4, true);
17830 src = change_address (srcmem, SImode, srcptr);
17831 dest = change_address (destmem, SImode, destptr);
17832 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17833 emit_label (label);
17834 LABEL_NUSES (label) = 1;
17835 }
17836 if (max_size > 2)
17837 {
17838 rtx label = ix86_expand_aligntest (count, 2, true);
17839 src = change_address (srcmem, HImode, srcptr);
17840 dest = change_address (destmem, HImode, destptr);
17841 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17842 emit_label (label);
17843 LABEL_NUSES (label) = 1;
17844 }
17845 if (max_size > 1)
17846 {
17847 rtx label = ix86_expand_aligntest (count, 1, true);
17848 src = change_address (srcmem, QImode, srcptr);
17849 dest = change_address (destmem, QImode, destptr);
17850 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17851 emit_label (label);
17852 LABEL_NUSES (label) = 1;
17853 }
17854 }
17855 else
17856 {
17857 rtx offset = force_reg (Pmode, const0_rtx);
17858 rtx tmp;
17859
17860 if (max_size > 4)
17861 {
17862 rtx label = ix86_expand_aligntest (count, 4, true);
17863 src = change_address (srcmem, SImode, srcptr);
17864 dest = change_address (destmem, SImode, destptr);
17865 emit_move_insn (dest, src);
17866 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17867 true, OPTAB_LIB_WIDEN);
17868 if (tmp != offset)
17869 emit_move_insn (offset, tmp);
17870 emit_label (label);
17871 LABEL_NUSES (label) = 1;
17872 }
17873 if (max_size > 2)
17874 {
17875 rtx label = ix86_expand_aligntest (count, 2, true);
17876 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17877 src = change_address (srcmem, HImode, tmp);
17878 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17879 dest = change_address (destmem, HImode, tmp);
17880 emit_move_insn (dest, src);
17881 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17882 true, OPTAB_LIB_WIDEN);
17883 if (tmp != offset)
17884 emit_move_insn (offset, tmp);
17885 emit_label (label);
17886 LABEL_NUSES (label) = 1;
17887 }
17888 if (max_size > 1)
17889 {
17890 rtx label = ix86_expand_aligntest (count, 1, true);
17891 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17892 src = change_address (srcmem, QImode, tmp);
17893 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17894 dest = change_address (destmem, QImode, tmp);
17895 emit_move_insn (dest, src);
17896 emit_label (label);
17897 LABEL_NUSES (label) = 1;
17898 }
17899 }
17900 }
17901
17902 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
17903 static void
17904 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17905 rtx count, int max_size)
17906 {
17907 count =
17908 expand_simple_binop (counter_mode (count), AND, count,
17909 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17910 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17911 gen_lowpart (QImode, value), count, QImode,
17912 1, max_size / 2);
17913 }
17914
17915 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
17916 static void
17917 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17918 {
17919 rtx dest;
17920
17921 if (CONST_INT_P (count))
17922 {
17923 HOST_WIDE_INT countval = INTVAL (count);
17924 int offset = 0;
17925
17926 if ((countval & 0x10) && max_size > 16)
17927 {
17928 if (TARGET_64BIT)
17929 {
17930 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17931 emit_insn (gen_strset (destptr, dest, value));
17932 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17933 emit_insn (gen_strset (destptr, dest, value));
17934 }
17935 else
17936 gcc_unreachable ();
17937 offset += 16;
17938 }
17939 if ((countval & 0x08) && max_size > 8)
17940 {
17941 if (TARGET_64BIT)
17942 {
17943 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17944 emit_insn (gen_strset (destptr, dest, value));
17945 }
17946 else
17947 {
17948 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17949 emit_insn (gen_strset (destptr, dest, value));
17950 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17951 emit_insn (gen_strset (destptr, dest, value));
17952 }
17953 offset += 8;
17954 }
17955 if ((countval & 0x04) && max_size > 4)
17956 {
17957 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17958 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17959 offset += 4;
17960 }
17961 if ((countval & 0x02) && max_size > 2)
17962 {
17963 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17964 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17965 offset += 2;
17966 }
17967 if ((countval & 0x01) && max_size > 1)
17968 {
17969 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17970 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17971 offset += 1;
17972 }
17973 return;
17974 }
17975 if (max_size > 32)
17976 {
17977 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17978 return;
17979 }
17980 if (max_size > 16)
17981 {
17982 rtx label = ix86_expand_aligntest (count, 16, true);
17983 if (TARGET_64BIT)
17984 {
17985 dest = change_address (destmem, DImode, destptr);
17986 emit_insn (gen_strset (destptr, dest, value));
17987 emit_insn (gen_strset (destptr, dest, value));
17988 }
17989 else
17990 {
17991 dest = change_address (destmem, SImode, destptr);
17992 emit_insn (gen_strset (destptr, dest, value));
17993 emit_insn (gen_strset (destptr, dest, value));
17994 emit_insn (gen_strset (destptr, dest, value));
17995 emit_insn (gen_strset (destptr, dest, value));
17996 }
17997 emit_label (label);
17998 LABEL_NUSES (label) = 1;
17999 }
18000 if (max_size > 8)
18001 {
18002 rtx label = ix86_expand_aligntest (count, 8, true);
18003 if (TARGET_64BIT)
18004 {
18005 dest = change_address (destmem, DImode, destptr);
18006 emit_insn (gen_strset (destptr, dest, value));
18007 }
18008 else
18009 {
18010 dest = change_address (destmem, SImode, destptr);
18011 emit_insn (gen_strset (destptr, dest, value));
18012 emit_insn (gen_strset (destptr, dest, value));
18013 }
18014 emit_label (label);
18015 LABEL_NUSES (label) = 1;
18016 }
18017 if (max_size > 4)
18018 {
18019 rtx label = ix86_expand_aligntest (count, 4, true);
18020 dest = change_address (destmem, SImode, destptr);
18021 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18022 emit_label (label);
18023 LABEL_NUSES (label) = 1;
18024 }
18025 if (max_size > 2)
18026 {
18027 rtx label = ix86_expand_aligntest (count, 2, true);
18028 dest = change_address (destmem, HImode, destptr);
18029 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18030 emit_label (label);
18031 LABEL_NUSES (label) = 1;
18032 }
18033 if (max_size > 1)
18034 {
18035 rtx label = ix86_expand_aligntest (count, 1, true);
18036 dest = change_address (destmem, QImode, destptr);
18037 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18038 emit_label (label);
18039 LABEL_NUSES (label) = 1;
18040 }
18041 }
18042
18043 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
18044    to DESIRED_ALIGNMENT.  */
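/* Each step below copies one element only when DESTPTR is misaligned at that
   boundary (the branch emitted by ix86_expand_aligntest skips it otherwise)
   and decrements COUNT accordingly.  */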
18045 static void
18046 expand_movmem_prologue (rtx destmem, rtx srcmem,
18047 rtx destptr, rtx srcptr, rtx count,
18048 int align, int desired_alignment)
18049 {
18050 if (align <= 1 && desired_alignment > 1)
18051 {
18052 rtx label = ix86_expand_aligntest (destptr, 1, false);
18053 srcmem = change_address (srcmem, QImode, srcptr);
18054 destmem = change_address (destmem, QImode, destptr);
18055 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18056 ix86_adjust_counter (count, 1);
18057 emit_label (label);
18058 LABEL_NUSES (label) = 1;
18059 }
18060 if (align <= 2 && desired_alignment > 2)
18061 {
18062 rtx label = ix86_expand_aligntest (destptr, 2, false);
18063 srcmem = change_address (srcmem, HImode, srcptr);
18064 destmem = change_address (destmem, HImode, destptr);
18065 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18066 ix86_adjust_counter (count, 2);
18067 emit_label (label);
18068 LABEL_NUSES (label) = 1;
18069 }
18070 if (align <= 4 && desired_alignment > 4)
18071 {
18072 rtx label = ix86_expand_aligntest (destptr, 4, false);
18073 srcmem = change_address (srcmem, SImode, srcptr);
18074 destmem = change_address (destmem, SImode, destptr);
18075 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18076 ix86_adjust_counter (count, 4);
18077 emit_label (label);
18078 LABEL_NUSES (label) = 1;
18079 }
18080 gcc_assert (desired_alignment <= 8);
18081 }
18082
18083 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
18084    ALIGN_BYTES is how many bytes need to be copied.  */
18085 static rtx
18086 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
18087 int desired_align, int align_bytes)
18088 {
18089 rtx src = *srcp;
18090 rtx src_size, dst_size;
18091 int off = 0;
18092 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
18093 if (src_align_bytes >= 0)
18094 src_align_bytes = desired_align - src_align_bytes;
18095 src_size = MEM_SIZE (src);
18096 dst_size = MEM_SIZE (dst);
18097 if (align_bytes & 1)
18098 {
18099 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18100 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
18101 off = 1;
18102 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18103 }
18104 if (align_bytes & 2)
18105 {
18106 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18107 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
18108 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18109 set_mem_align (dst, 2 * BITS_PER_UNIT);
18110 if (src_align_bytes >= 0
18111 && (src_align_bytes & 1) == (align_bytes & 1)
18112 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
18113 set_mem_align (src, 2 * BITS_PER_UNIT);
18114 off = 2;
18115 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18116 }
18117 if (align_bytes & 4)
18118 {
18119 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18120 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
18121 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18122 set_mem_align (dst, 4 * BITS_PER_UNIT);
18123 if (src_align_bytes >= 0)
18124 {
18125 unsigned int src_align = 0;
18126 if ((src_align_bytes & 3) == (align_bytes & 3))
18127 src_align = 4;
18128 else if ((src_align_bytes & 1) == (align_bytes & 1))
18129 src_align = 2;
18130 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18131 set_mem_align (src, src_align * BITS_PER_UNIT);
18132 }
18133 off = 4;
18134 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18135 }
18136 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18137 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
18138 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18139 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18140 if (src_align_bytes >= 0)
18141 {
18142 unsigned int src_align = 0;
18143 if ((src_align_bytes & 7) == (align_bytes & 7))
18144 src_align = 8;
18145 else if ((src_align_bytes & 3) == (align_bytes & 3))
18146 src_align = 4;
18147 else if ((src_align_bytes & 1) == (align_bytes & 1))
18148 src_align = 2;
18149 if (src_align > (unsigned int) desired_align)
18150 src_align = desired_align;
18151 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18152 set_mem_align (src, src_align * BITS_PER_UNIT);
18153 }
18154 if (dst_size)
18155 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18156 if (src_size)
18157 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18158 *srcp = src;
18159 return dst;
18160 }
18161
18162 /* Store enough into DEST to align DEST, known to be aligned by ALIGN,
18163    to DESIRED_ALIGNMENT.  */
18164 static void
18165 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18166 int align, int desired_alignment)
18167 {
18168 if (align <= 1 && desired_alignment > 1)
18169 {
18170 rtx label = ix86_expand_aligntest (destptr, 1, false);
18171 destmem = change_address (destmem, QImode, destptr);
18172 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18173 ix86_adjust_counter (count, 1);
18174 emit_label (label);
18175 LABEL_NUSES (label) = 1;
18176 }
18177 if (align <= 2 && desired_alignment > 2)
18178 {
18179 rtx label = ix86_expand_aligntest (destptr, 2, false);
18180 destmem = change_address (destmem, HImode, destptr);
18181 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18182 ix86_adjust_counter (count, 2);
18183 emit_label (label);
18184 LABEL_NUSES (label) = 1;
18185 }
18186 if (align <= 4 && desired_alignment > 4)
18187 {
18188 rtx label = ix86_expand_aligntest (destptr, 4, false);
18189 destmem = change_address (destmem, SImode, destptr);
18190 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18191 ix86_adjust_counter (count, 4);
18192 emit_label (label);
18193 LABEL_NUSES (label) = 1;
18194 }
18195 gcc_assert (desired_alignment <= 8);
18196 }
18197
18198 /* Store enough into DST to align DST, known to be aligned by ALIGN, to
18199    DESIRED_ALIGN.  ALIGN_BYTES is how many bytes need to be stored.  */
18200 static rtx
18201 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18202 int desired_align, int align_bytes)
18203 {
18204 int off = 0;
18205 rtx dst_size = MEM_SIZE (dst);
18206 if (align_bytes & 1)
18207 {
18208 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18209 off = 1;
18210 emit_insn (gen_strset (destreg, dst,
18211 gen_lowpart (QImode, value)));
18212 }
18213 if (align_bytes & 2)
18214 {
18215 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18216 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18217 set_mem_align (dst, 2 * BITS_PER_UNIT);
18218 off = 2;
18219 emit_insn (gen_strset (destreg, dst,
18220 gen_lowpart (HImode, value)));
18221 }
18222 if (align_bytes & 4)
18223 {
18224 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18225 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18226 set_mem_align (dst, 4 * BITS_PER_UNIT);
18227 off = 4;
18228 emit_insn (gen_strset (destreg, dst,
18229 gen_lowpart (SImode, value)));
18230 }
18231 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18232 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18233 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18234 if (dst_size)
18235 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18236 return dst;
18237 }
18238
18239 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
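/* The choice proceeds roughly as follows: honor an explicit
   -mstringop-strategy when usable; prefer the rep prefixes when optimizing
   for size; use a byte loop for very small known sizes; otherwise consult
   the per-processor size tables, recursing on a guessed size when inlining
   is forced for an unknown length.  */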
18240 static enum stringop_alg
18241 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18242 int *dynamic_check)
18243 {
18244 const struct stringop_algs * algs;
18245 bool optimize_for_speed;
18246 /* Algorithms using the rep prefix want at least edi and ecx;
18247 additionally, memset wants eax and memcpy wants esi. Don't
18248 consider such algorithms if the user has appropriated those
18249 registers for their own purposes. */
18250 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18251 || (memset
18252 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18253
18254 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18255 || (alg != rep_prefix_1_byte \
18256 && alg != rep_prefix_4_byte \
18257 && alg != rep_prefix_8_byte))
18258 const struct processor_costs *cost;
18259
18260 /* Even if the string operation call is cold, we still might spend a lot
18261 of time processing large blocks. */
18262 if (optimize_function_for_size_p (cfun)
18263 || (optimize_insn_for_size_p ()
18264 && expected_size != -1 && expected_size < 256))
18265 optimize_for_speed = false;
18266 else
18267 optimize_for_speed = true;
18268
18269 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18270
18271 *dynamic_check = -1;
18272 if (memset)
18273 algs = &cost->memset[TARGET_64BIT != 0];
18274 else
18275 algs = &cost->memcpy[TARGET_64BIT != 0];
18276 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18277 return stringop_alg;
18278 /* rep; movq or rep; movl is the smallest variant. */
18279 else if (!optimize_for_speed)
18280 {
18281 if (!count || (count & 3))
18282 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18283 else
18284 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18285 }
18286 /* Very tiny blocks are best handled via the loop; REP is expensive to
18287    set up.  */
18288 else if (expected_size != -1 && expected_size < 4)
18289 return loop_1_byte;
18290 else if (expected_size != -1)
18291 {
18292 unsigned int i;
18293 enum stringop_alg alg = libcall;
18294 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18295 {
18296 /* We get here if the algorithms that were not libcall-based
18297 were rep-prefix based and we are unable to use rep prefixes
18298 based on global register usage. Break out of the loop and
18299 use the heuristic below. */
18300 if (algs->size[i].max == 0)
18301 break;
18302 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18303 {
18304 enum stringop_alg candidate = algs->size[i].alg;
18305
18306 if (candidate != libcall && ALG_USABLE_P (candidate))
18307 alg = candidate;
18308 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18309 last non-libcall inline algorithm. */
18310 if (TARGET_INLINE_ALL_STRINGOPS)
18311 {
18312 /* When the current size is best copied by a libcall,
18313 but we are still forced to inline, run the heuristic below
18314 that will pick code for medium sized blocks. */
18315 if (alg != libcall)
18316 return alg;
18317 break;
18318 }
18319 else if (ALG_USABLE_P (candidate))
18320 return candidate;
18321 }
18322 }
18323 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18324 }
18325 /* When asked to inline the call anyway, try to pick a meaningful choice.
18326 We look for the maximal size of block that is faster to copy by hand and
18327 take blocks of at most that size, guessing that the average size will
18328 be roughly half of the block.
18329 
18330 If this turns out to be bad, we might simply specify the preferred
18331 choice in ix86_costs. */
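/* A rough, purely illustrative example of the recursion below (the numbers
   are not taken from any cost table): if the largest block some usable
   non-libcall algorithm covers is 4096 bytes, decide_alg is re-run with an
   expected size of 2048 and whatever it picks is used; with
   -minline-stringops-dynamically a runtime size check against 4096 is also
   emitted so that larger blocks still go through the library call.  */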
18332 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18333 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18334 {
18335 int max = -1;
18336 enum stringop_alg alg;
18337 int i;
18338 bool any_alg_usable_p = true;
18339
18340 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18341 {
18342 enum stringop_alg candidate = algs->size[i].alg;
18343 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18344
18345 if (candidate != libcall && candidate
18346 && ALG_USABLE_P (candidate))
18347 max = algs->size[i].max;
18348 }
18349 /* If there aren't any usable algorithms, then recursing on
18350 smaller sizes isn't going to find anything. Just return the
18351 simple byte-at-a-time copy loop. */
18352 if (!any_alg_usable_p)
18353 {
18354 /* Pick something reasonable. */
18355 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18356 *dynamic_check = 128;
18357 return loop_1_byte;
18358 }
18359 if (max == -1)
18360 max = 4096;
18361 alg = decide_alg (count, max / 2, memset, dynamic_check);
18362 gcc_assert (*dynamic_check == -1);
18363 gcc_assert (alg != libcall);
18364 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18365 *dynamic_check = max;
18366 return alg;
18367 }
18368 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18369 #undef ALG_USABLE_P
18370 }
18371
18372 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18373 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18374 static int
18375 decide_alignment (int align,
18376 enum stringop_alg alg,
18377 int expected_size)
18378 {
18379 int desired_align = 0;
18380 switch (alg)
18381 {
18382 case no_stringop:
18383 gcc_unreachable ();
18384 case loop:
18385 case unrolled_loop:
18386 desired_align = GET_MODE_SIZE (Pmode);
18387 break;
18388 case rep_prefix_8_byte:
18389 desired_align = 8;
18390 break;
18391 case rep_prefix_4_byte:
18392 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18393 copying a whole cache line at once. */
18394 if (TARGET_PENTIUMPRO)
18395 desired_align = 8;
18396 else
18397 desired_align = 4;
18398 break;
18399 case rep_prefix_1_byte:
18400 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18401 copying a whole cache line at once. */
18402 if (TARGET_PENTIUMPRO)
18403 desired_align = 8;
18404 else
18405 desired_align = 1;
18406 break;
18407 case loop_1_byte:
18408 desired_align = 1;
18409 break;
18410 case libcall:
18411 return 0;
18412 }
18413
18414 if (optimize_size)
18415 desired_align = 1;
18416 if (desired_align < align)
18417 desired_align = align;
18418 if (expected_size != -1 && expected_size < 4)
18419 desired_align = align;
18420 return desired_align;
18421 }
18422
18423 /* Return the smallest power of 2 greater than VAL. */
18424 static int
18425 smallest_pow2_greater_than (int val)
18426 {
18427 int ret = 1;
18428 while (ret <= val)
18429 ret <<= 1;
18430 return ret;
18431 }
18432
18433 /* Expand string move (memcpy) operation. Use i386 string operations when
18434 profitable. expand_setmem contains similar code. The code depends upon
18435 architecture, block size and alignment, but always has the same
18436 overall structure:
18437
18438 1) Prologue guard: Conditional that jumps up to epilogues for small
18439 blocks that can be handled by epilogue alone. This is faster but
18440 also needed for correctness, since the prologue assumes the block is larger
18441 than the desired alignment.
18442
18443 Optional dynamic check for size and libcall for large
18444 blocks is emitted here too, with -minline-stringops-dynamically.
18445
18446 2) Prologue: copy first few bytes in order to get destination aligned
18447 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18448 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18449 We emit either a jump tree on power of two sized blocks, or a byte loop.
18450
18451 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18452 with specified algorithm.
18453
18454 4) Epilogue: code copying tail of the block that is too small to be
18455 handled by main body (or up to size guarded by prologue guard). */
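/* As a rough sketch (illustrative only; the chosen algorithm and the chunk
   size depend on the target and on decide_alg), the code emitted for a copy
   using the "loop" algorithm with a nontrivial desired alignment looks
   approximately like:

       if (count < epilogue_size_needed) goto epilogue;        1) guard
       copy bytes until dst is aligned to DESIRED_ALIGN;       2) prologue
       while (count >= size_needed)                            3) main body
         copy size_needed bytes and advance src and dst;
     epilogue:
       copy the remaining count % epilogue_size_needed bytes;  4) epilogue
*/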
18456
18457 int
18458 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18459 rtx expected_align_exp, rtx expected_size_exp)
18460 {
18461 rtx destreg;
18462 rtx srcreg;
18463 rtx label = NULL;
18464 rtx tmp;
18465 rtx jump_around_label = NULL;
18466 HOST_WIDE_INT align = 1;
18467 unsigned HOST_WIDE_INT count = 0;
18468 HOST_WIDE_INT expected_size = -1;
18469 int size_needed = 0, epilogue_size_needed;
18470 int desired_align = 0, align_bytes = 0;
18471 enum stringop_alg alg;
18472 int dynamic_check;
18473 bool need_zero_guard = false;
18474
18475 if (CONST_INT_P (align_exp))
18476 align = INTVAL (align_exp);
18477 /* i386 can do misaligned access at a reasonably increased cost. */
18478 if (CONST_INT_P (expected_align_exp)
18479 && INTVAL (expected_align_exp) > align)
18480 align = INTVAL (expected_align_exp);
18481 /* ALIGN is the minimum of destination and source alignment, but we care here
18482 just about destination alignment. */
18483 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18484 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18485
18486 if (CONST_INT_P (count_exp))
18487 count = expected_size = INTVAL (count_exp);
18488 if (CONST_INT_P (expected_size_exp) && count == 0)
18489 expected_size = INTVAL (expected_size_exp);
18490
18491 /* Make sure we don't need to care about overflow later on. */
18492 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18493 return 0;
18494
18495 /* Step 0: Decide on preferred algorithm, desired alignment and
18496 size of chunks to be copied by main loop. */
18497
18498 alg = decide_alg (count, expected_size, false, &dynamic_check);
18499 desired_align = decide_alignment (align, alg, expected_size);
18500
18501 if (!TARGET_ALIGN_STRINGOPS)
18502 align = desired_align;
18503
18504 if (alg == libcall)
18505 return 0;
18506 gcc_assert (alg != no_stringop);
18507 if (!count)
18508 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18509 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18510 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18511 switch (alg)
18512 {
18513 case libcall:
18514 case no_stringop:
18515 gcc_unreachable ();
18516 case loop:
18517 need_zero_guard = true;
18518 size_needed = GET_MODE_SIZE (Pmode);
18519 break;
18520 case unrolled_loop:
18521 need_zero_guard = true;
18522 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18523 break;
18524 case rep_prefix_8_byte:
18525 size_needed = 8;
18526 break;
18527 case rep_prefix_4_byte:
18528 size_needed = 4;
18529 break;
18530 case rep_prefix_1_byte:
18531 size_needed = 1;
18532 break;
18533 case loop_1_byte:
18534 need_zero_guard = true;
18535 size_needed = 1;
18536 break;
18537 }
18538
18539 epilogue_size_needed = size_needed;
18540
18541 /* Step 1: Prologue guard. */
18542
18543 /* Alignment code needs count to be in register. */
18544 if (CONST_INT_P (count_exp) && desired_align > align)
18545 {
18546 if (INTVAL (count_exp) > desired_align
18547 && INTVAL (count_exp) > size_needed)
18548 {
18549 align_bytes
18550 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18551 if (align_bytes <= 0)
18552 align_bytes = 0;
18553 else
18554 align_bytes = desired_align - align_bytes;
18555 }
18556 if (align_bytes == 0)
18557 count_exp = force_reg (counter_mode (count_exp), count_exp);
18558 }
18559 gcc_assert (desired_align >= 1 && align >= 1);
18560
18561 /* Ensure that alignment prologue won't copy past end of block. */
18562 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18563 {
18564 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18565 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18566 Make sure it is a power of 2. */
18567 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18568
18569 if (count)
18570 {
18571 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18572 {
18573 /* If main algorithm works on QImode, no epilogue is needed.
18574 For small sizes just don't align anything. */
18575 if (size_needed == 1)
18576 desired_align = align;
18577 else
18578 goto epilogue;
18579 }
18580 }
18581 else
18582 {
18583 label = gen_label_rtx ();
18584 emit_cmp_and_jump_insns (count_exp,
18585 GEN_INT (epilogue_size_needed),
18586 LTU, 0, counter_mode (count_exp), 1, label);
18587 if (expected_size == -1 || expected_size < epilogue_size_needed)
18588 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18589 else
18590 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18591 }
18592 }
18593
18594 /* Emit code to decide on runtime whether library call or inline should be
18595 used. */
18596 if (dynamic_check != -1)
18597 {
18598 if (CONST_INT_P (count_exp))
18599 {
18600 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18601 {
18602 emit_block_move_via_libcall (dst, src, count_exp, false);
18603 count_exp = const0_rtx;
18604 goto epilogue;
18605 }
18606 }
18607 else
18608 {
18609 rtx hot_label = gen_label_rtx ();
18610 jump_around_label = gen_label_rtx ();
18611 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18612 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18613 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18614 emit_block_move_via_libcall (dst, src, count_exp, false);
18615 emit_jump (jump_around_label);
18616 emit_label (hot_label);
18617 }
18618 }
18619
18620 /* Step 2: Alignment prologue. */
18621
18622 if (desired_align > align)
18623 {
18624 if (align_bytes == 0)
18625 {
18626 /* Except for the first move in the epilogue, we no longer know
18627 the constant offset in the aliasing info. It does not seem worth
18628 the pain to maintain it for the first move, so throw away
18629 the info early. */
18630 src = change_address (src, BLKmode, srcreg);
18631 dst = change_address (dst, BLKmode, destreg);
18632 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18633 desired_align);
18634 }
18635 else
18636 {
18637 /* If we know how many bytes need to be stored before dst is
18638 sufficiently aligned, maintain aliasing info accurately. */
18639 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18640 desired_align, align_bytes);
18641 count_exp = plus_constant (count_exp, -align_bytes);
18642 count -= align_bytes;
18643 }
18644 if (need_zero_guard
18645 && (count < (unsigned HOST_WIDE_INT) size_needed
18646 || (align_bytes == 0
18647 && count < ((unsigned HOST_WIDE_INT) size_needed
18648 + desired_align - align))))
18649 {
18650 /* It is possible that we copied enough so the main loop will not
18651 execute. */
18652 gcc_assert (size_needed > 1);
18653 if (label == NULL_RTX)
18654 label = gen_label_rtx ();
18655 emit_cmp_and_jump_insns (count_exp,
18656 GEN_INT (size_needed),
18657 LTU, 0, counter_mode (count_exp), 1, label);
18658 if (expected_size == -1
18659 || expected_size < (desired_align - align) / 2 + size_needed)
18660 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18661 else
18662 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18663 }
18664 }
18665 if (label && size_needed == 1)
18666 {
18667 emit_label (label);
18668 LABEL_NUSES (label) = 1;
18669 label = NULL;
18670 epilogue_size_needed = 1;
18671 }
18672 else if (label == NULL_RTX)
18673 epilogue_size_needed = size_needed;
18674
18675 /* Step 3: Main loop. */
18676
18677 switch (alg)
18678 {
18679 case libcall:
18680 case no_stringop:
18681 gcc_unreachable ();
18682 case loop_1_byte:
18683 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18684 count_exp, QImode, 1, expected_size);
18685 break;
18686 case loop:
18687 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18688 count_exp, Pmode, 1, expected_size);
18689 break;
18690 case unrolled_loop:
18691 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18692 registers for 4 temporaries anyway. */
18693 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18694 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18695 expected_size);
18696 break;
18697 case rep_prefix_8_byte:
18698 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18699 DImode);
18700 break;
18701 case rep_prefix_4_byte:
18702 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18703 SImode);
18704 break;
18705 case rep_prefix_1_byte:
18706 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18707 QImode);
18708 break;
18709 }
18710 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18711 if (CONST_INT_P (count_exp))
18712 {
18713 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18714 (count / size_needed) * size_needed);
18715 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18716 (count / size_needed) * size_needed);
18717 }
18718 else
18719 {
18720 src = change_address (src, BLKmode, srcreg);
18721 dst = change_address (dst, BLKmode, destreg);
18722 }
18723
18724 /* Step 4: Epilogue to copy the remaining bytes. */
18725 epilogue:
18726 if (label)
18727 {
18728 /* When the main loop is done, COUNT_EXP might hold original count,
18729 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
18730 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
18731 bytes. Compensate if needed. */
18732
18733 if (size_needed < epilogue_size_needed)
18734 {
18735 tmp =
18736 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18737 GEN_INT (size_needed - 1), count_exp, 1,
18738 OPTAB_DIRECT);
18739 if (tmp != count_exp)
18740 emit_move_insn (count_exp, tmp);
18741 }
18742 emit_label (label);
18743 LABEL_NUSES (label) = 1;
18744 }
18745
18746 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18747 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18748 epilogue_size_needed);
18749 if (jump_around_label)
18750 emit_label (jump_around_label);
18751 return 1;
18752 }
18753
18754 /* Helper function for memset. For the QImode value 0xXY produce
18755 0xXYXYXYXY of the width specified by MODE. This is essentially
18756 a * 0x01010101, but we can do slightly better than
18757 synth_mult by unwinding the sequence by hand on CPUs with
18758 a slow multiply. */
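/* A sketch of the hand-unwound sequence for SImode (illustrative; the actual
   RTL uses insv or shift-and-IOR pairs depending on TARGET_PARTIAL_REG_STALL):

       reg  = 0x000000XY;
       reg |= reg << 8;        now 0x0000XYXY
       reg |= reg << 16;       now 0xXYXYXYXY

   DImode adds one more "reg |= reg << 32" step.  When the multiply is cheap
   enough, reg * 0x01010101 (or the DImode equivalent) is emitted instead.  */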
18759 static rtx
18760 promote_duplicated_reg (enum machine_mode mode, rtx val)
18761 {
18762 enum machine_mode valmode = GET_MODE (val);
18763 rtx tmp;
18764 int nops = mode == DImode ? 3 : 2;
18765
18766 gcc_assert (mode == SImode || mode == DImode);
18767 if (val == const0_rtx)
18768 return copy_to_mode_reg (mode, const0_rtx);
18769 if (CONST_INT_P (val))
18770 {
18771 HOST_WIDE_INT v = INTVAL (val) & 255;
18772
18773 v |= v << 8;
18774 v |= v << 16;
18775 if (mode == DImode)
18776 v |= (v << 16) << 16;
18777 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18778 }
18779
18780 if (valmode == VOIDmode)
18781 valmode = QImode;
18782 if (valmode != QImode)
18783 val = gen_lowpart (QImode, val);
18784 if (mode == QImode)
18785 return val;
18786 if (!TARGET_PARTIAL_REG_STALL)
18787 nops--;
18788 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18789 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18790 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18791 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18792 {
18793 rtx reg = convert_modes (mode, QImode, val, true);
18794 tmp = promote_duplicated_reg (mode, const1_rtx);
18795 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18796 OPTAB_DIRECT);
18797 }
18798 else
18799 {
18800 rtx reg = convert_modes (mode, QImode, val, true);
18801
18802 if (!TARGET_PARTIAL_REG_STALL)
18803 if (mode == SImode)
18804 emit_insn (gen_movsi_insv_1 (reg, reg));
18805 else
18806 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18807 else
18808 {
18809 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18810 NULL, 1, OPTAB_DIRECT);
18811 reg =
18812 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18813 }
18814 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18815 NULL, 1, OPTAB_DIRECT);
18816 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18817 if (mode == SImode)
18818 return reg;
18819 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18820 NULL, 1, OPTAB_DIRECT);
18821 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18822 return reg;
18823 }
18824 }
18825
18826 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
18827 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
18828 alignment from ALIGN to DESIRED_ALIGN. */
18829 static rtx
18830 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18831 {
18832 rtx promoted_val;
18833
18834 if (TARGET_64BIT
18835 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18836 promoted_val = promote_duplicated_reg (DImode, val);
18837 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18838 promoted_val = promote_duplicated_reg (SImode, val);
18839 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18840 promoted_val = promote_duplicated_reg (HImode, val);
18841 else
18842 promoted_val = val;
18843
18844 return promoted_val;
18845 }
18846
18847 /* Expand string set operation (memset). Use i386 string operations when
18848 profitable. See the expand_movmem comment for an explanation of the
18849 individual steps performed. */
18850 int
18851 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18852 rtx expected_align_exp, rtx expected_size_exp)
18853 {
18854 rtx destreg;
18855 rtx label = NULL;
18856 rtx tmp;
18857 rtx jump_around_label = NULL;
18858 HOST_WIDE_INT align = 1;
18859 unsigned HOST_WIDE_INT count = 0;
18860 HOST_WIDE_INT expected_size = -1;
18861 int size_needed = 0, epilogue_size_needed;
18862 int desired_align = 0, align_bytes = 0;
18863 enum stringop_alg alg;
18864 rtx promoted_val = NULL;
18865 bool force_loopy_epilogue = false;
18866 int dynamic_check;
18867 bool need_zero_guard = false;
18868
18869 if (CONST_INT_P (align_exp))
18870 align = INTVAL (align_exp);
18871 /* i386 can do misaligned access at a reasonably increased cost. */
18872 if (CONST_INT_P (expected_align_exp)
18873 && INTVAL (expected_align_exp) > align)
18874 align = INTVAL (expected_align_exp);
18875 if (CONST_INT_P (count_exp))
18876 count = expected_size = INTVAL (count_exp);
18877 if (CONST_INT_P (expected_size_exp) && count == 0)
18878 expected_size = INTVAL (expected_size_exp);
18879
18880 /* Make sure we don't need to care about overflow later on. */
18881 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18882 return 0;
18883
18884 /* Step 0: Decide on preferred algorithm, desired alignment and
18885 size of chunks to be copied by main loop. */
18886
18887 alg = decide_alg (count, expected_size, true, &dynamic_check);
18888 desired_align = decide_alignment (align, alg, expected_size);
18889
18890 if (!TARGET_ALIGN_STRINGOPS)
18891 align = desired_align;
18892
18893 if (alg == libcall)
18894 return 0;
18895 gcc_assert (alg != no_stringop);
18896 if (!count)
18897 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18898 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18899 switch (alg)
18900 {
18901 case libcall:
18902 case no_stringop:
18903 gcc_unreachable ();
18904 case loop:
18905 need_zero_guard = true;
18906 size_needed = GET_MODE_SIZE (Pmode);
18907 break;
18908 case unrolled_loop:
18909 need_zero_guard = true;
18910 size_needed = GET_MODE_SIZE (Pmode) * 4;
18911 break;
18912 case rep_prefix_8_byte:
18913 size_needed = 8;
18914 break;
18915 case rep_prefix_4_byte:
18916 size_needed = 4;
18917 break;
18918 case rep_prefix_1_byte:
18919 size_needed = 1;
18920 break;
18921 case loop_1_byte:
18922 need_zero_guard = true;
18923 size_needed = 1;
18924 break;
18925 }
18926 epilogue_size_needed = size_needed;
18927
18928 /* Step 1: Prologue guard. */
18929
18930 /* Alignment code needs count to be in register. */
18931 if (CONST_INT_P (count_exp) && desired_align > align)
18932 {
18933 if (INTVAL (count_exp) > desired_align
18934 && INTVAL (count_exp) > size_needed)
18935 {
18936 align_bytes
18937 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18938 if (align_bytes <= 0)
18939 align_bytes = 0;
18940 else
18941 align_bytes = desired_align - align_bytes;
18942 }
18943 if (align_bytes == 0)
18944 {
18945 enum machine_mode mode = SImode;
18946 if (TARGET_64BIT && (count & ~0xffffffff))
18947 mode = DImode;
18948 count_exp = force_reg (mode, count_exp);
18949 }
18950 }
18951 /* Do the cheap promotion to allow better CSE across the
18952 main loop and epilogue (i.e. one load of the big constant in
18953 front of all the code). */
18954 if (CONST_INT_P (val_exp))
18955 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18956 desired_align, align);
18957 /* Ensure that alignment prologue won't copy past end of block. */
18958 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18959 {
18960 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18961 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18962 Make sure it is power of 2. */
18963 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18964
18965 /* To improve performance of small blocks, we jump around the VAL
18966 promoting code. This means that if the promoted VAL is not a constant,
18967 we might not use it in the epilogue and have to use the byte
18968 loop variant. */
18969 if (epilogue_size_needed > 2 && !promoted_val)
18970 force_loopy_epilogue = true;
18971 if (count)
18972 {
18973 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18974 {
18975 /* If main algorithm works on QImode, no epilogue is needed.
18976 For small sizes just don't align anything. */
18977 if (size_needed == 1)
18978 desired_align = align;
18979 else
18980 goto epilogue;
18981 }
18982 }
18983 else
18984 {
18985 label = gen_label_rtx ();
18986 emit_cmp_and_jump_insns (count_exp,
18987 GEN_INT (epilogue_size_needed),
18988 LTU, 0, counter_mode (count_exp), 1, label);
18989 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18990 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18991 else
18992 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18993 }
18994 }
18995 if (dynamic_check != -1)
18996 {
18997 rtx hot_label = gen_label_rtx ();
18998 jump_around_label = gen_label_rtx ();
18999 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
19000 LEU, 0, counter_mode (count_exp), 1, hot_label);
19001 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19002 set_storage_via_libcall (dst, count_exp, val_exp, false);
19003 emit_jump (jump_around_label);
19004 emit_label (hot_label);
19005 }
19006
19007 /* Step 2: Alignment prologue. */
19008
19009 /* Do the expensive promotion once we have branched off the small blocks. */
19010 if (!promoted_val)
19011 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19012 desired_align, align);
19013 gcc_assert (desired_align >= 1 && align >= 1);
19014
19015 if (desired_align > align)
19016 {
19017 if (align_bytes == 0)
19018 {
19019 /* Except for the first move in the epilogue, we no longer know
19020 the constant offset in the aliasing info. It does not seem worth
19021 the pain to maintain it for the first move, so throw away
19022 the info early. */
19023 dst = change_address (dst, BLKmode, destreg);
19024 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
19025 desired_align);
19026 }
19027 else
19028 {
19029 /* If we know how many bytes need to be stored before dst is
19030 sufficiently aligned, maintain aliasing info accurately. */
19031 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
19032 desired_align, align_bytes);
19033 count_exp = plus_constant (count_exp, -align_bytes);
19034 count -= align_bytes;
19035 }
19036 if (need_zero_guard
19037 && (count < (unsigned HOST_WIDE_INT) size_needed
19038 || (align_bytes == 0
19039 && count < ((unsigned HOST_WIDE_INT) size_needed
19040 + desired_align - align))))
19041 {
19042 /* It is possible that we copied enough so the main loop will not
19043 execute. */
19044 gcc_assert (size_needed > 1);
19045 if (label == NULL_RTX)
19046 label = gen_label_rtx ();
19047 emit_cmp_and_jump_insns (count_exp,
19048 GEN_INT (size_needed),
19049 LTU, 0, counter_mode (count_exp), 1, label);
19050 if (expected_size == -1
19051 || expected_size < (desired_align - align) / 2 + size_needed)
19052 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19053 else
19054 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19055 }
19056 }
19057 if (label && size_needed == 1)
19058 {
19059 emit_label (label);
19060 LABEL_NUSES (label) = 1;
19061 label = NULL;
19062 promoted_val = val_exp;
19063 epilogue_size_needed = 1;
19064 }
19065 else if (label == NULL_RTX)
19066 epilogue_size_needed = size_needed;
19067
19068 /* Step 3: Main loop. */
19069
19070 switch (alg)
19071 {
19072 case libcall:
19073 case no_stringop:
19074 gcc_unreachable ();
19075 case loop_1_byte:
19076 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19077 count_exp, QImode, 1, expected_size);
19078 break;
19079 case loop:
19080 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19081 count_exp, Pmode, 1, expected_size);
19082 break;
19083 case unrolled_loop:
19084 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19085 count_exp, Pmode, 4, expected_size);
19086 break;
19087 case rep_prefix_8_byte:
19088 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19089 DImode, val_exp);
19090 break;
19091 case rep_prefix_4_byte:
19092 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19093 SImode, val_exp);
19094 break;
19095 case rep_prefix_1_byte:
19096 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19097 QImode, val_exp);
19098 break;
19099 }
19100 /* Properly adjust the offset of the dest memory for aliasing. */
19101 if (CONST_INT_P (count_exp))
19102 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
19103 (count / size_needed) * size_needed);
19104 else
19105 dst = change_address (dst, BLKmode, destreg);
19106
19107 /* Step 4: Epilogue to copy the remaining bytes. */
19108
19109 if (label)
19110 {
19111 /* When the main loop is done, COUNT_EXP might hold original count,
19112 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
19113 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
19114 bytes. Compensate if needed. */
19115
19116 if (size_needed < epilogue_size_needed)
19117 {
19118 tmp =
19119 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
19120 GEN_INT (size_needed - 1), count_exp, 1,
19121 OPTAB_DIRECT);
19122 if (tmp != count_exp)
19123 emit_move_insn (count_exp, tmp);
19124 }
19125 emit_label (label);
19126 LABEL_NUSES (label) = 1;
19127 }
19128 epilogue:
19129 if (count_exp != const0_rtx && epilogue_size_needed > 1)
19130 {
19131 if (force_loopy_epilogue)
19132 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
19133 epilogue_size_needed);
19134 else
19135 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
19136 epilogue_size_needed);
19137 }
19138 if (jump_around_label)
19139 emit_label (jump_around_label);
19140 return 1;
19141 }
19142
19143 /* Expand the appropriate insns for doing strlen if not just doing
19144 repnz; scasb
19145
19146 out = result, initialized with the start address
19147 align_rtx = alignment of the address.
19148 scratch = scratch register, initialized with the start address when
19149 not aligned, otherwise undefined
19150
19151 This is just the body. It needs the initializations mentioned above and
19152 some address computing at the end. These things are done in i386.md. */
19153
19154 static void
19155 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19156 {
19157 int align;
19158 rtx tmp;
19159 rtx align_2_label = NULL_RTX;
19160 rtx align_3_label = NULL_RTX;
19161 rtx align_4_label = gen_label_rtx ();
19162 rtx end_0_label = gen_label_rtx ();
19163 rtx mem;
19164 rtx tmpreg = gen_reg_rtx (SImode);
19165 rtx scratch = gen_reg_rtx (SImode);
19166 rtx cmp;
19167
19168 align = 0;
19169 if (CONST_INT_P (align_rtx))
19170 align = INTVAL (align_rtx);
19171
19172 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19173
19174 /* Is there a known alignment and is it less than 4? */
19175 if (align < 4)
19176 {
19177 rtx scratch1 = gen_reg_rtx (Pmode);
19178 emit_move_insn (scratch1, out);
19179 /* Is there a known alignment and is it not 2? */
19180 if (align != 2)
19181 {
19182 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19183 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19184
19185 /* Leave just the 3 lower bits. */
19186 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19187 NULL_RTX, 0, OPTAB_WIDEN);
19188
19189 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19190 Pmode, 1, align_4_label);
19191 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19192 Pmode, 1, align_2_label);
19193 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19194 Pmode, 1, align_3_label);
19195 }
19196 else
19197 {
19198 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19199 check whether it is aligned to a 4-byte boundary. */
19200
19201 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19202 NULL_RTX, 0, OPTAB_WIDEN);
19203
19204 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19205 Pmode, 1, align_4_label);
19206 }
19207
19208 mem = change_address (src, QImode, out);
19209
19210 /* Now compare the bytes. */
19211
19212 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19213 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19214 QImode, 1, end_0_label);
19215
19216 /* Increment the address. */
19217 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19218
19219 /* Not needed with an alignment of 2 */
19220 if (align != 2)
19221 {
19222 emit_label (align_2_label);
19223
19224 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19225 end_0_label);
19226
19227 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19228
19229 emit_label (align_3_label);
19230 }
19231
19232 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19233 end_0_label);
19234
19235 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19236 }
19237
19238 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
19239 align this loop; it only makes programs larger and does not help
19240 speed them up. */
19241 emit_label (align_4_label);
19242
19243 mem = change_address (src, SImode, out);
19244 emit_move_insn (scratch, mem);
19245 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19246
19247 /* This formula yields a nonzero result iff one of the bytes is zero.
19248 This saves three branches inside the loop and many cycles. */
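/* A worked example with illustrative values: for scratch = 0x41004141,
   where the third byte is zero,

       scratch - 0x01010101   = 0x3FFF4040
       ~scratch               = 0xBEFFBEBE
       AND of the two         = 0x3EFF0000
       ... & 0x80808080       = 0x00800000    nonzero, so a zero byte exists

   while for scratch = 0x41414141 the final AND is 0 and the loop continues.  */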
19249
19250 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19251 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19252 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19253 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19254 gen_int_mode (0x80808080, SImode)));
19255 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19256 align_4_label);
19257
19258 if (TARGET_CMOVE)
19259 {
19260 rtx reg = gen_reg_rtx (SImode);
19261 rtx reg2 = gen_reg_rtx (Pmode);
19262 emit_move_insn (reg, tmpreg);
19263 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19264
19265 /* If zero is not in the first two bytes, move two bytes forward. */
19266 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19267 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19268 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19269 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19270 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19271 reg,
19272 tmpreg)));
19273 /* Emit lea manually to avoid clobbering of flags. */
19274 emit_insn (gen_rtx_SET (SImode, reg2,
19275 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19276
19277 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19278 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19279 emit_insn (gen_rtx_SET (VOIDmode, out,
19280 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19281 reg2,
19282 out)));
19283 }
19284 else
19285 {
19286 rtx end_2_label = gen_label_rtx ();
19287 /* Is zero in the first two bytes? */
19288
19289 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19290 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19291 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19292 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19293 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19294 pc_rtx);
19295 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19296 JUMP_LABEL (tmp) = end_2_label;
19297
19298 /* Not in the first two. Move two bytes forward. */
19299 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19300 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19301
19302 emit_label (end_2_label);
19303
19304 }
19305
19306 /* Avoid branch in fixing the byte. */
19307 tmpreg = gen_lowpart (QImode, tmpreg);
19308 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19309 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19310 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19311 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19312
19313 emit_label (end_0_label);
19314 }
19315
19316 /* Expand strlen. */
19317
19318 int
19319 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19320 {
19321 rtx addr, scratch1, scratch2, scratch3, scratch4;
19322
19323 /* The generic case of the strlen expander is long. Avoid
19324 expanding it unless TARGET_INLINE_ALL_STRINGOPS. */
19325
19326 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19327 && !TARGET_INLINE_ALL_STRINGOPS
19328 && !optimize_insn_for_size_p ()
19329 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19330 return 0;
19331
19332 addr = force_reg (Pmode, XEXP (src, 0));
19333 scratch1 = gen_reg_rtx (Pmode);
19334
19335 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19336 && !optimize_insn_for_size_p ())
19337 {
19338 /* Well it seems that some optimizer does not combine a call like
19339 foo(strlen(bar), strlen(bar));
19340 when the move and the subtraction are done here. It does calculate
19341 the length just once when these instructions are done inside of
19342 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
19343 often used and I use one fewer register for the lifetime of
19344 output_strlen_unroll() this is better. */
19345
19346 emit_move_insn (out, addr);
19347
19348 ix86_expand_strlensi_unroll_1 (out, src, align);
19349
19350 /* strlensi_unroll_1 returns the address of the zero at the end of
19351 the string, like memchr(), so compute the length by subtracting
19352 the start address. */
19353 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19354 }
19355 else
19356 {
19357 rtx unspec;
19358
19359 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19360 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19361 return false;
19362
19363 scratch2 = gen_reg_rtx (Pmode);
19364 scratch3 = gen_reg_rtx (Pmode);
19365 scratch4 = force_reg (Pmode, constm1_rtx);
19366
19367 emit_move_insn (scratch3, addr);
19368 eoschar = force_reg (QImode, eoschar);
19369
19370 src = replace_equiv_address_nv (src, scratch3);
19371
19372 /* If .md starts supporting :P, this can be done in .md. */
19373 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19374 scratch4), UNSPEC_SCAS);
19375 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19376 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19377 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19378 }
19379 return 1;
19380 }
19381
19382 /* For a given symbol (function), construct code to compute the address of its
19383 PLT entry in the large x86-64 PIC model. */
19384 rtx
19385 construct_plt_address (rtx symbol)
19386 {
19387 rtx tmp = gen_reg_rtx (Pmode);
19388 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19389
19390 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19391 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19392
19393 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19394 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19395 return tmp;
19396 }
19397
19398 void
19399 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19400 rtx callarg2,
19401 rtx pop, int sibcall)
19402 {
19403 rtx use = NULL, call;
19404
19405 if (pop == const0_rtx)
19406 pop = NULL;
19407 gcc_assert (!TARGET_64BIT || !pop);
19408
19409 if (TARGET_MACHO && !TARGET_64BIT)
19410 {
19411 #if TARGET_MACHO
19412 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19413 fnaddr = machopic_indirect_call_target (fnaddr);
19414 #endif
19415 }
19416 else
19417 {
19418 /* Static functions and indirect calls don't need the pic register. */
19419 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19420 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19421 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19422 use_reg (&use, pic_offset_table_rtx);
19423 }
19424
19425 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19426 {
19427 rtx al = gen_rtx_REG (QImode, AX_REG);
19428 emit_move_insn (al, callarg2);
19429 use_reg (&use, al);
19430 }
19431
19432 if (ix86_cmodel == CM_LARGE_PIC
19433 && MEM_P (fnaddr)
19434 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19435 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19436 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19437 else if (sibcall
19438 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19439 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19440 {
19441 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19442 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19443 }
19444
19445 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19446 if (retval)
19447 call = gen_rtx_SET (VOIDmode, retval, call);
19448 if (pop)
19449 {
19450 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19451 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19452 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19453 }
19454 if (TARGET_64BIT
19455 && ix86_cfun_abi () == MS_ABI
19456 && (!callarg2 || INTVAL (callarg2) != -2))
19457 {
19458 /* We need to represent that SI and DI registers are clobbered
19459 by SYSV calls. */
19460 static int clobbered_registers[] = {
19461 XMM6_REG, XMM7_REG, XMM8_REG,
19462 XMM9_REG, XMM10_REG, XMM11_REG,
19463 XMM12_REG, XMM13_REG, XMM14_REG,
19464 XMM15_REG, SI_REG, DI_REG
19465 };
19466 unsigned int i;
19467 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19468 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19469 UNSPEC_MS_TO_SYSV_CALL);
19470
19471 vec[0] = call;
19472 vec[1] = unspec;
19473 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19474 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19475 ? TImode : DImode,
19476 gen_rtx_REG
19477 (SSE_REGNO_P (clobbered_registers[i])
19478 ? TImode : DImode,
19479 clobbered_registers[i]));
19480
19481 call = gen_rtx_PARALLEL (VOIDmode,
19482 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19483 + 2, vec));
19484 }
19485
19486 call = emit_call_insn (call);
19487 if (use)
19488 CALL_INSN_FUNCTION_USAGE (call) = use;
19489 }
19490
19491 \f
19492 /* Clear stack slot assignments remembered from previous functions.
19493 This is called from INIT_EXPANDERS once before RTL is emitted for each
19494 function. */
19495
19496 static struct machine_function *
19497 ix86_init_machine_status (void)
19498 {
19499 struct machine_function *f;
19500
19501 f = GGC_CNEW (struct machine_function);
19502 f->use_fast_prologue_epilogue_nregs = -1;
19503 f->tls_descriptor_call_expanded_p = 0;
19504 f->call_abi = ix86_abi;
19505
19506 return f;
19507 }
19508
19509 /* Return a MEM corresponding to a stack slot with mode MODE.
19510 Allocate a new slot if necessary.
19511
19512 The RTL for a function can have several slots available: N is
19513 which slot to use. */
19514
19515 rtx
19516 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19517 {
19518 struct stack_local_entry *s;
19519
19520 gcc_assert (n < MAX_386_STACK_LOCALS);
19521
19522 /* Virtual slot is valid only before vregs are instantiated. */
19523 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19524
19525 for (s = ix86_stack_locals; s; s = s->next)
19526 if (s->mode == mode && s->n == n)
19527 return copy_rtx (s->rtl);
19528
19529 s = (struct stack_local_entry *)
19530 ggc_alloc (sizeof (struct stack_local_entry));
19531 s->n = n;
19532 s->mode = mode;
19533 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19534
19535 s->next = ix86_stack_locals;
19536 ix86_stack_locals = s;
19537 return s->rtl;
19538 }
19539
19540 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19541
19542 static GTY(()) rtx ix86_tls_symbol;
19543 rtx
19544 ix86_tls_get_addr (void)
19545 {
19546
19547 if (!ix86_tls_symbol)
19548 {
19549 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19550 (TARGET_ANY_GNU_TLS
19551 && !TARGET_64BIT)
19552 ? "___tls_get_addr"
19553 : "__tls_get_addr");
19554 }
19555
19556 return ix86_tls_symbol;
19557 }
19558
19559 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19560
19561 static GTY(()) rtx ix86_tls_module_base_symbol;
19562 rtx
19563 ix86_tls_module_base (void)
19564 {
19565
19566 if (!ix86_tls_module_base_symbol)
19567 {
19568 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19569 "_TLS_MODULE_BASE_");
19570 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19571 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19572 }
19573
19574 return ix86_tls_module_base_symbol;
19575 }
19576 \f
19577 /* Calculate the length of the memory address in the instruction
19578 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19579
19580 int
19581 memory_address_length (rtx addr)
19582 {
19583 struct ix86_address parts;
19584 rtx base, index, disp;
19585 int len;
19586 int ok;
19587
19588 if (GET_CODE (addr) == PRE_DEC
19589 || GET_CODE (addr) == POST_INC
19590 || GET_CODE (addr) == PRE_MODIFY
19591 || GET_CODE (addr) == POST_MODIFY)
19592 return 0;
19593
19594 ok = ix86_decompose_address (addr, &parts);
19595 gcc_assert (ok);
19596
19597 if (parts.base && GET_CODE (parts.base) == SUBREG)
19598 parts.base = SUBREG_REG (parts.base);
19599 if (parts.index && GET_CODE (parts.index) == SUBREG)
19600 parts.index = SUBREG_REG (parts.index);
19601
19602 base = parts.base;
19603 index = parts.index;
19604 disp = parts.disp;
19605 len = 0;
19606
19607 /* Rule of thumb:
19608 - esp as the base always wants an index,
19609 - ebp as the base always wants a displacement,
19610 - r12 as the base always wants an index,
19611 - r13 as the base always wants a displacement. */
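/* For instance (illustrative encodings): plain (%ebp) cannot use the mod=00
   form, so it is emitted as mod=01 with a zero disp8, costing one extra byte,
   and plain (%esp) needs a SIB byte, also one extra byte.  In 64-bit code r13
   and r12 behave like ebp and esp respectively.  */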
19612
19613 /* Register Indirect. */
19614 if (base && !index && !disp)
19615 {
19616 /* esp (for its index) and ebp (for its displacement) need
19617 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19618 code. */
19619 if (REG_P (addr)
19620 && (addr == arg_pointer_rtx
19621 || addr == frame_pointer_rtx
19622 || REGNO (addr) == SP_REG
19623 || REGNO (addr) == BP_REG
19624 || REGNO (addr) == R12_REG
19625 || REGNO (addr) == R13_REG))
19626 len = 1;
19627 }
19628
19629 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19630 is not disp32, but disp32(%rip), so for disp32
19631 a SIB byte is needed, unless print_operand_address
19632 optimizes it into disp32(%rip) or (%rip) is implied
19633 by UNSPEC. */
19634 else if (disp && !base && !index)
19635 {
19636 len = 4;
19637 if (TARGET_64BIT)
19638 {
19639 rtx symbol = disp;
19640
19641 if (GET_CODE (disp) == CONST)
19642 symbol = XEXP (disp, 0);
19643 if (GET_CODE (symbol) == PLUS
19644 && CONST_INT_P (XEXP (symbol, 1)))
19645 symbol = XEXP (symbol, 0);
19646
19647 if (GET_CODE (symbol) != LABEL_REF
19648 && (GET_CODE (symbol) != SYMBOL_REF
19649 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19650 && (GET_CODE (symbol) != UNSPEC
19651 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19652 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19653 len += 1;
19654 }
19655 }
19656
19657 else
19658 {
19659 /* Find the length of the displacement constant. */
19660 if (disp)
19661 {
19662 if (base && satisfies_constraint_K (disp))
19663 len = 1;
19664 else
19665 len = 4;
19666 }
19667 /* ebp always wants a displacement. Similarly r13. */
19668 else if (base && REG_P (base)
19669 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19670 len = 1;
19671
19672 /* An index requires the two-byte modrm form.... */
19673 if (index
19674 /* ...like esp (or r12), which always wants an index. */
19675 || base == arg_pointer_rtx
19676 || base == frame_pointer_rtx
19677 || (base && REG_P (base)
19678 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19679 len += 1;
19680 }
19681
19682 switch (parts.seg)
19683 {
19684 case SEG_FS:
19685 case SEG_GS:
19686 len += 1;
19687 break;
19688 default:
19689 break;
19690 }
19691
19692 return len;
19693 }
19694
19695 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19696 is set, expect that the insn has an 8-bit immediate alternative. */
19697 int
19698 ix86_attr_length_immediate_default (rtx insn, int shortform)
19699 {
19700 int len = 0;
19701 int i;
19702 extract_insn_cached (insn);
19703 for (i = recog_data.n_operands - 1; i >= 0; --i)
19704 if (CONSTANT_P (recog_data.operand[i]))
19705 {
19706 enum attr_mode mode = get_attr_mode (insn);
19707
19708 gcc_assert (!len);
19709 if (shortform && CONST_INT_P (recog_data.operand[i]))
19710 {
19711 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19712 switch (mode)
19713 {
19714 case MODE_QI:
19715 len = 1;
19716 continue;
19717 case MODE_HI:
19718 ival = trunc_int_for_mode (ival, HImode);
19719 break;
19720 case MODE_SI:
19721 ival = trunc_int_for_mode (ival, SImode);
19722 break;
19723 default:
19724 break;
19725 }
19726 if (IN_RANGE (ival, -128, 127))
19727 {
19728 len = 1;
19729 continue;
19730 }
19731 }
19732 switch (mode)
19733 {
19734 case MODE_QI:
19735 len = 1;
19736 break;
19737 case MODE_HI:
19738 len = 2;
19739 break;
19740 case MODE_SI:
19741 len = 4;
19742 break;
19743 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19744 case MODE_DI:
19745 len = 4;
19746 break;
19747 default:
19748 fatal_insn ("unknown insn mode", insn);
19749 }
19750 }
19751 return len;
19752 }
19753 /* Compute default value for "length_address" attribute. */
19754 int
19755 ix86_attr_length_address_default (rtx insn)
19756 {
19757 int i;
19758
19759 if (get_attr_type (insn) == TYPE_LEA)
19760 {
19761 rtx set = PATTERN (insn), addr;
19762
19763 if (GET_CODE (set) == PARALLEL)
19764 set = XVECEXP (set, 0, 0);
19765
19766 gcc_assert (GET_CODE (set) == SET);
19767
19768 addr = SET_SRC (set);
19769 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19770 {
19771 if (GET_CODE (addr) == ZERO_EXTEND)
19772 addr = XEXP (addr, 0);
19773 if (GET_CODE (addr) == SUBREG)
19774 addr = SUBREG_REG (addr);
19775 }
19776
19777 return memory_address_length (addr);
19778 }
19779
19780 extract_insn_cached (insn);
19781 for (i = recog_data.n_operands - 1; i >= 0; --i)
19782 if (MEM_P (recog_data.operand[i]))
19783 {
19784 constrain_operands_cached (reload_completed);
19785 if (which_alternative != -1)
19786 {
19787 const char *constraints = recog_data.constraints[i];
19788 int alt = which_alternative;
19789
19790 while (*constraints == '=' || *constraints == '+')
19791 constraints++;
19792 while (alt-- > 0)
19793 while (*constraints++ != ',')
19794 ;
19795 /* Skip ignored operands. */
19796 if (*constraints == 'X')
19797 continue;
19798 }
19799 return memory_address_length (XEXP (recog_data.operand[i], 0));
19800 }
19801 return 0;
19802 }
19803
19804 /* Compute default value for "length_vex" attribute. It includes
19805 2 or 3 byte VEX prefix and 1 opcode byte. */
19806
19807 int
19808 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19809 int has_vex_w)
19810 {
19811 int i;
19812
19813 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit
19814 requires the 3-byte VEX prefix. */
19815 if (!has_0f_opcode || has_vex_w)
19816 return 3 + 1;
19817
19818 /* We can always use 2 byte VEX prefix in 32bit. */
19819 if (!TARGET_64BIT)
19820 return 2 + 1;
19821
19822 extract_insn_cached (insn);
19823
19824 for (i = recog_data.n_operands - 1; i >= 0; --i)
19825 if (REG_P (recog_data.operand[i]))
19826 {
19827 /* REX.W bit uses 3 byte VEX prefix. */
19828 if (GET_MODE (recog_data.operand[i]) == DImode
19829 && GENERAL_REG_P (recog_data.operand[i]))
19830 return 3 + 1;
19831 }
19832 else
19833 {
19834 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19835 if (MEM_P (recog_data.operand[i])
19836 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19837 return 3 + 1;
19838 }
19839
19840 return 2 + 1;
19841 }
19842 \f
19843 /* Return the maximum number of instructions a cpu can issue. */
19844
19845 static int
19846 ix86_issue_rate (void)
19847 {
19848 switch (ix86_tune)
19849 {
19850 case PROCESSOR_PENTIUM:
19851 case PROCESSOR_ATOM:
19852 case PROCESSOR_K6:
19853 return 2;
19854
19855 case PROCESSOR_PENTIUMPRO:
19856 case PROCESSOR_PENTIUM4:
19857 case PROCESSOR_ATHLON:
19858 case PROCESSOR_K8:
19859 case PROCESSOR_AMDFAM10:
19860 case PROCESSOR_NOCONA:
19861 case PROCESSOR_GENERIC32:
19862 case PROCESSOR_GENERIC64:
19863 case PROCESSOR_BDVER1:
19864 return 3;
19865
19866 case PROCESSOR_CORE2:
19867 return 4;
19868
19869 default:
19870 return 1;
19871 }
19872 }
19873
19874 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19875 by DEP_INSN but no other register set by DEP_INSN. */
19876
19877 static int
19878 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19879 {
19880 rtx set, set2;
19881
19882 /* Simplify the test for uninteresting insns. */
19883 if (insn_type != TYPE_SETCC
19884 && insn_type != TYPE_ICMOV
19885 && insn_type != TYPE_FCMOV
19886 && insn_type != TYPE_IBR)
19887 return 0;
19888
19889 if ((set = single_set (dep_insn)) != 0)
19890 {
19891 set = SET_DEST (set);
19892 set2 = NULL_RTX;
19893 }
19894 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19895 && XVECLEN (PATTERN (dep_insn), 0) == 2
19896 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19897 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19898 {
19899 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19900 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19901 }
19902 else
19903 return 0;
19904
19905 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19906 return 0;
19907
19908 /* This test is true if the dependent insn reads the flags but
19909 not any other potentially set register. */
19910 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19911 return 0;
19912
19913 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19914 return 0;
19915
19916 return 1;
19917 }
19918
19919 /* Return true iff USE_INSN has a memory address with operands set by
19920 SET_INSN. */
19921
19922 bool
19923 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19924 {
19925 int i;
19926 extract_insn_cached (use_insn);
19927 for (i = recog_data.n_operands - 1; i >= 0; --i)
19928 if (MEM_P (recog_data.operand[i]))
19929 {
19930 rtx addr = XEXP (recog_data.operand[i], 0);
19931 return modified_in_p (addr, set_insn) != 0;
19932 }
19933 return false;
19934 }
19935
19936 static int
19937 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19938 {
19939 enum attr_type insn_type, dep_insn_type;
19940 enum attr_memory memory;
19941 rtx set, set2;
19942 int dep_insn_code_number;
19943
19944 /* Anti and output dependencies have zero cost on all CPUs. */
19945 if (REG_NOTE_KIND (link) != 0)
19946 return 0;
19947
19948 dep_insn_code_number = recog_memoized (dep_insn);
19949
19950 /* If we can't recognize the insns, we can't really do anything. */
19951 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19952 return cost;
19953
19954 insn_type = get_attr_type (insn);
19955 dep_insn_type = get_attr_type (dep_insn);
19956
19957 switch (ix86_tune)
19958 {
19959 case PROCESSOR_PENTIUM:
19960 /* Address Generation Interlock adds a cycle of latency. */
19961 if (insn_type == TYPE_LEA)
19962 {
19963 rtx addr = PATTERN (insn);
19964
19965 if (GET_CODE (addr) == PARALLEL)
19966 addr = XVECEXP (addr, 0, 0);
19967
19968 gcc_assert (GET_CODE (addr) == SET);
19969
19970 addr = SET_SRC (addr);
19971 if (modified_in_p (addr, dep_insn))
19972 cost += 1;
19973 }
19974 else if (ix86_agi_dependent (dep_insn, insn))
19975 cost += 1;
19976
19977 /* ??? Compares pair with jump/setcc. */
19978 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19979 cost = 0;
19980
19981 /* Floating point stores require value to be ready one cycle earlier. */
19982 if (insn_type == TYPE_FMOV
19983 && get_attr_memory (insn) == MEMORY_STORE
19984 && !ix86_agi_dependent (dep_insn, insn))
19985 cost += 1;
19986 break;
19987
19988 case PROCESSOR_PENTIUMPRO:
19989 memory = get_attr_memory (insn);
19990
19991 /* INT->FP conversion is expensive. */
19992 if (get_attr_fp_int_src (dep_insn))
19993 cost += 5;
19994
19995 /* There is one cycle extra latency between an FP op and a store. */
19996 if (insn_type == TYPE_FMOV
19997 && (set = single_set (dep_insn)) != NULL_RTX
19998 && (set2 = single_set (insn)) != NULL_RTX
19999 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
20000 && MEM_P (SET_DEST (set2)))
20001 cost += 1;
20002
20003 /* Show the ability of the reorder buffer to hide the latency of a load by
20004 executing it in parallel with the previous instruction, in case the
20005 previous instruction is not needed to compute the address. */
20006 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20007 && !ix86_agi_dependent (dep_insn, insn))
20008 {
20009 /* Claim moves to take one cycle, as the core can issue one load
20010 at a time and the next load can start a cycle later. */
20011 if (dep_insn_type == TYPE_IMOV
20012 || dep_insn_type == TYPE_FMOV)
20013 cost = 1;
20014 else if (cost > 1)
20015 cost--;
20016 }
20017 break;
20018
20019 case PROCESSOR_K6:
20020 memory = get_attr_memory (insn);
20021
20022 /* The esp dependency is resolved before the instruction is really
20023 finished. */
20024 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
20025 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
20026 return 1;
20027
20028 /* INT->FP conversion is expensive. */
20029 if (get_attr_fp_int_src (dep_insn))
20030 cost += 5;
20031
20032 /* Show the ability of the reorder buffer to hide the latency of a load by
20033 executing it in parallel with the previous instruction, in case the
20034 previous instruction is not needed to compute the address. */
20035 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20036 && !ix86_agi_dependent (dep_insn, insn))
20037 {
20038 /* Claim moves to take one cycle, as the core can issue one load
20039 at a time and the next load can start a cycle later. */
20040 if (dep_insn_type == TYPE_IMOV
20041 || dep_insn_type == TYPE_FMOV)
20042 cost = 1;
20043 else if (cost > 2)
20044 cost -= 2;
20045 else
20046 cost = 1;
20047 }
20048 break;
20049
20050 case PROCESSOR_ATHLON:
20051 case PROCESSOR_K8:
20052 case PROCESSOR_AMDFAM10:
20053 case PROCESSOR_BDVER1:
20054 case PROCESSOR_ATOM:
20055 case PROCESSOR_GENERIC32:
20056 case PROCESSOR_GENERIC64:
20057 memory = get_attr_memory (insn);
20058
20059 /* Show the ability of the reorder buffer to hide the latency of a load by
20060 executing it in parallel with the previous instruction, in case the
20061 previous instruction is not needed to compute the address. */
20062 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20063 && !ix86_agi_dependent (dep_insn, insn))
20064 {
20065 enum attr_unit unit = get_attr_unit (insn);
20066 int loadcost = 3;
20067
20068 /* Because of the difference in length between the integer and
20069 floating-point unit pipeline preparation stages, the memory operands
20070 for floating point are cheaper.
20071
20072 ??? For Athlon the difference is most probably 2. */
20073 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
20074 loadcost = 3;
20075 else
20076 loadcost = TARGET_ATHLON ? 2 : 0;
20077
20078 if (cost >= loadcost)
20079 cost -= loadcost;
20080 else
20081 cost = 0;
20082 }
20083
20084 default:
20085 break;
20086 }
20087
20088 return cost;
20089 }
20090
20091 /* How many alternative schedules to try. This should be as wide as the
20092 scheduling freedom in the DFA, but no wider. Making this value too
20093 large results in extra work for the scheduler. */
20094
20095 static int
20096 ia32_multipass_dfa_lookahead (void)
20097 {
20098 switch (ix86_tune)
20099 {
20100 case PROCESSOR_PENTIUM:
20101 return 2;
20102
20103 case PROCESSOR_PENTIUMPRO:
20104 case PROCESSOR_K6:
20105 return 1;
20106
20107 default:
20108 return 0;
20109 }
20110 }
20111
20112 \f
20113 /* Compute the alignment given to a constant that is being placed in memory.
20114 EXP is the constant and ALIGN is the alignment that the object would
20115 ordinarily have.
20116 The value of this function is used instead of that alignment to align
20117 the object. */
20118
20119 int
20120 ix86_constant_alignment (tree exp, int align)
20121 {
20122 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
20123 || TREE_CODE (exp) == INTEGER_CST)
20124 {
20125 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
20126 return 64;
20127 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
20128 return 128;
20129 }
20130 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
20131 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
20132 return BITS_PER_WORD;
20133
20134 return align;
20135 }
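/* Illustrative example (a sketch of the checks above, not normative): with
   -m32, a 'double' literal whose type alignment is only 32 bits is bumped
   to 64 bits, a 128-bit vector constant is bumped to 128 bits, and a string
   constant of 31 bytes or more is word-aligned unless we optimize for
   size.  */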
20136
20137 /* Compute the alignment for a static variable.
20138 TYPE is the data type, and ALIGN is the alignment that
20139 the object would ordinarily have. The value of this function is used
20140 instead of that alignment to align the object. */
20141
20142 int
20143 ix86_data_alignment (tree type, int align)
20144 {
20145 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
20146
20147 if (AGGREGATE_TYPE_P (type)
20148 && TYPE_SIZE (type)
20149 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20150 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
20151 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20152 && align < max_align)
20153 align = max_align;
20154
20155 /* The x86-64 ABI requires arrays of 16 bytes or more to be aligned
20156 to a 16-byte boundary. */
20157 if (TARGET_64BIT)
20158 {
20159 if (AGGREGATE_TYPE_P (type)
20160 && TYPE_SIZE (type)
20161 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20162 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20163 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20164 return 128;
20165 }
20166
20167 if (TREE_CODE (type) == ARRAY_TYPE)
20168 {
20169 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20170 return 64;
20171 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20172 return 128;
20173 }
20174 else if (TREE_CODE (type) == COMPLEX_TYPE)
20175 {
20176
20177 if (TYPE_MODE (type) == DCmode && align < 64)
20178 return 64;
20179 if ((TYPE_MODE (type) == XCmode
20180 || TYPE_MODE (type) == TCmode) && align < 128)
20181 return 128;
20182 }
20183 else if ((TREE_CODE (type) == RECORD_TYPE
20184 || TREE_CODE (type) == UNION_TYPE
20185 || TREE_CODE (type) == QUAL_UNION_TYPE)
20186 && TYPE_FIELDS (type))
20187 {
20188 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20189 return 64;
20190 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20191 return 128;
20192 }
20193 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20194 || TREE_CODE (type) == INTEGER_TYPE)
20195 {
20196 if (TYPE_MODE (type) == DFmode && align < 64)
20197 return 64;
20198 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20199 return 128;
20200 }
20201
20202 return align;
20203 }
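/* Illustrative example (a reading of the checks above, assuming -m64): a
   file-scope 'char buf[17]' (136 bits) is raised to 128-bit alignment by
   the ABI array rule, 'char buf[32]' (256 bits) already reaches max_align
   and, when not optimizing for size, is raised to 256 bits, while a lone
   'double' keeps its natural 64-bit alignment.  */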
20204
20205 /* Compute the alignment for a local variable or a stack slot. EXP is
20206 the data type or decl itself, MODE is the widest mode available and
20207 ALIGN is the alignment that the object would ordinarily have. The
20208 value of this macro is used instead of that alignment to align the
20209 object. */
20210
20211 unsigned int
20212 ix86_local_alignment (tree exp, enum machine_mode mode,
20213 unsigned int align)
20214 {
20215 tree type, decl;
20216
20217 if (exp && DECL_P (exp))
20218 {
20219 type = TREE_TYPE (exp);
20220 decl = exp;
20221 }
20222 else
20223 {
20224 type = exp;
20225 decl = NULL;
20226 }
20227
20228 /* Don't do dynamic stack realignment for long long objects with
20229 -mpreferred-stack-boundary=2. */
20230 if (!TARGET_64BIT
20231 && align == 64
20232 && ix86_preferred_stack_boundary < 64
20233 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20234 && (!type || !TYPE_USER_ALIGN (type))
20235 && (!decl || !DECL_USER_ALIGN (decl)))
20236 align = 32;
20237
20238 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
20239 register in MODE. We will return the larger alignment of XF
20240 and DF. */
20241 if (!type)
20242 {
20243 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20244 align = GET_MODE_ALIGNMENT (DFmode);
20245 return align;
20246 }
20247
20248 /* The x86-64 ABI requires arrays of 16 bytes or more to be aligned
20249 to a 16-byte boundary. The exact wording is:
20250
20251 An array uses the same alignment as its elements, except that a local or
20252 global array variable of length at least 16 bytes or
20253 a C99 variable-length array variable always has alignment of at least 16 bytes.
20254
20255 This was added to allow the use of aligned SSE instructions on arrays. The
20256 rule is meant for static storage (where the compiler cannot do the analysis
20257 by itself). We follow it for automatic variables only when convenient:
20258 we fully control everything in the function being compiled, and functions
20259 from other units cannot rely on the alignment.
20260
20261 Exclude the va_list type. It is the common case of a local array where
20262 we cannot benefit from the alignment. */
20263 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20264 && TARGET_SSE)
20265 {
20266 if (AGGREGATE_TYPE_P (type)
20267 && (TYPE_MAIN_VARIANT (type)
20268 != TYPE_MAIN_VARIANT (va_list_type_node))
20269 && TYPE_SIZE (type)
20270 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20271 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20272 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20273 return 128;
20274 }
20275 if (TREE_CODE (type) == ARRAY_TYPE)
20276 {
20277 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20278 return 64;
20279 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20280 return 128;
20281 }
20282 else if (TREE_CODE (type) == COMPLEX_TYPE)
20283 {
20284 if (TYPE_MODE (type) == DCmode && align < 64)
20285 return 64;
20286 if ((TYPE_MODE (type) == XCmode
20287 || TYPE_MODE (type) == TCmode) && align < 128)
20288 return 128;
20289 }
20290 else if ((TREE_CODE (type) == RECORD_TYPE
20291 || TREE_CODE (type) == UNION_TYPE
20292 || TREE_CODE (type) == QUAL_UNION_TYPE)
20293 && TYPE_FIELDS (type))
20294 {
20295 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20296 return 64;
20297 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20298 return 128;
20299 }
20300 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20301 || TREE_CODE (type) == INTEGER_TYPE)
20302 {
20303
20304 if (TYPE_MODE (type) == DFmode && align < 64)
20305 return 64;
20306 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20307 return 128;
20308 }
20309 return align;
20310 }
20311
20312 /* Compute the minimum required alignment for dynamic stack realignment
20313 purposes for a local variable, parameter or a stack slot. EXP is
20314 the data type or decl itself, MODE is its mode and ALIGN is the
20315 alignment that the object would ordinarily have. */
20316
20317 unsigned int
20318 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20319 unsigned int align)
20320 {
20321 tree type, decl;
20322
20323 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20324 return align;
20325
20326 if (exp && DECL_P (exp))
20327 {
20328 type = TREE_TYPE (exp);
20329 decl = exp;
20330 }
20331 else
20332 {
20333 type = exp;
20334 decl = NULL;
20335 }
20336
20337 /* Don't do dynamic stack realignment for long long objects with
20338 -mpreferred-stack-boundary=2. */
20339 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20340 && (!type || !TYPE_USER_ALIGN (type))
20341 && (!decl || !DECL_USER_ALIGN (decl)))
20342 return 32;
20343
20344 return align;
20345 }
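/* Illustrative example (a sketch of the check above): with
   -m32 -mpreferred-stack-boundary=2, a local 'long long' is reported here
   as needing only 32-bit alignment, so by itself it does not force dynamic
   realignment of the 4-byte-aligned stack frame.  */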
20346 \f
20347 /* Find a location for the static chain incoming to a nested function.
20348 This is a register, unless all free registers are used by arguments. */
20349
20350 static rtx
20351 ix86_static_chain (const_tree fndecl, bool incoming_p)
20352 {
20353 unsigned regno;
20354
20355 if (!DECL_STATIC_CHAIN (fndecl))
20356 return NULL;
20357
20358 if (TARGET_64BIT)
20359 {
20360 /* We always use R10 in 64-bit mode. */
20361 regno = R10_REG;
20362 }
20363 else
20364 {
20365 tree fntype;
20366 /* By default in 32-bit mode we use ECX to pass the static chain. */
20367 regno = CX_REG;
20368
20369 fntype = TREE_TYPE (fndecl);
20370 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20371 {
20372 /* Fastcall functions use ecx/edx for arguments, which leaves
20373 us with EAX for the static chain. */
20374 regno = AX_REG;
20375 }
20376 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20377 {
20378 /* Thiscall functions use ecx for arguments, which leaves
20379 us with EAX for the static chain. */
20380 regno = AX_REG;
20381 }
20382 else if (ix86_function_regparm (fntype, fndecl) == 3)
20383 {
20384 /* For regparm 3, we have no free call-clobbered registers in
20385 which to store the static chain. In order to implement this,
20386 we have the trampoline push the static chain to the stack.
20387 However, we can't push a value below the return address when
20388 we call the nested function directly, so we have to use an
20389 alternate entry point. For this we use ESI, and have the
20390 alternate entry point push ESI, so that things appear the
20391 same once we're executing the nested function. */
20392 if (incoming_p)
20393 {
20394 if (fndecl == current_function_decl)
20395 ix86_static_chain_on_stack = true;
20396 return gen_frame_mem (SImode,
20397 plus_constant (arg_pointer_rtx, -8));
20398 }
20399 regno = SI_REG;
20400 }
20401 }
20402
20403 return gen_rtx_REG (Pmode, regno);
20404 }
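/* Summarizing the choices above (illustrative): 64-bit functions always
   receive the chain in %r10; plain 32-bit functions use %ecx; fastcall and
   thiscall functions use %eax; regparm(3) functions take it from the stack
   at -8 from the incoming argument pointer, with %esi used by the alternate
   entry point.  */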
20405
20406 /* Emit RTL insns to initialize the variable parts of a trampoline.
20407 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20408 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20409 to be passed to the target function. */
20410
20411 static void
20412 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20413 {
20414 rtx mem, fnaddr;
20415
20416 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20417
20418 if (!TARGET_64BIT)
20419 {
20420 rtx disp, chain;
20421 int opcode;
20422
20423 /* Depending on the static chain location, either load a register
20424 with a constant, or push the constant to the stack. All of the
20425 instructions are the same size. */
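/* A rough sketch of the bytes emitted below (little-endian; derived from
   the stores that follow, not normative):
   byte 0       0xb9 / 0xb8 / 0x68   movl $chain,%ecx / movl $chain,%eax
                                     / pushl $chain
   bytes 1..4   chain_value
   byte 5       0xe9                 jmp rel32
   bytes 6..9   fnaddr - (tramp + 10), plus 1 in the push case so the
                1-byte push at the alternate entry point is skipped.  */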
20426 chain = ix86_static_chain (fndecl, true);
20427 if (REG_P (chain))
20428 {
20429 if (REGNO (chain) == CX_REG)
20430 opcode = 0xb9;
20431 else if (REGNO (chain) == AX_REG)
20432 opcode = 0xb8;
20433 else
20434 gcc_unreachable ();
20435 }
20436 else
20437 opcode = 0x68;
20438
20439 mem = adjust_address (m_tramp, QImode, 0);
20440 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20441
20442 mem = adjust_address (m_tramp, SImode, 1);
20443 emit_move_insn (mem, chain_value);
20444
20445 /* Compute the offset from the end of the jmp to the target function.
20446 When the trampoline stores the static chain on the stack, we need
20447 to skip the target's first insn, which pushes the (call-saved)
20448 static chain register; this push is 1 byte. */
20449 disp = expand_binop (SImode, sub_optab, fnaddr,
20450 plus_constant (XEXP (m_tramp, 0),
20451 MEM_P (chain) ? 9 : 10),
20452 NULL_RTX, 1, OPTAB_DIRECT);
20453
20454 mem = adjust_address (m_tramp, QImode, 5);
20455 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20456
20457 mem = adjust_address (m_tramp, SImode, 6);
20458 emit_move_insn (mem, disp);
20459 }
20460 else
20461 {
20462 int offset = 0;
20463
20464 /* Load the function address into r11. Try to load the address using
20465 the shorter movl instead of movabs. We may want to support
20466 movq for kernel mode, but the kernel does not use trampolines at
20467 the moment. */
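/* A rough sketch of how the bytes stored below decode (little-endian):
   41 bb imm32      movl    $fnaddr,%r11d   (zero-extended form, 6 bytes)
   or 49 bb imm64   movabsq $fnaddr,%r11    (10 bytes)
   49 ba imm64      movabsq $chain,%r10     (10 bytes)
   49 ff e3 90      jmpq    *%r11; nop      (nop pads the 32-bit store)  */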
20468 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20469 {
20470 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20471
20472 mem = adjust_address (m_tramp, HImode, offset);
20473 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20474
20475 mem = adjust_address (m_tramp, SImode, offset + 2);
20476 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20477 offset += 6;
20478 }
20479 else
20480 {
20481 mem = adjust_address (m_tramp, HImode, offset);
20482 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20483
20484 mem = adjust_address (m_tramp, DImode, offset + 2);
20485 emit_move_insn (mem, fnaddr);
20486 offset += 10;
20487 }
20488
20489 /* Load static chain using movabs to r10. */
20490 mem = adjust_address (m_tramp, HImode, offset);
20491 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20492
20493 mem = adjust_address (m_tramp, DImode, offset + 2);
20494 emit_move_insn (mem, chain_value);
20495 offset += 10;
20496
20497 /* Jump to r11; the last (unused) byte is a nop, only there to
20498 pad the write out to a single 32-bit store. */
20499 mem = adjust_address (m_tramp, SImode, offset);
20500 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20501 offset += 4;
20502
20503 gcc_assert (offset <= TRAMPOLINE_SIZE);
20504 }
20505
20506 #ifdef ENABLE_EXECUTE_STACK
20507 #ifdef CHECK_EXECUTE_STACK_ENABLED
20508 if (CHECK_EXECUTE_STACK_ENABLED)
20509 #endif
20510 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20511 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20512 #endif
20513 }
20514 \f
20515 /* The following file contains several enumerations and data structures
20516 built from the definitions in i386-builtin-types.def. */
20517
20518 #include "i386-builtin-types.inc"
20519
20520 /* Table for the ix86 builtin non-function types. */
20521 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20522
20523 /* Retrieve an element from the above table, building some of
20524 the types lazily. */
20525
20526 static tree
20527 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20528 {
20529 unsigned int index;
20530 tree type, itype;
20531
20532 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20533
20534 type = ix86_builtin_type_tab[(int) tcode];
20535 if (type != NULL)
20536 return type;
20537
20538 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20539 if (tcode <= IX86_BT_LAST_VECT)
20540 {
20541 enum machine_mode mode;
20542
20543 index = tcode - IX86_BT_LAST_PRIM - 1;
20544 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20545 mode = ix86_builtin_type_vect_mode[index];
20546
20547 type = build_vector_type_for_mode (itype, mode);
20548 }
20549 else
20550 {
20551 int quals;
20552
20553 index = tcode - IX86_BT_LAST_VECT - 1;
20554 if (tcode <= IX86_BT_LAST_PTR)
20555 quals = TYPE_UNQUALIFIED;
20556 else
20557 quals = TYPE_QUAL_CONST;
20558
20559 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20560 if (quals != TYPE_UNQUALIFIED)
20561 itype = build_qualified_type (itype, quals);
20562
20563 type = build_pointer_type (itype);
20564 }
20565
20566 ix86_builtin_type_tab[(int) tcode] = type;
20567 return type;
20568 }
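/* For example (an illustrative reading of the code above): a vector code
   whose recorded base type is float and whose recorded mode is V4SFmode is
   built with build_vector_type_for_mode (float_type_node, V4SFmode), while
   a pointer code past IX86_BT_LAST_PTR points to a const-qualified copy of
   its base type.  */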
20569
20570 /* Table for the ix86 builtin function types. */
20571 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20572
20573 /* Retrieve an element from the above table, building some of
20574 the types lazily. */
20575
20576 static tree
20577 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20578 {
20579 tree type;
20580
20581 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20582
20583 type = ix86_builtin_func_type_tab[(int) tcode];
20584 if (type != NULL)
20585 return type;
20586
20587 if (tcode <= IX86_BT_LAST_FUNC)
20588 {
20589 unsigned start = ix86_builtin_func_start[(int) tcode];
20590 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20591 tree rtype, atype, args = void_list_node;
20592 unsigned i;
20593
20594 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20595 for (i = after - 1; i > start; --i)
20596 {
20597 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20598 args = tree_cons (NULL, atype, args);
20599 }
20600
20601 type = build_function_type (rtype, args);
20602 }
20603 else
20604 {
20605 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20606 enum ix86_builtin_func_type icode;
20607
20608 icode = ix86_builtin_func_alias_base[index];
20609 type = ix86_get_builtin_func_type (icode);
20610 }
20611
20612 ix86_builtin_func_type_tab[(int) tcode] = type;
20613 return type;
20614 }
20615
20616
20617 /* Codes for all the SSE/MMX builtins. */
20618 enum ix86_builtins
20619 {
20620 IX86_BUILTIN_ADDPS,
20621 IX86_BUILTIN_ADDSS,
20622 IX86_BUILTIN_DIVPS,
20623 IX86_BUILTIN_DIVSS,
20624 IX86_BUILTIN_MULPS,
20625 IX86_BUILTIN_MULSS,
20626 IX86_BUILTIN_SUBPS,
20627 IX86_BUILTIN_SUBSS,
20628
20629 IX86_BUILTIN_CMPEQPS,
20630 IX86_BUILTIN_CMPLTPS,
20631 IX86_BUILTIN_CMPLEPS,
20632 IX86_BUILTIN_CMPGTPS,
20633 IX86_BUILTIN_CMPGEPS,
20634 IX86_BUILTIN_CMPNEQPS,
20635 IX86_BUILTIN_CMPNLTPS,
20636 IX86_BUILTIN_CMPNLEPS,
20637 IX86_BUILTIN_CMPNGTPS,
20638 IX86_BUILTIN_CMPNGEPS,
20639 IX86_BUILTIN_CMPORDPS,
20640 IX86_BUILTIN_CMPUNORDPS,
20641 IX86_BUILTIN_CMPEQSS,
20642 IX86_BUILTIN_CMPLTSS,
20643 IX86_BUILTIN_CMPLESS,
20644 IX86_BUILTIN_CMPNEQSS,
20645 IX86_BUILTIN_CMPNLTSS,
20646 IX86_BUILTIN_CMPNLESS,
20647 IX86_BUILTIN_CMPNGTSS,
20648 IX86_BUILTIN_CMPNGESS,
20649 IX86_BUILTIN_CMPORDSS,
20650 IX86_BUILTIN_CMPUNORDSS,
20651
20652 IX86_BUILTIN_COMIEQSS,
20653 IX86_BUILTIN_COMILTSS,
20654 IX86_BUILTIN_COMILESS,
20655 IX86_BUILTIN_COMIGTSS,
20656 IX86_BUILTIN_COMIGESS,
20657 IX86_BUILTIN_COMINEQSS,
20658 IX86_BUILTIN_UCOMIEQSS,
20659 IX86_BUILTIN_UCOMILTSS,
20660 IX86_BUILTIN_UCOMILESS,
20661 IX86_BUILTIN_UCOMIGTSS,
20662 IX86_BUILTIN_UCOMIGESS,
20663 IX86_BUILTIN_UCOMINEQSS,
20664
20665 IX86_BUILTIN_CVTPI2PS,
20666 IX86_BUILTIN_CVTPS2PI,
20667 IX86_BUILTIN_CVTSI2SS,
20668 IX86_BUILTIN_CVTSI642SS,
20669 IX86_BUILTIN_CVTSS2SI,
20670 IX86_BUILTIN_CVTSS2SI64,
20671 IX86_BUILTIN_CVTTPS2PI,
20672 IX86_BUILTIN_CVTTSS2SI,
20673 IX86_BUILTIN_CVTTSS2SI64,
20674
20675 IX86_BUILTIN_MAXPS,
20676 IX86_BUILTIN_MAXSS,
20677 IX86_BUILTIN_MINPS,
20678 IX86_BUILTIN_MINSS,
20679
20680 IX86_BUILTIN_LOADUPS,
20681 IX86_BUILTIN_STOREUPS,
20682 IX86_BUILTIN_MOVSS,
20683
20684 IX86_BUILTIN_MOVHLPS,
20685 IX86_BUILTIN_MOVLHPS,
20686 IX86_BUILTIN_LOADHPS,
20687 IX86_BUILTIN_LOADLPS,
20688 IX86_BUILTIN_STOREHPS,
20689 IX86_BUILTIN_STORELPS,
20690
20691 IX86_BUILTIN_MASKMOVQ,
20692 IX86_BUILTIN_MOVMSKPS,
20693 IX86_BUILTIN_PMOVMSKB,
20694
20695 IX86_BUILTIN_MOVNTPS,
20696 IX86_BUILTIN_MOVNTQ,
20697
20698 IX86_BUILTIN_LOADDQU,
20699 IX86_BUILTIN_STOREDQU,
20700
20701 IX86_BUILTIN_PACKSSWB,
20702 IX86_BUILTIN_PACKSSDW,
20703 IX86_BUILTIN_PACKUSWB,
20704
20705 IX86_BUILTIN_PADDB,
20706 IX86_BUILTIN_PADDW,
20707 IX86_BUILTIN_PADDD,
20708 IX86_BUILTIN_PADDQ,
20709 IX86_BUILTIN_PADDSB,
20710 IX86_BUILTIN_PADDSW,
20711 IX86_BUILTIN_PADDUSB,
20712 IX86_BUILTIN_PADDUSW,
20713 IX86_BUILTIN_PSUBB,
20714 IX86_BUILTIN_PSUBW,
20715 IX86_BUILTIN_PSUBD,
20716 IX86_BUILTIN_PSUBQ,
20717 IX86_BUILTIN_PSUBSB,
20718 IX86_BUILTIN_PSUBSW,
20719 IX86_BUILTIN_PSUBUSB,
20720 IX86_BUILTIN_PSUBUSW,
20721
20722 IX86_BUILTIN_PAND,
20723 IX86_BUILTIN_PANDN,
20724 IX86_BUILTIN_POR,
20725 IX86_BUILTIN_PXOR,
20726
20727 IX86_BUILTIN_PAVGB,
20728 IX86_BUILTIN_PAVGW,
20729
20730 IX86_BUILTIN_PCMPEQB,
20731 IX86_BUILTIN_PCMPEQW,
20732 IX86_BUILTIN_PCMPEQD,
20733 IX86_BUILTIN_PCMPGTB,
20734 IX86_BUILTIN_PCMPGTW,
20735 IX86_BUILTIN_PCMPGTD,
20736
20737 IX86_BUILTIN_PMADDWD,
20738
20739 IX86_BUILTIN_PMAXSW,
20740 IX86_BUILTIN_PMAXUB,
20741 IX86_BUILTIN_PMINSW,
20742 IX86_BUILTIN_PMINUB,
20743
20744 IX86_BUILTIN_PMULHUW,
20745 IX86_BUILTIN_PMULHW,
20746 IX86_BUILTIN_PMULLW,
20747
20748 IX86_BUILTIN_PSADBW,
20749 IX86_BUILTIN_PSHUFW,
20750
20751 IX86_BUILTIN_PSLLW,
20752 IX86_BUILTIN_PSLLD,
20753 IX86_BUILTIN_PSLLQ,
20754 IX86_BUILTIN_PSRAW,
20755 IX86_BUILTIN_PSRAD,
20756 IX86_BUILTIN_PSRLW,
20757 IX86_BUILTIN_PSRLD,
20758 IX86_BUILTIN_PSRLQ,
20759 IX86_BUILTIN_PSLLWI,
20760 IX86_BUILTIN_PSLLDI,
20761 IX86_BUILTIN_PSLLQI,
20762 IX86_BUILTIN_PSRAWI,
20763 IX86_BUILTIN_PSRADI,
20764 IX86_BUILTIN_PSRLWI,
20765 IX86_BUILTIN_PSRLDI,
20766 IX86_BUILTIN_PSRLQI,
20767
20768 IX86_BUILTIN_PUNPCKHBW,
20769 IX86_BUILTIN_PUNPCKHWD,
20770 IX86_BUILTIN_PUNPCKHDQ,
20771 IX86_BUILTIN_PUNPCKLBW,
20772 IX86_BUILTIN_PUNPCKLWD,
20773 IX86_BUILTIN_PUNPCKLDQ,
20774
20775 IX86_BUILTIN_SHUFPS,
20776
20777 IX86_BUILTIN_RCPPS,
20778 IX86_BUILTIN_RCPSS,
20779 IX86_BUILTIN_RSQRTPS,
20780 IX86_BUILTIN_RSQRTPS_NR,
20781 IX86_BUILTIN_RSQRTSS,
20782 IX86_BUILTIN_RSQRTF,
20783 IX86_BUILTIN_SQRTPS,
20784 IX86_BUILTIN_SQRTPS_NR,
20785 IX86_BUILTIN_SQRTSS,
20786
20787 IX86_BUILTIN_UNPCKHPS,
20788 IX86_BUILTIN_UNPCKLPS,
20789
20790 IX86_BUILTIN_ANDPS,
20791 IX86_BUILTIN_ANDNPS,
20792 IX86_BUILTIN_ORPS,
20793 IX86_BUILTIN_XORPS,
20794
20795 IX86_BUILTIN_EMMS,
20796 IX86_BUILTIN_LDMXCSR,
20797 IX86_BUILTIN_STMXCSR,
20798 IX86_BUILTIN_SFENCE,
20799
20800 /* 3DNow! Original */
20801 IX86_BUILTIN_FEMMS,
20802 IX86_BUILTIN_PAVGUSB,
20803 IX86_BUILTIN_PF2ID,
20804 IX86_BUILTIN_PFACC,
20805 IX86_BUILTIN_PFADD,
20806 IX86_BUILTIN_PFCMPEQ,
20807 IX86_BUILTIN_PFCMPGE,
20808 IX86_BUILTIN_PFCMPGT,
20809 IX86_BUILTIN_PFMAX,
20810 IX86_BUILTIN_PFMIN,
20811 IX86_BUILTIN_PFMUL,
20812 IX86_BUILTIN_PFRCP,
20813 IX86_BUILTIN_PFRCPIT1,
20814 IX86_BUILTIN_PFRCPIT2,
20815 IX86_BUILTIN_PFRSQIT1,
20816 IX86_BUILTIN_PFRSQRT,
20817 IX86_BUILTIN_PFSUB,
20818 IX86_BUILTIN_PFSUBR,
20819 IX86_BUILTIN_PI2FD,
20820 IX86_BUILTIN_PMULHRW,
20821
20822 /* 3DNow! Athlon Extensions */
20823 IX86_BUILTIN_PF2IW,
20824 IX86_BUILTIN_PFNACC,
20825 IX86_BUILTIN_PFPNACC,
20826 IX86_BUILTIN_PI2FW,
20827 IX86_BUILTIN_PSWAPDSI,
20828 IX86_BUILTIN_PSWAPDSF,
20829
20830 /* SSE2 */
20831 IX86_BUILTIN_ADDPD,
20832 IX86_BUILTIN_ADDSD,
20833 IX86_BUILTIN_DIVPD,
20834 IX86_BUILTIN_DIVSD,
20835 IX86_BUILTIN_MULPD,
20836 IX86_BUILTIN_MULSD,
20837 IX86_BUILTIN_SUBPD,
20838 IX86_BUILTIN_SUBSD,
20839
20840 IX86_BUILTIN_CMPEQPD,
20841 IX86_BUILTIN_CMPLTPD,
20842 IX86_BUILTIN_CMPLEPD,
20843 IX86_BUILTIN_CMPGTPD,
20844 IX86_BUILTIN_CMPGEPD,
20845 IX86_BUILTIN_CMPNEQPD,
20846 IX86_BUILTIN_CMPNLTPD,
20847 IX86_BUILTIN_CMPNLEPD,
20848 IX86_BUILTIN_CMPNGTPD,
20849 IX86_BUILTIN_CMPNGEPD,
20850 IX86_BUILTIN_CMPORDPD,
20851 IX86_BUILTIN_CMPUNORDPD,
20852 IX86_BUILTIN_CMPEQSD,
20853 IX86_BUILTIN_CMPLTSD,
20854 IX86_BUILTIN_CMPLESD,
20855 IX86_BUILTIN_CMPNEQSD,
20856 IX86_BUILTIN_CMPNLTSD,
20857 IX86_BUILTIN_CMPNLESD,
20858 IX86_BUILTIN_CMPORDSD,
20859 IX86_BUILTIN_CMPUNORDSD,
20860
20861 IX86_BUILTIN_COMIEQSD,
20862 IX86_BUILTIN_COMILTSD,
20863 IX86_BUILTIN_COMILESD,
20864 IX86_BUILTIN_COMIGTSD,
20865 IX86_BUILTIN_COMIGESD,
20866 IX86_BUILTIN_COMINEQSD,
20867 IX86_BUILTIN_UCOMIEQSD,
20868 IX86_BUILTIN_UCOMILTSD,
20869 IX86_BUILTIN_UCOMILESD,
20870 IX86_BUILTIN_UCOMIGTSD,
20871 IX86_BUILTIN_UCOMIGESD,
20872 IX86_BUILTIN_UCOMINEQSD,
20873
20874 IX86_BUILTIN_MAXPD,
20875 IX86_BUILTIN_MAXSD,
20876 IX86_BUILTIN_MINPD,
20877 IX86_BUILTIN_MINSD,
20878
20879 IX86_BUILTIN_ANDPD,
20880 IX86_BUILTIN_ANDNPD,
20881 IX86_BUILTIN_ORPD,
20882 IX86_BUILTIN_XORPD,
20883
20884 IX86_BUILTIN_SQRTPD,
20885 IX86_BUILTIN_SQRTSD,
20886
20887 IX86_BUILTIN_UNPCKHPD,
20888 IX86_BUILTIN_UNPCKLPD,
20889
20890 IX86_BUILTIN_SHUFPD,
20891
20892 IX86_BUILTIN_LOADUPD,
20893 IX86_BUILTIN_STOREUPD,
20894 IX86_BUILTIN_MOVSD,
20895
20896 IX86_BUILTIN_LOADHPD,
20897 IX86_BUILTIN_LOADLPD,
20898
20899 IX86_BUILTIN_CVTDQ2PD,
20900 IX86_BUILTIN_CVTDQ2PS,
20901
20902 IX86_BUILTIN_CVTPD2DQ,
20903 IX86_BUILTIN_CVTPD2PI,
20904 IX86_BUILTIN_CVTPD2PS,
20905 IX86_BUILTIN_CVTTPD2DQ,
20906 IX86_BUILTIN_CVTTPD2PI,
20907
20908 IX86_BUILTIN_CVTPI2PD,
20909 IX86_BUILTIN_CVTSI2SD,
20910 IX86_BUILTIN_CVTSI642SD,
20911
20912 IX86_BUILTIN_CVTSD2SI,
20913 IX86_BUILTIN_CVTSD2SI64,
20914 IX86_BUILTIN_CVTSD2SS,
20915 IX86_BUILTIN_CVTSS2SD,
20916 IX86_BUILTIN_CVTTSD2SI,
20917 IX86_BUILTIN_CVTTSD2SI64,
20918
20919 IX86_BUILTIN_CVTPS2DQ,
20920 IX86_BUILTIN_CVTPS2PD,
20921 IX86_BUILTIN_CVTTPS2DQ,
20922
20923 IX86_BUILTIN_MOVNTI,
20924 IX86_BUILTIN_MOVNTPD,
20925 IX86_BUILTIN_MOVNTDQ,
20926
20927 IX86_BUILTIN_MOVQ128,
20928
20929 /* SSE2 MMX */
20930 IX86_BUILTIN_MASKMOVDQU,
20931 IX86_BUILTIN_MOVMSKPD,
20932 IX86_BUILTIN_PMOVMSKB128,
20933
20934 IX86_BUILTIN_PACKSSWB128,
20935 IX86_BUILTIN_PACKSSDW128,
20936 IX86_BUILTIN_PACKUSWB128,
20937
20938 IX86_BUILTIN_PADDB128,
20939 IX86_BUILTIN_PADDW128,
20940 IX86_BUILTIN_PADDD128,
20941 IX86_BUILTIN_PADDQ128,
20942 IX86_BUILTIN_PADDSB128,
20943 IX86_BUILTIN_PADDSW128,
20944 IX86_BUILTIN_PADDUSB128,
20945 IX86_BUILTIN_PADDUSW128,
20946 IX86_BUILTIN_PSUBB128,
20947 IX86_BUILTIN_PSUBW128,
20948 IX86_BUILTIN_PSUBD128,
20949 IX86_BUILTIN_PSUBQ128,
20950 IX86_BUILTIN_PSUBSB128,
20951 IX86_BUILTIN_PSUBSW128,
20952 IX86_BUILTIN_PSUBUSB128,
20953 IX86_BUILTIN_PSUBUSW128,
20954
20955 IX86_BUILTIN_PAND128,
20956 IX86_BUILTIN_PANDN128,
20957 IX86_BUILTIN_POR128,
20958 IX86_BUILTIN_PXOR128,
20959
20960 IX86_BUILTIN_PAVGB128,
20961 IX86_BUILTIN_PAVGW128,
20962
20963 IX86_BUILTIN_PCMPEQB128,
20964 IX86_BUILTIN_PCMPEQW128,
20965 IX86_BUILTIN_PCMPEQD128,
20966 IX86_BUILTIN_PCMPGTB128,
20967 IX86_BUILTIN_PCMPGTW128,
20968 IX86_BUILTIN_PCMPGTD128,
20969
20970 IX86_BUILTIN_PMADDWD128,
20971
20972 IX86_BUILTIN_PMAXSW128,
20973 IX86_BUILTIN_PMAXUB128,
20974 IX86_BUILTIN_PMINSW128,
20975 IX86_BUILTIN_PMINUB128,
20976
20977 IX86_BUILTIN_PMULUDQ,
20978 IX86_BUILTIN_PMULUDQ128,
20979 IX86_BUILTIN_PMULHUW128,
20980 IX86_BUILTIN_PMULHW128,
20981 IX86_BUILTIN_PMULLW128,
20982
20983 IX86_BUILTIN_PSADBW128,
20984 IX86_BUILTIN_PSHUFHW,
20985 IX86_BUILTIN_PSHUFLW,
20986 IX86_BUILTIN_PSHUFD,
20987
20988 IX86_BUILTIN_PSLLDQI128,
20989 IX86_BUILTIN_PSLLWI128,
20990 IX86_BUILTIN_PSLLDI128,
20991 IX86_BUILTIN_PSLLQI128,
20992 IX86_BUILTIN_PSRAWI128,
20993 IX86_BUILTIN_PSRADI128,
20994 IX86_BUILTIN_PSRLDQI128,
20995 IX86_BUILTIN_PSRLWI128,
20996 IX86_BUILTIN_PSRLDI128,
20997 IX86_BUILTIN_PSRLQI128,
20998
20999 IX86_BUILTIN_PSLLDQ128,
21000 IX86_BUILTIN_PSLLW128,
21001 IX86_BUILTIN_PSLLD128,
21002 IX86_BUILTIN_PSLLQ128,
21003 IX86_BUILTIN_PSRAW128,
21004 IX86_BUILTIN_PSRAD128,
21005 IX86_BUILTIN_PSRLW128,
21006 IX86_BUILTIN_PSRLD128,
21007 IX86_BUILTIN_PSRLQ128,
21008
21009 IX86_BUILTIN_PUNPCKHBW128,
21010 IX86_BUILTIN_PUNPCKHWD128,
21011 IX86_BUILTIN_PUNPCKHDQ128,
21012 IX86_BUILTIN_PUNPCKHQDQ128,
21013 IX86_BUILTIN_PUNPCKLBW128,
21014 IX86_BUILTIN_PUNPCKLWD128,
21015 IX86_BUILTIN_PUNPCKLDQ128,
21016 IX86_BUILTIN_PUNPCKLQDQ128,
21017
21018 IX86_BUILTIN_CLFLUSH,
21019 IX86_BUILTIN_MFENCE,
21020 IX86_BUILTIN_LFENCE,
21021
21022 IX86_BUILTIN_BSRSI,
21023 IX86_BUILTIN_BSRDI,
21024 IX86_BUILTIN_RDPMC,
21025 IX86_BUILTIN_RDTSC,
21026 IX86_BUILTIN_RDTSCP,
21027 IX86_BUILTIN_ROLQI,
21028 IX86_BUILTIN_ROLHI,
21029 IX86_BUILTIN_RORQI,
21030 IX86_BUILTIN_RORHI,
21031
21032 /* SSE3. */
21033 IX86_BUILTIN_ADDSUBPS,
21034 IX86_BUILTIN_HADDPS,
21035 IX86_BUILTIN_HSUBPS,
21036 IX86_BUILTIN_MOVSHDUP,
21037 IX86_BUILTIN_MOVSLDUP,
21038 IX86_BUILTIN_ADDSUBPD,
21039 IX86_BUILTIN_HADDPD,
21040 IX86_BUILTIN_HSUBPD,
21041 IX86_BUILTIN_LDDQU,
21042
21043 IX86_BUILTIN_MONITOR,
21044 IX86_BUILTIN_MWAIT,
21045
21046 /* SSSE3. */
21047 IX86_BUILTIN_PHADDW,
21048 IX86_BUILTIN_PHADDD,
21049 IX86_BUILTIN_PHADDSW,
21050 IX86_BUILTIN_PHSUBW,
21051 IX86_BUILTIN_PHSUBD,
21052 IX86_BUILTIN_PHSUBSW,
21053 IX86_BUILTIN_PMADDUBSW,
21054 IX86_BUILTIN_PMULHRSW,
21055 IX86_BUILTIN_PSHUFB,
21056 IX86_BUILTIN_PSIGNB,
21057 IX86_BUILTIN_PSIGNW,
21058 IX86_BUILTIN_PSIGND,
21059 IX86_BUILTIN_PALIGNR,
21060 IX86_BUILTIN_PABSB,
21061 IX86_BUILTIN_PABSW,
21062 IX86_BUILTIN_PABSD,
21063
21064 IX86_BUILTIN_PHADDW128,
21065 IX86_BUILTIN_PHADDD128,
21066 IX86_BUILTIN_PHADDSW128,
21067 IX86_BUILTIN_PHSUBW128,
21068 IX86_BUILTIN_PHSUBD128,
21069 IX86_BUILTIN_PHSUBSW128,
21070 IX86_BUILTIN_PMADDUBSW128,
21071 IX86_BUILTIN_PMULHRSW128,
21072 IX86_BUILTIN_PSHUFB128,
21073 IX86_BUILTIN_PSIGNB128,
21074 IX86_BUILTIN_PSIGNW128,
21075 IX86_BUILTIN_PSIGND128,
21076 IX86_BUILTIN_PALIGNR128,
21077 IX86_BUILTIN_PABSB128,
21078 IX86_BUILTIN_PABSW128,
21079 IX86_BUILTIN_PABSD128,
21080
21081 /* AMDFAM10 - SSE4A New Instructions. */
21082 IX86_BUILTIN_MOVNTSD,
21083 IX86_BUILTIN_MOVNTSS,
21084 IX86_BUILTIN_EXTRQI,
21085 IX86_BUILTIN_EXTRQ,
21086 IX86_BUILTIN_INSERTQI,
21087 IX86_BUILTIN_INSERTQ,
21088
21089 /* SSE4.1. */
21090 IX86_BUILTIN_BLENDPD,
21091 IX86_BUILTIN_BLENDPS,
21092 IX86_BUILTIN_BLENDVPD,
21093 IX86_BUILTIN_BLENDVPS,
21094 IX86_BUILTIN_PBLENDVB128,
21095 IX86_BUILTIN_PBLENDW128,
21096
21097 IX86_BUILTIN_DPPD,
21098 IX86_BUILTIN_DPPS,
21099
21100 IX86_BUILTIN_INSERTPS128,
21101
21102 IX86_BUILTIN_MOVNTDQA,
21103 IX86_BUILTIN_MPSADBW128,
21104 IX86_BUILTIN_PACKUSDW128,
21105 IX86_BUILTIN_PCMPEQQ,
21106 IX86_BUILTIN_PHMINPOSUW128,
21107
21108 IX86_BUILTIN_PMAXSB128,
21109 IX86_BUILTIN_PMAXSD128,
21110 IX86_BUILTIN_PMAXUD128,
21111 IX86_BUILTIN_PMAXUW128,
21112
21113 IX86_BUILTIN_PMINSB128,
21114 IX86_BUILTIN_PMINSD128,
21115 IX86_BUILTIN_PMINUD128,
21116 IX86_BUILTIN_PMINUW128,
21117
21118 IX86_BUILTIN_PMOVSXBW128,
21119 IX86_BUILTIN_PMOVSXBD128,
21120 IX86_BUILTIN_PMOVSXBQ128,
21121 IX86_BUILTIN_PMOVSXWD128,
21122 IX86_BUILTIN_PMOVSXWQ128,
21123 IX86_BUILTIN_PMOVSXDQ128,
21124
21125 IX86_BUILTIN_PMOVZXBW128,
21126 IX86_BUILTIN_PMOVZXBD128,
21127 IX86_BUILTIN_PMOVZXBQ128,
21128 IX86_BUILTIN_PMOVZXWD128,
21129 IX86_BUILTIN_PMOVZXWQ128,
21130 IX86_BUILTIN_PMOVZXDQ128,
21131
21132 IX86_BUILTIN_PMULDQ128,
21133 IX86_BUILTIN_PMULLD128,
21134
21135 IX86_BUILTIN_ROUNDPD,
21136 IX86_BUILTIN_ROUNDPS,
21137 IX86_BUILTIN_ROUNDSD,
21138 IX86_BUILTIN_ROUNDSS,
21139
21140 IX86_BUILTIN_PTESTZ,
21141 IX86_BUILTIN_PTESTC,
21142 IX86_BUILTIN_PTESTNZC,
21143
21144 IX86_BUILTIN_VEC_INIT_V2SI,
21145 IX86_BUILTIN_VEC_INIT_V4HI,
21146 IX86_BUILTIN_VEC_INIT_V8QI,
21147 IX86_BUILTIN_VEC_EXT_V2DF,
21148 IX86_BUILTIN_VEC_EXT_V2DI,
21149 IX86_BUILTIN_VEC_EXT_V4SF,
21150 IX86_BUILTIN_VEC_EXT_V4SI,
21151 IX86_BUILTIN_VEC_EXT_V8HI,
21152 IX86_BUILTIN_VEC_EXT_V2SI,
21153 IX86_BUILTIN_VEC_EXT_V4HI,
21154 IX86_BUILTIN_VEC_EXT_V16QI,
21155 IX86_BUILTIN_VEC_SET_V2DI,
21156 IX86_BUILTIN_VEC_SET_V4SF,
21157 IX86_BUILTIN_VEC_SET_V4SI,
21158 IX86_BUILTIN_VEC_SET_V8HI,
21159 IX86_BUILTIN_VEC_SET_V4HI,
21160 IX86_BUILTIN_VEC_SET_V16QI,
21161
21162 IX86_BUILTIN_VEC_PACK_SFIX,
21163
21164 /* SSE4.2. */
21165 IX86_BUILTIN_CRC32QI,
21166 IX86_BUILTIN_CRC32HI,
21167 IX86_BUILTIN_CRC32SI,
21168 IX86_BUILTIN_CRC32DI,
21169
21170 IX86_BUILTIN_PCMPESTRI128,
21171 IX86_BUILTIN_PCMPESTRM128,
21172 IX86_BUILTIN_PCMPESTRA128,
21173 IX86_BUILTIN_PCMPESTRC128,
21174 IX86_BUILTIN_PCMPESTRO128,
21175 IX86_BUILTIN_PCMPESTRS128,
21176 IX86_BUILTIN_PCMPESTRZ128,
21177 IX86_BUILTIN_PCMPISTRI128,
21178 IX86_BUILTIN_PCMPISTRM128,
21179 IX86_BUILTIN_PCMPISTRA128,
21180 IX86_BUILTIN_PCMPISTRC128,
21181 IX86_BUILTIN_PCMPISTRO128,
21182 IX86_BUILTIN_PCMPISTRS128,
21183 IX86_BUILTIN_PCMPISTRZ128,
21184
21185 IX86_BUILTIN_PCMPGTQ,
21186
21187 /* AES instructions */
21188 IX86_BUILTIN_AESENC128,
21189 IX86_BUILTIN_AESENCLAST128,
21190 IX86_BUILTIN_AESDEC128,
21191 IX86_BUILTIN_AESDECLAST128,
21192 IX86_BUILTIN_AESIMC128,
21193 IX86_BUILTIN_AESKEYGENASSIST128,
21194
21195 /* PCLMUL instruction */
21196 IX86_BUILTIN_PCLMULQDQ128,
21197
21198 /* AVX */
21199 IX86_BUILTIN_ADDPD256,
21200 IX86_BUILTIN_ADDPS256,
21201 IX86_BUILTIN_ADDSUBPD256,
21202 IX86_BUILTIN_ADDSUBPS256,
21203 IX86_BUILTIN_ANDPD256,
21204 IX86_BUILTIN_ANDPS256,
21205 IX86_BUILTIN_ANDNPD256,
21206 IX86_BUILTIN_ANDNPS256,
21207 IX86_BUILTIN_BLENDPD256,
21208 IX86_BUILTIN_BLENDPS256,
21209 IX86_BUILTIN_BLENDVPD256,
21210 IX86_BUILTIN_BLENDVPS256,
21211 IX86_BUILTIN_DIVPD256,
21212 IX86_BUILTIN_DIVPS256,
21213 IX86_BUILTIN_DPPS256,
21214 IX86_BUILTIN_HADDPD256,
21215 IX86_BUILTIN_HADDPS256,
21216 IX86_BUILTIN_HSUBPD256,
21217 IX86_BUILTIN_HSUBPS256,
21218 IX86_BUILTIN_MAXPD256,
21219 IX86_BUILTIN_MAXPS256,
21220 IX86_BUILTIN_MINPD256,
21221 IX86_BUILTIN_MINPS256,
21222 IX86_BUILTIN_MULPD256,
21223 IX86_BUILTIN_MULPS256,
21224 IX86_BUILTIN_ORPD256,
21225 IX86_BUILTIN_ORPS256,
21226 IX86_BUILTIN_SHUFPD256,
21227 IX86_BUILTIN_SHUFPS256,
21228 IX86_BUILTIN_SUBPD256,
21229 IX86_BUILTIN_SUBPS256,
21230 IX86_BUILTIN_XORPD256,
21231 IX86_BUILTIN_XORPS256,
21232 IX86_BUILTIN_CMPSD,
21233 IX86_BUILTIN_CMPSS,
21234 IX86_BUILTIN_CMPPD,
21235 IX86_BUILTIN_CMPPS,
21236 IX86_BUILTIN_CMPPD256,
21237 IX86_BUILTIN_CMPPS256,
21238 IX86_BUILTIN_CVTDQ2PD256,
21239 IX86_BUILTIN_CVTDQ2PS256,
21240 IX86_BUILTIN_CVTPD2PS256,
21241 IX86_BUILTIN_CVTPS2DQ256,
21242 IX86_BUILTIN_CVTPS2PD256,
21243 IX86_BUILTIN_CVTTPD2DQ256,
21244 IX86_BUILTIN_CVTPD2DQ256,
21245 IX86_BUILTIN_CVTTPS2DQ256,
21246 IX86_BUILTIN_EXTRACTF128PD256,
21247 IX86_BUILTIN_EXTRACTF128PS256,
21248 IX86_BUILTIN_EXTRACTF128SI256,
21249 IX86_BUILTIN_VZEROALL,
21250 IX86_BUILTIN_VZEROUPPER,
21251 IX86_BUILTIN_VPERMILVARPD,
21252 IX86_BUILTIN_VPERMILVARPS,
21253 IX86_BUILTIN_VPERMILVARPD256,
21254 IX86_BUILTIN_VPERMILVARPS256,
21255 IX86_BUILTIN_VPERMILPD,
21256 IX86_BUILTIN_VPERMILPS,
21257 IX86_BUILTIN_VPERMILPD256,
21258 IX86_BUILTIN_VPERMILPS256,
21259 IX86_BUILTIN_VPERMIL2PD,
21260 IX86_BUILTIN_VPERMIL2PS,
21261 IX86_BUILTIN_VPERMIL2PD256,
21262 IX86_BUILTIN_VPERMIL2PS256,
21263 IX86_BUILTIN_VPERM2F128PD256,
21264 IX86_BUILTIN_VPERM2F128PS256,
21265 IX86_BUILTIN_VPERM2F128SI256,
21266 IX86_BUILTIN_VBROADCASTSS,
21267 IX86_BUILTIN_VBROADCASTSD256,
21268 IX86_BUILTIN_VBROADCASTSS256,
21269 IX86_BUILTIN_VBROADCASTPD256,
21270 IX86_BUILTIN_VBROADCASTPS256,
21271 IX86_BUILTIN_VINSERTF128PD256,
21272 IX86_BUILTIN_VINSERTF128PS256,
21273 IX86_BUILTIN_VINSERTF128SI256,
21274 IX86_BUILTIN_LOADUPD256,
21275 IX86_BUILTIN_LOADUPS256,
21276 IX86_BUILTIN_STOREUPD256,
21277 IX86_BUILTIN_STOREUPS256,
21278 IX86_BUILTIN_LDDQU256,
21279 IX86_BUILTIN_MOVNTDQ256,
21280 IX86_BUILTIN_MOVNTPD256,
21281 IX86_BUILTIN_MOVNTPS256,
21282 IX86_BUILTIN_LOADDQU256,
21283 IX86_BUILTIN_STOREDQU256,
21284 IX86_BUILTIN_MASKLOADPD,
21285 IX86_BUILTIN_MASKLOADPS,
21286 IX86_BUILTIN_MASKSTOREPD,
21287 IX86_BUILTIN_MASKSTOREPS,
21288 IX86_BUILTIN_MASKLOADPD256,
21289 IX86_BUILTIN_MASKLOADPS256,
21290 IX86_BUILTIN_MASKSTOREPD256,
21291 IX86_BUILTIN_MASKSTOREPS256,
21292 IX86_BUILTIN_MOVSHDUP256,
21293 IX86_BUILTIN_MOVSLDUP256,
21294 IX86_BUILTIN_MOVDDUP256,
21295
21296 IX86_BUILTIN_SQRTPD256,
21297 IX86_BUILTIN_SQRTPS256,
21298 IX86_BUILTIN_SQRTPS_NR256,
21299 IX86_BUILTIN_RSQRTPS256,
21300 IX86_BUILTIN_RSQRTPS_NR256,
21301
21302 IX86_BUILTIN_RCPPS256,
21303
21304 IX86_BUILTIN_ROUNDPD256,
21305 IX86_BUILTIN_ROUNDPS256,
21306
21307 IX86_BUILTIN_UNPCKHPD256,
21308 IX86_BUILTIN_UNPCKLPD256,
21309 IX86_BUILTIN_UNPCKHPS256,
21310 IX86_BUILTIN_UNPCKLPS256,
21311
21312 IX86_BUILTIN_SI256_SI,
21313 IX86_BUILTIN_PS256_PS,
21314 IX86_BUILTIN_PD256_PD,
21315 IX86_BUILTIN_SI_SI256,
21316 IX86_BUILTIN_PS_PS256,
21317 IX86_BUILTIN_PD_PD256,
21318
21319 IX86_BUILTIN_VTESTZPD,
21320 IX86_BUILTIN_VTESTCPD,
21321 IX86_BUILTIN_VTESTNZCPD,
21322 IX86_BUILTIN_VTESTZPS,
21323 IX86_BUILTIN_VTESTCPS,
21324 IX86_BUILTIN_VTESTNZCPS,
21325 IX86_BUILTIN_VTESTZPD256,
21326 IX86_BUILTIN_VTESTCPD256,
21327 IX86_BUILTIN_VTESTNZCPD256,
21328 IX86_BUILTIN_VTESTZPS256,
21329 IX86_BUILTIN_VTESTCPS256,
21330 IX86_BUILTIN_VTESTNZCPS256,
21331 IX86_BUILTIN_PTESTZ256,
21332 IX86_BUILTIN_PTESTC256,
21333 IX86_BUILTIN_PTESTNZC256,
21334
21335 IX86_BUILTIN_MOVMSKPD256,
21336 IX86_BUILTIN_MOVMSKPS256,
21337
21338 /* TFmode support builtins. */
21339 IX86_BUILTIN_INFQ,
21340 IX86_BUILTIN_HUGE_VALQ,
21341 IX86_BUILTIN_FABSQ,
21342 IX86_BUILTIN_COPYSIGNQ,
21343
21344 /* Vectorizer support builtins. */
21345 IX86_BUILTIN_CPYSGNPS,
21346 IX86_BUILTIN_CPYSGNPD,
21347
21348 IX86_BUILTIN_CVTUDQ2PS,
21349
21350 IX86_BUILTIN_VEC_PERM_V2DF,
21351 IX86_BUILTIN_VEC_PERM_V4SF,
21352 IX86_BUILTIN_VEC_PERM_V2DI,
21353 IX86_BUILTIN_VEC_PERM_V4SI,
21354 IX86_BUILTIN_VEC_PERM_V8HI,
21355 IX86_BUILTIN_VEC_PERM_V16QI,
21356 IX86_BUILTIN_VEC_PERM_V2DI_U,
21357 IX86_BUILTIN_VEC_PERM_V4SI_U,
21358 IX86_BUILTIN_VEC_PERM_V8HI_U,
21359 IX86_BUILTIN_VEC_PERM_V16QI_U,
21360 IX86_BUILTIN_VEC_PERM_V4DF,
21361 IX86_BUILTIN_VEC_PERM_V8SF,
21362
21363 /* FMA4 and XOP instructions. */
21364 IX86_BUILTIN_VFMADDSS,
21365 IX86_BUILTIN_VFMADDSD,
21366 IX86_BUILTIN_VFMADDPS,
21367 IX86_BUILTIN_VFMADDPD,
21368 IX86_BUILTIN_VFMSUBSS,
21369 IX86_BUILTIN_VFMSUBSD,
21370 IX86_BUILTIN_VFMSUBPS,
21371 IX86_BUILTIN_VFMSUBPD,
21372 IX86_BUILTIN_VFMADDSUBPS,
21373 IX86_BUILTIN_VFMADDSUBPD,
21374 IX86_BUILTIN_VFMSUBADDPS,
21375 IX86_BUILTIN_VFMSUBADDPD,
21376 IX86_BUILTIN_VFNMADDSS,
21377 IX86_BUILTIN_VFNMADDSD,
21378 IX86_BUILTIN_VFNMADDPS,
21379 IX86_BUILTIN_VFNMADDPD,
21380 IX86_BUILTIN_VFNMSUBSS,
21381 IX86_BUILTIN_VFNMSUBSD,
21382 IX86_BUILTIN_VFNMSUBPS,
21383 IX86_BUILTIN_VFNMSUBPD,
21384 IX86_BUILTIN_VFMADDPS256,
21385 IX86_BUILTIN_VFMADDPD256,
21386 IX86_BUILTIN_VFMSUBPS256,
21387 IX86_BUILTIN_VFMSUBPD256,
21388 IX86_BUILTIN_VFMADDSUBPS256,
21389 IX86_BUILTIN_VFMADDSUBPD256,
21390 IX86_BUILTIN_VFMSUBADDPS256,
21391 IX86_BUILTIN_VFMSUBADDPD256,
21392 IX86_BUILTIN_VFNMADDPS256,
21393 IX86_BUILTIN_VFNMADDPD256,
21394 IX86_BUILTIN_VFNMSUBPS256,
21395 IX86_BUILTIN_VFNMSUBPD256,
21396
21397 IX86_BUILTIN_VPCMOV,
21398 IX86_BUILTIN_VPCMOV_V2DI,
21399 IX86_BUILTIN_VPCMOV_V4SI,
21400 IX86_BUILTIN_VPCMOV_V8HI,
21401 IX86_BUILTIN_VPCMOV_V16QI,
21402 IX86_BUILTIN_VPCMOV_V4SF,
21403 IX86_BUILTIN_VPCMOV_V2DF,
21404 IX86_BUILTIN_VPCMOV256,
21405 IX86_BUILTIN_VPCMOV_V4DI256,
21406 IX86_BUILTIN_VPCMOV_V8SI256,
21407 IX86_BUILTIN_VPCMOV_V16HI256,
21408 IX86_BUILTIN_VPCMOV_V32QI256,
21409 IX86_BUILTIN_VPCMOV_V8SF256,
21410 IX86_BUILTIN_VPCMOV_V4DF256,
21411
21412 IX86_BUILTIN_VPPERM,
21413
21414 IX86_BUILTIN_VPMACSSWW,
21415 IX86_BUILTIN_VPMACSWW,
21416 IX86_BUILTIN_VPMACSSWD,
21417 IX86_BUILTIN_VPMACSWD,
21418 IX86_BUILTIN_VPMACSSDD,
21419 IX86_BUILTIN_VPMACSDD,
21420 IX86_BUILTIN_VPMACSSDQL,
21421 IX86_BUILTIN_VPMACSSDQH,
21422 IX86_BUILTIN_VPMACSDQL,
21423 IX86_BUILTIN_VPMACSDQH,
21424 IX86_BUILTIN_VPMADCSSWD,
21425 IX86_BUILTIN_VPMADCSWD,
21426
21427 IX86_BUILTIN_VPHADDBW,
21428 IX86_BUILTIN_VPHADDBD,
21429 IX86_BUILTIN_VPHADDBQ,
21430 IX86_BUILTIN_VPHADDWD,
21431 IX86_BUILTIN_VPHADDWQ,
21432 IX86_BUILTIN_VPHADDDQ,
21433 IX86_BUILTIN_VPHADDUBW,
21434 IX86_BUILTIN_VPHADDUBD,
21435 IX86_BUILTIN_VPHADDUBQ,
21436 IX86_BUILTIN_VPHADDUWD,
21437 IX86_BUILTIN_VPHADDUWQ,
21438 IX86_BUILTIN_VPHADDUDQ,
21439 IX86_BUILTIN_VPHSUBBW,
21440 IX86_BUILTIN_VPHSUBWD,
21441 IX86_BUILTIN_VPHSUBDQ,
21442
21443 IX86_BUILTIN_VPROTB,
21444 IX86_BUILTIN_VPROTW,
21445 IX86_BUILTIN_VPROTD,
21446 IX86_BUILTIN_VPROTQ,
21447 IX86_BUILTIN_VPROTB_IMM,
21448 IX86_BUILTIN_VPROTW_IMM,
21449 IX86_BUILTIN_VPROTD_IMM,
21450 IX86_BUILTIN_VPROTQ_IMM,
21451
21452 IX86_BUILTIN_VPSHLB,
21453 IX86_BUILTIN_VPSHLW,
21454 IX86_BUILTIN_VPSHLD,
21455 IX86_BUILTIN_VPSHLQ,
21456 IX86_BUILTIN_VPSHAB,
21457 IX86_BUILTIN_VPSHAW,
21458 IX86_BUILTIN_VPSHAD,
21459 IX86_BUILTIN_VPSHAQ,
21460
21461 IX86_BUILTIN_VFRCZSS,
21462 IX86_BUILTIN_VFRCZSD,
21463 IX86_BUILTIN_VFRCZPS,
21464 IX86_BUILTIN_VFRCZPD,
21465 IX86_BUILTIN_VFRCZPS256,
21466 IX86_BUILTIN_VFRCZPD256,
21467
21468 IX86_BUILTIN_VPCOMEQUB,
21469 IX86_BUILTIN_VPCOMNEUB,
21470 IX86_BUILTIN_VPCOMLTUB,
21471 IX86_BUILTIN_VPCOMLEUB,
21472 IX86_BUILTIN_VPCOMGTUB,
21473 IX86_BUILTIN_VPCOMGEUB,
21474 IX86_BUILTIN_VPCOMFALSEUB,
21475 IX86_BUILTIN_VPCOMTRUEUB,
21476
21477 IX86_BUILTIN_VPCOMEQUW,
21478 IX86_BUILTIN_VPCOMNEUW,
21479 IX86_BUILTIN_VPCOMLTUW,
21480 IX86_BUILTIN_VPCOMLEUW,
21481 IX86_BUILTIN_VPCOMGTUW,
21482 IX86_BUILTIN_VPCOMGEUW,
21483 IX86_BUILTIN_VPCOMFALSEUW,
21484 IX86_BUILTIN_VPCOMTRUEUW,
21485
21486 IX86_BUILTIN_VPCOMEQUD,
21487 IX86_BUILTIN_VPCOMNEUD,
21488 IX86_BUILTIN_VPCOMLTUD,
21489 IX86_BUILTIN_VPCOMLEUD,
21490 IX86_BUILTIN_VPCOMGTUD,
21491 IX86_BUILTIN_VPCOMGEUD,
21492 IX86_BUILTIN_VPCOMFALSEUD,
21493 IX86_BUILTIN_VPCOMTRUEUD,
21494
21495 IX86_BUILTIN_VPCOMEQUQ,
21496 IX86_BUILTIN_VPCOMNEUQ,
21497 IX86_BUILTIN_VPCOMLTUQ,
21498 IX86_BUILTIN_VPCOMLEUQ,
21499 IX86_BUILTIN_VPCOMGTUQ,
21500 IX86_BUILTIN_VPCOMGEUQ,
21501 IX86_BUILTIN_VPCOMFALSEUQ,
21502 IX86_BUILTIN_VPCOMTRUEUQ,
21503
21504 IX86_BUILTIN_VPCOMEQB,
21505 IX86_BUILTIN_VPCOMNEB,
21506 IX86_BUILTIN_VPCOMLTB,
21507 IX86_BUILTIN_VPCOMLEB,
21508 IX86_BUILTIN_VPCOMGTB,
21509 IX86_BUILTIN_VPCOMGEB,
21510 IX86_BUILTIN_VPCOMFALSEB,
21511 IX86_BUILTIN_VPCOMTRUEB,
21512
21513 IX86_BUILTIN_VPCOMEQW,
21514 IX86_BUILTIN_VPCOMNEW,
21515 IX86_BUILTIN_VPCOMLTW,
21516 IX86_BUILTIN_VPCOMLEW,
21517 IX86_BUILTIN_VPCOMGTW,
21518 IX86_BUILTIN_VPCOMGEW,
21519 IX86_BUILTIN_VPCOMFALSEW,
21520 IX86_BUILTIN_VPCOMTRUEW,
21521
21522 IX86_BUILTIN_VPCOMEQD,
21523 IX86_BUILTIN_VPCOMNED,
21524 IX86_BUILTIN_VPCOMLTD,
21525 IX86_BUILTIN_VPCOMLED,
21526 IX86_BUILTIN_VPCOMGTD,
21527 IX86_BUILTIN_VPCOMGED,
21528 IX86_BUILTIN_VPCOMFALSED,
21529 IX86_BUILTIN_VPCOMTRUED,
21530
21531 IX86_BUILTIN_VPCOMEQQ,
21532 IX86_BUILTIN_VPCOMNEQ,
21533 IX86_BUILTIN_VPCOMLTQ,
21534 IX86_BUILTIN_VPCOMLEQ,
21535 IX86_BUILTIN_VPCOMGTQ,
21536 IX86_BUILTIN_VPCOMGEQ,
21537 IX86_BUILTIN_VPCOMFALSEQ,
21538 IX86_BUILTIN_VPCOMTRUEQ,
21539
21540 /* LWP instructions. */
21541 IX86_BUILTIN_LLWPCB,
21542 IX86_BUILTIN_SLWPCB,
21543 IX86_BUILTIN_LWPVAL32,
21544 IX86_BUILTIN_LWPVAL64,
21545 IX86_BUILTIN_LWPINS32,
21546 IX86_BUILTIN_LWPINS64,
21547
21548 IX86_BUILTIN_CLZS,
21549
21550 IX86_BUILTIN_MAX
21551 };
21552
21553 /* Table for the ix86 builtin decls. */
21554 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21555
21556 /* Table of all of the builtin functions that are possible with different ISAs
21557 but are waiting to be built until a function is declared to use that
21558 ISA. */
21559 struct builtin_isa {
21560 const char *name; /* function name */
21561 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21562 int isa; /* isa_flags this builtin is defined for */
21563 bool const_p; /* true if the declaration is constant */
21564 bool set_and_not_built_p;
21565 };
21566
21567 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21568
21569
21570 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21571 of isa_flags to use in the ix86_builtins_isa array. Store the
21572 function decl in the ix86_builtins array. Return the function decl or
21573 NULL_TREE if the builtin was not added.
21574
21575 If the front end has a special hook for builtin functions, delay adding
21576 builtin functions that aren't in the current ISA until the ISA is changed
21577 with function-specific optimization. Doing so can save about 300K for the
21578 default compiler. When the builtin is expanded, check at that time whether
21579 it is valid.
21580
21581 If the front end doesn't have a special hook, record all builtins, even
21582 those that aren't in the current ISA, in case the user uses
21583 function-specific options for a different ISA, so that we don't get scope
21584 errors if a builtin is added in the middle of a function scope. */
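/* For instance (an illustrative sketch; the exact type code shown is only
   an assumption about i386-builtin-types.def), a call along the lines of

     def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_roundpd",
                        V2DF_FTYPE_V2DF_INT, IX86_BUILTIN_ROUNDPD);

   creates the decl immediately when -msse4.1 is enabled; otherwise it only
   records the request in ix86_builtins_isa, and ix86_add_new_builtins
   creates the decl later if SSE4.1 is enabled through function-specific
   target options.  */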
21585
21586 static inline tree
21587 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21588 enum ix86_builtins code)
21589 {
21590 tree decl = NULL_TREE;
21591
21592 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21593 {
21594 ix86_builtins_isa[(int) code].isa = mask;
21595
21596 mask &= ~OPTION_MASK_ISA_64BIT;
21597 if (mask == 0
21598 || (mask & ix86_isa_flags) != 0
21599 || (lang_hooks.builtin_function
21600 == lang_hooks.builtin_function_ext_scope))
21601
21602 {
21603 tree type = ix86_get_builtin_func_type (tcode);
21604 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21605 NULL, NULL_TREE);
21606 ix86_builtins[(int) code] = decl;
21607 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21608 }
21609 else
21610 {
21611 ix86_builtins[(int) code] = NULL_TREE;
21612 ix86_builtins_isa[(int) code].tcode = tcode;
21613 ix86_builtins_isa[(int) code].name = name;
21614 ix86_builtins_isa[(int) code].const_p = false;
21615 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21616 }
21617 }
21618
21619 return decl;
21620 }
21621
21622 /* Like def_builtin, but also marks the function decl "const". */
21623
21624 static inline tree
21625 def_builtin_const (int mask, const char *name,
21626 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21627 {
21628 tree decl = def_builtin (mask, name, tcode, code);
21629 if (decl)
21630 TREE_READONLY (decl) = 1;
21631 else
21632 ix86_builtins_isa[(int) code].const_p = true;
21633
21634 return decl;
21635 }
21636
21637 /* Add any new builtin functions for a given ISA that may not have been
21638 declared yet. This saves a bit of space compared to adding all of the
21639 declarations to the tree up front, even when they are never used. */
21640
21641 static void
21642 ix86_add_new_builtins (int isa)
21643 {
21644 int i;
21645
21646 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21647 {
21648 if ((ix86_builtins_isa[i].isa & isa) != 0
21649 && ix86_builtins_isa[i].set_and_not_built_p)
21650 {
21651 tree decl, type;
21652
21653 /* Don't define the builtin again. */
21654 ix86_builtins_isa[i].set_and_not_built_p = false;
21655
21656 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21657 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21658 type, i, BUILT_IN_MD, NULL,
21659 NULL_TREE);
21660
21661 ix86_builtins[i] = decl;
21662 if (ix86_builtins_isa[i].const_p)
21663 TREE_READONLY (decl) = 1;
21664 }
21665 }
21666 }
21667
21668 /* Bits for builtin_description.flag. */
21669
21670 /* Set when we don't support the comparison natively, and should
21671 swap the comparison operands in order to support it. */
21672 #define BUILTIN_DESC_SWAP_OPERANDS 1
21673
21674 struct builtin_description
21675 {
21676 const unsigned int mask;
21677 const enum insn_code icode;
21678 const char *const name;
21679 const enum ix86_builtins code;
21680 const enum rtx_code comparison;
21681 const int flag;
21682 };
21683
21684 static const struct builtin_description bdesc_comi[] =
21685 {
21686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21687 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21688 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21689 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21690 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21691 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21692 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21693 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21694 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21695 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21696 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21697 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21698 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21699 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21700 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21701 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21702 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21703 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21704 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21705 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21706 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21707 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21708 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21709 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21710 };
21711
21712 static const struct builtin_description bdesc_pcmpestr[] =
21713 {
21714 /* SSE4.2 */
21715 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21716 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21717 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21718 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21719 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21720 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21721 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21722 };
21723
21724 static const struct builtin_description bdesc_pcmpistr[] =
21725 {
21726 /* SSE4.2 */
21727 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21728 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21729 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21730 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21731 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21732 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21733 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21734 };
21735
21736 /* Special builtins with variable number of arguments. */
21737 static const struct builtin_description bdesc_special_args[] =
21738 {
21739 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21740 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21741
21742 /* MMX */
21743 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21744
21745 /* 3DNow! */
21746 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21747
21748 /* SSE */
21749 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21750 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21751 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21752
21753 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21754 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21755 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21756 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21757
21758 /* SSE or 3DNow!A */
21759 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21760 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21761
21762 /* SSE2 */
21763 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21764 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21765 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21766 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21767 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21768 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21769 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21770 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21771 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21772
21773 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21774 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21775
21776 /* SSE3 */
21777 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21778
21779 /* SSE4.1 */
21780 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21781
21782 /* SSE4A */
21783 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21784 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21785
21786 /* AVX */
21787 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21788 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21789
21790 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21791 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21792 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21793 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21794 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21795
21796 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21797 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21798 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21799 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21800 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21801 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21802 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21803
21804 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21805 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21806 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21807
21808 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21809 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21810 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21811 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21812 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21813 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21814 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21815 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21816
21817 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21818 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21819 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21820 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21821 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21822 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21823
21824 };
21825
21826 /* Builtins with variable number of arguments. */
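/* Each entry below supplies: the OPTION_MASK_ISA_* mask that must be enabled
   before the builtin may be used, the insn code used to expand it
   (CODE_FOR_nothing for builtins that are expanded by hand), the user-visible
   __builtin_ia32_* name (0 for entries that are reached only internally), the
   IX86_BUILTIN_* code, the rtx comparison code handed to compare patterns
   (UNKNOWN otherwise), and the function type enum describing the prototype,
   cast to int.  */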
21827 static const struct builtin_description bdesc_args[] =
21828 {
21829 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21830 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21831 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21832 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21833 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21834 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21835 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21836
21837 /* MMX */
21838 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21839 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21840 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21841 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21842 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21843 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21844
21845 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21846 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21847 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21848 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21849 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21850 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21851 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21852 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21853
21854 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21855 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21856
21857 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21858 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21859 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21860 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21861
21862 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21863 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21864 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21865 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21866 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21867 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21868
21869 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21870 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21871 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21872 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21873   { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21874   { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21875
21876 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21877 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21878 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21879
21880 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21881
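/* In the shift entries the _COUNT suffix marks the last operand as a shift
   count: the SI_COUNT forms take an integer (or immediate) count, while the
   vector _COUNT forms take the count from a vector register, matching the two
   encodings of each shift instruction.  */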
21882 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21883 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21884 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21885 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21886 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21887 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21888
21889 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21890 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21891 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21892 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21893 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21894 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21895
21896 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21897 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21898 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21899 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21900
21901 /* 3DNow! */
21902 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21903 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21904 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21905 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21906
21907 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21908 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21909 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21910 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21911 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21912 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21913 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21914 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21915 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21916 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21917 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21918 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21919 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21920 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21921 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21922
21923 /* 3DNow!A */
21924 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21925 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21926 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21927 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21928 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21929 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21930
21931 /* SSE */
21932 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21933 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21934 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21935 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21936 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21937 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21938 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21939 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21940 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21941 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21942 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21943 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21944
21945 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21946
21947 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21948 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21949 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21950 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21951 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21953 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21955
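/* There are no separate greater-than patterns: the cmpgt/cmpge (and
   cmpngt/cmpnge) builtins reuse the LT/LE (resp. UNGE/UNGT) codes with the
   two vector operands swapped, which is what the _SWAP suffix on the function
   type requests.  */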
21956 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21957 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21958 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21959 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21960 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21961 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21962 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21963 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21964 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21965 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21966   { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21967 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21968 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21969 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21970 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21971 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21972 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21973 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21974 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21975 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21976 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21977 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21978
21979 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21980 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21981 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21982 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21983
21984 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21985 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21986 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21987 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21988
21989 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21990
21991 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21992 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21993 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21994 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21995 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21996
21997 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21998 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21999   { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
22000
22001 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
22002
22003 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22004 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22005 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22006
22007   /* SSE MMX or 3DNow!A */
22008 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22009 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22010 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22011
22012 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22013 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22014 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22015 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22016
22017 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
22018 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
22019
22020 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
22021
22022 /* SSE2 */
22023 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22024
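/* The generic vec_perm builtins below carry CODE_FOR_nothing: they do not map
   onto a single insn pattern and are handled by dedicated expansion code
   rather than through this table's icode field.  */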
22025 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
22026 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
22027 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
22028 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
22029 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
22030 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22031 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
22032 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
22033 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
22034 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
22035 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
22036 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
22037
22038 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
22039 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
22040 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
22041 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
22042 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
22043 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
22044
22045 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
22046 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
22047 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
22048 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
22049 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
22050
22051 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
22052
22053 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
22054 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
22055 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
22056 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
22057
22058 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
22059 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
22060 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
22061
22062 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22063 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22064 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22065 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22066 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22067 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22068 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22069 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22070
22071 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
22072 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
22073 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
22074 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22075   { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22076 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22077 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
22078 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
22079 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
22080 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22081 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22082 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22083 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
22084 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
22085 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
22086 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22087 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
22088 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
22089 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
22090 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22091
22092 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22093 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22094 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22095 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22096
22097 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22098 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22099 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22100 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22101
22102 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22103
22104 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22105 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22106 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22107
22108 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
22109
22110 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22111 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22112 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22113 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22114 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22115 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22116 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22117 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22118
22119 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22120 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22121 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22122 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22123 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22124 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22125 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22126 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22127
22128 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22129   { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22130
22131 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22132 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22133 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22134 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22135
22136 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22137 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22138
22139 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22140 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22141 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22142 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22143 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22144 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22145
22146 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22147 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22148 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22149 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22150
22151 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22152 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22153 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22154 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22155 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22156 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22157 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22158 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22159
22160 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22161 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22162 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22163
22164 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22165 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
22166
22167 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
22168 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22169
22170 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
22171
22172 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
22173 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
22174 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
22175 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
22176
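/* The whole-register byte shifts are expanded through the V1TImode shift
   patterns; the _CONVERT suffix on the function type notes that the V2DI
   operands are converted to the mode of the pattern when the builtin is
   expanded.  */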
22177 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22178 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22179 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22180 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22181 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22182 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22183 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22184
22185 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22186 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22187 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22188 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22189 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22190 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22191 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22192
22193 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22194 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22195 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22196 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22197
22198 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
22199 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22200 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22201
22202 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
22203
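/* The next two entries have no __builtin_ia32_* name and are skipped when the
   table is walked to register the builtins; they are only used to expand the
   __float128 fabs and copysign builtins, which are created elsewhere.  */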
22204 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
22205 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
22206
22207 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22208
22209 /* SSE2 MMX */
22210 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22211 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22212
22213 /* SSE3 */
22214   { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22215 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22216
22217 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22218 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22219 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22220 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22221 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22222 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22223
22224 /* SSSE3 */
22225 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
22226 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
22227 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22228 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
22229 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
22230 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
22231
22232 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22233 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22234 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22235 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22236 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22237 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22238 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22239 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22240 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22241 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22242 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22243 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22244 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
22245 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
22246 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22247 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22248 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22249 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22250 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22251 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22252 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22253 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22254 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22255 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22256
22257 /* SSSE3. */
22258 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
22259 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
22260
22261 /* SSE4.1 */
22262 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22263 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22264 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
22265 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
22266 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22267 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22268 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22269 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
22270 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22271 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
22272
22273 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22274 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22275 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22276 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22277 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22278 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22279 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22280 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22281 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22282 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22283 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22284 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22285 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22286
22287 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22288 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22289 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22290 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22291 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22292 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22293 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22294 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22295 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22296 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22297 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22298 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22299
22300 /* SSE4.1 round and ptest. */
22301 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22302 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22303 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22304 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22305
22306 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22307 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22308 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22309
22310 /* SSE4.2 */
22311 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22312 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22313 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22314 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22315 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22316
22317 /* SSE4A */
22318 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22319 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22320 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22321 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22322
22323 /* AES */
22324 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22325 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22326
22327 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22328 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22329 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22330 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22331
22332 /* PCLMUL */
22333 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
22334
22335 /* AVX */
22336 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22337 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22338 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22339 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22340 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22341 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22342 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22343 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22344 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22345 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22346 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22347 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22348 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22349 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22350 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22351 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22352 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22353 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22354 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22355 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22356 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22357 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22358 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22359 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22360 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22361 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22362
22363 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22364 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22365 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22366 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22367
22368 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22369 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22370 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22371 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22372 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22373 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22374 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22375 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22376 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22377 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22378 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22379 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22380 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22381 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22382 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22383 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22384 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22385 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22386 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22387 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22388 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22389 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22390 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22391 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22392 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22393 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22394 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22395 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22396 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22397 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22398 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22399 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22400 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22401 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22402
22403 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22404 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22405 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22406
22407 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22408 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22409 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22410 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22411 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22412
22413 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22414
22415 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22416 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22417
22418 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22419 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22420 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22421 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22422
22423 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22424 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22425 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22426 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22427 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22428 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22429
22430 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22431 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22432 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22433 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22434 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22435 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22436 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22437 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22438 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22439 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22440 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22441 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22442 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22443 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22444 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22445
22446 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22447 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22448
22449 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22450 };
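/* Illustrative note (not part of the upstream table): each row above binds one
   builtin name to an insn pattern and a function type.  Taking the AVX row for
   "__builtin_ia32_addpd256" (CODE_FOR_addv4df3, V4DF_FTYPE_V4DF_V4DF) as an
   example, code compiled with -mavx can call it directly on 4 x double vectors:

       #include <immintrin.h>
       __m256d add4 (__m256d a, __m256d b)
       {
         return __builtin_ia32_addpd256 (a, b);   // emits a single vaddpd
       }

   The intrinsic wrappers in the ia32 headers are thin inline functions around
   exactly these builtins.  */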
22451
22452 /* FMA4 and XOP. */
22453 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22454 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22455 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22456 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22457 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22458 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22459 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22460 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22461 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22462 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22463 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22464 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22465 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22466 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22467 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22468 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22469 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22470 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22471 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22472 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22473 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22474 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22475 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22476 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22477 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22478 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22479 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22480 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22481 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22482 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22483 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22484 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22485 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22486 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22487 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22488 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22489 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22490 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22491 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22492 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22493 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22494 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22495 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22496 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22497 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22498 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22499 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22500 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22501 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22502 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22503 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22504 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
22505
22506 static const struct builtin_description bdesc_multi_arg[] =
22507 {
22508 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22509 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22510 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22511 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22512 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22513 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22514 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22515 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22516
22517 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22518 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22519 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22520 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22521 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22522 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22523 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22524 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22525
22526 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22527 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22528 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22529 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22530
22531 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22532 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22533 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22534 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22535
22536 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22537 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22538 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22539 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22540
22541 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22542 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22543 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22544 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22545
22546 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22547 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22548 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22549 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22550 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
22551 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22552 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22553
22554 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22555 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22556 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22557 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22558 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22559 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22560 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22561
22562 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22563
22564 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22565 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22566 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22567 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22568 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22569 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22570 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22571 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22572 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22573 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22574 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22575 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22576
22577 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22578 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22579 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22580 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22581 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22582 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22583 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22584 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22585 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22586 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22587 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22588 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22589 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22590 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22591 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22592 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22593
22594 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22595 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22596 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22597 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22598 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22599 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22600
22601 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22602 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22603 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22604 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22605 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22606 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22607 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22608 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22609 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22610 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22611 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22612 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22613 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22614 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22615 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22616
22617 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22618 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22619 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22620 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22621 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22622 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22623 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22624
22625 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22626 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22627 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22628 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22629 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22630 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22631 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22632
22633 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22634 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22635 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22636 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22637 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22638 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22639 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22640
22641 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22642 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22643 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22644 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22645 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22646 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22647 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22648
22649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22650 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22651 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22652 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22654 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22655 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22656
22657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22658 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22659 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22660 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22662 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22664
22665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22666 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22668 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22669 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22670 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22672
22673 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22674 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22676 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22677 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22678 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22679 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22680
22681 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22682 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22683 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22684 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22685 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22686 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22687 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22688 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22689
22690 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22691 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22692 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22693 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22694 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22695 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22696 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22697 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22698
22699 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22700 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22701 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22702 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22703
22704 };
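/* Illustrative note (not part of the upstream table): the FMA4 and XOP rows
   above follow the same scheme as bdesc_args.  With -mfma4, for instance, the
   three-operand row for "__builtin_ia32_vfmaddps" (MULTI_ARG_3_SF) gives a
   fused multiply-add over 4 x float vectors:

       typedef float v4sf __attribute__ ((vector_size (16)));
       v4sf macc4 (v4sf a, v4sf b, v4sf c)
       {
         return __builtin_ia32_vfmaddps (a, b, c);   // a * b + c
       }
*/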
22705
22706 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
22707 in the current target ISA, to allow the user to compile particular modules
22708 with target-specific options that differ from the command-line
22709 options. */
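/* Illustrative example (an assumption about intended usage, not code from this
   file): because every builtin is registered up front, a translation unit
   built with only -msse2 on the command line can still contain an AVX path
   selected per function through the target attribute:

       typedef double v4df __attribute__ ((vector_size (32)));
       __attribute__ ((target ("avx")))
       v4df sqrt_avx (v4df a)
       {
         return __builtin_ia32_sqrtpd256 (a);
       }

   Only the functions carrying the attribute need the extra ISA; the builtin
   merely has to exist so the front end can resolve the call.  */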
22710 static void
22711 ix86_init_mmx_sse_builtins (void)
22712 {
22713 const struct builtin_description * d;
22714 enum ix86_builtin_func_type ftype;
22715 size_t i;
22716
22717 /* Add all special builtins with variable number of operands. */
22718 for (i = 0, d = bdesc_special_args;
22719 i < ARRAY_SIZE (bdesc_special_args);
22720 i++, d++)
22721 {
22722 if (d->name == 0)
22723 continue;
22724
22725 ftype = (enum ix86_builtin_func_type) d->flag;
22726 def_builtin (d->mask, d->name, ftype, d->code);
22727 }
22728
22729 /* Add all builtins with variable number of operands. */
22730 for (i = 0, d = bdesc_args;
22731 i < ARRAY_SIZE (bdesc_args);
22732 i++, d++)
22733 {
22734 if (d->name == 0)
22735 continue;
22736
22737 ftype = (enum ix86_builtin_func_type) d->flag;
22738 def_builtin_const (d->mask, d->name, ftype, d->code);
22739 }
22740
22741 /* pcmpestr[im] insns. */
22742 for (i = 0, d = bdesc_pcmpestr;
22743 i < ARRAY_SIZE (bdesc_pcmpestr);
22744 i++, d++)
22745 {
22746 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22747 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22748 else
22749 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22750 def_builtin_const (d->mask, d->name, ftype, d->code);
22751 }
22752
22753 /* pcmpistr[im] insns. */
22754 for (i = 0, d = bdesc_pcmpistr;
22755 i < ARRAY_SIZE (bdesc_pcmpistr);
22756 i++, d++)
22757 {
22758 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22759 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22760 else
22761 ftype = INT_FTYPE_V16QI_V16QI_INT;
22762 def_builtin_const (d->mask, d->name, ftype, d->code);
22763 }
22764
22765 /* comi/ucomi insns. */
22766 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22767 {
22768 if (d->mask == OPTION_MASK_ISA_SSE2)
22769 ftype = INT_FTYPE_V2DF_V2DF;
22770 else
22771 ftype = INT_FTYPE_V4SF_V4SF;
22772 def_builtin_const (d->mask, d->name, ftype, d->code);
22773 }
22774
22775 /* SSE */
22776 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22777 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22778 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22779 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
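/* Usage sketch (illustrative, not from this file): together these give raw
   access to the MXCSR control/status register, e.g. to set the flush-to-zero
   (bit 15) and denormals-are-zero (bit 6) bits:

       unsigned int csr = __builtin_ia32_stmxcsr ();
       __builtin_ia32_ldmxcsr (csr | (1u << 15) | (1u << 6));

   The bit positions follow the architectural MXCSR layout; nothing in this
   file defines them.  */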
22780
22781 /* SSE or 3DNow!A */
22782 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22783 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22784 IX86_BUILTIN_MASKMOVQ);
22785
22786 /* SSE2 */
22787 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22788 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22789
22790 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22791 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22792 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22793 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22794
22795 /* SSE3. */
22796 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22797 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22798 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22799 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
22800
22801 /* AES */
22802 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22803 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22804 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22805 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22806 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22807 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22808 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22809 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22810 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22811 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22812 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22813 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22814
22815 /* PCLMUL */
22816 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22817 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22818
22819 /* MMX access to the vec_init patterns. */
22820 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22821 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22822
22823 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22824 V4HI_FTYPE_HI_HI_HI_HI,
22825 IX86_BUILTIN_VEC_INIT_V4HI);
22826
22827 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22828 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22829 IX86_BUILTIN_VEC_INIT_V8QI);
22830
22831 /* Access to the vec_extract patterns. */
22832 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22833 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22834 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22835 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22836 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22837 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22838 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22839 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22840 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22841 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22842
22843 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22844 "__builtin_ia32_vec_ext_v4hi",
22845 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22846
22847 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22848 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22849
22850 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22851 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
22852
22853 /* Access to the vec_set patterns. */
22854 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22855 "__builtin_ia32_vec_set_v2di",
22856 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22857
22858 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22859 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22860
22861 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22862 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22863
22864 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22865 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22866
22867 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22868 "__builtin_ia32_vec_set_v4hi",
22869 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22870
22871 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22872 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
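/* Illustrative usage (not from the upstream source): the vec_init, vec_ext and
   vec_set builtins registered above give element-wise access to small vectors,
   with the trailing integer operand selecting the lane:

       typedef int   v2si __attribute__ ((vector_size (8)));
       typedef float v4sf __attribute__ ((vector_size (16)));
       typedef int   v4si __attribute__ ((vector_size (16)));

       v2si  a = __builtin_ia32_vec_init_v2si (1, 2);     // build an MMX pair
       float f = __builtin_ia32_vec_ext_v4sf (x, 0);      // read lane 0 of x
       v4si  b = __builtin_ia32_vec_set_v4si (y, 42, 3);  // write 42 into lane 3 of y

   where x is a v4sf and y a v4si value, and the lane index is expected to be a
   compile-time constant.  */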
22873
22874 /* Add FMA4 and XOP multi-arg builtins. */
22875 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22876 {
22877 if (d->name == 0)
22878 continue;
22879
22880 ftype = (enum ix86_builtin_func_type) d->flag;
22881 def_builtin_const (d->mask, d->name, ftype, d->code);
22882 }
22883 }
22884
22885 /* Internal subroutine of ix86_init_builtins.  */
22886
22887 static void
22888 ix86_init_builtins_va_builtins_abi (void)
22889 {
22890 tree ms_va_ref, sysv_va_ref;
22891 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22892 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22893 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22894 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22895
22896 if (!TARGET_64BIT)
22897 return;
22898 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22899 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22900 ms_va_ref = build_reference_type (ms_va_list_type_node);
22901 sysv_va_ref =
22902 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22903
22904 fnvoid_va_end_ms =
22905 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22906 fnvoid_va_start_ms =
22907 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22908 fnvoid_va_end_sysv =
22909 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22910 fnvoid_va_start_sysv =
22911 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22912 NULL_TREE);
22913 fnvoid_va_copy_ms =
22914 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22915 NULL_TREE);
22916 fnvoid_va_copy_sysv =
22917 build_function_type_list (void_type_node, sysv_va_ref,
22918 sysv_va_ref, NULL_TREE);
22919
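/* Register both the ms_abi and sysv_abi flavors of va_start/va_end/va_copy
   so that 64-bit code can handle varargs in either calling convention.  */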
22920 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22921 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22922 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22923 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22924 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22925 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22926 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22927 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22928 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22929 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22930 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22931 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22932 }
22933
22934 static void
22935 ix86_init_builtin_types (void)
22936 {
22937 tree float128_type_node, float80_type_node;
22938
22939 /* The __float80 type. */
22940 float80_type_node = long_double_type_node;
22941 if (TYPE_MODE (float80_type_node) != XFmode)
22942 {
22943 /* long double does not have XFmode, so lay out a separate 80-bit type.  */
22944 float80_type_node = make_node (REAL_TYPE);
22945
22946 TYPE_PRECISION (float80_type_node) = 80;
22947 layout_type (float80_type_node);
22948 }
22949 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22950
22951 /* The __float128 type. */
22952 float128_type_node = make_node (REAL_TYPE);
22953 TYPE_PRECISION (float128_type_node) = 128;
22954 layout_type (float128_type_node);
22955 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22956
22957 /* This macro is built by i386-builtin-types.awk. */
22958 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22959 }
22960
22961 static void
22962 ix86_init_builtins (void)
22963 {
22964 tree t;
22965
22966 ix86_init_builtin_types ();
22967
22968 /* TFmode support builtins. */
22969 def_builtin_const (0, "__builtin_infq",
22970 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22971 def_builtin_const (0, "__builtin_huge_valq",
22972 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22973
22974 /* We will expand them to a normal call if SSE2 isn't available, since
22975 they are used by libgcc. */
22976 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22977 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22978 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22979 TREE_READONLY (t) = 1;
22980 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22981
22982 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22983 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22984 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22985 TREE_READONLY (t) = 1;
22986 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
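/* Both of the above are given the library names of the libgcc TFmode helpers
   (__fabstf2, __copysigntf3), so they become ordinary calls when they cannot
   be expanded inline (see the SSE2 check in ix86_expand_builtin).  */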
22987
22988 ix86_init_mmx_sse_builtins ();
22989
22990 if (TARGET_64BIT)
22991 ix86_init_builtins_va_builtins_abi ();
22992 }
22993
22994 /* Return the ix86 builtin for CODE. */
22995
22996 static tree
22997 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22998 {
22999 if (code >= IX86_BUILTIN_MAX)
23000 return error_mark_node;
23001
23002 return ix86_builtins[code];
23003 }
23004
23005 /* Errors in the source file can cause expand_expr to return const0_rtx
23006 where we expect a vector. To avoid crashing, use one of the vector
23007 clear instructions. */
23008 static rtx
23009 safe_vector_operand (rtx x, enum machine_mode mode)
23010 {
23011 if (x == const0_rtx)
23012 x = CONST0_RTX (mode);
23013 return x;
23014 }
23015
23016 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
23017
23018 static rtx
23019 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23020 {
23021 rtx pat;
23022 tree arg0 = CALL_EXPR_ARG (exp, 0);
23023 tree arg1 = CALL_EXPR_ARG (exp, 1);
23024 rtx op0 = expand_normal (arg0);
23025 rtx op1 = expand_normal (arg1);
23026 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23027 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23028 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23029
23030 if (VECTOR_MODE_P (mode0))
23031 op0 = safe_vector_operand (op0, mode0);
23032 if (VECTOR_MODE_P (mode1))
23033 op1 = safe_vector_operand (op1, mode1);
23034
23035 if (optimize || !target
23036 || GET_MODE (target) != tmode
23037 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23038 target = gen_reg_rtx (tmode);
23039
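/* A 32-bit shift count handed to an insn that wants a TImode operand is
   loaded into the low element of a V4SI register and then viewed as TImode.  */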
23040 if (GET_MODE (op1) == SImode && mode1 == TImode)
23041 {
23042 rtx x = gen_reg_rtx (V4SImode);
23043 emit_insn (gen_sse2_loadd (x, op1));
23044 op1 = gen_lowpart (TImode, x);
23045 }
23046
23047 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23048 op0 = copy_to_mode_reg (mode0, op0);
23049 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23050 op1 = copy_to_mode_reg (mode1, op1);
23051
23052 pat = GEN_FCN (icode) (target, op0, op1);
23053 if (! pat)
23054 return 0;
23055
23056 emit_insn (pat);
23057
23058 return target;
23059 }
23060
23061 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
23062
23063 static rtx
23064 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
23065 enum ix86_builtin_func_type m_type,
23066 enum rtx_code sub_code)
23067 {
23068 rtx pat;
23069 int i;
23070 int nargs;
23071 bool comparison_p = false;
23072 bool tf_p = false;
23073 bool last_arg_constant = false;
23074 int num_memory = 0;
23075 struct {
23076 rtx op;
23077 enum machine_mode mode;
23078 } args[4];
23079
23080 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23081
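/* The MULTI_ARG_* codes encode the argument count plus any special handling:
   _IMM means the last argument must be an immediate, _CMP means SUB_CODE is a
   comparison code synthesized as an extra operand, and _TF means SUB_CODE is
   passed through as a trailing immediate.  */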
23082 switch (m_type)
23083 {
23084 case MULTI_ARG_4_DF2_DI_I:
23085 case MULTI_ARG_4_DF2_DI_I1:
23086 case MULTI_ARG_4_SF2_SI_I:
23087 case MULTI_ARG_4_SF2_SI_I1:
23088 nargs = 4;
23089 last_arg_constant = true;
23090 break;
23091
23092 case MULTI_ARG_3_SF:
23093 case MULTI_ARG_3_DF:
23094 case MULTI_ARG_3_SF2:
23095 case MULTI_ARG_3_DF2:
23096 case MULTI_ARG_3_DI:
23097 case MULTI_ARG_3_SI:
23098 case MULTI_ARG_3_SI_DI:
23099 case MULTI_ARG_3_HI:
23100 case MULTI_ARG_3_HI_SI:
23101 case MULTI_ARG_3_QI:
23102 case MULTI_ARG_3_DI2:
23103 case MULTI_ARG_3_SI2:
23104 case MULTI_ARG_3_HI2:
23105 case MULTI_ARG_3_QI2:
23106 nargs = 3;
23107 break;
23108
23109 case MULTI_ARG_2_SF:
23110 case MULTI_ARG_2_DF:
23111 case MULTI_ARG_2_DI:
23112 case MULTI_ARG_2_SI:
23113 case MULTI_ARG_2_HI:
23114 case MULTI_ARG_2_QI:
23115 nargs = 2;
23116 break;
23117
23118 case MULTI_ARG_2_DI_IMM:
23119 case MULTI_ARG_2_SI_IMM:
23120 case MULTI_ARG_2_HI_IMM:
23121 case MULTI_ARG_2_QI_IMM:
23122 nargs = 2;
23123 last_arg_constant = true;
23124 break;
23125
23126 case MULTI_ARG_1_SF:
23127 case MULTI_ARG_1_DF:
23128 case MULTI_ARG_1_SF2:
23129 case MULTI_ARG_1_DF2:
23130 case MULTI_ARG_1_DI:
23131 case MULTI_ARG_1_SI:
23132 case MULTI_ARG_1_HI:
23133 case MULTI_ARG_1_QI:
23134 case MULTI_ARG_1_SI_DI:
23135 case MULTI_ARG_1_HI_DI:
23136 case MULTI_ARG_1_HI_SI:
23137 case MULTI_ARG_1_QI_DI:
23138 case MULTI_ARG_1_QI_SI:
23139 case MULTI_ARG_1_QI_HI:
23140 nargs = 1;
23141 break;
23142
23143 case MULTI_ARG_2_DI_CMP:
23144 case MULTI_ARG_2_SI_CMP:
23145 case MULTI_ARG_2_HI_CMP:
23146 case MULTI_ARG_2_QI_CMP:
23147 nargs = 2;
23148 comparison_p = true;
23149 break;
23150
23151 case MULTI_ARG_2_SF_TF:
23152 case MULTI_ARG_2_DF_TF:
23153 case MULTI_ARG_2_DI_TF:
23154 case MULTI_ARG_2_SI_TF:
23155 case MULTI_ARG_2_HI_TF:
23156 case MULTI_ARG_2_QI_TF:
23157 nargs = 2;
23158 tf_p = true;
23159 break;
23160
23161 default:
23162 gcc_unreachable ();
23163 }
23164
23165 if (optimize || !target
23166 || GET_MODE (target) != tmode
23167 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23168 target = gen_reg_rtx (tmode);
23169
23170 gcc_assert (nargs <= 4);
23171
23172 for (i = 0; i < nargs; i++)
23173 {
23174 tree arg = CALL_EXPR_ARG (exp, i);
23175 rtx op = expand_normal (arg);
23176 int adjust = (comparison_p) ? 1 : 0;
23177 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23178
23179 if (last_arg_constant && i == nargs-1)
23180 {
23181 if (!CONST_INT_P (op))
23182 {
23183 error ("last argument must be an immediate");
23184 return gen_reg_rtx (tmode);
23185 }
23186 }
23187 else
23188 {
23189 if (VECTOR_MODE_P (mode))
23190 op = safe_vector_operand (op, mode);
23191
23192 /* If we aren't optimizing, only allow one memory operand to be
23193 generated. */
23194 if (memory_operand (op, mode))
23195 num_memory++;
23196
23197 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23198
23199 if (optimize
23200 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23201 || num_memory > 1)
23202 op = force_reg (mode, op);
23203 }
23204
23205 args[i].op = op;
23206 args[i].mode = mode;
23207 }
23208
23209 switch (nargs)
23210 {
23211 case 1:
23212 pat = GEN_FCN (icode) (target, args[0].op);
23213 break;
23214
23215 case 2:
23216 if (tf_p)
23217 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23218 GEN_INT ((int)sub_code));
23219 else if (! comparison_p)
23220 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23221 else
23222 {
23223 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23224 args[0].op,
23225 args[1].op);
23226
23227 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23228 }
23229 break;
23230
23231 case 3:
23232 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23233 break;
23234
23235 case 4:
23236 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23237 break;
23238
23239 default:
23240 gcc_unreachable ();
23241 }
23242
23243 if (! pat)
23244 return 0;
23245
23246 emit_insn (pat);
23247 return target;
23248 }
23249
23250 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23251 insns with vec_merge. */
23252
23253 static rtx
23254 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23255 rtx target)
23256 {
23257 rtx pat;
23258 tree arg0 = CALL_EXPR_ARG (exp, 0);
23259 rtx op1, op0 = expand_normal (arg0);
23260 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23261 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23262
23263 if (optimize || !target
23264 || GET_MODE (target) != tmode
23265 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23266 target = gen_reg_rtx (tmode);
23267
23268 if (VECTOR_MODE_P (mode0))
23269 op0 = safe_vector_operand (op0, mode0);
23270
23271 if ((optimize && !register_operand (op0, mode0))
23272 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23273 op0 = copy_to_mode_reg (mode0, op0);
23274
23275 op1 = op0;
23276 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23277 op1 = copy_to_mode_reg (mode0, op1);
23278
23279 pat = GEN_FCN (icode) (target, op0, op1);
23280 if (! pat)
23281 return 0;
23282 emit_insn (pat);
23283 return target;
23284 }
23285
23286 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23287
23288 static rtx
23289 ix86_expand_sse_compare (const struct builtin_description *d,
23290 tree exp, rtx target, bool swap)
23291 {
23292 rtx pat;
23293 tree arg0 = CALL_EXPR_ARG (exp, 0);
23294 tree arg1 = CALL_EXPR_ARG (exp, 1);
23295 rtx op0 = expand_normal (arg0);
23296 rtx op1 = expand_normal (arg1);
23297 rtx op2;
23298 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23299 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23300 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23301 enum rtx_code comparison = d->comparison;
23302
23303 if (VECTOR_MODE_P (mode0))
23304 op0 = safe_vector_operand (op0, mode0);
23305 if (VECTOR_MODE_P (mode1))
23306 op1 = safe_vector_operand (op1, mode1);
23307
23308 /* Swap operands if we have a comparison that isn't available in
23309 hardware. */
23310 if (swap)
23311 {
23312 rtx tmp = gen_reg_rtx (mode1);
23313 emit_move_insn (tmp, op1);
23314 op1 = op0;
23315 op0 = tmp;
23316 }
23317
23318 if (optimize || !target
23319 || GET_MODE (target) != tmode
23320 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23321 target = gen_reg_rtx (tmode);
23322
23323 if ((optimize && !register_operand (op0, mode0))
23324 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23325 op0 = copy_to_mode_reg (mode0, op0);
23326 if ((optimize && !register_operand (op1, mode1))
23327 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23328 op1 = copy_to_mode_reg (mode1, op1);
23329
23330 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23331 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23332 if (! pat)
23333 return 0;
23334 emit_insn (pat);
23335 return target;
23336 }
23337
23338 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23339
23340 static rtx
23341 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23342 rtx target)
23343 {
23344 rtx pat;
23345 tree arg0 = CALL_EXPR_ARG (exp, 0);
23346 tree arg1 = CALL_EXPR_ARG (exp, 1);
23347 rtx op0 = expand_normal (arg0);
23348 rtx op1 = expand_normal (arg1);
23349 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23350 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23351 enum rtx_code comparison = d->comparison;
23352
23353 if (VECTOR_MODE_P (mode0))
23354 op0 = safe_vector_operand (op0, mode0);
23355 if (VECTOR_MODE_P (mode1))
23356 op1 = safe_vector_operand (op1, mode1);
23357
23358 /* Swap operands if we have a comparison that isn't available in
23359 hardware. */
23360 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23361 {
23362 rtx tmp = op1;
23363 op1 = op0;
23364 op0 = tmp;
23365 }
23366
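/* The result is a flag value: zero an SImode register and set its low
   QImode part from the flags comparison emitted below.  */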
23367 target = gen_reg_rtx (SImode);
23368 emit_move_insn (target, const0_rtx);
23369 target = gen_rtx_SUBREG (QImode, target, 0);
23370
23371 if ((optimize && !register_operand (op0, mode0))
23372 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23373 op0 = copy_to_mode_reg (mode0, op0);
23374 if ((optimize && !register_operand (op1, mode1))
23375 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23376 op1 = copy_to_mode_reg (mode1, op1);
23377
23378 pat = GEN_FCN (d->icode) (op0, op1);
23379 if (! pat)
23380 return 0;
23381 emit_insn (pat);
23382 emit_insn (gen_rtx_SET (VOIDmode,
23383 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23384 gen_rtx_fmt_ee (comparison, QImode,
23385 SET_DEST (pat),
23386 const0_rtx)));
23387
23388 return SUBREG_REG (target);
23389 }
23390
23391 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23392
23393 static rtx
23394 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23395 rtx target)
23396 {
23397 rtx pat;
23398 tree arg0 = CALL_EXPR_ARG (exp, 0);
23399 tree arg1 = CALL_EXPR_ARG (exp, 1);
23400 rtx op0 = expand_normal (arg0);
23401 rtx op1 = expand_normal (arg1);
23402 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23403 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23404 enum rtx_code comparison = d->comparison;
23405
23406 if (VECTOR_MODE_P (mode0))
23407 op0 = safe_vector_operand (op0, mode0);
23408 if (VECTOR_MODE_P (mode1))
23409 op1 = safe_vector_operand (op1, mode1);
23410
23411 target = gen_reg_rtx (SImode);
23412 emit_move_insn (target, const0_rtx);
23413 target = gen_rtx_SUBREG (QImode, target, 0);
23414
23415 if ((optimize && !register_operand (op0, mode0))
23416 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23417 op0 = copy_to_mode_reg (mode0, op0);
23418 if ((optimize && !register_operand (op1, mode1))
23419 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23420 op1 = copy_to_mode_reg (mode1, op1);
23421
23422 pat = GEN_FCN (d->icode) (op0, op1);
23423 if (! pat)
23424 return 0;
23425 emit_insn (pat);
23426 emit_insn (gen_rtx_SET (VOIDmode,
23427 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23428 gen_rtx_fmt_ee (comparison, QImode,
23429 SET_DEST (pat),
23430 const0_rtx)));
23431
23432 return SUBREG_REG (target);
23433 }
23434
23435 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23436
23437 static rtx
23438 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23439 tree exp, rtx target)
23440 {
23441 rtx pat;
23442 tree arg0 = CALL_EXPR_ARG (exp, 0);
23443 tree arg1 = CALL_EXPR_ARG (exp, 1);
23444 tree arg2 = CALL_EXPR_ARG (exp, 2);
23445 tree arg3 = CALL_EXPR_ARG (exp, 3);
23446 tree arg4 = CALL_EXPR_ARG (exp, 4);
23447 rtx scratch0, scratch1;
23448 rtx op0 = expand_normal (arg0);
23449 rtx op1 = expand_normal (arg1);
23450 rtx op2 = expand_normal (arg2);
23451 rtx op3 = expand_normal (arg3);
23452 rtx op4 = expand_normal (arg4);
23453 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23454
23455 tmode0 = insn_data[d->icode].operand[0].mode;
23456 tmode1 = insn_data[d->icode].operand[1].mode;
23457 modev2 = insn_data[d->icode].operand[2].mode;
23458 modei3 = insn_data[d->icode].operand[3].mode;
23459 modev4 = insn_data[d->icode].operand[4].mode;
23460 modei5 = insn_data[d->icode].operand[5].mode;
23461 modeimm = insn_data[d->icode].operand[6].mode;
23462
23463 if (VECTOR_MODE_P (modev2))
23464 op0 = safe_vector_operand (op0, modev2);
23465 if (VECTOR_MODE_P (modev4))
23466 op2 = safe_vector_operand (op2, modev4);
23467
23468 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23469 op0 = copy_to_mode_reg (modev2, op0);
23470 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23471 op1 = copy_to_mode_reg (modei3, op1);
23472 if ((optimize && !register_operand (op2, modev4))
23473 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23474 op2 = copy_to_mode_reg (modev4, op2);
23475 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23476 op3 = copy_to_mode_reg (modei5, op3);
23477
23478 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23479 {
23480 error ("the fifth argument must be an 8-bit immediate");
23481 return const0_rtx;
23482 }
23483
23484 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23485 {
23486 if (optimize || !target
23487 || GET_MODE (target) != tmode0
23488 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23489 target = gen_reg_rtx (tmode0);
23490
23491 scratch1 = gen_reg_rtx (tmode1);
23492
23493 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23494 }
23495 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23496 {
23497 if (optimize || !target
23498 || GET_MODE (target) != tmode1
23499 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23500 target = gen_reg_rtx (tmode1);
23501
23502 scratch0 = gen_reg_rtx (tmode0);
23503
23504 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23505 }
23506 else
23507 {
23508 gcc_assert (d->flag);
23509
23510 scratch0 = gen_reg_rtx (tmode0);
23511 scratch1 = gen_reg_rtx (tmode1);
23512
23513 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23514 }
23515
23516 if (! pat)
23517 return 0;
23518
23519 emit_insn (pat);
23520
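/* For the flag-returning pcmpestr variants, D->FLAG is the CC mode in which
   to read FLAGS_REG; test that flag and hand back the QImode result widened
   to SImode.  */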
23521 if (d->flag)
23522 {
23523 target = gen_reg_rtx (SImode);
23524 emit_move_insn (target, const0_rtx);
23525 target = gen_rtx_SUBREG (QImode, target, 0);
23526
23527 emit_insn
23528 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23529 gen_rtx_fmt_ee (EQ, QImode,
23530 gen_rtx_REG ((enum machine_mode) d->flag,
23531 FLAGS_REG),
23532 const0_rtx)));
23533 return SUBREG_REG (target);
23534 }
23535 else
23536 return target;
23537 }
23538
23539
23540 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23541
23542 static rtx
23543 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23544 tree exp, rtx target)
23545 {
23546 rtx pat;
23547 tree arg0 = CALL_EXPR_ARG (exp, 0);
23548 tree arg1 = CALL_EXPR_ARG (exp, 1);
23549 tree arg2 = CALL_EXPR_ARG (exp, 2);
23550 rtx scratch0, scratch1;
23551 rtx op0 = expand_normal (arg0);
23552 rtx op1 = expand_normal (arg1);
23553 rtx op2 = expand_normal (arg2);
23554 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23555
23556 tmode0 = insn_data[d->icode].operand[0].mode;
23557 tmode1 = insn_data[d->icode].operand[1].mode;
23558 modev2 = insn_data[d->icode].operand[2].mode;
23559 modev3 = insn_data[d->icode].operand[3].mode;
23560 modeimm = insn_data[d->icode].operand[4].mode;
23561
23562 if (VECTOR_MODE_P (modev2))
23563 op0 = safe_vector_operand (op0, modev2);
23564 if (VECTOR_MODE_P (modev3))
23565 op1 = safe_vector_operand (op1, modev3);
23566
23567 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23568 op0 = copy_to_mode_reg (modev2, op0);
23569 if ((optimize && !register_operand (op1, modev3))
23570 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23571 op1 = copy_to_mode_reg (modev3, op1);
23572
23573 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23574 {
23575 error ("the third argument must be an 8-bit immediate");
23576 return const0_rtx;
23577 }
23578
23579 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23580 {
23581 if (optimize || !target
23582 || GET_MODE (target) != tmode0
23583 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23584 target = gen_reg_rtx (tmode0);
23585
23586 scratch1 = gen_reg_rtx (tmode1);
23587
23588 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23589 }
23590 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23591 {
23592 if (optimize || !target
23593 || GET_MODE (target) != tmode1
23594 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23595 target = gen_reg_rtx (tmode1);
23596
23597 scratch0 = gen_reg_rtx (tmode0);
23598
23599 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23600 }
23601 else
23602 {
23603 gcc_assert (d->flag);
23604
23605 scratch0 = gen_reg_rtx (tmode0);
23606 scratch1 = gen_reg_rtx (tmode1);
23607
23608 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23609 }
23610
23611 if (! pat)
23612 return 0;
23613
23614 emit_insn (pat);
23615
23616 if (d->flag)
23617 {
23618 target = gen_reg_rtx (SImode);
23619 emit_move_insn (target, const0_rtx);
23620 target = gen_rtx_SUBREG (QImode, target, 0);
23621
23622 emit_insn
23623 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23624 gen_rtx_fmt_ee (EQ, QImode,
23625 gen_rtx_REG ((enum machine_mode) d->flag,
23626 FLAGS_REG),
23627 const0_rtx)));
23628 return SUBREG_REG (target);
23629 }
23630 else
23631 return target;
23632 }
23633
23634 /* Subroutine of ix86_expand_builtin to take care of insns with
23635 variable number of operands. */
23636
23637 static rtx
23638 ix86_expand_args_builtin (const struct builtin_description *d,
23639 tree exp, rtx target)
23640 {
23641 rtx pat, real_target;
23642 unsigned int i, nargs;
23643 unsigned int nargs_constant = 0;
23644 int num_memory = 0;
23645 struct
23646 {
23647 rtx op;
23648 enum machine_mode mode;
23649 } args[4];
23650 bool last_arg_count = false;
23651 enum insn_code icode = d->icode;
23652 const struct insn_data *insn_p = &insn_data[icode];
23653 enum machine_mode tmode = insn_p->operand[0].mode;
23654 enum machine_mode rmode = VOIDmode;
23655 bool swap = false;
23656 enum rtx_code comparison = d->comparison;
23657
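/* The ix86_builtin_func_type names encode the signature: the part before
   _FTYPE_ is the return type and the parts after it are the argument types;
   suffixes such as _PTEST, _SWAP, _COUNT and _CONVERT select the special
   handling below.  */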
23658 switch ((enum ix86_builtin_func_type) d->flag)
23659 {
23660 case INT_FTYPE_V8SF_V8SF_PTEST:
23661 case INT_FTYPE_V4DI_V4DI_PTEST:
23662 case INT_FTYPE_V4DF_V4DF_PTEST:
23663 case INT_FTYPE_V4SF_V4SF_PTEST:
23664 case INT_FTYPE_V2DI_V2DI_PTEST:
23665 case INT_FTYPE_V2DF_V2DF_PTEST:
23666 return ix86_expand_sse_ptest (d, exp, target);
23667 case FLOAT128_FTYPE_FLOAT128:
23668 case FLOAT_FTYPE_FLOAT:
23669 case INT_FTYPE_INT:
23670 case UINT64_FTYPE_INT:
23671 case UINT16_FTYPE_UINT16:
23672 case INT64_FTYPE_INT64:
23673 case INT64_FTYPE_V4SF:
23674 case INT64_FTYPE_V2DF:
23675 case INT_FTYPE_V16QI:
23676 case INT_FTYPE_V8QI:
23677 case INT_FTYPE_V8SF:
23678 case INT_FTYPE_V4DF:
23679 case INT_FTYPE_V4SF:
23680 case INT_FTYPE_V2DF:
23681 case V16QI_FTYPE_V16QI:
23682 case V8SI_FTYPE_V8SF:
23683 case V8SI_FTYPE_V4SI:
23684 case V8HI_FTYPE_V8HI:
23685 case V8HI_FTYPE_V16QI:
23686 case V8QI_FTYPE_V8QI:
23687 case V8SF_FTYPE_V8SF:
23688 case V8SF_FTYPE_V8SI:
23689 case V8SF_FTYPE_V4SF:
23690 case V4SI_FTYPE_V4SI:
23691 case V4SI_FTYPE_V16QI:
23692 case V4SI_FTYPE_V4SF:
23693 case V4SI_FTYPE_V8SI:
23694 case V4SI_FTYPE_V8HI:
23695 case V4SI_FTYPE_V4DF:
23696 case V4SI_FTYPE_V2DF:
23697 case V4HI_FTYPE_V4HI:
23698 case V4DF_FTYPE_V4DF:
23699 case V4DF_FTYPE_V4SI:
23700 case V4DF_FTYPE_V4SF:
23701 case V4DF_FTYPE_V2DF:
23702 case V4SF_FTYPE_V4SF:
23703 case V4SF_FTYPE_V4SI:
23704 case V4SF_FTYPE_V8SF:
23705 case V4SF_FTYPE_V4DF:
23706 case V4SF_FTYPE_V2DF:
23707 case V2DI_FTYPE_V2DI:
23708 case V2DI_FTYPE_V16QI:
23709 case V2DI_FTYPE_V8HI:
23710 case V2DI_FTYPE_V4SI:
23711 case V2DF_FTYPE_V2DF:
23712 case V2DF_FTYPE_V4SI:
23713 case V2DF_FTYPE_V4DF:
23714 case V2DF_FTYPE_V4SF:
23715 case V2DF_FTYPE_V2SI:
23716 case V2SI_FTYPE_V2SI:
23717 case V2SI_FTYPE_V4SF:
23718 case V2SI_FTYPE_V2SF:
23719 case V2SI_FTYPE_V2DF:
23720 case V2SF_FTYPE_V2SF:
23721 case V2SF_FTYPE_V2SI:
23722 nargs = 1;
23723 break;
23724 case V4SF_FTYPE_V4SF_VEC_MERGE:
23725 case V2DF_FTYPE_V2DF_VEC_MERGE:
23726 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23727 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23728 case V16QI_FTYPE_V16QI_V16QI:
23729 case V16QI_FTYPE_V8HI_V8HI:
23730 case V8QI_FTYPE_V8QI_V8QI:
23731 case V8QI_FTYPE_V4HI_V4HI:
23732 case V8HI_FTYPE_V8HI_V8HI:
23733 case V8HI_FTYPE_V16QI_V16QI:
23734 case V8HI_FTYPE_V4SI_V4SI:
23735 case V8SF_FTYPE_V8SF_V8SF:
23736 case V8SF_FTYPE_V8SF_V8SI:
23737 case V4SI_FTYPE_V4SI_V4SI:
23738 case V4SI_FTYPE_V8HI_V8HI:
23739 case V4SI_FTYPE_V4SF_V4SF:
23740 case V4SI_FTYPE_V2DF_V2DF:
23741 case V4HI_FTYPE_V4HI_V4HI:
23742 case V4HI_FTYPE_V8QI_V8QI:
23743 case V4HI_FTYPE_V2SI_V2SI:
23744 case V4DF_FTYPE_V4DF_V4DF:
23745 case V4DF_FTYPE_V4DF_V4DI:
23746 case V4SF_FTYPE_V4SF_V4SF:
23747 case V4SF_FTYPE_V4SF_V4SI:
23748 case V4SF_FTYPE_V4SF_V2SI:
23749 case V4SF_FTYPE_V4SF_V2DF:
23750 case V4SF_FTYPE_V4SF_DI:
23751 case V4SF_FTYPE_V4SF_SI:
23752 case V2DI_FTYPE_V2DI_V2DI:
23753 case V2DI_FTYPE_V16QI_V16QI:
23754 case V2DI_FTYPE_V4SI_V4SI:
23755 case V2DI_FTYPE_V2DI_V16QI:
23756 case V2DI_FTYPE_V2DF_V2DF:
23757 case V2SI_FTYPE_V2SI_V2SI:
23758 case V2SI_FTYPE_V4HI_V4HI:
23759 case V2SI_FTYPE_V2SF_V2SF:
23760 case V2DF_FTYPE_V2DF_V2DF:
23761 case V2DF_FTYPE_V2DF_V4SF:
23762 case V2DF_FTYPE_V2DF_V2DI:
23763 case V2DF_FTYPE_V2DF_DI:
23764 case V2DF_FTYPE_V2DF_SI:
23765 case V2SF_FTYPE_V2SF_V2SF:
23766 case V1DI_FTYPE_V1DI_V1DI:
23767 case V1DI_FTYPE_V8QI_V8QI:
23768 case V1DI_FTYPE_V2SI_V2SI:
23769 if (comparison == UNKNOWN)
23770 return ix86_expand_binop_builtin (icode, exp, target);
23771 nargs = 2;
23772 break;
23773 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23774 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23775 gcc_assert (comparison != UNKNOWN);
23776 nargs = 2;
23777 swap = true;
23778 break;
23779 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23780 case V8HI_FTYPE_V8HI_SI_COUNT:
23781 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23782 case V4SI_FTYPE_V4SI_SI_COUNT:
23783 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23784 case V4HI_FTYPE_V4HI_SI_COUNT:
23785 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23786 case V2DI_FTYPE_V2DI_SI_COUNT:
23787 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23788 case V2SI_FTYPE_V2SI_SI_COUNT:
23789 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23790 case V1DI_FTYPE_V1DI_SI_COUNT:
23791 nargs = 2;
23792 last_arg_count = true;
23793 break;
23794 case UINT64_FTYPE_UINT64_UINT64:
23795 case UINT_FTYPE_UINT_UINT:
23796 case UINT_FTYPE_UINT_USHORT:
23797 case UINT_FTYPE_UINT_UCHAR:
23798 case UINT16_FTYPE_UINT16_INT:
23799 case UINT8_FTYPE_UINT8_INT:
23800 nargs = 2;
23801 break;
23802 case V2DI_FTYPE_V2DI_INT_CONVERT:
23803 nargs = 2;
23804 rmode = V1TImode;
23805 nargs_constant = 1;
23806 break;
23807 case V8HI_FTYPE_V8HI_INT:
23808 case V8SF_FTYPE_V8SF_INT:
23809 case V4SI_FTYPE_V4SI_INT:
23810 case V4SI_FTYPE_V8SI_INT:
23811 case V4HI_FTYPE_V4HI_INT:
23812 case V4DF_FTYPE_V4DF_INT:
23813 case V4SF_FTYPE_V4SF_INT:
23814 case V4SF_FTYPE_V8SF_INT:
23815 case V2DI_FTYPE_V2DI_INT:
23816 case V2DF_FTYPE_V2DF_INT:
23817 case V2DF_FTYPE_V4DF_INT:
23818 nargs = 2;
23819 nargs_constant = 1;
23820 break;
23821 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23822 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23823 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23824 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23825 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23826 nargs = 3;
23827 break;
23828 case V16QI_FTYPE_V16QI_V16QI_INT:
23829 case V8HI_FTYPE_V8HI_V8HI_INT:
23830 case V8SI_FTYPE_V8SI_V8SI_INT:
23831 case V8SI_FTYPE_V8SI_V4SI_INT:
23832 case V8SF_FTYPE_V8SF_V8SF_INT:
23833 case V8SF_FTYPE_V8SF_V4SF_INT:
23834 case V4SI_FTYPE_V4SI_V4SI_INT:
23835 case V4DF_FTYPE_V4DF_V4DF_INT:
23836 case V4DF_FTYPE_V4DF_V2DF_INT:
23837 case V4SF_FTYPE_V4SF_V4SF_INT:
23838 case V2DI_FTYPE_V2DI_V2DI_INT:
23839 case V2DF_FTYPE_V2DF_V2DF_INT:
23840 nargs = 3;
23841 nargs_constant = 1;
23842 break;
23843 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23844 nargs = 3;
23845 rmode = V2DImode;
23846 nargs_constant = 1;
23847 break;
23848 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23849 nargs = 3;
23850 rmode = DImode;
23851 nargs_constant = 1;
23852 break;
23853 case V2DI_FTYPE_V2DI_UINT_UINT:
23854 nargs = 3;
23855 nargs_constant = 2;
23856 break;
23857 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23858 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23859 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23860 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23861 nargs = 4;
23862 nargs_constant = 1;
23863 break;
23864 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23865 nargs = 4;
23866 nargs_constant = 2;
23867 break;
23868 default:
23869 gcc_unreachable ();
23870 }
23871
23872 gcc_assert (nargs <= ARRAY_SIZE (args));
23873
23874 if (comparison != UNKNOWN)
23875 {
23876 gcc_assert (nargs == 2);
23877 return ix86_expand_sse_compare (d, exp, target, swap);
23878 }
23879
23880 if (rmode == VOIDmode || rmode == tmode)
23881 {
23882 if (optimize
23883 || target == 0
23884 || GET_MODE (target) != tmode
23885 || ! (*insn_p->operand[0].predicate) (target, tmode))
23886 target = gen_reg_rtx (tmode);
23887 real_target = target;
23888 }
23889 else
23890 {
23891 target = gen_reg_rtx (rmode);
23892 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23893 }
23894
23895 for (i = 0; i < nargs; i++)
23896 {
23897 tree arg = CALL_EXPR_ARG (exp, i);
23898 rtx op = expand_normal (arg);
23899 enum machine_mode mode = insn_p->operand[i + 1].mode;
23900 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23901
23902 if (last_arg_count && (i + 1) == nargs)
23903 {
23904 /* SIMD shift insns take either an 8-bit immediate or a
23905 register as the count, but the builtin functions take an int
23906 as the count.  If the count doesn't match, we put it in a register.  */
23907 if (!match)
23908 {
23909 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23910 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23911 op = copy_to_reg (op);
23912 }
23913 }
23914 else if ((nargs - i) <= nargs_constant)
23915 {
23916 if (!match)
23917 switch (icode)
23918 {
23919 case CODE_FOR_sse4_1_roundpd:
23920 case CODE_FOR_sse4_1_roundps:
23921 case CODE_FOR_sse4_1_roundsd:
23922 case CODE_FOR_sse4_1_roundss:
23923 case CODE_FOR_sse4_1_blendps:
23924 case CODE_FOR_avx_blendpd256:
23925 case CODE_FOR_avx_vpermilv4df:
23926 case CODE_FOR_avx_roundpd256:
23927 case CODE_FOR_avx_roundps256:
23928 error ("the last argument must be a 4-bit immediate");
23929 return const0_rtx;
23930
23931 case CODE_FOR_sse4_1_blendpd:
23932 case CODE_FOR_avx_vpermilv2df:
23933 case CODE_FOR_xop_vpermil2v2df3:
23934 case CODE_FOR_xop_vpermil2v4sf3:
23935 case CODE_FOR_xop_vpermil2v4df3:
23936 case CODE_FOR_xop_vpermil2v8sf3:
23937 error ("the last argument must be a 2-bit immediate");
23938 return const0_rtx;
23939
23940 case CODE_FOR_avx_vextractf128v4df:
23941 case CODE_FOR_avx_vextractf128v8sf:
23942 case CODE_FOR_avx_vextractf128v8si:
23943 case CODE_FOR_avx_vinsertf128v4df:
23944 case CODE_FOR_avx_vinsertf128v8sf:
23945 case CODE_FOR_avx_vinsertf128v8si:
23946 error ("the last argument must be a 1-bit immediate");
23947 return const0_rtx;
23948
23949 case CODE_FOR_avx_cmpsdv2df3:
23950 case CODE_FOR_avx_cmpssv4sf3:
23951 case CODE_FOR_avx_cmppdv2df3:
23952 case CODE_FOR_avx_cmppsv4sf3:
23953 case CODE_FOR_avx_cmppdv4df3:
23954 case CODE_FOR_avx_cmppsv8sf3:
23955 error ("the last argument must be a 5-bit immediate");
23956 return const0_rtx;
23957
23958 default:
23959 switch (nargs_constant)
23960 {
23961 case 2:
23962 if ((nargs - i) == nargs_constant)
23963 {
23964 error ("the next to last argument must be an 8-bit immediate");
23965 break;
23966 }
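/* FALLTHRU */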
23967 case 1:
23968 error ("the last argument must be an 8-bit immediate");
23969 break;
23970 default:
23971 gcc_unreachable ();
23972 }
23973 return const0_rtx;
23974 }
23975 }
23976 else
23977 {
23978 if (VECTOR_MODE_P (mode))
23979 op = safe_vector_operand (op, mode);
23980
23981 /* If we aren't optimizing, only allow one memory operand to
23982 be generated. */
23983 if (memory_operand (op, mode))
23984 num_memory++;
23985
23986 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23987 {
23988 if (optimize || !match || num_memory > 1)
23989 op = copy_to_mode_reg (mode, op);
23990 }
23991 else
23992 {
23993 op = copy_to_reg (op);
23994 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23995 }
23996 }
23997
23998 args[i].op = op;
23999 args[i].mode = mode;
24000 }
24001
24002 switch (nargs)
24003 {
24004 case 1:
24005 pat = GEN_FCN (icode) (real_target, args[0].op);
24006 break;
24007 case 2:
24008 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
24009 break;
24010 case 3:
24011 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24012 args[2].op);
24013 break;
24014 case 4:
24015 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24016 args[2].op, args[3].op);
24017 break;
24018 default:
24019 gcc_unreachable ();
24020 }
24021
24022 if (! pat)
24023 return 0;
24024
24025 emit_insn (pat);
24026 return target;
24027 }
24028
24029 /* Subroutine of ix86_expand_builtin to take care of special insns
24030 with variable number of operands. */
24031
24032 static rtx
24033 ix86_expand_special_args_builtin (const struct builtin_description *d,
24034 tree exp, rtx target)
24035 {
24036 tree arg;
24037 rtx pat, op;
24038 unsigned int i, nargs, arg_adjust, memory;
24039 struct
24040 {
24041 rtx op;
24042 enum machine_mode mode;
24043 } args[3];
24044 enum insn_code icode = d->icode;
24045 bool last_arg_constant = false;
24046 const struct insn_data *insn_p = &insn_data[icode];
24047 enum machine_mode tmode = insn_p->operand[0].mode;
24048 enum { load, store } klass;
24049
24050 switch ((enum ix86_builtin_func_type) d->flag)
24051 {
24052 case VOID_FTYPE_VOID:
24053 emit_insn (GEN_FCN (icode) (target));
24054 return 0;
24055 case UINT64_FTYPE_VOID:
24056 nargs = 0;
24057 klass = load;
24058 memory = 0;
24059 break;
24060 case UINT64_FTYPE_PUNSIGNED:
24061 case V2DI_FTYPE_PV2DI:
24062 case V32QI_FTYPE_PCCHAR:
24063 case V16QI_FTYPE_PCCHAR:
24064 case V8SF_FTYPE_PCV4SF:
24065 case V8SF_FTYPE_PCFLOAT:
24066 case V4SF_FTYPE_PCFLOAT:
24067 case V4DF_FTYPE_PCV2DF:
24068 case V4DF_FTYPE_PCDOUBLE:
24069 case V2DF_FTYPE_PCDOUBLE:
24070 case VOID_FTYPE_PVOID:
24071 nargs = 1;
24072 klass = load;
24073 memory = 0;
24074 break;
24075 case VOID_FTYPE_PV2SF_V4SF:
24076 case VOID_FTYPE_PV4DI_V4DI:
24077 case VOID_FTYPE_PV2DI_V2DI:
24078 case VOID_FTYPE_PCHAR_V32QI:
24079 case VOID_FTYPE_PCHAR_V16QI:
24080 case VOID_FTYPE_PFLOAT_V8SF:
24081 case VOID_FTYPE_PFLOAT_V4SF:
24082 case VOID_FTYPE_PDOUBLE_V4DF:
24083 case VOID_FTYPE_PDOUBLE_V2DF:
24084 case VOID_FTYPE_PULONGLONG_ULONGLONG:
24085 case VOID_FTYPE_PINT_INT:
24086 nargs = 1;
24087 klass = store;
24088 /* Reserve memory operand for target. */
24089 memory = ARRAY_SIZE (args);
24090 break;
24091 case V4SF_FTYPE_V4SF_PCV2SF:
24092 case V2DF_FTYPE_V2DF_PCDOUBLE:
24093 nargs = 2;
24094 klass = load;
24095 memory = 1;
24096 break;
24097 case V8SF_FTYPE_PCV8SF_V8SF:
24098 case V4DF_FTYPE_PCV4DF_V4DF:
24099 case V4SF_FTYPE_PCV4SF_V4SF:
24100 case V2DF_FTYPE_PCV2DF_V2DF:
24101 nargs = 2;
24102 klass = load;
24103 memory = 0;
24104 break;
24105 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24106 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24107 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24108 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24109 nargs = 2;
24110 klass = store;
24111 /* Reserve memory operand for target. */
24112 memory = ARRAY_SIZE (args);
24113 break;
24114 case VOID_FTYPE_UINT_UINT_UINT:
24115 case VOID_FTYPE_UINT64_UINT_UINT:
24116 case UCHAR_FTYPE_UINT_UINT_UINT:
24117 case UCHAR_FTYPE_UINT64_UINT_UINT:
24118 nargs = 3;
24119 klass = load;
24120 memory = ARRAY_SIZE (args);
24121 last_arg_constant = true;
24122 break;
24123 default:
24124 gcc_unreachable ();
24125 }
24126
24127 gcc_assert (nargs <= ARRAY_SIZE (args));
24128
24129 if (klass == store)
24130 {
24131 arg = CALL_EXPR_ARG (exp, 0);
24132 op = expand_normal (arg);
24133 gcc_assert (target == 0);
24134 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24135 arg_adjust = 1;
24136 }
24137 else
24138 {
24139 arg_adjust = 0;
24140 if (optimize
24141 || target == 0
24142 || GET_MODE (target) != tmode
24143 || ! (*insn_p->operand[0].predicate) (target, tmode))
24144 target = gen_reg_rtx (tmode);
24145 }
24146
24147 for (i = 0; i < nargs; i++)
24148 {
24149 enum machine_mode mode = insn_p->operand[i + 1].mode;
24150 bool match;
24151
24152 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24153 op = expand_normal (arg);
24154 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24155
24156 if (last_arg_constant && (i + 1) == nargs)
24157 {
24158 if (!match)
24159 {
24160 if (icode == CODE_FOR_lwp_lwpvalsi3
24161 || icode == CODE_FOR_lwp_lwpinssi3
24162 || icode == CODE_FOR_lwp_lwpvaldi3
24163 || icode == CODE_FOR_lwp_lwpinsdi3)
24164 error ("the last argument must be a 32-bit immediate");
24165 else
24166 error ("the last argument must be an 8-bit immediate");
24167 return const0_rtx;
24168 }
24169 }
24170 else
24171 {
24172 if (i == memory)
24173 {
24174 /* This must be the memory operand. */
24175 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24176 gcc_assert (GET_MODE (op) == mode
24177 || GET_MODE (op) == VOIDmode);
24178 }
24179 else
24180 {
24181 /* This must be a register.  */
24182 if (VECTOR_MODE_P (mode))
24183 op = safe_vector_operand (op, mode);
24184
24185 gcc_assert (GET_MODE (op) == mode
24186 || GET_MODE (op) == VOIDmode);
24187 op = copy_to_mode_reg (mode, op);
24188 }
24189 }
24190
24191 args[i].op = op;
24192 args[i].mode = mode;
24193 }
24194
24195 switch (nargs)
24196 {
24197 case 0:
24198 pat = GEN_FCN (icode) (target);
24199 break;
24200 case 1:
24201 pat = GEN_FCN (icode) (target, args[0].op);
24202 break;
24203 case 2:
24204 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24205 break;
24206 case 3:
24207 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24208 break;
24209 default:
24210 gcc_unreachable ();
24211 }
24212
24213 if (! pat)
24214 return 0;
24215 emit_insn (pat);
24216 return klass == store ? 0 : target;
24217 }
24218
24219 /* Return the integer constant in ARG. Constrain it to be in the range
24220 of the subparts of VEC_TYPE; issue an error if not. */
24221
24222 static int
24223 get_element_number (tree vec_type, tree arg)
24224 {
24225 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24226
24227 if (!host_integerp (arg, 1)
24228 || (elt = tree_low_cst (arg, 1), elt > max))
24229 {
24230 error ("selector must be an integer constant in the range 0..%wi", max);
24231 return 0;
24232 }
24233
24234 return elt;
24235 }
24236
24237 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24238 ix86_expand_vector_init. We DO have language-level syntax for this, in
24239 the form of (type){ init-list }. Except that since we can't place emms
24240 instructions from inside the compiler, we can't allow the use of MMX
24241 registers unless the user explicitly asks for it. So we do *not* define
24242 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24243 we have builtins invoked by mmintrin.h that give us license to emit
24244 these sorts of instructions. */
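/* For example (illustrative only; the exact wrapper names live in the
   headers), mmintrin.h exposes these along the lines of
       _mm_set_pi32 (i1, i0)  ->  (__m64) __builtin_ia32_vec_init_v2si (i1, i0)
   so MMX registers only come into play when the user includes the header.  */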
24245
24246 static rtx
24247 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24248 {
24249 enum machine_mode tmode = TYPE_MODE (type);
24250 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24251 int i, n_elt = GET_MODE_NUNITS (tmode);
24252 rtvec v = rtvec_alloc (n_elt);
24253
24254 gcc_assert (VECTOR_MODE_P (tmode));
24255 gcc_assert (call_expr_nargs (exp) == n_elt);
24256
24257 for (i = 0; i < n_elt; ++i)
24258 {
24259 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24260 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24261 }
24262
24263 if (!target || !register_operand (target, tmode))
24264 target = gen_reg_rtx (tmode);
24265
24266 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24267 return target;
24268 }
24269
24270 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24271 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24272 had a language-level syntax for referencing vector elements. */
24273
24274 static rtx
24275 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24276 {
24277 enum machine_mode tmode, mode0;
24278 tree arg0, arg1;
24279 int elt;
24280 rtx op0;
24281
24282 arg0 = CALL_EXPR_ARG (exp, 0);
24283 arg1 = CALL_EXPR_ARG (exp, 1);
24284
24285 op0 = expand_normal (arg0);
24286 elt = get_element_number (TREE_TYPE (arg0), arg1);
24287
24288 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24289 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24290 gcc_assert (VECTOR_MODE_P (mode0));
24291
24292 op0 = force_reg (mode0, op0);
24293
24294 if (optimize || !target || !register_operand (target, tmode))
24295 target = gen_reg_rtx (tmode);
24296
24297 ix86_expand_vector_extract (true, target, op0, elt);
24298
24299 return target;
24300 }
24301
24302 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24303 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24304 a language-level syntax for referencing vector elements. */
24305
24306 static rtx
24307 ix86_expand_vec_set_builtin (tree exp)
24308 {
24309 enum machine_mode tmode, mode1;
24310 tree arg0, arg1, arg2;
24311 int elt;
24312 rtx op0, op1, target;
24313
24314 arg0 = CALL_EXPR_ARG (exp, 0);
24315 arg1 = CALL_EXPR_ARG (exp, 1);
24316 arg2 = CALL_EXPR_ARG (exp, 2);
24317
24318 tmode = TYPE_MODE (TREE_TYPE (arg0));
24319 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24320 gcc_assert (VECTOR_MODE_P (tmode));
24321
24322 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24323 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24324 elt = get_element_number (TREE_TYPE (arg0), arg2);
24325
24326 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24327 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24328
24329 op0 = force_reg (tmode, op0);
24330 op1 = force_reg (mode1, op1);
24331
24332 /* OP0 is the source of these builtin functions and shouldn't be
24333 modified. Create a copy, use it and return it as target. */
24334 target = gen_reg_rtx (tmode);
24335 emit_move_insn (target, op0);
24336 ix86_expand_vector_set (true, target, op1, elt);
24337
24338 return target;
24339 }
24340
24341 /* Expand an expression EXP that calls a built-in function,
24342 with result going to TARGET if that's convenient
24343 (and in mode MODE if that's convenient).
24344 SUBTARGET may be used as the target for computing one of EXP's operands.
24345 IGNORE is nonzero if the value is to be ignored. */
24346
24347 static rtx
24348 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24349 enum machine_mode mode ATTRIBUTE_UNUSED,
24350 int ignore ATTRIBUTE_UNUSED)
24351 {
24352 const struct builtin_description *d;
24353 size_t i;
24354 enum insn_code icode;
24355 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24356 tree arg0, arg1, arg2;
24357 rtx op0, op1, op2, pat;
24358 enum machine_mode mode0, mode1, mode2;
24359 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24360
24361 /* Determine whether the builtin function is available under the current ISA.
24362 Originally the builtin was not created if it wasn't applicable to the
24363 current ISA based on the command line switches. With function specific
24364 options, we need to check in the context of the function making the call
24365 whether it is supported. */
24366 if (ix86_builtins_isa[fcode].isa
24367 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24368 {
24369 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24370 NULL, NULL, false);
24371
24372 if (!opts)
24373 error ("%qE needs unknown isa option", fndecl);
24374 else
24375 {
24376 gcc_assert (opts != NULL);
24377 error ("%qE needs isa option %s", fndecl, opts);
24378 free (opts);
24379 }
24380 return const0_rtx;
24381 }
24382
24383 switch (fcode)
24384 {
24385 case IX86_BUILTIN_MASKMOVQ:
24386 case IX86_BUILTIN_MASKMOVDQU:
24387 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24388 ? CODE_FOR_mmx_maskmovq
24389 : CODE_FOR_sse2_maskmovdqu);
24390 /* Note the arg order is different from the operand order. */
24391 arg1 = CALL_EXPR_ARG (exp, 0);
24392 arg2 = CALL_EXPR_ARG (exp, 1);
24393 arg0 = CALL_EXPR_ARG (exp, 2);
24394 op0 = expand_normal (arg0);
24395 op1 = expand_normal (arg1);
24396 op2 = expand_normal (arg2);
24397 mode0 = insn_data[icode].operand[0].mode;
24398 mode1 = insn_data[icode].operand[1].mode;
24399 mode2 = insn_data[icode].operand[2].mode;
24400
24401 op0 = force_reg (Pmode, op0);
24402 op0 = gen_rtx_MEM (mode1, op0);
24403
24404 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24405 op0 = copy_to_mode_reg (mode0, op0);
24406 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24407 op1 = copy_to_mode_reg (mode1, op1);
24408 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24409 op2 = copy_to_mode_reg (mode2, op2);
24410 pat = GEN_FCN (icode) (op0, op1, op2);
24411 if (! pat)
24412 return 0;
24413 emit_insn (pat);
24414 return 0;
24415
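/* Both MXCSR builtins go through a 32-bit stack slot: the control/status
   word is transferred through memory rather than a register.  */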
24416 case IX86_BUILTIN_LDMXCSR:
24417 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24418 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24419 emit_move_insn (target, op0);
24420 emit_insn (gen_sse_ldmxcsr (target));
24421 return 0;
24422
24423 case IX86_BUILTIN_STMXCSR:
24424 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24425 emit_insn (gen_sse_stmxcsr (target));
24426 return copy_to_mode_reg (SImode, target);
24427
24428 case IX86_BUILTIN_CLFLUSH:
24429 arg0 = CALL_EXPR_ARG (exp, 0);
24430 op0 = expand_normal (arg0);
24431 icode = CODE_FOR_sse2_clflush;
24432 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24433 op0 = copy_to_mode_reg (Pmode, op0);
24434
24435 emit_insn (gen_sse2_clflush (op0));
24436 return 0;
24437
24438 case IX86_BUILTIN_MONITOR:
24439 arg0 = CALL_EXPR_ARG (exp, 0);
24440 arg1 = CALL_EXPR_ARG (exp, 1);
24441 arg2 = CALL_EXPR_ARG (exp, 2);
24442 op0 = expand_normal (arg0);
24443 op1 = expand_normal (arg1);
24444 op2 = expand_normal (arg2);
24445 if (!REG_P (op0))
24446 op0 = copy_to_mode_reg (Pmode, op0);
24447 if (!REG_P (op1))
24448 op1 = copy_to_mode_reg (SImode, op1);
24449 if (!REG_P (op2))
24450 op2 = copy_to_mode_reg (SImode, op2);
24451 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24452 return 0;
24453
24454 case IX86_BUILTIN_MWAIT:
24455 arg0 = CALL_EXPR_ARG (exp, 0);
24456 arg1 = CALL_EXPR_ARG (exp, 1);
24457 op0 = expand_normal (arg0);
24458 op1 = expand_normal (arg1);
24459 if (!REG_P (op0))
24460 op0 = copy_to_mode_reg (SImode, op0);
24461 if (!REG_P (op1))
24462 op1 = copy_to_mode_reg (SImode, op1);
24463 emit_insn (gen_sse3_mwait (op0, op1));
24464 return 0;
24465
24466 case IX86_BUILTIN_VEC_INIT_V2SI:
24467 case IX86_BUILTIN_VEC_INIT_V4HI:
24468 case IX86_BUILTIN_VEC_INIT_V8QI:
24469 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24470
24471 case IX86_BUILTIN_VEC_EXT_V2DF:
24472 case IX86_BUILTIN_VEC_EXT_V2DI:
24473 case IX86_BUILTIN_VEC_EXT_V4SF:
24474 case IX86_BUILTIN_VEC_EXT_V4SI:
24475 case IX86_BUILTIN_VEC_EXT_V8HI:
24476 case IX86_BUILTIN_VEC_EXT_V2SI:
24477 case IX86_BUILTIN_VEC_EXT_V4HI:
24478 case IX86_BUILTIN_VEC_EXT_V16QI:
24479 return ix86_expand_vec_ext_builtin (exp, target);
24480
24481 case IX86_BUILTIN_VEC_SET_V2DI:
24482 case IX86_BUILTIN_VEC_SET_V4SF:
24483 case IX86_BUILTIN_VEC_SET_V4SI:
24484 case IX86_BUILTIN_VEC_SET_V8HI:
24485 case IX86_BUILTIN_VEC_SET_V4HI:
24486 case IX86_BUILTIN_VEC_SET_V16QI:
24487 return ix86_expand_vec_set_builtin (exp);
24488
24489 case IX86_BUILTIN_VEC_PERM_V2DF:
24490 case IX86_BUILTIN_VEC_PERM_V4SF:
24491 case IX86_BUILTIN_VEC_PERM_V2DI:
24492 case IX86_BUILTIN_VEC_PERM_V4SI:
24493 case IX86_BUILTIN_VEC_PERM_V8HI:
24494 case IX86_BUILTIN_VEC_PERM_V16QI:
24495 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24496 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24497 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24498 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24499 case IX86_BUILTIN_VEC_PERM_V4DF:
24500 case IX86_BUILTIN_VEC_PERM_V8SF:
24501 return ix86_expand_vec_perm_builtin (exp);
24502
24503 case IX86_BUILTIN_INFQ:
24504 case IX86_BUILTIN_HUGE_VALQ:
24505 {
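/* Materialize an infinity of the result mode from the constant pool;
   __builtin_infq and __builtin_huge_valq are handled identically here.  */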
24506 REAL_VALUE_TYPE inf;
24507 rtx tmp;
24508
24509 real_inf (&inf);
24510 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24511
24512 tmp = validize_mem (force_const_mem (mode, tmp));
24513
24514 if (target == 0)
24515 target = gen_reg_rtx (mode);
24516
24517 emit_move_insn (target, tmp);
24518 return target;
24519 }
24520
24521 case IX86_BUILTIN_LLWPCB:
24522 arg0 = CALL_EXPR_ARG (exp, 0);
24523 op0 = expand_normal (arg0);
24524 icode = CODE_FOR_lwp_llwpcb;
24525 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24526 op0 = copy_to_mode_reg (Pmode, op0);
24527 emit_insn (gen_lwp_llwpcb (op0));
24528 return 0;
24529
24530 case IX86_BUILTIN_SLWPCB:
24531 icode = CODE_FOR_lwp_slwpcb;
24532 if (!target
24533 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24534 target = gen_reg_rtx (Pmode);
24535 emit_insn (gen_lwp_slwpcb (target));
24536 return target;
24537
24538 default:
24539 break;
24540 }
24541
24542 for (i = 0, d = bdesc_special_args;
24543 i < ARRAY_SIZE (bdesc_special_args);
24544 i++, d++)
24545 if (d->code == fcode)
24546 return ix86_expand_special_args_builtin (d, exp, target);
24547
24548 for (i = 0, d = bdesc_args;
24549 i < ARRAY_SIZE (bdesc_args);
24550 i++, d++)
24551 if (d->code == fcode)
24552 switch (fcode)
24553 {
24554 case IX86_BUILTIN_FABSQ:
24555 case IX86_BUILTIN_COPYSIGNQ:
24556 if (!TARGET_SSE2)
24557 /* Emit a normal call if SSE2 isn't available. */
24558 return expand_call (exp, target, ignore);
24559 default:
24560 return ix86_expand_args_builtin (d, exp, target);
24561 }
24562
24563 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24564 if (d->code == fcode)
24565 return ix86_expand_sse_comi (d, exp, target);
24566
24567 for (i = 0, d = bdesc_pcmpestr;
24568 i < ARRAY_SIZE (bdesc_pcmpestr);
24569 i++, d++)
24570 if (d->code == fcode)
24571 return ix86_expand_sse_pcmpestr (d, exp, target);
24572
24573 for (i = 0, d = bdesc_pcmpistr;
24574 i < ARRAY_SIZE (bdesc_pcmpistr);
24575 i++, d++)
24576 if (d->code == fcode)
24577 return ix86_expand_sse_pcmpistr (d, exp, target);
24578
24579 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24580 if (d->code == fcode)
24581 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24582 (enum ix86_builtin_func_type)
24583 d->flag, d->comparison);
24584
24585 gcc_unreachable ();
24586 }
24587
24588 /* Returns a function decl for a vectorized version of the builtin function
24589 FNDECL with the result vector type TYPE_OUT and the argument vector type
24590 TYPE_IN, or NULL_TREE if it is not available. */
24591
24592 static tree
24593 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24594 tree type_in)
24595 {
24596 enum machine_mode in_mode, out_mode;
24597 int in_n, out_n;
24598 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24599
24600 if (TREE_CODE (type_out) != VECTOR_TYPE
24601 || TREE_CODE (type_in) != VECTOR_TYPE
24602 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24603 return NULL_TREE;
24604
24605 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24606 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24607 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24608 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24609
24610 switch (fn)
24611 {
24612 case BUILT_IN_SQRT:
24613 if (out_mode == DFmode && out_n == 2
24614 && in_mode == DFmode && in_n == 2)
24615 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24616 break;
24617
24618 case BUILT_IN_SQRTF:
24619 if (out_mode == SFmode && out_n == 4
24620 && in_mode == SFmode && in_n == 4)
24621 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24622 break;
24623
24624 case BUILT_IN_LRINT:
24625 if (out_mode == SImode && out_n == 4
24626 && in_mode == DFmode && in_n == 2)
24627 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24628 break;
24629
24630 case BUILT_IN_LRINTF:
24631 if (out_mode == SImode && out_n == 4
24632 && in_mode == SFmode && in_n == 4)
24633 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24634 break;
24635
24636 case BUILT_IN_COPYSIGN:
24637 if (out_mode == DFmode && out_n == 2
24638 && in_mode == DFmode && in_n == 2)
24639 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24640 break;
24641
24642 case BUILT_IN_COPYSIGNF:
24643 if (out_mode == SFmode && out_n == 4
24644 && in_mode == SFmode && in_n == 4)
24645 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24646 break;
24647
24648 default:
24649 ;
24650 }
24651
24652 /* Dispatch to a handler for a vectorization library. */
24653 if (ix86_veclib_handler)
24654 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24655 type_in);
24656
24657 return NULL_TREE;
24658 }
24659
24660 /* Handler for an SVML-style interface to
24661 a library with vectorized intrinsics. */
24662
24663 static tree
24664 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24665 {
24666 char name[20];
24667 tree fntype, new_fndecl, args;
24668 unsigned arity;
24669 const char *bname;
24670 enum machine_mode el_mode, in_mode;
24671 int n, in_n;
24672
24673 /* The SVML library is suitable for unsafe math only.  */
24674 if (!flag_unsafe_math_optimizations)
24675 return NULL_TREE;
24676
24677 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24678 n = TYPE_VECTOR_SUBPARTS (type_out);
24679 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24680 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24681 if (el_mode != in_mode
24682 || n != in_n)
24683 return NULL_TREE;
24684
24685 switch (fn)
24686 {
24687 case BUILT_IN_EXP:
24688 case BUILT_IN_LOG:
24689 case BUILT_IN_LOG10:
24690 case BUILT_IN_POW:
24691 case BUILT_IN_TANH:
24692 case BUILT_IN_TAN:
24693 case BUILT_IN_ATAN:
24694 case BUILT_IN_ATAN2:
24695 case BUILT_IN_ATANH:
24696 case BUILT_IN_CBRT:
24697 case BUILT_IN_SINH:
24698 case BUILT_IN_SIN:
24699 case BUILT_IN_ASINH:
24700 case BUILT_IN_ASIN:
24701 case BUILT_IN_COSH:
24702 case BUILT_IN_COS:
24703 case BUILT_IN_ACOSH:
24704 case BUILT_IN_ACOS:
24705 if (el_mode != DFmode || n != 2)
24706 return NULL_TREE;
24707 break;
24708
24709 case BUILT_IN_EXPF:
24710 case BUILT_IN_LOGF:
24711 case BUILT_IN_LOG10F:
24712 case BUILT_IN_POWF:
24713 case BUILT_IN_TANHF:
24714 case BUILT_IN_TANF:
24715 case BUILT_IN_ATANF:
24716 case BUILT_IN_ATAN2F:
24717 case BUILT_IN_ATANHF:
24718 case BUILT_IN_CBRTF:
24719 case BUILT_IN_SINHF:
24720 case BUILT_IN_SINF:
24721 case BUILT_IN_ASINHF:
24722 case BUILT_IN_ASINF:
24723 case BUILT_IN_COSHF:
24724 case BUILT_IN_COSF:
24725 case BUILT_IN_ACOSHF:
24726 case BUILT_IN_ACOSF:
24727 if (el_mode != SFmode || n != 4)
24728 return NULL_TREE;
24729 break;
24730
24731 default:
24732 return NULL_TREE;
24733 }
24734
24735 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24736
24737 if (fn == BUILT_IN_LOGF)
24738 strcpy (name, "vmlsLn4");
24739 else if (fn == BUILT_IN_LOG)
24740 strcpy (name, "vmldLn2");
24741 else if (n == 4)
24742 {
24743 sprintf (name, "vmls%s", bname+10);
24744 name[strlen (name)-1] = '4';
24745 }
24746 else
24747 sprintf (name, "vmld%s2", bname+10);
24748
24749 /* Convert to uppercase. */
24750 name[4] &= ~0x20;
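  /* Illustrative examples of the mangling above (DECL_NAME of the builtin
     is "__builtin_<fn>", so bname+10 is the plain function name):
       BUILT_IN_SINF with 4 x SFmode  -> "vmlsSin4"
       BUILT_IN_SIN  with 2 x DFmode  -> "vmldSin2"
     while the log functions were special-cased to "vmlsLn4"/"vmldLn2".  */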
24751
24752 arity = 0;
24753 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24754 args = TREE_CHAIN (args))
24755 arity++;
24756
24757 if (arity == 1)
24758 fntype = build_function_type_list (type_out, type_in, NULL);
24759 else
24760 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24761
24762 /* Build a function declaration for the vectorized function. */
24763 new_fndecl = build_decl (BUILTINS_LOCATION,
24764 FUNCTION_DECL, get_identifier (name), fntype);
24765 TREE_PUBLIC (new_fndecl) = 1;
24766 DECL_EXTERNAL (new_fndecl) = 1;
24767 DECL_IS_NOVOPS (new_fndecl) = 1;
24768 TREE_READONLY (new_fndecl) = 1;
24769
24770 return new_fndecl;
24771 }
24772
24773 /* Handler for an ACML-style interface to
24774 a library with vectorized intrinsics. */
24775
24776 static tree
24777 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24778 {
24779 char name[20] = "__vr.._";
24780 tree fntype, new_fndecl, args;
24781 unsigned arity;
24782 const char *bname;
24783 enum machine_mode el_mode, in_mode;
24784 int n, in_n;
24785
24786   /* ACML is 64-bit only and suitable only for unsafe math, as it does
24787      not correctly support parts of IEEE arithmetic with the required
24788      precision, such as denormals.  */
24789 if (!TARGET_64BIT
24790 || !flag_unsafe_math_optimizations)
24791 return NULL_TREE;
24792
24793 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24794 n = TYPE_VECTOR_SUBPARTS (type_out);
24795 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24796 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24797 if (el_mode != in_mode
24798 || n != in_n)
24799 return NULL_TREE;
24800
24801 switch (fn)
24802 {
24803 case BUILT_IN_SIN:
24804 case BUILT_IN_COS:
24805 case BUILT_IN_EXP:
24806 case BUILT_IN_LOG:
24807 case BUILT_IN_LOG2:
24808 case BUILT_IN_LOG10:
24809 name[4] = 'd';
24810 name[5] = '2';
24811 if (el_mode != DFmode
24812 || n != 2)
24813 return NULL_TREE;
24814 break;
24815
24816 case BUILT_IN_SINF:
24817 case BUILT_IN_COSF:
24818 case BUILT_IN_EXPF:
24819 case BUILT_IN_POWF:
24820 case BUILT_IN_LOGF:
24821 case BUILT_IN_LOG2F:
24822 case BUILT_IN_LOG10F:
24823 name[4] = 's';
24824 name[5] = '4';
24825 if (el_mode != SFmode
24826 || n != 4)
24827 return NULL_TREE;
24828 break;
24829
24830 default:
24831 return NULL_TREE;
24832 }
24833
24834 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24835 sprintf (name + 7, "%s", bname+10);
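  /* Illustrative examples of the names built above:
       BUILT_IN_LOG  with 2 x DFmode  -> "__vrd2_log"
       BUILT_IN_SINF with 4 x SFmode  -> "__vrs4_sinf"
     which is the naming scheme used by ACML's vector math routines.  */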
24836
24837 arity = 0;
24838 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24839 args = TREE_CHAIN (args))
24840 arity++;
24841
24842 if (arity == 1)
24843 fntype = build_function_type_list (type_out, type_in, NULL);
24844 else
24845 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24846
24847 /* Build a function declaration for the vectorized function. */
24848 new_fndecl = build_decl (BUILTINS_LOCATION,
24849 FUNCTION_DECL, get_identifier (name), fntype);
24850 TREE_PUBLIC (new_fndecl) = 1;
24851 DECL_EXTERNAL (new_fndecl) = 1;
24852 DECL_IS_NOVOPS (new_fndecl) = 1;
24853 TREE_READONLY (new_fndecl) = 1;
24854
24855 return new_fndecl;
24856 }
24857
24858
24859 /* Returns a decl of a function that implements conversion of an integer vector
24860 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24861 are the types involved when converting according to CODE.
24862 Return NULL_TREE if it is not available. */
24863
24864 static tree
24865 ix86_vectorize_builtin_conversion (unsigned int code,
24866 tree dest_type, tree src_type)
24867 {
24868 if (! TARGET_SSE2)
24869 return NULL_TREE;
24870
24871 switch (code)
24872 {
24873 case FLOAT_EXPR:
24874 switch (TYPE_MODE (src_type))
24875 {
24876 case V4SImode:
24877 switch (TYPE_MODE (dest_type))
24878 {
24879 case V4SFmode:
24880 return (TYPE_UNSIGNED (src_type)
24881 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24882 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24883 case V4DFmode:
24884 return (TYPE_UNSIGNED (src_type)
24885 ? NULL_TREE
24886 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24887 default:
24888 return NULL_TREE;
24889 }
24890 break;
24891 case V8SImode:
24892 switch (TYPE_MODE (dest_type))
24893 {
24894 case V8SFmode:
24895 return (TYPE_UNSIGNED (src_type)
24896 ? NULL_TREE
24897 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24898 default:
24899 return NULL_TREE;
24900 }
24901 break;
24902 default:
24903 return NULL_TREE;
24904 }
24905
24906 case FIX_TRUNC_EXPR:
24907 switch (TYPE_MODE (dest_type))
24908 {
24909 case V4SImode:
24910 switch (TYPE_MODE (src_type))
24911 {
24912 case V4SFmode:
24913 return (TYPE_UNSIGNED (dest_type)
24914 ? NULL_TREE
24915 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24916 case V4DFmode:
24917 return (TYPE_UNSIGNED (dest_type)
24918 ? NULL_TREE
24919 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24920 default:
24921 return NULL_TREE;
24922 }
24923 break;
24924
24925 case V8SImode:
24926 switch (TYPE_MODE (src_type))
24927 {
24928 case V8SFmode:
24929 return (TYPE_UNSIGNED (dest_type)
24930 ? NULL_TREE
24931 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
24932 default:
24933 return NULL_TREE;
24934 }
24935 break;
24936
24937 default:
24938 return NULL_TREE;
24939 }
24940
24941 default:
24942 return NULL_TREE;
24943 }
24944
24945 return NULL_TREE;
24946 }
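/* For example, a FLOAT_EXPR converting a signed V4SImode vector to V4SFmode
   is handled above by IX86_BUILTIN_CVTDQ2PS (the cvtdq2ps instruction); the
   unsigned source case is only available for the 128-bit V4SI -> V4SF
   combination (CVTUDQ2PS), and every combination not listed returns
   NULL_TREE, i.e. no builtin conversion.  */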
24947
24948 /* Returns a decl of a target-specific builtin that implements the
24949    reciprocal of the function, or NULL_TREE if it is not available.  */
24950
24951 static tree
24952 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24953 bool sqrt ATTRIBUTE_UNUSED)
24954 {
24955 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24956 && flag_finite_math_only && !flag_trapping_math
24957 && flag_unsafe_math_optimizations))
24958 return NULL_TREE;
24959
24960 if (md_fn)
24961 /* Machine dependent builtins. */
24962 switch (fn)
24963 {
24964 /* Vectorized version of sqrt to rsqrt conversion. */
24965 case IX86_BUILTIN_SQRTPS_NR:
24966 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24967
24968 default:
24969 return NULL_TREE;
24970 }
24971 else
24972 /* Normal builtins. */
24973 switch (fn)
24974 {
24975 /* Sqrt to rsqrt conversion. */
24976 case BUILT_IN_SQRTF:
24977 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24978
24979 default:
24980 return NULL_TREE;
24981 }
24982 }
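/* For example, with SSE math enabled and the unsafe-math flags tested above
   (e.g. under -ffast-math), the caller of this hook can replace a division
   by sqrtf with a use of IX86_BUILTIN_RSQRTF, and the vectorized
   IX86_BUILTIN_SQRTPS_NR with its RSQRTPS_NR counterpart.  Whether the
   substitution is profitable is decided by the caller, not here.  */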
24983 \f
24984 /* Helper for avx_vpermilps256_operand et al. This is also used by
24985 the expansion functions to turn the parallel back into a mask.
24986 The return value is 0 for no match and the imm8+1 for a match. */
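/* Illustrative examples: for V4SFmode the reversing parallel [3 2 1 0]
   packs two bits per element and yields the immediate 0x1b (returned as
   0x1c); for V4DFmode the identity parallel [0 1 2 3] encodes as 0x0a
   (returned as 0x0b), since only movement within each 128-bit lane is
   representable.  */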
24987
24988 int
24989 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24990 {
24991 unsigned i, nelt = GET_MODE_NUNITS (mode);
24992 unsigned mask = 0;
24993 unsigned char ipar[8];
24994
24995 if (XVECLEN (par, 0) != (int) nelt)
24996 return 0;
24997
24998 /* Validate that all of the elements are constants, and not totally
24999 out of range. Copy the data into an integral array to make the
25000 subsequent checks easier. */
25001 for (i = 0; i < nelt; ++i)
25002 {
25003 rtx er = XVECEXP (par, 0, i);
25004 unsigned HOST_WIDE_INT ei;
25005
25006 if (!CONST_INT_P (er))
25007 return 0;
25008 ei = INTVAL (er);
25009 if (ei >= nelt)
25010 return 0;
25011 ipar[i] = ei;
25012 }
25013
25014 switch (mode)
25015 {
25016 case V4DFmode:
25017 /* In the 256-bit DFmode case, we can only move elements within
25018 a 128-bit lane. */
25019 for (i = 0; i < 2; ++i)
25020 {
25021 if (ipar[i] >= 2)
25022 return 0;
25023 mask |= ipar[i] << i;
25024 }
25025 for (i = 2; i < 4; ++i)
25026 {
25027 if (ipar[i] < 2)
25028 return 0;
25029 mask |= (ipar[i] - 2) << i;
25030 }
25031 break;
25032
25033 case V8SFmode:
25034 /* In the 256-bit SFmode case, we have full freedom of movement
25035 within the low 128-bit lane, but the high 128-bit lane must
25036 mirror the exact same pattern. */
25037 for (i = 0; i < 4; ++i)
25038 if (ipar[i] + 4 != ipar[i + 4])
25039 return 0;
25040 nelt = 4;
25041 /* FALLTHRU */
25042
25043 case V2DFmode:
25044 case V4SFmode:
25045       /* In the 128-bit case, we have full freedom in the placement of
25046 the elements from the source operand. */
25047 for (i = 0; i < nelt; ++i)
25048 mask |= ipar[i] << (i * (nelt / 2));
25049 break;
25050
25051 default:
25052 gcc_unreachable ();
25053 }
25054
25055 /* Make sure success has a non-zero value by adding one. */
25056 return mask + 1;
25057 }
25058
25059 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
25060 the expansion functions to turn the parallel back into a mask.
25061 The return value is 0 for no match and the imm8+1 for a match. */
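/* Illustrative example: for V8SFmode a parallel selecting elements
   [4 5 6 7 8 9 10 11] - the high half of operand 1 followed by the low
   half of operand 2 - reconstructs the immediate 0x21 and returns 0x22.  */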
25062
25063 int
25064 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
25065 {
25066 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
25067 unsigned mask = 0;
25068 unsigned char ipar[8];
25069
25070 if (XVECLEN (par, 0) != (int) nelt)
25071 return 0;
25072
25073 /* Validate that all of the elements are constants, and not totally
25074 out of range. Copy the data into an integral array to make the
25075 subsequent checks easier. */
25076 for (i = 0; i < nelt; ++i)
25077 {
25078 rtx er = XVECEXP (par, 0, i);
25079 unsigned HOST_WIDE_INT ei;
25080
25081 if (!CONST_INT_P (er))
25082 return 0;
25083 ei = INTVAL (er);
25084 if (ei >= 2 * nelt)
25085 return 0;
25086 ipar[i] = ei;
25087 }
25088
25089   /* Validate that each half of the permute consists of consecutive elements.  */
25090 for (i = 0; i < nelt2 - 1; ++i)
25091 if (ipar[i] + 1 != ipar[i + 1])
25092 return 0;
25093 for (i = nelt2; i < nelt - 1; ++i)
25094 if (ipar[i] + 1 != ipar[i + 1])
25095 return 0;
25096
25097 /* Reconstruct the mask. */
25098 for (i = 0; i < 2; ++i)
25099 {
25100 unsigned e = ipar[i * nelt2];
25101 if (e % nelt2)
25102 return 0;
25103 e /= nelt2;
25104 mask |= e << (i * 4);
25105 }
25106
25107 /* Make sure success has a non-zero value by adding one. */
25108 return mask + 1;
25109 }
25110 \f
25111
25112 /* Store OPERAND to the memory after reload is completed. This means
25113 that we can't easily use assign_stack_local. */
25114 rtx
25115 ix86_force_to_memory (enum machine_mode mode, rtx operand)
25116 {
25117 rtx result;
25118
25119 gcc_assert (reload_completed);
25120 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
25121 {
25122 result = gen_rtx_MEM (mode,
25123 gen_rtx_PLUS (Pmode,
25124 stack_pointer_rtx,
25125 GEN_INT (-RED_ZONE_SIZE)));
25126 emit_move_insn (result, operand);
25127 }
25128 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
25129 {
25130 switch (mode)
25131 {
25132 case HImode:
25133 case SImode:
25134 operand = gen_lowpart (DImode, operand);
25135 /* FALLTHRU */
25136 case DImode:
25137 emit_insn (
25138 gen_rtx_SET (VOIDmode,
25139 gen_rtx_MEM (DImode,
25140 gen_rtx_PRE_DEC (DImode,
25141 stack_pointer_rtx)),
25142 operand));
25143 break;
25144 default:
25145 gcc_unreachable ();
25146 }
25147 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25148 }
25149 else
25150 {
25151 switch (mode)
25152 {
25153 case DImode:
25154 {
25155 rtx operands[2];
25156 split_di (&operand, 1, operands, operands + 1);
25157 emit_insn (
25158 gen_rtx_SET (VOIDmode,
25159 gen_rtx_MEM (SImode,
25160 gen_rtx_PRE_DEC (Pmode,
25161 stack_pointer_rtx)),
25162 operands[1]));
25163 emit_insn (
25164 gen_rtx_SET (VOIDmode,
25165 gen_rtx_MEM (SImode,
25166 gen_rtx_PRE_DEC (Pmode,
25167 stack_pointer_rtx)),
25168 operands[0]));
25169 }
25170 break;
25171 case HImode:
25172 	  /* Store HImode values as SImode.  */
25173 operand = gen_lowpart (SImode, operand);
25174 /* FALLTHRU */
25175 case SImode:
25176 emit_insn (
25177 gen_rtx_SET (VOIDmode,
25178 gen_rtx_MEM (GET_MODE (operand),
25179 gen_rtx_PRE_DEC (SImode,
25180 stack_pointer_rtx)),
25181 operand));
25182 break;
25183 default:
25184 gcc_unreachable ();
25185 }
25186 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25187 }
25188 return result;
25189 }
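/* Rough usage sketch (hypothetical caller): the helpers above and below are
   meant to be used as a pair around an insn that needs a memory operand:

     rtx mem = ix86_force_to_memory (SImode, operand);
     ... emit an insn that uses MEM ...
     ix86_free_from_memory (SImode);

   so that the temporary slot carved out of the stack (or taken from the
   red zone) is released again.  */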
25190
25191 /* Free operand from the memory. */
25192 void
25193 ix86_free_from_memory (enum machine_mode mode)
25194 {
25195 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25196 {
25197 int size;
25198
25199 if (mode == DImode || TARGET_64BIT)
25200 size = 8;
25201 else
25202 size = 4;
25203 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25204 	 to a pop or add instruction if registers are available.  */
25205 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25206 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25207 GEN_INT (size))));
25208 }
25209 }
25210
25211 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25212 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
25213 same. */
25214 static const enum reg_class *
25215 i386_ira_cover_classes (void)
25216 {
25217 static const enum reg_class sse_fpmath_classes[] = {
25218 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25219 };
25220 static const enum reg_class no_sse_fpmath_classes[] = {
25221 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25222 };
25223
25224 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25225 }
25226
25227 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25228 QImode must go into class Q_REGS.
25229    Narrow ALL_REGS to GENERAL_REGS.  This allows movsf and
25230 movdf to do mem-to-mem moves through integer regs. */
25231 enum reg_class
25232 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25233 {
25234 enum machine_mode mode = GET_MODE (x);
25235
25236 /* We're only allowed to return a subclass of CLASS. Many of the
25237 following checks fail for NO_REGS, so eliminate that early. */
25238 if (regclass == NO_REGS)
25239 return NO_REGS;
25240
25241 /* All classes can load zeros. */
25242 if (x == CONST0_RTX (mode))
25243 return regclass;
25244
25245 /* Force constants into memory if we are loading a (nonzero) constant into
25246 an MMX or SSE register. This is because there are no MMX/SSE instructions
25247 to load from a constant. */
25248 if (CONSTANT_P (x)
25249 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25250 return NO_REGS;
25251
25252 /* Prefer SSE regs only, if we can use them for math. */
25253 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25254 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25255
25256 /* Floating-point constants need more complex checks. */
25257 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25258 {
25259 /* General regs can load everything. */
25260 if (reg_class_subset_p (regclass, GENERAL_REGS))
25261 return regclass;
25262
25263 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25264 zero above. We only want to wind up preferring 80387 registers if
25265 we plan on doing computation with them. */
25266 if (TARGET_80387
25267 && standard_80387_constant_p (x))
25268 {
25269 /* Limit class to non-sse. */
25270 if (regclass == FLOAT_SSE_REGS)
25271 return FLOAT_REGS;
25272 if (regclass == FP_TOP_SSE_REGS)
25273 return FP_TOP_REG;
25274 if (regclass == FP_SECOND_SSE_REGS)
25275 return FP_SECOND_REG;
25276 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25277 return regclass;
25278 }
25279
25280 return NO_REGS;
25281 }
25282
25283 /* Generally when we see PLUS here, it's the function invariant
25284      (plus soft-fp const_int), which can only be computed into general
25285 regs. */
25286 if (GET_CODE (x) == PLUS)
25287 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25288
25289 /* QImode constants are easy to load, but non-constant QImode data
25290 must go into Q_REGS. */
25291 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25292 {
25293 if (reg_class_subset_p (regclass, Q_REGS))
25294 return regclass;
25295 if (reg_class_subset_p (Q_REGS, regclass))
25296 return Q_REGS;
25297 return NO_REGS;
25298 }
25299
25300 return regclass;
25301 }
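/* For example, reloading a nonzero SFmode constant into an SSE class yields
   NO_REGS above, which forces the constant into the constant pool so that
   reload loads it from memory, since there are no MMX/SSE instructions that
   load an immediate constant.  */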
25302
25303 /* Discourage putting floating-point values in SSE registers unless
25304 SSE math is being used, and likewise for the 387 registers. */
25305 enum reg_class
25306 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25307 {
25308 enum machine_mode mode = GET_MODE (x);
25309
25310 /* Restrict the output reload class to the register bank that we are doing
25311 math on. If we would like not to return a subset of CLASS, reject this
25312 alternative: if reload cannot do this, it will still use its choice. */
25313 mode = GET_MODE (x);
25314 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25315 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25316
25317 if (X87_FLOAT_MODE_P (mode))
25318 {
25319 if (regclass == FP_TOP_SSE_REGS)
25320 return FP_TOP_REG;
25321 else if (regclass == FP_SECOND_SSE_REGS)
25322 return FP_SECOND_REG;
25323 else
25324 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25325 }
25326
25327 return regclass;
25328 }
25329
25330 static enum reg_class
25331 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25332 enum machine_mode mode,
25333 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25334 {
25335 /* QImode spills from non-QI registers require
25336      an intermediate register on 32-bit targets.  */
25337 if (!in_p && mode == QImode && !TARGET_64BIT
25338 && (rclass == GENERAL_REGS
25339 || rclass == LEGACY_REGS
25340 || rclass == INDEX_REGS))
25341 {
25342 int regno;
25343
25344 if (REG_P (x))
25345 regno = REGNO (x);
25346 else
25347 regno = -1;
25348
25349 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25350 regno = true_regnum (x);
25351
25352 /* Return Q_REGS if the operand is in memory. */
25353 if (regno == -1)
25354 return Q_REGS;
25355 }
25356
25357 return NO_REGS;
25358 }
25359
25360 /* If we are copying between general and FP registers, we need a memory
25361 location. The same is true for SSE and MMX registers.
25362
25363    To optimize register_move_cost performance, allow the inline variant.
25364
25365    The macro can't work reliably when one of the CLASSES is a class containing
25366 registers from multiple units (SSE, MMX, integer). We avoid this by never
25367    combining those units in a single alternative in the machine description.
25368 Ensure that this constraint holds to avoid unexpected surprises.
25369
25370 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25371 enforce these sanity checks. */
25372
25373 static inline int
25374 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25375 enum machine_mode mode, int strict)
25376 {
25377 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25378 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25379 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25380 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25381 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25382 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25383 {
25384 gcc_assert (!strict);
25385 return true;
25386 }
25387
25388 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25389 return true;
25390
25391   /* ??? This is a lie.  We do have moves between mmx/general, and between
25392 mmx/sse2. But by saying we need secondary memory we discourage the
25393 register allocator from using the mmx registers unless needed. */
25394 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25395 return true;
25396
25397 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25398 {
25399 /* SSE1 doesn't have any direct moves from other classes. */
25400 if (!TARGET_SSE2)
25401 return true;
25402
25403 /* If the target says that inter-unit moves are more expensive
25404 than moving through memory, then don't generate them. */
25405 if (!TARGET_INTER_UNIT_MOVES)
25406 return true;
25407
25408 /* Between SSE and general, we have moves no larger than word size. */
25409 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25410 return true;
25411 }
25412
25413 return false;
25414 }
25415
25416 int
25417 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25418 enum machine_mode mode, int strict)
25419 {
25420 return inline_secondary_memory_needed (class1, class2, mode, strict);
25421 }
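/* For example, on 32-bit targets a DImode move between SSE_REGS and
   GENERAL_REGS needs secondary memory: even with SSE2 and inter-unit moves
   enabled, GET_MODE_SIZE (DImode) exceeds UNITS_PER_WORD, so the value has
   to bounce through a stack slot.  */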
25422
25423 /* Return true if the registers in CLASS cannot represent the change from
25424 modes FROM to TO. */
25425
25426 bool
25427 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25428 enum reg_class regclass)
25429 {
25430 if (from == to)
25431 return false;
25432
25433 /* x87 registers can't do subreg at all, as all values are reformatted
25434 to extended precision. */
25435 if (MAYBE_FLOAT_CLASS_P (regclass))
25436 return true;
25437
25438 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25439 {
25440 /* Vector registers do not support QI or HImode loads. If we don't
25441 disallow a change to these modes, reload will assume it's ok to
25442 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25443 the vec_dupv4hi pattern. */
25444 if (GET_MODE_SIZE (from) < 4)
25445 return true;
25446
25447 /* Vector registers do not support subreg with nonzero offsets, which
25448 are otherwise valid for integer registers. Since we can't see
25449 whether we have a nonzero offset from here, prohibit all
25450 nonparadoxical subregs changing size. */
25451 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25452 return true;
25453 }
25454
25455 return false;
25456 }
25457
25458 /* Return the cost of moving data of mode M between a
25459 register and memory. A value of 2 is the default; this cost is
25460 relative to those in `REGISTER_MOVE_COST'.
25461
25462 This function is used extensively by register_move_cost that is used to
25463 build tables at startup. Make it inline in this case.
25464    When IN is 2, return the maximum of the in and out move costs.
25465
25466 If moving between registers and memory is more expensive than
25467 between two registers, you should define this macro to express the
25468 relative cost.
25469
25470    Also model the increased cost of moving QImode registers in
25471    non-Q_REGS classes.
25472 */
25473 static inline int
25474 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25475 int in)
25476 {
25477 int cost;
25478 if (FLOAT_CLASS_P (regclass))
25479 {
25480 int index;
25481 switch (mode)
25482 {
25483 case SFmode:
25484 index = 0;
25485 break;
25486 case DFmode:
25487 index = 1;
25488 break;
25489 case XFmode:
25490 index = 2;
25491 break;
25492 default:
25493 return 100;
25494 }
25495 if (in == 2)
25496 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25497 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25498 }
25499 if (SSE_CLASS_P (regclass))
25500 {
25501 int index;
25502 switch (GET_MODE_SIZE (mode))
25503 {
25504 case 4:
25505 index = 0;
25506 break;
25507 case 8:
25508 index = 1;
25509 break;
25510 case 16:
25511 index = 2;
25512 break;
25513 default:
25514 return 100;
25515 }
25516 if (in == 2)
25517 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25518 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25519 }
25520 if (MMX_CLASS_P (regclass))
25521 {
25522 int index;
25523 switch (GET_MODE_SIZE (mode))
25524 {
25525 case 4:
25526 index = 0;
25527 break;
25528 case 8:
25529 index = 1;
25530 break;
25531 default:
25532 return 100;
25533 }
25534       if (in == 2)
25535 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25536 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25537 }
25538 switch (GET_MODE_SIZE (mode))
25539 {
25540 case 1:
25541 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25542 {
25543 if (!in)
25544 return ix86_cost->int_store[0];
25545 if (TARGET_PARTIAL_REG_DEPENDENCY
25546 && optimize_function_for_speed_p (cfun))
25547 cost = ix86_cost->movzbl_load;
25548 else
25549 cost = ix86_cost->int_load[0];
25550 if (in == 2)
25551 return MAX (cost, ix86_cost->int_store[0]);
25552 return cost;
25553 }
25554 else
25555 {
25556 if (in == 2)
25557 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25558 if (in)
25559 return ix86_cost->movzbl_load;
25560 else
25561 return ix86_cost->int_store[0] + 4;
25562 }
25563 break;
25564 case 2:
25565 if (in == 2)
25566 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25567 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25568 default:
25569       /* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode.  */
25570 if (mode == TFmode)
25571 mode = XFmode;
25572 if (in == 2)
25573 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25574 else if (in)
25575 cost = ix86_cost->int_load[2];
25576 else
25577 cost = ix86_cost->int_store[2];
25578 return (cost * (((int) GET_MODE_SIZE (mode)
25579 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25580 }
25581 }
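/* For example, a DFmode value in FLOAT_REGS with IN == 2 is costed as
   MAX (fp_load[1], fp_store[1]), while a QImode value in a non-Q class on
   32-bit is charged the extra 4 units added above on the store path, to
   discourage such spills.  */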
25582
25583 int
25584 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25585 {
25586 return inline_memory_move_cost (mode, regclass, in);
25587 }
25588
25589
25590 /* Return the cost of moving data from a register in class CLASS1 to
25591 one in class CLASS2.
25592
25593 It is not required that the cost always equal 2 when FROM is the same as TO;
25594 on some machines it is expensive to move between registers if they are not
25595 general registers. */
25596
25597 int
25598 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25599 enum reg_class class2)
25600 {
25601   /* In case we require secondary memory, compute the cost of the store
25602      followed by the load.  In order to avoid bad register allocation choices,
25603      we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST.  */
25604
25605 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25606 {
25607 int cost = 1;
25608
25609 cost += inline_memory_move_cost (mode, class1, 2);
25610 cost += inline_memory_move_cost (mode, class2, 2);
25611
25612       /* When copying from a general purpose register we may emit multiple
25613          stores followed by a single load, causing a memory size mismatch stall.
25614          Count this as an arbitrarily high cost of 20.  */
25615 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25616 cost += 20;
25617
25618 /* In the case of FP/MMX moves, the registers actually overlap, and we
25619 have to switch modes in order to treat them differently. */
25620 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25621 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25622 cost += 20;
25623
25624 return cost;
25625 }
25626
25627 /* Moves between SSE/MMX and integer unit are expensive. */
25628 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25629 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25630
25631     /* ??? By keeping the returned value relatively high, we limit the number
25632        of moves between integer and MMX/SSE registers for all targets.
25633        Additionally, a high value prevents problems with x86_modes_tieable_p(),
25634 where integer modes in MMX/SSE registers are not tieable
25635 because of missing QImode and HImode moves to, from or between
25636 MMX/SSE registers. */
25637 return MAX (8, ix86_cost->mmxsse_to_integer);
25638
25639 if (MAYBE_FLOAT_CLASS_P (class1))
25640 return ix86_cost->fp_move;
25641 if (MAYBE_SSE_CLASS_P (class1))
25642 return ix86_cost->sse_move;
25643 if (MAYBE_MMX_CLASS_P (class1))
25644 return ix86_cost->mmx_move;
25645 return 2;
25646 }
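/* For example, moving an SFmode value between FLOAT_REGS and SSE_REGS needs
   secondary memory, so its cost above is 1 plus the memory move cost for
   each class; FP/MMX combinations get a further 20 added because those
   register files overlap and a mode switch is required.  */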
25647
25648 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25649
25650 bool
25651 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25652 {
25653   /* Flags, and only flags, can hold CCmode values.  */
25654 if (CC_REGNO_P (regno))
25655 return GET_MODE_CLASS (mode) == MODE_CC;
25656 if (GET_MODE_CLASS (mode) == MODE_CC
25657 || GET_MODE_CLASS (mode) == MODE_RANDOM
25658 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25659 return 0;
25660 if (FP_REGNO_P (regno))
25661 return VALID_FP_MODE_P (mode);
25662 if (SSE_REGNO_P (regno))
25663 {
25664 /* We implement the move patterns for all vector modes into and
25665 out of SSE registers, even when no operation instructions
25666 are available. OImode move is available only when AVX is
25667 enabled. */
25668 return ((TARGET_AVX && mode == OImode)
25669 || VALID_AVX256_REG_MODE (mode)
25670 || VALID_SSE_REG_MODE (mode)
25671 || VALID_SSE2_REG_MODE (mode)
25672 || VALID_MMX_REG_MODE (mode)
25673 || VALID_MMX_REG_MODE_3DNOW (mode));
25674 }
25675 if (MMX_REGNO_P (regno))
25676 {
25677 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25678 so if the register is available at all, then we can move data of
25679 the given mode into or out of it. */
25680 return (VALID_MMX_REG_MODE (mode)
25681 || VALID_MMX_REG_MODE_3DNOW (mode));
25682 }
25683
25684 if (mode == QImode)
25685 {
25686       /* Take care with QImode values - they can be in non-QI regs,
25687 	 but then they cause partial register stalls.  */
25688 if (regno <= BX_REG || TARGET_64BIT)
25689 return 1;
25690 if (!TARGET_PARTIAL_REG_STALL)
25691 return 1;
25692 return reload_in_progress || reload_completed;
25693 }
25694   /* We handle both integers and floats in the general purpose registers.  */
25695 else if (VALID_INT_MODE_P (mode))
25696 return 1;
25697 else if (VALID_FP_MODE_P (mode))
25698 return 1;
25699 else if (VALID_DFP_MODE_P (mode))
25700 return 1;
25701 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25702 on to use that value in smaller contexts, this can easily force a
25703 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25704 supporting DImode, allow it. */
25705 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25706 return 1;
25707
25708 return 0;
25709 }
25710
25711 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25712 tieable integer mode. */
25713
25714 static bool
25715 ix86_tieable_integer_mode_p (enum machine_mode mode)
25716 {
25717 switch (mode)
25718 {
25719 case HImode:
25720 case SImode:
25721 return true;
25722
25723 case QImode:
25724 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25725
25726 case DImode:
25727 return TARGET_64BIT;
25728
25729 default:
25730 return false;
25731 }
25732 }
25733
25734 /* Return true if MODE1 is accessible in a register that can hold MODE2
25735 without copying. That is, all register classes that can hold MODE2
25736 can also hold MODE1. */
25737
25738 bool
25739 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25740 {
25741 if (mode1 == mode2)
25742 return true;
25743
25744 if (ix86_tieable_integer_mode_p (mode1)
25745 && ix86_tieable_integer_mode_p (mode2))
25746 return true;
25747
25748 /* MODE2 being XFmode implies fp stack or general regs, which means we
25749 can tie any smaller floating point modes to it. Note that we do not
25750 tie this with TFmode. */
25751 if (mode2 == XFmode)
25752 return mode1 == SFmode || mode1 == DFmode;
25753
25754 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25755 that we can tie it with SFmode. */
25756 if (mode2 == DFmode)
25757 return mode1 == SFmode;
25758
25759 /* If MODE2 is only appropriate for an SSE register, then tie with
25760 any other mode acceptable to SSE registers. */
25761 if (GET_MODE_SIZE (mode2) == 16
25762 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25763 return (GET_MODE_SIZE (mode1) == 16
25764 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25765
25766 /* If MODE2 is appropriate for an MMX register, then tie
25767 with any other mode acceptable to MMX registers. */
25768 if (GET_MODE_SIZE (mode2) == 8
25769 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25770 return (GET_MODE_SIZE (mode1) == 8
25771 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25772
25773 return false;
25774 }
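/* For example, V4SFmode and V2DFmode are tieable (both are 16-byte modes
   valid in SSE registers), while DImode and SImode are tieable only in
   64-bit mode, where DImode is a valid general-register mode.  */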
25775
25776 /* Compute a (partial) cost for rtx X. Return true if the complete
25777 cost has been computed, and false if subexpressions should be
25778 scanned. In either case, *TOTAL contains the cost result. */
25779
25780 static bool
25781 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25782 {
25783 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25784 enum machine_mode mode = GET_MODE (x);
25785 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25786
25787 switch (code)
25788 {
25789 case CONST_INT:
25790 case CONST:
25791 case LABEL_REF:
25792 case SYMBOL_REF:
25793 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25794 *total = 3;
25795 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25796 *total = 2;
25797 else if (flag_pic && SYMBOLIC_CONST (x)
25798 && (!TARGET_64BIT
25799 		   || (GET_CODE (x) != LABEL_REF
25800 && (GET_CODE (x) != SYMBOL_REF
25801 || !SYMBOL_REF_LOCAL_P (x)))))
25802 *total = 1;
25803 else
25804 *total = 0;
25805 return true;
25806
25807 case CONST_DOUBLE:
25808 if (mode == VOIDmode)
25809 *total = 0;
25810 else
25811 switch (standard_80387_constant_p (x))
25812 {
25813 case 1: /* 0.0 */
25814 *total = 1;
25815 break;
25816 default: /* Other constants */
25817 *total = 2;
25818 break;
25819 case 0:
25820 case -1:
25821 /* Start with (MEM (SYMBOL_REF)), since that's where
25822 it'll probably end up. Add a penalty for size. */
25823 *total = (COSTS_N_INSNS (1)
25824 + (flag_pic != 0 && !TARGET_64BIT)
25825 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25826 break;
25827 }
25828 return true;
25829
25830 case ZERO_EXTEND:
25831       /* Zero extension is often completely free on x86_64, so make
25832 it as cheap as possible. */
25833 if (TARGET_64BIT && mode == DImode
25834 && GET_MODE (XEXP (x, 0)) == SImode)
25835 *total = 1;
25836 else if (TARGET_ZERO_EXTEND_WITH_AND)
25837 *total = cost->add;
25838 else
25839 *total = cost->movzx;
25840 return false;
25841
25842 case SIGN_EXTEND:
25843 *total = cost->movsx;
25844 return false;
25845
25846 case ASHIFT:
25847 if (CONST_INT_P (XEXP (x, 1))
25848 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25849 {
25850 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25851 if (value == 1)
25852 {
25853 *total = cost->add;
25854 return false;
25855 }
25856 if ((value == 2 || value == 3)
25857 && cost->lea <= cost->shift_const)
25858 {
25859 *total = cost->lea;
25860 return false;
25861 }
25862 }
25863 /* FALLTHRU */
25864
25865 case ROTATE:
25866 case ASHIFTRT:
25867 case LSHIFTRT:
25868 case ROTATERT:
25869 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25870 {
25871 if (CONST_INT_P (XEXP (x, 1)))
25872 {
25873 if (INTVAL (XEXP (x, 1)) > 32)
25874 *total = cost->shift_const + COSTS_N_INSNS (2);
25875 else
25876 *total = cost->shift_const * 2;
25877 }
25878 else
25879 {
25880 if (GET_CODE (XEXP (x, 1)) == AND)
25881 *total = cost->shift_var * 2;
25882 else
25883 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25884 }
25885 }
25886 else
25887 {
25888 if (CONST_INT_P (XEXP (x, 1)))
25889 *total = cost->shift_const;
25890 else
25891 *total = cost->shift_var;
25892 }
25893 return false;
25894
25895 case MULT:
25896 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25897 {
25898 /* ??? SSE scalar cost should be used here. */
25899 *total = cost->fmul;
25900 return false;
25901 }
25902 else if (X87_FLOAT_MODE_P (mode))
25903 {
25904 *total = cost->fmul;
25905 return false;
25906 }
25907 else if (FLOAT_MODE_P (mode))
25908 {
25909 /* ??? SSE vector cost should be used here. */
25910 *total = cost->fmul;
25911 return false;
25912 }
25913 else
25914 {
25915 rtx op0 = XEXP (x, 0);
25916 rtx op1 = XEXP (x, 1);
25917 int nbits;
25918 if (CONST_INT_P (XEXP (x, 1)))
25919 {
25920 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25921 for (nbits = 0; value != 0; value &= value - 1)
25922 nbits++;
25923 }
25924 else
25925 /* This is arbitrary. */
25926 nbits = 7;
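	  /* For example, a multiply by the constant 10 (binary 1010, two
	     bits set) is costed below as mult_init[MODE_INDEX (mode)]
	     + 2 * mult_bit plus the cost of the operands; a non-constant
	     multiplier is charged the arbitrary 7 bits chosen above.  */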
25927
25928 /* Compute costs correctly for widening multiplication. */
25929 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25930 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25931 == GET_MODE_SIZE (mode))
25932 {
25933 int is_mulwiden = 0;
25934 enum machine_mode inner_mode = GET_MODE (op0);
25935
25936 if (GET_CODE (op0) == GET_CODE (op1))
25937 is_mulwiden = 1, op1 = XEXP (op1, 0);
25938 else if (CONST_INT_P (op1))
25939 {
25940 if (GET_CODE (op0) == SIGN_EXTEND)
25941 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25942 == INTVAL (op1);
25943 else
25944 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25945 }
25946
25947 if (is_mulwiden)
25948 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25949 }
25950
25951 *total = (cost->mult_init[MODE_INDEX (mode)]
25952 + nbits * cost->mult_bit
25953 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25954
25955 return true;
25956 }
25957
25958 case DIV:
25959 case UDIV:
25960 case MOD:
25961 case UMOD:
25962 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25963 /* ??? SSE cost should be used here. */
25964 *total = cost->fdiv;
25965 else if (X87_FLOAT_MODE_P (mode))
25966 *total = cost->fdiv;
25967 else if (FLOAT_MODE_P (mode))
25968 /* ??? SSE vector cost should be used here. */
25969 *total = cost->fdiv;
25970 else
25971 *total = cost->divide[MODE_INDEX (mode)];
25972 return false;
25973
25974 case PLUS:
25975 if (GET_MODE_CLASS (mode) == MODE_INT
25976 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25977 {
25978 if (GET_CODE (XEXP (x, 0)) == PLUS
25979 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25980 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25981 && CONSTANT_P (XEXP (x, 1)))
25982 {
25983 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25984 if (val == 2 || val == 4 || val == 8)
25985 {
25986 *total = cost->lea;
25987 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25988 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25989 outer_code, speed);
25990 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25991 return true;
25992 }
25993 }
25994 else if (GET_CODE (XEXP (x, 0)) == MULT
25995 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25996 {
25997 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25998 if (val == 2 || val == 4 || val == 8)
25999 {
26000 *total = cost->lea;
26001 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26002 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26003 return true;
26004 }
26005 }
26006 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26007 {
26008 *total = cost->lea;
26009 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26010 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26011 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26012 return true;
26013 }
26014 }
26015 /* FALLTHRU */
26016
26017 case MINUS:
26018 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26019 {
26020 /* ??? SSE cost should be used here. */
26021 *total = cost->fadd;
26022 return false;
26023 }
26024 else if (X87_FLOAT_MODE_P (mode))
26025 {
26026 *total = cost->fadd;
26027 return false;
26028 }
26029 else if (FLOAT_MODE_P (mode))
26030 {
26031 /* ??? SSE vector cost should be used here. */
26032 *total = cost->fadd;
26033 return false;
26034 }
26035 /* FALLTHRU */
26036
26037 case AND:
26038 case IOR:
26039 case XOR:
26040 if (!TARGET_64BIT && mode == DImode)
26041 {
26042 *total = (cost->add * 2
26043 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26044 << (GET_MODE (XEXP (x, 0)) != DImode))
26045 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26046 << (GET_MODE (XEXP (x, 1)) != DImode)));
26047 return true;
26048 }
26049 /* FALLTHRU */
26050
26051 case NEG:
26052 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26053 {
26054 /* ??? SSE cost should be used here. */
26055 *total = cost->fchs;
26056 return false;
26057 }
26058 else if (X87_FLOAT_MODE_P (mode))
26059 {
26060 *total = cost->fchs;
26061 return false;
26062 }
26063 else if (FLOAT_MODE_P (mode))
26064 {
26065 /* ??? SSE vector cost should be used here. */
26066 *total = cost->fchs;
26067 return false;
26068 }
26069 /* FALLTHRU */
26070
26071 case NOT:
26072 if (!TARGET_64BIT && mode == DImode)
26073 *total = cost->add * 2;
26074 else
26075 *total = cost->add;
26076 return false;
26077
26078 case COMPARE:
26079 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26080 && XEXP (XEXP (x, 0), 1) == const1_rtx
26081 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26082 && XEXP (x, 1) == const0_rtx)
26083 {
26084 /* This kind of construct is implemented using test[bwl].
26085 Treat it as if we had an AND. */
26086 *total = (cost->add
26087 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26088 + rtx_cost (const1_rtx, outer_code, speed));
26089 return true;
26090 }
26091 return false;
26092
26093 case FLOAT_EXTEND:
26094 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26095 *total = 0;
26096 return false;
26097
26098 case ABS:
26099 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26100 /* ??? SSE cost should be used here. */
26101 *total = cost->fabs;
26102 else if (X87_FLOAT_MODE_P (mode))
26103 *total = cost->fabs;
26104 else if (FLOAT_MODE_P (mode))
26105 /* ??? SSE vector cost should be used here. */
26106 *total = cost->fabs;
26107 return false;
26108
26109 case SQRT:
26110 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26111 /* ??? SSE cost should be used here. */
26112 *total = cost->fsqrt;
26113 else if (X87_FLOAT_MODE_P (mode))
26114 *total = cost->fsqrt;
26115 else if (FLOAT_MODE_P (mode))
26116 /* ??? SSE vector cost should be used here. */
26117 *total = cost->fsqrt;
26118 return false;
26119
26120 case UNSPEC:
26121 if (XINT (x, 1) == UNSPEC_TP)
26122 *total = 0;
26123 return false;
26124
26125 case VEC_SELECT:
26126 case VEC_CONCAT:
26127 case VEC_MERGE:
26128 case VEC_DUPLICATE:
26129 /* ??? Assume all of these vector manipulation patterns are
26130 	 recognizable, in which case they all pretty much have the
26131 same cost. */
26132 *total = COSTS_N_INSNS (1);
26133 return true;
26134
26135 default:
26136 return false;
26137 }
26138 }
26139
26140 #if TARGET_MACHO
26141
26142 static int current_machopic_label_num;
26143
26144 /* Given a symbol name and its associated stub, write out the
26145 definition of the stub. */
26146
26147 void
26148 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26149 {
26150 unsigned int length;
26151 char *binder_name, *symbol_name, lazy_ptr_name[32];
26152 int label = ++current_machopic_label_num;
26153
26154 /* For 64-bit we shouldn't get here. */
26155 gcc_assert (!TARGET_64BIT);
26156
26157 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26158 symb = (*targetm.strip_name_encoding) (symb);
26159
26160 length = strlen (stub);
26161 binder_name = XALLOCAVEC (char, length + 32);
26162 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26163
26164 length = strlen (symb);
26165 symbol_name = XALLOCAVEC (char, length + 32);
26166 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26167
26168 sprintf (lazy_ptr_name, "L%d$lz", label);
26169
26170 if (MACHOPIC_PURE)
26171 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26172 else
26173 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26174
26175 fprintf (file, "%s:\n", stub);
26176 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26177
26178 if (MACHOPIC_PURE)
26179 {
26180 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26181 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26182 fprintf (file, "\tjmp\t*%%edx\n");
26183 }
26184 else
26185 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26186
26187 fprintf (file, "%s:\n", binder_name);
26188
26189 if (MACHOPIC_PURE)
26190 {
26191 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26192 fputs ("\tpushl\t%eax\n", file);
26193 }
26194 else
26195 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26196
26197 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26198
26199 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26200 fprintf (file, "%s:\n", lazy_ptr_name);
26201 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26202 fprintf (file, ASM_LONG "%s\n", binder_name);
26203 }
26204 #endif /* TARGET_MACHO */
26205
26206 /* Order the registers for register allocator. */
26207
26208 void
26209 x86_order_regs_for_local_alloc (void)
26210 {
26211 int pos = 0;
26212 int i;
26213
26214 /* First allocate the local general purpose registers. */
26215 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26216 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26217 reg_alloc_order [pos++] = i;
26218
26219 /* Global general purpose registers. */
26220 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26221 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26222 reg_alloc_order [pos++] = i;
26223
26224 /* x87 registers come first in case we are doing FP math
26225 using them. */
26226 if (!TARGET_SSE_MATH)
26227 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26228 reg_alloc_order [pos++] = i;
26229
26230 /* SSE registers. */
26231 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26232 reg_alloc_order [pos++] = i;
26233 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26234 reg_alloc_order [pos++] = i;
26235
26236 /* x87 registers. */
26237 if (TARGET_SSE_MATH)
26238 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26239 reg_alloc_order [pos++] = i;
26240
26241 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26242 reg_alloc_order [pos++] = i;
26243
26244    /* Initialize the rest of the array, as we do not allocate some registers
26245 at all. */
26246 while (pos < FIRST_PSEUDO_REGISTER)
26247 reg_alloc_order [pos++] = 0;
26248 }
26249
26250 /* Handle a "ms_abi" or "sysv_abi" attribute; arguments as in
26251 struct attribute_spec.handler. */
26252 static tree
26253 ix86_handle_abi_attribute (tree *node, tree name,
26254 tree args ATTRIBUTE_UNUSED,
26255 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26256 {
26257 if (TREE_CODE (*node) != FUNCTION_TYPE
26258 && TREE_CODE (*node) != METHOD_TYPE
26259 && TREE_CODE (*node) != FIELD_DECL
26260 && TREE_CODE (*node) != TYPE_DECL)
26261 {
26262 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26263 name);
26264 *no_add_attrs = true;
26265 return NULL_TREE;
26266 }
26267 if (!TARGET_64BIT)
26268 {
26269 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26270 name);
26271 *no_add_attrs = true;
26272 return NULL_TREE;
26273 }
26274
26275   /* The ms_abi and sysv_abi attributes are mutually exclusive.  */
26276 if (is_attribute_p ("ms_abi", name))
26277 {
26278 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26279 {
26280 error ("ms_abi and sysv_abi attributes are not compatible");
26281 }
26282
26283 return NULL_TREE;
26284 }
26285 else if (is_attribute_p ("sysv_abi", name))
26286 {
26287 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26288 {
26289 error ("ms_abi and sysv_abi attributes are not compatible");
26290 }
26291
26292 return NULL_TREE;
26293 }
26294
26295 return NULL_TREE;
26296 }
26297
26298 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26299 struct attribute_spec.handler. */
26300 static tree
26301 ix86_handle_struct_attribute (tree *node, tree name,
26302 tree args ATTRIBUTE_UNUSED,
26303 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26304 {
26305 tree *type = NULL;
26306 if (DECL_P (*node))
26307 {
26308 if (TREE_CODE (*node) == TYPE_DECL)
26309 type = &TREE_TYPE (*node);
26310 }
26311 else
26312 type = node;
26313
26314 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26315 || TREE_CODE (*type) == UNION_TYPE)))
26316 {
26317 warning (OPT_Wattributes, "%qE attribute ignored",
26318 name);
26319 *no_add_attrs = true;
26320 }
26321
26322 else if ((is_attribute_p ("ms_struct", name)
26323 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26324 || ((is_attribute_p ("gcc_struct", name)
26325 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26326 {
26327 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26328 name);
26329 *no_add_attrs = true;
26330 }
26331
26332 return NULL_TREE;
26333 }
26334
26335 static tree
26336 ix86_handle_fndecl_attribute (tree *node, tree name,
26337 tree args ATTRIBUTE_UNUSED,
26338 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26339 {
26340 if (TREE_CODE (*node) != FUNCTION_DECL)
26341 {
26342 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26343 name);
26344 *no_add_attrs = true;
26345 return NULL_TREE;
26346 }
26347
26348 if (TARGET_64BIT)
26349 {
26350 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26351 name);
26352 return NULL_TREE;
26353 }
26354
26355 #ifndef HAVE_AS_IX86_SWAP
26356 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26357 #endif
26358
26359 return NULL_TREE;
26360 }
26361
26362 static bool
26363 ix86_ms_bitfield_layout_p (const_tree record_type)
26364 {
26365   return (TARGET_MS_BITFIELD_LAYOUT
26366 	  && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26367     || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26368 }
26369
26370 /* Returns an expression indicating where the this parameter is
26371 located on entry to the FUNCTION. */
26372
26373 static rtx
26374 x86_this_parameter (tree function)
26375 {
26376 tree type = TREE_TYPE (function);
26377 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26378 int nregs;
26379
26380 if (TARGET_64BIT)
26381 {
26382 const int *parm_regs;
26383
26384 if (ix86_function_type_abi (type) == MS_ABI)
26385 parm_regs = x86_64_ms_abi_int_parameter_registers;
26386 else
26387 parm_regs = x86_64_int_parameter_registers;
26388 return gen_rtx_REG (DImode, parm_regs[aggr]);
26389 }
26390
26391 nregs = ix86_function_regparm (type, function);
26392
26393 if (nregs > 0 && !stdarg_p (type))
26394 {
26395 int regno;
26396
26397 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26398 regno = aggr ? DX_REG : CX_REG;
26399 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26400 {
26401 regno = CX_REG;
26402 if (aggr)
26403 return gen_rtx_MEM (SImode,
26404 plus_constant (stack_pointer_rtx, 4));
26405 }
26406 else
26407 {
26408 regno = AX_REG;
26409 if (aggr)
26410 {
26411 regno = DX_REG;
26412 if (nregs == 1)
26413 return gen_rtx_MEM (SImode,
26414 plus_constant (stack_pointer_rtx, 4));
26415 }
26416 }
26417 return gen_rtx_REG (SImode, regno);
26418 }
26419
26420 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26421 }
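/* For example, for a 32-bit fastcall method the this pointer arrives in
   %ecx, or in %edx when the return value is an aggregate returned through a
   hidden pointer (AGGR above); without register parameters it is simply the
   stack word at 4(%esp), or 8(%esp) in the aggregate case.  */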
26422
26423 /* Determine whether x86_output_mi_thunk can succeed. */
26424
26425 static bool
26426 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26427 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26428 HOST_WIDE_INT vcall_offset, const_tree function)
26429 {
26430 /* 64-bit can handle anything. */
26431 if (TARGET_64BIT)
26432 return true;
26433
26434 /* For 32-bit, everything's fine if we have one free register. */
26435 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26436 return true;
26437
26438 /* Need a free register for vcall_offset. */
26439 if (vcall_offset)
26440 return false;
26441
26442 /* Need a free register for GOT references. */
26443 if (flag_pic && !(*targetm.binds_local_p) (function))
26444 return false;
26445
26446 /* Otherwise ok. */
26447 return true;
26448 }
26449
26450 /* Output the assembler code for a thunk function. THUNK_DECL is the
26451 declaration for the thunk function itself, FUNCTION is the decl for
26452 the target function. DELTA is an immediate constant offset to be
26453 added to THIS. If VCALL_OFFSET is nonzero, the word at
26454 *(*this + vcall_offset) should be added to THIS. */
26455
26456 static void
26457 x86_output_mi_thunk (FILE *file,
26458 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26459 HOST_WIDE_INT vcall_offset, tree function)
26460 {
26461 rtx xops[3];
26462 rtx this_param = x86_this_parameter (function);
26463 rtx this_reg, tmp;
26464
26465 /* Make sure unwind info is emitted for the thunk if needed. */
26466 final_start_function (emit_barrier (), file, 1);
26467
26468 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26469 pull it in now and let DELTA benefit. */
26470 if (REG_P (this_param))
26471 this_reg = this_param;
26472 else if (vcall_offset)
26473 {
26474 /* Put the this parameter into %eax. */
26475 xops[0] = this_param;
26476 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26477 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26478 }
26479 else
26480 this_reg = NULL_RTX;
26481
26482 /* Adjust the this parameter by a fixed constant. */
26483 if (delta)
26484 {
26485 xops[0] = GEN_INT (delta);
26486 xops[1] = this_reg ? this_reg : this_param;
26487 if (TARGET_64BIT)
26488 {
26489 if (!x86_64_general_operand (xops[0], DImode))
26490 {
26491 tmp = gen_rtx_REG (DImode, R10_REG);
26492 xops[1] = tmp;
26493 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26494 xops[0] = tmp;
26495 xops[1] = this_param;
26496 }
26497 if (x86_maybe_negate_const_int (&xops[0], DImode))
26498 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26499 else
26500 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26501 }
26502 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26503 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26504 else
26505 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26506 }
26507
26508 /* Adjust the this parameter by a value stored in the vtable. */
26509 if (vcall_offset)
26510 {
26511 if (TARGET_64BIT)
26512 tmp = gen_rtx_REG (DImode, R10_REG);
26513 else
26514 {
26515 int tmp_regno = CX_REG;
26516 if (lookup_attribute ("fastcall",
26517 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26518 || lookup_attribute ("thiscall",
26519 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26520 tmp_regno = AX_REG;
26521 tmp = gen_rtx_REG (SImode, tmp_regno);
26522 }
26523
26524 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26525 xops[1] = tmp;
26526 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26527
26528 /* Adjust the this parameter. */
26529 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26530 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26531 {
26532 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26533 xops[0] = GEN_INT (vcall_offset);
26534 xops[1] = tmp2;
26535 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26536 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26537 }
26538 xops[1] = this_reg;
26539 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26540 }
26541
26542 /* If necessary, drop THIS back to its stack slot. */
26543 if (this_reg && this_reg != this_param)
26544 {
26545 xops[0] = this_reg;
26546 xops[1] = this_param;
26547 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26548 }
26549
26550 xops[0] = XEXP (DECL_RTL (function), 0);
26551 if (TARGET_64BIT)
26552 {
26553 if (!flag_pic || (*targetm.binds_local_p) (function))
26554 output_asm_insn ("jmp\t%P0", xops);
26555 /* All thunks should be in the same object as their target,
26556 and thus binds_local_p should be true. */
26557 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26558 gcc_unreachable ();
26559 else
26560 {
26561 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26562 tmp = gen_rtx_CONST (Pmode, tmp);
26563 tmp = gen_rtx_MEM (QImode, tmp);
26564 xops[0] = tmp;
26565 output_asm_insn ("jmp\t%A0", xops);
26566 }
26567 }
26568 else
26569 {
26570 if (!flag_pic || (*targetm.binds_local_p) (function))
26571 output_asm_insn ("jmp\t%P0", xops);
26572 else
26573 #if TARGET_MACHO
26574 if (TARGET_MACHO)
26575 {
26576 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26577 tmp = (gen_rtx_SYMBOL_REF
26578 (Pmode,
26579 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26580 tmp = gen_rtx_MEM (QImode, tmp);
26581 xops[0] = tmp;
26582 output_asm_insn ("jmp\t%0", xops);
26583 }
26584 else
26585 #endif /* TARGET_MACHO */
26586 {
26587 tmp = gen_rtx_REG (SImode, CX_REG);
26588 output_set_got (tmp, NULL_RTX);
26589
26590 xops[1] = tmp;
26591 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26592 output_asm_insn ("jmp\t{*}%1", xops);
26593 }
26594 }
26595 final_end_function ();
26596 }
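/* Rough example of the output for a simple 32-bit, non-PIC thunk with
   DELTA == 4 and no vcall offset, where the incoming this pointer lives at
   4(%esp) (the target name below is just a placeholder):

       addl	$4, 4(%esp)
       jmp	target

   Thunks with a vcall offset additionally load the vtable pointer into a
   scratch register and add the word at that offset to THIS before the
   jump.  */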
26597
26598 static void
26599 x86_file_start (void)
26600 {
26601 default_file_start ();
26602 #if TARGET_MACHO
26603 darwin_file_start ();
26604 #endif
26605 if (X86_FILE_START_VERSION_DIRECTIVE)
26606 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26607 if (X86_FILE_START_FLTUSED)
26608 fputs ("\t.global\t__fltused\n", asm_out_file);
26609 if (ix86_asm_dialect == ASM_INTEL)
26610 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26611 }
26612
26613 int
26614 x86_field_alignment (tree field, int computed)
26615 {
26616 enum machine_mode mode;
26617 tree type = TREE_TYPE (field);
26618
26619 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26620 return computed;
26621 mode = TYPE_MODE (strip_array_types (type));
26622 if (mode == DFmode || mode == DCmode
26623 || GET_MODE_CLASS (mode) == MODE_INT
26624 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26625 return MIN (32, computed);
26626 return computed;
26627 }
26628
26629 /* Output assembler code to FILE to increment profiler label # LABELNO
26630 for profiling a function entry. */
26631 void
26632 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26633 {
26634 if (TARGET_64BIT)
26635 {
26636 #ifndef NO_PROFILE_COUNTERS
26637 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
26638 #endif
26639
26640 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26641 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26642 else
26643 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26644 }
26645 else if (flag_pic)
26646 {
26647 #ifndef NO_PROFILE_COUNTERS
26648 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26649 LPREFIX, labelno);
26650 #endif
26651 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26652 }
26653 else
26654 {
26655 #ifndef NO_PROFILE_COUNTERS
26656 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
26657 LPREFIX, labelno);
26658 #endif
26659 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26660 }
26661 }
26662
26663 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26664 /* We don't have exact information about the insn sizes, but we may assume
26665 quite safely that we are informed about all 1 byte insns and memory
26666 address sizes. This is enough to eliminate unnecessary padding in
26667 99% of cases. */
26668
26669 static int
26670 min_insn_size (rtx insn)
26671 {
26672 int l = 0, len;
26673
26674 if (!INSN_P (insn) || !active_insn_p (insn))
26675 return 0;
26676
26677 /* Discard alignment padding we've emitted, and jump table data. */
26678 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26679 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26680 return 0;
26681 if (JUMP_TABLE_DATA_P (insn))
26682 return 0;
26683
26684 /* Important case: direct calls are always 5 bytes.
26685 It is common to have many calls in a row. */
26686 if (CALL_P (insn)
26687 && symbolic_reference_mentioned_p (PATTERN (insn))
26688 && !SIBLING_CALL_P (insn))
26689 return 5;
26690 len = get_attr_length (insn);
26691 if (len <= 1)
26692 return 1;
26693
26694 /* For normal instructions we rely on get_attr_length being exact,
26695 with a few exceptions. */
26696 if (!JUMP_P (insn))
26697 {
26698 enum attr_type type = get_attr_type (insn);
26699
26700 switch (type)
26701 {
26702 case TYPE_MULTI:
26703 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26704 || asm_noperands (PATTERN (insn)) >= 0)
26705 return 0;
26706 break;
26707 case TYPE_OTHER:
26708 case TYPE_FCMP:
26709 break;
26710 default:
26711 /* Otherwise trust get_attr_length. */
26712 return len;
26713 }
26714
26715 l = get_attr_length_address (insn);
26716 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26717 l = 4;
26718 }
26719 if (l)
26720 return 1+l;
26721 else
26722 return 2;
26723 }
26724
26725 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
26726 16-byte window. */
26727
26728 static void
26729 ix86_avoid_jump_mispredicts (void)
26730 {
26731 rtx insn, start = get_insns ();
26732 int nbytes = 0, njumps = 0;
26733 int isjump = 0;
26734
26735 /* Look for all minimal intervals of instructions containing 4 jumps.
26736 The intervals are bounded by START and INSN. NBYTES is the total
26737 size of the instructions in the interval, including INSN but not
26738 including START. When NBYTES is smaller than 16, it is possible
26739 that the end of START and INSN ends up in the same 16-byte page.
26740
26741 The smallest page offset at which INSN can start occurs when START
26742 ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26743 We add a p2align to a 16-byte window with max skip 15 - NBYTES + sizeof (INSN).
26744 */
26745 for (insn = start; insn; insn = NEXT_INSN (insn))
26746 {
26747 int min_size;
26748
26749 if (LABEL_P (insn))
26750 {
26751 int align = label_to_alignment (insn);
26752 int max_skip = label_to_max_skip (insn);
26753
26754 if (max_skip > 15)
26755 max_skip = 15;
26756 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26757 already in the current 16 byte page, because otherwise
26758 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26759 bytes to reach 16 byte boundary. */
26760 if (align <= 0
26761 || (align <= 3 && max_skip != (1 << align) - 1))
26762 max_skip = 0;
26763 if (dump_file)
26764 fprintf (dump_file, "Label %i with max_skip %i\n",
26765 INSN_UID (insn), max_skip);
26766 if (max_skip)
26767 {
26768 while (nbytes + max_skip >= 16)
26769 {
26770 start = NEXT_INSN (start);
26771 if ((JUMP_P (start)
26772 && GET_CODE (PATTERN (start)) != ADDR_VEC
26773 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26774 || CALL_P (start))
26775 njumps--, isjump = 1;
26776 else
26777 isjump = 0;
26778 nbytes -= min_insn_size (start);
26779 }
26780 }
26781 continue;
26782 }
26783
26784 min_size = min_insn_size (insn);
26785 nbytes += min_size;
26786 if (dump_file)
26787 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26788 INSN_UID (insn), min_size);
26789 if ((JUMP_P (insn)
26790 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26791 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26792 || CALL_P (insn))
26793 njumps++;
26794 else
26795 continue;
26796
26797 while (njumps > 3)
26798 {
26799 start = NEXT_INSN (start);
26800 if ((JUMP_P (start)
26801 && GET_CODE (PATTERN (start)) != ADDR_VEC
26802 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26803 || CALL_P (start))
26804 njumps--, isjump = 1;
26805 else
26806 isjump = 0;
26807 nbytes -= min_insn_size (start);
26808 }
26809 gcc_assert (njumps >= 0);
26810 if (dump_file)
26811 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26812 INSN_UID (start), INSN_UID (insn), nbytes);
26813
26814 if (njumps == 3 && isjump && nbytes < 16)
26815 {
26816 int padsize = 15 - nbytes + min_insn_size (insn);
26817
26818 if (dump_file)
26819 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26820 INSN_UID (insn), padsize);
26821 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26822 }
26823 }
26824 }
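
/* Standalone sketch (not compiled): a simplified version of the check the
   pass above performs.  SIZE and IS_JUMP are hypothetical stand-ins for
   min_insn_size and the JUMP_P/CALL_P test; the real pass maintains the
   window incrementally and also accounts for label alignment.  */
#if 0
#include <stdbool.h>

static bool
fourth_jump_in_window (const int *size, const bool *is_jump, int n)
{
  int i, j, nbytes, njumps;

  for (i = 0; i < n; i++)
    {
      if (!is_jump[i])
	continue;

      /* Count jumps and calls in the longest window of estimated size
	 at most 16 bytes that ends with instruction I.  */
      nbytes = size[i];
      njumps = 1;
      for (j = i - 1; j >= 0 && nbytes + size[j] <= 16; j--)
	{
	  nbytes += size[j];
	  if (is_jump[j])
	    njumps++;
	}

      if (njumps >= 4)
	return true;	/* The real pass would pad before instruction I.  */
    }
  return false;
}
#endif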
26825 #endif
26826
26827 /* The AMD Athlon works faster
26828 when RET is not the destination of a conditional jump or directly preceded
26829 by another jump instruction. We avoid the penalty by inserting a NOP just
26830 before the RET instruction in such cases. */
26831 static void
26832 ix86_pad_returns (void)
26833 {
26834 edge e;
26835 edge_iterator ei;
26836
26837 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26838 {
26839 basic_block bb = e->src;
26840 rtx ret = BB_END (bb);
26841 rtx prev;
26842 bool replace = false;
26843
26844 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26845 || optimize_bb_for_size_p (bb))
26846 continue;
26847 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26848 if (active_insn_p (prev) || LABEL_P (prev))
26849 break;
26850 if (prev && LABEL_P (prev))
26851 {
26852 edge e;
26853 edge_iterator ei;
26854
26855 FOR_EACH_EDGE (e, ei, bb->preds)
26856 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26857 && !(e->flags & EDGE_FALLTHRU))
26858 replace = true;
26859 }
26860 if (!replace)
26861 {
26862 prev = prev_active_insn (ret);
26863 if (prev
26864 && ((JUMP_P (prev) && any_condjump_p (prev))
26865 || CALL_P (prev)))
26866 replace = true;
26867 /* Empty functions get a branch mispredict even when the jump destination
26868 is not visible to us. */
26869 if (!prev && !optimize_function_for_size_p (cfun))
26870 replace = true;
26871 }
26872 if (replace)
26873 {
26874 emit_jump_insn_before (gen_return_internal_long (), ret);
26875 delete_insn (ret);
26876 }
26877 }
26878 }
26879
26880 /* Implement machine specific optimizations. We implement padding of returns
26881 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
26882 static void
26883 ix86_reorg (void)
26884 {
26885 if (optimize && optimize_function_for_speed_p (cfun))
26886 {
26887 if (TARGET_PAD_RETURNS)
26888 ix86_pad_returns ();
26889 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26890 if (TARGET_FOUR_JUMP_LIMIT)
26891 ix86_avoid_jump_mispredicts ();
26892 #endif
26893 }
26894 }
26895
26896 /* Return nonzero when a QImode register that must be represented via a REX
26897 prefix is used. */
26898 bool
26899 x86_extended_QIreg_mentioned_p (rtx insn)
26900 {
26901 int i;
26902 extract_insn_cached (insn);
26903 for (i = 0; i < recog_data.n_operands; i++)
26904 if (REG_P (recog_data.operand[i])
26905 && REGNO (recog_data.operand[i]) > BX_REG)
26906 return true;
26907 return false;
26908 }
26909
26910 /* Return nonzero when P points to a register encoded via a REX prefix.
26911 Called via for_each_rtx. */
26912 static int
26913 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26914 {
26915 unsigned int regno;
26916 if (!REG_P (*p))
26917 return 0;
26918 regno = REGNO (*p);
26919 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26920 }
26921
26922 /* Return true when INSN mentions a register that must be encoded using a
26923 REX prefix. */
26924 bool
26925 x86_extended_reg_mentioned_p (rtx insn)
26926 {
26927 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26928 extended_reg_mentioned_1, NULL);
26929 }
26930
26931 /* If profitable, negate (without causing overflow) the integer constant
26932 of mode MODE at location LOC. Return true if it was negated. */
26933 bool
26934 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26935 {
26936 HOST_WIDE_INT val;
26937
26938 if (!CONST_INT_P (*loc))
26939 return false;
26940
26941 switch (mode)
26942 {
26943 case DImode:
26944 /* DImode x86_64 constants must fit in 32 bits. */
26945 gcc_assert (x86_64_immediate_operand (*loc, mode));
26946
26947 mode = SImode;
26948 break;
26949
26950 case SImode:
26951 case HImode:
26952 case QImode:
26953 break;
26954
26955 default:
26956 gcc_unreachable ();
26957 }
26958
26959 /* Avoid overflows. */
26960 if (mode_signbit_p (mode, *loc))
26961 return false;
26962
26963 val = INTVAL (*loc);
26964
26965 /* Make things pretty: use `subl $4,%eax' rather than `addl $-4,%eax'.
26966 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
26967 if ((val < 0 && val != -128)
26968 || val == 128)
26969 {
26970 *loc = GEN_INT (-val);
26971 return true;
26972 }
26973
26974 return false;
26975 }
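
/* Illustrative example of the rule above (the sizes refer to the immediate
   encoding; callers that get TRUE back emit SUB with the negated constant):

     add $-4,  %eax  ->  sub $4,    %eax   either form fits a sign-extended imm8
     add $128, %eax  ->  sub $-128, %eax   128 needs imm32, -128 fits imm8
     add $-128,%eax      left unchanged    negating it to 128 would need imm32  */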
26976
26977 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26978 optabs would emit if we didn't have TFmode patterns. */
26979
26980 void
26981 x86_emit_floatuns (rtx operands[2])
26982 {
26983 rtx neglab, donelab, i0, i1, f0, in, out;
26984 enum machine_mode mode, inmode;
26985
26986 inmode = GET_MODE (operands[1]);
26987 gcc_assert (inmode == SImode || inmode == DImode);
26988
26989 out = operands[0];
26990 in = force_reg (inmode, operands[1]);
26991 mode = GET_MODE (out);
26992 neglab = gen_label_rtx ();
26993 donelab = gen_label_rtx ();
26994 f0 = gen_reg_rtx (mode);
26995
26996 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26997
26998 expand_float (out, in, 0);
26999
27000 emit_jump_insn (gen_jump (donelab));
27001 emit_barrier ();
27002
27003 emit_label (neglab);
27004
27005 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
27006 1, OPTAB_DIRECT);
27007 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
27008 1, OPTAB_DIRECT);
27009 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
27010
27011 expand_float (f0, i0, 0);
27012
27013 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
27014
27015 emit_label (donelab);
27016 }
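
/* Standalone sketch (not compiled) of the sequence emitted above, written in
   plain C for a DImode input and DFmode output.  The name floatuns_sketch is
   illustrative only.  */
#if 0
#include <stdint.h>

static double
floatuns_sketch (uint64_t x)
{
  uint64_t half;
  double d;

  if ((int64_t) x >= 0)
    return (double) (int64_t) x;	/* Signed conversion is already exact.  */

  half = (x >> 1) | (x & 1);		/* Halve, keeping the low bit sticky
					   so the final doubling rounds right.  */
  d = (double) (int64_t) half;
  return d + d;				/* Double back to the full magnitude.  */
}
#endif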
27017 \f
27018 /* AVX does not support 32-byte integer vector operations,
27019 thus the longest vector we are faced with is V16QImode. */
27020 #define MAX_VECT_LEN 16
27021
27022 struct expand_vec_perm_d
27023 {
27024 rtx target, op0, op1;
27025 unsigned char perm[MAX_VECT_LEN];
27026 enum machine_mode vmode;
27027 unsigned char nelt;
27028 bool testing_p;
27029 };
27030
27031 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
27032 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
27033
27034 /* Get a vector mode of the same size as the original but with elements
27035 twice as wide. This is only guaranteed to apply to integral vectors. */
27036
27037 static inline enum machine_mode
27038 get_mode_wider_vector (enum machine_mode o)
27039 {
27040 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
27041 enum machine_mode n = GET_MODE_WIDER_MODE (o);
27042 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
27043 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
27044 return n;
27045 }
27046
27047 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27048 with all elements equal to VAR. Return true if successful. */
27049
27050 static bool
27051 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27052 rtx target, rtx val)
27053 {
27054 bool ok;
27055
27056 switch (mode)
27057 {
27058 case V2SImode:
27059 case V2SFmode:
27060 if (!mmx_ok)
27061 return false;
27062 /* FALLTHRU */
27063
27064 case V4DFmode:
27065 case V4DImode:
27066 case V8SFmode:
27067 case V8SImode:
27068 case V2DFmode:
27069 case V2DImode:
27070 case V4SFmode:
27071 case V4SImode:
27072 {
27073 rtx insn, dup;
27074
27075 /* First attempt to recognize VAL as-is. */
27076 dup = gen_rtx_VEC_DUPLICATE (mode, val);
27077 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
27078 if (recog_memoized (insn) < 0)
27079 {
27080 rtx seq;
27081 /* If that fails, force VAL into a register. */
27082
27083 start_sequence ();
27084 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
27085 seq = get_insns ();
27086 end_sequence ();
27087 if (seq)
27088 emit_insn_before (seq, insn);
27089
27090 ok = recog_memoized (insn) >= 0;
27091 gcc_assert (ok);
27092 }
27093 }
27094 return true;
27095
27096 case V4HImode:
27097 if (!mmx_ok)
27098 return false;
27099 if (TARGET_SSE || TARGET_3DNOW_A)
27100 {
27101 rtx x;
27102
27103 val = gen_lowpart (SImode, val);
27104 x = gen_rtx_TRUNCATE (HImode, val);
27105 x = gen_rtx_VEC_DUPLICATE (mode, x);
27106 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27107 return true;
27108 }
27109 goto widen;
27110
27111 case V8QImode:
27112 if (!mmx_ok)
27113 return false;
27114 goto widen;
27115
27116 case V8HImode:
27117 if (TARGET_SSE2)
27118 {
27119 struct expand_vec_perm_d dperm;
27120 rtx tmp1, tmp2;
27121
27122 permute:
27123 memset (&dperm, 0, sizeof (dperm));
27124 dperm.target = target;
27125 dperm.vmode = mode;
27126 dperm.nelt = GET_MODE_NUNITS (mode);
27127 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
27128
27129 /* Extend to SImode using a paradoxical SUBREG. */
27130 tmp1 = gen_reg_rtx (SImode);
27131 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27132
27133 /* Insert the SImode value as low element of a V4SImode vector. */
27134 tmp2 = gen_lowpart (V4SImode, dperm.op0);
27135 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
27136
27137 ok = (expand_vec_perm_1 (&dperm)
27138 || expand_vec_perm_broadcast_1 (&dperm));
27139 gcc_assert (ok);
27140 return ok;
27141 }
27142 goto widen;
27143
27144 case V16QImode:
27145 if (TARGET_SSE2)
27146 goto permute;
27147 goto widen;
27148
27149 widen:
27150 /* Replicate the value once into the next wider mode and recurse. */
27151 {
27152 enum machine_mode smode, wsmode, wvmode;
27153 rtx x;
27154
27155 smode = GET_MODE_INNER (mode);
27156 wvmode = get_mode_wider_vector (mode);
27157 wsmode = GET_MODE_INNER (wvmode);
27158
27159 val = convert_modes (wsmode, smode, val, true);
27160 x = expand_simple_binop (wsmode, ASHIFT, val,
27161 GEN_INT (GET_MODE_BITSIZE (smode)),
27162 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27163 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27164
27165 x = gen_lowpart (wvmode, target);
27166 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27167 gcc_assert (ok);
27168 return ok;
27169 }
27170
27171 case V16HImode:
27172 case V32QImode:
27173 {
27174 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27175 rtx x = gen_reg_rtx (hvmode);
27176
27177 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27178 gcc_assert (ok);
27179
27180 x = gen_rtx_VEC_CONCAT (mode, x, x);
27181 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27182 }
27183 return true;
27184
27185 default:
27186 return false;
27187 }
27188 }
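
/* Standalone sketch (not compiled) of the "widen" strategy above: the scalar
   is replicated into the next wider integer mode, halving the number of
   vector elements each recursion.  For a QImode value this is simply:  */
#if 0
#include <stdint.h>

static uint16_t
widen_byte (uint8_t v)
{
  uint16_t w = v;		/* Zero extend, as convert_modes does above.  */
  return w | (w << 8);		/* Both halves of the HImode value now hold V.  */
}
#endif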
27189
27190 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27191 whose ONE_VAR element is VAR, and other elements are zero. Return true
27192 if successful. */
27193
27194 static bool
27195 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27196 rtx target, rtx var, int one_var)
27197 {
27198 enum machine_mode vsimode;
27199 rtx new_target;
27200 rtx x, tmp;
27201 bool use_vector_set = false;
27202
27203 switch (mode)
27204 {
27205 case V2DImode:
27206 /* For SSE4.1, we normally use vector set. But if the second
27207 element is zero and inter-unit moves are OK, we use movq
27208 instead. */
27209 use_vector_set = (TARGET_64BIT
27210 && TARGET_SSE4_1
27211 && !(TARGET_INTER_UNIT_MOVES
27212 && one_var == 0));
27213 break;
27214 case V16QImode:
27215 case V4SImode:
27216 case V4SFmode:
27217 use_vector_set = TARGET_SSE4_1;
27218 break;
27219 case V8HImode:
27220 use_vector_set = TARGET_SSE2;
27221 break;
27222 case V4HImode:
27223 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27224 break;
27225 case V32QImode:
27226 case V16HImode:
27227 case V8SImode:
27228 case V8SFmode:
27229 case V4DFmode:
27230 use_vector_set = TARGET_AVX;
27231 break;
27232 case V4DImode:
27233 /* Use ix86_expand_vector_set in 64bit mode only. */
27234 use_vector_set = TARGET_AVX && TARGET_64BIT;
27235 break;
27236 default:
27237 break;
27238 }
27239
27240 if (use_vector_set)
27241 {
27242 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27243 var = force_reg (GET_MODE_INNER (mode), var);
27244 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27245 return true;
27246 }
27247
27248 switch (mode)
27249 {
27250 case V2SFmode:
27251 case V2SImode:
27252 if (!mmx_ok)
27253 return false;
27254 /* FALLTHRU */
27255
27256 case V2DFmode:
27257 case V2DImode:
27258 if (one_var != 0)
27259 return false;
27260 var = force_reg (GET_MODE_INNER (mode), var);
27261 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27262 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27263 return true;
27264
27265 case V4SFmode:
27266 case V4SImode:
27267 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27268 new_target = gen_reg_rtx (mode);
27269 else
27270 new_target = target;
27271 var = force_reg (GET_MODE_INNER (mode), var);
27272 x = gen_rtx_VEC_DUPLICATE (mode, var);
27273 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27274 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27275 if (one_var != 0)
27276 {
27277 /* We need to shuffle the value to the correct position, so
27278 create a new pseudo to store the intermediate result. */
27279
27280 /* With SSE2, we can use the integer shuffle insns. */
27281 if (mode != V4SFmode && TARGET_SSE2)
27282 {
27283 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27284 const1_rtx,
27285 GEN_INT (one_var == 1 ? 0 : 1),
27286 GEN_INT (one_var == 2 ? 0 : 1),
27287 GEN_INT (one_var == 3 ? 0 : 1)));
27288 if (target != new_target)
27289 emit_move_insn (target, new_target);
27290 return true;
27291 }
27292
27293 /* Otherwise convert the intermediate result to V4SFmode and
27294 use the SSE1 shuffle instructions. */
27295 if (mode != V4SFmode)
27296 {
27297 tmp = gen_reg_rtx (V4SFmode);
27298 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27299 }
27300 else
27301 tmp = new_target;
27302
27303 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27304 const1_rtx,
27305 GEN_INT (one_var == 1 ? 0 : 1),
27306 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27307 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27308
27309 if (mode != V4SFmode)
27310 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27311 else if (tmp != target)
27312 emit_move_insn (target, tmp);
27313 }
27314 else if (target != new_target)
27315 emit_move_insn (target, new_target);
27316 return true;
27317
27318 case V8HImode:
27319 case V16QImode:
27320 vsimode = V4SImode;
27321 goto widen;
27322 case V4HImode:
27323 case V8QImode:
27324 if (!mmx_ok)
27325 return false;
27326 vsimode = V2SImode;
27327 goto widen;
27328 widen:
27329 if (one_var != 0)
27330 return false;
27331
27332 /* Zero extend the variable element to SImode and recurse. */
27333 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27334
27335 x = gen_reg_rtx (vsimode);
27336 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27337 var, one_var))
27338 gcc_unreachable ();
27339
27340 emit_move_insn (target, gen_lowpart (mode, x));
27341 return true;
27342
27343 default:
27344 return false;
27345 }
27346 }
27347
27348 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27349 consisting of the values in VALS. It is known that all elements
27350 except ONE_VAR are constants. Return true if successful. */
27351
27352 static bool
27353 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27354 rtx target, rtx vals, int one_var)
27355 {
27356 rtx var = XVECEXP (vals, 0, one_var);
27357 enum machine_mode wmode;
27358 rtx const_vec, x;
27359
27360 const_vec = copy_rtx (vals);
27361 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27362 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27363
27364 switch (mode)
27365 {
27366 case V2DFmode:
27367 case V2DImode:
27368 case V2SFmode:
27369 case V2SImode:
27370 /* For the two element vectors, it's just as easy to use
27371 the general case. */
27372 return false;
27373
27374 case V4DImode:
27375 /* Use ix86_expand_vector_set in 64bit mode only. */
27376 if (!TARGET_64BIT)
27377 return false;
27378 case V4DFmode:
27379 case V8SFmode:
27380 case V8SImode:
27381 case V16HImode:
27382 case V32QImode:
27383 case V4SFmode:
27384 case V4SImode:
27385 case V8HImode:
27386 case V4HImode:
27387 break;
27388
27389 case V16QImode:
27390 if (TARGET_SSE4_1)
27391 break;
27392 wmode = V8HImode;
27393 goto widen;
27394 case V8QImode:
27395 wmode = V4HImode;
27396 goto widen;
27397 widen:
27398 /* There's no way to set one QImode entry easily. Combine
27399 the variable value with its adjacent constant value, and
27400 promote to an HImode set. */
27401 x = XVECEXP (vals, 0, one_var ^ 1);
27402 if (one_var & 1)
27403 {
27404 var = convert_modes (HImode, QImode, var, true);
27405 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27406 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27407 x = GEN_INT (INTVAL (x) & 0xff);
27408 }
27409 else
27410 {
27411 var = convert_modes (HImode, QImode, var, true);
27412 x = gen_int_mode (INTVAL (x) << 8, HImode);
27413 }
27414 if (x != const0_rtx)
27415 var = expand_simple_binop (HImode, IOR, var, x, var,
27416 1, OPTAB_LIB_WIDEN);
27417
27418 x = gen_reg_rtx (wmode);
27419 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27420 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27421
27422 emit_move_insn (target, gen_lowpart (mode, x));
27423 return true;
27424
27425 default:
27426 return false;
27427 }
27428
27429 emit_move_insn (target, const_vec);
27430 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27431 return true;
27432 }
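
/* Standalone sketch (not compiled) of the QImode "widen" path above: the
   variable byte and its constant neighbour are merged into one HImode value,
   which the HImode vector-set path can then insert in a single step.  */
#if 0
#include <stdint.h>

static uint16_t
combine_qi_pair (uint8_t var, uint8_t neighbour, int var_is_high_byte)
{
  if (var_is_high_byte)			/* one_var & 1 in the code above.  */
    return ((uint16_t) var << 8) | neighbour;
  return ((uint16_t) neighbour << 8) | var;
}
#endif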
27433
27434 /* A subroutine of ix86_expand_vector_init_general. Use vector
27435 concatenate to handle the most general case: all values variable,
27436 and none identical. */
27437
27438 static void
27439 ix86_expand_vector_init_concat (enum machine_mode mode,
27440 rtx target, rtx *ops, int n)
27441 {
27442 enum machine_mode cmode, hmode = VOIDmode;
27443 rtx first[8], second[4];
27444 rtvec v;
27445 int i, j;
27446
27447 switch (n)
27448 {
27449 case 2:
27450 switch (mode)
27451 {
27452 case V8SImode:
27453 cmode = V4SImode;
27454 break;
27455 case V8SFmode:
27456 cmode = V4SFmode;
27457 break;
27458 case V4DImode:
27459 cmode = V2DImode;
27460 break;
27461 case V4DFmode:
27462 cmode = V2DFmode;
27463 break;
27464 case V4SImode:
27465 cmode = V2SImode;
27466 break;
27467 case V4SFmode:
27468 cmode = V2SFmode;
27469 break;
27470 case V2DImode:
27471 cmode = DImode;
27472 break;
27473 case V2SImode:
27474 cmode = SImode;
27475 break;
27476 case V2DFmode:
27477 cmode = DFmode;
27478 break;
27479 case V2SFmode:
27480 cmode = SFmode;
27481 break;
27482 default:
27483 gcc_unreachable ();
27484 }
27485
27486 if (!register_operand (ops[1], cmode))
27487 ops[1] = force_reg (cmode, ops[1]);
27488 if (!register_operand (ops[0], cmode))
27489 ops[0] = force_reg (cmode, ops[0]);
27490 emit_insn (gen_rtx_SET (VOIDmode, target,
27491 gen_rtx_VEC_CONCAT (mode, ops[0],
27492 ops[1])));
27493 break;
27494
27495 case 4:
27496 switch (mode)
27497 {
27498 case V4DImode:
27499 cmode = V2DImode;
27500 break;
27501 case V4DFmode:
27502 cmode = V2DFmode;
27503 break;
27504 case V4SImode:
27505 cmode = V2SImode;
27506 break;
27507 case V4SFmode:
27508 cmode = V2SFmode;
27509 break;
27510 default:
27511 gcc_unreachable ();
27512 }
27513 goto half;
27514
27515 case 8:
27516 switch (mode)
27517 {
27518 case V8SImode:
27519 cmode = V2SImode;
27520 hmode = V4SImode;
27521 break;
27522 case V8SFmode:
27523 cmode = V2SFmode;
27524 hmode = V4SFmode;
27525 break;
27526 default:
27527 gcc_unreachable ();
27528 }
27529 goto half;
27530
27531 half:
27532 /* FIXME: We process inputs backward to help RA. PR 36222. */
27533 i = n - 1;
27534 j = (n >> 1) - 1;
27535 for (; i > 0; i -= 2, j--)
27536 {
27537 first[j] = gen_reg_rtx (cmode);
27538 v = gen_rtvec (2, ops[i - 1], ops[i]);
27539 ix86_expand_vector_init (false, first[j],
27540 gen_rtx_PARALLEL (cmode, v));
27541 }
27542
27543 n >>= 1;
27544 if (n > 2)
27545 {
27546 gcc_assert (hmode != VOIDmode);
27547 for (i = j = 0; i < n; i += 2, j++)
27548 {
27549 second[j] = gen_reg_rtx (hmode);
27550 ix86_expand_vector_init_concat (hmode, second [j],
27551 &first [i], 2);
27552 }
27553 n >>= 1;
27554 ix86_expand_vector_init_concat (mode, target, second, n);
27555 }
27556 else
27557 ix86_expand_vector_init_concat (mode, target, first, n);
27558 break;
27559
27560 default:
27561 gcc_unreachable ();
27562 }
27563 }
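
/* Example of the recursion above (illustrative only): building a V8SImode
   vector from eight scalars first forms four V2SImode pairs, then two
   V4SImode halves, and finally one VEC_CONCAT of the two halves.  */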
27564
27565 /* A subroutine of ix86_expand_vector_init_general. Use vector
27566 interleave to handle the most general case: all values variable,
27567 and none identical. */
27568
27569 static void
27570 ix86_expand_vector_init_interleave (enum machine_mode mode,
27571 rtx target, rtx *ops, int n)
27572 {
27573 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27574 int i, j;
27575 rtx op0, op1;
27576 rtx (*gen_load_even) (rtx, rtx, rtx);
27577 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27578 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27579
27580 switch (mode)
27581 {
27582 case V8HImode:
27583 gen_load_even = gen_vec_setv8hi;
27584 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27585 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27586 inner_mode = HImode;
27587 first_imode = V4SImode;
27588 second_imode = V2DImode;
27589 third_imode = VOIDmode;
27590 break;
27591 case V16QImode:
27592 gen_load_even = gen_vec_setv16qi;
27593 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27594 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27595 inner_mode = QImode;
27596 first_imode = V8HImode;
27597 second_imode = V4SImode;
27598 third_imode = V2DImode;
27599 break;
27600 default:
27601 gcc_unreachable ();
27602 }
27603
27604 for (i = 0; i < n; i++)
27605 {
27606 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27607 op0 = gen_reg_rtx (SImode);
27608 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27609
27610 /* Insert the SImode value as low element of V4SImode vector. */
27611 op1 = gen_reg_rtx (V4SImode);
27612 op0 = gen_rtx_VEC_MERGE (V4SImode,
27613 gen_rtx_VEC_DUPLICATE (V4SImode,
27614 op0),
27615 CONST0_RTX (V4SImode),
27616 const1_rtx);
27617 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27618
27619 /* Cast the V4SImode vector back to a vector in the original mode. */
27620 op0 = gen_reg_rtx (mode);
27621 emit_move_insn (op0, gen_lowpart (mode, op1));
27622
27623 /* Load even elements into the second position. */
27624 emit_insn ((*gen_load_even) (op0,
27625 force_reg (inner_mode,
27626 ops [i + i + 1]),
27627 const1_rtx));
27628
27629 /* Cast vector to FIRST_IMODE vector. */
27630 ops[i] = gen_reg_rtx (first_imode);
27631 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27632 }
27633
27634 /* Interleave low FIRST_IMODE vectors. */
27635 for (i = j = 0; i < n; i += 2, j++)
27636 {
27637 op0 = gen_reg_rtx (first_imode);
27638 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27639
27640 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27641 ops[j] = gen_reg_rtx (second_imode);
27642 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27643 }
27644
27645 /* Interleave low SECOND_IMODE vectors. */
27646 switch (second_imode)
27647 {
27648 case V4SImode:
27649 for (i = j = 0; i < n / 2; i += 2, j++)
27650 {
27651 op0 = gen_reg_rtx (second_imode);
27652 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27653 ops[i + 1]));
27654
27655 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27656 vector. */
27657 ops[j] = gen_reg_rtx (third_imode);
27658 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27659 }
27660 second_imode = V2DImode;
27661 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27662 /* FALLTHRU */
27663
27664 case V2DImode:
27665 op0 = gen_reg_rtx (second_imode);
27666 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27667 ops[1]));
27668
27669 /* Cast the SECOND_IMODE vector back to a vector in the original
27670 mode. */
27671 emit_insn (gen_rtx_SET (VOIDmode, target,
27672 gen_lowpart (mode, op0)));
27673 break;
27674
27675 default:
27676 gcc_unreachable ();
27677 }
27678 }
27679
27680 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27681 all values variable, and none identical. */
27682
27683 static void
27684 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27685 rtx target, rtx vals)
27686 {
27687 rtx ops[32], op0, op1;
27688 enum machine_mode half_mode = VOIDmode;
27689 int n, i;
27690
27691 switch (mode)
27692 {
27693 case V2SFmode:
27694 case V2SImode:
27695 if (!mmx_ok && !TARGET_SSE)
27696 break;
27697 /* FALLTHRU */
27698
27699 case V8SFmode:
27700 case V8SImode:
27701 case V4DFmode:
27702 case V4DImode:
27703 case V4SFmode:
27704 case V4SImode:
27705 case V2DFmode:
27706 case V2DImode:
27707 n = GET_MODE_NUNITS (mode);
27708 for (i = 0; i < n; i++)
27709 ops[i] = XVECEXP (vals, 0, i);
27710 ix86_expand_vector_init_concat (mode, target, ops, n);
27711 return;
27712
27713 case V32QImode:
27714 half_mode = V16QImode;
27715 goto half;
27716
27717 case V16HImode:
27718 half_mode = V8HImode;
27719 goto half;
27720
27721 half:
27722 n = GET_MODE_NUNITS (mode);
27723 for (i = 0; i < n; i++)
27724 ops[i] = XVECEXP (vals, 0, i);
27725 op0 = gen_reg_rtx (half_mode);
27726 op1 = gen_reg_rtx (half_mode);
27727 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27728 n >> 2);
27729 ix86_expand_vector_init_interleave (half_mode, op1,
27730 &ops [n >> 1], n >> 2);
27731 emit_insn (gen_rtx_SET (VOIDmode, target,
27732 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27733 return;
27734
27735 case V16QImode:
27736 if (!TARGET_SSE4_1)
27737 break;
27738 /* FALLTHRU */
27739
27740 case V8HImode:
27741 if (!TARGET_SSE2)
27742 break;
27743
27744 /* Don't use ix86_expand_vector_init_interleave if we can't
27745 move from GPR to SSE register directly. */
27746 if (!TARGET_INTER_UNIT_MOVES)
27747 break;
27748
27749 n = GET_MODE_NUNITS (mode);
27750 for (i = 0; i < n; i++)
27751 ops[i] = XVECEXP (vals, 0, i);
27752 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27753 return;
27754
27755 case V4HImode:
27756 case V8QImode:
27757 break;
27758
27759 default:
27760 gcc_unreachable ();
27761 }
27762
27763 {
27764 int i, j, n_elts, n_words, n_elt_per_word;
27765 enum machine_mode inner_mode;
27766 rtx words[4], shift;
27767
27768 inner_mode = GET_MODE_INNER (mode);
27769 n_elts = GET_MODE_NUNITS (mode);
27770 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27771 n_elt_per_word = n_elts / n_words;
27772 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27773
27774 for (i = 0; i < n_words; ++i)
27775 {
27776 rtx word = NULL_RTX;
27777
27778 for (j = 0; j < n_elt_per_word; ++j)
27779 {
27780 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27781 elt = convert_modes (word_mode, inner_mode, elt, true);
27782
27783 if (j == 0)
27784 word = elt;
27785 else
27786 {
27787 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27788 word, 1, OPTAB_LIB_WIDEN);
27789 word = expand_simple_binop (word_mode, IOR, word, elt,
27790 word, 1, OPTAB_LIB_WIDEN);
27791 }
27792 }
27793
27794 words[i] = word;
27795 }
27796
27797 if (n_words == 1)
27798 emit_move_insn (target, gen_lowpart (mode, words[0]));
27799 else if (n_words == 2)
27800 {
27801 rtx tmp = gen_reg_rtx (mode);
27802 emit_clobber (tmp);
27803 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27804 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27805 emit_move_insn (target, tmp);
27806 }
27807 else if (n_words == 4)
27808 {
27809 rtx tmp = gen_reg_rtx (V4SImode);
27810 gcc_assert (word_mode == SImode);
27811 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27812 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27813 emit_move_insn (target, gen_lowpart (mode, tmp));
27814 }
27815 else
27816 gcc_unreachable ();
27817 }
27818 }
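
/* Standalone sketch (not compiled) of the word-packing fallback at the end
   of the function above, for a V4HImode build on a 32-bit target: each
   SImode word collects two HImode elements, highest element first, so the
   lowest element ends up in the low bits.  */
#if 0
#include <stdint.h>

static void
pack_v4hi_words (uint32_t words[2], const uint16_t e[4])
{
  words[0] = ((uint32_t) e[1] << 16) | e[0];
  words[1] = ((uint32_t) e[3] << 16) | e[2];
}
#endif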
27819
27820 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27821 instructions unless MMX_OK is true. */
27822
27823 void
27824 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27825 {
27826 enum machine_mode mode = GET_MODE (target);
27827 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27828 int n_elts = GET_MODE_NUNITS (mode);
27829 int n_var = 0, one_var = -1;
27830 bool all_same = true, all_const_zero = true;
27831 int i;
27832 rtx x;
27833
27834 for (i = 0; i < n_elts; ++i)
27835 {
27836 x = XVECEXP (vals, 0, i);
27837 if (!(CONST_INT_P (x)
27838 || GET_CODE (x) == CONST_DOUBLE
27839 || GET_CODE (x) == CONST_FIXED))
27840 n_var++, one_var = i;
27841 else if (x != CONST0_RTX (inner_mode))
27842 all_const_zero = false;
27843 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27844 all_same = false;
27845 }
27846
27847 /* Constants are best loaded from the constant pool. */
27848 if (n_var == 0)
27849 {
27850 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27851 return;
27852 }
27853
27854 /* If all values are identical, broadcast the value. */
27855 if (all_same
27856 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27857 XVECEXP (vals, 0, 0)))
27858 return;
27859
27860 /* Values where only one field is non-constant are best loaded from
27861 the pool and overwritten via move later. */
27862 if (n_var == 1)
27863 {
27864 if (all_const_zero
27865 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27866 XVECEXP (vals, 0, one_var),
27867 one_var))
27868 return;
27869
27870 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27871 return;
27872 }
27873
27874 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27875 }
27876
27877 void
27878 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27879 {
27880 enum machine_mode mode = GET_MODE (target);
27881 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27882 enum machine_mode half_mode;
27883 bool use_vec_merge = false;
27884 rtx tmp;
27885 static rtx (*gen_extract[6][2]) (rtx, rtx)
27886 = {
27887 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27888 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27889 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27890 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27891 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27892 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27893 };
27894 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27895 = {
27896 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27897 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27898 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27899 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27900 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27901 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27902 };
27903 int i, j, n;
27904
27905 switch (mode)
27906 {
27907 case V2SFmode:
27908 case V2SImode:
27909 if (mmx_ok)
27910 {
27911 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27912 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27913 if (elt == 0)
27914 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27915 else
27916 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27917 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27918 return;
27919 }
27920 break;
27921
27922 case V2DImode:
27923 use_vec_merge = TARGET_SSE4_1;
27924 if (use_vec_merge)
27925 break;
27926
27927 case V2DFmode:
27928 {
27929 rtx op0, op1;
27930
27931 /* For the two element vectors, we implement a VEC_CONCAT with
27932 the extraction of the other element. */
27933
27934 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27935 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27936
27937 if (elt == 0)
27938 op0 = val, op1 = tmp;
27939 else
27940 op0 = tmp, op1 = val;
27941
27942 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27943 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27944 }
27945 return;
27946
27947 case V4SFmode:
27948 use_vec_merge = TARGET_SSE4_1;
27949 if (use_vec_merge)
27950 break;
27951
27952 switch (elt)
27953 {
27954 case 0:
27955 use_vec_merge = true;
27956 break;
27957
27958 case 1:
27959 /* tmp = target = A B C D */
27960 tmp = copy_to_reg (target);
27961 /* target = A A B B */
27962 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27963 /* target = X A B B */
27964 ix86_expand_vector_set (false, target, val, 0);
27965 /* target = A X C D */
27966 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27967 const1_rtx, const0_rtx,
27968 GEN_INT (2+4), GEN_INT (3+4)));
27969 return;
27970
27971 case 2:
27972 /* tmp = target = A B C D */
27973 tmp = copy_to_reg (target);
27974 /* tmp = X B C D */
27975 ix86_expand_vector_set (false, tmp, val, 0);
27976 /* target = A B X D */
27977 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27978 const0_rtx, const1_rtx,
27979 GEN_INT (0+4), GEN_INT (3+4)));
27980 return;
27981
27982 case 3:
27983 /* tmp = target = A B C D */
27984 tmp = copy_to_reg (target);
27985 /* tmp = X B C D */
27986 ix86_expand_vector_set (false, tmp, val, 0);
27987 /* target = A B X D */
27988 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27989 const0_rtx, const1_rtx,
27990 GEN_INT (2+4), GEN_INT (0+4)));
27991 return;
27992
27993 default:
27994 gcc_unreachable ();
27995 }
27996 break;
27997
27998 case V4SImode:
27999 use_vec_merge = TARGET_SSE4_1;
28000 if (use_vec_merge)
28001 break;
28002
28003 /* Element 0 handled by vec_merge below. */
28004 if (elt == 0)
28005 {
28006 use_vec_merge = true;
28007 break;
28008 }
28009
28010 if (TARGET_SSE2)
28011 {
28012 /* With SSE2, use integer shuffles to swap element 0 and ELT,
28013 store into element 0, then shuffle them back. */
28014
28015 rtx order[4];
28016
28017 order[0] = GEN_INT (elt);
28018 order[1] = const1_rtx;
28019 order[2] = const2_rtx;
28020 order[3] = GEN_INT (3);
28021 order[elt] = const0_rtx;
28022
28023 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28024 order[1], order[2], order[3]));
28025
28026 ix86_expand_vector_set (false, target, val, 0);
28027
28028 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28029 order[1], order[2], order[3]));
28030 }
28031 else
28032 {
28033 /* For SSE1, we have to reuse the V4SF code. */
28034 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28035 gen_lowpart (SFmode, val), elt);
28036 }
28037 return;
28038
28039 case V8HImode:
28040 use_vec_merge = TARGET_SSE2;
28041 break;
28042 case V4HImode:
28043 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28044 break;
28045
28046 case V16QImode:
28047 use_vec_merge = TARGET_SSE4_1;
28048 break;
28049
28050 case V8QImode:
28051 break;
28052
28053 case V32QImode:
28054 half_mode = V16QImode;
28055 j = 0;
28056 n = 16;
28057 goto half;
28058
28059 case V16HImode:
28060 half_mode = V8HImode;
28061 j = 1;
28062 n = 8;
28063 goto half;
28064
28065 case V8SImode:
28066 half_mode = V4SImode;
28067 j = 2;
28068 n = 4;
28069 goto half;
28070
28071 case V4DImode:
28072 half_mode = V2DImode;
28073 j = 3;
28074 n = 2;
28075 goto half;
28076
28077 case V8SFmode:
28078 half_mode = V4SFmode;
28079 j = 4;
28080 n = 4;
28081 goto half;
28082
28083 case V4DFmode:
28084 half_mode = V2DFmode;
28085 j = 5;
28086 n = 2;
28087 goto half;
28088
28089 half:
28090 /* Compute offset. */
28091 i = elt / n;
28092 elt %= n;
28093
28094 gcc_assert (i <= 1);
28095
28096 /* Extract the half. */
28097 tmp = gen_reg_rtx (half_mode);
28098 emit_insn ((*gen_extract[j][i]) (tmp, target));
28099
28100 /* Put val in tmp at elt. */
28101 ix86_expand_vector_set (false, tmp, val, elt);
28102
28103 /* Put it back. */
28104 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28105 return;
28106
28107 default:
28108 break;
28109 }
28110
28111 if (use_vec_merge)
28112 {
28113 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28114 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28115 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28116 }
28117 else
28118 {
28119 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28120
28121 emit_move_insn (mem, target);
28122
28123 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28124 emit_move_insn (tmp, val);
28125
28126 emit_move_insn (target, mem);
28127 }
28128 }
28129
28130 void
28131 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28132 {
28133 enum machine_mode mode = GET_MODE (vec);
28134 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28135 bool use_vec_extr = false;
28136 rtx tmp;
28137
28138 switch (mode)
28139 {
28140 case V2SImode:
28141 case V2SFmode:
28142 if (!mmx_ok)
28143 break;
28144 /* FALLTHRU */
28145
28146 case V2DFmode:
28147 case V2DImode:
28148 use_vec_extr = true;
28149 break;
28150
28151 case V4SFmode:
28152 use_vec_extr = TARGET_SSE4_1;
28153 if (use_vec_extr)
28154 break;
28155
28156 switch (elt)
28157 {
28158 case 0:
28159 tmp = vec;
28160 break;
28161
28162 case 1:
28163 case 3:
28164 tmp = gen_reg_rtx (mode);
28165 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28166 GEN_INT (elt), GEN_INT (elt),
28167 GEN_INT (elt+4), GEN_INT (elt+4)));
28168 break;
28169
28170 case 2:
28171 tmp = gen_reg_rtx (mode);
28172 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28173 break;
28174
28175 default:
28176 gcc_unreachable ();
28177 }
28178 vec = tmp;
28179 use_vec_extr = true;
28180 elt = 0;
28181 break;
28182
28183 case V4SImode:
28184 use_vec_extr = TARGET_SSE4_1;
28185 if (use_vec_extr)
28186 break;
28187
28188 if (TARGET_SSE2)
28189 {
28190 switch (elt)
28191 {
28192 case 0:
28193 tmp = vec;
28194 break;
28195
28196 case 1:
28197 case 3:
28198 tmp = gen_reg_rtx (mode);
28199 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28200 GEN_INT (elt), GEN_INT (elt),
28201 GEN_INT (elt), GEN_INT (elt)));
28202 break;
28203
28204 case 2:
28205 tmp = gen_reg_rtx (mode);
28206 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28207 break;
28208
28209 default:
28210 gcc_unreachable ();
28211 }
28212 vec = tmp;
28213 use_vec_extr = true;
28214 elt = 0;
28215 }
28216 else
28217 {
28218 /* For SSE1, we have to reuse the V4SF code. */
28219 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28220 gen_lowpart (V4SFmode, vec), elt);
28221 return;
28222 }
28223 break;
28224
28225 case V8HImode:
28226 use_vec_extr = TARGET_SSE2;
28227 break;
28228 case V4HImode:
28229 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28230 break;
28231
28232 case V16QImode:
28233 use_vec_extr = TARGET_SSE4_1;
28234 break;
28235
28236 case V8QImode:
28237 /* ??? Could extract the appropriate HImode element and shift. */
28238 default:
28239 break;
28240 }
28241
28242 if (use_vec_extr)
28243 {
28244 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28245 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28246
28247 /* Let the rtl optimizers know about the zero extension performed. */
28248 if (inner_mode == QImode || inner_mode == HImode)
28249 {
28250 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28251 target = gen_lowpart (SImode, target);
28252 }
28253
28254 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28255 }
28256 else
28257 {
28258 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28259
28260 emit_move_insn (mem, vec);
28261
28262 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28263 emit_move_insn (target, tmp);
28264 }
28265 }
28266
28267 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28268 pattern to reduce; DEST is the destination; IN is the input vector. */
28269
28270 void
28271 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28272 {
28273 rtx tmp1, tmp2, tmp3;
28274
28275 tmp1 = gen_reg_rtx (V4SFmode);
28276 tmp2 = gen_reg_rtx (V4SFmode);
28277 tmp3 = gen_reg_rtx (V4SFmode);
28278
28279 emit_insn (gen_sse_movhlps (tmp1, in, in));
28280 emit_insn (fn (tmp2, tmp1, in));
28281
28282 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28283 const1_rtx, const1_rtx,
28284 GEN_INT (1+4), GEN_INT (1+4)));
28285 emit_insn (fn (dest, tmp2, tmp3));
28286 }
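
/* Standalone sketch (not compiled) of the reduction above in scalar form;
   the argument order follows the operand order of the emitted insns, which
   only matters for a non-commutative FN.  Only element 0 of DEST carries
   the final result.  */
#if 0
static float
reduc_v4sf_sketch (float (*fn) (float, float), const float in[4])
{
  float p0 = fn (in[2], in[0]);	/* element 0 of tmp2 after movhlps + fn  */
  float p1 = fn (in[3], in[1]);	/* element 1 of tmp2  */
  return fn (p0, p1);		/* element 0 of dest after shufps + fn  */
}
#endif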
28287 \f
28288 /* Target hook for scalar_mode_supported_p. */
28289 static bool
28290 ix86_scalar_mode_supported_p (enum machine_mode mode)
28291 {
28292 if (DECIMAL_FLOAT_MODE_P (mode))
28293 return default_decimal_float_supported_p ();
28294 else if (mode == TFmode)
28295 return true;
28296 else
28297 return default_scalar_mode_supported_p (mode);
28298 }
28299
28300 /* Implements target hook vector_mode_supported_p. */
28301 static bool
28302 ix86_vector_mode_supported_p (enum machine_mode mode)
28303 {
28304 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28305 return true;
28306 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28307 return true;
28308 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28309 return true;
28310 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28311 return true;
28312 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28313 return true;
28314 return false;
28315 }
28316
28317 /* Target hook for c_mode_for_suffix. */
28318 static enum machine_mode
28319 ix86_c_mode_for_suffix (char suffix)
28320 {
28321 if (suffix == 'q')
28322 return TFmode;
28323 if (suffix == 'w')
28324 return XFmode;
28325
28326 return VOIDmode;
28327 }
28328
28329 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28330
28331 We do this in the new i386 backend to maintain source compatibility
28332 with the old cc0-based compiler. */
28333
28334 static tree
28335 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28336 tree inputs ATTRIBUTE_UNUSED,
28337 tree clobbers)
28338 {
28339 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28340 clobbers);
28341 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28342 clobbers);
28343 return clobbers;
28344 }
28345
28346 /* Implements target vector targetm.asm.encode_section_info. This
28347 is not used by NetWare. */
28348
28349 static void ATTRIBUTE_UNUSED
28350 ix86_encode_section_info (tree decl, rtx rtl, int first)
28351 {
28352 default_encode_section_info (decl, rtl, first);
28353
28354 if (TREE_CODE (decl) == VAR_DECL
28355 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28356 && ix86_in_large_data_p (decl))
28357 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28358 }
28359
28360 /* Worker function for REVERSE_CONDITION. */
28361
28362 enum rtx_code
28363 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28364 {
28365 return (mode != CCFPmode && mode != CCFPUmode
28366 ? reverse_condition (code)
28367 : reverse_condition_maybe_unordered (code));
28368 }
28369
28370 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28371 to OPERANDS[0]. */
28372
28373 const char *
28374 output_387_reg_move (rtx insn, rtx *operands)
28375 {
28376 if (REG_P (operands[0]))
28377 {
28378 if (REG_P (operands[1])
28379 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28380 {
28381 if (REGNO (operands[0]) == FIRST_STACK_REG)
28382 return output_387_ffreep (operands, 0);
28383 return "fstp\t%y0";
28384 }
28385 if (STACK_TOP_P (operands[0]))
28386 return "fld%Z1\t%y1";
28387 return "fst\t%y0";
28388 }
28389 else if (MEM_P (operands[0]))
28390 {
28391 gcc_assert (REG_P (operands[1]));
28392 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28393 return "fstp%Z0\t%y0";
28394 else
28395 {
28396 /* There is no non-popping store to memory for XFmode.
28397 So if we need one, follow the store with a load. */
28398 if (GET_MODE (operands[0]) == XFmode)
28399 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28400 else
28401 return "fst%Z0\t%y0";
28402 }
28403 }
28404 else
28405 gcc_unreachable();
28406 }
28407
28408 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
28409 the FP status register is set. */
28410
28411 void
28412 ix86_emit_fp_unordered_jump (rtx label)
28413 {
28414 rtx reg = gen_reg_rtx (HImode);
28415 rtx temp;
28416
28417 emit_insn (gen_x86_fnstsw_1 (reg));
28418
28419 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28420 {
28421 emit_insn (gen_x86_sahf_1 (reg));
28422
28423 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28424 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28425 }
28426 else
28427 {
28428 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28429
28430 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28431 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28432 }
28433
28434 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28435 gen_rtx_LABEL_REF (VOIDmode, label),
28436 pc_rtx);
28437 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28438
28439 emit_jump_insn (temp);
28440 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28441 }
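
/* Note on the two paths above (explanatory): FNSTSW leaves the FPU status
   word in %ax, so C2 is bit 2 of %ah and the non-SAHF path tests it directly
   with $0x04.  SAHF copies C0/C2/C3 into CF/PF/ZF instead, after which the
   UNORDERED (parity-based) condition tests the same bit.  */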
28442
28443 /* Output code to perform a log1p XFmode calculation. */
28444
28445 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28446 {
28447 rtx label1 = gen_label_rtx ();
28448 rtx label2 = gen_label_rtx ();
28449
28450 rtx tmp = gen_reg_rtx (XFmode);
28451 rtx tmp2 = gen_reg_rtx (XFmode);
28452 rtx test;
28453
28454 emit_insn (gen_absxf2 (tmp, op1));
28455 test = gen_rtx_GE (VOIDmode, tmp,
28456 CONST_DOUBLE_FROM_REAL_VALUE (
28457 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28458 XFmode));
28459 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28460
28461 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28462 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28463 emit_jump (label2);
28464
28465 emit_label (label1);
28466 emit_move_insn (tmp, CONST1_RTX (XFmode));
28467 emit_insn (gen_addxf3 (tmp, op1, tmp));
28468 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28469 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28470
28471 emit_label (label2);
28472 }
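
/* Note on the threshold above (explanatory): 0.29289... is 1 - sqrt(2)/2,
   the bound up to which FYL2XP1 is specified; inputs of larger magnitude
   take the label1 path and use FYL2X on 1.0 + op1 instead.  */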
28473
28474 /* Output code to perform a Newton-Raphson approximation of a single precision
28475 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28476
28477 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28478 {
28479 rtx x0, x1, e0, e1, two;
28480
28481 x0 = gen_reg_rtx (mode);
28482 e0 = gen_reg_rtx (mode);
28483 e1 = gen_reg_rtx (mode);
28484 x1 = gen_reg_rtx (mode);
28485
28486 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28487
28488 if (VECTOR_MODE_P (mode))
28489 two = ix86_build_const_vector (SFmode, true, two);
28490
28491 two = force_reg (mode, two);
28492
28493 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28494
28495 /* x0 = rcp(b) estimate */
28496 emit_insn (gen_rtx_SET (VOIDmode, x0,
28497 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28498 UNSPEC_RCP)));
28499 /* e0 = x0 * a */
28500 emit_insn (gen_rtx_SET (VOIDmode, e0,
28501 gen_rtx_MULT (mode, x0, a)));
28502 /* e1 = x0 * b */
28503 emit_insn (gen_rtx_SET (VOIDmode, e1,
28504 gen_rtx_MULT (mode, x0, b)));
28505 /* x1 = 2. - e1 */
28506 emit_insn (gen_rtx_SET (VOIDmode, x1,
28507 gen_rtx_MINUS (mode, two, e1)));
28508 /* res = e0 * x1 */
28509 emit_insn (gen_rtx_SET (VOIDmode, res,
28510 gen_rtx_MULT (mode, e0, x1)));
28511 }
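
/* Standalone sketch (not compiled) of the sequence emitted above, in scalar
   form.  APPROX_RCP is a hypothetical stand-in for the RCPSS/RCPPS estimate
   (roughly 12 bits of precision); the rest is one Newton-Raphson step.  */
#if 0
static float
swdiv_sketch (float a, float b, float (*approx_rcp) (float))
{
  float x0 = approx_rcp (b);	/* x0 ~= 1/b  */
  float e0 = a * x0;		/* first-cut quotient  */
  float e1 = b * x0;		/* ~= 1; its error measures the estimate's error  */
  float x1 = 2.0f - e1;
  return e0 * x1;		/* a/b ~= a * rcp(b) * (2 - b * rcp(b))  */
}
#endif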
28512
28513 /* Output code to perform a Newton-Raphson approximation of a
28514 single precision floating point [reciprocal] square root. */
28515
28516 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28517 bool recip)
28518 {
28519 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28520 REAL_VALUE_TYPE r;
28521
28522 x0 = gen_reg_rtx (mode);
28523 e0 = gen_reg_rtx (mode);
28524 e1 = gen_reg_rtx (mode);
28525 e2 = gen_reg_rtx (mode);
28526 e3 = gen_reg_rtx (mode);
28527
28528 real_from_integer (&r, VOIDmode, -3, -1, 0);
28529 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28530
28531 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28532 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28533
28534 if (VECTOR_MODE_P (mode))
28535 {
28536 mthree = ix86_build_const_vector (SFmode, true, mthree);
28537 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28538 }
28539
28540 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28541 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28542
28543 /* x0 = rsqrt(a) estimate */
28544 emit_insn (gen_rtx_SET (VOIDmode, x0,
28545 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28546 UNSPEC_RSQRT)));
28547
28548 /* If a == 0.0, filter out the infinite estimate to prevent a NaN for sqrt (0.0). */
28549 if (!recip)
28550 {
28551 rtx zero, mask;
28552
28553 zero = gen_reg_rtx (mode);
28554 mask = gen_reg_rtx (mode);
28555
28556 zero = force_reg (mode, CONST0_RTX(mode));
28557 emit_insn (gen_rtx_SET (VOIDmode, mask,
28558 gen_rtx_NE (mode, zero, a)));
28559
28560 emit_insn (gen_rtx_SET (VOIDmode, x0,
28561 gen_rtx_AND (mode, x0, mask)));
28562 }
28563
28564 /* e0 = x0 * a */
28565 emit_insn (gen_rtx_SET (VOIDmode, e0,
28566 gen_rtx_MULT (mode, x0, a)));
28567 /* e1 = e0 * x0 */
28568 emit_insn (gen_rtx_SET (VOIDmode, e1,
28569 gen_rtx_MULT (mode, e0, x0)));
28570
28571 /* e2 = e1 - 3. */
28572 mthree = force_reg (mode, mthree);
28573 emit_insn (gen_rtx_SET (VOIDmode, e2,
28574 gen_rtx_PLUS (mode, e1, mthree)));
28575
28576 mhalf = force_reg (mode, mhalf);
28577 if (recip)
28578 /* e3 = -.5 * x0 */
28579 emit_insn (gen_rtx_SET (VOIDmode, e3,
28580 gen_rtx_MULT (mode, x0, mhalf)));
28581 else
28582 /* e3 = -.5 * e0 */
28583 emit_insn (gen_rtx_SET (VOIDmode, e3,
28584 gen_rtx_MULT (mode, e0, mhalf)));
28585 /* ret = e2 * e3 */
28586 emit_insn (gen_rtx_SET (VOIDmode, res,
28587 gen_rtx_MULT (mode, e2, e3)));
28588 }
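
/* Standalone sketch (not compiled) of the sequence emitted above, in scalar
   form.  APPROX_RSQRT is a hypothetical stand-in for the RSQRTSS/RSQRTPS
   estimate; the zero-input masking done above for the sqrt case is omitted
   here.  */
#if 0
static float
swsqrt_sketch (float a, int recip, float (*approx_rsqrt) (float))
{
  float x0 = approx_rsqrt (a);	/* x0 ~= 1/sqrt(a)  */
  float e0 = a * x0;		/* ~= sqrt(a)  */
  float e1 = e0 * x0;		/* ~= 1  */
  float e2 = e1 - 3.0f;
  float e3 = (recip ? x0 : e0) * -0.5f;
  return e2 * e3;		/* refined 1/sqrt(a) or sqrt(a)  */
}
#endif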
28589
28590 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28591
28592 static void ATTRIBUTE_UNUSED
28593 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28594 tree decl)
28595 {
28596 /* With Binutils 2.15, the "@unwind" marker must be specified on
28597 every occurrence of the ".eh_frame" section, not just the first
28598 one. */
28599 if (TARGET_64BIT
28600 && strcmp (name, ".eh_frame") == 0)
28601 {
28602 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28603 flags & SECTION_WRITE ? "aw" : "a");
28604 return;
28605 }
28606 default_elf_asm_named_section (name, flags, decl);
28607 }
28608
28609 /* Return the mangling of TYPE if it is an extended fundamental type. */
28610
28611 static const char *
28612 ix86_mangle_type (const_tree type)
28613 {
28614 type = TYPE_MAIN_VARIANT (type);
28615
28616 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28617 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28618 return NULL;
28619
28620 switch (TYPE_MODE (type))
28621 {
28622 case TFmode:
28623 /* __float128 is "g". */
28624 return "g";
28625 case XFmode:
28626 /* "long double" or __float80 is "e". */
28627 return "e";
28628 default:
28629 return NULL;
28630 }
28631 }
28632
28633 /* For 32-bit code we can save PIC register setup by using
28634 __stack_chk_fail_local hidden function instead of calling
28635 __stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
28636 register, so it is better to call __stack_chk_fail directly. */
28637
28638 static tree
28639 ix86_stack_protect_fail (void)
28640 {
28641 return TARGET_64BIT
28642 ? default_external_stack_protect_fail ()
28643 : default_hidden_stack_protect_fail ();
28644 }
28645
28646 /* Select a format to encode pointers in exception handling data. CODE
28647 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28648 true if the symbol may be affected by dynamic relocations.
28649
28650 ??? All x86 object file formats are capable of representing this.
28651 After all, the relocation needed is the same as for the call insn.
28652 Whether or not a particular assembler allows us to enter such, I
28653 guess we'll have to see. */
28654 int
28655 asm_preferred_eh_data_format (int code, int global)
28656 {
28657 if (flag_pic)
28658 {
28659 int type = DW_EH_PE_sdata8;
28660 if (!TARGET_64BIT
28661 || ix86_cmodel == CM_SMALL_PIC
28662 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28663 type = DW_EH_PE_sdata4;
28664 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28665 }
28666 if (ix86_cmodel == CM_SMALL
28667 || (ix86_cmodel == CM_MEDIUM && code))
28668 return DW_EH_PE_udata4;
28669 return DW_EH_PE_absptr;
28670 }
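
/* For instance (reading the code above): 32-bit -fpic code uses
   DW_EH_PE_pcrel | DW_EH_PE_sdata4, with DW_EH_PE_indirect added for global
   symbols; 64-bit non-PIC small-model code uses DW_EH_PE_udata4; and the
   large code model without PIC falls back to DW_EH_PE_absptr.  */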
28671 \f
28672 /* Expand copysign: give the (assumed non-negative) ABS_VALUE the sign bit
28673 of SIGN and store the result in RESULT.  If MASK is non-null, it is the
28674 mask that was used to clear the sign bit (as produced by ix86_expand_sse_fabs).  */
28675 static void
28676 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28677 {
28678 enum machine_mode mode = GET_MODE (sign);
28679 rtx sgn = gen_reg_rtx (mode);
28680 if (mask == NULL_RTX)
28681 {
28682 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28683 if (!VECTOR_MODE_P (mode))
28684 {
28685 /* We need to generate a scalar mode mask in this case. */
28686 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28687 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28688 mask = gen_reg_rtx (mode);
28689 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28690 }
28691 }
28692 else
28693 mask = gen_rtx_NOT (mode, mask);
28694 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28695 gen_rtx_AND (mode, mask, sign)));
28696 emit_insn (gen_rtx_SET (VOIDmode, result,
28697 gen_rtx_IOR (mode, abs_value, sgn)));
28698 }
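
/* The and/ior pair above is the usual bit-level copysign idiom.  A rough
   scalar equivalent, assuming IEEE-754 binary32 and that ABS_VALUE already
   has a clear sign bit (bits_of/float_of are illustrative helpers only):

     uint32_t a = bits_of (abs_value), s = bits_of (sign);
     uint32_t r = a | (s & 0x80000000u);   // keep only SIGN's sign bit
     result = float_of (r);                                           */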
28699
28700 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28701 mask for masking out the sign-bit is stored in *SMASK, if that is
28702 non-null. */
28703 static rtx
28704 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28705 {
28706 enum machine_mode mode = GET_MODE (op0);
28707 rtx xa, mask;
28708
28709 xa = gen_reg_rtx (mode);
28710 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28711 if (!VECTOR_MODE_P (mode))
28712 {
28713 /* We need to generate a scalar mode mask in this case. */
28714 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28715 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28716 mask = gen_reg_rtx (mode);
28717 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28718 }
28719 emit_insn (gen_rtx_SET (VOIDmode, xa,
28720 gen_rtx_AND (mode, op0, mask)));
28721
28722 if (smask)
28723 *smask = mask;
28724
28725 return xa;
28726 }
28727
28728 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28729 swapping the operands if SWAP_OPERANDS is true. The expanded
28730 code is a forward jump to a newly created label in case the
28731 comparison is true. The generated label rtx is returned. */
28732 static rtx
28733 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28734 bool swap_operands)
28735 {
28736 rtx label, tmp;
28737
28738 if (swap_operands)
28739 {
28740 tmp = op0;
28741 op0 = op1;
28742 op1 = tmp;
28743 }
28744
28745 label = gen_label_rtx ();
28746 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28747 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28748 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28749 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28750 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28751 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28752 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28753 JUMP_LABEL (tmp) = label;
28754
28755 return label;
28756 }
28757
28758 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28759 using comparison code CODE. Operands are swapped for the comparison if
28760 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28761 static rtx
28762 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28763 bool swap_operands)
28764 {
28765 enum machine_mode mode = GET_MODE (op0);
28766 rtx mask = gen_reg_rtx (mode);
28767
28768 if (swap_operands)
28769 {
28770 rtx tmp = op0;
28771 op0 = op1;
28772 op1 = tmp;
28773 }
28774
28775 if (mode == DFmode)
28776 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28777 gen_rtx_fmt_ee (code, mode, op0, op1)));
28778 else
28779 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28780 gen_rtx_fmt_ee (code, mode, op0, op1)));
28781
28782 return mask;
28783 }
28784
28785 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28786 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28787 static rtx
28788 ix86_gen_TWO52 (enum machine_mode mode)
28789 {
28790 REAL_VALUE_TYPE TWO52r;
28791 rtx TWO52;
28792
28793 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28794 TWO52 = const_double_from_real_value (TWO52r, mode);
28795 TWO52 = force_reg (mode, TWO52);
28796
28797 return TWO52;
28798 }
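
/* TWO52 is 2**52 for DFmode and 2**23 for SFmode: the magnitude at which
   the mantissa no longer has room for a fractional bit, so every
   representable value at or above it is already an integer.  The expanders
   below use the classic trick of rounding a non-negative value to an
   integer in the current (round-to-nearest-even) mode:

     xa = (xa + TWO52) - TWO52;   // e.g. 2.7 -> 2**52 + 3 -> 3.0

   which is why they first take fabs and bail out early when xa >= TWO52.  */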
28799
28800 /* Expand SSE sequence for computing lround from OP1 storing
28801 into OP0. */
28802 void
28803 ix86_expand_lround (rtx op0, rtx op1)
28804 {
28805 /* C code for the stuff we're doing below:
28806 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28807 return (long)tmp;
28808 */
28809 enum machine_mode mode = GET_MODE (op1);
28810 const struct real_format *fmt;
28811 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28812 rtx adj;
28813
28814 /* load nextafter (0.5, 0.0) */
28815 fmt = REAL_MODE_FORMAT (mode);
28816 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28817 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28818
28819 /* adj = copysign (0.5, op1) */
28820 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28821 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28822
28823 /* adj = op1 + adj */
28824 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28825
28826 /* op0 = (imode)adj */
28827 expand_fix (op0, adj, 0);
28828 }
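
/* Why nextafter (0.5, 0.0) rather than 0.5: for the largest double below
   0.5 (x == 0.5 - 2**-54), x + 0.5 rounds up to exactly 1.0 under
   round-to-nearest-even, so (long) (x + 0.5) would give 1 instead of 0.
   Using the predecessor of 0.5 as the addend keeps such values below 1.0.
   A scalar sketch of the sequence above (illustration only):

     long lround_sse (double x)
     {
       double adj = nextafter (0.5, 0.0);       // 0.5 - 2**-54
       return (long) (x + copysign (adj, x));   // truncating conversion
     }  */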
28829
28830 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
28831 into OP0.  */
28832 void
28833 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28834 {
28835 /* C code for the stuff we're doing below (for do_floor):
28836 xi = (long)op1;
28837 xi -= (double)xi > op1 ? 1 : 0;
28838 return xi;
28839 */
28840 enum machine_mode fmode = GET_MODE (op1);
28841 enum machine_mode imode = GET_MODE (op0);
28842 rtx ireg, freg, label, tmp;
28843
28844 /* reg = (long)op1 */
28845 ireg = gen_reg_rtx (imode);
28846 expand_fix (ireg, op1, 0);
28847
28848 /* freg = (double)reg */
28849 freg = gen_reg_rtx (fmode);
28850 expand_float (freg, ireg, 0);
28851
28852 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28853 label = ix86_expand_sse_compare_and_jump (UNLE,
28854 freg, op1, !do_floor);
28855 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28856 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28857 emit_move_insn (ireg, tmp);
28858
28859 emit_label (label);
28860 LABEL_NUSES (label) = 1;
28861
28862 emit_move_insn (op0, ireg);
28863 }
28864
28865 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28866 result in OPERAND0. */
28867 void
28868 ix86_expand_rint (rtx operand0, rtx operand1)
28869 {
28870 /* C code for the stuff we're doing below:
28871 xa = fabs (operand1);
28872 if (!isless (xa, 2**52))
28873 return operand1;
28874 xa = xa + 2**52 - 2**52;
28875 return copysign (xa, operand1);
28876 */
28877 enum machine_mode mode = GET_MODE (operand0);
28878 rtx res, xa, label, TWO52, mask;
28879
28880 res = gen_reg_rtx (mode);
28881 emit_move_insn (res, operand1);
28882
28883 /* xa = abs (operand1) */
28884 xa = ix86_expand_sse_fabs (res, &mask);
28885
28886 /* if (!isless (xa, TWO52)) goto label; */
28887 TWO52 = ix86_gen_TWO52 (mode);
28888 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28889
28890 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28891 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28892
28893 ix86_sse_copysign_to_positive (res, xa, res, mask);
28894
28895 emit_label (label);
28896 LABEL_NUSES (label) = 1;
28897
28898 emit_move_insn (operand0, res);
28899 }
28900
28901 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28902 into OPERAND0. */
28903 void
28904 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28905 {
28906 /* C code for the stuff we expand below.
28907 double xa = fabs (x), x2;
28908 if (!isless (xa, TWO52))
28909 return x;
28910 xa = xa + TWO52 - TWO52;
28911 x2 = copysign (xa, x);
28912 Compensate. Floor:
28913 if (x2 > x)
28914 x2 -= 1;
28915 Compensate. Ceil:
28916 if (x2 < x)
28917 x2 -= -1;
28918 return x2;
28919 */
28920 enum machine_mode mode = GET_MODE (operand0);
28921 rtx xa, TWO52, tmp, label, one, res, mask;
28922
28923 TWO52 = ix86_gen_TWO52 (mode);
28924
28925 /* Temporary for holding the result, initialized to the input
28926 operand to ease control flow. */
28927 res = gen_reg_rtx (mode);
28928 emit_move_insn (res, operand1);
28929
28930 /* xa = abs (operand1) */
28931 xa = ix86_expand_sse_fabs (res, &mask);
28932
28933 /* if (!isless (xa, TWO52)) goto label; */
28934 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28935
28936 /* xa = xa + TWO52 - TWO52; */
28937 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28938 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28939
28940 /* xa = copysign (xa, operand1) */
28941 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28942
28943 /* generate 1.0 or -1.0 */
28944 one = force_reg (mode,
28945 const_double_from_real_value (do_floor
28946 ? dconst1 : dconstm1, mode));
28947
28948 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28949 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28950 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28951 gen_rtx_AND (mode, one, tmp)));
28952 /* We always need to subtract here to preserve signed zero. */
28953 tmp = expand_simple_binop (mode, MINUS,
28954 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28955 emit_move_insn (res, tmp);
28956
28957 emit_label (label);
28958 LABEL_NUSES (label) = 1;
28959
28960 emit_move_insn (operand0, res);
28961 }
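
/* The compensation above is branchless: the UNGT compare yields an all-ones
   or all-zero mask, which is ANDed with 1.0 (floor) or -1.0 (ceil) and then
   subtracted.  In pseudo-C for the floor case:

     mask = (x2 > x) ? ~0 : 0;        // maskcmp result, bitwise
     adj  = mask & 1.0;               // 1.0 or +0.0
     x2   = x2 - adj;

   Subtracting (rather than adding a negated constant) matters when adj is
   +0.0 and x2 is -0.0: -0.0 - +0.0 == -0.0, so the sign of zero survives.  */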
28962
28963 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28964 into OPERAND0. */
28965 void
28966 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28967 {
28968 /* C code for the stuff we expand below.
28969 double xa = fabs (x), x2;
28970 if (!isless (xa, TWO52))
28971 return x;
28972 x2 = (double)(long)x;
28973 Compensate. Floor:
28974 if (x2 > x)
28975 x2 -= 1;
28976 Compensate. Ceil:
28977 if (x2 < x)
28978 x2 += 1;
28979 if (HONOR_SIGNED_ZEROS (mode))
28980 return copysign (x2, x);
28981 return x2;
28982 */
28983 enum machine_mode mode = GET_MODE (operand0);
28984 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28985
28986 TWO52 = ix86_gen_TWO52 (mode);
28987
28988 /* Temporary for holding the result, initialized to the input
28989 operand to ease control flow. */
28990 res = gen_reg_rtx (mode);
28991 emit_move_insn (res, operand1);
28992
28993 /* xa = abs (operand1) */
28994 xa = ix86_expand_sse_fabs (res, &mask);
28995
28996 /* if (!isless (xa, TWO52)) goto label; */
28997 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28998
28999 /* xa = (double)(long)x */
29000 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29001 expand_fix (xi, res, 0);
29002 expand_float (xa, xi, 0);
29003
29004 /* generate 1.0 */
29005 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29006
29007 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29008 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29009 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29010 gen_rtx_AND (mode, one, tmp)));
29011 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
29012 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29013 emit_move_insn (res, tmp);
29014
29015 if (HONOR_SIGNED_ZEROS (mode))
29016 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29017
29018 emit_label (label);
29019 LABEL_NUSES (label) = 1;
29020
29021 emit_move_insn (operand0, res);
29022 }
29023
29024 /* Expand SSE sequence for computing round from OPERAND1 storing
29025 into OPERAND0. Sequence that works without relying on DImode truncation
29026 via cvttsd2siq that is only available on 64bit targets. */
29027 void
29028 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29029 {
29030 /* C code for the stuff we expand below.
29031 double xa = fabs (x), xa2, x2;
29032 if (!isless (xa, TWO52))
29033 return x;
29034 Using the absolute value and copying back sign makes
29035 -0.0 -> -0.0 correct.
29036 xa2 = xa + TWO52 - TWO52;
29037 Compensate.
29038 dxa = xa2 - xa;
29039 if (dxa <= -0.5)
29040 xa2 += 1;
29041 else if (dxa > 0.5)
29042 xa2 -= 1;
29043 x2 = copysign (xa2, x);
29044 return x2;
29045 */
29046 enum machine_mode mode = GET_MODE (operand0);
29047 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29048
29049 TWO52 = ix86_gen_TWO52 (mode);
29050
29051 /* Temporary for holding the result, initialized to the input
29052 operand to ease control flow. */
29053 res = gen_reg_rtx (mode);
29054 emit_move_insn (res, operand1);
29055
29056 /* xa = abs (operand1) */
29057 xa = ix86_expand_sse_fabs (res, &mask);
29058
29059 /* if (!isless (xa, TWO52)) goto label; */
29060 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29061
29062 /* xa2 = xa + TWO52 - TWO52; */
29063 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29064 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29065
29066 /* dxa = xa2 - xa; */
29067 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29068
29069 /* generate 0.5, 1.0 and -0.5 */
29070 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29071 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29072 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29073 0, OPTAB_DIRECT);
29074
29075 /* Compensate. */
29076 tmp = gen_reg_rtx (mode);
29077 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29078 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29079 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29080 gen_rtx_AND (mode, one, tmp)));
29081 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29082 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29083 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29084 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29085 gen_rtx_AND (mode, one, tmp)));
29086 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29087
29088 /* res = copysign (xa2, operand1) */
29089 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29090
29091 emit_label (label);
29092 LABEL_NUSES (label) = 1;
29093
29094 emit_move_insn (operand0, res);
29095 }
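
/* A worked example of the compensation above (round-to-nearest-even
   assumed): for x == 2.5, xa2 == (2.5 + 2**52) - 2**52 == 2.0 because the
   tie went to the even neighbour; dxa == -0.5, so the dxa <= -0.5 branch
   adds 1 and the result is 3.0, matching C99 round() (ties away from zero).
   For x == 2.25, xa2 == 2.0 and dxa == -0.25, so no correction is applied.  */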
29096
29097 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29098 into OPERAND0. */
29099 void
29100 ix86_expand_trunc (rtx operand0, rtx operand1)
29101 {
29102 /* C code for SSE variant we expand below.
29103 double xa = fabs (x), x2;
29104 if (!isless (xa, TWO52))
29105 return x;
29106 x2 = (double)(long)x;
29107 if (HONOR_SIGNED_ZEROS (mode))
29108 return copysign (x2, x);
29109 return x2;
29110 */
29111 enum machine_mode mode = GET_MODE (operand0);
29112 rtx xa, xi, TWO52, label, res, mask;
29113
29114 TWO52 = ix86_gen_TWO52 (mode);
29115
29116 /* Temporary for holding the result, initialized to the input
29117 operand to ease control flow. */
29118 res = gen_reg_rtx (mode);
29119 emit_move_insn (res, operand1);
29120
29121 /* xa = abs (operand1) */
29122 xa = ix86_expand_sse_fabs (res, &mask);
29123
29124 /* if (!isless (xa, TWO52)) goto label; */
29125 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29126
29127 /* x = (double)(long)x */
29128 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29129 expand_fix (xi, res, 0);
29130 expand_float (res, xi, 0);
29131
29132 if (HONOR_SIGNED_ZEROS (mode))
29133 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29134
29135 emit_label (label);
29136 LABEL_NUSES (label) = 1;
29137
29138 emit_move_insn (operand0, res);
29139 }
29140
29141 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0.
29142 Works without relying on DImode truncation via cvttsd2siq, which is only available on 64-bit targets.  */
29143 void
29144 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29145 {
29146 enum machine_mode mode = GET_MODE (operand0);
29147 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29148
29149 /* C code for SSE variant we expand below.
29150 double xa = fabs (x), x2;
29151 if (!isless (xa, TWO52))
29152 return x;
29153 xa2 = xa + TWO52 - TWO52;
29154 Compensate:
29155 if (xa2 > xa)
29156 xa2 -= 1.0;
29157 x2 = copysign (xa2, x);
29158 return x2;
29159 */
29160
29161 TWO52 = ix86_gen_TWO52 (mode);
29162
29163 /* Temporary for holding the result, initialized to the input
29164 operand to ease control flow. */
29165 res = gen_reg_rtx (mode);
29166 emit_move_insn (res, operand1);
29167
29168 /* xa = abs (operand1) */
29169 xa = ix86_expand_sse_fabs (res, &smask);
29170
29171 /* if (!isless (xa, TWO52)) goto label; */
29172 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29173
29174 /* res = xa + TWO52 - TWO52; */
29175 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29176 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29177 emit_move_insn (res, tmp);
29178
29179 /* generate 1.0 */
29180 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29181
29182 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29183 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29184 emit_insn (gen_rtx_SET (VOIDmode, mask,
29185 gen_rtx_AND (mode, mask, one)));
29186 tmp = expand_simple_binop (mode, MINUS,
29187 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29188 emit_move_insn (res, tmp);
29189
29190 /* res = copysign (res, operand1) */
29191 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29192
29193 emit_label (label);
29194 LABEL_NUSES (label) = 1;
29195
29196 emit_move_insn (operand0, res);
29197 }
29198
29199 /* Expand SSE sequence for computing round from OPERAND1 storing
29200 into OPERAND0. */
29201 void
29202 ix86_expand_round (rtx operand0, rtx operand1)
29203 {
29204 /* C code for the stuff we're doing below:
29205 double xa = fabs (x);
29206 if (!isless (xa, TWO52))
29207 return x;
29208 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29209 return copysign (xa, x);
29210 */
29211 enum machine_mode mode = GET_MODE (operand0);
29212 rtx res, TWO52, xa, label, xi, half, mask;
29213 const struct real_format *fmt;
29214 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29215
29216 /* Temporary for holding the result, initialized to the input
29217 operand to ease control flow. */
29218 res = gen_reg_rtx (mode);
29219 emit_move_insn (res, operand1);
29220
29221 TWO52 = ix86_gen_TWO52 (mode);
29222 xa = ix86_expand_sse_fabs (res, &mask);
29223 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29224
29225 /* load nextafter (0.5, 0.0) */
29226 fmt = REAL_MODE_FORMAT (mode);
29227 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29228 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29229
29230 /* xa = xa + 0.5 */
29231 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29232 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29233
29234 /* xa = (double)(int64_t)xa */
29235 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29236 expand_fix (xi, xa, 0);
29237 expand_float (xa, xi, 0);
29238
29239 /* res = copysign (xa, operand1) */
29240 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29241
29242 emit_label (label);
29243 LABEL_NUSES (label) = 1;
29244
29245 emit_move_insn (operand0, res);
29246 }
29247 \f
29248
29249 /* Table of valid machine attributes. */
29250 static const struct attribute_spec ix86_attribute_table[] =
29251 {
29252 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29253 /* Stdcall attribute says callee is responsible for popping arguments
29254 if they are not variable. */
29255 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29256 /* Fastcall attribute says callee is responsible for popping arguments
29257 if they are not variable. */
29258 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29259 /* Thiscall attribute says callee is responsible for popping arguments
29260 if they are not variable. */
29261 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29262 /* Cdecl attribute says the callee is a normal C declaration */
29263 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29264 /* Regparm attribute specifies how many integer arguments are to be
29265 passed in registers. */
29266 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29267 /* Sseregparm attribute says we are using x86_64 calling conventions
29268 for FP arguments. */
29269 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29270 /* force_align_arg_pointer says this function realigns the stack at entry. */
29271 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29272 false, true, true, ix86_handle_cconv_attribute },
29273 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29274 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29275 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29276 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29277 #endif
29278 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29279 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29280 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29281 SUBTARGET_ATTRIBUTE_TABLE,
29282 #endif
29283 /* ms_abi and sysv_abi calling convention function attributes. */
29284 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29285 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29286 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29287 /* End element. */
29288 { NULL, 0, 0, false, false, false, NULL }
29289 };
29290
29291 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29292 static int
29293 ix86_builtin_vectorization_cost (bool runtime_test)
29294 {
29295 /* If the branch of the runtime test is taken (i.e. the vectorized
29296 version is skipped), this incurs a misprediction cost (because the
29297 vectorized version is expected to be the fall-through).  So we subtract
29298 the latency of a mispredicted branch from the costs that are incurred
29299 when the vectorized version is executed.
29300
29301 TODO: The values in the individual target tables have to be tuned or new
29302 fields may be needed.  E.g. on K8, the default branch path is the
29303 not-taken path.  If the taken path is predicted correctly, the minimum
29304 penalty of going down the taken path is 1 cycle.  If the taken path is
29305 not predicted correctly, then the minimum penalty is 10 cycles.  */
29306
29307 if (runtime_test)
29308 {
29309 return (-(ix86_cost->cond_taken_branch_cost));
29310 }
29311 else
29312 return 0;
29313 }
29314
29315 /* Implement targetm.vectorize.builtin_vec_perm. */
29316
29317 static tree
29318 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29319 {
29320 tree itype = TREE_TYPE (vec_type);
29321 bool u = TYPE_UNSIGNED (itype);
29322 enum machine_mode vmode = TYPE_MODE (vec_type);
29323 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29324 bool ok = TARGET_SSE2;
29325
29326 switch (vmode)
29327 {
29328 case V4DFmode:
29329 ok = TARGET_AVX;
29330 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29331 goto get_di;
29332 case V2DFmode:
29333 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29334 get_di:
29335 itype = ix86_get_builtin_type (IX86_BT_DI);
29336 break;
29337
29338 case V8SFmode:
29339 ok = TARGET_AVX;
29340 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29341 goto get_si;
29342 case V4SFmode:
29343 ok = TARGET_SSE;
29344 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29345 get_si:
29346 itype = ix86_get_builtin_type (IX86_BT_SI);
29347 break;
29348
29349 case V2DImode:
29350 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29351 break;
29352 case V4SImode:
29353 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29354 break;
29355 case V8HImode:
29356 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29357 break;
29358 case V16QImode:
29359 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29360 break;
29361 default:
29362 ok = false;
29363 break;
29364 }
29365
29366 if (!ok)
29367 return NULL_TREE;
29368
29369 *mask_type = itype;
29370 return ix86_builtins[(int) fcode];
29371 }
29372
29373 /* Return a vector mode with twice as many elements as VMODE. */
29374 /* ??? Consider moving this to a table generated by genmodes.c. */
29375
29376 static enum machine_mode
29377 doublesize_vector_mode (enum machine_mode vmode)
29378 {
29379 switch (vmode)
29380 {
29381 case V2SFmode: return V4SFmode;
29382 case V1DImode: return V2DImode;
29383 case V2SImode: return V4SImode;
29384 case V4HImode: return V8HImode;
29385 case V8QImode: return V16QImode;
29386
29387 case V2DFmode: return V4DFmode;
29388 case V4SFmode: return V8SFmode;
29389 case V2DImode: return V4DImode;
29390 case V4SImode: return V8SImode;
29391 case V8HImode: return V16HImode;
29392 case V16QImode: return V32QImode;
29393
29394 case V4DFmode: return V8DFmode;
29395 case V8SFmode: return V16SFmode;
29396 case V4DImode: return V8DImode;
29397 case V8SImode: return V16SImode;
29398 case V16HImode: return V32HImode;
29399 case V32QImode: return V64QImode;
29400
29401 default:
29402 gcc_unreachable ();
29403 }
29404 }
29405
29406 /* Construct (set target (vec_select op0 (parallel perm))) and
29407 return true if that's a valid instruction in the active ISA. */
29408
29409 static bool
29410 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29411 {
29412 rtx rperm[MAX_VECT_LEN], x;
29413 unsigned i;
29414
29415 for (i = 0; i < nelt; ++i)
29416 rperm[i] = GEN_INT (perm[i]);
29417
29418 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29419 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29420 x = gen_rtx_SET (VOIDmode, target, x);
29421
29422 x = emit_insn (x);
29423 if (recog_memoized (x) < 0)
29424 {
29425 remove_insn (x);
29426 return false;
29427 }
29428 return true;
29429 }
29430
29431 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29432
29433 static bool
29434 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29435 const unsigned char *perm, unsigned nelt)
29436 {
29437 enum machine_mode v2mode;
29438 rtx x;
29439
29440 v2mode = doublesize_vector_mode (GET_MODE (op0));
29441 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29442 return expand_vselect (target, x, perm, nelt);
29443 }
29444
29445 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29446 in terms of blendp[sd] / pblendw / pblendvb. */
29447
29448 static bool
29449 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29450 {
29451 enum machine_mode vmode = d->vmode;
29452 unsigned i, mask, nelt = d->nelt;
29453 rtx target, op0, op1, x;
29454
29455 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29456 return false;
29457 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29458 return false;
29459
29460 /* This is a blend, not a permute. Elements must stay in their
29461 respective lanes. */
29462 for (i = 0; i < nelt; ++i)
29463 {
29464 unsigned e = d->perm[i];
29465 if (!(e == i || e == i + nelt))
29466 return false;
29467 }
29468
29469 if (d->testing_p)
29470 return true;
29471
29472 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29473 decision should be extracted elsewhere, so that we only try that
29474 sequence once all budget==3 options have been tried. */
29475
29476 /* For bytes, see if bytes move in pairs so we can use pblendw with
29477 an immediate argument, rather than pblendvb with a vector argument. */
29478 if (vmode == V16QImode)
29479 {
29480 bool pblendw_ok = true;
29481 for (i = 0; i < 16 && pblendw_ok; i += 2)
29482 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29483
29484 if (!pblendw_ok)
29485 {
29486 rtx rperm[16], vperm;
29487
29488 for (i = 0; i < nelt; ++i)
29489 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29490
29491 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29492 vperm = force_reg (V16QImode, vperm);
29493
29494 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29495 return true;
29496 }
29497 }
29498
29499 target = d->target;
29500 op0 = d->op0;
29501 op1 = d->op1;
29502 mask = 0;
29503
29504 switch (vmode)
29505 {
29506 case V4DFmode:
29507 case V8SFmode:
29508 case V2DFmode:
29509 case V4SFmode:
29510 case V8HImode:
29511 for (i = 0; i < nelt; ++i)
29512 mask |= (d->perm[i] >= nelt) << i;
29513 break;
29514
29515 case V2DImode:
29516 for (i = 0; i < 2; ++i)
29517 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29518 goto do_subreg;
29519
29520 case V4SImode:
29521 for (i = 0; i < 4; ++i)
29522 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29523 goto do_subreg;
29524
29525 case V16QImode:
29526 for (i = 0; i < 8; ++i)
29527 mask |= (d->perm[i * 2] >= 16) << i;
29528
29529 do_subreg:
29530 vmode = V8HImode;
29531 target = gen_lowpart (vmode, target);
29532 op0 = gen_lowpart (vmode, op0);
29533 op1 = gen_lowpart (vmode, op1);
29534 break;
29535
29536 default:
29537 gcc_unreachable ();
29538 }
29539
29540 /* This matches five different patterns, one for each of the modes handled above.  */
29541 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29542 x = gen_rtx_SET (VOIDmode, target, x);
29543 emit_insn (x);
29544
29545 return true;
29546 }
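
/* The immediate built above has one bit per element selecting op1 over op0.
   For V4SImode and V2DImode the blend is performed as a word blend through
   a V8HImode subreg, so each dword contributes two and each qword four
   adjacent mask bits.  E.g. for V4SImode with perm = { 0, 5, 2, 7 } (take
   elements 1 and 3 from op1) the loop produces

     mask = (3 << 2) | (3 << 6) == 0xcc

   which is the pblendw immediate covering words 2-3 and 6-7.  */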
29547
29548 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29549 in terms of the variable form of vpermilps.
29550
29551 Note that we will have already failed the immediate input vpermilps,
29552 which requires that the high and low part shuffle be identical; the
29553 variable form doesn't require that. */
29554
29555 static bool
29556 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29557 {
29558 rtx rperm[8], vperm;
29559 unsigned i;
29560
29561 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29562 return false;
29563
29564 /* We can only permute within the 128-bit lane. */
29565 for (i = 0; i < 8; ++i)
29566 {
29567 unsigned e = d->perm[i];
29568 if (i < 4 ? e >= 4 : e < 4)
29569 return false;
29570 }
29571
29572 if (d->testing_p)
29573 return true;
29574
29575 for (i = 0; i < 8; ++i)
29576 {
29577 unsigned e = d->perm[i];
29578
29579 /* Within each 128-bit lane, the elements of op0 are numbered
29580 from 0 and the elements of op1 are numbered from 4. */
29581 if (e >= 8 + 4)
29582 e -= 8;
29583 else if (e >= 4)
29584 e -= 4;
29585
29586 rperm[i] = GEN_INT (e);
29587 }
29588
29589 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29590 vperm = force_reg (V8SImode, vperm);
29591 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29592
29593 return true;
29594 }
29595
29596 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29597 in terms of pshufb or vpperm. */
29598
29599 static bool
29600 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29601 {
29602 unsigned i, nelt, eltsz;
29603 rtx rperm[16], vperm, target, op0, op1;
29604
29605 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29606 return false;
29607 if (GET_MODE_SIZE (d->vmode) != 16)
29608 return false;
29609
29610 if (d->testing_p)
29611 return true;
29612
29613 nelt = d->nelt;
29614 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29615
29616 for (i = 0; i < nelt; ++i)
29617 {
29618 unsigned j, e = d->perm[i];
29619 for (j = 0; j < eltsz; ++j)
29620 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29621 }
29622
29623 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29624 vperm = force_reg (V16QImode, vperm);
29625
29626 target = gen_lowpart (V16QImode, d->target);
29627 op0 = gen_lowpart (V16QImode, d->op0);
29628 if (d->op0 == d->op1)
29629 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29630 else
29631 {
29632 op1 = gen_lowpart (V16QImode, d->op1);
29633 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29634 }
29635
29636 return true;
29637 }
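
/* The control vector above is simply the element-level permutation expanded
   to byte granularity: element index E of an N-byte element becomes bytes
   E*N .. E*N+N-1.  For example, a V4SImode permutation { 2, 0, 3, 1 }
   (eltsz == 4) expands to the pshufb/vpperm byte selector

     {  8  9 10 11,  0  1  2  3, 12 13 14 15,  4  5  6  7 }.  */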
29638
29639 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29640 in a single instruction. */
29641
29642 static bool
29643 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29644 {
29645 unsigned i, nelt = d->nelt;
29646 unsigned char perm2[MAX_VECT_LEN];
29647
29648 /* Check plain VEC_SELECT first, because AVX has instructions that could
29649 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29650 input where SEL+CONCAT may not. */
29651 if (d->op0 == d->op1)
29652 {
29653 int mask = nelt - 1;
29654
29655 for (i = 0; i < nelt; i++)
29656 perm2[i] = d->perm[i] & mask;
29657
29658 if (expand_vselect (d->target, d->op0, perm2, nelt))
29659 return true;
29660
29661 /* There are plenty of patterns in sse.md that are written for
29662 SEL+CONCAT and are not replicated for a single op. Perhaps
29663 that should be changed, to avoid the nastiness here. */
29664
29665 /* Recognize interleave style patterns, which means incrementing
29666 every other permutation operand. */
29667 for (i = 0; i < nelt; i += 2)
29668 {
29669 perm2[i] = d->perm[i] & mask;
29670 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29671 }
29672 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29673 return true;
29674
29675 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29676 if (nelt >= 4)
29677 {
29678 for (i = 0; i < nelt; i += 4)
29679 {
29680 perm2[i + 0] = d->perm[i + 0] & mask;
29681 perm2[i + 1] = d->perm[i + 1] & mask;
29682 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29683 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29684 }
29685
29686 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29687 return true;
29688 }
29689 }
29690
29691 /* Finally, try the fully general two operand permute. */
29692 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29693 return true;
29694
29695 /* Recognize interleave style patterns with reversed operands. */
29696 if (d->op0 != d->op1)
29697 {
29698 for (i = 0; i < nelt; ++i)
29699 {
29700 unsigned e = d->perm[i];
29701 if (e >= nelt)
29702 e -= nelt;
29703 else
29704 e += nelt;
29705 perm2[i] = e;
29706 }
29707
29708 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29709 return true;
29710 }
29711
29712 /* Try the SSE4.1 blend variable merge instructions. */
29713 if (expand_vec_perm_blend (d))
29714 return true;
29715
29716 /* Try one of the AVX vpermil variable permutations. */
29717 if (expand_vec_perm_vpermil (d))
29718 return true;
29719
29720 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29721 if (expand_vec_perm_pshufb (d))
29722 return true;
29723
29724 return false;
29725 }
29726
29727 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29728 in terms of a pair of pshuflw + pshufhw instructions. */
29729
29730 static bool
29731 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29732 {
29733 unsigned char perm2[MAX_VECT_LEN];
29734 unsigned i;
29735 bool ok;
29736
29737 if (d->vmode != V8HImode || d->op0 != d->op1)
29738 return false;
29739
29740 /* The two permutations only operate in 64-bit lanes. */
29741 for (i = 0; i < 4; ++i)
29742 if (d->perm[i] >= 4)
29743 return false;
29744 for (i = 4; i < 8; ++i)
29745 if (d->perm[i] < 4)
29746 return false;
29747
29748 if (d->testing_p)
29749 return true;
29750
29751 /* Emit the pshuflw. */
29752 memcpy (perm2, d->perm, 4);
29753 for (i = 4; i < 8; ++i)
29754 perm2[i] = i;
29755 ok = expand_vselect (d->target, d->op0, perm2, 8);
29756 gcc_assert (ok);
29757
29758 /* Emit the pshufhw. */
29759 memcpy (perm2 + 4, d->perm + 4, 4);
29760 for (i = 0; i < 4; ++i)
29761 perm2[i] = i;
29762 ok = expand_vselect (d->target, d->target, perm2, 8);
29763 gcc_assert (ok);
29764
29765 return true;
29766 }
29767
29768 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29769 the permutation using the SSSE3 palignr instruction. This succeeds
29770 when all of the elements in PERM fit within one vector and we merely
29771 need to shift them down so that a single vector permutation has a
29772 chance to succeed. */
29773
29774 static bool
29775 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29776 {
29777 unsigned i, nelt = d->nelt;
29778 unsigned min, max;
29779 bool in_order, ok;
29780 rtx shift;
29781
29782 /* Even with AVX, palignr only operates on 128-bit vectors. */
29783 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29784 return false;
29785
29786 min = nelt, max = 0;
29787 for (i = 0; i < nelt; ++i)
29788 {
29789 unsigned e = d->perm[i];
29790 if (e < min)
29791 min = e;
29792 if (e > max)
29793 max = e;
29794 }
29795 if (min == 0 || max - min >= nelt)
29796 return false;
29797
29798 /* Given that we have SSSE3, we know we'll be able to implement the
29799 single operand permutation after the palignr with pshufb. */
29800 if (d->testing_p)
29801 return true;
29802
29803 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29804 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29805 gen_lowpart (TImode, d->op1),
29806 gen_lowpart (TImode, d->op0), shift));
29807
29808 d->op0 = d->op1 = d->target;
29809
29810 in_order = true;
29811 for (i = 0; i < nelt; ++i)
29812 {
29813 unsigned e = d->perm[i] - min;
29814 if (e != i)
29815 in_order = false;
29816 d->perm[i] = e;
29817 }
29818
29819 /* Test for the degenerate case where the alignment by itself
29820 produces the desired permutation. */
29821 if (in_order)
29822 return true;
29823
29824 ok = expand_vec_perm_1 (d);
29825 gcc_assert (ok);
29826
29827 return ok;
29828 }
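
/* Example: for a V4SImode permutation { 1, 2, 3, 4 } we have min == 1, so
   the palignr shifts the op1:op0 concatenation right by 4 bytes; the
   residual permutation is then { 0, 1, 2, 3 } and the in_order test
   short-circuits.  For { 2, 1, 3, 4 } the residual is { 1, 0, 2, 3 } and a
   single-operand shuffle (e.g. pshufd) finishes the job via
   expand_vec_perm_1.  */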
29829
29830 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29831 a two vector permutation into a single vector permutation by using
29832 an interleave operation to merge the vectors. */
29833
29834 static bool
29835 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29836 {
29837 struct expand_vec_perm_d dremap, dfinal;
29838 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29839 unsigned contents, h1, h2, h3, h4;
29840 unsigned char remap[2 * MAX_VECT_LEN];
29841 rtx seq;
29842 bool ok;
29843
29844 if (d->op0 == d->op1)
29845 return false;
29846
29847 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29848 lanes. We can use similar techniques with the vperm2f128 instruction,
29849 but it requires slightly different logic. */
29850 if (GET_MODE_SIZE (d->vmode) != 16)
29851 return false;
29852
29853 /* Examine from whence the elements come. */
29854 contents = 0;
29855 for (i = 0; i < nelt; ++i)
29856 contents |= 1u << d->perm[i];
29857
29858 /* Split the two input vectors into 4 halves. */
29859 h1 = (1u << nelt2) - 1;
29860 h2 = h1 << nelt2;
29861 h3 = h2 << nelt2;
29862 h4 = h3 << nelt2;
29863
29864 memset (remap, 0xff, sizeof (remap));
29865 dremap = *d;
29866
29867 /* If the elements come only from the low halves, use interleave low;
29868 similarly, use interleave high for the high halves.  If the elements are
29869 from mismatched halves, we can use shufps for V4SF/V4SI or do a DImode shuffle.  */
29870 if ((contents & (h1 | h3)) == contents)
29871 {
29872 for (i = 0; i < nelt2; ++i)
29873 {
29874 remap[i] = i * 2;
29875 remap[i + nelt] = i * 2 + 1;
29876 dremap.perm[i * 2] = i;
29877 dremap.perm[i * 2 + 1] = i + nelt;
29878 }
29879 }
29880 else if ((contents & (h2 | h4)) == contents)
29881 {
29882 for (i = 0; i < nelt2; ++i)
29883 {
29884 remap[i + nelt2] = i * 2;
29885 remap[i + nelt + nelt2] = i * 2 + 1;
29886 dremap.perm[i * 2] = i + nelt2;
29887 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29888 }
29889 }
29890 else if ((contents & (h1 | h4)) == contents)
29891 {
29892 for (i = 0; i < nelt2; ++i)
29893 {
29894 remap[i] = i;
29895 remap[i + nelt + nelt2] = i + nelt2;
29896 dremap.perm[i] = i;
29897 dremap.perm[i + nelt2] = i + nelt + nelt2;
29898 }
29899 if (nelt != 4)
29900 {
29901 dremap.vmode = V2DImode;
29902 dremap.nelt = 2;
29903 dremap.perm[0] = 0;
29904 dremap.perm[1] = 3;
29905 }
29906 }
29907 else if ((contents & (h2 | h3)) == contents)
29908 {
29909 for (i = 0; i < nelt2; ++i)
29910 {
29911 remap[i + nelt2] = i;
29912 remap[i + nelt] = i + nelt2;
29913 dremap.perm[i] = i + nelt2;
29914 dremap.perm[i + nelt2] = i + nelt;
29915 }
29916 if (nelt != 4)
29917 {
29918 dremap.vmode = V2DImode;
29919 dremap.nelt = 2;
29920 dremap.perm[0] = 1;
29921 dremap.perm[1] = 2;
29922 }
29923 }
29924 else
29925 return false;
29926
29927 /* Use the remapping array set up above to move the elements from their
29928 swizzled locations into their final destinations. */
29929 dfinal = *d;
29930 for (i = 0; i < nelt; ++i)
29931 {
29932 unsigned e = remap[d->perm[i]];
29933 gcc_assert (e < nelt);
29934 dfinal.perm[i] = e;
29935 }
29936 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29937 dfinal.op1 = dfinal.op0;
29938 dremap.target = dfinal.op0;
29939
29940 /* Test if the final remap can be done with a single insn. For V4SFmode or
29941 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29942 start_sequence ();
29943 ok = expand_vec_perm_1 (&dfinal);
29944 seq = get_insns ();
29945 end_sequence ();
29946
29947 if (!ok)
29948 return false;
29949
29950 if (dremap.vmode != dfinal.vmode)
29951 {
29952 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29953 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29954 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29955 }
29956
29957 ok = expand_vec_perm_1 (&dremap);
29958 gcc_assert (ok);
29959
29960 emit_insn (seq);
29961 return true;
29962 }
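
/* Example (V4SImode): for the permutation { 0, 1, 4, 5 } all selected
   elements come from the low halves (contents fits in h1 | h3), so the
   remap step emits an interleave-low producing { 0, 4, 1, 5 }, and the
   final single-insn permutation of that vector is { 0, 2, 1, 3 }, i.e. a
   plain pshufd/shufps.  */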
29963
29964 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29965 permutation with two pshufb insns and an ior. We should have already
29966 failed all two instruction sequences. */
29967
29968 static bool
29969 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29970 {
29971 rtx rperm[2][16], vperm, l, h, op, m128;
29972 unsigned int i, nelt, eltsz;
29973
29974 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29975 return false;
29976 gcc_assert (d->op0 != d->op1);
29977
29978 nelt = d->nelt;
29979 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29980
29981 /* Generate two permutation masks. If the required element is within
29982 the given vector it is shuffled into the proper lane. If the required
29983 element is in the other vector, force a zero into the lane by setting
29984 bit 7 in the permutation mask. */
29985 m128 = GEN_INT (-128);
29986 for (i = 0; i < nelt; ++i)
29987 {
29988 unsigned j, e = d->perm[i];
29989 unsigned which = (e >= nelt);
29990 if (e >= nelt)
29991 e -= nelt;
29992
29993 for (j = 0; j < eltsz; ++j)
29994 {
29995 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29996 rperm[1-which][i*eltsz + j] = m128;
29997 }
29998 }
29999
30000 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
30001 vperm = force_reg (V16QImode, vperm);
30002
30003 l = gen_reg_rtx (V16QImode);
30004 op = gen_lowpart (V16QImode, d->op0);
30005 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
30006
30007 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
30008 vperm = force_reg (V16QImode, vperm);
30009
30010 h = gen_reg_rtx (V16QImode);
30011 op = gen_lowpart (V16QImode, d->op1);
30012 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
30013
30014 op = gen_lowpart (V16QImode, d->target);
30015 emit_insn (gen_iorv16qi3 (op, l, h));
30016
30017 return true;
30018 }
30019
30020 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
30021 and extract-odd permutations. */
30022
30023 static bool
30024 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
30025 {
30026 rtx t1, t2, t3, t4;
30027
30028 switch (d->vmode)
30029 {
30030 case V4DFmode:
30031 t1 = gen_reg_rtx (V4DFmode);
30032 t2 = gen_reg_rtx (V4DFmode);
30033
30034 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
30035 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
30036 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
30037
30038 /* Now an unpck[lh]pd will produce the result required. */
30039 if (odd)
30040 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
30041 else
30042 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
30043 emit_insn (t3);
30044 break;
30045
30046 case V8SFmode:
30047 {
30048 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
30049 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
30050 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
30051
30052 t1 = gen_reg_rtx (V8SFmode);
30053 t2 = gen_reg_rtx (V8SFmode);
30054 t3 = gen_reg_rtx (V8SFmode);
30055 t4 = gen_reg_rtx (V8SFmode);
30056
30057 /* Shuffle within the 128-bit lanes to produce:
30058 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
30059 expand_vselect (t1, d->op0, perm1, 8);
30060 expand_vselect (t2, d->op1, perm1, 8);
30061
30062 /* Shuffle the lanes around to produce:
30063 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
30064 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
30065 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
30066
30067 /* Now a vpermil2p will produce the result required. */
30068 /* ??? The vpermil2p requires a vector constant. Another option
30069 is a unpck[lh]ps to merge the two vectors to produce
30070 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
30071 vpermilps to get the elements into the final order. */
30072 d->op0 = t3;
30073 d->op1 = t4;
30074 memcpy (d->perm, odd ? permo: perme, 8);
30075 expand_vec_perm_vpermil (d);
30076 }
30077 break;
30078
30079 case V2DFmode:
30080 case V4SFmode:
30081 case V2DImode:
30082 case V4SImode:
30083 /* These are always directly implementable by expand_vec_perm_1. */
30084 gcc_unreachable ();
30085
30086 case V8HImode:
30087 if (TARGET_SSSE3)
30088 return expand_vec_perm_pshufb2 (d);
30089 else
30090 {
30091 /* We need 2*log2(N)-1 operations to achieve odd/even
30092 with interleave. */
30093 t1 = gen_reg_rtx (V8HImode);
30094 t2 = gen_reg_rtx (V8HImode);
30095 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
30096 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
30097 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
30098 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
30099 if (odd)
30100 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
30101 else
30102 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
30103 emit_insn (t3);
30104 }
30105 break;
30106
30107 case V16QImode:
30108 if (TARGET_SSSE3)
30109 return expand_vec_perm_pshufb2 (d);
30110 else
30111 {
30112 t1 = gen_reg_rtx (V16QImode);
30113 t2 = gen_reg_rtx (V16QImode);
30114 t3 = gen_reg_rtx (V16QImode);
30115 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
30116 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
30117 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
30118 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
30119 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
30120 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
30121 if (odd)
30122 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
30123 else
30124 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
30125 emit_insn (t3);
30126 }
30127 break;
30128
30129 default:
30130 gcc_unreachable ();
30131 }
30132
30133 return true;
30134 }
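
/* A trace of the non-SSSE3 V8HImode path above, writing op0 = a0..a7 and
   op1 = b0..b7:

     t1     = interleave_high (op0, op1)    = { a4 b4 a5 b5 a6 b6 a7 b7 }
     target = interleave_low  (op0, op1)    = { a0 b0 a1 b1 a2 b2 a3 b3 }
     t2     = interleave_high (target, t1)  = { a2 a6 b2 b6 a3 a7 b3 b7 }
     target = interleave_low  (target, t1)  = { a0 a4 b0 b4 a1 a5 b1 b5 }
     even:    interleave_low  (target, t2)  = { a0 a2 a4 a6 b0 b2 b4 b6 }
     odd:     interleave_high (target, t2)  = { a1 a3 a5 a7 b1 b3 b5 b7 }

   The V16QImode path is the same idea with one more interleave level.  */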
30135
30136 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30137 extract-even and extract-odd permutations. */
30138
30139 static bool
30140 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
30141 {
30142 unsigned i, odd, nelt = d->nelt;
30143
30144 odd = d->perm[0];
30145 if (odd != 0 && odd != 1)
30146 return false;
30147
30148 for (i = 1; i < nelt; ++i)
30149 if (d->perm[i] != 2 * i + odd)
30150 return false;
30151
30152 return expand_vec_perm_even_odd_1 (d, odd);
30153 }
30154
30155 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
30156 permutations. We assume that expand_vec_perm_1 has already failed. */
30157
30158 static bool
30159 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30160 {
30161 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30162 enum machine_mode vmode = d->vmode;
30163 unsigned char perm2[4];
30164 rtx op0 = d->op0;
30165 bool ok;
30166
30167 switch (vmode)
30168 {
30169 case V4DFmode:
30170 case V8SFmode:
30171 /* These are special-cased in sse.md so that we can optionally
30172 use the vbroadcast instruction. They expand to two insns
30173 if the input happens to be in a register. */
30174 gcc_unreachable ();
30175
30176 case V2DFmode:
30177 case V2DImode:
30178 case V4SFmode:
30179 case V4SImode:
30180 /* These are always implementable using standard shuffle patterns. */
30181 gcc_unreachable ();
30182
30183 case V8HImode:
30184 case V16QImode:
30185 /* These can be implemented via interleave. We save one insn by
30186 stopping once we have promoted to V4SImode and then use pshufd. */
30187 do
30188 {
30189 optab otab = vec_interleave_low_optab;
30190
30191 if (elt >= nelt2)
30192 {
30193 otab = vec_interleave_high_optab;
30194 elt -= nelt2;
30195 }
30196 nelt2 /= 2;
30197
30198 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30199 vmode = get_mode_wider_vector (vmode);
30200 op0 = gen_lowpart (vmode, op0);
30201 }
30202 while (vmode != V4SImode);
30203
30204 memset (perm2, elt, 4);
30205 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30206 gcc_assert (ok);
30207 return true;
30208
30209 default:
30210 gcc_unreachable ();
30211 }
30212 }
30213
30214 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30215 broadcast permutations. */
30216
30217 static bool
30218 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30219 {
30220 unsigned i, elt, nelt = d->nelt;
30221
30222 if (d->op0 != d->op1)
30223 return false;
30224
30225 elt = d->perm[0];
30226 for (i = 1; i < nelt; ++i)
30227 if (d->perm[i] != elt)
30228 return false;
30229
30230 return expand_vec_perm_broadcast_1 (d);
30231 }
30232
30233 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30234 With all of the interface bits taken care of, perform the expansion
30235 in D and return true on success. */
30236
30237 static bool
30238 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30239 {
30240 /* Try a single instruction expansion. */
30241 if (expand_vec_perm_1 (d))
30242 return true;
30243
30244 /* Try sequences of two instructions. */
30245
30246 if (expand_vec_perm_pshuflw_pshufhw (d))
30247 return true;
30248
30249 if (expand_vec_perm_palignr (d))
30250 return true;
30251
30252 if (expand_vec_perm_interleave2 (d))
30253 return true;
30254
30255 if (expand_vec_perm_broadcast (d))
30256 return true;
30257
30258 /* Try sequences of three instructions. */
30259
30260 if (expand_vec_perm_pshufb2 (d))
30261 return true;
30262
30263 /* ??? Look for narrow permutations whose element orderings would
30264 allow the promotion to a wider mode. */
30265
30266 /* ??? Look for sequences of interleave or a wider permute that place
30267 the data into the correct lanes for a half-vector shuffle like
30268 pshuf[lh]w or vpermilps. */
30269
30270 /* ??? Look for sequences of interleave that produce the desired results.
30271 The combinatorics of punpck[lh] get pretty ugly... */
30272
30273 if (expand_vec_perm_even_odd (d))
30274 return true;
30275
30276 return false;
30277 }
30278
30279 /* Extract the values from the vector CST into the permutation array in D.
30280 Return 0 on error, 1 if all values from the permutation come from the
30281 first vector, 2 if all values from the second vector, and 3 otherwise. */
30282
30283 static int
30284 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30285 {
30286 tree list = TREE_VECTOR_CST_ELTS (cst);
30287 unsigned i, nelt = d->nelt;
30288 int ret = 0;
30289
30290 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30291 {
30292 unsigned HOST_WIDE_INT e;
30293
30294 if (!host_integerp (TREE_VALUE (list), 1))
30295 return 0;
30296 e = tree_low_cst (TREE_VALUE (list), 1);
30297 if (e >= 2 * nelt)
30298 return 0;
30299
30300 ret |= (e < nelt ? 1 : 2);
30301 d->perm[i] = e;
30302 }
30303 gcc_assert (list == NULL);
30304
30305 /* For all elements from second vector, fold the elements to first. */
30306 if (ret == 2)
30307 for (i = 0; i < nelt; ++i)
30308 d->perm[i] -= nelt;
30309
30310 return ret;
30311 }
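
/* For a V4SImode mask, e.g.: { 0, 1, 2, 3 } yields 1 (first vector only);
   { 4, 5, 6, 7 } yields 2 and is folded in place to { 0, 1, 2, 3 };
   { 0, 5, 2, 7 } yields 3; and any element >= 8 (2 * nelt) yields 0.  */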
30312
30313 static rtx
30314 ix86_expand_vec_perm_builtin (tree exp)
30315 {
30316 struct expand_vec_perm_d d;
30317 tree arg0, arg1, arg2;
30318
30319 arg0 = CALL_EXPR_ARG (exp, 0);
30320 arg1 = CALL_EXPR_ARG (exp, 1);
30321 arg2 = CALL_EXPR_ARG (exp, 2);
30322
30323 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30324 d.nelt = GET_MODE_NUNITS (d.vmode);
30325 d.testing_p = false;
30326 gcc_assert (VECTOR_MODE_P (d.vmode));
30327
30328 if (TREE_CODE (arg2) != VECTOR_CST)
30329 {
30330 error_at (EXPR_LOCATION (exp),
30331 "vector permutation requires vector constant");
30332 goto exit_error;
30333 }
30334
30335 switch (extract_vec_perm_cst (&d, arg2))
30336 {
30337 default:
30338 gcc_unreachable();
30339
30340 case 0:
30341 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30342 goto exit_error;
30343
30344 case 3:
30345 if (!operand_equal_p (arg0, arg1, 0))
30346 {
30347 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30348 d.op0 = force_reg (d.vmode, d.op0);
30349 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30350 d.op1 = force_reg (d.vmode, d.op1);
30351 break;
30352 }
30353
30354 /* The elements of PERM do not suggest that only the first operand
30355 is used, but both operands are identical. Allow easier matching
30356 of the permutation by folding the permutation into the single
30357 input vector. */
30358 {
30359 unsigned i, nelt = d.nelt;
30360 for (i = 0; i < nelt; ++i)
30361 if (d.perm[i] >= nelt)
30362 d.perm[i] -= nelt;
30363 }
30364 /* FALLTHRU */
30365
30366 case 1:
30367 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30368 d.op0 = force_reg (d.vmode, d.op0);
30369 d.op1 = d.op0;
30370 break;
30371
30372 case 2:
30373 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30374 d.op0 = force_reg (d.vmode, d.op0);
30375 d.op1 = d.op0;
30376 break;
30377 }
30378
30379 d.target = gen_reg_rtx (d.vmode);
30380 if (ix86_expand_vec_perm_builtin_1 (&d))
30381 return d.target;
30382
30383 /* For compiler-generated permutations, we should never get here, because
30384 the compiler should also be checking the ok hook.  But since this is a
30385 builtin the user has access to as well, don't abort.  */
30386 switch (d.nelt)
30387 {
30388 case 2:
30389 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30390 break;
30391 case 4:
30392 sorry ("vector permutation (%d %d %d %d)",
30393 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30394 break;
30395 case 8:
30396 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30397 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30398 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30399 break;
30400 case 16:
30401 sorry ("vector permutation "
30402 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30403 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30404 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30405 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30406 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30407 break;
30408 default:
30409 gcc_unreachable ();
30410 }
30411 exit_error:
30412 return CONST0_RTX (d.vmode);
30413 }
30414
30415 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30416
30417 static bool
30418 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30419 {
30420 struct expand_vec_perm_d d;
30421 int vec_mask;
30422 bool ret, one_vec;
30423
30424 d.vmode = TYPE_MODE (vec_type);
30425 d.nelt = GET_MODE_NUNITS (d.vmode);
30426 d.testing_p = true;
30427
30428 /* Given sufficient ISA support we can just return true here
30429 for selected vector modes. */
30430 if (GET_MODE_SIZE (d.vmode) == 16)
30431 {
30432 /* All implementable with a single vpperm insn. */
30433 if (TARGET_XOP)
30434 return true;
30435 /* All implementable with 2 pshufb + 1 ior. */
30436 if (TARGET_SSSE3)
30437 return true;
30438 /* All implementable with shufpd or unpck[lh]pd. */
30439 if (d.nelt == 2)
30440 return true;
30441 }
30442
30443 vec_mask = extract_vec_perm_cst (&d, mask);
30444
30445 /* This hook cannot be called in response to something that the
30446 user does (unlike the builtin expander), so we should never see
30447 an error generated from the extract. */
30448 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30449 one_vec = (vec_mask != 3);
30450
30451 /* Implementable with shufps or pshufd. */
30452 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30453 return true;
30454
30455 /* Otherwise we have to go through the motions and see if we can
30456 figure out how to generate the requested permutation. */
30457 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30458 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30459 if (!one_vec)
30460 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30461
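/* Any instructions emitted while testing the permutation are collected
   into a throwaway sequence and discarded; only the boolean result of
   the expander matters here.  */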
30462 start_sequence ();
30463 ret = ix86_expand_vec_perm_builtin_1 (&d);
30464 end_sequence ();
30465
30466 return ret;
30467 }
30468
30469 void
30470 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30471 {
30472 struct expand_vec_perm_d d;
30473 unsigned i, nelt;
30474
30475 d.target = targ;
30476 d.op0 = op0;
30477 d.op1 = op1;
30478 d.vmode = GET_MODE (targ);
30479 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30480 d.testing_p = false;
30481
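/* Build the selector that picks every other element starting at ODD:
   e.g. for V4SImode and ODD == 1 this is {1, 3, 5, 7}, the odd-numbered
   elements of the concatenation of OP0 and OP1.  */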
30482 for (i = 0; i < nelt; ++i)
30483 d.perm[i] = i * 2 + odd;
30484
30485 /* We'll either be able to implement the permutation directly... */
30486 if (expand_vec_perm_1 (&d))
30487 return;
30488
30489 /* ... or we use the special-case patterns. */
30490 expand_vec_perm_even_odd_1 (&d, odd);
30491 }
30492 \f
30493 /* This function returns the calling-ABI-specific va_list type node,
30494 i.e. the va_list type appropriate for FNDECL. */
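/* On x86-64 the MS ABI va_list is a plain character pointer, while the
   SysV ABI va_list is the usual one-element array of a structure
   describing the register-save and overflow areas; hence the two
   distinct type nodes.  */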
30495
30496 static tree
30497 ix86_fn_abi_va_list (tree fndecl)
30498 {
30499 if (!TARGET_64BIT)
30500 return va_list_type_node;
30501 gcc_assert (fndecl != NULL_TREE);
30502
30503 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30504 return ms_va_list_type_node;
30505 else
30506 return sysv_va_list_type_node;
30507 }
30508
30509 /* Returns the canonical va_list type specified by TYPE.  If there
30510 is no valid TYPE provided, it returns NULL_TREE. */
30511
30512 static tree
30513 ix86_canonical_va_list_type (tree type)
30514 {
30515 tree wtype, htype;
30516
30517 /* Resolve references and pointers to va_list type. */
30518 if (INDIRECT_REF_P (type))
30519 type = TREE_TYPE (type);
30520 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
30521 type = TREE_TYPE (type);
30522
30523 if (TARGET_64BIT)
30524 {
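/* Compare TYPE against the default va_list, then the SysV variant,
   then the MS variant; the first whose main variant matches determines
   the canonical type.  */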
30525 wtype = va_list_type_node;
30526 gcc_assert (wtype != NULL_TREE);
30527 htype = type;
30528 if (TREE_CODE (wtype) == ARRAY_TYPE)
30529 {
30530 /* If va_list is an array type, the argument may have decayed
30531 to a pointer type, e.g. by being passed to another function.
30532 In that case, unwrap both types so that we can compare the
30533 underlying records. */
30534 if (TREE_CODE (htype) == ARRAY_TYPE
30535 || POINTER_TYPE_P (htype))
30536 {
30537 wtype = TREE_TYPE (wtype);
30538 htype = TREE_TYPE (htype);
30539 }
30540 }
30541 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30542 return va_list_type_node;
30543 wtype = sysv_va_list_type_node;
30544 gcc_assert (wtype != NULL_TREE);
30545 htype = type;
30546 if (TREE_CODE (wtype) == ARRAY_TYPE)
30547 {
30548 /* If va_list is an array type, the argument may have decayed
30549 to a pointer type, e.g. by being passed to another function.
30550 In that case, unwrap both types so that we can compare the
30551 underlying records. */
30552 if (TREE_CODE (htype) == ARRAY_TYPE
30553 || POINTER_TYPE_P (htype))
30554 {
30555 wtype = TREE_TYPE (wtype);
30556 htype = TREE_TYPE (htype);
30557 }
30558 }
30559 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30560 return sysv_va_list_type_node;
30561 wtype = ms_va_list_type_node;
30562 gcc_assert (wtype != NULL_TREE);
30563 htype = type;
30564 if (TREE_CODE (wtype) == ARRAY_TYPE)
30565 {
30566 /* If va_list is an array type, the argument may have decayed
30567 to a pointer type, e.g. by being passed to another function.
30568 In that case, unwrap both types so that we can compare the
30569 underlying records. */
30570 if (TREE_CODE (htype) == ARRAY_TYPE
30571 || POINTER_TYPE_P (htype))
30572 {
30573 wtype = TREE_TYPE (wtype);
30574 htype = TREE_TYPE (htype);
30575 }
30576 }
30577 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30578 return ms_va_list_type_node;
30579 return NULL_TREE;
30580 }
30581 return std_canonical_va_list_type (type);
30582 }
30583
30584 /* Iterate through the target-specific builtin types for va_list.
30585 IDX denotes the iterator, *PTREE is set to the va_list type node
30586 for that index, and *PNAME to the name of its builtin.
30587 Returns zero if there is no element for this index; otherwise
30588 IDX should be increased for the next call.
30589 Note that the base builtin name __builtin_va_list is not enumerated here.
30590 Used from c_common_nodes_and_builtins. */
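/* The expected usage is a loop of the form

      for (idx = 0; ix86_enum_va_list (idx, &name, &type) != 0; ++idx)
        ...register the builtin type NAME/TYPE...

   (illustrative sketch only; the actual caller reaches this function
   through the target hook rather than by name).  */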
30591
30592 static int
30593 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30594 {
30595 if (TARGET_64BIT)
30596 {
30597 switch (idx)
30598 {
30599 default:
30600 break;
30601
30602 case 0:
30603 *ptree = ms_va_list_type_node;
30604 *pname = "__builtin_ms_va_list";
30605 return 1;
30606
30607 case 1:
30608 *ptree = sysv_va_list_type_node;
30609 *pname = "__builtin_sysv_va_list";
30610 return 1;
30611 }
30612 }
30613
30614 return 0;
30615 }
30616
30617 /* Initialize the GCC target structure. */
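/* Each hook below is first #undef'd (removing the default provided by
   target-def.h) and then redefined to the i386-specific implementation;
   TARGET_INITIALIZER at the end of the file picks up the final values
   when filling in targetm.  */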
30618 #undef TARGET_RETURN_IN_MEMORY
30619 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30620
30621 #undef TARGET_LEGITIMIZE_ADDRESS
30622 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30623
30624 #undef TARGET_ATTRIBUTE_TABLE
30625 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30626 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30627 # undef TARGET_MERGE_DECL_ATTRIBUTES
30628 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30629 #endif
30630
30631 #undef TARGET_COMP_TYPE_ATTRIBUTES
30632 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30633
30634 #undef TARGET_INIT_BUILTINS
30635 #define TARGET_INIT_BUILTINS ix86_init_builtins
30636 #undef TARGET_BUILTIN_DECL
30637 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30638 #undef TARGET_EXPAND_BUILTIN
30639 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30640
30641 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30642 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30643 ix86_builtin_vectorized_function
30644
30645 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30646 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30647
30648 #undef TARGET_BUILTIN_RECIPROCAL
30649 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30650
30651 #undef TARGET_ASM_FUNCTION_EPILOGUE
30652 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30653
30654 #undef TARGET_ENCODE_SECTION_INFO
30655 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30656 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30657 #else
30658 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30659 #endif
30660
30661 #undef TARGET_ASM_OPEN_PAREN
30662 #define TARGET_ASM_OPEN_PAREN ""
30663 #undef TARGET_ASM_CLOSE_PAREN
30664 #define TARGET_ASM_CLOSE_PAREN ""
30665
30666 #undef TARGET_ASM_BYTE_OP
30667 #define TARGET_ASM_BYTE_OP ASM_BYTE
30668
30669 #undef TARGET_ASM_ALIGNED_HI_OP
30670 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30671 #undef TARGET_ASM_ALIGNED_SI_OP
30672 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30673 #ifdef ASM_QUAD
30674 #undef TARGET_ASM_ALIGNED_DI_OP
30675 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30676 #endif
30677
30678 #undef TARGET_ASM_UNALIGNED_HI_OP
30679 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30680 #undef TARGET_ASM_UNALIGNED_SI_OP
30681 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30682 #undef TARGET_ASM_UNALIGNED_DI_OP
30683 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30684
30685 #undef TARGET_PRINT_OPERAND
30686 #define TARGET_PRINT_OPERAND ix86_print_operand
30687 #undef TARGET_PRINT_OPERAND_ADDRESS
30688 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
30689 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
30690 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
30691
30692 #undef TARGET_SCHED_ADJUST_COST
30693 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30694 #undef TARGET_SCHED_ISSUE_RATE
30695 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30696 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30697 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30698 ia32_multipass_dfa_lookahead
30699
30700 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30701 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30702
30703 #ifdef HAVE_AS_TLS
30704 #undef TARGET_HAVE_TLS
30705 #define TARGET_HAVE_TLS true
30706 #endif
30707 #undef TARGET_CANNOT_FORCE_CONST_MEM
30708 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30709 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30710 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30711
30712 #undef TARGET_DELEGITIMIZE_ADDRESS
30713 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30714
30715 #undef TARGET_MS_BITFIELD_LAYOUT_P
30716 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30717
30718 #if TARGET_MACHO
30719 #undef TARGET_BINDS_LOCAL_P
30720 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30721 #endif
30722 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30723 #undef TARGET_BINDS_LOCAL_P
30724 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30725 #endif
30726
30727 #undef TARGET_ASM_OUTPUT_MI_THUNK
30728 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30729 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30730 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30731
30732 #undef TARGET_ASM_FILE_START
30733 #define TARGET_ASM_FILE_START x86_file_start
30734
30735 #undef TARGET_DEFAULT_TARGET_FLAGS
30736 #define TARGET_DEFAULT_TARGET_FLAGS \
30737 (TARGET_DEFAULT \
30738 | TARGET_SUBTARGET_DEFAULT \
30739 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30740 | MASK_FUSED_MADD)
30741
30742 #undef TARGET_HANDLE_OPTION
30743 #define TARGET_HANDLE_OPTION ix86_handle_option
30744
30745 #undef TARGET_RTX_COSTS
30746 #define TARGET_RTX_COSTS ix86_rtx_costs
30747 #undef TARGET_ADDRESS_COST
30748 #define TARGET_ADDRESS_COST ix86_address_cost
30749
30750 #undef TARGET_FIXED_CONDITION_CODE_REGS
30751 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30752 #undef TARGET_CC_MODES_COMPATIBLE
30753 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30754
30755 #undef TARGET_MACHINE_DEPENDENT_REORG
30756 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30757
30758 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30759 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30760
30761 #undef TARGET_BUILD_BUILTIN_VA_LIST
30762 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30763
30764 #undef TARGET_ENUM_VA_LIST_P
30765 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
30766
30767 #undef TARGET_FN_ABI_VA_LIST
30768 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30769
30770 #undef TARGET_CANONICAL_VA_LIST_TYPE
30771 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30772
30773 #undef TARGET_EXPAND_BUILTIN_VA_START
30774 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30775
30776 #undef TARGET_MD_ASM_CLOBBERS
30777 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30778
30779 #undef TARGET_PROMOTE_PROTOTYPES
30780 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30781 #undef TARGET_STRUCT_VALUE_RTX
30782 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30783 #undef TARGET_SETUP_INCOMING_VARARGS
30784 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30785 #undef TARGET_MUST_PASS_IN_STACK
30786 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30787 #undef TARGET_PASS_BY_REFERENCE
30788 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30789 #undef TARGET_INTERNAL_ARG_POINTER
30790 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30791 #undef TARGET_UPDATE_STACK_BOUNDARY
30792 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30793 #undef TARGET_GET_DRAP_RTX
30794 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30795 #undef TARGET_STRICT_ARGUMENT_NAMING
30796 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30797 #undef TARGET_STATIC_CHAIN
30798 #define TARGET_STATIC_CHAIN ix86_static_chain
30799 #undef TARGET_TRAMPOLINE_INIT
30800 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30801
30802 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30803 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30804
30805 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30806 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30807
30808 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30809 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30810
30811 #undef TARGET_C_MODE_FOR_SUFFIX
30812 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30813
30814 #ifdef HAVE_AS_TLS
30815 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30816 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30817 #endif
30818
30819 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30820 #undef TARGET_INSERT_ATTRIBUTES
30821 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30822 #endif
30823
30824 #undef TARGET_MANGLE_TYPE
30825 #define TARGET_MANGLE_TYPE ix86_mangle_type
30826
30827 #undef TARGET_STACK_PROTECT_FAIL
30828 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30829
30830 #undef TARGET_FUNCTION_VALUE
30831 #define TARGET_FUNCTION_VALUE ix86_function_value
30832
30833 #undef TARGET_FUNCTION_VALUE_REGNO_P
30834 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
30835
30836 #undef TARGET_SECONDARY_RELOAD
30837 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30838
30839 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30840 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30841 ix86_builtin_vectorization_cost
30842 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30843 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30844 ix86_vectorize_builtin_vec_perm
30845 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30846 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30847 ix86_vectorize_builtin_vec_perm_ok
30848
30849 #undef TARGET_SET_CURRENT_FUNCTION
30850 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30851
30852 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30853 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30854
30855 #undef TARGET_OPTION_SAVE
30856 #define TARGET_OPTION_SAVE ix86_function_specific_save
30857
30858 #undef TARGET_OPTION_RESTORE
30859 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30860
30861 #undef TARGET_OPTION_PRINT
30862 #define TARGET_OPTION_PRINT ix86_function_specific_print
30863
30864 #undef TARGET_CAN_INLINE_P
30865 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30866
30867 #undef TARGET_EXPAND_TO_RTL_HOOK
30868 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30869
30870 #undef TARGET_LEGITIMATE_ADDRESS_P
30871 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30872
30873 #undef TARGET_IRA_COVER_CLASSES
30874 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30875
30876 #undef TARGET_FRAME_POINTER_REQUIRED
30877 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30878
30879 #undef TARGET_CAN_ELIMINATE
30880 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30881
30882 #undef TARGET_ASM_CODE_END
30883 #define TARGET_ASM_CODE_END ix86_code_end
30884
30885 struct gcc_target targetm = TARGET_INITIALIZER;
30886 \f
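/* Include the garbage-collector root tables generated for this file by
   gengtype.  */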
30887 #include "gt-i386.h"