1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
5 at Cygnus Support.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "target.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "memmodel.h"
33 #include "gimple.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38 #include "expmed.h"
39 #include "optabs.h"
40 #include "regs.h"
41 #include "emit-rtl.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "alias.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "varasm.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "explow.h"
52 #include "expr.h"
53 #include "debug.h"
54 #include "cfgrtl.h"
55 #include "common/common-target.h"
56 #include "gimplify.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "params.h"
60 #include "tree-pass.h"
61 #include "context.h"
62 #include "builtins.h"
63 #include "tree-vector-builder.h"
64
65 /* This file should be included last. */
66 #include "target-def.h"
67
68 /* Processor costs */
69
70 struct processor_costs {
71 /* Integer load */
72 const int int_load;
73
74 /* Integer signed load */
75 const int int_sload;
76
77 /* Integer zeroed load */
78 const int int_zload;
79
80 /* Float load */
81 const int float_load;
82
83 /* fmov, fneg, fabs */
84 const int float_move;
85
86 /* fadd, fsub */
87 const int float_plusminus;
88
89 /* fcmp */
90 const int float_cmp;
91
92 /* fmov, fmovr */
93 const int float_cmove;
94
95 /* fmul */
96 const int float_mul;
97
98 /* fdivs */
99 const int float_div_sf;
100
101 /* fdivd */
102 const int float_div_df;
103
104 /* fsqrts */
105 const int float_sqrt_sf;
106
107 /* fsqrtd */
108 const int float_sqrt_df;
109
110 /* umul/smul */
111 const int int_mul;
112
113 /* mulX */
114 const int int_mulX;
115
116 /* integer multiply cost for each bit set past the most
117 significant 3, so the formula for multiply cost becomes:
118
119 if (rs1 < 0)
120 highest_bit = highest_clear_bit(rs1);
121 else
122 highest_bit = highest_set_bit(rs1);
123 if (highest_bit < 3)
124 highest_bit = 3;
125 cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
126
127 A value of zero indicates that the multiply cost is fixed
128 and not variable. */
129 const int int_mul_bit_factor;
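/* A purely illustrative example of the formula above: taking the
   ultrasparc entry further down (int_mul = COSTS_N_INSNS (4),
   int_mul_bit_factor = 2), a multiply whose positive rs1 operand has
   its highest set bit at position 11 would cost
   COSTS_N_INSNS (4) + (11 - 3) / 2, i.e. COSTS_N_INSNS (4) plus 4.  */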
130
131 /* udiv/sdiv */
132 const int int_div;
133
134 /* divX */
135 const int int_divX;
136
137 /* movcc, movr */
138 const int int_cmove;
139
140 /* penalty for shifts, due to scheduling rules etc. */
141 const int shift_penalty;
142
143 /* cost of a (predictable) branch. */
144 const int branch_cost;
145 };
146
147 static const
148 struct processor_costs cypress_costs = {
149 COSTS_N_INSNS (2), /* int load */
150 COSTS_N_INSNS (2), /* int signed load */
151 COSTS_N_INSNS (2), /* int zeroed load */
152 COSTS_N_INSNS (2), /* float load */
153 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
154 COSTS_N_INSNS (5), /* fadd, fsub */
155 COSTS_N_INSNS (1), /* fcmp */
156 COSTS_N_INSNS (1), /* fmov, fmovr */
157 COSTS_N_INSNS (7), /* fmul */
158 COSTS_N_INSNS (37), /* fdivs */
159 COSTS_N_INSNS (37), /* fdivd */
160 COSTS_N_INSNS (63), /* fsqrts */
161 COSTS_N_INSNS (63), /* fsqrtd */
162 COSTS_N_INSNS (1), /* imul */
163 COSTS_N_INSNS (1), /* imulX */
164 0, /* imul bit factor */
165 COSTS_N_INSNS (1), /* idiv */
166 COSTS_N_INSNS (1), /* idivX */
167 COSTS_N_INSNS (1), /* movcc/movr */
168 0, /* shift penalty */
169 3 /* branch cost */
170 };
171
172 static const
173 struct processor_costs supersparc_costs = {
174 COSTS_N_INSNS (1), /* int load */
175 COSTS_N_INSNS (1), /* int signed load */
176 COSTS_N_INSNS (1), /* int zeroed load */
177 COSTS_N_INSNS (0), /* float load */
178 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
179 COSTS_N_INSNS (3), /* fadd, fsub */
180 COSTS_N_INSNS (3), /* fcmp */
181 COSTS_N_INSNS (1), /* fmov, fmovr */
182 COSTS_N_INSNS (3), /* fmul */
183 COSTS_N_INSNS (6), /* fdivs */
184 COSTS_N_INSNS (9), /* fdivd */
185 COSTS_N_INSNS (12), /* fsqrts */
186 COSTS_N_INSNS (12), /* fsqrtd */
187 COSTS_N_INSNS (4), /* imul */
188 COSTS_N_INSNS (4), /* imulX */
189 0, /* imul bit factor */
190 COSTS_N_INSNS (4), /* idiv */
191 COSTS_N_INSNS (4), /* idivX */
192 COSTS_N_INSNS (1), /* movcc/movr */
193 1, /* shift penalty */
194 3 /* branch cost */
195 };
196
197 static const
198 struct processor_costs hypersparc_costs = {
199 COSTS_N_INSNS (1), /* int load */
200 COSTS_N_INSNS (1), /* int signed load */
201 COSTS_N_INSNS (1), /* int zeroed load */
202 COSTS_N_INSNS (1), /* float load */
203 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
204 COSTS_N_INSNS (1), /* fadd, fsub */
205 COSTS_N_INSNS (1), /* fcmp */
206 COSTS_N_INSNS (1), /* fmov, fmovr */
207 COSTS_N_INSNS (1), /* fmul */
208 COSTS_N_INSNS (8), /* fdivs */
209 COSTS_N_INSNS (12), /* fdivd */
210 COSTS_N_INSNS (17), /* fsqrts */
211 COSTS_N_INSNS (17), /* fsqrtd */
212 COSTS_N_INSNS (17), /* imul */
213 COSTS_N_INSNS (17), /* imulX */
214 0, /* imul bit factor */
215 COSTS_N_INSNS (17), /* idiv */
216 COSTS_N_INSNS (17), /* idivX */
217 COSTS_N_INSNS (1), /* movcc/movr */
218 0, /* shift penalty */
219 3 /* branch cost */
220 };
221
222 static const
223 struct processor_costs leon_costs = {
224 COSTS_N_INSNS (1), /* int load */
225 COSTS_N_INSNS (1), /* int signed load */
226 COSTS_N_INSNS (1), /* int zeroed load */
227 COSTS_N_INSNS (1), /* float load */
228 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
229 COSTS_N_INSNS (1), /* fadd, fsub */
230 COSTS_N_INSNS (1), /* fcmp */
231 COSTS_N_INSNS (1), /* fmov, fmovr */
232 COSTS_N_INSNS (1), /* fmul */
233 COSTS_N_INSNS (15), /* fdivs */
234 COSTS_N_INSNS (15), /* fdivd */
235 COSTS_N_INSNS (23), /* fsqrts */
236 COSTS_N_INSNS (23), /* fsqrtd */
237 COSTS_N_INSNS (5), /* imul */
238 COSTS_N_INSNS (5), /* imulX */
239 0, /* imul bit factor */
240 COSTS_N_INSNS (5), /* idiv */
241 COSTS_N_INSNS (5), /* idivX */
242 COSTS_N_INSNS (1), /* movcc/movr */
243 0, /* shift penalty */
244 3 /* branch cost */
245 };
246
247 static const
248 struct processor_costs leon3_costs = {
249 COSTS_N_INSNS (1), /* int load */
250 COSTS_N_INSNS (1), /* int signed load */
251 COSTS_N_INSNS (1), /* int zeroed load */
252 COSTS_N_INSNS (1), /* float load */
253 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
254 COSTS_N_INSNS (1), /* fadd, fsub */
255 COSTS_N_INSNS (1), /* fcmp */
256 COSTS_N_INSNS (1), /* fmov, fmovr */
257 COSTS_N_INSNS (1), /* fmul */
258 COSTS_N_INSNS (14), /* fdivs */
259 COSTS_N_INSNS (15), /* fdivd */
260 COSTS_N_INSNS (22), /* fsqrts */
261 COSTS_N_INSNS (23), /* fsqrtd */
262 COSTS_N_INSNS (5), /* imul */
263 COSTS_N_INSNS (5), /* imulX */
264 0, /* imul bit factor */
265 COSTS_N_INSNS (35), /* idiv */
266 COSTS_N_INSNS (35), /* idivX */
267 COSTS_N_INSNS (1), /* movcc/movr */
268 0, /* shift penalty */
269 3 /* branch cost */
270 };
271
272 static const
273 struct processor_costs sparclet_costs = {
274 COSTS_N_INSNS (3), /* int load */
275 COSTS_N_INSNS (3), /* int signed load */
276 COSTS_N_INSNS (1), /* int zeroed load */
277 COSTS_N_INSNS (1), /* float load */
278 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
279 COSTS_N_INSNS (1), /* fadd, fsub */
280 COSTS_N_INSNS (1), /* fcmp */
281 COSTS_N_INSNS (1), /* fmov, fmovr */
282 COSTS_N_INSNS (1), /* fmul */
283 COSTS_N_INSNS (1), /* fdivs */
284 COSTS_N_INSNS (1), /* fdivd */
285 COSTS_N_INSNS (1), /* fsqrts */
286 COSTS_N_INSNS (1), /* fsqrtd */
287 COSTS_N_INSNS (5), /* imul */
288 COSTS_N_INSNS (5), /* imulX */
289 0, /* imul bit factor */
290 COSTS_N_INSNS (5), /* idiv */
291 COSTS_N_INSNS (5), /* idivX */
292 COSTS_N_INSNS (1), /* movcc/movr */
293 0, /* shift penalty */
294 3 /* branch cost */
295 };
296
297 static const
298 struct processor_costs ultrasparc_costs = {
299 COSTS_N_INSNS (2), /* int load */
300 COSTS_N_INSNS (3), /* int signed load */
301 COSTS_N_INSNS (2), /* int zeroed load */
302 COSTS_N_INSNS (2), /* float load */
303 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
304 COSTS_N_INSNS (4), /* fadd, fsub */
305 COSTS_N_INSNS (1), /* fcmp */
306 COSTS_N_INSNS (2), /* fmov, fmovr */
307 COSTS_N_INSNS (4), /* fmul */
308 COSTS_N_INSNS (13), /* fdivs */
309 COSTS_N_INSNS (23), /* fdivd */
310 COSTS_N_INSNS (13), /* fsqrts */
311 COSTS_N_INSNS (23), /* fsqrtd */
312 COSTS_N_INSNS (4), /* imul */
313 COSTS_N_INSNS (4), /* imulX */
314 2, /* imul bit factor */
315 COSTS_N_INSNS (37), /* idiv */
316 COSTS_N_INSNS (68), /* idivX */
317 COSTS_N_INSNS (2), /* movcc/movr */
318 2, /* shift penalty */
319 2 /* branch cost */
320 };
321
322 static const
323 struct processor_costs ultrasparc3_costs = {
324 COSTS_N_INSNS (2), /* int load */
325 COSTS_N_INSNS (3), /* int signed load */
326 COSTS_N_INSNS (3), /* int zeroed load */
327 COSTS_N_INSNS (2), /* float load */
328 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
329 COSTS_N_INSNS (4), /* fadd, fsub */
330 COSTS_N_INSNS (5), /* fcmp */
331 COSTS_N_INSNS (3), /* fmov, fmovr */
332 COSTS_N_INSNS (4), /* fmul */
333 COSTS_N_INSNS (17), /* fdivs */
334 COSTS_N_INSNS (20), /* fdivd */
335 COSTS_N_INSNS (20), /* fsqrts */
336 COSTS_N_INSNS (29), /* fsqrtd */
337 COSTS_N_INSNS (6), /* imul */
338 COSTS_N_INSNS (6), /* imulX */
339 0, /* imul bit factor */
340 COSTS_N_INSNS (40), /* idiv */
341 COSTS_N_INSNS (71), /* idivX */
342 COSTS_N_INSNS (2), /* movcc/movr */
343 0, /* shift penalty */
344 2 /* branch cost */
345 };
346
347 static const
348 struct processor_costs niagara_costs = {
349 COSTS_N_INSNS (3), /* int load */
350 COSTS_N_INSNS (3), /* int signed load */
351 COSTS_N_INSNS (3), /* int zeroed load */
352 COSTS_N_INSNS (9), /* float load */
353 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
354 COSTS_N_INSNS (8), /* fadd, fsub */
355 COSTS_N_INSNS (26), /* fcmp */
356 COSTS_N_INSNS (8), /* fmov, fmovr */
357 COSTS_N_INSNS (29), /* fmul */
358 COSTS_N_INSNS (54), /* fdivs */
359 COSTS_N_INSNS (83), /* fdivd */
360 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
361 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
362 COSTS_N_INSNS (11), /* imul */
363 COSTS_N_INSNS (11), /* imulX */
364 0, /* imul bit factor */
365 COSTS_N_INSNS (72), /* idiv */
366 COSTS_N_INSNS (72), /* idivX */
367 COSTS_N_INSNS (1), /* movcc/movr */
368 0, /* shift penalty */
369 4 /* branch cost */
370 };
371
372 static const
373 struct processor_costs niagara2_costs = {
374 COSTS_N_INSNS (3), /* int load */
375 COSTS_N_INSNS (3), /* int signed load */
376 COSTS_N_INSNS (3), /* int zeroed load */
377 COSTS_N_INSNS (3), /* float load */
378 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
379 COSTS_N_INSNS (6), /* fadd, fsub */
380 COSTS_N_INSNS (6), /* fcmp */
381 COSTS_N_INSNS (6), /* fmov, fmovr */
382 COSTS_N_INSNS (6), /* fmul */
383 COSTS_N_INSNS (19), /* fdivs */
384 COSTS_N_INSNS (33), /* fdivd */
385 COSTS_N_INSNS (19), /* fsqrts */
386 COSTS_N_INSNS (33), /* fsqrtd */
387 COSTS_N_INSNS (5), /* imul */
388 COSTS_N_INSNS (5), /* imulX */
389 0, /* imul bit factor */
390 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
391 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
392 COSTS_N_INSNS (1), /* movcc/movr */
393 0, /* shift penalty */
394 5 /* branch cost */
395 };
396
397 static const
398 struct processor_costs niagara3_costs = {
399 COSTS_N_INSNS (3), /* int load */
400 COSTS_N_INSNS (3), /* int signed load */
401 COSTS_N_INSNS (3), /* int zeroed load */
402 COSTS_N_INSNS (3), /* float load */
403 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
404 COSTS_N_INSNS (9), /* fadd, fsub */
405 COSTS_N_INSNS (9), /* fcmp */
406 COSTS_N_INSNS (9), /* fmov, fmovr */
407 COSTS_N_INSNS (9), /* fmul */
408 COSTS_N_INSNS (23), /* fdivs */
409 COSTS_N_INSNS (37), /* fdivd */
410 COSTS_N_INSNS (23), /* fsqrts */
411 COSTS_N_INSNS (37), /* fsqrtd */
412 COSTS_N_INSNS (9), /* imul */
413 COSTS_N_INSNS (9), /* imulX */
414 0, /* imul bit factor */
415 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
416 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
417 COSTS_N_INSNS (1), /* movcc/movr */
418 0, /* shift penalty */
419 5 /* branch cost */
420 };
421
422 static const
423 struct processor_costs niagara4_costs = {
424 COSTS_N_INSNS (5), /* int load */
425 COSTS_N_INSNS (5), /* int signed load */
426 COSTS_N_INSNS (5), /* int zeroed load */
427 COSTS_N_INSNS (5), /* float load */
428 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
429 COSTS_N_INSNS (11), /* fadd, fsub */
430 COSTS_N_INSNS (11), /* fcmp */
431 COSTS_N_INSNS (11), /* fmov, fmovr */
432 COSTS_N_INSNS (11), /* fmul */
433 COSTS_N_INSNS (24), /* fdivs */
434 COSTS_N_INSNS (37), /* fdivd */
435 COSTS_N_INSNS (24), /* fsqrts */
436 COSTS_N_INSNS (37), /* fsqrtd */
437 COSTS_N_INSNS (12), /* imul */
438 COSTS_N_INSNS (12), /* imulX */
439 0, /* imul bit factor */
440 COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
441 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
442 COSTS_N_INSNS (1), /* movcc/movr */
443 0, /* shift penalty */
444 2 /* branch cost */
445 };
446
447 static const
448 struct processor_costs niagara7_costs = {
449 COSTS_N_INSNS (5), /* int load */
450 COSTS_N_INSNS (5), /* int signed load */
451 COSTS_N_INSNS (5), /* int zeroed load */
452 COSTS_N_INSNS (5), /* float load */
453 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
454 COSTS_N_INSNS (11), /* fadd, fsub */
455 COSTS_N_INSNS (11), /* fcmp */
456 COSTS_N_INSNS (11), /* fmov, fmovr */
457 COSTS_N_INSNS (11), /* fmul */
458 COSTS_N_INSNS (24), /* fdivs */
459 COSTS_N_INSNS (37), /* fdivd */
460 COSTS_N_INSNS (24), /* fsqrts */
461 COSTS_N_INSNS (37), /* fsqrtd */
462 COSTS_N_INSNS (12), /* imul */
463 COSTS_N_INSNS (12), /* imulX */
464 0, /* imul bit factor */
465 COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
466 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
467 COSTS_N_INSNS (1), /* movcc/movr */
468 0, /* shift penalty */
469 1 /* branch cost */
470 };
471
472 static const
473 struct processor_costs m8_costs = {
474 COSTS_N_INSNS (3), /* int load */
475 COSTS_N_INSNS (3), /* int signed load */
476 COSTS_N_INSNS (3), /* int zeroed load */
477 COSTS_N_INSNS (3), /* float load */
478 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
479 COSTS_N_INSNS (9), /* fadd, fsub */
480 COSTS_N_INSNS (9), /* fcmp */
481 COSTS_N_INSNS (9), /* fmov, fmovr */
482 COSTS_N_INSNS (9), /* fmul */
483 COSTS_N_INSNS (26), /* fdivs */
484 COSTS_N_INSNS (30), /* fdivd */
485 COSTS_N_INSNS (33), /* fsqrts */
486 COSTS_N_INSNS (41), /* fsqrtd */
487 COSTS_N_INSNS (12), /* imul */
488 COSTS_N_INSNS (10), /* imulX */
489 0, /* imul bit factor */
490 COSTS_N_INSNS (57), /* udiv/sdiv */
491 COSTS_N_INSNS (30), /* udivx/sdivx */
492 COSTS_N_INSNS (1), /* movcc/movr */
493 0, /* shift penalty */
494 1 /* branch cost */
495 };
496
497 static const struct processor_costs *sparc_costs = &cypress_costs;
498
499 #ifdef HAVE_AS_RELAX_OPTION
500 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
501 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
502 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
503 somebody branches between the sethi and the jmp. */
504 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
505 #else
506 #define LEAF_SIBCALL_SLOT_RESERVED_P \
507 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
508 #endif
509
510 /* Vector to say how input registers are mapped to output registers.
511 HARD_FRAME_POINTER_REGNUM cannot be remapped by this vector to
512 eliminate it; you must use -fomit-frame-pointer to get that. */
513 char leaf_reg_remap[] =
514 { 0, 1, 2, 3, 4, 5, 6, 7,
515 -1, -1, -1, -1, -1, -1, 14, -1,
516 -1, -1, -1, -1, -1, -1, -1, -1,
517 8, 9, 10, 11, 12, 13, -1, 15,
518
519 32, 33, 34, 35, 36, 37, 38, 39,
520 40, 41, 42, 43, 44, 45, 46, 47,
521 48, 49, 50, 51, 52, 53, 54, 55,
522 56, 57, 58, 59, 60, 61, 62, 63,
523 64, 65, 66, 67, 68, 69, 70, 71,
524 72, 73, 74, 75, 76, 77, 78, 79,
525 80, 81, 82, 83, 84, 85, 86, 87,
526 88, 89, 90, 91, 92, 93, 94, 95,
527 96, 97, 98, 99, 100, 101, 102};
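/* A purely illustrative reading of the table: incoming register %i0
   (hard reg 24) is remapped to outgoing register %o0 (hard reg 8) and
   %i7 (31) to %o7 (15); the globals and the FP registers map to
   themselves, and entries of -1 mark registers for which no remapping
   exists (cf. sparc_leaf_regs below).  */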
528
529 /* Vector, indexed by hard register number, which contains 1
530 for a register that is allowable in a candidate for leaf
531 function treatment. */
532 char sparc_leaf_regs[] =
533 { 1, 1, 1, 1, 1, 1, 1, 1,
534 0, 0, 0, 0, 0, 0, 1, 0,
535 0, 0, 0, 0, 0, 0, 0, 0,
536 1, 1, 1, 1, 1, 1, 0, 1,
537 1, 1, 1, 1, 1, 1, 1, 1,
538 1, 1, 1, 1, 1, 1, 1, 1,
539 1, 1, 1, 1, 1, 1, 1, 1,
540 1, 1, 1, 1, 1, 1, 1, 1,
541 1, 1, 1, 1, 1, 1, 1, 1,
542 1, 1, 1, 1, 1, 1, 1, 1,
543 1, 1, 1, 1, 1, 1, 1, 1,
544 1, 1, 1, 1, 1, 1, 1, 1,
545 1, 1, 1, 1, 1, 1, 1};
546
547 struct GTY(()) machine_function
548 {
549 /* Size of the frame of the function. */
550 HOST_WIDE_INT frame_size;
551
552 /* Size of the frame of the function minus the register window save area
553 and the outgoing argument area. */
554 HOST_WIDE_INT apparent_frame_size;
555
556 /* Register we pretend the frame pointer is allocated to. Normally, this
557 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
558 record "offset" separately as it may be too big for (reg + disp). */
559 rtx frame_base_reg;
560 HOST_WIDE_INT frame_base_offset;
561
562 /* Number of global or FP registers to be saved (as 4-byte quantities). */
563 int n_global_fp_regs;
564
565 /* True if the current function is leaf and uses only leaf regs,
566 so that the SPARC leaf function optimization can be applied.
567 Private version of crtl->uses_only_leaf_regs, see
568 sparc_expand_prologue for the rationale. */
569 int leaf_function_p;
570
571 /* True if the prologue saves local or in registers. */
572 bool save_local_in_regs_p;
573
574 /* True if the data calculated by sparc_expand_prologue are valid. */
575 bool prologue_data_valid_p;
576 };
577
578 #define sparc_frame_size cfun->machine->frame_size
579 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
580 #define sparc_frame_base_reg cfun->machine->frame_base_reg
581 #define sparc_frame_base_offset cfun->machine->frame_base_offset
582 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
583 #define sparc_leaf_function_p cfun->machine->leaf_function_p
584 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
585 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
586
587 /* 1 if the next opcode is to be specially indented. */
588 int sparc_indent_opcode = 0;
589
590 static void sparc_option_override (void);
591 static void sparc_init_modes (void);
592 static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
593 const_tree, bool, bool, int *, int *);
594
595 static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
596 static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
597
598 static void sparc_emit_set_const32 (rtx, rtx);
599 static void sparc_emit_set_const64 (rtx, rtx);
600 static void sparc_output_addr_vec (rtx);
601 static void sparc_output_addr_diff_vec (rtx);
602 static void sparc_output_deferred_case_vectors (void);
603 static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
604 static bool sparc_legitimate_constant_p (machine_mode, rtx);
605 static rtx sparc_builtin_saveregs (void);
606 static int epilogue_renumber (rtx *, int);
607 static bool sparc_assemble_integer (rtx, unsigned int, int);
608 static int set_extends (rtx_insn *);
609 static void sparc_asm_function_prologue (FILE *);
610 static void sparc_asm_function_epilogue (FILE *);
611 #ifdef TARGET_SOLARIS
612 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
613 tree) ATTRIBUTE_UNUSED;
614 #endif
615 static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
616 static int sparc_issue_rate (void);
617 static void sparc_sched_init (FILE *, int, int);
618 static int sparc_use_sched_lookahead (void);
619
620 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
621 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
622 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
623 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
624 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
625
626 static bool sparc_function_ok_for_sibcall (tree, tree);
627 static void sparc_init_libfuncs (void);
628 static void sparc_init_builtins (void);
629 static void sparc_fpu_init_builtins (void);
630 static void sparc_vis_init_builtins (void);
631 static tree sparc_builtin_decl (unsigned, bool);
632 static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
633 static tree sparc_fold_builtin (tree, int, tree *, bool);
634 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
635 HOST_WIDE_INT, tree);
636 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
637 HOST_WIDE_INT, const_tree);
638 static struct machine_function * sparc_init_machine_status (void);
639 static bool sparc_cannot_force_const_mem (machine_mode, rtx);
640 static rtx sparc_tls_get_addr (void);
641 static rtx sparc_tls_got (void);
642 static int sparc_register_move_cost (machine_mode,
643 reg_class_t, reg_class_t);
644 static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
645 static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
646 int *, const_tree, int);
647 static bool sparc_strict_argument_naming (cumulative_args_t);
648 static void sparc_va_start (tree, rtx);
649 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
650 static bool sparc_vector_mode_supported_p (machine_mode);
651 static bool sparc_tls_referenced_p (rtx);
652 static rtx sparc_legitimize_tls_address (rtx);
653 static rtx sparc_legitimize_pic_address (rtx, rtx);
654 static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
655 static rtx sparc_delegitimize_address (rtx);
656 static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
657 static bool sparc_pass_by_reference (cumulative_args_t,
658 const function_arg_info &);
659 static void sparc_function_arg_advance (cumulative_args_t,
660 const function_arg_info &);
661 static rtx sparc_function_arg (cumulative_args_t, const function_arg_info &);
662 static rtx sparc_function_incoming_arg (cumulative_args_t,
663 const function_arg_info &);
664 static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
665 static unsigned int sparc_function_arg_boundary (machine_mode,
666 const_tree);
667 static int sparc_arg_partial_bytes (cumulative_args_t,
668 const function_arg_info &);
669 static bool sparc_return_in_memory (const_tree, const_tree);
670 static rtx sparc_struct_value_rtx (tree, int);
671 static rtx sparc_function_value (const_tree, const_tree, bool);
672 static rtx sparc_libcall_value (machine_mode, const_rtx);
673 static bool sparc_function_value_regno_p (const unsigned int);
674 static unsigned HOST_WIDE_INT sparc_asan_shadow_offset (void);
675 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
676 static void sparc_file_end (void);
677 static bool sparc_frame_pointer_required (void);
678 static bool sparc_can_eliminate (const int, const int);
679 static void sparc_conditional_register_usage (void);
680 static bool sparc_use_pseudo_pic_reg (void);
681 static void sparc_init_pic_reg (void);
682 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
683 static const char *sparc_mangle_type (const_tree);
684 #endif
685 static void sparc_trampoline_init (rtx, tree, rtx);
686 static machine_mode sparc_preferred_simd_mode (scalar_mode);
687 static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
688 static bool sparc_lra_p (void);
689 static bool sparc_print_operand_punct_valid_p (unsigned char);
690 static void sparc_print_operand (FILE *, rtx, int);
691 static void sparc_print_operand_address (FILE *, machine_mode, rtx);
692 static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
693 machine_mode,
694 secondary_reload_info *);
695 static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
696 reg_class_t);
697 static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
698 static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
699 static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
700 static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
701 static unsigned int sparc_min_arithmetic_precision (void);
702 static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
703 static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
704 static bool sparc_modes_tieable_p (machine_mode, machine_mode);
705 static bool sparc_can_change_mode_class (machine_mode, machine_mode,
706 reg_class_t);
707 static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
708 static bool sparc_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
709 const vec_perm_indices &);
710 static bool sparc_can_follow_jump (const rtx_insn *, const rtx_insn *);
711 \f
712 #ifdef SUBTARGET_ATTRIBUTE_TABLE
713 /* Table of valid machine attributes. */
714 static const struct attribute_spec sparc_attribute_table[] =
715 {
716 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
717 do_diagnostic, handler, exclude } */
718 SUBTARGET_ATTRIBUTE_TABLE,
719 { NULL, 0, 0, false, false, false, false, NULL, NULL }
720 };
721 #endif
722 \f
723 char sparc_hard_reg_printed[8];
724
725 /* Initialize the GCC target structure. */
726
727 /* The default is to use .half rather than .short for aligned HI objects. */
728 #undef TARGET_ASM_ALIGNED_HI_OP
729 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
730
731 #undef TARGET_ASM_UNALIGNED_HI_OP
732 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
733 #undef TARGET_ASM_UNALIGNED_SI_OP
734 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
735 #undef TARGET_ASM_UNALIGNED_DI_OP
736 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
737
738 /* The target hook has to handle DI-mode values. */
739 #undef TARGET_ASM_INTEGER
740 #define TARGET_ASM_INTEGER sparc_assemble_integer
741
742 #undef TARGET_ASM_FUNCTION_PROLOGUE
743 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
744 #undef TARGET_ASM_FUNCTION_EPILOGUE
745 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
746
747 #undef TARGET_SCHED_ADJUST_COST
748 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
749 #undef TARGET_SCHED_ISSUE_RATE
750 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
751 #undef TARGET_SCHED_INIT
752 #define TARGET_SCHED_INIT sparc_sched_init
753 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
754 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
755
756 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
757 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
758
759 #undef TARGET_INIT_LIBFUNCS
760 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
761
762 #undef TARGET_LEGITIMIZE_ADDRESS
763 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
764 #undef TARGET_DELEGITIMIZE_ADDRESS
765 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
766 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
767 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
768
769 #undef TARGET_INIT_BUILTINS
770 #define TARGET_INIT_BUILTINS sparc_init_builtins
771 #undef TARGET_BUILTIN_DECL
772 #define TARGET_BUILTIN_DECL sparc_builtin_decl
773 #undef TARGET_EXPAND_BUILTIN
774 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
775 #undef TARGET_FOLD_BUILTIN
776 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
777
778 #if TARGET_TLS
779 #undef TARGET_HAVE_TLS
780 #define TARGET_HAVE_TLS true
781 #endif
782
783 #undef TARGET_CANNOT_FORCE_CONST_MEM
784 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
785
786 #undef TARGET_ASM_OUTPUT_MI_THUNK
787 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
788 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
789 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
790
791 #undef TARGET_RTX_COSTS
792 #define TARGET_RTX_COSTS sparc_rtx_costs
793 #undef TARGET_ADDRESS_COST
794 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
795 #undef TARGET_REGISTER_MOVE_COST
796 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
797
798 #undef TARGET_PROMOTE_FUNCTION_MODE
799 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
800 #undef TARGET_STRICT_ARGUMENT_NAMING
801 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
802
803 #undef TARGET_MUST_PASS_IN_STACK
804 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
805 #undef TARGET_PASS_BY_REFERENCE
806 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
807 #undef TARGET_ARG_PARTIAL_BYTES
808 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
809 #undef TARGET_FUNCTION_ARG_ADVANCE
810 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
811 #undef TARGET_FUNCTION_ARG
812 #define TARGET_FUNCTION_ARG sparc_function_arg
813 #undef TARGET_FUNCTION_INCOMING_ARG
814 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
815 #undef TARGET_FUNCTION_ARG_PADDING
816 #define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
817 #undef TARGET_FUNCTION_ARG_BOUNDARY
818 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
819
820 #undef TARGET_RETURN_IN_MEMORY
821 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
822 #undef TARGET_STRUCT_VALUE_RTX
823 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
824 #undef TARGET_FUNCTION_VALUE
825 #define TARGET_FUNCTION_VALUE sparc_function_value
826 #undef TARGET_LIBCALL_VALUE
827 #define TARGET_LIBCALL_VALUE sparc_libcall_value
828 #undef TARGET_FUNCTION_VALUE_REGNO_P
829 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
830
831 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
832 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
833
834 #undef TARGET_ASAN_SHADOW_OFFSET
835 #define TARGET_ASAN_SHADOW_OFFSET sparc_asan_shadow_offset
836
837 #undef TARGET_EXPAND_BUILTIN_VA_START
838 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
839 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
840 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
841
842 #undef TARGET_VECTOR_MODE_SUPPORTED_P
843 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
844
845 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
846 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
847
848 #ifdef SUBTARGET_INSERT_ATTRIBUTES
849 #undef TARGET_INSERT_ATTRIBUTES
850 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
851 #endif
852
853 #ifdef SUBTARGET_ATTRIBUTE_TABLE
854 #undef TARGET_ATTRIBUTE_TABLE
855 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
856 #endif
857
858 #undef TARGET_OPTION_OVERRIDE
859 #define TARGET_OPTION_OVERRIDE sparc_option_override
860
861 #ifdef TARGET_THREAD_SSP_OFFSET
862 #undef TARGET_STACK_PROTECT_GUARD
863 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
864 #endif
865
866 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
867 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
868 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
869 #endif
870
871 #undef TARGET_ASM_FILE_END
872 #define TARGET_ASM_FILE_END sparc_file_end
873
874 #undef TARGET_FRAME_POINTER_REQUIRED
875 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
876
877 #undef TARGET_CAN_ELIMINATE
878 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
879
880 #undef TARGET_PREFERRED_RELOAD_CLASS
881 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
882
883 #undef TARGET_SECONDARY_RELOAD
884 #define TARGET_SECONDARY_RELOAD sparc_secondary_reload
885 #undef TARGET_SECONDARY_MEMORY_NEEDED
886 #define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
887 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
888 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode
889
890 #undef TARGET_CONDITIONAL_REGISTER_USAGE
891 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
892
893 #undef TARGET_INIT_PIC_REG
894 #define TARGET_INIT_PIC_REG sparc_init_pic_reg
895
896 #undef TARGET_USE_PSEUDO_PIC_REG
897 #define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg
898
899 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
900 #undef TARGET_MANGLE_TYPE
901 #define TARGET_MANGLE_TYPE sparc_mangle_type
902 #endif
903
904 #undef TARGET_LRA_P
905 #define TARGET_LRA_P sparc_lra_p
906
907 #undef TARGET_LEGITIMATE_ADDRESS_P
908 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
909
910 #undef TARGET_LEGITIMATE_CONSTANT_P
911 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
912
913 #undef TARGET_TRAMPOLINE_INIT
914 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
915
916 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
917 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
918 #undef TARGET_PRINT_OPERAND
919 #define TARGET_PRINT_OPERAND sparc_print_operand
920 #undef TARGET_PRINT_OPERAND_ADDRESS
921 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
922
923 /* The value stored by LDSTUB. */
924 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
925 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff
926
927 #undef TARGET_CSTORE_MODE
928 #define TARGET_CSTORE_MODE sparc_cstore_mode
929
930 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
931 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
932
933 #undef TARGET_FIXED_CONDITION_CODE_REGS
934 #define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs
935
936 #undef TARGET_MIN_ARITHMETIC_PRECISION
937 #define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision
938
939 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
940 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
941
942 #undef TARGET_HARD_REGNO_NREGS
943 #define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
944 #undef TARGET_HARD_REGNO_MODE_OK
945 #define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok
946
947 #undef TARGET_MODES_TIEABLE_P
948 #define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p
949
950 #undef TARGET_CAN_CHANGE_MODE_CLASS
951 #define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class
952
953 #undef TARGET_CONSTANT_ALIGNMENT
954 #define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment
955
956 #undef TARGET_VECTORIZE_VEC_PERM_CONST
957 #define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const
958
959 #undef TARGET_CAN_FOLLOW_JUMP
960 #define TARGET_CAN_FOLLOW_JUMP sparc_can_follow_jump
961
962 struct gcc_target targetm = TARGET_INITIALIZER;
963
964 /* Return the memory reference contained in X if any, zero otherwise. */
965
966 static rtx
967 mem_ref (rtx x)
968 {
969 if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
970 x = XEXP (x, 0);
971
972 if (MEM_P (x))
973 return x;
974
975 return NULL_RTX;
976 }
977
978 /* True if any of INSN's source register(s) is REG. */
979
980 static bool
981 insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
982 {
983 extract_insn (insn);
984 return ((REG_P (recog_data.operand[1])
985 && REGNO (recog_data.operand[1]) == reg)
986 || (recog_data.n_operands == 3
987 && REG_P (recog_data.operand[2])
988 && REGNO (recog_data.operand[2]) == reg));
989 }
990
991 /* True if INSN is a floating-point division or square-root. */
992
993 static bool
994 div_sqrt_insn_p (rtx_insn *insn)
995 {
996 if (GET_CODE (PATTERN (insn)) != SET)
997 return false;
998
999 switch (get_attr_type (insn))
1000 {
1001 case TYPE_FPDIVS:
1002 case TYPE_FPSQRTS:
1003 case TYPE_FPDIVD:
1004 case TYPE_FPSQRTD:
1005 return true;
1006 default:
1007 return false;
1008 }
1009 }
1010
1011 /* True if INSN is a floating-point instruction. */
1012
1013 static bool
1014 fpop_insn_p (rtx_insn *insn)
1015 {
1016 if (GET_CODE (PATTERN (insn)) != SET)
1017 return false;
1018
1019 switch (get_attr_type (insn))
1020 {
1021 case TYPE_FPMOVE:
1022 case TYPE_FPCMOVE:
1023 case TYPE_FP:
1024 case TYPE_FPCMP:
1025 case TYPE_FPMUL:
1026 case TYPE_FPDIVS:
1027 case TYPE_FPSQRTS:
1028 case TYPE_FPDIVD:
1029 case TYPE_FPSQRTD:
1030 return true;
1031 default:
1032 return false;
1033 }
1034 }
1035
1036 /* True if INSN is an atomic instruction. */
1037
1038 static bool
1039 atomic_insn_for_leon3_p (rtx_insn *insn)
1040 {
1041 switch (INSN_CODE (insn))
1042 {
1043 case CODE_FOR_swapsi:
1044 case CODE_FOR_ldstub:
1045 case CODE_FOR_atomic_compare_and_swap_leon3_1:
1046 return true;
1047 default:
1048 return false;
1049 }
1050 }
1051
1052 /* We use a machine specific pass to enable workarounds for errata.
1053
1054 We need to have the (essentially) final form of the insn stream in order
1055 to properly detect the various hazards. Therefore, this machine specific
1056 pass runs as late as possible. */
1057
1058 /* True if INSN is a md pattern or asm statement. */
1059 #define USEFUL_INSN_P(INSN) \
1060 (NONDEBUG_INSN_P (INSN) \
1061 && GET_CODE (PATTERN (INSN)) != USE \
1062 && GET_CODE (PATTERN (INSN)) != CLOBBER)
1063
1064 static unsigned int
1065 sparc_do_work_around_errata (void)
1066 {
1067 rtx_insn *insn, *next;
1068
1069 /* Force all instructions to be split into their final form. */
1070 split_all_insns_noflow ();
1071
1072 /* Now look for specific patterns in the insn stream. */
1073 for (insn = get_insns (); insn; insn = next)
1074 {
1075 bool insert_nop = false;
1076 rtx set;
1077 rtx_insn *jump;
1078 rtx_sequence *seq;
1079
1080 /* Look into the instruction in a delay slot. */
1081 if (NONJUMP_INSN_P (insn)
1082 && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
1083 {
1084 jump = seq->insn (0);
1085 insn = seq->insn (1);
1086 }
1087 else if (JUMP_P (insn))
1088 jump = insn;
1089 else
1090 jump = NULL;
1091
1092 /* Place a NOP at the branch target of an integer branch if the target is
1093 a floating-point operation or a floating-point branch. */
1094 if (sparc_fix_gr712rc
1095 && jump
1096 && jump_to_label_p (jump)
1097 && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
1098 {
1099 rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
1100 if (target
1101 && (fpop_insn_p (target)
1102 || (JUMP_P (target)
1103 && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
1104 emit_insn_before (gen_nop (), target);
1105 }
1106
1107 /* Insert a NOP between a load instruction and an atomic instruction. Also
1108 insert a NOP at the branch target if there is a load in the delay slot
1109 and an atomic instruction at the branch target. */
1110 if (sparc_fix_ut700
1111 && NONJUMP_INSN_P (insn)
1112 && (set = single_set (insn)) != NULL_RTX
1113 && mem_ref (SET_SRC (set))
1114 && REG_P (SET_DEST (set)))
1115 {
1116 if (jump && jump_to_label_p (jump))
1117 {
1118 rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
1119 if (target && atomic_insn_for_leon3_p (target))
1120 emit_insn_before (gen_nop (), target);
1121 }
1122
1123 next = next_active_insn (insn);
1124 if (!next)
1125 break;
1126
1127 if (atomic_insn_for_leon3_p (next))
1128 insert_nop = true;
1129 }
1130
1131 /* Look for a sequence that starts with a fdiv or fsqrt instruction and
1132 ends with another fdiv or fsqrt instruction with no dependencies on
1133 the former, along with an appropriate pattern in between. */
1134 if (sparc_fix_lost_divsqrt
1135 && NONJUMP_INSN_P (insn)
1136 && div_sqrt_insn_p (insn))
1137 {
1138 int i;
1139 int fp_found = 0;
1140 rtx_insn *after;
1141
1142 const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));
1143
1144 next = next_active_insn (insn);
1145 if (!next)
1146 break;
1147
1148 for (after = next, i = 0; i < 4; i++)
1149 {
1150 /* Count floating-point operations. */
1151 if (i != 3 && fpop_insn_p (after))
1152 {
1153 /* If the insn uses the destination register of
1154 the div/sqrt, then it cannot be problematic. */
1155 if (insn_uses_reg_p (after, dest_reg))
1156 break;
1157 fp_found++;
1158 }
1159
1160 /* Count floating-point loads. */
1161 if (i != 3
1162 && (set = single_set (after)) != NULL_RTX
1163 && REG_P (SET_DEST (set))
1164 && REGNO (SET_DEST (set)) > 31)
1165 {
1166 /* If the insn uses the destination register of
1167 the div/sqrt, then it cannot be problematic. */
1168 if (REGNO (SET_DEST (set)) == dest_reg)
1169 break;
1170 fp_found++;
1171 }
1172
1173 /* Check if this is a problematic sequence. */
1174 if (i > 1
1175 && fp_found >= 2
1176 && div_sqrt_insn_p (after))
1177 {
1178 /* If this is the short version of the problematic
1179 sequence we add two NOPs in a row to also prevent
1180 the long version. */
1181 if (i == 2)
1182 emit_insn_before (gen_nop (), next);
1183 insert_nop = true;
1184 break;
1185 }
1186
1187 /* No need to scan past a second div/sqrt. */
1188 if (div_sqrt_insn_p (after))
1189 break;
1190
1191 /* Insert NOP before branch. */
1192 if (i < 3
1193 && (!NONJUMP_INSN_P (after)
1194 || GET_CODE (PATTERN (after)) == SEQUENCE))
1195 {
1196 insert_nop = true;
1197 break;
1198 }
1199
1200 after = next_active_insn (after);
1201 if (!after)
1202 break;
1203 }
1204 }
1205
1206 /* Look for either of these two sequences:
1207
1208 Sequence A:
1209 1. store of word size or less (e.g. st / stb / sth / stf)
1210 2. any single instruction that is not a load or store
1211 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)
1212
1213 Sequence B:
1214 1. store of double word size (e.g. std / stdf)
1215 2. any store instruction (e.g. st / stb / sth / stf / std / stdf) */
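/* A hypothetical instance of sequence B, with registers chosen purely
   for illustration, would be:

     std  %g2, [%o0]   ! double-word store (step 1)
     st   %g1, [%o1]   ! any store (step 2)

   for which the code below arranges to insert a NOP between the two
   stores.  */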
1216 if (sparc_fix_b2bst
1217 && NONJUMP_INSN_P (insn)
1218 && (set = single_set (insn)) != NULL_RTX
1219 && MEM_P (SET_DEST (set)))
1220 {
1221 /* Sequence B begins with a double-word store. */
1222 bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
1223 rtx_insn *after;
1224 int i;
1225
1226 next = next_active_insn (insn);
1227 if (!next)
1228 break;
1229
1230 for (after = next, i = 0; i < 2; i++)
1231 {
1232 /* Skip empty assembly statements. */
1233 if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
1234 || (USEFUL_INSN_P (after)
1235 && (asm_noperands (PATTERN (after)) >= 0)
1236 && !strcmp (decode_asm_operands (PATTERN (after),
1237 NULL, NULL, NULL,
1238 NULL, NULL), "")))
1239 after = next_active_insn (after);
1240 if (!after)
1241 break;
1242
1243 /* If the insn is a branch, then it cannot be problematic. */
1244 if (!NONJUMP_INSN_P (after)
1245 || GET_CODE (PATTERN (after)) == SEQUENCE)
1246 break;
1247
1248 /* Sequence B is only two instructions long. */
1249 if (seq_b)
1250 {
1251 /* Add NOP if followed by a store. */
1252 if ((set = single_set (after)) != NULL_RTX
1253 && MEM_P (SET_DEST (set)))
1254 insert_nop = true;
1255
1256 /* Otherwise it is ok. */
1257 break;
1258 }
1259
1260 /* If the second instruction is a load or a store,
1261 then the sequence cannot be problematic. */
1262 if (i == 0)
1263 {
1264 if ((set = single_set (after)) != NULL_RTX
1265 && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
1266 break;
1267
1268 after = next_active_insn (after);
1269 if (!after)
1270 break;
1271 }
1272
1273 /* Add NOP if third instruction is a store. */
1274 if (i == 1
1275 && (set = single_set (after)) != NULL_RTX
1276 && MEM_P (SET_DEST (set)))
1277 insert_nop = true;
1278 }
1279 }
1280
1281 /* Look for a single-word load into an odd-numbered FP register. */
1282 else if (sparc_fix_at697f
1283 && NONJUMP_INSN_P (insn)
1284 && (set = single_set (insn)) != NULL_RTX
1285 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1286 && mem_ref (SET_SRC (set))
1287 && REG_P (SET_DEST (set))
1288 && REGNO (SET_DEST (set)) > 31
1289 && REGNO (SET_DEST (set)) % 2 != 0)
1290 {
1291 /* The wrong dependency is on the enclosing double register. */
1292 const unsigned int x = REGNO (SET_DEST (set)) - 1;
1293 unsigned int src1, src2, dest;
1294 int code;
1295
1296 next = next_active_insn (insn);
1297 if (!next)
1298 break;
1299 /* If the insn is a branch, then it cannot be problematic. */
1300 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1301 continue;
1302
1303 extract_insn (next);
1304 code = INSN_CODE (next);
1305
1306 switch (code)
1307 {
1308 case CODE_FOR_adddf3:
1309 case CODE_FOR_subdf3:
1310 case CODE_FOR_muldf3:
1311 case CODE_FOR_divdf3:
1312 dest = REGNO (recog_data.operand[0]);
1313 src1 = REGNO (recog_data.operand[1]);
1314 src2 = REGNO (recog_data.operand[2]);
1315 if (src1 != src2)
1316 {
1317 /* Case [1-4]:
1318 ld [address], %fx+1
1319 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
1320 if ((src1 == x || src2 == x)
1321 && (dest == src1 || dest == src2))
1322 insert_nop = true;
1323 }
1324 else
1325 {
1326 /* Case 5:
1327 ld [address], %fx+1
1328 FPOPd %fx, %fx, %fx */
1329 if (src1 == x
1330 && dest == src1
1331 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
1332 insert_nop = true;
1333 }
1334 break;
1335
1336 case CODE_FOR_sqrtdf2:
1337 dest = REGNO (recog_data.operand[0]);
1338 src1 = REGNO (recog_data.operand[1]);
1339 /* Case 6:
1340 ld [address], %fx+1
1341 fsqrtd %fx, %fx */
1342 if (src1 == x && dest == src1)
1343 insert_nop = true;
1344 break;
1345
1346 default:
1347 break;
1348 }
1349 }
1350
1351 /* Look for a single-word load into an integer register. */
1352 else if (sparc_fix_ut699
1353 && NONJUMP_INSN_P (insn)
1354 && (set = single_set (insn)) != NULL_RTX
1355 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
1356 && (mem_ref (SET_SRC (set)) != NULL_RTX
1357 || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
1358 && REG_P (SET_DEST (set))
1359 && REGNO (SET_DEST (set)) < 32)
1360 {
1361 /* There is no problem if the second memory access has a data
1362 dependency on the first single-cycle load. */
1363 rtx x = SET_DEST (set);
1364
1365 next = next_active_insn (insn);
1366 if (!next)
1367 break;
1368 /* If the insn is a branch, then it cannot be problematic. */
1369 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1370 continue;
1371
1372 /* Look for a second memory access to/from an integer register. */
1373 if ((set = single_set (next)) != NULL_RTX)
1374 {
1375 rtx src = SET_SRC (set);
1376 rtx dest = SET_DEST (set);
1377 rtx mem;
1378
1379 /* LDD is affected. */
1380 if ((mem = mem_ref (src)) != NULL_RTX
1381 && REG_P (dest)
1382 && REGNO (dest) < 32
1383 && !reg_mentioned_p (x, XEXP (mem, 0)))
1384 insert_nop = true;
1385
1386 /* STD is *not* affected. */
1387 else if (MEM_P (dest)
1388 && GET_MODE_SIZE (GET_MODE (dest)) <= 4
1389 && (src == CONST0_RTX (GET_MODE (dest))
1390 || (REG_P (src)
1391 && REGNO (src) < 32
1392 && REGNO (src) != REGNO (x)))
1393 && !reg_mentioned_p (x, XEXP (dest, 0)))
1394 insert_nop = true;
1395
1396 /* GOT accesses use LD. */
1397 else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
1398 && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
1399 insert_nop = true;
1400 }
1401 }
1402
1403 /* Look for a single-word load/operation into an FP register. */
1404 else if (sparc_fix_ut699
1405 && NONJUMP_INSN_P (insn)
1406 && (set = single_set (insn)) != NULL_RTX
1407 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1408 && REG_P (SET_DEST (set))
1409 && REGNO (SET_DEST (set)) > 31)
1410 {
1411 /* Number of instructions in the problematic window. */
1412 const int n_insns = 4;
1413 /* The problematic combination is with the sibling FP register. */
1414 const unsigned int x = REGNO (SET_DEST (set));
1415 const unsigned int y = x ^ 1;
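/* For instance (illustrative only), a load into %f5 (hard reg 37) has
   sibling %f4, since 37 ^ 1 == 36.  */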
1416 rtx_insn *after;
1417 int i;
1418
1419 next = next_active_insn (insn);
1420 if (!next)
1421 break;
1422 /* If the insn is a branch, then it cannot be problematic. */
1423 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1424 continue;
1425
1426 /* Look for a second load/operation into the sibling FP register. */
1427 if (!((set = single_set (next)) != NULL_RTX
1428 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1429 && REG_P (SET_DEST (set))
1430 && REGNO (SET_DEST (set)) == y))
1431 continue;
1432
1433 /* Look for a (possible) store from the FP register in the next N
1434 instructions, but bail out if it is again modified or if there
1435 is a store from the sibling FP register before this store. */
1436 for (after = next, i = 0; i < n_insns; i++)
1437 {
1438 bool branch_p;
1439
1440 after = next_active_insn (after);
1441 if (!after)
1442 break;
1443
1444 /* This is a branch with an empty delay slot. */
1445 if (!NONJUMP_INSN_P (after))
1446 {
1447 if (++i == n_insns)
1448 break;
1449 branch_p = true;
1450 after = NULL;
1451 }
1452 /* This is a branch with a filled delay slot. */
1453 else if (rtx_sequence *seq =
1454 dyn_cast <rtx_sequence *> (PATTERN (after)))
1455 {
1456 if (++i == n_insns)
1457 break;
1458 branch_p = true;
1459 after = seq->insn (1);
1460 }
1461 /* This is a regular instruction. */
1462 else
1463 branch_p = false;
1464
1465 if (after && (set = single_set (after)) != NULL_RTX)
1466 {
1467 const rtx src = SET_SRC (set);
1468 const rtx dest = SET_DEST (set);
1469 const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
1470
1471 /* If the FP register is again modified before the store,
1472 then the store isn't affected. */
1473 if (REG_P (dest)
1474 && (REGNO (dest) == x
1475 || (REGNO (dest) == y && size == 8)))
1476 break;
1477
1478 if (MEM_P (dest) && REG_P (src))
1479 {
1480 /* If there is a store from the sibling FP register
1481 before the store, then the store is not affected. */
1482 if (REGNO (src) == y || (REGNO (src) == x && size == 8))
1483 break;
1484
1485 /* Otherwise, the store is affected. */
1486 if (REGNO (src) == x && size == 4)
1487 {
1488 insert_nop = true;
1489 break;
1490 }
1491 }
1492 }
1493
1494 /* If we have a branch in the first M instructions, then we
1495 cannot see the (M+2)th instruction, so we play it safe. */
1496 if (branch_p && i <= (n_insns - 2))
1497 {
1498 insert_nop = true;
1499 break;
1500 }
1501 }
1502 }
1503
1504 else
1505 next = NEXT_INSN (insn);
1506
1507 if (insert_nop)
1508 emit_insn_before (gen_nop (), next);
1509 }
1510
1511 return 0;
1512 }
1513
1514 namespace {
1515
1516 const pass_data pass_data_work_around_errata =
1517 {
1518 RTL_PASS, /* type */
1519 "errata", /* name */
1520 OPTGROUP_NONE, /* optinfo_flags */
1521 TV_MACH_DEP, /* tv_id */
1522 0, /* properties_required */
1523 0, /* properties_provided */
1524 0, /* properties_destroyed */
1525 0, /* todo_flags_start */
1526 0, /* todo_flags_finish */
1527 };
1528
1529 class pass_work_around_errata : public rtl_opt_pass
1530 {
1531 public:
1532 pass_work_around_errata(gcc::context *ctxt)
1533 : rtl_opt_pass(pass_data_work_around_errata, ctxt)
1534 {}
1535
1536 /* opt_pass methods: */
1537 virtual bool gate (function *)
1538 {
1539 return sparc_fix_at697f
1540 || sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc
1541 || sparc_fix_b2bst || sparc_fix_lost_divsqrt;
1542 }
1543
1544 virtual unsigned int execute (function *)
1545 {
1546 return sparc_do_work_around_errata ();
1547 }
1548
1549 }; // class pass_work_around_errata
1550
1551 } // anon namespace
1552
1553 rtl_opt_pass *
1554 make_pass_work_around_errata (gcc::context *ctxt)
1555 {
1556 return new pass_work_around_errata (ctxt);
1557 }
1558
1559 /* Helpers for TARGET_DEBUG_OPTIONS. */
1560 static void
1561 dump_target_flag_bits (const int flags)
1562 {
1563 if (flags & MASK_64BIT)
1564 fprintf (stderr, "64BIT ");
1565 if (flags & MASK_APP_REGS)
1566 fprintf (stderr, "APP_REGS ");
1567 if (flags & MASK_FASTER_STRUCTS)
1568 fprintf (stderr, "FASTER_STRUCTS ");
1569 if (flags & MASK_FLAT)
1570 fprintf (stderr, "FLAT ");
1571 if (flags & MASK_FMAF)
1572 fprintf (stderr, "FMAF ");
1573 if (flags & MASK_FSMULD)
1574 fprintf (stderr, "FSMULD ");
1575 if (flags & MASK_FPU)
1576 fprintf (stderr, "FPU ");
1577 if (flags & MASK_HARD_QUAD)
1578 fprintf (stderr, "HARD_QUAD ");
1579 if (flags & MASK_POPC)
1580 fprintf (stderr, "POPC ");
1581 if (flags & MASK_PTR64)
1582 fprintf (stderr, "PTR64 ");
1583 if (flags & MASK_STACK_BIAS)
1584 fprintf (stderr, "STACK_BIAS ");
1585 if (flags & MASK_UNALIGNED_DOUBLES)
1586 fprintf (stderr, "UNALIGNED_DOUBLES ");
1587 if (flags & MASK_V8PLUS)
1588 fprintf (stderr, "V8PLUS ");
1589 if (flags & MASK_VIS)
1590 fprintf (stderr, "VIS ");
1591 if (flags & MASK_VIS2)
1592 fprintf (stderr, "VIS2 ");
1593 if (flags & MASK_VIS3)
1594 fprintf (stderr, "VIS3 ");
1595 if (flags & MASK_VIS4)
1596 fprintf (stderr, "VIS4 ");
1597 if (flags & MASK_VIS4B)
1598 fprintf (stderr, "VIS4B ");
1599 if (flags & MASK_CBCOND)
1600 fprintf (stderr, "CBCOND ");
1601 if (flags & MASK_DEPRECATED_V8_INSNS)
1602 fprintf (stderr, "DEPRECATED_V8_INSNS ");
1603 if (flags & MASK_SPARCLET)
1604 fprintf (stderr, "SPARCLET ");
1605 if (flags & MASK_SPARCLITE)
1606 fprintf (stderr, "SPARCLITE ");
1607 if (flags & MASK_V8)
1608 fprintf (stderr, "V8 ");
1609 if (flags & MASK_V9)
1610 fprintf (stderr, "V9 ");
1611 }
1612
1613 static void
1614 dump_target_flags (const char *prefix, const int flags)
1615 {
1616 fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
1617 dump_target_flag_bits (flags);
1618 fprintf (stderr, "]\n");
1619 }
1620
1621 /* Validate and override various options, and do some machine dependent
1622 initialization. */
1623
1624 static void
1625 sparc_option_override (void)
1626 {
1627 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
1628 static struct cpu_default {
1629 const int cpu;
1630 const enum sparc_processor_type processor;
1631 } const cpu_default[] = {
1632 /* There must be one entry here for each TARGET_CPU value. */
1633 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
1634 { TARGET_CPU_v8, PROCESSOR_V8 },
1635 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
1636 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
1637 { TARGET_CPU_leon, PROCESSOR_LEON },
1638 { TARGET_CPU_leon3, PROCESSOR_LEON3 },
1639 { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
1640 { TARGET_CPU_sparclite, PROCESSOR_F930 },
1641 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
1642 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
1643 { TARGET_CPU_v9, PROCESSOR_V9 },
1644 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
1645 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
1646 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
1647 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
1648 { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
1649 { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
1650 { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
1651 { TARGET_CPU_m8, PROCESSOR_M8 },
1652 { -1, PROCESSOR_V7 }
1653 };
1654 const struct cpu_default *def;
1655 /* Table of values for -m{cpu,tune}=. This must match the order of
1656 the enum processor_type in sparc-opts.h. */
1657 static struct cpu_table {
1658 const char *const name;
1659 const int disable;
1660 const int enable;
1661 } const cpu_table[] = {
1662 { "v7", MASK_ISA, 0 },
1663 { "cypress", MASK_ISA, 0 },
1664 { "v8", MASK_ISA, MASK_V8 },
1665 /* TI TMS390Z55 supersparc */
1666 { "supersparc", MASK_ISA, MASK_V8 },
1667 { "hypersparc", MASK_ISA, MASK_V8 },
1668 { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
1669 { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
1670 { "leon3v7", MASK_ISA, MASK_LEON3 },
1671 { "sparclite", MASK_ISA, MASK_SPARCLITE },
1672 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
1673 { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1674 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
1675 { "f934", MASK_ISA, MASK_SPARCLITE },
1676 { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1677 { "sparclet", MASK_ISA, MASK_SPARCLET },
1678 /* TEMIC sparclet */
1679 { "tsc701", MASK_ISA, MASK_SPARCLET },
1680 { "v9", MASK_ISA, MASK_V9 },
1681 /* UltraSPARC I, II, IIi */
1682 { "ultrasparc", MASK_ISA,
1683 /* Although insns using %y are deprecated, it is a clear win. */
1684 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1685 /* UltraSPARC III */
1686 /* ??? Check if %y issue still holds true. */
1687 { "ultrasparc3", MASK_ISA,
1688 MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
1689 /* UltraSPARC T1 */
1690 { "niagara", MASK_ISA,
1691 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1692 /* UltraSPARC T2 */
1693 { "niagara2", MASK_ISA,
1694 MASK_V9|MASK_POPC|MASK_VIS2 },
1695 /* UltraSPARC T3 */
1696 { "niagara3", MASK_ISA,
1697 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
1698 /* UltraSPARC T4 */
1699 { "niagara4", MASK_ISA,
1700 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
1701 /* UltraSPARC M7 */
1702 { "niagara7", MASK_ISA,
1703 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
1704 /* UltraSPARC M8 */
1705 { "m8", MASK_ISA,
1706 MASK_V9|MASK_POPC|MASK_VIS4B|MASK_FMAF|MASK_CBCOND|MASK_SUBXC }
1707 };
1708 const struct cpu_table *cpu;
1709 unsigned int i;
1710
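/* Parse the comma-separated list given to -mdebug: each item may be
   prefixed with '!' to clear rather than set the corresponding debug
   bits.  */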
1711 if (sparc_debug_string != NULL)
1712 {
1713 const char *q;
1714 char *p;
1715
1716 p = ASTRDUP (sparc_debug_string);
1717 while ((q = strtok (p, ",")) != NULL)
1718 {
1719 bool invert;
1720 int mask;
1721
1722 p = NULL;
1723 if (*q == '!')
1724 {
1725 invert = true;
1726 q++;
1727 }
1728 else
1729 invert = false;
1730
1731 if (! strcmp (q, "all"))
1732 mask = MASK_DEBUG_ALL;
1733 else if (! strcmp (q, "options"))
1734 mask = MASK_DEBUG_OPTIONS;
1735 else
1736 error ("unknown %<-mdebug-%s%> switch", q);
1737
1738 if (invert)
1739 sparc_debug &= ~mask;
1740 else
1741 sparc_debug |= mask;
1742 }
1743 }
1744
1745 /* Enable the FsMULd instruction by default if not explicitly specified by
1746 the user. It may be later disabled by the CPU (explicitly or not). */
1747 if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
1748 target_flags |= MASK_FSMULD;
1749
1750 if (TARGET_DEBUG_OPTIONS)
1751 {
1752 dump_target_flags ("Initial target_flags", target_flags);
1753 dump_target_flags ("target_flags_explicit", target_flags_explicit);
1754 }
1755
1756 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1757 SUBTARGET_OVERRIDE_OPTIONS;
1758 #endif
1759
1760 #ifndef SPARC_BI_ARCH
1761 /* Check for unsupported architecture size. */
1762 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1763 error ("%s is not supported by this configuration",
1764 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1765 #endif
1766
1767 /* We force all 64-bit archs to use 128-bit long double. */
1768 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1769 {
1770 error ("%<-mlong-double-64%> not allowed with %<-m64%>");
1771 target_flags |= MASK_LONG_DOUBLE_128;
1772 }
1773
1774 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1775 for (i = 8; i < 16; i++)
1776 if (!call_used_regs [i])
1777 {
1778 error ("%<-fcall-saved-REG%> is not supported for out registers");
1779 call_used_regs [i] = 1;
1780 }
1781
1782 /* Set the default CPU if no -mcpu option was specified. */
1783 if (!global_options_set.x_sparc_cpu_and_features)
1784 {
1785 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1786 if (def->cpu == TARGET_CPU_DEFAULT)
1787 break;
1788 gcc_assert (def->cpu != -1);
1789 sparc_cpu_and_features = def->processor;
1790 }
1791
1792 /* Set the default CPU if no -mtune option was specified. */
1793 if (!global_options_set.x_sparc_cpu)
1794 sparc_cpu = sparc_cpu_and_features;
1795
1796 cpu = &cpu_table[(int) sparc_cpu_and_features];
1797
1798 if (TARGET_DEBUG_OPTIONS)
1799 {
1800 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1801 dump_target_flags ("cpu->disable", cpu->disable);
1802 dump_target_flags ("cpu->enable", cpu->enable);
1803 }
1804
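/* Clear the features disabled by the selected CPU and enable those it
   provides, except for features the assembler cannot handle and features
   explicitly set by the user on the command line.  */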
1805 target_flags &= ~cpu->disable;
1806 target_flags |= (cpu->enable
1807 #ifndef HAVE_AS_FMAF_HPC_VIS3
1808 & ~(MASK_FMAF | MASK_VIS3)
1809 #endif
1810 #ifndef HAVE_AS_SPARC4
1811 & ~MASK_CBCOND
1812 #endif
1813 #ifndef HAVE_AS_SPARC5_VIS4
1814 & ~(MASK_VIS4 | MASK_SUBXC)
1815 #endif
1816 #ifndef HAVE_AS_SPARC6
1817 & ~(MASK_VIS4B)
1818 #endif
1819 #ifndef HAVE_AS_LEON
1820 & ~(MASK_LEON | MASK_LEON3)
1821 #endif
1822 & ~(target_flags_explicit & MASK_FEATURES)
1823 );
1824
1825 /* FsMULd is a V8 instruction. */
1826 if (!TARGET_V8 && !TARGET_V9)
1827 target_flags &= ~MASK_FSMULD;
1828
1829 /* -mvis2 implies -mvis. */
1830 if (TARGET_VIS2)
1831 target_flags |= MASK_VIS;
1832
1833 /* -mvis3 implies -mvis2 and -mvis. */
1834 if (TARGET_VIS3)
1835 target_flags |= MASK_VIS2 | MASK_VIS;
1836
1837 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1838 if (TARGET_VIS4)
1839 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1840
1841 /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis */
1842 if (TARGET_VIS4B)
1843 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1844
1845 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
1846 FPU is disabled. */
1847 if (!TARGET_FPU)
1848 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1849 | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
1850
1851 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1852 are available; -m64 also implies v9. */
1853 if (TARGET_VIS || TARGET_ARCH64)
1854 {
1855 target_flags |= MASK_V9;
1856 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1857 }
1858
1859 /* -mvis also implies -mv8plus on 32-bit. */
1860 if (TARGET_VIS && !TARGET_ARCH64)
1861 target_flags |= MASK_V8PLUS;
1862
1863 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
1864 if (TARGET_V9 && TARGET_ARCH32)
1865 target_flags |= MASK_DEPRECATED_V8_INSNS;
1866
1867 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
1868 if (!TARGET_V9 || TARGET_ARCH64)
1869 target_flags &= ~MASK_V8PLUS;
1870
1871 /* Don't use stack biasing in 32-bit mode. */
1872 if (TARGET_ARCH32)
1873 target_flags &= ~MASK_STACK_BIAS;
1874
1875 /* Use LRA instead of reload, unless otherwise instructed. */
1876 if (!(target_flags_explicit & MASK_LRA))
1877 target_flags |= MASK_LRA;
1878
1879 /* Enable applicable errata workarounds for LEON3FT. */
1880 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1881 {
1882 sparc_fix_b2bst = 1;
1883 sparc_fix_lost_divsqrt = 1;
1884 }
1885
1886 /* Disable FsMULd for the UT699 since it doesn't work correctly. */
1887 if (sparc_fix_ut699)
1888 target_flags &= ~MASK_FSMULD;
1889
1890 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1891 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1892 target_flags |= MASK_LONG_DOUBLE_128;
1893 #endif
1894
1895 if (TARGET_DEBUG_OPTIONS)
1896 dump_target_flags ("Final target_flags", target_flags);
1897
1898 /* Set the code model if no -mcmodel option was specified. */
1899 if (global_options_set.x_sparc_code_model)
1900 {
1901 if (TARGET_ARCH32)
1902 error ("%<-mcmodel=%> is not supported in 32-bit mode");
1903 }
1904 else
1905 {
1906 if (TARGET_ARCH32)
1907 sparc_code_model = CM_32;
1908 else
1909 sparc_code_model = SPARC_DEFAULT_CMODEL;
1910 }
1911
1912 /* Set the memory model if no -mmemory-model option was specified. */
1913 if (!global_options_set.x_sparc_memory_model)
1914 {
1915 /* Choose the memory model for the operating system. */
1916 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
1917 if (os_default != SMM_DEFAULT)
1918 sparc_memory_model = os_default;
1919 /* Choose the most relaxed model for the processor. */
1920 else if (TARGET_V9)
1921 sparc_memory_model = SMM_RMO;
1922 else if (TARGET_LEON3)
1923 sparc_memory_model = SMM_TSO;
1924 else if (TARGET_LEON)
1925 sparc_memory_model = SMM_SC;
1926 else if (TARGET_V8)
1927 sparc_memory_model = SMM_PSO;
1928 else
1929 sparc_memory_model = SMM_SC;
1930 }
1931
1932 /* Supply a default value for align_functions. */
1933 if (flag_align_functions && !str_align_functions)
1934 {
1935 if (sparc_cpu == PROCESSOR_ULTRASPARC
1936 || sparc_cpu == PROCESSOR_ULTRASPARC3
1937 || sparc_cpu == PROCESSOR_NIAGARA
1938 || sparc_cpu == PROCESSOR_NIAGARA2
1939 || sparc_cpu == PROCESSOR_NIAGARA3
1940 || sparc_cpu == PROCESSOR_NIAGARA4)
1941 str_align_functions = "32";
1942 else if (sparc_cpu == PROCESSOR_NIAGARA7
1943 || sparc_cpu == PROCESSOR_M8)
1944 str_align_functions = "64";
1945 }
1946
1947 /* Validate PCC_STRUCT_RETURN. */
1948 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
1949 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
1950
1951 /* Only use .uaxword when compiling for a 64-bit target. */
1952 if (!TARGET_ARCH64)
1953 targetm.asm_out.unaligned_op.di = NULL;
1954
1955 /* Set the processor costs. */
1956 switch (sparc_cpu)
1957 {
1958 case PROCESSOR_V7:
1959 case PROCESSOR_CYPRESS:
1960 sparc_costs = &cypress_costs;
1961 break;
1962 case PROCESSOR_V8:
1963 case PROCESSOR_SPARCLITE:
1964 case PROCESSOR_SUPERSPARC:
1965 sparc_costs = &supersparc_costs;
1966 break;
1967 case PROCESSOR_F930:
1968 case PROCESSOR_F934:
1969 case PROCESSOR_HYPERSPARC:
1970 case PROCESSOR_SPARCLITE86X:
1971 sparc_costs = &hypersparc_costs;
1972 break;
1973 case PROCESSOR_LEON:
1974 sparc_costs = &leon_costs;
1975 break;
1976 case PROCESSOR_LEON3:
1977 case PROCESSOR_LEON3V7:
1978 sparc_costs = &leon3_costs;
1979 break;
1980 case PROCESSOR_SPARCLET:
1981 case PROCESSOR_TSC701:
1982 sparc_costs = &sparclet_costs;
1983 break;
1984 case PROCESSOR_V9:
1985 case PROCESSOR_ULTRASPARC:
1986 sparc_costs = &ultrasparc_costs;
1987 break;
1988 case PROCESSOR_ULTRASPARC3:
1989 sparc_costs = &ultrasparc3_costs;
1990 break;
1991 case PROCESSOR_NIAGARA:
1992 sparc_costs = &niagara_costs;
1993 break;
1994 case PROCESSOR_NIAGARA2:
1995 sparc_costs = &niagara2_costs;
1996 break;
1997 case PROCESSOR_NIAGARA3:
1998 sparc_costs = &niagara3_costs;
1999 break;
2000 case PROCESSOR_NIAGARA4:
2001 sparc_costs = &niagara4_costs;
2002 break;
2003 case PROCESSOR_NIAGARA7:
2004 sparc_costs = &niagara7_costs;
2005 break;
2006 case PROCESSOR_M8:
2007 sparc_costs = &m8_costs;
2008 break;
2009 case PROCESSOR_NATIVE:
2010 gcc_unreachable ();
2011 };
2012
2013 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
2014 can run at the same time. More importantly, it is the threshold
2015 defining when additional prefetches will be dropped by the
2016 hardware.
2017
2018 The UltraSPARC-III features a documented prefetch queue with a
2019 size of 8. Additional prefetches issued in the cpu are
2020 dropped.
2021
2022 Niagara processors are different. In these processors prefetches
2023 are handled much like regular loads. The L1 miss buffer is 32
2024 entries, but prefetches start getting affected when 30 entries
2025 become occupied. That occupation could be a mix of regular loads
2026 and prefetches though. And that buffer is shared by all threads.
2027 Once the threshold is reached, if the core is running a single
2028 thread the prefetch will retry. If more than one thread is
2029 running, the prefetch will be dropped.
2030
2031 All this makes it very difficult to determine how many
2032 prefetches can be issued simultaneously, even in a
2033 single-threaded program. Experimental results show that setting
2034 this parameter to 32 works well when the number of threads is not
2035 high. */
2036 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
2037 ((sparc_cpu == PROCESSOR_ULTRASPARC
2038 || sparc_cpu == PROCESSOR_NIAGARA
2039 || sparc_cpu == PROCESSOR_NIAGARA2
2040 || sparc_cpu == PROCESSOR_NIAGARA3
2041 || sparc_cpu == PROCESSOR_NIAGARA4)
2042 ? 2
2043 : (sparc_cpu == PROCESSOR_ULTRASPARC3
2044 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
2045 || sparc_cpu == PROCESSOR_M8)
2046 ? 32 : 3))),
2047 global_options.x_param_values,
2048 global_options_set.x_param_values);
2049
2050 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
2051 bytes.
2052
2053 The Oracle SPARC Architecture (previously the UltraSPARC
2054 Architecture) specification states that when a PREFETCH[A]
2055 instruction is executed an implementation-specific amount of data
2056 is prefetched, and that it is at least 64 bytes long (aligned to
2057 at least 64 bytes).
2058
2059 However, this is not correct. The M7 (and implementations prior
2060 to that) does not guarantee a 64B prefetch into a cache if the
2061 line size is smaller. A single cache line is all that is ever
2062 prefetched. So for the M7, where the L1D$ has 32B lines and the
2063 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
2064 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
2065 is a read_n prefetch, which is the only type which allocates to
2066 the L1.) */
2067 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
2068 (sparc_cpu == PROCESSOR_M8
2069 ? 64 : 32),
2070 global_options.x_param_values,
2071 global_options_set.x_param_values);
2072
2073 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
2074 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
2075 Niagara processors feature an L1D$ of 16KB. */
2076 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
2077 ((sparc_cpu == PROCESSOR_ULTRASPARC
2078 || sparc_cpu == PROCESSOR_ULTRASPARC3
2079 || sparc_cpu == PROCESSOR_NIAGARA
2080 || sparc_cpu == PROCESSOR_NIAGARA2
2081 || sparc_cpu == PROCESSOR_NIAGARA3
2082 || sparc_cpu == PROCESSOR_NIAGARA4
2083 || sparc_cpu == PROCESSOR_NIAGARA7
2084 || sparc_cpu == PROCESSOR_M8)
2085 ? 16 : 64),
2086 global_options.x_param_values,
2087 global_options_set.x_param_values);
2088
2089
2090 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes. Note
2091 that 512 is the default in params.def. */
2092 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
2093 ((sparc_cpu == PROCESSOR_NIAGARA4
2094 || sparc_cpu == PROCESSOR_M8)
2095 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
2096 ? 256 : 512)),
2097 global_options.x_param_values,
2098 global_options_set.x_param_values);
2099
2100
2101 /* Disable save slot sharing for call-clobbered registers by default.
2102 The IRA sharing algorithm works on single registers only and this
2103 pessimizes for double floating-point registers. */
2104 if (!global_options_set.x_flag_ira_share_save_slots)
2105 flag_ira_share_save_slots = 0;
2106
2107 /* Only enable REE by default in 64-bit mode where it helps to eliminate
2108 redundant 32-to-64-bit extensions. */
2109 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
2110 flag_ree = 0;
2111
2112 /* Do various machine dependent initializations. */
2113 sparc_init_modes ();
2114
2115 /* Set up function hooks. */
2116 init_machine_status = sparc_init_machine_status;
2117 }
2118 \f
2119 /* Miscellaneous utilities. */
2120
2121 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
2122 or branch on register contents instructions. */
2123
2124 int
2125 v9_regcmp_p (enum rtx_code code)
2126 {
2127 return (code == EQ || code == NE || code == GE || code == LT
2128 || code == LE || code == GT);
2129 }
2130
2131 /* Nonzero if OP is a floating point constant which can
2132 be loaded into an integer register using a single
2133 sethi instruction. */
2134
2135 int
2136 fp_sethi_p (rtx op)
2137 {
2138 if (GET_CODE (op) == CONST_DOUBLE)
2139 {
2140 long i;
2141
2142 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2143 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
2144 }
2145
2146 return 0;
2147 }
2148
2149 /* Nonzero if OP is a floating point constant which can
2150 be loaded into an integer register using a single
2151 mov instruction. */
2152
2153 int
2154 fp_mov_p (rtx op)
2155 {
2156 if (GET_CODE (op) == CONST_DOUBLE)
2157 {
2158 long i;
2159
2160 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2161 return SPARC_SIMM13_P (i);
2162 }
2163
2164 return 0;
2165 }
2166
2167 /* Nonzero if OP is a floating point constant which can
2168 be loaded into an integer register using a high/losum
2169 instruction sequence. */
2170
2171 int
2172 fp_high_losum_p (rtx op)
2173 {
2174 /* The constraints calling this should only be in
2175 SFmode move insns, so any constant which cannot
2176 be moved using a single insn will do. */
2177 if (GET_CODE (op) == CONST_DOUBLE)
2178 {
2179 long i;
2180
2181 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2182 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
2183 }
2184
2185 return 0;
2186 }
2187
2188 /* Return true if the address of LABEL can be loaded by means of the
2189 mov{si,di}_pic_label_ref patterns in PIC mode. */
2190
2191 static bool
2192 can_use_mov_pic_label_ref (rtx label)
2193 {
2194 /* VxWorks does not impose a fixed gap between segments; the run-time
2195 gap can be different from the object-file gap. We therefore can't
2196 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
2197 are absolutely sure that X is in the same segment as the GOT.
2198 Unfortunately, the flexibility of linker scripts means that we
2199 can't be sure of that in general, so assume that GOT-relative
2200 accesses are never valid on VxWorks. */
2201 if (TARGET_VXWORKS_RTP)
2202 return false;
2203
2204 /* Similarly, if the label is non-local, it might end up being placed
2205 in a different section than the current one; now mov_pic_label_ref
2206 requires the label and the code to be in the same section. */
2207 if (LABEL_REF_NONLOCAL_P (label))
2208 return false;
2209
2210 /* Finally, if we are reordering basic blocks and partitioning into hot
2211 and cold sections, this might happen for any label. */
2212 if (flag_reorder_blocks_and_partition)
2213 return false;
2214
2215 return true;
2216 }
2217
2218 /* Expand a move instruction. Return true if all work is done. */
2219
2220 bool
2221 sparc_expand_move (machine_mode mode, rtx *operands)
2222 {
2223 /* Handle sets of MEM first. */
2224 if (GET_CODE (operands[0]) == MEM)
2225 {
2226 /* 0 is a register (or a pair of registers) on SPARC. */
2227 if (register_or_zero_operand (operands[1], mode))
2228 return false;
2229
2230 if (!reload_in_progress)
2231 {
2232 operands[0] = validize_mem (operands[0]);
2233 operands[1] = force_reg (mode, operands[1]);
2234 }
2235 }
2236
2237 /* Fix up TLS cases. */
2238 if (TARGET_HAVE_TLS
2239 && CONSTANT_P (operands[1])
2240 && sparc_tls_referenced_p (operands [1]))
2241 {
2242 operands[1] = sparc_legitimize_tls_address (operands[1]);
2243 return false;
2244 }
2245
2246 /* Fix up PIC cases. */
2247 if (flag_pic && CONSTANT_P (operands[1]))
2248 {
2249 if (pic_address_needs_scratch (operands[1]))
2250 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
2251
2252 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2253 if ((GET_CODE (operands[1]) == LABEL_REF
2254 && can_use_mov_pic_label_ref (operands[1]))
2255 || (GET_CODE (operands[1]) == CONST
2256 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2257 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
2258 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
2259 && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
2260 {
2261 if (mode == SImode)
2262 {
2263 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2264 return true;
2265 }
2266
2267 if (mode == DImode)
2268 {
2269 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2270 return true;
2271 }
2272 }
2273
2274 if (symbolic_operand (operands[1], mode))
2275 {
2276 operands[1]
2277 = sparc_legitimize_pic_address (operands[1],
2278 reload_in_progress
2279 ? operands[0] : NULL_RTX);
2280 return false;
2281 }
2282 }
2283
2284 /* If we are trying to toss an integer constant into FP registers,
2285 or loading a FP or vector constant, force it into memory. */
2286 if (CONSTANT_P (operands[1])
2287 && REG_P (operands[0])
2288 && (SPARC_FP_REG_P (REGNO (operands[0]))
2289 || SCALAR_FLOAT_MODE_P (mode)
2290 || VECTOR_MODE_P (mode)))
2291 {
2292 /* emit_group_store will send such bogosity to us when it is
2293 not storing directly into memory. So fix this up to avoid
2294 crashes in output_constant_pool. */
2295 if (operands [1] == const0_rtx)
2296 operands[1] = CONST0_RTX (mode);
2297
2298 /* We can clear FP registers or set them to all-ones if TARGET_VIS,
2299 and we can always do so for other regs. */
2300 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2301 && (const_zero_operand (operands[1], mode)
2302 || const_all_ones_operand (operands[1], mode)))
2303 return false;
2304
2305 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2306 /* We are able to build any SF constant in integer registers
2307 with at most 2 instructions. */
2308 && (mode == SFmode
2309 /* And any DF constant in integer registers if needed. */
2310 || (mode == DFmode && !can_create_pseudo_p ())))
2311 return false;
2312
2313 operands[1] = force_const_mem (mode, operands[1]);
2314 if (!reload_in_progress)
2315 operands[1] = validize_mem (operands[1]);
2316 return false;
2317 }
2318
2319 /* Accept non-constants and valid constants unmodified. */
2320 if (!CONSTANT_P (operands[1])
2321 || GET_CODE (operands[1]) == HIGH
2322 || input_operand (operands[1], mode))
2323 return false;
2324
2325 switch (mode)
2326 {
2327 case E_QImode:
2328 /* All QImode constants require only one insn, so proceed. */
2329 break;
2330
2331 case E_HImode:
2332 case E_SImode:
2333 sparc_emit_set_const32 (operands[0], operands[1]);
2334 return true;
2335
2336 case E_DImode:
2337 /* input_operand should have filtered out 32-bit mode. */
2338 sparc_emit_set_const64 (operands[0], operands[1]);
2339 return true;
2340
2341 case E_TImode:
2342 {
2343 rtx high, low;
2344 /* TImode isn't available in 32-bit mode. */
2345 split_double (operands[1], &high, &low);
2346 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2347 high));
2348 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2349 low));
2350 }
2351 return true;
2352
2353 default:
2354 gcc_unreachable ();
2355 }
2356
2357 return false;
2358 }
2359
2360 /* Load OP1, a 32-bit constant, into OP0, a register.
2361 We know it can't be done in one insn when we get
2362 here; the move expander guarantees this. */
2363
2364 static void
2365 sparc_emit_set_const32 (rtx op0, rtx op1)
2366 {
2367 machine_mode mode = GET_MODE (op0);
2368 rtx temp = op0;
2369
2370 if (can_create_pseudo_p ())
2371 temp = gen_reg_rtx (mode);
2372
2373 if (GET_CODE (op1) == CONST_INT)
2374 {
2375 gcc_assert (!small_int_operand (op1, mode)
2376 && !const_high_operand (op1, mode));
2377
2378 /* Emit them as real moves instead of a HIGH/LO_SUM,
2379 this way CSE can see everything and reuse intermediate
2380 values if it wants. */
2381 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2382 & ~(HOST_WIDE_INT) 0x3ff)));
2383
2384 emit_insn (gen_rtx_SET (op0,
2385 gen_rtx_IOR (mode, temp,
2386 GEN_INT (INTVAL (op1) & 0x3ff))));
2387 }
2388 else
2389 {
2390 /* A symbol, emit in the traditional way. */
2391 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2392 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
2393 }
2394 }
2395
2396 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2397 If TEMP is nonzero, we are forbidden to use any other scratch
2398 registers. Otherwise, we are allowed to generate them as needed.
2399
2400 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2401 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2402
2403 void
2404 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2405 {
2406 rtx cst, temp1, temp2, temp3, temp4, temp5;
2407 rtx ti_temp = 0;
2408
2409 /* Deal with too large offsets. */
2410 if (GET_CODE (op1) == CONST
2411 && GET_CODE (XEXP (op1, 0)) == PLUS
2412 && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
2413 && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
2414 {
2415 gcc_assert (!temp);
2416 temp1 = gen_reg_rtx (DImode);
2417 temp2 = gen_reg_rtx (DImode);
2418 sparc_emit_set_const64 (temp2, cst);
2419 sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
2420 NULL_RTX);
2421 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
2422 return;
2423 }
2424
2425 if (temp && GET_MODE (temp) == TImode)
2426 {
2427 ti_temp = temp;
2428 temp = gen_rtx_REG (DImode, REGNO (temp));
2429 }
2430
2431 /* SPARC-V9 code model support. */
2432 switch (sparc_code_model)
2433 {
2434 case CM_MEDLOW:
2435 /* The range spanned by all instructions in the object is less
2436 than 2^31 bytes (2GB) and the distance from any instruction
2437 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2438 than 2^31 bytes (2GB).
2439
2440 The executable must be in the low 4TB of the virtual address
2441 space.
2442
2443 sethi %hi(symbol), %temp1
2444 or %temp1, %lo(symbol), %reg */
2445 if (temp)
2446 temp1 = temp; /* op0 is allowed. */
2447 else
2448 temp1 = gen_reg_rtx (DImode);
2449
2450 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2451 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2452 break;
2453
2454 case CM_MEDMID:
2455 /* The range spanned by all instructions in the object is less
2456 than 2^31 bytes (2GB) and the distance from any instruction
2457 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2458 than 2^31 bytes (2GB).
2459
2460 The executable must be in the low 16TB of the virtual address
2461 space.
2462
2463 sethi %h44(symbol), %temp1
2464 or %temp1, %m44(symbol), %temp2
2465 sllx %temp2, 12, %temp3
2466 or %temp3, %l44(symbol), %reg */
2467 if (temp)
2468 {
2469 temp1 = op0;
2470 temp2 = op0;
2471 temp3 = temp; /* op0 is allowed. */
2472 }
2473 else
2474 {
2475 temp1 = gen_reg_rtx (DImode);
2476 temp2 = gen_reg_rtx (DImode);
2477 temp3 = gen_reg_rtx (DImode);
2478 }
2479
2480 emit_insn (gen_seth44 (temp1, op1));
2481 emit_insn (gen_setm44 (temp2, temp1, op1));
2482 emit_insn (gen_rtx_SET (temp3,
2483 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2484 emit_insn (gen_setl44 (op0, temp3, op1));
2485 break;
2486
2487 case CM_MEDANY:
2488 /* The range spanned by all instructions in the object is less
2489 than 2^31 bytes (2GB) and the distance from any instruction
2490 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2491 than 2^31 bytes (2GB).
2492
2493 The executable can be placed anywhere in the virtual address
2494 space.
2495
2496 sethi %hh(symbol), %temp1
2497 sethi %lm(symbol), %temp2
2498 or %temp1, %hm(symbol), %temp3
2499 sllx %temp3, 32, %temp4
2500 or %temp4, %temp2, %temp5
2501 or %temp5, %lo(symbol), %reg */
2502 if (temp)
2503 {
2504 /* It is possible that one of the registers we got for operands[2]
2505 might coincide with that of operands[0] (which is why we made
2506 it TImode). Pick the other one to use as our scratch. */
2507 if (rtx_equal_p (temp, op0))
2508 {
2509 gcc_assert (ti_temp);
2510 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2511 }
2512 temp1 = op0;
2513 temp2 = temp; /* op0 is _not_ allowed, see above. */
2514 temp3 = op0;
2515 temp4 = op0;
2516 temp5 = op0;
2517 }
2518 else
2519 {
2520 temp1 = gen_reg_rtx (DImode);
2521 temp2 = gen_reg_rtx (DImode);
2522 temp3 = gen_reg_rtx (DImode);
2523 temp4 = gen_reg_rtx (DImode);
2524 temp5 = gen_reg_rtx (DImode);
2525 }
2526
2527 emit_insn (gen_sethh (temp1, op1));
2528 emit_insn (gen_setlm (temp2, op1));
2529 emit_insn (gen_sethm (temp3, temp1, op1));
2530 emit_insn (gen_rtx_SET (temp4,
2531 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2532 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2533 emit_insn (gen_setlo (op0, temp5, op1));
2534 break;
2535
2536 case CM_EMBMEDANY:
2537 /* Old old old backwards compatibility cruft here.
2538 Essentially it is MEDLOW with a fixed 64-bit
2539 virtual base added to all data segment addresses.
2540 Text-segment stuff is computed like MEDANY, we can't
2541 reuse the code above because the relocation knobs
2542 look different.
2543
2544 Data segment: sethi %hi(symbol), %temp1
2545 add %temp1, EMBMEDANY_BASE_REG, %temp2
2546 or %temp2, %lo(symbol), %reg */
2547 if (data_segment_operand (op1, GET_MODE (op1)))
2548 {
2549 if (temp)
2550 {
2551 temp1 = temp; /* op0 is allowed. */
2552 temp2 = op0;
2553 }
2554 else
2555 {
2556 temp1 = gen_reg_rtx (DImode);
2557 temp2 = gen_reg_rtx (DImode);
2558 }
2559
2560 emit_insn (gen_embmedany_sethi (temp1, op1));
2561 emit_insn (gen_embmedany_brsum (temp2, temp1));
2562 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2563 }
2564
2565 /* Text segment: sethi %uhi(symbol), %temp1
2566 sethi %hi(symbol), %temp2
2567 or %temp1, %ulo(symbol), %temp3
2568 sllx %temp3, 32, %temp4
2569 or %temp4, %temp2, %temp5
2570 or %temp5, %lo(symbol), %reg */
2571 else
2572 {
2573 if (temp)
2574 {
2575 /* It is possible that one of the registers we got for operands[2]
2576 might coincide with that of operands[0] (which is why we made
2577 it TImode). Pick the other one to use as our scratch. */
2578 if (rtx_equal_p (temp, op0))
2579 {
2580 gcc_assert (ti_temp);
2581 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2582 }
2583 temp1 = op0;
2584 temp2 = temp; /* op0 is _not_ allowed, see above. */
2585 temp3 = op0;
2586 temp4 = op0;
2587 temp5 = op0;
2588 }
2589 else
2590 {
2591 temp1 = gen_reg_rtx (DImode);
2592 temp2 = gen_reg_rtx (DImode);
2593 temp3 = gen_reg_rtx (DImode);
2594 temp4 = gen_reg_rtx (DImode);
2595 temp5 = gen_reg_rtx (DImode);
2596 }
2597
2598 emit_insn (gen_embmedany_textuhi (temp1, op1));
2599 emit_insn (gen_embmedany_texthi (temp2, op1));
2600 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2601 emit_insn (gen_rtx_SET (temp4,
2602 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2603 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2604 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2605 }
2606 break;
2607
2608 default:
2609 gcc_unreachable ();
2610 }
2611 }
2612
2613 /* These avoid problems when cross compiling. If we do not
2614 go through all this hair then the optimizer will see
2615 invalid REG_EQUAL notes or in some cases none at all. */
2616 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2617 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2618 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2619 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2620
2621 /* The optimizer is not to assume anything about exactly
2622 which bits are set for a HIGH; they are unspecified.
2623 Unfortunately this leads to many missed optimizations
2624 during CSE. We mask out the non-HIGH bits and match
2625 a plain movdi, to alleviate this problem. */
2626 static rtx
2627 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2628 {
2629 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2630 }
2631
2632 static rtx
2633 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2634 {
2635 return gen_rtx_SET (dest, GEN_INT (val));
2636 }
2637
2638 static rtx
2639 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2640 {
2641 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2642 }
2643
2644 static rtx
2645 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2646 {
2647 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2648 }
2649
2650 /* Worker routines for 64-bit constant formation on arch64.
2651 One of the key things to be doing in these emissions is
2652 to create as many temp REGs as possible. This makes it
2653 possible for half-built constants to be reused when
2654 similar values are required later on.
2655 Without doing this, the optimizer cannot see such
2656 opportunities. */
2657
2658 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2659 unsigned HOST_WIDE_INT, int);
2660
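/* Load into OP0 a 64-bit constant whose upper 32 bits are all zero
   (IS_NEG == 0) or all one (IS_NEG != 0) and whose lower 32 bits are
   LOW_BITS, using TEMP as a scratch register.  */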
2661 static void
2662 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2663 unsigned HOST_WIDE_INT low_bits, int is_neg)
2664 {
2665 unsigned HOST_WIDE_INT high_bits;
2666
2667 if (is_neg)
2668 high_bits = (~low_bits) & 0xffffffff;
2669 else
2670 high_bits = low_bits;
2671
2672 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2673 if (!is_neg)
2674 {
2675 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2676 }
2677 else
2678 {
2679 /* If we are XOR'ing with -1, then we should emit a one's complement
2680 instead. This way the combiner will notice logical operations
2681 such as ANDN later on and substitute. */
2682 if ((low_bits & 0x3ff) == 0x3ff)
2683 {
2684 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2685 }
2686 else
2687 {
2688 emit_insn (gen_rtx_SET (op0,
2689 gen_safe_XOR64 (temp,
2690 (-(HOST_WIDE_INT)0x400
2691 | (low_bits & 0x3ff)))));
2692 }
2693 }
2694 }
2695
2696 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2697 unsigned HOST_WIDE_INT, int);
2698
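/* Load into OP0 the constant (HIGH_BITS << SHIFT_COUNT) | LOW_IMMEDIATE:
   build HIGH_BITS in TEMP, shift it into place, then OR in the low
   immediate part if it is nonzero.  */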
2699 static void
2700 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2701 unsigned HOST_WIDE_INT high_bits,
2702 unsigned HOST_WIDE_INT low_immediate,
2703 int shift_count)
2704 {
2705 rtx temp2 = op0;
2706
2707 if ((high_bits & 0xfffffc00) != 0)
2708 {
2709 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2710 if ((high_bits & ~0xfffffc00) != 0)
2711 emit_insn (gen_rtx_SET (op0,
2712 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2713 else
2714 temp2 = temp;
2715 }
2716 else
2717 {
2718 emit_insn (gen_safe_SET64 (temp, high_bits));
2719 temp2 = temp;
2720 }
2721
2722 /* Now shift it up into place. */
2723 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2724 GEN_INT (shift_count))));
2725
2726 /* If there is a low immediate part piece, finish up by
2727 putting that in as well. */
2728 if (low_immediate != 0)
2729 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
2730 }
2731
2732 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2733 unsigned HOST_WIDE_INT);
2734
2735 /* Full 64-bit constant decomposition. Even though this is the
2736 'worst' case, we still optimize a few things away. */
2737 static void
2738 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2739 unsigned HOST_WIDE_INT high_bits,
2740 unsigned HOST_WIDE_INT low_bits)
2741 {
2742 rtx sub_temp = op0;
2743
2744 if (can_create_pseudo_p ())
2745 sub_temp = gen_reg_rtx (DImode);
2746
2747 if ((high_bits & 0xfffffc00) != 0)
2748 {
2749 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2750 if ((high_bits & ~0xfffffc00) != 0)
2751 emit_insn (gen_rtx_SET (sub_temp,
2752 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2753 else
2754 sub_temp = temp;
2755 }
2756 else
2757 {
2758 emit_insn (gen_safe_SET64 (temp, high_bits));
2759 sub_temp = temp;
2760 }
2761
2762 if (can_create_pseudo_p ())
2763 {
2764 rtx temp2 = gen_reg_rtx (DImode);
2765 rtx temp3 = gen_reg_rtx (DImode);
2766 rtx temp4 = gen_reg_rtx (DImode);
2767
2768 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2769 GEN_INT (32))));
2770
2771 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2772 if ((low_bits & ~0xfffffc00) != 0)
2773 {
2774 emit_insn (gen_rtx_SET (temp3,
2775 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2776 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2777 }
2778 else
2779 {
2780 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2781 }
2782 }
2783 else
2784 {
2785 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2786 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2787 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2788 int to_shift = 12;
2789
2790 /* We are in the middle of reload, so this is really
2791 painful. However we do still make an attempt to
2792 avoid emitting truly stupid code. */
2793 if (low1 != const0_rtx)
2794 {
2795 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2796 GEN_INT (to_shift))));
2797 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2798 sub_temp = op0;
2799 to_shift = 12;
2800 }
2801 else
2802 {
2803 to_shift += 12;
2804 }
2805 if (low2 != const0_rtx)
2806 {
2807 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2808 GEN_INT (to_shift))));
2809 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2810 sub_temp = op0;
2811 to_shift = 8;
2812 }
2813 else
2814 {
2815 to_shift += 8;
2816 }
2817 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2818 GEN_INT (to_shift))));
2819 if (low3 != const0_rtx)
2820 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2821 /* phew... */
2822 }
2823 }
2824
2825 /* Analyze a 64-bit constant: find its highest and lowest set bits and whether all bits in between are set. */
2826 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2827 unsigned HOST_WIDE_INT,
2828 int *, int *, int *);
2829
2830 static void
2831 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2832 unsigned HOST_WIDE_INT low_bits,
2833 int *hbsp, int *lbsp, int *abbasp)
2834 {
2835 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2836 int i;
2837
2838 lowest_bit_set = highest_bit_set = -1;
2839 i = 0;
2840 do
2841 {
2842 if ((lowest_bit_set == -1)
2843 && ((low_bits >> i) & 1))
2844 lowest_bit_set = i;
2845 if ((highest_bit_set == -1)
2846 && ((high_bits >> (32 - i - 1)) & 1))
2847 highest_bit_set = (64 - i - 1);
2848 }
2849 while (++i < 32
2850 && ((highest_bit_set == -1)
2851 || (lowest_bit_set == -1)));
2852 if (i == 32)
2853 {
2854 i = 0;
2855 do
2856 {
2857 if ((lowest_bit_set == -1)
2858 && ((high_bits >> i) & 1))
2859 lowest_bit_set = i + 32;
2860 if ((highest_bit_set == -1)
2861 && ((low_bits >> (32 - i - 1)) & 1))
2862 highest_bit_set = 32 - i - 1;
2863 }
2864 while (++i < 32
2865 && ((highest_bit_set == -1)
2866 || (lowest_bit_set == -1)));
2867 }
2868 /* If there are no bits set this should have gone out
2869 as one instruction! */
2870 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2871 all_bits_between_are_set = 1;
2872 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2873 {
2874 if (i < 32)
2875 {
2876 if ((low_bits & (1 << i)) != 0)
2877 continue;
2878 }
2879 else
2880 {
2881 if ((high_bits & (1 << (i - 32))) != 0)
2882 continue;
2883 }
2884 all_bits_between_are_set = 0;
2885 break;
2886 }
2887 *hbsp = highest_bit_set;
2888 *lbsp = lowest_bit_set;
2889 *abbasp = all_bits_between_are_set;
2890 }
2891
2892 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2893
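/* Return nonzero if the 64-bit constant formed by HIGH_BITS (upper word)
   and LOW_BITS (lower word) can be loaded with at most two instructions.  */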
2894 static int
2895 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2896 unsigned HOST_WIDE_INT low_bits)
2897 {
2898 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2899
2900 if (high_bits == 0
2901 || high_bits == 0xffffffff)
2902 return 1;
2903
2904 analyze_64bit_constant (high_bits, low_bits,
2905 &highest_bit_set, &lowest_bit_set,
2906 &all_bits_between_are_set);
2907
2908 if ((highest_bit_set == 63
2909 || lowest_bit_set == 0)
2910 && all_bits_between_are_set != 0)
2911 return 1;
2912
2913 if ((highest_bit_set - lowest_bit_set) < 21)
2914 return 1;
2915
2916 return 0;
2917 }
2918
2919 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2920 unsigned HOST_WIDE_INT,
2921 int, int);
2922
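/* Return the bits of the 64-bit constant formed by HIGH_BITS and LOW_BITS
   shifted right so that LOWEST_BIT_SET lands at bit 0, then shifted left
   by SHIFT.  */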
2923 static unsigned HOST_WIDE_INT
2924 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2925 unsigned HOST_WIDE_INT low_bits,
2926 int lowest_bit_set, int shift)
2927 {
2928 HOST_WIDE_INT hi, lo;
2929
2930 if (lowest_bit_set < 32)
2931 {
2932 lo = (low_bits >> lowest_bit_set) << shift;
2933 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2934 }
2935 else
2936 {
2937 lo = 0;
2938 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2939 }
2940 gcc_assert (! (hi & lo));
2941 return (hi | lo);
2942 }
2943
2944 /* Here we are sure to be arch64 and this is an integer constant
2945 being loaded into a register. Emit the most efficient
2946 insn sequence possible. Detection of all the 1-insn cases
2947 has been done already. */
2948 static void
2949 sparc_emit_set_const64 (rtx op0, rtx op1)
2950 {
2951 unsigned HOST_WIDE_INT high_bits, low_bits;
2952 int lowest_bit_set, highest_bit_set;
2953 int all_bits_between_are_set;
2954 rtx temp = 0;
2955
2956 /* Sanity check that we know what we are working with. */
2957 gcc_assert (TARGET_ARCH64
2958 && (GET_CODE (op0) == SUBREG
2959 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2960
2961 if (! can_create_pseudo_p ())
2962 temp = op0;
2963
2964 if (GET_CODE (op1) != CONST_INT)
2965 {
2966 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2967 return;
2968 }
2969
2970 if (! temp)
2971 temp = gen_reg_rtx (DImode);
2972
2973 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2974 low_bits = (INTVAL (op1) & 0xffffffff);
2975
2976 /* low_bits bits 0 --> 31
2977 high_bits bits 32 --> 63 */
2978
2979 analyze_64bit_constant (high_bits, low_bits,
2980 &highest_bit_set, &lowest_bit_set,
2981 &all_bits_between_are_set);
2982
2983 /* First try for a 2-insn sequence. */
2984
2985 /* These situations are preferred because the optimizer can
2986 * do more things with them:
2987 * 1) mov -1, %reg
2988 * sllx %reg, shift, %reg
2989 * 2) mov -1, %reg
2990 * srlx %reg, shift, %reg
2991 * 3) mov some_small_const, %reg
2992 * sllx %reg, shift, %reg
2993 */
2994 if (((highest_bit_set == 63
2995 || lowest_bit_set == 0)
2996 && all_bits_between_are_set != 0)
2997 || ((highest_bit_set - lowest_bit_set) < 12))
2998 {
2999 HOST_WIDE_INT the_const = -1;
3000 int shift = lowest_bit_set;
3001
3002 if ((highest_bit_set != 63
3003 && lowest_bit_set != 0)
3004 || all_bits_between_are_set == 0)
3005 {
3006 the_const =
3007 create_simple_focus_bits (high_bits, low_bits,
3008 lowest_bit_set, 0);
3009 }
3010 else if (lowest_bit_set == 0)
3011 shift = -(63 - highest_bit_set);
3012
3013 gcc_assert (SPARC_SIMM13_P (the_const));
3014 gcc_assert (shift != 0);
3015
3016 emit_insn (gen_safe_SET64 (temp, the_const));
3017 if (shift > 0)
3018 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
3019 GEN_INT (shift))));
3020 else if (shift < 0)
3021 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
3022 GEN_INT (-shift))));
3023 return;
3024 }
3025
3026 /* Now a range of 22 or fewer bits set somewhere.
3027 * 1) sethi %hi(focus_bits), %reg
3028 * sllx %reg, shift, %reg
3029 * 2) sethi %hi(focus_bits), %reg
3030 * srlx %reg, shift, %reg
3031 */
3032 if ((highest_bit_set - lowest_bit_set) < 21)
3033 {
3034 unsigned HOST_WIDE_INT focus_bits =
3035 create_simple_focus_bits (high_bits, low_bits,
3036 lowest_bit_set, 10);
3037
3038 gcc_assert (SPARC_SETHI_P (focus_bits));
3039 gcc_assert (lowest_bit_set != 10);
3040
3041 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
3042
3043 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
3044 if (lowest_bit_set < 10)
3045 emit_insn (gen_rtx_SET (op0,
3046 gen_rtx_LSHIFTRT (DImode, temp,
3047 GEN_INT (10 - lowest_bit_set))));
3048 else if (lowest_bit_set > 10)
3049 emit_insn (gen_rtx_SET (op0,
3050 gen_rtx_ASHIFT (DImode, temp,
3051 GEN_INT (lowest_bit_set - 10))));
3052 return;
3053 }
3054
3055 /* 1) sethi %hi(low_bits), %reg
3056 * or %reg, %lo(low_bits), %reg
3057 * 2) sethi %hi(~low_bits), %reg
3058 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
3059 */
3060 if (high_bits == 0
3061 || high_bits == 0xffffffff)
3062 {
3063 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
3064 (high_bits == 0xffffffff));
3065 return;
3066 }
3067
3068 /* Now, try 3-insn sequences. */
3069
3070 /* 1) sethi %hi(high_bits), %reg
3071 * or %reg, %lo(high_bits), %reg
3072 * sllx %reg, 32, %reg
3073 */
3074 if (low_bits == 0)
3075 {
3076 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
3077 return;
3078 }
3079
3080 /* We may be able to do something quick
3081 when the constant is negated, so try that. */
3082 if (const64_is_2insns ((~high_bits) & 0xffffffff,
3083 (~low_bits) & 0xfffffc00))
3084 {
3085 /* NOTE: The trailing bits get XOR'd so we need the
3086 non-negated bits, not the negated ones. */
3087 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
3088
3089 if ((((~high_bits) & 0xffffffff) == 0
3090 && ((~low_bits) & 0x80000000) == 0)
3091 || (((~high_bits) & 0xffffffff) == 0xffffffff
3092 && ((~low_bits) & 0x80000000) != 0))
3093 {
3094 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
3095
3096 if ((SPARC_SETHI_P (fast_int)
3097 && (~high_bits & 0xffffffff) == 0)
3098 || SPARC_SIMM13_P (fast_int))
3099 emit_insn (gen_safe_SET64 (temp, fast_int));
3100 else
3101 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
3102 }
3103 else
3104 {
3105 rtx negated_const;
3106 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
3107 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
3108 sparc_emit_set_const64 (temp, negated_const);
3109 }
3110
3111 /* If we are XOR'ing with -1, then we should emit a one's complement
3112 instead. This way the combiner will notice logical operations
3113 such as ANDN later on and substitute. */
3114 if (trailing_bits == 0x3ff)
3115 {
3116 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
3117 }
3118 else
3119 {
3120 emit_insn (gen_rtx_SET (op0,
3121 gen_safe_XOR64 (temp,
3122 (-0x400 | trailing_bits))));
3123 }
3124 return;
3125 }
3126
3127 /* 1) sethi %hi(xxx), %reg
3128 * or %reg, %lo(xxx), %reg
3129 * sllx %reg, yyy, %reg
3130 *
3131 * ??? This is just a generalized version of the low_bits==0
3132 * thing above, FIXME...
3133 */
3134 if ((highest_bit_set - lowest_bit_set) < 32)
3135 {
3136 unsigned HOST_WIDE_INT focus_bits =
3137 create_simple_focus_bits (high_bits, low_bits,
3138 lowest_bit_set, 0);
3139
3140 /* We can't get here in this state. */
3141 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
3142
3143 /* So what we know is that the set bits straddle the
3144 middle of the 64-bit word. */
3145 sparc_emit_set_const64_quick2 (op0, temp,
3146 focus_bits, 0,
3147 lowest_bit_set);
3148 return;
3149 }
3150
3151 /* 1) sethi %hi(high_bits), %reg
3152 * or %reg, %lo(high_bits), %reg
3153 * sllx %reg, 32, %reg
3154 * or %reg, low_bits, %reg
3155 */
3156 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
3157 {
3158 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
3159 return;
3160 }
3161
3162 /* The easiest way when all else fails, is full decomposition. */
3163 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
3164 }
3165
3166 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
3167
3168 static bool
3169 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3170 {
3171 *p1 = SPARC_ICC_REG;
3172 *p2 = SPARC_FCC_REG;
3173 return true;
3174 }
3175
3176 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
3177
3178 static unsigned int
3179 sparc_min_arithmetic_precision (void)
3180 {
3181 return 32;
3182 }
3183
3184 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
3185 return the mode to be used for the comparison. For floating-point,
3186 CCFP[E]mode is used. CCNZmode should be used when the first operand
3187 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
3188 processing is needed. */
3189
3190 machine_mode
3191 select_cc_mode (enum rtx_code op, rtx x, rtx y)
3192 {
3193 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3194 {
3195 switch (op)
3196 {
3197 case EQ:
3198 case NE:
3199 case UNORDERED:
3200 case ORDERED:
3201 case UNLT:
3202 case UNLE:
3203 case UNGT:
3204 case UNGE:
3205 case UNEQ:
3206 return CCFPmode;
3207
3208 case LT:
3209 case LE:
3210 case GT:
3211 case GE:
3212 case LTGT:
3213 return CCFPEmode;
3214
3215 default:
3216 gcc_unreachable ();
3217 }
3218 }
3219 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
3220 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
3221 && y == const0_rtx)
3222 {
3223 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3224 return CCXNZmode;
3225 else
3226 return CCNZmode;
3227 }
3228 else
3229 {
3230 /* This is for the cmp<mode>_sne pattern. */
3231 if (GET_CODE (x) == NOT && y == constm1_rtx)
3232 {
3233 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3234 return CCXCmode;
3235 else
3236 return CCCmode;
3237 }
3238
3239 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
3240 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
3241 {
3242 if (GET_CODE (y) == UNSPEC
3243 && (XINT (y, 1) == UNSPEC_ADDV
3244 || XINT (y, 1) == UNSPEC_SUBV
3245 || XINT (y, 1) == UNSPEC_NEGV))
3246 return CCVmode;
3247 else
3248 return CCCmode;
3249 }
3250
3251 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3252 return CCXmode;
3253 else
3254 return CCmode;
3255 }
3256 }
3257
3258 /* Emit the compare insn and return the CC reg for a CODE comparison
3259 with operands X and Y. */
3260
3261 static rtx
3262 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
3263 {
3264 machine_mode mode;
3265 rtx cc_reg;
3266
3267 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
3268 return x;
3269
3270 mode = SELECT_CC_MODE (code, x, y);
3271
3272 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3273 fcc regs (cse can't tell they're really call clobbered regs and will
3274 remove a duplicate comparison even if there is an intervening function
3275 call - it will then try to reload the cc reg via an int reg which is why
3276 we need the movcc patterns). It is possible to provide the movcc
3277 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3278 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3279 to tell cse that CCFPE mode registers (even pseudos) are call
3280 clobbered. */
3281
3282 /* ??? This is an experiment. Rather than making changes to cse which may
3283 or may not be easy/clean, we do our own cse. This is possible because
3284 we will generate hard registers. Cse knows they're call clobbered (it
3285 doesn't know the same thing about pseudos). If we guess wrong, no big
3286 deal, but if we win, great! */
3287
3288 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3289 #if 1 /* experiment */
3290 {
3291 int reg;
3292 /* We cycle through the registers to ensure they're all exercised. */
3293 static int next_fcc_reg = 0;
3294 /* Previous x,y for each fcc reg. */
3295 static rtx prev_args[4][2];
3296
3297 /* Scan prev_args for x,y. */
3298 for (reg = 0; reg < 4; reg++)
3299 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3300 break;
3301 if (reg == 4)
3302 {
3303 reg = next_fcc_reg;
3304 prev_args[reg][0] = x;
3305 prev_args[reg][1] = y;
3306 next_fcc_reg = (next_fcc_reg + 1) & 3;
3307 }
3308 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3309 }
3310 #else
3311 cc_reg = gen_reg_rtx (mode);
3312 #endif /* ! experiment */
3313 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3314 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3315 else
3316 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3317
3318 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
3319 will only result in an unrecognizable insn so no point in asserting. */
3320 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3321
3322 return cc_reg;
3323 }
3324
3325
3326 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3327
3328 rtx
3329 gen_compare_reg (rtx cmp)
3330 {
3331 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3332 }
3333
3334 /* This function is used for v9 only.
3335 DEST is the target of the Scc insn.
3336 CODE is the code for an Scc's comparison.
3337 X and Y are the values we compare.
3338
3339 This function is needed to turn
3340
3341 (set (reg:SI 110)
3342 (gt (reg:CCX 100 %icc)
3343 (const_int 0)))
3344 into
3345 (set (reg:SI 110)
3346 (gt:DI (reg:CCX 100 %icc)
3347 (const_int 0)))
3348
3349 I.e., the instruction recognizer needs to see the mode of the comparison to
3350 find the right instruction. We could use "gt:DI" right in the
3351 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3352
3353 static int
3354 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3355 {
3356 if (! TARGET_ARCH64
3357 && (GET_MODE (x) == DImode
3358 || GET_MODE (dest) == DImode))
3359 return 0;
3360
3361 /* Try to use the movrCC insns. */
3362 if (TARGET_ARCH64
3363 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3364 && y == const0_rtx
3365 && v9_regcmp_p (compare_code))
3366 {
3367 rtx op0 = x;
3368 rtx temp;
3369
3370 /* Special case for op0 != 0. This can be done with one instruction if
3371 dest == x. */
3372
3373 if (compare_code == NE
3374 && GET_MODE (dest) == DImode
3375 && rtx_equal_p (op0, dest))
3376 {
3377 emit_insn (gen_rtx_SET (dest,
3378 gen_rtx_IF_THEN_ELSE (DImode,
3379 gen_rtx_fmt_ee (compare_code, DImode,
3380 op0, const0_rtx),
3381 const1_rtx,
3382 dest)));
3383 return 1;
3384 }
3385
3386 if (reg_overlap_mentioned_p (dest, op0))
3387 {
3388 /* Handle the case where dest == x.
3389 We "early clobber" the result. */
3390 op0 = gen_reg_rtx (GET_MODE (x));
3391 emit_move_insn (op0, x);
3392 }
3393
3394 emit_insn (gen_rtx_SET (dest, const0_rtx));
3395 if (GET_MODE (op0) != DImode)
3396 {
3397 temp = gen_reg_rtx (DImode);
3398 convert_move (temp, op0, 0);
3399 }
3400 else
3401 temp = op0;
3402 emit_insn (gen_rtx_SET (dest,
3403 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3404 gen_rtx_fmt_ee (compare_code, DImode,
3405 temp, const0_rtx),
3406 const1_rtx,
3407 dest)));
3408 return 1;
3409 }
3410 else
3411 {
3412 x = gen_compare_reg_1 (compare_code, x, y);
3413 y = const0_rtx;
3414
3415 emit_insn (gen_rtx_SET (dest, const0_rtx));
3416 emit_insn (gen_rtx_SET (dest,
3417 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3418 gen_rtx_fmt_ee (compare_code,
3419 GET_MODE (x), x, y),
3420 const1_rtx, dest)));
3421 return 1;
3422 }
3423 }
3424
3425
3426 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3427 without jumps using the addx/subx instructions. */
3428
3429 bool
3430 emit_scc_insn (rtx operands[])
3431 {
3432 rtx tem, x, y;
3433 enum rtx_code code;
3434 machine_mode mode;
3435
3436 /* The quad-word fp compare library routines all return nonzero to indicate
3437 true, which is different from the equivalent libgcc routines, so we must
3438 handle them specially here. */
3439 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3440 {
3441 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3442 GET_CODE (operands[1]));
3443 operands[2] = XEXP (operands[1], 0);
3444 operands[3] = XEXP (operands[1], 1);
3445 }
3446
3447 code = GET_CODE (operands[1]);
3448 x = operands[2];
3449 y = operands[3];
3450 mode = GET_MODE (x);
3451
3452 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3453 more applications). The exception to this is "reg != 0" which can
3454 be done in one instruction on v9 (so we do it). */
3455 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3456 {
3457 if (y != const0_rtx)
3458 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3459
3460 rtx pat = gen_rtx_SET (operands[0],
3461 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3462 x, const0_rtx));
3463
3464 /* If we can use addx/subx or addxc, add a clobber for CC. */
3465 if (mode == SImode || (code == NE && TARGET_VIS3))
3466 {
3467 rtx clobber
3468 = gen_rtx_CLOBBER (VOIDmode,
3469 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3470 SPARC_ICC_REG));
3471 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3472 }
3473
3474 emit_insn (pat);
3475 return true;
3476 }
3477
3478 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3479 if (TARGET_ARCH64
3480 && mode == DImode
3481 && !((code == LTU || code == GTU) && TARGET_VIS3)
3482 && gen_v9_scc (operands[0], code, x, y))
3483 return true;
3484
3485 /* We can do LTU and GEU using the addx/subx instructions too. And
3486 for GTU/LEU, if both operands are registers, swap them and fall
3487 back to the easy case. */
3488 if (code == GTU || code == LEU)
3489 {
3490 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3491 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3492 {
3493 tem = x;
3494 x = y;
3495 y = tem;
3496 code = swap_condition (code);
3497 }
3498 }
3499
3500 if (code == LTU || code == GEU)
3501 {
3502 emit_insn (gen_rtx_SET (operands[0],
3503 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3504 gen_compare_reg_1 (code, x, y),
3505 const0_rtx)));
3506 return true;
3507 }
3508
3509 /* All the possibilities to use addx/subx-based sequences have been
3510 exhausted; try for a 3-instruction sequence using v9 conditional
3511 moves. */
3512 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3513 return true;
3514
3515 /* Nope, do branches. */
3516 return false;
3517 }
3518
3519 /* Emit a conditional jump insn for the v9 architecture using comparison code
3520 CODE and jump target LABEL.
3521 This function exists to take advantage of the v9 brxx insns. */
3522
3523 static void
3524 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3525 {
3526 emit_jump_insn (gen_rtx_SET (pc_rtx,
3527 gen_rtx_IF_THEN_ELSE (VOIDmode,
3528 gen_rtx_fmt_ee (code, GET_MODE (op0),
3529 op0, const0_rtx),
3530 gen_rtx_LABEL_REF (VOIDmode, label),
3531 pc_rtx)));
3532 }
3533
3534 /* Emit a conditional jump insn for the UA2011 architecture using
3535 comparison code CODE and jump target LABEL. This function exists
3536 to take advantage of the UA2011 Compare and Branch insns. */
3537
3538 static void
3539 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3540 {
3541 rtx if_then_else;
3542
3543 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3544 gen_rtx_fmt_ee (code, GET_MODE (op0),
3545 op0, op1),
3546 gen_rtx_LABEL_REF (VOIDmode, label),
3547 pc_rtx);
3548
3549 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3550 }
3551
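/* Emit a conditional branch insn for the comparison described by
   OPERANDS[0] (code), OPERANDS[1] and OPERANDS[2] (operands), branching
   to the label in OPERANDS[3].  */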
3552 void
3553 emit_conditional_branch_insn (rtx operands[])
3554 {
3555 /* The quad-word fp compare library routines all return nonzero to indicate
3556 true, which is different from the equivalent libgcc routines, so we must
3557 handle them specially here. */
3558 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3559 {
3560 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3561 GET_CODE (operands[0]));
3562 operands[1] = XEXP (operands[0], 0);
3563 operands[2] = XEXP (operands[0], 1);
3564 }
3565
3566 /* If we can tell early on that the comparison is against a constant
3567 that won't fit in the 5-bit signed immediate field of a cbcond,
3568 use one of the other v9 conditional branch sequences. */
3569 if (TARGET_CBCOND
3570 && GET_CODE (operands[1]) == REG
3571 && (GET_MODE (operands[1]) == SImode
3572 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3573 && (GET_CODE (operands[2]) != CONST_INT
3574 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3575 {
3576 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3577 return;
3578 }
3579
3580 if (TARGET_ARCH64 && operands[2] == const0_rtx
3581 && GET_CODE (operands[1]) == REG
3582 && GET_MODE (operands[1]) == DImode)
3583 {
3584 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3585 return;
3586 }
3587
3588 operands[1] = gen_compare_reg (operands[0]);
3589 operands[2] = const0_rtx;
3590 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3591 operands[1], operands[2]);
3592 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3593 operands[3]));
3594 }
3595
3596
3597 /* Generate a DFmode part of a hard TFmode register.
3598 REG is the TFmode hard register, LOW is 1 for the
3599 low 64 bits of the register and 0 otherwise.
3600 */
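/* For example (assuming the usual big-endian word ordering), for a
   TFmode value in %f4, LOW == 0 yields %f4 and LOW == 1 yields %f6.  */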
3601 rtx
3602 gen_df_reg (rtx reg, int low)
3603 {
3604 int regno = REGNO (reg);
3605
3606 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3607 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3608 return gen_rtx_REG (DFmode, regno);
3609 }
3610 \f
3611 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3612 Unlike normal calls, TFmode operands are passed by reference. It is
3613 assumed that no more than 3 operands are required. */
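/* For instance, a TFmode addition becomes a call roughly equivalent to
   _Qp_add (&result, &op1, &op2): the result slot and both sources are
   all passed by reference.  */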
3614
3615 static void
3616 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3617 {
3618 rtx ret_slot = NULL, arg[3], func_sym;
3619 int i;
3620
3621 /* We only expect to be called for conversions, unary, and binary ops. */
3622 gcc_assert (nargs == 2 || nargs == 3);
3623
3624 for (i = 0; i < nargs; ++i)
3625 {
3626 rtx this_arg = operands[i];
3627 rtx this_slot;
3628
3629 /* TFmode arguments and return values are passed by reference. */
3630 if (GET_MODE (this_arg) == TFmode)
3631 {
3632 int force_stack_temp;
3633
3634 force_stack_temp = 0;
3635 if (TARGET_BUGGY_QP_LIB && i == 0)
3636 force_stack_temp = 1;
3637
3638 if (GET_CODE (this_arg) == MEM
3639 && ! force_stack_temp)
3640 {
3641 tree expr = MEM_EXPR (this_arg);
3642 if (expr)
3643 mark_addressable (expr);
3644 this_arg = XEXP (this_arg, 0);
3645 }
3646 else if (CONSTANT_P (this_arg)
3647 && ! force_stack_temp)
3648 {
3649 this_slot = force_const_mem (TFmode, this_arg);
3650 this_arg = XEXP (this_slot, 0);
3651 }
3652 else
3653 {
3654 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3655
3656 /* Operand 0 is the return value. We'll copy it out later. */
3657 if (i > 0)
3658 emit_move_insn (this_slot, this_arg);
3659 else
3660 ret_slot = this_slot;
3661
3662 this_arg = XEXP (this_slot, 0);
3663 }
3664 }
3665
3666 arg[i] = this_arg;
3667 }
3668
3669 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3670
3671 if (GET_MODE (operands[0]) == TFmode)
3672 {
3673 if (nargs == 2)
3674 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3675 arg[0], GET_MODE (arg[0]),
3676 arg[1], GET_MODE (arg[1]));
3677 else
3678 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3679 arg[0], GET_MODE (arg[0]),
3680 arg[1], GET_MODE (arg[1]),
3681 arg[2], GET_MODE (arg[2]));
3682
3683 if (ret_slot)
3684 emit_move_insn (operands[0], ret_slot);
3685 }
3686 else
3687 {
3688 rtx ret;
3689
3690 gcc_assert (nargs == 2);
3691
3692 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3693 GET_MODE (operands[0]),
3694 arg[1], GET_MODE (arg[1]));
3695
3696 if (ret != operands[0])
3697 emit_move_insn (operands[0], ret);
3698 }
3699 }
3700
3701 /* Expand soft-float TFmode calls to sparc abi routines. */
3702
3703 static void
3704 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3705 {
3706 const char *func;
3707
3708 switch (code)
3709 {
3710 case PLUS:
3711 func = "_Qp_add";
3712 break;
3713 case MINUS:
3714 func = "_Qp_sub";
3715 break;
3716 case MULT:
3717 func = "_Qp_mul";
3718 break;
3719 case DIV:
3720 func = "_Qp_div";
3721 break;
3722 default:
3723 gcc_unreachable ();
3724 }
3725
3726 emit_soft_tfmode_libcall (func, 3, operands);
3727 }
3728
3729 static void
3730 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3731 {
3732 const char *func;
3733
3734 gcc_assert (code == SQRT);
3735 func = "_Qp_sqrt";
3736
3737 emit_soft_tfmode_libcall (func, 2, operands);
3738 }
3739
3740 static void
3741 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3742 {
3743 const char *func;
3744
3745 switch (code)
3746 {
3747 case FLOAT_EXTEND:
3748 switch (GET_MODE (operands[1]))
3749 {
3750 case E_SFmode:
3751 func = "_Qp_stoq";
3752 break;
3753 case E_DFmode:
3754 func = "_Qp_dtoq";
3755 break;
3756 default:
3757 gcc_unreachable ();
3758 }
3759 break;
3760
3761 case FLOAT_TRUNCATE:
3762 switch (GET_MODE (operands[0]))
3763 {
3764 case E_SFmode:
3765 func = "_Qp_qtos";
3766 break;
3767 case E_DFmode:
3768 func = "_Qp_qtod";
3769 break;
3770 default:
3771 gcc_unreachable ();
3772 }
3773 break;
3774
3775 case FLOAT:
3776 switch (GET_MODE (operands[1]))
3777 {
3778 case E_SImode:
3779 func = "_Qp_itoq";
3780 if (TARGET_ARCH64)
3781 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3782 break;
3783 case E_DImode:
3784 func = "_Qp_xtoq";
3785 break;
3786 default:
3787 gcc_unreachable ();
3788 }
3789 break;
3790
3791 case UNSIGNED_FLOAT:
3792 switch (GET_MODE (operands[1]))
3793 {
3794 case E_SImode:
3795 func = "_Qp_uitoq";
3796 if (TARGET_ARCH64)
3797 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3798 break;
3799 case E_DImode:
3800 func = "_Qp_uxtoq";
3801 break;
3802 default:
3803 gcc_unreachable ();
3804 }
3805 break;
3806
3807 case FIX:
3808 switch (GET_MODE (operands[0]))
3809 {
3810 case E_SImode:
3811 func = "_Qp_qtoi";
3812 break;
3813 case E_DImode:
3814 func = "_Qp_qtox";
3815 break;
3816 default:
3817 gcc_unreachable ();
3818 }
3819 break;
3820
3821 case UNSIGNED_FIX:
3822 switch (GET_MODE (operands[0]))
3823 {
3824 case E_SImode:
3825 func = "_Qp_qtoui";
3826 break;
3827 case E_DImode:
3828 func = "_Qp_qtoux";
3829 break;
3830 default:
3831 gcc_unreachable ();
3832 }
3833 break;
3834
3835 default:
3836 gcc_unreachable ();
3837 }
3838
3839 emit_soft_tfmode_libcall (func, 2, operands);
3840 }
3841
3842 /* Expand a hard-float tfmode operation. All arguments must be in
3843 registers. */
3844
3845 static void
3846 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3847 {
3848 rtx op, dest;
3849
3850 if (GET_RTX_CLASS (code) == RTX_UNARY)
3851 {
3852 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3853 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3854 }
3855 else
3856 {
3857 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3858 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3859 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3860 operands[1], operands[2]);
3861 }
3862
3863 if (register_operand (operands[0], VOIDmode))
3864 dest = operands[0];
3865 else
3866 dest = gen_reg_rtx (GET_MODE (operands[0]));
3867
3868 emit_insn (gen_rtx_SET (dest, op));
3869
3870 if (dest != operands[0])
3871 emit_move_insn (operands[0], dest);
3872 }
3873
3874 void
3875 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3876 {
3877 if (TARGET_HARD_QUAD)
3878 emit_hard_tfmode_operation (code, operands);
3879 else
3880 emit_soft_tfmode_binop (code, operands);
3881 }
3882
3883 void
3884 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3885 {
3886 if (TARGET_HARD_QUAD)
3887 emit_hard_tfmode_operation (code, operands);
3888 else
3889 emit_soft_tfmode_unop (code, operands);
3890 }
3891
3892 void
3893 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3894 {
3895 if (TARGET_HARD_QUAD)
3896 emit_hard_tfmode_operation (code, operands);
3897 else
3898 emit_soft_tfmode_cvt (code, operands);
3899 }
3900 \f
3901 /* Return nonzero if a branch/jump/call instruction will be emitting
3902 a nop into its delay slot. */
3903
3904 int
3905 empty_delay_slot (rtx_insn *insn)
3906 {
3907 rtx seq;
3908
3909 /* If no previous instruction (should not happen), return true. */
3910 if (PREV_INSN (insn) == NULL)
3911 return 1;
3912
3913 seq = NEXT_INSN (PREV_INSN (insn));
3914 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3915 return 0;
3916
3917 return 1;
3918 }
3919
3920 /* Return nonzero if we should emit a nop after a cbcond instruction.
3921 The cbcond instruction does not have a delay slot; however, there is
3922 a severe performance penalty if a control transfer appears right
3923 after a cbcond. Therefore we emit a nop when we detect this
3924 situation. */
3925
3926 int
3927 emit_cbcond_nop (rtx_insn *insn)
3928 {
3929 rtx next = next_active_insn (insn);
3930
3931 if (!next)
3932 return 1;
3933
3934 if (NONJUMP_INSN_P (next)
3935 && GET_CODE (PATTERN (next)) == SEQUENCE)
3936 next = XVECEXP (PATTERN (next), 0, 0);
3937 else if (CALL_P (next)
3938 && GET_CODE (PATTERN (next)) == PARALLEL)
3939 {
3940 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3941
3942 if (GET_CODE (delay) == RETURN)
3943 {
3944 /* It's a sibling call. Do not emit the nop if we're going
3945 to emit something other than the jump itself as the first
3946 instruction of the sibcall sequence. */
3947 if (sparc_leaf_function_p || TARGET_FLAT)
3948 return 0;
3949 }
3950 }
3951
3952 if (NONJUMP_INSN_P (next))
3953 return 0;
3954
3955 return 1;
3956 }
3957
3958 /* Return nonzero if TRIAL can go into the call delay slot. */
3959
3960 int
3961 eligible_for_call_delay (rtx_insn *trial)
3962 {
3963 rtx pat;
3964
3965 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3966 return 0;
3967
3968 /* Binutils allows
3969 call __tls_get_addr, %tgd_call (foo)
3970 add %l7, %o0, %o0, %tgd_add (foo)
3971 while Sun as/ld does not. */
3972 if (TARGET_GNU_TLS || !TARGET_TLS)
3973 return 1;
3974
3975 pat = PATTERN (trial);
3976
3977 /* We must reject tgd_add{32|64}, i.e.
3978 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3979 and tldm_add{32|64}, i.e.
3980 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3981 for Sun as/ld. */
3982 if (GET_CODE (pat) == SET
3983 && GET_CODE (SET_SRC (pat)) == PLUS)
3984 {
3985 rtx unspec = XEXP (SET_SRC (pat), 1);
3986
3987 if (GET_CODE (unspec) == UNSPEC
3988 && (XINT (unspec, 1) == UNSPEC_TLSGD
3989 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3990 return 0;
3991 }
3992
3993 return 1;
3994 }
3995
3996 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3997 instruction. RETURN_P is true if the v9 variant 'return' is to be
3998 considered in the test too.
3999
4000 TRIAL must be a SET whose destination is a REG appropriate for the
4001 'restore' instruction or, if RETURN_P is true, for the 'return'
4002 instruction. */
4003
4004 static int
4005 eligible_for_restore_insn (rtx trial, bool return_p)
4006 {
4007 rtx pat = PATTERN (trial);
4008 rtx src = SET_SRC (pat);
4009 bool src_is_freg = false;
4010 rtx src_reg;
4011
4012 /* Since we now can do moves between float and integer registers when
4013 VIS3 is enabled, we have to catch this case. We can allow such
4014 moves when doing a 'return', however. */
4015 src_reg = src;
4016 if (GET_CODE (src_reg) == SUBREG)
4017 src_reg = SUBREG_REG (src_reg);
4018 if (GET_CODE (src_reg) == REG
4019 && SPARC_FP_REG_P (REGNO (src_reg)))
4020 src_is_freg = true;
4021
4022 /* The 'restore src,%g0,dest' pattern for word mode and below. */
4023 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4024 && arith_operand (src, GET_MODE (src))
4025 && ! src_is_freg)
4026 {
4027 if (TARGET_ARCH64)
4028 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4029 else
4030 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
4031 }
4032
4033 /* The 'restore src,%g0,dest' pattern for double-word mode. */
4034 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4035 && arith_double_operand (src, GET_MODE (src))
4036 && ! src_is_freg)
4037 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4038
4039 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
4040 else if (! TARGET_FPU && register_operand (src, SFmode))
4041 return 1;
4042
4043 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
4044 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
4045 return 1;
4046
4047 /* If we have the 'return' instruction, anything that does not use
4048 local or output registers and can go into a delay slot wins. */
4049 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
4050 return 1;
4051
4052 /* The 'restore src1,src2,dest' pattern for SImode. */
4053 else if (GET_CODE (src) == PLUS
4054 && register_operand (XEXP (src, 0), SImode)
4055 && arith_operand (XEXP (src, 1), SImode))
4056 return 1;
4057
4058 /* The 'restore src1,src2,dest' pattern for DImode. */
4059 else if (GET_CODE (src) == PLUS
4060 && register_operand (XEXP (src, 0), DImode)
4061 && arith_double_operand (XEXP (src, 1), DImode))
4062 return 1;
4063
4064 /* The 'restore src1,%lo(src2),dest' pattern. */
4065 else if (GET_CODE (src) == LO_SUM
4066 && ! TARGET_CM_MEDMID
4067 && ((register_operand (XEXP (src, 0), SImode)
4068 && immediate_operand (XEXP (src, 1), SImode))
4069 || (TARGET_ARCH64
4070 && register_operand (XEXP (src, 0), DImode)
4071 && immediate_operand (XEXP (src, 1), DImode))))
4072 return 1;
4073
4074 /* The 'restore src,src,dest' pattern. */
4075 else if (GET_CODE (src) == ASHIFT
4076 && (register_operand (XEXP (src, 0), SImode)
4077 || register_operand (XEXP (src, 0), DImode))
4078 && XEXP (src, 1) == const1_rtx)
4079 return 1;
4080
4081 return 0;
4082 }
4083
4084 /* Return nonzero if TRIAL can go into the function return's delay slot. */
4085
4086 int
4087 eligible_for_return_delay (rtx_insn *trial)
4088 {
4089 int regno;
4090 rtx pat;
4091
4092 /* If the function uses __builtin_eh_return, the eh_return machinery
4093 occupies the delay slot. */
4094 if (crtl->calls_eh_return)
4095 return 0;
4096
4097 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4098 return 0;
4099
4100 /* In the case of a leaf or flat function, anything can go into the slot. */
4101 if (sparc_leaf_function_p || TARGET_FLAT)
4102 return 1;
4103
4104 if (!NONJUMP_INSN_P (trial))
4105 return 0;
4106
4107 pat = PATTERN (trial);
4108 if (GET_CODE (pat) == PARALLEL)
4109 {
4110 int i;
4111
4112 if (! TARGET_V9)
4113 return 0;
4114 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
4115 {
4116 rtx expr = XVECEXP (pat, 0, i);
4117 if (GET_CODE (expr) != SET)
4118 return 0;
4119 if (GET_CODE (SET_DEST (expr)) != REG)
4120 return 0;
4121 regno = REGNO (SET_DEST (expr));
4122 if (regno >= 8 && regno < 24)
4123 return 0;
4124 }
4125 return !epilogue_renumber (&pat, 1);
4126 }
4127
4128 if (GET_CODE (pat) != SET)
4129 return 0;
4130
4131 if (GET_CODE (SET_DEST (pat)) != REG)
4132 return 0;
4133
4134 regno = REGNO (SET_DEST (pat));
4135
4136 /* Otherwise, only operations which can be done in tandem with
4137 a `restore' or `return' insn can go into the delay slot. */
4138 if (regno >= 8 && regno < 24)
4139 return 0;
4140
4141 /* If this instruction sets up a floating point register and we have a return
4142 instruction, it can probably go in. But restore will not work
4143 with FP_REGS. */
4144 if (! SPARC_INT_REG_P (regno))
4145 return TARGET_V9 && !epilogue_renumber (&pat, 1);
4146
4147 return eligible_for_restore_insn (trial, true);
4148 }
4149
4150 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
4151
4152 int
4153 eligible_for_sibcall_delay (rtx_insn *trial)
4154 {
4155 rtx pat;
4156
4157 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4158 return 0;
4159
4160 if (!NONJUMP_INSN_P (trial))
4161 return 0;
4162
4163 pat = PATTERN (trial);
4164
4165 if (sparc_leaf_function_p || TARGET_FLAT)
4166 {
4167 /* If the tail call is done using the call instruction,
4168 we have to restore %o7 in the delay slot. */
4169 if (LEAF_SIBCALL_SLOT_RESERVED_P)
4170 return 0;
4171
4172 /* %g1 is used to build the function address. */
4173 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
4174 return 0;
4175
4176 return 1;
4177 }
4178
4179 if (GET_CODE (pat) != SET)
4180 return 0;
4181
4182 /* Otherwise, only operations which can be done in tandem with
4183 a `restore' insn can go into the delay slot. */
4184 if (GET_CODE (SET_DEST (pat)) != REG
4185 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
4186 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
4187 return 0;
4188
4189 /* If it mentions %o7, it can't go in, because sibcall will clobber it
4190 in most cases. */
4191 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
4192 return 0;
4193
4194 return eligible_for_restore_insn (trial, false);
4195 }
4196 \f
4197 /* Determine if it's legal to put X into the constant pool. This
4198 is not possible if X contains the address of a symbol that is
4199 not constant (TLS) or not known at final link time (PIC). */
4200
4201 static bool
4202 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
4203 {
4204 /* After IRA has run in PIC mode, it is too late to put anything into the
4205 constant pool if the PIC register hasn't already been initialized. */
4206 if ((lra_in_progress || reload_in_progress)
4207 && flag_pic
4208 && !crtl->uses_pic_offset_table)
4209 return true;
4210
4211 switch (GET_CODE (x))
4212 {
4213 case CONST_INT:
4214 case CONST_WIDE_INT:
4215 case CONST_DOUBLE:
4216 case CONST_VECTOR:
4217 /* Accept all non-symbolic constants. */
4218 return false;
4219
4220 case LABEL_REF:
4221 /* Labels are OK iff we are non-PIC. */
4222 return flag_pic != 0;
4223
4224 case SYMBOL_REF:
4225 /* 'Naked' TLS symbol references are never OK,
4226 non-TLS symbols are OK iff we are non-PIC. */
4227 if (SYMBOL_REF_TLS_MODEL (x))
4228 return true;
4229 else
4230 return flag_pic != 0;
4231
4232 case CONST:
4233 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
4234 case PLUS:
4235 case MINUS:
4236 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
4237 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
4238 case UNSPEC:
4239 return true;
4240 default:
4241 gcc_unreachable ();
4242 }
4243 }
4244 \f
4245 /* Global Offset Table support. */
4246 static GTY(()) rtx got_helper_rtx = NULL_RTX;
4247 static GTY(()) rtx got_register_rtx = NULL_RTX;
4248 static GTY(()) rtx got_symbol_rtx = NULL_RTX;
4249
4250 /* Return the SYMBOL_REF for the Global Offset Table. */
4251
4252 static rtx
4253 sparc_got (void)
4254 {
4255 if (!got_symbol_rtx)
4256 got_symbol_rtx = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
4257
4258 return got_symbol_rtx;
4259 }
4260
4261 #ifdef HAVE_GAS_HIDDEN
4262 # define USE_HIDDEN_LINKONCE 1
4263 #else
4264 # define USE_HIDDEN_LINKONCE 0
4265 #endif
4266
4267 static void
4268 get_pc_thunk_name (char name[32], unsigned int regno)
4269 {
4270 const char *reg_name = reg_names[regno];
4271
4272 /* Skip the leading '%' as that cannot be used in a
4273 symbol name. */
4274 reg_name += 1;
4275
4276 if (USE_HIDDEN_LINKONCE)
4277 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4278 else
4279 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4280 }
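/* The thunk body itself, emitted elsewhere, essentially just returns
   while adding the caller's PC (%o7) into the destination register, so
   that the caller can form PC-relative values such as the GOT address.  */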
4281
4282 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4283
4284 static rtx
4285 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2)
4286 {
4287 int orig_flag_pic = flag_pic;
4288 rtx insn;
4289
4290 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4291 flag_pic = 0;
4292 if (TARGET_ARCH64)
4293 insn = gen_load_pcrel_symdi (op0, op1, op2, GEN_INT (REGNO (op0)));
4294 else
4295 insn = gen_load_pcrel_symsi (op0, op1, op2, GEN_INT (REGNO (op0)));
4296 flag_pic = orig_flag_pic;
4297
4298 return insn;
4299 }
4300
4301 /* Emit code to load the GOT register. */
4302
4303 void
4304 load_got_register (void)
4305 {
4306 if (!got_register_rtx)
4307 got_register_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4308
4309 if (TARGET_VXWORKS_RTP)
4310 emit_insn (gen_vxworks_load_got ());
4311 else
4312 {
4313 /* The GOT symbol is subject to a PC-relative relocation so we need a
4314 helper function to add the PC value and thus get the final value. */
4315 if (!got_helper_rtx)
4316 {
4317 char name[32];
4318 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4319 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4320 }
4321
4322 emit_insn (gen_load_pcrel_sym (got_register_rtx, sparc_got (),
4323 got_helper_rtx));
4324 }
4325 }
4326
4327 /* Ensure that we are not using patterns that are not OK with PIC. */
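/* flag_pic == 1 corresponds to -fpic (13-bit GOT offsets on SPARC) and
   flag_pic == 2 to -fPIC (full 32-bit GOT offsets).  */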
4328
4329 int
4330 check_pic (int i)
4331 {
4332 rtx op;
4333
4334 switch (flag_pic)
4335 {
4336 case 1:
4337 op = recog_data.operand[i];
4338 gcc_assert (GET_CODE (op) != SYMBOL_REF
4339 && (GET_CODE (op) != CONST
4340 || (GET_CODE (XEXP (op, 0)) == MINUS
4341 && XEXP (XEXP (op, 0), 0) == sparc_got ()
4342 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
4343 /* fallthrough */
4344 case 2:
4345 default:
4346 return 1;
4347 }
4348 }
4349
4350 /* Return true if X is an address which needs a temporary register when
4351 reloaded while generating PIC code. */
4352
4353 int
4354 pic_address_needs_scratch (rtx x)
4355 {
4356 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
4357 if (GET_CODE (x) == CONST
4358 && GET_CODE (XEXP (x, 0)) == PLUS
4359 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4360 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4361 && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
4362 return 1;
4363
4364 return 0;
4365 }
4366
4367 /* Determine if a given RTX is a valid constant. We already know this
4368 satisfies CONSTANT_P. */
4369
4370 static bool
4371 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4372 {
4373 switch (GET_CODE (x))
4374 {
4375 case CONST:
4376 case SYMBOL_REF:
4377 if (sparc_tls_referenced_p (x))
4378 return false;
4379 break;
4380
4381 case CONST_DOUBLE:
4382 /* Floating point constants are generally not ok.
4383 The only exceptions are 0.0 and all-ones in VIS. */
4384 if (TARGET_VIS
4385 && SCALAR_FLOAT_MODE_P (mode)
4386 && (const_zero_operand (x, mode)
4387 || const_all_ones_operand (x, mode)))
4388 return true;
4389
4390 return false;
4391
4392 case CONST_VECTOR:
4393 /* Vector constants are generally not ok.
4394 The only exceptions are 0 and -1 in VIS. */
4395 if (TARGET_VIS
4396 && (const_zero_operand (x, mode)
4397 || const_all_ones_operand (x, mode)))
4398 return true;
4399
4400 return false;
4401
4402 default:
4403 break;
4404 }
4405
4406 return true;
4407 }
4408
4409 /* Determine if a given RTX is a valid constant address. */
4410
4411 bool
4412 constant_address_p (rtx x)
4413 {
4414 switch (GET_CODE (x))
4415 {
4416 case LABEL_REF:
4417 case CONST_INT:
4418 case HIGH:
4419 return true;
4420
4421 case CONST:
4422 if (flag_pic && pic_address_needs_scratch (x))
4423 return false;
4424 return sparc_legitimate_constant_p (Pmode, x);
4425
4426 case SYMBOL_REF:
4427 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4428
4429 default:
4430 return false;
4431 }
4432 }
4433
4434 /* Nonzero if the constant value X is a legitimate general operand
4435 when generating PIC code. It is given that flag_pic is on and
4436 that X satisfies CONSTANT_P. */
4437
4438 bool
4439 legitimate_pic_operand_p (rtx x)
4440 {
4441 if (pic_address_needs_scratch (x))
4442 return false;
4443 if (sparc_tls_referenced_p (x))
4444 return false;
4445 return true;
4446 }
4447
4448 /* Return true if X is a representation of the PIC register. */
4449
4450 static bool
4451 sparc_pic_register_p (rtx x)
4452 {
4453 if (!REG_P (x) || !pic_offset_table_rtx)
4454 return false;
4455
4456 if (x == pic_offset_table_rtx)
4457 return true;
4458
4459 if (!HARD_REGISTER_P (pic_offset_table_rtx)
4460 && (HARD_REGISTER_P (x) || lra_in_progress || reload_in_progress)
4461 && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
4462 return true;
4463
4464 return false;
4465 }
4466
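/* A constant offset is usable only if every byte of the access stays in
   the 13-bit signed immediate range [-0x1000, 0xfff].  The OLO10 variant
   is tighter because the same displacement must also accommodate the
   up-to-10-bit %lo() addend.  */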
4467 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4468 (CONST_INT_P (X) \
4469 && INTVAL (X) >= -0x1000 \
4470 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4471
4472 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4473 (CONST_INT_P (X) \
4474 && INTVAL (X) >= -0x1000 \
4475 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
4476
4477 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4478
4479 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4480 ordinarily. This changes a bit when generating PIC. */
4481
4482 static bool
4483 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4484 {
4485 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4486
4487 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4488 rs1 = addr;
4489 else if (GET_CODE (addr) == PLUS)
4490 {
4491 rs1 = XEXP (addr, 0);
4492 rs2 = XEXP (addr, 1);
4493
4494 /* Canonicalize. REG comes first; if there are no regs,
4495 LO_SUM comes first. */
4496 if (!REG_P (rs1)
4497 && GET_CODE (rs1) != SUBREG
4498 && (REG_P (rs2)
4499 || GET_CODE (rs2) == SUBREG
4500 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4501 {
4502 rs1 = XEXP (addr, 1);
4503 rs2 = XEXP (addr, 0);
4504 }
4505
4506 if ((flag_pic == 1
4507 && sparc_pic_register_p (rs1)
4508 && !REG_P (rs2)
4509 && GET_CODE (rs2) != SUBREG
4510 && GET_CODE (rs2) != LO_SUM
4511 && GET_CODE (rs2) != MEM
4512 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4513 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4514 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4515 || ((REG_P (rs1)
4516 || GET_CODE (rs1) == SUBREG)
4517 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4518 {
4519 imm1 = rs2;
4520 rs2 = NULL;
4521 }
4522 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4523 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4524 {
4525 /* We prohibit REG + REG for TFmode when there are no quad move insns
4526 and we consequently need to split. We do this because REG+REG
4527 is not an offsettable address. If we get the situation in reload
4528 where source and destination of a movtf pattern are both MEMs with
4529 REG+REG address, then only one of them gets converted to an
4530 offsettable address. */
4531 if (mode == TFmode
4532 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4533 return 0;
4534
4535 /* Likewise for TImode, but in all cases. */
4536 if (mode == TImode)
4537 return 0;
4538
4539 /* We prohibit REG + REG on ARCH32 if not optimizing for
4540 DFmode/DImode because then mem_min_alignment is likely to be zero
4541 after reload and the forced split would lack a matching splitter
4542 pattern. */
4543 if (TARGET_ARCH32 && !optimize
4544 && (mode == DFmode || mode == DImode))
4545 return 0;
4546 }
4547 else if (USE_AS_OFFSETABLE_LO10
4548 && GET_CODE (rs1) == LO_SUM
4549 && TARGET_ARCH64
4550 && ! TARGET_CM_MEDMID
4551 && RTX_OK_FOR_OLO10_P (rs2, mode))
4552 {
4553 rs2 = NULL;
4554 imm1 = XEXP (rs1, 1);
4555 rs1 = XEXP (rs1, 0);
4556 if (!CONSTANT_P (imm1)
4557 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4558 return 0;
4559 }
4560 }
4561 else if (GET_CODE (addr) == LO_SUM)
4562 {
4563 rs1 = XEXP (addr, 0);
4564 imm1 = XEXP (addr, 1);
4565
4566 if (!CONSTANT_P (imm1)
4567 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4568 return 0;
4569
4570 /* We can't allow TFmode in 32-bit mode, because an offset greater
4571 than the alignment (8) may cause the LO_SUM to overflow. */
4572 if (mode == TFmode && TARGET_ARCH32)
4573 return 0;
4574
4575 /* During reload, accept the HIGH+LO_SUM construct generated by
4576 sparc_legitimize_reload_address. */
4577 if (reload_in_progress
4578 && GET_CODE (rs1) == HIGH
4579 && XEXP (rs1, 0) == imm1)
4580 return 1;
4581 }
4582 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4583 return 1;
4584 else
4585 return 0;
4586
4587 if (GET_CODE (rs1) == SUBREG)
4588 rs1 = SUBREG_REG (rs1);
4589 if (!REG_P (rs1))
4590 return 0;
4591
4592 if (rs2)
4593 {
4594 if (GET_CODE (rs2) == SUBREG)
4595 rs2 = SUBREG_REG (rs2);
4596 if (!REG_P (rs2))
4597 return 0;
4598 }
4599
4600 if (strict)
4601 {
4602 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4603 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4604 return 0;
4605 }
4606 else
4607 {
4608 if ((! SPARC_INT_REG_P (REGNO (rs1))
4609 && REGNO (rs1) != FRAME_POINTER_REGNUM
4610 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4611 || (rs2
4612 && (! SPARC_INT_REG_P (REGNO (rs2))
4613 && REGNO (rs2) != FRAME_POINTER_REGNUM
4614 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4615 return 0;
4616 }
4617 return 1;
4618 }
4619
4620 /* Return the SYMBOL_REF for the tls_get_addr function. */
4621
4622 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4623
4624 static rtx
4625 sparc_tls_get_addr (void)
4626 {
4627 if (!sparc_tls_symbol)
4628 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4629
4630 return sparc_tls_symbol;
4631 }
4632
4633 /* Return the Global Offset Table to be used in TLS mode. */
4634
4635 static rtx
4636 sparc_tls_got (void)
4637 {
4638 /* In PIC mode, this is just the PIC offset table. */
4639 if (flag_pic)
4640 {
4641 crtl->uses_pic_offset_table = 1;
4642 return pic_offset_table_rtx;
4643 }
4644
4645 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4646 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4647 if (TARGET_SUN_TLS && TARGET_ARCH32)
4648 {
4649 load_got_register ();
4650 return got_register_rtx;
4651 }
4652
4653 /* In all other cases, we load a new pseudo with the GOT symbol. */
4654 return copy_to_reg (sparc_got ());
4655 }
4656
4657 /* Return true if X contains a thread-local symbol. */
4658
4659 static bool
4660 sparc_tls_referenced_p (rtx x)
4661 {
4662 if (!TARGET_HAVE_TLS)
4663 return false;
4664
4665 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4666 x = XEXP (XEXP (x, 0), 0);
4667
4668 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4669 return true;
4670
4671 /* That's all we handle in sparc_legitimize_tls_address for now. */
4672 return false;
4673 }
4674
4675 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4676 this (thread-local) address. */
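/* Roughly: global-dynamic and local-dynamic call __tls_get_addr (the
   latter once per module, adding per-symbol offsets afterwards),
   initial-exec loads the thread-pointer offset from the GOT, and
   local-exec computes it directly; the last two then add %g7, the
   thread pointer (register 7 below).  */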
4677
4678 static rtx
4679 sparc_legitimize_tls_address (rtx addr)
4680 {
4681 rtx temp1, temp2, temp3, ret, o0, got;
4682 rtx_insn *insn;
4683
4684 gcc_assert (can_create_pseudo_p ());
4685
4686 if (GET_CODE (addr) == SYMBOL_REF)
4687 /* Although the various sethi/or sequences generate SImode values, many of
4688 them can be transformed by the linker when relaxing and, if relaxing to
4689 local-exec, will become a sethi/xor pair, which is signed and therefore
4690 a full DImode value in 64-bit mode. Thus we must use Pmode, lest these
4691 values be spilled onto the stack in 64-bit mode. */
4692 switch (SYMBOL_REF_TLS_MODEL (addr))
4693 {
4694 case TLS_MODEL_GLOBAL_DYNAMIC:
4695 start_sequence ();
4696 temp1 = gen_reg_rtx (Pmode);
4697 temp2 = gen_reg_rtx (Pmode);
4698 ret = gen_reg_rtx (Pmode);
4699 o0 = gen_rtx_REG (Pmode, 8);
4700 got = sparc_tls_got ();
4701 if (TARGET_ARCH32)
4702 {
4703 emit_insn (gen_tgd_hi22si (temp1, addr));
4704 emit_insn (gen_tgd_lo10si (temp2, temp1, addr));
4705 emit_insn (gen_tgd_addsi (o0, got, temp2, addr));
4706 insn = emit_call_insn (gen_tgd_callsi (o0, sparc_tls_get_addr (),
4707 addr, const1_rtx));
4708 }
4709 else
4710 {
4711 emit_insn (gen_tgd_hi22di (temp1, addr));
4712 emit_insn (gen_tgd_lo10di (temp2, temp1, addr));
4713 emit_insn (gen_tgd_adddi (o0, got, temp2, addr));
4714 insn = emit_call_insn (gen_tgd_calldi (o0, sparc_tls_get_addr (),
4715 addr, const1_rtx));
4716 }
4717 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4718 RTL_CONST_CALL_P (insn) = 1;
4719 insn = get_insns ();
4720 end_sequence ();
4721 emit_libcall_block (insn, ret, o0, addr);
4722 break;
4723
4724 case TLS_MODEL_LOCAL_DYNAMIC:
4725 start_sequence ();
4726 temp1 = gen_reg_rtx (Pmode);
4727 temp2 = gen_reg_rtx (Pmode);
4728 temp3 = gen_reg_rtx (Pmode);
4729 ret = gen_reg_rtx (Pmode);
4730 o0 = gen_rtx_REG (Pmode, 8);
4731 got = sparc_tls_got ();
4732 if (TARGET_ARCH32)
4733 {
4734 emit_insn (gen_tldm_hi22si (temp1));
4735 emit_insn (gen_tldm_lo10si (temp2, temp1));
4736 emit_insn (gen_tldm_addsi (o0, got, temp2));
4737 insn = emit_call_insn (gen_tldm_callsi (o0, sparc_tls_get_addr (),
4738 const1_rtx));
4739 }
4740 else
4741 {
4742 emit_insn (gen_tldm_hi22di (temp1));
4743 emit_insn (gen_tldm_lo10di (temp2, temp1));
4744 emit_insn (gen_tldm_adddi (o0, got, temp2));
4745 insn = emit_call_insn (gen_tldm_calldi (o0, sparc_tls_get_addr (),
4746 const1_rtx));
4747 }
4748 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4749 RTL_CONST_CALL_P (insn) = 1;
4750 insn = get_insns ();
4751 end_sequence ();
4752 /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
4753 share the LD_BASE result with other LD model accesses. */
4754 emit_libcall_block (insn, temp3, o0,
4755 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4756 UNSPEC_TLSLD_BASE));
4757 temp1 = gen_reg_rtx (Pmode);
4758 temp2 = gen_reg_rtx (Pmode);
4759 if (TARGET_ARCH32)
4760 {
4761 emit_insn (gen_tldo_hix22si (temp1, addr));
4762 emit_insn (gen_tldo_lox10si (temp2, temp1, addr));
4763 emit_insn (gen_tldo_addsi (ret, temp3, temp2, addr));
4764 }
4765 else
4766 {
4767 emit_insn (gen_tldo_hix22di (temp1, addr));
4768 emit_insn (gen_tldo_lox10di (temp2, temp1, addr));
4769 emit_insn (gen_tldo_adddi (ret, temp3, temp2, addr));
4770 }
4771 break;
4772
4773 case TLS_MODEL_INITIAL_EXEC:
4774 temp1 = gen_reg_rtx (Pmode);
4775 temp2 = gen_reg_rtx (Pmode);
4776 temp3 = gen_reg_rtx (Pmode);
4777 got = sparc_tls_got ();
4778 if (TARGET_ARCH32)
4779 {
4780 emit_insn (gen_tie_hi22si (temp1, addr));
4781 emit_insn (gen_tie_lo10si (temp2, temp1, addr));
4782 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4783 }
4784 else
4785 {
4786 emit_insn (gen_tie_hi22di (temp1, addr));
4787 emit_insn (gen_tie_lo10di (temp2, temp1, addr));
4788 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4789 }
4790 if (TARGET_SUN_TLS)
4791 {
4792 ret = gen_reg_rtx (Pmode);
4793 if (TARGET_ARCH32)
4794 emit_insn (gen_tie_addsi (ret, gen_rtx_REG (Pmode, 7),
4795 temp3, addr));
4796 else
4797 emit_insn (gen_tie_adddi (ret, gen_rtx_REG (Pmode, 7),
4798 temp3, addr));
4799 }
4800 else
4801 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4802 break;
4803
4804 case TLS_MODEL_LOCAL_EXEC:
4805 temp1 = gen_reg_rtx (Pmode);
4806 temp2 = gen_reg_rtx (Pmode);
4807 if (TARGET_ARCH32)
4808 {
4809 emit_insn (gen_tle_hix22si (temp1, addr));
4810 emit_insn (gen_tle_lox10si (temp2, temp1, addr));
4811 }
4812 else
4813 {
4814 emit_insn (gen_tle_hix22di (temp1, addr));
4815 emit_insn (gen_tle_lox10di (temp2, temp1, addr));
4816 }
4817 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4818 break;
4819
4820 default:
4821 gcc_unreachable ();
4822 }
4823
4824 else if (GET_CODE (addr) == CONST)
4825 {
4826 rtx base, offset;
4827
4828 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4829
4830 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4831 offset = XEXP (XEXP (addr, 0), 1);
4832
4833 base = force_operand (base, NULL_RTX);
4834 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4835 offset = force_reg (Pmode, offset);
4836 ret = gen_rtx_PLUS (Pmode, base, offset);
4837 }
4838
4839 else
4840 gcc_unreachable (); /* for now ... */
4841
4842 return ret;
4843 }
4844
4845 /* Legitimize PIC addresses. If the address is already position-independent,
4846 we return ORIG. Newly generated position-independent addresses go into a
4847 reg. This is REG if nonzero, otherwise we allocate register(s) as
4848 necessary. */
4849
4850 static rtx
4851 sparc_legitimize_pic_address (rtx orig, rtx reg)
4852 {
4853 if (GET_CODE (orig) == SYMBOL_REF
4854 /* See the comment in sparc_expand_move. */
4855 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4856 {
4857 bool gotdata_op = false;
4858 rtx pic_ref, address;
4859 rtx_insn *insn;
4860
4861 if (!reg)
4862 {
4863 gcc_assert (can_create_pseudo_p ());
4864 reg = gen_reg_rtx (Pmode);
4865 }
4866
4867 if (flag_pic == 2)
4868 {
4869 /* If not during reload, allocate another temp reg here for loading
4870 in the address, so that these instructions can be optimized
4871 properly. */
4872 rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
4873
4874 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4875 won't get confused into thinking that these two instructions
4876 are loading in the true address of the symbol. If in the
4877 future a PIC rtx exists, that should be used instead. */
4878 if (TARGET_ARCH64)
4879 {
4880 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4881 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4882 }
4883 else
4884 {
4885 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4886 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4887 }
4888
4889 address = temp_reg;
4890 gotdata_op = true;
4891 }
4892 else
4893 address = orig;
4894
4895 crtl->uses_pic_offset_table = 1;
4896 if (gotdata_op)
4897 {
4898 if (TARGET_ARCH64)
4899 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4900 pic_offset_table_rtx,
4901 address, orig));
4902 else
4903 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4904 pic_offset_table_rtx,
4905 address, orig));
4906 }
4907 else
4908 {
4909 pic_ref
4910 = gen_const_mem (Pmode,
4911 gen_rtx_PLUS (Pmode,
4912 pic_offset_table_rtx, address));
4913 insn = emit_move_insn (reg, pic_ref);
4914 }
4915
4916 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4917 by loop. */
4918 set_unique_reg_note (insn, REG_EQUAL, orig);
4919 return reg;
4920 }
4921 else if (GET_CODE (orig) == CONST)
4922 {
4923 rtx base, offset;
4924
4925 if (GET_CODE (XEXP (orig, 0)) == PLUS
4926 && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
4927 return orig;
4928
4929 if (!reg)
4930 {
4931 gcc_assert (can_create_pseudo_p ());
4932 reg = gen_reg_rtx (Pmode);
4933 }
4934
4935 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4936 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4937 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4938 base == reg ? NULL_RTX : reg);
4939
4940 if (GET_CODE (offset) == CONST_INT)
4941 {
4942 if (SMALL_INT (offset))
4943 return plus_constant (Pmode, base, INTVAL (offset));
4944 else if (can_create_pseudo_p ())
4945 offset = force_reg (Pmode, offset);
4946 else
4947 /* If we reach here, then something is seriously wrong. */
4948 gcc_unreachable ();
4949 }
4950 return gen_rtx_PLUS (Pmode, base, offset);
4951 }
4952 else if (GET_CODE (orig) == LABEL_REF)
4953 /* ??? We ought to be checking that the register is live instead, in case
4954 it is eliminated. */
4955 crtl->uses_pic_offset_table = 1;
4956
4957 return orig;
4958 }
4959
4960 /* Try machine-dependent ways of modifying an illegitimate address X
4961 to be legitimate. If we find one, return the new, valid address.
4962
4963 OLDX is the address as it was before break_out_memory_refs was called.
4964 In some cases it is useful to look at this to decide what needs to be done.
4965
4966 MODE is the mode of the operand pointed to by X.
4967
4968 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4969
4970 static rtx
4971 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4972 machine_mode mode)
4973 {
4974 rtx orig_x = x;
4975
4976 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4977 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4978 force_operand (XEXP (x, 0), NULL_RTX));
4979 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4980 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4981 force_operand (XEXP (x, 1), NULL_RTX));
4982 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4983 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4984 XEXP (x, 1));
4985 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4986 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4987 force_operand (XEXP (x, 1), NULL_RTX));
4988
4989 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4990 return x;
4991
4992 if (sparc_tls_referenced_p (x))
4993 x = sparc_legitimize_tls_address (x);
4994 else if (flag_pic)
4995 x = sparc_legitimize_pic_address (x, NULL_RTX);
4996 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4997 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4998 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4999 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
5000 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
5001 copy_to_mode_reg (Pmode, XEXP (x, 0)));
5002 else if (GET_CODE (x) == SYMBOL_REF
5003 || GET_CODE (x) == CONST
5004 || GET_CODE (x) == LABEL_REF)
5005 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
5006
5007 return x;
5008 }
5009
5010 /* Delegitimize an address that was legitimized by the above function. */
5011
5012 static rtx
5013 sparc_delegitimize_address (rtx x)
5014 {
5015 x = delegitimize_mem_from_attrs (x);
5016
5017 if (GET_CODE (x) == LO_SUM)
5018 x = XEXP (x, 1);
5019
5020 if (GET_CODE (x) == UNSPEC)
5021 switch (XINT (x, 1))
5022 {
5023 case UNSPEC_MOVE_PIC:
5024 case UNSPEC_TLSLE:
5025 x = XVECEXP (x, 0, 0);
5026 gcc_assert (GET_CODE (x) == SYMBOL_REF);
5027 break;
5028 case UNSPEC_MOVE_GOTDATA:
5029 x = XVECEXP (x, 0, 2);
5030 gcc_assert (GET_CODE (x) == SYMBOL_REF);
5031 break;
5032 default:
5033 break;
5034 }
5035
5036 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
5037 if (GET_CODE (x) == MINUS
5038 && (XEXP (x, 0) == got_register_rtx
5039 || sparc_pic_register_p (XEXP (x, 0))))
5040 {
5041 rtx y = XEXP (x, 1);
5042
5043 if (GET_CODE (y) == LO_SUM)
5044 y = XEXP (y, 1);
5045
5046 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MOVE_PIC_LABEL)
5047 {
5048 x = XVECEXP (y, 0, 0);
5049 gcc_assert (GET_CODE (x) == LABEL_REF
5050 || (GET_CODE (x) == CONST
5051 && GET_CODE (XEXP (x, 0)) == PLUS
5052 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
5053 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
5054 }
5055 }
5056
5057 return x;
5058 }
5059
5060 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
5061 replace the input X, or the original X if no replacement is called for.
5062 The output parameter *WIN is 1 if the calling macro should goto WIN,
5063 0 if it should not.
5064
5065 For SPARC, we wish to handle addresses by splitting them into
5066 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
5067 This cuts the number of extra insns by one.
5068
5069 Do nothing when generating PIC code and the address is a symbolic
5070 operand or requires a scratch register. */
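/* For example, a legitimate SImode constant address such as 0x12345 is
   rewritten as (lo_sum (high 0x12345) 0x12345), i.e. a sethi %hi()/%lo()
   pair, with the HIGH part reloaded into a base register.  */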
5071
5072 rtx
5073 sparc_legitimize_reload_address (rtx x, machine_mode mode,
5074 int opnum, int type,
5075 int ind_levels ATTRIBUTE_UNUSED, int *win)
5076 {
5077 /* Decompose SImode constants into HIGH+LO_SUM. */
5078 if (CONSTANT_P (x)
5079 && (mode != TFmode || TARGET_ARCH64)
5080 && GET_MODE (x) == SImode
5081 && GET_CODE (x) != LO_SUM
5082 && GET_CODE (x) != HIGH
5083 && sparc_code_model <= CM_MEDLOW
5084 && !(flag_pic
5085 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
5086 {
5087 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
5088 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5089 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5090 opnum, (enum reload_type)type);
5091 *win = 1;
5092 return x;
5093 }
5094
5095 /* We have to recognize what we have already generated above. */
5096 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
5097 {
5098 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5099 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5100 opnum, (enum reload_type)type);
5101 *win = 1;
5102 return x;
5103 }
5104
5105 *win = 0;
5106 return x;
5107 }
5108
5109 /* Return true if ADDR (a legitimate address expression)
5110 has an effect that depends on the machine mode it is used for.
5111
5112 In PIC mode,
5113
5114 (mem:HI [%l7+a])
5115
5116 is not equivalent to
5117
5118 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
5119
5120 because [%l7+a+1] is interpreted as the address of (a+1). */
5121
5122
5123 static bool
5124 sparc_mode_dependent_address_p (const_rtx addr,
5125 addr_space_t as ATTRIBUTE_UNUSED)
5126 {
5127 if (GET_CODE (addr) == PLUS
5128 && sparc_pic_register_p (XEXP (addr, 0))
5129 && symbolic_operand (XEXP (addr, 1), VOIDmode))
5130 return true;
5131
5132 return false;
5133 }
5134
5135 /* Emit a call instruction with the pattern given by PAT. ADDR is the
5136 address of the call target. */
5137
5138 void
5139 sparc_emit_call_insn (rtx pat, rtx addr)
5140 {
5141 rtx_insn *insn;
5142
5143 insn = emit_call_insn (pat);
5144
5145 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
5146 if (TARGET_VXWORKS_RTP
5147 && flag_pic
5148 && GET_CODE (addr) == SYMBOL_REF
5149 && (SYMBOL_REF_DECL (addr)
5150 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
5151 : !SYMBOL_REF_LOCAL_P (addr)))
5152 {
5153 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5154 crtl->uses_pic_offset_table = 1;
5155 }
5156 }
5157 \f
5158 /* Return 1 if RTX is a MEM which is known to be aligned to at
5159 least a DESIRED byte boundary. */
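/* This is used, e.g. by the move patterns in sparc.md, to decide whether
   a 64-bit access may be done with a single ldd/std instead of being
   split into two 32-bit accesses.  */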
5160
5161 int
5162 mem_min_alignment (rtx mem, int desired)
5163 {
5164 rtx addr, base, offset;
5165
5166 /* If it's not a MEM we can't accept it. */
5167 if (GET_CODE (mem) != MEM)
5168 return 0;
5169
5170 /* Obviously... */
5171 if (!TARGET_UNALIGNED_DOUBLES
5172 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
5173 return 1;
5174
5175 /* ??? The rest of the function predates MEM_ALIGN so
5176 there is probably a bit of redundancy. */
5177 addr = XEXP (mem, 0);
5178 base = offset = NULL_RTX;
5179 if (GET_CODE (addr) == PLUS)
5180 {
5181 if (GET_CODE (XEXP (addr, 0)) == REG)
5182 {
5183 base = XEXP (addr, 0);
5184
5185 /* What we are saying here is that if the base
5186 REG is aligned properly, the compiler will make
5187 sure any REG based index upon it will be so
5188 as well. */
5189 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
5190 offset = XEXP (addr, 1);
5191 else
5192 offset = const0_rtx;
5193 }
5194 }
5195 else if (GET_CODE (addr) == REG)
5196 {
5197 base = addr;
5198 offset = const0_rtx;
5199 }
5200
5201 if (base != NULL_RTX)
5202 {
5203 int regno = REGNO (base);
5204
5205 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
5206 {
5207 /* Check if the compiler has recorded some information
5208 about the alignment of the base REG. If reload has
5209 completed, we already matched with proper alignments.
5210 If not running global_alloc, reload might give us an
5211 unaligned pointer to the local stack, though. */
5212 if (((cfun != 0
5213 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
5214 || (optimize && reload_completed))
5215 && (INTVAL (offset) & (desired - 1)) == 0)
5216 return 1;
5217 }
5218 else
5219 {
5220 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
5221 return 1;
5222 }
5223 }
5224 else if (! TARGET_UNALIGNED_DOUBLES
5225 || CONSTANT_P (addr)
5226 || GET_CODE (addr) == LO_SUM)
5227 {
5228 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
5229 is true, in which case we can only assume that an access is aligned if
5230 it is to a constant address, or the address involves a LO_SUM. */
5231 return 1;
5232 }
5233
5234 /* An obviously unaligned address. */
5235 return 0;
5236 }
5237
5238 \f
5239 /* Vectors to keep interesting information about registers where it can easily
5240 be got. We used to use the actual mode value as the bit number, but there
5241 are more than 32 modes now. Instead we use two tables: one indexed by
5242 hard register number, and one indexed by mode. */
5243
5244 /* The purpose of sparc_mode_class is to shrink the range of modes so that
5245 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
5246 mapped into one sparc_mode_class mode. */
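/* Elsewhere in this file the register/mode check essentially boils down
   to (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0,
   since each sparc_mode_class entry is a single class bit.  */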
5247
5248 enum sparc_mode_class {
5249 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
5250 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
5251 CC_MODE, CCFP_MODE
5252 };
5253
5254 /* Modes for single-word and smaller quantities. */
5255 #define S_MODES \
5256 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
5257
5258 /* Modes for double-word and smaller quantities. */
5259 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5260
5261 /* Modes for quad-word and smaller quantities. */
5262 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
5263
5264 /* Modes for 8-word and smaller quantities. */
5265 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
5266
5267 /* Modes for single-float quantities. */
5268 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
5269
5270 /* Modes for double-float and smaller quantities. */
5271 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5272
5273 /* Modes for quad-float and smaller quantities. */
5274 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
5275
5276 /* Modes for quad-float pairs and smaller quantities. */
5277 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
5278
5279 /* Modes for double-float only quantities. */
5280 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
5281
5282 /* Modes for quad-float and double-float only quantities. */
5283 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
5284
5285 /* Modes for quad-float pairs and double-float only quantities. */
5286 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
5287
5288 /* Modes for condition codes. */
5289 #define CC_MODES (1 << (int) CC_MODE)
5290 #define CCFP_MODES (1 << (int) CCFP_MODE)
5291
5292 /* Value is 1 if register/mode pair is acceptable on sparc.
5293
5294 The funny mixture of D and T modes is because integer operations
5295 do not specially operate on tetra quantities, so non-quad-aligned
5296 registers can hold quadword quantities (except %o4 and %i4 because
5297 they cross fixed registers).
5298
5299 ??? Note that, despite the settings, non-double-aligned parameter
5300 registers can hold double-word quantities in 32-bit mode. */
5301
5302 /* This points to either the 32-bit or the 64-bit version. */
5303 static const int *hard_regno_mode_classes;
5304
5305 static const int hard_32bit_mode_classes[] = {
5306 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5307 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5308 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5309 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5310
5311 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5312 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5313 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5314 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5315
5316 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5317 and none can hold SFmode/SImode values. */
5318 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5319 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5320 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5321 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5322
5323 /* %fcc[0123] */
5324 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5325
5326 /* %icc, %sfp, %gsr */
5327 CC_MODES, 0, D_MODES
5328 };
5329
5330 static const int hard_64bit_mode_classes[] = {
5331 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5332 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5333 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5334 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5335
5336 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5337 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5338 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5339 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5340
5341 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5342 and none can hold SFmode/SImode values. */
5343 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5344 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5345 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5346 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5347
5348 /* %fcc[0123] */
5349 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5350
5351 /* %icc, %sfp, %gsr */
5352 CC_MODES, 0, D_MODES
5353 };
5354
5355 static int sparc_mode_class [NUM_MACHINE_MODES];
5356
5357 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5358
5359 static void
5360 sparc_init_modes (void)
5361 {
5362 int i;
5363
5364 for (i = 0; i < NUM_MACHINE_MODES; i++)
5365 {
5366 machine_mode m = (machine_mode) i;
5367 unsigned int size = GET_MODE_SIZE (m);
5368
5369 switch (GET_MODE_CLASS (m))
5370 {
5371 case MODE_INT:
5372 case MODE_PARTIAL_INT:
5373 case MODE_COMPLEX_INT:
5374 if (size < 4)
5375 sparc_mode_class[i] = 1 << (int) H_MODE;
5376 else if (size == 4)
5377 sparc_mode_class[i] = 1 << (int) S_MODE;
5378 else if (size == 8)
5379 sparc_mode_class[i] = 1 << (int) D_MODE;
5380 else if (size == 16)
5381 sparc_mode_class[i] = 1 << (int) T_MODE;
5382 else if (size == 32)
5383 sparc_mode_class[i] = 1 << (int) O_MODE;
5384 else
5385 sparc_mode_class[i] = 0;
5386 break;
5387 case MODE_VECTOR_INT:
5388 if (size == 4)
5389 sparc_mode_class[i] = 1 << (int) SF_MODE;
5390 else if (size == 8)
5391 sparc_mode_class[i] = 1 << (int) DF_MODE;
5392 else
5393 sparc_mode_class[i] = 0;
5394 break;
5395 case MODE_FLOAT:
5396 case MODE_COMPLEX_FLOAT:
5397 if (size == 4)
5398 sparc_mode_class[i] = 1 << (int) SF_MODE;
5399 else if (size == 8)
5400 sparc_mode_class[i] = 1 << (int) DF_MODE;
5401 else if (size == 16)
5402 sparc_mode_class[i] = 1 << (int) TF_MODE;
5403 else if (size == 32)
5404 sparc_mode_class[i] = 1 << (int) OF_MODE;
5405 else
5406 sparc_mode_class[i] = 0;
5407 break;
5408 case MODE_CC:
5409 if (m == CCFPmode || m == CCFPEmode)
5410 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5411 else
5412 sparc_mode_class[i] = 1 << (int) CC_MODE;
5413 break;
5414 default:
5415 sparc_mode_class[i] = 0;
5416 break;
5417 }
5418 }
5419
5420 if (TARGET_ARCH64)
5421 hard_regno_mode_classes = hard_64bit_mode_classes;
5422 else
5423 hard_regno_mode_classes = hard_32bit_mode_classes;
5424
5425 /* Initialize the array used by REGNO_REG_CLASS. */
5426 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5427 {
5428 if (i < 16 && TARGET_V8PLUS)
5429 sparc_regno_reg_class[i] = I64_REGS;
5430 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5431 sparc_regno_reg_class[i] = GENERAL_REGS;
5432 else if (i < 64)
5433 sparc_regno_reg_class[i] = FP_REGS;
5434 else if (i < 96)
5435 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5436 else if (i < 100)
5437 sparc_regno_reg_class[i] = FPCC_REGS;
5438 else
5439 sparc_regno_reg_class[i] = NO_REGS;
5440 }
5441 }
5442 \f
5443 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5444
5445 static inline bool
5446 save_global_or_fp_reg_p (unsigned int regno,
5447 int leaf_function ATTRIBUTE_UNUSED)
5448 {
5449 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
5450 }
5451
5452 /* Return whether the return address register (%i7) is needed. */
5453
5454 static inline bool
5455 return_addr_reg_needed_p (int leaf_function)
5456 {
5457 /* If it is live, for example because of __builtin_return_address (0). */
5458 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5459 return true;
5460
5461 /* Otherwise, it is needed as save register if %o7 is clobbered. */
5462 if (!leaf_function
5463 /* Loading the GOT register clobbers %o7. */
5464 || crtl->uses_pic_offset_table
5465 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5466 return true;
5467
5468 return false;
5469 }
5470
5471 /* Return whether REGNO, a local or in register, must be saved/restored. */
5472
5473 static bool
5474 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5475 {
5476 /* General case: call-saved registers live at some point. */
5477 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
5478 return true;
5479
5480 /* Frame pointer register (%fp) if needed. */
5481 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5482 return true;
5483
5484 /* Return address register (%i7) if needed. */
5485 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5486 return true;
5487
5488 /* GOT register (%l7) if needed. */
5489 if (regno == GLOBAL_OFFSET_TABLE_REGNUM && got_register_rtx)
5490 return true;
5491
5492 /* If the function accesses prior frames, the frame pointer and the return
5493 address of the previous frame must be saved on the stack. */
5494 if (crtl->accesses_prior_frames
5495 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5496 return true;
5497
5498 return false;
5499 }
5500
5501 /* Compute the frame size required by the function. This function is called
5502 during the reload pass and also by sparc_expand_prologue. */
5503
5504 static HOST_WIDE_INT
5505 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5506 {
5507 HOST_WIDE_INT frame_size, apparent_frame_size;
5508 int args_size, n_global_fp_regs = 0;
5509 bool save_local_in_regs_p = false;
5510 unsigned int i;
5511
5512 /* If the function allocates dynamic stack space, the dynamic offset is
5513 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5514 if (leaf_function && !cfun->calls_alloca)
5515 args_size = 0;
5516 else
5517 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5518
5519 /* Calculate space needed for global registers. */
5520 if (TARGET_ARCH64)
5521 {
5522 for (i = 0; i < 8; i++)
5523 if (save_global_or_fp_reg_p (i, 0))
5524 n_global_fp_regs += 2;
5525 }
5526 else
5527 {
5528 for (i = 0; i < 8; i += 2)
5529 if (save_global_or_fp_reg_p (i, 0)
5530 || save_global_or_fp_reg_p (i + 1, 0))
5531 n_global_fp_regs += 2;
5532 }
5533
5534 /* In the flat window model, find out which local and in registers need to
5535 be saved. We don't reserve space in the current frame for them as they
5536 will be spilled into the register window save area of the caller's frame.
5537 However, as soon as we use this register window save area, we must create
5538 that of the current frame to make it the live one. */
5539 if (TARGET_FLAT)
5540 for (i = 16; i < 32; i++)
5541 if (save_local_or_in_reg_p (i, leaf_function))
5542 {
5543 save_local_in_regs_p = true;
5544 break;
5545 }
5546
5547 /* Calculate space needed for FP registers. */
5548 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5549 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5550 n_global_fp_regs += 2;
5551
5552 if (size == 0
5553 && n_global_fp_regs == 0
5554 && args_size == 0
5555 && !save_local_in_regs_p)
5556 frame_size = apparent_frame_size = 0;
5557 else
5558 {
5559 /* Start from the apparent frame size. */
5560 apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
5561
5562 /* We need to add the size of the outgoing argument area. */
5563 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5564
5565 /* And that of the register window save area. */
5566 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5567
5568 /* Finally, bump to the appropriate alignment. */
5569 frame_size = SPARC_STACK_ALIGN (frame_size);
5570 }
5571
5572 /* Set up values for use in prologue and epilogue. */
5573 sparc_frame_size = frame_size;
5574 sparc_apparent_frame_size = apparent_frame_size;
5575 sparc_n_global_fp_regs = n_global_fp_regs;
5576 sparc_save_local_in_regs_p = save_local_in_regs_p;
5577
5578 return frame_size;
5579 }
5580
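/* The computation above reduces to a handful of additions and roundings.
   The following minimal, self-contained sketch mirrors it for the 32-bit
   ABI, assuming an 8-byte stack alignment and a 68-byte fixed area
   (16 * 4 bytes of register window save area plus 4 bytes for the
   aggregate return pointer); all names and values are illustrative only.

   #include <stdio.h>

   #define EXAMPLE_ROUND_UP(X, A) (((X) + (A) - 1) & ~((A) - 1))

   static long
   example_frame_size (long var_size, long args_size, int n_global_fp_regs)
   {
     // Apparent frame size: locals plus the save area for call-saved
     // global/FP registers, counted in 4-byte words.
     long apparent = EXAMPLE_ROUND_UP (var_size, 8) + n_global_fp_regs * 4;

     // Add the outgoing argument area and the fixed area at the bottom
     // of the frame.
     long frame = apparent + EXAMPLE_ROUND_UP (args_size, 8) + 16 * 4 + 4;

     // Bump to the stack alignment.
     return EXAMPLE_ROUND_UP (frame, 8);
   }

   int
   main (void)
   {
     // 40 bytes of locals, 24 bytes of outgoing arguments, one pair of
     // call-saved FP registers: 40 + 8 + 24 + 68 = 140, aligned to 144.
     printf ("%ld\n", example_frame_size (40, 24, 2));
     return 0;
   }  */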
5581 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5582
5583 int
5584 sparc_initial_elimination_offset (int to)
5585 {
5586 int offset;
5587
5588 if (to == STACK_POINTER_REGNUM)
5589 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5590 else
5591 offset = 0;
5592
5593 offset += SPARC_STACK_BIAS;
5594 return offset;
5595 }
5596
5597 /* Output any necessary .register pseudo-ops. */
5598
5599 void
5600 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5601 {
5602 int i;
5603
5604 if (TARGET_ARCH32)
5605 return;
5606
5607 /* Check if %g[2367] were used without
5608 .register being printed for them already. */
5609 for (i = 2; i < 8; i++)
5610 {
5611 if (df_regs_ever_live_p (i)
5612 && ! sparc_hard_reg_printed [i])
5613 {
5614 sparc_hard_reg_printed [i] = 1;
5615 /* %g7 is used as TLS base register, use #ignore
5616 for it instead of #scratch. */
5617 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5618 i == 7 ? "ignore" : "scratch");
5619 }
5620 if (i == 3) i = 5;
5621 }
5622 }
5623
5624 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5625
5626 #if PROBE_INTERVAL > 4096
5627 #error Cannot use indexed addressing mode for stack probing
5628 #endif
5629
5630 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5631 inclusive. These are offsets from the current stack pointer.
5632
5633 Note that we don't use the REG+REG addressing mode for the probes because
5634 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
5635 so the advantages of having a single code path win here. */
5636
5637 static void
5638 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5639 {
5640 rtx g1 = gen_rtx_REG (Pmode, 1);
5641
5642 /* See if we have a constant small number of probes to generate. If so,
5643 that's the easy case. */
5644 if (size <= PROBE_INTERVAL)
5645 {
5646 emit_move_insn (g1, GEN_INT (first));
5647 emit_insn (gen_rtx_SET (g1,
5648 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5649 emit_stack_probe (plus_constant (Pmode, g1, -size));
5650 }
5651
5652 /* The run-time loop is made up of 9 insns in the generic case while the
5653 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
5654 else if (size <= 4 * PROBE_INTERVAL)
5655 {
5656 HOST_WIDE_INT i;
5657
5658 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5659 emit_insn (gen_rtx_SET (g1,
5660 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5661 emit_stack_probe (g1);
5662
5663 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5664 it exceeds SIZE. If only two probes are needed, this will not
5665 generate any code. Then probe at FIRST + SIZE. */
5666 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5667 {
5668 emit_insn (gen_rtx_SET (g1,
5669 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5670 emit_stack_probe (g1);
5671 }
5672
5673 emit_stack_probe (plus_constant (Pmode, g1,
5674 (i - PROBE_INTERVAL) - size));
5675 }
5676
5677 /* Otherwise, do the same as above, but in a loop. Note that we must be
5678 extra careful with variables wrapping around because we might be at
5679 the very top (or the very bottom) of the address space and we have
5680 to be able to handle this case properly; in particular, we use an
5681 equality test for the loop condition. */
5682 else
5683 {
5684 HOST_WIDE_INT rounded_size;
5685 rtx g4 = gen_rtx_REG (Pmode, 4);
5686
5687 emit_move_insn (g1, GEN_INT (first));
5688
5689
5690 /* Step 1: round SIZE to the previous multiple of the interval. */
5691
5692 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5693 emit_move_insn (g4, GEN_INT (rounded_size));
5694
5695
5696 /* Step 2: compute initial and final value of the loop counter. */
5697
5698 /* TEST_ADDR = SP + FIRST. */
5699 emit_insn (gen_rtx_SET (g1,
5700 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5701
5702 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5703 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5704
5705
5706 /* Step 3: the loop
5707
5708 while (TEST_ADDR != LAST_ADDR)
5709 {
5710 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5711 probe at TEST_ADDR
5712 }
5713
5714 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5715 until it is equal to ROUNDED_SIZE. */
5716
5717 if (TARGET_ARCH64)
5718 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5719 else
5720 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5721
5722
5723 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5724 that SIZE is equal to ROUNDED_SIZE. */
5725
5726 if (size != rounded_size)
5727 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5728 }
5729
5730 /* Make sure nothing is scheduled before we are done. */
5731 emit_insn (gen_blockage ());
5732 }
5733
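/* The offsets actually probed by sparc_emit_probe_stack_range can be
   enumerated with a few lines of ordinary C.  The sketch below assumes
   PROBE_INTERVAL == 4096 and simply prints the offsets below the incoming
   stack pointer that the three emission strategies above touch; it is an
   illustration of the chosen offsets, not of the emitted RTL.

   #include <stdio.h>

   #define EXAMPLE_PROBE_INTERVAL 4096

   static void
   example_probe_offsets (long first, long size)
   {
     if (size <= EXAMPLE_PROBE_INTERVAL)
       // Single probe at the far end of the range.
       printf ("probe at sp - %ld\n", first + size);
     else if (size <= 4 * EXAMPLE_PROBE_INTERVAL)
       {
         // Small number of probes, unrolled at compile time.
         long i;
         for (i = EXAMPLE_PROBE_INTERVAL; i < size; i += EXAMPLE_PROBE_INTERVAL)
           printf ("probe at sp - %ld\n", first + i);
         printf ("probe at sp - %ld\n", first + size);
       }
     else
       {
         // General case: a run-time loop probes every interval down to
         // ROUND_DOWN (size, PROBE_INTERVAL), then a final probe covers
         // the remainder, if any.
         long rounded = size & -(long) EXAMPLE_PROBE_INTERVAL;
         long i;
         for (i = EXAMPLE_PROBE_INTERVAL; i <= rounded; i += EXAMPLE_PROBE_INTERVAL)
           printf ("probe at sp - %ld\n", first + i);
         if (size != rounded)
           printf ("probe at sp - %ld\n", first + size);
       }
   }

   int
   main (void)
   {
     // Probes at sp-8192, sp-12288 and sp-16384 for a 12 KB frame beyond
     // a 4 KB protection area.
     example_probe_offsets (4096, 12288);
     return 0;
   }  */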
5734 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5735 absolute addresses. */
5736
5737 const char *
5738 output_probe_stack_range (rtx reg1, rtx reg2)
5739 {
5740 static int labelno = 0;
5741 char loop_lab[32];
5742 rtx xops[2];
5743
5744 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5745
5746 /* Loop. */
5747 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5748
5749 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5750 xops[0] = reg1;
5751 xops[1] = GEN_INT (-PROBE_INTERVAL);
5752 output_asm_insn ("add\t%0, %1, %0", xops);
5753
5754 /* Test if TEST_ADDR == LAST_ADDR. */
5755 xops[1] = reg2;
5756 output_asm_insn ("cmp\t%0, %1", xops);
5757
5758 /* Probe at TEST_ADDR and branch. */
5759 if (TARGET_ARCH64)
5760 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5761 else
5762 fputs ("\tbne\t", asm_out_file);
5763 assemble_name_raw (asm_out_file, loop_lab);
5764 fputc ('\n', asm_out_file);
5765 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5766 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5767
5768 return "";
5769 }
5770
5771 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5772 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5773 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5774 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5775 the action to be performed if it returns false. Return the new offset. */
5776
5777 typedef bool (*sorr_pred_t) (unsigned int, int);
5778 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5779
5780 static int
5781 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5782 int offset, int leaf_function, sorr_pred_t save_p,
5783 sorr_act_t action_true, sorr_act_t action_false)
5784 {
5785 unsigned int i;
5786 rtx mem;
5787 rtx_insn *insn;
5788
5789 if (TARGET_ARCH64 && high <= 32)
5790 {
5791 int fp_offset = -1;
5792
5793 for (i = low; i < high; i++)
5794 {
5795 if (save_p (i, leaf_function))
5796 {
5797 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5798 base, offset));
5799 if (action_true == SORR_SAVE)
5800 {
5801 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5802 RTX_FRAME_RELATED_P (insn) = 1;
5803 }
5804 else /* action_true == SORR_RESTORE */
5805 {
5806 /* The frame pointer must be restored last since its old
5807 value may be used as base address for the frame. This
5808 is problematic in 64-bit mode only because of the lack
5809 of double-word load instruction. */
5810 if (i == HARD_FRAME_POINTER_REGNUM)
5811 fp_offset = offset;
5812 else
5813 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5814 }
5815 offset += 8;
5816 }
5817 else if (action_false == SORR_ADVANCE)
5818 offset += 8;
5819 }
5820
5821 if (fp_offset >= 0)
5822 {
5823 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5824 emit_move_insn (hard_frame_pointer_rtx, mem);
5825 }
5826 }
5827 else
5828 {
5829 for (i = low; i < high; i += 2)
5830 {
5831 bool reg0 = save_p (i, leaf_function);
5832 bool reg1 = save_p (i + 1, leaf_function);
5833 machine_mode mode;
5834 int regno;
5835
5836 if (reg0 && reg1)
5837 {
5838 mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
5839 regno = i;
5840 }
5841 else if (reg0)
5842 {
5843 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5844 regno = i;
5845 }
5846 else if (reg1)
5847 {
5848 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5849 regno = i + 1;
5850 offset += 4;
5851 }
5852 else
5853 {
5854 if (action_false == SORR_ADVANCE)
5855 offset += 8;
5856 continue;
5857 }
5858
5859 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5860 if (action_true == SORR_SAVE)
5861 {
5862 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5863 RTX_FRAME_RELATED_P (insn) = 1;
5864 if (mode == DImode)
5865 {
5866 rtx set1, set2;
5867 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5868 offset));
5869 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5870 RTX_FRAME_RELATED_P (set1) = 1;
5871 mem
5872 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5873 offset + 4));
5874 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5875 RTX_FRAME_RELATED_P (set2) = 1;
5876 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5877 gen_rtx_PARALLEL (VOIDmode,
5878 gen_rtvec (2, set1, set2)));
5879 }
5880 }
5881 else /* action_true == SORR_RESTORE */
5882 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5883
5884 /* Bump and round down to double word
5885 in case we already bumped by 4. */
5886 offset = ROUND_DOWN (offset + 8, 8);
5887 }
5888 }
5889
5890 return offset;
5891 }
5892
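/* To make the pairing logic of the second path above concrete: when both
   registers of an even/odd pair are live, a single DImode (or DFmode)
   access covers the 8-byte slot; when only the even one is live, it is
   accessed in SImode/SFmode at the current offset; when only the odd one
   is live, the access is done at offset + 4, i.e. in the second
   (higher-addressed) word of the slot; and in every case the running
   offset is then re-rounded with ROUND_DOWN (offset + 8, 8) so that the
   next pair starts on a double-word boundary. */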
5893 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5894
5895 static rtx
5896 emit_adjust_base_to_offset (rtx base, int offset)
5897 {
5898 /* ??? This might be optimized a little as %g1 might already have a
5899 value close enough that a single add insn will do. */
5900 /* ??? Although, all of this is probably only a temporary fix because
5901 if %g1 can hold a function result, then sparc_expand_epilogue will
5902 lose (the result will be clobbered). */
5903 rtx new_base = gen_rtx_REG (Pmode, 1);
5904 emit_move_insn (new_base, GEN_INT (offset));
5905 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5906 return new_base;
5907 }
5908
5909 /* Emit code to save/restore call-saved global and FP registers. */
5910
5911 static void
5912 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5913 {
5914 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5915 {
5916 base = emit_adjust_base_to_offset (base, offset);
5917 offset = 0;
5918 }
5919
5920 offset
5921 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5922 save_global_or_fp_reg_p, action, SORR_NONE);
5923 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5924 save_global_or_fp_reg_p, action, SORR_NONE);
5925 }
5926
5927 /* Emit code to save/restore call-saved local and in registers. */
5928
5929 static void
5930 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5931 {
5932 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5933 {
5934 base = emit_adjust_base_to_offset (base, offset);
5935 offset = 0;
5936 }
5937
5938 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5939 save_local_or_in_reg_p, action, SORR_ADVANCE);
5940 }
5941
5942 /* Emit a window_save insn. */
5943
5944 static rtx_insn *
5945 emit_window_save (rtx increment)
5946 {
5947 rtx_insn *insn = emit_insn (gen_window_save (increment));
5948 RTX_FRAME_RELATED_P (insn) = 1;
5949
5950 /* The incoming return address (%o7) is saved in %i7. */
5951 add_reg_note (insn, REG_CFA_REGISTER,
5952 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5953 gen_rtx_REG (Pmode,
5954 INCOMING_RETURN_ADDR_REGNUM)));
5955
5956 /* The window save event. */
5957 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5958
5959 /* The CFA is %fp, the hard frame pointer. */
5960 add_reg_note (insn, REG_CFA_DEF_CFA,
5961 plus_constant (Pmode, hard_frame_pointer_rtx,
5962 INCOMING_FRAME_SP_OFFSET));
5963
5964 return insn;
5965 }
5966
5967 /* Generate an increment for the stack pointer. */
5968
5969 static rtx
5970 gen_stack_pointer_inc (rtx increment)
5971 {
5972 return gen_rtx_SET (stack_pointer_rtx,
5973 gen_rtx_PLUS (Pmode,
5974 stack_pointer_rtx,
5975 increment));
5976 }
5977
5978 /* Expand the function prologue. The prologue is responsible for reserving
5979 storage for the frame, saving the call-saved registers and loading the
5980 GOT register if needed. */
5981
5982 void
5983 sparc_expand_prologue (void)
5984 {
5985 HOST_WIDE_INT size;
5986 rtx_insn *insn;
5987
5988 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5989 on the final value of the flag means deferring the prologue/epilogue
5990 expansion until just before the second scheduling pass, which is too
5991 late to emit multiple epilogues or return insns.
5992
5993 Of course we are making the assumption that the value of the flag
5994 will not change between now and its final value. Of the three parts
5995 of the formula, only the last one can reasonably vary. Let's take a
5996 closer look, after assuming that the first two are set to true
5997 (otherwise the last value is effectively silenced).
5998
5999 If only_leaf_regs_used returns false, the global predicate will also
6000 be false so the actual frame size calculated below will be positive.
6001 As a consequence, the save_register_window insn will be emitted in
6002 the instruction stream; now this insn explicitly references %fp
6003 which is not a leaf register so only_leaf_regs_used will always
6004 return false subsequently.
6005
6006 If only_leaf_regs_used returns true, we hope that the subsequent
6007 optimization passes won't cause non-leaf registers to pop up. For
6008 example, the regrename pass has special provisions to not rename to
6009 non-leaf registers in a leaf function. */
6010 sparc_leaf_function_p
6011 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
6012
6013 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6014
6015 if (flag_stack_usage_info)
6016 current_function_static_stack_size = size;
6017
6018 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6019 || flag_stack_clash_protection)
6020 {
6021 if (crtl->is_leaf && !cfun->calls_alloca)
6022 {
6023 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6024 sparc_emit_probe_stack_range (get_stack_check_protect (),
6025 size - get_stack_check_protect ());
6026 }
6027 else if (size > 0)
6028 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6029 }
6030
6031 if (size == 0)
6032 ; /* do nothing. */
6033 else if (sparc_leaf_function_p)
6034 {
6035 rtx size_int_rtx = GEN_INT (-size);
6036
6037 if (size <= 4096)
6038 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6039 else if (size <= 8192)
6040 {
6041 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6042 RTX_FRAME_RELATED_P (insn) = 1;
6043
6044 /* %sp is still the CFA register. */
6045 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6046 }
6047 else
6048 {
6049 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6050 emit_move_insn (size_rtx, size_int_rtx);
6051 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6052 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6053 gen_stack_pointer_inc (size_int_rtx));
6054 }
6055
6056 RTX_FRAME_RELATED_P (insn) = 1;
6057 }
6058 else
6059 {
6060 rtx size_int_rtx = GEN_INT (-size);
6061
6062 if (size <= 4096)
6063 emit_window_save (size_int_rtx);
6064 else if (size <= 8192)
6065 {
6066 emit_window_save (GEN_INT (-4096));
6067
6068 /* %sp is not the CFA register anymore. */
6069 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6070
6071 /* Make sure no %fp-based store is issued until after the frame is
6072 established. The offset between the frame pointer and the stack
6073 pointer is calculated relative to the value of the stack pointer
6074 at the end of the function prologue, and moving instructions that
6075 access the stack via the frame pointer between the instructions
6076 that decrement the stack pointer could result in accessing the
6077 register window save area, which is volatile. */
6078 emit_insn (gen_frame_blockage ());
6079 }
6080 else
6081 {
6082 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6083 emit_move_insn (size_rtx, size_int_rtx);
6084 emit_window_save (size_rtx);
6085 }
6086 }
6087
6088 if (sparc_leaf_function_p)
6089 {
6090 sparc_frame_base_reg = stack_pointer_rtx;
6091 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6092 }
6093 else
6094 {
6095 sparc_frame_base_reg = hard_frame_pointer_rtx;
6096 sparc_frame_base_offset = SPARC_STACK_BIAS;
6097 }
6098
6099 if (sparc_n_global_fp_regs > 0)
6100 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6101 sparc_frame_base_offset
6102 - sparc_apparent_frame_size,
6103 SORR_SAVE);
6104
6105 /* Advertise that the data calculated just above are now valid. */
6106 sparc_prologue_data_valid_p = true;
6107 }
6108
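/* As a rough illustration of the three cases above for the regular
   (windowed) model: a frame of at most 4096 bytes is typically
   established with a single 'save %sp, -SIZE, %sp'; a frame of at most
   8192 bytes with 'save %sp, -4096, %sp' followed by
   'add %sp, 4096-SIZE, %sp'; and a larger frame by first materializing
   -SIZE into %g1 (sethi/or) and then issuing 'save %sp, %g1, %sp'.  In
   the leaf case the 'save' is replaced by a plain 'add %sp, ..., %sp'.
   The exact sequences are determined by the corresponding patterns in
   sparc.md. */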
6109 /* Expand the function prologue for the flat window model. The prologue is
6110 responsible for reserving storage for the frame, saving the call-saved
6111 registers and loading the GOT register if needed. */
6112
6113 void
6114 sparc_flat_expand_prologue (void)
6115 {
6116 HOST_WIDE_INT size;
6117 rtx_insn *insn;
6118
6119 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
6120
6121 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6122
6123 if (flag_stack_usage_info)
6124 current_function_static_stack_size = size;
6125
6126 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6127 || flag_stack_clash_protection)
6128 {
6129 if (crtl->is_leaf && !cfun->calls_alloca)
6130 {
6131 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6132 sparc_emit_probe_stack_range (get_stack_check_protect (),
6133 size - get_stack_check_protect ());
6134 }
6135 else if (size > 0)
6136 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6137 }
6138
6139 if (sparc_save_local_in_regs_p)
6140 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
6141 SORR_SAVE);
6142
6143 if (size == 0)
6144 ; /* do nothing. */
6145 else
6146 {
6147 rtx size_int_rtx, size_rtx;
6148
6149 size_rtx = size_int_rtx = GEN_INT (-size);
6150
6151 /* We establish the frame (i.e. decrement the stack pointer) first, even
6152 if we use a frame pointer, because we cannot clobber any call-saved
6153 registers, including the frame pointer, if we haven't created a new
6154 register save area, for the sake of compatibility with the ABI. */
6155 if (size <= 4096)
6156 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6157 else if (size <= 8192 && !frame_pointer_needed)
6158 {
6159 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6160 RTX_FRAME_RELATED_P (insn) = 1;
6161 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6162 }
6163 else
6164 {
6165 size_rtx = gen_rtx_REG (Pmode, 1);
6166 emit_move_insn (size_rtx, size_int_rtx);
6167 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6168 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6169 gen_stack_pointer_inc (size_int_rtx));
6170 }
6171 RTX_FRAME_RELATED_P (insn) = 1;
6172
6173 /* Ensure nothing is scheduled until after the frame is established. */
6174 emit_insn (gen_blockage ());
6175
6176 if (frame_pointer_needed)
6177 {
6178 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
6179 gen_rtx_MINUS (Pmode,
6180 stack_pointer_rtx,
6181 size_rtx)));
6182 RTX_FRAME_RELATED_P (insn) = 1;
6183
6184 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6185 gen_rtx_SET (hard_frame_pointer_rtx,
6186 plus_constant (Pmode, stack_pointer_rtx,
6187 size)));
6188 }
6189
6190 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6191 {
6192 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
6193 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
6194
6195 insn = emit_move_insn (i7, o7);
6196 RTX_FRAME_RELATED_P (insn) = 1;
6197
6198 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
6199
6200 /* Prevent this instruction from ever being considered dead,
6201 even if this function has no epilogue. */
6202 emit_use (i7);
6203 }
6204 }
6205
6206 if (frame_pointer_needed)
6207 {
6208 sparc_frame_base_reg = hard_frame_pointer_rtx;
6209 sparc_frame_base_offset = SPARC_STACK_BIAS;
6210 }
6211 else
6212 {
6213 sparc_frame_base_reg = stack_pointer_rtx;
6214 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6215 }
6216
6217 if (sparc_n_global_fp_regs > 0)
6218 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6219 sparc_frame_base_offset
6220 - sparc_apparent_frame_size,
6221 SORR_SAVE);
6222
6223 /* Advertise that the data calculated just above are now valid. */
6224 sparc_prologue_data_valid_p = true;
6225 }
6226
6227 /* This function generates the assembly code for function entry, which boils
6228 down to emitting the necessary .register directives. */
6229
6230 static void
6231 sparc_asm_function_prologue (FILE *file)
6232 {
6233 /* Check that the assumption we made in sparc_expand_prologue is valid. */
6234 if (!TARGET_FLAT)
6235 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
6236
6237 sparc_output_scratch_registers (file);
6238 }
6239
6240 /* Expand the function epilogue, either normal or part of a sibcall.
6241 We emit all the instructions except the return or the call. */
6242
6243 void
6244 sparc_expand_epilogue (bool for_eh)
6245 {
6246 HOST_WIDE_INT size = sparc_frame_size;
6247
6248 if (cfun->calls_alloca)
6249 emit_insn (gen_frame_blockage ());
6250
6251 if (sparc_n_global_fp_regs > 0)
6252 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6253 sparc_frame_base_offset
6254 - sparc_apparent_frame_size,
6255 SORR_RESTORE);
6256
6257 if (size == 0 || for_eh)
6258 ; /* do nothing. */
6259 else if (sparc_leaf_function_p)
6260 {
6261 if (size <= 4096)
6262 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6263 else if (size <= 8192)
6264 {
6265 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6266 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6267 }
6268 else
6269 {
6270 rtx reg = gen_rtx_REG (Pmode, 1);
6271 emit_move_insn (reg, GEN_INT (size));
6272 emit_insn (gen_stack_pointer_inc (reg));
6273 }
6274 }
6275 }
6276
6277 /* Expand the function epilogue for the flat window model, either normal
6278 or part of a sibcall. We emit all the instructions except the return or the call. */
6279
6280 void
6281 sparc_flat_expand_epilogue (bool for_eh)
6282 {
6283 HOST_WIDE_INT size = sparc_frame_size;
6284
6285 if (sparc_n_global_fp_regs > 0)
6286 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6287 sparc_frame_base_offset
6288 - sparc_apparent_frame_size,
6289 SORR_RESTORE);
6290
6291 /* If we have a frame pointer, we'll need both to restore it before the
6292 frame is destroyed and to use its current value in destroying the frame.
6293 Since we don't have an atomic way to do that in the flat window model,
6294 we save the current value into a temporary register (%g1). */
6295 if (frame_pointer_needed && !for_eh)
6296 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
6297
6298 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6299 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
6300 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
6301
6302 if (sparc_save_local_in_regs_p)
6303 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
6304 sparc_frame_base_offset,
6305 SORR_RESTORE);
6306
6307 if (size == 0 || for_eh)
6308 ; /* do nothing. */
6309 else if (frame_pointer_needed)
6310 {
6311 /* Make sure the frame is destroyed after everything else is done. */
6312 emit_insn (gen_blockage ());
6313
6314 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
6315 }
6316 else
6317 {
6318 /* Likewise. */
6319 emit_insn (gen_blockage ());
6320
6321 if (size <= 4096)
6322 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6323 else if (size <= 8192)
6324 {
6325 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6326 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6327 }
6328 else
6329 {
6330 rtx reg = gen_rtx_REG (Pmode, 1);
6331 emit_move_insn (reg, GEN_INT (size));
6332 emit_insn (gen_stack_pointer_inc (reg));
6333 }
6334 }
6335 }
6336
6337 /* Return true if it is appropriate to emit `return' instructions in the
6338 body of a function. */
6339
6340 bool
6341 sparc_can_use_return_insn_p (void)
6342 {
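/* Note that && binds more tightly than ?:, so the conjunction below forms
the condition of the conditional expression as a whole. */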
6343 return sparc_prologue_data_valid_p
6344 && sparc_n_global_fp_regs == 0
6345 && TARGET_FLAT
6346 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6347 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
6348 }
6349
6350 /* This function generates the assembly code for function exit. */
6351
6352 static void
6353 sparc_asm_function_epilogue (FILE *file)
6354 {
6355 /* If the last two instructions of a function are "call foo; dslot;"
6356 the return address might point to the first instruction in the next
6357 function and we have to output a dummy nop for the sake of sane
6358 backtraces in such cases. This is pointless for sibling calls since
6359 the return address is explicitly adjusted. */
6360
6361 rtx_insn *insn = get_last_insn ();
6362
6363 rtx last_real_insn = prev_real_insn (insn);
6364 if (last_real_insn
6365 && NONJUMP_INSN_P (last_real_insn)
6366 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6367 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6368
6369 if (last_real_insn
6370 && CALL_P (last_real_insn)
6371 && !SIBLING_CALL_P (last_real_insn))
6372 fputs("\tnop\n", file);
6373
6374 sparc_output_deferred_case_vectors ();
6375 }
6376
6377 /* Output a 'restore' instruction. */
6378
6379 static void
6380 output_restore (rtx pat)
6381 {
6382 rtx operands[3];
6383
6384 if (! pat)
6385 {
6386 fputs ("\t restore\n", asm_out_file);
6387 return;
6388 }
6389
6390 gcc_assert (GET_CODE (pat) == SET);
6391
6392 operands[0] = SET_DEST (pat);
6393 pat = SET_SRC (pat);
6394
6395 switch (GET_CODE (pat))
6396 {
6397 case PLUS:
6398 operands[1] = XEXP (pat, 0);
6399 operands[2] = XEXP (pat, 1);
6400 output_asm_insn (" restore %r1, %2, %Y0", operands);
6401 break;
6402 case LO_SUM:
6403 operands[1] = XEXP (pat, 0);
6404 operands[2] = XEXP (pat, 1);
6405 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6406 break;
6407 case ASHIFT:
6408 operands[1] = XEXP (pat, 0);
6409 gcc_assert (XEXP (pat, 1) == const1_rtx);
6410 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6411 break;
6412 default:
6413 operands[1] = pat;
6414 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6415 break;
6416 }
6417 }
6418
6419 /* Output a return. */
6420
6421 const char *
6422 output_return (rtx_insn *insn)
6423 {
6424 if (crtl->calls_eh_return)
6425 {
6426 /* If the function uses __builtin_eh_return, the eh_return
6427 machinery occupies the delay slot. */
6428 gcc_assert (!final_sequence);
6429
6430 if (flag_delayed_branch)
6431 {
6432 if (!TARGET_FLAT && TARGET_V9)
6433 fputs ("\treturn\t%i7+8\n", asm_out_file);
6434 else
6435 {
6436 if (!TARGET_FLAT)
6437 fputs ("\trestore\n", asm_out_file);
6438
6439 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6440 }
6441
6442 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6443 }
6444 else
6445 {
6446 if (!TARGET_FLAT)
6447 fputs ("\trestore\n", asm_out_file);
6448
6449 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6450 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6451 }
6452 }
6453 else if (sparc_leaf_function_p || TARGET_FLAT)
6454 {
6455 /* This is a leaf or flat function so we don't have to bother restoring
6456 the register window, which frees us from dealing with the convoluted
6457 semantics of restore/return. We simply output the jump to the
6458 return address and the insn in the delay slot (if any). */
6459
6460 return "jmp\t%%o7+%)%#";
6461 }
6462 else
6463 {
6464 /* This is a regular function so we have to restore the register window.
6465 We may have a pending insn for the delay slot, which will be either
6466 combined with the 'restore' instruction or put in the delay slot of
6467 the 'return' instruction. */
6468
6469 if (final_sequence)
6470 {
6471 rtx_insn *delay;
6472 rtx pat;
6473
6474 delay = NEXT_INSN (insn);
6475 gcc_assert (delay);
6476
6477 pat = PATTERN (delay);
6478
6479 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6480 {
6481 epilogue_renumber (&pat, 0);
6482 return "return\t%%i7+%)%#";
6483 }
6484 else
6485 {
6486 output_asm_insn ("jmp\t%%i7+%)", NULL);
6487
6488 /* We're going to output the insn in the delay slot manually.
6489 Make sure to output its source location first. */
6490 PATTERN (delay) = gen_blockage ();
6491 INSN_CODE (delay) = -1;
6492 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6493 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6494
6495 output_restore (pat);
6496 }
6497 }
6498 else
6499 {
6500 /* The delay slot is empty. */
6501 if (TARGET_V9)
6502 return "return\t%%i7+%)\n\t nop";
6503 else if (flag_delayed_branch)
6504 return "jmp\t%%i7+%)\n\t restore";
6505 else
6506 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6507 }
6508 }
6509
6510 return "";
6511 }
6512
6513 /* Output a sibling call. */
6514
6515 const char *
6516 output_sibcall (rtx_insn *insn, rtx call_operand)
6517 {
6518 rtx operands[1];
6519
6520 gcc_assert (flag_delayed_branch);
6521
6522 operands[0] = call_operand;
6523
6524 if (sparc_leaf_function_p || TARGET_FLAT)
6525 {
6526 /* This is a leaf or flat function so we don't have to bother restoring
6527 the register window. We simply output the jump to the function and
6528 the insn in the delay slot (if any). */
6529
6530 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6531
6532 if (final_sequence)
6533 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6534 operands);
6535 else
6536 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6537 it into a branch if possible. */
6538 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6539 operands);
6540 }
6541 else
6542 {
6543 /* This is a regular function so we have to restore the register window.
6544 We may have a pending insn for the delay slot, which will be combined
6545 with the 'restore' instruction. */
6546
6547 output_asm_insn ("call\t%a0, 0", operands);
6548
6549 if (final_sequence)
6550 {
6551 rtx_insn *delay;
6552 rtx pat;
6553
6554 delay = NEXT_INSN (insn);
6555 gcc_assert (delay);
6556
6557 pat = PATTERN (delay);
6558
6559 /* We're going to output the insn in the delay slot manually.
6560 Make sure to output its source location first. */
6561 PATTERN (delay) = gen_blockage ();
6562 INSN_CODE (delay) = -1;
6563 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6564 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6565
6566 output_restore (pat);
6567 }
6568 else
6569 output_restore (NULL_RTX);
6570 }
6571
6572 return "";
6573 }
6574 \f
6575 /* Functions for handling argument passing.
6576
6577 For 32-bit, the first 6 args are normally in registers and the rest are
6578 pushed. Any arg that starts within the first 6 words is at least
6579 partially passed in a register unless its data type forbids.
6580
6581 For 64-bit, the argument registers are laid out as an array of 16 elements
6582 and arguments are added sequentially. The first 6 int args and up to the
6583 first 16 fp args (depending on size) are passed in regs.
6584
6585       Slot    Stack   Integral   Float   Float in structure   Double   Long Double
6586       ----    -----   --------   -----   ------------------   ------   -----------
6587        15   [SP+248]              %f31       %f30,%f31         %d30
6588        14   [SP+240]              %f29       %f28,%f29         %d28        %q28
6589        13   [SP+232]              %f27       %f26,%f27         %d26
6590        12   [SP+224]              %f25       %f24,%f25         %d24        %q24
6591        11   [SP+216]              %f23       %f22,%f23         %d22
6592        10   [SP+208]              %f21       %f20,%f21         %d20        %q20
6593         9   [SP+200]              %f19       %f18,%f19         %d18
6594         8   [SP+192]              %f17       %f16,%f17         %d16        %q16
6595         7   [SP+184]              %f15       %f14,%f15         %d14
6596         6   [SP+176]              %f13       %f12,%f13         %d12        %q12
6597         5   [SP+168]     %o5      %f11       %f10,%f11         %d10
6598         4   [SP+160]     %o4       %f9        %f8,%f9           %d8         %q8
6599         3   [SP+152]     %o3       %f7        %f6,%f7           %d6
6600         2   [SP+144]     %o2       %f5        %f4,%f5           %d4         %q4
6601         1   [SP+136]     %o1       %f3        %f2,%f3           %d2
6602         0   [SP+128]     %o0       %f1        %f0,%f1           %d0         %q0
6603
6604 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
6605
6606 Integral arguments are always passed as 64-bit quantities appropriately
6607 extended.
6608
6609 Passing of floating point values is handled as follows.
6610 If a prototype is in scope:
6611 If the value is a named argument (i.e. either the function is not a
6612 stdarg function or the value is not part of the `...') then the value
6613 is passed in the appropriate fp reg.
6614 If the value is part of the `...' and is passed in one of the first 6
6615 slots then the value is passed in the appropriate int reg.
6616 If the value is part of the `...' and is not passed in one of the first 6
6617 slots then the value is passed in memory.
6618 If a prototype is not in scope:
6619 If the value is one of the first 6 arguments the value is passed in the
6620 appropriate integer reg and the appropriate fp reg.
6621 If the value is not one of the first 6 arguments the value is passed in
6622 the appropriate fp reg and in memory.
6623
6624
6625 Summary of the calling conventions implemented by GCC on the SPARC:
6626
6627 32-bit ABI:
6628 size argument return value
6629
6630 small integer <4 int. reg. int. reg.
6631 word 4 int. reg. int. reg.
6632 double word 8 int. reg. int. reg.
6633
6634 _Complex small integer <8 int. reg. int. reg.
6635 _Complex word 8 int. reg. int. reg.
6636 _Complex double word 16 memory int. reg.
6637
6638 vector integer <=8 int. reg. FP reg.
6639 vector integer >8 memory memory
6640
6641 float 4 int. reg. FP reg.
6642 double 8 int. reg. FP reg.
6643 long double 16 memory memory
6644
6645 _Complex float 8 memory FP reg.
6646 _Complex double 16 memory FP reg.
6647 _Complex long double 32 memory FP reg.
6648
6649 vector float any memory memory
6650
6651 aggregate any memory memory
6652
6653
6654
6655 64-bit ABI:
6656 size argument return value
6657
6658 small integer <8 int. reg. int. reg.
6659 word 8 int. reg. int. reg.
6660 double word 16 int. reg. int. reg.
6661
6662 _Complex small integer <16 int. reg. int. reg.
6663 _Complex word 16 int. reg. int. reg.
6664 _Complex double word 32 memory int. reg.
6665
6666 vector integer <=16 FP reg. FP reg.
6667 vector integer 16<s<=32 memory FP reg.
6668 vector integer >32 memory memory
6669
6670 float 4 FP reg. FP reg.
6671 double 8 FP reg. FP reg.
6672 long double 16 FP reg. FP reg.
6673
6674 _Complex float 8 FP reg. FP reg.
6675 _Complex double 16 FP reg. FP reg.
6676 _Complex long double 32 memory FP reg.
6677
6678 vector float <=16 FP reg. FP reg.
6679 vector float 16<s<=32 memory FP reg.
6680 vector float >32 memory memory
6681
6682 aggregate <=16 reg. reg.
6683 aggregate 16<s<=32 memory reg.
6684 aggregate >32 memory memory
6685
6686
6687
6688 Note #1: complex floating-point types follow the extended SPARC ABIs as
6689 implemented by the Sun compiler.
6690
6691 Note #2: integer vector types follow the scalar floating-point types
6692 conventions to match what is implemented by the Sun VIS SDK.
6693
6694 Note #3: floating-point vector types follow the aggregate types
6695 conventions. */
6696
6697
6698 /* Maximum number of int regs for args. */
6699 #define SPARC_INT_ARG_MAX 6
6700 /* Maximum number of fp regs for args. */
6701 #define SPARC_FP_ARG_MAX 16
6702 /* Number of words (partially) occupied for a given size in units. */
6703 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
6704
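/* For example, with the 64-bit ABI (UNITS_PER_WORD == 8) a 20-byte
   aggregate occupies CEIL_NWORDS (20) == 3 parameter words, whereas with
   the 32-bit ABI (UNITS_PER_WORD == 4) it occupies 5. */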
6705 /* Handle the INIT_CUMULATIVE_ARGS macro.
6706 Initialize a variable CUM of type CUMULATIVE_ARGS
6707 for a call to a function whose data type is FNTYPE.
6708 For a library call, FNTYPE is 0. */
6709
6710 void
6711 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6712 {
6713 cum->words = 0;
6714 cum->prototype_p = fntype && prototype_p (fntype);
6715 cum->libcall_p = !fntype;
6716 }
6717
6718 /* Handle promotion of pointer and integer arguments. */
6719
6720 static machine_mode
6721 sparc_promote_function_mode (const_tree type, machine_mode mode,
6722 int *punsignedp, const_tree, int)
6723 {
6724 if (type && POINTER_TYPE_P (type))
6725 {
6726 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6727 return Pmode;
6728 }
6729
6730 /* Integral arguments are passed as full words, as per the ABI. */
6731 if (GET_MODE_CLASS (mode) == MODE_INT
6732 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6733 return word_mode;
6734
6735 return mode;
6736 }
6737
6738 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6739
6740 static bool
6741 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6742 {
6743 return TARGET_ARCH64 ? true : false;
6744 }
6745
6746 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6747 Specify whether to pass the argument by reference. */
6748
6749 static bool
6750 sparc_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
6751 {
6752 tree type = arg.type;
6753 machine_mode mode = arg.mode;
6754 if (TARGET_ARCH32)
6755 /* Original SPARC 32-bit ABI says that structures and unions,
6756 and quad-precision floats are passed by reference.
6757 All other base types are passed in registers.
6758
6759 Extended ABI (as implemented by the Sun compiler) says that all
6760 complex floats are passed by reference. Pass complex integers
6761 in registers up to 8 bytes. More generally, enforce the 2-word
6762 cap for passing arguments in registers.
6763
6764 Vector ABI (as implemented by the Sun VIS SDK) says that integer
6765 vectors are passed like floats of the same size, that is in
6766 registers up to 8 bytes. Pass all vector floats by reference
6767 like structure and unions. */
6768 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6769 || mode == SCmode
6770 /* Catch CDImode, TFmode, DCmode and TCmode. */
6771 || GET_MODE_SIZE (mode) > 8
6772 || (type
6773 && VECTOR_TYPE_P (type)
6774 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6775 else
6776 /* Original SPARC 64-bit ABI says that structures and unions
6777 smaller than 16 bytes are passed in registers, as well as
6778 all other base types.
6779
6780 Extended ABI (as implemented by the Sun compiler) says that
6781 complex floats are passed in registers up to 16 bytes. Pass
6782 all complex integers in registers up to 16 bytes. More generally,
6783 enforce the 2-word cap for passing arguments in registers.
6784
6785 Vector ABI (as implemented by the Sun VIS SDK) says that integer
6786 vectors are passed like floats of the same size, that is in
6787 registers (up to 16 bytes). Pass all vector floats like structure
6788 and unions. */
6789 return ((type
6790 && (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
6791 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6792 /* Catch CTImode and TCmode. */
6793 || GET_MODE_SIZE (mode) > 16);
6794 }
6795
6796 /* Traverse the record TYPE recursively and call FUNC on its fields.
6797 NAMED is true if this is for a named parameter. DATA is passed
6798 to FUNC for each field. OFFSET is the starting position and
6799 PACKED is true if we are inside a packed record. */
6800
6801 template <typename T, void Func (const_tree, int, bool, T*)>
6802 static void
6803 traverse_record_type (const_tree type, bool named, T *data,
6804 int offset = 0, bool packed = false)
6805 {
6806 /* The ABI obviously doesn't specify how packed structures are passed.
6807 These are passed in integer regs if possible, otherwise memory. */
6808 if (!packed)
6809 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6810 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6811 {
6812 packed = true;
6813 break;
6814 }
6815
6816 /* Walk the real fields, but skip those with no size or a zero size.
6817 ??? Fields with variable offset are handled as having zero offset. */
6818 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6819 if (TREE_CODE (field) == FIELD_DECL)
6820 {
6821 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6822 continue;
6823
6824 int bitpos = offset;
6825 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6826 bitpos += int_bit_position (field);
6827
6828 tree field_type = TREE_TYPE (field);
6829 if (TREE_CODE (field_type) == RECORD_TYPE)
6830 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6831 packed);
6832 else
6833 {
6834 const bool fp_type
6835 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6836 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6837 data);
6838 }
6839 }
6840 }
6841
6842 /* Handle recursive register classifying for structure layout. */
6843
6844 typedef struct
6845 {
6846 bool fp_regs; /* true if field eligible to FP registers. */
6847 bool fp_regs_in_first_word; /* true if such field in first word. */
6848 } classify_data_t;
6849
6850 /* A subroutine of function_arg_slotno. Classify the field. */
6851
6852 inline void
6853 classify_registers (const_tree, int bitpos, bool fp, classify_data_t *data)
6854 {
6855 if (fp)
6856 {
6857 data->fp_regs = true;
6858 if (bitpos < BITS_PER_WORD)
6859 data->fp_regs_in_first_word = true;
6860 }
6861 }
6862
6863 /* Compute the slot number to pass an argument in.
6864 Return the slot number or -1 if passing on the stack.
6865
6866 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6867 the preceding args and about the function being called.
6868 MODE is the argument's machine mode.
6869 TYPE is the data type of the argument (as a tree).
6870 This is null for libcalls where that information may
6871 not be available.
6872 NAMED is nonzero if this argument is a named parameter
6873 (otherwise it is an extra parameter matching an ellipsis).
6874 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6875 *PREGNO records the register number to use if scalar type.
6876 *PPADDING records the amount of padding needed in words. */
6877
6878 static int
6879 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6880 const_tree type, bool named, bool incoming,
6881 int *pregno, int *ppadding)
6882 {
6883 const int regbase
6884 = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
6885 int slotno = cum->words, regno;
6886 enum mode_class mclass = GET_MODE_CLASS (mode);
6887
6888 /* Silence warnings in the callers. */
6889 *pregno = -1;
6890 *ppadding = -1;
6891
6892 if (type && TREE_ADDRESSABLE (type))
6893 return -1;
6894
6895 /* In 64-bit mode, objects requiring 16-byte alignment get it. */
6896 if (TARGET_ARCH64
6897 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6898 && (slotno & 1) != 0)
6899 {
6900 slotno++;
6901 *ppadding = 1;
6902 }
6903 else
6904 *ppadding = 0;
6905
6906 /* Vector types deserve special treatment because they are polymorphic wrt
6907 their mode, depending upon whether VIS instructions are enabled. */
6908 if (type && VECTOR_TYPE_P (type))
6909 {
6910 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6911 {
6912 /* The SPARC port defines no floating-point vector modes. */
6913 gcc_assert (mode == BLKmode);
6914 }
6915 else
6916 {
6917 /* Integer vector types should either have a vector
6918 mode or an integral mode, because we are guaranteed
6919 by pass_by_reference that their size is not greater
6920 than 16 bytes and TImode is 16-byte wide. */
6921 gcc_assert (mode != BLKmode);
6922
6923 /* Integer vectors are handled like floats as per
6924 the Sun VIS SDK. */
6925 mclass = MODE_FLOAT;
6926 }
6927 }
6928
6929 switch (mclass)
6930 {
6931 case MODE_FLOAT:
6932 case MODE_COMPLEX_FLOAT:
6933 case MODE_VECTOR_INT:
6934 if (TARGET_ARCH64 && TARGET_FPU && named)
6935 {
6936 /* If all arg slots are filled, then must pass on stack. */
6937 if (slotno >= SPARC_FP_ARG_MAX)
6938 return -1;
6939
6940 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6941 /* Arguments filling only a single FP register are
6942 right-justified in the outer double FP register. */
6943 if (GET_MODE_SIZE (mode) <= 4)
6944 regno++;
6945 break;
6946 }
6947 /* fallthrough */
6948
6949 case MODE_INT:
6950 case MODE_COMPLEX_INT:
6951 /* If all arg slots are filled, then must pass on stack. */
6952 if (slotno >= SPARC_INT_ARG_MAX)
6953 return -1;
6954
6955 regno = regbase + slotno;
6956 break;
6957
6958 case MODE_RANDOM:
6959 /* MODE is VOIDmode when generating the actual call. */
6960 if (mode == VOIDmode)
6961 return -1;
6962
6963 if (TARGET_64BIT && TARGET_FPU && named
6964 && type
6965 && (TREE_CODE (type) == RECORD_TYPE || VECTOR_TYPE_P (type)))
6966 {
6967 /* If all arg slots are filled, then must pass on stack. */
6968 if (slotno >= SPARC_FP_ARG_MAX)
6969 return -1;
6970
6971 if (TREE_CODE (type) == RECORD_TYPE)
6972 {
6973 classify_data_t data = { false, false };
6974 traverse_record_type<classify_data_t, classify_registers>
6975 (type, named, &data);
6976
6977 if (data.fp_regs)
6978 {
6979 /* If all FP slots are filled except for the last one and
6980 there is no FP field in the first word, then must pass
6981 on stack. */
6982 if (slotno >= SPARC_FP_ARG_MAX - 1
6983 && !data.fp_regs_in_first_word)
6984 return -1;
6985 }
6986 else
6987 {
6988 /* If all int slots are filled, then must pass on stack. */
6989 if (slotno >= SPARC_INT_ARG_MAX)
6990 return -1;
6991 }
6992
6993 /* PREGNO isn't set since both int and FP regs can be used. */
6994 return slotno;
6995 }
6996
6997 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6998 }
6999 else
7000 {
7001 /* If all arg slots are filled, then must pass on stack. */
7002 if (slotno >= SPARC_INT_ARG_MAX)
7003 return -1;
7004
7005 regno = regbase + slotno;
7006 }
7007 break;
7008
7009 default :
7010 gcc_unreachable ();
7011 }
7012
7013 *pregno = regno;
7014 return slotno;
7015 }
7016
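/* For instance, under the 64-bit ABI a named double argument in slot 3
   gets SPARC_FP_ARG_FIRST + 3 * 2, i.e. %d6, in agreement with the slot
   table further up, while a named float in the same slot is bumped by one
   to %f7 because single floats are right-justified in the 8-byte slot. */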
7017 /* Handle recursive register counting/assigning for structure layout. */
7018
7019 typedef struct
7020 {
7021 int slotno; /* slot number of the argument. */
7022 int regbase; /* regno of the base register. */
7023 int intoffset; /* offset of the first pending integer field. */
7024 int nregs; /* number of words passed in registers. */
7025 bool stack; /* true if part of the argument is on the stack. */
7026 rtx ret; /* return expression being built. */
7027 } assign_data_t;
7028
7029 /* A subroutine of function_arg_record_value. Compute the number of integer
7030 registers to be assigned between PARMS->intoffset and BITPOS. Return
7031 true if at least one integer register is assigned or false otherwise. */
7032
7033 static bool
7034 compute_int_layout (int bitpos, assign_data_t *data, int *pnregs)
7035 {
7036 if (data->intoffset < 0)
7037 return false;
7038
7039 const int intoffset = data->intoffset;
7040 data->intoffset = -1;
7041
7042 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7043 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
7044 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
7045 int nregs = (endbit - startbit) / BITS_PER_WORD;
7046
7047 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
7048 {
7049 nregs = SPARC_INT_ARG_MAX - this_slotno;
7050
7051 /* We need to pass this field (partly) on the stack. */
7052 data->stack = 1;
7053 }
7054
7055 if (nregs <= 0)
7056 return false;
7057
7058 *pnregs = nregs;
7059 return true;
7060 }
7061
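/* As an illustration, with BITS_PER_WORD == 64 an integer run that starts
   at intoffset 0 and stops before a floating-point field at bitpos 72
   gives startbit 0 and endbit 128, hence 2 integer registers. */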
7062 /* A subroutine of function_arg_record_value. Compute the number and the mode
7063 of the FP registers to be assigned for FIELD. Return true if at least one
7064 FP register is assigned or false otherwise. */
7065
7066 static bool
7067 compute_fp_layout (const_tree field, int bitpos, assign_data_t *data,
7068 int *pnregs, machine_mode *pmode)
7069 {
7070 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7071 machine_mode mode = DECL_MODE (field);
7072 int nregs, nslots;
7073
7074 /* Slots are counted as words while regs are counted as having the size of
7075 the (inner) mode. */
7076 if (VECTOR_TYPE_P (TREE_TYPE (field)) && mode == BLKmode)
7077 {
7078 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7079 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
7080 }
7081 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
7082 {
7083 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7084 nregs = 2;
7085 }
7086 else
7087 nregs = 1;
7088
7089 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
7090
7091 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
7092 {
7093 nslots = SPARC_FP_ARG_MAX - this_slotno;
7094 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
7095
7096 /* We need to pass this field (partly) on the stack. */
7097 data->stack = 1;
7098
7099 if (nregs <= 0)
7100 return false;
7101 }
7102
7103 *pnregs = nregs;
7104 *pmode = mode;
7105 return true;
7106 }
7107
7108 /* A subroutine of function_arg_record_value. Count the number of registers
7109 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
7110
7111 inline void
7112 count_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
7113 {
7114 if (fp)
7115 {
7116 int nregs;
7117 machine_mode mode;
7118
7119 if (compute_int_layout (bitpos, data, &nregs))
7120 data->nregs += nregs;
7121
7122 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
7123 data->nregs += nregs;
7124 }
7125 else
7126 {
7127 if (data->intoffset < 0)
7128 data->intoffset = bitpos;
7129 }
7130 }
7131
7132 /* A subroutine of function_arg_record_value. Assign the bits of the
7133 structure between PARMS->intoffset and BITPOS to integer registers. */
7134
7135 static void
7136 assign_int_registers (int bitpos, assign_data_t *data)
7137 {
7138 int intoffset = data->intoffset;
7139 machine_mode mode;
7140 int nregs;
7141
7142 if (!compute_int_layout (bitpos, data, &nregs))
7143 return;
7144
7145 /* If this is the trailing part of a word, only load that much into
7146 the register. Otherwise load the whole register. Note that in
7147 the latter case we may pick up unwanted bits. It's not a problem
7148 at the moment but we may wish to revisit this. */
7149 if (intoffset % BITS_PER_WORD != 0)
7150 mode = smallest_int_mode_for_size (BITS_PER_WORD
7151 - intoffset % BITS_PER_WORD);
7152 else
7153 mode = word_mode;
7154
7155 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7156 unsigned int regno = data->regbase + this_slotno;
7157 intoffset /= BITS_PER_UNIT;
7158
7159 do
7160 {
7161 rtx reg = gen_rtx_REG (mode, regno);
7162 XVECEXP (data->ret, 0, data->stack + data->nregs)
7163 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7164 data->nregs += 1;
7165 mode = word_mode;
7166 regno += 1;
7167 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
7168 }
7169 while (--nregs > 0);
7170 }
7171
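/* Note that (intoffset | (UNITS_PER_WORD - 1)) + 1 always advances to the
   start of the next word: with UNITS_PER_WORD == 8, an intoffset of 4
   becomes 8 and an intoffset of 8 becomes 16.  Only the first register
   loaded can therefore use a mode narrower than word_mode. */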
7172 /* A subroutine of function_arg_record_value. Assign FIELD at position
7173 BITPOS to FP registers. */
7174
7175 static void
7176 assign_fp_registers (const_tree field, int bitpos, assign_data_t *data)
7177 {
7178 int nregs;
7179 machine_mode mode;
7180
7181 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
7182 return;
7183
7184 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7185 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
7186 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
7187 regno++;
7188 int pos = bitpos / BITS_PER_UNIT;
7189
7190 do
7191 {
7192 rtx reg = gen_rtx_REG (mode, regno);
7193 XVECEXP (data->ret, 0, data->stack + data->nregs)
7194 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
7195 data->nregs += 1;
7196 regno += GET_MODE_SIZE (mode) / 4;
7197 pos += GET_MODE_SIZE (mode);
7198 }
7199 while (--nregs > 0);
7200 }
7201
7202 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
7203 the structure between PARMS->intoffset and BITPOS to registers. */
7204
7205 inline void
7206 assign_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
7207 {
7208 if (fp)
7209 {
7210 assign_int_registers (bitpos, data);
7211
7212 assign_fp_registers (field, bitpos, data);
7213 }
7214 else
7215 {
7216 if (data->intoffset < 0)
7217 data->intoffset = bitpos;
7218 }
7219 }
7220
7221 /* Used by function_arg and function_value to implement the complex
7222 conventions of the 64-bit ABI for passing and returning structures.
7223 Return an expression valid as a return value for the FUNCTION_ARG
7224 and TARGET_FUNCTION_VALUE.
7225
7226 TYPE is the data type of the argument (as a tree).
7227 This is null for libcalls where that information may
7228 not be available.
7229 MODE is the argument's machine mode.
7230 SLOTNO is the index number of the argument's slot in the parameter array.
7231 NAMED is true if this argument is a named parameter
7232 (otherwise it is an extra parameter matching an ellipsis).
7233 REGBASE is the regno of the base register for the parameter array. */
7234
7235 static rtx
7236 function_arg_record_value (const_tree type, machine_mode mode,
7237 int slotno, bool named, int regbase)
7238 {
7239 const int size = int_size_in_bytes (type);
7240 assign_data_t data;
7241 int nregs;
7242
7243 data.slotno = slotno;
7244 data.regbase = regbase;
7245
7246 /* Count how many registers we need. */
7247 data.nregs = 0;
7248 data.intoffset = 0;
7249 data.stack = false;
7250 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
7251
7252 /* Take into account pending integer fields. */
7253 if (compute_int_layout (size * BITS_PER_UNIT, &data, &nregs))
7254 data.nregs += nregs;
7255
7256 /* Allocate the vector and handle some annoying special cases. */
7257 nregs = data.nregs;
7258
7259 if (nregs == 0)
7260 {
7261 /* ??? Empty structure has no value? Duh? */
7262 if (size <= 0)
7263 {
7264 /* Though there's nothing really to store, return a word register
7265 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
7266 leads to breakage due to the fact that there are zero bytes to
7267 load. */
7268 return gen_rtx_REG (mode, regbase);
7269 }
7270
7271 /* ??? C++ has structures with no fields, and yet a size. Give up
7272 for now and pass everything back in integer registers. */
7273 nregs = CEIL_NWORDS (size);
7274 if (nregs + slotno > SPARC_INT_ARG_MAX)
7275 nregs = SPARC_INT_ARG_MAX - slotno;
7276 }
7277
7278 gcc_assert (nregs > 0);
7279
7280 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
7281
7282 /* If at least one field must be passed on the stack, generate
7283 (parallel [(expr_list (nil) ...) ...]) so that all fields will
7284 also be passed on the stack. We can't do much better because the
7285 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
7286 of structures for which the fields passed exclusively in registers
7287 are not at the beginning of the structure. */
7288 if (data.stack)
7289 XVECEXP (data.ret, 0, 0)
7290 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7291
7292 /* Assign the registers. */
7293 data.nregs = 0;
7294 data.intoffset = 0;
7295 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
7296
7297 /* Assign pending integer fields. */
7298 assign_int_registers (size * BITS_PER_UNIT, &data);
7299
7300 gcc_assert (data.nregs == nregs);
7301
7302 return data.ret;
7303 }
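
/* As an illustrative sketch (an assumption for exposition, not taken from the
   surrounding code): for an outgoing struct { double d; long l; } occupying
   the first two argument slots, the PARALLEL built above would look roughly
   like

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the FP field goes to the FP register of its slot and the integer
   field to the integer register of the following slot; the exact register
   numbers depend on SLOTNO and REGBASE. */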
7304
7305 /* Used by function_arg and function_value to implement the conventions
7306 of the 64-bit ABI for passing and returning unions.
7307 Return an expression valid as a return value for the FUNCTION_ARG
7308 and TARGET_FUNCTION_VALUE.
7309
7310 SIZE is the size in bytes of the union.
7311 MODE is the argument's machine mode.
7312 SLOTNO is the index number of the argument's slot in the parameter array.
7313 REGNO is the hard register the union will be passed in. */
7314
7315 static rtx
7316 function_arg_union_value (int size, machine_mode mode, int slotno, int regno)
7317 {
7318 unsigned int nwords;
7319
7320 /* See comment in function_arg_record_value for empty structures. */
7321 if (size <= 0)
7322 return gen_rtx_REG (mode, regno);
7323
7324 if (slotno == SPARC_INT_ARG_MAX - 1)
7325 nwords = 1;
7326 else
7327 nwords = CEIL_NWORDS (size);
7328
7329 rtx regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
7330
7331 /* Unions are passed left-justified. */
7332 for (unsigned int i = 0; i < nwords; i++)
7333 XVECEXP (regs, 0, i)
7334 = gen_rtx_EXPR_LIST (VOIDmode,
7335 gen_rtx_REG (word_mode, regno + i),
7336 GEN_INT (UNITS_PER_WORD * i));
7337
7338 return regs;
7339 }
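
/* For instance (illustrative only): a 12-byte union starting in slot 0 and
   passed in %o0 yields nwords = 2 and roughly

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   so the padding bytes sit at the end of the second word, consistent with
   left-justified passing. */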
7340
7341 /* Used by function_arg and function_value to implement the conventions
7342 of the 64-bit ABI for passing and returning BLKmode vectors.
7343 Return an expression valid as a return value for the FUNCTION_ARG
7344 and TARGET_FUNCTION_VALUE.
7345
7346 SIZE is the size in bytes of the vector.
7347 SLOTNO is the index number of the argument's slot in the parameter array.
7348 NAMED is true if this argument is a named parameter
7349 (otherwise it is an extra parameter matching an ellipsis).
7350 REGNO is the hard register the vector will be passed in. */
7351
7352 static rtx
7353 function_arg_vector_value (int size, int slotno, bool named, int regno)
7354 {
7355 const int mult = (named ? 2 : 1);
7356 unsigned int nwords;
7357
7358 if (slotno == (named ? SPARC_FP_ARG_MAX : SPARC_INT_ARG_MAX) - 1)
7359 nwords = 1;
7360 else
7361 nwords = CEIL_NWORDS (size);
7362
7363 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nwords));
7364
7365 if (size < UNITS_PER_WORD)
7366 XVECEXP (regs, 0, 0)
7367 = gen_rtx_EXPR_LIST (VOIDmode,
7368 gen_rtx_REG (SImode, regno),
7369 const0_rtx);
7370 else
7371 for (unsigned int i = 0; i < nwords; i++)
7372 XVECEXP (regs, 0, i)
7373 = gen_rtx_EXPR_LIST (VOIDmode,
7374 gen_rtx_REG (word_mode, regno + i * mult),
7375 GEN_INT (i * UNITS_PER_WORD));
7376
7377 return regs;
7378 }
7379
7380 /* Determine where to put an argument to a function.
7381 Value is zero to push the argument on the stack,
7382 or a hard register in which to store the argument.
7383
7384 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7385 the preceding args and about the function being called.
7386 ARG is a description of the argument.
7387 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7388 TARGET_FUNCTION_INCOMING_ARG. */
7389
7390 static rtx
7391 sparc_function_arg_1 (cumulative_args_t cum_v, const function_arg_info &arg,
7392 bool incoming)
7393 {
7394 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7395 const int regbase
7396 = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
7397 int slotno, regno, padding;
7398 tree type = arg.type;
7399 machine_mode mode = arg.mode;
7400 enum mode_class mclass = GET_MODE_CLASS (mode);
7401 bool named = arg.named;
7402
7403 slotno
7404 = function_arg_slotno (cum, mode, type, named, incoming, &regno, &padding);
7405 if (slotno == -1)
7406 return 0;
7407
7408 /* Integer vectors are handled like floats as per the Sun VIS SDK. */
7409 if (type && VECTOR_INTEGER_TYPE_P (type))
7410 mclass = MODE_FLOAT;
7411
7412 if (TARGET_ARCH32)
7413 return gen_rtx_REG (mode, regno);
7414
7415 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7416 and are promoted to registers if possible. */
7417 if (type && TREE_CODE (type) == RECORD_TYPE)
7418 {
7419 const int size = int_size_in_bytes (type);
7420 gcc_assert (size <= 16);
7421
7422 return function_arg_record_value (type, mode, slotno, named, regbase);
7423 }
7424
7425 /* Unions up to 16 bytes in size are passed in integer registers. */
7426 else if (type && TREE_CODE (type) == UNION_TYPE)
7427 {
7428 const int size = int_size_in_bytes (type);
7429 gcc_assert (size <= 16);
7430
7431 return function_arg_union_value (size, mode, slotno, regno);
7432 }
7433
7434 /* Floating-point vectors up to 16 bytes are passed in registers. */
7435 else if (type && VECTOR_TYPE_P (type) && mode == BLKmode)
7436 {
7437 const int size = int_size_in_bytes (type);
7438 gcc_assert (size <= 16);
7439
7440 return function_arg_vector_value (size, slotno, named, regno);
7441 }
7442
7443 /* V9 fp args in reg slots beyond the int reg slots get passed in regs
7444 but also have the slot allocated for them.
7445 If no prototype is in scope, fp values in register slots get passed
7446 in two places: either fp regs and int regs, or fp regs and memory. */
7447 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7448 && SPARC_FP_REG_P (regno))
7449 {
7450 rtx reg = gen_rtx_REG (mode, regno);
7451 if (cum->prototype_p || cum->libcall_p)
7452 return reg;
7453 else
7454 {
7455 rtx v0, v1;
7456
7457 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7458 {
7459 int intreg;
7460
7461 /* On incoming, we don't need to know that the value
7462 is passed in both %f0 and %i0; knowing that confuses other parts,
7463 causing needless spillage even in the simplest cases. */
7464 if (incoming)
7465 return reg;
7466
7467 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7468 + (regno - SPARC_FP_ARG_FIRST) / 2);
7469
7470 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7471 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7472 const0_rtx);
7473 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7474 }
7475 else
7476 {
7477 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7478 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7479 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7480 }
7481 }
7482 }
7483
7484 /* All other aggregate types are passed in an integer register in a mode
7485 corresponding to the size of the type. */
7486 else if (type && AGGREGATE_TYPE_P (type))
7487 {
7488 const int size = int_size_in_bytes (type);
7489 gcc_assert (size <= 16);
7490
7491 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7492 }
7493
7494 return gen_rtx_REG (mode, regno);
7495 }
7496
7497 /* Handle the TARGET_FUNCTION_ARG target hook. */
7498
7499 static rtx
7500 sparc_function_arg (cumulative_args_t cum, const function_arg_info &arg)
7501 {
7502 return sparc_function_arg_1 (cum, arg, false);
7503 }
7504
7505 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7506
7507 static rtx
7508 sparc_function_incoming_arg (cumulative_args_t cum,
7509 const function_arg_info &arg)
7510 {
7511 return sparc_function_arg_1 (cum, arg, true);
7512 }
7513
7514 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
7515
7516 static unsigned int
7517 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7518 {
7519 return ((TARGET_ARCH64
7520 && (GET_MODE_ALIGNMENT (mode) == 128
7521 || (type && TYPE_ALIGN (type) == 128)))
7522 ? 128
7523 : PARM_BOUNDARY);
7524 }
7525
7526 /* For an arg passed partly in registers and partly in memory,
7527 this is the number of bytes of registers used.
7528 For args passed entirely in registers or entirely in memory, zero.
7529
7530 Any arg that starts in the first 6 regs but won't entirely fit in them
7531 needs partial registers on v8. On v9, structures with integer
7532 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7533 values that begin in the last fp reg [where "last fp reg" varies with the
7534 mode] will be split between that reg and memory. */
7535
7536 static int
7537 sparc_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
7538 {
7539 int slotno, regno, padding;
7540
7541 /* We pass false for incoming here; it doesn't matter. */
7542 slotno = function_arg_slotno (get_cumulative_args (cum), arg.mode, arg.type,
7543 arg.named, false, &regno, &padding);
7544
7545 if (slotno == -1)
7546 return 0;
7547
7548 if (TARGET_ARCH32)
7549 {
7550 /* We are guaranteed by pass_by_reference that the size of the
7551 argument is not greater than 8 bytes, so we only need to return
7552 one word if the argument is partially passed in registers. */
7553 const int size = GET_MODE_SIZE (arg.mode);
7554
7555 if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
7556 return UNITS_PER_WORD;
7557 }
7558 else
7559 {
7560 /* We are guaranteed by pass_by_reference that the size of the
7561 argument is not greater than 16 bytes, so we only need to return
7562 one word if the argument is partially passed in registers. */
7563 if (arg.aggregate_type_p ())
7564 {
7565 const int size = int_size_in_bytes (arg.type);
7566
7567 if (size > UNITS_PER_WORD
7568 && (slotno == SPARC_INT_ARG_MAX - 1
7569 || slotno == SPARC_FP_ARG_MAX - 1))
7570 return UNITS_PER_WORD;
7571 }
7572 else if (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_INT
7573 || ((GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
7574 || (arg.type && VECTOR_TYPE_P (arg.type)))
7575 && !(TARGET_FPU && arg.named)))
7576 {
7577 const int size = (arg.type && VECTOR_FLOAT_TYPE_P (arg.type))
7578 ? int_size_in_bytes (arg.type)
7579 : GET_MODE_SIZE (arg.mode);
7580
7581 if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
7582 return UNITS_PER_WORD;
7583 }
7584 else if (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
7585 || (arg.type && VECTOR_TYPE_P (arg.type)))
7586 {
7587 const int size = (arg.type && VECTOR_FLOAT_TYPE_P (arg.type))
7588 ? int_size_in_bytes (arg.type)
7589 : GET_MODE_SIZE (arg.mode);
7590
7591 if (size > UNITS_PER_WORD && slotno == SPARC_FP_ARG_MAX - 1)
7592 return UNITS_PER_WORD;
7593 }
7594 }
7595
7596 return 0;
7597 }
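
/* Worked example (illustrative, under the 64-bit ABI): a 16-byte struct whose
   first word lands in the last integer slot (slotno == SPARC_INT_ARG_MAX - 1)
   is reported as UNITS_PER_WORD partial bytes, i.e. 8 bytes go in the last
   register and the remaining 8 bytes go on the stack. */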
7598
7599 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7600 Update the data in CUM to advance over argument ARG. */
7601
7602 static void
7603 sparc_function_arg_advance (cumulative_args_t cum_v,
7604 const function_arg_info &arg)
7605 {
7606 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7607 tree type = arg.type;
7608 machine_mode mode = arg.mode;
7609 int regno, padding;
7610
7611 /* We pass false for incoming here; it doesn't matter. */
7612 function_arg_slotno (cum, mode, type, arg.named, false, &regno, &padding);
7613
7614 /* If argument requires leading padding, add it. */
7615 cum->words += padding;
7616
7617 if (TARGET_ARCH32)
7618 cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
7619 else
7620 {
7621 /* For types that can have BLKmode, get the size from the type. */
7622 if (type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7623 {
7624 const int size = int_size_in_bytes (type);
7625
7626 /* See comment in function_arg_record_value for empty structures. */
7627 if (size <= 0)
7628 cum->words++;
7629 else
7630 cum->words += CEIL_NWORDS (size);
7631 }
7632 else
7633 cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
7634 }
7635 }
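
/* Illustrative arithmetic (not from the surrounding comments): on
   TARGET_ARCH64, a 12-byte struct advances cum->words by CEIL_NWORDS (12)
   == 2, while an empty (zero-sized) C++ struct still consumes one word; on
   TARGET_ARCH32, a DFmode argument advances cum->words by CEIL_NWORDS (8)
   == 2. */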
7636
7637 /* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI, structs
7638 are always stored left-shifted in their argument slot. */
7639
7640 static pad_direction
7641 sparc_function_arg_padding (machine_mode mode, const_tree type)
7642 {
7643 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7644 return PAD_UPWARD;
7645
7646 /* Fall back to the default. */
7647 return default_function_arg_padding (mode, type);
7648 }
7649
7650 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7651 Specify whether to return the return value in memory. */
7652
7653 static bool
7654 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7655 {
7656 if (TARGET_ARCH32)
7657 /* Original SPARC 32-bit ABI says that structures and unions, and
7658 quad-precision floats are returned in memory. But note that the
7659 first part is implemented through -fpcc-struct-return being the
7660 default, so here we only implement -freg-struct-return instead.
7661 All other base types are returned in registers.
7662
7663 Extended ABI (as implemented by the Sun compiler) says that
7664 all complex floats are returned in registers (8 FP registers
7665 at most for '_Complex long double'). Return all complex integers
7666 in registers (4 at most for '_Complex long long').
7667
7668 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7669 integers are returned like floats of the same size, that is in
7670 registers up to 8 bytes and in memory otherwise. Return all
7671 vector floats in memory like structure and unions; note that
7672 they always have BLKmode like the latter. */
7673 return (TYPE_MODE (type) == BLKmode
7674 || TYPE_MODE (type) == TFmode
7675 || (TREE_CODE (type) == VECTOR_TYPE
7676 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7677 else
7678 /* Original SPARC 64-bit ABI says that structures and unions
7679 smaller than 32 bytes are returned in registers, as well as
7680 all other base types.
7681
7682 Extended ABI (as implemented by the Sun compiler) says that all
7683 complex floats are returned in registers (8 FP registers at most
7684 for '_Complex long double'). Return all complex integers in
7685 registers (4 at most for '_Complex TItype').
7686
7687 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7688 integers are returned like floats of the same size, that is in
7689 registers. Return all vector floats like structure and unions;
7690 note that they always have BLKmode like the latter. */
7691 return (TYPE_MODE (type) == BLKmode
7692 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
7693 }
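
/* A couple of concrete cases (illustrative): under the 64-bit ABI a 24-byte
   struct (BLKmode, size <= 32) is returned in registers whereas a 40-byte
   struct is returned in memory; under the 32-bit ABI with -freg-struct-return,
   'long double' (TFmode) is still returned in memory, as required above. */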
7694
7695 /* Handle the TARGET_STRUCT_VALUE target hook.
7696 Return where to find the structure return value address. */
7697
7698 static rtx
7699 sparc_struct_value_rtx (tree fndecl, int incoming)
7700 {
7701 if (TARGET_ARCH64)
7702 return NULL_RTX;
7703 else
7704 {
7705 rtx mem;
7706
7707 if (incoming)
7708 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7709 STRUCT_VALUE_OFFSET));
7710 else
7711 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7712 STRUCT_VALUE_OFFSET));
7713
7714 /* Only follow the SPARC ABI for fixed-size structure returns.
7715 Variable-size structure returns are handled per the normal
7716 procedures in GCC. This is enabled by -mstd-struct-return. */
7717 if (incoming == 2
7718 && sparc_std_struct_return
7719 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7720 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7721 {
7722 /* We must check and adjust the return address, since the caller may
7723 or may not have actually provided the return object. */
7724 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7725 rtx scratch = gen_reg_rtx (SImode);
7726 rtx_code_label *endlab = gen_label_rtx ();
7727
7728 /* Calculate the return object size. */
7729 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7730 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7731 /* Construct a temporary return value. */
7732 rtx temp_val
7733 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7734
7735 /* Implement SPARC 32-bit psABI callee return struct checking:
7736
7737 Fetch the instruction where we will return to and see if
7738 it's an unimp instruction (the most significant 10 bits
7739 will be zero). */
7740 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7741 plus_constant (Pmode,
7742 ret_reg, 8)));
7743 /* Assume the size is valid and pre-adjust. */
7744 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7745 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7746 0, endlab);
7747 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7748 /* Write the address of the memory pointed to by temp_val into
7749 the memory pointed to by mem. */
7750 emit_move_insn (mem, XEXP (temp_val, 0));
7751 emit_label (endlab);
7752 }
7753
7754 return mem;
7755 }
7756 }
7757
7758 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7759 For v9, function return values are subject to the same rules as arguments,
7760 except that up to 32 bytes may be returned in registers. */
7761
7762 static rtx
7763 sparc_function_value_1 (const_tree type, machine_mode mode, bool outgoing)
7764 {
7765 /* Beware that the two values are swapped here wrt function_arg. */
7766 const int regbase
7767 = outgoing ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
7768 enum mode_class mclass = GET_MODE_CLASS (mode);
7769 int regno;
7770
7771 /* Integer vectors are handled like floats as per the Sun VIS SDK.
7772 Note that integer vectors larger than 16 bytes have BLKmode so
7773 they need to be handled like floating-point vectors below. */
7774 if (type && VECTOR_INTEGER_TYPE_P (type) && mode != BLKmode)
7775 mclass = MODE_FLOAT;
7776
7777 if (TARGET_ARCH64 && type)
7778 {
7779 /* Structures up to 32 bytes in size are returned in registers. */
7780 if (TREE_CODE (type) == RECORD_TYPE)
7781 {
7782 const int size = int_size_in_bytes (type);
7783 gcc_assert (size <= 32);
7784
7785 return function_arg_record_value (type, mode, 0, true, regbase);
7786 }
7787
7788 /* Unions up to 32 bytes in size are returned in integer registers. */
7789 else if (TREE_CODE (type) == UNION_TYPE)
7790 {
7791 const int size = int_size_in_bytes (type);
7792 gcc_assert (size <= 32);
7793
7794 return function_arg_union_value (size, mode, 0, regbase);
7795 }
7796
7797 /* Vectors up to 32 bytes are returned in FP registers. */
7798 else if (VECTOR_TYPE_P (type) && mode == BLKmode)
7799 {
7800 const int size = int_size_in_bytes (type);
7801 gcc_assert (size <= 32);
7802
7803 return function_arg_vector_value (size, 0, true, SPARC_FP_ARG_FIRST);
7804 }
7805
7806 /* Objects that require it are returned in FP registers. */
7807 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7808 ;
7809
7810 /* All other aggregate types are returned in an integer register in a
7811 mode corresponding to the size of the type. */
7812 else if (AGGREGATE_TYPE_P (type))
7813 {
7814 /* All other aggregate types are passed in an integer register
7815 in a mode corresponding to the size of the type. */
7816 const int size = int_size_in_bytes (type);
7817 gcc_assert (size <= 32);
7818
7819 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7820
7821 /* ??? We probably should have made the same ABI change in
7822 3.4.0 as the one we made for unions. The latter was
7823 required by the SCD though, while the former is not
7824 specified, so we favored compatibility and efficiency.
7825
7826 Now we're stuck for aggregates larger than 16 bytes,
7827 because OImode vanished in the meantime. Let's not
7828 try to be unduly clever, and simply follow the ABI
7829 for unions in that case. */
7830 if (mode == BLKmode)
7831 return function_arg_union_value (size, mode, 0, regbase);
7832 else
7833 mclass = MODE_INT;
7834 }
7835
7836 /* We should only have pointer and integer types at this point. This
7837 must match sparc_promote_function_mode. */
7838 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7839 mode = word_mode;
7840 }
7841
7842 /* We should only have pointer and integer types at this point, except with
7843 -freg-struct-return. This must match sparc_promote_function_mode. */
7844 else if (TARGET_ARCH32
7845 && !(type && AGGREGATE_TYPE_P (type))
7846 && mclass == MODE_INT
7847 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7848 mode = word_mode;
7849
7850 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7851 regno = SPARC_FP_ARG_FIRST;
7852 else
7853 regno = regbase;
7854
7855 return gen_rtx_REG (mode, regno);
7856 }
7857
7858 /* Handle TARGET_FUNCTION_VALUE.
7859 On the SPARC, the value is found in the first "output" register, but the
7860 called function leaves it in the first "input" register. */
7861
7862 static rtx
7863 sparc_function_value (const_tree valtype,
7864 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7865 bool outgoing)
7866 {
7867 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7868 }
7869
7870 /* Handle TARGET_LIBCALL_VALUE. */
7871
7872 static rtx
7873 sparc_libcall_value (machine_mode mode,
7874 const_rtx fun ATTRIBUTE_UNUSED)
7875 {
7876 return sparc_function_value_1 (NULL_TREE, mode, false);
7877 }
7878
7879 /* Handle FUNCTION_VALUE_REGNO_P.
7880 On the SPARC, the first "output" reg is used for integer values, and the
7881 first floating point register is used for floating point values. */
7882
7883 static bool
7884 sparc_function_value_regno_p (const unsigned int regno)
7885 {
7886 return (regno == 8 || (TARGET_FPU && regno == 32));
7887 }
7888
7889 /* Do what is necessary for `va_start'. We look at the current function
7890 to determine if stdarg or varargs is used and return the address of
7891 the first unnamed parameter. */
7892
7893 static rtx
7894 sparc_builtin_saveregs (void)
7895 {
7896 int first_reg = crtl->args.info.words;
7897 rtx address;
7898 int regno;
7899
7900 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7901 emit_move_insn (gen_rtx_MEM (word_mode,
7902 gen_rtx_PLUS (Pmode,
7903 frame_pointer_rtx,
7904 GEN_INT (FIRST_PARM_OFFSET (0)
7905 + (UNITS_PER_WORD
7906 * regno)))),
7907 gen_rtx_REG (word_mode,
7908 SPARC_INCOMING_INT_ARG_FIRST + regno));
7909
7910 address = gen_rtx_PLUS (Pmode,
7911 frame_pointer_rtx,
7912 GEN_INT (FIRST_PARM_OFFSET (0)
7913 + UNITS_PER_WORD * first_reg));
7914
7915 return address;
7916 }
7917
7918 /* Implement `va_start' for stdarg. */
7919
7920 static void
7921 sparc_va_start (tree valist, rtx nextarg)
7922 {
7923 nextarg = expand_builtin_saveregs ();
7924 std_expand_builtin_va_start (valist, nextarg);
7925 }
7926
7927 /* Implement `va_arg' for stdarg. */
7928
7929 static tree
7930 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7931 gimple_seq *post_p)
7932 {
7933 HOST_WIDE_INT size, rsize, align;
7934 tree addr, incr;
7935 bool indirect;
7936 tree ptrtype = build_pointer_type (type);
7937
7938 if (pass_va_arg_by_reference (type))
7939 {
7940 indirect = true;
7941 size = rsize = UNITS_PER_WORD;
7942 align = 0;
7943 }
7944 else
7945 {
7946 indirect = false;
7947 size = int_size_in_bytes (type);
7948 rsize = ROUND_UP (size, UNITS_PER_WORD);
7949 align = 0;
7950
7951 if (TARGET_ARCH64)
7952 {
7953 /* For SPARC64, objects requiring 16-byte alignment get it. */
7954 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7955 align = 2 * UNITS_PER_WORD;
7956
7957 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7958 are left-justified in their slots. */
7959 if (AGGREGATE_TYPE_P (type))
7960 {
7961 if (size == 0)
7962 size = rsize = UNITS_PER_WORD;
7963 else
7964 size = rsize;
7965 }
7966 }
7967 }
7968
7969 incr = valist;
7970 if (align)
7971 {
7972 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7973 incr = fold_convert (sizetype, incr);
7974 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7975 size_int (-align));
7976 incr = fold_convert (ptr_type_node, incr);
7977 }
7978
7979 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7980 addr = incr;
7981
7982 if (BYTES_BIG_ENDIAN && size < rsize)
7983 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7984
7985 if (indirect)
7986 {
7987 addr = fold_convert (build_pointer_type (ptrtype), addr);
7988 addr = build_va_arg_indirect_ref (addr);
7989 }
7990
7991 /* If the address isn't aligned properly for the type, we need a temporary.
7992 FIXME: This is inefficient; usually we can do this in registers. */
7993 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7994 {
7995 tree tmp = create_tmp_var (type, "va_arg_tmp");
7996 tree dest_addr = build_fold_addr_expr (tmp);
7997 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7998 3, dest_addr, addr, size_int (rsize));
7999 TREE_ADDRESSABLE (tmp) = 1;
8000 gimplify_and_add (copy, pre_p);
8001 addr = dest_addr;
8002 }
8003
8004 else
8005 addr = fold_convert (ptrtype, addr);
8006
8007 incr = fold_build_pointer_plus_hwi (incr, rsize);
8008 gimplify_assign (valist, incr, post_p);
8009
8010 return build_va_arg_indirect_ref (addr);
8011 }
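
/* Illustrative example of the big-endian adjustment above: reading a 4-byte
   int from an 8-byte va_list slot gives size == 4 and rsize == 8, so ADDR is
   bumped by rsize - size == 4 and the value is fetched from the low-order
   half of the slot, while INCR still advances by the full rsize. */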
8012 \f
8013 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
8014 Specify whether the vector mode is supported by the hardware. */
8015
8016 static bool
8017 sparc_vector_mode_supported_p (machine_mode mode)
8018 {
8019 return TARGET_VIS && VECTOR_MODE_P (mode);
8020 }
8021 \f
8022 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
8023
8024 static machine_mode
8025 sparc_preferred_simd_mode (scalar_mode mode)
8026 {
8027 if (TARGET_VIS)
8028 switch (mode)
8029 {
8030 case E_SImode:
8031 return V2SImode;
8032 case E_HImode:
8033 return V4HImode;
8034 case E_QImode:
8035 return V8QImode;
8036
8037 default:;
8038 }
8039
8040 return word_mode;
8041 }
8042 \f
8043 /* Implement TARGET_CAN_FOLLOW_JUMP. */
8044
8045 static bool
8046 sparc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
8047 {
8048 /* Do not fold unconditional jumps that have been created for crossing
8049 partition boundaries. */
8050 if (CROSSING_JUMP_P (followee) && !CROSSING_JUMP_P (follower))
8051 return false;
8052
8053 return true;
8054 }
8055
8056 /* Return the string to output an unconditional branch to LABEL, which is
8057 the operand number of the label.
8058
8059 DEST is the destination insn (i.e. the label), INSN is the source. */
8060
8061 const char *
8062 output_ubranch (rtx dest, rtx_insn *insn)
8063 {
8064 static char string[64];
8065 bool v9_form = false;
8066 int delta;
8067 char *p;
8068
8069 /* Even if we are trying to use cbcond for this, evaluate
8070 whether we can use V9 branches as our backup plan. */
8071 delta = 5000000;
8072 if (!CROSSING_JUMP_P (insn) && INSN_ADDRESSES_SET_P ())
8073 delta = (INSN_ADDRESSES (INSN_UID (dest))
8074 - INSN_ADDRESSES (INSN_UID (insn)));
8075
8076 /* Leave some instructions for "slop". */
8077 if (TARGET_V9 && delta >= -260000 && delta < 260000)
8078 v9_form = true;
8079
8080 if (TARGET_CBCOND)
8081 {
8082 bool emit_nop = emit_cbcond_nop (insn);
8083 bool far = false;
8084 const char *rval;
8085
8086 if (delta < -500 || delta > 500)
8087 far = true;
8088
8089 if (far)
8090 {
8091 if (v9_form)
8092 rval = "ba,a,pt\t%%xcc, %l0";
8093 else
8094 rval = "b,a\t%l0";
8095 }
8096 else
8097 {
8098 if (emit_nop)
8099 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
8100 else
8101 rval = "cwbe\t%%g0, %%g0, %l0";
8102 }
8103 return rval;
8104 }
8105
8106 if (v9_form)
8107 strcpy (string, "ba%*,pt\t%%xcc, ");
8108 else
8109 strcpy (string, "b%*\t");
8110
8111 p = strchr (string, '\0');
8112 *p++ = '%';
8113 *p++ = 'l';
8114 *p++ = '0';
8115 *p++ = '%';
8116 *p++ = '(';
8117 *p = '\0';
8118
8119 return string;
8120 }
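
/* For reference (an informal illustration): with TARGET_V9 and a nearby
   target, the template built above is "ba%*,pt\t%%xcc, %l0%(", which after
   operand substitution and the '%*'/'%(' punctuation handled by
   sparc_print_operand typically prints as something like

     ba,pt %xcc, .LL5
      nop

   depending on whether the delay slot could be filled (the label .LL5 is
   hypothetical). */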
8121
8122 /* Return the string to output a conditional branch to LABEL, which is
8123 the operand number of the label. OP is the conditional expression.
8124 XEXP (OP, 0) is assumed to be a condition code register (integer or
8125 floating point) and its mode specifies what kind of comparison we made.
8126
8127 DEST is the destination insn (i.e. the label), INSN is the source.
8128
8129 REVERSED is nonzero if we should reverse the sense of the comparison.
8130
8131 ANNUL is nonzero if we should generate an annulling branch. */
8132
8133 const char *
8134 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
8135 rtx_insn *insn)
8136 {
8137 static char string[64];
8138 enum rtx_code code = GET_CODE (op);
8139 rtx cc_reg = XEXP (op, 0);
8140 machine_mode mode = GET_MODE (cc_reg);
8141 const char *labelno, *branch;
8142 int spaces = 8, far;
8143 char *p;
8144
8145 /* v9 branches are limited to +-1MB. If it is too far away,
8146 change
8147
8148 bne,pt %xcc, .LC30
8149
8150 to
8151
8152 be,pn %xcc, .+12
8153 nop
8154 ba .LC30
8155
8156 and
8157
8158 fbne,a,pn %fcc2, .LC29
8159
8160 to
8161
8162 fbe,pt %fcc2, .+16
8163 nop
8164 ba .LC29 */
8165
8166 far = TARGET_V9 && (get_attr_length (insn) >= 3);
8167 if (reversed ^ far)
8168 {
8169 /* Reversing an FP compare requires care -- an ordered compare
8170 becomes an unordered compare and vice versa. */
8171 if (mode == CCFPmode || mode == CCFPEmode)
8172 code = reverse_condition_maybe_unordered (code);
8173 else
8174 code = reverse_condition (code);
8175 }
8176
8177 /* Start by writing the branch condition. */
8178 if (mode == CCFPmode || mode == CCFPEmode)
8179 {
8180 switch (code)
8181 {
8182 case NE:
8183 branch = "fbne";
8184 break;
8185 case EQ:
8186 branch = "fbe";
8187 break;
8188 case GE:
8189 branch = "fbge";
8190 break;
8191 case GT:
8192 branch = "fbg";
8193 break;
8194 case LE:
8195 branch = "fble";
8196 break;
8197 case LT:
8198 branch = "fbl";
8199 break;
8200 case UNORDERED:
8201 branch = "fbu";
8202 break;
8203 case ORDERED:
8204 branch = "fbo";
8205 break;
8206 case UNGT:
8207 branch = "fbug";
8208 break;
8209 case UNLT:
8210 branch = "fbul";
8211 break;
8212 case UNEQ:
8213 branch = "fbue";
8214 break;
8215 case UNGE:
8216 branch = "fbuge";
8217 break;
8218 case UNLE:
8219 branch = "fbule";
8220 break;
8221 case LTGT:
8222 branch = "fblg";
8223 break;
8224 default:
8225 gcc_unreachable ();
8226 }
8227
8228 /* ??? !v9: FP branches cannot be preceded by another floating point
8229 insn. Because there is currently no concept of pre-delay slots,
8230 we can fix this only by always emitting a nop before a floating
8231 point branch. */
8232
8233 string[0] = '\0';
8234 if (! TARGET_V9)
8235 strcpy (string, "nop\n\t");
8236 strcat (string, branch);
8237 }
8238 else
8239 {
8240 switch (code)
8241 {
8242 case NE:
8243 if (mode == CCVmode || mode == CCXVmode)
8244 branch = "bvs";
8245 else
8246 branch = "bne";
8247 break;
8248 case EQ:
8249 if (mode == CCVmode || mode == CCXVmode)
8250 branch = "bvc";
8251 else
8252 branch = "be";
8253 break;
8254 case GE:
8255 if (mode == CCNZmode || mode == CCXNZmode)
8256 branch = "bpos";
8257 else
8258 branch = "bge";
8259 break;
8260 case GT:
8261 branch = "bg";
8262 break;
8263 case LE:
8264 branch = "ble";
8265 break;
8266 case LT:
8267 if (mode == CCNZmode || mode == CCXNZmode)
8268 branch = "bneg";
8269 else
8270 branch = "bl";
8271 break;
8272 case GEU:
8273 branch = "bgeu";
8274 break;
8275 case GTU:
8276 branch = "bgu";
8277 break;
8278 case LEU:
8279 branch = "bleu";
8280 break;
8281 case LTU:
8282 branch = "blu";
8283 break;
8284 default:
8285 gcc_unreachable ();
8286 }
8287 strcpy (string, branch);
8288 }
8289 spaces -= strlen (branch);
8290 p = strchr (string, '\0');
8291
8292 /* Now add the annulling, the label, and a possible noop. */
8293 if (annul && ! far)
8294 {
8295 strcpy (p, ",a");
8296 p += 2;
8297 spaces -= 2;
8298 }
8299
8300 if (TARGET_V9)
8301 {
8302 rtx note;
8303 int v8 = 0;
8304
8305 if (! far && insn && INSN_ADDRESSES_SET_P ())
8306 {
8307 int delta = (INSN_ADDRESSES (INSN_UID (dest))
8308 - INSN_ADDRESSES (INSN_UID (insn)));
8309 /* Leave some instructions for "slop". */
8310 if (delta < -260000 || delta >= 260000)
8311 v8 = 1;
8312 }
8313
8314 switch (mode)
8315 {
8316 case E_CCmode:
8317 case E_CCNZmode:
8318 case E_CCCmode:
8319 case E_CCVmode:
8320 labelno = "%%icc, ";
8321 if (v8)
8322 labelno = "";
8323 break;
8324 case E_CCXmode:
8325 case E_CCXNZmode:
8326 case E_CCXCmode:
8327 case E_CCXVmode:
8328 labelno = "%%xcc, ";
8329 gcc_assert (!v8);
8330 break;
8331 case E_CCFPmode:
8332 case E_CCFPEmode:
8333 {
8334 static char v9_fcc_labelno[] = "%%fccX, ";
8335 /* Set the char indicating the number of the fcc reg to use. */
8336 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
8337 labelno = v9_fcc_labelno;
8338 if (v8)
8339 {
8340 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8341 labelno = "";
8342 }
8343 }
8344 break;
8345 default:
8346 gcc_unreachable ();
8347 }
8348
8349 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8350 {
8351 strcpy (p,
8352 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8353 >= profile_probability::even ()) ^ far)
8354 ? ",pt" : ",pn");
8355 p += 3;
8356 spaces -= 3;
8357 }
8358 }
8359 else
8360 labelno = "";
8361
8362 if (spaces > 0)
8363 *p++ = '\t';
8364 else
8365 *p++ = ' ';
8366 strcpy (p, labelno);
8367 p = strchr (p, '\0');
8368 if (far)
8369 {
8370 strcpy (p, ".+12\n\t nop\n\tb\t");
8371 /* Skip the next insn if requested or
8372 if we know that it will be a nop. */
8373 if (annul || ! final_sequence)
8374 p[3] = '6';
8375 p += 14;
8376 }
8377 *p++ = '%';
8378 *p++ = 'l';
8379 *p++ = label + '0';
8380 *p++ = '%';
8381 *p++ = '#';
8382 *p = '\0';
8383
8384 return string;
8385 }
8386
8387 /* Emit a library call comparison between floating point X and Y.
8388 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8389 Return the new operator to be used in the comparison sequence.
8390
8391 TARGET_ARCH64 uses _Qp_* functions, which take pointers to TFmode
8392 values as arguments instead of the TFmode registers themselves;
8393 that's why we cannot call emit_float_lib_cmp. */
8394
8395 rtx
8396 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8397 {
8398 const char *qpfunc;
8399 rtx slot0, slot1, result, tem, tem2, libfunc;
8400 machine_mode mode;
8401 enum rtx_code new_comparison;
8402
8403 switch (comparison)
8404 {
8405 case EQ:
8406 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8407 break;
8408
8409 case NE:
8410 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8411 break;
8412
8413 case GT:
8414 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8415 break;
8416
8417 case GE:
8418 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8419 break;
8420
8421 case LT:
8422 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8423 break;
8424
8425 case LE:
8426 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8427 break;
8428
8429 case ORDERED:
8430 case UNORDERED:
8431 case UNGT:
8432 case UNLT:
8433 case UNEQ:
8434 case UNGE:
8435 case UNLE:
8436 case LTGT:
8437 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8438 break;
8439
8440 default:
8441 gcc_unreachable ();
8442 }
8443
8444 if (TARGET_ARCH64)
8445 {
8446 if (MEM_P (x))
8447 {
8448 tree expr = MEM_EXPR (x);
8449 if (expr)
8450 mark_addressable (expr);
8451 slot0 = x;
8452 }
8453 else
8454 {
8455 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8456 emit_move_insn (slot0, x);
8457 }
8458
8459 if (MEM_P (y))
8460 {
8461 tree expr = MEM_EXPR (y);
8462 if (expr)
8463 mark_addressable (expr);
8464 slot1 = y;
8465 }
8466 else
8467 {
8468 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8469 emit_move_insn (slot1, y);
8470 }
8471
8472 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8473 emit_library_call (libfunc, LCT_NORMAL,
8474 DImode,
8475 XEXP (slot0, 0), Pmode,
8476 XEXP (slot1, 0), Pmode);
8477 mode = DImode;
8478 }
8479 else
8480 {
8481 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8482 emit_library_call (libfunc, LCT_NORMAL,
8483 SImode,
8484 x, TFmode, y, TFmode);
8485 mode = SImode;
8486 }
8487
8488
8489 /* Immediately move the result of the libcall into a pseudo
8490 register so reload doesn't clobber the value if it needs
8491 the return register for a spill reg. */
8492 result = gen_reg_rtx (mode);
8493 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8494
8495 switch (comparison)
8496 {
8497 default:
8498 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8499 case ORDERED:
8500 case UNORDERED:
8501 new_comparison = (comparison == UNORDERED ? EQ : NE);
8502 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8503 case UNGT:
8504 case UNGE:
8505 new_comparison = (comparison == UNGT ? GT : NE);
8506 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8507 case UNLE:
8508 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8509 case UNLT:
8510 tem = gen_reg_rtx (mode);
8511 if (TARGET_ARCH32)
8512 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8513 else
8514 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8515 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8516 case UNEQ:
8517 case LTGT:
8518 tem = gen_reg_rtx (mode);
8519 if (TARGET_ARCH32)
8520 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8521 else
8522 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8523 tem2 = gen_reg_rtx (mode);
8524 if (TARGET_ARCH32)
8525 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8526 else
8527 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8528 new_comparison = (comparison == UNEQ ? EQ : NE);
8529 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8530 }
8531
8532 gcc_unreachable ();
8533 }
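
/* The decoding above is consistent with the usual _Q_cmp/_Qp_cmp result
   encoding (stated here as an assumption for clarity): 0 = equal, 1 = less,
   2 = greater, 3 = unordered. For example UNLT holds iff the result is 1 or
   3, hence the (result & 1) != 0 test, and UNEQ holds iff the result is 0 or
   3, hence the ((result + 1) & 2) == 0 test. */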
8534
8535 /* Generate an unsigned DImode to FP conversion. This is the same code
8536 optabs would emit if we didn't have TFmode patterns. */
8537
8538 void
8539 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8540 {
8541 rtx i0, i1, f0, in, out;
8542
8543 out = operands[0];
8544 in = force_reg (DImode, operands[1]);
8545 rtx_code_label *neglab = gen_label_rtx ();
8546 rtx_code_label *donelab = gen_label_rtx ();
8547 i0 = gen_reg_rtx (DImode);
8548 i1 = gen_reg_rtx (DImode);
8549 f0 = gen_reg_rtx (mode);
8550
8551 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8552
8553 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8554 emit_jump_insn (gen_jump (donelab));
8555 emit_barrier ();
8556
8557 emit_label (neglab);
8558
8559 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8560 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8561 emit_insn (gen_iordi3 (i0, i0, i1));
8562 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8563 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8564
8565 emit_label (donelab);
8566 }
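
/* A short sketch of the trick above (informal): when the input has its sign
   bit set, i0 = (in >> 1) | (in & 1) halves the value while folding the
   discarded bit into the new low bit, so the final f0 + f0 reproduces
   (FP) in with correct rounding even though in does not fit in a signed
   DImode conversion. */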
8567
8568 /* Generate an FP to unsigned DImode conversion. This is the same code
8569 optabs would emit if we didn't have TFmode patterns. */
8570
8571 void
8572 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8573 {
8574 rtx i0, i1, f0, in, out, limit;
8575
8576 out = operands[0];
8577 in = force_reg (mode, operands[1]);
8578 rtx_code_label *neglab = gen_label_rtx ();
8579 rtx_code_label *donelab = gen_label_rtx ();
8580 i0 = gen_reg_rtx (DImode);
8581 i1 = gen_reg_rtx (DImode);
8582 limit = gen_reg_rtx (mode);
8583 f0 = gen_reg_rtx (mode);
8584
8585 emit_move_insn (limit,
8586 const_double_from_real_value (
8587 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8588 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8589
8590 emit_insn (gen_rtx_SET (out,
8591 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8592 emit_jump_insn (gen_jump (donelab));
8593 emit_barrier ();
8594
8595 emit_label (neglab);
8596
8597 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8598 emit_insn (gen_rtx_SET (i0,
8599 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8600 emit_insn (gen_movdi (i1, const1_rtx));
8601 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8602 emit_insn (gen_xordi3 (out, i0, i1));
8603
8604 emit_label (donelab);
8605 }
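
/* Worked example for the out-of-range path above (illustrative): if IN is
   the FP value 2^63 + 5, the GE test against LIMIT branches to NEGLAB,
   f0 = IN - 2^63 converts to the integer 5, and XORing with the 1 << 63
   constant built in i1 restores the unsigned result 2^63 + 5. */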
8606
8607 /* Return the string to output a compare and branch instruction to DEST.
8608 DEST is the destination insn (i.e. the label), INSN is the source,
8609 and OP is the conditional expression. */
8610
8611 const char *
8612 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8613 {
8614 machine_mode mode = GET_MODE (XEXP (op, 0));
8615 enum rtx_code code = GET_CODE (op);
8616 const char *cond_str, *tmpl;
8617 int far, emit_nop, len;
8618 static char string[64];
8619 char size_char;
8620
8621 /* Compare and Branch is limited to +-2KB. If it is too far away,
8622 change
8623
8624 cxbne X, Y, .LC30
8625
8626 to
8627
8628 cxbe X, Y, .+16
8629 nop
8630 ba,pt xcc, .LC30
8631 nop */
8632
8633 len = get_attr_length (insn);
8634
8635 far = len == 4;
8636 emit_nop = len == 2;
8637
8638 if (far)
8639 code = reverse_condition (code);
8640
8641 size_char = ((mode == SImode) ? 'w' : 'x');
8642
8643 switch (code)
8644 {
8645 case NE:
8646 cond_str = "ne";
8647 break;
8648
8649 case EQ:
8650 cond_str = "e";
8651 break;
8652
8653 case GE:
8654 cond_str = "ge";
8655 break;
8656
8657 case GT:
8658 cond_str = "g";
8659 break;
8660
8661 case LE:
8662 cond_str = "le";
8663 break;
8664
8665 case LT:
8666 cond_str = "l";
8667 break;
8668
8669 case GEU:
8670 cond_str = "cc";
8671 break;
8672
8673 case GTU:
8674 cond_str = "gu";
8675 break;
8676
8677 case LEU:
8678 cond_str = "leu";
8679 break;
8680
8681 case LTU:
8682 cond_str = "cs";
8683 break;
8684
8685 default:
8686 gcc_unreachable ();
8687 }
8688
8689 if (far)
8690 {
8691 int veryfar = 1, delta;
8692
8693 if (INSN_ADDRESSES_SET_P ())
8694 {
8695 delta = (INSN_ADDRESSES (INSN_UID (dest))
8696 - INSN_ADDRESSES (INSN_UID (insn)));
8697 /* Leave some instructions for "slop". */
8698 if (delta >= -260000 && delta < 260000)
8699 veryfar = 0;
8700 }
8701
8702 if (veryfar)
8703 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8704 else
8705 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8706 }
8707 else
8708 {
8709 if (emit_nop)
8710 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8711 else
8712 tmpl = "c%cb%s\t%%1, %%2, %%3";
8713 }
8714
8715 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8716
8717 return string;
8718 }
8719
8720 /* Return the string to output a conditional branch to LABEL, testing
8721 register REG. LABEL is the operand number of the label; REG is the
8722 operand number of the reg. OP is the conditional expression. The mode
8723 of REG says what kind of comparison we made.
8724
8725 DEST is the destination insn (i.e. the label), INSN is the source.
8726
8727 REVERSED is nonzero if we should reverse the sense of the comparison.
8728
8729 ANNUL is nonzero if we should generate an annulling branch. */
8730
8731 const char *
8732 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8733 int annul, rtx_insn *insn)
8734 {
8735 static char string[64];
8736 enum rtx_code code = GET_CODE (op);
8737 machine_mode mode = GET_MODE (XEXP (op, 0));
8738 rtx note;
8739 int far;
8740 char *p;
8741
8742 /* Branches on a register are limited to +-128KB. If it is too far away,
8743 change
8744
8745 brnz,pt %g1, .LC30
8746
8747 to
8748
8749 brz,pn %g1, .+12
8750 nop
8751 ba,pt %xcc, .LC30
8752
8753 and
8754
8755 brgez,a,pn %o1, .LC29
8756
8757 to
8758
8759 brlz,pt %o1, .+16
8760 nop
8761 ba,pt %xcc, .LC29 */
8762
8763 far = get_attr_length (insn) >= 3;
8764
8765 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8766 if (reversed ^ far)
8767 code = reverse_condition (code);
8768
8769 /* Only 64-bit versions of these instructions exist. */
8770 gcc_assert (mode == DImode);
8771
8772 /* Start by writing the branch condition. */
8773
8774 switch (code)
8775 {
8776 case NE:
8777 strcpy (string, "brnz");
8778 break;
8779
8780 case EQ:
8781 strcpy (string, "brz");
8782 break;
8783
8784 case GE:
8785 strcpy (string, "brgez");
8786 break;
8787
8788 case LT:
8789 strcpy (string, "brlz");
8790 break;
8791
8792 case LE:
8793 strcpy (string, "brlez");
8794 break;
8795
8796 case GT:
8797 strcpy (string, "brgz");
8798 break;
8799
8800 default:
8801 gcc_unreachable ();
8802 }
8803
8804 p = strchr (string, '\0');
8805
8806 /* Now add the annulling, reg, label, and nop. */
8807 if (annul && ! far)
8808 {
8809 strcpy (p, ",a");
8810 p += 2;
8811 }
8812
8813 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8814 {
8815 strcpy (p,
8816 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8817 >= profile_probability::even ()) ^ far)
8818 ? ",pt" : ",pn");
8819 p += 3;
8820 }
8821
8822 *p = p < string + 8 ? '\t' : ' ';
8823 p++;
8824 *p++ = '%';
8825 *p++ = '0' + reg;
8826 *p++ = ',';
8827 *p++ = ' ';
8828 if (far)
8829 {
8830 int veryfar = 1, delta;
8831
8832 if (INSN_ADDRESSES_SET_P ())
8833 {
8834 delta = (INSN_ADDRESSES (INSN_UID (dest))
8835 - INSN_ADDRESSES (INSN_UID (insn)));
8836 /* Leave some instructions for "slop". */
8837 if (delta >= -260000 && delta < 260000)
8838 veryfar = 0;
8839 }
8840
8841 strcpy (p, ".+12\n\t nop\n\t");
8842 /* Skip the next insn if requested or
8843 if we know that it will be a nop. */
8844 if (annul || ! final_sequence)
8845 p[3] = '6';
8846 p += 12;
8847 if (veryfar)
8848 {
8849 strcpy (p, "b\t");
8850 p += 2;
8851 }
8852 else
8853 {
8854 strcpy (p, "ba,pt\t%%xcc, ");
8855 p += 13;
8856 }
8857 }
8858 *p++ = '%';
8859 *p++ = 'l';
8860 *p++ = '0' + label;
8861 *p++ = '%';
8862 *p++ = '#';
8863 *p = '\0';
8864
8865 return string;
8866 }
8867
8868 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8869 Such instructions cannot be used in the delay slot of the return insn on V9.
8870 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
8871 counterparts. */
8872
8873 static int
8874 epilogue_renumber (register rtx *where, int test)
8875 {
8876 register const char *fmt;
8877 register int i;
8878 register enum rtx_code code;
8879
8880 if (*where == 0)
8881 return 0;
8882
8883 code = GET_CODE (*where);
8884
8885 switch (code)
8886 {
8887 case REG:
8888 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8889 return 1;
8890 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8891 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8892 /* fallthrough */
8893 case SCRATCH:
8894 case CC0:
8895 case PC:
8896 case CONST_INT:
8897 case CONST_WIDE_INT:
8898 case CONST_DOUBLE:
8899 return 0;
8900
8901 /* Do not replace the frame pointer with the stack pointer because
8902 it can cause the delayed instruction to load below the stack.
8903 This occurs when instructions like:
8904
8905 (set (reg/i:SI 24 %i0)
8906 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8907 (const_int -20 [0xffffffec])) 0))
8908
8909 are in the return delayed slot. */
8910 case PLUS:
8911 if (GET_CODE (XEXP (*where, 0)) == REG
8912 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8913 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8914 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8915 return 1;
8916 break;
8917
8918 case MEM:
8919 if (SPARC_STACK_BIAS
8920 && GET_CODE (XEXP (*where, 0)) == REG
8921 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8922 return 1;
8923 break;
8924
8925 default:
8926 break;
8927 }
8928
8929 fmt = GET_RTX_FORMAT (code);
8930
8931 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8932 {
8933 if (fmt[i] == 'E')
8934 {
8935 register int j;
8936 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8937 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8938 return 1;
8939 }
8940 else if (fmt[i] == 'e'
8941 && epilogue_renumber (&(XEXP (*where, i)), test))
8942 return 1;
8943 }
8944 return 0;
8945 }
8946 \f
8947 /* Leaf functions and non-leaf functions have different needs. */
8948
8949 static const int
8950 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8951
8952 static const int
8953 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8954
8955 static const int *const reg_alloc_orders[] = {
8956 reg_leaf_alloc_order,
8957 reg_nonleaf_alloc_order};
8958
8959 void
8960 order_regs_for_local_alloc (void)
8961 {
8962 static int last_order_nonleaf = 1;
8963
8964 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8965 {
8966 last_order_nonleaf = !last_order_nonleaf;
8967 memcpy ((char *) reg_alloc_order,
8968 (const char *) reg_alloc_orders[last_order_nonleaf],
8969 FIRST_PSEUDO_REGISTER * sizeof (int));
8970 }
8971 }
8972 \f
8973 /* Return 1 if REG and MEM are legitimate enough to allow the various
8974 MEM<-->REG splits to be run. */
8975
8976 int
8977 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8978 {
8979 /* Punt if we are here by mistake. */
8980 gcc_assert (reload_completed);
8981
8982 /* We must have an offsettable memory reference. */
8983 if (!offsettable_memref_p (mem))
8984 return 0;
8985
8986 /* If we have legitimate args for ldd/std, we do not want
8987 the split to happen. */
8988 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8989 return 0;
8990
8991 /* Success. */
8992 return 1;
8993 }
8994
8995 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8996
8997 void
8998 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8999 {
9000 rtx high_part = gen_highpart (mode, dest);
9001 rtx low_part = gen_lowpart (mode, dest);
9002 rtx word0 = adjust_address (src, mode, 0);
9003 rtx word1 = adjust_address (src, mode, 4);
9004
9005 if (reg_overlap_mentioned_p (high_part, word1))
9006 {
9007 emit_move_insn_1 (low_part, word1);
9008 emit_move_insn_1 (high_part, word0);
9009 }
9010 else
9011 {
9012 emit_move_insn_1 (high_part, word0);
9013 emit_move_insn_1 (low_part, word1);
9014 }
9015 }
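
/* For example (hypothetical registers): splitting a DImode load of [%o2]
   into the %o2/%o3 pair must load the low word first, because the high part
   %o2 is also the base register of word1:

     ld [%o2 + 4], %o3
     ld [%o2], %o2

   which is exactly the overlap case checked above. */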
9016
9017 /* Split a MEM <-- REG move into a pair of moves in MODE. */
9018
9019 void
9020 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
9021 {
9022 rtx word0 = adjust_address (dest, mode, 0);
9023 rtx word1 = adjust_address (dest, mode, 4);
9024 rtx high_part = gen_highpart (mode, src);
9025 rtx low_part = gen_lowpart (mode, src);
9026
9027 emit_move_insn_1 (word0, high_part);
9028 emit_move_insn_1 (word1, low_part);
9029 }
9030
9031 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
9032
9033 int
9034 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
9035 {
9036 /* Punt if we are here by mistake. */
9037 gcc_assert (reload_completed);
9038
9039 if (GET_CODE (reg1) == SUBREG)
9040 reg1 = SUBREG_REG (reg1);
9041 if (GET_CODE (reg1) != REG)
9042 return 0;
9043 const int regno1 = REGNO (reg1);
9044
9045 if (GET_CODE (reg2) == SUBREG)
9046 reg2 = SUBREG_REG (reg2);
9047 if (GET_CODE (reg2) != REG)
9048 return 0;
9049 const int regno2 = REGNO (reg2);
9050
9051 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
9052 return 1;
9053
9054 if (TARGET_VIS3)
9055 {
9056 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
9057 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
9058 return 1;
9059 }
9060
9061 return 0;
9062 }
9063
9064 /* Split a REG <--> REG move into a pair of moves in MODE. */
9065
9066 void
9067 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
9068 {
9069 rtx dest1 = gen_highpart (mode, dest);
9070 rtx dest2 = gen_lowpart (mode, dest);
9071 rtx src1 = gen_highpart (mode, src);
9072 rtx src2 = gen_lowpart (mode, src);
9073
9074 /* Now emit using the real source and destination we found, swapping
9075 the order if we detect overlap. */
9076 if (reg_overlap_mentioned_p (dest1, src2))
9077 {
9078 emit_move_insn_1 (dest2, src2);
9079 emit_move_insn_1 (dest1, src1);
9080 }
9081 else
9082 {
9083 emit_move_insn_1 (dest1, src1);
9084 emit_move_insn_1 (dest2, src2);
9085 }
9086 }
9087
9088 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
9089 This makes them candidates for using ldd and std insns.
9090
9091 Note reg1 and reg2 *must* be hard registers. */
9092
9093 int
9094 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
9095 {
9096 /* We might have been passed a SUBREG. */
9097 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
9098 return 0;
9099
9100 if (REGNO (reg1) % 2 != 0)
9101 return 0;
9102
9103 /* Integer ldd is deprecated in SPARC V9. */
9104 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
9105 return 0;
9106
9107 return (REGNO (reg1) == REGNO (reg2) - 1);
9108 }
9109
9110 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
9111 an ldd or std insn.
9112
9113 This can only happen when addr1 and addr2, the addresses in mem1
9114 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
9115 addr1 must also be aligned on a 64-bit boundary.
9116
9117 Also, if dependent_reg_rtx is not null, it should not be used to
9118 compute the address for mem1, i.e. we cannot optimize a sequence
9119 like:
9120 ld [%o0], %o0
9121 ld [%o0 + 4], %o1
9122 to
9123 ldd [%o0], %o0
9124 nor:
9125 ld [%g3 + 4], %g3
9126 ld [%g3], %g2
9127 to
9128 ldd [%g3], %g2
9129
9130 But, note that the transformation from:
9131 ld [%g2 + 4], %g3
9132 ld [%g2], %g2
9133 to
9134 ldd [%g2], %g2
9135 is perfectly fine. Thus, the peephole2 patterns always pass us
9136 the destination register of the first load, never the second one.
9137
9138 For stores we don't have a similar problem, so dependent_reg_rtx is
9139 NULL_RTX. */
9140
9141 int
9142 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
9143 {
9144 rtx addr1, addr2;
9145 unsigned int reg1;
9146 HOST_WIDE_INT offset1;
9147
9148 /* The mems cannot be volatile. */
9149 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
9150 return 0;
9151
9152 /* MEM1 should be aligned on a 64-bit boundary. */
9153 if (MEM_ALIGN (mem1) < 64)
9154 return 0;
9155
9156 addr1 = XEXP (mem1, 0);
9157 addr2 = XEXP (mem2, 0);
9158
9159 /* Extract a register number and offset (if used) from the first addr. */
9160 if (GET_CODE (addr1) == PLUS)
9161 {
9162 /* If not a REG, return zero. */
9163 if (GET_CODE (XEXP (addr1, 0)) != REG)
9164 return 0;
9165 else
9166 {
9167 reg1 = REGNO (XEXP (addr1, 0));
9168 /* The offset must be constant! */
9169 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
9170 return 0;
9171 offset1 = INTVAL (XEXP (addr1, 1));
9172 }
9173 }
9174 else if (GET_CODE (addr1) != REG)
9175 return 0;
9176 else
9177 {
9178 reg1 = REGNO (addr1);
9179 /* This was a simple (mem (reg)) expression. Offset is 0. */
9180 offset1 = 0;
9181 }
9182
9183 /* Make sure the second address has the form (plus (reg) (const_int)). */
9184 if (GET_CODE (addr2) != PLUS)
9185 return 0;
9186
9187 if (GET_CODE (XEXP (addr2, 0)) != REG
9188 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
9189 return 0;
9190
9191 if (reg1 != REGNO (XEXP (addr2, 0)))
9192 return 0;
9193
9194 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
9195 return 0;
9196
9197 /* The first offset must be evenly divisible by 8 to ensure the
9198 address is 64-bit aligned. */
9199 if (offset1 % 8 != 0)
9200 return 0;
9201
9202 /* The offset for the second addr must be 4 more than the first addr. */
9203 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
9204 return 0;
9205
9206 /* All the tests passed. addr1 and addr2 are valid for ldd and std
9207 instructions. */
9208 return 1;
9209 }
9210
9211 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
9212
9213 rtx
9214 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
9215 {
9216 rtx x = widen_memory_access (mem1, mode, 0);
9217 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
9218 return x;
9219 }
9220
9221 /* Return 1 if reg is a pseudo, or is the first register in
9222 a hard register pair. This makes it suitable for use in
9223 ldd and std insns. */
9224
9225 int
9226 register_ok_for_ldd (rtx reg)
9227 {
9228 /* We might have been passed a SUBREG. */
9229 if (!REG_P (reg))
9230 return 0;
9231
9232 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
9233 return (REGNO (reg) % 2 == 0);
9234
9235 return 1;
9236 }
9237
9238 /* Return 1 if OP, a MEM, has an address which is known to be
9239 aligned to an 8-byte boundary. */
9240
9241 int
9242 memory_ok_for_ldd (rtx op)
9243 {
9244 /* In 64-bit mode, we assume that the address is word-aligned. */
9245 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
9246 return 0;
9247
9248 if (! can_create_pseudo_p ()
9249 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
9250 return 0;
9251
9252 return 1;
9253 }
9254 \f
9255 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
9256
9257 static bool
9258 sparc_print_operand_punct_valid_p (unsigned char code)
9259 {
9260 if (code == '#'
9261 || code == '*'
9262 || code == '('
9263 || code == ')'
9264 || code == '_'
9265 || code == '&')
9266 return true;
9267
9268 return false;
9269 }
9270
9271 /* Implement TARGET_PRINT_OPERAND.
9272 Print operand X (an rtx) in assembler syntax to file FILE.
9273 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
9274 For `%' followed by punctuation, CODE is the punctuation and X is null. */
9275
9276 static void
9277 sparc_print_operand (FILE *file, rtx x, int code)
9278 {
9279 const char *s;
9280
9281 switch (code)
9282 {
9283 case '#':
9284 /* Output an insn in a delay slot. */
9285 if (final_sequence)
9286 sparc_indent_opcode = 1;
9287 else
9288 fputs ("\n\t nop", file);
9289 return;
9290 case '*':
9291 /* Output an annul flag if there's nothing for the delay slot and we
9292 are optimizing. This is always used with '(' below.
9293 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
9294 this is a dbx bug. So, we only do this when optimizing.
9295 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
9296 Always emit a nop in case the next instruction is a branch. */
9297 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
9298 fputs (",a", file);
9299 return;
9300 case '(':
9301 /* Output a 'nop' if there's nothing for the delay slot and we are
9302 not optimizing. This is always used with '*' above. */
9303 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
9304 fputs ("\n\t nop", file);
9305 else if (final_sequence)
9306 sparc_indent_opcode = 1;
9307 return;
9308 case ')':
9309 /* Output the right displacement from the saved PC on function return.
9310 The caller may have placed an "unimp" insn immediately after the call
9311 so we have to account for it. This insn is used in the 32-bit ABI
9312 when calling a function that returns a non zero-sized structure. The
9313 64-bit ABI doesn't have it. Be careful to have this test be the same
9314 as that for the call. The exception is when sparc_std_struct_return
9315 is enabled, the psABI is followed exactly and the adjustment is made
9316 by the code in sparc_struct_value_rtx. The call emitted is the same
9317 when sparc_std_struct_return is enabled. */
9318 if (!TARGET_ARCH64
9319 && cfun->returns_struct
9320 && !sparc_std_struct_return
9321 && DECL_SIZE (DECL_RESULT (current_function_decl))
9322 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
9323 == INTEGER_CST
9324 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
9325 fputs ("12", file);
9326 else
9327 fputc ('8', file);
9328 return;
9329 case '_':
9330 /* Output the Embedded Medium/Anywhere code model base register. */
9331 fputs (EMBMEDANY_BASE_REG, file);
9332 return;
9333 case '&':
9334 /* Print some local dynamic TLS name. */
9335 if (const char *name = get_some_local_dynamic_name ())
9336 assemble_name (file, name);
9337 else
9338 output_operand_lossage ("'%%&' used without any "
9339 "local dynamic TLS references");
9340 return;
9341
9342 case 'Y':
9343 /* Adjust the operand to take into account a RESTORE operation. */
9344 if (GET_CODE (x) == CONST_INT)
9345 break;
9346 else if (GET_CODE (x) != REG)
9347 output_operand_lossage ("invalid %%Y operand");
9348 else if (REGNO (x) < 8)
9349 fputs (reg_names[REGNO (x)], file);
9350 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9351 fputs (reg_names[REGNO (x)-16], file);
9352 else
9353 output_operand_lossage ("invalid %%Y operand");
9354 return;
9355 case 'L':
9356 /* Print out the low order register name of a register pair. */
9357 if (WORDS_BIG_ENDIAN)
9358 fputs (reg_names[REGNO (x)+1], file);
9359 else
9360 fputs (reg_names[REGNO (x)], file);
9361 return;
9362 case 'H':
9363 /* Print out the high order register name of a register pair. */
9364 if (WORDS_BIG_ENDIAN)
9365 fputs (reg_names[REGNO (x)], file);
9366 else
9367 fputs (reg_names[REGNO (x)+1], file);
9368 return;
9369 case 'R':
9370 /* Print out the second register name of a register pair or quad.
9371 I.e., R (%o0) => %o1. */
9372 fputs (reg_names[REGNO (x)+1], file);
9373 return;
9374 case 'S':
9375 /* Print out the third register name of a register quad.
9376 I.e., S (%o0) => %o2. */
9377 fputs (reg_names[REGNO (x)+2], file);
9378 return;
9379 case 'T':
9380 /* Print out the fourth register name of a register quad.
9381 I.e., T (%o0) => %o3. */
9382 fputs (reg_names[REGNO (x)+3], file);
9383 return;
9384 case 'x':
9385 /* Print a condition code register. */
9386 if (REGNO (x) == SPARC_ICC_REG)
9387 {
9388 switch (GET_MODE (x))
9389 {
9390 case E_CCmode:
9391 case E_CCNZmode:
9392 case E_CCCmode:
9393 case E_CCVmode:
9394 s = "%icc";
9395 break;
9396 case E_CCXmode:
9397 case E_CCXNZmode:
9398 case E_CCXCmode:
9399 case E_CCXVmode:
9400 s = "%xcc";
9401 break;
9402 default:
9403 gcc_unreachable ();
9404 }
9405 fputs (s, file);
9406 }
9407 else
9408 /* %fccN register */
9409 fputs (reg_names[REGNO (x)], file);
9410 return;
9411 case 'm':
9412 /* Print the operand's address only. */
9413 output_address (GET_MODE (x), XEXP (x, 0));
9414 return;
9415 case 'r':
9416 /* In this case we need a register. Use %g0 if the
9417 operand is const0_rtx. */
9418 if (x == const0_rtx
9419 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9420 {
9421 fputs ("%g0", file);
9422 return;
9423 }
9424 else
9425 break;
9426
9427 case 'A':
9428 switch (GET_CODE (x))
9429 {
9430 case IOR:
9431 s = "or";
9432 break;
9433 case AND:
9434 s = "and";
9435 break;
9436 case XOR:
9437 s = "xor";
9438 break;
9439 default:
9440 output_operand_lossage ("invalid %%A operand");
9441 s = "";
9442 break;
9443 }
9444 fputs (s, file);
9445 return;
9446
9447 case 'B':
9448 switch (GET_CODE (x))
9449 {
9450 case IOR:
9451 s = "orn";
9452 break;
9453 case AND:
9454 s = "andn";
9455 break;
9456 case XOR:
9457 s = "xnor";
9458 break;
9459 default:
9460 output_operand_lossage ("invalid %%B operand");
9461 s = "";
9462 break;
9463 }
9464 fputs (s, file);
9465 return;
9466
9467 /* This is used by the conditional move instructions. */
9468 case 'C':
9469 {
9470 machine_mode mode = GET_MODE (XEXP (x, 0));
9471 switch (GET_CODE (x))
9472 {
9473 case NE:
9474 if (mode == CCVmode || mode == CCXVmode)
9475 s = "vs";
9476 else
9477 s = "ne";
9478 break;
9479 case EQ:
9480 if (mode == CCVmode || mode == CCXVmode)
9481 s = "vc";
9482 else
9483 s = "e";
9484 break;
9485 case GE:
9486 if (mode == CCNZmode || mode == CCXNZmode)
9487 s = "pos";
9488 else
9489 s = "ge";
9490 break;
9491 case GT:
9492 s = "g";
9493 break;
9494 case LE:
9495 s = "le";
9496 break;
9497 case LT:
9498 if (mode == CCNZmode || mode == CCXNZmode)
9499 s = "neg";
9500 else
9501 s = "l";
9502 break;
9503 case GEU:
9504 s = "geu";
9505 break;
9506 case GTU:
9507 s = "gu";
9508 break;
9509 case LEU:
9510 s = "leu";
9511 break;
9512 case LTU:
9513 s = "lu";
9514 break;
9515 case LTGT:
9516 s = "lg";
9517 break;
9518 case UNORDERED:
9519 s = "u";
9520 break;
9521 case ORDERED:
9522 s = "o";
9523 break;
9524 case UNLT:
9525 s = "ul";
9526 break;
9527 case UNLE:
9528 s = "ule";
9529 break;
9530 case UNGT:
9531 s = "ug";
9532 break;
9533 case UNGE:
9534 s = "uge";
9535 break;
9536 case UNEQ:
9537 s = "ue";
9538 break;
9539 default:
9540 output_operand_lossage ("invalid %%C operand");
9541 s = "";
9542 break;
9543 }
9544 fputs (s, file);
9545 return;
9546 }
9547
9548 /* These are used by the movr instruction pattern. */
9549 case 'D':
9550 {
9551 switch (GET_CODE (x))
9552 {
9553 case NE:
9554 s = "ne";
9555 break;
9556 case EQ:
9557 s = "e";
9558 break;
9559 case GE:
9560 s = "gez";
9561 break;
9562 case LT:
9563 s = "lz";
9564 break;
9565 case LE:
9566 s = "lez";
9567 break;
9568 case GT:
9569 s = "gz";
9570 break;
9571 default:
9572 output_operand_lossage ("invalid %%D operand");
9573 s = "";
9574 break;
9575 }
9576 fputs (s, file);
9577 return;
9578 }
9579
9580 case 'b':
9581 {
9582 /* Print a sign-extended character. */
9583 int i = trunc_int_for_mode (INTVAL (x), QImode);
9584 fprintf (file, "%d", i);
9585 return;
9586 }
9587
9588 case 'f':
9589 /* Operand must be a MEM; write its address. */
9590 if (GET_CODE (x) != MEM)
9591 output_operand_lossage ("invalid %%f operand");
9592 output_address (GET_MODE (x), XEXP (x, 0));
9593 return;
9594
9595 case 's':
9596 {
9597 /* Print a sign-extended 32-bit value. */
9598 HOST_WIDE_INT i;
9599 if (GET_CODE(x) == CONST_INT)
9600 i = INTVAL (x);
9601 else
9602 {
9603 output_operand_lossage ("invalid %%s operand");
9604 return;
9605 }
9606 i = trunc_int_for_mode (i, SImode);
9607 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9608 return;
9609 }
9610
9611 case 0:
9612 /* Do nothing special. */
9613 break;
9614
9615 default:
9616 /* Undocumented flag. */
9617 output_operand_lossage ("invalid operand output code");
9618 }
9619
9620 if (GET_CODE (x) == REG)
9621 fputs (reg_names[REGNO (x)], file);
9622 else if (GET_CODE (x) == MEM)
9623 {
9624 fputc ('[', file);
9625 /* Poor Sun assembler doesn't understand absolute addressing. */
9626 if (CONSTANT_P (XEXP (x, 0)))
9627 fputs ("%g0+", file);
9628 output_address (GET_MODE (x), XEXP (x, 0));
9629 fputc (']', file);
9630 }
9631 else if (GET_CODE (x) == HIGH)
9632 {
9633 fputs ("%hi(", file);
9634 output_addr_const (file, XEXP (x, 0));
9635 fputc (')', file);
9636 }
9637 else if (GET_CODE (x) == LO_SUM)
9638 {
9639 sparc_print_operand (file, XEXP (x, 0), 0);
9640 if (TARGET_CM_MEDMID)
9641 fputs ("+%l44(", file);
9642 else
9643 fputs ("+%lo(", file);
9644 output_addr_const (file, XEXP (x, 1));
9645 fputc (')', file);
9646 }
9647 else if (GET_CODE (x) == CONST_DOUBLE)
9648 output_operand_lossage ("floating-point constant not a valid immediate operand");
9649 else
9650 output_addr_const (file, x);
9651 }
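/* Illustrative example, not taken from the original sources: on this
   big-endian port WORDS_BIG_ENDIAN is set, so for a DImode value living
   in the register pair starting at %o0, an output template using %H0
   prints "%o0" (the high word) and %L0 prints "%o1" (the low word),
   while %r1 prints "%g0" whenever operand 1 is the constant zero.  */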
9652
9653 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9654
9655 static void
9656 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9657 {
9658 register rtx base, index = 0;
9659 int offset = 0;
9660 register rtx addr = x;
9661
9662 if (REG_P (addr))
9663 fputs (reg_names[REGNO (addr)], file);
9664 else if (GET_CODE (addr) == PLUS)
9665 {
9666 if (CONST_INT_P (XEXP (addr, 0)))
9667 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9668 else if (CONST_INT_P (XEXP (addr, 1)))
9669 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9670 else
9671 base = XEXP (addr, 0), index = XEXP (addr, 1);
9672 if (GET_CODE (base) == LO_SUM)
9673 {
9674 gcc_assert (USE_AS_OFFSETABLE_LO10
9675 && TARGET_ARCH64
9676 && ! TARGET_CM_MEDMID);
9677 output_operand (XEXP (base, 0), 0);
9678 fputs ("+%lo(", file);
9679 output_address (VOIDmode, XEXP (base, 1));
9680 fprintf (file, ")+%d", offset);
9681 }
9682 else
9683 {
9684 fputs (reg_names[REGNO (base)], file);
9685 if (index == 0)
9686 fprintf (file, "%+d", offset);
9687 else if (REG_P (index))
9688 fprintf (file, "+%s", reg_names[REGNO (index)]);
9689 else if (GET_CODE (index) == SYMBOL_REF
9690 || GET_CODE (index) == LABEL_REF
9691 || GET_CODE (index) == CONST)
9692 fputc ('+', file), output_addr_const (file, index);
9693 else gcc_unreachable ();
9694 }
9695 }
9696 else if (GET_CODE (addr) == MINUS
9697 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9698 {
9699 output_addr_const (file, XEXP (addr, 0));
9700 fputs ("-(", file);
9701 output_addr_const (file, XEXP (addr, 1));
9702 fputs ("-.)", file);
9703 }
9704 else if (GET_CODE (addr) == LO_SUM)
9705 {
9706 output_operand (XEXP (addr, 0), 0);
9707 if (TARGET_CM_MEDMID)
9708 fputs ("+%l44(", file);
9709 else
9710 fputs ("+%lo(", file);
9711 output_address (VOIDmode, XEXP (addr, 1));
9712 fputc (')', file);
9713 }
9714 else if (flag_pic
9715 && GET_CODE (addr) == CONST
9716 && GET_CODE (XEXP (addr, 0)) == MINUS
9717 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9718 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9719 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9720 {
9721 addr = XEXP (addr, 0);
9722 output_addr_const (file, XEXP (addr, 0));
9723 /* Group the args of the second CONST in parentheses. */
9724 fputs ("-(", file);
9725 /* Skip past the second CONST--it does nothing for us. */
9726 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9727 /* Close the parenthesis. */
9728 fputc (')', file);
9729 }
9730 else
9731 {
9732 output_addr_const (file, addr);
9733 }
9734 }
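/* A small example, for illustration only: the address
   (plus (reg %fp) (const_int -8)) comes out as "%fp-8", while
   (plus (reg %o0) (reg %o1)) comes out as "%o0+%o1", matching the
   reg+imm13 and reg+reg addressing forms accepted by the assembler.  */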
9735 \f
9736 /* Target hook for assembling integer objects. The sparc version has
9737 special handling for aligned DI-mode objects. */
9738
9739 static bool
9740 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9741 {
9742 /* ??? We only output .xword's for symbols and only then in environments
9743 where the assembler can handle them. */
9744 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9745 {
9746 if (TARGET_V9)
9747 {
9748 assemble_integer_with_op ("\t.xword\t", x);
9749 return true;
9750 }
9751 else
9752 {
9753 assemble_aligned_integer (4, const0_rtx);
9754 assemble_aligned_integer (4, x);
9755 return true;
9756 }
9757 }
9758 return default_assemble_integer (x, size, aligned_p);
9759 }
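/* For example (illustrative): on TARGET_V9 an aligned 8-byte reference to
   a symbol "foo" is emitted as "\t.xword\tfoo", whereas a pre-V9 assembler
   gets two aligned 4-byte words, a zero word followed by the 32-bit value,
   which yields the same big-endian image when the value fits in 32 bits.  */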
9760 \f
9761 /* Return the value of a code used in the .proc pseudo-op that says
9762 what kind of result this function returns. For non-C types, we pick
9763 the closest C type. */
9764
9765 #ifndef SHORT_TYPE_SIZE
9766 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9767 #endif
9768
9769 #ifndef INT_TYPE_SIZE
9770 #define INT_TYPE_SIZE BITS_PER_WORD
9771 #endif
9772
9773 #ifndef LONG_TYPE_SIZE
9774 #define LONG_TYPE_SIZE BITS_PER_WORD
9775 #endif
9776
9777 #ifndef LONG_LONG_TYPE_SIZE
9778 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9779 #endif
9780
9781 #ifndef FLOAT_TYPE_SIZE
9782 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9783 #endif
9784
9785 #ifndef DOUBLE_TYPE_SIZE
9786 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9787 #endif
9788
9789 #ifndef LONG_DOUBLE_TYPE_SIZE
9790 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9791 #endif
9792
9793 unsigned long
9794 sparc_type_code (register tree type)
9795 {
9796 register unsigned long qualifiers = 0;
9797 register unsigned shift;
9798
9799 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9800 setting more, since some assemblers will give an error for this. Also,
9801 we must be careful to avoid shifts of 32 bits or more to avoid getting
9802 unpredictable results. */
9803
9804 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9805 {
9806 switch (TREE_CODE (type))
9807 {
9808 case ERROR_MARK:
9809 return qualifiers;
9810
9811 case ARRAY_TYPE:
9812 qualifiers |= (3 << shift);
9813 break;
9814
9815 case FUNCTION_TYPE:
9816 case METHOD_TYPE:
9817 qualifiers |= (2 << shift);
9818 break;
9819
9820 case POINTER_TYPE:
9821 case REFERENCE_TYPE:
9822 case OFFSET_TYPE:
9823 qualifiers |= (1 << shift);
9824 break;
9825
9826 case RECORD_TYPE:
9827 return (qualifiers | 8);
9828
9829 case UNION_TYPE:
9830 case QUAL_UNION_TYPE:
9831 return (qualifiers | 9);
9832
9833 case ENUMERAL_TYPE:
9834 return (qualifiers | 10);
9835
9836 case VOID_TYPE:
9837 return (qualifiers | 16);
9838
9839 case INTEGER_TYPE:
9840 /* If this is a range type, consider it to be the underlying
9841 type. */
9842 if (TREE_TYPE (type) != 0)
9843 break;
9844
9845 /* Carefully distinguish all the standard types of C,
9846 without messing up if the language is not C. We do this by
9847 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9848 look at both the names and the above fields, but that's redundant.
9849 Any type whose size is between two C types will be considered
9850 to be the wider of the two types. Also, we do not have a
9851 special code to use for "long long", so anything wider than
9852 long is treated the same. Note that we can't distinguish
9853 between "int" and "long" in this code if they are the same
9854 size, but that's fine, since neither can the assembler. */
9855
9856 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9857 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9858
9859 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9860 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9861
9862 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9863 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9864
9865 else
9866 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9867
9868 case REAL_TYPE:
9869 /* If this is a range type, consider it to be the underlying
9870 type. */
9871 if (TREE_TYPE (type) != 0)
9872 break;
9873
9874 /* Carefully distinguish all the standard types of C,
9875 without messing up if the language is not C. */
9876
9877 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9878 return (qualifiers | 6);
9879
9880 else
9881 return (qualifiers | 7);
9882
9883 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9884 /* ??? We need to distinguish between double and float complex types,
9885 but I don't know how yet because I can't reach this code from
9886 existing front-ends. */
9887 return (qualifiers | 7); /* Who knows? */
9888
9889 case VECTOR_TYPE:
9890 case BOOLEAN_TYPE: /* Boolean truth value type. */
9891 case LANG_TYPE:
9892 case NULLPTR_TYPE:
9893 return qualifiers;
9894
9895 default:
9896 gcc_unreachable (); /* Not a type! */
9897 }
9898 }
9899
9900 return qualifiers;
9901 }
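/* Worked example, not present in the original sources: for the type
   "unsigned int *" the POINTER_TYPE contributes 1 << 6, the loop then
   advances to the pointed-to unsigned int, whose precision matches
   INT_TYPE_SIZE, so the final result is (1 << 6) | 14 == 0x4e.  */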
9902 \f
9903 /* Nested function support. */
9904
9905 /* Emit RTL insns to initialize the variable parts of a trampoline.
9906 FNADDR is an RTX for the address of the function's pure code.
9907 CXT is an RTX for the static chain value for the function.
9908
9909 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9910 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9911 (to store insns). This is a bit excessive. Perhaps a different
9912 mechanism would be better here.
9913
9914 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9915
9916 static void
9917 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9918 {
9919 /* SPARC 32-bit trampoline:
9920
9921 sethi %hi(fn), %g1
9922 sethi %hi(static), %g2
9923 jmp %g1+%lo(fn)
9924 or %g2, %lo(static), %g2
9925
9926 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9927 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9928 */
9929
9930 emit_move_insn
9931 (adjust_address (m_tramp, SImode, 0),
9932 expand_binop (SImode, ior_optab,
9933 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9934 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9935 NULL_RTX, 1, OPTAB_DIRECT));
9936
9937 emit_move_insn
9938 (adjust_address (m_tramp, SImode, 4),
9939 expand_binop (SImode, ior_optab,
9940 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9941 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9942 NULL_RTX, 1, OPTAB_DIRECT));
9943
9944 emit_move_insn
9945 (adjust_address (m_tramp, SImode, 8),
9946 expand_binop (SImode, ior_optab,
9947 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9948 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9949 NULL_RTX, 1, OPTAB_DIRECT));
9950
9951 emit_move_insn
9952 (adjust_address (m_tramp, SImode, 12),
9953 expand_binop (SImode, ior_optab,
9954 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9955 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9956 NULL_RTX, 1, OPTAB_DIRECT));
9957
9958 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9959 aligned on a 16 byte boundary so one flush clears it all. */
9960 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9961 if (sparc_cpu != PROCESSOR_ULTRASPARC
9962 && sparc_cpu != PROCESSOR_ULTRASPARC3
9963 && sparc_cpu != PROCESSOR_NIAGARA
9964 && sparc_cpu != PROCESSOR_NIAGARA2
9965 && sparc_cpu != PROCESSOR_NIAGARA3
9966 && sparc_cpu != PROCESSOR_NIAGARA4
9967 && sparc_cpu != PROCESSOR_NIAGARA7
9968 && sparc_cpu != PROCESSOR_M8)
9969 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9970
9971 /* Call __enable_execute_stack after writing onto the stack to make sure
9972 the stack address is accessible. */
9973 #ifdef HAVE_ENABLE_EXECUTE_STACK
9974 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9975 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9976 #endif
9977
9978 }
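/* Worked example with made-up addresses: for fn == 0x00012340 and a static
   chain of 0x00045678, the four SImode stores above produce
   0x03000048 (sethi %hi(fn), %g1), 0x05000115 (sethi %hi(static), %g2),
   0x81c06340 (jmp %g1+%lo(fn)) and 0x8410a278 (or %g2, %lo(static), %g2),
   i.e. the skeleton opcodes ORed with the high 22 or low 10 address bits.  */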
9979
9980 /* The 64-bit version is simpler because it makes more sense to load the
9981 values as "immediate" data out of the trampoline. It's also easier since
9982 we can read the PC without clobbering a register. */
9983
9984 static void
9985 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9986 {
9987 /* SPARC 64-bit trampoline:
9988
9989 rd %pc, %g1
9990 ldx [%g1+24], %g5
9991 jmp %g5
9992 ldx [%g1+16], %g5
9993 +16 bytes data
9994 */
9995
9996 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9997 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9998 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9999 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
10000 emit_move_insn (adjust_address (m_tramp, SImode, 8),
10001 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
10002 emit_move_insn (adjust_address (m_tramp, SImode, 12),
10003 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
10004 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
10005 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
10006 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
10007
10008 if (sparc_cpu != PROCESSOR_ULTRASPARC
10009 && sparc_cpu != PROCESSOR_ULTRASPARC3
10010 && sparc_cpu != PROCESSOR_NIAGARA
10011 && sparc_cpu != PROCESSOR_NIAGARA2
10012 && sparc_cpu != PROCESSOR_NIAGARA3
10013 && sparc_cpu != PROCESSOR_NIAGARA4
10014 && sparc_cpu != PROCESSOR_NIAGARA7
10015 && sparc_cpu != PROCESSOR_M8)
10016 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
10017
10018 /* Call __enable_execute_stack after writing onto the stack to make sure
10019 the stack address is accessible. */
10020 #ifdef HAVE_ENABLE_EXECUTE_STACK
10021 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10022 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10023 #endif
10024 }
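/* Resulting 32-byte trampoline layout, for reference: bytes 0-15 hold the
   four fixed instructions above, bytes 16-23 the static chain value and
   bytes 24-31 the target address, which "ldx [%g1+16]" and "ldx [%g1+24]"
   pick up relative to the %pc value read into %g1.  */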
10025
10026 /* Worker for TARGET_TRAMPOLINE_INIT. */
10027
10028 static void
10029 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10030 {
10031 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
10032 cxt = force_reg (Pmode, cxt);
10033 if (TARGET_ARCH64)
10034 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
10035 else
10036 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
10037 }
10038 \f
10039 /* Adjust the cost of a scheduling dependency. Return the new cost of
10040 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
10041
10042 static int
10043 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
10044 int cost)
10045 {
10046 enum attr_type insn_type;
10047
10048 if (recog_memoized (insn) < 0)
10049 return cost;
10050
10051 insn_type = get_attr_type (insn);
10052
10053 if (dep_type == 0)
10054 {
10055 /* Data dependency; DEP_INSN writes a register that INSN reads some
10056 cycles later. */
10057
10058 /* If a load, then the dependence must be on the memory address;
10059 add an extra "cycle". Note that the cost could be two cycles
10060 if the reg was written late in an instruction group; we can't tell
10061 here. */
10062 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
10063 return cost + 3;
10064
10065 /* Get the delay only if the address of the store is the dependence. */
10066 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
10067 {
10068 rtx pat = PATTERN(insn);
10069 rtx dep_pat = PATTERN (dep_insn);
10070
10071 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10072 return cost; /* This should not happen! */
10073
10074 /* The dependency between the two instructions was on the data that
10075 is being stored. Assume that this implies that the address of the
10076 store is not dependent. */
10077 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10078 return cost;
10079
10080 return cost + 3; /* An approximation. */
10081 }
10082
10083 /* A shift instruction cannot receive its data from an instruction
10084 in the same cycle; add a one cycle penalty. */
10085 if (insn_type == TYPE_SHIFT)
10086 return cost + 3; /* Split before cascade into shift. */
10087 }
10088 else
10089 {
10090 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
10091 INSN writes some cycles later. */
10092
10093 /* These are only significant for the fpu unit; writing a fp reg before
10094 the fpu has finished with it stalls the processor. */
10095
10096 /* Reusing an integer register causes no problems. */
10097 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10098 return 0;
10099 }
10100
10101 return cost;
10102 }
10103
10104 static int
10105 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10106 int cost)
10107 {
10108 enum attr_type insn_type, dep_type;
10109 rtx pat = PATTERN(insn);
10110 rtx dep_pat = PATTERN (dep_insn);
10111
10112 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10113 return cost;
10114
10115 insn_type = get_attr_type (insn);
10116 dep_type = get_attr_type (dep_insn);
10117
10118 switch (dtype)
10119 {
10120 case 0:
10121 /* Data dependency; DEP_INSN writes a register that INSN reads some
10122 cycles later. */
10123
10124 switch (insn_type)
10125 {
10126 case TYPE_STORE:
10127 case TYPE_FPSTORE:
10128 /* Get the delay iff the address of the store is the dependence. */
10129 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10130 return cost;
10131
10132 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10133 return cost;
10134 return cost + 3;
10135
10136 case TYPE_LOAD:
10137 case TYPE_SLOAD:
10138 case TYPE_FPLOAD:
10139 /* If a load, then the dependence must be on the memory address. If
10140 the addresses aren't equal, then it might be a false dependency. */
10141 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
10142 {
10143 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
10144 || GET_CODE (SET_DEST (dep_pat)) != MEM
10145 || GET_CODE (SET_SRC (pat)) != MEM
10146 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
10147 XEXP (SET_SRC (pat), 0)))
10148 return cost + 2;
10149
10150 return cost + 8;
10151 }
10152 break;
10153
10154 case TYPE_BRANCH:
10155 /* Compare to branch latency is 0. There is no benefit from
10156 separating compare and branch. */
10157 if (dep_type == TYPE_COMPARE)
10158 return 0;
10159 /* Floating point compare to branch latency is less than
10160 compare to conditional move. */
10161 if (dep_type == TYPE_FPCMP)
10162 return cost - 1;
10163 break;
10164 default:
10165 break;
10166 }
10167 break;
10168
10169 case REG_DEP_ANTI:
10170 /* Anti-dependencies only penalize the fpu unit. */
10171 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10172 return 0;
10173 break;
10174
10175 default:
10176 break;
10177 }
10178
10179 return cost;
10180 }
10181
10182 static int
10183 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
10184 unsigned int)
10185 {
10186 switch (sparc_cpu)
10187 {
10188 case PROCESSOR_SUPERSPARC:
10189 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
10190 break;
10191 case PROCESSOR_HYPERSPARC:
10192 case PROCESSOR_SPARCLITE86X:
10193 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
10194 break;
10195 default:
10196 break;
10197 }
10198 return cost;
10199 }
10200
10201 static void
10202 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
10203 int sched_verbose ATTRIBUTE_UNUSED,
10204 int max_ready ATTRIBUTE_UNUSED)
10205 {}
10206
10207 static int
10208 sparc_use_sched_lookahead (void)
10209 {
10210 switch (sparc_cpu)
10211 {
10212 case PROCESSOR_ULTRASPARC:
10213 case PROCESSOR_ULTRASPARC3:
10214 return 4;
10215 case PROCESSOR_SUPERSPARC:
10216 case PROCESSOR_HYPERSPARC:
10217 case PROCESSOR_SPARCLITE86X:
10218 return 3;
10219 case PROCESSOR_NIAGARA4:
10220 case PROCESSOR_NIAGARA7:
10221 case PROCESSOR_M8:
10222 return 2;
10223 case PROCESSOR_NIAGARA:
10224 case PROCESSOR_NIAGARA2:
10225 case PROCESSOR_NIAGARA3:
10226 default:
10227 return 0;
10228 }
10229 }
10230
10231 static int
10232 sparc_issue_rate (void)
10233 {
10234 switch (sparc_cpu)
10235 {
10236 case PROCESSOR_ULTRASPARC:
10237 case PROCESSOR_ULTRASPARC3:
10238 case PROCESSOR_M8:
10239 return 4;
10240 case PROCESSOR_SUPERSPARC:
10241 return 3;
10242 case PROCESSOR_HYPERSPARC:
10243 case PROCESSOR_SPARCLITE86X:
10244 case PROCESSOR_V9:
10245 /* Assume V9 processors are capable of at least dual-issue. */
10246 case PROCESSOR_NIAGARA4:
10247 case PROCESSOR_NIAGARA7:
10248 return 2;
10249 case PROCESSOR_NIAGARA:
10250 case PROCESSOR_NIAGARA2:
10251 case PROCESSOR_NIAGARA3:
10252 default:
10253 return 1;
10254 }
10255 }
10256
10257 int
10258 sparc_branch_cost (bool speed_p, bool predictable_p)
10259 {
10260 if (!speed_p)
10261 return 2;
10262
10263 /* For pre-V9 processors we use a single value (usually 3) to take into
10264 account the potential annulling of the delay slot (which ends up being
10265 a bubble in the pipeline slot) plus a cycle to take into consideration
10266 the instruction cache effects.
10267
10268 On V9 and later processors, which have branch prediction facilities,
10269 we take into account whether the branch is (easily) predictable. */
10270 const int cost = sparc_costs->branch_cost;
10271
10272 switch (sparc_cpu)
10273 {
10274 case PROCESSOR_V9:
10275 case PROCESSOR_ULTRASPARC:
10276 case PROCESSOR_ULTRASPARC3:
10277 case PROCESSOR_NIAGARA:
10278 case PROCESSOR_NIAGARA2:
10279 case PROCESSOR_NIAGARA3:
10280 case PROCESSOR_NIAGARA4:
10281 case PROCESSOR_NIAGARA7:
10282 case PROCESSOR_M8:
10283 return cost + (predictable_p ? 0 : 2);
10284
10285 default:
10286 return cost;
10287 }
10288 }
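/* Arithmetic sketch, assuming a hypothetical branch_cost of 3 in
   sparc_costs: when optimizing for speed on an UltraSPARC-class CPU a
   predictable branch is costed at 3 and an unpredictable one at 5,
   whereas any branch costs 2 when optimizing for size.  */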
10289
10290 static int
10291 set_extends (rtx_insn *insn)
10292 {
10293 register rtx pat = PATTERN (insn);
10294
10295 switch (GET_CODE (SET_SRC (pat)))
10296 {
10297 /* Load and some shift instructions zero extend. */
10298 case MEM:
10299 case ZERO_EXTEND:
10300 /* sethi clears the high bits */
10301 case HIGH:
10302 /* LO_SUM is used with sethi. sethi cleared the high
10303 bits and the values used with lo_sum are positive */
10304 case LO_SUM:
10305 /* Store flag stores 0 or 1 */
10306 case LT: case LTU:
10307 case GT: case GTU:
10308 case LE: case LEU:
10309 case GE: case GEU:
10310 case EQ:
10311 case NE:
10312 return 1;
10313 case AND:
10314 {
10315 rtx op0 = XEXP (SET_SRC (pat), 0);
10316 rtx op1 = XEXP (SET_SRC (pat), 1);
10317 if (GET_CODE (op1) == CONST_INT)
10318 return INTVAL (op1) >= 0;
10319 if (GET_CODE (op0) != REG)
10320 return 0;
10321 if (sparc_check_64 (op0, insn) == 1)
10322 return 1;
10323 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10324 }
10325 case IOR:
10326 case XOR:
10327 {
10328 rtx op0 = XEXP (SET_SRC (pat), 0);
10329 rtx op1 = XEXP (SET_SRC (pat), 1);
10330 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
10331 return 0;
10332 if (GET_CODE (op1) == CONST_INT)
10333 return INTVAL (op1) >= 0;
10334 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10335 }
10336 case LSHIFTRT:
10337 return GET_MODE (SET_SRC (pat)) == SImode;
10338 /* Positive integers leave the high bits zero. */
10339 case CONST_INT:
10340 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
10341 case ASHIFTRT:
10342 case SIGN_EXTEND:
10343 return - (GET_MODE (SET_SRC (pat)) == SImode);
10344 case REG:
10345 return sparc_check_64 (SET_SRC (pat), insn);
10346 default:
10347 return 0;
10348 }
10349 }
10350
10351 /* We _ought_ to have only one kind per function, but... */
10352 static GTY(()) rtx sparc_addr_diff_list;
10353 static GTY(()) rtx sparc_addr_list;
10354
10355 void
10356 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
10357 {
10358 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10359 if (diff)
10360 sparc_addr_diff_list
10361 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
10362 else
10363 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
10364 }
10365
10366 static void
10367 sparc_output_addr_vec (rtx vec)
10368 {
10369 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10370 int idx, vlen = XVECLEN (body, 0);
10371
10372 #ifdef ASM_OUTPUT_ADDR_VEC_START
10373 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10374 #endif
10375
10376 #ifdef ASM_OUTPUT_CASE_LABEL
10377 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10378 NEXT_INSN (lab));
10379 #else
10380 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10381 #endif
10382
10383 for (idx = 0; idx < vlen; idx++)
10384 {
10385 ASM_OUTPUT_ADDR_VEC_ELT
10386 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10387 }
10388
10389 #ifdef ASM_OUTPUT_ADDR_VEC_END
10390 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10391 #endif
10392 }
10393
10394 static void
10395 sparc_output_addr_diff_vec (rtx vec)
10396 {
10397 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10398 rtx base = XEXP (XEXP (body, 0), 0);
10399 int idx, vlen = XVECLEN (body, 1);
10400
10401 #ifdef ASM_OUTPUT_ADDR_VEC_START
10402 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10403 #endif
10404
10405 #ifdef ASM_OUTPUT_CASE_LABEL
10406 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10407 NEXT_INSN (lab));
10408 #else
10409 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10410 #endif
10411
10412 for (idx = 0; idx < vlen; idx++)
10413 {
10414 ASM_OUTPUT_ADDR_DIFF_ELT
10415 (asm_out_file,
10416 body,
10417 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10418 CODE_LABEL_NUMBER (base));
10419 }
10420
10421 #ifdef ASM_OUTPUT_ADDR_VEC_END
10422 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10423 #endif
10424 }
10425
10426 static void
10427 sparc_output_deferred_case_vectors (void)
10428 {
10429 rtx t;
10430 int align;
10431
10432 if (sparc_addr_list == NULL_RTX
10433 && sparc_addr_diff_list == NULL_RTX)
10434 return;
10435
10436 /* Align to cache line in the function's code section. */
10437 switch_to_section (current_function_section ());
10438
10439 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10440 if (align > 0)
10441 ASM_OUTPUT_ALIGN (asm_out_file, align);
10442
10443 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10444 sparc_output_addr_vec (XEXP (t, 0));
10445 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10446 sparc_output_addr_diff_vec (XEXP (t, 0));
10447
10448 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
10449 }
10450
10451 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10452 unknown. Return 1 if the high bits are zero, -1 if the register is
10453 sign extended. */
10454 int
10455 sparc_check_64 (rtx x, rtx_insn *insn)
10456 {
10457 /* If a register is set only once it is safe to ignore insns this
10458 code does not know how to handle. The loop will either recognize
10459 the single set and return the correct value or fail to recognize
10460 it and return 0. */
10461 int set_once = 0;
10462 rtx y = x;
10463
10464 gcc_assert (GET_CODE (x) == REG);
10465
10466 if (GET_MODE (x) == DImode)
10467 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10468
10469 if (flag_expensive_optimizations
10470 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10471 set_once = 1;
10472
10473 if (insn == 0)
10474 {
10475 if (set_once)
10476 insn = get_last_insn_anywhere ();
10477 else
10478 return 0;
10479 }
10480
10481 while ((insn = PREV_INSN (insn)))
10482 {
10483 switch (GET_CODE (insn))
10484 {
10485 case JUMP_INSN:
10486 case NOTE:
10487 break;
10488 case CODE_LABEL:
10489 case CALL_INSN:
10490 default:
10491 if (! set_once)
10492 return 0;
10493 break;
10494 case INSN:
10495 {
10496 rtx pat = PATTERN (insn);
10497 if (GET_CODE (pat) != SET)
10498 return 0;
10499 if (rtx_equal_p (x, SET_DEST (pat)))
10500 return set_extends (insn);
10501 if (y && rtx_equal_p (y, SET_DEST (pat)))
10502 return set_extends (insn);
10503 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10504 return 0;
10505 }
10506 }
10507 }
10508 return 0;
10509 }
10510
10511 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10512 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10513
10514 const char *
10515 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10516 {
10517 static char asm_code[60];
10518
10519 /* The scratch register is only required when the destination
10520 register is not a 64-bit global or out register. */
10521 if (which_alternative != 2)
10522 operands[3] = operands[0];
10523
10524 /* We can only shift by constants <= 63. */
10525 if (GET_CODE (operands[2]) == CONST_INT)
10526 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10527
10528 if (GET_CODE (operands[1]) == CONST_INT)
10529 {
10530 output_asm_insn ("mov\t%1, %3", operands);
10531 }
10532 else
10533 {
10534 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10535 if (sparc_check_64 (operands[1], insn) <= 0)
10536 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10537 output_asm_insn ("or\t%L1, %3, %3", operands);
10538 }
10539
10540 strcpy (asm_code, opcode);
10541
10542 if (which_alternative != 2)
10543 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10544 else
10545 return
10546 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
10547 }
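/* Illustrative output for OPCODE "sllx" when no scratch register is needed
   (which_alternative != 2) and the 64-bit input is not a constant; this is
   just the sequence the code above pieces together:

       sllx  %H1, 32, %0    ! high word into the upper half of %0
       srl   %L1, 0, %L1    ! only if the low word is not known zero-extended
       or    %L1, %0, %0    ! full 64-bit value assembled in %0
       sllx  %0, %2, %L0    ! the shift proper
       srlx  %L0, 32, %H0   ! split the result back into a register pair  */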
10548 \f
10549 /* Output rtl to increment the profiler label LABELNO
10550 for profiling a function entry. */
10551
10552 void
10553 sparc_profile_hook (int labelno)
10554 {
10555 char buf[32];
10556 rtx lab, fun;
10557
10558 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10559 if (NO_PROFILE_COUNTERS)
10560 {
10561 emit_library_call (fun, LCT_NORMAL, VOIDmode);
10562 }
10563 else
10564 {
10565 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10566 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10567 emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
10568 }
10569 }
10570 \f
10571 #ifdef TARGET_SOLARIS
10572 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10573
10574 static void
10575 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10576 tree decl ATTRIBUTE_UNUSED)
10577 {
10578 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10579 {
10580 solaris_elf_asm_comdat_section (name, flags, decl);
10581 return;
10582 }
10583
10584 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10585
10586 if (!(flags & SECTION_DEBUG))
10587 fputs (",#alloc", asm_out_file);
10588 #if HAVE_GAS_SECTION_EXCLUDE
10589 if (flags & SECTION_EXCLUDE)
10590 fputs (",#exclude", asm_out_file);
10591 #endif
10592 if (flags & SECTION_WRITE)
10593 fputs (",#write", asm_out_file);
10594 if (flags & SECTION_TLS)
10595 fputs (",#tls", asm_out_file);
10596 if (flags & SECTION_CODE)
10597 fputs (",#execinstr", asm_out_file);
10598
10599 if (flags & SECTION_NOTYPE)
10600 ;
10601 else if (flags & SECTION_BSS)
10602 fputs (",#nobits", asm_out_file);
10603 else
10604 fputs (",#progbits", asm_out_file);
10605
10606 fputc ('\n', asm_out_file);
10607 }
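/* Example of the output, using a made-up section name: a writable,
   allocated data section is emitted as

       .section  ".my_data",#alloc,#write,#progbits

   while an executable section gets #execinstr and a BSS-style section
   ends in #nobits instead of #progbits.  */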
10608 #endif /* TARGET_SOLARIS */
10609
10610 /* We do not allow indirect calls to be optimized into sibling calls.
10611
10612 We cannot use sibling calls when delayed branches are disabled
10613 because they will likely require the call delay slot to be filled.
10614
10615 Also, on SPARC 32-bit we cannot emit a sibling call when the
10616 current function returns a structure. This is because the "unimp
10617 after call" convention would cause the callee to return to the
10618 wrong place. The generic code already disallows cases where the
10619 function being called returns a structure.
10620
10621 It may seem strange how this last case could occur. Usually there
10622 is code after the call which jumps to epilogue code which dumps the
10623 return value into the struct return area. That ought to invalidate
10624 the sibling call right? Well, in the C++ case we can end up passing
10625 the pointer to the struct return area to a constructor (which returns
10626 void) and then nothing else happens. Such a sibling call would look
10627 valid without the added check here.
10628
10629 VxWorks PIC PLT entries require the global pointer to be initialized
10630 on entry. We therefore can't emit sibling calls to them. */
10631 static bool
10632 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10633 {
10634 return (decl
10635 && flag_delayed_branch
10636 && (TARGET_ARCH64 || ! cfun->returns_struct)
10637 && !(TARGET_VXWORKS_RTP
10638 && flag_pic
10639 && !targetm.binds_local_p (decl)));
10640 }
10641 \f
10642 /* libfunc renaming. */
10643
10644 static void
10645 sparc_init_libfuncs (void)
10646 {
10647 if (TARGET_ARCH32)
10648 {
10649 /* Use the subroutines that Sun's library provides for integer
10650 multiply and divide. The `*' prevents an underscore from
10651 being prepended by the compiler. .umul is a little faster
10652 than .mul. */
10653 set_optab_libfunc (smul_optab, SImode, "*.umul");
10654 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10655 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10656 set_optab_libfunc (smod_optab, SImode, "*.rem");
10657 set_optab_libfunc (umod_optab, SImode, "*.urem");
10658
10659 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
10660 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10661 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10662 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10663 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10664 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10665
10666 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10667 is because with soft-float, the SFmode and DFmode sqrt
10668 instructions will be absent, and the compiler will notice and
10669 try to use the TFmode sqrt instruction for calls to the
10670 builtin function sqrt, but this fails. */
10671 if (TARGET_FPU)
10672 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10673
10674 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10675 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10676 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10677 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10678 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10679 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10680
10681 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10682 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10683 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10684 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10685
10686 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10687 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10688 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10689 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10690
10691 if (DITF_CONVERSION_LIBFUNCS)
10692 {
10693 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10694 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10695 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10696 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10697 }
10698
10699 if (SUN_CONVERSION_LIBFUNCS)
10700 {
10701 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10702 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10703 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10704 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10705 }
10706 }
10707 if (TARGET_ARCH64)
10708 {
10709 /* In the SPARC 64bit ABI, SImode multiply and divide functions
10710 do not exist in the library. Make sure the compiler does not
10711 emit calls to them by accident. (It should always use the
10712 hardware instructions.) */
10713 set_optab_libfunc (smul_optab, SImode, 0);
10714 set_optab_libfunc (sdiv_optab, SImode, 0);
10715 set_optab_libfunc (udiv_optab, SImode, 0);
10716 set_optab_libfunc (smod_optab, SImode, 0);
10717 set_optab_libfunc (umod_optab, SImode, 0);
10718
10719 if (SUN_INTEGER_MULTIPLY_64)
10720 {
10721 set_optab_libfunc (smul_optab, DImode, "__mul64");
10722 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10723 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10724 set_optab_libfunc (smod_optab, DImode, "__rem64");
10725 set_optab_libfunc (umod_optab, DImode, "__urem64");
10726 }
10727
10728 if (SUN_CONVERSION_LIBFUNCS)
10729 {
10730 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10731 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10732 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10733 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
10734 }
10735 }
10736 }
10737 \f
10738 /* SPARC builtins. */
10739 enum sparc_builtins
10740 {
10741 /* FPU builtins. */
10742 SPARC_BUILTIN_LDFSR,
10743 SPARC_BUILTIN_STFSR,
10744
10745 /* VIS 1.0 builtins. */
10746 SPARC_BUILTIN_FPACK16,
10747 SPARC_BUILTIN_FPACK32,
10748 SPARC_BUILTIN_FPACKFIX,
10749 SPARC_BUILTIN_FEXPAND,
10750 SPARC_BUILTIN_FPMERGE,
10751 SPARC_BUILTIN_FMUL8X16,
10752 SPARC_BUILTIN_FMUL8X16AU,
10753 SPARC_BUILTIN_FMUL8X16AL,
10754 SPARC_BUILTIN_FMUL8SUX16,
10755 SPARC_BUILTIN_FMUL8ULX16,
10756 SPARC_BUILTIN_FMULD8SUX16,
10757 SPARC_BUILTIN_FMULD8ULX16,
10758 SPARC_BUILTIN_FALIGNDATAV4HI,
10759 SPARC_BUILTIN_FALIGNDATAV8QI,
10760 SPARC_BUILTIN_FALIGNDATAV2SI,
10761 SPARC_BUILTIN_FALIGNDATADI,
10762 SPARC_BUILTIN_WRGSR,
10763 SPARC_BUILTIN_RDGSR,
10764 SPARC_BUILTIN_ALIGNADDR,
10765 SPARC_BUILTIN_ALIGNADDRL,
10766 SPARC_BUILTIN_PDIST,
10767 SPARC_BUILTIN_EDGE8,
10768 SPARC_BUILTIN_EDGE8L,
10769 SPARC_BUILTIN_EDGE16,
10770 SPARC_BUILTIN_EDGE16L,
10771 SPARC_BUILTIN_EDGE32,
10772 SPARC_BUILTIN_EDGE32L,
10773 SPARC_BUILTIN_FCMPLE16,
10774 SPARC_BUILTIN_FCMPLE32,
10775 SPARC_BUILTIN_FCMPNE16,
10776 SPARC_BUILTIN_FCMPNE32,
10777 SPARC_BUILTIN_FCMPGT16,
10778 SPARC_BUILTIN_FCMPGT32,
10779 SPARC_BUILTIN_FCMPEQ16,
10780 SPARC_BUILTIN_FCMPEQ32,
10781 SPARC_BUILTIN_FPADD16,
10782 SPARC_BUILTIN_FPADD16S,
10783 SPARC_BUILTIN_FPADD32,
10784 SPARC_BUILTIN_FPADD32S,
10785 SPARC_BUILTIN_FPSUB16,
10786 SPARC_BUILTIN_FPSUB16S,
10787 SPARC_BUILTIN_FPSUB32,
10788 SPARC_BUILTIN_FPSUB32S,
10789 SPARC_BUILTIN_ARRAY8,
10790 SPARC_BUILTIN_ARRAY16,
10791 SPARC_BUILTIN_ARRAY32,
10792
10793 /* VIS 2.0 builtins. */
10794 SPARC_BUILTIN_EDGE8N,
10795 SPARC_BUILTIN_EDGE8LN,
10796 SPARC_BUILTIN_EDGE16N,
10797 SPARC_BUILTIN_EDGE16LN,
10798 SPARC_BUILTIN_EDGE32N,
10799 SPARC_BUILTIN_EDGE32LN,
10800 SPARC_BUILTIN_BMASK,
10801 SPARC_BUILTIN_BSHUFFLEV4HI,
10802 SPARC_BUILTIN_BSHUFFLEV8QI,
10803 SPARC_BUILTIN_BSHUFFLEV2SI,
10804 SPARC_BUILTIN_BSHUFFLEDI,
10805
10806 /* VIS 3.0 builtins. */
10807 SPARC_BUILTIN_CMASK8,
10808 SPARC_BUILTIN_CMASK16,
10809 SPARC_BUILTIN_CMASK32,
10810 SPARC_BUILTIN_FCHKSM16,
10811 SPARC_BUILTIN_FSLL16,
10812 SPARC_BUILTIN_FSLAS16,
10813 SPARC_BUILTIN_FSRL16,
10814 SPARC_BUILTIN_FSRA16,
10815 SPARC_BUILTIN_FSLL32,
10816 SPARC_BUILTIN_FSLAS32,
10817 SPARC_BUILTIN_FSRL32,
10818 SPARC_BUILTIN_FSRA32,
10819 SPARC_BUILTIN_PDISTN,
10820 SPARC_BUILTIN_FMEAN16,
10821 SPARC_BUILTIN_FPADD64,
10822 SPARC_BUILTIN_FPSUB64,
10823 SPARC_BUILTIN_FPADDS16,
10824 SPARC_BUILTIN_FPADDS16S,
10825 SPARC_BUILTIN_FPSUBS16,
10826 SPARC_BUILTIN_FPSUBS16S,
10827 SPARC_BUILTIN_FPADDS32,
10828 SPARC_BUILTIN_FPADDS32S,
10829 SPARC_BUILTIN_FPSUBS32,
10830 SPARC_BUILTIN_FPSUBS32S,
10831 SPARC_BUILTIN_FUCMPLE8,
10832 SPARC_BUILTIN_FUCMPNE8,
10833 SPARC_BUILTIN_FUCMPGT8,
10834 SPARC_BUILTIN_FUCMPEQ8,
10835 SPARC_BUILTIN_FHADDS,
10836 SPARC_BUILTIN_FHADDD,
10837 SPARC_BUILTIN_FHSUBS,
10838 SPARC_BUILTIN_FHSUBD,
10839 SPARC_BUILTIN_FNHADDS,
10840 SPARC_BUILTIN_FNHADDD,
10841 SPARC_BUILTIN_UMULXHI,
10842 SPARC_BUILTIN_XMULX,
10843 SPARC_BUILTIN_XMULXHI,
10844
10845 /* VIS 4.0 builtins. */
10846 SPARC_BUILTIN_FPADD8,
10847 SPARC_BUILTIN_FPADDS8,
10848 SPARC_BUILTIN_FPADDUS8,
10849 SPARC_BUILTIN_FPADDUS16,
10850 SPARC_BUILTIN_FPCMPLE8,
10851 SPARC_BUILTIN_FPCMPGT8,
10852 SPARC_BUILTIN_FPCMPULE16,
10853 SPARC_BUILTIN_FPCMPUGT16,
10854 SPARC_BUILTIN_FPCMPULE32,
10855 SPARC_BUILTIN_FPCMPUGT32,
10856 SPARC_BUILTIN_FPMAX8,
10857 SPARC_BUILTIN_FPMAX16,
10858 SPARC_BUILTIN_FPMAX32,
10859 SPARC_BUILTIN_FPMAXU8,
10860 SPARC_BUILTIN_FPMAXU16,
10861 SPARC_BUILTIN_FPMAXU32,
10862 SPARC_BUILTIN_FPMIN8,
10863 SPARC_BUILTIN_FPMIN16,
10864 SPARC_BUILTIN_FPMIN32,
10865 SPARC_BUILTIN_FPMINU8,
10866 SPARC_BUILTIN_FPMINU16,
10867 SPARC_BUILTIN_FPMINU32,
10868 SPARC_BUILTIN_FPSUB8,
10869 SPARC_BUILTIN_FPSUBS8,
10870 SPARC_BUILTIN_FPSUBUS8,
10871 SPARC_BUILTIN_FPSUBUS16,
10872
10873 /* VIS 4.0B builtins. */
10874
10875 /* Note that all the DICTUNPACK* entries should be kept
10876 contiguous. */
10877 SPARC_BUILTIN_FIRST_DICTUNPACK,
10878 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10879 SPARC_BUILTIN_DICTUNPACK16,
10880 SPARC_BUILTIN_DICTUNPACK32,
10881 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10882
10883 /* Note that all the FPCMP*SHL entries should be kept
10884 contiguous. */
10885 SPARC_BUILTIN_FIRST_FPCMPSHL,
10886 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10887 SPARC_BUILTIN_FPCMPGT8SHL,
10888 SPARC_BUILTIN_FPCMPEQ8SHL,
10889 SPARC_BUILTIN_FPCMPNE8SHL,
10890 SPARC_BUILTIN_FPCMPLE16SHL,
10891 SPARC_BUILTIN_FPCMPGT16SHL,
10892 SPARC_BUILTIN_FPCMPEQ16SHL,
10893 SPARC_BUILTIN_FPCMPNE16SHL,
10894 SPARC_BUILTIN_FPCMPLE32SHL,
10895 SPARC_BUILTIN_FPCMPGT32SHL,
10896 SPARC_BUILTIN_FPCMPEQ32SHL,
10897 SPARC_BUILTIN_FPCMPNE32SHL,
10898 SPARC_BUILTIN_FPCMPULE8SHL,
10899 SPARC_BUILTIN_FPCMPUGT8SHL,
10900 SPARC_BUILTIN_FPCMPULE16SHL,
10901 SPARC_BUILTIN_FPCMPUGT16SHL,
10902 SPARC_BUILTIN_FPCMPULE32SHL,
10903 SPARC_BUILTIN_FPCMPUGT32SHL,
10904 SPARC_BUILTIN_FPCMPDE8SHL,
10905 SPARC_BUILTIN_FPCMPDE16SHL,
10906 SPARC_BUILTIN_FPCMPDE32SHL,
10907 SPARC_BUILTIN_FPCMPUR8SHL,
10908 SPARC_BUILTIN_FPCMPUR16SHL,
10909 SPARC_BUILTIN_FPCMPUR32SHL,
10910 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10911
10912 SPARC_BUILTIN_MAX
10913 };
10914
10915 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10916 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10917
10918 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10919 The instruction should require a constant operand of some sort. The
10920 function prints an error if OPVAL is not valid. */
10921
10922 static int
10923 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10924 {
10925 if (GET_CODE (opval) != CONST_INT)
10926 {
10927 error ("%qs expects a constant argument", insn_data[icode].name);
10928 return false;
10929 }
10930
10931 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10932 {
10933 error ("constant argument out of range for %qs", insn_data[icode].name);
10934 return false;
10935 }
10936 return true;
10937 }
10938
10939 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10940 function decl or NULL_TREE if the builtin was not added. */
10941
10942 static tree
10943 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10944 tree type)
10945 {
10946 tree t
10947 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10948
10949 if (t)
10950 {
10951 sparc_builtins[code] = t;
10952 sparc_builtins_icode[code] = icode;
10953 }
10954
10955 return t;
10956 }
10957
10958 /* Likewise, but also marks the function as "const". */
10959
10960 static tree
10961 def_builtin_const (const char *name, enum insn_code icode,
10962 enum sparc_builtins code, tree type)
10963 {
10964 tree t = def_builtin (name, icode, code, type);
10965
10966 if (t)
10967 TREE_READONLY (t) = 1;
10968
10969 return t;
10970 }
10971
10972 /* Implement the TARGET_INIT_BUILTINS target hook.
10973 Create builtin functions for special SPARC instructions. */
10974
10975 static void
10976 sparc_init_builtins (void)
10977 {
10978 if (TARGET_FPU)
10979 sparc_fpu_init_builtins ();
10980
10981 if (TARGET_VIS)
10982 sparc_vis_init_builtins ();
10983 }
10984
10985 /* Create builtin functions for FPU instructions. */
10986
10987 static void
10988 sparc_fpu_init_builtins (void)
10989 {
10990 tree ftype
10991 = build_function_type_list (void_type_node,
10992 build_pointer_type (unsigned_type_node), 0);
10993 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10994 SPARC_BUILTIN_LDFSR, ftype);
10995 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10996 SPARC_BUILTIN_STFSR, ftype);
10997 }
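/* Usage sketch, with a hypothetical local variable:

       unsigned int fsr;
       __builtin_store_fsr (&fsr);   -- copies the %fsr register into fsr
       __builtin_load_fsr (&fsr);    -- reloads %fsr from fsr

   Both builtins take a pointer to an unsigned int, mirroring the memory
   operand of the underlying ld %fsr / st %fsr instructions.  */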
10998
10999 /* Create builtin functions for VIS instructions. */
11000
11001 static void
11002 sparc_vis_init_builtins (void)
11003 {
11004 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
11005 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
11006 tree v4hi = build_vector_type (intHI_type_node, 4);
11007 tree v2hi = build_vector_type (intHI_type_node, 2);
11008 tree v2si = build_vector_type (intSI_type_node, 2);
11009 tree v1si = build_vector_type (intSI_type_node, 1);
11010
11011 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
11012 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
11013 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
11014 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
11015 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
11016 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
11017 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
11018 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
11019 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
11020 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
11021 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
11022 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
11023 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
11024 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
11025 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
11026 v8qi, v8qi,
11027 intDI_type_node, 0);
11028 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
11029 v8qi, v8qi, 0);
11030 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
11031 v8qi, v8qi, 0);
11032 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
11033 intSI_type_node, 0);
11034 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
11035 intSI_type_node, 0);
11036 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
11037 intSI_type_node, 0);
11038 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
11039 intDI_type_node,
11040 intDI_type_node, 0);
11041 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
11042 intSI_type_node,
11043 intSI_type_node, 0);
11044 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
11045 ptr_type_node,
11046 intSI_type_node, 0);
11047 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
11048 ptr_type_node,
11049 intDI_type_node, 0);
11050 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
11051 ptr_type_node,
11052 ptr_type_node, 0);
11053 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
11054 ptr_type_node,
11055 ptr_type_node, 0);
11056 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
11057 v4hi, v4hi, 0);
11058 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
11059 v2si, v2si, 0);
11060 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
11061 v4hi, v4hi, 0);
11062 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
11063 v2si, v2si, 0);
11064 tree void_ftype_di = build_function_type_list (void_type_node,
11065 intDI_type_node, 0);
11066 tree di_ftype_void = build_function_type_list (intDI_type_node,
11067 void_type_node, 0);
11068 tree void_ftype_si = build_function_type_list (void_type_node,
11069 intSI_type_node, 0);
11070 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
11071 float_type_node,
11072 float_type_node, 0);
11073 tree df_ftype_df_df = build_function_type_list (double_type_node,
11074 double_type_node,
11075 double_type_node, 0);
11076
11077 /* Packing and expanding vectors. */
11078 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
11079 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
11080 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
11081 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
11082 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
11083 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
11084 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
11085 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
11086 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
11087 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
11088
11089 /* Multiplications. */
11090 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
11091 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
11092 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
11093 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
11094 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
11095 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
11096 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
11097 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
11098 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
11099 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
11100 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
11101 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
11102 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
11103 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
11104
11105 /* Data aligning. */
11106 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
11107 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
11108 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
11109 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
11110 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
11111 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
11112 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
11113 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
11114
11115 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
11116 SPARC_BUILTIN_WRGSR, void_ftype_di);
11117 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
11118 SPARC_BUILTIN_RDGSR, di_ftype_void);
11119
11120 if (TARGET_ARCH64)
11121 {
11122 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
11123 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
11124 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
11125 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
11126 }
11127 else
11128 {
11129 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
11130 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
11131 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
11132 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
11133 }
11134
11135 /* Pixel distance. */
11136 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
11137 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
11138
11139 /* Edge handling. */
11140 if (TARGET_ARCH64)
11141 {
11142 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
11143 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
11144 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
11145 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
11146 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
11147 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
11148 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
11149 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
11150 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
11151 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
11152 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
11153 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
11154 }
11155 else
11156 {
11157 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
11158 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
11159 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
11160 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
11161 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
11162 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
11163 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
11164 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
11165 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
11166 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
11167 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
11168 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
11169 }
11170
11171 /* Pixel compare. */
11172 if (TARGET_ARCH64)
11173 {
11174 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
11175 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
11176 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
11177 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
11178 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
11179 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
11180 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
11181 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
11182 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
11183 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
11184 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
11185 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
11186 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
11187 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
11188 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
11189 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
11190 }
11191 else
11192 {
11193 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
11194 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
11195 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
11196 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
11197 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
11198 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
11199 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
11200 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
11201 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
11202 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
11203 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
11204 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
11205 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
11206 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
11207 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
11208 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
11209 }
11210
11211 /* Addition and subtraction. */
11212 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
11213 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
11214 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
11215 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
11216 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
11217 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
11218 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
11219 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
11220 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
11221 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
11222 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
11223 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
11224 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
11225 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
11226 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
11227 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
11228
11229 /* Three-dimensional array addressing. */
11230 if (TARGET_ARCH64)
11231 {
11232 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
11233 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
11234 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
11235 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
11236 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
11237 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
11238 }
11239 else
11240 {
11241 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
11242 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
11243 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
11244 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
11245 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
11246 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
11247 }
11248
11249 if (TARGET_VIS2)
11250 {
11251 /* Edge handling. */
11252 if (TARGET_ARCH64)
11253 {
11254 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
11255 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
11256 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
11257 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
11258 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
11259 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
11260 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
11261 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
11262 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
11263 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
11264 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
11265 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
11266 }
11267 else
11268 {
11269 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
11270 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
11271 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
11272 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
11273 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
11274 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
11275 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
11276 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
11277 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
11278 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
11279 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
11280 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
11281 }
11282
11283 /* Byte mask and shuffle. */
11284 if (TARGET_ARCH64)
11285 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
11286 SPARC_BUILTIN_BMASK, di_ftype_di_di);
11287 else
11288 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
11289 SPARC_BUILTIN_BMASK, si_ftype_si_si);
11290 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
11291 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
11292 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
11293 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
11294 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
11295 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
11296 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
11297 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
11298 }
11299
11300 if (TARGET_VIS3)
11301 {
11302 if (TARGET_ARCH64)
11303 {
11304 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
11305 SPARC_BUILTIN_CMASK8, void_ftype_di);
11306 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
11307 SPARC_BUILTIN_CMASK16, void_ftype_di);
11308 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
11309 SPARC_BUILTIN_CMASK32, void_ftype_di);
11310 }
11311 else
11312 {
11313 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
11314 SPARC_BUILTIN_CMASK8, void_ftype_si);
11315 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
11316 SPARC_BUILTIN_CMASK16, void_ftype_si);
11317 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
11318 SPARC_BUILTIN_CMASK32, void_ftype_si);
11319 }
11320
11321 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
11322 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
11323
11324 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
11325 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
11326 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
11327 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
11328 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
11329 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
11330 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
11331 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
11332 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
11333 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
11334 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
11335 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
11336 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
11337 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
11338 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
11339 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
11340
11341 if (TARGET_ARCH64)
11342 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
11343 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
11344 else
11345 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
11346 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
11347
11348 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
11349 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
11350 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
11351 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
11352 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
11353 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
11354
11355 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
11356 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
11357 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
11358 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
11359 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
11360 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
11361 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
11362 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
11363 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
11364 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
11365 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
11366 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
11367 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
11368 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
11369 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
11370 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
11371
11372 if (TARGET_ARCH64)
11373 {
11374 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
11375 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
11376 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
11377 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
11378 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
11379 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11380 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11381 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11382 }
11383 else
11384 {
11385 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11386 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11387 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11388 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11389 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11390 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11391 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11392 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11393 }
11394
11395 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11396 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11397 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11398 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11399 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11400 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11401 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11402 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11403 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11404 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11405 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11406 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11407
11408 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11409 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11410 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11411 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11412 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11413 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11414 }
11415
11416 if (TARGET_VIS4)
11417 {
11418 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11419 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11420 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11421 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11422 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11423 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11424 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11425 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11426
11427
11428 if (TARGET_ARCH64)
11429 {
11430 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11431 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11432 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11433 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11434 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11435 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11436 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11437 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11438 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11439 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11440 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11441 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11442 }
11443 else
11444 {
11445 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11446 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11447 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11448 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11449 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11450 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11451 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11452 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11453 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11454 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11455 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11456 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11457 }
11458
11459 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11460 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11461 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11462 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11463 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11464 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11465 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11466 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11467 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11468 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11469 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11470 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11471 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11472 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11473 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11474 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11475 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11476 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11477 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11478 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11479 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11480 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11481 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11482 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11483 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11484 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11485 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11486 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11487 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11488 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11489 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11490 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11491 }
11492
11493 if (TARGET_VIS4B)
11494 {
11495 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11496 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11497 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11498 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11499 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11500 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11501
11502 if (TARGET_ARCH64)
11503 {
11504 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11505 v8qi, v8qi,
11506 intSI_type_node, 0);
11507 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11508 v4hi, v4hi,
11509 intSI_type_node, 0);
11510 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11511 v2si, v2si,
11512 intSI_type_node, 0);
11513
11514 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11515 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11516 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11517 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11518 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11519 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11520 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11521 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11522
11523 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11524 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11525 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11526 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11527 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11528 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11529 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11530 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11531
11532 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11533 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11534 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11535 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11536 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11537 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11538 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11539 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11540
11541
11542 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11543 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11544 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11545 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11546
11547 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11548 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11549 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11550 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11551
11552 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11553 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11554 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11555 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11556
11557 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11558 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11559 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11560 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11561 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11562 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11563
11564 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11565 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11566 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11567 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11568 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11569 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11570
11571 }
11572 else
11573 {
11574 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11575 v8qi, v8qi,
11576 intSI_type_node, 0);
11577 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11578 v4hi, v4hi,
11579 intSI_type_node, 0);
11580 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11581 v2si, v2si,
11582 intSI_type_node, 0);
11583
11584 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11585 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11586 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11587 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11588 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11589 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11590 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11591 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11592
11593 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11594 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11595 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11596 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11597 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11598 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11599 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11600 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11601
11602 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11603 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11604 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11605 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11606 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11607 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11608 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11609 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11610
11611
11612 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11613 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11614 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11615 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11616
11617 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11618 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11619 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11620 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11621
11622 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11623 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11624 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11625 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11626
11627 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11628 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11629 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11630 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11631 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11632 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11633
11634 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11635 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11636 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11637 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11638 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11639 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
11640 }
11641 }
11642 }
11643
11644 /* Implement TARGET_BUILTIN_DECL hook. */
11645
11646 static tree
11647 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11648 {
11649 if (code >= SPARC_BUILTIN_MAX)
11650 return error_mark_node;
11651
11652 return sparc_builtins[code];
11653 }
11654
11655 /* Implement TARGET_EXPAND_BUILTIN hook. */
11656
11657 static rtx
11658 sparc_expand_builtin (tree exp, rtx target,
11659 rtx subtarget ATTRIBUTE_UNUSED,
11660 machine_mode tmode ATTRIBUTE_UNUSED,
11661 int ignore ATTRIBUTE_UNUSED)
11662 {
11663 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11664 enum sparc_builtins code
11665 = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
11666 enum insn_code icode = sparc_builtins_icode[code];
11667 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11668 call_expr_arg_iterator iter;
11669 int arg_count = 0;
11670 rtx pat, op[4];
11671 tree arg;
11672
11673 if (nonvoid)
11674 {
11675 machine_mode tmode = insn_data[icode].operand[0].mode;
11676 if (!target
11677 || GET_MODE (target) != tmode
11678 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11679 op[0] = gen_reg_rtx (tmode);
11680 else
11681 op[0] = target;
11682 }
11683 else
11684 op[0] = NULL_RTX;
11685
11686 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11687 {
11688 const struct insn_operand_data *insn_op;
11689 int idx;
11690
11691 if (arg == error_mark_node)
11692 return NULL_RTX;
11693
11694 arg_count++;
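/* Operand 0 of the insn is the destination when the builtin returns a
   value, so the Nth argument maps to operand N in that case and to
   operand N-1 for a void builtin.  */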
11695 idx = arg_count - !nonvoid;
11696 insn_op = &insn_data[icode].operand[idx];
11697 op[arg_count] = expand_normal (arg);
11698
11699 /* Some of the builtins require constant arguments. We check
11700 for this here. */
11701 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11702 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11703 && arg_count == 3)
11704 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11705 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11706 && arg_count == 2))
11707 {
11708 if (!check_constant_argument (icode, idx, op[arg_count]))
11709 return const0_rtx;
11710 }
11711
11712 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11713 {
11714 if (!address_operand (op[arg_count], SImode))
11715 {
11716 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11717 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11718 }
11719 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11720 }
11721
11722 else if (insn_op->mode == V1DImode
11723 && GET_MODE (op[arg_count]) == DImode)
11724 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11725
11726 else if (insn_op->mode == V1SImode
11727 && GET_MODE (op[arg_count]) == SImode)
11728 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11729
11730 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11731 insn_op->mode))
11732 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11733 }
11734
11735 switch (arg_count)
11736 {
11737 case 0:
11738 pat = GEN_FCN (icode) (op[0]);
11739 break;
11740 case 1:
11741 if (nonvoid)
11742 pat = GEN_FCN (icode) (op[0], op[1]);
11743 else
11744 pat = GEN_FCN (icode) (op[1]);
11745 break;
11746 case 2:
11747 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11748 break;
11749 case 3:
11750 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11751 break;
11752 default:
11753 gcc_unreachable ();
11754 }
11755
11756 if (!pat)
11757 return NULL_RTX;
11758
11759 emit_insn (pat);
11760
11761 return (nonvoid ? op[0] : const0_rtx);
11762 }
11763
11764 /* Return the upper 16 bits of the 8x16 multiplication. */
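/* For instance, sparc_vis_mul8x16 (3, 300) is (3*300 + 128) / 256 = 4,
   i.e. 900/256 rounded to the nearest integer.  */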
11765
11766 static int
11767 sparc_vis_mul8x16 (int e8, int e16)
11768 {
11769 return (e8 * e16 + 128) / 256;
11770 }
11771
11772 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11773 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11774
11775 static void
11776 sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
11777 tree inner_type, tree cst0, tree cst1)
11778 {
11779 unsigned i, num = VECTOR_CST_NELTS (cst0);
11780 int scale;
11781
11782 switch (fncode)
11783 {
11784 case SPARC_BUILTIN_FMUL8X16:
11785 for (i = 0; i < num; ++i)
11786 {
11787 int val
11788 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11789 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11790 n_elts->quick_push (build_int_cst (inner_type, val));
11791 }
11792 break;
11793
11794 case SPARC_BUILTIN_FMUL8X16AU:
11795 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11796
11797 for (i = 0; i < num; ++i)
11798 {
11799 int val
11800 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11801 scale);
11802 n_elts->quick_push (build_int_cst (inner_type, val));
11803 }
11804 break;
11805
11806 case SPARC_BUILTIN_FMUL8X16AL:
11807 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11808
11809 for (i = 0; i < num; ++i)
11810 {
11811 int val
11812 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11813 scale);
11814 n_elts->quick_push (build_int_cst (inner_type, val));
11815 }
11816 break;
11817
11818 default:
11819 gcc_unreachable ();
11820 }
11821 }
11822
11823 /* Implement TARGET_FOLD_BUILTIN hook.
11824
11825 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11826 result of the function call is ignored. NULL_TREE is returned if the
11827 function could not be folded. */
11828
11829 static tree
11830 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11831 tree *args, bool ignore)
11832 {
11833 enum sparc_builtins code
11834 = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
11835 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11836 tree arg0, arg1, arg2;
11837
11838 if (ignore)
11839 switch (code)
11840 {
11841 case SPARC_BUILTIN_LDFSR:
11842 case SPARC_BUILTIN_STFSR:
11843 case SPARC_BUILTIN_ALIGNADDR:
11844 case SPARC_BUILTIN_WRGSR:
11845 case SPARC_BUILTIN_BMASK:
11846 case SPARC_BUILTIN_CMASK8:
11847 case SPARC_BUILTIN_CMASK16:
11848 case SPARC_BUILTIN_CMASK32:
11849 break;
11850
11851 default:
11852 return build_zero_cst (rtype);
11853 }
11854
11855 switch (code)
11856 {
11857 case SPARC_BUILTIN_FEXPAND:
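/* fexpand zero-extends each 8-bit element to 16 bits and shifts it left
   by 4, e.g. 0xff becomes 0xff0; the loop below does the same on the
   constant elements.  */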
11858 arg0 = args[0];
11859 STRIP_NOPS (arg0);
11860
11861 if (TREE_CODE (arg0) == VECTOR_CST)
11862 {
11863 tree inner_type = TREE_TYPE (rtype);
11864 unsigned i;
11865
11866 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11867 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11868 {
11869 unsigned HOST_WIDE_INT val
11870 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
11871 n_elts.quick_push (build_int_cst (inner_type, val << 4));
11872 }
11873 return n_elts.build ();
11874 }
11875 break;
11876
11877 case SPARC_BUILTIN_FMUL8X16:
11878 case SPARC_BUILTIN_FMUL8X16AU:
11879 case SPARC_BUILTIN_FMUL8X16AL:
11880 arg0 = args[0];
11881 arg1 = args[1];
11882 STRIP_NOPS (arg0);
11883 STRIP_NOPS (arg1);
11884
11885 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11886 {
11887 tree inner_type = TREE_TYPE (rtype);
11888 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11889 sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
11890 return n_elts.build ();
11891 }
11892 break;
11893
11894 case SPARC_BUILTIN_FPMERGE:
11895 arg0 = args[0];
11896 arg1 = args[1];
11897 STRIP_NOPS (arg0);
11898 STRIP_NOPS (arg1);
11899
11900 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11901 {
11902 tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
11903 unsigned i;
11904 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11905 {
11906 n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
11907 n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
11908 }
11909
11910 return n_elts.build ();
11911 }
11912 break;
11913
11914 case SPARC_BUILTIN_PDIST:
11915 case SPARC_BUILTIN_PDISTN:
11916 arg0 = args[0];
11917 arg1 = args[1];
11918 STRIP_NOPS (arg0);
11919 STRIP_NOPS (arg1);
11920 if (code == SPARC_BUILTIN_PDIST)
11921 {
11922 arg2 = args[2];
11923 STRIP_NOPS (arg2);
11924 }
11925 else
11926 arg2 = integer_zero_node;
11927
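/* pdist sums the absolute differences of the corresponding byte elements
   and adds the total to the 64-bit accumulator in ARG2 (zero for pdistn);
   the loop below folds that computation on constant operands.  */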
11928 if (TREE_CODE (arg0) == VECTOR_CST
11929 && TREE_CODE (arg1) == VECTOR_CST
11930 && TREE_CODE (arg2) == INTEGER_CST)
11931 {
11932 bool overflow = false;
11933 widest_int result = wi::to_widest (arg2);
11934 widest_int tmp;
11935 unsigned i;
11936
11937 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11938 {
11939 tree e0 = VECTOR_CST_ELT (arg0, i);
11940 tree e1 = VECTOR_CST_ELT (arg1, i);
11941
11942 wi::overflow_type neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11943
11944 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11945 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11946 if (wi::neg_p (tmp))
11947 tmp = wi::neg (tmp, &neg2_ovf);
11948 else
11949 neg2_ovf = wi::OVF_NONE;
11950 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11951 overflow |= ((neg1_ovf != wi::OVF_NONE)
11952 | (neg2_ovf != wi::OVF_NONE)
11953 | (add1_ovf != wi::OVF_NONE)
11954 | (add2_ovf != wi::OVF_NONE));
11955 }
11956
11957 gcc_assert (!overflow);
11958
11959 return wide_int_to_tree (rtype, result);
11960 }
11961
11962 default:
11963 break;
11964 }
11965
11966 return NULL_TREE;
11967 }
11968 \f
11969 /* ??? This duplicates information provided to the compiler by the
11970 ??? scheduler description. Some day, teach genautomata to output
11971 ??? the latencies and then CSE will just use that. */
11972
11973 static bool
11974 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11975 int opno ATTRIBUTE_UNUSED,
11976 int *total, bool speed ATTRIBUTE_UNUSED)
11977 {
11978 int code = GET_CODE (x);
11979 bool float_mode_p = FLOAT_MODE_P (mode);
11980
11981 switch (code)
11982 {
11983 case CONST_INT:
11984 if (SMALL_INT (x))
11985 *total = 0;
11986 else
11987 *total = 2;
11988 return true;
11989
11990 case CONST_WIDE_INT:
11991 *total = 0;
11992 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11993 *total += 2;
11994 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11995 *total += 2;
11996 return true;
11997
11998 case HIGH:
11999 *total = 2;
12000 return true;
12001
12002 case CONST:
12003 case LABEL_REF:
12004 case SYMBOL_REF:
12005 *total = 4;
12006 return true;
12007
12008 case CONST_DOUBLE:
12009 *total = 8;
12010 return true;
12011
12012 case MEM:
12013 /* If outer-code was a sign or zero extension, a cost
12014 of COSTS_N_INSNS (1) was already added in. This is
12015 why we are subtracting it back out. */
12016 if (outer_code == ZERO_EXTEND)
12017 {
12018 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
12019 }
12020 else if (outer_code == SIGN_EXTEND)
12021 {
12022 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
12023 }
12024 else if (float_mode_p)
12025 {
12026 *total = sparc_costs->float_load;
12027 }
12028 else
12029 {
12030 *total = sparc_costs->int_load;
12031 }
12032
12033 return true;
12034
12035 case PLUS:
12036 case MINUS:
12037 if (float_mode_p)
12038 *total = sparc_costs->float_plusminus;
12039 else
12040 *total = COSTS_N_INSNS (1);
12041 return false;
12042
12043 case FMA:
12044 {
12045 rtx sub;
12046
12047 gcc_assert (float_mode_p);
12048 *total = sparc_costs->float_mul;
12049
12050 sub = XEXP (x, 0);
12051 if (GET_CODE (sub) == NEG)
12052 sub = XEXP (sub, 0);
12053 *total += rtx_cost (sub, mode, FMA, 0, speed);
12054
12055 sub = XEXP (x, 2);
12056 if (GET_CODE (sub) == NEG)
12057 sub = XEXP (sub, 0);
12058 *total += rtx_cost (sub, mode, FMA, 2, speed);
12059 return true;
12060 }
12061
12062 case MULT:
12063 if (float_mode_p)
12064 *total = sparc_costs->float_mul;
12065 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
12066 *total = COSTS_N_INSNS (25);
12067 else
12068 {
12069 int bit_cost;
12070
12071 bit_cost = 0;
12072 if (sparc_costs->int_mul_bit_factor)
12073 {
12074 int nbits;
12075
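/* Count the significant bits of the multiplier: for a constant, clear
   the lowest set bit on each iteration; for anything else, assume about
   7 bits.  Each bit beyond the first three then adds
   1/int_mul_bit_factor of an insn to the cost.  */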
12076 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
12077 {
12078 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
12079 for (nbits = 0; value != 0; value &= value - 1)
12080 nbits++;
12081 }
12082 else
12083 nbits = 7;
12084
12085 if (nbits < 3)
12086 nbits = 3;
12087 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
12088 bit_cost = COSTS_N_INSNS (bit_cost);
12089 }
12090
12091 if (mode == DImode || !TARGET_HARD_MUL)
12092 *total = sparc_costs->int_mulX + bit_cost;
12093 else
12094 *total = sparc_costs->int_mul + bit_cost;
12095 }
12096 return false;
12097
12098 case ASHIFT:
12099 case ASHIFTRT:
12100 case LSHIFTRT:
12101 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
12102 return false;
12103
12104 case DIV:
12105 case UDIV:
12106 case MOD:
12107 case UMOD:
12108 if (float_mode_p)
12109 {
12110 if (mode == DFmode)
12111 *total = sparc_costs->float_div_df;
12112 else
12113 *total = sparc_costs->float_div_sf;
12114 }
12115 else
12116 {
12117 if (mode == DImode)
12118 *total = sparc_costs->int_divX;
12119 else
12120 *total = sparc_costs->int_div;
12121 }
12122 return false;
12123
12124 case NEG:
12125 if (! float_mode_p)
12126 {
12127 *total = COSTS_N_INSNS (1);
12128 return false;
12129 }
12130 /* FALLTHRU */
12131
12132 case ABS:
12133 case FLOAT:
12134 case UNSIGNED_FLOAT:
12135 case FIX:
12136 case UNSIGNED_FIX:
12137 case FLOAT_EXTEND:
12138 case FLOAT_TRUNCATE:
12139 *total = sparc_costs->float_move;
12140 return false;
12141
12142 case SQRT:
12143 if (mode == DFmode)
12144 *total = sparc_costs->float_sqrt_df;
12145 else
12146 *total = sparc_costs->float_sqrt_sf;
12147 return false;
12148
12149 case COMPARE:
12150 if (float_mode_p)
12151 *total = sparc_costs->float_cmp;
12152 else
12153 *total = COSTS_N_INSNS (1);
12154 return false;
12155
12156 case IF_THEN_ELSE:
12157 if (float_mode_p)
12158 *total = sparc_costs->float_cmove;
12159 else
12160 *total = sparc_costs->int_cmove;
12161 return false;
12162
12163 case IOR:
12164 /* Handle the NAND vector patterns. */
12165 if (sparc_vector_mode_supported_p (mode)
12166 && GET_CODE (XEXP (x, 0)) == NOT
12167 && GET_CODE (XEXP (x, 1)) == NOT)
12168 {
12169 *total = COSTS_N_INSNS (1);
12170 return true;
12171 }
12172 else
12173 return false;
12174
12175 default:
12176 return false;
12177 }
12178 }
12179
12180 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
12181
12182 static inline bool
12183 general_or_i64_p (reg_class_t rclass)
12184 {
12185 return (rclass == GENERAL_REGS || rclass == I64_REGS);
12186 }
12187
12188 /* Implement TARGET_REGISTER_MOVE_COST. */
12189
12190 static int
12191 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
12192 reg_class_t from, reg_class_t to)
12193 {
12194 bool need_memory = false;
12195
12196 /* This helps postreload CSE to eliminate redundant comparisons. */
12197 if (from == NO_REGS || to == NO_REGS)
12198 return 100;
12199
12200 if (from == FPCC_REGS || to == FPCC_REGS)
12201 need_memory = true;
12202 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
12203 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
12204 {
12205 if (TARGET_VIS3)
12206 {
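/* VIS3 has direct move instructions between the integer and FP register
   files (e.g. movwtos/movstouw and movxtod/movdtox), so these moves need
   not go through memory.  An 8-byte move in 32-bit mode gets a slightly
   higher cost, presumably because it needs more than one transfer.  */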
12207 int size = GET_MODE_SIZE (mode);
12208 if (size == 8 || size == 4)
12209 {
12210 if (! TARGET_ARCH32 || size == 4)
12211 return 4;
12212 else
12213 return 6;
12214 }
12215 }
12216 need_memory = true;
12217 }
12218
12219 if (need_memory)
12220 {
12221 if (sparc_cpu == PROCESSOR_ULTRASPARC
12222 || sparc_cpu == PROCESSOR_ULTRASPARC3
12223 || sparc_cpu == PROCESSOR_NIAGARA
12224 || sparc_cpu == PROCESSOR_NIAGARA2
12225 || sparc_cpu == PROCESSOR_NIAGARA3
12226 || sparc_cpu == PROCESSOR_NIAGARA4
12227 || sparc_cpu == PROCESSOR_NIAGARA7
12228 || sparc_cpu == PROCESSOR_M8)
12229 return 12;
12230
12231 return 6;
12232 }
12233
12234 return 2;
12235 }
12236
12237 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
12238 This is achieved by means of a manual dynamic stack space allocation in
12239 the current frame. We make the assumption that SEQ doesn't contain any
12240 function calls, with the possible exception of calls to the GOT helper. */
12241
12242 static void
12243 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
12244 {
12245 /* We must preserve the lowest 16 words for the register save area. */
12246 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
12247 /* We really need only 2 words of fresh stack space. */
12248 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
12249
12250 rtx slot
12251 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
12252 SPARC_STACK_BIAS + offset));
12253
12254 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
12255 emit_insn (gen_rtx_SET (slot, reg));
12256 if (reg2)
12257 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
12258 reg2));
12259 emit_insn (seq);
12260 if (reg2)
12261 emit_insn (gen_rtx_SET (reg2,
12262 adjust_address (slot, word_mode, UNITS_PER_WORD)));
12263 emit_insn (gen_rtx_SET (reg, slot));
12264 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
12265 }
12266
12267 /* Output the assembler code for a thunk function. THUNK_DECL is the
12268 declaration for the thunk function itself, FUNCTION is the decl for
12269 the target function. DELTA is an immediate constant offset to be
12270 added to THIS. If VCALL_OFFSET is nonzero, the word at address
12271 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
12272
12273 static void
12274 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12275 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12276 tree function)
12277 {
12278 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
12279 rtx this_rtx, funexp;
12280 rtx_insn *insn;
12281 unsigned int int_arg_first;
12282
12283 reload_completed = 1;
12284 epilogue_completed = 1;
12285
12286 emit_note (NOTE_INSN_PROLOGUE_END);
12287
12288 if (TARGET_FLAT)
12289 {
12290 sparc_leaf_function_p = 1;
12291
12292 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12293 }
12294 else if (flag_delayed_branch)
12295 {
12296 /* We will emit a regular sibcall below, so we need to instruct
12297 output_sibcall that we are in a leaf function. */
12298 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
12299
12300 /* This will cause final.c to invoke leaf_renumber_regs so we
12301 must behave as if we were in a not-yet-leafified function. */
12302 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
12303 }
12304 else
12305 {
12306 /* We will emit the sibcall manually below, so we will need to
12307 manually spill non-leaf registers. */
12308 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
12309
12310 /* We really are in a leaf function. */
12311 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12312 }
12313
12314 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
12315 returns a structure, the structure return pointer is there instead. */
12316 if (TARGET_ARCH64
12317 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12318 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
12319 else
12320 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
12321
12322 /* Add DELTA. When possible use a plain add, otherwise load it into
12323 a register first. */
12324 if (delta)
12325 {
12326 rtx delta_rtx = GEN_INT (delta);
12327
12328 if (! SPARC_SIMM13_P (delta))
12329 {
12330 rtx scratch = gen_rtx_REG (Pmode, 1);
12331 emit_move_insn (scratch, delta_rtx);
12332 delta_rtx = scratch;
12333 }
12334
12335 /* THIS_RTX += DELTA. */
12336 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
12337 }
12338
12339 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
12340 if (vcall_offset)
12341 {
12342 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
12343 rtx scratch = gen_rtx_REG (Pmode, 1);
12344
12345 gcc_assert (vcall_offset < 0);
12346
12347 /* SCRATCH = *THIS_RTX. */
12348 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
12349
12350 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
12351 may not have any available scratch register at this point. */
12352 if (SPARC_SIMM13_P (vcall_offset))
12353 ;
12354 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
12355 else if (! fixed_regs[5]
12356 /* The below sequence is made up of at least 2 insns,
12357 while the default method may need only one. */
12358 && vcall_offset < -8192)
12359 {
12360 rtx scratch2 = gen_rtx_REG (Pmode, 5);
12361 emit_move_insn (scratch2, vcall_offset_rtx);
12362 vcall_offset_rtx = scratch2;
12363 }
12364 else
12365 {
12366 rtx increment = GEN_INT (-4096);
12367
12368 /* VCALL_OFFSET is a negative number whose typical range can be
12369 estimated as -32768..0 in 32-bit mode. In almost all cases
12370 it is therefore cheaper to emit multiple add insns than
12371 spilling and loading the constant into a register (at least
12372 6 insns). */
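/* For example, with VCALL_OFFSET == -10000 the loop emits two adds of
   -4096, leaving an offset of -1808, which fits in a SIMM13 field.  */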
12373 while (! SPARC_SIMM13_P (vcall_offset))
12374 {
12375 emit_insn (gen_add2_insn (scratch, increment));
12376 vcall_offset += 4096;
12377 }
12378 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
12379 }
12380
12381 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
12382 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
12383 gen_rtx_PLUS (Pmode,
12384 scratch,
12385 vcall_offset_rtx)));
12386
12387 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
12388 emit_insn (gen_add2_insn (this_rtx, scratch));
12389 }
12390
12391 /* Generate a tail call to the target function. */
12392 if (! TREE_USED (function))
12393 {
12394 assemble_external (function);
12395 TREE_USED (function) = 1;
12396 }
12397 funexp = XEXP (DECL_RTL (function), 0);
12398
12399 if (flag_delayed_branch)
12400 {
12401 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12402 insn = emit_call_insn (gen_sibcall (funexp));
12403 SIBLING_CALL_P (insn) = 1;
12404 }
12405 else
12406 {
12407 /* The hoops we have to jump through in order to generate a sibcall
12408 without using delay slots... */
12409 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12410
12411 if (flag_pic)
12412 {
12413 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12414 start_sequence ();
12415 load_got_register (); /* clobbers %o7 */
12416 if (!TARGET_VXWORKS_RTP)
12417 pic_offset_table_rtx = got_register_rtx;
12418 scratch = sparc_legitimize_pic_address (funexp, scratch);
12419 seq = get_insns ();
12420 end_sequence ();
12421 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12422 }
12423 else if (TARGET_ARCH32)
12424 {
12425 emit_insn (gen_rtx_SET (scratch,
12426 gen_rtx_HIGH (SImode, funexp)));
12427 emit_insn (gen_rtx_SET (scratch,
12428 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12429 }
12430 else /* TARGET_ARCH64 */
12431 {
12432 switch (sparc_code_model)
12433 {
12434 case CM_MEDLOW:
12435 case CM_MEDMID:
12436 /* The destination can serve as a temporary. */
12437 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12438 break;
12439
12440 case CM_MEDANY:
12441 case CM_EMBMEDANY:
12442 /* The destination cannot serve as a temporary. */
12443 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12444 start_sequence ();
12445 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12446 seq = get_insns ();
12447 end_sequence ();
12448 emit_and_preserve (seq, spill_reg, 0);
12449 break;
12450
12451 default:
12452 gcc_unreachable ();
12453 }
12454 }
12455
12456 emit_jump_insn (gen_indirect_jump (scratch));
12457 }
12458
12459 emit_barrier ();
12460
12461 /* Run just enough of rest_of_compilation to get the insns emitted.
12462 There's not really enough bulk here to make other passes such as
12463 instruction scheduling worth while. */
12464 insn = get_insns ();
12465 shorten_branches (insn);
12466 assemble_start_function (thunk_fndecl, fnname);
12467 final_start_function (insn, file, 1);
12468 final (insn, file, 1);
12469 final_end_function ();
12470 assemble_end_function (thunk_fndecl, fnname);
12471
12472 reload_completed = 0;
12473 epilogue_completed = 0;
12474 }
12475
12476 /* Return true if sparc_output_mi_thunk would be able to output the
12477 assembler code for the thunk function specified by the arguments
12478 it is passed, and false otherwise. */
12479 static bool
12480 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12481 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12482 HOST_WIDE_INT vcall_offset,
12483 const_tree function ATTRIBUTE_UNUSED)
12484 {
12485 /* Bound the loop used in the default method above. */
12486 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12487 }
12488
12489 /* How to allocate a 'struct machine_function'. */
12490
12491 static struct machine_function *
12492 sparc_init_machine_status (void)
12493 {
12494 return ggc_cleared_alloc<machine_function> ();
12495 }
12496 \f
12497 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
12498
12499 static unsigned HOST_WIDE_INT
12500 sparc_asan_shadow_offset (void)
12501 {
12502 return TARGET_ARCH64 ? (HOST_WIDE_INT_1 << 43) : (HOST_WIDE_INT_1 << 29);
12503 }
12504 \f
12505 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12506 We need to emit DTP-relative relocations. */
12507
12508 static void
12509 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12510 {
12511 switch (size)
12512 {
12513 case 4:
12514 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12515 break;
12516 case 8:
12517 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12518 break;
12519 default:
12520 gcc_unreachable ();
12521 }
12522 output_addr_const (file, x);
12523 fputs (")", file);
12524 }
12525
12526 /* Do whatever processing is required at the end of a file. */
12527
12528 static void
12529 sparc_file_end (void)
12530 {
12531 /* If we need to emit the special GOT helper function, do so now. */
12532 if (got_helper_rtx)
12533 {
12534 const char *name = XSTR (got_helper_rtx, 0);
12535 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12536 #ifdef DWARF2_UNWIND_INFO
12537 bool do_cfi;
12538 #endif
12539
12540 if (USE_HIDDEN_LINKONCE)
12541 {
12542 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12543 get_identifier (name),
12544 build_function_type_list (void_type_node,
12545 NULL_TREE));
12546 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12547 NULL_TREE, void_type_node);
12548 TREE_PUBLIC (decl) = 1;
12549 TREE_STATIC (decl) = 1;
12550 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12551 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12552 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12553 resolve_unique_section (decl, 0, flag_function_sections);
12554 allocate_struct_function (decl, true);
12555 cfun->is_thunk = 1;
12556 current_function_decl = decl;
12557 init_varasm_status ();
12558 assemble_start_function (decl, name);
12559 }
12560 else
12561 {
12562 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12563 switch_to_section (text_section);
12564 if (align > 0)
12565 ASM_OUTPUT_ALIGN (asm_out_file, align);
12566 ASM_OUTPUT_LABEL (asm_out_file, name);
12567 }
12568
12569 #ifdef DWARF2_UNWIND_INFO
12570 do_cfi = dwarf2out_do_cfi_asm ();
12571 if (do_cfi)
12572 fprintf (asm_out_file, "\t.cfi_startproc\n");
12573 #endif
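/* The call that reaches the helper leaves its own address in %o7, so
   adding %o7 into the GOT register completes the PC-relative GOT address
   computation set up at the call site, while jmp %o7+8 returns past the
   caller's delay slot.  */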
12574 if (flag_delayed_branch)
12575 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12576 reg_name, reg_name);
12577 else
12578 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12579 reg_name, reg_name);
12580 #ifdef DWARF2_UNWIND_INFO
12581 if (do_cfi)
12582 fprintf (asm_out_file, "\t.cfi_endproc\n");
12583 #endif
12584 }
12585
12586 if (NEED_INDICATE_EXEC_STACK)
12587 file_end_indicate_exec_stack ();
12588
12589 #ifdef TARGET_SOLARIS
12590 solaris_file_end ();
12591 #endif
12592 }
12593
12594 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12595 /* Implement TARGET_MANGLE_TYPE. */
12596
12597 static const char *
12598 sparc_mangle_type (const_tree type)
12599 {
12600 if (TARGET_ARCH32
12601 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12602 && TARGET_LONG_DOUBLE_128)
12603 return "g";
12604
12605 /* For all other types, use normal C++ mangling. */
12606 return NULL;
12607 }
12608 #endif
12609
12610 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12611 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12612 bit 0 indicates that X is true, and bit 1 indicates that Y is true. */
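/* For instance, LOAD_STORE == 1 with BEFORE_AFTER == 2 requests a barrier
   after a load, while 3 for both arguments requests barriers before and
   after both loads and stores.  */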
12613
12614 void
12615 sparc_emit_membar_for_model (enum memmodel model,
12616 int load_store, int before_after)
12617 {
12618 /* Bits for the MEMBAR mmask field. */
12619 const int LoadLoad = 1;
12620 const int StoreLoad = 2;
12621 const int LoadStore = 4;
12622 const int StoreStore = 8;
12623
12624 int mm = 0, implied = 0;
12625
12626 switch (sparc_memory_model)
12627 {
12628 case SMM_SC:
12629 /* Sequential Consistency. All memory transactions are immediately
12630 visible in sequential execution order. No barriers needed. */
12631 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12632 break;
12633
12634 case SMM_TSO:
12635 /* Total Store Ordering: all memory transactions with store semantics
12636 are followed by an implied StoreStore. */
12637 implied |= StoreStore;
12638
12639 /* If we're not looking for a raw barrier (before+after), then atomic
12640 operations get the benefit of being both load and store. */
12641 if (load_store == 3 && before_after == 1)
12642 implied |= StoreLoad;
12643 /* FALLTHRU */
12644
12645 case SMM_PSO:
12646 /* Partial Store Ordering: all memory transactions with load semantics
12647 are followed by an implied LoadLoad | LoadStore. */
12648 implied |= LoadLoad | LoadStore;
12649
12650 /* If we're not looking for a raw barrier (before+after), then atomic
12651 operations get the benefit of being both load and store. */
12652 if (load_store == 3 && before_after == 2)
12653 implied |= StoreLoad | StoreStore;
12654 /* FALLTHRU */
12655
12656 case SMM_RMO:
12657 /* Relaxed Memory Ordering: no implicit bits. */
12658 break;
12659
12660 default:
12661 gcc_unreachable ();
12662 }
12663
12664 if (before_after & 1)
12665 {
12666 if (is_mm_release (model) || is_mm_acq_rel (model)
12667 || is_mm_seq_cst (model))
12668 {
12669 if (load_store & 1)
12670 mm |= LoadLoad | StoreLoad;
12671 if (load_store & 2)
12672 mm |= LoadStore | StoreStore;
12673 }
12674 }
12675 if (before_after & 2)
12676 {
12677 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12678 || is_mm_seq_cst (model))
12679 {
12680 if (load_store & 1)
12681 mm |= LoadLoad | LoadStore;
12682 if (load_store & 2)
12683 mm |= StoreLoad | StoreStore;
12684 }
12685 }
12686
12687 /* Remove the bits implied by the system memory model. */
12688 mm &= ~implied;
12689
12690 /* For raw barriers (before+after), always emit a barrier.
12691 This will become a compile-time barrier if needed. */
12692 if (mm || before_after == 3)
12693 emit_insn (gen_membar (GEN_INT (mm)));
12694 }
12695
12696 /* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing a
12697 32-bit compare-and-swap on the word containing the byte or half-word. */
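/* Editorial sketch of the scheme used below: the containing word is
   loaded, the byte or half-word is cleared out of it, and the expected
   and new values are shifted into that position so that a full-word CAS
   can be retried until the other bytes of the word are stable.  The
   shift is derived from the big-endian position of the datum within the
   word: for QImode, off = ((addr & 3) ^ 3) * 8, so e.g. the byte at word
   offset 1 lives in bits 23:16 and gets a shift of 16.  */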
12698
12699 static void
12700 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12701 rtx oldval, rtx newval)
12702 {
12703 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12704 rtx addr = gen_reg_rtx (Pmode);
12705 rtx off = gen_reg_rtx (SImode);
12706 rtx oldv = gen_reg_rtx (SImode);
12707 rtx newv = gen_reg_rtx (SImode);
12708 rtx oldvalue = gen_reg_rtx (SImode);
12709 rtx newvalue = gen_reg_rtx (SImode);
12710 rtx res = gen_reg_rtx (SImode);
12711 rtx resv = gen_reg_rtx (SImode);
12712 rtx memsi, val, mask, cc;
12713
12714 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12715
12716 if (Pmode != SImode)
12717 addr1 = gen_lowpart (SImode, addr1);
12718 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12719
12720 memsi = gen_rtx_MEM (SImode, addr);
12721 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12722 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12723
12724 val = copy_to_reg (memsi);
12725
12726 emit_insn (gen_rtx_SET (off,
12727 gen_rtx_XOR (SImode, off,
12728 GEN_INT (GET_MODE (mem) == QImode
12729 ? 3 : 2))));
12730
12731 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12732
12733 if (GET_MODE (mem) == QImode)
12734 mask = force_reg (SImode, GEN_INT (0xff));
12735 else
12736 mask = force_reg (SImode, GEN_INT (0xffff));
12737
12738 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12739
12740 emit_insn (gen_rtx_SET (val,
12741 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12742 val)));
12743
12744 oldval = gen_lowpart (SImode, oldval);
12745 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12746
12747 newval = gen_lowpart_common (SImode, newval);
12748 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12749
12750 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12751
12752 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12753
12754 rtx_code_label *end_label = gen_label_rtx ();
12755 rtx_code_label *loop_label = gen_label_rtx ();
12756 emit_label (loop_label);
12757
12758 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12759
12760 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12761
12762 emit_move_insn (bool_result, const1_rtx);
12763
12764 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12765
12766 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12767
12768 emit_insn (gen_rtx_SET (resv,
12769 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12770 res)));
12771
12772 emit_move_insn (bool_result, const0_rtx);
12773
12774 cc = gen_compare_reg_1 (NE, resv, val);
12775 emit_insn (gen_rtx_SET (val, resv));
12776
12777 /* Use cbranchcc4 to separate the compare and branch! */
12778 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12779 cc, const0_rtx, loop_label));
12780
12781 emit_label (end_label);
12782
12783 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12784
12785 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12786
12787 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
12788 }
12789
12790 /* Expand code to perform a compare-and-swap. */
12791
12792 void
12793 sparc_expand_compare_and_swap (rtx operands[])
12794 {
12795 rtx bval, retval, mem, oldval, newval;
12796 machine_mode mode;
12797 enum memmodel model;
12798
12799 bval = operands[0];
12800 retval = operands[1];
12801 mem = operands[2];
12802 oldval = operands[3];
12803 newval = operands[4];
12804 model = (enum memmodel) INTVAL (operands[6]);
12805 mode = GET_MODE (mem);
12806
12807 sparc_emit_membar_for_model (model, 3, 1);
12808
12809 if (reg_overlap_mentioned_p (retval, oldval))
12810 oldval = copy_to_reg (oldval);
12811
12812 if (mode == QImode || mode == HImode)
12813 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12814 else
12815 {
12816 rtx (*gen) (rtx, rtx, rtx, rtx);
12817 rtx x;
12818
12819 if (mode == SImode)
12820 gen = gen_atomic_compare_and_swapsi_1;
12821 else
12822 gen = gen_atomic_compare_and_swapdi_1;
12823 emit_insn (gen (retval, mem, oldval, newval));
12824
12825 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12826 if (x != bval)
12827 convert_move (bval, x, 1);
12828 }
12829
12830 sparc_emit_membar_for_model (model, 3, 2);
12831 }
12832
12833 void
12834 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12835 {
12836 rtx t_1, t_2, t_3;
12837
12838 sel = gen_lowpart (DImode, sel);
12839 switch (vmode)
12840 {
12841 case E_V2SImode:
12842 /* inp = xxxxxxxAxxxxxxxB */
12843 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12844 NULL_RTX, 1, OPTAB_DIRECT);
12845 /* t_1 = ....xxxxxxxAxxx. */
12846 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12847 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12848 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12849 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12850 /* sel = .......B */
12851 /* t_1 = ...A.... */
12852 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12853 /* sel = ...A...B */
12854 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12855 /* sel = AAAABBBB * 4 */
12856 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12857 /* sel = { A*4, A*4+1, A*4+2, ... } */
12858 break;
12859
12860 case E_V4HImode:
12861 /* inp = xxxAxxxBxxxCxxxD */
12862 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12863 NULL_RTX, 1, OPTAB_DIRECT);
12864 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12865 NULL_RTX, 1, OPTAB_DIRECT);
12866 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12867 NULL_RTX, 1, OPTAB_DIRECT);
12868 /* t_1 = ..xxxAxxxBxxxCxx */
12869 /* t_2 = ....xxxAxxxBxxxC */
12870 /* t_3 = ......xxxAxxxBxx */
12871 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12872 GEN_INT (0x07),
12873 NULL_RTX, 1, OPTAB_DIRECT);
12874 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12875 GEN_INT (0x0700),
12876 NULL_RTX, 1, OPTAB_DIRECT);
12877 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12878 GEN_INT (0x070000),
12879 NULL_RTX, 1, OPTAB_DIRECT);
12880 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12881 GEN_INT (0x07000000),
12882 NULL_RTX, 1, OPTAB_DIRECT);
12883 /* sel = .......D */
12884 /* t_1 = .....C.. */
12885 /* t_2 = ...B.... */
12886 /* t_3 = .A...... */
12887 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12888 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12889 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12890 /* sel = .A.B.C.D */
12891 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12892 /* sel = AABBCCDD * 2 */
12893 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12894 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12895 break;
12896
12897 case E_V8QImode:
12898 /* input = xAxBxCxDxExFxGxH */
12899 sel = expand_simple_binop (DImode, AND, sel,
12900 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12901 | 0x0f0f0f0f),
12902 NULL_RTX, 1, OPTAB_DIRECT);
12903 /* sel = .A.B.C.D.E.F.G.H */
12904 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12905 NULL_RTX, 1, OPTAB_DIRECT);
12906 /* t_1 = ..A.B.C.D.E.F.G. */
12907 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12908 NULL_RTX, 1, OPTAB_DIRECT);
12909 /* sel = .AABBCCDDEEFFGGH */
12910 sel = expand_simple_binop (DImode, AND, sel,
12911 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12912 | 0xff00ff),
12913 NULL_RTX, 1, OPTAB_DIRECT);
12914 /* sel = ..AB..CD..EF..GH */
12915 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12916 NULL_RTX, 1, OPTAB_DIRECT);
12917 /* t_1 = ....AB..CD..EF.. */
12918 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12919 NULL_RTX, 1, OPTAB_DIRECT);
12920 /* sel = ..ABABCDCDEFEFGH */
12921 sel = expand_simple_binop (DImode, AND, sel,
12922 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12923 NULL_RTX, 1, OPTAB_DIRECT);
12924 /* sel = ....ABCD....EFGH */
12925 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12926 NULL_RTX, 1, OPTAB_DIRECT);
12927 /* t_1 = ........ABCD.... */
12928 sel = gen_lowpart (SImode, sel);
12929 t_1 = gen_lowpart (SImode, t_1);
12930 break;
12931
12932 default:
12933 gcc_unreachable ();
12934 }
12935
12936 /* Always perform the final addition/merge within the bmask insn. */
12937 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
12938 }
12939
12940 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
12941
12942 static bool
12943 sparc_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
12944 rtx op1, const vec_perm_indices &sel)
12945 {
12946 if (!TARGET_VIS2)
12947 return false;
12948
12949 /* All permutes are supported. */
12950 if (!target)
12951 return true;
12952
12953 /* Force target-independent code to convert constant permutations on other
12954 modes down to V8QI. Rely on this to avoid the complexity of the byte
12955 order of the permutation. */
12956 if (vmode != V8QImode)
12957 return false;
12958
12959 unsigned int i, mask;
12960 for (i = mask = 0; i < 8; ++i)
12961 mask |= (sel[i] & 0xf) << (28 - i*4);
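/* Illustrative example (editor's note): the low-interleave permutation
   sel = { 0, 8, 1, 9, 2, 10, 3, 11 } packs into mask == 0x08192a3b, with
   element 0's selector in the top nibble and element 7's in the bottom
   nibble, which is the nibble order BMASK/BSHUFFLE expect.  */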
12962 rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
12963
12964 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
12965 emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
12966 return true;
12967 }
12968
12969 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12970
12971 static bool
12972 sparc_frame_pointer_required (void)
12973 {
12974 /* If the stack pointer is dynamically modified in the function, it cannot
12975 serve as the frame pointer. */
12976 if (cfun->calls_alloca)
12977 return true;
12978
12979 /* If the function receives nonlocal gotos, it needs to save the frame
12980 pointer in the nonlocal_goto_save_area object. */
12981 if (cfun->has_nonlocal_label)
12982 return true;
12983
12984 /* In flat mode, that's it. */
12985 if (TARGET_FLAT)
12986 return false;
12987
12988 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12989 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12990 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12991 }
12992
12993 /* The way this is structured, we can't eliminate SFP in favor of SP
12994 if the frame pointer is required: we want to use the SFP->HFP elimination
12995 in that case. But the test in update_eliminables doesn't know we are
12996 assuming below that we only do the former elimination. */
12997
12998 static bool
12999 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
13000 {
13001 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
13002 }
13003
13004 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
13005 they won't be allocated. */
13006
13007 static void
13008 sparc_conditional_register_usage (void)
13009 {
13010 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
13011 {
13012 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13013 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13014 }
13015 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
13016 then honor it. */
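/* Editorial note: judging from the FIXED_REGISTERS table in sparc.h, a
   value of 2 here appears to act as a sentinel meaning "not explicitly
   set by the user", so only in that case is the decision made by the
   code below.  */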
13017 if (TARGET_ARCH32 && fixed_regs[5])
13018 fixed_regs[5] = 1;
13019 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
13020 fixed_regs[5] = 0;
13021 if (! TARGET_V9)
13022 {
13023 int regno;
13024 for (regno = SPARC_FIRST_V9_FP_REG;
13025 regno <= SPARC_LAST_V9_FP_REG;
13026 regno++)
13027 fixed_regs[regno] = 1;
13028 /* %fcc0 is used by v8 and v9. */
13029 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
13030 regno <= SPARC_LAST_V9_FCC_REG;
13031 regno++)
13032 fixed_regs[regno] = 1;
13033 }
13034 if (! TARGET_FPU)
13035 {
13036 int regno;
13037 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
13038 fixed_regs[regno] = 1;
13039 }
13040 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
13041 then honor it. Likewise with g3 and g4. */
13042 if (fixed_regs[2] == 2)
13043 fixed_regs[2] = ! TARGET_APP_REGS;
13044 if (fixed_regs[3] == 2)
13045 fixed_regs[3] = ! TARGET_APP_REGS;
13046 if (TARGET_ARCH32 && fixed_regs[4] == 2)
13047 fixed_regs[4] = ! TARGET_APP_REGS;
13048 else if (TARGET_CM_EMBMEDANY)
13049 fixed_regs[4] = 1;
13050 else if (fixed_regs[4] == 2)
13051 fixed_regs[4] = 0;
13052 if (TARGET_FLAT)
13053 {
13054 int regno;
13055 /* Disable leaf functions. */
13056 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
13057 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
13058 leaf_reg_remap [regno] = regno;
13059 }
13060 if (TARGET_VIS)
13061 global_regs[SPARC_GSR_REG] = 1;
13062 }
13063
13064 /* Implement TARGET_USE_PSEUDO_PIC_REG. */
13065
13066 static bool
13067 sparc_use_pseudo_pic_reg (void)
13068 {
13069 return !TARGET_VXWORKS_RTP && flag_pic;
13070 }
13071
13072 /* Implement TARGET_INIT_PIC_REG. */
13073
13074 static void
13075 sparc_init_pic_reg (void)
13076 {
13077 edge entry_edge;
13078 rtx_insn *seq;
13079
13080 if (!crtl->uses_pic_offset_table)
13081 return;
13082
13083 start_sequence ();
13084 load_got_register ();
13085 if (!TARGET_VXWORKS_RTP)
13086 emit_move_insn (pic_offset_table_rtx, got_register_rtx);
13087 seq = get_insns ();
13088 end_sequence ();
13089
13090 entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13091 insert_insn_on_edge (seq, entry_edge);
13092 commit_one_edge_insertion (entry_edge);
13093 }
13094
13095 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
13096
13097 - We can't load constants into FP registers.
13098 - We can't load FP constants into integer registers when soft-float,
13099 because there is no soft-float pattern with a r/F constraint.
13100 - We can't load FP constants into integer registers for TFmode unless
13101 it is 0.0L, because there is no movtf pattern with a r/F constraint.
13102 - Try and reload integer constants (symbolic or otherwise) back into
13103 registers directly, rather than having them dumped to memory. */
13104
13105 static reg_class_t
13106 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
13107 {
13108 machine_mode mode = GET_MODE (x);
13109 if (CONSTANT_P (x))
13110 {
13111 if (FP_REG_CLASS_P (rclass)
13112 || rclass == GENERAL_OR_FP_REGS
13113 || rclass == GENERAL_OR_EXTRA_FP_REGS
13114 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
13115 || (mode == TFmode && ! const_zero_operand (x, mode)))
13116 return NO_REGS;
13117
13118 if (GET_MODE_CLASS (mode) == MODE_INT)
13119 return GENERAL_REGS;
13120
13121 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13122 {
13123 if (! FP_REG_CLASS_P (rclass)
13124 || !(const_zero_operand (x, mode)
13125 || const_all_ones_operand (x, mode)))
13126 return NO_REGS;
13127 }
13128 }
13129
13130 if (TARGET_VIS3
13131 && ! TARGET_ARCH64
13132 && (rclass == EXTRA_FP_REGS
13133 || rclass == GENERAL_OR_EXTRA_FP_REGS))
13134 {
13135 int regno = true_regnum (x);
13136
13137 if (SPARC_INT_REG_P (regno))
13138 return (rclass == EXTRA_FP_REGS
13139 ? FP_REGS : GENERAL_OR_FP_REGS);
13140 }
13141
13142 return rclass;
13143 }
13144
13145 /* Return true if we use LRA instead of the reload pass. */
13146
13147 static bool
13148 sparc_lra_p (void)
13149 {
13150 return TARGET_LRA;
13151 }
13152
13153 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
13154 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
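/* Reader's note (added by the editor): the %H and %L output modifiers
   below print the registers holding the high and low 32-bit halves of a
   64-bit operand, and %3/%4 are scratch registers supplied by the insn
   pattern.  The sllx/or sequences first assemble each 64-bit input from
   its two halves, the 64-bit multiply is issued, and the result is split
   back into halves with srlx and a move.  */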
13155
13156 const char *
13157 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
13158 {
13159 char mulstr[32];
13160
13161 gcc_assert (! TARGET_ARCH64);
13162
13163 if (sparc_check_64 (operands[1], insn) <= 0)
13164 output_asm_insn ("srl\t%L1, 0, %L1", operands);
13165 if (which_alternative == 1)
13166 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
13167 if (GET_CODE (operands[2]) == CONST_INT)
13168 {
13169 if (which_alternative == 1)
13170 {
13171 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13172 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
13173 output_asm_insn (mulstr, operands);
13174 return "srlx\t%L0, 32, %H0";
13175 }
13176 else
13177 {
13178 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13179 output_asm_insn ("or\t%L1, %3, %3", operands);
13180 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
13181 output_asm_insn (mulstr, operands);
13182 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13183 return "mov\t%3, %L0";
13184 }
13185 }
13186 else if (rtx_equal_p (operands[1], operands[2]))
13187 {
13188 if (which_alternative == 1)
13189 {
13190 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13191 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
13192 output_asm_insn (mulstr, operands);
13193 return "srlx\t%L0, 32, %H0";
13194 }
13195 else
13196 {
13197 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13198 output_asm_insn ("or\t%L1, %3, %3", operands);
13199 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
13200 output_asm_insn (mulstr, operands);
13201 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13202 return "mov\t%3, %L0";
13203 }
13204 }
13205 if (sparc_check_64 (operands[2], insn) <= 0)
13206 output_asm_insn ("srl\t%L2, 0, %L2", operands);
13207 if (which_alternative == 1)
13208 {
13209 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13210 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
13211 output_asm_insn ("or\t%L2, %L1, %L1", operands);
13212 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
13213 output_asm_insn (mulstr, operands);
13214 return "srlx\t%L0, 32, %H0";
13215 }
13216 else
13217 {
13218 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13219 output_asm_insn ("sllx\t%H2, 32, %4", operands);
13220 output_asm_insn ("or\t%L1, %3, %3", operands);
13221 output_asm_insn ("or\t%L2, %4, %4", operands);
13222 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
13223 output_asm_insn (mulstr, operands);
13224 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13225 return "mov\t%3, %L0";
13226 }
13227 }
13228
13229 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13230 all fields of TARGET to ELT by means of the VIS2 BSHUFFLE insn. MODE
13231 and INNER_MODE are the modes describing TARGET. */
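/* Editorial sketch of the mechanism: BMASK deposits an 8-nibble byte
   selector in %gsr and BSHUFFLE then builds each result byte from the
   16-byte concatenation of its two (here identical) source registers.
   ELT is placed in the low 32 bits of T1, i.e. bytes 4-7 of the 64-bit
   register, so the constants below (0x45674567, 0x67676767, 0x77777777)
   simply replicate the bytes occupied by one element across the whole
   vector.  */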
13232
13233 static void
13234 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
13235 machine_mode inner_mode)
13236 {
13237 rtx t1, final_insn, sel;
13238 int bmask;
13239
13240 t1 = gen_reg_rtx (mode);
13241
13242 elt = convert_modes (SImode, inner_mode, elt, true);
13243 emit_move_insn (gen_lowpart (SImode, t1), elt);
13244
13245 switch (mode)
13246 {
13247 case E_V2SImode:
13248 final_insn = gen_bshufflev2si_vis (target, t1, t1);
13249 bmask = 0x45674567;
13250 break;
13251 case E_V4HImode:
13252 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
13253 bmask = 0x67676767;
13254 break;
13255 case E_V8QImode:
13256 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
13257 bmask = 0x77777777;
13258 break;
13259 default:
13260 gcc_unreachable ();
13261 }
13262
13263 sel = force_reg (SImode, GEN_INT (bmask));
13264 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
13265 emit_insn (final_insn);
13266 }
13267
13268 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13269 all fields of TARGET to ELT in V8QI by means of the VIS FPMERGE insn. */
13270
13271 static void
13272 vector_init_fpmerge (rtx target, rtx elt)
13273 {
13274 rtx t1, t2, t2_low, t3, t3_low;
13275
13276 t1 = gen_reg_rtx (V4QImode);
13277 elt = convert_modes (SImode, QImode, elt, true);
13278 emit_move_insn (gen_lowpart (SImode, t1), elt);
13279
13280 t2 = gen_reg_rtx (V8QImode);
13281 t2_low = gen_lowpart (V4QImode, t2);
13282 emit_insn (gen_fpmerge_vis (t2, t1, t1));
13283
13284 t3 = gen_reg_rtx (V8QImode);
13285 t3_low = gen_lowpart (V4QImode, t3);
13286 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
13287
13288 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
13289 }
13290
13291 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13292 all fields of TARGET to ELT in V4HI by means of the VIS FALIGNDATA insn. */
13293
13294 static void
13295 vector_init_faligndata (rtx target, rtx elt)
13296 {
13297 rtx t1 = gen_reg_rtx (V4HImode);
13298 int i;
13299
13300 elt = convert_modes (SImode, HImode, elt, true);
13301 emit_move_insn (gen_lowpart (SImode, t1), elt);
13302
13303 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
13304 force_reg (SImode, GEN_INT (6)),
13305 const0_rtx));
13306
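/* Explanatory note added by the editor: the ALIGNADDR above sets the GSR
   alignment field to 6, so each FALIGNDATA extracts 8 bytes starting at
   byte 6 of the 16-byte concatenation {T1, TARGET}, i.e. it prepends the
   element (bytes 6-7 of T1) and shifts the previous TARGET contents right
   by one half-word.  Four iterations therefore fill all four half-words
   with ELT.  */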
13307 for (i = 0; i < 4; i++)
13308 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
13309 }
13310
13311 /* Emit code to initialize TARGET to values for individual fields VALS. */
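/* Outline of the cases handled below (editor's summary): fully constant
   vectors become a single CONST_VECTOR move; single-element vectors and
   pairs of word-sized elements are handled with plain word moves; 8-byte
   vectors whose elements are all identical use the VIS2 BSHUFFLE or VIS
   FPMERGE/FALIGNDATA helpers above; everything else is assembled in a
   stack temporary and loaded as a whole.  */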
13312
13313 void
13314 sparc_expand_vector_init (rtx target, rtx vals)
13315 {
13316 const machine_mode mode = GET_MODE (target);
13317 const machine_mode inner_mode = GET_MODE_INNER (mode);
13318 const int n_elts = GET_MODE_NUNITS (mode);
13319 int i, n_var = 0;
13320 bool all_same = true;
13321 rtx mem;
13322
13323 for (i = 0; i < n_elts; i++)
13324 {
13325 rtx x = XVECEXP (vals, 0, i);
13326 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
13327 n_var++;
13328
13329 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
13330 all_same = false;
13331 }
13332
13333 if (n_var == 0)
13334 {
13335 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
13336 return;
13337 }
13338
13339 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
13340 {
13341 if (GET_MODE_SIZE (inner_mode) == 4)
13342 {
13343 emit_move_insn (gen_lowpart (SImode, target),
13344 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
13345 return;
13346 }
13347 else if (GET_MODE_SIZE (inner_mode) == 8)
13348 {
13349 emit_move_insn (gen_lowpart (DImode, target),
13350 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
13351 return;
13352 }
13353 }
13354 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
13355 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
13356 {
13357 emit_move_insn (gen_highpart (word_mode, target),
13358 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
13359 emit_move_insn (gen_lowpart (word_mode, target),
13360 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
13361 return;
13362 }
13363
13364 if (all_same && GET_MODE_SIZE (mode) == 8)
13365 {
13366 if (TARGET_VIS2)
13367 {
13368 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
13369 return;
13370 }
13371 if (mode == V8QImode)
13372 {
13373 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
13374 return;
13375 }
13376 if (mode == V4HImode)
13377 {
13378 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
13379 return;
13380 }
13381 }
13382
13383 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
13384 for (i = 0; i < n_elts; i++)
13385 emit_move_insn (adjust_address_nv (mem, inner_mode,
13386 i * GET_MODE_SIZE (inner_mode)),
13387 XVECEXP (vals, 0, i));
13388 emit_move_insn (target, mem);
13389 }
13390
13391 /* Implement TARGET_SECONDARY_RELOAD. */
13392
13393 static reg_class_t
13394 sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
13395 machine_mode mode, secondary_reload_info *sri)
13396 {
13397 enum reg_class rclass = (enum reg_class) rclass_i;
13398
13399 sri->icode = CODE_FOR_nothing;
13400 sri->extra_cost = 0;
13401
13402 /* We need a temporary when loading/storing a HImode/QImode value
13403 between memory and the FPU registers. This can happen when combine puts
13404 a paradoxical subreg in a float/fix conversion insn. */
13405 if (FP_REG_CLASS_P (rclass)
13406 && (mode == HImode || mode == QImode)
13407 && (GET_CODE (x) == MEM
13408 || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
13409 && true_regnum (x) == -1)))
13410 return GENERAL_REGS;
13411
13412 /* On 32-bit we need a temporary when loading/storing a DFmode value
13413 between unaligned memory and the upper FPU registers. */
13414 if (TARGET_ARCH32
13415 && rclass == EXTRA_FP_REGS
13416 && mode == DFmode
13417 && GET_CODE (x) == MEM
13418 && ! mem_min_alignment (x, 8))
13419 return FP_REGS;
13420
13421 if (((TARGET_CM_MEDANY
13422 && symbolic_operand (x, mode))
13423 || (TARGET_CM_EMBMEDANY
13424 && text_segment_operand (x, mode)))
13425 && ! flag_pic)
13426 {
13427 if (in_p)
13428 sri->icode = direct_optab_handler (reload_in_optab, mode);
13429 else
13430 sri->icode = direct_optab_handler (reload_out_optab, mode);
13431 return NO_REGS;
13432 }
13433
13434 if (TARGET_VIS3 && TARGET_ARCH32)
13435 {
13436 int regno = true_regnum (x);
13437
13438 /* When using VIS3 fp<-->int register moves, on 32-bit we have
13439 to move 8-byte values in 4-byte pieces. This only works via
13440 FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
13441 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
13442 an FP_REGS intermediate move. */
13443 if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
13444 || ((general_or_i64_p (rclass)
13445 || rclass == GENERAL_OR_FP_REGS)
13446 && SPARC_FP_REG_P (regno)))
13447 {
13448 sri->extra_cost = 2;
13449 return FP_REGS;
13450 }
13451 }
13452
13453 return NO_REGS;
13454 }
13455
13456 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
13457
13458 On SPARC, without VIS3 it is not possible to move data directly
13459 between GENERAL_REGS and FP_REGS. */
13460
13461 static bool
13462 sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
13463 reg_class_t class2)
13464 {
13465 return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
13466 && (! TARGET_VIS3
13467 || GET_MODE_SIZE (mode) > 8
13468 || GET_MODE_SIZE (mode) < 4));
13469 }
13470
13471 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
13472
13473 get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
13474 because the movsi and movsf patterns don't handle r/f moves.
13475 For v8 we copy the default definition. */
13476
13477 static machine_mode
13478 sparc_secondary_memory_needed_mode (machine_mode mode)
13479 {
13480 if (TARGET_ARCH64)
13481 {
13482 if (GET_MODE_BITSIZE (mode) < 32)
13483 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
13484 return mode;
13485 }
13486 else
13487 {
13488 if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
13489 return mode_for_size (BITS_PER_WORD,
13490 GET_MODE_CLASS (mode), 0).require ();
13491 return mode;
13492 }
13493 }
13494
13495 /* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
13496 OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
13497
13498 bool
13499 sparc_expand_conditional_move (machine_mode mode, rtx *operands)
13500 {
13501 enum rtx_code rc = GET_CODE (operands[1]);
13502 machine_mode cmp_mode;
13503 rtx cc_reg, dst, cmp;
13504
13505 cmp = operands[1];
13506 if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
13507 return false;
13508
13509 if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
13510 cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
13511
13512 cmp_mode = GET_MODE (XEXP (cmp, 0));
13513 rc = GET_CODE (cmp);
13514
13515 dst = operands[0];
13516 if (! rtx_equal_p (operands[2], dst)
13517 && ! rtx_equal_p (operands[3], dst))
13518 {
13519 if (reg_overlap_mentioned_p (dst, cmp))
13520 dst = gen_reg_rtx (mode);
13521
13522 emit_move_insn (dst, operands[3]);
13523 }
13524 else if (operands[2] == dst)
13525 {
13526 operands[2] = operands[3];
13527
13528 if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
13529 rc = reverse_condition_maybe_unordered (rc);
13530 else
13531 rc = reverse_condition (rc);
13532 }
13533
13534 if (XEXP (cmp, 1) == const0_rtx
13535 && GET_CODE (XEXP (cmp, 0)) == REG
13536 && cmp_mode == DImode
13537 && v9_regcmp_p (rc))
13538 cc_reg = XEXP (cmp, 0);
13539 else
13540 cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
13541
13542 cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
13543
13544 emit_insn (gen_rtx_SET (dst,
13545 gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
13546
13547 if (dst != operands[0])
13548 emit_move_insn (operands[0], dst);
13549
13550 return true;
13551 }
13552
13553 /* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
13554 into OPERANDS[0] in MODE, depending on the outcome of the comparison of
13555 OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
13556 FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
13557 code to be used for the condition mask. */
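/* In outline (editorial note): the FCODE unspec is the VIS comparison
   that produces a per-element mask, the CCODE unspec converts that mask
   into a byte selector in %gsr, and the final BSHUFFLE then picks each
   element of the result from OPERANDS[1] or OPERANDS[2] according to
   that selector.  */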
13558
13559 void
13560 sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
13561 {
13562 rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
13563 enum rtx_code code = GET_CODE (operands[3]);
13564
13565 mask = gen_reg_rtx (Pmode);
13566 cop0 = operands[4];
13567 cop1 = operands[5];
13568 if (code == LT || code == GE)
13569 {
13570 rtx t;
13571
13572 code = swap_condition (code);
13573 t = cop0; cop0 = cop1; cop1 = t;
13574 }
13575
13576 gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
13577
13578 fcmp = gen_rtx_UNSPEC (Pmode,
13579 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
13580 fcode);
13581
13582 cmask = gen_rtx_UNSPEC (DImode,
13583 gen_rtvec (2, mask, gsr),
13584 ccode);
13585
13586 bshuf = gen_rtx_UNSPEC (mode,
13587 gen_rtvec (3, operands[1], operands[2], gsr),
13588 UNSPEC_BSHUFFLE);
13589
13590 emit_insn (gen_rtx_SET (mask, fcmp));
13591 emit_insn (gen_rtx_SET (gsr, cmask));
13592
13593 emit_insn (gen_rtx_SET (operands[0], bshuf));
13594 }
13595
13596 /* On sparc, any mode which naturally allocates into the float
13597 registers should return 4 here. */
13598
13599 unsigned int
13600 sparc_regmode_natural_size (machine_mode mode)
13601 {
13602 int size = UNITS_PER_WORD;
13603
13604 if (TARGET_ARCH64)
13605 {
13606 enum mode_class mclass = GET_MODE_CLASS (mode);
13607
13608 if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
13609 size = 4;
13610 }
13611
13612 return size;
13613 }
13614
13615 /* Implement TARGET_HARD_REGNO_NREGS.
13616
13617 On SPARC, ordinary registers hold 32 bits worth; this means both
13618 integer and floating point registers. On v9, integer regs hold 64
13619 bits worth; floating point regs hold 32 bits worth (this includes the
13620 new fp regs as even the odd ones are included in the hard register
13621 count). */
13622
13623 static unsigned int
13624 sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
13625 {
13626 if (regno == SPARC_GSR_REG)
13627 return 1;
13628 if (TARGET_ARCH64)
13629 {
13630 if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
13631 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
13632 return CEIL (GET_MODE_SIZE (mode), 4);
13633 }
13634 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
13635 }
13636
13637 /* Implement TARGET_HARD_REGNO_MODE_OK.
13638
13639 ??? Because of the funny way we pass parameters we should allow certain
13640 ??? types of float/complex values to be in integer registers during
13641 ??? RTL generation. This only matters on arch32. */
13642
13643 static bool
13644 sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
13645 {
13646 return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
13647 }
13648
13649 /* Implement TARGET_MODES_TIEABLE_P.
13650
13651 For V9 we have to deal with the fact that only the lower 32 floating
13652 point registers are 32-bit addressable. */
13653
13654 static bool
13655 sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
13656 {
13657 enum mode_class mclass1, mclass2;
13658 unsigned short size1, size2;
13659
13660 if (mode1 == mode2)
13661 return true;
13662
13663 mclass1 = GET_MODE_CLASS (mode1);
13664 mclass2 = GET_MODE_CLASS (mode2);
13665 if (mclass1 != mclass2)
13666 return false;
13667
13668 if (! TARGET_V9)
13669 return true;
13670
13671 /* Classes are the same and we are V9 so we have to deal with upper
13672 vs. lower floating point registers. If one of the modes is a
13673 4-byte mode, and the other is not, we have to mark them as not
13674 tieable because only the lower 32 floating point registers are
13675 addressable 32 bits at a time.
13676
13677 We can't just test explicitly for SFmode, otherwise we won't
13678 cover the vector mode cases properly. */
13679
13680 if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
13681 return true;
13682
13683 size1 = GET_MODE_SIZE (mode1);
13684 size2 = GET_MODE_SIZE (mode2);
13685 if ((size1 > 4 && size2 == 4)
13686 || (size2 > 4 && size1 == 4))
13687 return false;
13688
13689 return true;
13690 }
13691
13692 /* Implement TARGET_CSTORE_MODE. */
13693
13694 static scalar_int_mode
13695 sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
13696 {
13697 return (TARGET_ARCH64 ? DImode : SImode);
13698 }
13699
13700 /* Return the compound expression made of T1 and T2. */
13701
13702 static inline tree
13703 compound_expr (tree t1, tree t2)
13704 {
13705 return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
13706 }
13707
13708 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13709
13710 static void
13711 sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13712 {
13713 if (!TARGET_FPU)
13714 return;
13715
13716 const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
13717 const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
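/* Layout note added for clarity: in the FSR, cexc occupies bits 4:0,
   aexc bits 9:5 and TEM bits 27:23, so the two masks above select the
   accrued-exception and trap-enable fields respectively.  */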
13718
13719 /* We generate the equivalent of feholdexcept (&fenv_var):
13720
13721 unsigned int fenv_var;
13722 __builtin_store_fsr (&fenv_var);
13723
13724 unsigned int tmp1_var;
13725 tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
13726
13727 __builtin_load_fsr (&tmp1_var); */
13728
13729 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
13730 TREE_ADDRESSABLE (fenv_var) = 1;
13731 tree fenv_addr = build_fold_addr_expr (fenv_var);
13732 tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
13733 tree hold_stfsr
13734 = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
13735 build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);
13736
13737 tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
13738 TREE_ADDRESSABLE (tmp1_var) = 1;
13739 tree masked_fenv_var
13740 = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13741 build_int_cst (unsigned_type_node,
13742 ~(accrued_exception_mask | trap_enable_mask)));
13743 tree hold_mask
13744 = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
13745 NULL_TREE, NULL_TREE);
13746
13747 tree tmp1_addr = build_fold_addr_expr (tmp1_var);
13748 tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
13749 tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
13750
13751 *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
13752
13753 /* We reload the value of tmp1_var to clear the exceptions:
13754
13755 __builtin_load_fsr (&tmp1_var); */
13756
13757 *clear = build_call_expr (ldfsr, 1, tmp1_addr);
13758
13759 /* We generate the equivalent of feupdateenv (&fenv_var):
13760
13761 unsigned int tmp2_var;
13762 __builtin_store_fsr (&tmp2_var);
13763
13764 __builtin_load_fsr (&fenv_var);
13765
13766 if (SPARC_LOW_FE_EXCEPT_VALUES)
13767 tmp2_var >>= 5;
13768 __atomic_feraiseexcept ((int) tmp2_var); */
13769
13770 tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
13771 TREE_ADDRESSABLE (tmp2_var) = 1;
13772 tree tmp2_addr = build_fold_addr_expr (tmp2_var);
13773 tree update_stfsr
13774 = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
13775 build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);
13776
13777 tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
13778
13779 tree atomic_feraiseexcept
13780 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13781 tree update_call
13782 = build_call_expr (atomic_feraiseexcept, 1,
13783 fold_convert (integer_type_node, tmp2_var));
13784
13785 if (SPARC_LOW_FE_EXCEPT_VALUES)
13786 {
13787 tree shifted_tmp2_var
13788 = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
13789 build_int_cst (unsigned_type_node, 5));
13790 tree update_shift
13791 = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
13792 update_call = compound_expr (update_shift, update_call);
13793 }
13794
13795 *update
13796 = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
13797 }
13798
13799 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. Borrowed from the PA port.
13800
13801 SImode loads to floating-point registers are not zero-extended.
13802 The definition for LOAD_EXTEND_OP specifies that integer loads
13803 narrower than BITS_PER_WORD will be zero-extended. As a result,
13804 we inhibit changes from SImode unless they are to a mode that is
13805 identical in size.
13806
13807 Likewise for SFmode, since word-mode paradoxical subregs are
13808 problematic on big-endian architectures. */
13809
13810 static bool
13811 sparc_can_change_mode_class (machine_mode from, machine_mode to,
13812 reg_class_t rclass)
13813 {
13814 if (TARGET_ARCH64
13815 && GET_MODE_SIZE (from) == 4
13816 && GET_MODE_SIZE (to) != 4)
13817 return !reg_classes_intersect_p (rclass, FP_REGS);
13818 return true;
13819 }
13820
13821 /* Implement TARGET_CONSTANT_ALIGNMENT. */
13822
13823 static HOST_WIDE_INT
13824 sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
13825 {
13826 if (TREE_CODE (exp) == STRING_CST)
13827 return MAX (align, FASTEST_ALIGNMENT);
13828 return align;
13829 }
13830
13831 #include "gt-sparc.h"