1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
5 at Cygnus Support.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "target.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "memmodel.h"
33 #include "gimple.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38 #include "expmed.h"
39 #include "optabs.h"
40 #include "regs.h"
41 #include "emit-rtl.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "alias.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "varasm.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "explow.h"
52 #include "expr.h"
53 #include "debug.h"
54 #include "cfgrtl.h"
55 #include "common/common-target.h"
56 #include "gimplify.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "params.h"
60 #include "tree-pass.h"
61 #include "context.h"
62 #include "builtins.h"
63 #include "tree-vector-builder.h"
64
65 /* This file should be included last. */
66 #include "target-def.h"
67
68 /* Processor costs */
69
70 struct processor_costs {
71 /* Integer load */
72 const int int_load;
73
74 /* Integer signed load */
75 const int int_sload;
76
77 /* Integer zeroed load */
78 const int int_zload;
79
80 /* Float load */
81 const int float_load;
82
83 /* fmov, fneg, fabs */
84 const int float_move;
85
86 /* fadd, fsub */
87 const int float_plusminus;
88
89 /* fcmp */
90 const int float_cmp;
91
92 /* fmov, fmovr */
93 const int float_cmove;
94
95 /* fmul */
96 const int float_mul;
97
98 /* fdivs */
99 const int float_div_sf;
100
101 /* fdivd */
102 const int float_div_df;
103
104 /* fsqrts */
105 const int float_sqrt_sf;
106
107 /* fsqrtd */
108 const int float_sqrt_df;
109
110 /* umul/smul */
111 const int int_mul;
112
113 /* mulX */
114 const int int_mulX;
115
116 /* integer multiply cost for each bit set past the most
117 significant 3, so the formula for multiply cost becomes:
118
119 if (rs1 < 0)
120 highest_bit = highest_clear_bit(rs1);
121 else
122 highest_bit = highest_set_bit(rs1);
123 if (highest_bit < 3)
124 highest_bit = 3;
125 cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
126
127 A value of zero indicates that the multiply cost is fixed,
128 not variable. */
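/* Illustrative example (not in the original source): on a CPU whose
int_mul_bit_factor is 2, such as the ultrasparc entry below, a multiplier
whose highest set bit is bit 9 costs int_mul + (9 - 3) / 2, i.e. int_mul
plus 3 extra cost units. */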
129 const int int_mul_bit_factor;
130
131 /* udiv/sdiv */
132 const int int_div;
133
134 /* divX */
135 const int int_divX;
136
137 /* movcc, movr */
138 const int int_cmove;
139
140 /* penalty for shifts, due to scheduling rules etc. */
141 const int shift_penalty;
142
143 /* cost of a (predictable) branch. */
144 const int branch_cost;
145 };
146
147 static const
148 struct processor_costs cypress_costs = {
149 COSTS_N_INSNS (2), /* int load */
150 COSTS_N_INSNS (2), /* int signed load */
151 COSTS_N_INSNS (2), /* int zeroed load */
152 COSTS_N_INSNS (2), /* float load */
153 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
154 COSTS_N_INSNS (5), /* fadd, fsub */
155 COSTS_N_INSNS (1), /* fcmp */
156 COSTS_N_INSNS (1), /* fmov, fmovr */
157 COSTS_N_INSNS (7), /* fmul */
158 COSTS_N_INSNS (37), /* fdivs */
159 COSTS_N_INSNS (37), /* fdivd */
160 COSTS_N_INSNS (63), /* fsqrts */
161 COSTS_N_INSNS (63), /* fsqrtd */
162 COSTS_N_INSNS (1), /* imul */
163 COSTS_N_INSNS (1), /* imulX */
164 0, /* imul bit factor */
165 COSTS_N_INSNS (1), /* idiv */
166 COSTS_N_INSNS (1), /* idivX */
167 COSTS_N_INSNS (1), /* movcc/movr */
168 0, /* shift penalty */
169 3 /* branch cost */
170 };
171
172 static const
173 struct processor_costs supersparc_costs = {
174 COSTS_N_INSNS (1), /* int load */
175 COSTS_N_INSNS (1), /* int signed load */
176 COSTS_N_INSNS (1), /* int zeroed load */
177 COSTS_N_INSNS (0), /* float load */
178 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
179 COSTS_N_INSNS (3), /* fadd, fsub */
180 COSTS_N_INSNS (3), /* fcmp */
181 COSTS_N_INSNS (1), /* fmov, fmovr */
182 COSTS_N_INSNS (3), /* fmul */
183 COSTS_N_INSNS (6), /* fdivs */
184 COSTS_N_INSNS (9), /* fdivd */
185 COSTS_N_INSNS (12), /* fsqrts */
186 COSTS_N_INSNS (12), /* fsqrtd */
187 COSTS_N_INSNS (4), /* imul */
188 COSTS_N_INSNS (4), /* imulX */
189 0, /* imul bit factor */
190 COSTS_N_INSNS (4), /* idiv */
191 COSTS_N_INSNS (4), /* idivX */
192 COSTS_N_INSNS (1), /* movcc/movr */
193 1, /* shift penalty */
194 3 /* branch cost */
195 };
196
197 static const
198 struct processor_costs hypersparc_costs = {
199 COSTS_N_INSNS (1), /* int load */
200 COSTS_N_INSNS (1), /* int signed load */
201 COSTS_N_INSNS (1), /* int zeroed load */
202 COSTS_N_INSNS (1), /* float load */
203 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
204 COSTS_N_INSNS (1), /* fadd, fsub */
205 COSTS_N_INSNS (1), /* fcmp */
206 COSTS_N_INSNS (1), /* fmov, fmovr */
207 COSTS_N_INSNS (1), /* fmul */
208 COSTS_N_INSNS (8), /* fdivs */
209 COSTS_N_INSNS (12), /* fdivd */
210 COSTS_N_INSNS (17), /* fsqrts */
211 COSTS_N_INSNS (17), /* fsqrtd */
212 COSTS_N_INSNS (17), /* imul */
213 COSTS_N_INSNS (17), /* imulX */
214 0, /* imul bit factor */
215 COSTS_N_INSNS (17), /* idiv */
216 COSTS_N_INSNS (17), /* idivX */
217 COSTS_N_INSNS (1), /* movcc/movr */
218 0, /* shift penalty */
219 3 /* branch cost */
220 };
221
222 static const
223 struct processor_costs leon_costs = {
224 COSTS_N_INSNS (1), /* int load */
225 COSTS_N_INSNS (1), /* int signed load */
226 COSTS_N_INSNS (1), /* int zeroed load */
227 COSTS_N_INSNS (1), /* float load */
228 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
229 COSTS_N_INSNS (1), /* fadd, fsub */
230 COSTS_N_INSNS (1), /* fcmp */
231 COSTS_N_INSNS (1), /* fmov, fmovr */
232 COSTS_N_INSNS (1), /* fmul */
233 COSTS_N_INSNS (15), /* fdivs */
234 COSTS_N_INSNS (15), /* fdivd */
235 COSTS_N_INSNS (23), /* fsqrts */
236 COSTS_N_INSNS (23), /* fsqrtd */
237 COSTS_N_INSNS (5), /* imul */
238 COSTS_N_INSNS (5), /* imulX */
239 0, /* imul bit factor */
240 COSTS_N_INSNS (5), /* idiv */
241 COSTS_N_INSNS (5), /* idivX */
242 COSTS_N_INSNS (1), /* movcc/movr */
243 0, /* shift penalty */
244 3 /* branch cost */
245 };
246
247 static const
248 struct processor_costs leon3_costs = {
249 COSTS_N_INSNS (1), /* int load */
250 COSTS_N_INSNS (1), /* int signed load */
251 COSTS_N_INSNS (1), /* int zeroed load */
252 COSTS_N_INSNS (1), /* float load */
253 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
254 COSTS_N_INSNS (1), /* fadd, fsub */
255 COSTS_N_INSNS (1), /* fcmp */
256 COSTS_N_INSNS (1), /* fmov, fmovr */
257 COSTS_N_INSNS (1), /* fmul */
258 COSTS_N_INSNS (14), /* fdivs */
259 COSTS_N_INSNS (15), /* fdivd */
260 COSTS_N_INSNS (22), /* fsqrts */
261 COSTS_N_INSNS (23), /* fsqrtd */
262 COSTS_N_INSNS (5), /* imul */
263 COSTS_N_INSNS (5), /* imulX */
264 0, /* imul bit factor */
265 COSTS_N_INSNS (35), /* idiv */
266 COSTS_N_INSNS (35), /* idivX */
267 COSTS_N_INSNS (1), /* movcc/movr */
268 0, /* shift penalty */
269 3 /* branch cost */
270 };
271
272 static const
273 struct processor_costs sparclet_costs = {
274 COSTS_N_INSNS (3), /* int load */
275 COSTS_N_INSNS (3), /* int signed load */
276 COSTS_N_INSNS (1), /* int zeroed load */
277 COSTS_N_INSNS (1), /* float load */
278 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
279 COSTS_N_INSNS (1), /* fadd, fsub */
280 COSTS_N_INSNS (1), /* fcmp */
281 COSTS_N_INSNS (1), /* fmov, fmovr */
282 COSTS_N_INSNS (1), /* fmul */
283 COSTS_N_INSNS (1), /* fdivs */
284 COSTS_N_INSNS (1), /* fdivd */
285 COSTS_N_INSNS (1), /* fsqrts */
286 COSTS_N_INSNS (1), /* fsqrtd */
287 COSTS_N_INSNS (5), /* imul */
288 COSTS_N_INSNS (5), /* imulX */
289 0, /* imul bit factor */
290 COSTS_N_INSNS (5), /* idiv */
291 COSTS_N_INSNS (5), /* idivX */
292 COSTS_N_INSNS (1), /* movcc/movr */
293 0, /* shift penalty */
294 3 /* branch cost */
295 };
296
297 static const
298 struct processor_costs ultrasparc_costs = {
299 COSTS_N_INSNS (2), /* int load */
300 COSTS_N_INSNS (3), /* int signed load */
301 COSTS_N_INSNS (2), /* int zeroed load */
302 COSTS_N_INSNS (2), /* float load */
303 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
304 COSTS_N_INSNS (4), /* fadd, fsub */
305 COSTS_N_INSNS (1), /* fcmp */
306 COSTS_N_INSNS (2), /* fmov, fmovr */
307 COSTS_N_INSNS (4), /* fmul */
308 COSTS_N_INSNS (13), /* fdivs */
309 COSTS_N_INSNS (23), /* fdivd */
310 COSTS_N_INSNS (13), /* fsqrts */
311 COSTS_N_INSNS (23), /* fsqrtd */
312 COSTS_N_INSNS (4), /* imul */
313 COSTS_N_INSNS (4), /* imulX */
314 2, /* imul bit factor */
315 COSTS_N_INSNS (37), /* idiv */
316 COSTS_N_INSNS (68), /* idivX */
317 COSTS_N_INSNS (2), /* movcc/movr */
318 2, /* shift penalty */
319 2 /* branch cost */
320 };
321
322 static const
323 struct processor_costs ultrasparc3_costs = {
324 COSTS_N_INSNS (2), /* int load */
325 COSTS_N_INSNS (3), /* int signed load */
326 COSTS_N_INSNS (3), /* int zeroed load */
327 COSTS_N_INSNS (2), /* float load */
328 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
329 COSTS_N_INSNS (4), /* fadd, fsub */
330 COSTS_N_INSNS (5), /* fcmp */
331 COSTS_N_INSNS (3), /* fmov, fmovr */
332 COSTS_N_INSNS (4), /* fmul */
333 COSTS_N_INSNS (17), /* fdivs */
334 COSTS_N_INSNS (20), /* fdivd */
335 COSTS_N_INSNS (20), /* fsqrts */
336 COSTS_N_INSNS (29), /* fsqrtd */
337 COSTS_N_INSNS (6), /* imul */
338 COSTS_N_INSNS (6), /* imulX */
339 0, /* imul bit factor */
340 COSTS_N_INSNS (40), /* idiv */
341 COSTS_N_INSNS (71), /* idivX */
342 COSTS_N_INSNS (2), /* movcc/movr */
343 0, /* shift penalty */
344 2 /* branch cost */
345 };
346
347 static const
348 struct processor_costs niagara_costs = {
349 COSTS_N_INSNS (3), /* int load */
350 COSTS_N_INSNS (3), /* int signed load */
351 COSTS_N_INSNS (3), /* int zeroed load */
352 COSTS_N_INSNS (9), /* float load */
353 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
354 COSTS_N_INSNS (8), /* fadd, fsub */
355 COSTS_N_INSNS (26), /* fcmp */
356 COSTS_N_INSNS (8), /* fmov, fmovr */
357 COSTS_N_INSNS (29), /* fmul */
358 COSTS_N_INSNS (54), /* fdivs */
359 COSTS_N_INSNS (83), /* fdivd */
360 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
361 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
362 COSTS_N_INSNS (11), /* imul */
363 COSTS_N_INSNS (11), /* imulX */
364 0, /* imul bit factor */
365 COSTS_N_INSNS (72), /* idiv */
366 COSTS_N_INSNS (72), /* idivX */
367 COSTS_N_INSNS (1), /* movcc/movr */
368 0, /* shift penalty */
369 4 /* branch cost */
370 };
371
372 static const
373 struct processor_costs niagara2_costs = {
374 COSTS_N_INSNS (3), /* int load */
375 COSTS_N_INSNS (3), /* int signed load */
376 COSTS_N_INSNS (3), /* int zeroed load */
377 COSTS_N_INSNS (3), /* float load */
378 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
379 COSTS_N_INSNS (6), /* fadd, fsub */
380 COSTS_N_INSNS (6), /* fcmp */
381 COSTS_N_INSNS (6), /* fmov, fmovr */
382 COSTS_N_INSNS (6), /* fmul */
383 COSTS_N_INSNS (19), /* fdivs */
384 COSTS_N_INSNS (33), /* fdivd */
385 COSTS_N_INSNS (19), /* fsqrts */
386 COSTS_N_INSNS (33), /* fsqrtd */
387 COSTS_N_INSNS (5), /* imul */
388 COSTS_N_INSNS (5), /* imulX */
389 0, /* imul bit factor */
390 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
391 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
392 COSTS_N_INSNS (1), /* movcc/movr */
393 0, /* shift penalty */
394 5 /* branch cost */
395 };
396
397 static const
398 struct processor_costs niagara3_costs = {
399 COSTS_N_INSNS (3), /* int load */
400 COSTS_N_INSNS (3), /* int signed load */
401 COSTS_N_INSNS (3), /* int zeroed load */
402 COSTS_N_INSNS (3), /* float load */
403 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
404 COSTS_N_INSNS (9), /* fadd, fsub */
405 COSTS_N_INSNS (9), /* fcmp */
406 COSTS_N_INSNS (9), /* fmov, fmovr */
407 COSTS_N_INSNS (9), /* fmul */
408 COSTS_N_INSNS (23), /* fdivs */
409 COSTS_N_INSNS (37), /* fdivd */
410 COSTS_N_INSNS (23), /* fsqrts */
411 COSTS_N_INSNS (37), /* fsqrtd */
412 COSTS_N_INSNS (9), /* imul */
413 COSTS_N_INSNS (9), /* imulX */
414 0, /* imul bit factor */
415 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
416 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
417 COSTS_N_INSNS (1), /* movcc/movr */
418 0, /* shift penalty */
419 5 /* branch cost */
420 };
421
422 static const
423 struct processor_costs niagara4_costs = {
424 COSTS_N_INSNS (5), /* int load */
425 COSTS_N_INSNS (5), /* int signed load */
426 COSTS_N_INSNS (5), /* int zeroed load */
427 COSTS_N_INSNS (5), /* float load */
428 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
429 COSTS_N_INSNS (11), /* fadd, fsub */
430 COSTS_N_INSNS (11), /* fcmp */
431 COSTS_N_INSNS (11), /* fmov, fmovr */
432 COSTS_N_INSNS (11), /* fmul */
433 COSTS_N_INSNS (24), /* fdivs */
434 COSTS_N_INSNS (37), /* fdivd */
435 COSTS_N_INSNS (24), /* fsqrts */
436 COSTS_N_INSNS (37), /* fsqrtd */
437 COSTS_N_INSNS (12), /* imul */
438 COSTS_N_INSNS (12), /* imulX */
439 0, /* imul bit factor */
440 COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
441 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
442 COSTS_N_INSNS (1), /* movcc/movr */
443 0, /* shift penalty */
444 2 /* branch cost */
445 };
446
447 static const
448 struct processor_costs niagara7_costs = {
449 COSTS_N_INSNS (5), /* int load */
450 COSTS_N_INSNS (5), /* int signed load */
451 COSTS_N_INSNS (5), /* int zeroed load */
452 COSTS_N_INSNS (5), /* float load */
453 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
454 COSTS_N_INSNS (11), /* fadd, fsub */
455 COSTS_N_INSNS (11), /* fcmp */
456 COSTS_N_INSNS (11), /* fmov, fmovr */
457 COSTS_N_INSNS (11), /* fmul */
458 COSTS_N_INSNS (24), /* fdivs */
459 COSTS_N_INSNS (37), /* fdivd */
460 COSTS_N_INSNS (24), /* fsqrts */
461 COSTS_N_INSNS (37), /* fsqrtd */
462 COSTS_N_INSNS (12), /* imul */
463 COSTS_N_INSNS (12), /* imulX */
464 0, /* imul bit factor */
465 COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
466 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
467 COSTS_N_INSNS (1), /* movcc/movr */
468 0, /* shift penalty */
469 1 /* branch cost */
470 };
471
472 static const
473 struct processor_costs m8_costs = {
474 COSTS_N_INSNS (3), /* int load */
475 COSTS_N_INSNS (3), /* int signed load */
476 COSTS_N_INSNS (3), /* int zeroed load */
477 COSTS_N_INSNS (3), /* float load */
478 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
479 COSTS_N_INSNS (9), /* fadd, fsub */
480 COSTS_N_INSNS (9), /* fcmp */
481 COSTS_N_INSNS (9), /* fmov, fmovr */
482 COSTS_N_INSNS (9), /* fmul */
483 COSTS_N_INSNS (26), /* fdivs */
484 COSTS_N_INSNS (30), /* fdivd */
485 COSTS_N_INSNS (33), /* fsqrts */
486 COSTS_N_INSNS (41), /* fsqrtd */
487 COSTS_N_INSNS (12), /* imul */
488 COSTS_N_INSNS (10), /* imulX */
489 0, /* imul bit factor */
490 COSTS_N_INSNS (57), /* udiv/sdiv */
491 COSTS_N_INSNS (30), /* udivx/sdivx */
492 COSTS_N_INSNS (1), /* movcc/movr */
493 0, /* shift penalty */
494 1 /* branch cost */
495 };
496
497 static const struct processor_costs *sparc_costs = &cypress_costs;
498
499 #ifdef HAVE_AS_RELAX_OPTION
500 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
501 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
502 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
503 whether somebody branches between the sethi and jmp. */
504 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
505 #else
506 #define LEAF_SIBCALL_SLOT_RESERVED_P \
507 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
508 #endif
509
510 /* Vector to say how input registers are mapped to output registers.
511 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
512 eliminate it. You must use -fomit-frame-pointer to get that. */
513 char leaf_reg_remap[] =
514 { 0, 1, 2, 3, 4, 5, 6, 7,
515 -1, -1, -1, -1, -1, -1, 14, -1,
516 -1, -1, -1, -1, -1, -1, -1, -1,
517 8, 9, 10, 11, 12, 13, -1, 15,
518
519 32, 33, 34, 35, 36, 37, 38, 39,
520 40, 41, 42, 43, 44, 45, 46, 47,
521 48, 49, 50, 51, 52, 53, 54, 55,
522 56, 57, 58, 59, 60, 61, 62, 63,
523 64, 65, 66, 67, 68, 69, 70, 71,
524 72, 73, 74, 75, 76, 77, 78, 79,
525 80, 81, 82, 83, 84, 85, 86, 87,
526 88, 89, 90, 91, 92, 93, 94, 95,
527 96, 97, 98, 99, 100, 101, 102};
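/* Illustrative note (not in the original source): in a leaf function no
register window is allocated, so the incoming registers are really the
caller's out registers; e.g. leaf_reg_remap[24] == 8 renames %i0 to %o0
and leaf_reg_remap[31] == 15 renames %i7 to %o7, while entries of -1
(the locals and the frame pointer) have no valid remapping. */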
528
529 /* Vector, indexed by hard register number, which contains 1
530 for a register that is allowable in a candidate for leaf
531 function treatment. */
532 char sparc_leaf_regs[] =
533 { 1, 1, 1, 1, 1, 1, 1, 1,
534 0, 0, 0, 0, 0, 0, 1, 0,
535 0, 0, 0, 0, 0, 0, 0, 0,
536 1, 1, 1, 1, 1, 1, 0, 1,
537 1, 1, 1, 1, 1, 1, 1, 1,
538 1, 1, 1, 1, 1, 1, 1, 1,
539 1, 1, 1, 1, 1, 1, 1, 1,
540 1, 1, 1, 1, 1, 1, 1, 1,
541 1, 1, 1, 1, 1, 1, 1, 1,
542 1, 1, 1, 1, 1, 1, 1, 1,
543 1, 1, 1, 1, 1, 1, 1, 1,
544 1, 1, 1, 1, 1, 1, 1, 1,
545 1, 1, 1, 1, 1, 1, 1};
546
547 struct GTY(()) machine_function
548 {
549 /* Size of the frame of the function. */
550 HOST_WIDE_INT frame_size;
551
552 /* Size of the frame of the function minus the register window save area
553 and the outgoing argument area. */
554 HOST_WIDE_INT apparent_frame_size;
555
556 /* Register we pretend the frame pointer is allocated to. Normally, this
557 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
558 record "offset" separately as it may be too big for (reg + disp). */
559 rtx frame_base_reg;
560 HOST_WIDE_INT frame_base_offset;
561
562 /* Number of global or FP registers to be saved (as 4-byte quantities). */
563 int n_global_fp_regs;
564
565 /* True if the current function is leaf and uses only leaf regs,
566 so that the SPARC leaf function optimization can be applied.
567 Private version of crtl->uses_only_leaf_regs, see
568 sparc_expand_prologue for the rationale. */
569 int leaf_function_p;
570
571 /* True if the prologue saves local or in registers. */
572 bool save_local_in_regs_p;
573
574 /* True if the data calculated by sparc_expand_prologue are valid. */
575 bool prologue_data_valid_p;
576 };
577
578 #define sparc_frame_size cfun->machine->frame_size
579 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
580 #define sparc_frame_base_reg cfun->machine->frame_base_reg
581 #define sparc_frame_base_offset cfun->machine->frame_base_offset
582 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
583 #define sparc_leaf_function_p cfun->machine->leaf_function_p
584 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
585 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
586
587 /* 1 if the next opcode is to be specially indented. */
588 int sparc_indent_opcode = 0;
589
590 static void sparc_option_override (void);
591 static void sparc_init_modes (void);
592 static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
593 const_tree, bool, bool, int *, int *);
594
595 static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
596 static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
597
598 static void sparc_emit_set_const32 (rtx, rtx);
599 static void sparc_emit_set_const64 (rtx, rtx);
600 static void sparc_output_addr_vec (rtx);
601 static void sparc_output_addr_diff_vec (rtx);
602 static void sparc_output_deferred_case_vectors (void);
603 static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
604 static bool sparc_legitimate_constant_p (machine_mode, rtx);
605 static rtx sparc_builtin_saveregs (void);
606 static int epilogue_renumber (rtx *, int);
607 static bool sparc_assemble_integer (rtx, unsigned int, int);
608 static int set_extends (rtx_insn *);
609 static void sparc_asm_function_prologue (FILE *);
610 static void sparc_asm_function_epilogue (FILE *);
611 #ifdef TARGET_SOLARIS
612 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
613 tree) ATTRIBUTE_UNUSED;
614 #endif
615 static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
616 static int sparc_issue_rate (void);
617 static void sparc_sched_init (FILE *, int, int);
618 static int sparc_use_sched_lookahead (void);
619
620 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
621 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
622 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
623 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
624 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
625
626 static bool sparc_function_ok_for_sibcall (tree, tree);
627 static void sparc_init_libfuncs (void);
628 static void sparc_init_builtins (void);
629 static void sparc_fpu_init_builtins (void);
630 static void sparc_vis_init_builtins (void);
631 static tree sparc_builtin_decl (unsigned, bool);
632 static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
633 static tree sparc_fold_builtin (tree, int, tree *, bool);
634 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
635 HOST_WIDE_INT, tree);
636 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
637 HOST_WIDE_INT, const_tree);
638 static struct machine_function * sparc_init_machine_status (void);
639 static bool sparc_cannot_force_const_mem (machine_mode, rtx);
640 static rtx sparc_tls_get_addr (void);
641 static rtx sparc_tls_got (void);
642 static int sparc_register_move_cost (machine_mode,
643 reg_class_t, reg_class_t);
644 static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
645 static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
646 int *, const_tree, int);
647 static bool sparc_strict_argument_naming (cumulative_args_t);
648 static void sparc_va_start (tree, rtx);
649 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
650 static bool sparc_vector_mode_supported_p (machine_mode);
651 static bool sparc_tls_referenced_p (rtx);
652 static rtx sparc_legitimize_tls_address (rtx);
653 static rtx sparc_legitimize_pic_address (rtx, rtx);
654 static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
655 static rtx sparc_delegitimize_address (rtx);
656 static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
657 static bool sparc_pass_by_reference (cumulative_args_t,
658 machine_mode, const_tree, bool);
659 static void sparc_function_arg_advance (cumulative_args_t,
660 machine_mode, const_tree, bool);
661 static rtx sparc_function_arg_1 (cumulative_args_t,
662 machine_mode, const_tree, bool, bool);
663 static rtx sparc_function_arg (cumulative_args_t,
664 machine_mode, const_tree, bool);
665 static rtx sparc_function_incoming_arg (cumulative_args_t,
666 machine_mode, const_tree, bool);
667 static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
668 static unsigned int sparc_function_arg_boundary (machine_mode,
669 const_tree);
670 static int sparc_arg_partial_bytes (cumulative_args_t,
671 machine_mode, tree, bool);
672 static bool sparc_return_in_memory (const_tree, const_tree);
673 static rtx sparc_struct_value_rtx (tree, int);
674 static rtx sparc_function_value (const_tree, const_tree, bool);
675 static rtx sparc_libcall_value (machine_mode, const_rtx);
676 static bool sparc_function_value_regno_p (const unsigned int);
677 static unsigned HOST_WIDE_INT sparc_asan_shadow_offset (void);
678 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
679 static void sparc_file_end (void);
680 static bool sparc_frame_pointer_required (void);
681 static bool sparc_can_eliminate (const int, const int);
682 static void sparc_conditional_register_usage (void);
683 static bool sparc_use_pseudo_pic_reg (void);
684 static void sparc_init_pic_reg (void);
685 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
686 static const char *sparc_mangle_type (const_tree);
687 #endif
688 static void sparc_trampoline_init (rtx, tree, rtx);
689 static machine_mode sparc_preferred_simd_mode (scalar_mode);
690 static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
691 static bool sparc_lra_p (void);
692 static bool sparc_print_operand_punct_valid_p (unsigned char);
693 static void sparc_print_operand (FILE *, rtx, int);
694 static void sparc_print_operand_address (FILE *, machine_mode, rtx);
695 static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
696 machine_mode,
697 secondary_reload_info *);
698 static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
699 reg_class_t);
700 static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
701 static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
702 static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
703 static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
704 static unsigned int sparc_min_arithmetic_precision (void);
705 static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
706 static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
707 static bool sparc_modes_tieable_p (machine_mode, machine_mode);
708 static bool sparc_can_change_mode_class (machine_mode, machine_mode,
709 reg_class_t);
710 static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
711 static bool sparc_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
712 const vec_perm_indices &);
713 static bool sparc_can_follow_jump (const rtx_insn *, const rtx_insn *);
714 \f
715 #ifdef SUBTARGET_ATTRIBUTE_TABLE
716 /* Table of valid machine attributes. */
717 static const struct attribute_spec sparc_attribute_table[] =
718 {
719 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
720 do_diagnostic, handler, exclude } */
721 SUBTARGET_ATTRIBUTE_TABLE,
722 { NULL, 0, 0, false, false, false, false, NULL, NULL }
723 };
724 #endif
725 \f
726 char sparc_hard_reg_printed[8];
727
728 /* Initialize the GCC target structure. */
729
730 /* The default is to use .half rather than .short for aligned HI objects. */
731 #undef TARGET_ASM_ALIGNED_HI_OP
732 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
733
734 #undef TARGET_ASM_UNALIGNED_HI_OP
735 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
736 #undef TARGET_ASM_UNALIGNED_SI_OP
737 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
738 #undef TARGET_ASM_UNALIGNED_DI_OP
739 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
740
741 /* The target hook has to handle DI-mode values. */
742 #undef TARGET_ASM_INTEGER
743 #define TARGET_ASM_INTEGER sparc_assemble_integer
744
745 #undef TARGET_ASM_FUNCTION_PROLOGUE
746 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
747 #undef TARGET_ASM_FUNCTION_EPILOGUE
748 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
749
750 #undef TARGET_SCHED_ADJUST_COST
751 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
752 #undef TARGET_SCHED_ISSUE_RATE
753 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
754 #undef TARGET_SCHED_INIT
755 #define TARGET_SCHED_INIT sparc_sched_init
756 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
757 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
758
759 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
760 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
761
762 #undef TARGET_INIT_LIBFUNCS
763 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
764
765 #undef TARGET_LEGITIMIZE_ADDRESS
766 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
767 #undef TARGET_DELEGITIMIZE_ADDRESS
768 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
769 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
770 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
771
772 #undef TARGET_INIT_BUILTINS
773 #define TARGET_INIT_BUILTINS sparc_init_builtins
774 #undef TARGET_BUILTIN_DECL
775 #define TARGET_BUILTIN_DECL sparc_builtin_decl
776 #undef TARGET_EXPAND_BUILTIN
777 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
778 #undef TARGET_FOLD_BUILTIN
779 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
780
781 #if TARGET_TLS
782 #undef TARGET_HAVE_TLS
783 #define TARGET_HAVE_TLS true
784 #endif
785
786 #undef TARGET_CANNOT_FORCE_CONST_MEM
787 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
788
789 #undef TARGET_ASM_OUTPUT_MI_THUNK
790 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
791 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
792 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
793
794 #undef TARGET_RTX_COSTS
795 #define TARGET_RTX_COSTS sparc_rtx_costs
796 #undef TARGET_ADDRESS_COST
797 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
798 #undef TARGET_REGISTER_MOVE_COST
799 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
800
801 #undef TARGET_PROMOTE_FUNCTION_MODE
802 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
803 #undef TARGET_STRICT_ARGUMENT_NAMING
804 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
805
806 #undef TARGET_MUST_PASS_IN_STACK
807 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
808 #undef TARGET_PASS_BY_REFERENCE
809 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
810 #undef TARGET_ARG_PARTIAL_BYTES
811 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
812 #undef TARGET_FUNCTION_ARG_ADVANCE
813 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
814 #undef TARGET_FUNCTION_ARG
815 #define TARGET_FUNCTION_ARG sparc_function_arg
816 #undef TARGET_FUNCTION_INCOMING_ARG
817 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
818 #undef TARGET_FUNCTION_ARG_PADDING
819 #define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
820 #undef TARGET_FUNCTION_ARG_BOUNDARY
821 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
822
823 #undef TARGET_RETURN_IN_MEMORY
824 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
825 #undef TARGET_STRUCT_VALUE_RTX
826 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
827 #undef TARGET_FUNCTION_VALUE
828 #define TARGET_FUNCTION_VALUE sparc_function_value
829 #undef TARGET_LIBCALL_VALUE
830 #define TARGET_LIBCALL_VALUE sparc_libcall_value
831 #undef TARGET_FUNCTION_VALUE_REGNO_P
832 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
833
834 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
835 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
836
837 #undef TARGET_ASAN_SHADOW_OFFSET
838 #define TARGET_ASAN_SHADOW_OFFSET sparc_asan_shadow_offset
839
840 #undef TARGET_EXPAND_BUILTIN_VA_START
841 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
842 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
843 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
844
845 #undef TARGET_VECTOR_MODE_SUPPORTED_P
846 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
847
848 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
849 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
850
851 #ifdef SUBTARGET_INSERT_ATTRIBUTES
852 #undef TARGET_INSERT_ATTRIBUTES
853 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
854 #endif
855
856 #ifdef SUBTARGET_ATTRIBUTE_TABLE
857 #undef TARGET_ATTRIBUTE_TABLE
858 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
859 #endif
860
861 #undef TARGET_OPTION_OVERRIDE
862 #define TARGET_OPTION_OVERRIDE sparc_option_override
863
864 #ifdef TARGET_THREAD_SSP_OFFSET
865 #undef TARGET_STACK_PROTECT_GUARD
866 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
867 #endif
868
869 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
870 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
871 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
872 #endif
873
874 #undef TARGET_ASM_FILE_END
875 #define TARGET_ASM_FILE_END sparc_file_end
876
877 #undef TARGET_FRAME_POINTER_REQUIRED
878 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
879
880 #undef TARGET_CAN_ELIMINATE
881 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
882
883 #undef TARGET_PREFERRED_RELOAD_CLASS
884 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
885
886 #undef TARGET_SECONDARY_RELOAD
887 #define TARGET_SECONDARY_RELOAD sparc_secondary_reload
888 #undef TARGET_SECONDARY_MEMORY_NEEDED
889 #define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
890 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
891 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode
892
893 #undef TARGET_CONDITIONAL_REGISTER_USAGE
894 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
895
896 #undef TARGET_INIT_PIC_REG
897 #define TARGET_INIT_PIC_REG sparc_init_pic_reg
898
899 #undef TARGET_USE_PSEUDO_PIC_REG
900 #define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg
901
902 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
903 #undef TARGET_MANGLE_TYPE
904 #define TARGET_MANGLE_TYPE sparc_mangle_type
905 #endif
906
907 #undef TARGET_LRA_P
908 #define TARGET_LRA_P sparc_lra_p
909
910 #undef TARGET_LEGITIMATE_ADDRESS_P
911 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
912
913 #undef TARGET_LEGITIMATE_CONSTANT_P
914 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
915
916 #undef TARGET_TRAMPOLINE_INIT
917 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
918
919 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
920 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
921 #undef TARGET_PRINT_OPERAND
922 #define TARGET_PRINT_OPERAND sparc_print_operand
923 #undef TARGET_PRINT_OPERAND_ADDRESS
924 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
925
926 /* The value stored by LDSTUB. */
927 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
928 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff
929
930 #undef TARGET_CSTORE_MODE
931 #define TARGET_CSTORE_MODE sparc_cstore_mode
932
933 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
934 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
935
936 #undef TARGET_FIXED_CONDITION_CODE_REGS
937 #define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs
938
939 #undef TARGET_MIN_ARITHMETIC_PRECISION
940 #define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision
941
942 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
943 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
944
945 #undef TARGET_HARD_REGNO_NREGS
946 #define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
947 #undef TARGET_HARD_REGNO_MODE_OK
948 #define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok
949
950 #undef TARGET_MODES_TIEABLE_P
951 #define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p
952
953 #undef TARGET_CAN_CHANGE_MODE_CLASS
954 #define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class
955
956 #undef TARGET_CONSTANT_ALIGNMENT
957 #define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment
958
959 #undef TARGET_VECTORIZE_VEC_PERM_CONST
960 #define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const
961
962 #undef TARGET_CAN_FOLLOW_JUMP
963 #define TARGET_CAN_FOLLOW_JUMP sparc_can_follow_jump
964
965 struct gcc_target targetm = TARGET_INITIALIZER;
966
967 /* Return the memory reference contained in X if any, zero otherwise. */
968
969 static rtx
970 mem_ref (rtx x)
971 {
972 if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
973 x = XEXP (x, 0);
974
975 if (MEM_P (x))
976 return x;
977
978 return NULL_RTX;
979 }
980
981 /* True if any of INSN's source register(s) is REG. */
982
983 static bool
984 insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
985 {
986 extract_insn (insn);
987 return ((REG_P (recog_data.operand[1])
988 && REGNO (recog_data.operand[1]) == reg)
989 || (recog_data.n_operands == 3
990 && REG_P (recog_data.operand[2])
991 && REGNO (recog_data.operand[2]) == reg));
992 }
993
994 /* True if INSN is a floating-point division or square-root. */
995
996 static bool
997 div_sqrt_insn_p (rtx_insn *insn)
998 {
999 if (GET_CODE (PATTERN (insn)) != SET)
1000 return false;
1001
1002 switch (get_attr_type (insn))
1003 {
1004 case TYPE_FPDIVS:
1005 case TYPE_FPSQRTS:
1006 case TYPE_FPDIVD:
1007 case TYPE_FPSQRTD:
1008 return true;
1009 default:
1010 return false;
1011 }
1012 }
1013
1014 /* True if INSN is a floating-point instruction. */
1015
1016 static bool
1017 fpop_insn_p (rtx_insn *insn)
1018 {
1019 if (GET_CODE (PATTERN (insn)) != SET)
1020 return false;
1021
1022 switch (get_attr_type (insn))
1023 {
1024 case TYPE_FPMOVE:
1025 case TYPE_FPCMOVE:
1026 case TYPE_FP:
1027 case TYPE_FPCMP:
1028 case TYPE_FPMUL:
1029 case TYPE_FPDIVS:
1030 case TYPE_FPSQRTS:
1031 case TYPE_FPDIVD:
1032 case TYPE_FPSQRTD:
1033 return true;
1034 default:
1035 return false;
1036 }
1037 }
1038
1039 /* True if INSN is an atomic instruction. */
1040
1041 static bool
1042 atomic_insn_for_leon3_p (rtx_insn *insn)
1043 {
1044 switch (INSN_CODE (insn))
1045 {
1046 case CODE_FOR_swapsi:
1047 case CODE_FOR_ldstub:
1048 case CODE_FOR_atomic_compare_and_swap_leon3_1:
1049 return true;
1050 default:
1051 return false;
1052 }
1053 }
1054
1055 /* We use a machine specific pass to enable workarounds for errata.
1056
1057 We need to have the (essentially) final form of the insn stream in order
1058 to properly detect the various hazards. Therefore, this machine specific
1059 pass runs as late as possible. */
1060
1061 /* True if INSN is an md pattern or asm statement. */
1062 #define USEFUL_INSN_P(INSN) \
1063 (NONDEBUG_INSN_P (INSN) \
1064 && GET_CODE (PATTERN (INSN)) != USE \
1065 && GET_CODE (PATTERN (INSN)) != CLOBBER)
1066
1067 static unsigned int
1068 sparc_do_work_around_errata (void)
1069 {
1070 rtx_insn *insn, *next;
1071
1072 /* Force all instructions to be split into their final form. */
1073 split_all_insns_noflow ();
1074
1075 /* Now look for specific patterns in the insn stream. */
1076 for (insn = get_insns (); insn; insn = next)
1077 {
1078 bool insert_nop = false;
1079 rtx set;
1080 rtx_insn *jump;
1081 rtx_sequence *seq;
1082
1083 /* Look into the instruction in a delay slot. */
1084 if (NONJUMP_INSN_P (insn)
1085 && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
1086 {
1087 jump = seq->insn (0);
1088 insn = seq->insn (1);
1089 }
1090 else if (JUMP_P (insn))
1091 jump = insn;
1092 else
1093 jump = NULL;
1094
1095 /* Place a NOP at the branch target of an integer branch if the target
1096 is a floating-point operation or a floating-point branch. */
1097 if (sparc_fix_gr712rc
1098 && jump
1099 && jump_to_label_p (jump)
1100 && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
1101 {
1102 rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
1103 if (target
1104 && (fpop_insn_p (target)
1105 || (JUMP_P (target)
1106 && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
1107 emit_insn_before (gen_nop (), target);
1108 }
1109
1110 /* Insert a NOP between a load instruction and an atomic instruction.
1111 Insert a NOP at the branch target if there is a load in the delay slot
1112 and an atomic instruction at the branch target. */
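/* Illustrative example (not in the original source): a back-to-back pair
such as

ld [%o0], %g1
ldstub [%o2], %g2

gets a NOP emitted between the load and the atomic instruction by the
code below. */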
1113 if (sparc_fix_ut700
1114 && NONJUMP_INSN_P (insn)
1115 && (set = single_set (insn)) != NULL_RTX
1116 && mem_ref (SET_SRC (set))
1117 && REG_P (SET_DEST (set)))
1118 {
1119 if (jump && jump_to_label_p (jump))
1120 {
1121 rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
1122 if (target && atomic_insn_for_leon3_p (target))
1123 emit_insn_before (gen_nop (), target);
1124 }
1125
1126 next = next_active_insn (insn);
1127 if (!next)
1128 break;
1129
1130 if (atomic_insn_for_leon3_p (next))
1131 insert_nop = true;
1132 }
1133
1134 /* Look for a sequence that starts with an fdiv or fsqrt instruction and
1135 ends with another fdiv or fsqrt instruction that has no dependencies on
1136 the former, along with an appropriate pattern in between. */
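/* Illustrative example (not in the original source): a window such as

fdivs %f0, %f1, %f2
fadds %f4, %f5, %f6
fmuls %f8, %f9, %f10
fdivs %f12, %f13, %f14

where neither intervening FP operation uses %f2 is the kind of sequence
the scan below protects with extra NOPs. */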
1137 if (sparc_fix_lost_divsqrt
1138 && NONJUMP_INSN_P (insn)
1139 && div_sqrt_insn_p (insn))
1140 {
1141 int i;
1142 int fp_found = 0;
1143 rtx_insn *after;
1144
1145 const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));
1146
1147 next = next_active_insn (insn);
1148 if (!next)
1149 break;
1150
1151 for (after = next, i = 0; i < 4; i++)
1152 {
1153 /* Count floating-point operations. */
1154 if (i != 3 && fpop_insn_p (after))
1155 {
1156 /* If the insn uses the destination register of
1157 the div/sqrt, then it cannot be problematic. */
1158 if (insn_uses_reg_p (after, dest_reg))
1159 break;
1160 fp_found++;
1161 }
1162
1163 /* Count floating-point loads. */
1164 if (i != 3
1165 && (set = single_set (after)) != NULL_RTX
1166 && REG_P (SET_DEST (set))
1167 && REGNO (SET_DEST (set)) > 31)
1168 {
1169 /* If the insn uses the destination register of
1170 the div/sqrt, then it cannot be problematic. */
1171 if (REGNO (SET_DEST (set)) == dest_reg)
1172 break;
1173 fp_found++;
1174 }
1175
1176 /* Check if this is a problematic sequence. */
1177 if (i > 1
1178 && fp_found >= 2
1179 && div_sqrt_insn_p (after))
1180 {
1181 /* If this is the short version of the problematic
1182 sequence we add two NOPs in a row to also prevent
1183 the long version. */
1184 if (i == 2)
1185 emit_insn_before (gen_nop (), next);
1186 insert_nop = true;
1187 break;
1188 }
1189
1190 /* No need to scan past a second div/sqrt. */
1191 if (div_sqrt_insn_p (after))
1192 break;
1193
1194 /* Insert NOP before branch. */
1195 if (i < 3
1196 && (!NONJUMP_INSN_P (after)
1197 || GET_CODE (PATTERN (after)) == SEQUENCE))
1198 {
1199 insert_nop = true;
1200 break;
1201 }
1202
1203 after = next_active_insn (after);
1204 if (!after)
1205 break;
1206 }
1207 }
1208
1209 /* Look for either of these two sequences:
1210
1211 Sequence A:
1212 1. store of word size or less (e.g. st / stb / sth / stf)
1213 2. any single instruction that is not a load or store
1214 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)
1215
1216 Sequence B:
1217 1. store of double word size (e.g. std / stdf)
1218 2. any store instruction (e.g. st / stb / sth / stf / std / stdf) */
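/* Illustrative example (not in the original source): an instance of
sequence A would be

st %g1, [%o0]
add %o1, %o2, %o3
st %g4, [%o5]

which the code below breaks up by emitting a NOP right after the first
store. */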
1219 if (sparc_fix_b2bst
1220 && NONJUMP_INSN_P (insn)
1221 && (set = single_set (insn)) != NULL_RTX
1222 && MEM_P (SET_DEST (set)))
1223 {
1224 /* Sequence B begins with a double-word store. */
1225 bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
1226 rtx_insn *after;
1227 int i;
1228
1229 next = next_active_insn (insn);
1230 if (!next)
1231 break;
1232
1233 for (after = next, i = 0; i < 2; i++)
1234 {
1235 /* Skip empty assembly statements. */
1236 if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
1237 || (USEFUL_INSN_P (after)
1238 && (asm_noperands (PATTERN (after))>=0)
1239 && !strcmp (decode_asm_operands (PATTERN (after),
1240 NULL, NULL, NULL,
1241 NULL, NULL), "")))
1242 after = next_active_insn (after);
1243 if (!after)
1244 break;
1245
1246 /* If the insn is a branch, then it cannot be problematic. */
1247 if (!NONJUMP_INSN_P (after)
1248 || GET_CODE (PATTERN (after)) == SEQUENCE)
1249 break;
1250
1251 /* Sequence B is only two instructions long. */
1252 if (seq_b)
1253 {
1254 /* Add NOP if followed by a store. */
1255 if ((set = single_set (after)) != NULL_RTX
1256 && MEM_P (SET_DEST (set)))
1257 insert_nop = true;
1258
1259 /* Otherwise it is ok. */
1260 break;
1261 }
1262
1263 /* If the second instruction is a load or a store,
1264 then the sequence cannot be problematic. */
1265 if (i == 0)
1266 {
1267 if ((set = single_set (after)) != NULL_RTX
1268 && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
1269 break;
1270
1271 after = next_active_insn (after);
1272 if (!after)
1273 break;
1274 }
1275
1276 /* Add NOP if third instruction is a store. */
1277 if (i == 1
1278 && (set = single_set (after)) != NULL_RTX
1279 && MEM_P (SET_DEST (set)))
1280 insert_nop = true;
1281 }
1282 }
1283
1284 /* Look for a single-word load into an odd-numbered FP register. */
1285 else if (sparc_fix_at697f
1286 && NONJUMP_INSN_P (insn)
1287 && (set = single_set (insn)) != NULL_RTX
1288 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1289 && mem_ref (SET_SRC (set))
1290 && REG_P (SET_DEST (set))
1291 && REGNO (SET_DEST (set)) > 31
1292 && REGNO (SET_DEST (set)) % 2 != 0)
1293 {
1294 /* The wrong dependency is on the enclosing double register. */
1295 const unsigned int x = REGNO (SET_DEST (set)) - 1;
1296 unsigned int src1, src2, dest;
1297 int code;
1298
1299 next = next_active_insn (insn);
1300 if (!next)
1301 break;
1302 /* If the insn is a branch, then it cannot be problematic. */
1303 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1304 continue;
1305
1306 extract_insn (next);
1307 code = INSN_CODE (next);
1308
1309 switch (code)
1310 {
1311 case CODE_FOR_adddf3:
1312 case CODE_FOR_subdf3:
1313 case CODE_FOR_muldf3:
1314 case CODE_FOR_divdf3:
1315 dest = REGNO (recog_data.operand[0]);
1316 src1 = REGNO (recog_data.operand[1]);
1317 src2 = REGNO (recog_data.operand[2]);
1318 if (src1 != src2)
1319 {
1320 /* Case [1-4]:
1321 ld [address], %fx+1
1322 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
1323 if ((src1 == x || src2 == x)
1324 && (dest == src1 || dest == src2))
1325 insert_nop = true;
1326 }
1327 else
1328 {
1329 /* Case 5:
1330 ld [address], %fx+1
1331 FPOPd %fx, %fx, %fx */
1332 if (src1 == x
1333 && dest == src1
1334 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
1335 insert_nop = true;
1336 }
1337 break;
1338
1339 case CODE_FOR_sqrtdf2:
1340 dest = REGNO (recog_data.operand[0]);
1341 src1 = REGNO (recog_data.operand[1]);
1342 /* Case 6:
1343 ld [address], %fx+1
1344 fsqrtd %fx, %fx */
1345 if (src1 == x && dest == src1)
1346 insert_nop = true;
1347 break;
1348
1349 default:
1350 break;
1351 }
1352 }
1353
1354 /* Look for a single-word load into an integer register. */
1355 else if (sparc_fix_ut699
1356 && NONJUMP_INSN_P (insn)
1357 && (set = single_set (insn)) != NULL_RTX
1358 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
1359 && (mem_ref (SET_SRC (set)) != NULL_RTX
1360 || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
1361 && REG_P (SET_DEST (set))
1362 && REGNO (SET_DEST (set)) < 32)
1363 {
1364 /* There is no problem if the second memory access has a data
1365 dependency on the first single-cycle load. */
1366 rtx x = SET_DEST (set);
1367
1368 next = next_active_insn (insn);
1369 if (!next)
1370 break;
1371 /* If the insn is a branch, then it cannot be problematic. */
1372 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1373 continue;
1374
1375 /* Look for a second memory access to/from an integer register. */
1376 if ((set = single_set (next)) != NULL_RTX)
1377 {
1378 rtx src = SET_SRC (set);
1379 rtx dest = SET_DEST (set);
1380 rtx mem;
1381
1382 /* LDD is affected. */
1383 if ((mem = mem_ref (src)) != NULL_RTX
1384 && REG_P (dest)
1385 && REGNO (dest) < 32
1386 && !reg_mentioned_p (x, XEXP (mem, 0)))
1387 insert_nop = true;
1388
1389 /* STD is *not* affected. */
1390 else if (MEM_P (dest)
1391 && GET_MODE_SIZE (GET_MODE (dest)) <= 4
1392 && (src == CONST0_RTX (GET_MODE (dest))
1393 || (REG_P (src)
1394 && REGNO (src) < 32
1395 && REGNO (src) != REGNO (x)))
1396 && !reg_mentioned_p (x, XEXP (dest, 0)))
1397 insert_nop = true;
1398
1399 /* GOT accesses use LD. */
1400 else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
1401 && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
1402 insert_nop = true;
1403 }
1404 }
1405
1406 /* Look for a single-word load/operation into an FP register. */
1407 else if (sparc_fix_ut699
1408 && NONJUMP_INSN_P (insn)
1409 && (set = single_set (insn)) != NULL_RTX
1410 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1411 && REG_P (SET_DEST (set))
1412 && REGNO (SET_DEST (set)) > 31)
1413 {
1414 /* Number of instructions in the problematic window. */
1415 const int n_insns = 4;
1416 /* The problematic combination is with the sibling FP register. */
1417 const unsigned int x = REGNO (SET_DEST (set));
1418 const unsigned int y = x ^ 1;
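/* Illustrative note (not in the original source): x ^ 1 flips the low bit
of the hard register number, so if the load targets %f1 (register 33) the
sibling is %f0 (register 32), and vice versa. */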
1419 rtx_insn *after;
1420 int i;
1421
1422 next = next_active_insn (insn);
1423 if (!next)
1424 break;
1425 /* If the insn is a branch, then it cannot be problematic. */
1426 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1427 continue;
1428
1429 /* Look for a second load/operation into the sibling FP register. */
1430 if (!((set = single_set (next)) != NULL_RTX
1431 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1432 && REG_P (SET_DEST (set))
1433 && REGNO (SET_DEST (set)) == y))
1434 continue;
1435
1436 /* Look for a (possible) store from the FP register in the next N
1437 instructions, but bail out if it is again modified or if there
1438 is a store from the sibling FP register before this store. */
1439 for (after = next, i = 0; i < n_insns; i++)
1440 {
1441 bool branch_p;
1442
1443 after = next_active_insn (after);
1444 if (!after)
1445 break;
1446
1447 /* This is a branch with an empty delay slot. */
1448 if (!NONJUMP_INSN_P (after))
1449 {
1450 if (++i == n_insns)
1451 break;
1452 branch_p = true;
1453 after = NULL;
1454 }
1455 /* This is a branch with a filled delay slot. */
1456 else if (rtx_sequence *seq =
1457 dyn_cast <rtx_sequence *> (PATTERN (after)))
1458 {
1459 if (++i == n_insns)
1460 break;
1461 branch_p = true;
1462 after = seq->insn (1);
1463 }
1464 /* This is a regular instruction. */
1465 else
1466 branch_p = false;
1467
1468 if (after && (set = single_set (after)) != NULL_RTX)
1469 {
1470 const rtx src = SET_SRC (set);
1471 const rtx dest = SET_DEST (set);
1472 const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
1473
1474 /* If the FP register is again modified before the store,
1475 then the store isn't affected. */
1476 if (REG_P (dest)
1477 && (REGNO (dest) == x
1478 || (REGNO (dest) == y && size == 8)))
1479 break;
1480
1481 if (MEM_P (dest) && REG_P (src))
1482 {
1483 /* If there is a store from the sibling FP register
1484 before the store, then the store is not affected. */
1485 if (REGNO (src) == y || (REGNO (src) == x && size == 8))
1486 break;
1487
1488 /* Otherwise, the store is affected. */
1489 if (REGNO (src) == x && size == 4)
1490 {
1491 insert_nop = true;
1492 break;
1493 }
1494 }
1495 }
1496
1497 /* If we have a branch in the first M instructions, then we
1498 cannot see the (M+2)th instruction so we play safe. */
1499 if (branch_p && i <= (n_insns - 2))
1500 {
1501 insert_nop = true;
1502 break;
1503 }
1504 }
1505 }
1506
1507 else
1508 next = NEXT_INSN (insn);
1509
1510 if (insert_nop)
1511 emit_insn_before (gen_nop (), next);
1512 }
1513
1514 return 0;
1515 }
1516
1517 namespace {
1518
1519 const pass_data pass_data_work_around_errata =
1520 {
1521 RTL_PASS, /* type */
1522 "errata", /* name */
1523 OPTGROUP_NONE, /* optinfo_flags */
1524 TV_MACH_DEP, /* tv_id */
1525 0, /* properties_required */
1526 0, /* properties_provided */
1527 0, /* properties_destroyed */
1528 0, /* todo_flags_start */
1529 0, /* todo_flags_finish */
1530 };
1531
1532 class pass_work_around_errata : public rtl_opt_pass
1533 {
1534 public:
1535 pass_work_around_errata(gcc::context *ctxt)
1536 : rtl_opt_pass(pass_data_work_around_errata, ctxt)
1537 {}
1538
1539 /* opt_pass methods: */
1540 virtual bool gate (function *)
1541 {
1542 return sparc_fix_at697f
1543 || sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc
1544 || sparc_fix_b2bst || sparc_fix_lost_divsqrt;
1545 }
1546
1547 virtual unsigned int execute (function *)
1548 {
1549 return sparc_do_work_around_errata ();
1550 }
1551
1552 }; // class pass_work_around_errata
1553
1554 } // anon namespace
1555
1556 rtl_opt_pass *
1557 make_pass_work_around_errata (gcc::context *ctxt)
1558 {
1559 return new pass_work_around_errata (ctxt);
1560 }
1561
1562 /* Helpers for TARGET_DEBUG_OPTIONS. */
1563 static void
1564 dump_target_flag_bits (const int flags)
1565 {
1566 if (flags & MASK_64BIT)
1567 fprintf (stderr, "64BIT ");
1568 if (flags & MASK_APP_REGS)
1569 fprintf (stderr, "APP_REGS ");
1570 if (flags & MASK_FASTER_STRUCTS)
1571 fprintf (stderr, "FASTER_STRUCTS ");
1572 if (flags & MASK_FLAT)
1573 fprintf (stderr, "FLAT ");
1574 if (flags & MASK_FMAF)
1575 fprintf (stderr, "FMAF ");
1576 if (flags & MASK_FSMULD)
1577 fprintf (stderr, "FSMULD ");
1578 if (flags & MASK_FPU)
1579 fprintf (stderr, "FPU ");
1580 if (flags & MASK_HARD_QUAD)
1581 fprintf (stderr, "HARD_QUAD ");
1582 if (flags & MASK_POPC)
1583 fprintf (stderr, "POPC ");
1584 if (flags & MASK_PTR64)
1585 fprintf (stderr, "PTR64 ");
1586 if (flags & MASK_STACK_BIAS)
1587 fprintf (stderr, "STACK_BIAS ");
1588 if (flags & MASK_UNALIGNED_DOUBLES)
1589 fprintf (stderr, "UNALIGNED_DOUBLES ");
1590 if (flags & MASK_V8PLUS)
1591 fprintf (stderr, "V8PLUS ");
1592 if (flags & MASK_VIS)
1593 fprintf (stderr, "VIS ");
1594 if (flags & MASK_VIS2)
1595 fprintf (stderr, "VIS2 ");
1596 if (flags & MASK_VIS3)
1597 fprintf (stderr, "VIS3 ");
1598 if (flags & MASK_VIS4)
1599 fprintf (stderr, "VIS4 ");
1600 if (flags & MASK_VIS4B)
1601 fprintf (stderr, "VIS4B ");
1602 if (flags & MASK_CBCOND)
1603 fprintf (stderr, "CBCOND ");
1604 if (flags & MASK_DEPRECATED_V8_INSNS)
1605 fprintf (stderr, "DEPRECATED_V8_INSNS ");
1606 if (flags & MASK_SPARCLET)
1607 fprintf (stderr, "SPARCLET ");
1608 if (flags & MASK_SPARCLITE)
1609 fprintf (stderr, "SPARCLITE ");
1610 if (flags & MASK_V8)
1611 fprintf (stderr, "V8 ");
1612 if (flags & MASK_V9)
1613 fprintf (stderr, "V9 ");
1614 }
1615
1616 static void
1617 dump_target_flags (const char *prefix, const int flags)
1618 {
1619 fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
1620 dump_target_flag_bits (flags);
1621 fprintf(stderr, "]\n");
1622 }
1623
1624 /* Validate and override various options, and do some machine dependent
1625 initialization. */
1626
1627 static void
1628 sparc_option_override (void)
1629 {
1630 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
1631 static struct cpu_default {
1632 const int cpu;
1633 const enum sparc_processor_type processor;
1634 } const cpu_default[] = {
1635 /* There must be one entry here for each TARGET_CPU value. */
1636 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
1637 { TARGET_CPU_v8, PROCESSOR_V8 },
1638 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
1639 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
1640 { TARGET_CPU_leon, PROCESSOR_LEON },
1641 { TARGET_CPU_leon3, PROCESSOR_LEON3 },
1642 { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
1643 { TARGET_CPU_sparclite, PROCESSOR_F930 },
1644 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
1645 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
1646 { TARGET_CPU_v9, PROCESSOR_V9 },
1647 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
1648 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
1649 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
1650 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
1651 { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
1652 { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
1653 { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
1654 { TARGET_CPU_m8, PROCESSOR_M8 },
1655 { -1, PROCESSOR_V7 }
1656 };
1657 const struct cpu_default *def;
1658 /* Table of values for -m{cpu,tune}=. This must match the order of
1659 the enum processor_type in sparc-opts.h. */
1660 static struct cpu_table {
1661 const char *const name;
1662 const int disable;
1663 const int enable;
1664 } const cpu_table[] = {
1665 { "v7", MASK_ISA, 0 },
1666 { "cypress", MASK_ISA, 0 },
1667 { "v8", MASK_ISA, MASK_V8 },
1668 /* TI TMS390Z55 supersparc */
1669 { "supersparc", MASK_ISA, MASK_V8 },
1670 { "hypersparc", MASK_ISA, MASK_V8 },
1671 { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
1672 { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
1673 { "leon3v7", MASK_ISA, MASK_LEON3 },
1674 { "sparclite", MASK_ISA, MASK_SPARCLITE },
1675 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
1676 { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1677 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
1678 { "f934", MASK_ISA, MASK_SPARCLITE },
1679 { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1680 { "sparclet", MASK_ISA, MASK_SPARCLET },
1681 /* TEMIC sparclet */
1682 { "tsc701", MASK_ISA, MASK_SPARCLET },
1683 { "v9", MASK_ISA, MASK_V9 },
1684 /* UltraSPARC I, II, IIi */
1685 { "ultrasparc", MASK_ISA,
1686 /* Although insns using %y are deprecated, it is a clear win. */
1687 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1688 /* UltraSPARC III */
1689 /* ??? Check if %y issue still holds true. */
1690 { "ultrasparc3", MASK_ISA,
1691 MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
1692 /* UltraSPARC T1 */
1693 { "niagara", MASK_ISA,
1694 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1695 /* UltraSPARC T2 */
1696 { "niagara2", MASK_ISA,
1697 MASK_V9|MASK_POPC|MASK_VIS2 },
1698 /* UltraSPARC T3 */
1699 { "niagara3", MASK_ISA,
1700 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
1701 /* UltraSPARC T4 */
1702 { "niagara4", MASK_ISA,
1703 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
1704 /* UltraSPARC M7 */
1705 { "niagara7", MASK_ISA,
1706 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
1707 /* UltraSPARC M8 */
1708 { "m8", MASK_ISA,
1709 MASK_V9|MASK_POPC|MASK_VIS4B|MASK_FMAF|MASK_CBCOND|MASK_SUBXC }
1710 };
1711 const struct cpu_table *cpu;
1712 unsigned int i;
1713
1714 if (sparc_debug_string != NULL)
1715 {
1716 const char *q;
1717 char *p;
1718
1719 p = ASTRDUP (sparc_debug_string);
1720 while ((q = strtok (p, ",")) != NULL)
1721 {
1722 bool invert;
1723 int mask;
1724
1725 p = NULL;
1726 if (*q == '!')
1727 {
1728 invert = true;
1729 q++;
1730 }
1731 else
1732 invert = false;
1733
1734 if (! strcmp (q, "all"))
1735 mask = MASK_DEBUG_ALL;
1736 else if (! strcmp (q, "options"))
1737 mask = MASK_DEBUG_OPTIONS;
1738 else
1739 error ("unknown %<-mdebug-%s%> switch", q);
1740
1741 if (invert)
1742 sparc_debug &= ~mask;
1743 else
1744 sparc_debug |= mask;
1745 }
1746 }
1747
1748 /* Enable the FsMULd instruction by default if not explicitly specified by
1749 the user. It may be later disabled by the CPU (explicitly or not). */
1750 if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
1751 target_flags |= MASK_FSMULD;
1752
1753 if (TARGET_DEBUG_OPTIONS)
1754 {
1755 dump_target_flags ("Initial target_flags", target_flags);
1756 dump_target_flags ("target_flags_explicit", target_flags_explicit);
1757 }
1758
1759 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1760 SUBTARGET_OVERRIDE_OPTIONS;
1761 #endif
1762
1763 #ifndef SPARC_BI_ARCH
1764 /* Check for unsupported architecture size. */
1765 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1766 error ("%s is not supported by this configuration",
1767 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1768 #endif
1769
1770 /* We force all 64-bit archs to use a 128-bit long double. */
1771 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1772 {
1773 error ("%<-mlong-double-64%> not allowed with %<-m64%>");
1774 target_flags |= MASK_LONG_DOUBLE_128;
1775 }
1776
1777 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1778 for (i = 8; i < 16; i++)
1779 if (!call_used_regs [i])
1780 {
1781 error ("%<-fcall-saved-REG%> is not supported for out registers");
1782 call_used_regs [i] = 1;
1783 }
1784
1785 /* Set the default CPU if no -mcpu option was specified. */
1786 if (!global_options_set.x_sparc_cpu_and_features)
1787 {
1788 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1789 if (def->cpu == TARGET_CPU_DEFAULT)
1790 break;
1791 gcc_assert (def->cpu != -1);
1792 sparc_cpu_and_features = def->processor;
1793 }
1794
1795 /* Set the default CPU if no -mtune option was specified. */
1796 if (!global_options_set.x_sparc_cpu)
1797 sparc_cpu = sparc_cpu_and_features;
1798
1799 cpu = &cpu_table[(int) sparc_cpu_and_features];
1800
1801 if (TARGET_DEBUG_OPTIONS)
1802 {
1803 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1804 dump_target_flags ("cpu->disable", cpu->disable);
1805 dump_target_flags ("cpu->enable", cpu->enable);
1806 }
1807
1808 target_flags &= ~cpu->disable;
1809 target_flags |= (cpu->enable
1810 #ifndef HAVE_AS_FMAF_HPC_VIS3
1811 & ~(MASK_FMAF | MASK_VIS3)
1812 #endif
1813 #ifndef HAVE_AS_SPARC4
1814 & ~MASK_CBCOND
1815 #endif
1816 #ifndef HAVE_AS_SPARC5_VIS4
1817 & ~(MASK_VIS4 | MASK_SUBXC)
1818 #endif
1819 #ifndef HAVE_AS_SPARC6
1820 & ~(MASK_VIS4B)
1821 #endif
1822 #ifndef HAVE_AS_LEON
1823 & ~(MASK_LEON | MASK_LEON3)
1824 #endif
1825 & ~(target_flags_explicit & MASK_FEATURES)
1826 );
1827
1828 /* FsMULd is a V8 instruction. */
1829 if (!TARGET_V8 && !TARGET_V9)
1830 target_flags &= ~MASK_FSMULD;
1831
1832 /* -mvis2 implies -mvis. */
1833 if (TARGET_VIS2)
1834 target_flags |= MASK_VIS;
1835
1836 /* -mvis3 implies -mvis2 and -mvis. */
1837 if (TARGET_VIS3)
1838 target_flags |= MASK_VIS2 | MASK_VIS;
1839
1840 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1841 if (TARGET_VIS4)
1842 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1843
1844 /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis. */
1845 if (TARGET_VIS4B)
1846 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1847
1848 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
1849 FPU is disabled. */
1850 if (!TARGET_FPU)
1851 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1852 | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
1853
1854 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1855 are available; -m64 also implies v9. */
1856 if (TARGET_VIS || TARGET_ARCH64)
1857 {
1858 target_flags |= MASK_V9;
1859 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1860 }
1861
1862 /* -mvis also implies -mv8plus on 32-bit. */
1863 if (TARGET_VIS && !TARGET_ARCH64)
1864 target_flags |= MASK_V8PLUS;
1865
1866 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
1867 if (TARGET_V9 && TARGET_ARCH32)
1868 target_flags |= MASK_DEPRECATED_V8_INSNS;
1869
1870 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
1871 if (!TARGET_V9 || TARGET_ARCH64)
1872 target_flags &= ~MASK_V8PLUS;
1873
1874 /* Don't use stack biasing in 32-bit mode. */
1875 if (TARGET_ARCH32)
1876 target_flags &= ~MASK_STACK_BIAS;
1877
1878 /* Use LRA instead of reload, unless otherwise instructed. */
1879 if (!(target_flags_explicit & MASK_LRA))
1880 target_flags |= MASK_LRA;
1881
1882 /* Enable applicable errata workarounds for LEON3FT. */
1883 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1884 {
1885 sparc_fix_b2bst = 1;
1886 sparc_fix_lost_divsqrt = 1;
1887 }
1888
1889 /* Disable FsMULd for the UT699 since it doesn't work correctly. */
1890 if (sparc_fix_ut699)
1891 target_flags &= ~MASK_FSMULD;
1892
1893 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1894 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1895 target_flags |= MASK_LONG_DOUBLE_128;
1896 #endif
1897
1898 if (TARGET_DEBUG_OPTIONS)
1899 dump_target_flags ("Final target_flags", target_flags);
1900
1901 /* Set the code model if no -mcmodel option was specified. */
1902 if (global_options_set.x_sparc_code_model)
1903 {
1904 if (TARGET_ARCH32)
1905 error ("%<-mcmodel=%> is not supported in 32-bit mode");
1906 }
1907 else
1908 {
1909 if (TARGET_ARCH32)
1910 sparc_code_model = CM_32;
1911 else
1912 sparc_code_model = SPARC_DEFAULT_CMODEL;
1913 }
1914
1915 /* Set the memory model if no -mmemory-model option was specified. */
1916 if (!global_options_set.x_sparc_memory_model)
1917 {
1918 /* Choose the memory model for the operating system. */
1919 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
1920 if (os_default != SMM_DEFAULT)
1921 sparc_memory_model = os_default;
1922 /* Choose the most relaxed model for the processor. */
1923 else if (TARGET_V9)
1924 sparc_memory_model = SMM_RMO;
1925 else if (TARGET_LEON3)
1926 sparc_memory_model = SMM_TSO;
1927 else if (TARGET_LEON)
1928 sparc_memory_model = SMM_SC;
1929 else if (TARGET_V8)
1930 sparc_memory_model = SMM_PSO;
1931 else
1932 sparc_memory_model = SMM_SC;
1933 }
1934
1935 /* Supply a default value for align_functions. */
1936 if (flag_align_functions && !str_align_functions)
1937 {
1938 if (sparc_cpu == PROCESSOR_ULTRASPARC
1939 || sparc_cpu == PROCESSOR_ULTRASPARC3
1940 || sparc_cpu == PROCESSOR_NIAGARA
1941 || sparc_cpu == PROCESSOR_NIAGARA2
1942 || sparc_cpu == PROCESSOR_NIAGARA3
1943 || sparc_cpu == PROCESSOR_NIAGARA4)
1944 str_align_functions = "32";
1945 else if (sparc_cpu == PROCESSOR_NIAGARA7
1946 || sparc_cpu == PROCESSOR_M8)
1947 str_align_functions = "64";
1948 }
1949
1950 /* Validate PCC_STRUCT_RETURN. */
1951 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
1952 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
1953
1954 /* Only use .uaxword when compiling for a 64-bit target. */
1955 if (!TARGET_ARCH64)
1956 targetm.asm_out.unaligned_op.di = NULL;
1957
1958 /* Set the processor costs. */
1959 switch (sparc_cpu)
1960 {
1961 case PROCESSOR_V7:
1962 case PROCESSOR_CYPRESS:
1963 sparc_costs = &cypress_costs;
1964 break;
1965 case PROCESSOR_V8:
1966 case PROCESSOR_SPARCLITE:
1967 case PROCESSOR_SUPERSPARC:
1968 sparc_costs = &supersparc_costs;
1969 break;
1970 case PROCESSOR_F930:
1971 case PROCESSOR_F934:
1972 case PROCESSOR_HYPERSPARC:
1973 case PROCESSOR_SPARCLITE86X:
1974 sparc_costs = &hypersparc_costs;
1975 break;
1976 case PROCESSOR_LEON:
1977 sparc_costs = &leon_costs;
1978 break;
1979 case PROCESSOR_LEON3:
1980 case PROCESSOR_LEON3V7:
1981 sparc_costs = &leon3_costs;
1982 break;
1983 case PROCESSOR_SPARCLET:
1984 case PROCESSOR_TSC701:
1985 sparc_costs = &sparclet_costs;
1986 break;
1987 case PROCESSOR_V9:
1988 case PROCESSOR_ULTRASPARC:
1989 sparc_costs = &ultrasparc_costs;
1990 break;
1991 case PROCESSOR_ULTRASPARC3:
1992 sparc_costs = &ultrasparc3_costs;
1993 break;
1994 case PROCESSOR_NIAGARA:
1995 sparc_costs = &niagara_costs;
1996 break;
1997 case PROCESSOR_NIAGARA2:
1998 sparc_costs = &niagara2_costs;
1999 break;
2000 case PROCESSOR_NIAGARA3:
2001 sparc_costs = &niagara3_costs;
2002 break;
2003 case PROCESSOR_NIAGARA4:
2004 sparc_costs = &niagara4_costs;
2005 break;
2006 case PROCESSOR_NIAGARA7:
2007 sparc_costs = &niagara7_costs;
2008 break;
2009 case PROCESSOR_M8:
2010 sparc_costs = &m8_costs;
2011 break;
2012 case PROCESSOR_NATIVE:
2013 gcc_unreachable ();
2014 };
2015
2016 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
2017 can run at the same time. More important, it is the threshold
2018 defining when additional prefetches will be dropped by the
2019 hardware.
2020
2021 The UltraSPARC-III features a documented prefetch queue with a
2022 size of 8. Additional prefetches issued in the cpu are
2023 dropped.
2024
2025 Niagara processors are different. In these processors prefetches
2026 are handled much like regular loads. The L1 miss buffer is 32
2027 entries, but prefetches start getting affected when 30 entries
2028 become occupied. That occupation could be a mix of regular loads
2029 and prefetches though. And that buffer is shared by all threads.
2030 Once the threshold is reached, if the core is running a single
2031 thread the prefetch will retry. If more than one thread is
2032 running, the prefetch will be dropped.
2033
2034 All this makes it very difficult to determine how many
2035 prefetches can be issued simultaneously, even in a
2036 single-threaded program. Experimental results show that setting
2037 this parameter to 32 works well when the number of threads is not
2038 high. */
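/* To summarize the selection below: 2 simultaneous prefetches for
   UltraSPARC I/II and Niagara 1 through 4, 8 for UltraSPARC III,
   32 for Niagara 7 and M8, and the generic default of 3 for
   everything else.  */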
2039 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
2040 ((sparc_cpu == PROCESSOR_ULTRASPARC
2041 || sparc_cpu == PROCESSOR_NIAGARA
2042 || sparc_cpu == PROCESSOR_NIAGARA2
2043 || sparc_cpu == PROCESSOR_NIAGARA3
2044 || sparc_cpu == PROCESSOR_NIAGARA4)
2045 ? 2
2046 : (sparc_cpu == PROCESSOR_ULTRASPARC3
2047 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
2048 || sparc_cpu == PROCESSOR_M8)
2049 ? 32 : 3))),
2050 global_options.x_param_values,
2051 global_options_set.x_param_values);
2052
2053 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
2054 bytes.
2055
2056 The Oracle SPARC Architecture (previously the UltraSPARC
2057 Architecture) specification states that when a PREFETCH[A]
2058 instruction is executed an implementation-specific amount of data
2059 is prefetched, and that it is at least 64 bytes long (aligned to
2060 at least 64 bytes).
2061
2062 However, this is not correct. The M7 (and implementations prior
2063 to that) does not guarantee a 64B prefetch into a cache if the
2064 line size is smaller. A single cache line is all that is ever
2065 prefetched. So for the M7, where the L1D$ has 32B lines and the
2066 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
2067 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
2068 is a read_n prefetch, which is the only type which allocates to
2069 the L1.) */
2070 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
2071 (sparc_cpu == PROCESSOR_M8
2072 ? 64 : 32),
2073 global_options.x_param_values,
2074 global_options_set.x_param_values);
2075
2076 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
2077 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
2078 Niagara processors feature an L1D$ of 16KB. */
2079 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
2080 ((sparc_cpu == PROCESSOR_ULTRASPARC
2081 || sparc_cpu == PROCESSOR_ULTRASPARC3
2082 || sparc_cpu == PROCESSOR_NIAGARA
2083 || sparc_cpu == PROCESSOR_NIAGARA2
2084 || sparc_cpu == PROCESSOR_NIAGARA3
2085 || sparc_cpu == PROCESSOR_NIAGARA4
2086 || sparc_cpu == PROCESSOR_NIAGARA7
2087 || sparc_cpu == PROCESSOR_M8)
2088 ? 16 : 64),
2089 global_options.x_param_values,
2090 global_options_set.x_param_values);
2091
2092
2093 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes. Note
2094 that 512 is the default in params.def. */
2095 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
2096 ((sparc_cpu == PROCESSOR_NIAGARA4
2097 || sparc_cpu == PROCESSOR_M8)
2098 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
2099 ? 256 : 512)),
2100 global_options.x_param_values,
2101 global_options_set.x_param_values);
2102
2103
2104 /* Disable save slot sharing for call-clobbered registers by default.
2105 The IRA sharing algorithm works on single registers only and this
2106 pessimizes for double floating-point registers. */
2107 if (!global_options_set.x_flag_ira_share_save_slots)
2108 flag_ira_share_save_slots = 0;
2109
2110 /* Only enable REE by default in 64-bit mode where it helps to eliminate
2111 redundant 32-to-64-bit extensions. */
2112 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
2113 flag_ree = 0;
2114
2115 /* Do various machine dependent initializations. */
2116 sparc_init_modes ();
2117
2118 /* Set up function hooks. */
2119 init_machine_status = sparc_init_machine_status;
2120 }
2121 \f
2122 /* Miscellaneous utilities. */
2123
2124 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
2125 or branch on register contents instructions. */
2126
2127 int
2128 v9_regcmp_p (enum rtx_code code)
2129 {
2130 return (code == EQ || code == NE || code == GE || code == LT
2131 || code == LE || code == GT);
2132 }
2133
2134 /* Nonzero if OP is a floating point constant which can
2135 be loaded into an integer register using a single
2136 sethi instruction. */
2137
2138 int
2139 fp_sethi_p (rtx op)
2140 {
2141 if (GET_CODE (op) == CONST_DOUBLE)
2142 {
2143 long i;
2144
2145 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2146 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
2147 }
2148
2149 return 0;
2150 }
2151
2152 /* Nonzero if OP is a floating point constant which can
2153 be loaded into an integer register using a single
2154 mov instruction. */
2155
2156 int
2157 fp_mov_p (rtx op)
2158 {
2159 if (GET_CODE (op) == CONST_DOUBLE)
2160 {
2161 long i;
2162
2163 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2164 return SPARC_SIMM13_P (i);
2165 }
2166
2167 return 0;
2168 }
2169
2170 /* Nonzero if OP is a floating point constant which can
2171 be loaded into an integer register using a high/losum
2172 instruction sequence. */
2173
2174 int
2175 fp_high_losum_p (rtx op)
2176 {
2177 /* The constraints calling this should only be in
2178 SFmode move insns, so any constant which cannot
2179 be moved using a single insn will do. */
2180 if (GET_CODE (op) == CONST_DOUBLE)
2181 {
2182 long i;
2183
2184 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2185 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
2186 }
2187
2188 return 0;
2189 }
2190
2191 /* Return true if the address of LABEL can be loaded by means of the
2192 mov{si,di}_pic_label_ref patterns in PIC mode. */
2193
2194 static bool
2195 can_use_mov_pic_label_ref (rtx label)
2196 {
2197 /* VxWorks does not impose a fixed gap between segments; the run-time
2198 gap can be different from the object-file gap. We therefore can't
2199 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
2200 are absolutely sure that X is in the same segment as the GOT.
2201 Unfortunately, the flexibility of linker scripts means that we
2202 can't be sure of that in general, so assume that GOT-relative
2203 accesses are never valid on VxWorks. */
2204 if (TARGET_VXWORKS_RTP)
2205 return false;
2206
2207 /* Similarly, if the label is non-local, it might end up being placed
2208 in a different section than the current one; now mov_pic_label_ref
2209 requires the label and the code to be in the same section. */
2210 if (LABEL_REF_NONLOCAL_P (label))
2211 return false;
2212
2213 /* Finally, if we are reordering basic blocks and partitioning into hot
2214 and cold sections, this might happen for any label. */
2215 if (flag_reorder_blocks_and_partition)
2216 return false;
2217
2218 return true;
2219 }
2220
2221 /* Expand a move instruction. Return true if all work is done. */
2222
2223 bool
2224 sparc_expand_move (machine_mode mode, rtx *operands)
2225 {
2226 /* Handle sets of MEM first. */
2227 if (GET_CODE (operands[0]) == MEM)
2228 {
2229 /* 0 is a register (or a pair of registers) on SPARC. */
2230 if (register_or_zero_operand (operands[1], mode))
2231 return false;
2232
2233 if (!reload_in_progress)
2234 {
2235 operands[0] = validize_mem (operands[0]);
2236 operands[1] = force_reg (mode, operands[1]);
2237 }
2238 }
2239
2240 /* Fix up TLS cases. */
2241 if (TARGET_HAVE_TLS
2242 && CONSTANT_P (operands[1])
2243 && sparc_tls_referenced_p (operands [1]))
2244 {
2245 operands[1] = sparc_legitimize_tls_address (operands[1]);
2246 return false;
2247 }
2248
2249 /* Fix up PIC cases. */
2250 if (flag_pic && CONSTANT_P (operands[1]))
2251 {
2252 if (pic_address_needs_scratch (operands[1]))
2253 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
2254
2255 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2256 if ((GET_CODE (operands[1]) == LABEL_REF
2257 && can_use_mov_pic_label_ref (operands[1]))
2258 || (GET_CODE (operands[1]) == CONST
2259 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2260 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
2261 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
2262 && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
2263 {
2264 if (mode == SImode)
2265 {
2266 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2267 return true;
2268 }
2269
2270 if (mode == DImode)
2271 {
2272 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2273 return true;
2274 }
2275 }
2276
2277 if (symbolic_operand (operands[1], mode))
2278 {
2279 operands[1]
2280 = sparc_legitimize_pic_address (operands[1],
2281 reload_in_progress
2282 ? operands[0] : NULL_RTX);
2283 return false;
2284 }
2285 }
2286
2287 /* If we are trying to toss an integer constant into FP registers,
2288 or loading a FP or vector constant, force it into memory. */
2289 if (CONSTANT_P (operands[1])
2290 && REG_P (operands[0])
2291 && (SPARC_FP_REG_P (REGNO (operands[0]))
2292 || SCALAR_FLOAT_MODE_P (mode)
2293 || VECTOR_MODE_P (mode)))
2294 {
2295 /* emit_group_store will send such bogosity to us when it is
2296 not storing directly into memory. So fix this up to avoid
2297 crashes in output_constant_pool. */
2298 if (operands [1] == const0_rtx)
2299 operands[1] = CONST0_RTX (mode);
2300
2301 /* We can clear or set to all-ones FP registers if TARGET_VIS, and
2302 always other regs. */
2303 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2304 && (const_zero_operand (operands[1], mode)
2305 || const_all_ones_operand (operands[1], mode)))
2306 return false;
2307
2308 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2309 /* We are able to build any SF constant in integer registers
2310 with at most 2 instructions. */
2311 && (mode == SFmode
2312 /* And any DF constant in integer registers if needed. */
2313 || (mode == DFmode && !can_create_pseudo_p ())))
2314 return false;
2315
2316 operands[1] = force_const_mem (mode, operands[1]);
2317 if (!reload_in_progress)
2318 operands[1] = validize_mem (operands[1]);
2319 return false;
2320 }
2321
2322 /* Accept non-constants and valid constants unmodified. */
2323 if (!CONSTANT_P (operands[1])
2324 || GET_CODE (operands[1]) == HIGH
2325 || input_operand (operands[1], mode))
2326 return false;
2327
2328 switch (mode)
2329 {
2330 case E_QImode:
2331 /* All QImode constants require only one insn, so proceed. */
2332 break;
2333
2334 case E_HImode:
2335 case E_SImode:
2336 sparc_emit_set_const32 (operands[0], operands[1]);
2337 return true;
2338
2339 case E_DImode:
2340 /* input_operand should have filtered out 32-bit mode. */
2341 sparc_emit_set_const64 (operands[0], operands[1]);
2342 return true;
2343
2344 case E_TImode:
2345 {
2346 rtx high, low;
2347 /* TImode isn't available in 32-bit mode. */
2348 split_double (operands[1], &high, &low);
2349 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2350 high));
2351 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2352 low));
2353 }
2354 return true;
2355
2356 default:
2357 gcc_unreachable ();
2358 }
2359
2360 return false;
2361 }
2362
2363 /* Load OP1, a 32-bit constant, into OP0, a register.
2364 We know it can't be done in one insn when we get
2365 here, the move expander guarantees this. */
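/* For example, the constant 0x12345678, which fits in neither a simm13
   nor a sethi immediate, is built in two steps: first set a register to
   the value with the low 10 bits cleared (0x12345400), then OR in the
   low 10 bits (0x278).  */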
2366
2367 static void
2368 sparc_emit_set_const32 (rtx op0, rtx op1)
2369 {
2370 machine_mode mode = GET_MODE (op0);
2371 rtx temp = op0;
2372
2373 if (can_create_pseudo_p ())
2374 temp = gen_reg_rtx (mode);
2375
2376 if (GET_CODE (op1) == CONST_INT)
2377 {
2378 gcc_assert (!small_int_operand (op1, mode)
2379 && !const_high_operand (op1, mode));
2380
2381 /* Emit them as real moves instead of a HIGH/LO_SUM,
2382 this way CSE can see everything and reuse intermediate
2383 values if it wants. */
2384 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2385 & ~(HOST_WIDE_INT) 0x3ff)));
2386
2387 emit_insn (gen_rtx_SET (op0,
2388 gen_rtx_IOR (mode, temp,
2389 GEN_INT (INTVAL (op1) & 0x3ff))));
2390 }
2391 else
2392 {
2393 /* A symbol, emit in the traditional way. */
2394 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2395 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
2396 }
2397 }
2398
2399 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2400 If TEMP is nonzero, we are forbidden to use any other scratch
2401 registers. Otherwise, we are allowed to generate them as needed.
2402
2403 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2404 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2405
2406 void
2407 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2408 {
2409 rtx cst, temp1, temp2, temp3, temp4, temp5;
2410 rtx ti_temp = 0;
2411
2412 /* Deal with too large offsets. */
2413 if (GET_CODE (op1) == CONST
2414 && GET_CODE (XEXP (op1, 0)) == PLUS
2415 && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
2416 && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
2417 {
2418 gcc_assert (!temp);
2419 temp1 = gen_reg_rtx (DImode);
2420 temp2 = gen_reg_rtx (DImode);
2421 sparc_emit_set_const64 (temp2, cst);
2422 sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
2423 NULL_RTX);
2424 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
2425 return;
2426 }
2427
2428 if (temp && GET_MODE (temp) == TImode)
2429 {
2430 ti_temp = temp;
2431 temp = gen_rtx_REG (DImode, REGNO (temp));
2432 }
2433
2434 /* SPARC-V9 code model support. */
2435 switch (sparc_code_model)
2436 {
2437 case CM_MEDLOW:
2438 /* The range spanned by all instructions in the object is less
2439 than 2^31 bytes (2GB) and the distance from any instruction
2440 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2441 than 2^31 bytes (2GB).
2442
2443 The executable must be in the low 4TB of the virtual address
2444 space.
2445
2446 sethi %hi(symbol), %temp1
2447 or %temp1, %lo(symbol), %reg */
2448 if (temp)
2449 temp1 = temp; /* op0 is allowed. */
2450 else
2451 temp1 = gen_reg_rtx (DImode);
2452
2453 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2454 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2455 break;
2456
2457 case CM_MEDMID:
2458 /* The range spanned by all instructions in the object is less
2459 than 2^31 bytes (2GB) and the distance from any instruction
2460 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2461 than 2^31 bytes (2GB).
2462
2463 The executable must be in the low 16TB of the virtual address
2464 space.
2465
2466 sethi %h44(symbol), %temp1
2467 or %temp1, %m44(symbol), %temp2
2468 sllx %temp2, 12, %temp3
2469 or %temp3, %l44(symbol), %reg */
2470 if (temp)
2471 {
2472 temp1 = op0;
2473 temp2 = op0;
2474 temp3 = temp; /* op0 is allowed. */
2475 }
2476 else
2477 {
2478 temp1 = gen_reg_rtx (DImode);
2479 temp2 = gen_reg_rtx (DImode);
2480 temp3 = gen_reg_rtx (DImode);
2481 }
2482
2483 emit_insn (gen_seth44 (temp1, op1));
2484 emit_insn (gen_setm44 (temp2, temp1, op1));
2485 emit_insn (gen_rtx_SET (temp3,
2486 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2487 emit_insn (gen_setl44 (op0, temp3, op1));
2488 break;
2489
2490 case CM_MEDANY:
2491 /* The range spanned by all instructions in the object is less
2492 than 2^31 bytes (2GB) and the distance from any instruction
2493 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2494 than 2^31 bytes (2GB).
2495
2496 The executable can be placed anywhere in the virtual address
2497 space.
2498
2499 sethi %hh(symbol), %temp1
2500 sethi %lm(symbol), %temp2
2501 or %temp1, %hm(symbol), %temp3
2502 sllx %temp3, 32, %temp4
2503 or %temp4, %temp2, %temp5
2504 or %temp5, %lo(symbol), %reg */
2505 if (temp)
2506 {
2507 /* It is possible that one of the registers we got for operands[2]
2508 might coincide with that of operands[0] (which is why we made
2509 it TImode). Pick the other one to use as our scratch. */
2510 if (rtx_equal_p (temp, op0))
2511 {
2512 gcc_assert (ti_temp);
2513 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2514 }
2515 temp1 = op0;
2516 temp2 = temp; /* op0 is _not_ allowed, see above. */
2517 temp3 = op0;
2518 temp4 = op0;
2519 temp5 = op0;
2520 }
2521 else
2522 {
2523 temp1 = gen_reg_rtx (DImode);
2524 temp2 = gen_reg_rtx (DImode);
2525 temp3 = gen_reg_rtx (DImode);
2526 temp4 = gen_reg_rtx (DImode);
2527 temp5 = gen_reg_rtx (DImode);
2528 }
2529
2530 emit_insn (gen_sethh (temp1, op1));
2531 emit_insn (gen_setlm (temp2, op1));
2532 emit_insn (gen_sethm (temp3, temp1, op1));
2533 emit_insn (gen_rtx_SET (temp4,
2534 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2535 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2536 emit_insn (gen_setlo (op0, temp5, op1));
2537 break;
2538
2539 case CM_EMBMEDANY:
2540 /* Old old old backwards compatibility kruft here.
2541 Essentially it is MEDLOW with a fixed 64-bit
2542 virtual base added to all data segment addresses.
2543 Text-segment stuff is computed like MEDANY, we can't
2544 reuse the code above because the relocation knobs
2545 look different.
2546
2547 Data segment: sethi %hi(symbol), %temp1
2548 add %temp1, EMBMEDANY_BASE_REG, %temp2
2549 or %temp2, %lo(symbol), %reg */
2550 if (data_segment_operand (op1, GET_MODE (op1)))
2551 {
2552 if (temp)
2553 {
2554 temp1 = temp; /* op0 is allowed. */
2555 temp2 = op0;
2556 }
2557 else
2558 {
2559 temp1 = gen_reg_rtx (DImode);
2560 temp2 = gen_reg_rtx (DImode);
2561 }
2562
2563 emit_insn (gen_embmedany_sethi (temp1, op1));
2564 emit_insn (gen_embmedany_brsum (temp2, temp1));
2565 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2566 }
2567
2568 /* Text segment: sethi %uhi(symbol), %temp1
2569 sethi %hi(symbol), %temp2
2570 or %temp1, %ulo(symbol), %temp3
2571 sllx %temp3, 32, %temp4
2572 or %temp4, %temp2, %temp5
2573 or %temp5, %lo(symbol), %reg */
2574 else
2575 {
2576 if (temp)
2577 {
2578 /* It is possible that one of the registers we got for operands[2]
2579 might coincide with that of operands[0] (which is why we made
2580 it TImode). Pick the other one to use as our scratch. */
2581 if (rtx_equal_p (temp, op0))
2582 {
2583 gcc_assert (ti_temp);
2584 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2585 }
2586 temp1 = op0;
2587 temp2 = temp; /* op0 is _not_ allowed, see above. */
2588 temp3 = op0;
2589 temp4 = op0;
2590 temp5 = op0;
2591 }
2592 else
2593 {
2594 temp1 = gen_reg_rtx (DImode);
2595 temp2 = gen_reg_rtx (DImode);
2596 temp3 = gen_reg_rtx (DImode);
2597 temp4 = gen_reg_rtx (DImode);
2598 temp5 = gen_reg_rtx (DImode);
2599 }
2600
2601 emit_insn (gen_embmedany_textuhi (temp1, op1));
2602 emit_insn (gen_embmedany_texthi (temp2, op1));
2603 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2604 emit_insn (gen_rtx_SET (temp4,
2605 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2606 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2607 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2608 }
2609 break;
2610
2611 default:
2612 gcc_unreachable ();
2613 }
2614 }
2615
2616 /* These avoid problems when cross compiling. If we do not
2617 go through all this hair then the optimizer will see
2618 invalid REG_EQUAL notes or in some cases none at all. */
2619 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2620 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2621 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2622 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2623
2624 /* The optimizer is not to assume anything about exactly
2625 which bits are set for a HIGH, they are unspecified.
2626 Unfortunately this leads to many missed optimizations
2627 during CSE. We mask out the non-HIGH bits so that the result
2628 matches a plain movdi, which alleviates this problem. */
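/* For instance, gen_safe_HIGH64 (dest, 0xdeadbeef) produces
   (set dest (const_int 0xdeadbc00)), i.e. the value with its low
   10 bits cleared, so CSE knows exactly which bits are set.  */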
2629 static rtx
2630 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2631 {
2632 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2633 }
2634
2635 static rtx
2636 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2637 {
2638 return gen_rtx_SET (dest, GEN_INT (val));
2639 }
2640
2641 static rtx
2642 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2643 {
2644 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2645 }
2646
2647 static rtx
2648 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2649 {
2650 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2651 }
2652
2653 /* Worker routines for 64-bit constant formation on arch64.
2654 One of the key things to do in these emissions is
2655 to create as many temporary REGs as possible. This makes
2656 half-built constants available for reuse later, when similar
2657 values are needed again.
2658 Without doing this, the optimizer cannot see such
2659 opportunities. */
2660
2661 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2662 unsigned HOST_WIDE_INT, int);
2663
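/* Emit the 2-insn sequence for a 64-bit constant whose upper 32 bits are
   all zeros or, when IS_NEG is nonzero, all ones: a sethi of LOW_BITS
   (respectively of ~LOW_BITS) followed by an or (respectively an xor, or
   a one's complement) that fills in the low 10 bits and, in the negated
   case, also produces the sign-extended upper half.  */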
2664 static void
2665 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2666 unsigned HOST_WIDE_INT low_bits, int is_neg)
2667 {
2668 unsigned HOST_WIDE_INT high_bits;
2669
2670 if (is_neg)
2671 high_bits = (~low_bits) & 0xffffffff;
2672 else
2673 high_bits = low_bits;
2674
2675 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2676 if (!is_neg)
2677 {
2678 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2679 }
2680 else
2681 {
2682 /* If we are XOR'ing with -1, then we should emit a one's complement
2683 instead. This way the combiner will notice logical operations
2684 such as ANDN later on and substitute. */
2685 if ((low_bits & 0x3ff) == 0x3ff)
2686 {
2687 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2688 }
2689 else
2690 {
2691 emit_insn (gen_rtx_SET (op0,
2692 gen_safe_XOR64 (temp,
2693 (-(HOST_WIDE_INT)0x400
2694 | (low_bits & 0x3ff)))));
2695 }
2696 }
2697 }
2698
2699 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2700 unsigned HOST_WIDE_INT, int);
2701
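/* Emit a short sequence that first materializes HIGH_BITS, using either a
   sethi/or pair or a single move when the value fits, then shifts the
   result left by SHIFT_COUNT, and finally ors in LOW_IMMEDIATE if it is
   nonzero.  */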
2702 static void
2703 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2704 unsigned HOST_WIDE_INT high_bits,
2705 unsigned HOST_WIDE_INT low_immediate,
2706 int shift_count)
2707 {
2708 rtx temp2 = op0;
2709
2710 if ((high_bits & 0xfffffc00) != 0)
2711 {
2712 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2713 if ((high_bits & ~0xfffffc00) != 0)
2714 emit_insn (gen_rtx_SET (op0,
2715 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2716 else
2717 temp2 = temp;
2718 }
2719 else
2720 {
2721 emit_insn (gen_safe_SET64 (temp, high_bits));
2722 temp2 = temp;
2723 }
2724
2725 /* Now shift it up into place. */
2726 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2727 GEN_INT (shift_count))));
2728
2729 /* If there is a low immediate part piece, finish up by
2730 putting that in as well. */
2731 if (low_immediate != 0)
2732 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
2733 }
2734
2735 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2736 unsigned HOST_WIDE_INT);
2737
2738 /* Full 64-bit constant decomposition. Even though this is the
2739 'worst' case, we still optimize a few things away. */
2740 static void
2741 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2742 unsigned HOST_WIDE_INT high_bits,
2743 unsigned HOST_WIDE_INT low_bits)
2744 {
2745 rtx sub_temp = op0;
2746
2747 if (can_create_pseudo_p ())
2748 sub_temp = gen_reg_rtx (DImode);
2749
2750 if ((high_bits & 0xfffffc00) != 0)
2751 {
2752 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2753 if ((high_bits & ~0xfffffc00) != 0)
2754 emit_insn (gen_rtx_SET (sub_temp,
2755 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2756 else
2757 sub_temp = temp;
2758 }
2759 else
2760 {
2761 emit_insn (gen_safe_SET64 (temp, high_bits));
2762 sub_temp = temp;
2763 }
2764
2765 if (can_create_pseudo_p ())
2766 {
2767 rtx temp2 = gen_reg_rtx (DImode);
2768 rtx temp3 = gen_reg_rtx (DImode);
2769 rtx temp4 = gen_reg_rtx (DImode);
2770
2771 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2772 GEN_INT (32))));
2773
2774 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2775 if ((low_bits & ~0xfffffc00) != 0)
2776 {
2777 emit_insn (gen_rtx_SET (temp3,
2778 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2779 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2780 }
2781 else
2782 {
2783 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2784 }
2785 }
2786 else
2787 {
2788 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2789 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2790 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2791 int to_shift = 12;
2792
2793 /* We are in the middle of reload, so this is really
2794 painful. However we do still make an attempt to
2795 avoid emitting truly stupid code. */
2796 if (low1 != const0_rtx)
2797 {
2798 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2799 GEN_INT (to_shift))));
2800 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2801 sub_temp = op0;
2802 to_shift = 12;
2803 }
2804 else
2805 {
2806 to_shift += 12;
2807 }
2808 if (low2 != const0_rtx)
2809 {
2810 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2811 GEN_INT (to_shift))));
2812 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2813 sub_temp = op0;
2814 to_shift = 8;
2815 }
2816 else
2817 {
2818 to_shift += 8;
2819 }
2820 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2821 GEN_INT (to_shift))));
2822 if (low3 != const0_rtx)
2823 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2824 /* phew... */
2825 }
2826 }
2827
2828 /* Analyze a 64-bit constant for certain properties. */
2829 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2830 unsigned HOST_WIDE_INT,
2831 int *, int *, int *);
2832
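/* HBSP and LBSP receive the positions of the highest and lowest set bits
   of the constant, and ABBASP is set to 1 when every bit in between is
   also set.  For example, 0x000000000000ff00 yields *hbsp == 15,
   *lbsp == 8 and *abbasp == 1.  */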
2833 static void
2834 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2835 unsigned HOST_WIDE_INT low_bits,
2836 int *hbsp, int *lbsp, int *abbasp)
2837 {
2838 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2839 int i;
2840
2841 lowest_bit_set = highest_bit_set = -1;
2842 i = 0;
2843 do
2844 {
2845 if ((lowest_bit_set == -1)
2846 && ((low_bits >> i) & 1))
2847 lowest_bit_set = i;
2848 if ((highest_bit_set == -1)
2849 && ((high_bits >> (32 - i - 1)) & 1))
2850 highest_bit_set = (64 - i - 1);
2851 }
2852 while (++i < 32
2853 && ((highest_bit_set == -1)
2854 || (lowest_bit_set == -1)));
2855 if (i == 32)
2856 {
2857 i = 0;
2858 do
2859 {
2860 if ((lowest_bit_set == -1)
2861 && ((high_bits >> i) & 1))
2862 lowest_bit_set = i + 32;
2863 if ((highest_bit_set == -1)
2864 && ((low_bits >> (32 - i - 1)) & 1))
2865 highest_bit_set = 32 - i - 1;
2866 }
2867 while (++i < 32
2868 && ((highest_bit_set == -1)
2869 || (lowest_bit_set == -1)));
2870 }
2871 /* If there are no bits set this should have gone out
2872 as one instruction! */
2873 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2874 all_bits_between_are_set = 1;
2875 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2876 {
2877 if (i < 32)
2878 {
2879 if ((low_bits & (1 << i)) != 0)
2880 continue;
2881 }
2882 else
2883 {
2884 if ((high_bits & (1 << (i - 32))) != 0)
2885 continue;
2886 }
2887 all_bits_between_are_set = 0;
2888 break;
2889 }
2890 *hbsp = highest_bit_set;
2891 *lbsp = lowest_bit_set;
2892 *abbasp = all_bits_between_are_set;
2893 }
2894
2895 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2896
2897 static int
2898 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2899 unsigned HOST_WIDE_INT low_bits)
2900 {
2901 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2902
2903 if (high_bits == 0
2904 || high_bits == 0xffffffff)
2905 return 1;
2906
2907 analyze_64bit_constant (high_bits, low_bits,
2908 &highest_bit_set, &lowest_bit_set,
2909 &all_bits_between_are_set);
2910
2911 if ((highest_bit_set == 63
2912 || lowest_bit_set == 0)
2913 && all_bits_between_are_set != 0)
2914 return 1;
2915
2916 if ((highest_bit_set - lowest_bit_set) < 21)
2917 return 1;
2918
2919 return 0;
2920 }
2921
2922 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2923 unsigned HOST_WIDE_INT,
2924 int, int);
2925
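/* Extract the contiguous "focus" window of bits from the 64-bit constant
   HIGH_BITS:LOW_BITS: shift the value right by LOWEST_BIT_SET and then
   left by SHIFT, so the interesting bits land near the bottom of the
   result.  For example, for 0x00000ff000000000 (lowest set bit 36) and
   SHIFT == 0 the result is 0xff.  */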
2926 static unsigned HOST_WIDE_INT
2927 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2928 unsigned HOST_WIDE_INT low_bits,
2929 int lowest_bit_set, int shift)
2930 {
2931 HOST_WIDE_INT hi, lo;
2932
2933 if (lowest_bit_set < 32)
2934 {
2935 lo = (low_bits >> lowest_bit_set) << shift;
2936 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2937 }
2938 else
2939 {
2940 lo = 0;
2941 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2942 }
2943 gcc_assert (! (hi & lo));
2944 return (hi | lo);
2945 }
2946
2947 /* Here we are sure to be arch64 and this is an integer constant
2948 being loaded into a register. Emit the most efficient
2949 insn sequence possible. Detection of all the 1-insn cases
2950 has been done already. */
2951 static void
2952 sparc_emit_set_const64 (rtx op0, rtx op1)
2953 {
2954 unsigned HOST_WIDE_INT high_bits, low_bits;
2955 int lowest_bit_set, highest_bit_set;
2956 int all_bits_between_are_set;
2957 rtx temp = 0;
2958
2959 /* Sanity check that we know what we are working with. */
2960 gcc_assert (TARGET_ARCH64
2961 && (GET_CODE (op0) == SUBREG
2962 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2963
2964 if (! can_create_pseudo_p ())
2965 temp = op0;
2966
2967 if (GET_CODE (op1) != CONST_INT)
2968 {
2969 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2970 return;
2971 }
2972
2973 if (! temp)
2974 temp = gen_reg_rtx (DImode);
2975
2976 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2977 low_bits = (INTVAL (op1) & 0xffffffff);
2978
2979 /* low_bits bits 0 --> 31
2980 high_bits bits 32 --> 63 */
2981
2982 analyze_64bit_constant (high_bits, low_bits,
2983 &highest_bit_set, &lowest_bit_set,
2984 &all_bits_between_are_set);
2985
2986 /* First try for a 2-insn sequence. */
2987
2988 /* These situations are preferred because the optimizer can
2989 * do more things with them:
2990 * 1) mov -1, %reg
2991 * sllx %reg, shift, %reg
2992 * 2) mov -1, %reg
2993 * srlx %reg, shift, %reg
2994 * 3) mov some_small_const, %reg
2995 * sllx %reg, shift, %reg
2996 */
2997 if (((highest_bit_set == 63
2998 || lowest_bit_set == 0)
2999 && all_bits_between_are_set != 0)
3000 || ((highest_bit_set - lowest_bit_set) < 12))
3001 {
3002 HOST_WIDE_INT the_const = -1;
3003 int shift = lowest_bit_set;
3004
3005 if ((highest_bit_set != 63
3006 && lowest_bit_set != 0)
3007 || all_bits_between_are_set == 0)
3008 {
3009 the_const =
3010 create_simple_focus_bits (high_bits, low_bits,
3011 lowest_bit_set, 0);
3012 }
3013 else if (lowest_bit_set == 0)
3014 shift = -(63 - highest_bit_set);
3015
3016 gcc_assert (SPARC_SIMM13_P (the_const));
3017 gcc_assert (shift != 0);
3018
3019 emit_insn (gen_safe_SET64 (temp, the_const));
3020 if (shift > 0)
3021 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
3022 GEN_INT (shift))));
3023 else if (shift < 0)
3024 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
3025 GEN_INT (-shift))));
3026 return;
3027 }
3028
3029 /* Now a range of 22 or fewer bits set somewhere.
3030 * 1) sethi %hi(focus_bits), %reg
3031 * sllx %reg, shift, %reg
3032 * 2) sethi %hi(focus_bits), %reg
3033 * srlx %reg, shift, %reg
3034 */
3035 if ((highest_bit_set - lowest_bit_set) < 21)
3036 {
3037 unsigned HOST_WIDE_INT focus_bits =
3038 create_simple_focus_bits (high_bits, low_bits,
3039 lowest_bit_set, 10);
3040
3041 gcc_assert (SPARC_SETHI_P (focus_bits));
3042 gcc_assert (lowest_bit_set != 10);
3043
3044 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
3045
3046 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
3047 if (lowest_bit_set < 10)
3048 emit_insn (gen_rtx_SET (op0,
3049 gen_rtx_LSHIFTRT (DImode, temp,
3050 GEN_INT (10 - lowest_bit_set))));
3051 else if (lowest_bit_set > 10)
3052 emit_insn (gen_rtx_SET (op0,
3053 gen_rtx_ASHIFT (DImode, temp,
3054 GEN_INT (lowest_bit_set - 10))));
3055 return;
3056 }
3057
3058 /* 1) sethi %hi(low_bits), %reg
3059 * or %reg, %lo(low_bits), %reg
3060 * 2) sethi %hi(~low_bits), %reg
3061 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
3062 */
3063 if (high_bits == 0
3064 || high_bits == 0xffffffff)
3065 {
3066 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
3067 (high_bits == 0xffffffff));
3068 return;
3069 }
3070
3071 /* Now, try 3-insn sequences. */
3072
3073 /* 1) sethi %hi(high_bits), %reg
3074 * or %reg, %lo(high_bits), %reg
3075 * sllx %reg, 32, %reg
3076 */
3077 if (low_bits == 0)
3078 {
3079 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
3080 return;
3081 }
3082
3083 /* We may be able to do something quick
3084 when the constant is negated, so try that. */
3085 if (const64_is_2insns ((~high_bits) & 0xffffffff,
3086 (~low_bits) & 0xfffffc00))
3087 {
3088 /* NOTE: The trailing bits get XOR'd so we need the
3089 non-negated bits, not the negated ones. */
3090 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
3091
3092 if ((((~high_bits) & 0xffffffff) == 0
3093 && ((~low_bits) & 0x80000000) == 0)
3094 || (((~high_bits) & 0xffffffff) == 0xffffffff
3095 && ((~low_bits) & 0x80000000) != 0))
3096 {
3097 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
3098
3099 if ((SPARC_SETHI_P (fast_int)
3100 && (~high_bits & 0xffffffff) == 0)
3101 || SPARC_SIMM13_P (fast_int))
3102 emit_insn (gen_safe_SET64 (temp, fast_int));
3103 else
3104 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
3105 }
3106 else
3107 {
3108 rtx negated_const;
3109 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
3110 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
3111 sparc_emit_set_const64 (temp, negated_const);
3112 }
3113
3114 /* If we are XOR'ing with -1, then we should emit a one's complement
3115 instead. This way the combiner will notice logical operations
3116 such as ANDN later on and substitute. */
3117 if (trailing_bits == 0x3ff)
3118 {
3119 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
3120 }
3121 else
3122 {
3123 emit_insn (gen_rtx_SET (op0,
3124 gen_safe_XOR64 (temp,
3125 (-0x400 | trailing_bits))));
3126 }
3127 return;
3128 }
3129
3130 /* 1) sethi %hi(xxx), %reg
3131 * or %reg, %lo(xxx), %reg
3132 * sllx %reg, yyy, %reg
3133 *
3134 * ??? This is just a generalized version of the low_bits==0
3135 * thing above, FIXME...
3136 */
3137 if ((highest_bit_set - lowest_bit_set) < 32)
3138 {
3139 unsigned HOST_WIDE_INT focus_bits =
3140 create_simple_focus_bits (high_bits, low_bits,
3141 lowest_bit_set, 0);
3142
3143 /* We can't get here in this state. */
3144 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
3145
3146 /* So what we know is that the set bits straddle the
3147 middle of the 64-bit word. */
3148 sparc_emit_set_const64_quick2 (op0, temp,
3149 focus_bits, 0,
3150 lowest_bit_set);
3151 return;
3152 }
3153
3154 /* 1) sethi %hi(high_bits), %reg
3155 * or %reg, %lo(high_bits), %reg
3156 * sllx %reg, 32, %reg
3157 * or %reg, low_bits, %reg
3158 */
3159 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
3160 {
3161 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
3162 return;
3163 }
3164
3165 /* The easiest way when all else fails, is full decomposition. */
3166 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
3167 }
3168
3169 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
3170
3171 static bool
3172 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3173 {
3174 *p1 = SPARC_ICC_REG;
3175 *p2 = SPARC_FCC_REG;
3176 return true;
3177 }
3178
3179 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
3180
3181 static unsigned int
3182 sparc_min_arithmetic_precision (void)
3183 {
3184 return 32;
3185 }
3186
3187 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
3188 return the mode to be used for the comparison. For floating-point,
3189 CCFP[E]mode is used. CCNZmode should be used when the first operand
3190 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
3191 processing is needed. */
3192
3193 machine_mode
3194 select_cc_mode (enum rtx_code op, rtx x, rtx y)
3195 {
3196 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3197 {
3198 switch (op)
3199 {
3200 case EQ:
3201 case NE:
3202 case UNORDERED:
3203 case ORDERED:
3204 case UNLT:
3205 case UNLE:
3206 case UNGT:
3207 case UNGE:
3208 case UNEQ:
3209 case LTGT:
3210 return CCFPmode;
3211
3212 case LT:
3213 case LE:
3214 case GT:
3215 case GE:
3216 return CCFPEmode;
3217
3218 default:
3219 gcc_unreachable ();
3220 }
3221 }
3222 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
3223 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
3224 && y == const0_rtx)
3225 {
3226 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3227 return CCXNZmode;
3228 else
3229 return CCNZmode;
3230 }
3231 else
3232 {
3233 /* This is for the cmp<mode>_sne pattern. */
3234 if (GET_CODE (x) == NOT && y == constm1_rtx)
3235 {
3236 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3237 return CCXCmode;
3238 else
3239 return CCCmode;
3240 }
3241
3242 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
3243 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
3244 {
3245 if (GET_CODE (y) == UNSPEC
3246 && (XINT (y, 1) == UNSPEC_ADDV
3247 || XINT (y, 1) == UNSPEC_SUBV
3248 || XINT (y, 1) == UNSPEC_NEGV))
3249 return CCVmode;
3250 else
3251 return CCCmode;
3252 }
3253
3254 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3255 return CCXmode;
3256 else
3257 return CCmode;
3258 }
3259 }
3260
3261 /* Emit the compare insn and return the CC reg for a CODE comparison
3262 with operands X and Y. */
3263
3264 static rtx
3265 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
3266 {
3267 machine_mode mode;
3268 rtx cc_reg;
3269
3270 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
3271 return x;
3272
3273 mode = SELECT_CC_MODE (code, x, y);
3274
3275 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3276 fcc regs (cse can't tell they're really call clobbered regs and will
3277 remove a duplicate comparison even if there is an intervening function
3278 call - it will then try to reload the cc reg via an int reg which is why
3279 we need the movcc patterns). It is possible to provide the movcc
3280 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3281 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3282 to tell cse that CCFPE mode registers (even pseudos) are call
3283 clobbered. */
3284
3285 /* ??? This is an experiment. Rather than making changes to cse which may
3286 or may not be easy/clean, we do our own cse. This is possible because
3287 we will generate hard registers. Cse knows they're call clobbered (it
3288 doesn't know the same thing about pseudos). If we guess wrong, no big
3289 deal, but if we win, great! */
3290
3291 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3292 #if 1 /* experiment */
3293 {
3294 int reg;
3295 /* We cycle through the registers to ensure they're all exercised. */
3296 static int next_fcc_reg = 0;
3297 /* Previous x,y for each fcc reg. */
3298 static rtx prev_args[4][2];
3299
3300 /* Scan prev_args for x,y. */
3301 for (reg = 0; reg < 4; reg++)
3302 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3303 break;
3304 if (reg == 4)
3305 {
3306 reg = next_fcc_reg;
3307 prev_args[reg][0] = x;
3308 prev_args[reg][1] = y;
3309 next_fcc_reg = (next_fcc_reg + 1) & 3;
3310 }
3311 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3312 }
3313 #else
3314 cc_reg = gen_reg_rtx (mode);
3315 #endif /* ! experiment */
3316 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3317 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3318 else
3319 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3320
3321 /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD. If we do, this
3322 will only result in an unrecognizable insn so no point in asserting. */
3323 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3324
3325 return cc_reg;
3326 }
3327
3328
3329 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3330
3331 rtx
3332 gen_compare_reg (rtx cmp)
3333 {
3334 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3335 }
3336
3337 /* This function is used for v9 only.
3338 DEST is the target of the Scc insn.
3339 CODE is the code for an Scc's comparison.
3340 X and Y are the values we compare.
3341
3342 This function is needed to turn
3343
3344 (set (reg:SI 110)
3345 (gt (reg:CCX 100 %icc)
3346 (const_int 0)))
3347 into
3348 (set (reg:SI 110)
3349 (gt:DI (reg:CCX 100 %icc)
3350 (const_int 0)))
3351
3352 IE: The instruction recognizer needs to see the mode of the comparison to
3353 find the right instruction. We could use "gt:DI" right in the
3354 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3355
3356 static int
3357 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3358 {
3359 if (! TARGET_ARCH64
3360 && (GET_MODE (x) == DImode
3361 || GET_MODE (dest) == DImode))
3362 return 0;
3363
3364 /* Try to use the movrCC insns. */
3365 if (TARGET_ARCH64
3366 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3367 && y == const0_rtx
3368 && v9_regcmp_p (compare_code))
3369 {
3370 rtx op0 = x;
3371 rtx temp;
3372
3373 /* Special case for op0 != 0. This can be done with one instruction if
3374 dest == x. */
3375
3376 if (compare_code == NE
3377 && GET_MODE (dest) == DImode
3378 && rtx_equal_p (op0, dest))
3379 {
3380 emit_insn (gen_rtx_SET (dest,
3381 gen_rtx_IF_THEN_ELSE (DImode,
3382 gen_rtx_fmt_ee (compare_code, DImode,
3383 op0, const0_rtx),
3384 const1_rtx,
3385 dest)));
3386 return 1;
3387 }
3388
3389 if (reg_overlap_mentioned_p (dest, op0))
3390 {
3391 /* Handle the case where dest == x.
3392 We "early clobber" the result. */
3393 op0 = gen_reg_rtx (GET_MODE (x));
3394 emit_move_insn (op0, x);
3395 }
3396
3397 emit_insn (gen_rtx_SET (dest, const0_rtx));
3398 if (GET_MODE (op0) != DImode)
3399 {
3400 temp = gen_reg_rtx (DImode);
3401 convert_move (temp, op0, 0);
3402 }
3403 else
3404 temp = op0;
3405 emit_insn (gen_rtx_SET (dest,
3406 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3407 gen_rtx_fmt_ee (compare_code, DImode,
3408 temp, const0_rtx),
3409 const1_rtx,
3410 dest)));
3411 return 1;
3412 }
3413 else
3414 {
3415 x = gen_compare_reg_1 (compare_code, x, y);
3416 y = const0_rtx;
3417
3418 emit_insn (gen_rtx_SET (dest, const0_rtx));
3419 emit_insn (gen_rtx_SET (dest,
3420 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3421 gen_rtx_fmt_ee (compare_code,
3422 GET_MODE (x), x, y),
3423 const1_rtx, dest)));
3424 return 1;
3425 }
3426 }
3427
3428
3429 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3430 without jumps using the addx/subx instructions. */
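/* For instance, the unsigned "x < y" (sltu) case can be computed roughly
   as

	subcc	%o0, %o1, %g0	! compare; carry set iff %o0 < %o1 unsigned
	addx	%g0, 0, %o2	! %o2 = 0 + 0 + carry

   with no branch; the register names here are only illustrative.  */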
3431
3432 bool
3433 emit_scc_insn (rtx operands[])
3434 {
3435 rtx tem, x, y;
3436 enum rtx_code code;
3437 machine_mode mode;
3438
3439 /* The quad-word fp compare library routines all return nonzero to indicate
3440 true, which is different from the equivalent libgcc routines, so we must
3441 handle them specially here. */
3442 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3443 {
3444 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3445 GET_CODE (operands[1]));
3446 operands[2] = XEXP (operands[1], 0);
3447 operands[3] = XEXP (operands[1], 1);
3448 }
3449
3450 code = GET_CODE (operands[1]);
3451 x = operands[2];
3452 y = operands[3];
3453 mode = GET_MODE (x);
3454
3455 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3456 more applications). The exception to this is "reg != 0" which can
3457 be done in one instruction on v9 (so we do it). */
3458 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3459 {
3460 if (y != const0_rtx)
3461 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3462
3463 rtx pat = gen_rtx_SET (operands[0],
3464 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3465 x, const0_rtx));
3466
3467 /* If we can use addx/subx or addxc, add a clobber for CC. */
3468 if (mode == SImode || (code == NE && TARGET_VIS3))
3469 {
3470 rtx clobber
3471 = gen_rtx_CLOBBER (VOIDmode,
3472 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3473 SPARC_ICC_REG));
3474 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3475 }
3476
3477 emit_insn (pat);
3478 return true;
3479 }
3480
3481 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3482 if (TARGET_ARCH64
3483 && mode == DImode
3484 && !((code == LTU || code == GTU) && TARGET_VIS3)
3485 && gen_v9_scc (operands[0], code, x, y))
3486 return true;
3487
3488 /* We can do LTU and GEU using the addx/subx instructions too. And
3489 for GTU/LEU, if both operands are registers swap them and fall
3490 back to the easy case. */
3491 if (code == GTU || code == LEU)
3492 {
3493 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3494 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3495 {
3496 tem = x;
3497 x = y;
3498 y = tem;
3499 code = swap_condition (code);
3500 }
3501 }
3502
3503 if (code == LTU || code == GEU)
3504 {
3505 emit_insn (gen_rtx_SET (operands[0],
3506 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3507 gen_compare_reg_1 (code, x, y),
3508 const0_rtx)));
3509 return true;
3510 }
3511
3512 /* All the possibilities to use addx/subx based sequences have been
3513 exhausted, so try for a 3-instruction sequence using v9 conditional
3514 moves. */
3515 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3516 return true;
3517
3518 /* Nope, do branches. */
3519 return false;
3520 }
3521
3522 /* Emit a conditional jump insn for the v9 architecture using comparison code
3523 CODE and jump target LABEL.
3524 This function exists to take advantage of the v9 brxx insns. */
3525
3526 static void
3527 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3528 {
3529 emit_jump_insn (gen_rtx_SET (pc_rtx,
3530 gen_rtx_IF_THEN_ELSE (VOIDmode,
3531 gen_rtx_fmt_ee (code, GET_MODE (op0),
3532 op0, const0_rtx),
3533 gen_rtx_LABEL_REF (VOIDmode, label),
3534 pc_rtx)));
3535 }
3536
3537 /* Emit a conditional jump insn for the UA2011 architecture using
3538 comparison code CODE and jump target LABEL. This function exists
3539 to take advantage of the UA2011 Compare and Branch insns. */
3540
3541 static void
3542 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3543 {
3544 rtx if_then_else;
3545
3546 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3547 gen_rtx_fmt_ee (code, GET_MODE (op0),
3548 op0, op1),
3549 gen_rtx_LABEL_REF (VOIDmode, label),
3550 pc_rtx);
3551
3552 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3553 }
3554
3555 void
3556 emit_conditional_branch_insn (rtx operands[])
3557 {
3558 /* The quad-word fp compare library routines all return nonzero to indicate
3559 true, which is different from the equivalent libgcc routines, so we must
3560 handle them specially here. */
3561 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3562 {
3563 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3564 GET_CODE (operands[0]));
3565 operands[1] = XEXP (operands[0], 0);
3566 operands[2] = XEXP (operands[0], 1);
3567 }
3568
3569 /* If we can tell early on that the comparison is against a constant
3570 that won't fit in the 5-bit signed immediate field of a cbcond,
3571 use one of the other v9 conditional branch sequences. */
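/* (Illustrative note, an addition not from the original sources: a 5-bit
   signed immediate covers the range -16..15, which is what SPARC_SIMM5_P
   is assumed to check here.) */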
3572 if (TARGET_CBCOND
3573 && GET_CODE (operands[1]) == REG
3574 && (GET_MODE (operands[1]) == SImode
3575 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3576 && (GET_CODE (operands[2]) != CONST_INT
3577 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3578 {
3579 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3580 return;
3581 }
3582
3583 if (TARGET_ARCH64 && operands[2] == const0_rtx
3584 && GET_CODE (operands[1]) == REG
3585 && GET_MODE (operands[1]) == DImode)
3586 {
3587 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3588 return;
3589 }
3590
3591 operands[1] = gen_compare_reg (operands[0]);
3592 operands[2] = const0_rtx;
3593 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3594 operands[1], operands[2]);
3595 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3596 operands[3]));
3597 }
3598
3599
3600 /* Generate a DFmode part of a hard TFmode register.
3601 REG is the TFmode hard register, LOW is 1 for the
3602 low 64 bits of the register and 0 otherwise.
3603 */
3604 rtx
3605 gen_df_reg (rtx reg, int low)
3606 {
3607 int regno = REGNO (reg);
3608
3609 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3610 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3611 return gen_rtx_REG (DFmode, regno);
3612 }
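
/* Illustrative example (an addition, not from the original sources): on a
   big-endian target such as SPARC (WORDS_BIG_ENDIAN), a TFmode value living
   in %f0 keeps its high DFmode half in %f0 and its low 64 bits in %f2, so
   gen_df_reg (reg, 1) is expected to return %f2 for that register. */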
3613 \f
3614 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3615 Unlike normal calls, TFmode operands are passed by reference. It is
3616 assumed that no more than 3 operands are required. */
3617
3618 static void
3619 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3620 {
3621 rtx ret_slot = NULL, arg[3], func_sym;
3622 int i;
3623
3624 /* We only expect to be called for conversions, unary, and binary ops. */
3625 gcc_assert (nargs == 2 || nargs == 3);
3626
3627 for (i = 0; i < nargs; ++i)
3628 {
3629 rtx this_arg = operands[i];
3630 rtx this_slot;
3631
3632 /* TFmode arguments and return values are passed by reference. */
3633 if (GET_MODE (this_arg) == TFmode)
3634 {
3635 int force_stack_temp;
3636
3637 force_stack_temp = 0;
3638 if (TARGET_BUGGY_QP_LIB && i == 0)
3639 force_stack_temp = 1;
3640
3641 if (GET_CODE (this_arg) == MEM
3642 && ! force_stack_temp)
3643 {
3644 tree expr = MEM_EXPR (this_arg);
3645 if (expr)
3646 mark_addressable (expr);
3647 this_arg = XEXP (this_arg, 0);
3648 }
3649 else if (CONSTANT_P (this_arg)
3650 && ! force_stack_temp)
3651 {
3652 this_slot = force_const_mem (TFmode, this_arg);
3653 this_arg = XEXP (this_slot, 0);
3654 }
3655 else
3656 {
3657 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3658
3659 /* Operand 0 is the return value. We'll copy it out later. */
3660 if (i > 0)
3661 emit_move_insn (this_slot, this_arg);
3662 else
3663 ret_slot = this_slot;
3664
3665 this_arg = XEXP (this_slot, 0);
3666 }
3667 }
3668
3669 arg[i] = this_arg;
3670 }
3671
3672 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3673
3674 if (GET_MODE (operands[0]) == TFmode)
3675 {
3676 if (nargs == 2)
3677 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3678 arg[0], GET_MODE (arg[0]),
3679 arg[1], GET_MODE (arg[1]));
3680 else
3681 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3682 arg[0], GET_MODE (arg[0]),
3683 arg[1], GET_MODE (arg[1]),
3684 arg[2], GET_MODE (arg[2]));
3685
3686 if (ret_slot)
3687 emit_move_insn (operands[0], ret_slot);
3688 }
3689 else
3690 {
3691 rtx ret;
3692
3693 gcc_assert (nargs == 2);
3694
3695 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3696 GET_MODE (operands[0]),
3697 arg[1], GET_MODE (arg[1]));
3698
3699 if (ret != operands[0])
3700 emit_move_insn (operands[0], ret);
3701 }
3702 }
3703
3704 /* Expand soft-float TFmode calls to the SPARC ABI routines. */
3705
3706 static void
3707 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3708 {
3709 const char *func;
3710
3711 switch (code)
3712 {
3713 case PLUS:
3714 func = "_Qp_add";
3715 break;
3716 case MINUS:
3717 func = "_Qp_sub";
3718 break;
3719 case MULT:
3720 func = "_Qp_mul";
3721 break;
3722 case DIV:
3723 func = "_Qp_div";
3724 break;
3725 default:
3726 gcc_unreachable ();
3727 }
3728
3729 emit_soft_tfmode_libcall (func, 3, operands);
3730 }
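
/* Illustrative note (an addition, not from the original sources): for a
   soft-quad TFmode addition "c = a + b", the code above is expected to boil
   down to the library call _Qp_add (&c, &a, &b), with all three TFmode
   values passed by reference as emit_soft_tfmode_libcall arranges. */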
3731
3732 static void
3733 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3734 {
3735 const char *func;
3736
3737 gcc_assert (code == SQRT);
3738 func = "_Qp_sqrt";
3739
3740 emit_soft_tfmode_libcall (func, 2, operands);
3741 }
3742
3743 static void
3744 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3745 {
3746 const char *func;
3747
3748 switch (code)
3749 {
3750 case FLOAT_EXTEND:
3751 switch (GET_MODE (operands[1]))
3752 {
3753 case E_SFmode:
3754 func = "_Qp_stoq";
3755 break;
3756 case E_DFmode:
3757 func = "_Qp_dtoq";
3758 break;
3759 default:
3760 gcc_unreachable ();
3761 }
3762 break;
3763
3764 case FLOAT_TRUNCATE:
3765 switch (GET_MODE (operands[0]))
3766 {
3767 case E_SFmode:
3768 func = "_Qp_qtos";
3769 break;
3770 case E_DFmode:
3771 func = "_Qp_qtod";
3772 break;
3773 default:
3774 gcc_unreachable ();
3775 }
3776 break;
3777
3778 case FLOAT:
3779 switch (GET_MODE (operands[1]))
3780 {
3781 case E_SImode:
3782 func = "_Qp_itoq";
3783 if (TARGET_ARCH64)
3784 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3785 break;
3786 case E_DImode:
3787 func = "_Qp_xtoq";
3788 break;
3789 default:
3790 gcc_unreachable ();
3791 }
3792 break;
3793
3794 case UNSIGNED_FLOAT:
3795 switch (GET_MODE (operands[1]))
3796 {
3797 case E_SImode:
3798 func = "_Qp_uitoq";
3799 if (TARGET_ARCH64)
3800 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3801 break;
3802 case E_DImode:
3803 func = "_Qp_uxtoq";
3804 break;
3805 default:
3806 gcc_unreachable ();
3807 }
3808 break;
3809
3810 case FIX:
3811 switch (GET_MODE (operands[0]))
3812 {
3813 case E_SImode:
3814 func = "_Qp_qtoi";
3815 break;
3816 case E_DImode:
3817 func = "_Qp_qtox";
3818 break;
3819 default:
3820 gcc_unreachable ();
3821 }
3822 break;
3823
3824 case UNSIGNED_FIX:
3825 switch (GET_MODE (operands[0]))
3826 {
3827 case E_SImode:
3828 func = "_Qp_qtoui";
3829 break;
3830 case E_DImode:
3831 func = "_Qp_qtoux";
3832 break;
3833 default:
3834 gcc_unreachable ();
3835 }
3836 break;
3837
3838 default:
3839 gcc_unreachable ();
3840 }
3841
3842 emit_soft_tfmode_libcall (func, 2, operands);
3843 }
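
/* Illustrative note (an addition, not from the original sources): the helper
   names above follow the usual Sun convention, with s/d/q standing for
   single, double and quad precision and i/ui/x/ux for signed/unsigned 32-bit
   and 64-bit integers; _Qp_qtoux, for instance, converts TFmode to unsigned
   DImode. */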
3844
3845 /* Expand a hard-float TFmode operation. All arguments must be in
3846 registers. */
3847
3848 static void
3849 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3850 {
3851 rtx op, dest;
3852
3853 if (GET_RTX_CLASS (code) == RTX_UNARY)
3854 {
3855 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3856 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3857 }
3858 else
3859 {
3860 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3861 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3862 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3863 operands[1], operands[2]);
3864 }
3865
3866 if (register_operand (operands[0], VOIDmode))
3867 dest = operands[0];
3868 else
3869 dest = gen_reg_rtx (GET_MODE (operands[0]));
3870
3871 emit_insn (gen_rtx_SET (dest, op));
3872
3873 if (dest != operands[0])
3874 emit_move_insn (operands[0], dest);
3875 }
3876
3877 void
3878 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3879 {
3880 if (TARGET_HARD_QUAD)
3881 emit_hard_tfmode_operation (code, operands);
3882 else
3883 emit_soft_tfmode_binop (code, operands);
3884 }
3885
3886 void
3887 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3888 {
3889 if (TARGET_HARD_QUAD)
3890 emit_hard_tfmode_operation (code, operands);
3891 else
3892 emit_soft_tfmode_unop (code, operands);
3893 }
3894
3895 void
3896 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3897 {
3898 if (TARGET_HARD_QUAD)
3899 emit_hard_tfmode_operation (code, operands);
3900 else
3901 emit_soft_tfmode_cvt (code, operands);
3902 }
3903 \f
3904 /* Return nonzero if a branch/jump/call instruction will be emitting a
3905 nop into its delay slot. */
3906
3907 int
3908 empty_delay_slot (rtx_insn *insn)
3909 {
3910 rtx seq;
3911
3912 /* If no previous instruction (should not happen), return true. */
3913 if (PREV_INSN (insn) == NULL)
3914 return 1;
3915
3916 seq = NEXT_INSN (PREV_INSN (insn));
3917 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3918 return 0;
3919
3920 return 1;
3921 }
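
/* Illustrative note (an addition, not from the original sources): after
   delayed-branch scheduling, an insn whose delay slot was filled is wrapped
   together with the slot insn in a SEQUENCE, so a non-SEQUENCE pattern at
   this point means the slot will hold a nop. */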
3922
3923 /* Return nonzero if we should emit a nop after a cbcond instruction.
3924 The cbcond instruction does not have a delay slot; however, there is
3925 a severe performance penalty if a control transfer appears right
3926 after a cbcond. Therefore we emit a nop when we detect this
3927 situation. */
3928
3929 int
3930 emit_cbcond_nop (rtx_insn *insn)
3931 {
3932 rtx next = next_active_insn (insn);
3933
3934 if (!next)
3935 return 1;
3936
3937 if (NONJUMP_INSN_P (next)
3938 && GET_CODE (PATTERN (next)) == SEQUENCE)
3939 next = XVECEXP (PATTERN (next), 0, 0);
3940 else if (CALL_P (next)
3941 && GET_CODE (PATTERN (next)) == PARALLEL)
3942 {
3943 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3944
3945 if (GET_CODE (delay) == RETURN)
3946 {
3947 /* It's a sibling call. Do not emit the nop if we're going
3948 to emit something other than the jump itself as the first
3949 instruction of the sibcall sequence. */
3950 if (sparc_leaf_function_p || TARGET_FLAT)
3951 return 0;
3952 }
3953 }
3954
3955 if (NONJUMP_INSN_P (next))
3956 return 0;
3957
3958 return 1;
3959 }
3960
3961 /* Return nonzero if TRIAL can go into the call delay slot. */
3962
3963 int
3964 eligible_for_call_delay (rtx_insn *trial)
3965 {
3966 rtx pat;
3967
3968 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3969 return 0;
3970
3971 /* Binutils allows
3972 call __tls_get_addr, %tgd_call (foo)
3973 add %l7, %o0, %o0, %tgd_add (foo)
3974 while Sun as/ld does not. */
3975 if (TARGET_GNU_TLS || !TARGET_TLS)
3976 return 1;
3977
3978 pat = PATTERN (trial);
3979
3980 /* We must reject tgd_add{32|64}, i.e.
3981 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3982 and tldm_add{32|64}, i.e.
3983 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3984 for Sun as/ld. */
3985 if (GET_CODE (pat) == SET
3986 && GET_CODE (SET_SRC (pat)) == PLUS)
3987 {
3988 rtx unspec = XEXP (SET_SRC (pat), 1);
3989
3990 if (GET_CODE (unspec) == UNSPEC
3991 && (XINT (unspec, 1) == UNSPEC_TLSGD
3992 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3993 return 0;
3994 }
3995
3996 return 1;
3997 }
3998
3999 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
4000 instruction. RETURN_P is true if the v9 variant 'return' is to be
4001 considered in the test too.
4002
4003 TRIAL must be a SET whose destination is a REG appropriate for the
4004 'restore' instruction or, if RETURN_P is true, for the 'return'
4005 instruction. */
4006
4007 static int
4008 eligible_for_restore_insn (rtx trial, bool return_p)
4009 {
4010 rtx pat = PATTERN (trial);
4011 rtx src = SET_SRC (pat);
4012 bool src_is_freg = false;
4013 rtx src_reg;
4014
4015 /* Since we now can do moves between float and integer registers when
4016 VIS3 is enabled, we have to catch this case. We can allow such
4017 moves when doing a 'return' however. */
4018 src_reg = src;
4019 if (GET_CODE (src_reg) == SUBREG)
4020 src_reg = SUBREG_REG (src_reg);
4021 if (GET_CODE (src_reg) == REG
4022 && SPARC_FP_REG_P (REGNO (src_reg)))
4023 src_is_freg = true;
4024
4025 /* The 'restore src,%g0,dest' pattern for word mode and below. */
4026 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4027 && arith_operand (src, GET_MODE (src))
4028 && ! src_is_freg)
4029 {
4030 if (TARGET_ARCH64)
4031 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4032 else
4033 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
4034 }
4035
4036 /* The 'restore src,%g0,dest' pattern for double-word mode. */
4037 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4038 && arith_double_operand (src, GET_MODE (src))
4039 && ! src_is_freg)
4040 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4041
4042 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
4043 else if (! TARGET_FPU && register_operand (src, SFmode))
4044 return 1;
4045
4046 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
4047 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
4048 return 1;
4049
4050 /* If we have the 'return' instruction, anything that does not use
4051 local or output registers and can go into a delay slot wins. */
4052 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
4053 return 1;
4054
4055 /* The 'restore src1,src2,dest' pattern for SImode. */
4056 else if (GET_CODE (src) == PLUS
4057 && register_operand (XEXP (src, 0), SImode)
4058 && arith_operand (XEXP (src, 1), SImode))
4059 return 1;
4060
4061 /* The 'restore src1,src2,dest' pattern for DImode. */
4062 else if (GET_CODE (src) == PLUS
4063 && register_operand (XEXP (src, 0), DImode)
4064 && arith_double_operand (XEXP (src, 1), DImode))
4065 return 1;
4066
4067 /* The 'restore src1,%lo(src2),dest' pattern. */
4068 else if (GET_CODE (src) == LO_SUM
4069 && ! TARGET_CM_MEDMID
4070 && ((register_operand (XEXP (src, 0), SImode)
4071 && immediate_operand (XEXP (src, 1), SImode))
4072 || (TARGET_ARCH64
4073 && register_operand (XEXP (src, 0), DImode)
4074 && immediate_operand (XEXP (src, 1), DImode))))
4075 return 1;
4076
4077 /* The 'restore src,src,dest' pattern. */
4078 else if (GET_CODE (src) == ASHIFT
4079 && (register_operand (XEXP (src, 0), SImode)
4080 || register_operand (XEXP (src, 0), DImode))
4081 && XEXP (src, 1) == const1_rtx)
4082 return 1;
4083
4084 return 0;
4085 }
4086
4087 /* Return nonzero if TRIAL can go into the function return's delay slot. */
4088
4089 int
4090 eligible_for_return_delay (rtx_insn *trial)
4091 {
4092 int regno;
4093 rtx pat;
4094
4095 /* If the function uses __builtin_eh_return, the eh_return machinery
4096 occupies the delay slot. */
4097 if (crtl->calls_eh_return)
4098 return 0;
4099
4100 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4101 return 0;
4102
4103 /* In the case of a leaf or flat function, anything can go into the slot. */
4104 if (sparc_leaf_function_p || TARGET_FLAT)
4105 return 1;
4106
4107 if (!NONJUMP_INSN_P (trial))
4108 return 0;
4109
4110 pat = PATTERN (trial);
4111 if (GET_CODE (pat) == PARALLEL)
4112 {
4113 int i;
4114
4115 if (! TARGET_V9)
4116 return 0;
4117 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
4118 {
4119 rtx expr = XVECEXP (pat, 0, i);
4120 if (GET_CODE (expr) != SET)
4121 return 0;
4122 if (GET_CODE (SET_DEST (expr)) != REG)
4123 return 0;
4124 regno = REGNO (SET_DEST (expr));
4125 if (regno >= 8 && regno < 24)
4126 return 0;
4127 }
4128 return !epilogue_renumber (&pat, 1);
4129 }
4130
4131 if (GET_CODE (pat) != SET)
4132 return 0;
4133
4134 if (GET_CODE (SET_DEST (pat)) != REG)
4135 return 0;
4136
4137 regno = REGNO (SET_DEST (pat));
4138
4139 /* Otherwise, only operations which can be done in tandem with
4140 a `restore' or `return' insn can go into the delay slot. */
4141 if (regno >= 8 && regno < 24)
4142 return 0;
4143
4144 /* If this instruction sets up a floating-point register and we have a
4145 return instruction, it can probably go in. But restore will not work
4146 with FP_REGS. */
4147 if (! SPARC_INT_REG_P (regno))
4148 return TARGET_V9 && !epilogue_renumber (&pat, 1);
4149
4150 return eligible_for_restore_insn (trial, true);
4151 }
4152
4153 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
4154
4155 int
4156 eligible_for_sibcall_delay (rtx_insn *trial)
4157 {
4158 rtx pat;
4159
4160 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4161 return 0;
4162
4163 if (!NONJUMP_INSN_P (trial))
4164 return 0;
4165
4166 pat = PATTERN (trial);
4167
4168 if (sparc_leaf_function_p || TARGET_FLAT)
4169 {
4170 /* If the tail call is done using the call instruction,
4171 we have to restore %o7 in the delay slot. */
4172 if (LEAF_SIBCALL_SLOT_RESERVED_P)
4173 return 0;
4174
4175 /* %g1 is used to build the function address. */
4176 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
4177 return 0;
4178
4179 return 1;
4180 }
4181
4182 if (GET_CODE (pat) != SET)
4183 return 0;
4184
4185 /* Otherwise, only operations which can be done in tandem with
4186 a `restore' insn can go into the delay slot. */
4187 if (GET_CODE (SET_DEST (pat)) != REG
4188 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
4189 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
4190 return 0;
4191
4192 /* If it mentions %o7, it can't go in, because sibcall will clobber it
4193 in most cases. */
4194 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
4195 return 0;
4196
4197 return eligible_for_restore_insn (trial, false);
4198 }
4199 \f
4200 /* Determine if it's legal to put X into the constant pool. This
4201 is not possible if X contains the address of a symbol that is
4202 not constant (TLS) or not known at final link time (PIC). */
4203
4204 static bool
4205 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
4206 {
4207 switch (GET_CODE (x))
4208 {
4209 case CONST_INT:
4210 case CONST_WIDE_INT:
4211 case CONST_DOUBLE:
4212 case CONST_VECTOR:
4213 /* Accept all non-symbolic constants. */
4214 return false;
4215
4216 case LABEL_REF:
4217 /* Labels are OK iff we are non-PIC. */
4218 return flag_pic != 0;
4219
4220 case SYMBOL_REF:
4221 /* 'Naked' TLS symbol references are never OK;
4222 non-TLS symbols are OK iff we are non-PIC. */
4223 if (SYMBOL_REF_TLS_MODEL (x))
4224 return true;
4225 else
4226 return flag_pic != 0;
4227
4228 case CONST:
4229 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
4230 case PLUS:
4231 case MINUS:
4232 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
4233 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
4234 case UNSPEC:
4235 return true;
4236 default:
4237 gcc_unreachable ();
4238 }
4239 }
4240 \f
4241 /* Global Offset Table support. */
4242 static GTY(()) rtx got_helper_rtx = NULL_RTX;
4243 static GTY(()) rtx got_register_rtx = NULL_RTX;
4244 static GTY(()) rtx got_symbol_rtx = NULL_RTX;
4245
4246 /* Return the SYMBOL_REF for the Global Offset Table. */
4247
4248 static rtx
4249 sparc_got (void)
4250 {
4251 if (!got_symbol_rtx)
4252 got_symbol_rtx = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
4253
4254 return got_symbol_rtx;
4255 }
4256
4257 #ifdef HAVE_GAS_HIDDEN
4258 # define USE_HIDDEN_LINKONCE 1
4259 #else
4260 # define USE_HIDDEN_LINKONCE 0
4261 #endif
4262
4263 static void
4264 get_pc_thunk_name (char name[32], unsigned int regno)
4265 {
4266 const char *reg_name = reg_names[regno];
4267
4268 /* Skip the leading '%' as that cannot be used in a
4269 symbol name. */
4270 reg_name += 1;
4271
4272 if (USE_HIDDEN_LINKONCE)
4273 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4274 else
4275 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4276 }
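
/* Illustrative example (an addition, not from the original sources): with a
   GNU assembler that supports hidden symbols and %l7 as the PIC register,
   the name generated above is expected to be "__sparc_get_pc_thunk.l7". */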
4277
4278 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4279
4280 static rtx
4281 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2)
4282 {
4283 int orig_flag_pic = flag_pic;
4284 rtx insn;
4285
4286 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4287 flag_pic = 0;
4288 if (TARGET_ARCH64)
4289 insn = gen_load_pcrel_symdi (op0, op1, op2, GEN_INT (REGNO (op0)));
4290 else
4291 insn = gen_load_pcrel_symsi (op0, op1, op2, GEN_INT (REGNO (op0)));
4292 flag_pic = orig_flag_pic;
4293
4294 return insn;
4295 }
4296
4297 /* Emit code to load the GOT register. */
4298
4299 void
4300 load_got_register (void)
4301 {
4302 if (!got_register_rtx)
4303 got_register_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4304
4305 if (TARGET_VXWORKS_RTP)
4306 emit_insn (gen_vxworks_load_got ());
4307 else
4308 {
4309 /* The GOT symbol is subject to a PC-relative relocation so we need a
4310 helper function to add the PC value and thus get the final value. */
4311 if (!got_helper_rtx)
4312 {
4313 char name[32];
4314 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4315 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4316 }
4317
4318 emit_insn (gen_load_pcrel_sym (got_register_rtx, sparc_got (),
4319 got_helper_rtx));
4320 }
4321 }
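
/* Illustrative note (an addition, not from the original sources): outside
   VxWorks RTP the code above is expected to expand to a sethi/call/add
   sequence that materializes _GLOBAL_OFFSET_TABLE_ relative to the PC, with
   the helper thunk named by get_pc_thunk_name adding the PC of the call
   into the GOT register. */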
4322
4323 /* Ensure that we are not using patterns that are not OK with PIC. */
4324
4325 int
4326 check_pic (int i)
4327 {
4328 rtx op;
4329
4330 switch (flag_pic)
4331 {
4332 case 1:
4333 op = recog_data.operand[i];
4334 gcc_assert (GET_CODE (op) != SYMBOL_REF
4335 && (GET_CODE (op) != CONST
4336 || (GET_CODE (XEXP (op, 0)) == MINUS
4337 && XEXP (XEXP (op, 0), 0) == sparc_got ()
4338 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
4339 /* fallthrough */
4340 case 2:
4341 default:
4342 return 1;
4343 }
4344 }
4345
4346 /* Return true if X is an address which needs a temporary register when
4347 reloaded while generating PIC code. */
4348
4349 int
4350 pic_address_needs_scratch (rtx x)
4351 {
4352 /* An address which is a SYMBOL_REF plus a non-SMALL_INT constant needs a temp reg. */
4353 if (GET_CODE (x) == CONST
4354 && GET_CODE (XEXP (x, 0)) == PLUS
4355 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4356 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4357 && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
4358 return 1;
4359
4360 return 0;
4361 }
4362
4363 /* Determine if a given RTX is a valid constant. We already know this
4364 satisfies CONSTANT_P. */
4365
4366 static bool
4367 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4368 {
4369 switch (GET_CODE (x))
4370 {
4371 case CONST:
4372 case SYMBOL_REF:
4373 if (sparc_tls_referenced_p (x))
4374 return false;
4375 break;
4376
4377 case CONST_DOUBLE:
4378 /* Floating-point constants are generally not OK.
4379 The only exceptions are 0.0 and all-ones with VIS. */
4380 if (TARGET_VIS
4381 && SCALAR_FLOAT_MODE_P (mode)
4382 && (const_zero_operand (x, mode)
4383 || const_all_ones_operand (x, mode)))
4384 return true;
4385
4386 return false;
4387
4388 case CONST_VECTOR:
4389 /* Vector constants are generally not OK.
4390 The only exceptions are 0 and -1 with VIS. */
4391 if (TARGET_VIS
4392 && (const_zero_operand (x, mode)
4393 || const_all_ones_operand (x, mode)))
4394 return true;
4395
4396 return false;
4397
4398 default:
4399 break;
4400 }
4401
4402 return true;
4403 }
4404
4405 /* Determine if a given RTX is a valid constant address. */
4406
4407 bool
4408 constant_address_p (rtx x)
4409 {
4410 switch (GET_CODE (x))
4411 {
4412 case LABEL_REF:
4413 case CONST_INT:
4414 case HIGH:
4415 return true;
4416
4417 case CONST:
4418 if (flag_pic && pic_address_needs_scratch (x))
4419 return false;
4420 return sparc_legitimate_constant_p (Pmode, x);
4421
4422 case SYMBOL_REF:
4423 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4424
4425 default:
4426 return false;
4427 }
4428 }
4429
4430 /* Nonzero if the constant value X is a legitimate general operand
4431 when generating PIC code. It is given that flag_pic is on and
4432 that X satisfies CONSTANT_P. */
4433
4434 bool
4435 legitimate_pic_operand_p (rtx x)
4436 {
4437 if (pic_address_needs_scratch (x))
4438 return false;
4439 if (sparc_tls_referenced_p (x))
4440 return false;
4441 return true;
4442 }
4443
4444 /* Return true if X is a representation of the PIC register. */
4445
4446 static bool
4447 sparc_pic_register_p (rtx x)
4448 {
4449 if (!REG_P (x) || !pic_offset_table_rtx)
4450 return false;
4451
4452 if (x == pic_offset_table_rtx)
4453 return true;
4454
4455 if (!HARD_REGISTER_P (pic_offset_table_rtx)
4456 && (HARD_REGISTER_P (x) || lra_in_progress)
4457 && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
4458 return true;
4459
4460 return false;
4461 }
4462
4463 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4464 (CONST_INT_P (X) \
4465 && INTVAL (X) >= -0x1000 \
4466 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4467
4468 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4469 (CONST_INT_P (X) \
4470 && INTVAL (X) >= -0x1000 \
4471 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
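
/* Illustrative note (an addition, not from the original sources): 0x1000
   corresponds to the 13-bit signed immediate field of the memory
   instructions, so RTX_OK_FOR_OFFSET_P accepts offsets in roughly
   [-4096, 4095] adjusted so that the whole access still fits. The tighter
   0xc00 bound used for OLO10 is assumed to leave room for the up-to-0x3ff
   %lo() part that gets folded into the same field. */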
4472
4473 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4474
4475 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4476 ordinarily. This changes a bit when generating PIC. */
4477
4478 static bool
4479 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4480 {
4481 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4482
4483 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4484 rs1 = addr;
4485 else if (GET_CODE (addr) == PLUS)
4486 {
4487 rs1 = XEXP (addr, 0);
4488 rs2 = XEXP (addr, 1);
4489
4490 /* Canonicalize: REG comes first; if there are no regs,
4491 LO_SUM comes first. */
4492 if (!REG_P (rs1)
4493 && GET_CODE (rs1) != SUBREG
4494 && (REG_P (rs2)
4495 || GET_CODE (rs2) == SUBREG
4496 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4497 {
4498 rs1 = XEXP (addr, 1);
4499 rs2 = XEXP (addr, 0);
4500 }
4501
4502 if ((flag_pic == 1
4503 && sparc_pic_register_p (rs1)
4504 && !REG_P (rs2)
4505 && GET_CODE (rs2) != SUBREG
4506 && GET_CODE (rs2) != LO_SUM
4507 && GET_CODE (rs2) != MEM
4508 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4509 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4510 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4511 || ((REG_P (rs1)
4512 || GET_CODE (rs1) == SUBREG)
4513 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4514 {
4515 imm1 = rs2;
4516 rs2 = NULL;
4517 }
4518 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4519 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4520 {
4521 /* We prohibit REG + REG for TFmode when there are no quad move insns
4522 and we consequently need to split. We do this because REG+REG
4523 is not an offsettable address. If we get the situation in reload
4524 where source and destination of a movtf pattern are both MEMs with
4525 REG+REG address, then only one of them gets converted to an
4526 offsettable address. */
4527 if (mode == TFmode
4528 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4529 return 0;
4530
4531 /* Likewise for TImode, but in all cases. */
4532 if (mode == TImode)
4533 return 0;
4534
4535 /* We prohibit REG + REG on ARCH32 if not optimizing for
4536 DFmode/DImode because then mem_min_alignment is likely to be zero
4537 after reload and the forced split would lack a matching splitter
4538 pattern. */
4539 if (TARGET_ARCH32 && !optimize
4540 && (mode == DFmode || mode == DImode))
4541 return 0;
4542 }
4543 else if (USE_AS_OFFSETABLE_LO10
4544 && GET_CODE (rs1) == LO_SUM
4545 && TARGET_ARCH64
4546 && ! TARGET_CM_MEDMID
4547 && RTX_OK_FOR_OLO10_P (rs2, mode))
4548 {
4549 rs2 = NULL;
4550 imm1 = XEXP (rs1, 1);
4551 rs1 = XEXP (rs1, 0);
4552 if (!CONSTANT_P (imm1)
4553 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4554 return 0;
4555 }
4556 }
4557 else if (GET_CODE (addr) == LO_SUM)
4558 {
4559 rs1 = XEXP (addr, 0);
4560 imm1 = XEXP (addr, 1);
4561
4562 if (!CONSTANT_P (imm1)
4563 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4564 return 0;
4565
4566 /* We can't allow TFmode in 32-bit mode, because an offset greater
4567 than the alignment (8) may cause the LO_SUM to overflow. */
4568 if (mode == TFmode && TARGET_ARCH32)
4569 return 0;
4570
4571 /* During reload, accept the HIGH+LO_SUM construct generated by
4572 sparc_legitimize_reload_address. */
4573 if (reload_in_progress
4574 && GET_CODE (rs1) == HIGH
4575 && XEXP (rs1, 0) == imm1)
4576 return 1;
4577 }
4578 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4579 return 1;
4580 else
4581 return 0;
4582
4583 if (GET_CODE (rs1) == SUBREG)
4584 rs1 = SUBREG_REG (rs1);
4585 if (!REG_P (rs1))
4586 return 0;
4587
4588 if (rs2)
4589 {
4590 if (GET_CODE (rs2) == SUBREG)
4591 rs2 = SUBREG_REG (rs2);
4592 if (!REG_P (rs2))
4593 return 0;
4594 }
4595
4596 if (strict)
4597 {
4598 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4599 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4600 return 0;
4601 }
4602 else
4603 {
4604 if ((! SPARC_INT_REG_P (REGNO (rs1))
4605 && REGNO (rs1) != FRAME_POINTER_REGNUM
4606 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4607 || (rs2
4608 && (! SPARC_INT_REG_P (REGNO (rs2))
4609 && REGNO (rs2) != FRAME_POINTER_REGNUM
4610 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4611 return 0;
4612 }
4613 return 1;
4614 }
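
/* Illustrative examples (an addition, not from the original sources): under
   the rules above, addresses such as [%i0 + %i1] or [%i0 + 42] are accepted,
   while [%i0 + 0x2000] is not, because 0x2000 does not fit in the 13-bit
   signed offset and must first be loaded into a register. */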
4615
4616 /* Return the SYMBOL_REF for the tls_get_addr function. */
4617
4618 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4619
4620 static rtx
4621 sparc_tls_get_addr (void)
4622 {
4623 if (!sparc_tls_symbol)
4624 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4625
4626 return sparc_tls_symbol;
4627 }
4628
4629 /* Return the Global Offset Table to be used in TLS mode. */
4630
4631 static rtx
4632 sparc_tls_got (void)
4633 {
4634 /* In PIC mode, this is just the PIC offset table. */
4635 if (flag_pic)
4636 {
4637 crtl->uses_pic_offset_table = 1;
4638 return pic_offset_table_rtx;
4639 }
4640
4641 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4642 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4643 if (TARGET_SUN_TLS && TARGET_ARCH32)
4644 {
4645 load_got_register ();
4646 return got_register_rtx;
4647 }
4648
4649 /* In all other cases, we load a new pseudo with the GOT symbol. */
4650 return copy_to_reg (sparc_got ());
4651 }
4652
4653 /* Return true if X contains a thread-local symbol. */
4654
4655 static bool
4656 sparc_tls_referenced_p (rtx x)
4657 {
4658 if (!TARGET_HAVE_TLS)
4659 return false;
4660
4661 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4662 x = XEXP (XEXP (x, 0), 0);
4663
4664 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4665 return true;
4666
4667 /* That's all we handle in sparc_legitimize_tls_address for now. */
4668 return false;
4669 }
4670
4671 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4672 this (thread-local) address. */
4673
4674 static rtx
4675 sparc_legitimize_tls_address (rtx addr)
4676 {
4677 rtx temp1, temp2, temp3, ret, o0, got;
4678 rtx_insn *insn;
4679
4680 gcc_assert (can_create_pseudo_p ());
4681
4682 if (GET_CODE (addr) == SYMBOL_REF)
4683 /* Although the various sethi/or sequences generate SImode values, many of
4684 them can be transformed by the linker when relaxing and, if relaxing to
4685 local-exec, will become a sethi/xor pair, which is signed and therefore
4686 a full DImode value in 64-bit mode. Thus we must use Pmode, lest these
4687 values be spilled onto the stack in 64-bit mode. */
4688 switch (SYMBOL_REF_TLS_MODEL (addr))
4689 {
4690 case TLS_MODEL_GLOBAL_DYNAMIC:
4691 start_sequence ();
4692 temp1 = gen_reg_rtx (Pmode);
4693 temp2 = gen_reg_rtx (Pmode);
4694 ret = gen_reg_rtx (Pmode);
4695 o0 = gen_rtx_REG (Pmode, 8);
4696 got = sparc_tls_got ();
4697 if (TARGET_ARCH32)
4698 {
4699 emit_insn (gen_tgd_hi22si (temp1, addr));
4700 emit_insn (gen_tgd_lo10si (temp2, temp1, addr));
4701 emit_insn (gen_tgd_addsi (o0, got, temp2, addr));
4702 insn = emit_call_insn (gen_tgd_callsi (o0, sparc_tls_get_addr (),
4703 addr, const1_rtx));
4704 }
4705 else
4706 {
4707 emit_insn (gen_tgd_hi22di (temp1, addr));
4708 emit_insn (gen_tgd_lo10di (temp2, temp1, addr));
4709 emit_insn (gen_tgd_adddi (o0, got, temp2, addr));
4710 insn = emit_call_insn (gen_tgd_calldi (o0, sparc_tls_get_addr (),
4711 addr, const1_rtx));
4712 }
4713 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4714 RTL_CONST_CALL_P (insn) = 1;
4715 insn = get_insns ();
4716 end_sequence ();
4717 emit_libcall_block (insn, ret, o0, addr);
4718 break;
4719
4720 case TLS_MODEL_LOCAL_DYNAMIC:
4721 start_sequence ();
4722 temp1 = gen_reg_rtx (Pmode);
4723 temp2 = gen_reg_rtx (Pmode);
4724 temp3 = gen_reg_rtx (Pmode);
4725 ret = gen_reg_rtx (Pmode);
4726 o0 = gen_rtx_REG (Pmode, 8);
4727 got = sparc_tls_got ();
4728 if (TARGET_ARCH32)
4729 {
4730 emit_insn (gen_tldm_hi22si (temp1));
4731 emit_insn (gen_tldm_lo10si (temp2, temp1));
4732 emit_insn (gen_tldm_addsi (o0, got, temp2));
4733 insn = emit_call_insn (gen_tldm_callsi (o0, sparc_tls_get_addr (),
4734 const1_rtx));
4735 }
4736 else
4737 {
4738 emit_insn (gen_tldm_hi22di (temp1));
4739 emit_insn (gen_tldm_lo10di (temp2, temp1));
4740 emit_insn (gen_tldm_adddi (o0, got, temp2));
4741 insn = emit_call_insn (gen_tldm_calldi (o0, sparc_tls_get_addr (),
4742 const1_rtx));
4743 }
4744 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4745 RTL_CONST_CALL_P (insn) = 1;
4746 insn = get_insns ();
4747 end_sequence ();
4748 /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
4749 share the LD_BASE result with other LD model accesses. */
4750 emit_libcall_block (insn, temp3, o0,
4751 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4752 UNSPEC_TLSLD_BASE));
4753 temp1 = gen_reg_rtx (Pmode);
4754 temp2 = gen_reg_rtx (Pmode);
4755 if (TARGET_ARCH32)
4756 {
4757 emit_insn (gen_tldo_hix22si (temp1, addr));
4758 emit_insn (gen_tldo_lox10si (temp2, temp1, addr));
4759 emit_insn (gen_tldo_addsi (ret, temp3, temp2, addr));
4760 }
4761 else
4762 {
4763 emit_insn (gen_tldo_hix22di (temp1, addr));
4764 emit_insn (gen_tldo_lox10di (temp2, temp1, addr));
4765 emit_insn (gen_tldo_adddi (ret, temp3, temp2, addr));
4766 }
4767 break;
4768
4769 case TLS_MODEL_INITIAL_EXEC:
4770 temp1 = gen_reg_rtx (Pmode);
4771 temp2 = gen_reg_rtx (Pmode);
4772 temp3 = gen_reg_rtx (Pmode);
4773 got = sparc_tls_got ();
4774 if (TARGET_ARCH32)
4775 {
4776 emit_insn (gen_tie_hi22si (temp1, addr));
4777 emit_insn (gen_tie_lo10si (temp2, temp1, addr));
4778 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4779 }
4780 else
4781 {
4782 emit_insn (gen_tie_hi22di (temp1, addr));
4783 emit_insn (gen_tie_lo10di (temp2, temp1, addr));
4784 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4785 }
4786 if (TARGET_SUN_TLS)
4787 {
4788 ret = gen_reg_rtx (Pmode);
4789 if (TARGET_ARCH32)
4790 emit_insn (gen_tie_addsi (ret, gen_rtx_REG (Pmode, 7),
4791 temp3, addr));
4792 else
4793 emit_insn (gen_tie_adddi (ret, gen_rtx_REG (Pmode, 7),
4794 temp3, addr));
4795 }
4796 else
4797 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4798 break;
4799
4800 case TLS_MODEL_LOCAL_EXEC:
4801 temp1 = gen_reg_rtx (Pmode);
4802 temp2 = gen_reg_rtx (Pmode);
4803 if (TARGET_ARCH32)
4804 {
4805 emit_insn (gen_tle_hix22si (temp1, addr));
4806 emit_insn (gen_tle_lox10si (temp2, temp1, addr));
4807 }
4808 else
4809 {
4810 emit_insn (gen_tle_hix22di (temp1, addr));
4811 emit_insn (gen_tle_lox10di (temp2, temp1, addr));
4812 }
4813 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4814 break;
4815
4816 default:
4817 gcc_unreachable ();
4818 }
4819
4820 else if (GET_CODE (addr) == CONST)
4821 {
4822 rtx base, offset;
4823
4824 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4825
4826 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4827 offset = XEXP (XEXP (addr, 0), 1);
4828
4829 base = force_operand (base, NULL_RTX);
4830 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4831 offset = force_reg (Pmode, offset);
4832 ret = gen_rtx_PLUS (Pmode, base, offset);
4833 }
4834
4835 else
4836 gcc_unreachable (); /* for now ... */
4837
4838 return ret;
4839 }
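
/* Illustrative note (an addition, not from the original sources): register 7
   used above is %g7, the thread pointer, so e.g. the local-exec case computes
   the final address as %g7 plus the offset materialized by the tle_hix22 and
   tle_lox10 patterns. */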
4840
4841 /* Legitimize PIC addresses. If the address is already position-independent,
4842 we return ORIG. Newly generated position-independent addresses go into a
4843 reg. This is REG if nonzero, otherwise we allocate register(s) as
4844 necessary. */
4845
4846 static rtx
4847 sparc_legitimize_pic_address (rtx orig, rtx reg)
4848 {
4849 if (GET_CODE (orig) == SYMBOL_REF
4850 /* See the comment in sparc_expand_move. */
4851 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4852 {
4853 bool gotdata_op = false;
4854 rtx pic_ref, address;
4855 rtx_insn *insn;
4856
4857 if (!reg)
4858 {
4859 gcc_assert (can_create_pseudo_p ());
4860 reg = gen_reg_rtx (Pmode);
4861 }
4862
4863 if (flag_pic == 2)
4864 {
4865 /* If not during reload, allocate another temp reg here for loading
4866 in the address, so that these instructions can be optimized
4867 properly. */
4868 rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
4869
4870 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4871 won't get confused into thinking that these two instructions
4872 are loading in the true address of the symbol. If in the
4873 future a PIC rtx exists, that should be used instead. */
4874 if (TARGET_ARCH64)
4875 {
4876 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4877 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4878 }
4879 else
4880 {
4881 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4882 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4883 }
4884
4885 address = temp_reg;
4886 gotdata_op = true;
4887 }
4888 else
4889 address = orig;
4890
4891 crtl->uses_pic_offset_table = 1;
4892 if (gotdata_op)
4893 {
4894 if (TARGET_ARCH64)
4895 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4896 pic_offset_table_rtx,
4897 address, orig));
4898 else
4899 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4900 pic_offset_table_rtx,
4901 address, orig));
4902 }
4903 else
4904 {
4905 pic_ref
4906 = gen_const_mem (Pmode,
4907 gen_rtx_PLUS (Pmode,
4908 pic_offset_table_rtx, address));
4909 insn = emit_move_insn (reg, pic_ref);
4910 }
4911
4912 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4913 by the loop optimizer. */
4914 set_unique_reg_note (insn, REG_EQUAL, orig);
4915 return reg;
4916 }
4917 else if (GET_CODE (orig) == CONST)
4918 {
4919 rtx base, offset;
4920
4921 if (GET_CODE (XEXP (orig, 0)) == PLUS
4922 && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
4923 return orig;
4924
4925 if (!reg)
4926 {
4927 gcc_assert (can_create_pseudo_p ());
4928 reg = gen_reg_rtx (Pmode);
4929 }
4930
4931 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4932 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4933 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4934 base == reg ? NULL_RTX : reg);
4935
4936 if (GET_CODE (offset) == CONST_INT)
4937 {
4938 if (SMALL_INT (offset))
4939 return plus_constant (Pmode, base, INTVAL (offset));
4940 else if (can_create_pseudo_p ())
4941 offset = force_reg (Pmode, offset);
4942 else
4943 /* If we reach here, then something is seriously wrong. */
4944 gcc_unreachable ();
4945 }
4946 return gen_rtx_PLUS (Pmode, base, offset);
4947 }
4948 else if (GET_CODE (orig) == LABEL_REF)
4949 /* ??? We ought to be checking that the register is live instead, in case
4950 it is eliminated. */
4951 crtl->uses_pic_offset_table = 1;
4952
4953 return orig;
4954 }
4955
4956 /* Try machine-dependent ways of modifying an illegitimate address X
4957 to be legitimate. If we find one, return the new, valid address.
4958
4959 OLDX is the address as it was before break_out_memory_refs was called.
4960 In some cases it is useful to look at this to decide what needs to be done.
4961
4962 MODE is the mode of the operand pointed to by X.
4963
4964 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4965
4966 static rtx
4967 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4968 machine_mode mode)
4969 {
4970 rtx orig_x = x;
4971
4972 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4973 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4974 force_operand (XEXP (x, 0), NULL_RTX));
4975 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4976 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4977 force_operand (XEXP (x, 1), NULL_RTX));
4978 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4979 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4980 XEXP (x, 1));
4981 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4982 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4983 force_operand (XEXP (x, 1), NULL_RTX));
4984
4985 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4986 return x;
4987
4988 if (sparc_tls_referenced_p (x))
4989 x = sparc_legitimize_tls_address (x);
4990 else if (flag_pic)
4991 x = sparc_legitimize_pic_address (x, NULL_RTX);
4992 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4993 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4994 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4995 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
4996 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4997 copy_to_mode_reg (Pmode, XEXP (x, 0)));
4998 else if (GET_CODE (x) == SYMBOL_REF
4999 || GET_CODE (x) == CONST
5000 || GET_CODE (x) == LABEL_REF)
5001 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
5002
5003 return x;
5004 }
5005
5006 /* Delegitimize an address that was legitimized by the above function. */
5007
5008 static rtx
5009 sparc_delegitimize_address (rtx x)
5010 {
5011 x = delegitimize_mem_from_attrs (x);
5012
5013 if (GET_CODE (x) == LO_SUM)
5014 x = XEXP (x, 1);
5015
5016 if (GET_CODE (x) == UNSPEC)
5017 switch (XINT (x, 1))
5018 {
5019 case UNSPEC_MOVE_PIC:
5020 case UNSPEC_TLSLE:
5021 x = XVECEXP (x, 0, 0);
5022 gcc_assert (GET_CODE (x) == SYMBOL_REF);
5023 break;
5024 case UNSPEC_MOVE_GOTDATA:
5025 x = XVECEXP (x, 0, 2);
5026 gcc_assert (GET_CODE (x) == SYMBOL_REF);
5027 break;
5028 default:
5029 break;
5030 }
5031
5032 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
5033 if (GET_CODE (x) == MINUS
5034 && (XEXP (x, 0) == got_register_rtx
5035 || sparc_pic_register_p (XEXP (x, 0))))
5036 {
5037 rtx y = XEXP (x, 1);
5038
5039 if (GET_CODE (y) == LO_SUM)
5040 y = XEXP (y, 1);
5041
5042 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MOVE_PIC_LABEL)
5043 {
5044 x = XVECEXP (y, 0, 0);
5045 gcc_assert (GET_CODE (x) == LABEL_REF
5046 || (GET_CODE (x) == CONST
5047 && GET_CODE (XEXP (x, 0)) == PLUS
5048 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
5049 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
5050 }
5051 }
5052
5053 return x;
5054 }
5055
5056 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
5057 replace the input X, or the original X if no replacement is called for.
5058 The output parameter *WIN is 1 if the calling macro should goto WIN,
5059 0 if it should not.
5060
5061 For SPARC, we wish to handle addresses by splitting them into
5062 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
5063 This cuts the number of extra insns by one.
5064
5065 Do nothing when generating PIC code and the address is a symbolic
5066 operand or requires a scratch register. */
5067
5068 rtx
5069 sparc_legitimize_reload_address (rtx x, machine_mode mode,
5070 int opnum, int type,
5071 int ind_levels ATTRIBUTE_UNUSED, int *win)
5072 {
5073 /* Decompose SImode constants into HIGH+LO_SUM. */
5074 if (CONSTANT_P (x)
5075 && (mode != TFmode || TARGET_ARCH64)
5076 && GET_MODE (x) == SImode
5077 && GET_CODE (x) != LO_SUM
5078 && GET_CODE (x) != HIGH
5079 && sparc_code_model <= CM_MEDLOW
5080 && !(flag_pic
5081 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
5082 {
5083 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
5084 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5085 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5086 opnum, (enum reload_type)type);
5087 *win = 1;
5088 return x;
5089 }
5090
5091 /* We have to recognize what we have already generated above. */
5092 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
5093 {
5094 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5095 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5096 opnum, (enum reload_type)type);
5097 *win = 1;
5098 return x;
5099 }
5100
5101 *win = 0;
5102 return x;
5103 }
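
/* Illustrative example (an addition, not from the original sources): for a
   non-PIC SImode access to "foo + 8" under the medlow code model, the address
   is rewritten above as (lo_sum (high foo+8) foo+8); the HIGH part gets
   reloaded into a register (a sethi) while the LO_SUM stays in the memory
   reference, saving one instruction compared to materializing the full
   address first. */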
5104
5105 /* Return true if ADDR (a legitimate address expression)
5106 has an effect that depends on the machine mode it is used for.
5107
5108 In PIC mode,
5109
5110 (mem:HI [%l7+a])
5111
5112 is not equivalent to
5113
5114 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
5115
5116 because [%l7+a+1] is interpreted as the address of (a+1). */
5117
5118
5119 static bool
5120 sparc_mode_dependent_address_p (const_rtx addr,
5121 addr_space_t as ATTRIBUTE_UNUSED)
5122 {
5123 if (GET_CODE (addr) == PLUS
5124 && sparc_pic_register_p (XEXP (addr, 0))
5125 && symbolic_operand (XEXP (addr, 1), VOIDmode))
5126 return true;
5127
5128 return false;
5129 }
5130
5131 /* Emit a call instruction with the pattern given by PAT. ADDR is the
5132 address of the call target. */
5133
5134 void
5135 sparc_emit_call_insn (rtx pat, rtx addr)
5136 {
5137 rtx_insn *insn;
5138
5139 insn = emit_call_insn (pat);
5140
5141 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
5142 if (TARGET_VXWORKS_RTP
5143 && flag_pic
5144 && GET_CODE (addr) == SYMBOL_REF
5145 && (SYMBOL_REF_DECL (addr)
5146 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
5147 : !SYMBOL_REF_LOCAL_P (addr)))
5148 {
5149 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5150 crtl->uses_pic_offset_table = 1;
5151 }
5152 }
5153 \f
5154 /* Return 1 if RTX is a MEM which is known to be aligned to at
5155 least a DESIRED byte boundary. */
5156
5157 int
5158 mem_min_alignment (rtx mem, int desired)
5159 {
5160 rtx addr, base, offset;
5161
5162 /* If it's not a MEM we can't accept it. */
5163 if (GET_CODE (mem) != MEM)
5164 return 0;
5165
5166 /* Obviously... */
5167 if (!TARGET_UNALIGNED_DOUBLES
5168 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
5169 return 1;
5170
5171 /* ??? The rest of the function predates MEM_ALIGN so
5172 there is probably a bit of redundancy. */
5173 addr = XEXP (mem, 0);
5174 base = offset = NULL_RTX;
5175 if (GET_CODE (addr) == PLUS)
5176 {
5177 if (GET_CODE (XEXP (addr, 0)) == REG)
5178 {
5179 base = XEXP (addr, 0);
5180
5181 /* What we are saying here is that if the base
5182 REG is aligned properly, the compiler will make
5183 sure any REG based index upon it will be so
5184 as well. */
5185 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
5186 offset = XEXP (addr, 1);
5187 else
5188 offset = const0_rtx;
5189 }
5190 }
5191 else if (GET_CODE (addr) == REG)
5192 {
5193 base = addr;
5194 offset = const0_rtx;
5195 }
5196
5197 if (base != NULL_RTX)
5198 {
5199 int regno = REGNO (base);
5200
5201 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
5202 {
5203 /* Check if the compiler has recorded some information
5204 about the alignment of the base REG. If reload has
5205 completed, we already matched with proper alignments.
5206 If not running global_alloc, reload might give us
5207 an unaligned pointer to the local stack, though. */
5208 if (((cfun != 0
5209 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
5210 || (optimize && reload_completed))
5211 && (INTVAL (offset) & (desired - 1)) == 0)
5212 return 1;
5213 }
5214 else
5215 {
5216 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
5217 return 1;
5218 }
5219 }
5220 else if (! TARGET_UNALIGNED_DOUBLES
5221 || CONSTANT_P (addr)
5222 || GET_CODE (addr) == LO_SUM)
5223 {
5224 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
5225 is true, in which case we can only assume that an access is aligned if
5226 it is to a constant address, or the address involves a LO_SUM. */
5227 return 1;
5228 }
5229
5230 /* An obviously unaligned address. */
5231 return 0;
5232 }
5233
5234 \f
5235 /* Vectors to keep interesting information about registers where it can easily
5236 be found. We used to use the actual mode value as the bit number, but there
5237 are more than 32 modes now. Instead we use two tables: one indexed by
5238 hard register number, and one indexed by mode. */
5239
5240 /* The purpose of sparc_mode_class is to shrink the range of modes so that
5241 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
5242 mapped into one sparc_mode_class mode. */
5243
5244 enum sparc_mode_class {
5245 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
5246 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
5247 CC_MODE, CCFP_MODE
5248 };
5249
5250 /* Modes for single-word and smaller quantities. */
5251 #define S_MODES \
5252 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
5253
5254 /* Modes for double-word and smaller quantities. */
5255 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5256
5257 /* Modes for quad-word and smaller quantities. */
5258 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
5259
5260 /* Modes for 8-word and smaller quantities. */
5261 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
5262
5263 /* Modes for single-float quantities. */
5264 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
5265
5266 /* Modes for double-float and smaller quantities. */
5267 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5268
5269 /* Modes for quad-float and smaller quantities. */
5270 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
5271
5272 /* Modes for quad-float pairs and smaller quantities. */
5273 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
5274
5275 /* Modes for double-float only quantities. */
5276 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
5277
5278 /* Modes for quad-float and double-float only quantities. */
5279 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
5280
5281 /* Modes for quad-float pairs and double-float only quantities. */
5282 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
5283
5284 /* Modes for condition codes. */
5285 #define CC_MODES (1 << (int) CC_MODE)
5286 #define CCFP_MODES (1 << (int) CCFP_MODE)
5287
5288 /* Value is 1 if register/mode pair is acceptable on sparc.
5289
5290 The funny mixture of D and T modes is because integer operations
5291 do not specially operate on tetra quantities, so non-quad-aligned
5292 registers can hold quadword quantities (except %o4 and %i4 because
5293 they cross fixed registers).
5294
5295 ??? Note that, despite the settings, non-double-aligned parameter
5296 registers can hold double-word quantities in 32-bit mode. */
5297
5298 /* This points to either the 32-bit or the 64-bit version. */
5299 static const int *hard_regno_mode_classes;
5300
5301 static const int hard_32bit_mode_classes[] = {
5302 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5303 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5304 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5305 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5306
5307 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5308 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5309 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5310 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5311
5312 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5313 and none can hold SFmode/SImode values. */
5314 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5315 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5316 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5317 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5318
5319 /* %fcc[0123] */
5320 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5321
5322 /* %icc, %sfp, %gsr */
5323 CC_MODES, 0, D_MODES
5324 };
5325
5326 static const int hard_64bit_mode_classes[] = {
5327 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5328 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5329 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5330 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5331
5332 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5333 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5334 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5335 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5336
5337 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5338 and none can hold SFmode/SImode values. */
5339 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5340 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5341 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5342 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5343
5344 /* %fcc[0123] */
5345 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5346
5347 /* %icc, %sfp, %gsr */
5348 CC_MODES, 0, D_MODES
5349 };
5350
5351 static int sparc_mode_class [NUM_MACHINE_MODES];
5352
5353 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5354
5355 static void
5356 sparc_init_modes (void)
5357 {
5358 int i;
5359
5360 for (i = 0; i < NUM_MACHINE_MODES; i++)
5361 {
5362 machine_mode m = (machine_mode) i;
5363 unsigned int size = GET_MODE_SIZE (m);
5364
5365 switch (GET_MODE_CLASS (m))
5366 {
5367 case MODE_INT:
5368 case MODE_PARTIAL_INT:
5369 case MODE_COMPLEX_INT:
5370 if (size < 4)
5371 sparc_mode_class[i] = 1 << (int) H_MODE;
5372 else if (size == 4)
5373 sparc_mode_class[i] = 1 << (int) S_MODE;
5374 else if (size == 8)
5375 sparc_mode_class[i] = 1 << (int) D_MODE;
5376 else if (size == 16)
5377 sparc_mode_class[i] = 1 << (int) T_MODE;
5378 else if (size == 32)
5379 sparc_mode_class[i] = 1 << (int) O_MODE;
5380 else
5381 sparc_mode_class[i] = 0;
5382 break;
5383 case MODE_VECTOR_INT:
5384 if (size == 4)
5385 sparc_mode_class[i] = 1 << (int) SF_MODE;
5386 else if (size == 8)
5387 sparc_mode_class[i] = 1 << (int) DF_MODE;
5388 else
5389 sparc_mode_class[i] = 0;
5390 break;
5391 case MODE_FLOAT:
5392 case MODE_COMPLEX_FLOAT:
5393 if (size == 4)
5394 sparc_mode_class[i] = 1 << (int) SF_MODE;
5395 else if (size == 8)
5396 sparc_mode_class[i] = 1 << (int) DF_MODE;
5397 else if (size == 16)
5398 sparc_mode_class[i] = 1 << (int) TF_MODE;
5399 else if (size == 32)
5400 sparc_mode_class[i] = 1 << (int) OF_MODE;
5401 else
5402 sparc_mode_class[i] = 0;
5403 break;
5404 case MODE_CC:
5405 if (m == CCFPmode || m == CCFPEmode)
5406 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5407 else
5408 sparc_mode_class[i] = 1 << (int) CC_MODE;
5409 break;
5410 default:
5411 sparc_mode_class[i] = 0;
5412 break;
5413 }
5414 }
5415
5416 if (TARGET_ARCH64)
5417 hard_regno_mode_classes = hard_64bit_mode_classes;
5418 else
5419 hard_regno_mode_classes = hard_32bit_mode_classes;
5420
5421 /* Initialize the array used by REGNO_REG_CLASS. */
5422 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5423 {
5424 if (i < 16 && TARGET_V8PLUS)
5425 sparc_regno_reg_class[i] = I64_REGS;
5426 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5427 sparc_regno_reg_class[i] = GENERAL_REGS;
5428 else if (i < 64)
5429 sparc_regno_reg_class[i] = FP_REGS;
5430 else if (i < 96)
5431 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5432 else if (i < 100)
5433 sparc_regno_reg_class[i] = FPCC_REGS;
5434 else
5435 sparc_regno_reg_class[i] = NO_REGS;
5436 }
5437 }
5438 \f
5439 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5440
5441 static inline bool
5442 save_global_or_fp_reg_p (unsigned int regno,
5443 int leaf_function ATTRIBUTE_UNUSED)
5444 {
5445 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
5446 }
5447
5448 /* Return whether the return address register (%i7) is needed. */
5449
5450 static inline bool
5451 return_addr_reg_needed_p (int leaf_function)
5452 {
5453 /* If it is live, for example because of __builtin_return_address (0). */
5454 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5455 return true;
5456
5457 /* Otherwise, it is needed as a save register if %o7 is clobbered. */
5458 if (!leaf_function
5459 /* Loading the GOT register clobbers %o7. */
5460 || crtl->uses_pic_offset_table
5461 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5462 return true;
5463
5464 return false;
5465 }
5466
5467 /* Return whether REGNO, a local or in register, must be saved/restored. */
5468
5469 static bool
5470 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5471 {
5472 /* General case: call-saved registers live at some point. */
5473 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
5474 return true;
5475
5476 /* Frame pointer register (%fp) if needed. */
5477 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5478 return true;
5479
5480 /* Return address register (%i7) if needed. */
5481 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5482 return true;
5483
5484 /* GOT register (%l7) if needed. */
5485 if (regno == GLOBAL_OFFSET_TABLE_REGNUM && got_register_rtx)
5486 return true;
5487
5488 /* If the function accesses prior frames, the frame pointer and the return
5489 address of the previous frame must be saved on the stack. */
5490 if (crtl->accesses_prior_frames
5491 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5492 return true;
5493
5494 return false;
5495 }
5496
5497 /* Compute the frame size required by the function. This function is called
5498 during the reload pass and also by sparc_expand_prologue. */
5499
5500 static HOST_WIDE_INT
5501 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5502 {
5503 HOST_WIDE_INT frame_size, apparent_frame_size;
5504 int args_size, n_global_fp_regs = 0;
5505 bool save_local_in_regs_p = false;
5506 unsigned int i;
5507
5508 /* If the function allocates dynamic stack space, the dynamic offset is
5509 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5510 if (leaf_function && !cfun->calls_alloca)
5511 args_size = 0;
5512 else
5513 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5514
5515 /* Calculate space needed for global registers. */
5516 if (TARGET_ARCH64)
5517 {
5518 for (i = 0; i < 8; i++)
5519 if (save_global_or_fp_reg_p (i, 0))
5520 n_global_fp_regs += 2;
5521 }
5522 else
5523 {
5524 for (i = 0; i < 8; i += 2)
5525 if (save_global_or_fp_reg_p (i, 0)
5526 || save_global_or_fp_reg_p (i + 1, 0))
5527 n_global_fp_regs += 2;
5528 }
5529
5530 /* In the flat window model, find out which local and in registers need to
5531 be saved. We don't reserve space in the current frame for them as they
5532 will be spilled into the register window save area of the caller's frame.
5533 However, as soon as we use this register window save area, we must create
5534 that of the current frame to make it the live one. */
5535 if (TARGET_FLAT)
5536 for (i = 16; i < 32; i++)
5537 if (save_local_or_in_reg_p (i, leaf_function))
5538 {
5539 save_local_in_regs_p = true;
5540 break;
5541 }
5542
5543 /* Calculate space needed for FP registers. */
5544 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5545 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5546 n_global_fp_regs += 2;
5547
5548 if (size == 0
5549 && n_global_fp_regs == 0
5550 && args_size == 0
5551 && !save_local_in_regs_p)
5552 frame_size = apparent_frame_size = 0;
5553 else
5554 {
5555 /* Start from the apparent frame size. */
5556 apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
5557
5558 /* We need to add the size of the outgoing argument area. */
5559 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5560
5561 /* And that of the register window save area. */
5562 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5563
5564 /* Finally, bump to the appropriate alignment. */
5565 frame_size = SPARC_STACK_ALIGN (frame_size);
5566 }
5567
5568 /* Set up values for use in prologue and epilogue. */
5569 sparc_frame_size = frame_size;
5570 sparc_apparent_frame_size = apparent_frame_size;
5571 sparc_n_global_fp_regs = n_global_fp_regs;
5572 sparc_save_local_in_regs_p = save_local_in_regs_p;
5573
5574 return frame_size;
5575 }
5576
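/* Illustrative sketch, not taken from the sources: for a 32-bit leaf function
   with 40 bytes of locals, no outgoing arguments and no call-saved global/FP
   registers to spill, the computation above yields

     apparent_frame_size = ROUND_UP (40, 8) + 0 * 4         =  40
     frame_size          = 40 + 0 + FIRST_PARM_OFFSET (68)  = 108
     SPARC_STACK_ALIGN (108)                                 = 112

   assuming the 32-bit ABI values: a 64-byte register window save area plus
   a 4-byte aggregate-return slot (hence 68) and 8-byte stack alignment.  */
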
5577 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5578
5579 int
5580 sparc_initial_elimination_offset (int to)
5581 {
5582 int offset;
5583
5584 if (to == STACK_POINTER_REGNUM)
5585 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5586 else
5587 offset = 0;
5588
5589 offset += SPARC_STACK_BIAS;
5590 return offset;
5591 }
5592
5593 /* Output any necessary .register pseudo-ops. */
5594
5595 void
5596 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5597 {
5598 int i;
5599
5600 if (TARGET_ARCH32)
5601 return;
5602
5603 /* Check if %g[2367] were used without
5604 .register being printed for them already. */
5605 for (i = 2; i < 8; i++)
5606 {
5607 if (df_regs_ever_live_p (i)
5608 && ! sparc_hard_reg_printed [i])
5609 {
5610 sparc_hard_reg_printed [i] = 1;
5611 /* %g7 is used as TLS base register, use #ignore
5612 for it instead of #scratch. */
5613 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5614 i == 7 ? "ignore" : "scratch");
5615 }
5616 if (i == 3) i = 5;
5617 }
5618 }
5619
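/* For instance, a 64-bit function whose code uses %g2 as a scratch register
   and %g7 (the TLS base) would be preceded in the assembly output by

	.register	%g2, #scratch
	.register	%g7, #ignore

   (illustrative sketch; the actual directives depend on which of %g2, %g3,
   %g6 and %g7 the function really uses).  */
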
5620 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5621
5622 #if PROBE_INTERVAL > 4096
5623 #error Cannot use indexed addressing mode for stack probing
5624 #endif
5625
5626 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5627 inclusive. These are offsets from the current stack pointer.
5628
5629 Note that we don't use the REG+REG addressing mode for the probes because
5630 of the stack bias in 64-bit mode. And it doesn't really buy us anything
5631    so the advantages of having a single code path win here.  */
5632
5633 static void
5634 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5635 {
5636 rtx g1 = gen_rtx_REG (Pmode, 1);
5637
5638 /* See if we have a constant small number of probes to generate. If so,
5639 that's the easy case. */
5640 if (size <= PROBE_INTERVAL)
5641 {
5642 emit_move_insn (g1, GEN_INT (first));
5643 emit_insn (gen_rtx_SET (g1,
5644 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5645 emit_stack_probe (plus_constant (Pmode, g1, -size));
5646 }
5647
5648 /* The run-time loop is made up of 9 insns in the generic case while the
5649    compile-time loop is made up of 4+2*(n-2) insns for n intervals.  */
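  /* For instance, at the 4-interval limit handled just below, the unrolled
     sequence costs 4 + 2*(4-2) = 8 insns, still cheaper than the loop.  */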
5650 else if (size <= 4 * PROBE_INTERVAL)
5651 {
5652 HOST_WIDE_INT i;
5653
5654 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5655 emit_insn (gen_rtx_SET (g1,
5656 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5657 emit_stack_probe (g1);
5658
5659 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5660 it exceeds SIZE. If only two probes are needed, this will not
5661 generate any code. Then probe at FIRST + SIZE. */
5662 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5663 {
5664 emit_insn (gen_rtx_SET (g1,
5665 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5666 emit_stack_probe (g1);
5667 }
5668
5669 emit_stack_probe (plus_constant (Pmode, g1,
5670 (i - PROBE_INTERVAL) - size));
5671 }
5672
5673 /* Otherwise, do the same as above, but in a loop. Note that we must be
5674 extra careful with variables wrapping around because we might be at
5675 the very top (or the very bottom) of the address space and we have
5676 to be able to handle this case properly; in particular, we use an
5677 equality test for the loop condition. */
5678 else
5679 {
5680 HOST_WIDE_INT rounded_size;
5681 rtx g4 = gen_rtx_REG (Pmode, 4);
5682
5683 emit_move_insn (g1, GEN_INT (first));
5684
5685
5686 /* Step 1: round SIZE to the previous multiple of the interval. */
5687
5688 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5689 emit_move_insn (g4, GEN_INT (rounded_size));
5690
5691
5692 /* Step 2: compute initial and final value of the loop counter. */
5693
5694 /* TEST_ADDR = SP + FIRST. */
5695 emit_insn (gen_rtx_SET (g1,
5696 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5697
5698 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5699 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5700
5701
5702 /* Step 3: the loop
5703
5704 while (TEST_ADDR != LAST_ADDR)
5705 {
5706 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5707 probe at TEST_ADDR
5708 }
5709
5710 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5711 until it is equal to ROUNDED_SIZE. */
5712
5713 if (TARGET_ARCH64)
5714 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5715 else
5716 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5717
5718
5719 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5720 that SIZE is equal to ROUNDED_SIZE. */
5721
5722 if (size != rounded_size)
5723 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5724 }
5725
5726 /* Make sure nothing is scheduled before we are done. */
5727 emit_insn (gen_blockage ());
5728 }
5729
5730 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5731 absolute addresses. */
5732
5733 const char *
5734 output_probe_stack_range (rtx reg1, rtx reg2)
5735 {
5736 static int labelno = 0;
5737 char loop_lab[32];
5738 rtx xops[2];
5739
5740 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5741
5742 /* Loop. */
5743 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5744
5745 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5746 xops[0] = reg1;
5747 xops[1] = GEN_INT (-PROBE_INTERVAL);
5748 output_asm_insn ("add\t%0, %1, %0", xops);
5749
5750 /* Test if TEST_ADDR == LAST_ADDR. */
5751 xops[1] = reg2;
5752 output_asm_insn ("cmp\t%0, %1", xops);
5753
5754 /* Probe at TEST_ADDR and branch. */
5755 if (TARGET_ARCH64)
5756 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5757 else
5758 fputs ("\tbne\t", asm_out_file);
5759 assemble_name_raw (asm_out_file, loop_lab);
5760 fputc ('\n', asm_out_file);
5761 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5762 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5763
5764 return "";
5765 }
5766
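/* A sketch of the loop emitted by output_probe_stack_range above, assuming
   the default 4096-byte probe interval, the 64-bit stack bias of 2047 and
   the registers picked by sparc_emit_probe_stack_range (the label name is
   illustrative):

	.LPSRL0:
		add	%g1, -4096, %g1
		cmp	%g1, %g4
		bne,pt	%xcc, .LPSRL0
		 st	%g0, [%g1+2047]
*/
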
5767 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5768 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5769 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5770 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5771 the action to be performed if it returns false. Return the new offset. */
5772
5773 typedef bool (*sorr_pred_t) (unsigned int, int);
5774 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5775
5776 static int
5777 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5778 int offset, int leaf_function, sorr_pred_t save_p,
5779 sorr_act_t action_true, sorr_act_t action_false)
5780 {
5781 unsigned int i;
5782 rtx mem;
5783 rtx_insn *insn;
5784
5785 if (TARGET_ARCH64 && high <= 32)
5786 {
5787 int fp_offset = -1;
5788
5789 for (i = low; i < high; i++)
5790 {
5791 if (save_p (i, leaf_function))
5792 {
5793 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5794 base, offset));
5795 if (action_true == SORR_SAVE)
5796 {
5797 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5798 RTX_FRAME_RELATED_P (insn) = 1;
5799 }
5800 else /* action_true == SORR_RESTORE */
5801 {
5802 /* The frame pointer must be restored last since its old
5803 value may be used as base address for the frame. This
5804 is problematic in 64-bit mode only because of the lack
5805 of double-word load instruction. */
5806 if (i == HARD_FRAME_POINTER_REGNUM)
5807 fp_offset = offset;
5808 else
5809 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5810 }
5811 offset += 8;
5812 }
5813 else if (action_false == SORR_ADVANCE)
5814 offset += 8;
5815 }
5816
5817 if (fp_offset >= 0)
5818 {
5819 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5820 emit_move_insn (hard_frame_pointer_rtx, mem);
5821 }
5822 }
5823 else
5824 {
5825 for (i = low; i < high; i += 2)
5826 {
5827 bool reg0 = save_p (i, leaf_function);
5828 bool reg1 = save_p (i + 1, leaf_function);
5829 machine_mode mode;
5830 int regno;
5831
5832 if (reg0 && reg1)
5833 {
5834 mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
5835 regno = i;
5836 }
5837 else if (reg0)
5838 {
5839 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5840 regno = i;
5841 }
5842 else if (reg1)
5843 {
5844 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5845 regno = i + 1;
5846 offset += 4;
5847 }
5848 else
5849 {
5850 if (action_false == SORR_ADVANCE)
5851 offset += 8;
5852 continue;
5853 }
5854
5855 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5856 if (action_true == SORR_SAVE)
5857 {
5858 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5859 RTX_FRAME_RELATED_P (insn) = 1;
5860 if (mode == DImode)
5861 {
5862 rtx set1, set2;
5863 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5864 offset));
5865 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5866 RTX_FRAME_RELATED_P (set1) = 1;
5867 mem
5868 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5869 offset + 4));
5870 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5871 RTX_FRAME_RELATED_P (set2) = 1;
5872 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5873 gen_rtx_PARALLEL (VOIDmode,
5874 gen_rtvec (2, set1, set2)));
5875 }
5876 }
5877 else /* action_true == SORR_RESTORE */
5878 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5879
5880 /* Bump and round down to double word
5881 in case we already bumped by 4. */
5882 offset = ROUND_DOWN (offset + 8, 8);
5883 }
5884 }
5885
5886 return offset;
5887 }
5888
5889 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5890
5891 static rtx
5892 emit_adjust_base_to_offset (rtx base, int offset)
5893 {
5894 /* ??? This might be optimized a little as %g1 might already have a
5895 value close enough that a single add insn will do. */
5896 /* ??? Although, all of this is probably only a temporary fix because
5897 if %g1 can hold a function result, then sparc_expand_epilogue will
5898 lose (the result will be clobbered). */
5899 rtx new_base = gen_rtx_REG (Pmode, 1);
5900 emit_move_insn (new_base, GEN_INT (offset));
5901 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5902 return new_base;
5903 }
5904
5905 /* Emit code to save/restore call-saved global and FP registers. */
5906
5907 static void
5908 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5909 {
5910 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5911 {
5912 base = emit_adjust_base_to_offset (base, offset);
5913 offset = 0;
5914 }
5915
5916 offset
5917 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5918 save_global_or_fp_reg_p, action, SORR_NONE);
5919 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5920 save_global_or_fp_reg_p, action, SORR_NONE);
5921 }
5922
5923 /* Emit code to save/restore call-saved local and in registers. */
5924
5925 static void
5926 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5927 {
5928 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5929 {
5930 base = emit_adjust_base_to_offset (base, offset);
5931 offset = 0;
5932 }
5933
5934 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5935 save_local_or_in_reg_p, action, SORR_ADVANCE);
5936 }
5937
5938 /* Emit a window_save insn. */
5939
5940 static rtx_insn *
5941 emit_window_save (rtx increment)
5942 {
5943 rtx_insn *insn = emit_insn (gen_window_save (increment));
5944 RTX_FRAME_RELATED_P (insn) = 1;
5945
5946 /* The incoming return address (%o7) is saved in %i7. */
5947 add_reg_note (insn, REG_CFA_REGISTER,
5948 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5949 gen_rtx_REG (Pmode,
5950 INCOMING_RETURN_ADDR_REGNUM)));
5951
5952 /* The window save event. */
5953 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5954
5955 /* The CFA is %fp, the hard frame pointer. */
5956 add_reg_note (insn, REG_CFA_DEF_CFA,
5957 plus_constant (Pmode, hard_frame_pointer_rtx,
5958 INCOMING_FRAME_SP_OFFSET));
5959
5960 return insn;
5961 }
5962
5963 /* Generate an increment for the stack pointer. */
5964
5965 static rtx
5966 gen_stack_pointer_inc (rtx increment)
5967 {
5968 return gen_rtx_SET (stack_pointer_rtx,
5969 gen_rtx_PLUS (Pmode,
5970 stack_pointer_rtx,
5971 increment));
5972 }
5973
5974 /* Expand the function prologue. The prologue is responsible for reserving
5975 storage for the frame, saving the call-saved registers and loading the
5976 GOT register if needed. */
5977
5978 void
5979 sparc_expand_prologue (void)
5980 {
5981 HOST_WIDE_INT size;
5982 rtx_insn *insn;
5983
5984 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5985 on the final value of the flag means deferring the prologue/epilogue
5986 expansion until just before the second scheduling pass, which is too
5987 late to emit multiple epilogues or return insns.
5988
5989 Of course we are making the assumption that the value of the flag
5990 will not change between now and its final value. Of the three parts
5991 of the formula, only the last one can reasonably vary. Let's take a
5992 closer look, after assuming that the first two ones are set to true
5993 (otherwise the last value is effectively silenced).
5994
5995 If only_leaf_regs_used returns false, the global predicate will also
5996 be false so the actual frame size calculated below will be positive.
5997 As a consequence, the save_register_window insn will be emitted in
5998 the instruction stream; now this insn explicitly references %fp
5999 which is not a leaf register so only_leaf_regs_used will always
6000 return false subsequently.
6001
6002 If only_leaf_regs_used returns true, we hope that the subsequent
6003 optimization passes won't cause non-leaf registers to pop up. For
6004 example, the regrename pass has special provisions to not rename to
6005 non-leaf registers in a leaf function. */
6006 sparc_leaf_function_p
6007 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
6008
6009 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6010
6011 if (flag_stack_usage_info)
6012 current_function_static_stack_size = size;
6013
6014 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6015 || flag_stack_clash_protection)
6016 {
6017 if (crtl->is_leaf && !cfun->calls_alloca)
6018 {
6019 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6020 sparc_emit_probe_stack_range (get_stack_check_protect (),
6021 size - get_stack_check_protect ());
6022 }
6023 else if (size > 0)
6024 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6025 }
6026
6027 if (size == 0)
6028 ; /* do nothing. */
6029 else if (sparc_leaf_function_p)
6030 {
6031 rtx size_int_rtx = GEN_INT (-size);
6032
6033 if (size <= 4096)
6034 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6035 else if (size <= 8192)
6036 {
6037 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6038 RTX_FRAME_RELATED_P (insn) = 1;
6039
6040 /* %sp is still the CFA register. */
6041 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6042 }
6043 else
6044 {
6045 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6046 emit_move_insn (size_rtx, size_int_rtx);
6047 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6048 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6049 gen_stack_pointer_inc (size_int_rtx));
6050 }
6051
6052 RTX_FRAME_RELATED_P (insn) = 1;
6053 }
6054 else
6055 {
6056 rtx size_int_rtx = GEN_INT (-size);
6057
6058 if (size <= 4096)
6059 emit_window_save (size_int_rtx);
6060 else if (size <= 8192)
6061 {
6062 emit_window_save (GEN_INT (-4096));
6063
6064 /* %sp is not the CFA register anymore. */
6065 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6066
6067 /* Make sure no %fp-based store is issued until after the frame is
6068 established. The offset between the frame pointer and the stack
6069 pointer is calculated relative to the value of the stack pointer
6070 at the end of the function prologue, and moving instructions that
6071 access the stack via the frame pointer between the instructions
6072 that decrement the stack pointer could result in accessing the
6073 register window save area, which is volatile. */
6074 emit_insn (gen_frame_blockage ());
6075 }
6076 else
6077 {
6078 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6079 emit_move_insn (size_rtx, size_int_rtx);
6080 emit_window_save (size_rtx);
6081 }
6082 }
6083
6084 if (sparc_leaf_function_p)
6085 {
6086 sparc_frame_base_reg = stack_pointer_rtx;
6087 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6088 }
6089 else
6090 {
6091 sparc_frame_base_reg = hard_frame_pointer_rtx;
6092 sparc_frame_base_offset = SPARC_STACK_BIAS;
6093 }
6094
6095 if (sparc_n_global_fp_regs > 0)
6096 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6097 sparc_frame_base_offset
6098 - sparc_apparent_frame_size,
6099 SORR_SAVE);
6100
6101 /* Advertise that the data calculated just above are now valid. */
6102 sparc_prologue_data_valid_p = true;
6103 }
6104
6105 /* Expand the function prologue for the flat register window model.  The
6106    prologue is responsible for reserving storage for the frame, saving the
6107    call-saved registers and loading the GOT register if needed.  */
6108
6109 void
6110 sparc_flat_expand_prologue (void)
6111 {
6112 HOST_WIDE_INT size;
6113 rtx_insn *insn;
6114
6115 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
6116
6117 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6118
6119 if (flag_stack_usage_info)
6120 current_function_static_stack_size = size;
6121
6122 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6123 || flag_stack_clash_protection)
6124 {
6125 if (crtl->is_leaf && !cfun->calls_alloca)
6126 {
6127 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6128 sparc_emit_probe_stack_range (get_stack_check_protect (),
6129 size - get_stack_check_protect ());
6130 }
6131 else if (size > 0)
6132 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6133 }
6134
6135 if (sparc_save_local_in_regs_p)
6136 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
6137 SORR_SAVE);
6138
6139 if (size == 0)
6140 ; /* do nothing. */
6141 else
6142 {
6143 rtx size_int_rtx, size_rtx;
6144
6145 size_rtx = size_int_rtx = GEN_INT (-size);
6146
6147 /* We establish the frame (i.e. decrement the stack pointer) first, even
6148 if we use a frame pointer, because we cannot clobber any call-saved
6149 registers, including the frame pointer, if we haven't created a new
6150 register save area, for the sake of compatibility with the ABI. */
6151 if (size <= 4096)
6152 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6153 else if (size <= 8192 && !frame_pointer_needed)
6154 {
6155 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6156 RTX_FRAME_RELATED_P (insn) = 1;
6157 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6158 }
6159 else
6160 {
6161 size_rtx = gen_rtx_REG (Pmode, 1);
6162 emit_move_insn (size_rtx, size_int_rtx);
6163 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6164 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6165 gen_stack_pointer_inc (size_int_rtx));
6166 }
6167 RTX_FRAME_RELATED_P (insn) = 1;
6168
6169 /* Ensure nothing is scheduled until after the frame is established. */
6170 emit_insn (gen_blockage ());
6171
6172 if (frame_pointer_needed)
6173 {
6174 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
6175 gen_rtx_MINUS (Pmode,
6176 stack_pointer_rtx,
6177 size_rtx)));
6178 RTX_FRAME_RELATED_P (insn) = 1;
6179
6180 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6181 gen_rtx_SET (hard_frame_pointer_rtx,
6182 plus_constant (Pmode, stack_pointer_rtx,
6183 size)));
6184 }
6185
6186 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6187 {
6188 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
6189 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
6190
6191 insn = emit_move_insn (i7, o7);
6192 RTX_FRAME_RELATED_P (insn) = 1;
6193
6194 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
6195
6196 /* Prevent this instruction from ever being considered dead,
6197 even if this function has no epilogue. */
6198 emit_use (i7);
6199 }
6200 }
6201
6202 if (frame_pointer_needed)
6203 {
6204 sparc_frame_base_reg = hard_frame_pointer_rtx;
6205 sparc_frame_base_offset = SPARC_STACK_BIAS;
6206 }
6207 else
6208 {
6209 sparc_frame_base_reg = stack_pointer_rtx;
6210 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6211 }
6212
6213 if (sparc_n_global_fp_regs > 0)
6214 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6215 sparc_frame_base_offset
6216 - sparc_apparent_frame_size,
6217 SORR_SAVE);
6218
6219 /* Advertise that the data calculated just above are now valid. */
6220 sparc_prologue_data_valid_p = true;
6221 }
6222
6223 /* This function generates the assembly code for function entry, which boils
6224 down to emitting the necessary .register directives. */
6225
6226 static void
6227 sparc_asm_function_prologue (FILE *file)
6228 {
6229 /* Check that the assumption we made in sparc_expand_prologue is valid. */
6230 if (!TARGET_FLAT)
6231 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
6232
6233 sparc_output_scratch_registers (file);
6234 }
6235
6236 /* Expand the function epilogue, either normal or part of a sibcall.
6237 We emit all the instructions except the return or the call. */
6238
6239 void
6240 sparc_expand_epilogue (bool for_eh)
6241 {
6242 HOST_WIDE_INT size = sparc_frame_size;
6243
6244 if (cfun->calls_alloca)
6245 emit_insn (gen_frame_blockage ());
6246
6247 if (sparc_n_global_fp_regs > 0)
6248 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6249 sparc_frame_base_offset
6250 - sparc_apparent_frame_size,
6251 SORR_RESTORE);
6252
6253 if (size == 0 || for_eh)
6254 ; /* do nothing. */
6255 else if (sparc_leaf_function_p)
6256 {
6257 if (size <= 4096)
6258 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6259 else if (size <= 8192)
6260 {
6261 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6262 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6263 }
6264 else
6265 {
6266 rtx reg = gen_rtx_REG (Pmode, 1);
6267 emit_move_insn (reg, GEN_INT (size));
6268 emit_insn (gen_stack_pointer_inc (reg));
6269 }
6270 }
6271 }
6272
6273 /* Expand the function epilogue for the flat model, either normal or part of
6274    a sibcall.  We emit all the instructions except the return or the call.  */
6275
6276 void
6277 sparc_flat_expand_epilogue (bool for_eh)
6278 {
6279 HOST_WIDE_INT size = sparc_frame_size;
6280
6281 if (sparc_n_global_fp_regs > 0)
6282 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6283 sparc_frame_base_offset
6284 - sparc_apparent_frame_size,
6285 SORR_RESTORE);
6286
6287 /* If we have a frame pointer, we'll need both to restore it before the
6288 frame is destroyed and use its current value in destroying the frame.
6289 Since we don't have an atomic way to do that in the flat window model,
6290 we save the current value into a temporary register (%g1). */
6291 if (frame_pointer_needed && !for_eh)
6292 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
6293
6294 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6295 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
6296 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
6297
6298 if (sparc_save_local_in_regs_p)
6299 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
6300 sparc_frame_base_offset,
6301 SORR_RESTORE);
6302
6303 if (size == 0 || for_eh)
6304 ; /* do nothing. */
6305 else if (frame_pointer_needed)
6306 {
6307 /* Make sure the frame is destroyed after everything else is done. */
6308 emit_insn (gen_blockage ());
6309
6310 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
6311 }
6312 else
6313 {
6314 /* Likewise. */
6315 emit_insn (gen_blockage ());
6316
6317 if (size <= 4096)
6318 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6319 else if (size <= 8192)
6320 {
6321 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6322 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6323 }
6324 else
6325 {
6326 rtx reg = gen_rtx_REG (Pmode, 1);
6327 emit_move_insn (reg, GEN_INT (size));
6328 emit_insn (gen_stack_pointer_inc (reg));
6329 }
6330 }
6331 }
6332
6333 /* Return true if it is appropriate to emit `return' instructions in the
6334 body of a function. */
6335
6336 bool
6337 sparc_can_use_return_insn_p (void)
6338 {
6339 return sparc_prologue_data_valid_p
6340 && sparc_n_global_fp_regs == 0
6341 && TARGET_FLAT
6342 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6343 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
6344 }
6345
6346 /* This function generates the assembly code for function exit. */
6347
6348 static void
6349 sparc_asm_function_epilogue (FILE *file)
6350 {
6351 /* If the last two instructions of a function are "call foo; dslot;"
6352 the return address might point to the first instruction in the next
6353 function and we have to output a dummy nop for the sake of sane
6354 backtraces in such cases. This is pointless for sibling calls since
6355 the return address is explicitly adjusted. */
6356
6357 rtx_insn *insn = get_last_insn ();
6358
6359 rtx last_real_insn = prev_real_insn (insn);
6360 if (last_real_insn
6361 && NONJUMP_INSN_P (last_real_insn)
6362 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6363 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6364
6365 if (last_real_insn
6366 && CALL_P (last_real_insn)
6367 && !SIBLING_CALL_P (last_real_insn))
6368 fputs("\tnop\n", file);
6369
6370 sparc_output_deferred_case_vectors ();
6371 }
6372
6373 /* Output a 'restore' instruction. */
6374
6375 static void
6376 output_restore (rtx pat)
6377 {
6378 rtx operands[3];
6379
6380 if (! pat)
6381 {
6382 fputs ("\t restore\n", asm_out_file);
6383 return;
6384 }
6385
6386 gcc_assert (GET_CODE (pat) == SET);
6387
6388 operands[0] = SET_DEST (pat);
6389 pat = SET_SRC (pat);
6390
6391 switch (GET_CODE (pat))
6392 {
6393 case PLUS:
6394 operands[1] = XEXP (pat, 0);
6395 operands[2] = XEXP (pat, 1);
6396 output_asm_insn (" restore %r1, %2, %Y0", operands);
6397 break;
6398 case LO_SUM:
6399 operands[1] = XEXP (pat, 0);
6400 operands[2] = XEXP (pat, 1);
6401 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6402 break;
6403 case ASHIFT:
6404 operands[1] = XEXP (pat, 0);
6405 gcc_assert (XEXP (pat, 1) == const1_rtx);
6406 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6407 break;
6408 default:
6409 operands[1] = pat;
6410 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6411 break;
6412 }
6413 }
6414
6415 /* Output a return. */
6416
6417 const char *
6418 output_return (rtx_insn *insn)
6419 {
6420 if (crtl->calls_eh_return)
6421 {
6422 /* If the function uses __builtin_eh_return, the eh_return
6423 machinery occupies the delay slot. */
6424 gcc_assert (!final_sequence);
6425
6426 if (flag_delayed_branch)
6427 {
6428 if (!TARGET_FLAT && TARGET_V9)
6429 fputs ("\treturn\t%i7+8\n", asm_out_file);
6430 else
6431 {
6432 if (!TARGET_FLAT)
6433 fputs ("\trestore\n", asm_out_file);
6434
6435 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6436 }
6437
6438 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6439 }
6440 else
6441 {
6442 if (!TARGET_FLAT)
6443 fputs ("\trestore\n", asm_out_file);
6444
6445 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6446 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6447 }
6448 }
6449 else if (sparc_leaf_function_p || TARGET_FLAT)
6450 {
6451 /* This is a leaf or flat function so we don't have to bother restoring
6452 the register window, which frees us from dealing with the convoluted
6453 semantics of restore/return. We simply output the jump to the
6454 return address and the insn in the delay slot (if any). */
6455
6456 return "jmp\t%%o7+%)%#";
6457 }
6458 else
6459 {
6460 /* This is a regular function so we have to restore the register window.
6461 We may have a pending insn for the delay slot, which will be either
6462 combined with the 'restore' instruction or put in the delay slot of
6463 the 'return' instruction. */
6464
6465 if (final_sequence)
6466 {
6467 rtx_insn *delay;
6468 rtx pat;
6469
6470 delay = NEXT_INSN (insn);
6471 gcc_assert (delay);
6472
6473 pat = PATTERN (delay);
6474
6475 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6476 {
6477 epilogue_renumber (&pat, 0);
6478 return "return\t%%i7+%)%#";
6479 }
6480 else
6481 {
6482 output_asm_insn ("jmp\t%%i7+%)", NULL);
6483
6484 /* We're going to output the insn in the delay slot manually.
6485 Make sure to output its source location first. */
6486 PATTERN (delay) = gen_blockage ();
6487 INSN_CODE (delay) = -1;
6488 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6489 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6490
6491 output_restore (pat);
6492 }
6493 }
6494 else
6495 {
6496 /* The delay slot is empty. */
6497 if (TARGET_V9)
6498 return "return\t%%i7+%)\n\t nop";
6499 else if (flag_delayed_branch)
6500 return "jmp\t%%i7+%)\n\t restore";
6501 else
6502 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6503 }
6504 }
6505
6506 return "";
6507 }
6508
6509 /* Output a sibling call. */
6510
6511 const char *
6512 output_sibcall (rtx_insn *insn, rtx call_operand)
6513 {
6514 rtx operands[1];
6515
6516 gcc_assert (flag_delayed_branch);
6517
6518 operands[0] = call_operand;
6519
6520 if (sparc_leaf_function_p || TARGET_FLAT)
6521 {
6522 /* This is a leaf or flat function so we don't have to bother restoring
6523 the register window. We simply output the jump to the function and
6524 the insn in the delay slot (if any). */
6525
6526 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6527
6528 if (final_sequence)
6529 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6530 operands);
6531 else
6532 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6533 	       it into a branch if possible.  */
6534 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6535 operands);
6536 }
6537 else
6538 {
6539 /* This is a regular function so we have to restore the register window.
6540 We may have a pending insn for the delay slot, which will be combined
6541 with the 'restore' instruction. */
6542
6543 output_asm_insn ("call\t%a0, 0", operands);
6544
6545 if (final_sequence)
6546 {
6547 rtx_insn *delay;
6548 rtx pat;
6549
6550 delay = NEXT_INSN (insn);
6551 gcc_assert (delay);
6552
6553 pat = PATTERN (delay);
6554
6555 /* We're going to output the insn in the delay slot manually.
6556 Make sure to output its source location first. */
6557 PATTERN (delay) = gen_blockage ();
6558 INSN_CODE (delay) = -1;
6559 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6560 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6561
6562 output_restore (pat);
6563 }
6564 else
6565 output_restore (NULL_RTX);
6566 }
6567
6568 return "";
6569 }
6570 \f
6571 /* Functions for handling argument passing.
6572
6573 For 32-bit, the first 6 args are normally in registers and the rest are
6574 pushed. Any arg that starts within the first 6 words is at least
6575 partially passed in a register unless its data type forbids.
6576
6577 For 64-bit, the argument registers are laid out as an array of 16 elements
6578 and arguments are added sequentially. The first 6 int args and up to the
6579 first 16 fp args (depending on size) are passed in regs.
6580
6581 Slot Stack Integral Float Float in structure Double Long Double
6582 ---- ----- -------- ----- ------------------ ------ -----------
6583 15 [SP+248] %f31 %f30,%f31 %d30
6584 14 [SP+240] %f29 %f28,%f29 %d28 %q28
6585 13 [SP+232] %f27 %f26,%f27 %d26
6586 12 [SP+224] %f25 %f24,%f25 %d24 %q24
6587 11 [SP+216] %f23 %f22,%f23 %d22
6588 10 [SP+208] %f21 %f20,%f21 %d20 %q20
6589 9 [SP+200] %f19 %f18,%f19 %d18
6590 8 [SP+192] %f17 %f16,%f17 %d16 %q16
6591 7 [SP+184] %f15 %f14,%f15 %d14
6592 6 [SP+176] %f13 %f12,%f13 %d12 %q12
6593 5 [SP+168] %o5 %f11 %f10,%f11 %d10
6594 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
6595 3 [SP+152] %o3 %f7 %f6,%f7 %d6
6596 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
6597 1 [SP+136] %o1 %f3 %f2,%f3 %d2
6598 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
6599
6600 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
6601
6602 Integral arguments are always passed as 64-bit quantities appropriately
6603 extended.
6604
6605 Passing of floating point values is handled as follows.
6606 If a prototype is in scope:
6607 If the value is in a named argument (i.e. not a stdarg function or a
6608 value not part of the `...') then the value is passed in the appropriate
6609 fp reg.
6610 If the value is part of the `...' and is passed in one of the first 6
6611 slots then the value is passed in the appropriate int reg.
6612 If the value is part of the `...' and is not passed in one of the first 6
6613 slots then the value is passed in memory.
6614 If a prototype is not in scope:
6615 If the value is one of the first 6 arguments the value is passed in the
6616 appropriate integer reg and the appropriate fp reg.
6617 If the value is not one of the first 6 arguments the value is passed in
6618 the appropriate fp reg and in memory.
6619
6620
6621 Summary of the calling conventions implemented by GCC on the SPARC:
6622
6623 32-bit ABI:
6624 size argument return value
6625
6626 small integer <4 int. reg. int. reg.
6627 word 4 int. reg. int. reg.
6628 double word 8 int. reg. int. reg.
6629
6630 _Complex small integer <8 int. reg. int. reg.
6631 _Complex word 8 int. reg. int. reg.
6632 _Complex double word 16 memory int. reg.
6633
6634 vector integer <=8 int. reg. FP reg.
6635 vector integer >8 memory memory
6636
6637 float 4 int. reg. FP reg.
6638 double 8 int. reg. FP reg.
6639 long double 16 memory memory
6640
6641 _Complex float 8 memory FP reg.
6642 _Complex double 16 memory FP reg.
6643 _Complex long double 32 memory FP reg.
6644
6645 vector float any memory memory
6646
6647 aggregate any memory memory
6648
6649
6650
6651 64-bit ABI:
6652 size argument return value
6653
6654 small integer <8 int. reg. int. reg.
6655 word 8 int. reg. int. reg.
6656 double word 16 int. reg. int. reg.
6657
6658 _Complex small integer <16 int. reg. int. reg.
6659 _Complex word 16 int. reg. int. reg.
6660 _Complex double word 32 memory int. reg.
6661
6662 vector integer <=16 FP reg. FP reg.
6663 vector integer 16<s<=32 memory FP reg.
6664 vector integer >32 memory memory
6665
6666 float 4 FP reg. FP reg.
6667 double 8 FP reg. FP reg.
6668 long double 16 FP reg. FP reg.
6669
6670 _Complex float 8 FP reg. FP reg.
6671 _Complex double 16 FP reg. FP reg.
6672 _Complex long double 32 memory FP reg.
6673
6674 vector float <=16 FP reg. FP reg.
6675 vector float 16<s<=32 memory FP reg.
6676 vector float >32 memory memory
6677
6678 aggregate <=16 reg. reg.
6679 aggregate 16<s<=32 memory reg.
6680 aggregate >32 memory memory
6681
6682
6683
6684 Note #1: complex floating-point types follow the extended SPARC ABIs as
6685 implemented by the Sun compiler.
6686
6687 Note #2: integer vector types follow the scalar floating-point types
6688 conventions to match what is implemented by the Sun VIS SDK.
6689
6690 Note #3: floating-point vector types follow the aggregate types
6691 conventions. */
6692
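/* Illustrative reading of the 64-bit table above (hypothetical types): a
   prototyped argument of type

     struct { double x; double y; };   (16 bytes)

   falls in the "aggregate <=16" row, so it is passed and returned in
   registers, with its double fields placed in FP registers; grow it to 24
   bytes and it is passed in memory but still returned in registers; past 32
   bytes both the argument and the return value live in memory.  */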
6693
6694 /* Maximum number of int regs for args. */
6695 #define SPARC_INT_ARG_MAX 6
6696 /* Maximum number of fp regs for args. */
6697 #define SPARC_FP_ARG_MAX 16
6698 /* Number of words (partially) occupied for a given size in units. */
6699 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
6700
6701 /* Handle the INIT_CUMULATIVE_ARGS macro.
6702 Initialize a variable CUM of type CUMULATIVE_ARGS
6703 for a call to a function whose data type is FNTYPE.
6704 For a library call, FNTYPE is 0. */
6705
6706 void
6707 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6708 {
6709 cum->words = 0;
6710 cum->prototype_p = fntype && prototype_p (fntype);
6711 cum->libcall_p = !fntype;
6712 }
6713
6714 /* Handle promotion of pointer and integer arguments. */
6715
6716 static machine_mode
6717 sparc_promote_function_mode (const_tree type, machine_mode mode,
6718 int *punsignedp, const_tree, int)
6719 {
6720 if (type && POINTER_TYPE_P (type))
6721 {
6722 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6723 return Pmode;
6724 }
6725
6726 /* Integral arguments are passed as full words, as per the ABI. */
6727 if (GET_MODE_CLASS (mode) == MODE_INT
6728 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6729 return word_mode;
6730
6731 return mode;
6732 }
6733
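/* For example, a 'short' or 'char' argument is widened to word_mode (SImode
   for the 32-bit ABI, DImode for the 64-bit ABI) before being passed, while
   pointer arguments are extended to Pmode as POINTERS_EXTEND_UNSIGNED
   dictates.  */
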
6734 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6735
6736 static bool
6737 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6738 {
6739 return TARGET_ARCH64 ? true : false;
6740 }
6741
6742 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6743 Specify whether to pass the argument by reference. */
6744
6745 static bool
6746 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6747 machine_mode mode, const_tree type,
6748 bool named ATTRIBUTE_UNUSED)
6749 {
6750 if (TARGET_ARCH32)
6751 /* Original SPARC 32-bit ABI says that structures and unions,
6752 and quad-precision floats are passed by reference.
6753 All other base types are passed in registers.
6754
6755 Extended ABI (as implemented by the Sun compiler) says that all
6756 complex floats are passed by reference. Pass complex integers
6757 in registers up to 8 bytes. More generally, enforce the 2-word
6758 cap for passing arguments in registers.
6759
6760 Vector ABI (as implemented by the Sun VIS SDK) says that integer
6761 vectors are passed like floats of the same size, that is in
6762 registers up to 8 bytes. Pass all vector floats by reference
6763 like structure and unions. */
6764 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6765 || mode == SCmode
6766 /* Catch CDImode, TFmode, DCmode and TCmode. */
6767 || GET_MODE_SIZE (mode) > 8
6768 || (type
6769 && VECTOR_TYPE_P (type)
6770 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6771 else
6772 /* Original SPARC 64-bit ABI says that structures and unions
6773 smaller than 16 bytes are passed in registers, as well as
6774 all other base types.
6775
6776 Extended ABI (as implemented by the Sun compiler) says that
6777 complex floats are passed in registers up to 16 bytes. Pass
6778 all complex integers in registers up to 16 bytes. More generally,
6779 enforce the 2-word cap for passing arguments in registers.
6780
6781 Vector ABI (as implemented by the Sun VIS SDK) says that integer
6782 vectors are passed like floats of the same size, that is in
6783 registers (up to 16 bytes). Pass all vector floats like structure
6784 and unions. */
6785 return ((type
6786 && (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
6787 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6788 /* Catch CTImode and TCmode. */
6789 || GET_MODE_SIZE (mode) > 16);
6790 }
6791
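/* Illustrative consequences of the rules above (hypothetical arguments): in
   32-bit mode a 'long double' (16 bytes), a '_Complex float' or any struct
   is passed by reference, while a 'double' or a '_Complex int' (8 bytes)
   stays in registers; in 64-bit mode the same struct is passed by reference
   only if it is larger than 16 bytes, and 'long double' is passed by
   value.  */
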
6792 /* Traverse the record TYPE recursively and call FUNC on its fields.
6793 NAMED is true if this is for a named parameter. DATA is passed
6794 to FUNC for each field. OFFSET is the starting position and
6795 PACKED is true if we are inside a packed record. */
6796
6797 template <typename T, void Func (const_tree, int, bool, T*)>
6798 static void
6799 traverse_record_type (const_tree type, bool named, T *data,
6800 int offset = 0, bool packed = false)
6801 {
6802 /* The ABI obviously doesn't specify how packed structures are passed.
6803 These are passed in integer regs if possible, otherwise memory. */
6804 if (!packed)
6805 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6806 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6807 {
6808 packed = true;
6809 break;
6810 }
6811
6812 /* Walk the real fields, but skip those with no size or a zero size.
6813 ??? Fields with variable offset are handled as having zero offset. */
6814 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6815 if (TREE_CODE (field) == FIELD_DECL)
6816 {
6817 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6818 continue;
6819
6820 int bitpos = offset;
6821 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6822 bitpos += int_bit_position (field);
6823
6824 tree field_type = TREE_TYPE (field);
6825 if (TREE_CODE (field_type) == RECORD_TYPE)
6826 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6827 packed);
6828 else
6829 {
6830 const bool fp_type
6831 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6832 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6833 data);
6834 }
6835 }
6836 }
6837
6838 /* Handle recursive register classifying for structure layout. */
6839
6840 typedef struct
6841 {
6842 bool fp_regs; /* true if field eligible to FP registers. */
6843 bool fp_regs_in_first_word; /* true if such field in first word. */
6844 } classify_data_t;
6845
6846 /* A subroutine of function_arg_slotno. Classify the field. */
6847
6848 inline void
6849 classify_registers (const_tree, int bitpos, bool fp, classify_data_t *data)
6850 {
6851 if (fp)
6852 {
6853 data->fp_regs = true;
6854 if (bitpos < BITS_PER_WORD)
6855 data->fp_regs_in_first_word = true;
6856 }
6857 }
6858
6859 /* Compute the slot number to pass an argument in.
6860 Return the slot number or -1 if passing on the stack.
6861
6862 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6863 the preceding args and about the function being called.
6864 MODE is the argument's machine mode.
6865 TYPE is the data type of the argument (as a tree).
6866 This is null for libcalls where that information may
6867 not be available.
6868 NAMED is nonzero if this argument is a named parameter
6869 (otherwise it is an extra parameter matching an ellipsis).
6870 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6871 *PREGNO records the register number to use if scalar type.
6872 *PPADDING records the amount of padding needed in words. */
6873
6874 static int
6875 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6876 const_tree type, bool named, bool incoming,
6877 int *pregno, int *ppadding)
6878 {
6879 const int regbase
6880 = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
6881 int slotno = cum->words, regno;
6882 enum mode_class mclass = GET_MODE_CLASS (mode);
6883
6884 /* Silence warnings in the callers. */
6885 *pregno = -1;
6886 *ppadding = -1;
6887
6888 if (type && TREE_ADDRESSABLE (type))
6889 return -1;
6890
6891 /* In 64-bit mode, objects requiring 16-byte alignment get it. */
6892 if (TARGET_ARCH64
6893 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6894 && (slotno & 1) != 0)
6895 {
6896 slotno++;
6897 *ppadding = 1;
6898 }
6899 else
6900 *ppadding = 0;
6901
6902 /* Vector types deserve special treatment because they are polymorphic wrt
6903 their mode, depending upon whether VIS instructions are enabled. */
6904 if (type && VECTOR_TYPE_P (type))
6905 {
6906 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6907 {
6908 /* The SPARC port defines no floating-point vector modes. */
6909 gcc_assert (mode == BLKmode);
6910 }
6911 else
6912 {
6913 /* Integer vector types should either have a vector
6914 mode or an integral mode, because we are guaranteed
6915 by pass_by_reference that their size is not greater
6916 than 16 bytes and TImode is 16-byte wide. */
6917 gcc_assert (mode != BLKmode);
6918
6919 /* Integer vectors are handled like floats as per
6920 the Sun VIS SDK. */
6921 mclass = MODE_FLOAT;
6922 }
6923 }
6924
6925 switch (mclass)
6926 {
6927 case MODE_FLOAT:
6928 case MODE_COMPLEX_FLOAT:
6929 case MODE_VECTOR_INT:
6930 if (TARGET_ARCH64 && TARGET_FPU && named)
6931 {
6932 /* If all arg slots are filled, then must pass on stack. */
6933 if (slotno >= SPARC_FP_ARG_MAX)
6934 return -1;
6935
6936 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6937 /* Arguments filling only one single FP register are
6938 right-justified in the outer double FP register. */
6939 if (GET_MODE_SIZE (mode) <= 4)
6940 regno++;
6941 break;
6942 }
6943 /* fallthrough */
6944
6945 case MODE_INT:
6946 case MODE_COMPLEX_INT:
6947 /* If all arg slots are filled, then must pass on stack. */
6948 if (slotno >= SPARC_INT_ARG_MAX)
6949 return -1;
6950
6951 regno = regbase + slotno;
6952 break;
6953
6954 case MODE_RANDOM:
6955 /* MODE is VOIDmode when generating the actual call. */
6956 if (mode == VOIDmode)
6957 return -1;
6958
6959       if (TARGET_ARCH64 && TARGET_FPU && named
6960 && type
6961 && (TREE_CODE (type) == RECORD_TYPE || VECTOR_TYPE_P (type)))
6962 {
6963 /* If all arg slots are filled, then must pass on stack. */
6964 if (slotno >= SPARC_FP_ARG_MAX)
6965 return -1;
6966
6967 if (TREE_CODE (type) == RECORD_TYPE)
6968 {
6969 classify_data_t data = { false, false };
6970 traverse_record_type<classify_data_t, classify_registers>
6971 (type, named, &data);
6972
6973 if (data.fp_regs)
6974 {
6975 /* If all FP slots are filled except for the last one and
6976 there is no FP field in the first word, then must pass
6977 on stack. */
6978 if (slotno >= SPARC_FP_ARG_MAX - 1
6979 && !data.fp_regs_in_first_word)
6980 return -1;
6981 }
6982 else
6983 {
6984 /* If all int slots are filled, then must pass on stack. */
6985 if (slotno >= SPARC_INT_ARG_MAX)
6986 return -1;
6987 }
6988
6989 /* PREGNO isn't set since both int and FP regs can be used. */
6990 return slotno;
6991 }
6992
6993 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6994 }
6995 else
6996 {
6997 /* If all arg slots are filled, then must pass on stack. */
6998 if (slotno >= SPARC_INT_ARG_MAX)
6999 return -1;
7000
7001 regno = regbase + slotno;
7002 }
7003 break;
7004
7005 default :
7006 gcc_unreachable ();
7007 }
7008
7009 *pregno = regno;
7010 return slotno;
7011 }
7012
7013 /* Handle recursive register counting/assigning for structure layout. */
7014
7015 typedef struct
7016 {
7017 int slotno; /* slot number of the argument. */
7018 int regbase; /* regno of the base register. */
7019 int intoffset; /* offset of the first pending integer field. */
7020 int nregs; /* number of words passed in registers. */
7021 bool stack; /* true if part of the argument is on the stack. */
7022 rtx ret; /* return expression being built. */
7023 } assign_data_t;
7024
7025 /* A subroutine of function_arg_record_value. Compute the number of integer
7026 registers to be assigned between PARMS->intoffset and BITPOS. Return
7027 true if at least one integer register is assigned or false otherwise. */
7028
7029 static bool
7030 compute_int_layout (int bitpos, assign_data_t *data, int *pnregs)
7031 {
7032 if (data->intoffset < 0)
7033 return false;
7034
7035 const int intoffset = data->intoffset;
7036 data->intoffset = -1;
7037
7038 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7039 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
7040 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
7041 int nregs = (endbit - startbit) / BITS_PER_WORD;
7042
7043 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
7044 {
7045 nregs = SPARC_INT_ARG_MAX - this_slotno;
7046
7047 /* We need to pass this field (partly) on the stack. */
7048 data->stack = 1;
7049 }
7050
7051 if (nregs <= 0)
7052 return false;
7053
7054 *pnregs = nregs;
7055 return true;
7056 }
7057
7058 /* A subroutine of function_arg_record_value. Compute the number and the mode
7059 of the FP registers to be assigned for FIELD. Return true if at least one
7060 FP register is assigned or false otherwise. */
7061
7062 static bool
7063 compute_fp_layout (const_tree field, int bitpos, assign_data_t *data,
7064 int *pnregs, machine_mode *pmode)
7065 {
7066 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7067 machine_mode mode = DECL_MODE (field);
7068 int nregs, nslots;
7069
7070 /* Slots are counted as words while regs are counted as having the size of
7071 the (inner) mode. */
7072 if (VECTOR_TYPE_P (TREE_TYPE (field)) && mode == BLKmode)
7073 {
7074 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7075 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
7076 }
7077 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
7078 {
7079 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7080 nregs = 2;
7081 }
7082 else
7083 nregs = 1;
7084
7085 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
7086
7087 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
7088 {
7089 nslots = SPARC_FP_ARG_MAX - this_slotno;
7090 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
7091
7092 /* We need to pass this field (partly) on the stack. */
7093 data->stack = 1;
7094
7095 if (nregs <= 0)
7096 return false;
7097 }
7098
7099 *pnregs = nregs;
7100 *pmode = mode;
7101 return true;
7102 }
7103
7104 /* A subroutine of function_arg_record_value. Count the number of registers
7105 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
7106
7107 inline void
7108 count_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
7109 {
7110 if (fp)
7111 {
7112 int nregs;
7113 machine_mode mode;
7114
7115 if (compute_int_layout (bitpos, data, &nregs))
7116 data->nregs += nregs;
7117
7118 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
7119 data->nregs += nregs;
7120 }
7121 else
7122 {
7123 if (data->intoffset < 0)
7124 data->intoffset = bitpos;
7125 }
7126 }
7127
7128 /* A subroutine of function_arg_record_value. Assign the bits of the
7129 structure between PARMS->intoffset and BITPOS to integer registers. */
7130
7131 static void
7132 assign_int_registers (int bitpos, assign_data_t *data)
7133 {
7134 int intoffset = data->intoffset;
7135 machine_mode mode;
7136 int nregs;
7137
7138 if (!compute_int_layout (bitpos, data, &nregs))
7139 return;
7140
7141 /* If this is the trailing part of a word, only load that much into
7142 the register. Otherwise load the whole register. Note that in
7143 the latter case we may pick up unwanted bits. It's not a problem
7144          at the moment, but we may wish to revisit this.  */
7145 if (intoffset % BITS_PER_WORD != 0)
7146 mode = smallest_int_mode_for_size (BITS_PER_WORD
7147 - intoffset % BITS_PER_WORD);
7148 else
7149 mode = word_mode;
7150
7151 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7152 unsigned int regno = data->regbase + this_slotno;
7153 intoffset /= BITS_PER_UNIT;
7154
7155 do
7156 {
7157 rtx reg = gen_rtx_REG (mode, regno);
7158 XVECEXP (data->ret, 0, data->stack + data->nregs)
7159 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7160 data->nregs += 1;
7161 mode = word_mode;
7162 regno += 1;
7163 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
7164 }
7165 while (--nregs > 0);
7166 }
7167
7168 /* A subroutine of function_arg_record_value. Assign FIELD at position
7169 BITPOS to FP registers. */
7170
7171 static void
7172 assign_fp_registers (const_tree field, int bitpos, assign_data_t *data)
7173 {
7174 int nregs;
7175 machine_mode mode;
7176
7177 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
7178 return;
7179
7180 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7181 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
7182 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
7183 regno++;
7184 int pos = bitpos / BITS_PER_UNIT;
7185
7186 do
7187 {
7188 rtx reg = gen_rtx_REG (mode, regno);
7189 XVECEXP (data->ret, 0, data->stack + data->nregs)
7190 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
7191 data->nregs += 1;
7192 regno += GET_MODE_SIZE (mode) / 4;
7193 pos += GET_MODE_SIZE (mode);
7194 }
7195 while (--nregs > 0);
7196 }
7197
7198 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
7199 the structure between PARMS->intoffset and BITPOS to registers. */
7200
7201 inline void
7202 assign_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
7203 {
7204 if (fp)
7205 {
7206 assign_int_registers (bitpos, data);
7207
7208 assign_fp_registers (field, bitpos, data);
7209 }
7210 else
7211 {
7212 if (data->intoffset < 0)
7213 data->intoffset = bitpos;
7214 }
7215 }
7216
7217 /* Used by function_arg and function_value to implement the complex
7218 conventions of the 64-bit ABI for passing and returning structures.
7219 Return an expression valid as a return value for the FUNCTION_ARG
7220 and TARGET_FUNCTION_VALUE.
7221
7222 TYPE is the data type of the argument (as a tree).
7223 This is null for libcalls where that information may
7224 not be available.
7225 MODE is the argument's machine mode.
7226 SLOTNO is the index number of the argument's slot in the parameter array.
7227 NAMED is true if this argument is a named parameter
7228 (otherwise it is an extra parameter matching an ellipsis).
7229 REGBASE is the regno of the base register for the parameter array. */
7230
7231 static rtx
7232 function_arg_record_value (const_tree type, machine_mode mode,
7233 int slotno, bool named, int regbase)
7234 {
7235 const int size = int_size_in_bytes (type);
7236 assign_data_t data;
7237 int nregs;
7238
7239 data.slotno = slotno;
7240 data.regbase = regbase;
7241
7242 /* Count how many registers we need. */
7243 data.nregs = 0;
7244 data.intoffset = 0;
7245 data.stack = false;
7246 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
7247
7248 /* Take into account pending integer fields. */
7249 if (compute_int_layout (size * BITS_PER_UNIT, &data, &nregs))
7250 data.nregs += nregs;
7251
7252 /* Allocate the vector and handle some annoying special cases. */
7253 nregs = data.nregs;
7254
7255 if (nregs == 0)
7256 {
7257 /* ??? Empty structure has no value? Duh? */
7258 if (size <= 0)
7259 {
7260 /* Though there's nothing really to store, return a word register
7261 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
7262 leads to breakage due to the fact that there are zero bytes to
7263 load. */
7264 return gen_rtx_REG (mode, regbase);
7265 }
7266
7267 /* ??? C++ has structures with no fields, and yet a size. Give up
7268 for now and pass everything back in integer registers. */
7269 nregs = CEIL_NWORDS (size);
7270 if (nregs + slotno > SPARC_INT_ARG_MAX)
7271 nregs = SPARC_INT_ARG_MAX - slotno;
7272 }
7273
7274 gcc_assert (nregs > 0);
7275
7276 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
7277
7278 /* If at least one field must be passed on the stack, generate
7279 (parallel [(expr_list (nil) ...) ...]) so that all fields will
7280 also be passed on the stack. We can't do much better because the
7281 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
7282 of structures for which the fields passed exclusively in registers
7283 are not at the beginning of the structure. */
7284 if (data.stack)
7285 XVECEXP (data.ret, 0, 0)
7286 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7287
7288 /* Assign the registers. */
7289 data.nregs = 0;
7290 data.intoffset = 0;
7291 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
7292
7293 /* Assign pending integer fields. */
7294 assign_int_registers (size * BITS_PER_UNIT, &data);
7295
7296 gcc_assert (data.nregs == nregs);
7297
7298 return data.ret;
7299 }
7300
7301 /* Used by function_arg and function_value to implement the conventions
7302 of the 64-bit ABI for passing and returning unions.
7303 Return an expression valid as a return value for TARGET_FUNCTION_ARG
7304 and TARGET_FUNCTION_VALUE.
7305
7306 SIZE is the size in bytes of the union.
7307 MODE is the argument's machine mode.
7308 SLOTNO is the index number of the argument's slot in the parameter array.
7309 REGNO is the hard register the union will be passed in. */
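
/* For example (an illustration of the conventions above): a 16-byte union
   passed in slot 0 is described as two word-sized pieces, (reg:DI %o0) for
   bytes 0-7 and (reg:DI %o1) for bytes 8-15, i.e. it is left-justified
   across consecutive integer registers. */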
7310
7311 static rtx
7312 function_arg_union_value (int size, machine_mode mode, int slotno, int regno)
7313 {
7314 unsigned int nwords;
7315
7316 /* See comment in function_arg_record_value for empty structures. */
7317 if (size <= 0)
7318 return gen_rtx_REG (mode, regno);
7319
7320 if (slotno == SPARC_INT_ARG_MAX - 1)
7321 nwords = 1;
7322 else
7323 nwords = CEIL_NWORDS (size);
7324
7325 rtx regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
7326
7327 /* Unions are passed left-justified. */
7328 for (unsigned int i = 0; i < nwords; i++)
7329 XVECEXP (regs, 0, i)
7330 = gen_rtx_EXPR_LIST (VOIDmode,
7331 gen_rtx_REG (word_mode, regno + i),
7332 GEN_INT (UNITS_PER_WORD * i));
7333
7334 return regs;
7335 }
7336
7337 /* Used by function_arg and function_value to implement the conventions
7338 of the 64-bit ABI for passing and returning BLKmode vectors.
7339 Return an expression valid as a return value for TARGET_FUNCTION_ARG
7340 and TARGET_FUNCTION_VALUE.
7341
7342 SIZE is the size in bytes of the vector.
7343 SLOTNO is the index number of the argument's slot in the parameter array.
7344 NAMED is true if this argument is a named parameter
7345 (otherwise it is an extra parameter matching an ellipsis).
7346 REGNO is the hard register the vector will be passed in. */
7347
7348 static rtx
7349 function_arg_vector_value (int size, int slotno, bool named, int regno)
7350 {
7351 const int mult = (named ? 2 : 1);
7352 unsigned int nwords;
7353
7354 if (slotno == (named ? SPARC_FP_ARG_MAX : SPARC_INT_ARG_MAX) - 1)
7355 nwords = 1;
7356 else
7357 nwords = CEIL_NWORDS (size);
7358
7359 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nwords));
7360
7361 if (size < UNITS_PER_WORD)
7362 XVECEXP (regs, 0, 0)
7363 = gen_rtx_EXPR_LIST (VOIDmode,
7364 gen_rtx_REG (SImode, regno),
7365 const0_rtx);
7366 else
7367 for (unsigned int i = 0; i < nwords; i++)
7368 XVECEXP (regs, 0, i)
7369 = gen_rtx_EXPR_LIST (VOIDmode,
7370 gen_rtx_REG (word_mode, regno + i * mult),
7371 GEN_INT (i * UNITS_PER_WORD));
7372
7373 return regs;
7374 }
7375
7376 /* Determine where to put an argument to a function.
7377 Value is zero to push the argument on the stack,
7378 or a hard register in which to store the argument.
7379
7380 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7381 the preceding args and about the function being called.
7382 MODE is the argument's machine mode.
7383 TYPE is the data type of the argument (as a tree).
7384 This is null for libcalls where that information may
7385 not be available.
7386 NAMED is true if this argument is a named parameter
7387 (otherwise it is an extra parameter matching an ellipsis).
7388 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7389 TARGET_FUNCTION_INCOMING_ARG. */
7390
7391 static rtx
7392 sparc_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
7393 const_tree type, bool named, bool incoming)
7394 {
7395 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7396 const int regbase
7397 = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
7398 int slotno, regno, padding;
7399 enum mode_class mclass = GET_MODE_CLASS (mode);
7400
7401 slotno
7402 = function_arg_slotno (cum, mode, type, named, incoming, &regno, &padding);
7403 if (slotno == -1)
7404 return 0;
7405
7406 /* Integer vectors are handled like floats as per the Sun VIS SDK. */
7407 if (type && VECTOR_INTEGER_TYPE_P (type))
7408 mclass = MODE_FLOAT;
7409
7410 if (TARGET_ARCH32)
7411 return gen_rtx_REG (mode, regno);
7412
7413 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7414 and are promoted to registers if possible. */
7415 if (type && TREE_CODE (type) == RECORD_TYPE)
7416 {
7417 const int size = int_size_in_bytes (type);
7418 gcc_assert (size <= 16);
7419
7420 return function_arg_record_value (type, mode, slotno, named, regbase);
7421 }
7422
7423 /* Unions up to 16 bytes in size are passed in integer registers. */
7424 else if (type && TREE_CODE (type) == UNION_TYPE)
7425 {
7426 const int size = int_size_in_bytes (type);
7427 gcc_assert (size <= 16);
7428
7429 return function_arg_union_value (size, mode, slotno, regno);
7430 }
7431
7432 /* Floating-point vectors up to 16 bytes are passed in registers. */
7433 else if (type && VECTOR_TYPE_P (type) && mode == BLKmode)
7434 {
7435 const int size = int_size_in_bytes (type);
7436 gcc_assert (size <= 16);
7437
7438 return function_arg_vector_value (size, slotno, named, regno);
7439 }
7440
7441 /* V9 FP args in register slots beyond the integer register slots are
7442 passed in registers, but also have the slot allocated for them.
7443 If no prototype is in scope, FP values in register slots are passed in
7444 two places: either FP regs and int regs, or FP regs and memory. */
7445 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7446 && SPARC_FP_REG_P (regno))
7447 {
7448 rtx reg = gen_rtx_REG (mode, regno);
7449 if (cum->prototype_p || cum->libcall_p)
7450 return reg;
7451 else
7452 {
7453 rtx v0, v1;
7454
7455 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7456 {
7457 int intreg;
7458
7459 /* On incoming, we don't need to know that the value
7460 is passed in %f0 and %i0, and it confuses other parts
7461 causing needless spillage even on the simplest cases. */
7462 if (incoming)
7463 return reg;
7464
7465 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7466 + (regno - SPARC_FP_ARG_FIRST) / 2);
7467
7468 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7469 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7470 const0_rtx);
7471 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7472 }
7473 else
7474 {
7475 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7476 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7477 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7478 }
7479 }
7480 }
7481
7482 /* All other aggregate types are passed in an integer register in a mode
7483 corresponding to the size of the type. */
7484 else if (type && AGGREGATE_TYPE_P (type))
7485 {
7486 const int size = int_size_in_bytes (type);
7487 gcc_assert (size <= 16);
7488
7489 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7490 }
7491
7492 return gen_rtx_REG (mode, regno);
7493 }
7494
7495 /* Handle the TARGET_FUNCTION_ARG target hook. */
7496
7497 static rtx
7498 sparc_function_arg (cumulative_args_t cum, machine_mode mode,
7499 const_tree type, bool named)
7500 {
7501 return sparc_function_arg_1 (cum, mode, type, named, false);
7502 }
7503
7504 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7505
7506 static rtx
7507 sparc_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
7508 const_tree type, bool named)
7509 {
7510 return sparc_function_arg_1 (cum, mode, type, named, true);
7511 }
7512
7513 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
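
/* For example, a `long double' (TFmode, 16-byte aligned) or a structure
   containing one is started on an even-numbered argument slot; any odd
   slot in front of it is skipped as padding. */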
7514
7515 static unsigned int
7516 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7517 {
7518 return ((TARGET_ARCH64
7519 && (GET_MODE_ALIGNMENT (mode) == 128
7520 || (type && TYPE_ALIGN (type) == 128)))
7521 ? 128
7522 : PARM_BOUNDARY);
7523 }
7524
7525 /* For an arg passed partly in registers and partly in memory,
7526 this is the number of bytes of registers used.
7527 For args passed entirely in registers or entirely in memory, zero.
7528
7529 Any arg that starts in the first 6 regs but won't entirely fit in them
7530 needs partial registers on v8. On v9, structures with integer
7531 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7532 values that begin in the last fp reg [where "last fp reg" varies with the
7533 mode] will be split between that reg and memory. */
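
/* Concretely: a 16-byte structure whose first word lands in the last
   integer slot gets its first 8 bytes in %o5 and the remaining 8 bytes in
   the argument overflow area on the stack, so this function reports
   UNITS_PER_WORD bytes passed in registers. */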
7534
7535 static int
7536 sparc_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
7537 tree type, bool named)
7538 {
7539 int slotno, regno, padding;
7540
7541 /* We pass false for incoming here; it doesn't matter. */
7542 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
7543 false, &regno, &padding);
7544
7545 if (slotno == -1)
7546 return 0;
7547
7548 if (TARGET_ARCH32)
7549 {
7550 /* We are guaranteed by pass_by_reference that the size of the
7551 argument is not greater than 8 bytes, so we only need to return
7552 one word if the argument is partially passed in registers. */
7553 const int size = GET_MODE_SIZE (mode);
7554
7555 if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
7556 return UNITS_PER_WORD;
7557 }
7558 else
7559 {
7560 /* We are guaranteed by pass_by_reference that the size of the
7561 argument is not greater than 16 bytes, so we only need to return
7562 one word if the argument is partially passed in registers. */
7563 if (type && AGGREGATE_TYPE_P (type))
7564 {
7565 const int size = int_size_in_bytes (type);
7566
7567 if (size > UNITS_PER_WORD
7568 && (slotno == SPARC_INT_ARG_MAX - 1
7569 || slotno == SPARC_FP_ARG_MAX - 1))
7570 return UNITS_PER_WORD;
7571 }
7572 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
7573 || ((GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7574 || (type && VECTOR_TYPE_P (type)))
7575 && !(TARGET_FPU && named)))
7576 {
7577 const int size = (type && VECTOR_FLOAT_TYPE_P (type))
7578 ? int_size_in_bytes (type)
7579 : GET_MODE_SIZE (mode);
7580
7581 if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
7582 return UNITS_PER_WORD;
7583 }
7584 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7585 || (type && VECTOR_TYPE_P (type)))
7586 {
7587 const int size = (type && VECTOR_FLOAT_TYPE_P (type))
7588 ? int_size_in_bytes (type)
7589 : GET_MODE_SIZE (mode);
7590
7591 if (size > UNITS_PER_WORD && slotno == SPARC_FP_ARG_MAX - 1)
7592 return UNITS_PER_WORD;
7593 }
7594 }
7595
7596 return 0;
7597 }
7598
7599 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7600 Update the data in CUM to advance over an argument
7601 of mode MODE and data type TYPE.
7602 TYPE is null for libcalls where that information may not be available. */
7603
7604 static void
7605 sparc_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
7606 const_tree type, bool named)
7607 {
7608 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7609 int regno, padding;
7610
7611 /* We pass false for incoming here; it doesn't matter. */
7612 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7613
7614 /* If argument requires leading padding, add it. */
7615 cum->words += padding;
7616
7617 if (TARGET_ARCH32)
7618 cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
7619 else
7620 {
7621 /* For types that can have BLKmode, get the size from the type. */
7622 if (type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7623 {
7624 const int size = int_size_in_bytes (type);
7625
7626 /* See comment in function_arg_record_value for empty structures. */
7627 if (size <= 0)
7628 cum->words++;
7629 else
7630 cum->words += CEIL_NWORDS (size);
7631 }
7632 else
7633 cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
7634 }
7635 }
7636
7637 /* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI, structs
7638 are always stored left-justified in their argument slot. */
7639
7640 static pad_direction
7641 sparc_function_arg_padding (machine_mode mode, const_tree type)
7642 {
7643 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7644 return PAD_UPWARD;
7645
7646 /* Fall back to the default. */
7647 return default_function_arg_padding (mode, type);
7648 }
7649
7650 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7651 Specify whether to return the return value in memory. */
7652
7653 static bool
7654 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7655 {
7656 if (TARGET_ARCH32)
7657 /* Original SPARC 32-bit ABI says that structures and unions, and
7658 quad-precision floats are returned in memory. But note that the
7659 first part is implemented through -fpcc-struct-return being the
7660 default, so here we only implement -freg-struct-return instead.
7661 All other base types are returned in registers.
7662
7663 Extended ABI (as implemented by the Sun compiler) says that
7664 all complex floats are returned in registers (8 FP registers
7665 at most for '_Complex long double'). Return all complex integers
7666 in registers (4 at most for '_Complex long long').
7667
7668 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7669 integers are returned like floats of the same size, that is in
7670 registers up to 8 bytes and in memory otherwise. Return all
7671 vector floats in memory like structure and unions; note that
7672 they always have BLKmode like the latter. */
7673 return (TYPE_MODE (type) == BLKmode
7674 || TYPE_MODE (type) == TFmode
7675 || (TREE_CODE (type) == VECTOR_TYPE
7676 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7677 else
7678 /* Original SPARC 64-bit ABI says that structures and unions
7679 smaller than 32 bytes are returned in registers, as well as
7680 all other base types.
7681
7682 Extended ABI (as implemented by the Sun compiler) says that all
7683 complex floats are returned in registers (8 FP registers at most
7684 for '_Complex long double'). Return all complex integers in
7685 registers (4 at most for '_Complex TItype').
7686
7687 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7688 integers are returned like floats of the same size, that is in
7689 registers. Return all vector floats like structure and unions;
7690 note that they always have BLKmode like the latter. */
7691 return (TYPE_MODE (type) == BLKmode
7692 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
7693 }
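
/* As an example of the rules above: on SPARC64 a 24-byte structure is
   returned in registers while a 40-byte one goes through memory; in both
   ABIs a `_Complex double' stays in floating-point registers (the extended
   ABI case). */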
7694
7695 /* Handle the TARGET_STRUCT_VALUE target hook.
7696 Return where to find the structure return value address. */
7697
7698 static rtx
7699 sparc_struct_value_rtx (tree fndecl, int incoming)
7700 {
7701 if (TARGET_ARCH64)
7702 return NULL_RTX;
7703 else
7704 {
7705 rtx mem;
7706
7707 if (incoming)
7708 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7709 STRUCT_VALUE_OFFSET));
7710 else
7711 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7712 STRUCT_VALUE_OFFSET));
7713
7714 /* Only follow the SPARC ABI for fixed-size structure returns.
7715 Variable-size structure returns are handled per the normal
7716 procedures in GCC. This is enabled by -mstd-struct-return. */
7717 if (incoming == 2
7718 && sparc_std_struct_return
7719 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7720 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7721 {
7722 /* We must check and adjust the return address, as it is optional
7723 as to whether the return object is really provided. */
7724 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7725 rtx scratch = gen_reg_rtx (SImode);
7726 rtx_code_label *endlab = gen_label_rtx ();
7727
7728 /* Calculate the return object size. */
7729 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7730 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7731 /* Construct a temporary return value. */
7732 rtx temp_val
7733 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7734
7735 /* Implement SPARC 32-bit psABI callee return struct checking:
7736
7737 Fetch the instruction where we will return to and see if
7738 it's an unimp instruction (the most significant 10 bits
7739 will be zero). */
7740 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7741 plus_constant (Pmode,
7742 ret_reg, 8)));
7743 /* Assume the size is valid and pre-adjust. */
7744 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7745 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7746 0, endlab);
7747 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7748 /* Write the address of the memory pointed to by temp_val into
7749 the memory pointed to by mem. */
7750 emit_move_insn (mem, XEXP (temp_val, 0));
7751 emit_label (endlab);
7752 }
7753
7754 return mem;
7755 }
7756 }
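
/* The 32-bit convention being checked looks roughly like this on the caller
   side (an illustrative sketch; the caller-side code is emitted elsewhere):

	st	%g1, [%sp+64]	! address of the area receiving the struct
	call	f
	 nop
	unimp	8		! low 12 bits: size of the returned struct

   A callee honoring the convention returns to %i7+12 rather than %i7+8,
   skipping the unimp. The code above peeks at the word at %i7+8: if its
   size field matches, the address stored by the caller is used; otherwise
   a local temporary is returned instead. */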
7757
7758 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7759 For v9, function return values are subject to the same rules as arguments,
7760 except that up to 32 bytes may be returned in registers. */
7761
7762 static rtx
7763 sparc_function_value_1 (const_tree type, machine_mode mode, bool outgoing)
7764 {
7765 /* Beware that the two values are swapped here wrt function_arg. */
7766 const int regbase
7767 = outgoing ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
7768 enum mode_class mclass = GET_MODE_CLASS (mode);
7769 int regno;
7770
7771 /* Integer vectors are handled like floats as per the Sun VIS SDK.
7772 Note that integer vectors larger than 16 bytes have BLKmode so
7773 they need to be handled like floating-point vectors below. */
7774 if (type && VECTOR_INTEGER_TYPE_P (type) && mode != BLKmode)
7775 mclass = MODE_FLOAT;
7776
7777 if (TARGET_ARCH64 && type)
7778 {
7779 /* Structures up to 32 bytes in size are returned in registers. */
7780 if (TREE_CODE (type) == RECORD_TYPE)
7781 {
7782 const int size = int_size_in_bytes (type);
7783 gcc_assert (size <= 32);
7784
7785 return function_arg_record_value (type, mode, 0, true, regbase);
7786 }
7787
7788 /* Unions up to 32 bytes in size are returned in integer registers. */
7789 else if (TREE_CODE (type) == UNION_TYPE)
7790 {
7791 const int size = int_size_in_bytes (type);
7792 gcc_assert (size <= 32);
7793
7794 return function_arg_union_value (size, mode, 0, regbase);
7795 }
7796
7797 /* Vectors up to 32 bytes are returned in FP registers. */
7798 else if (VECTOR_TYPE_P (type) && mode == BLKmode)
7799 {
7800 const int size = int_size_in_bytes (type);
7801 gcc_assert (size <= 32);
7802
7803 return function_arg_vector_value (size, 0, true, SPARC_FP_ARG_FIRST);
7804 }
7805
7806 /* Objects that require it are returned in FP registers. */
7807 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7808 ;
7809
7810 /* All other aggregate types are returned in an integer register in a
7811 mode corresponding to the size of the type. */
7812 else if (AGGREGATE_TYPE_P (type))
7813 {
7814 /* All other aggregate types are passed in an integer register
7815 in a mode corresponding to the size of the type. */
7816 const int size = int_size_in_bytes (type);
7817 gcc_assert (size <= 32);
7818
7819 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7820
7821 /* ??? We probably should have made the same ABI change in
7822 3.4.0 as the one we made for unions. The latter was
7823 required by the SCD though, while the former is not
7824 specified, so we favored compatibility and efficiency.
7825
7826 Now we're stuck for aggregates larger than 16 bytes,
7827 because OImode vanished in the meantime. Let's not
7828 try to be unduly clever, and simply follow the ABI
7829 for unions in that case. */
7830 if (mode == BLKmode)
7831 return function_arg_union_value (size, mode, 0, regbase);
7832 else
7833 mclass = MODE_INT;
7834 }
7835
7836 /* We should only have pointer and integer types at this point. This
7837 must match sparc_promote_function_mode. */
7838 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7839 mode = word_mode;
7840 }
7841
7842 /* We should only have pointer and integer types at this point, except with
7843 -freg-struct-return. This must match sparc_promote_function_mode. */
7844 else if (TARGET_ARCH32
7845 && !(type && AGGREGATE_TYPE_P (type))
7846 && mclass == MODE_INT
7847 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7848 mode = word_mode;
7849
7850 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7851 regno = SPARC_FP_ARG_FIRST;
7852 else
7853 regno = regbase;
7854
7855 return gen_rtx_REG (mode, regno);
7856 }
7857
7858 /* Handle TARGET_FUNCTION_VALUE.
7859 On the SPARC, the value is found in the first "output" register, but the
7860 called function leaves it in the first "input" register. */
7861
7862 static rtx
7863 sparc_function_value (const_tree valtype,
7864 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7865 bool outgoing)
7866 {
7867 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7868 }
7869
7870 /* Handle TARGET_LIBCALL_VALUE. */
7871
7872 static rtx
7873 sparc_libcall_value (machine_mode mode,
7874 const_rtx fun ATTRIBUTE_UNUSED)
7875 {
7876 return sparc_function_value_1 (NULL_TREE, mode, false);
7877 }
7878
7879 /* Handle FUNCTION_VALUE_REGNO_P.
7880 On the SPARC, the first "output" reg is used for integer values, and the
7881 first floating point register is used for floating point values. */
7882
7883 static bool
7884 sparc_function_value_regno_p (const unsigned int regno)
7885 {
7886 return (regno == 8 || (TARGET_FPU && regno == 32));
7887 }
7888
7889 /* Do what is necessary for `va_start'. We look at the current function
7890 to determine if stdarg or varargs is used and return the address of
7891 the first unnamed parameter. */
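
/* For instance, with `int f (int a, ...)' only the first argument word is
   named, so %i1 through %i5 are dumped into their reserved stack slots and
   the address returned below points at the slot of the first anonymous
   argument. */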
7892
7893 static rtx
7894 sparc_builtin_saveregs (void)
7895 {
7896 int first_reg = crtl->args.info.words;
7897 rtx address;
7898 int regno;
7899
7900 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7901 emit_move_insn (gen_rtx_MEM (word_mode,
7902 gen_rtx_PLUS (Pmode,
7903 frame_pointer_rtx,
7904 GEN_INT (FIRST_PARM_OFFSET (0)
7905 + (UNITS_PER_WORD
7906 * regno)))),
7907 gen_rtx_REG (word_mode,
7908 SPARC_INCOMING_INT_ARG_FIRST + regno));
7909
7910 address = gen_rtx_PLUS (Pmode,
7911 frame_pointer_rtx,
7912 GEN_INT (FIRST_PARM_OFFSET (0)
7913 + UNITS_PER_WORD * first_reg));
7914
7915 return address;
7916 }
7917
7918 /* Implement `va_start' for stdarg. */
7919
7920 static void
7921 sparc_va_start (tree valist, rtx nextarg)
7922 {
7923 nextarg = expand_builtin_saveregs ();
7924 std_expand_builtin_va_start (valist, nextarg);
7925 }
7926
7927 /* Implement `va_arg' for stdarg. */
7928
7929 static tree
7930 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7931 gimple_seq *post_p)
7932 {
7933 HOST_WIDE_INT size, rsize, align;
7934 tree addr, incr;
7935 bool indirect;
7936 tree ptrtype = build_pointer_type (type);
7937
7938 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7939 {
7940 indirect = true;
7941 size = rsize = UNITS_PER_WORD;
7942 align = 0;
7943 }
7944 else
7945 {
7946 indirect = false;
7947 size = int_size_in_bytes (type);
7948 rsize = ROUND_UP (size, UNITS_PER_WORD);
7949 align = 0;
7950
7951 if (TARGET_ARCH64)
7952 {
7953 /* For SPARC64, objects requiring 16-byte alignment get it. */
7954 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7955 align = 2 * UNITS_PER_WORD;
7956
7957 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7958 are left-justified in their slots. */
7959 if (AGGREGATE_TYPE_P (type))
7960 {
7961 if (size == 0)
7962 size = rsize = UNITS_PER_WORD;
7963 else
7964 size = rsize;
7965 }
7966 }
7967 }
7968
7969 incr = valist;
7970 if (align)
7971 {
7972 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7973 incr = fold_convert (sizetype, incr);
7974 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7975 size_int (-align));
7976 incr = fold_convert (ptr_type_node, incr);
7977 }
7978
7979 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7980 addr = incr;
7981
7982 if (BYTES_BIG_ENDIAN && size < rsize)
7983 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7984
7985 if (indirect)
7986 {
7987 addr = fold_convert (build_pointer_type (ptrtype), addr);
7988 addr = build_va_arg_indirect_ref (addr);
7989 }
7990
7991 /* If the address isn't aligned properly for the type, we need a temporary.
7992 FIXME: This is inefficient; usually we can do this in registers. */
7993 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7994 {
7995 tree tmp = create_tmp_var (type, "va_arg_tmp");
7996 tree dest_addr = build_fold_addr_expr (tmp);
7997 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7998 3, dest_addr, addr, size_int (rsize));
7999 TREE_ADDRESSABLE (tmp) = 1;
8000 gimplify_and_add (copy, pre_p);
8001 addr = dest_addr;
8002 }
8003
8004 else
8005 addr = fold_convert (ptrtype, addr);
8006
8007 incr = fold_build_pointer_plus_hwi (incr, rsize);
8008 gimplify_assign (valist, incr, post_p);
8009
8010 return build_va_arg_indirect_ref (addr);
8011 }
8012 \f
8013 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
8014 Specify whether the vector mode is supported by the hardware. */
8015
8016 static bool
8017 sparc_vector_mode_supported_p (machine_mode mode)
8018 {
8019 return TARGET_VIS && VECTOR_MODE_P (mode);
8020 }
8021 \f
8022 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
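
/* E.g. with VIS enabled a loop over `short' data is vectorized with
   V4HImode, i.e. four 16-bit lanes in one 64-bit FP register; without VIS
   the hook falls back to word_mode. */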
8023
8024 static machine_mode
8025 sparc_preferred_simd_mode (scalar_mode mode)
8026 {
8027 if (TARGET_VIS)
8028 switch (mode)
8029 {
8030 case E_SImode:
8031 return V2SImode;
8032 case E_HImode:
8033 return V4HImode;
8034 case E_QImode:
8035 return V8QImode;
8036
8037 default:;
8038 }
8039
8040 return word_mode;
8041 }
8042 \f
8043 /* Implement TARGET_CAN_FOLLOW_JUMP. */
8044
8045 static bool
8046 sparc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
8047 {
8048 /* Do not fold unconditional jumps that have been created for crossing
8049 partition boundaries. */
8050 if (CROSSING_JUMP_P (followee) && !CROSSING_JUMP_P (follower))
8051 return false;
8052
8053 return true;
8054 }
8055
8056 /* Return the string to output an unconditional branch to LABEL, which is
8057 the operand number of the label.
8058
8059 DEST is the destination insn (i.e. the label), INSN is the source. */
8060
8061 const char *
8062 output_ubranch (rtx dest, rtx_insn *insn)
8063 {
8064 static char string[64];
8065 bool v9_form = false;
8066 int delta;
8067 char *p;
8068
8069 /* Even if we are trying to use cbcond for this, evaluate
8070 whether we can use V9 branches as our backup plan. */
8071 delta = 5000000;
8072 if (!CROSSING_JUMP_P (insn) && INSN_ADDRESSES_SET_P ())
8073 delta = (INSN_ADDRESSES (INSN_UID (dest))
8074 - INSN_ADDRESSES (INSN_UID (insn)));
8075
8076 /* Leave some instructions for "slop". */
8077 if (TARGET_V9 && delta >= -260000 && delta < 260000)
8078 v9_form = true;
8079
8080 if (TARGET_CBCOND)
8081 {
8082 bool emit_nop = emit_cbcond_nop (insn);
8083 bool far = false;
8084 const char *rval;
8085
8086 if (delta < -500 || delta > 500)
8087 far = true;
8088
8089 if (far)
8090 {
8091 if (v9_form)
8092 rval = "ba,a,pt\t%%xcc, %l0";
8093 else
8094 rval = "b,a\t%l0";
8095 }
8096 else
8097 {
8098 if (emit_nop)
8099 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
8100 else
8101 rval = "cwbe\t%%g0, %%g0, %l0";
8102 }
8103 return rval;
8104 }
8105
8106 if (v9_form)
8107 strcpy (string, "ba%*,pt\t%%xcc, ");
8108 else
8109 strcpy (string, "b%*\t");
8110
8111 p = strchr (string, '\0');
8112 *p++ = '%';
8113 *p++ = 'l';
8114 *p++ = '0';
8115 *p++ = '%';
8116 *p++ = '(';
8117 *p = '\0';
8118
8119 return string;
8120 }
8121
8122 /* Return the string to output a conditional branch to LABEL, which is
8123 the operand number of the label. OP is the conditional expression.
8124 XEXP (OP, 0) is assumed to be a condition code register (integer or
8125 floating point) and its mode specifies what kind of comparison we made.
8126
8127 DEST is the destination insn (i.e. the label), INSN is the source.
8128
8129 REVERSED is nonzero if we should reverse the sense of the comparison.
8130
8131 ANNUL is nonzero if we should generate an annulling branch. */
8132
8133 const char *
8134 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
8135 rtx_insn *insn)
8136 {
8137 static char string[64];
8138 enum rtx_code code = GET_CODE (op);
8139 rtx cc_reg = XEXP (op, 0);
8140 machine_mode mode = GET_MODE (cc_reg);
8141 const char *labelno, *branch;
8142 int spaces = 8, far;
8143 char *p;
8144
8145 /* v9 branches are limited to +-1MB. If it is too far away,
8146 change
8147
8148 bne,pt %xcc, .LC30
8149
8150 to
8151
8152 be,pn %xcc, .+12
8153 nop
8154 ba .LC30
8155
8156 and
8157
8158 fbne,a,pn %fcc2, .LC29
8159
8160 to
8161
8162 fbe,pt %fcc2, .+16
8163 nop
8164 ba .LC29 */
8165
8166 far = TARGET_V9 && (get_attr_length (insn) >= 3);
8167 if (reversed ^ far)
8168 {
8169 /* Reversal of FP compares takes care -- an ordered compare
8170 becomes an unordered compare and vice versa. */
8171 if (mode == CCFPmode || mode == CCFPEmode)
8172 code = reverse_condition_maybe_unordered (code);
8173 else
8174 code = reverse_condition (code);
8175 }
8176
8177 /* Start by writing the branch condition. */
8178 if (mode == CCFPmode || mode == CCFPEmode)
8179 {
8180 switch (code)
8181 {
8182 case NE:
8183 branch = "fbne";
8184 break;
8185 case EQ:
8186 branch = "fbe";
8187 break;
8188 case GE:
8189 branch = "fbge";
8190 break;
8191 case GT:
8192 branch = "fbg";
8193 break;
8194 case LE:
8195 branch = "fble";
8196 break;
8197 case LT:
8198 branch = "fbl";
8199 break;
8200 case UNORDERED:
8201 branch = "fbu";
8202 break;
8203 case ORDERED:
8204 branch = "fbo";
8205 break;
8206 case UNGT:
8207 branch = "fbug";
8208 break;
8209 case UNLT:
8210 branch = "fbul";
8211 break;
8212 case UNEQ:
8213 branch = "fbue";
8214 break;
8215 case UNGE:
8216 branch = "fbuge";
8217 break;
8218 case UNLE:
8219 branch = "fbule";
8220 break;
8221 case LTGT:
8222 branch = "fblg";
8223 break;
8224 default:
8225 gcc_unreachable ();
8226 }
8227
8228 /* ??? !v9: FP branches cannot be preceded by another floating point
8229 insn. Because there is currently no concept of pre-delay slots,
8230 we can fix this only by always emitting a nop before a floating
8231 point branch. */
8232
8233 string[0] = '\0';
8234 if (! TARGET_V9)
8235 strcpy (string, "nop\n\t");
8236 strcat (string, branch);
8237 }
8238 else
8239 {
8240 switch (code)
8241 {
8242 case NE:
8243 if (mode == CCVmode || mode == CCXVmode)
8244 branch = "bvs";
8245 else
8246 branch = "bne";
8247 break;
8248 case EQ:
8249 if (mode == CCVmode || mode == CCXVmode)
8250 branch = "bvc";
8251 else
8252 branch = "be";
8253 break;
8254 case GE:
8255 if (mode == CCNZmode || mode == CCXNZmode)
8256 branch = "bpos";
8257 else
8258 branch = "bge";
8259 break;
8260 case GT:
8261 branch = "bg";
8262 break;
8263 case LE:
8264 branch = "ble";
8265 break;
8266 case LT:
8267 if (mode == CCNZmode || mode == CCXNZmode)
8268 branch = "bneg";
8269 else
8270 branch = "bl";
8271 break;
8272 case GEU:
8273 branch = "bgeu";
8274 break;
8275 case GTU:
8276 branch = "bgu";
8277 break;
8278 case LEU:
8279 branch = "bleu";
8280 break;
8281 case LTU:
8282 branch = "blu";
8283 break;
8284 default:
8285 gcc_unreachable ();
8286 }
8287 strcpy (string, branch);
8288 }
8289 spaces -= strlen (branch);
8290 p = strchr (string, '\0');
8291
8292 /* Now add the annulling, the label, and a possible noop. */
8293 if (annul && ! far)
8294 {
8295 strcpy (p, ",a");
8296 p += 2;
8297 spaces -= 2;
8298 }
8299
8300 if (TARGET_V9)
8301 {
8302 rtx note;
8303 int v8 = 0;
8304
8305 if (! far && insn && INSN_ADDRESSES_SET_P ())
8306 {
8307 int delta = (INSN_ADDRESSES (INSN_UID (dest))
8308 - INSN_ADDRESSES (INSN_UID (insn)));
8309 /* Leave some instructions for "slop". */
8310 if (delta < -260000 || delta >= 260000)
8311 v8 = 1;
8312 }
8313
8314 switch (mode)
8315 {
8316 case E_CCmode:
8317 case E_CCNZmode:
8318 case E_CCCmode:
8319 case E_CCVmode:
8320 labelno = "%%icc, ";
8321 if (v8)
8322 labelno = "";
8323 break;
8324 case E_CCXmode:
8325 case E_CCXNZmode:
8326 case E_CCXCmode:
8327 case E_CCXVmode:
8328 labelno = "%%xcc, ";
8329 gcc_assert (!v8);
8330 break;
8331 case E_CCFPmode:
8332 case E_CCFPEmode:
8333 {
8334 static char v9_fcc_labelno[] = "%%fccX, ";
8335 /* Set the char indicating the number of the fcc reg to use. */
8336 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
8337 labelno = v9_fcc_labelno;
8338 if (v8)
8339 {
8340 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8341 labelno = "";
8342 }
8343 }
8344 break;
8345 default:
8346 gcc_unreachable ();
8347 }
8348
8349 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8350 {
8351 strcpy (p,
8352 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8353 >= profile_probability::even ()) ^ far)
8354 ? ",pt" : ",pn");
8355 p += 3;
8356 spaces -= 3;
8357 }
8358 }
8359 else
8360 labelno = "";
8361
8362 if (spaces > 0)
8363 *p++ = '\t';
8364 else
8365 *p++ = ' ';
8366 strcpy (p, labelno);
8367 p = strchr (p, '\0');
8368 if (far)
8369 {
8370 strcpy (p, ".+12\n\t nop\n\tb\t");
8371 /* Skip the next insn if requested or
8372 if we know that it will be a nop. */
8373 if (annul || ! final_sequence)
8374 p[3] = '6';
8375 p += 14;
8376 }
8377 *p++ = '%';
8378 *p++ = 'l';
8379 *p++ = label + '0';
8380 *p++ = '%';
8381 *p++ = '#';
8382 *p = '\0';
8383
8384 return string;
8385 }
8386
8387 /* Emit a library call comparison between floating point X and Y.
8388 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8389 Return the new operator to be used in the comparison sequence.
8390
8391 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8392 values as arguments instead of the TFmode registers themselves,
8393 which is why we cannot call emit_float_lib_cmp. */
8394
8395 rtx
8396 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8397 {
8398 const char *qpfunc;
8399 rtx slot0, slot1, result, tem, tem2, libfunc;
8400 machine_mode mode;
8401 enum rtx_code new_comparison;
8402
8403 switch (comparison)
8404 {
8405 case EQ:
8406 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8407 break;
8408
8409 case NE:
8410 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8411 break;
8412
8413 case GT:
8414 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8415 break;
8416
8417 case GE:
8418 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8419 break;
8420
8421 case LT:
8422 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8423 break;
8424
8425 case LE:
8426 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8427 break;
8428
8429 case ORDERED:
8430 case UNORDERED:
8431 case UNGT:
8432 case UNLT:
8433 case UNEQ:
8434 case UNGE:
8435 case UNLE:
8436 case LTGT:
8437 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8438 break;
8439
8440 default:
8441 gcc_unreachable ();
8442 }
8443
8444 if (TARGET_ARCH64)
8445 {
8446 if (MEM_P (x))
8447 {
8448 tree expr = MEM_EXPR (x);
8449 if (expr)
8450 mark_addressable (expr);
8451 slot0 = x;
8452 }
8453 else
8454 {
8455 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8456 emit_move_insn (slot0, x);
8457 }
8458
8459 if (MEM_P (y))
8460 {
8461 tree expr = MEM_EXPR (y);
8462 if (expr)
8463 mark_addressable (expr);
8464 slot1 = y;
8465 }
8466 else
8467 {
8468 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8469 emit_move_insn (slot1, y);
8470 }
8471
8472 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8473 emit_library_call (libfunc, LCT_NORMAL,
8474 DImode,
8475 XEXP (slot0, 0), Pmode,
8476 XEXP (slot1, 0), Pmode);
8477 mode = DImode;
8478 }
8479 else
8480 {
8481 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8482 emit_library_call (libfunc, LCT_NORMAL,
8483 SImode,
8484 x, TFmode, y, TFmode);
8485 mode = SImode;
8486 }
8487
8488
8489 /* Immediately move the result of the libcall into a pseudo
8490 register so reload doesn't clobber the value if it needs
8491 the return register for a spill reg. */
8492 result = gen_reg_rtx (mode);
8493 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8494
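/* The library routines encode the relation in their result: 0 for equal,
   1 for less, 2 for greater and 3 for unordered (the Sun quad-emulation
   convention); the tests below decode that value. */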
8495 switch (comparison)
8496 {
8497 default:
8498 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8499 case ORDERED:
8500 case UNORDERED:
8501 new_comparison = (comparison == UNORDERED ? EQ : NE);
8502 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8503 case UNGT:
8504 case UNGE:
8505 new_comparison = (comparison == UNGT ? GT : NE);
8506 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8507 case UNLE:
8508 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8509 case UNLT:
8510 tem = gen_reg_rtx (mode);
8511 if (TARGET_ARCH32)
8512 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8513 else
8514 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8515 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8516 case UNEQ:
8517 case LTGT:
8518 tem = gen_reg_rtx (mode);
8519 if (TARGET_ARCH32)
8520 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8521 else
8522 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8523 tem2 = gen_reg_rtx (mode);
8524 if (TARGET_ARCH32)
8525 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8526 else
8527 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8528 new_comparison = (comparison == UNEQ ? EQ : NE);
8529 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8530 }
8531
8532 gcc_unreachable ();
8533 }
8534
8535 /* Generate an unsigned DImode to FP conversion. This is the same code
8536 optabs would emit if we didn't have TFmode patterns. */
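
/* In C terms the emitted sequence computes, roughly (a paraphrase of the
   code below, not a separate implementation):

     if ((long long) x >= 0)
       result = (FP) x;
     else
       {
	 unsigned long long half = (x >> 1) | (x & 1);
	 result = (FP) half + (FP) half;
       }

   ORing the shifted-out low bit back in keeps the final result correctly
   rounded when the conversion is inexact. */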
8537
8538 void
8539 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8540 {
8541 rtx i0, i1, f0, in, out;
8542
8543 out = operands[0];
8544 in = force_reg (DImode, operands[1]);
8545 rtx_code_label *neglab = gen_label_rtx ();
8546 rtx_code_label *donelab = gen_label_rtx ();
8547 i0 = gen_reg_rtx (DImode);
8548 i1 = gen_reg_rtx (DImode);
8549 f0 = gen_reg_rtx (mode);
8550
8551 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8552
8553 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8554 emit_jump_insn (gen_jump (donelab));
8555 emit_barrier ();
8556
8557 emit_label (neglab);
8558
8559 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8560 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8561 emit_insn (gen_iordi3 (i0, i0, i1));
8562 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8563 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8564
8565 emit_label (donelab);
8566 }
8567
8568 /* Generate an FP to unsigned DImode conversion. This is the same code
8569 optabs would emit if we didn't have TFmode patterns. */
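
/* Roughly, in C terms (a paraphrase of the sequence below), with
   LIMIT = 9223372036854775808.0, i.e. 2**63:

     if (x < LIMIT)
       result = (long long) x;
     else
       result = (unsigned long long) (long long) (x - LIMIT) ^ (1ULL << 63);

   Values at or above 2**63 are reduced into signed range first and the
   sign bit is put back with the XOR. */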
8570
8571 void
8572 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8573 {
8574 rtx i0, i1, f0, in, out, limit;
8575
8576 out = operands[0];
8577 in = force_reg (mode, operands[1]);
8578 rtx_code_label *neglab = gen_label_rtx ();
8579 rtx_code_label *donelab = gen_label_rtx ();
8580 i0 = gen_reg_rtx (DImode);
8581 i1 = gen_reg_rtx (DImode);
8582 limit = gen_reg_rtx (mode);
8583 f0 = gen_reg_rtx (mode);
8584
8585 emit_move_insn (limit,
8586 const_double_from_real_value (
8587 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8588 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8589
8590 emit_insn (gen_rtx_SET (out,
8591 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8592 emit_jump_insn (gen_jump (donelab));
8593 emit_barrier ();
8594
8595 emit_label (neglab);
8596
8597 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8598 emit_insn (gen_rtx_SET (i0,
8599 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8600 emit_insn (gen_movdi (i1, const1_rtx));
8601 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8602 emit_insn (gen_xordi3 (out, i0, i1));
8603
8604 emit_label (donelab);
8605 }
8606
8607 /* Return the string to output a compare and branch instruction to DEST.
8608 DEST is the destination insn (i.e. the label), INSN is the source,
8609 and OP is the conditional expression. */
8610
8611 const char *
8612 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8613 {
8614 machine_mode mode = GET_MODE (XEXP (op, 0));
8615 enum rtx_code code = GET_CODE (op);
8616 const char *cond_str, *tmpl;
8617 int far, emit_nop, len;
8618 static char string[64];
8619 char size_char;
8620
8621 /* Compare and Branch is limited to +-2KB. If it is too far away,
8622 change
8623
8624 cxbne X, Y, .LC30
8625
8626 to
8627
8628 cxbe X, Y, .+16
8629 nop
8630 ba,pt xcc, .LC30
8631 nop */
8632
8633 len = get_attr_length (insn);
8634
8635 far = len == 4;
8636 emit_nop = len == 2;
8637
8638 if (far)
8639 code = reverse_condition (code);
8640
8641 size_char = ((mode == SImode) ? 'w' : 'x');
8642
8643 switch (code)
8644 {
8645 case NE:
8646 cond_str = "ne";
8647 break;
8648
8649 case EQ:
8650 cond_str = "e";
8651 break;
8652
8653 case GE:
8654 cond_str = "ge";
8655 break;
8656
8657 case GT:
8658 cond_str = "g";
8659 break;
8660
8661 case LE:
8662 cond_str = "le";
8663 break;
8664
8665 case LT:
8666 cond_str = "l";
8667 break;
8668
8669 case GEU:
8670 cond_str = "cc";
8671 break;
8672
8673 case GTU:
8674 cond_str = "gu";
8675 break;
8676
8677 case LEU:
8678 cond_str = "leu";
8679 break;
8680
8681 case LTU:
8682 cond_str = "cs";
8683 break;
8684
8685 default:
8686 gcc_unreachable ();
8687 }
8688
8689 if (far)
8690 {
8691 int veryfar = 1, delta;
8692
8693 if (INSN_ADDRESSES_SET_P ())
8694 {
8695 delta = (INSN_ADDRESSES (INSN_UID (dest))
8696 - INSN_ADDRESSES (INSN_UID (insn)));
8697 /* Leave some instructions for "slop". */
8698 if (delta >= -260000 && delta < 260000)
8699 veryfar = 0;
8700 }
8701
8702 if (veryfar)
8703 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8704 else
8705 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8706 }
8707 else
8708 {
8709 if (emit_nop)
8710 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8711 else
8712 tmpl = "c%cb%s\t%%1, %%2, %%3";
8713 }
8714
8715 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8716
8717 return string;
8718 }
8719
8720 /* Return the string to output a conditional branch to LABEL, testing
8721 register REG. LABEL is the operand number of the label; REG is the
8722 operand number of the reg. OP is the conditional expression. The mode
8723 of REG says what kind of comparison we made.
8724
8725 DEST is the destination insn (i.e. the label), INSN is the source.
8726
8727 REVERSED is nonzero if we should reverse the sense of the comparison.
8728
8729 ANNUL is nonzero if we should generate an annulling branch. */
8730
8731 const char *
8732 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8733 int annul, rtx_insn *insn)
8734 {
8735 static char string[64];
8736 enum rtx_code code = GET_CODE (op);
8737 machine_mode mode = GET_MODE (XEXP (op, 0));
8738 rtx note;
8739 int far;
8740 char *p;
8741
8742 /* Branches on a register are limited to +-128KB. If it is too far away,
8743 change
8744
8745 brnz,pt %g1, .LC30
8746
8747 to
8748
8749 brz,pn %g1, .+12
8750 nop
8751 ba,pt %xcc, .LC30
8752
8753 and
8754
8755 brgez,a,pn %o1, .LC29
8756
8757 to
8758
8759 brlz,pt %o1, .+16
8760 nop
8761 ba,pt %xcc, .LC29 */
8762
8763 far = get_attr_length (insn) >= 3;
8764
8765 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8766 if (reversed ^ far)
8767 code = reverse_condition (code);
8768
8769 /* Only 64-bit versions of these instructions exist. */
8770 gcc_assert (mode == DImode);
8771
8772 /* Start by writing the branch condition. */
8773
8774 switch (code)
8775 {
8776 case NE:
8777 strcpy (string, "brnz");
8778 break;
8779
8780 case EQ:
8781 strcpy (string, "brz");
8782 break;
8783
8784 case GE:
8785 strcpy (string, "brgez");
8786 break;
8787
8788 case LT:
8789 strcpy (string, "brlz");
8790 break;
8791
8792 case LE:
8793 strcpy (string, "brlez");
8794 break;
8795
8796 case GT:
8797 strcpy (string, "brgz");
8798 break;
8799
8800 default:
8801 gcc_unreachable ();
8802 }
8803
8804 p = strchr (string, '\0');
8805
8806 /* Now add the annulling, reg, label, and nop. */
8807 if (annul && ! far)
8808 {
8809 strcpy (p, ",a");
8810 p += 2;
8811 }
8812
8813 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8814 {
8815 strcpy (p,
8816 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8817 >= profile_probability::even ()) ^ far)
8818 ? ",pt" : ",pn");
8819 p += 3;
8820 }
8821
8822 *p = p < string + 8 ? '\t' : ' ';
8823 p++;
8824 *p++ = '%';
8825 *p++ = '0' + reg;
8826 *p++ = ',';
8827 *p++ = ' ';
8828 if (far)
8829 {
8830 int veryfar = 1, delta;
8831
8832 if (INSN_ADDRESSES_SET_P ())
8833 {
8834 delta = (INSN_ADDRESSES (INSN_UID (dest))
8835 - INSN_ADDRESSES (INSN_UID (insn)));
8836 /* Leave some instructions for "slop". */
8837 if (delta >= -260000 && delta < 260000)
8838 veryfar = 0;
8839 }
8840
8841 strcpy (p, ".+12\n\t nop\n\t");
8842 /* Skip the next insn if requested or
8843 if we know that it will be a nop. */
8844 if (annul || ! final_sequence)
8845 p[3] = '6';
8846 p += 12;
8847 if (veryfar)
8848 {
8849 strcpy (p, "b\t");
8850 p += 2;
8851 }
8852 else
8853 {
8854 strcpy (p, "ba,pt\t%%xcc, ");
8855 p += 13;
8856 }
8857 }
8858 *p++ = '%';
8859 *p++ = 'l';
8860 *p++ = '0' + label;
8861 *p++ = '%';
8862 *p++ = '#';
8863 *p = '\0';
8864
8865 return string;
8866 }
8867
8868 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8869 Such instructions cannot be used in the delay slot of the return insn on V9.
8870 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
8871 counterparts. */
8872
8873 static int
8874 epilogue_renumber (register rtx *where, int test)
8875 {
8876 register const char *fmt;
8877 register int i;
8878 register enum rtx_code code;
8879
8880 if (*where == 0)
8881 return 0;
8882
8883 code = GET_CODE (*where);
8884
8885 switch (code)
8886 {
8887 case REG:
8888 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8889 return 1;
8890 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8891 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8892 /* fallthrough */
8893 case SCRATCH:
8894 case CC0:
8895 case PC:
8896 case CONST_INT:
8897 case CONST_WIDE_INT:
8898 case CONST_DOUBLE:
8899 return 0;
8900
8901 /* Do not replace the frame pointer with the stack pointer because
8902 it can cause the delayed instruction to load below the stack.
8903 This occurs when instructions like:
8904
8905 (set (reg/i:SI 24 %i0)
8906 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8907 (const_int -20 [0xffffffec])) 0))
8908
8909 are in the return delayed slot. */
8910 case PLUS:
8911 if (GET_CODE (XEXP (*where, 0)) == REG
8912 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8913 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8914 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8915 return 1;
8916 break;
8917
8918 case MEM:
8919 if (SPARC_STACK_BIAS
8920 && GET_CODE (XEXP (*where, 0)) == REG
8921 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8922 return 1;
8923 break;
8924
8925 default:
8926 break;
8927 }
8928
8929 fmt = GET_RTX_FORMAT (code);
8930
8931 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8932 {
8933 if (fmt[i] == 'E')
8934 {
8935 register int j;
8936 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8937 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8938 return 1;
8939 }
8940 else if (fmt[i] == 'e'
8941 && epilogue_renumber (&(XEXP (*where, i)), test))
8942 return 1;
8943 }
8944 return 0;
8945 }
8946 \f
8947 /* Leaf functions and non-leaf functions have different needs. */
8948
8949 static const int
8950 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8951
8952 static const int
8953 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8954
8955 static const int *const reg_alloc_orders[] = {
8956 reg_leaf_alloc_order,
8957 reg_nonleaf_alloc_order};
8958
8959 void
8960 order_regs_for_local_alloc (void)
8961 {
8962 static int last_order_nonleaf = 1;
8963
8964 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8965 {
8966 last_order_nonleaf = !last_order_nonleaf;
8967 memcpy ((char *) reg_alloc_order,
8968 (const char *) reg_alloc_orders[last_order_nonleaf],
8969 FIRST_PSEUDO_REGISTER * sizeof (int));
8970 }
8971 }
8972 \f
8973 /* Return 1 if REG and MEM are legitimate enough to allow the various
8974 MEM<-->REG splits to be run. */
8975
8976 int
8977 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8978 {
8979 /* Punt if we are here by mistake. */
8980 gcc_assert (reload_completed);
8981
8982 /* We must have an offsettable memory reference. */
8983 if (!offsettable_memref_p (mem))
8984 return 0;
8985
8986 /* If we have legitimate args for ldd/std, we do not want
8987 the split to happen. */
8988 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8989 return 0;
8990
8991 /* Success. */
8992 return 1;
8993 }
8994
8995 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8996
8997 void
8998 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8999 {
9000 rtx high_part = gen_highpart (mode, dest);
9001 rtx low_part = gen_lowpart (mode, dest);
9002 rtx word0 = adjust_address (src, mode, 0);
9003 rtx word1 = adjust_address (src, mode, 4);
9004
9005 if (reg_overlap_mentioned_p (high_part, word1))
9006 {
9007 emit_move_insn_1 (low_part, word1);
9008 emit_move_insn_1 (high_part, word0);
9009 }
9010 else
9011 {
9012 emit_move_insn_1 (high_part, word0);
9013 emit_move_insn_1 (low_part, word1);
9014 }
9015 }
9016
9017 /* Split a MEM <-- REG move into a pair of moves in MODE. */
9018
9019 void
9020 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
9021 {
9022 rtx word0 = adjust_address (dest, mode, 0);
9023 rtx word1 = adjust_address (dest, mode, 4);
9024 rtx high_part = gen_highpart (mode, src);
9025 rtx low_part = gen_lowpart (mode, src);
9026
9027 emit_move_insn_1 (word0, high_part);
9028 emit_move_insn_1 (word1, low_part);
9029 }
9030
9031 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
9032
9033 int
9034 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
9035 {
9036 /* Punt if we are here by mistake. */
9037 gcc_assert (reload_completed);
9038
9039 if (GET_CODE (reg1) == SUBREG)
9040 reg1 = SUBREG_REG (reg1);
9041 if (GET_CODE (reg1) != REG)
9042 return 0;
9043 const int regno1 = REGNO (reg1);
9044
9045 if (GET_CODE (reg2) == SUBREG)
9046 reg2 = SUBREG_REG (reg2);
9047 if (GET_CODE (reg2) != REG)
9048 return 0;
9049 const int regno2 = REGNO (reg2);
9050
9051 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
9052 return 1;
9053
9054 if (TARGET_VIS3)
9055 {
9056 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
9057 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
9058 return 1;
9059 }
9060
9061 return 0;
9062 }
9063
9064 /* Split a REG <--> REG move into a pair of moves in MODE. */
9065
9066 void
9067 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
9068 {
9069 rtx dest1 = gen_highpart (mode, dest);
9070 rtx dest2 = gen_lowpart (mode, dest);
9071 rtx src1 = gen_highpart (mode, src);
9072 rtx src2 = gen_lowpart (mode, src);
9073
9074 /* Now emit using the real source and destination we found, swapping
9075 the order if we detect overlap. */
9076 if (reg_overlap_mentioned_p (dest1, src2))
9077 {
9078 emit_move_insn_1 (dest2, src2);
9079 emit_move_insn_1 (dest1, src1);
9080 }
9081 else
9082 {
9083 emit_move_insn_1 (dest1, src1);
9084 emit_move_insn_1 (dest2, src2);
9085 }
9086 }
9087
9088 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
9089 This makes them candidates for using ldd and std insns.
9090
9091 Note reg1 and reg2 *must* be hard registers. */
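
/* E.g. %f0/%f1 qualify (even first register, consecutive numbers) while
   %f1/%f2 do not, and on V9 an integer pair such as %o0/%o1 is rejected
   because integer ldd/std are deprecated there. */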
9092
9093 int
9094 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
9095 {
9096 /* We might have been passed a SUBREG. */
9097 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
9098 return 0;
9099
9100 if (REGNO (reg1) % 2 != 0)
9101 return 0;
9102
9103 /* Integer ldd is deprecated in SPARC V9. */
9104 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
9105 return 0;
9106
9107 return (REGNO (reg1) == REGNO (reg2) - 1);
9108 }
9109
9110 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
9111 an ldd or std insn.
9112
9113 This can only happen when addr1 and addr2, the addresses in mem1
9114 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
9115 addr1 must also be aligned on a 64-bit boundary.
9116
9117 Also, if dependent_reg_rtx is not null, it should not be used to
9118 compute the address for mem1, i.e. we cannot optimize a sequence
9119 like:
9120 ld [%o0], %o0
9121 ld [%o0 + 4], %o1
9122 to
9123 ldd [%o0], %o0
9124 nor:
9125 ld [%g3 + 4], %g3
9126 ld [%g3], %g2
9127 to
9128 ldd [%g3], %g2
9129
9130 But, note that the transformation from:
9131 ld [%g2 + 4], %g3
9132 ld [%g2], %g2
9133 to
9134 ldd [%g2], %g2
9135 is perfectly fine. Thus, the peephole2 patterns always pass us
9136 the destination register of the first load, never the second one.
9137
9138 For stores we don't have a similar problem, so dependent_reg_rtx is
9139 NULL_RTX. */
9140
9141 int
9142 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
9143 {
9144 rtx addr1, addr2;
9145 unsigned int reg1;
9146 HOST_WIDE_INT offset1;
9147
9148 /* The mems cannot be volatile. */
9149 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
9150 return 0;
9151
9152 /* MEM1 should be aligned on a 64-bit boundary. */
9153 if (MEM_ALIGN (mem1) < 64)
9154 return 0;
9155
9156 addr1 = XEXP (mem1, 0);
9157 addr2 = XEXP (mem2, 0);
9158
9159 /* Extract a register number and offset (if used) from the first addr. */
9160 if (GET_CODE (addr1) == PLUS)
9161 {
9162 /* If not a REG, return zero. */
9163 if (GET_CODE (XEXP (addr1, 0)) != REG)
9164 return 0;
9165 else
9166 {
9167 reg1 = REGNO (XEXP (addr1, 0));
9168 /* The offset must be constant! */
9169 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
9170 return 0;
9171 offset1 = INTVAL (XEXP (addr1, 1));
9172 }
9173 }
9174 else if (GET_CODE (addr1) != REG)
9175 return 0;
9176 else
9177 {
9178 reg1 = REGNO (addr1);
9179 /* This was a simple (mem (reg)) expression. Offset is 0. */
9180 offset1 = 0;
9181 }
9182
9183 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
9184 if (GET_CODE (addr2) != PLUS)
9185 return 0;
9186
9187 if (GET_CODE (XEXP (addr2, 0)) != REG
9188 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
9189 return 0;
9190
9191 if (reg1 != REGNO (XEXP (addr2, 0)))
9192 return 0;
9193
9194 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
9195 return 0;
9196
9197 /* The first offset must be evenly divisible by 8 to ensure the
9198 address is 64-bit aligned. */
9199 if (offset1 % 8 != 0)
9200 return 0;
9201
9202 /* The offset for the second addr must be 4 more than the first addr. */
9203 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
9204 return 0;
9205
9206 /* All the tests passed. addr1 and addr2 are valid for ldd and std
9207 instructions. */
9208 return 1;
9209 }
9210
9211 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
9212
9213 rtx
9214 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
9215 {
9216 rtx x = widen_memory_access (mem1, mode, 0);
9217 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
9218 return x;
9219 }
9220
9221 /* Return 1 if reg is a pseudo, or is the first register in
9222 a hard register pair. This makes it suitable for use in
9223 ldd and std insns. */
9224
9225 int
9226 register_ok_for_ldd (rtx reg)
9227 {
9228 /* We might have been passed a SUBREG. */
9229 if (!REG_P (reg))
9230 return 0;
9231
9232 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
9233 return (REGNO (reg) % 2 == 0);
9234
9235 return 1;
9236 }
9237
9238 /* Return 1 if OP, a MEM, has an address which is known to be
9239 aligned to an 8-byte boundary. */
9240
9241 int
9242 memory_ok_for_ldd (rtx op)
9243 {
9244 /* In 64-bit mode, we assume that the address is word-aligned. */
9245 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
9246 return 0;
9247
9248 if (! can_create_pseudo_p ()
9249 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
9250 return 0;
9251
9252 return 1;
9253 }
9254 \f
9255 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
9256
9257 static bool
9258 sparc_print_operand_punct_valid_p (unsigned char code)
9259 {
9260 if (code == '#'
9261 || code == '*'
9262 || code == '('
9263 || code == ')'
9264 || code == '_'
9265 || code == '&')
9266 return true;
9267
9268 return false;
9269 }
9270
9271 /* Implement TARGET_PRINT_OPERAND.
9272 Print operand X (an rtx) in assembler syntax to file FILE.
9273 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
9274 For `%' followed by punctuation, CODE is the punctuation and X is null. */
9275
9276 static void
9277 sparc_print_operand (FILE *file, rtx x, int code)
9278 {
9279 const char *s;
9280
9281 switch (code)
9282 {
9283 case '#':
9284 /* Output an insn in a delay slot. */
9285 if (final_sequence)
9286 sparc_indent_opcode = 1;
9287 else
9288 fputs ("\n\t nop", file);
9289 return;
9290 case '*':
9291 /* Output an annul flag if there's nothing for the delay slot and we
9292 are optimizing. This is always used with '(' below.
9293 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
9294 this is a dbx bug. So, we only do this when optimizing.
9295 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
9296 Always emit a nop in case the next instruction is a branch. */
9297 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
9298 fputs (",a", file);
9299 return;
9300 case '(':
9301 /* Output a 'nop' if there's nothing for the delay slot and we are
9302 not optimizing. This is always used with '*' above. */
9303 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
9304 fputs ("\n\t nop", file);
9305 else if (final_sequence)
9306 sparc_indent_opcode = 1;
9307 return;
9308 case ')':
9309 /* Output the right displacement from the saved PC on function return.
9310 The caller may have placed an "unimp" insn immediately after the call
9311 so we have to account for it. This insn is used in the 32-bit ABI
9312 when calling a function that returns a non zero-sized structure. The
9313 64-bit ABI doesn't have it. Be careful to have this test be the same
9314 as that for the call. The exception is when sparc_std_struct_return
9315 is enabled, the psABI is followed exactly and the adjustment is made
9316 by the code in sparc_struct_value_rtx. The call emitted is the same
9317 when sparc_std_struct_return is enabled. */
9318 if (!TARGET_ARCH64
9319 && cfun->returns_struct
9320 && !sparc_std_struct_return
9321 && DECL_SIZE (DECL_RESULT (current_function_decl))
9322 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
9323 == INTEGER_CST
9324 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
9325 fputs ("12", file);
9326 else
9327 fputc ('8', file);
9328 return;
9329 case '_':
9330 /* Output the Embedded Medium/Anywhere code model base register. */
9331 fputs (EMBMEDANY_BASE_REG, file);
9332 return;
9333 case '&':
9334 /* Print some local dynamic TLS name. */
9335 if (const char *name = get_some_local_dynamic_name ())
9336 assemble_name (file, name);
9337 else
9338 output_operand_lossage ("'%%&' used without any "
9339 "local dynamic TLS references");
9340 return;
9341
9342 case 'Y':
9343 /* Adjust the operand to take into account a RESTORE operation. */
9344 if (GET_CODE (x) == CONST_INT)
9345 break;
9346 else if (GET_CODE (x) != REG)
9347 output_operand_lossage ("invalid %%Y operand");
9348 else if (REGNO (x) < 8)
9349 fputs (reg_names[REGNO (x)], file);
9350 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9351 fputs (reg_names[REGNO (x)-16], file);
9352 else
9353 output_operand_lossage ("invalid %%Y operand");
9354 return;
9355 case 'L':
9356 /* Print out the low order register name of a register pair. */
9357 if (WORDS_BIG_ENDIAN)
9358 fputs (reg_names[REGNO (x)+1], file);
9359 else
9360 fputs (reg_names[REGNO (x)], file);
9361 return;
9362 case 'H':
9363 /* Print out the high order register name of a register pair. */
9364 if (WORDS_BIG_ENDIAN)
9365 fputs (reg_names[REGNO (x)], file);
9366 else
9367 fputs (reg_names[REGNO (x)+1], file);
9368 return;
9369 case 'R':
9370 /* Print out the second register name of a register pair or quad.
9371 I.e., R (%o0) => %o1. */
9372 fputs (reg_names[REGNO (x)+1], file);
9373 return;
9374 case 'S':
9375 /* Print out the third register name of a register quad.
9376 I.e., S (%o0) => %o2. */
9377 fputs (reg_names[REGNO (x)+2], file);
9378 return;
9379 case 'T':
9380 /* Print out the fourth register name of a register quad.
9381 I.e., T (%o0) => %o3. */
9382 fputs (reg_names[REGNO (x)+3], file);
9383 return;
9384 case 'x':
9385 /* Print a condition code register. */
9386 if (REGNO (x) == SPARC_ICC_REG)
9387 {
9388 switch (GET_MODE (x))
9389 {
9390 case E_CCmode:
9391 case E_CCNZmode:
9392 case E_CCCmode:
9393 case E_CCVmode:
9394 s = "%icc";
9395 break;
9396 case E_CCXmode:
9397 case E_CCXNZmode:
9398 case E_CCXCmode:
9399 case E_CCXVmode:
9400 s = "%xcc";
9401 break;
9402 default:
9403 gcc_unreachable ();
9404 }
9405 fputs (s, file);
9406 }
9407 else
9408 /* %fccN register */
9409 fputs (reg_names[REGNO (x)], file);
9410 return;
9411 case 'm':
9412 /* Print the operand's address only. */
9413 output_address (GET_MODE (x), XEXP (x, 0));
9414 return;
9415 case 'r':
9416 /* In this case we need a register. Use %g0 if the
9417 operand is const0_rtx. */
9418 if (x == const0_rtx
9419 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9420 {
9421 fputs ("%g0", file);
9422 return;
9423 }
9424 else
9425 break;
9426
9427 case 'A':
9428 switch (GET_CODE (x))
9429 {
9430 case IOR:
9431 s = "or";
9432 break;
9433 case AND:
9434 s = "and";
9435 break;
9436 case XOR:
9437 s = "xor";
9438 break;
9439 default:
9440 output_operand_lossage ("invalid %%A operand");
9441 s = "";
9442 break;
9443 }
9444 fputs (s, file);
9445 return;
9446
9447 case 'B':
9448 switch (GET_CODE (x))
9449 {
9450 case IOR:
9451 s = "orn";
9452 break;
9453 case AND:
9454 s = "andn";
9455 break;
9456 case XOR:
9457 s = "xnor";
9458 break;
9459 default:
9460 output_operand_lossage ("invalid %%B operand");
9461 s = "";
9462 break;
9463 }
9464 fputs (s, file);
9465 return;
9466
9467 /* This is used by the conditional move instructions. */
9468 case 'C':
9469 {
9470 machine_mode mode = GET_MODE (XEXP (x, 0));
9471 switch (GET_CODE (x))
9472 {
9473 case NE:
9474 if (mode == CCVmode || mode == CCXVmode)
9475 s = "vs";
9476 else
9477 s = "ne";
9478 break;
9479 case EQ:
9480 if (mode == CCVmode || mode == CCXVmode)
9481 s = "vc";
9482 else
9483 s = "e";
9484 break;
9485 case GE:
9486 if (mode == CCNZmode || mode == CCXNZmode)
9487 s = "pos";
9488 else
9489 s = "ge";
9490 break;
9491 case GT:
9492 s = "g";
9493 break;
9494 case LE:
9495 s = "le";
9496 break;
9497 case LT:
9498 if (mode == CCNZmode || mode == CCXNZmode)
9499 s = "neg";
9500 else
9501 s = "l";
9502 break;
9503 case GEU:
9504 s = "geu";
9505 break;
9506 case GTU:
9507 s = "gu";
9508 break;
9509 case LEU:
9510 s = "leu";
9511 break;
9512 case LTU:
9513 s = "lu";
9514 break;
9515 case LTGT:
9516 s = "lg";
9517 break;
9518 case UNORDERED:
9519 s = "u";
9520 break;
9521 case ORDERED:
9522 s = "o";
9523 break;
9524 case UNLT:
9525 s = "ul";
9526 break;
9527 case UNLE:
9528 s = "ule";
9529 break;
9530 case UNGT:
9531 s = "ug";
9532 break;
9533 case UNGE:
9534 s = "uge"
9535 ; break;
9536 case UNEQ:
9537 s = "ue";
9538 break;
9539 default:
9540 output_operand_lossage ("invalid %%C operand");
9541 s = "";
9542 break;
9543 }
9544 fputs (s, file);
9545 return;
9546 }
9547
9548 /* These are used by the movr instruction pattern. */
9549 case 'D':
9550 {
9551 switch (GET_CODE (x))
9552 {
9553 case NE:
9554 s = "ne";
9555 break;
9556 case EQ:
9557 s = "e";
9558 break;
9559 case GE:
9560 s = "gez";
9561 break;
9562 case LT:
9563 s = "lz";
9564 break;
9565 case LE:
9566 s = "lez";
9567 break;
9568 case GT:
9569 s = "gz";
9570 break;
9571 default:
9572 output_operand_lossage ("invalid %%D operand");
9573 s = "";
9574 break;
9575 }
9576 fputs (s, file);
9577 return;
9578 }
9579
9580 case 'b':
9581 {
9582 /* Print a sign-extended character. */
9583 int i = trunc_int_for_mode (INTVAL (x), QImode);
9584 fprintf (file, "%d", i);
9585 return;
9586 }
9587
9588 case 'f':
9589 /* Operand must be a MEM; write its address. */
9590 if (GET_CODE (x) != MEM)
9591 output_operand_lossage ("invalid %%f operand");
9592 output_address (GET_MODE (x), XEXP (x, 0));
9593 return;
9594
9595 case 's':
9596 {
9597 /* Print a sign-extended 32-bit value. */
9598 HOST_WIDE_INT i;
9599 if (GET_CODE (x) == CONST_INT)
9600 i = INTVAL (x);
9601 else
9602 {
9603 output_operand_lossage ("invalid %%s operand");
9604 return;
9605 }
9606 i = trunc_int_for_mode (i, SImode);
9607 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9608 return;
9609 }
9610
9611 case 0:
9612 /* Do nothing special. */
9613 break;
9614
9615 default:
9616 /* Undocumented flag. */
9617 output_operand_lossage ("invalid operand output code");
9618 }
9619
9620 if (GET_CODE (x) == REG)
9621 fputs (reg_names[REGNO (x)], file);
9622 else if (GET_CODE (x) == MEM)
9623 {
9624 fputc ('[', file);
9625 /* Poor Sun assembler doesn't understand absolute addressing. */
9626 if (CONSTANT_P (XEXP (x, 0)))
9627 fputs ("%g0+", file);
9628 output_address (GET_MODE (x), XEXP (x, 0));
9629 fputc (']', file);
9630 }
9631 else if (GET_CODE (x) == HIGH)
9632 {
9633 fputs ("%hi(", file);
9634 output_addr_const (file, XEXP (x, 0));
9635 fputc (')', file);
9636 }
9637 else if (GET_CODE (x) == LO_SUM)
9638 {
9639 sparc_print_operand (file, XEXP (x, 0), 0);
9640 if (TARGET_CM_MEDMID)
9641 fputs ("+%l44(", file);
9642 else
9643 fputs ("+%lo(", file);
9644 output_addr_const (file, XEXP (x, 1));
9645 fputc (')', file);
9646 }
9647 else if (GET_CODE (x) == CONST_DOUBLE)
9648 output_operand_lossage ("floating-point constant not a valid immediate operand");
9649 else
9650 output_addr_const (file, x);
9651 }
9652
9653 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9654
9655 static void
9656 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9657 {
9658 register rtx base, index = 0;
9659 int offset = 0;
9660 register rtx addr = x;
9661
9662 if (REG_P (addr))
9663 fputs (reg_names[REGNO (addr)], file);
9664 else if (GET_CODE (addr) == PLUS)
9665 {
9666 if (CONST_INT_P (XEXP (addr, 0)))
9667 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9668 else if (CONST_INT_P (XEXP (addr, 1)))
9669 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9670 else
9671 base = XEXP (addr, 0), index = XEXP (addr, 1);
9672 if (GET_CODE (base) == LO_SUM)
9673 {
9674 gcc_assert (USE_AS_OFFSETABLE_LO10
9675 && TARGET_ARCH64
9676 && ! TARGET_CM_MEDMID);
9677 output_operand (XEXP (base, 0), 0);
9678 fputs ("+%lo(", file);
9679 output_address (VOIDmode, XEXP (base, 1));
9680 fprintf (file, ")+%d", offset);
9681 }
9682 else
9683 {
9684 fputs (reg_names[REGNO (base)], file);
9685 if (index == 0)
9686 fprintf (file, "%+d", offset);
9687 else if (REG_P (index))
9688 fprintf (file, "+%s", reg_names[REGNO (index)]);
9689 else if (GET_CODE (index) == SYMBOL_REF
9690 || GET_CODE (index) == LABEL_REF
9691 || GET_CODE (index) == CONST)
9692 fputc ('+', file), output_addr_const (file, index);
9693 else gcc_unreachable ();
9694 }
9695 }
9696 else if (GET_CODE (addr) == MINUS
9697 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9698 {
9699 output_addr_const (file, XEXP (addr, 0));
9700 fputs ("-(", file);
9701 output_addr_const (file, XEXP (addr, 1));
9702 fputs ("-.)", file);
9703 }
9704 else if (GET_CODE (addr) == LO_SUM)
9705 {
9706 output_operand (XEXP (addr, 0), 0);
9707 if (TARGET_CM_MEDMID)
9708 fputs ("+%l44(", file);
9709 else
9710 fputs ("+%lo(", file);
9711 output_address (VOIDmode, XEXP (addr, 1));
9712 fputc (')', file);
9713 }
9714 else if (flag_pic
9715 && GET_CODE (addr) == CONST
9716 && GET_CODE (XEXP (addr, 0)) == MINUS
9717 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9718 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9719 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9720 {
9721 addr = XEXP (addr, 0);
9722 output_addr_const (file, XEXP (addr, 0));
9723 /* Group the args of the second CONST in parentheses. */
9724 fputs ("-(", file);
9725 /* Skip past the second CONST--it does nothing for us. */
9726 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9727 /* Close the parenthesis. */
9728 fputc (')', file);
9729 }
9730 else
9731 {
9732 output_addr_const (file, addr);
9733 }
9734 }
9735 \f
9736 /* Target hook for assembling integer objects. The sparc version has
9737 special handling for aligned DI-mode objects. */
9738
9739 static bool
9740 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9741 {
9742 /* ??? We only output .xword's for symbols and only then in environments
9743 where the assembler can handle them. */
9744 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9745 {
9746 if (TARGET_V9)
9747 {
9748 assemble_integer_with_op ("\t.xword\t", x);
9749 return true;
9750 }
9751 else
9752 {
9753 assemble_aligned_integer (4, const0_rtx);
9754 assemble_aligned_integer (4, x);
9755 return true;
9756 }
9757 }
9758 return default_assemble_integer (x, size, aligned_p);
9759 }
9760 \f
9761 /* Return the value of a code used in the .proc pseudo-op that says
9762 what kind of result this function returns. For non-C types, we pick
9763 the closest C type. */
9764
9765 #ifndef SHORT_TYPE_SIZE
9766 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9767 #endif
9768
9769 #ifndef INT_TYPE_SIZE
9770 #define INT_TYPE_SIZE BITS_PER_WORD
9771 #endif
9772
9773 #ifndef LONG_TYPE_SIZE
9774 #define LONG_TYPE_SIZE BITS_PER_WORD
9775 #endif
9776
9777 #ifndef LONG_LONG_TYPE_SIZE
9778 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9779 #endif
9780
9781 #ifndef FLOAT_TYPE_SIZE
9782 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9783 #endif
9784
9785 #ifndef DOUBLE_TYPE_SIZE
9786 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9787 #endif
9788
9789 #ifndef LONG_DOUBLE_TYPE_SIZE
9790 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9791 #endif
9792
9793 unsigned long
9794 sparc_type_code (register tree type)
9795 {
9796 register unsigned long qualifiers = 0;
9797 register unsigned shift;
9798
9799 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9800 setting more, since some assemblers will give an error for this. Also,
9801 we must be careful to avoid shifts of 32 bits or more to avoid getting
9802 unpredictable results. */
9803
9804 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9805 {
9806 switch (TREE_CODE (type))
9807 {
9808 case ERROR_MARK:
9809 return qualifiers;
9810
9811 case ARRAY_TYPE:
9812 qualifiers |= (3 << shift);
9813 break;
9814
9815 case FUNCTION_TYPE:
9816 case METHOD_TYPE:
9817 qualifiers |= (2 << shift);
9818 break;
9819
9820 case POINTER_TYPE:
9821 case REFERENCE_TYPE:
9822 case OFFSET_TYPE:
9823 qualifiers |= (1 << shift);
9824 break;
9825
9826 case RECORD_TYPE:
9827 return (qualifiers | 8);
9828
9829 case UNION_TYPE:
9830 case QUAL_UNION_TYPE:
9831 return (qualifiers | 9);
9832
9833 case ENUMERAL_TYPE:
9834 return (qualifiers | 10);
9835
9836 case VOID_TYPE:
9837 return (qualifiers | 16);
9838
9839 case INTEGER_TYPE:
9840 /* If this is a range type, consider it to be the underlying
9841 type. */
9842 if (TREE_TYPE (type) != 0)
9843 break;
9844
9845 /* Carefully distinguish all the standard types of C,
9846 without messing up if the language is not C. We do this by
9847 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9848 look at both the names and the above fields, but that's redundant.
9849 Any type whose size is between two C types will be considered
9850 to be the wider of the two types. Also, we do not have a
9851 special code to use for "long long", so anything wider than
9852 long is treated the same. Note that we can't distinguish
9853 between "int" and "long" in this code if they are the same
9854 size, but that's fine, since neither can the assembler. */
9855
9856 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9857 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9858
9859 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9860 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9861
9862 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9863 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9864
9865 else
9866 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9867
9868 case REAL_TYPE:
9869 /* If this is a range type, consider it to be the underlying
9870 type. */
9871 if (TREE_TYPE (type) != 0)
9872 break;
9873
9874 /* Carefully distinguish all the standard types of C,
9875 without messing up if the language is not C. */
9876
9877 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9878 return (qualifiers | 6);
9879
9880 else
9881 return (qualifiers | 7);
9882
9883 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9884 /* ??? We need to distinguish between double and float complex types,
9885 but I don't know how yet because I can't reach this code from
9886 existing front-ends. */
9887 return (qualifiers | 7); /* Who knows? */
9888
9889 case VECTOR_TYPE:
9890 case BOOLEAN_TYPE: /* Boolean truth value type. */
9891 case LANG_TYPE:
9892 case NULLPTR_TYPE:
9893 return qualifiers;
9894
9895 default:
9896 gcc_unreachable (); /* Not a type! */
9897 }
9898 }
9899
9900 return qualifiers;
9901 }
9902 \f
9903 /* Nested function support. */
9904
9905 /* Emit RTL insns to initialize the variable parts of a trampoline.
9906 FNADDR is an RTX for the address of the function's pure code.
9907 CXT is an RTX for the static chain value for the function.
9908
9909 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9910 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9911 (to store insns). This is a bit excessive. Perhaps a different
9912 mechanism would be better here.
9913
9914 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9915
9916 static void
9917 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9918 {
9919 /* SPARC 32-bit trampoline:
9920
9921 sethi %hi(fn), %g1
9922 sethi %hi(static), %g2
9923 jmp %g1+%lo(fn)
9924 or %g2, %lo(static), %g2
9925
9926 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9927 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9928 */
9929
9930 emit_move_insn
9931 (adjust_address (m_tramp, SImode, 0),
9932 expand_binop (SImode, ior_optab,
9933 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9934 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9935 NULL_RTX, 1, OPTAB_DIRECT));
9936
9937 emit_move_insn
9938 (adjust_address (m_tramp, SImode, 4),
9939 expand_binop (SImode, ior_optab,
9940 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9941 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9942 NULL_RTX, 1, OPTAB_DIRECT));
9943
9944 emit_move_insn
9945 (adjust_address (m_tramp, SImode, 8),
9946 expand_binop (SImode, ior_optab,
9947 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9948 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9949 NULL_RTX, 1, OPTAB_DIRECT));
9950
9951 emit_move_insn
9952 (adjust_address (m_tramp, SImode, 12),
9953 expand_binop (SImode, ior_optab,
9954 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9955 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9956 NULL_RTX, 1, OPTAB_DIRECT));
9957
9958 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9959 aligned on a 16 byte boundary so one flush clears it all. */
9960 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9961 if (sparc_cpu != PROCESSOR_ULTRASPARC
9962 && sparc_cpu != PROCESSOR_ULTRASPARC3
9963 && sparc_cpu != PROCESSOR_NIAGARA
9964 && sparc_cpu != PROCESSOR_NIAGARA2
9965 && sparc_cpu != PROCESSOR_NIAGARA3
9966 && sparc_cpu != PROCESSOR_NIAGARA4
9967 && sparc_cpu != PROCESSOR_NIAGARA7
9968 && sparc_cpu != PROCESSOR_M8)
9969 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9970
9971 /* Call __enable_execute_stack after writing onto the stack to make sure
9972 the stack address is accessible. */
9973 #ifdef HAVE_ENABLE_EXECUTE_STACK
9974 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9975 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9976 #endif
9977
9978 }
9979
9980 /* The 64-bit version is simpler because it makes more sense to load the
9981 values as "immediate" data out of the trampoline. It's also easier since
9982 we can read the PC without clobbering a register. */
9983
9984 static void
9985 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9986 {
9987 /* SPARC 64-bit trampoline:
9988
9989 rd %pc, %g1
9990 ldx [%g1+24], %g5
9991 jmp %g5
9992 ldx [%g1+16], %g5
9993 +16 bytes data
9994 */
9995
9996 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9997 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9998 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9999 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
10000 emit_move_insn (adjust_address (m_tramp, SImode, 8),
10001 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
10002 emit_move_insn (adjust_address (m_tramp, SImode, 12),
10003 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
10004 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
10005 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
10006 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
10007
10008 if (sparc_cpu != PROCESSOR_ULTRASPARC
10009 && sparc_cpu != PROCESSOR_ULTRASPARC3
10010 && sparc_cpu != PROCESSOR_NIAGARA
10011 && sparc_cpu != PROCESSOR_NIAGARA2
10012 && sparc_cpu != PROCESSOR_NIAGARA3
10013 && sparc_cpu != PROCESSOR_NIAGARA4
10014 && sparc_cpu != PROCESSOR_NIAGARA7
10015 && sparc_cpu != PROCESSOR_M8)
10016 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
10017
10018 /* Call __enable_execute_stack after writing onto the stack to make sure
10019 the stack address is accessible. */
10020 #ifdef HAVE_ENABLE_EXECUTE_STACK
10021 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10022 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10023 #endif
10024 }
10025
10026 /* Worker for TARGET_TRAMPOLINE_INIT. */
10027
10028 static void
10029 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10030 {
10031 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
10032 cxt = force_reg (Pmode, cxt);
10033 if (TARGET_ARCH64)
10034 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
10035 else
10036 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
10037 }
10038 \f
10039 /* Adjust the cost of a scheduling dependency. Return the new cost of
10040 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
10041
10042 static int
10043 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
10044 int cost)
10045 {
10046 enum attr_type insn_type;
10047
10048 if (recog_memoized (insn) < 0)
10049 return cost;
10050
10051 insn_type = get_attr_type (insn);
10052
10053 if (dep_type == 0)
10054 {
10055 /* Data dependency; DEP_INSN writes a register that INSN reads some
10056 cycles later. */
10057
10058 /* If a load, then the dependence must be on the memory address;
10059 add an extra "cycle". Note that the cost could be two cycles
10060 if the reg was written late in an instruction group; we cannot tell
10061 here. */
10062 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
10063 return cost + 3;
10064
10065 /* Get the delay only if the address of the store is the dependence. */
10066 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
10067 {
10068 rtx pat = PATTERN (insn);
10069 rtx dep_pat = PATTERN (dep_insn);
10070
10071 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10072 return cost; /* This should not happen! */
10073
10074 /* The dependency between the two instructions was on the data that
10075 is being stored. Assume that this implies that the address of the
10076 store is not dependent. */
10077 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10078 return cost;
10079
10080 return cost + 3; /* An approximation. */
10081 }
10082
10083 /* A shift instruction cannot receive its data from an instruction
10084 in the same cycle; add a one cycle penalty. */
10085 if (insn_type == TYPE_SHIFT)
10086 return cost + 3; /* Split before cascade into shift. */
10087 }
10088 else
10089 {
10090 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
10091 INSN writes some cycles later. */
10092
10093 /* These are only significant for the fpu unit; writing a fp reg before
10094 the fpu has finished with it stalls the processor. */
10095
10096 /* Reusing an integer register causes no problems. */
10097 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10098 return 0;
10099 }
10100
10101 return cost;
10102 }
10103
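/* Likewise, but for the HyperSPARC and SPARClite 86x pipelines; same
   interface as supersparc_adjust_cost above.  */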
10104 static int
10105 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10106 int cost)
10107 {
10108 enum attr_type insn_type, dep_type;
10109 rtx pat = PATTERN (insn);
10110 rtx dep_pat = PATTERN (dep_insn);
10111
10112 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10113 return cost;
10114
10115 insn_type = get_attr_type (insn);
10116 dep_type = get_attr_type (dep_insn);
10117
10118 switch (dtype)
10119 {
10120 case 0:
10121 /* Data dependency; DEP_INSN writes a register that INSN reads some
10122 cycles later. */
10123
10124 switch (insn_type)
10125 {
10126 case TYPE_STORE:
10127 case TYPE_FPSTORE:
10128 /* Get the delay iff the address of the store is the dependence. */
10129 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10130 return cost;
10131
10132 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10133 return cost;
10134 return cost + 3;
10135
10136 case TYPE_LOAD:
10137 case TYPE_SLOAD:
10138 case TYPE_FPLOAD:
10139 /* If a load, then the dependence must be on the memory address. If
10140 the addresses aren't equal, then it might be a false dependency. */
10141 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
10142 {
10143 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
10144 || GET_CODE (SET_DEST (dep_pat)) != MEM
10145 || GET_CODE (SET_SRC (pat)) != MEM
10146 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
10147 XEXP (SET_SRC (pat), 0)))
10148 return cost + 2;
10149
10150 return cost + 8;
10151 }
10152 break;
10153
10154 case TYPE_BRANCH:
10155 /* Compare to branch latency is 0. There is no benefit from
10156 separating compare and branch. */
10157 if (dep_type == TYPE_COMPARE)
10158 return 0;
10159 /* Floating point compare to branch latency is less than
10160 compare to conditional move. */
10161 if (dep_type == TYPE_FPCMP)
10162 return cost - 1;
10163 break;
10164 default:
10165 break;
10166 }
10167 break;
10168
10169 case REG_DEP_ANTI:
10170 /* Anti-dependencies only penalize the fpu unit. */
10171 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10172 return 0;
10173 break;
10174
10175 default:
10176 break;
10177 }
10178
10179 return cost;
10180 }
10181
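/* Adjust the cost of a scheduling dependency for the selected CPU by
   dispatching to the CPU-specific routine above, if any.  */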
10182 static int
10183 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
10184 unsigned int)
10185 {
10186 switch (sparc_cpu)
10187 {
10188 case PROCESSOR_SUPERSPARC:
10189 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
10190 break;
10191 case PROCESSOR_HYPERSPARC:
10192 case PROCESSOR_SPARCLITE86X:
10193 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
10194 break;
10195 default:
10196 break;
10197 }
10198 return cost;
10199 }
10200
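/* Scheduler initialization hook; nothing to do on SPARC.  */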
10201 static void
10202 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
10203 int sched_verbose ATTRIBUTE_UNUSED,
10204 int max_ready ATTRIBUTE_UNUSED)
10205 {}
10206
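/* Return the scheduler lookahead depth, i.e. how many ready instructions
   are considered when choosing the next one to issue, for the selected
   CPU.  */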
10207 static int
10208 sparc_use_sched_lookahead (void)
10209 {
10210 switch (sparc_cpu)
10211 {
10212 case PROCESSOR_ULTRASPARC:
10213 case PROCESSOR_ULTRASPARC3:
10214 return 4;
10215 case PROCESSOR_SUPERSPARC:
10216 case PROCESSOR_HYPERSPARC:
10217 case PROCESSOR_SPARCLITE86X:
10218 return 3;
10219 case PROCESSOR_NIAGARA4:
10220 case PROCESSOR_NIAGARA7:
10221 case PROCESSOR_M8:
10222 return 2;
10223 case PROCESSOR_NIAGARA:
10224 case PROCESSOR_NIAGARA2:
10225 case PROCESSOR_NIAGARA3:
10226 default:
10227 return 0;
10228 }
10229 }
10230
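/* Return the maximum number of instructions that the selected CPU can
   issue per cycle.  */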
10231 static int
10232 sparc_issue_rate (void)
10233 {
10234 switch (sparc_cpu)
10235 {
10236 case PROCESSOR_ULTRASPARC:
10237 case PROCESSOR_ULTRASPARC3:
10238 case PROCESSOR_M8:
10239 return 4;
10240 case PROCESSOR_SUPERSPARC:
10241 return 3;
10242 case PROCESSOR_HYPERSPARC:
10243 case PROCESSOR_SPARCLITE86X:
10244 case PROCESSOR_V9:
10245 /* Assume V9 processors are capable of at least dual-issue. */
10246 case PROCESSOR_NIAGARA4:
10247 case PROCESSOR_NIAGARA7:
10248 return 2;
10249 case PROCESSOR_NIAGARA:
10250 case PROCESSOR_NIAGARA2:
10251 case PROCESSOR_NIAGARA3:
10252 default:
10253 return 1;
10254 }
10255 }
10256
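/* Return the cost of a branch.  SPEED_P is true when optimizing for speed
   and PREDICTABLE_P when the branch is easily predictable.  */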
10257 int
10258 sparc_branch_cost (bool speed_p, bool predictable_p)
10259 {
10260 if (!speed_p)
10261 return 2;
10262
10263 /* For pre-V9 processors we use a single value (usually 3) to take into
10264 account the potential annulling of the delay slot (which ends up being
10265 a bubble in the pipeline slot) plus a cycle to take into consideration
10266 the instruction cache effects.
10267
10268 On V9 and later processors, which have branch prediction facilities,
10269 we take into account whether the branch is (easily) predictable. */
10270 const int cost = sparc_costs->branch_cost;
10271
10272 switch (sparc_cpu)
10273 {
10274 case PROCESSOR_V9:
10275 case PROCESSOR_ULTRASPARC:
10276 case PROCESSOR_ULTRASPARC3:
10277 case PROCESSOR_NIAGARA:
10278 case PROCESSOR_NIAGARA2:
10279 case PROCESSOR_NIAGARA3:
10280 case PROCESSOR_NIAGARA4:
10281 case PROCESSOR_NIAGARA7:
10282 case PROCESSOR_M8:
10283 return cost + (predictable_p ? 0 : 2);
10284
10285 default:
10286 return cost;
10287 }
10288 }
10289
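/* Return 1 if the value set by INSN is known to have its high 32 bits
   zero, -1 if it is known to be the sign extension of its low 32 bits,
   and 0 if neither can be determined.  The pattern of INSN must be a
   SET.  */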
10290 static int
10291 set_extends (rtx_insn *insn)
10292 {
10293 register rtx pat = PATTERN (insn);
10294
10295 switch (GET_CODE (SET_SRC (pat)))
10296 {
10297 /* Load and some shift instructions zero extend. */
10298 case MEM:
10299 case ZERO_EXTEND:
10300 /* sethi clears the high bits */
10301 case HIGH:
10302 /* LO_SUM is used with sethi. sethi cleared the high
10303 bits and the values used with lo_sum are positive */
10304 case LO_SUM:
10305 /* Store flag stores 0 or 1 */
10306 case LT: case LTU:
10307 case GT: case GTU:
10308 case LE: case LEU:
10309 case GE: case GEU:
10310 case EQ:
10311 case NE:
10312 return 1;
10313 case AND:
10314 {
10315 rtx op0 = XEXP (SET_SRC (pat), 0);
10316 rtx op1 = XEXP (SET_SRC (pat), 1);
10317 if (GET_CODE (op1) == CONST_INT)
10318 return INTVAL (op1) >= 0;
10319 if (GET_CODE (op0) != REG)
10320 return 0;
10321 if (sparc_check_64 (op0, insn) == 1)
10322 return 1;
10323 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10324 }
10325 case IOR:
10326 case XOR:
10327 {
10328 rtx op0 = XEXP (SET_SRC (pat), 0);
10329 rtx op1 = XEXP (SET_SRC (pat), 1);
10330 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
10331 return 0;
10332 if (GET_CODE (op1) == CONST_INT)
10333 return INTVAL (op1) >= 0;
10334 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10335 }
10336 case LSHIFTRT:
10337 return GET_MODE (SET_SRC (pat)) == SImode;
10338 /* Positive integers leave the high bits zero. */
10339 case CONST_INT:
10340 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
10341 case ASHIFTRT:
10342 case SIGN_EXTEND:
10343 return - (GET_MODE (SET_SRC (pat)) == SImode);
10344 case REG:
10345 return sparc_check_64 (SET_SRC (pat), insn);
10346 default:
10347 return 0;
10348 }
10349 }
10350
10351 /* We _ought_ to have only one kind per function, but... */
10352 static GTY(()) rtx sparc_addr_diff_list;
10353 static GTY(()) rtx sparc_addr_list;
10354
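/* Defer the output of the case vector VEC associated with label LAB until
   the end of the current function.  DIFF is nonzero for an
   address-difference vector.  */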
10355 void
10356 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
10357 {
10358 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10359 if (diff)
10360 sparc_addr_diff_list
10361 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
10362 else
10363 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
10364 }
10365
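/* Output a deferred absolute address vector.  */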
10366 static void
10367 sparc_output_addr_vec (rtx vec)
10368 {
10369 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10370 int idx, vlen = XVECLEN (body, 0);
10371
10372 #ifdef ASM_OUTPUT_ADDR_VEC_START
10373 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10374 #endif
10375
10376 #ifdef ASM_OUTPUT_CASE_LABEL
10377 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10378 NEXT_INSN (lab));
10379 #else
10380 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10381 #endif
10382
10383 for (idx = 0; idx < vlen; idx++)
10384 {
10385 ASM_OUTPUT_ADDR_VEC_ELT
10386 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10387 }
10388
10389 #ifdef ASM_OUTPUT_ADDR_VEC_END
10390 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10391 #endif
10392 }
10393
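/* Output a deferred address-difference vector.  */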
10394 static void
10395 sparc_output_addr_diff_vec (rtx vec)
10396 {
10397 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10398 rtx base = XEXP (XEXP (body, 0), 0);
10399 int idx, vlen = XVECLEN (body, 1);
10400
10401 #ifdef ASM_OUTPUT_ADDR_VEC_START
10402 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10403 #endif
10404
10405 #ifdef ASM_OUTPUT_CASE_LABEL
10406 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10407 NEXT_INSN (lab));
10408 #else
10409 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10410 #endif
10411
10412 for (idx = 0; idx < vlen; idx++)
10413 {
10414 ASM_OUTPUT_ADDR_DIFF_ELT
10415 (asm_out_file,
10416 body,
10417 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10418 CODE_LABEL_NUMBER (base));
10419 }
10420
10421 #ifdef ASM_OUTPUT_ADDR_VEC_END
10422 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10423 #endif
10424 }
10425
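/* Output all the case vectors that have been deferred, suitably aligned,
   in the current function's code section.  */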
10426 static void
10427 sparc_output_deferred_case_vectors (void)
10428 {
10429 rtx t;
10430 int align;
10431
10432 if (sparc_addr_list == NULL_RTX
10433 && sparc_addr_diff_list == NULL_RTX)
10434 return;
10435
10436 /* Align to cache line in the function's code section. */
10437 switch_to_section (current_function_section ());
10438
10439 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10440 if (align > 0)
10441 ASM_OUTPUT_ALIGN (asm_out_file, align);
10442
10443 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10444 sparc_output_addr_vec (XEXP (t, 0));
10445 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10446 sparc_output_addr_diff_vec (XEXP (t, 0));
10447
10448 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
10449 }
10450
10451 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10452 unknown. Return 1 if the high bits are zero, -1 if the register is
10453 sign extended. */
10454 int
10455 sparc_check_64 (rtx x, rtx_insn *insn)
10456 {
10457 /* If a register is set only once it is safe to ignore insns this
10458 code does not know how to handle. The loop will either recognize
10459 the single set and return the correct value or fail to recognize
10460 it and return 0. */
10461 int set_once = 0;
10462 rtx y = x;
10463
10464 gcc_assert (GET_CODE (x) == REG);
10465
10466 if (GET_MODE (x) == DImode)
10467 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10468
10469 if (flag_expensive_optimizations
10470 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10471 set_once = 1;
10472
10473 if (insn == 0)
10474 {
10475 if (set_once)
10476 insn = get_last_insn_anywhere ();
10477 else
10478 return 0;
10479 }
10480
10481 while ((insn = PREV_INSN (insn)))
10482 {
10483 switch (GET_CODE (insn))
10484 {
10485 case JUMP_INSN:
10486 case NOTE:
10487 break;
10488 case CODE_LABEL:
10489 case CALL_INSN:
10490 default:
10491 if (! set_once)
10492 return 0;
10493 break;
10494 case INSN:
10495 {
10496 rtx pat = PATTERN (insn);
10497 if (GET_CODE (pat) != SET)
10498 return 0;
10499 if (rtx_equal_p (x, SET_DEST (pat)))
10500 return set_extends (insn);
10501 if (y && rtx_equal_p (y, SET_DEST (pat)))
10502 return set_extends (insn);
10503 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10504 return 0;
10505 }
10506 }
10507 }
10508 return 0;
10509 }
10510
10511 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10512 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10513
10514 const char *
10515 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10516 {
10517 static char asm_code[60];
10518
10519 /* The scratch register is only required when the destination
10520 register is not a 64-bit global or out register. */
10521 if (which_alternative != 2)
10522 operands[3] = operands[0];
10523
10524 /* We can only shift by constants <= 63. */
10525 if (GET_CODE (operands[2]) == CONST_INT)
10526 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10527
10528 if (GET_CODE (operands[1]) == CONST_INT)
10529 {
10530 output_asm_insn ("mov\t%1, %3", operands);
10531 }
10532 else
10533 {
10534 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10535 if (sparc_check_64 (operands[1], insn) <= 0)
10536 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10537 output_asm_insn ("or\t%L1, %3, %3", operands);
10538 }
10539
10540 strcpy (asm_code, opcode);
10541
10542 if (which_alternative != 2)
10543 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10544 else
10545 return
10546 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
10547 }
10548 \f
10549 /* Output rtl to increment the profiler label LABELNO
10550 for profiling a function entry. */
10551
10552 void
10553 sparc_profile_hook (int labelno)
10554 {
10555 char buf[32];
10556 rtx lab, fun;
10557
10558 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10559 if (NO_PROFILE_COUNTERS)
10560 {
10561 emit_library_call (fun, LCT_NORMAL, VOIDmode);
10562 }
10563 else
10564 {
10565 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10566 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10567 emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
10568 }
10569 }
10570 \f
10571 #ifdef TARGET_SOLARIS
10572 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10573
10574 static void
10575 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10576 tree decl ATTRIBUTE_UNUSED)
10577 {
10578 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10579 {
10580 solaris_elf_asm_comdat_section (name, flags, decl);
10581 return;
10582 }
10583
10584 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10585
10586 if (!(flags & SECTION_DEBUG))
10587 fputs (",#alloc", asm_out_file);
10588 #if HAVE_GAS_SECTION_EXCLUDE
10589 if (flags & SECTION_EXCLUDE)
10590 fputs (",#exclude", asm_out_file);
10591 #endif
10592 if (flags & SECTION_WRITE)
10593 fputs (",#write", asm_out_file);
10594 if (flags & SECTION_TLS)
10595 fputs (",#tls", asm_out_file);
10596 if (flags & SECTION_CODE)
10597 fputs (",#execinstr", asm_out_file);
10598
10599 if (flags & SECTION_NOTYPE)
10600 ;
10601 else if (flags & SECTION_BSS)
10602 fputs (",#nobits", asm_out_file);
10603 else
10604 fputs (",#progbits", asm_out_file);
10605
10606 fputc ('\n', asm_out_file);
10607 }
10608 #endif /* TARGET_SOLARIS */
10609
10610 /* We do not allow indirect calls to be optimized into sibling calls.
10611
10612 We cannot use sibling calls when delayed branches are disabled
10613 because they will likely require the call delay slot to be filled.
10614
10615 Also, on SPARC 32-bit we cannot emit a sibling call when the
10616 current function returns a structure. This is because the "unimp
10617 after call" convention would cause the callee to return to the
10618 wrong place. The generic code already disallows cases where the
10619 function being called returns a structure.
10620
10621 It may seem strange how this last case could occur. Usually there
10622 is code after the call which jumps to epilogue code which dumps the
10623 return value into the struct return area. That ought to invalidate
10624 the sibling call right? Well, in the C++ case we can end up passing
10625 the pointer to the struct return area to a constructor (which returns
10626 void) and then nothing else happens. Such a sibling call would look
10627 valid without the added check here.
10628
10629 VxWorks PIC PLT entries require the global pointer to be initialized
10630 on entry. We therefore can't emit sibling calls to them. */
10631 static bool
10632 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10633 {
10634 return (decl
10635 && flag_delayed_branch
10636 && (TARGET_ARCH64 || ! cfun->returns_struct)
10637 && !(TARGET_VXWORKS_RTP
10638 && flag_pic
10639 && !targetm.binds_local_p (decl)));
10640 }
10641 \f
10642 /* libfunc renaming. */
10643
10644 static void
10645 sparc_init_libfuncs (void)
10646 {
10647 if (TARGET_ARCH32)
10648 {
10649 /* Use the subroutines that Sun's library provides for integer
10650 multiply and divide. The `*' prevents an underscore from
10651 being prepended by the compiler. .umul is a little faster
10652 than .mul. */
10653 set_optab_libfunc (smul_optab, SImode, "*.umul");
10654 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10655 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10656 set_optab_libfunc (smod_optab, SImode, "*.rem");
10657 set_optab_libfunc (umod_optab, SImode, "*.urem");
10658
10659 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
10660 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10661 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10662 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10663 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10664 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10665
10666 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10667 is because with soft-float, the SFmode and DFmode sqrt
10668 instructions will be absent, and the compiler will notice and
10669 try to use the TFmode sqrt instruction for calls to the
10670 builtin function sqrt, but this fails. */
10671 if (TARGET_FPU)
10672 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10673
10674 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10675 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10676 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10677 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10678 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10679 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10680
10681 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10682 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10683 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10684 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10685
10686 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10687 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10688 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10689 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10690
10691 if (DITF_CONVERSION_LIBFUNCS)
10692 {
10693 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10694 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10695 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10696 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10697 }
10698
10699 if (SUN_CONVERSION_LIBFUNCS)
10700 {
10701 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10702 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10703 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10704 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10705 }
10706 }
10707 if (TARGET_ARCH64)
10708 {
10709 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
10710 do not exist in the library. Make sure the compiler does not
10711 emit calls to them by accident. (It should always use the
10712 hardware instructions.) */
10713 set_optab_libfunc (smul_optab, SImode, 0);
10714 set_optab_libfunc (sdiv_optab, SImode, 0);
10715 set_optab_libfunc (udiv_optab, SImode, 0);
10716 set_optab_libfunc (smod_optab, SImode, 0);
10717 set_optab_libfunc (umod_optab, SImode, 0);
10718
10719 if (SUN_INTEGER_MULTIPLY_64)
10720 {
10721 set_optab_libfunc (smul_optab, DImode, "__mul64");
10722 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10723 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10724 set_optab_libfunc (smod_optab, DImode, "__rem64");
10725 set_optab_libfunc (umod_optab, DImode, "__urem64");
10726 }
10727
10728 if (SUN_CONVERSION_LIBFUNCS)
10729 {
10730 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10731 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10732 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10733 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
10734 }
10735 }
10736 }
10737 \f
10738 /* SPARC builtins. */
10739 enum sparc_builtins
10740 {
10741 /* FPU builtins. */
10742 SPARC_BUILTIN_LDFSR,
10743 SPARC_BUILTIN_STFSR,
10744
10745 /* VIS 1.0 builtins. */
10746 SPARC_BUILTIN_FPACK16,
10747 SPARC_BUILTIN_FPACK32,
10748 SPARC_BUILTIN_FPACKFIX,
10749 SPARC_BUILTIN_FEXPAND,
10750 SPARC_BUILTIN_FPMERGE,
10751 SPARC_BUILTIN_FMUL8X16,
10752 SPARC_BUILTIN_FMUL8X16AU,
10753 SPARC_BUILTIN_FMUL8X16AL,
10754 SPARC_BUILTIN_FMUL8SUX16,
10755 SPARC_BUILTIN_FMUL8ULX16,
10756 SPARC_BUILTIN_FMULD8SUX16,
10757 SPARC_BUILTIN_FMULD8ULX16,
10758 SPARC_BUILTIN_FALIGNDATAV4HI,
10759 SPARC_BUILTIN_FALIGNDATAV8QI,
10760 SPARC_BUILTIN_FALIGNDATAV2SI,
10761 SPARC_BUILTIN_FALIGNDATADI,
10762 SPARC_BUILTIN_WRGSR,
10763 SPARC_BUILTIN_RDGSR,
10764 SPARC_BUILTIN_ALIGNADDR,
10765 SPARC_BUILTIN_ALIGNADDRL,
10766 SPARC_BUILTIN_PDIST,
10767 SPARC_BUILTIN_EDGE8,
10768 SPARC_BUILTIN_EDGE8L,
10769 SPARC_BUILTIN_EDGE16,
10770 SPARC_BUILTIN_EDGE16L,
10771 SPARC_BUILTIN_EDGE32,
10772 SPARC_BUILTIN_EDGE32L,
10773 SPARC_BUILTIN_FCMPLE16,
10774 SPARC_BUILTIN_FCMPLE32,
10775 SPARC_BUILTIN_FCMPNE16,
10776 SPARC_BUILTIN_FCMPNE32,
10777 SPARC_BUILTIN_FCMPGT16,
10778 SPARC_BUILTIN_FCMPGT32,
10779 SPARC_BUILTIN_FCMPEQ16,
10780 SPARC_BUILTIN_FCMPEQ32,
10781 SPARC_BUILTIN_FPADD16,
10782 SPARC_BUILTIN_FPADD16S,
10783 SPARC_BUILTIN_FPADD32,
10784 SPARC_BUILTIN_FPADD32S,
10785 SPARC_BUILTIN_FPSUB16,
10786 SPARC_BUILTIN_FPSUB16S,
10787 SPARC_BUILTIN_FPSUB32,
10788 SPARC_BUILTIN_FPSUB32S,
10789 SPARC_BUILTIN_ARRAY8,
10790 SPARC_BUILTIN_ARRAY16,
10791 SPARC_BUILTIN_ARRAY32,
10792
10793 /* VIS 2.0 builtins. */
10794 SPARC_BUILTIN_EDGE8N,
10795 SPARC_BUILTIN_EDGE8LN,
10796 SPARC_BUILTIN_EDGE16N,
10797 SPARC_BUILTIN_EDGE16LN,
10798 SPARC_BUILTIN_EDGE32N,
10799 SPARC_BUILTIN_EDGE32LN,
10800 SPARC_BUILTIN_BMASK,
10801 SPARC_BUILTIN_BSHUFFLEV4HI,
10802 SPARC_BUILTIN_BSHUFFLEV8QI,
10803 SPARC_BUILTIN_BSHUFFLEV2SI,
10804 SPARC_BUILTIN_BSHUFFLEDI,
10805
10806 /* VIS 3.0 builtins. */
10807 SPARC_BUILTIN_CMASK8,
10808 SPARC_BUILTIN_CMASK16,
10809 SPARC_BUILTIN_CMASK32,
10810 SPARC_BUILTIN_FCHKSM16,
10811 SPARC_BUILTIN_FSLL16,
10812 SPARC_BUILTIN_FSLAS16,
10813 SPARC_BUILTIN_FSRL16,
10814 SPARC_BUILTIN_FSRA16,
10815 SPARC_BUILTIN_FSLL32,
10816 SPARC_BUILTIN_FSLAS32,
10817 SPARC_BUILTIN_FSRL32,
10818 SPARC_BUILTIN_FSRA32,
10819 SPARC_BUILTIN_PDISTN,
10820 SPARC_BUILTIN_FMEAN16,
10821 SPARC_BUILTIN_FPADD64,
10822 SPARC_BUILTIN_FPSUB64,
10823 SPARC_BUILTIN_FPADDS16,
10824 SPARC_BUILTIN_FPADDS16S,
10825 SPARC_BUILTIN_FPSUBS16,
10826 SPARC_BUILTIN_FPSUBS16S,
10827 SPARC_BUILTIN_FPADDS32,
10828 SPARC_BUILTIN_FPADDS32S,
10829 SPARC_BUILTIN_FPSUBS32,
10830 SPARC_BUILTIN_FPSUBS32S,
10831 SPARC_BUILTIN_FUCMPLE8,
10832 SPARC_BUILTIN_FUCMPNE8,
10833 SPARC_BUILTIN_FUCMPGT8,
10834 SPARC_BUILTIN_FUCMPEQ8,
10835 SPARC_BUILTIN_FHADDS,
10836 SPARC_BUILTIN_FHADDD,
10837 SPARC_BUILTIN_FHSUBS,
10838 SPARC_BUILTIN_FHSUBD,
10839 SPARC_BUILTIN_FNHADDS,
10840 SPARC_BUILTIN_FNHADDD,
10841 SPARC_BUILTIN_UMULXHI,
10842 SPARC_BUILTIN_XMULX,
10843 SPARC_BUILTIN_XMULXHI,
10844
10845 /* VIS 4.0 builtins. */
10846 SPARC_BUILTIN_FPADD8,
10847 SPARC_BUILTIN_FPADDS8,
10848 SPARC_BUILTIN_FPADDUS8,
10849 SPARC_BUILTIN_FPADDUS16,
10850 SPARC_BUILTIN_FPCMPLE8,
10851 SPARC_BUILTIN_FPCMPGT8,
10852 SPARC_BUILTIN_FPCMPULE16,
10853 SPARC_BUILTIN_FPCMPUGT16,
10854 SPARC_BUILTIN_FPCMPULE32,
10855 SPARC_BUILTIN_FPCMPUGT32,
10856 SPARC_BUILTIN_FPMAX8,
10857 SPARC_BUILTIN_FPMAX16,
10858 SPARC_BUILTIN_FPMAX32,
10859 SPARC_BUILTIN_FPMAXU8,
10860 SPARC_BUILTIN_FPMAXU16,
10861 SPARC_BUILTIN_FPMAXU32,
10862 SPARC_BUILTIN_FPMIN8,
10863 SPARC_BUILTIN_FPMIN16,
10864 SPARC_BUILTIN_FPMIN32,
10865 SPARC_BUILTIN_FPMINU8,
10866 SPARC_BUILTIN_FPMINU16,
10867 SPARC_BUILTIN_FPMINU32,
10868 SPARC_BUILTIN_FPSUB8,
10869 SPARC_BUILTIN_FPSUBS8,
10870 SPARC_BUILTIN_FPSUBUS8,
10871 SPARC_BUILTIN_FPSUBUS16,
10872
10873 /* VIS 4.0B builtins. */
10874
10875 /* Note that all the DICTUNPACK* entries should be kept
10876 contiguous. */
10877 SPARC_BUILTIN_FIRST_DICTUNPACK,
10878 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10879 SPARC_BUILTIN_DICTUNPACK16,
10880 SPARC_BUILTIN_DICTUNPACK32,
10881 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10882
10883 /* Note that all the FPCMP*SHL entries should be kept
10884 contiguous. */
10885 SPARC_BUILTIN_FIRST_FPCMPSHL,
10886 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10887 SPARC_BUILTIN_FPCMPGT8SHL,
10888 SPARC_BUILTIN_FPCMPEQ8SHL,
10889 SPARC_BUILTIN_FPCMPNE8SHL,
10890 SPARC_BUILTIN_FPCMPLE16SHL,
10891 SPARC_BUILTIN_FPCMPGT16SHL,
10892 SPARC_BUILTIN_FPCMPEQ16SHL,
10893 SPARC_BUILTIN_FPCMPNE16SHL,
10894 SPARC_BUILTIN_FPCMPLE32SHL,
10895 SPARC_BUILTIN_FPCMPGT32SHL,
10896 SPARC_BUILTIN_FPCMPEQ32SHL,
10897 SPARC_BUILTIN_FPCMPNE32SHL,
10898 SPARC_BUILTIN_FPCMPULE8SHL,
10899 SPARC_BUILTIN_FPCMPUGT8SHL,
10900 SPARC_BUILTIN_FPCMPULE16SHL,
10901 SPARC_BUILTIN_FPCMPUGT16SHL,
10902 SPARC_BUILTIN_FPCMPULE32SHL,
10903 SPARC_BUILTIN_FPCMPUGT32SHL,
10904 SPARC_BUILTIN_FPCMPDE8SHL,
10905 SPARC_BUILTIN_FPCMPDE16SHL,
10906 SPARC_BUILTIN_FPCMPDE32SHL,
10907 SPARC_BUILTIN_FPCMPUR8SHL,
10908 SPARC_BUILTIN_FPCMPUR16SHL,
10909 SPARC_BUILTIN_FPCMPUR32SHL,
10910 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10911
10912 SPARC_BUILTIN_MAX
10913 };
10914
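/* Table of the builtin function declarations and table of the insn codes
   used to expand them, both indexed by SPARC builtin code.  */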
10915 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10916 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10917
10918 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10919 The instruction should require a constant operand of some sort. The
10920 function prints an error if OPVAL is not valid. */
10921
10922 static int
10923 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10924 {
10925 if (GET_CODE (opval) != CONST_INT)
10926 {
10927 error ("%qs expects a constant argument", insn_data[icode].name);
10928 return false;
10929 }
10930
10931 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10932 {
10933 error ("constant argument out of range for %qs", insn_data[icode].name);
10934 return false;
10935 }
10936 return true;
10937 }
10938
10939 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10940 function decl or NULL_TREE if the builtin was not added. */
10941
10942 static tree
10943 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10944 tree type)
10945 {
10946 tree t
10947 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10948
10949 if (t)
10950 {
10951 sparc_builtins[code] = t;
10952 sparc_builtins_icode[code] = icode;
10953 }
10954
10955 return t;
10956 }
10957
10958 /* Likewise, but also marks the function as "const". */
10959
10960 static tree
10961 def_builtin_const (const char *name, enum insn_code icode,
10962 enum sparc_builtins code, tree type)
10963 {
10964 tree t = def_builtin (name, icode, code, type);
10965
10966 if (t)
10967 TREE_READONLY (t) = 1;
10968
10969 return t;
10970 }
10971
10972 /* Implement the TARGET_INIT_BUILTINS target hook.
10973 Create builtin functions for special SPARC instructions. */
10974
10975 static void
10976 sparc_init_builtins (void)
10977 {
10978 if (TARGET_FPU)
10979 sparc_fpu_init_builtins ();
10980
10981 if (TARGET_VIS)
10982 sparc_vis_init_builtins ();
10983 }
10984
10985 /* Create builtin functions for FPU instructions. */
10986
10987 static void
10988 sparc_fpu_init_builtins (void)
10989 {
10990 tree ftype
10991 = build_function_type_list (void_type_node,
10992 build_pointer_type (unsigned_type_node), 0);
10993 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10994 SPARC_BUILTIN_LDFSR, ftype);
10995 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10996 SPARC_BUILTIN_STFSR, ftype);
10997 }
10998
10999 /* Create builtin functions for VIS instructions. */
11000
11001 static void
11002 sparc_vis_init_builtins (void)
11003 {
11004 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
11005 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
11006 tree v4hi = build_vector_type (intHI_type_node, 4);
11007 tree v2hi = build_vector_type (intHI_type_node, 2);
11008 tree v2si = build_vector_type (intSI_type_node, 2);
11009 tree v1si = build_vector_type (intSI_type_node, 1);
11010
11011 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
11012 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
11013 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
11014 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
11015 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
11016 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
11017 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
11018 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
11019 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
11020 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
11021 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
11022 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
11023 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
11024 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
11025 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
11026 v8qi, v8qi,
11027 intDI_type_node, 0);
11028 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
11029 v8qi, v8qi, 0);
11030 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
11031 v8qi, v8qi, 0);
11032 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
11033 intSI_type_node, 0);
11034 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
11035 intSI_type_node, 0);
11036 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
11037 intDI_type_node, 0);
11038 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
11039 intDI_type_node,
11040 intDI_type_node, 0);
11041 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
11042 intSI_type_node,
11043 intSI_type_node, 0);
11044 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
11045 ptr_type_node,
11046 intSI_type_node, 0);
11047 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
11048 ptr_type_node,
11049 intDI_type_node, 0);
11050 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
11051 ptr_type_node,
11052 ptr_type_node, 0);
11053 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
11054 ptr_type_node,
11055 ptr_type_node, 0);
11056 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
11057 v4hi, v4hi, 0);
11058 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
11059 v2si, v2si, 0);
11060 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
11061 v4hi, v4hi, 0);
11062 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
11063 v2si, v2si, 0);
11064 tree void_ftype_di = build_function_type_list (void_type_node,
11065 intDI_type_node, 0);
11066 tree di_ftype_void = build_function_type_list (intDI_type_node,
11067 void_type_node, 0);
11068 tree void_ftype_si = build_function_type_list (void_type_node,
11069 intSI_type_node, 0);
11070 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
11071 float_type_node,
11072 float_type_node, 0);
11073 tree df_ftype_df_df = build_function_type_list (double_type_node,
11074 double_type_node,
11075 double_type_node, 0);
11076
11077 /* Packing and expanding vectors. */
11078 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
11079 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
11080 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
11081 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
11082 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
11083 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
11084 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
11085 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
11086 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
11087 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
11088
11089 /* Multiplications. */
11090 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
11091 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
11092 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
11093 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
11094 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
11095 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
11096 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
11097 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
11098 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
11099 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
11100 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
11101 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
11102 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
11103 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
11104
11105 /* Data aligning. */
11106 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
11107 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
11108 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
11109 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
11110 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
11111 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
11112 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
11113 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
11114
11115 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
11116 SPARC_BUILTIN_WRGSR, void_ftype_di);
11117 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
11118 SPARC_BUILTIN_RDGSR, di_ftype_void);
11119
11120 if (TARGET_ARCH64)
11121 {
11122 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
11123 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
11124 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
11125 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
11126 }
11127 else
11128 {
11129 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
11130 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
11131 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
11132 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
11133 }
11134
11135 /* Pixel distance. */
11136 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
11137 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
11138
11139 /* Edge handling. */
11140 if (TARGET_ARCH64)
11141 {
11142 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
11143 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
11144 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
11145 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
11146 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
11147 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
11148 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
11149 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
11150 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
11151 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
11152 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
11153 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
11154 }
11155 else
11156 {
11157 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
11158 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
11159 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
11160 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
11161 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
11162 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
11163 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
11164 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
11165 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
11166 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
11167 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
11168 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
11169 }
11170
11171 /* Pixel compare. */
11172 if (TARGET_ARCH64)
11173 {
11174 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
11175 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
11176 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
11177 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
11178 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
11179 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
11180 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
11181 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
11182 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
11183 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
11184 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
11185 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
11186 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
11187 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
11188 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
11189 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
11190 }
11191 else
11192 {
11193 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
11194 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
11195 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
11196 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
11197 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
11198 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
11199 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
11200 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
11201 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
11202 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
11203 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
11204 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
11205 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
11206 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
11207 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
11208 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
11209 }
11210
11211 /* Addition and subtraction. */
11212 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
11213 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
11214 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
11215 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
11216 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
11217 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
11218 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
11219 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
11220 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
11221 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
11222 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
11223 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
11224 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
11225 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
11226 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
11227 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
11228
11229 /* Three-dimensional array addressing. */
11230 if (TARGET_ARCH64)
11231 {
11232 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
11233 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
11234 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
11235 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
11236 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
11237 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
11238 }
11239 else
11240 {
11241 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
11242 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
11243 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
11244 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
11245 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
11246 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
11247 }
11248
11249 if (TARGET_VIS2)
11250 {
11251 /* Edge handling. */
11252 if (TARGET_ARCH64)
11253 {
11254 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
11255 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
11256 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
11257 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
11258 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
11259 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
11260 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
11261 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
11262 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
11263 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
11264 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
11265 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
11266 }
11267 else
11268 {
11269 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
11270 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
11271 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
11272 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
11273 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
11274 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
11275 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
11276 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
11277 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
11278 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
11279 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
11280 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
11281 }
11282
11283 /* Byte mask and shuffle. */
11284 if (TARGET_ARCH64)
11285 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
11286 SPARC_BUILTIN_BMASK, di_ftype_di_di);
11287 else
11288 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
11289 SPARC_BUILTIN_BMASK, si_ftype_si_si);
11290 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
11291 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
11292 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
11293 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
11294 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
11295 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
11296 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
11297 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
11298 }
11299
11300 if (TARGET_VIS3)
11301 {
11302 if (TARGET_ARCH64)
11303 {
11304 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
11305 SPARC_BUILTIN_CMASK8, void_ftype_di);
11306 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
11307 SPARC_BUILTIN_CMASK16, void_ftype_di);
11308 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
11309 SPARC_BUILTIN_CMASK32, void_ftype_di);
11310 }
11311 else
11312 {
11313 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
11314 SPARC_BUILTIN_CMASK8, void_ftype_si);
11315 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
11316 SPARC_BUILTIN_CMASK16, void_ftype_si);
11317 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
11318 SPARC_BUILTIN_CMASK32, void_ftype_si);
11319 }
11320
11321 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
11322 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
11323
11324 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
11325 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
11326 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
11327 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
11328 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
11329 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
11330 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
11331 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
11332 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
11333 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
11334 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
11335 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
11336 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
11337 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
11338 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
11339 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
11340
11341 if (TARGET_ARCH64)
11342 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
11343 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
11344 else
11345 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
11346 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
11347
11348 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
11349 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
11350 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
11351 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
11352 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
11353 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
11354
11355 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
11356 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
11357 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
11358 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
11359 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
11360 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
11361 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
11362 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
11363 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
11364 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
11365 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
11366 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
11367 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
11368 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
11369 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
11370 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
11371
11372 if (TARGET_ARCH64)
11373 {
11374 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
11375 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
11376 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
11377 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
11378 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
11379 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11380 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11381 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11382 }
11383 else
11384 {
11385 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11386 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11387 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11388 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11389 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11390 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11391 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11392 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11393 }
11394
11395 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11396 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11397 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11398 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11399 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11400 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11401 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11402 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11403 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11404 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11405 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11406 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11407
11408 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11409 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11410 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11411 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11412 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11413 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11414 }
11415
11416 if (TARGET_VIS4)
11417 {
11418 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11419 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11420 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11421 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11422 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11423 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11424 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11425 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11426
11427
11428 if (TARGET_ARCH64)
11429 {
11430 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11431 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11432 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11433 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11434 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11435 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11436 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11437 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11438 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11439 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11440 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11441 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11442 }
11443 else
11444 {
11445 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11446 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11447 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11448 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11449 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11450 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11451 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11452 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11453 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11454 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11455 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11456 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11457 }
11458
11459 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11460 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11461 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11462 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11463 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11464 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11465 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11466 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11467 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11468 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11469 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11470 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11471 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11472 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11473 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11474 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11475 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11476 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11477 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11478 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11479 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11480 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11481 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11482 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11483 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11484 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11485 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11486 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11487 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11488 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11489 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11490 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11491 }
11492
11493 if (TARGET_VIS4B)
11494 {
11495 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11496 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11497 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11498 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11499 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11500 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11501
11502 if (TARGET_ARCH64)
11503 {
11504 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11505 v8qi, v8qi,
11506 intSI_type_node, 0);
11507 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11508 v4hi, v4hi,
11509 intSI_type_node, 0);
11510 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11511 v2si, v2si,
11512 intSI_type_node, 0);
11513
11514 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11515 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11516 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11517 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11518 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11519 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11520 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11521 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11522
11523 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11524 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11525 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11526 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11527 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11528 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11529 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11530 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11531
11532 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11533 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11534 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11535 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11536 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11537 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11538 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11539 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11540
11541
11542 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11543 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11544 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11545 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11546
11547 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11548 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11549 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11550 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11551
11552 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11553 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11554 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11555 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11556
11557 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11558 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11559 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11560 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11561 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11562 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11563
11564 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11565 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11566 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11567 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11568 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11569 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11570
11571 }
11572 else
11573 {
11574 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11575 v8qi, v8qi,
11576 intSI_type_node, 0);
11577 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11578 v4hi, v4hi,
11579 intSI_type_node, 0);
11580 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11581 v2si, v2si,
11582 intSI_type_node, 0);
11583
11584 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11585 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11586 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11587 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11588 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11589 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11590 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11591 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11592
11593 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11594 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11595 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11596 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11597 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11598 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11599 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11600 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11601
11602 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11603 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11604 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11605 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11606 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11607 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11608 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11609 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11610
11611
11612 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11613 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11614 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11615 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11616
11617 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11618 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11619 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11620 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11621
11622 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11623 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11624 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11625 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11626
11627 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11628 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11629 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11630 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11631 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11632 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11633
11634 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11635 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11636 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11637 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11638 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11639 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
11640 }
11641 }
11642 }
11643
11644 /* Implement TARGET_BUILTIN_DECL hook. */
11645
11646 static tree
11647 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11648 {
11649 if (code >= SPARC_BUILTIN_MAX)
11650 return error_mark_node;
11651
11652 return sparc_builtins[code];
11653 }
11654
11655 /* Implement TARGET_EXPAND_BUILTIN hook. */
11656
11657 static rtx
11658 sparc_expand_builtin (tree exp, rtx target,
11659 rtx subtarget ATTRIBUTE_UNUSED,
11660 machine_mode tmode ATTRIBUTE_UNUSED,
11661 int ignore ATTRIBUTE_UNUSED)
11662 {
11663 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11664 enum sparc_builtins code
11665 = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
11666 enum insn_code icode = sparc_builtins_icode[code];
11667 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11668 call_expr_arg_iterator iter;
11669 int arg_count = 0;
11670 rtx pat, op[4];
11671 tree arg;
11672
11673 if (nonvoid)
11674 {
11675 machine_mode tmode = insn_data[icode].operand[0].mode;
11676 if (!target
11677 || GET_MODE (target) != tmode
11678 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11679 op[0] = gen_reg_rtx (tmode);
11680 else
11681 op[0] = target;
11682 }
11683 else
11684 op[0] = NULL_RTX;
11685
11686 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11687 {
11688 const struct insn_operand_data *insn_op;
11689 int idx;
11690
11691 if (arg == error_mark_node)
11692 return NULL_RTX;
11693
11694 arg_count++;
11695 idx = arg_count - !nonvoid;
11696 insn_op = &insn_data[icode].operand[idx];
11697 op[arg_count] = expand_normal (arg);
11698
11699 /* Some of the builtins require constant arguments. We check
11700 for this here. */
11701 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11702 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11703 && arg_count == 3)
11704 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11705 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11706 && arg_count == 2))
11707 {
11708 if (!check_constant_argument (icode, idx, op[arg_count]))
11709 return const0_rtx;
11710 }
11711
11712 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11713 {
11714 if (!address_operand (op[arg_count], SImode))
11715 {
11716 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11717 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11718 }
11719 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11720 }
11721
11722 else if (insn_op->mode == V1DImode
11723 && GET_MODE (op[arg_count]) == DImode)
11724 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11725
11726 else if (insn_op->mode == V1SImode
11727 && GET_MODE (op[arg_count]) == SImode)
11728 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11729
11730 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11731 insn_op->mode))
11732 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11733 }
11734
11735 switch (arg_count)
11736 {
11737 case 0:
11738 pat = GEN_FCN (icode) (op[0]);
11739 break;
11740 case 1:
11741 if (nonvoid)
11742 pat = GEN_FCN (icode) (op[0], op[1]);
11743 else
11744 pat = GEN_FCN (icode) (op[1]);
11745 break;
11746 case 2:
11747 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11748 break;
11749 case 3:
11750 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11751 break;
11752 default:
11753 gcc_unreachable ();
11754 }
11755
11756 if (!pat)
11757 return NULL_RTX;
11758
11759 emit_insn (pat);
11760
11761 return (nonvoid ? op[0] : const0_rtx);
11762 }
11763
11764 /* Return the upper 16 bits of the 8x16 multiplication. */
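/* For instance, e8 == 128 and e16 == 1000 gives (128 * 1000 + 128) / 256
   == 500, i.e. the 24-bit product reduced to its upper 16 bits with
   rounding. */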
11765
11766 static int
11767 sparc_vis_mul8x16 (int e8, int e16)
11768 {
11769 return (e8 * e16 + 128) / 256;
11770 }
11771
11772 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11773 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11774
11775 static void
11776 sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
11777 tree inner_type, tree cst0, tree cst1)
11778 {
11779 unsigned i, num = VECTOR_CST_NELTS (cst0);
11780 int scale;
11781
11782 switch (fncode)
11783 {
11784 case SPARC_BUILTIN_FMUL8X16:
11785 for (i = 0; i < num; ++i)
11786 {
11787 int val
11788 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11789 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11790 n_elts->quick_push (build_int_cst (inner_type, val));
11791 }
11792 break;
11793
11794 case SPARC_BUILTIN_FMUL8X16AU:
11795 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11796
11797 for (i = 0; i < num; ++i)
11798 {
11799 int val
11800 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11801 scale);
11802 n_elts->quick_push (build_int_cst (inner_type, val));
11803 }
11804 break;
11805
11806 case SPARC_BUILTIN_FMUL8X16AL:
11807 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11808
11809 for (i = 0; i < num; ++i)
11810 {
11811 int val
11812 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11813 scale);
11814 n_elts->quick_push (build_int_cst (inner_type, val));
11815 }
11816 break;
11817
11818 default:
11819 gcc_unreachable ();
11820 }
11821 }
11822
11823 /* Implement TARGET_FOLD_BUILTIN hook.
11824
11825 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11826 result of the function call is ignored. NULL_TREE is returned if the
11827 function could not be folded. */
11828
11829 static tree
11830 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11831 tree *args, bool ignore)
11832 {
11833 enum sparc_builtins code
11834 = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
11835 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11836 tree arg0, arg1, arg2;
11837
11838 if (ignore)
11839 switch (code)
11840 {
11841 case SPARC_BUILTIN_LDFSR:
11842 case SPARC_BUILTIN_STFSR:
11843 case SPARC_BUILTIN_ALIGNADDR:
11844 case SPARC_BUILTIN_WRGSR:
11845 case SPARC_BUILTIN_BMASK:
11846 case SPARC_BUILTIN_CMASK8:
11847 case SPARC_BUILTIN_CMASK16:
11848 case SPARC_BUILTIN_CMASK32:
11849 break;
11850
11851 default:
11852 return build_zero_cst (rtype);
11853 }
11854
11855 switch (code)
11856 {
11857 case SPARC_BUILTIN_FEXPAND:
11858 arg0 = args[0];
11859 STRIP_NOPS (arg0);
11860
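/* FEXPAND widens each 8-bit element to a 16-bit element holding the
   value shifted left by 4, so a constant argument can be folded
   directly below. */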
11861 if (TREE_CODE (arg0) == VECTOR_CST)
11862 {
11863 tree inner_type = TREE_TYPE (rtype);
11864 unsigned i;
11865
11866 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11867 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11868 {
11869 unsigned HOST_WIDE_INT val
11870 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
11871 n_elts.quick_push (build_int_cst (inner_type, val << 4));
11872 }
11873 return n_elts.build ();
11874 }
11875 break;
11876
11877 case SPARC_BUILTIN_FMUL8X16:
11878 case SPARC_BUILTIN_FMUL8X16AU:
11879 case SPARC_BUILTIN_FMUL8X16AL:
11880 arg0 = args[0];
11881 arg1 = args[1];
11882 STRIP_NOPS (arg0);
11883 STRIP_NOPS (arg1);
11884
11885 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11886 {
11887 tree inner_type = TREE_TYPE (rtype);
11888 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11889 sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
11890 return n_elts.build ();
11891 }
11892 break;
11893
11894 case SPARC_BUILTIN_FPMERGE:
11895 arg0 = args[0];
11896 arg1 = args[1];
11897 STRIP_NOPS (arg0);
11898 STRIP_NOPS (arg1);
11899
11900 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11901 {
11902 tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
11903 unsigned i;
11904 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11905 {
11906 n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
11907 n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
11908 }
11909
11910 return n_elts.build ();
11911 }
11912 break;
11913
11914 case SPARC_BUILTIN_PDIST:
11915 case SPARC_BUILTIN_PDISTN:
11916 arg0 = args[0];
11917 arg1 = args[1];
11918 STRIP_NOPS (arg0);
11919 STRIP_NOPS (arg1);
11920 if (code == SPARC_BUILTIN_PDIST)
11921 {
11922 arg2 = args[2];
11923 STRIP_NOPS (arg2);
11924 }
11925 else
11926 arg2 = integer_zero_node;
11927
11928 if (TREE_CODE (arg0) == VECTOR_CST
11929 && TREE_CODE (arg1) == VECTOR_CST
11930 && TREE_CODE (arg2) == INTEGER_CST)
11931 {
11932 bool overflow = false;
11933 widest_int result = wi::to_widest (arg2);
11934 widest_int tmp;
11935 unsigned i;
11936
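/* Accumulate the sum of absolute differences |e0 - e1| of the vector
   elements into RESULT, checking for overflow along the way. */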
11937 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11938 {
11939 tree e0 = VECTOR_CST_ELT (arg0, i);
11940 tree e1 = VECTOR_CST_ELT (arg1, i);
11941
11942 wi::overflow_type neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11943
11944 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11945 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11946 if (wi::neg_p (tmp))
11947 tmp = wi::neg (tmp, &neg2_ovf);
11948 else
11949 neg2_ovf = wi::OVF_NONE;
11950 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11951 overflow |= ((neg1_ovf != wi::OVF_NONE)
11952 | (neg2_ovf != wi::OVF_NONE)
11953 | (add1_ovf != wi::OVF_NONE)
11954 | (add2_ovf != wi::OVF_NONE));
11955 }
11956
11957 gcc_assert (!overflow);
11958
11959 return wide_int_to_tree (rtype, result);
11960 }
11961
11962 default:
11963 break;
11964 }
11965
11966 return NULL_TREE;
11967 }
11968 \f
11969 /* ??? This duplicates information provided to the compiler by the
11970 ??? scheduler description. Some day, teach genautomata to output
11971 ??? the latencies and then CSE will just use that. */
11972
11973 static bool
11974 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11975 int opno ATTRIBUTE_UNUSED,
11976 int *total, bool speed ATTRIBUTE_UNUSED)
11977 {
11978 int code = GET_CODE (x);
11979 bool float_mode_p = FLOAT_MODE_P (mode);
11980
11981 switch (code)
11982 {
11983 case CONST_INT:
11984 if (SMALL_INT (x))
11985 *total = 0;
11986 else
11987 *total = 2;
11988 return true;
11989
11990 case CONST_WIDE_INT:
11991 *total = 0;
11992 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11993 *total += 2;
11994 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11995 *total += 2;
11996 return true;
11997
11998 case HIGH:
11999 *total = 2;
12000 return true;
12001
12002 case CONST:
12003 case LABEL_REF:
12004 case SYMBOL_REF:
12005 *total = 4;
12006 return true;
12007
12008 case CONST_DOUBLE:
12009 *total = 8;
12010 return true;
12011
12012 case MEM:
12013 /* If outer-code was a sign or zero extension, a cost
12014 of COSTS_N_INSNS (1) was already added in. This is
12015 why we are subtracting it back out. */
12016 if (outer_code == ZERO_EXTEND)
12017 {
12018 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
12019 }
12020 else if (outer_code == SIGN_EXTEND)
12021 {
12022 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
12023 }
12024 else if (float_mode_p)
12025 {
12026 *total = sparc_costs->float_load;
12027 }
12028 else
12029 {
12030 *total = sparc_costs->int_load;
12031 }
12032
12033 return true;
12034
12035 case PLUS:
12036 case MINUS:
12037 if (float_mode_p)
12038 *total = sparc_costs->float_plusminus;
12039 else
12040 *total = COSTS_N_INSNS (1);
12041 return false;
12042
12043 case FMA:
12044 {
12045 rtx sub;
12046
12047 gcc_assert (float_mode_p);
12048 *total = sparc_costs->float_mul;
12049
12050 sub = XEXP (x, 0);
12051 if (GET_CODE (sub) == NEG)
12052 sub = XEXP (sub, 0);
12053 *total += rtx_cost (sub, mode, FMA, 0, speed);
12054
12055 sub = XEXP (x, 2);
12056 if (GET_CODE (sub) == NEG)
12057 sub = XEXP (sub, 0);
12058 *total += rtx_cost (sub, mode, FMA, 2, speed);
12059 return true;
12060 }
12061
12062 case MULT:
12063 if (float_mode_p)
12064 *total = sparc_costs->float_mul;
12065 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
12066 *total = COSTS_N_INSNS (25);
12067 else
12068 {
12069 int bit_cost;
12070
12071 bit_cost = 0;
12072 if (sparc_costs->int_mul_bit_factor)
12073 {
12074 int nbits;
12075
12076 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
12077 {
12078 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
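/* Count the bits set in the constant multiplier; each iteration of
   the loop below clears the lowest set bit. */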
12079 for (nbits = 0; value != 0; value &= value - 1)
12080 nbits++;
12081 }
12082 else
12083 nbits = 7;
12084
12085 if (nbits < 3)
12086 nbits = 3;
12087 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
12088 bit_cost = COSTS_N_INSNS (bit_cost);
12089 }
12090
12091 if (mode == DImode || !TARGET_HARD_MUL)
12092 *total = sparc_costs->int_mulX + bit_cost;
12093 else
12094 *total = sparc_costs->int_mul + bit_cost;
12095 }
12096 return false;
12097
12098 case ASHIFT:
12099 case ASHIFTRT:
12100 case LSHIFTRT:
12101 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
12102 return false;
12103
12104 case DIV:
12105 case UDIV:
12106 case MOD:
12107 case UMOD:
12108 if (float_mode_p)
12109 {
12110 if (mode == DFmode)
12111 *total = sparc_costs->float_div_df;
12112 else
12113 *total = sparc_costs->float_div_sf;
12114 }
12115 else
12116 {
12117 if (mode == DImode)
12118 *total = sparc_costs->int_divX;
12119 else
12120 *total = sparc_costs->int_div;
12121 }
12122 return false;
12123
12124 case NEG:
12125 if (! float_mode_p)
12126 {
12127 *total = COSTS_N_INSNS (1);
12128 return false;
12129 }
12130 /* FALLTHRU */
12131
12132 case ABS:
12133 case FLOAT:
12134 case UNSIGNED_FLOAT:
12135 case FIX:
12136 case UNSIGNED_FIX:
12137 case FLOAT_EXTEND:
12138 case FLOAT_TRUNCATE:
12139 *total = sparc_costs->float_move;
12140 return false;
12141
12142 case SQRT:
12143 if (mode == DFmode)
12144 *total = sparc_costs->float_sqrt_df;
12145 else
12146 *total = sparc_costs->float_sqrt_sf;
12147 return false;
12148
12149 case COMPARE:
12150 if (float_mode_p)
12151 *total = sparc_costs->float_cmp;
12152 else
12153 *total = COSTS_N_INSNS (1);
12154 return false;
12155
12156 case IF_THEN_ELSE:
12157 if (float_mode_p)
12158 *total = sparc_costs->float_cmove;
12159 else
12160 *total = sparc_costs->int_cmove;
12161 return false;
12162
12163 case IOR:
12164 /* Handle the NAND vector patterns. */
12165 if (sparc_vector_mode_supported_p (mode)
12166 && GET_CODE (XEXP (x, 0)) == NOT
12167 && GET_CODE (XEXP (x, 1)) == NOT)
12168 {
12169 *total = COSTS_N_INSNS (1);
12170 return true;
12171 }
12172 else
12173 return false;
12174
12175 default:
12176 return false;
12177 }
12178 }
12179
12180 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
12181
12182 static inline bool
12183 general_or_i64_p (reg_class_t rclass)
12184 {
12185 return (rclass == GENERAL_REGS || rclass == I64_REGS);
12186 }
12187
12188 /* Implement TARGET_REGISTER_MOVE_COST. */
12189
12190 static int
12191 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
12192 reg_class_t from, reg_class_t to)
12193 {
12194 bool need_memory = false;
12195
12196 /* This helps postreload CSE to eliminate redundant comparisons. */
12197 if (from == NO_REGS || to == NO_REGS)
12198 return 100;
12199
12200 if (from == FPCC_REGS || to == FPCC_REGS)
12201 need_memory = true;
12202 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
12203 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
12204 {
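/* VIS 3.0 provides direct moves between the integer and FP register
   files, so 4-byte and 8-byte values can avoid the round trip through
   memory; hence the lower costs returned here. */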
12205 if (TARGET_VIS3)
12206 {
12207 int size = GET_MODE_SIZE (mode);
12208 if (size == 8 || size == 4)
12209 {
12210 if (! TARGET_ARCH32 || size == 4)
12211 return 4;
12212 else
12213 return 6;
12214 }
12215 }
12216 need_memory = true;
12217 }
12218
12219 if (need_memory)
12220 {
12221 if (sparc_cpu == PROCESSOR_ULTRASPARC
12222 || sparc_cpu == PROCESSOR_ULTRASPARC3
12223 || sparc_cpu == PROCESSOR_NIAGARA
12224 || sparc_cpu == PROCESSOR_NIAGARA2
12225 || sparc_cpu == PROCESSOR_NIAGARA3
12226 || sparc_cpu == PROCESSOR_NIAGARA4
12227 || sparc_cpu == PROCESSOR_NIAGARA7
12228 || sparc_cpu == PROCESSOR_M8)
12229 return 12;
12230
12231 return 6;
12232 }
12233
12234 return 2;
12235 }
12236
12237 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
12238 This is achieved by means of a manual dynamic stack space allocation in
12239 the current frame. We make the assumption that SEQ doesn't contain any
12240 function calls, with the possible exception of calls to the GOT helper. */
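/* Roughly, the generated sequence is: decrement %sp by SIZE, store REG
   (and REG2, if given) into a scratch slot just above the register save
   area, emit SEQ, reload REG2 and REG from the slot, and increment %sp
   back by SIZE. */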
12241
12242 static void
12243 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
12244 {
12245 /* We must preserve the lowest 16 words for the register save area. */
12246 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
12247 /* We really need only 2 words of fresh stack space. */
12248 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
12249
12250 rtx slot
12251 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
12252 SPARC_STACK_BIAS + offset));
12253
12254 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
12255 emit_insn (gen_rtx_SET (slot, reg));
12256 if (reg2)
12257 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
12258 reg2));
12259 emit_insn (seq);
12260 if (reg2)
12261 emit_insn (gen_rtx_SET (reg2,
12262 adjust_address (slot, word_mode, UNITS_PER_WORD)));
12263 emit_insn (gen_rtx_SET (reg, slot));
12264 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
12265 }
12266
12267 /* Output the assembler code for a thunk function. THUNK_DECL is the
12268 declaration for the thunk function itself, FUNCTION is the decl for
12269 the target function. DELTA is an immediate constant offset to be
12270 added to THIS. If VCALL_OFFSET is nonzero, the word at address
12271 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
12272
12273 static void
12274 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12275 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12276 tree function)
12277 {
12278 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
12279 rtx this_rtx, funexp;
12280 rtx_insn *insn;
12281 unsigned int int_arg_first;
12282
12283 reload_completed = 1;
12284 epilogue_completed = 1;
12285
12286 emit_note (NOTE_INSN_PROLOGUE_END);
12287
12288 if (TARGET_FLAT)
12289 {
12290 sparc_leaf_function_p = 1;
12291
12292 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12293 }
12294 else if (flag_delayed_branch)
12295 {
12296 /* We will emit a regular sibcall below, so we need to instruct
12297 output_sibcall that we are in a leaf function. */
12298 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
12299
12300 /* This will cause final.c to invoke leaf_renumber_regs so we
12301 must behave as if we were in a not-yet-leafified function. */
12302 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
12303 }
12304 else
12305 {
12306 /* We will emit the sibcall manually below, so we will need to
12307 manually spill non-leaf registers. */
12308 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
12309
12310 /* We really are in a leaf function. */
12311 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12312 }
12313
12314 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
12315 returns a structure, the structure return pointer is there instead. */
12316 if (TARGET_ARCH64
12317 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12318 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
12319 else
12320 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
12321
12322 /* Add DELTA. When possible use a plain add, otherwise load it into
12323 a register first. */
12324 if (delta)
12325 {
12326 rtx delta_rtx = GEN_INT (delta);
12327
12328 if (! SPARC_SIMM13_P (delta))
12329 {
12330 rtx scratch = gen_rtx_REG (Pmode, 1);
12331 emit_move_insn (scratch, delta_rtx);
12332 delta_rtx = scratch;
12333 }
12334
12335 /* THIS_RTX += DELTA. */
12336 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
12337 }
12338
12339 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
12340 if (vcall_offset)
12341 {
12342 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
12343 rtx scratch = gen_rtx_REG (Pmode, 1);
12344
12345 gcc_assert (vcall_offset < 0);
12346
12347 /* SCRATCH = *THIS_RTX. */
12348 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
12349
12350 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
12351 may not have any available scratch register at this point. */
12352 if (SPARC_SIMM13_P (vcall_offset))
12353 ;
12354 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
12355 else if (! fixed_regs[5]
12356 /* The below sequence is made up of at least 2 insns,
12357 while the default method may need only one. */
12358 && vcall_offset < -8192)
12359 {
12360 rtx scratch2 = gen_rtx_REG (Pmode, 5);
12361 emit_move_insn (scratch2, vcall_offset_rtx);
12362 vcall_offset_rtx = scratch2;
12363 }
12364 else
12365 {
12366 rtx increment = GEN_INT (-4096);
12367
12368 /* VCALL_OFFSET is a negative number whose typical range can be
12369 estimated as -32768..0 in 32-bit mode. In almost all cases
12370 it is therefore cheaper to emit multiple add insns than
12371 spilling and loading the constant into a register (at least
12372 6 insns). */
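/* For instance, VCALL_OFFSET == -20000 emits four adds of -4096,
   leaving -3616, which fits in a 13-bit signed immediate. */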
12373 while (! SPARC_SIMM13_P (vcall_offset))
12374 {
12375 emit_insn (gen_add2_insn (scratch, increment));
12376 vcall_offset += 4096;
12377 }
12378 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
12379 }
12380
12381 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
12382 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
12383 gen_rtx_PLUS (Pmode,
12384 scratch,
12385 vcall_offset_rtx)));
12386
12387 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
12388 emit_insn (gen_add2_insn (this_rtx, scratch));
12389 }
12390
12391 /* Generate a tail call to the target function. */
12392 if (! TREE_USED (function))
12393 {
12394 assemble_external (function);
12395 TREE_USED (function) = 1;
12396 }
12397 funexp = XEXP (DECL_RTL (function), 0);
12398
12399 if (flag_delayed_branch)
12400 {
12401 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12402 insn = emit_call_insn (gen_sibcall (funexp));
12403 SIBLING_CALL_P (insn) = 1;
12404 }
12405 else
12406 {
12407 /* The hoops we have to jump through in order to generate a sibcall
12408 without using delay slots... */
12409 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12410
12411 if (flag_pic)
12412 {
12413 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12414 start_sequence ();
12415 load_got_register (); /* clobbers %o7 */
12416 if (!TARGET_VXWORKS_RTP)
12417 pic_offset_table_rtx = got_register_rtx;
12418 scratch = sparc_legitimize_pic_address (funexp, scratch);
12419 seq = get_insns ();
12420 end_sequence ();
12421 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12422 }
12423 else if (TARGET_ARCH32)
12424 {
12425 emit_insn (gen_rtx_SET (scratch,
12426 gen_rtx_HIGH (SImode, funexp)));
12427 emit_insn (gen_rtx_SET (scratch,
12428 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12429 }
12430 else /* TARGET_ARCH64 */
12431 {
12432 switch (sparc_code_model)
12433 {
12434 case CM_MEDLOW:
12435 case CM_MEDMID:
12436 /* The destination can serve as a temporary. */
12437 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12438 break;
12439
12440 case CM_MEDANY:
12441 case CM_EMBMEDANY:
12442 /* The destination cannot serve as a temporary. */
12443 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12444 start_sequence ();
12445 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12446 seq = get_insns ();
12447 end_sequence ();
12448 emit_and_preserve (seq, spill_reg, 0);
12449 break;
12450
12451 default:
12452 gcc_unreachable ();
12453 }
12454 }
12455
12456 emit_jump_insn (gen_indirect_jump (scratch));
12457 }
12458
12459 emit_barrier ();
12460
12461 /* Run just enough of rest_of_compilation to get the insns emitted.
12462 There's not really enough bulk here to make other passes such as
12463 instruction scheduling worth while. */
12464 insn = get_insns ();
12465 shorten_branches (insn);
12466 assemble_start_function (thunk_fndecl, fnname);
12467 final_start_function (insn, file, 1);
12468 final (insn, file, 1);
12469 final_end_function ();
12470 assemble_end_function (thunk_fndecl, fnname);
12471
12472 reload_completed = 0;
12473 epilogue_completed = 0;
12474 }
12475
12476 /* Return true if sparc_output_mi_thunk would be able to output the
12477 assembler code for the thunk function specified by the arguments
12478 it is passed, and false otherwise. */
12479 static bool
12480 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12481 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12482 HOST_WIDE_INT vcall_offset,
12483 const_tree function ATTRIBUTE_UNUSED)
12484 {
12485 /* Bound the loop used in the default method above. */
12486 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12487 }
12488
12489 /* How to allocate a 'struct machine_function'. */
12490
12491 static struct machine_function *
12492 sparc_init_machine_status (void)
12493 {
12494 return ggc_cleared_alloc<machine_function> ();
12495 }
12496 \f
12497 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
12498
12499 static unsigned HOST_WIDE_INT
12500 sparc_asan_shadow_offset (void)
12501 {
12502 return TARGET_ARCH64 ? (HOST_WIDE_INT_1 << 43) : (HOST_WIDE_INT_1 << 29);
12503 }
12504 \f
12505 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12506 We need to emit DTP-relative relocations. */
12507
12508 static void
12509 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12510 {
12511 switch (size)
12512 {
12513 case 4:
12514 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12515 break;
12516 case 8:
12517 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12518 break;
12519 default:
12520 gcc_unreachable ();
12521 }
12522 output_addr_const (file, x);
12523 fputs (")", file);
12524 }
12525
12526 /* Do whatever processing is required at the end of a file. */
12527
12528 static void
12529 sparc_file_end (void)
12530 {
12531 /* If we need to emit the special GOT helper function, do so now. */
12532 if (got_helper_rtx)
12533 {
12534 const char *name = XSTR (got_helper_rtx, 0);
12535 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12536 #ifdef DWARF2_UNWIND_INFO
12537 bool do_cfi;
12538 #endif
12539
12540 if (USE_HIDDEN_LINKONCE)
12541 {
12542 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12543 get_identifier (name),
12544 build_function_type_list (void_type_node,
12545 NULL_TREE));
12546 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12547 NULL_TREE, void_type_node);
12548 TREE_PUBLIC (decl) = 1;
12549 TREE_STATIC (decl) = 1;
12550 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12551 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12552 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12553 resolve_unique_section (decl, 0, flag_function_sections);
12554 allocate_struct_function (decl, true);
12555 cfun->is_thunk = 1;
12556 current_function_decl = decl;
12557 init_varasm_status ();
12558 assemble_start_function (decl, name);
12559 }
12560 else
12561 {
12562 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12563 switch_to_section (text_section);
12564 if (align > 0)
12565 ASM_OUTPUT_ALIGN (asm_out_file, align);
12566 ASM_OUTPUT_LABEL (asm_out_file, name);
12567 }
12568
12569 #ifdef DWARF2_UNWIND_INFO
12570 do_cfi = dwarf2out_do_cfi_asm ();
12571 if (do_cfi)
12572 fprintf (asm_out_file, "\t.cfi_startproc\n");
12573 #endif
12574 if (flag_delayed_branch)
12575 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12576 reg_name, reg_name);
12577 else
12578 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12579 reg_name, reg_name);
12580 #ifdef DWARF2_UNWIND_INFO
12581 if (do_cfi)
12582 fprintf (asm_out_file, "\t.cfi_endproc\n");
12583 #endif
12584 }
12585
12586 if (NEED_INDICATE_EXEC_STACK)
12587 file_end_indicate_exec_stack ();
12588
12589 #ifdef TARGET_SOLARIS
12590 solaris_file_end ();
12591 #endif
12592 }
12593
12594 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12595 /* Implement TARGET_MANGLE_TYPE. */
12596
12597 static const char *
12598 sparc_mangle_type (const_tree type)
12599 {
12600 if (TARGET_ARCH32
12601 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12602 && TARGET_LONG_DOUBLE_128)
12603 return "g";
12604
12605 /* For all other types, use normal C++ mangling. */
12606 return NULL;
12607 }
12608 #endif
12609
12610 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12611 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12612 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
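/* For example, a call with load_store == 1 (a load) and before_after == 2
   (barrier after the access), as for an acquire load: under the TSO model
   the implied mask already contains LoadLoad, LoadStore and StoreStore, so
   mm ends up 0 and no membar instruction is emitted. */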
12613
12614 void
12615 sparc_emit_membar_for_model (enum memmodel model,
12616 int load_store, int before_after)
12617 {
12618 /* Bits for the MEMBAR mmask field. */
12619 const int LoadLoad = 1;
12620 const int StoreLoad = 2;
12621 const int LoadStore = 4;
12622 const int StoreStore = 8;
12623
12624 int mm = 0, implied = 0;
12625
12626 switch (sparc_memory_model)
12627 {
12628 case SMM_SC:
12629 /* Sequential Consistency. All memory transactions are immediately
12630 visible in sequential execution order. No barriers needed. */
12631 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12632 break;
12633
12634 case SMM_TSO:
12635 /* Total Store Ordering: all memory transactions with store semantics
12636 are followed by an implied StoreStore. */
12637 implied |= StoreStore;
12638
12639 /* If we're not looking for a raw barrier (before+after), then atomic
12640 operations get the benefit of being both load and store. */
12641 if (load_store == 3 && before_after == 1)
12642 implied |= StoreLoad;
12643 /* FALLTHRU */
12644
12645 case SMM_PSO:
12646 /* Partial Store Ordering: all memory transactions with load semantics
12647 are followed by an implied LoadLoad | LoadStore. */
12648 implied |= LoadLoad | LoadStore;
12649
12650 /* If we're not looking for a raw barrier (before+after), then atomic
12651 operations get the benefit of being both load and store. */
12652 if (load_store == 3 && before_after == 2)
12653 implied |= StoreLoad | StoreStore;
12654 /* FALLTHRU */
12655
12656 case SMM_RMO:
12657 /* Relaxed Memory Ordering: no implicit bits. */
12658 break;
12659
12660 default:
12661 gcc_unreachable ();
12662 }
12663
12664 if (before_after & 1)
12665 {
12666 if (is_mm_release (model) || is_mm_acq_rel (model)
12667 || is_mm_seq_cst (model))
12668 {
12669 if (load_store & 1)
12670 mm |= LoadLoad | StoreLoad;
12671 if (load_store & 2)
12672 mm |= LoadStore | StoreStore;
12673 }
12674 }
12675 if (before_after & 2)
12676 {
12677 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12678 || is_mm_seq_cst (model))
12679 {
12680 if (load_store & 1)
12681 mm |= LoadLoad | LoadStore;
12682 if (load_store & 2)
12683 mm |= StoreLoad | StoreStore;
12684 }
12685 }
12686
12687 /* Remove the bits implied by the system memory model. */
12688 mm &= ~implied;
12689
12690 /* For raw barriers (before+after), always emit a barrier.
12691 This will become a compile-time barrier if needed. */
12692 if (mm || before_after == 3)
12693 emit_insn (gen_membar (GEN_INT (mm)));
12694 }
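
/* As a usage sketch: the compare-and-swap expander further down brackets the
   atomic access with sparc_emit_membar_for_model (model, 3, 1) beforehand
   and sparc_emit_membar_for_model (model, 3, 2) afterwards, i.e. the access
   has both load and store semantics (LOAD_STORE == 3) and gets whatever
   barrier MODEL requires on each side.  */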
12695
12696 /* Expand code to perform an 8 or 16-bit compare and swap by doing 32-bit
12697 compare and swap on the word containing the byte or half-word. */
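
/* In outline: the address is rounded down to the containing word, the byte
   offset is turned into a (big-endian) shift count, and OLDVAL/NEWVAL are
   shifted and masked into position.  The loop below retries a full-word
   compare-and-swap, re-merging the bytes outside the accessed field, and
   only gives up once those surrounding bytes are observed unchanged, i.e.
   once the mismatch is genuinely in the narrow field itself.  */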
12698
12699 static void
12700 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12701 rtx oldval, rtx newval)
12702 {
12703 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12704 rtx addr = gen_reg_rtx (Pmode);
12705 rtx off = gen_reg_rtx (SImode);
12706 rtx oldv = gen_reg_rtx (SImode);
12707 rtx newv = gen_reg_rtx (SImode);
12708 rtx oldvalue = gen_reg_rtx (SImode);
12709 rtx newvalue = gen_reg_rtx (SImode);
12710 rtx res = gen_reg_rtx (SImode);
12711 rtx resv = gen_reg_rtx (SImode);
12712 rtx memsi, val, mask, cc;
12713
12714 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12715
12716 if (Pmode != SImode)
12717 addr1 = gen_lowpart (SImode, addr1);
12718 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12719
12720 memsi = gen_rtx_MEM (SImode, addr);
12721 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12722 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12723
12724 val = copy_to_reg (memsi);
12725
12726 emit_insn (gen_rtx_SET (off,
12727 gen_rtx_XOR (SImode, off,
12728 GEN_INT (GET_MODE (mem) == QImode
12729 ? 3 : 2))));
12730
12731 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12732
12733 if (GET_MODE (mem) == QImode)
12734 mask = force_reg (SImode, GEN_INT (0xff));
12735 else
12736 mask = force_reg (SImode, GEN_INT (0xffff));
12737
12738 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12739
12740 emit_insn (gen_rtx_SET (val,
12741 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12742 val)));
12743
12744 oldval = gen_lowpart (SImode, oldval);
12745 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12746
12747 newval = gen_lowpart_common (SImode, newval);
12748 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12749
12750 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12751
12752 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12753
12754 rtx_code_label *end_label = gen_label_rtx ();
12755 rtx_code_label *loop_label = gen_label_rtx ();
12756 emit_label (loop_label);
12757
12758 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12759
12760 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12761
12762 emit_move_insn (bool_result, const1_rtx);
12763
12764 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12765
12766 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12767
12768 emit_insn (gen_rtx_SET (resv,
12769 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12770 res)));
12771
12772 emit_move_insn (bool_result, const0_rtx);
12773
12774 cc = gen_compare_reg_1 (NE, resv, val);
12775 emit_insn (gen_rtx_SET (val, resv));
12776
12777 /* Use cbranchcc4 to separate the compare and branch! */
12778 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12779 cc, const0_rtx, loop_label));
12780
12781 emit_label (end_label);
12782
12783 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12784
12785 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12786
12787 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
12788 }
12789
12790 /* Expand code to perform a compare-and-swap. */
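/* OPERANDS follows the layout of the standard atomic_compare_and_swap<mode>
   named pattern: bool output, value output, memory, expected value, desired
   value, weak flag, success memory model and failure memory model.  Only the
   success model (operands[6]) is consulted here.  */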
12791
12792 void
12793 sparc_expand_compare_and_swap (rtx operands[])
12794 {
12795 rtx bval, retval, mem, oldval, newval;
12796 machine_mode mode;
12797 enum memmodel model;
12798
12799 bval = operands[0];
12800 retval = operands[1];
12801 mem = operands[2];
12802 oldval = operands[3];
12803 newval = operands[4];
12804 model = (enum memmodel) INTVAL (operands[6]);
12805 mode = GET_MODE (mem);
12806
12807 sparc_emit_membar_for_model (model, 3, 1);
12808
12809 if (reg_overlap_mentioned_p (retval, oldval))
12810 oldval = copy_to_reg (oldval);
12811
12812 if (mode == QImode || mode == HImode)
12813 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12814 else
12815 {
12816 rtx (*gen) (rtx, rtx, rtx, rtx);
12817 rtx x;
12818
12819 if (mode == SImode)
12820 gen = gen_atomic_compare_and_swapsi_1;
12821 else
12822 gen = gen_atomic_compare_and_swapdi_1;
12823 emit_insn (gen (retval, mem, oldval, newval));
12824
12825 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12826 if (x != bval)
12827 convert_move (bval, x, 1);
12828 }
12829
12830 sparc_emit_membar_for_model (model, 3, 2);
12831 }
12832
12833 void
12834 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12835 {
12836 rtx t_1, t_2, t_3;
12837
12838 sel = gen_lowpart (DImode, sel);
12839 switch (vmode)
12840 {
12841 case E_V2SImode:
12842 /* inp = xxxxxxxAxxxxxxxB */
12843 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12844 NULL_RTX, 1, OPTAB_DIRECT);
12845 /* t_1 = ....xxxxxxxAxxx. */
12846 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12847 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12848 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12849 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12850 /* sel = .......B */
12851 /* t_1 = ...A.... */
12852 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12853 /* sel = ...A...B */
12854 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12855 /* sel = AAAABBBB * 4 */
12856 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12857 /* sel = { A*4, A*4+1, A*4+2, ... } */
12858 break;
12859
12860 case E_V4HImode:
12861 /* inp = xxxAxxxBxxxCxxxD */
12862 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12863 NULL_RTX, 1, OPTAB_DIRECT);
12864 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12865 NULL_RTX, 1, OPTAB_DIRECT);
12866 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12867 NULL_RTX, 1, OPTAB_DIRECT);
12868 /* t_1 = ..xxxAxxxBxxxCxx */
12869 /* t_2 = ....xxxAxxxBxxxC */
12870 /* t_3 = ......xxxAxxxBxx */
12871 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12872 GEN_INT (0x07),
12873 NULL_RTX, 1, OPTAB_DIRECT);
12874 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12875 GEN_INT (0x0700),
12876 NULL_RTX, 1, OPTAB_DIRECT);
12877 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12878 GEN_INT (0x070000),
12879 NULL_RTX, 1, OPTAB_DIRECT);
12880 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12881 GEN_INT (0x07000000),
12882 NULL_RTX, 1, OPTAB_DIRECT);
12883 /* sel = .......D */
12884 /* t_1 = .....C.. */
12885 /* t_2 = ...B.... */
12886 /* t_3 = .A...... */
12887 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12888 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12889 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12890 /* sel = .A.B.C.D */
12891 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12892 /* sel = AABBCCDD * 2 */
12893 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12894 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12895 break;
12896
12897 case E_V8QImode:
12898 /* input = xAxBxCxDxExFxGxH */
12899 sel = expand_simple_binop (DImode, AND, sel,
12900 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12901 | 0x0f0f0f0f),
12902 NULL_RTX, 1, OPTAB_DIRECT);
12903 /* sel = .A.B.C.D.E.F.G.H */
12904 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12905 NULL_RTX, 1, OPTAB_DIRECT);
12906 /* t_1 = ..A.B.C.D.E.F.G. */
12907 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12908 NULL_RTX, 1, OPTAB_DIRECT);
12909 /* sel = .AABBCCDDEEFFGGH */
12910 sel = expand_simple_binop (DImode, AND, sel,
12911 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12912 | 0xff00ff),
12913 NULL_RTX, 1, OPTAB_DIRECT);
12914 /* sel = ..AB..CD..EF..GH */
12915 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12916 NULL_RTX, 1, OPTAB_DIRECT);
12917 /* t_1 = ....AB..CD..EF.. */
12918 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12919 NULL_RTX, 1, OPTAB_DIRECT);
12920 /* sel = ..ABABCDCDEFEFGH */
12921 sel = expand_simple_binop (DImode, AND, sel,
12922 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12923 NULL_RTX, 1, OPTAB_DIRECT);
12924 /* sel = ....ABCD....EFGH */
12925 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12926 NULL_RTX, 1, OPTAB_DIRECT);
12927 /* t_1 = ........ABCD.... */
12928 sel = gen_lowpart (SImode, sel);
12929 t_1 = gen_lowpart (SImode, t_1);
12930 break;
12931
12932 default:
12933 gcc_unreachable ();
12934 }
12935
12936 /* Always perform the final addition/merge within the bmask insn. */
12937 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
12938 }
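
/* The useful side effect of the bmask insn is that it also deposits the
   computed mask in the GSR register, which the subsequent bshuffle insns
   read; the SImode destination register above is only a scratch.  */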
12939
12940 /* Implement TARGET_VEC_PERM_CONST. */
12941
12942 static bool
12943 sparc_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
12944 rtx op1, const vec_perm_indices &sel)
12945 {
12946 if (!TARGET_VIS2)
12947 return false;
12948
12949 /* All permutes are supported. */
12950 if (!target)
12951 return true;
12952
12953 /* Force target-independent code to convert constant permutations on other
12954 modes down to V8QI. Rely on this to avoid the complexity of the byte
12955 order of the permutation. */
12956 if (vmode != V8QImode)
12957 return false;
12958
12959 unsigned int i, mask;
12960 for (i = mask = 0; i < 8; ++i)
12961 mask |= (sel[i] & 0xf) << (28 - i*4);
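  /* One nibble per byte index, most significant nibble first: e.g. the
     identity selector { 0, 1, ..., 7 } yields the mask 0x01234567.  */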
12962 rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
12963
12964 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
12965 emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
12966 return true;
12967 }
12968
12969 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12970
12971 static bool
12972 sparc_frame_pointer_required (void)
12973 {
12974 /* If the stack pointer is dynamically modified in the function, it cannot
12975 serve as the frame pointer. */
12976 if (cfun->calls_alloca)
12977 return true;
12978
12979 /* If the function receives nonlocal gotos, it needs to save the frame
12980 pointer in the nonlocal_goto_save_area object. */
12981 if (cfun->has_nonlocal_label)
12982 return true;
12983
12984 /* In flat mode, that's it. */
12985 if (TARGET_FLAT)
12986 return false;
12987
12988 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12989 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12990 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12991 }
12992
12993 /* The way this is structured, we can't eliminate SFP in favor of SP
12994 if the frame pointer is required: we want to use the SFP->HFP elimination
12995 in that case. But the test in update_eliminables doesn't know we are
12996 assuming below that we only do the former elimination. */
12997
12998 static bool
12999 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
13000 {
13001 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
13002 }
13003
13004 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
13005 they won't be allocated. */
13006
13007 static void
13008 sparc_conditional_register_usage (void)
13009 {
13010 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
13011 {
13012 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13013 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13014 }
13015 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
13016 then honor it. */
13017 if (TARGET_ARCH32 && fixed_regs[5])
13018 fixed_regs[5] = 1;
13019 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
13020 fixed_regs[5] = 0;
13021 if (! TARGET_V9)
13022 {
13023 int regno;
13024 for (regno = SPARC_FIRST_V9_FP_REG;
13025 regno <= SPARC_LAST_V9_FP_REG;
13026 regno++)
13027 fixed_regs[regno] = 1;
13028 /* %fcc0 is used by v8 and v9. */
13029 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
13030 regno <= SPARC_LAST_V9_FCC_REG;
13031 regno++)
13032 fixed_regs[regno] = 1;
13033 }
13034 if (! TARGET_FPU)
13035 {
13036 int regno;
13037 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
13038 fixed_regs[regno] = 1;
13039 }
13040 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
13041 then honor it. Likewise with g3 and g4. */
13042 if (fixed_regs[2] == 2)
13043 fixed_regs[2] = ! TARGET_APP_REGS;
13044 if (fixed_regs[3] == 2)
13045 fixed_regs[3] = ! TARGET_APP_REGS;
13046 if (TARGET_ARCH32 && fixed_regs[4] == 2)
13047 fixed_regs[4] = ! TARGET_APP_REGS;
13048 else if (TARGET_CM_EMBMEDANY)
13049 fixed_regs[4] = 1;
13050 else if (fixed_regs[4] == 2)
13051 fixed_regs[4] = 0;
13052 if (TARGET_FLAT)
13053 {
13054 int regno;
13055 /* Disable leaf functions. */
13056 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
13057 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
13058 leaf_reg_remap [regno] = regno;
13059 }
13060 if (TARGET_VIS)
13061 global_regs[SPARC_GSR_REG] = 1;
13062 }
13063
13064 /* Implement TARGET_USE_PSEUDO_PIC_REG. */
13065
13066 static bool
13067 sparc_use_pseudo_pic_reg (void)
13068 {
13069 return !TARGET_VXWORKS_RTP && flag_pic;
13070 }
13071
13072 /* Implement TARGET_INIT_PIC_REG. */
13073
13074 static void
13075 sparc_init_pic_reg (void)
13076 {
13077 edge entry_edge;
13078 rtx_insn *seq;
13079
13080 if (!crtl->uses_pic_offset_table)
13081 return;
13082
13083 start_sequence ();
13084 load_got_register ();
13085 if (!TARGET_VXWORKS_RTP)
13086 emit_move_insn (pic_offset_table_rtx, got_register_rtx);
13087 seq = get_insns ();
13088 end_sequence ();
13089
13090 entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13091 insert_insn_on_edge (seq, entry_edge);
13092 commit_one_edge_insertion (entry_edge);
13093 }
13094
13095 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
13096
13097 - We can't load constants into FP registers.
13098 - We can't load FP constants into integer registers when soft-float,
13099 because there is no soft-float pattern with an r/F constraint.
13100 - We can't load FP constants into integer registers for TFmode unless
13101 it is 0.0L, because there is no movtf pattern with an r/F constraint.
13102 - Try and reload integer constants (symbolic or otherwise) back into
13103 registers directly, rather than having them dumped to memory. */
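
/* For example, with -msoft-float a DFmode constant gets NO_REGS below and is
   therefore dumped to memory, since the constant handling rejects
   floating-point constants whenever TARGET_FPU is unset.  */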
13104
13105 static reg_class_t
13106 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
13107 {
13108 machine_mode mode = GET_MODE (x);
13109 if (CONSTANT_P (x))
13110 {
13111 if (FP_REG_CLASS_P (rclass)
13112 || rclass == GENERAL_OR_FP_REGS
13113 || rclass == GENERAL_OR_EXTRA_FP_REGS
13114 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
13115 || (mode == TFmode && ! const_zero_operand (x, mode)))
13116 return NO_REGS;
13117
13118 if (GET_MODE_CLASS (mode) == MODE_INT)
13119 return GENERAL_REGS;
13120
13121 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13122 {
13123 if (! FP_REG_CLASS_P (rclass)
13124 || !(const_zero_operand (x, mode)
13125 || const_all_ones_operand (x, mode)))
13126 return NO_REGS;
13127 }
13128 }
13129
13130 if (TARGET_VIS3
13131 && ! TARGET_ARCH64
13132 && (rclass == EXTRA_FP_REGS
13133 || rclass == GENERAL_OR_EXTRA_FP_REGS))
13134 {
13135 int regno = true_regnum (x);
13136
13137 if (SPARC_INT_REG_P (regno))
13138 return (rclass == EXTRA_FP_REGS
13139 ? FP_REGS : GENERAL_OR_FP_REGS);
13140 }
13141
13142 return rclass;
13143 }
13144
13145 /* Return true if we use LRA instead of the reload pass. */
13146
13147 static bool
13148 sparc_lra_p (void)
13149 {
13150 return TARGET_LRA;
13151 }
13152
13153 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
13154 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
13155
13156 const char *
13157 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
13158 {
13159 char mulstr[32];
13160
13161 gcc_assert (! TARGET_ARCH64);
13162
13163 if (sparc_check_64 (operands[1], insn) <= 0)
13164 output_asm_insn ("srl\t%L1, 0, %L1", operands);
13165 if (which_alternative == 1)
13166 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
13167 if (GET_CODE (operands[2]) == CONST_INT)
13168 {
13169 if (which_alternative == 1)
13170 {
13171 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13172 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
13173 output_asm_insn (mulstr, operands);
13174 return "srlx\t%L0, 32, %H0";
13175 }
13176 else
13177 {
13178 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13179 output_asm_insn ("or\t%L1, %3, %3", operands);
13180 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
13181 output_asm_insn (mulstr, operands);
13182 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13183 return "mov\t%3, %L0";
13184 }
13185 }
13186 else if (rtx_equal_p (operands[1], operands[2]))
13187 {
13188 if (which_alternative == 1)
13189 {
13190 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13191 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
13192 output_asm_insn (mulstr, operands);
13193 return "srlx\t%L0, 32, %H0";
13194 }
13195 else
13196 {
13197 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13198 output_asm_insn ("or\t%L1, %3, %3", operands);
13199 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
13200 output_asm_insn (mulstr, operands);
13201 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13202 return "mov\t%3, %L0";
13203 }
13204 }
13205 if (sparc_check_64 (operands[2], insn) <= 0)
13206 output_asm_insn ("srl\t%L2, 0, %L2", operands);
13207 if (which_alternative == 1)
13208 {
13209 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13210 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
13211 output_asm_insn ("or\t%L2, %L1, %L1", operands);
13212 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
13213 output_asm_insn (mulstr, operands);
13214 return "srlx\t%L0, 32, %H0";
13215 }
13216 else
13217 {
13218 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13219 output_asm_insn ("sllx\t%H2, 32, %4", operands);
13220 output_asm_insn ("or\t%L1, %3, %3", operands);
13221 output_asm_insn ("or\t%L2, %4, %4", operands);
13222 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
13223 output_asm_insn (mulstr, operands);
13224 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13225 return "mov\t%3, %L0";
13226 }
13227 }
13228
13229 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13230 all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn. MODE
13231 and INNER_MODE are the modes describing TARGET. */
13232
13233 static void
13234 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
13235 machine_mode inner_mode)
13236 {
13237 rtx t1, final_insn, sel;
13238 int bmask;
13239
13240 t1 = gen_reg_rtx (mode);
13241
13242 elt = convert_modes (SImode, inner_mode, elt, true);
13243 emit_move_insn (gen_lowpart(SImode, t1), elt);
13244
13245 switch (mode)
13246 {
13247 case E_V2SImode:
13248 final_insn = gen_bshufflev2si_vis (target, t1, t1);
13249 bmask = 0x45674567;
13250 break;
13251 case E_V4HImode:
13252 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
13253 bmask = 0x67676767;
13254 break;
13255 case E_V8QImode:
13256 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
13257 bmask = 0x77777777;
13258 break;
13259 default:
13260 gcc_unreachable ();
13261 }
13262
13263 sel = force_reg (SImode, GEN_INT (bmask));
13264 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
13265 emit_insn (final_insn);
13266 }
13267
13268 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13269 all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn. */
13270
13271 static void
13272 vector_init_fpmerge (rtx target, rtx elt)
13273 {
13274 rtx t1, t2, t2_low, t3, t3_low;
13275
13276 t1 = gen_reg_rtx (V4QImode);
13277 elt = convert_modes (SImode, QImode, elt, true);
13278 emit_move_insn (gen_lowpart (SImode, t1), elt);
13279
13280 t2 = gen_reg_rtx (V8QImode);
13281 t2_low = gen_lowpart (V4QImode, t2);
13282 emit_insn (gen_fpmerge_vis (t2, t1, t1));
13283
13284 t3 = gen_reg_rtx (V8QImode);
13285 t3_low = gen_lowpart (V4QImode, t3);
13286 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
13287
13288 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
13289 }
13290
13291 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13292 all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn. */
13293
13294 static void
13295 vector_init_faligndata (rtx target, rtx elt)
13296 {
13297 rtx t1 = gen_reg_rtx (V4HImode);
13298 int i;
13299
13300 elt = convert_modes (SImode, HImode, elt, true);
13301 emit_move_insn (gen_lowpart (SImode, t1), elt);
13302
13303 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
13304 force_reg (SImode, GEN_INT (6)),
13305 const0_rtx));
13306
13307 for (i = 0; i < 4; i++)
13308 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
13309 }
13310
13311 /* Emit code to initialize TARGET to values for individual fields VALS. */
13312
13313 void
13314 sparc_expand_vector_init (rtx target, rtx vals)
13315 {
13316 const machine_mode mode = GET_MODE (target);
13317 const machine_mode inner_mode = GET_MODE_INNER (mode);
13318 const int n_elts = GET_MODE_NUNITS (mode);
13319 int i, n_var = 0;
13320 bool all_same = true;
13321 rtx mem;
13322
13323 for (i = 0; i < n_elts; i++)
13324 {
13325 rtx x = XVECEXP (vals, 0, i);
13326 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
13327 n_var++;
13328
13329 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
13330 all_same = false;
13331 }
13332
13333 if (n_var == 0)
13334 {
13335 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
13336 return;
13337 }
13338
13339 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
13340 {
13341 if (GET_MODE_SIZE (inner_mode) == 4)
13342 {
13343 emit_move_insn (gen_lowpart (SImode, target),
13344 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
13345 return;
13346 }
13347 else if (GET_MODE_SIZE (inner_mode) == 8)
13348 {
13349 emit_move_insn (gen_lowpart (DImode, target),
13350 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
13351 return;
13352 }
13353 }
13354 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
13355 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
13356 {
13357 emit_move_insn (gen_highpart (word_mode, target),
13358 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
13359 emit_move_insn (gen_lowpart (word_mode, target),
13360 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
13361 return;
13362 }
13363
13364 if (all_same && GET_MODE_SIZE (mode) == 8)
13365 {
13366 if (TARGET_VIS2)
13367 {
13368 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
13369 return;
13370 }
13371 if (mode == V8QImode)
13372 {
13373 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
13374 return;
13375 }
13376 if (mode == V4HImode)
13377 {
13378 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
13379 return;
13380 }
13381 }
13382
13383 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
13384 for (i = 0; i < n_elts; i++)
13385 emit_move_insn (adjust_address_nv (mem, inner_mode,
13386 i * GET_MODE_SIZE (inner_mode)),
13387 XVECEXP (vals, 0, i));
13388 emit_move_insn (target, mem);
13389 }
13390
13391 /* Implement TARGET_SECONDARY_RELOAD. */
13392
13393 static reg_class_t
13394 sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
13395 machine_mode mode, secondary_reload_info *sri)
13396 {
13397 enum reg_class rclass = (enum reg_class) rclass_i;
13398
13399 sri->icode = CODE_FOR_nothing;
13400 sri->extra_cost = 0;
13401
13402 /* We need a temporary when loading/storing a HImode/QImode value
13403 between memory and the FPU registers. This can happen when combine puts
13404 a paradoxical subreg in a float/fix conversion insn. */
13405 if (FP_REG_CLASS_P (rclass)
13406 && (mode == HImode || mode == QImode)
13407 && (GET_CODE (x) == MEM
13408 || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
13409 && true_regnum (x) == -1)))
13410 return GENERAL_REGS;
13411
13412 /* On 32-bit we need a temporary when loading/storing a DFmode value
13413 between unaligned memory and the upper FPU registers. */
13414 if (TARGET_ARCH32
13415 && rclass == EXTRA_FP_REGS
13416 && mode == DFmode
13417 && GET_CODE (x) == MEM
13418 && ! mem_min_alignment (x, 8))
13419 return FP_REGS;
13420
13421 if (((TARGET_CM_MEDANY
13422 && symbolic_operand (x, mode))
13423 || (TARGET_CM_EMBMEDANY
13424 && text_segment_operand (x, mode)))
13425 && ! flag_pic)
13426 {
13427 if (in_p)
13428 sri->icode = direct_optab_handler (reload_in_optab, mode);
13429 else
13430 sri->icode = direct_optab_handler (reload_out_optab, mode);
13431 return NO_REGS;
13432 }
13433
13434 if (TARGET_VIS3 && TARGET_ARCH32)
13435 {
13436 int regno = true_regnum (x);
13437
13438 /* When using VIS3 fp<-->int register moves, on 32-bit we have
13439 to move 8-byte values in 4-byte pieces. This only works via
13440 FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
13441 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
13442 an FP_REGS intermediate move. */
13443 if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
13444 || ((general_or_i64_p (rclass)
13445 || rclass == GENERAL_OR_FP_REGS)
13446 && SPARC_FP_REG_P (regno)))
13447 {
13448 sri->extra_cost = 2;
13449 return FP_REGS;
13450 }
13451 }
13452
13453 return NO_REGS;
13454 }
13455
13456 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
13457
13458 On SPARC, when VIS3 is not available, it is not possible to move data
13459 directly between GENERAL_REGS and FP_REGS. */
13460
13461 static bool
13462 sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
13463 reg_class_t class2)
13464 {
13465 return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
13466 && (! TARGET_VIS3
13467 || GET_MODE_SIZE (mode) > 8
13468 || GET_MODE_SIZE (mode) < 4));
13469 }
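
/* Concretely: with VIS3, 4- to 8-byte values move directly between the
   integer and FP register files, while smaller (QI/HImode) and larger
   (TFmode) values, or any value without VIS3, still go through memory.  */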
13470
13471 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
13472
13473 get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
13474 because the movsi and movsf patterns don't handle r/f moves.
13475 For v8 we copy the default definition. */
13476
13477 static machine_mode
13478 sparc_secondary_memory_needed_mode (machine_mode mode)
13479 {
13480 if (TARGET_ARCH64)
13481 {
13482 if (GET_MODE_BITSIZE (mode) < 32)
13483 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
13484 return mode;
13485 }
13486 else
13487 {
13488 if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
13489 return mode_for_size (BITS_PER_WORD,
13490 GET_MODE_CLASS (mode), 0).require ();
13491 return mode;
13492 }
13493 }
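
/* For instance, a QImode or HImode value is given an SImode secondary-memory
   slot in both the 64-bit and the 32-bit case (BITS_PER_WORD being 32 on
   the latter).  */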
13494
13495 /* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
13496 OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
13497
13498 bool
13499 sparc_expand_conditional_move (machine_mode mode, rtx *operands)
13500 {
13501 enum rtx_code rc = GET_CODE (operands[1]);
13502 machine_mode cmp_mode;
13503 rtx cc_reg, dst, cmp;
13504
13505 cmp = operands[1];
13506 if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
13507 return false;
13508
13509 if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
13510 cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
13511
13512 cmp_mode = GET_MODE (XEXP (cmp, 0));
13513 rc = GET_CODE (cmp);
13514
13515 dst = operands[0];
13516 if (! rtx_equal_p (operands[2], dst)
13517 && ! rtx_equal_p (operands[3], dst))
13518 {
13519 if (reg_overlap_mentioned_p (dst, cmp))
13520 dst = gen_reg_rtx (mode);
13521
13522 emit_move_insn (dst, operands[3]);
13523 }
13524 else if (operands[2] == dst)
13525 {
13526 operands[2] = operands[3];
13527
13528 if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
13529 rc = reverse_condition_maybe_unordered (rc);
13530 else
13531 rc = reverse_condition (rc);
13532 }
13533
13534 if (XEXP (cmp, 1) == const0_rtx
13535 && GET_CODE (XEXP (cmp, 0)) == REG
13536 && cmp_mode == DImode
13537 && v9_regcmp_p (rc))
13538 cc_reg = XEXP (cmp, 0);
13539 else
13540 cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
13541
13542 cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
13543
13544 emit_insn (gen_rtx_SET (dst,
13545 gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
13546
13547 if (dst != operands[0])
13548 emit_move_insn (operands[0], dst);
13549
13550 return true;
13551 }
13552
13553 /* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
13554 into OPERANDS[0] in MODE, depending on the outcome of the comparison of
13555 OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
13556 FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
13557 code to be used for the condition mask. */
13558
13559 void
13560 sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
13561 {
13562 rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
13563 enum rtx_code code = GET_CODE (operands[3]);
13564
13565 mask = gen_reg_rtx (Pmode);
13566 cop0 = operands[4];
13567 cop1 = operands[5];
13568 if (code == LT || code == GE)
13569 {
13570 rtx t;
13571
13572 code = swap_condition (code);
13573 t = cop0; cop0 = cop1; cop1 = t;
13574 }
13575
13576 gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
13577
13578 fcmp = gen_rtx_UNSPEC (Pmode,
13579 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
13580 fcode);
13581
13582 cmask = gen_rtx_UNSPEC (DImode,
13583 gen_rtvec (2, mask, gsr),
13584 ccode);
13585
13586 bshuf = gen_rtx_UNSPEC (mode,
13587 gen_rtvec (3, operands[1], operands[2], gsr),
13588 UNSPEC_BSHUFFLE);
13589
13590 emit_insn (gen_rtx_SET (mask, fcmp));
13591 emit_insn (gen_rtx_SET (gsr, cmask));
13592
13593 emit_insn (gen_rtx_SET (operands[0], bshuf));
13594 }
13595
13596 /* On sparc, any mode which naturally allocates into the float
13597 registers should return 4 here. */
13598
13599 unsigned int
13600 sparc_regmode_natural_size (machine_mode mode)
13601 {
13602 int size = UNITS_PER_WORD;
13603
13604 if (TARGET_ARCH64)
13605 {
13606 enum mode_class mclass = GET_MODE_CLASS (mode);
13607
13608 if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
13609 size = 4;
13610 }
13611
13612 return size;
13613 }
13614
13615 /* Implement TARGET_HARD_REGNO_NREGS.
13616
13617 On SPARC, ordinary registers hold 32 bits worth; this means both
13618 integer and floating point registers. On v9, integer regs hold 64
13619 bits worth; floating point regs hold 32 bits worth (this includes the
13620 new fp regs as even the odd ones are included in the hard register
13621 count). */
13622
13623 static unsigned int
13624 sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
13625 {
13626 if (regno == SPARC_GSR_REG)
13627 return 1;
13628 if (TARGET_ARCH64)
13629 {
13630 if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
13631 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
13632 return CEIL (GET_MODE_SIZE (mode), 4);
13633 }
13634 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
13635 }
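
/* For example, in 64-bit mode a DFmode value occupies a single integer
   register (CEIL (8, UNITS_PER_WORD) == 1) but two FP registers
   (CEIL (8, 4) == 2), in keeping with the register widths described
   above.  */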
13636
13637 /* Implement TARGET_HARD_REGNO_MODE_OK.
13638
13639 ??? Because of the funny way we pass parameters we should allow certain
13640 ??? types of float/complex values to be in integer registers during
13641 ??? RTL generation. This only matters on arch32. */
13642
13643 static bool
13644 sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
13645 {
13646 return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
13647 }
13648
13649 /* Implement TARGET_MODES_TIEABLE_P.
13650
13651 For V9 we have to deal with the fact that only the lower 32 floating
13652 point registers are 32-bit addressable. */
13653
13654 static bool
13655 sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
13656 {
13657 enum mode_class mclass1, mclass2;
13658 unsigned short size1, size2;
13659
13660 if (mode1 == mode2)
13661 return true;
13662
13663 mclass1 = GET_MODE_CLASS (mode1);
13664 mclass2 = GET_MODE_CLASS (mode2);
13665 if (mclass1 != mclass2)
13666 return false;
13667
13668 if (! TARGET_V9)
13669 return true;
13670
13671 /* Classes are the same and we are V9 so we have to deal with upper
13672 vs. lower floating point registers. If one of the modes is a
13673 4-byte mode, and the other is not, we have to mark them as not
13674 tieable because only the lower 32 floating point register are
13675 addressable 32-bits at a time.
13676
13677 We can't just test explicitly for SFmode, otherwise we won't
13678 cover the vector mode cases properly. */
13679
13680 if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
13681 return true;
13682
13683 size1 = GET_MODE_SIZE (mode1);
13684 size2 = GET_MODE_SIZE (mode2);
13685 if ((size1 > 4 && size2 == 4)
13686 || (size2 > 4 && size1 == 4))
13687 return false;
13688
13689 return true;
13690 }
13691
13692 /* Implement TARGET_CSTORE_MODE. */
13693
13694 static scalar_int_mode
13695 sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
13696 {
13697 return (TARGET_ARCH64 ? DImode : SImode);
13698 }
13699
13700 /* Return the compound expression made of T1 and T2. */
13701
13702 static inline tree
13703 compound_expr (tree t1, tree t2)
13704 {
13705 return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
13706 }
13707
13708 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13709
13710 static void
13711 sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13712 {
13713 if (!TARGET_FPU)
13714 return;
13715
13716 const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
13717 const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
13718
13719 /* We generate the equivalent of feholdexcept (&fenv_var):
13720
13721 unsigned int fenv_var;
13722 __builtin_store_fsr (&fenv_var);
13723
13724 unsigned int tmp1_var;
13725 tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
13726
13727 __builtin_load_fsr (&tmp1_var); */
13728
13729 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
13730 TREE_ADDRESSABLE (fenv_var) = 1;
13731 tree fenv_addr = build_fold_addr_expr (fenv_var);
13732 tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
13733 tree hold_stfsr
13734 = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
13735 build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);
13736
13737 tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
13738 TREE_ADDRESSABLE (tmp1_var) = 1;
13739 tree masked_fenv_var
13740 = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13741 build_int_cst (unsigned_type_node,
13742 ~(accrued_exception_mask | trap_enable_mask)));
13743 tree hold_mask
13744 = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
13745 NULL_TREE, NULL_TREE);
13746
13747 tree tmp1_addr = build_fold_addr_expr (tmp1_var);
13748 tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
13749 tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
13750
13751 *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
13752
13753 /* We reload the value of tmp1_var to clear the exceptions:
13754
13755 __builtin_load_fsr (&tmp1_var); */
13756
13757 *clear = build_call_expr (ldfsr, 1, tmp1_addr);
13758
13759 /* We generate the equivalent of feupdateenv (&fenv_var):
13760
13761 unsigned int tmp2_var;
13762 __builtin_store_fsr (&tmp2_var);
13763
13764 __builtin_load_fsr (&fenv_var);
13765
13766 if (SPARC_LOW_FE_EXCEPT_VALUES)
13767 tmp2_var >>= 5;
13768 __atomic_feraiseexcept ((int) tmp2_var); */
13769
13770 tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
13771 TREE_ADDRESSABLE (tmp2_var) = 1;
13772 tree tmp2_addr = build_fold_addr_expr (tmp2_var);
13773 tree update_stfsr
13774 = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
13775 build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);
13776
13777 tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
13778
13779 tree atomic_feraiseexcept
13780 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13781 tree update_call
13782 = build_call_expr (atomic_feraiseexcept, 1,
13783 fold_convert (integer_type_node, tmp2_var));
13784
13785 if (SPARC_LOW_FE_EXCEPT_VALUES)
13786 {
13787 tree shifted_tmp2_var
13788 = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
13789 build_int_cst (unsigned_type_node, 5));
13790 tree update_shift
13791 = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
13792 update_call = compound_expr (update_shift, update_call);
13793 }
13794
13795 *update
13796 = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
13797 }
13798
13799 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. Borrowed from the PA port.
13800
13801 SImode loads to floating-point registers are not zero-extended.
13802 The definition for LOAD_EXTEND_OP specifies that integer loads
13803 narrower than BITS_PER_WORD will be zero-extended. As a result,
13804 we inhibit changes from SImode unless they are to a mode that is
13805 identical in size.
13806
13807 Likewise for SFmode, since word-mode paradoxical subregs are
13808 problematic on big-endian architectures. */
13809
13810 static bool
13811 sparc_can_change_mode_class (machine_mode from, machine_mode to,
13812 reg_class_t rclass)
13813 {
13814 if (TARGET_ARCH64
13815 && GET_MODE_SIZE (from) == 4
13816 && GET_MODE_SIZE (to) != 4)
13817 return !reg_classes_intersect_p (rclass, FP_REGS);
13818 return true;
13819 }
13820
13821 /* Implement TARGET_CONSTANT_ALIGNMENT. */
13822
13823 static HOST_WIDE_INT
13824 sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
13825 {
13826 if (TREE_CODE (exp) == STRING_CST)
13827 return MAX (align, FASTEST_ALIGNMENT);
13828 return align;
13829 }
13830
13831 #include "gt-sparc.h"