1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
5 at Cygnus Support.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "target.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "memmodel.h"
33 #include "gimple.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38 #include "expmed.h"
39 #include "optabs.h"
40 #include "regs.h"
41 #include "emit-rtl.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "alias.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "varasm.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "explow.h"
52 #include "expr.h"
53 #include "debug.h"
54 #include "cfgrtl.h"
55 #include "common/common-target.h"
56 #include "gimplify.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "params.h"
60 #include "tree-pass.h"
61 #include "context.h"
62 #include "builtins.h"
63 #include "tree-vector-builder.h"
64
65 /* This file should be included last. */
66 #include "target-def.h"
67
68 /* Processor costs */
69
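/* Note for readers: the latency fields below are initialised with
   COSTS_N_INSNS (n), GCC's way of expressing a cost of n typical
   instructions; int_mul_bit_factor and shift_penalty are plain counts.  */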
70 struct processor_costs {
71 /* Integer load */
72 const int int_load;
73
74 /* Integer signed load */
75 const int int_sload;
76
77 /* Integer zeroed load */
78 const int int_zload;
79
80 /* Float load */
81 const int float_load;
82
83 /* fmov, fneg, fabs */
84 const int float_move;
85
86 /* fadd, fsub */
87 const int float_plusminus;
88
89 /* fcmp */
90 const int float_cmp;
91
92 /* fmov, fmovr */
93 const int float_cmove;
94
95 /* fmul */
96 const int float_mul;
97
98 /* fdivs */
99 const int float_div_sf;
100
101 /* fdivd */
102 const int float_div_df;
103
104 /* fsqrts */
105 const int float_sqrt_sf;
106
107 /* fsqrtd */
108 const int float_sqrt_df;
109
110 /* umul/smul */
111 const int int_mul;
112
113 /* mulX */
114 const int int_mulX;
115
116 /* integer multiply cost for each bit set past the most
117 significant 3, so the formula for multiply cost becomes:
118
119 if (rs1 < 0)
120 highest_bit = highest_clear_bit(rs1);
121 else
122 highest_bit = highest_set_bit(rs1);
123 if (highest_bit < 3)
124 highest_bit = 3;
125 cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
126
127        A value of zero indicates that the multiply cost is fixed,
128 and not variable. */
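   /* Illustrative example of the formula above, using the ultrasparc_costs
      entry below: with int_mul = COSTS_N_INSNS (4) and int_mul_bit_factor
      = 2, a multiply whose operand has its highest set (or clear, if
      negative) bit at position 11 is costed
      COSTS_N_INSNS (4) + (11 - 3) / 2, i.e. the base cost plus 4 extra
      units.  */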
129 const int int_mul_bit_factor;
130
131 /* udiv/sdiv */
132 const int int_div;
133
134 /* divX */
135 const int int_divX;
136
137 /* movcc, movr */
138 const int int_cmove;
139
140 /* penalty for shifts, due to scheduling rules etc. */
141 const int shift_penalty;
142 };
143
144 static const
145 struct processor_costs cypress_costs = {
146 COSTS_N_INSNS (2), /* int load */
147 COSTS_N_INSNS (2), /* int signed load */
148 COSTS_N_INSNS (2), /* int zeroed load */
149 COSTS_N_INSNS (2), /* float load */
150 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
151 COSTS_N_INSNS (5), /* fadd, fsub */
152 COSTS_N_INSNS (1), /* fcmp */
153 COSTS_N_INSNS (1), /* fmov, fmovr */
154 COSTS_N_INSNS (7), /* fmul */
155 COSTS_N_INSNS (37), /* fdivs */
156 COSTS_N_INSNS (37), /* fdivd */
157 COSTS_N_INSNS (63), /* fsqrts */
158 COSTS_N_INSNS (63), /* fsqrtd */
159 COSTS_N_INSNS (1), /* imul */
160 COSTS_N_INSNS (1), /* imulX */
161 0, /* imul bit factor */
162 COSTS_N_INSNS (1), /* idiv */
163 COSTS_N_INSNS (1), /* idivX */
164 COSTS_N_INSNS (1), /* movcc/movr */
165 0, /* shift penalty */
166 };
167
168 static const
169 struct processor_costs supersparc_costs = {
170 COSTS_N_INSNS (1), /* int load */
171 COSTS_N_INSNS (1), /* int signed load */
172 COSTS_N_INSNS (1), /* int zeroed load */
173 COSTS_N_INSNS (0), /* float load */
174 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
175 COSTS_N_INSNS (3), /* fadd, fsub */
176 COSTS_N_INSNS (3), /* fcmp */
177 COSTS_N_INSNS (1), /* fmov, fmovr */
178 COSTS_N_INSNS (3), /* fmul */
179 COSTS_N_INSNS (6), /* fdivs */
180 COSTS_N_INSNS (9), /* fdivd */
181 COSTS_N_INSNS (12), /* fsqrts */
182 COSTS_N_INSNS (12), /* fsqrtd */
183 COSTS_N_INSNS (4), /* imul */
184 COSTS_N_INSNS (4), /* imulX */
185 0, /* imul bit factor */
186 COSTS_N_INSNS (4), /* idiv */
187 COSTS_N_INSNS (4), /* idivX */
188 COSTS_N_INSNS (1), /* movcc/movr */
189 1, /* shift penalty */
190 };
191
192 static const
193 struct processor_costs hypersparc_costs = {
194 COSTS_N_INSNS (1), /* int load */
195 COSTS_N_INSNS (1), /* int signed load */
196 COSTS_N_INSNS (1), /* int zeroed load */
197 COSTS_N_INSNS (1), /* float load */
198 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
199 COSTS_N_INSNS (1), /* fadd, fsub */
200 COSTS_N_INSNS (1), /* fcmp */
201 COSTS_N_INSNS (1), /* fmov, fmovr */
202 COSTS_N_INSNS (1), /* fmul */
203 COSTS_N_INSNS (8), /* fdivs */
204 COSTS_N_INSNS (12), /* fdivd */
205 COSTS_N_INSNS (17), /* fsqrts */
206 COSTS_N_INSNS (17), /* fsqrtd */
207 COSTS_N_INSNS (17), /* imul */
208 COSTS_N_INSNS (17), /* imulX */
209 0, /* imul bit factor */
210 COSTS_N_INSNS (17), /* idiv */
211 COSTS_N_INSNS (17), /* idivX */
212 COSTS_N_INSNS (1), /* movcc/movr */
213 0, /* shift penalty */
214 };
215
216 static const
217 struct processor_costs leon_costs = {
218 COSTS_N_INSNS (1), /* int load */
219 COSTS_N_INSNS (1), /* int signed load */
220 COSTS_N_INSNS (1), /* int zeroed load */
221 COSTS_N_INSNS (1), /* float load */
222 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
223 COSTS_N_INSNS (1), /* fadd, fsub */
224 COSTS_N_INSNS (1), /* fcmp */
225 COSTS_N_INSNS (1), /* fmov, fmovr */
226 COSTS_N_INSNS (1), /* fmul */
227 COSTS_N_INSNS (15), /* fdivs */
228 COSTS_N_INSNS (15), /* fdivd */
229 COSTS_N_INSNS (23), /* fsqrts */
230 COSTS_N_INSNS (23), /* fsqrtd */
231 COSTS_N_INSNS (5), /* imul */
232 COSTS_N_INSNS (5), /* imulX */
233 0, /* imul bit factor */
234 COSTS_N_INSNS (5), /* idiv */
235 COSTS_N_INSNS (5), /* idivX */
236 COSTS_N_INSNS (1), /* movcc/movr */
237 0, /* shift penalty */
238 };
239
240 static const
241 struct processor_costs leon3_costs = {
242 COSTS_N_INSNS (1), /* int load */
243 COSTS_N_INSNS (1), /* int signed load */
244 COSTS_N_INSNS (1), /* int zeroed load */
245 COSTS_N_INSNS (1), /* float load */
246 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
247 COSTS_N_INSNS (1), /* fadd, fsub */
248 COSTS_N_INSNS (1), /* fcmp */
249 COSTS_N_INSNS (1), /* fmov, fmovr */
250 COSTS_N_INSNS (1), /* fmul */
251 COSTS_N_INSNS (14), /* fdivs */
252 COSTS_N_INSNS (15), /* fdivd */
253 COSTS_N_INSNS (22), /* fsqrts */
254 COSTS_N_INSNS (23), /* fsqrtd */
255 COSTS_N_INSNS (5), /* imul */
256 COSTS_N_INSNS (5), /* imulX */
257 0, /* imul bit factor */
258 COSTS_N_INSNS (35), /* idiv */
259 COSTS_N_INSNS (35), /* idivX */
260 COSTS_N_INSNS (1), /* movcc/movr */
261 0, /* shift penalty */
262 };
263
264 static const
265 struct processor_costs sparclet_costs = {
266 COSTS_N_INSNS (3), /* int load */
267 COSTS_N_INSNS (3), /* int signed load */
268 COSTS_N_INSNS (1), /* int zeroed load */
269 COSTS_N_INSNS (1), /* float load */
270 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
271 COSTS_N_INSNS (1), /* fadd, fsub */
272 COSTS_N_INSNS (1), /* fcmp */
273 COSTS_N_INSNS (1), /* fmov, fmovr */
274 COSTS_N_INSNS (1), /* fmul */
275 COSTS_N_INSNS (1), /* fdivs */
276 COSTS_N_INSNS (1), /* fdivd */
277 COSTS_N_INSNS (1), /* fsqrts */
278 COSTS_N_INSNS (1), /* fsqrtd */
279 COSTS_N_INSNS (5), /* imul */
280 COSTS_N_INSNS (5), /* imulX */
281 0, /* imul bit factor */
282 COSTS_N_INSNS (5), /* idiv */
283 COSTS_N_INSNS (5), /* idivX */
284 COSTS_N_INSNS (1), /* movcc/movr */
285 0, /* shift penalty */
286 };
287
288 static const
289 struct processor_costs ultrasparc_costs = {
290 COSTS_N_INSNS (2), /* int load */
291 COSTS_N_INSNS (3), /* int signed load */
292 COSTS_N_INSNS (2), /* int zeroed load */
293 COSTS_N_INSNS (2), /* float load */
294 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
295 COSTS_N_INSNS (4), /* fadd, fsub */
296 COSTS_N_INSNS (1), /* fcmp */
297 COSTS_N_INSNS (2), /* fmov, fmovr */
298 COSTS_N_INSNS (4), /* fmul */
299 COSTS_N_INSNS (13), /* fdivs */
300 COSTS_N_INSNS (23), /* fdivd */
301 COSTS_N_INSNS (13), /* fsqrts */
302 COSTS_N_INSNS (23), /* fsqrtd */
303 COSTS_N_INSNS (4), /* imul */
304 COSTS_N_INSNS (4), /* imulX */
305 2, /* imul bit factor */
306 COSTS_N_INSNS (37), /* idiv */
307 COSTS_N_INSNS (68), /* idivX */
308 COSTS_N_INSNS (2), /* movcc/movr */
309 2, /* shift penalty */
310 };
311
312 static const
313 struct processor_costs ultrasparc3_costs = {
314 COSTS_N_INSNS (2), /* int load */
315 COSTS_N_INSNS (3), /* int signed load */
316 COSTS_N_INSNS (3), /* int zeroed load */
317 COSTS_N_INSNS (2), /* float load */
318 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
319 COSTS_N_INSNS (4), /* fadd, fsub */
320 COSTS_N_INSNS (5), /* fcmp */
321 COSTS_N_INSNS (3), /* fmov, fmovr */
322 COSTS_N_INSNS (4), /* fmul */
323 COSTS_N_INSNS (17), /* fdivs */
324 COSTS_N_INSNS (20), /* fdivd */
325 COSTS_N_INSNS (20), /* fsqrts */
326 COSTS_N_INSNS (29), /* fsqrtd */
327 COSTS_N_INSNS (6), /* imul */
328 COSTS_N_INSNS (6), /* imulX */
329 0, /* imul bit factor */
330 COSTS_N_INSNS (40), /* idiv */
331 COSTS_N_INSNS (71), /* idivX */
332 COSTS_N_INSNS (2), /* movcc/movr */
333 0, /* shift penalty */
334 };
335
336 static const
337 struct processor_costs niagara_costs = {
338 COSTS_N_INSNS (3), /* int load */
339 COSTS_N_INSNS (3), /* int signed load */
340 COSTS_N_INSNS (3), /* int zeroed load */
341 COSTS_N_INSNS (9), /* float load */
342 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
343 COSTS_N_INSNS (8), /* fadd, fsub */
344 COSTS_N_INSNS (26), /* fcmp */
345 COSTS_N_INSNS (8), /* fmov, fmovr */
346 COSTS_N_INSNS (29), /* fmul */
347 COSTS_N_INSNS (54), /* fdivs */
348 COSTS_N_INSNS (83), /* fdivd */
349 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
350 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
351 COSTS_N_INSNS (11), /* imul */
352 COSTS_N_INSNS (11), /* imulX */
353 0, /* imul bit factor */
354 COSTS_N_INSNS (72), /* idiv */
355 COSTS_N_INSNS (72), /* idivX */
356 COSTS_N_INSNS (1), /* movcc/movr */
357 0, /* shift penalty */
358 };
359
360 static const
361 struct processor_costs niagara2_costs = {
362 COSTS_N_INSNS (3), /* int load */
363 COSTS_N_INSNS (3), /* int signed load */
364 COSTS_N_INSNS (3), /* int zeroed load */
365 COSTS_N_INSNS (3), /* float load */
366 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
367 COSTS_N_INSNS (6), /* fadd, fsub */
368 COSTS_N_INSNS (6), /* fcmp */
369 COSTS_N_INSNS (6), /* fmov, fmovr */
370 COSTS_N_INSNS (6), /* fmul */
371 COSTS_N_INSNS (19), /* fdivs */
372 COSTS_N_INSNS (33), /* fdivd */
373 COSTS_N_INSNS (19), /* fsqrts */
374 COSTS_N_INSNS (33), /* fsqrtd */
375 COSTS_N_INSNS (5), /* imul */
376 COSTS_N_INSNS (5), /* imulX */
377 0, /* imul bit factor */
378 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
379 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
380 COSTS_N_INSNS (1), /* movcc/movr */
381 0, /* shift penalty */
382 };
383
384 static const
385 struct processor_costs niagara3_costs = {
386 COSTS_N_INSNS (3), /* int load */
387 COSTS_N_INSNS (3), /* int signed load */
388 COSTS_N_INSNS (3), /* int zeroed load */
389 COSTS_N_INSNS (3), /* float load */
390 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
391 COSTS_N_INSNS (9), /* fadd, fsub */
392 COSTS_N_INSNS (9), /* fcmp */
393 COSTS_N_INSNS (9), /* fmov, fmovr */
394 COSTS_N_INSNS (9), /* fmul */
395 COSTS_N_INSNS (23), /* fdivs */
396 COSTS_N_INSNS (37), /* fdivd */
397 COSTS_N_INSNS (23), /* fsqrts */
398 COSTS_N_INSNS (37), /* fsqrtd */
399 COSTS_N_INSNS (9), /* imul */
400 COSTS_N_INSNS (9), /* imulX */
401 0, /* imul bit factor */
402 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
403 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
404 COSTS_N_INSNS (1), /* movcc/movr */
405 0, /* shift penalty */
406 };
407
408 static const
409 struct processor_costs niagara4_costs = {
410 COSTS_N_INSNS (5), /* int load */
411 COSTS_N_INSNS (5), /* int signed load */
412 COSTS_N_INSNS (5), /* int zeroed load */
413 COSTS_N_INSNS (5), /* float load */
414 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
415 COSTS_N_INSNS (11), /* fadd, fsub */
416 COSTS_N_INSNS (11), /* fcmp */
417 COSTS_N_INSNS (11), /* fmov, fmovr */
418 COSTS_N_INSNS (11), /* fmul */
419 COSTS_N_INSNS (24), /* fdivs */
420 COSTS_N_INSNS (37), /* fdivd */
421 COSTS_N_INSNS (24), /* fsqrts */
422 COSTS_N_INSNS (37), /* fsqrtd */
423 COSTS_N_INSNS (12), /* imul */
424 COSTS_N_INSNS (12), /* imulX */
425 0, /* imul bit factor */
426 COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
427 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
428 COSTS_N_INSNS (1), /* movcc/movr */
429 0, /* shift penalty */
430 };
431
432 static const
433 struct processor_costs niagara7_costs = {
434 COSTS_N_INSNS (5), /* int load */
435 COSTS_N_INSNS (5), /* int signed load */
436 COSTS_N_INSNS (5), /* int zeroed load */
437 COSTS_N_INSNS (5), /* float load */
438 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
439 COSTS_N_INSNS (11), /* fadd, fsub */
440 COSTS_N_INSNS (11), /* fcmp */
441 COSTS_N_INSNS (11), /* fmov, fmovr */
442 COSTS_N_INSNS (11), /* fmul */
443 COSTS_N_INSNS (24), /* fdivs */
444 COSTS_N_INSNS (37), /* fdivd */
445 COSTS_N_INSNS (24), /* fsqrts */
446 COSTS_N_INSNS (37), /* fsqrtd */
447 COSTS_N_INSNS (12), /* imul */
448 COSTS_N_INSNS (12), /* imulX */
449 0, /* imul bit factor */
450 COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
451 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
452 COSTS_N_INSNS (1), /* movcc/movr */
453 0, /* shift penalty */
454 };
455
456 static const
457 struct processor_costs m8_costs = {
458 COSTS_N_INSNS (3), /* int load */
459 COSTS_N_INSNS (3), /* int signed load */
460 COSTS_N_INSNS (3), /* int zeroed load */
461 COSTS_N_INSNS (3), /* float load */
462 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
463 COSTS_N_INSNS (9), /* fadd, fsub */
464 COSTS_N_INSNS (9), /* fcmp */
465 COSTS_N_INSNS (9), /* fmov, fmovr */
466 COSTS_N_INSNS (9), /* fmul */
467 COSTS_N_INSNS (26), /* fdivs */
468 COSTS_N_INSNS (30), /* fdivd */
469 COSTS_N_INSNS (33), /* fsqrts */
470 COSTS_N_INSNS (41), /* fsqrtd */
471 COSTS_N_INSNS (12), /* imul */
472 COSTS_N_INSNS (10), /* imulX */
473 0, /* imul bit factor */
474 COSTS_N_INSNS (57), /* udiv/sdiv */
475 COSTS_N_INSNS (30), /* udivx/sdivx */
476 COSTS_N_INSNS (1), /* movcc/movr */
477 0, /* shift penalty */
478 };
479
480 static const struct processor_costs *sparc_costs = &cypress_costs;
481
482 #ifdef HAVE_AS_RELAX_OPTION
483 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
484 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
485    With sethi/jmp, neither 'as' nor 'ld' has an easy way to verify that
486    nothing branches between the sethi and the jmp. */
487 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
488 #else
489 #define LEAF_SIBCALL_SLOT_RESERVED_P \
490 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
491 #endif
492
493 /* Vector to say how input registers are mapped to output registers.
494 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
495 eliminate it. You must use -fomit-frame-pointer to get that. */
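/* Reading aid: leaf_reg_remap[24] == 8 in the table below, i.e. in a leaf
   function the incoming register %i0 (hard regno 24) is renamed to the
   outgoing register %o0 (hard regno 8), because no register window is
   allocated.  */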
496 char leaf_reg_remap[] =
497 { 0, 1, 2, 3, 4, 5, 6, 7,
498 -1, -1, -1, -1, -1, -1, 14, -1,
499 -1, -1, -1, -1, -1, -1, -1, -1,
500 8, 9, 10, 11, 12, 13, -1, 15,
501
502 32, 33, 34, 35, 36, 37, 38, 39,
503 40, 41, 42, 43, 44, 45, 46, 47,
504 48, 49, 50, 51, 52, 53, 54, 55,
505 56, 57, 58, 59, 60, 61, 62, 63,
506 64, 65, 66, 67, 68, 69, 70, 71,
507 72, 73, 74, 75, 76, 77, 78, 79,
508 80, 81, 82, 83, 84, 85, 86, 87,
509 88, 89, 90, 91, 92, 93, 94, 95,
510 96, 97, 98, 99, 100, 101, 102};
511
512 /* Vector, indexed by hard register number, which contains 1
513 for a register that is allowable in a candidate for leaf
514 function treatment. */
515 char sparc_leaf_regs[] =
516 { 1, 1, 1, 1, 1, 1, 1, 1,
517 0, 0, 0, 0, 0, 0, 1, 0,
518 0, 0, 0, 0, 0, 0, 0, 0,
519 1, 1, 1, 1, 1, 1, 0, 1,
520 1, 1, 1, 1, 1, 1, 1, 1,
521 1, 1, 1, 1, 1, 1, 1, 1,
522 1, 1, 1, 1, 1, 1, 1, 1,
523 1, 1, 1, 1, 1, 1, 1, 1,
524 1, 1, 1, 1, 1, 1, 1, 1,
525 1, 1, 1, 1, 1, 1, 1, 1,
526 1, 1, 1, 1, 1, 1, 1, 1,
527 1, 1, 1, 1, 1, 1, 1, 1,
528 1, 1, 1, 1, 1, 1, 1};
529
530 struct GTY(()) machine_function
531 {
532 /* Size of the frame of the function. */
533 HOST_WIDE_INT frame_size;
534
535 /* Size of the frame of the function minus the register window save area
536 and the outgoing argument area. */
537 HOST_WIDE_INT apparent_frame_size;
538
539 /* Register we pretend the frame pointer is allocated to. Normally, this
540 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
541 record "offset" separately as it may be too big for (reg + disp). */
542 rtx frame_base_reg;
543 HOST_WIDE_INT frame_base_offset;
544
545 /* Number of global or FP registers to be saved (as 4-byte quantities). */
546 int n_global_fp_regs;
547
548 /* True if the current function is leaf and uses only leaf regs,
549 so that the SPARC leaf function optimization can be applied.
550 Private version of crtl->uses_only_leaf_regs, see
551 sparc_expand_prologue for the rationale. */
552 int leaf_function_p;
553
554 /* True if the prologue saves local or in registers. */
555 bool save_local_in_regs_p;
556
557 /* True if the data calculated by sparc_expand_prologue are valid. */
558 bool prologue_data_valid_p;
559 };
560
561 #define sparc_frame_size cfun->machine->frame_size
562 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
563 #define sparc_frame_base_reg cfun->machine->frame_base_reg
564 #define sparc_frame_base_offset cfun->machine->frame_base_offset
565 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
566 #define sparc_leaf_function_p cfun->machine->leaf_function_p
567 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
568 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
569
570 /* 1 if the next opcode is to be specially indented. */
571 int sparc_indent_opcode = 0;
572
573 static void sparc_option_override (void);
574 static void sparc_init_modes (void);
575 static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
576 const_tree, bool, bool, int *, int *);
577
578 static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
579 static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
580
581 static void sparc_emit_set_const32 (rtx, rtx);
582 static void sparc_emit_set_const64 (rtx, rtx);
583 static void sparc_output_addr_vec (rtx);
584 static void sparc_output_addr_diff_vec (rtx);
585 static void sparc_output_deferred_case_vectors (void);
586 static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
587 static bool sparc_legitimate_constant_p (machine_mode, rtx);
588 static rtx sparc_builtin_saveregs (void);
589 static int epilogue_renumber (rtx *, int);
590 static bool sparc_assemble_integer (rtx, unsigned int, int);
591 static int set_extends (rtx_insn *);
592 static void sparc_asm_function_prologue (FILE *);
593 static void sparc_asm_function_epilogue (FILE *);
594 #ifdef TARGET_SOLARIS
595 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
596 tree) ATTRIBUTE_UNUSED;
597 #endif
598 static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
599 static int sparc_issue_rate (void);
600 static void sparc_sched_init (FILE *, int, int);
601 static int sparc_use_sched_lookahead (void);
602
603 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
604 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
605 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
606 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
607 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
608
609 static bool sparc_function_ok_for_sibcall (tree, tree);
610 static void sparc_init_libfuncs (void);
611 static void sparc_init_builtins (void);
612 static void sparc_fpu_init_builtins (void);
613 static void sparc_vis_init_builtins (void);
614 static tree sparc_builtin_decl (unsigned, bool);
615 static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
616 static tree sparc_fold_builtin (tree, int, tree *, bool);
617 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
618 HOST_WIDE_INT, tree);
619 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
620 HOST_WIDE_INT, const_tree);
621 static struct machine_function * sparc_init_machine_status (void);
622 static bool sparc_cannot_force_const_mem (machine_mode, rtx);
623 static rtx sparc_tls_get_addr (void);
624 static rtx sparc_tls_got (void);
625 static int sparc_register_move_cost (machine_mode,
626 reg_class_t, reg_class_t);
627 static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
628 static rtx sparc_function_value (const_tree, const_tree, bool);
629 static rtx sparc_libcall_value (machine_mode, const_rtx);
630 static bool sparc_function_value_regno_p (const unsigned int);
631 static rtx sparc_struct_value_rtx (tree, int);
632 static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
633 int *, const_tree, int);
634 static bool sparc_return_in_memory (const_tree, const_tree);
635 static bool sparc_strict_argument_naming (cumulative_args_t);
636 static void sparc_va_start (tree, rtx);
637 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
638 static bool sparc_vector_mode_supported_p (machine_mode);
639 static bool sparc_tls_referenced_p (rtx);
640 static rtx sparc_legitimize_tls_address (rtx);
641 static rtx sparc_legitimize_pic_address (rtx, rtx);
642 static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
643 static rtx sparc_delegitimize_address (rtx);
644 static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
645 static bool sparc_pass_by_reference (cumulative_args_t,
646 machine_mode, const_tree, bool);
647 static void sparc_function_arg_advance (cumulative_args_t,
648 machine_mode, const_tree, bool);
649 static rtx sparc_function_arg_1 (cumulative_args_t,
650 machine_mode, const_tree, bool, bool);
651 static rtx sparc_function_arg (cumulative_args_t,
652 machine_mode, const_tree, bool);
653 static rtx sparc_function_incoming_arg (cumulative_args_t,
654 machine_mode, const_tree, bool);
655 static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
656 static unsigned int sparc_function_arg_boundary (machine_mode,
657 const_tree);
658 static int sparc_arg_partial_bytes (cumulative_args_t,
659 machine_mode, tree, bool);
660 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
661 static void sparc_file_end (void);
662 static bool sparc_frame_pointer_required (void);
663 static bool sparc_can_eliminate (const int, const int);
664 static rtx sparc_builtin_setjmp_frame_value (void);
665 static void sparc_conditional_register_usage (void);
666 static bool sparc_use_pseudo_pic_reg (void);
667 static void sparc_init_pic_reg (void);
668 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
669 static const char *sparc_mangle_type (const_tree);
670 #endif
671 static void sparc_trampoline_init (rtx, tree, rtx);
672 static machine_mode sparc_preferred_simd_mode (scalar_mode);
673 static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
674 static bool sparc_lra_p (void);
675 static bool sparc_print_operand_punct_valid_p (unsigned char);
676 static void sparc_print_operand (FILE *, rtx, int);
677 static void sparc_print_operand_address (FILE *, machine_mode, rtx);
678 static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
679 machine_mode,
680 secondary_reload_info *);
681 static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
682 reg_class_t);
683 static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
684 static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
685 static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
686 static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
687 static unsigned int sparc_min_arithmetic_precision (void);
688 static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
689 static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
690 static bool sparc_modes_tieable_p (machine_mode, machine_mode);
691 static bool sparc_can_change_mode_class (machine_mode, machine_mode,
692 reg_class_t);
693 static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
694 static bool sparc_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
695 const vec_perm_indices &);
696 \f
697 #ifdef SUBTARGET_ATTRIBUTE_TABLE
698 /* Table of valid machine attributes. */
699 static const struct attribute_spec sparc_attribute_table[] =
700 {
701 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
702 do_diagnostic, handler, exclude } */
703 SUBTARGET_ATTRIBUTE_TABLE,
704 { NULL, 0, 0, false, false, false, false, NULL, NULL }
705 };
706 #endif
707 \f
708 /* Option handling. */
709
710 /* Parsed value. */
711 enum cmodel sparc_cmodel;
712
713 char sparc_hard_reg_printed[8];
714
715 /* Initialize the GCC target structure. */
716
717 /* The default is to use .half rather than .short for aligned HI objects. */
718 #undef TARGET_ASM_ALIGNED_HI_OP
719 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
720
721 #undef TARGET_ASM_UNALIGNED_HI_OP
722 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
723 #undef TARGET_ASM_UNALIGNED_SI_OP
724 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
725 #undef TARGET_ASM_UNALIGNED_DI_OP
726 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
727
728 /* The target hook has to handle DI-mode values. */
729 #undef TARGET_ASM_INTEGER
730 #define TARGET_ASM_INTEGER sparc_assemble_integer
731
732 #undef TARGET_ASM_FUNCTION_PROLOGUE
733 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
734 #undef TARGET_ASM_FUNCTION_EPILOGUE
735 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
736
737 #undef TARGET_SCHED_ADJUST_COST
738 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
739 #undef TARGET_SCHED_ISSUE_RATE
740 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
741 #undef TARGET_SCHED_INIT
742 #define TARGET_SCHED_INIT sparc_sched_init
743 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
744 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
745
746 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
747 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
748
749 #undef TARGET_INIT_LIBFUNCS
750 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
751
752 #undef TARGET_LEGITIMIZE_ADDRESS
753 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
754 #undef TARGET_DELEGITIMIZE_ADDRESS
755 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
756 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
757 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
758
759 #undef TARGET_INIT_BUILTINS
760 #define TARGET_INIT_BUILTINS sparc_init_builtins
761 #undef TARGET_BUILTIN_DECL
762 #define TARGET_BUILTIN_DECL sparc_builtin_decl
763 #undef TARGET_EXPAND_BUILTIN
764 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
765 #undef TARGET_FOLD_BUILTIN
766 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
767
768 #if TARGET_TLS
769 #undef TARGET_HAVE_TLS
770 #define TARGET_HAVE_TLS true
771 #endif
772
773 #undef TARGET_CANNOT_FORCE_CONST_MEM
774 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
775
776 #undef TARGET_ASM_OUTPUT_MI_THUNK
777 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
778 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
779 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
780
781 #undef TARGET_RTX_COSTS
782 #define TARGET_RTX_COSTS sparc_rtx_costs
783 #undef TARGET_ADDRESS_COST
784 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
785 #undef TARGET_REGISTER_MOVE_COST
786 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
787
788 #undef TARGET_PROMOTE_FUNCTION_MODE
789 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
790
791 #undef TARGET_FUNCTION_VALUE
792 #define TARGET_FUNCTION_VALUE sparc_function_value
793 #undef TARGET_LIBCALL_VALUE
794 #define TARGET_LIBCALL_VALUE sparc_libcall_value
795 #undef TARGET_FUNCTION_VALUE_REGNO_P
796 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
797
798 #undef TARGET_STRUCT_VALUE_RTX
799 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
800 #undef TARGET_RETURN_IN_MEMORY
801 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
802 #undef TARGET_MUST_PASS_IN_STACK
803 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
804 #undef TARGET_PASS_BY_REFERENCE
805 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
806 #undef TARGET_ARG_PARTIAL_BYTES
807 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
808 #undef TARGET_FUNCTION_ARG_ADVANCE
809 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
810 #undef TARGET_FUNCTION_ARG
811 #define TARGET_FUNCTION_ARG sparc_function_arg
812 #undef TARGET_FUNCTION_INCOMING_ARG
813 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
814 #undef TARGET_FUNCTION_ARG_PADDING
815 #define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
816 #undef TARGET_FUNCTION_ARG_BOUNDARY
817 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
818
819 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
820 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
821 #undef TARGET_STRICT_ARGUMENT_NAMING
822 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
823
824 #undef TARGET_EXPAND_BUILTIN_VA_START
825 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
826 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
827 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
828
829 #undef TARGET_VECTOR_MODE_SUPPORTED_P
830 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
831
832 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
833 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
834
835 #ifdef SUBTARGET_INSERT_ATTRIBUTES
836 #undef TARGET_INSERT_ATTRIBUTES
837 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
838 #endif
839
840 #ifdef SUBTARGET_ATTRIBUTE_TABLE
841 #undef TARGET_ATTRIBUTE_TABLE
842 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
843 #endif
844
845 #undef TARGET_OPTION_OVERRIDE
846 #define TARGET_OPTION_OVERRIDE sparc_option_override
847
848 #ifdef TARGET_THREAD_SSP_OFFSET
849 #undef TARGET_STACK_PROTECT_GUARD
850 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
851 #endif
852
853 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
854 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
855 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
856 #endif
857
858 #undef TARGET_ASM_FILE_END
859 #define TARGET_ASM_FILE_END sparc_file_end
860
861 #undef TARGET_FRAME_POINTER_REQUIRED
862 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
863
864 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
865 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value
866
867 #undef TARGET_CAN_ELIMINATE
868 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
869
870 #undef TARGET_PREFERRED_RELOAD_CLASS
871 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
872
873 #undef TARGET_SECONDARY_RELOAD
874 #define TARGET_SECONDARY_RELOAD sparc_secondary_reload
875 #undef TARGET_SECONDARY_MEMORY_NEEDED
876 #define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
877 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
878 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode
879
880 #undef TARGET_CONDITIONAL_REGISTER_USAGE
881 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
882
883 #undef TARGET_INIT_PIC_REG
884 #define TARGET_INIT_PIC_REG sparc_init_pic_reg
885
886 #undef TARGET_USE_PSEUDO_PIC_REG
887 #define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg
888
889 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
890 #undef TARGET_MANGLE_TYPE
891 #define TARGET_MANGLE_TYPE sparc_mangle_type
892 #endif
893
894 #undef TARGET_LRA_P
895 #define TARGET_LRA_P sparc_lra_p
896
897 #undef TARGET_LEGITIMATE_ADDRESS_P
898 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
899
900 #undef TARGET_LEGITIMATE_CONSTANT_P
901 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
902
903 #undef TARGET_TRAMPOLINE_INIT
904 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
905
906 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
907 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
908 #undef TARGET_PRINT_OPERAND
909 #define TARGET_PRINT_OPERAND sparc_print_operand
910 #undef TARGET_PRINT_OPERAND_ADDRESS
911 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
912
913 /* The value stored by LDSTUB. */
914 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
915 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff
916
917 #undef TARGET_CSTORE_MODE
918 #define TARGET_CSTORE_MODE sparc_cstore_mode
919
920 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
921 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
922
923 #undef TARGET_FIXED_CONDITION_CODE_REGS
924 #define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs
925
926 #undef TARGET_MIN_ARITHMETIC_PRECISION
927 #define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision
928
929 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
930 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
931
932 #undef TARGET_HARD_REGNO_NREGS
933 #define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
934 #undef TARGET_HARD_REGNO_MODE_OK
935 #define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok
936
937 #undef TARGET_MODES_TIEABLE_P
938 #define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p
939
940 #undef TARGET_CAN_CHANGE_MODE_CLASS
941 #define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class
942
943 #undef TARGET_CONSTANT_ALIGNMENT
944 #define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment
945
946 #undef TARGET_VECTORIZE_VEC_PERM_CONST
947 #define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const
948
949 struct gcc_target targetm = TARGET_INITIALIZER;
950
951 /* Return the memory reference contained in X if any, zero otherwise. */
952
953 static rtx
954 mem_ref (rtx x)
955 {
956 if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
957 x = XEXP (x, 0);
958
959 if (MEM_P (x))
960 return x;
961
962 return NULL_RTX;
963 }
964
965 /* True if any of INSN's source register(s) is REG. */
966
967 static bool
968 insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
969 {
970 extract_insn (insn);
971 return ((REG_P (recog_data.operand[1])
972 && REGNO (recog_data.operand[1]) == reg)
973 || (recog_data.n_operands == 3
974 && REG_P (recog_data.operand[2])
975 && REGNO (recog_data.operand[2]) == reg));
976 }
977
978 /* True if INSN is a floating-point division or square-root. */
979
980 static bool
981 div_sqrt_insn_p (rtx_insn *insn)
982 {
983 if (GET_CODE (PATTERN (insn)) != SET)
984 return false;
985
986 switch (get_attr_type (insn))
987 {
988 case TYPE_FPDIVS:
989 case TYPE_FPSQRTS:
990 case TYPE_FPDIVD:
991 case TYPE_FPSQRTD:
992 return true;
993 default:
994 return false;
995 }
996 }
997
998 /* True if INSN is a floating-point instruction. */
999
1000 static bool
1001 fpop_insn_p (rtx_insn *insn)
1002 {
1003 if (GET_CODE (PATTERN (insn)) != SET)
1004 return false;
1005
1006 switch (get_attr_type (insn))
1007 {
1008 case TYPE_FPMOVE:
1009 case TYPE_FPCMOVE:
1010 case TYPE_FP:
1011 case TYPE_FPCMP:
1012 case TYPE_FPMUL:
1013 case TYPE_FPDIVS:
1014 case TYPE_FPSQRTS:
1015 case TYPE_FPDIVD:
1016 case TYPE_FPSQRTD:
1017 return true;
1018 default:
1019 return false;
1020 }
1021 }
1022
1023 /* True if INSN is an atomic instruction. */
1024
1025 static bool
1026 atomic_insn_for_leon3_p (rtx_insn *insn)
1027 {
1028 switch (INSN_CODE (insn))
1029 {
1030 case CODE_FOR_swapsi:
1031 case CODE_FOR_ldstub:
1032 case CODE_FOR_atomic_compare_and_swap_leon3_1:
1033 return true;
1034 default:
1035 return false;
1036 }
1037 }
1038
1039 /* We use a machine specific pass to enable workarounds for errata.
1040
1041 We need to have the (essentially) final form of the insn stream in order
1042 to properly detect the various hazards. Therefore, this machine specific
1043 pass runs as late as possible. */
1044
1045 /* True if INSN is a md pattern or asm statement. */
1046 #define USEFUL_INSN_P(INSN) \
1047 (NONDEBUG_INSN_P (INSN) \
1048 && GET_CODE (PATTERN (INSN)) != USE \
1049 && GET_CODE (PATTERN (INSN)) != CLOBBER)
1050
1051 static unsigned int
1052 sparc_do_work_around_errata (void)
1053 {
1054 rtx_insn *insn, *next;
1055
1056 /* Force all instructions to be split into their final form. */
1057 split_all_insns_noflow ();
1058
1059 /* Now look for specific patterns in the insn stream. */
1060 for (insn = get_insns (); insn; insn = next)
1061 {
1062 bool insert_nop = false;
1063 rtx set;
1064 rtx_insn *jump;
1065 rtx_sequence *seq;
1066
1067 /* Look into the instruction in a delay slot. */
1068 if (NONJUMP_INSN_P (insn)
1069 && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
1070 {
1071 jump = seq->insn (0);
1072 insn = seq->insn (1);
1073 }
1074 else if (JUMP_P (insn))
1075 jump = insn;
1076 else
1077 jump = NULL;
1078
1079 /* Place a NOP at the branch target of an integer branch if it is a
1080 floating-point operation or a floating-point branch. */
1081 if (sparc_fix_gr712rc
1082 && jump
1083 && jump_to_label_p (jump)
1084 && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
1085 {
1086 rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
1087 if (target
1088 && (fpop_insn_p (target)
1089 || (JUMP_P (target)
1090 && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
1091 emit_insn_before (gen_nop (), target);
1092 }
1093
1094       /* Insert a NOP between a load instruction and an atomic instruction.
1095          Also insert a NOP at the branch target if there is a load in the
1096          delay slot and an atomic instruction at the branch target. */
1097 if (sparc_fix_ut700
1098 && NONJUMP_INSN_P (insn)
1099 && (set = single_set (insn)) != NULL_RTX
1100 && mem_ref (SET_SRC (set))
1101 && REG_P (SET_DEST (set)))
1102 {
1103 if (jump && jump_to_label_p (jump))
1104 {
1105 rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
1106 if (target && atomic_insn_for_leon3_p (target))
1107 emit_insn_before (gen_nop (), target);
1108 }
1109
1110 next = next_active_insn (insn);
1111 if (!next)
1112 break;
1113
1114 if (atomic_insn_for_leon3_p (next))
1115 insert_nop = true;
1116 }
1117
1118 /* Look for a sequence that starts with a fdiv or fsqrt instruction and
1119 ends with another fdiv or fsqrt instruction with no dependencies on
1120 the former, along with an appropriate pattern in between. */
1121 if (sparc_fix_lost_divsqrt
1122 && NONJUMP_INSN_P (insn)
1123 && div_sqrt_insn_p (insn))
1124 {
1125 int i;
1126 int fp_found = 0;
1127 rtx_insn *after;
1128
1129 const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));
1130
1131 next = next_active_insn (insn);
1132 if (!next)
1133 break;
1134
1135 for (after = next, i = 0; i < 4; i++)
1136 {
1137 /* Count floating-point operations. */
1138 if (i != 3 && fpop_insn_p (after))
1139 {
1140 /* If the insn uses the destination register of
1141 the div/sqrt, then it cannot be problematic. */
1142 if (insn_uses_reg_p (after, dest_reg))
1143 break;
1144 fp_found++;
1145 }
1146
1147 /* Count floating-point loads. */
1148 if (i != 3
1149 && (set = single_set (after)) != NULL_RTX
1150 && REG_P (SET_DEST (set))
1151 && REGNO (SET_DEST (set)) > 31)
1152 {
1153 /* If the insn uses the destination register of
1154 the div/sqrt, then it cannot be problematic. */
1155 if (REGNO (SET_DEST (set)) == dest_reg)
1156 break;
1157 fp_found++;
1158 }
1159
1160 /* Check if this is a problematic sequence. */
1161 if (i > 1
1162 && fp_found >= 2
1163 && div_sqrt_insn_p (after))
1164 {
1165 /* If this is the short version of the problematic
1166 sequence we add two NOPs in a row to also prevent
1167 the long version. */
1168 if (i == 2)
1169 emit_insn_before (gen_nop (), next);
1170 insert_nop = true;
1171 break;
1172 }
1173
1174 /* No need to scan past a second div/sqrt. */
1175 if (div_sqrt_insn_p (after))
1176 break;
1177
1178 /* Insert NOP before branch. */
1179 if (i < 3
1180 && (!NONJUMP_INSN_P (after)
1181 || GET_CODE (PATTERN (after)) == SEQUENCE))
1182 {
1183 insert_nop = true;
1184 break;
1185 }
1186
1187 after = next_active_insn (after);
1188 if (!after)
1189 break;
1190 }
1191 }
1192
1193 /* Look for either of these two sequences:
1194
1195 Sequence A:
1196 1. store of word size or less (e.g. st / stb / sth / stf)
1197 2. any single instruction that is not a load or store
1198 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)
1199
1200 Sequence B:
1201 1. store of double word size (e.g. std / stdf)
1202 2. any store instruction (e.g. st / stb / sth / stf / std / stdf) */
1203 if (sparc_fix_b2bst
1204 && NONJUMP_INSN_P (insn)
1205 && (set = single_set (insn)) != NULL_RTX
1206 && MEM_P (SET_DEST (set)))
1207 {
1208 /* Sequence B begins with a double-word store. */
1209 bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
1210 rtx_insn *after;
1211 int i;
1212
1213 next = next_active_insn (insn);
1214 if (!next)
1215 break;
1216
1217 for (after = next, i = 0; i < 2; i++)
1218 {
1219 /* Skip empty assembly statements. */
1220 if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
1221 || (USEFUL_INSN_P (after)
1222 && (asm_noperands (PATTERN (after))>=0)
1223 && !strcmp (decode_asm_operands (PATTERN (after),
1224 NULL, NULL, NULL,
1225 NULL, NULL), "")))
1226 after = next_active_insn (after);
1227 if (!after)
1228 break;
1229
1230 /* If the insn is a branch, then it cannot be problematic. */
1231 if (!NONJUMP_INSN_P (after)
1232 || GET_CODE (PATTERN (after)) == SEQUENCE)
1233 break;
1234
1235 /* Sequence B is only two instructions long. */
1236 if (seq_b)
1237 {
1238 /* Add NOP if followed by a store. */
1239 if ((set = single_set (after)) != NULL_RTX
1240 && MEM_P (SET_DEST (set)))
1241 insert_nop = true;
1242
1243 /* Otherwise it is ok. */
1244 break;
1245 }
1246
1247 /* If the second instruction is a load or a store,
1248 then the sequence cannot be problematic. */
1249 if (i == 0)
1250 {
1251 if ((set = single_set (after)) != NULL_RTX
1252 && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
1253 break;
1254
1255 after = next_active_insn (after);
1256 if (!after)
1257 break;
1258 }
1259
1260 /* Add NOP if third instruction is a store. */
1261 if (i == 1
1262 && (set = single_set (after)) != NULL_RTX
1263 && MEM_P (SET_DEST (set)))
1264 insert_nop = true;
1265 }
1266 }
1267
1268 /* Look for a single-word load into an odd-numbered FP register. */
1269 else if (sparc_fix_at697f
1270 && NONJUMP_INSN_P (insn)
1271 && (set = single_set (insn)) != NULL_RTX
1272 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1273 && mem_ref (SET_SRC (set))
1274 && REG_P (SET_DEST (set))
1275 && REGNO (SET_DEST (set)) > 31
1276 && REGNO (SET_DEST (set)) % 2 != 0)
1277 {
1278 /* The wrong dependency is on the enclosing double register. */
1279 const unsigned int x = REGNO (SET_DEST (set)) - 1;
1280 unsigned int src1, src2, dest;
1281 int code;
1282
1283 next = next_active_insn (insn);
1284 if (!next)
1285 break;
1286 /* If the insn is a branch, then it cannot be problematic. */
1287 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1288 continue;
1289
1290 extract_insn (next);
1291 code = INSN_CODE (next);
1292
1293 switch (code)
1294 {
1295 case CODE_FOR_adddf3:
1296 case CODE_FOR_subdf3:
1297 case CODE_FOR_muldf3:
1298 case CODE_FOR_divdf3:
1299 dest = REGNO (recog_data.operand[0]);
1300 src1 = REGNO (recog_data.operand[1]);
1301 src2 = REGNO (recog_data.operand[2]);
1302 if (src1 != src2)
1303 {
1304 /* Case [1-4]:
1305 ld [address], %fx+1
1306 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
1307 if ((src1 == x || src2 == x)
1308 && (dest == src1 || dest == src2))
1309 insert_nop = true;
1310 }
1311 else
1312 {
1313 /* Case 5:
1314 ld [address], %fx+1
1315 FPOPd %fx, %fx, %fx */
1316 if (src1 == x
1317 && dest == src1
1318 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
1319 insert_nop = true;
1320 }
1321 break;
1322
1323 case CODE_FOR_sqrtdf2:
1324 dest = REGNO (recog_data.operand[0]);
1325 src1 = REGNO (recog_data.operand[1]);
1326 /* Case 6:
1327 ld [address], %fx+1
1328 fsqrtd %fx, %fx */
1329 if (src1 == x && dest == src1)
1330 insert_nop = true;
1331 break;
1332
1333 default:
1334 break;
1335 }
1336 }
1337
1338 /* Look for a single-word load into an integer register. */
1339 else if (sparc_fix_ut699
1340 && NONJUMP_INSN_P (insn)
1341 && (set = single_set (insn)) != NULL_RTX
1342 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
1343 && (mem_ref (SET_SRC (set)) != NULL_RTX
1344 || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
1345 && REG_P (SET_DEST (set))
1346 && REGNO (SET_DEST (set)) < 32)
1347 {
1348 /* There is no problem if the second memory access has a data
1349 dependency on the first single-cycle load. */
1350 rtx x = SET_DEST (set);
1351
1352 next = next_active_insn (insn);
1353 if (!next)
1354 break;
1355 /* If the insn is a branch, then it cannot be problematic. */
1356 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1357 continue;
1358
1359 /* Look for a second memory access to/from an integer register. */
1360 if ((set = single_set (next)) != NULL_RTX)
1361 {
1362 rtx src = SET_SRC (set);
1363 rtx dest = SET_DEST (set);
1364 rtx mem;
1365
1366 /* LDD is affected. */
1367 if ((mem = mem_ref (src)) != NULL_RTX
1368 && REG_P (dest)
1369 && REGNO (dest) < 32
1370 && !reg_mentioned_p (x, XEXP (mem, 0)))
1371 insert_nop = true;
1372
1373 /* STD is *not* affected. */
1374 else if (MEM_P (dest)
1375 && GET_MODE_SIZE (GET_MODE (dest)) <= 4
1376 && (src == CONST0_RTX (GET_MODE (dest))
1377 || (REG_P (src)
1378 && REGNO (src) < 32
1379 && REGNO (src) != REGNO (x)))
1380 && !reg_mentioned_p (x, XEXP (dest, 0)))
1381 insert_nop = true;
1382
1383              /* GOT accesses use LD.  */
1384 else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
1385 && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
1386 insert_nop = true;
1387 }
1388 }
1389
1390 /* Look for a single-word load/operation into an FP register. */
1391 else if (sparc_fix_ut699
1392 && NONJUMP_INSN_P (insn)
1393 && (set = single_set (insn)) != NULL_RTX
1394 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1395 && REG_P (SET_DEST (set))
1396 && REGNO (SET_DEST (set)) > 31)
1397 {
1398 /* Number of instructions in the problematic window. */
1399 const int n_insns = 4;
1400 /* The problematic combination is with the sibling FP register. */
1401 const unsigned int x = REGNO (SET_DEST (set));
1402 const unsigned int y = x ^ 1;
1403 rtx_insn *after;
1404 int i;
1405
1406 next = next_active_insn (insn);
1407 if (!next)
1408 break;
1409 /* If the insn is a branch, then it cannot be problematic. */
1410 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1411 continue;
1412
1413 /* Look for a second load/operation into the sibling FP register. */
1414 if (!((set = single_set (next)) != NULL_RTX
1415 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1416 && REG_P (SET_DEST (set))
1417 && REGNO (SET_DEST (set)) == y))
1418 continue;
1419
1420 /* Look for a (possible) store from the FP register in the next N
1421 instructions, but bail out if it is again modified or if there
1422 is a store from the sibling FP register before this store. */
1423 for (after = next, i = 0; i < n_insns; i++)
1424 {
1425 bool branch_p;
1426
1427 after = next_active_insn (after);
1428 if (!after)
1429 break;
1430
1431 /* This is a branch with an empty delay slot. */
1432 if (!NONJUMP_INSN_P (after))
1433 {
1434 if (++i == n_insns)
1435 break;
1436 branch_p = true;
1437 after = NULL;
1438 }
1439 /* This is a branch with a filled delay slot. */
1440 else if (rtx_sequence *seq =
1441 dyn_cast <rtx_sequence *> (PATTERN (after)))
1442 {
1443 if (++i == n_insns)
1444 break;
1445 branch_p = true;
1446 after = seq->insn (1);
1447 }
1448 /* This is a regular instruction. */
1449 else
1450 branch_p = false;
1451
1452 if (after && (set = single_set (after)) != NULL_RTX)
1453 {
1454 const rtx src = SET_SRC (set);
1455 const rtx dest = SET_DEST (set);
1456 const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
1457
1458 /* If the FP register is again modified before the store,
1459 then the store isn't affected. */
1460 if (REG_P (dest)
1461 && (REGNO (dest) == x
1462 || (REGNO (dest) == y && size == 8)))
1463 break;
1464
1465 if (MEM_P (dest) && REG_P (src))
1466 {
1467 /* If there is a store from the sibling FP register
1468 before the store, then the store is not affected. */
1469 if (REGNO (src) == y || (REGNO (src) == x && size == 8))
1470 break;
1471
1472 /* Otherwise, the store is affected. */
1473 if (REGNO (src) == x && size == 4)
1474 {
1475 insert_nop = true;
1476 break;
1477 }
1478 }
1479 }
1480
1481 /* If we have a branch in the first M instructions, then we
1482 cannot see the (M+2)th instruction so we play safe. */
1483 if (branch_p && i <= (n_insns - 2))
1484 {
1485 insert_nop = true;
1486 break;
1487 }
1488 }
1489 }
1490
1491 else
1492 next = NEXT_INSN (insn);
1493
1494 if (insert_nop)
1495 emit_insn_before (gen_nop (), next);
1496 }
1497
1498 return 0;
1499 }
1500
1501 namespace {
1502
1503 const pass_data pass_data_work_around_errata =
1504 {
1505 RTL_PASS, /* type */
1506 "errata", /* name */
1507 OPTGROUP_NONE, /* optinfo_flags */
1508 TV_MACH_DEP, /* tv_id */
1509 0, /* properties_required */
1510 0, /* properties_provided */
1511 0, /* properties_destroyed */
1512 0, /* todo_flags_start */
1513 0, /* todo_flags_finish */
1514 };
1515
1516 class pass_work_around_errata : public rtl_opt_pass
1517 {
1518 public:
1519 pass_work_around_errata(gcc::context *ctxt)
1520 : rtl_opt_pass(pass_data_work_around_errata, ctxt)
1521 {}
1522
1523 /* opt_pass methods: */
1524 virtual bool gate (function *)
1525 {
1526 return sparc_fix_at697f || sparc_fix_ut699 || sparc_fix_b2bst
1527 || sparc_fix_gr712rc || sparc_fix_ut700 || sparc_fix_lost_divsqrt;
1528 }
1529
1530 virtual unsigned int execute (function *)
1531 {
1532 return sparc_do_work_around_errata ();
1533 }
1534
1535 }; // class pass_work_around_errata
1536
1537 } // anon namespace
1538
1539 rtl_opt_pass *
1540 make_pass_work_around_errata (gcc::context *ctxt)
1541 {
1542 return new pass_work_around_errata (ctxt);
1543 }
1544
1545 /* Helpers for TARGET_DEBUG_OPTIONS. */
1546 static void
1547 dump_target_flag_bits (const int flags)
1548 {
1549 if (flags & MASK_64BIT)
1550 fprintf (stderr, "64BIT ");
1551 if (flags & MASK_APP_REGS)
1552 fprintf (stderr, "APP_REGS ");
1553 if (flags & MASK_FASTER_STRUCTS)
1554 fprintf (stderr, "FASTER_STRUCTS ");
1555 if (flags & MASK_FLAT)
1556 fprintf (stderr, "FLAT ");
1557 if (flags & MASK_FMAF)
1558 fprintf (stderr, "FMAF ");
1559 if (flags & MASK_FSMULD)
1560 fprintf (stderr, "FSMULD ");
1561 if (flags & MASK_FPU)
1562 fprintf (stderr, "FPU ");
1563 if (flags & MASK_HARD_QUAD)
1564 fprintf (stderr, "HARD_QUAD ");
1565 if (flags & MASK_POPC)
1566 fprintf (stderr, "POPC ");
1567 if (flags & MASK_PTR64)
1568 fprintf (stderr, "PTR64 ");
1569 if (flags & MASK_STACK_BIAS)
1570 fprintf (stderr, "STACK_BIAS ");
1571 if (flags & MASK_UNALIGNED_DOUBLES)
1572 fprintf (stderr, "UNALIGNED_DOUBLES ");
1573 if (flags & MASK_V8PLUS)
1574 fprintf (stderr, "V8PLUS ");
1575 if (flags & MASK_VIS)
1576 fprintf (stderr, "VIS ");
1577 if (flags & MASK_VIS2)
1578 fprintf (stderr, "VIS2 ");
1579 if (flags & MASK_VIS3)
1580 fprintf (stderr, "VIS3 ");
1581 if (flags & MASK_VIS4)
1582 fprintf (stderr, "VIS4 ");
1583 if (flags & MASK_VIS4B)
1584 fprintf (stderr, "VIS4B ");
1585 if (flags & MASK_CBCOND)
1586 fprintf (stderr, "CBCOND ");
1587 if (flags & MASK_DEPRECATED_V8_INSNS)
1588 fprintf (stderr, "DEPRECATED_V8_INSNS ");
1589 if (flags & MASK_SPARCLET)
1590 fprintf (stderr, "SPARCLET ");
1591 if (flags & MASK_SPARCLITE)
1592 fprintf (stderr, "SPARCLITE ");
1593 if (flags & MASK_V8)
1594 fprintf (stderr, "V8 ");
1595 if (flags & MASK_V9)
1596 fprintf (stderr, "V9 ");
1597 }
1598
1599 static void
1600 dump_target_flags (const char *prefix, const int flags)
1601 {
1602 fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
1603 dump_target_flag_bits (flags);
1604 fprintf(stderr, "]\n");
1605 }
1606
1607 /* Validate and override various options, and do some machine dependent
1608 initialization. */
1609
1610 static void
1611 sparc_option_override (void)
1612 {
1613 static struct code_model {
1614 const char *const name;
1615 const enum cmodel value;
1616 } const cmodels[] = {
1617 { "32", CM_32 },
1618 { "medlow", CM_MEDLOW },
1619 { "medmid", CM_MEDMID },
1620 { "medany", CM_MEDANY },
1621 { "embmedany", CM_EMBMEDANY },
1622 { NULL, (enum cmodel) 0 }
1623 };
1624 const struct code_model *cmodel;
1625 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
1626 static struct cpu_default {
1627 const int cpu;
1628 const enum processor_type processor;
1629 } const cpu_default[] = {
1630 /* There must be one entry here for each TARGET_CPU value. */
1631 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
1632 { TARGET_CPU_v8, PROCESSOR_V8 },
1633 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
1634 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
1635 { TARGET_CPU_leon, PROCESSOR_LEON },
1636 { TARGET_CPU_leon3, PROCESSOR_LEON3 },
1637 { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
1638 { TARGET_CPU_sparclite, PROCESSOR_F930 },
1639 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
1640 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
1641 { TARGET_CPU_v9, PROCESSOR_V9 },
1642 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
1643 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
1644 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
1645 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
1646 { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
1647 { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
1648 { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
1649 { TARGET_CPU_m8, PROCESSOR_M8 },
1650 { -1, PROCESSOR_V7 }
1651 };
1652 const struct cpu_default *def;
1653 /* Table of values for -m{cpu,tune}=. This must match the order of
1654 the enum processor_type in sparc-opts.h. */
1655 static struct cpu_table {
1656 const char *const name;
1657 const int disable;
1658 const int enable;
1659 } const cpu_table[] = {
1660 { "v7", MASK_ISA|MASK_FSMULD, 0 },
1661 { "cypress", MASK_ISA|MASK_FSMULD, 0 },
1662 { "v8", MASK_ISA, MASK_V8 },
1663 /* TI TMS390Z55 supersparc */
1664 { "supersparc", MASK_ISA, MASK_V8 },
1665 { "hypersparc", MASK_ISA, MASK_V8 },
1666 { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
1667 { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
1668 { "leon3v7", MASK_ISA|MASK_FSMULD, MASK_LEON3 },
1669 { "sparclite", MASK_ISA|MASK_FSMULD, MASK_SPARCLITE },
1670 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
1671 { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1672 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
1673 { "f934", MASK_ISA|MASK_FSMULD, MASK_SPARCLITE },
1674 { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1675 { "sparclet", MASK_ISA|MASK_FSMULD, MASK_SPARCLET },
1676 /* TEMIC sparclet */
1677 { "tsc701", MASK_ISA|MASK_FSMULD, MASK_SPARCLET },
1678 { "v9", MASK_ISA, MASK_V9 },
1679 /* UltraSPARC I, II, IIi */
1680 { "ultrasparc", MASK_ISA,
1681 /* Although insns using %y are deprecated, it is a clear win. */
1682 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1683 /* UltraSPARC III */
1684 /* ??? Check if %y issue still holds true. */
1685 { "ultrasparc3", MASK_ISA,
1686 MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
1687 /* UltraSPARC T1 */
1688 { "niagara", MASK_ISA,
1689 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1690 /* UltraSPARC T2 */
1691 { "niagara2", MASK_ISA,
1692 MASK_V9|MASK_POPC|MASK_VIS2 },
1693 /* UltraSPARC T3 */
1694 { "niagara3", MASK_ISA,
1695 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
1696 /* UltraSPARC T4 */
1697 { "niagara4", MASK_ISA,
1698 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
1699 /* UltraSPARC M7 */
1700 { "niagara7", MASK_ISA,
1701 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
1702 /* UltraSPARC M8 */
1703 { "m8", MASK_ISA,
1704 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC|MASK_VIS4B }
1705 };
1706 const struct cpu_table *cpu;
1707 unsigned int i;
1708
1709 if (sparc_debug_string != NULL)
1710 {
1711 const char *q;
1712 char *p;
1713
1714 p = ASTRDUP (sparc_debug_string);
1715 while ((q = strtok (p, ",")) != NULL)
1716 {
1717 bool invert;
1718 int mask;
1719
1720 p = NULL;
1721 if (*q == '!')
1722 {
1723 invert = true;
1724 q++;
1725 }
1726 else
1727 invert = false;
1728
1729 if (! strcmp (q, "all"))
1730 mask = MASK_DEBUG_ALL;
1731 else if (! strcmp (q, "options"))
1732 mask = MASK_DEBUG_OPTIONS;
1733 else
1734 error ("unknown -mdebug-%s switch", q);
1735
1736 if (invert)
1737 sparc_debug &= ~mask;
1738 else
1739 sparc_debug |= mask;
1740 }
1741 }
1742
1743 /* Enable the FsMULd instruction by default if not explicitly specified by
1744 the user. It may be later disabled by the CPU (explicitly or not). */
1745 if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
1746 target_flags |= MASK_FSMULD;
1747
1748 if (TARGET_DEBUG_OPTIONS)
1749 {
1750 dump_target_flags ("Initial target_flags", target_flags);
1751 dump_target_flags ("target_flags_explicit", target_flags_explicit);
1752 }
1753
1754 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1755 SUBTARGET_OVERRIDE_OPTIONS;
1756 #endif
1757
1758 #ifndef SPARC_BI_ARCH
1759 /* Check for unsupported architecture size. */
1760 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1761 error ("%s is not supported by this configuration",
1762 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1763 #endif
1764
1765 /* We force all 64-bit archs to use 128-bit long double. */
1766 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1767 {
1768 error ("-mlong-double-64 not allowed with -m64");
1769 target_flags |= MASK_LONG_DOUBLE_128;
1770 }
1771
1772 /* Code model selection. */
1773 sparc_cmodel = SPARC_DEFAULT_CMODEL;
1774
1775 #ifdef SPARC_BI_ARCH
1776 if (TARGET_ARCH32)
1777 sparc_cmodel = CM_32;
1778 #endif
1779
1780 if (sparc_cmodel_string != NULL)
1781 {
1782 if (TARGET_ARCH64)
1783 {
1784 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
1785 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
1786 break;
1787 if (cmodel->name == NULL)
1788 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
1789 else
1790 sparc_cmodel = cmodel->value;
1791 }
1792 else
1793 error ("-mcmodel= is not supported on 32-bit systems");
1794 }
1795
1796 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1797 for (i = 8; i < 16; i++)
1798 if (!call_used_regs [i])
1799 {
1800 error ("-fcall-saved-REG is not supported for out registers");
1801 call_used_regs [i] = 1;
1802 }
1803
1804 /* Set the default CPU if no -mcpu option was specified. */
1805 if (!global_options_set.x_sparc_cpu_and_features)
1806 {
1807 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1808 if (def->cpu == TARGET_CPU_DEFAULT)
1809 break;
1810 gcc_assert (def->cpu != -1);
1811 sparc_cpu_and_features = def->processor;
1812 }
1813
1814 /* Set the default CPU if no -mtune option was specified. */
1815 if (!global_options_set.x_sparc_cpu)
1816 sparc_cpu = sparc_cpu_and_features;
1817
1818 cpu = &cpu_table[(int) sparc_cpu_and_features];
1819
1820 if (TARGET_DEBUG_OPTIONS)
1821 {
1822 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1823 dump_target_flags ("cpu->disable", cpu->disable);
1824 dump_target_flags ("cpu->enable", cpu->enable);
1825 }
1826
1827 target_flags &= ~cpu->disable;
1828 target_flags |= (cpu->enable
1829 #ifndef HAVE_AS_FMAF_HPC_VIS3
1830 & ~(MASK_FMAF | MASK_VIS3)
1831 #endif
1832 #ifndef HAVE_AS_SPARC4
1833 & ~MASK_CBCOND
1834 #endif
1835 #ifndef HAVE_AS_SPARC5_VIS4
1836 & ~(MASK_VIS4 | MASK_SUBXC)
1837 #endif
1838 #ifndef HAVE_AS_SPARC6
1839 & ~(MASK_VIS4B)
1840 #endif
1841 #ifndef HAVE_AS_LEON
1842 & ~(MASK_LEON | MASK_LEON3)
1843 #endif
1844 & ~(target_flags_explicit & MASK_FEATURES)
1845 );
1846
1847 /* -mvis2 implies -mvis. */
1848 if (TARGET_VIS2)
1849 target_flags |= MASK_VIS;
1850
1851 /* -mvis3 implies -mvis2 and -mvis. */
1852 if (TARGET_VIS3)
1853 target_flags |= MASK_VIS2 | MASK_VIS;
1854
1855 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1856 if (TARGET_VIS4)
1857 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1858
1859 /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis */
1860 if (TARGET_VIS4B)
1861 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1862
1863 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
1864 FPU is disabled. */
1865 if (!TARGET_FPU)
1866 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1867 | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
1868
1869 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1870 are available; -m64 also implies v9. */
1871 if (TARGET_VIS || TARGET_ARCH64)
1872 {
1873 target_flags |= MASK_V9;
1874 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1875 }
1876
1877 /* -mvis also implies -mv8plus on 32-bit. */
1878 if (TARGET_VIS && !TARGET_ARCH64)
1879 target_flags |= MASK_V8PLUS;
1880
1881 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
1882 if (TARGET_V9 && TARGET_ARCH32)
1883 target_flags |= MASK_DEPRECATED_V8_INSNS;
1884
1885 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
1886 if (!TARGET_V9 || TARGET_ARCH64)
1887 target_flags &= ~MASK_V8PLUS;
1888
1889 /* Don't use stack biasing in 32-bit mode. */
1890 if (TARGET_ARCH32)
1891 target_flags &= ~MASK_STACK_BIAS;
1892
1893 /* Use LRA instead of reload, unless otherwise instructed. */
1894 if (!(target_flags_explicit & MASK_LRA))
1895 target_flags |= MASK_LRA;
1896
1897 /* Enable applicable errata workarounds for LEON3FT. */
1898 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1899 {
1900 sparc_fix_b2bst = 1;
1901 sparc_fix_lost_divsqrt = 1;
1902 }
1903
1904 /* Disable FsMULd for the UT699 since it doesn't work correctly. */
1905 if (sparc_fix_ut699)
1906 target_flags &= ~MASK_FSMULD;
1907
1908 /* Supply a default value for align_functions. */
1909 if (align_functions == 0)
1910 {
1911 if (sparc_cpu == PROCESSOR_ULTRASPARC
1912 || sparc_cpu == PROCESSOR_ULTRASPARC3
1913 || sparc_cpu == PROCESSOR_NIAGARA
1914 || sparc_cpu == PROCESSOR_NIAGARA2
1915 || sparc_cpu == PROCESSOR_NIAGARA3
1916 || sparc_cpu == PROCESSOR_NIAGARA4)
1917 align_functions = 32;
1918 else if (sparc_cpu == PROCESSOR_NIAGARA7
1919 || sparc_cpu == PROCESSOR_M8)
1920 align_functions = 64;
1921 }
1922
1923 /* Validate PCC_STRUCT_RETURN. */
1924 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
1925 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
1926
1927 /* Only use .uaxword when compiling for a 64-bit target. */
1928 if (!TARGET_ARCH64)
1929 targetm.asm_out.unaligned_op.di = NULL;
1930
1931 /* Do various machine dependent initializations. */
1932 sparc_init_modes ();
1933
1934 /* Set up function hooks. */
1935 init_machine_status = sparc_init_machine_status;
1936
1937 switch (sparc_cpu)
1938 {
1939 case PROCESSOR_V7:
1940 case PROCESSOR_CYPRESS:
1941 sparc_costs = &cypress_costs;
1942 break;
1943 case PROCESSOR_V8:
1944 case PROCESSOR_SPARCLITE:
1945 case PROCESSOR_SUPERSPARC:
1946 sparc_costs = &supersparc_costs;
1947 break;
1948 case PROCESSOR_F930:
1949 case PROCESSOR_F934:
1950 case PROCESSOR_HYPERSPARC:
1951 case PROCESSOR_SPARCLITE86X:
1952 sparc_costs = &hypersparc_costs;
1953 break;
1954 case PROCESSOR_LEON:
1955 sparc_costs = &leon_costs;
1956 break;
1957 case PROCESSOR_LEON3:
1958 case PROCESSOR_LEON3V7:
1959 sparc_costs = &leon3_costs;
1960 break;
1961 case PROCESSOR_SPARCLET:
1962 case PROCESSOR_TSC701:
1963 sparc_costs = &sparclet_costs;
1964 break;
1965 case PROCESSOR_V9:
1966 case PROCESSOR_ULTRASPARC:
1967 sparc_costs = &ultrasparc_costs;
1968 break;
1969 case PROCESSOR_ULTRASPARC3:
1970 sparc_costs = &ultrasparc3_costs;
1971 break;
1972 case PROCESSOR_NIAGARA:
1973 sparc_costs = &niagara_costs;
1974 break;
1975 case PROCESSOR_NIAGARA2:
1976 sparc_costs = &niagara2_costs;
1977 break;
1978 case PROCESSOR_NIAGARA3:
1979 sparc_costs = &niagara3_costs;
1980 break;
1981 case PROCESSOR_NIAGARA4:
1982 sparc_costs = &niagara4_costs;
1983 break;
1984 case PROCESSOR_NIAGARA7:
1985 sparc_costs = &niagara7_costs;
1986 break;
1987 case PROCESSOR_M8:
1988 sparc_costs = &m8_costs;
1989 break;
1990 case PROCESSOR_NATIVE:
1991 gcc_unreachable ();
1992 };
1993
1994 if (sparc_memory_model == SMM_DEFAULT)
1995 {
1996 /* Choose the memory model for the operating system. */
1997 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
1998 if (os_default != SMM_DEFAULT)
1999 sparc_memory_model = os_default;
2000 /* Choose the most relaxed model for the processor. */
2001 else if (TARGET_V9)
2002 sparc_memory_model = SMM_RMO;
2003 else if (TARGET_LEON3)
2004 sparc_memory_model = SMM_TSO;
2005 else if (TARGET_LEON)
2006 sparc_memory_model = SMM_SC;
2007 else if (TARGET_V8)
2008 sparc_memory_model = SMM_PSO;
2009 else
2010 sparc_memory_model = SMM_SC;
2011 }
2012
2013 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
2014 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
2015 target_flags |= MASK_LONG_DOUBLE_128;
2016 #endif
2017
2018 if (TARGET_DEBUG_OPTIONS)
2019 dump_target_flags ("Final target_flags", target_flags);
2020
2021 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
2022 can run at the same time. More important, it is the threshold
2023 defining when additional prefetches will be dropped by the
2024 hardware.
2025
2026 The UltraSPARC-III features a documented prefetch queue with a
2027 size of 8. Additional prefetches issued in the cpu are
2028 dropped.
2029
2030 Niagara processors are different. In these processors prefetches
2031 are handled much like regular loads. The L1 miss buffer is 32
2032 entries, but prefetches start getting affected when 30 entries
2033 become occupied. That occupation could be a mix of regular loads
2034 and prefetches though. And that buffer is shared by all threads.
2035 Once the threshold is reached, if the core is running a single
2036 thread the prefetch will retry. If more than one thread is
2037 running, the prefetch will be dropped.
2038
2039 All this makes it very difficult to determine how many
2040 prefetches can be issued simultaneously, even in a
2041 single-threaded program. Experimental results show that setting
2042 this parameter to 32 works well when the number of threads is not
2043 high. */
2044 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
2045 ((sparc_cpu == PROCESSOR_ULTRASPARC
2046 || sparc_cpu == PROCESSOR_NIAGARA
2047 || sparc_cpu == PROCESSOR_NIAGARA2
2048 || sparc_cpu == PROCESSOR_NIAGARA3
2049 || sparc_cpu == PROCESSOR_NIAGARA4)
2050 ? 2
2051 : (sparc_cpu == PROCESSOR_ULTRASPARC3
2052 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
2053 || sparc_cpu == PROCESSOR_M8)
2054 ? 32 : 3))),
2055 global_options.x_param_values,
2056 global_options_set.x_param_values);
2057
2058 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
2059 bytes.
2060
2061 The Oracle SPARC Architecture (previously the UltraSPARC
2062 Architecture) specification states that when a PREFETCH[A]
2063 instruction is executed an implementation-specific amount of data
2064 is prefetched, and that it is at least 64 bytes long (aligned to
2065 at least 64 bytes).
2066
2067 However, this is not correct. The M7 (and implementations prior
2068 to that) does not guarantee a 64B prefetch into a cache if the
2069 line size is smaller. A single cache line is all that is ever
2070 prefetched. So for the M7, where the L1D$ has 32B lines and the
2071 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
2072 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
2073 is a read_n prefetch, which is the only type which allocates to
2074 the L1.) */
2075 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
2076 (sparc_cpu == PROCESSOR_M8
2077 ? 64 : 32),
2078 global_options.x_param_values,
2079 global_options_set.x_param_values);
2080
2081 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
2082 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
2083 Niagara processors feature an L1D$ of 16KB. */
2084 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
2085 ((sparc_cpu == PROCESSOR_ULTRASPARC
2086 || sparc_cpu == PROCESSOR_ULTRASPARC3
2087 || sparc_cpu == PROCESSOR_NIAGARA
2088 || sparc_cpu == PROCESSOR_NIAGARA2
2089 || sparc_cpu == PROCESSOR_NIAGARA3
2090 || sparc_cpu == PROCESSOR_NIAGARA4
2091 || sparc_cpu == PROCESSOR_NIAGARA7
2092 || sparc_cpu == PROCESSOR_M8)
2093 ? 16 : 64),
2094 global_options.x_param_values,
2095 global_options_set.x_param_values);
2096
2097
2098 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes. Note
2099 that 512 is the default in params.def. */
2100 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
2101 ((sparc_cpu == PROCESSOR_NIAGARA4
2102 || sparc_cpu == PROCESSOR_M8)
2103 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
2104 ? 256 : 512)),
2105 global_options.x_param_values,
2106 global_options_set.x_param_values);
2107
2108
2109 /* Disable save slot sharing for call-clobbered registers by default.
2110 The IRA sharing algorithm works on single registers only and this
2111 pessimizes for double floating-point registers. */
2112 if (!global_options_set.x_flag_ira_share_save_slots)
2113 flag_ira_share_save_slots = 0;
2114
2115 /* Only enable REE by default in 64-bit mode where it helps to eliminate
2116 redundant 32-to-64-bit extensions. */
2117 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
2118 flag_ree = 0;
2119 }
2120 \f
2121 /* Miscellaneous utilities. */
2122
2123 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
2124 or branch on register contents instructions. */
2125
2126 int
2127 v9_regcmp_p (enum rtx_code code)
2128 {
2129 return (code == EQ || code == NE || code == GE || code == LT
2130 || code == LE || code == GT);
2131 }
2132
2133 /* Nonzero if OP is a floating point constant which can
2134 be loaded into an integer register using a single
2135 sethi instruction. */
2136
2137 int
2138 fp_sethi_p (rtx op)
2139 {
2140 if (GET_CODE (op) == CONST_DOUBLE)
2141 {
2142 long i;
2143
2144 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2145 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
2146 }
2147
2148 return 0;
2149 }
2150
2151 /* Nonzero if OP is a floating point constant which can
2152 be loaded into an integer register using a single
2153 mov instruction. */
2154
2155 int
2156 fp_mov_p (rtx op)
2157 {
2158 if (GET_CODE (op) == CONST_DOUBLE)
2159 {
2160 long i;
2161
2162 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2163 return SPARC_SIMM13_P (i);
2164 }
2165
2166 return 0;
2167 }
2168
2169 /* Nonzero if OP is a floating point constant which can
2170 be loaded into an integer register using a high/losum
2171 instruction sequence. */
2172
2173 int
2174 fp_high_losum_p (rtx op)
2175 {
2176 /* The constraints calling this should only be in
2177 SFmode move insns, so any constant which cannot
2178 be moved using a single insn will do. */
2179 if (GET_CODE (op) == CONST_DOUBLE)
2180 {
2181 long i;
2182
2183 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2184 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
2185 }
2186
2187 return 0;
2188 }
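
/* For illustration (values chosen here, not taken from the sources
   above): the single-precision bit pattern of 1.0f is 0x3f800000.
   Its low 10 bits are zero and it does not fit in a signed 13-bit
   immediate, so fp_mov_p is false, fp_sethi_p is true, and a single
   sethi loads it.  A pattern such as 0x3f800123 satisfies neither
   predicate and therefore needs the sethi/or pair that
   fp_high_losum_p accepts.  */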
2189
2190 /* Return true if the address of LABEL can be loaded by means of the
2191 mov{si,di}_pic_label_ref patterns in PIC mode. */
2192
2193 static bool
2194 can_use_mov_pic_label_ref (rtx label)
2195 {
2196 /* VxWorks does not impose a fixed gap between segments; the run-time
2197 gap can be different from the object-file gap. We therefore can't
2198 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
2199 are absolutely sure that X is in the same segment as the GOT.
2200 Unfortunately, the flexibility of linker scripts means that we
2201 can't be sure of that in general, so assume that GOT-relative
2202 accesses are never valid on VxWorks. */
2203 if (TARGET_VXWORKS_RTP)
2204 return false;
2205
2206 /* Similarly, if the label is non-local, it might end up being placed
2207 in a different section than the current one; now mov_pic_label_ref
2208 requires the label and the code to be in the same section. */
2209 if (LABEL_REF_NONLOCAL_P (label))
2210 return false;
2211
2212 /* Finally, if we are reordering basic blocks and partitioning into hot
2213 and cold sections, this might happen for any label. */
2214 if (flag_reorder_blocks_and_partition)
2215 return false;
2216
2217 return true;
2218 }
2219
2220 /* Expand a move instruction. Return true if all work is done. */
2221
2222 bool
2223 sparc_expand_move (machine_mode mode, rtx *operands)
2224 {
2225 /* Handle sets of MEM first. */
2226 if (GET_CODE (operands[0]) == MEM)
2227 {
2228 /* 0 is a register (or a pair of registers) on SPARC. */
2229 if (register_or_zero_operand (operands[1], mode))
2230 return false;
2231
2232 if (!reload_in_progress)
2233 {
2234 operands[0] = validize_mem (operands[0]);
2235 operands[1] = force_reg (mode, operands[1]);
2236 }
2237 }
2238
2239 /* Fix up TLS cases. */
2240 if (TARGET_HAVE_TLS
2241 && CONSTANT_P (operands[1])
2242 && sparc_tls_referenced_p (operands [1]))
2243 {
2244 operands[1] = sparc_legitimize_tls_address (operands[1]);
2245 return false;
2246 }
2247
2248 /* Fix up PIC cases. */
2249 if (flag_pic && CONSTANT_P (operands[1]))
2250 {
2251 if (pic_address_needs_scratch (operands[1]))
2252 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
2253
2254 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2255 if ((GET_CODE (operands[1]) == LABEL_REF
2256 && can_use_mov_pic_label_ref (operands[1]))
2257 || (GET_CODE (operands[1]) == CONST
2258 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2259 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
2260 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
2261 && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
2262 {
2263 if (mode == SImode)
2264 {
2265 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2266 return true;
2267 }
2268
2269 if (mode == DImode)
2270 {
2271 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2272 return true;
2273 }
2274 }
2275
2276 if (symbolic_operand (operands[1], mode))
2277 {
2278 operands[1]
2279 = sparc_legitimize_pic_address (operands[1],
2280 reload_in_progress
2281 ? operands[0] : NULL_RTX);
2282 return false;
2283 }
2284 }
2285
2286 /* If we are trying to toss an integer constant into FP registers,
2287 or loading a FP or vector constant, force it into memory. */
2288 if (CONSTANT_P (operands[1])
2289 && REG_P (operands[0])
2290 && (SPARC_FP_REG_P (REGNO (operands[0]))
2291 || SCALAR_FLOAT_MODE_P (mode)
2292 || VECTOR_MODE_P (mode)))
2293 {
2294 /* emit_group_store will send such bogosity to us when it is
2295 not storing directly into memory. So fix this up to avoid
2296 crashes in output_constant_pool. */
2297 if (operands [1] == const0_rtx)
2298 operands[1] = CONST0_RTX (mode);
2299
2300 /* We can clear FP registers or set them to all-ones if TARGET_VIS,
2301 and can always do so for other regs. */
2302 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2303 && (const_zero_operand (operands[1], mode)
2304 || const_all_ones_operand (operands[1], mode)))
2305 return false;
2306
2307 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2308 /* We are able to build any SF constant in integer registers
2309 with at most 2 instructions. */
2310 && (mode == SFmode
2311 /* And any DF constant in integer registers if needed. */
2312 || (mode == DFmode && !can_create_pseudo_p ())))
2313 return false;
2314
2315 operands[1] = force_const_mem (mode, operands[1]);
2316 if (!reload_in_progress)
2317 operands[1] = validize_mem (operands[1]);
2318 return false;
2319 }
2320
2321 /* Accept non-constants and valid constants unmodified. */
2322 if (!CONSTANT_P (operands[1])
2323 || GET_CODE (operands[1]) == HIGH
2324 || input_operand (operands[1], mode))
2325 return false;
2326
2327 switch (mode)
2328 {
2329 case E_QImode:
2330 /* All QImode constants require only one insn, so proceed. */
2331 break;
2332
2333 case E_HImode:
2334 case E_SImode:
2335 sparc_emit_set_const32 (operands[0], operands[1]);
2336 return true;
2337
2338 case E_DImode:
2339 /* input_operand should have filtered out 32-bit mode. */
2340 sparc_emit_set_const64 (operands[0], operands[1]);
2341 return true;
2342
2343 case E_TImode:
2344 {
2345 rtx high, low;
2346 /* TImode isn't available in 32-bit mode. */
2347 split_double (operands[1], &high, &low);
2348 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2349 high));
2350 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2351 low));
2352 }
2353 return true;
2354
2355 default:
2356 gcc_unreachable ();
2357 }
2358
2359 return false;
2360 }
2361
2362 /* Load OP1, a 32-bit constant, into OP0, a register.
2363 We know it can't be done in one insn when we get
2364 here, the move expander guarantees this. */
2365
2366 static void
2367 sparc_emit_set_const32 (rtx op0, rtx op1)
2368 {
2369 machine_mode mode = GET_MODE (op0);
2370 rtx temp = op0;
2371
2372 if (can_create_pseudo_p ())
2373 temp = gen_reg_rtx (mode);
2374
2375 if (GET_CODE (op1) == CONST_INT)
2376 {
2377 gcc_assert (!small_int_operand (op1, mode)
2378 && !const_high_operand (op1, mode));
2379
2380 /* Emit them as real moves instead of a HIGH/LO_SUM,
2381 this way CSE can see everything and reuse intermediate
2382 values if it wants. */
2383 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2384 & ~(HOST_WIDE_INT) 0x3ff)));
2385
2386 emit_insn (gen_rtx_SET (op0,
2387 gen_rtx_IOR (mode, temp,
2388 GEN_INT (INTVAL (op1) & 0x3ff))));
2389 }
2390 else
2391 {
2392 /* A symbol, emit in the traditional way. */
2393 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2394 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
2395 }
2396 }
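
/* Worked example (illustrative constant): for op1 = 0x12345678 the
   routine above emits
     temp = 0x12345400   (op1 & ~0x3ff, matched as a sethi)
     op0  = temp | 0x278 (op1 & 0x3ff)
   i.e. a sethi/or pair, while exposing the intermediate value
   0x12345400 to CSE.  */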
2397
2398 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2399 If TEMP is nonzero, we are forbidden to use any other scratch
2400 registers. Otherwise, we are allowed to generate them as needed.
2401
2402 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2403 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2404
2405 void
2406 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2407 {
2408 rtx cst, temp1, temp2, temp3, temp4, temp5;
2409 rtx ti_temp = 0;
2410
2411 /* Deal with too large offsets. */
2412 if (GET_CODE (op1) == CONST
2413 && GET_CODE (XEXP (op1, 0)) == PLUS
2414 && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
2415 && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
2416 {
2417 gcc_assert (!temp);
2418 temp1 = gen_reg_rtx (DImode);
2419 temp2 = gen_reg_rtx (DImode);
2420 sparc_emit_set_const64 (temp2, cst);
2421 sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
2422 NULL_RTX);
2423 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
2424 return;
2425 }
2426
2427 if (temp && GET_MODE (temp) == TImode)
2428 {
2429 ti_temp = temp;
2430 temp = gen_rtx_REG (DImode, REGNO (temp));
2431 }
2432
2433 /* SPARC-V9 code-model support. */
2434 switch (sparc_cmodel)
2435 {
2436 case CM_MEDLOW:
2437 /* The range spanned by all instructions in the object is less
2438 than 2^31 bytes (2GB) and the distance from any instruction
2439 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2440 than 2^31 bytes (2GB).
2441
2442 The executable must be in the low 4TB of the virtual address
2443 space.
2444
2445 sethi %hi(symbol), %temp1
2446 or %temp1, %lo(symbol), %reg */
2447 if (temp)
2448 temp1 = temp; /* op0 is allowed. */
2449 else
2450 temp1 = gen_reg_rtx (DImode);
2451
2452 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2453 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2454 break;
2455
2456 case CM_MEDMID:
2457 /* The range spanned by all instructions in the object is less
2458 than 2^31 bytes (2GB) and the distance from any instruction
2459 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2460 than 2^31 bytes (2GB).
2461
2462 The executable must be in the low 16TB of the virtual address
2463 space.
2464
2465 sethi %h44(symbol), %temp1
2466 or %temp1, %m44(symbol), %temp2
2467 sllx %temp2, 12, %temp3
2468 or %temp3, %l44(symbol), %reg */
2469 if (temp)
2470 {
2471 temp1 = op0;
2472 temp2 = op0;
2473 temp3 = temp; /* op0 is allowed. */
2474 }
2475 else
2476 {
2477 temp1 = gen_reg_rtx (DImode);
2478 temp2 = gen_reg_rtx (DImode);
2479 temp3 = gen_reg_rtx (DImode);
2480 }
2481
2482 emit_insn (gen_seth44 (temp1, op1));
2483 emit_insn (gen_setm44 (temp2, temp1, op1));
2484 emit_insn (gen_rtx_SET (temp3,
2485 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2486 emit_insn (gen_setl44 (op0, temp3, op1));
2487 break;
2488
2489 case CM_MEDANY:
2490 /* The range spanned by all instructions in the object is less
2491 than 2^31 bytes (2GB) and the distance from any instruction
2492 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2493 than 2^31 bytes (2GB).
2494
2495 The executable can be placed anywhere in the virtual address
2496 space.
2497
2498 sethi %hh(symbol), %temp1
2499 sethi %lm(symbol), %temp2
2500 or %temp1, %hm(symbol), %temp3
2501 sllx %temp3, 32, %temp4
2502 or %temp4, %temp2, %temp5
2503 or %temp5, %lo(symbol), %reg */
2504 if (temp)
2505 {
2506 /* It is possible that one of the registers we got for operands[2]
2507 might coincide with that of operands[0] (which is why we made
2508 it TImode). Pick the other one to use as our scratch. */
2509 if (rtx_equal_p (temp, op0))
2510 {
2511 gcc_assert (ti_temp);
2512 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2513 }
2514 temp1 = op0;
2515 temp2 = temp; /* op0 is _not_ allowed, see above. */
2516 temp3 = op0;
2517 temp4 = op0;
2518 temp5 = op0;
2519 }
2520 else
2521 {
2522 temp1 = gen_reg_rtx (DImode);
2523 temp2 = gen_reg_rtx (DImode);
2524 temp3 = gen_reg_rtx (DImode);
2525 temp4 = gen_reg_rtx (DImode);
2526 temp5 = gen_reg_rtx (DImode);
2527 }
2528
2529 emit_insn (gen_sethh (temp1, op1));
2530 emit_insn (gen_setlm (temp2, op1));
2531 emit_insn (gen_sethm (temp3, temp1, op1));
2532 emit_insn (gen_rtx_SET (temp4,
2533 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2534 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2535 emit_insn (gen_setlo (op0, temp5, op1));
2536 break;
2537
2538 case CM_EMBMEDANY:
2539 /* Old old old backwards compatibility cruft here.
2540 Essentially it is MEDLOW with a fixed 64-bit
2541 virtual base added to all data segment addresses.
2542 Text-segment stuff is computed like MEDANY; we can't
2543 reuse the code above because the relocation knobs
2544 look different.
2545
2546 Data segment: sethi %hi(symbol), %temp1
2547 add %temp1, EMBMEDANY_BASE_REG, %temp2
2548 or %temp2, %lo(symbol), %reg */
2549 if (data_segment_operand (op1, GET_MODE (op1)))
2550 {
2551 if (temp)
2552 {
2553 temp1 = temp; /* op0 is allowed. */
2554 temp2 = op0;
2555 }
2556 else
2557 {
2558 temp1 = gen_reg_rtx (DImode);
2559 temp2 = gen_reg_rtx (DImode);
2560 }
2561
2562 emit_insn (gen_embmedany_sethi (temp1, op1));
2563 emit_insn (gen_embmedany_brsum (temp2, temp1));
2564 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2565 }
2566
2567 /* Text segment: sethi %uhi(symbol), %temp1
2568 sethi %hi(symbol), %temp2
2569 or %temp1, %ulo(symbol), %temp3
2570 sllx %temp3, 32, %temp4
2571 or %temp4, %temp2, %temp5
2572 or %temp5, %lo(symbol), %reg */
2573 else
2574 {
2575 if (temp)
2576 {
2577 /* It is possible that one of the registers we got for operands[2]
2578 might coincide with that of operands[0] (which is why we made
2579 it TImode). Pick the other one to use as our scratch. */
2580 if (rtx_equal_p (temp, op0))
2581 {
2582 gcc_assert (ti_temp);
2583 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2584 }
2585 temp1 = op0;
2586 temp2 = temp; /* op0 is _not_ allowed, see above. */
2587 temp3 = op0;
2588 temp4 = op0;
2589 temp5 = op0;
2590 }
2591 else
2592 {
2593 temp1 = gen_reg_rtx (DImode);
2594 temp2 = gen_reg_rtx (DImode);
2595 temp3 = gen_reg_rtx (DImode);
2596 temp4 = gen_reg_rtx (DImode);
2597 temp5 = gen_reg_rtx (DImode);
2598 }
2599
2600 emit_insn (gen_embmedany_textuhi (temp1, op1));
2601 emit_insn (gen_embmedany_texthi (temp2, op1));
2602 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2603 emit_insn (gen_rtx_SET (temp4,
2604 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2605 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2606 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2607 }
2608 break;
2609
2610 default:
2611 gcc_unreachable ();
2612 }
2613 }
2614
2615 /* These avoid problems when cross compiling. If we do not
2616 go through all this hair then the optimizer will see
2617 invalid REG_EQUAL notes or in some cases none at all. */
2618 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2619 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2620 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2621 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2622
2623 /* The optimizer is not allowed to assume anything about exactly
2624 which bits are set for a HIGH; they are unspecified.
2625 Unfortunately this leads to many missed optimizations
2626 during CSE. We mask out the non-HIGH bits so that the insn
2627 matches a plain movdi, to alleviate this problem. */
2628 static rtx
2629 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2630 {
2631 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2632 }
2633
2634 static rtx
2635 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2636 {
2637 return gen_rtx_SET (dest, GEN_INT (val));
2638 }
2639
2640 static rtx
2641 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2642 {
2643 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2644 }
2645
2646 static rtx
2647 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2648 {
2649 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2650 }
2651
2652 /* Worker routines for 64-bit constant formation on arch64.
2653 One of the key things to do in these emissions is
2654 to create as many temp REGs as possible. This makes it
2655 possible for half-built constants to be reused later when
2656 similar values are required.
2657 Without doing this, the optimizer cannot see such
2658 opportunities. */
2659
2660 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2661 unsigned HOST_WIDE_INT, int);
2662
2663 static void
2664 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2665 unsigned HOST_WIDE_INT low_bits, int is_neg)
2666 {
2667 unsigned HOST_WIDE_INT high_bits;
2668
2669 if (is_neg)
2670 high_bits = (~low_bits) & 0xffffffff;
2671 else
2672 high_bits = low_bits;
2673
2674 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2675 if (!is_neg)
2676 {
2677 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2678 }
2679 else
2680 {
2681 /* If we are XOR'ing with -1, then we should emit a one's complement
2682 instead. This way the combiner will notice logical operations
2683 such as ANDN later on and substitute. */
2684 if ((low_bits & 0x3ff) == 0x3ff)
2685 {
2686 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2687 }
2688 else
2689 {
2690 emit_insn (gen_rtx_SET (op0,
2691 gen_safe_XOR64 (temp,
2692 (-(HOST_WIDE_INT)0x400
2693 | (low_bits & 0x3ff)))));
2694 }
2695 }
2696 }
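
/* Worked example for the is_neg case (illustrative constant): to
   load 0xffffffff80001234, low_bits is 0x80001234, so high_bits
   becomes ~low_bits & 0xffffffff = 0x7fffedcb.  The HIGH sets temp
   to 0x7fffec00, and XORing with (-0x400 | 0x234), i.e. the
   sign-extended value 0xfffffffffffffe34, recreates both the low
   word 0x80001234 and the all-ones upper word.  */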
2697
2698 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2699 unsigned HOST_WIDE_INT, int);
2700
2701 static void
2702 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2703 unsigned HOST_WIDE_INT high_bits,
2704 unsigned HOST_WIDE_INT low_immediate,
2705 int shift_count)
2706 {
2707 rtx temp2 = op0;
2708
2709 if ((high_bits & 0xfffffc00) != 0)
2710 {
2711 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2712 if ((high_bits & ~0xfffffc00) != 0)
2713 emit_insn (gen_rtx_SET (op0,
2714 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2715 else
2716 temp2 = temp;
2717 }
2718 else
2719 {
2720 emit_insn (gen_safe_SET64 (temp, high_bits));
2721 temp2 = temp;
2722 }
2723
2724 /* Now shift it up into place. */
2725 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2726 GEN_INT (shift_count))));
2727
2728 /* If there is a low immediate part piece, finish up by
2729 putting that in as well. */
2730 if (low_immediate != 0)
2731 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
2732 }
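
/* Worked example (illustrative values): with high_bits = 0x12345678,
   low_immediate = 0 and shift_count = 32, the code above emits a
   HIGH of 0x12345400 into temp, ORs in the low ten bits 0x278 to
   form 0x12345678, then shifts left by 32 to obtain
   0x1234567800000000; the final OR is skipped because the low
   immediate is zero.  */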
2733
2734 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2735 unsigned HOST_WIDE_INT);
2736
2737 /* Full 64-bit constant decomposition. Even though this is the
2738 'worst' case, we still optimize a few things away. */
2739 static void
2740 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2741 unsigned HOST_WIDE_INT high_bits,
2742 unsigned HOST_WIDE_INT low_bits)
2743 {
2744 rtx sub_temp = op0;
2745
2746 if (can_create_pseudo_p ())
2747 sub_temp = gen_reg_rtx (DImode);
2748
2749 if ((high_bits & 0xfffffc00) != 0)
2750 {
2751 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2752 if ((high_bits & ~0xfffffc00) != 0)
2753 emit_insn (gen_rtx_SET (sub_temp,
2754 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2755 else
2756 sub_temp = temp;
2757 }
2758 else
2759 {
2760 emit_insn (gen_safe_SET64 (temp, high_bits));
2761 sub_temp = temp;
2762 }
2763
2764 if (can_create_pseudo_p ())
2765 {
2766 rtx temp2 = gen_reg_rtx (DImode);
2767 rtx temp3 = gen_reg_rtx (DImode);
2768 rtx temp4 = gen_reg_rtx (DImode);
2769
2770 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2771 GEN_INT (32))));
2772
2773 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2774 if ((low_bits & ~0xfffffc00) != 0)
2775 {
2776 emit_insn (gen_rtx_SET (temp3,
2777 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2778 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2779 }
2780 else
2781 {
2782 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2783 }
2784 }
2785 else
2786 {
2787 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2788 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2789 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2790 int to_shift = 12;
2791
2792 /* We are in the middle of reload, so this is really
2793 painful. However we do still make an attempt to
2794 avoid emitting truly stupid code. */
2795 if (low1 != const0_rtx)
2796 {
2797 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2798 GEN_INT (to_shift))));
2799 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2800 sub_temp = op0;
2801 to_shift = 12;
2802 }
2803 else
2804 {
2805 to_shift += 12;
2806 }
2807 if (low2 != const0_rtx)
2808 {
2809 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2810 GEN_INT (to_shift))));
2811 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2812 sub_temp = op0;
2813 to_shift = 8;
2814 }
2815 else
2816 {
2817 to_shift += 8;
2818 }
2819 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2820 GEN_INT (to_shift))));
2821 if (low3 != const0_rtx)
2822 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2823 /* phew... */
2824 }
2825 }
2826
2827 /* Analyze a 64-bit constant for certain properties. */
2828 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2829 unsigned HOST_WIDE_INT,
2830 int *, int *, int *);
2831
2832 static void
2833 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2834 unsigned HOST_WIDE_INT low_bits,
2835 int *hbsp, int *lbsp, int *abbasp)
2836 {
2837 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2838 int i;
2839
2840 lowest_bit_set = highest_bit_set = -1;
2841 i = 0;
2842 do
2843 {
2844 if ((lowest_bit_set == -1)
2845 && ((low_bits >> i) & 1))
2846 lowest_bit_set = i;
2847 if ((highest_bit_set == -1)
2848 && ((high_bits >> (32 - i - 1)) & 1))
2849 highest_bit_set = (64 - i - 1);
2850 }
2851 while (++i < 32
2852 && ((highest_bit_set == -1)
2853 || (lowest_bit_set == -1)));
2854 if (i == 32)
2855 {
2856 i = 0;
2857 do
2858 {
2859 if ((lowest_bit_set == -1)
2860 && ((high_bits >> i) & 1))
2861 lowest_bit_set = i + 32;
2862 if ((highest_bit_set == -1)
2863 && ((low_bits >> (32 - i - 1)) & 1))
2864 highest_bit_set = 32 - i - 1;
2865 }
2866 while (++i < 32
2867 && ((highest_bit_set == -1)
2868 || (lowest_bit_set == -1)));
2869 }
2870 /* If there are no bits set this should have gone out
2871 as one instruction! */
2872 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2873 all_bits_between_are_set = 1;
2874 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2875 {
2876 if (i < 32)
2877 {
2878 if ((low_bits & (1 << i)) != 0)
2879 continue;
2880 }
2881 else
2882 {
2883 if ((high_bits & (1 << (i - 32))) != 0)
2884 continue;
2885 }
2886 all_bits_between_are_set = 0;
2887 break;
2888 }
2889 *hbsp = highest_bit_set;
2890 *lbsp = lowest_bit_set;
2891 *abbasp = all_bits_between_are_set;
2892 }
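
/* Worked example (illustrative constant): for 0x00000000ffff0000,
   high_bits is 0 and low_bits is 0xffff0000, so the scans above
   report lowest_bit_set = 16, highest_bit_set = 31 and
   all_bits_between_are_set = 1, i.e. one contiguous run of bits that
   the callers can rebuild with a short shifted sequence.  */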
2893
2894 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2895
2896 static int
2897 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2898 unsigned HOST_WIDE_INT low_bits)
2899 {
2900 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2901
2902 if (high_bits == 0
2903 || high_bits == 0xffffffff)
2904 return 1;
2905
2906 analyze_64bit_constant (high_bits, low_bits,
2907 &highest_bit_set, &lowest_bit_set,
2908 &all_bits_between_are_set);
2909
2910 if ((highest_bit_set == 63
2911 || lowest_bit_set == 0)
2912 && all_bits_between_are_set != 0)
2913 return 1;
2914
2915 if ((highest_bit_set - lowest_bit_set) < 21)
2916 return 1;
2917
2918 return 0;
2919 }
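
/* For instance (illustrative constants), 0x00000000ffff0000 is
   accepted immediately because its upper word is zero, and any
   constant whose highest and lowest set bits are less than 21
   positions apart is accepted as well, since a sethi of the focused
   bits plus one shift can rebuild it.  */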
2920
2921 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2922 unsigned HOST_WIDE_INT,
2923 int, int);
2924
2925 static unsigned HOST_WIDE_INT
2926 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2927 unsigned HOST_WIDE_INT low_bits,
2928 int lowest_bit_set, int shift)
2929 {
2930 HOST_WIDE_INT hi, lo;
2931
2932 if (lowest_bit_set < 32)
2933 {
2934 lo = (low_bits >> lowest_bit_set) << shift;
2935 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2936 }
2937 else
2938 {
2939 lo = 0;
2940 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2941 }
2942 gcc_assert (! (hi & lo));
2943 return (hi | lo);
2944 }
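
/* Worked example (illustrative values): with high_bits = 0,
   low_bits = 0xffff0000, lowest_bit_set = 16 and shift = 10, the
   function returns 0xffff << 10 = 0x3fffc00, i.e. the focused bit
   group repositioned so that a single sethi can materialize it.  */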
2945
2946 /* Here we are sure to be arch64 and this is an integer constant
2947 being loaded into a register. Emit the most efficient
2948 insn sequence possible. Detection of all the 1-insn cases
2949 has been done already. */
2950 static void
2951 sparc_emit_set_const64 (rtx op0, rtx op1)
2952 {
2953 unsigned HOST_WIDE_INT high_bits, low_bits;
2954 int lowest_bit_set, highest_bit_set;
2955 int all_bits_between_are_set;
2956 rtx temp = 0;
2957
2958 /* Sanity check that we know what we are working with. */
2959 gcc_assert (TARGET_ARCH64
2960 && (GET_CODE (op0) == SUBREG
2961 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2962
2963 if (! can_create_pseudo_p ())
2964 temp = op0;
2965
2966 if (GET_CODE (op1) != CONST_INT)
2967 {
2968 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2969 return;
2970 }
2971
2972 if (! temp)
2973 temp = gen_reg_rtx (DImode);
2974
2975 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2976 low_bits = (INTVAL (op1) & 0xffffffff);
2977
2978 /* low_bits bits 0 --> 31
2979 high_bits bits 32 --> 63 */
2980
2981 analyze_64bit_constant (high_bits, low_bits,
2982 &highest_bit_set, &lowest_bit_set,
2983 &all_bits_between_are_set);
2984
2985 /* First try for a 2-insn sequence. */
2986
2987 /* These situations are preferred because the optimizer can
2988 * do more things with them:
2989 * 1) mov -1, %reg
2990 * sllx %reg, shift, %reg
2991 * 2) mov -1, %reg
2992 * srlx %reg, shift, %reg
2993 * 3) mov some_small_const, %reg
2994 * sllx %reg, shift, %reg
2995 */
2996 if (((highest_bit_set == 63
2997 || lowest_bit_set == 0)
2998 && all_bits_between_are_set != 0)
2999 || ((highest_bit_set - lowest_bit_set) < 12))
3000 {
3001 HOST_WIDE_INT the_const = -1;
3002 int shift = lowest_bit_set;
3003
3004 if ((highest_bit_set != 63
3005 && lowest_bit_set != 0)
3006 || all_bits_between_are_set == 0)
3007 {
3008 the_const =
3009 create_simple_focus_bits (high_bits, low_bits,
3010 lowest_bit_set, 0);
3011 }
3012 else if (lowest_bit_set == 0)
3013 shift = -(63 - highest_bit_set);
3014
3015 gcc_assert (SPARC_SIMM13_P (the_const));
3016 gcc_assert (shift != 0);
3017
3018 emit_insn (gen_safe_SET64 (temp, the_const));
3019 if (shift > 0)
3020 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
3021 GEN_INT (shift))));
3022 else if (shift < 0)
3023 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
3024 GEN_INT (-shift))));
3025 return;
3026 }
3027
3028 /* Now a range of 22 or less bits set somewhere.
3029 * 1) sethi %hi(focus_bits), %reg
3030 * sllx %reg, shift, %reg
3031 * 2) sethi %hi(focus_bits), %reg
3032 * srlx %reg, shift, %reg
3033 */
3034 if ((highest_bit_set - lowest_bit_set) < 21)
3035 {
3036 unsigned HOST_WIDE_INT focus_bits =
3037 create_simple_focus_bits (high_bits, low_bits,
3038 lowest_bit_set, 10);
3039
3040 gcc_assert (SPARC_SETHI_P (focus_bits));
3041 gcc_assert (lowest_bit_set != 10);
3042
3043 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
3044
3045 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
3046 if (lowest_bit_set < 10)
3047 emit_insn (gen_rtx_SET (op0,
3048 gen_rtx_LSHIFTRT (DImode, temp,
3049 GEN_INT (10 - lowest_bit_set))));
3050 else if (lowest_bit_set > 10)
3051 emit_insn (gen_rtx_SET (op0,
3052 gen_rtx_ASHIFT (DImode, temp,
3053 GEN_INT (lowest_bit_set - 10))));
3054 return;
3055 }
3056
3057 /* 1) sethi %hi(low_bits), %reg
3058 * or %reg, %lo(low_bits), %reg
3059 * 2) sethi %hi(~low_bits), %reg
3060 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
3061 */
3062 if (high_bits == 0
3063 || high_bits == 0xffffffff)
3064 {
3065 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
3066 (high_bits == 0xffffffff));
3067 return;
3068 }
3069
3070 /* Now, try 3-insn sequences. */
3071
3072 /* 1) sethi %hi(high_bits), %reg
3073 * or %reg, %lo(high_bits), %reg
3074 * sllx %reg, 32, %reg
3075 */
3076 if (low_bits == 0)
3077 {
3078 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
3079 return;
3080 }
3081
3082 /* We may be able to do something quick
3083 when the constant is negated, so try that. */
3084 if (const64_is_2insns ((~high_bits) & 0xffffffff,
3085 (~low_bits) & 0xfffffc00))
3086 {
3087 /* NOTE: The trailing bits get XOR'd so we need the
3088 non-negated bits, not the negated ones. */
3089 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
3090
3091 if ((((~high_bits) & 0xffffffff) == 0
3092 && ((~low_bits) & 0x80000000) == 0)
3093 || (((~high_bits) & 0xffffffff) == 0xffffffff
3094 && ((~low_bits) & 0x80000000) != 0))
3095 {
3096 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
3097
3098 if ((SPARC_SETHI_P (fast_int)
3099 && (~high_bits & 0xffffffff) == 0)
3100 || SPARC_SIMM13_P (fast_int))
3101 emit_insn (gen_safe_SET64 (temp, fast_int));
3102 else
3103 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
3104 }
3105 else
3106 {
3107 rtx negated_const;
3108 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
3109 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
3110 sparc_emit_set_const64 (temp, negated_const);
3111 }
3112
3113 /* If we are XOR'ing with -1, then we should emit a one's complement
3114 instead. This way the combiner will notice logical operations
3115 such as ANDN later on and substitute. */
3116 if (trailing_bits == 0x3ff)
3117 {
3118 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
3119 }
3120 else
3121 {
3122 emit_insn (gen_rtx_SET (op0,
3123 gen_safe_XOR64 (temp,
3124 (-0x400 | trailing_bits))));
3125 }
3126 return;
3127 }
3128
3129 /* 1) sethi %hi(xxx), %reg
3130 * or %reg, %lo(xxx), %reg
3131 * sllx %reg, yyy, %reg
3132 *
3133 * ??? This is just a generalized version of the low_bits==0
3134 * thing above, FIXME...
3135 */
3136 if ((highest_bit_set - lowest_bit_set) < 32)
3137 {
3138 unsigned HOST_WIDE_INT focus_bits =
3139 create_simple_focus_bits (high_bits, low_bits,
3140 lowest_bit_set, 0);
3141
3142 /* We can't get here in this state. */
3143 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
3144
3145 /* So what we know is that the set bits straddle the
3146 middle of the 64-bit word. */
3147 sparc_emit_set_const64_quick2 (op0, temp,
3148 focus_bits, 0,
3149 lowest_bit_set);
3150 return;
3151 }
3152
3153 /* 1) sethi %hi(high_bits), %reg
3154 * or %reg, %lo(high_bits), %reg
3155 * sllx %reg, 32, %reg
3156 * or %reg, low_bits, %reg
3157 */
3158 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
3159 {
3160 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
3161 return;
3162 }
3163
3164 /* The easiest way when all else fails, is full decomposition. */
3165 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
3166 }
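
/* Summary of the cascade above on illustrative constants:
   0x00000000ffff0000 is built by the sethi-plus-shift case;
   0xffffffff80001234 goes through sparc_emit_set_const64_quick1
   because its upper word is all ones; 0x1234567800000000 goes
   through sparc_emit_set_const64_quick2 with a 32-bit shift because
   its lower word is zero; anything that fits none of the patterns
   falls through to sparc_emit_set_const64_longway.  */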
3167
3168 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
3169
3170 static bool
3171 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3172 {
3173 *p1 = SPARC_ICC_REG;
3174 *p2 = SPARC_FCC_REG;
3175 return true;
3176 }
3177
3178 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
3179
3180 static unsigned int
3181 sparc_min_arithmetic_precision (void)
3182 {
3183 return 32;
3184 }
3185
3186 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
3187 return the mode to be used for the comparison. For floating-point,
3188 CCFP[E]mode is used. CCNZmode should be used when the first operand
3189 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
3190 processing is needed. */
3191
3192 machine_mode
3193 select_cc_mode (enum rtx_code op, rtx x, rtx y)
3194 {
3195 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3196 {
3197 switch (op)
3198 {
3199 case EQ:
3200 case NE:
3201 case UNORDERED:
3202 case ORDERED:
3203 case UNLT:
3204 case UNLE:
3205 case UNGT:
3206 case UNGE:
3207 case UNEQ:
3208 case LTGT:
3209 return CCFPmode;
3210
3211 case LT:
3212 case LE:
3213 case GT:
3214 case GE:
3215 return CCFPEmode;
3216
3217 default:
3218 gcc_unreachable ();
3219 }
3220 }
3221 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
3222 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
3223 && y == const0_rtx)
3224 {
3225 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3226 return CCXNZmode;
3227 else
3228 return CCNZmode;
3229 }
3230 else
3231 {
3232 /* This is for the cmp<mode>_sne pattern. */
3233 if (GET_CODE (x) == NOT && y == constm1_rtx)
3234 {
3235 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3236 return CCXCmode;
3237 else
3238 return CCCmode;
3239 }
3240
3241 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
3242 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
3243 {
3244 if (GET_CODE (y) == UNSPEC
3245 && (XINT (y, 1) == UNSPEC_ADDV
3246 || XINT (y, 1) == UNSPEC_SUBV
3247 || XINT (y, 1) == UNSPEC_NEGV))
3248 return CCVmode;
3249 else
3250 return CCCmode;
3251 }
3252
3253 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3254 return CCXmode;
3255 else
3256 return CCmode;
3257 }
3258 }
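
/* For example (illustrative uses): comparing the result of a DImode
   addition against zero with EQ on a 64-bit target yields CCXNZmode,
   an ordinary DImode register comparison yields CCXmode, and a
   floating-point EQ/NE or unordered test yields CCFPmode while
   LT/LE/GT/GE yield CCFPEmode.  */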
3259
3260 /* Emit the compare insn and return the CC reg for a CODE comparison
3261 with operands X and Y. */
3262
3263 static rtx
3264 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
3265 {
3266 machine_mode mode;
3267 rtx cc_reg;
3268
3269 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
3270 return x;
3271
3272 mode = SELECT_CC_MODE (code, x, y);
3273
3274 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3275 fcc regs (cse can't tell they're really call clobbered regs and will
3276 remove a duplicate comparison even if there is an intervening function
3277 call - it will then try to reload the cc reg via an int reg which is why
3278 we need the movcc patterns). It is possible to provide the movcc
3279 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3280 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3281 to tell cse that CCFPE mode registers (even pseudos) are call
3282 clobbered. */
3283
3284 /* ??? This is an experiment. Rather than making changes to cse which may
3285 or may not be easy/clean, we do our own cse. This is possible because
3286 we will generate hard registers. Cse knows they're call clobbered (it
3287 doesn't know the same thing about pseudos). If we guess wrong, no big
3288 deal, but if we win, great! */
3289
3290 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3291 #if 1 /* experiment */
3292 {
3293 int reg;
3294 /* We cycle through the registers to ensure they're all exercised. */
3295 static int next_fcc_reg = 0;
3296 /* Previous x,y for each fcc reg. */
3297 static rtx prev_args[4][2];
3298
3299 /* Scan prev_args for x,y. */
3300 for (reg = 0; reg < 4; reg++)
3301 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3302 break;
3303 if (reg == 4)
3304 {
3305 reg = next_fcc_reg;
3306 prev_args[reg][0] = x;
3307 prev_args[reg][1] = y;
3308 next_fcc_reg = (next_fcc_reg + 1) & 3;
3309 }
3310 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3311 }
3312 #else
3313 cc_reg = gen_reg_rtx (mode);
3314 #endif /* ! experiment */
3315 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3316 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3317 else
3318 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3319
3320 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
3321 will only result in an unrecognizable insn so no point in asserting. */
3322 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3323
3324 return cc_reg;
3325 }
3326
3327
3328 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3329
3330 rtx
3331 gen_compare_reg (rtx cmp)
3332 {
3333 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3334 }
3335
3336 /* This function is used for v9 only.
3337 DEST is the target of the Scc insn.
3338 CODE is the code for an Scc's comparison.
3339 X and Y are the values we compare.
3340
3341 This function is needed to turn
3342
3343 (set (reg:SI 110)
3344 (gt (reg:CCX 100 %icc)
3345 (const_int 0)))
3346 into
3347 (set (reg:SI 110)
3348 (gt:DI (reg:CCX 100 %icc)
3349 (const_int 0)))
3350
3351 I.e. the instruction recognizer needs to see the mode of the comparison to
3352 find the right instruction. We could use "gt:DI" right in the
3353 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3354
3355 static int
3356 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3357 {
3358 if (! TARGET_ARCH64
3359 && (GET_MODE (x) == DImode
3360 || GET_MODE (dest) == DImode))
3361 return 0;
3362
3363 /* Try to use the movrCC insns. */
3364 if (TARGET_ARCH64
3365 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3366 && y == const0_rtx
3367 && v9_regcmp_p (compare_code))
3368 {
3369 rtx op0 = x;
3370 rtx temp;
3371
3372 /* Special case for op0 != 0. This can be done with one instruction if
3373 dest == x. */
3374
3375 if (compare_code == NE
3376 && GET_MODE (dest) == DImode
3377 && rtx_equal_p (op0, dest))
3378 {
3379 emit_insn (gen_rtx_SET (dest,
3380 gen_rtx_IF_THEN_ELSE (DImode,
3381 gen_rtx_fmt_ee (compare_code, DImode,
3382 op0, const0_rtx),
3383 const1_rtx,
3384 dest)));
3385 return 1;
3386 }
3387
3388 if (reg_overlap_mentioned_p (dest, op0))
3389 {
3390 /* Handle the case where dest == x.
3391 We "early clobber" the result. */
3392 op0 = gen_reg_rtx (GET_MODE (x));
3393 emit_move_insn (op0, x);
3394 }
3395
3396 emit_insn (gen_rtx_SET (dest, const0_rtx));
3397 if (GET_MODE (op0) != DImode)
3398 {
3399 temp = gen_reg_rtx (DImode);
3400 convert_move (temp, op0, 0);
3401 }
3402 else
3403 temp = op0;
3404 emit_insn (gen_rtx_SET (dest,
3405 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3406 gen_rtx_fmt_ee (compare_code, DImode,
3407 temp, const0_rtx),
3408 const1_rtx,
3409 dest)));
3410 return 1;
3411 }
3412 else
3413 {
3414 x = gen_compare_reg_1 (compare_code, x, y);
3415 y = const0_rtx;
3416
3417 emit_insn (gen_rtx_SET (dest, const0_rtx));
3418 emit_insn (gen_rtx_SET (dest,
3419 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3420 gen_rtx_fmt_ee (compare_code,
3421 GET_MODE (x), x, y),
3422 const1_rtx, dest)));
3423 return 1;
3424 }
3425 }
3426
3427
3428 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3429 without jumps using the addx/subx instructions. */
3430
3431 bool
3432 emit_scc_insn (rtx operands[])
3433 {
3434 rtx tem, x, y;
3435 enum rtx_code code;
3436 machine_mode mode;
3437
3438 /* The quad-word fp compare library routines all return nonzero to indicate
3439 true, which is different from the equivalent libgcc routines, so we must
3440 handle them specially here. */
3441 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3442 {
3443 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3444 GET_CODE (operands[1]));
3445 operands[2] = XEXP (operands[1], 0);
3446 operands[3] = XEXP (operands[1], 1);
3447 }
3448
3449 code = GET_CODE (operands[1]);
3450 x = operands[2];
3451 y = operands[3];
3452 mode = GET_MODE (x);
3453
3454 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3455 more applications). The exception to this is "reg != 0" which can
3456 be done in one instruction on v9 (so we do it). */
3457 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3458 {
3459 if (y != const0_rtx)
3460 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3461
3462 rtx pat = gen_rtx_SET (operands[0],
3463 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3464 x, const0_rtx));
3465
3466 /* If we can use addx/subx or addxc, add a clobber for CC. */
3467 if (mode == SImode || (code == NE && TARGET_VIS3))
3468 {
3469 rtx clobber
3470 = gen_rtx_CLOBBER (VOIDmode,
3471 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3472 SPARC_ICC_REG));
3473 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3474 }
3475
3476 emit_insn (pat);
3477 return true;
3478 }
3479
3480 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3481 if (TARGET_ARCH64
3482 && mode == DImode
3483 && !((code == LTU || code == GTU) && TARGET_VIS3)
3484 && gen_v9_scc (operands[0], code, x, y))
3485 return true;
3486
3487 /* We can do LTU and GEU using the addx/subx instructions too. And
3488 for GTU/LEU, if both operands are registers swap them and fall
3489 back to the easy case. */
3490 if (code == GTU || code == LEU)
3491 {
3492 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3493 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3494 {
3495 tem = x;
3496 x = y;
3497 y = tem;
3498 code = swap_condition (code);
3499 }
3500 }
3501
3502 if (code == LTU || code == GEU)
3503 {
3504 emit_insn (gen_rtx_SET (operands[0],
3505 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3506 gen_compare_reg_1 (code, x, y),
3507 const0_rtx)));
3508 return true;
3509 }
3510
3511 /* All the possibilities to use addx/subx-based sequences have been
3512 exhausted, so try for a 3-instruction sequence using v9 conditional
3513 moves. */
3514 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3515 return true;
3516
3517 /* Nope, do branches. */
3518 return false;
3519 }
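
/* Sketch of the addx/subx idiom relied on above (illustrative,
   assuming v8-style integer condition codes): "dest = (a <u b)" can
   be emitted as
     subcc  a, b, %g0     ! sets the carry bit iff a < b unsigned
     addx   %g0, 0, dest  ! dest = 0 + 0 + carry
   which produces 0 or 1 without a branch.  */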
3520
3521 /* Emit a conditional jump insn for the v9 architecture using comparison code
3522 CODE and jump target LABEL.
3523 This function exists to take advantage of the v9 brxx insns. */
3524
3525 static void
3526 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3527 {
3528 emit_jump_insn (gen_rtx_SET (pc_rtx,
3529 gen_rtx_IF_THEN_ELSE (VOIDmode,
3530 gen_rtx_fmt_ee (code, GET_MODE (op0),
3531 op0, const0_rtx),
3532 gen_rtx_LABEL_REF (VOIDmode, label),
3533 pc_rtx)));
3534 }
3535
3536 /* Emit a conditional jump insn for the UA2011 architecture using
3537 comparison code CODE and jump target LABEL. This function exists
3538 to take advantage of the UA2011 Compare and Branch insns. */
3539
3540 static void
3541 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3542 {
3543 rtx if_then_else;
3544
3545 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3546 gen_rtx_fmt_ee (code, GET_MODE (op0),
3547 op0, op1),
3548 gen_rtx_LABEL_REF (VOIDmode, label),
3549 pc_rtx);
3550
3551 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3552 }
3553
3554 void
3555 emit_conditional_branch_insn (rtx operands[])
3556 {
3557 /* The quad-word fp compare library routines all return nonzero to indicate
3558 true, which is different from the equivalent libgcc routines, so we must
3559 handle them specially here. */
3560 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3561 {
3562 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3563 GET_CODE (operands[0]));
3564 operands[1] = XEXP (operands[0], 0);
3565 operands[2] = XEXP (operands[0], 1);
3566 }
3567
3568 /* If we can tell early on that the comparison is against a constant
3569 that won't fit in the 5-bit signed immediate field of a cbcond,
3570 use one of the other v9 conditional branch sequences. */
3571 if (TARGET_CBCOND
3572 && GET_CODE (operands[1]) == REG
3573 && (GET_MODE (operands[1]) == SImode
3574 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3575 && (GET_CODE (operands[2]) != CONST_INT
3576 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3577 {
3578 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3579 return;
3580 }
3581
3582 if (TARGET_ARCH64 && operands[2] == const0_rtx
3583 && GET_CODE (operands[1]) == REG
3584 && GET_MODE (operands[1]) == DImode)
3585 {
3586 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3587 return;
3588 }
3589
3590 operands[1] = gen_compare_reg (operands[0]);
3591 operands[2] = const0_rtx;
3592 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3593 operands[1], operands[2]);
3594 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3595 operands[3]));
3596 }
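
/* A brief recap of the dispatch above: cbcond is only used when any
   constant operand fits in its 5-bit signed immediate field (-16..15,
   cf. SPARC_SIMM5_P); a DImode register compared against zero on
   TARGET_ARCH64 uses the branch-on-register form; everything else goes
   through a condition-code register and the cbranchcc4 pattern.  */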
3597
3598
3599 /* Generate a DFmode part of a hard TFmode register.
3600 REG is the TFmode hard register, LOW is 1 for the
3601 low 64 bits of the register and 0 otherwise.
3602 */
3603 rtx
3604 gen_df_reg (rtx reg, int low)
3605 {
3606 int regno = REGNO (reg);
3607
3608 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3609 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3610 return gen_rtx_REG (DFmode, regno);
3611 }
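
/* A worked example, assuming the usual big-endian word ordering: for a
   TFmode value living in %f0..%f3, gen_df_reg returns %f0 (the high-order
   64 bits) for LOW == 0 and %f2 (the low-order 64 bits) for LOW == 1.  */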
3612 \f
3613 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3614 Unlike normal calls, TFmode operands are passed by reference. It is
3615 assumed that no more than 3 operands are required. */
3616
3617 static void
3618 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3619 {
3620 rtx ret_slot = NULL, arg[3], func_sym;
3621 int i;
3622
3623 /* We only expect to be called for conversions, unary, and binary ops. */
3624 gcc_assert (nargs == 2 || nargs == 3);
3625
3626 for (i = 0; i < nargs; ++i)
3627 {
3628 rtx this_arg = operands[i];
3629 rtx this_slot;
3630
3631 /* TFmode arguments and return values are passed by reference. */
3632 if (GET_MODE (this_arg) == TFmode)
3633 {
3634 int force_stack_temp;
3635
3636 force_stack_temp = 0;
3637 if (TARGET_BUGGY_QP_LIB && i == 0)
3638 force_stack_temp = 1;
3639
3640 if (GET_CODE (this_arg) == MEM
3641 && ! force_stack_temp)
3642 {
3643 tree expr = MEM_EXPR (this_arg);
3644 if (expr)
3645 mark_addressable (expr);
3646 this_arg = XEXP (this_arg, 0);
3647 }
3648 else if (CONSTANT_P (this_arg)
3649 && ! force_stack_temp)
3650 {
3651 this_slot = force_const_mem (TFmode, this_arg);
3652 this_arg = XEXP (this_slot, 0);
3653 }
3654 else
3655 {
3656 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3657
3658 /* Operand 0 is the return value. We'll copy it out later. */
3659 if (i > 0)
3660 emit_move_insn (this_slot, this_arg);
3661 else
3662 ret_slot = this_slot;
3663
3664 this_arg = XEXP (this_slot, 0);
3665 }
3666 }
3667
3668 arg[i] = this_arg;
3669 }
3670
3671 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3672
3673 if (GET_MODE (operands[0]) == TFmode)
3674 {
3675 if (nargs == 2)
3676 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3677 arg[0], GET_MODE (arg[0]),
3678 arg[1], GET_MODE (arg[1]));
3679 else
3680 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3681 arg[0], GET_MODE (arg[0]),
3682 arg[1], GET_MODE (arg[1]),
3683 arg[2], GET_MODE (arg[2]));
3684
3685 if (ret_slot)
3686 emit_move_insn (operands[0], ret_slot);
3687 }
3688 else
3689 {
3690 rtx ret;
3691
3692 gcc_assert (nargs == 2);
3693
3694 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3695 GET_MODE (operands[0]),
3696 arg[1], GET_MODE (arg[1]));
3697
3698 if (ret != operands[0])
3699 emit_move_insn (operands[0], ret);
3700 }
3701 }
3702
3703 /* Expand soft-float TFmode calls to SPARC ABI routines.  */
3704
3705 static void
3706 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3707 {
3708 const char *func;
3709
3710 switch (code)
3711 {
3712 case PLUS:
3713 func = "_Qp_add";
3714 break;
3715 case MINUS:
3716 func = "_Qp_sub";
3717 break;
3718 case MULT:
3719 func = "_Qp_mul";
3720 break;
3721 case DIV:
3722 func = "_Qp_div";
3723 break;
3724 default:
3725 gcc_unreachable ();
3726 }
3727
3728 emit_soft_tfmode_libcall (func, 3, operands);
3729 }
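
/* For example (a sketch, not a literal dump): with a software quad-float
   ABI, a TFmode addition "c = a + b" ends up as a call roughly equivalent
   to

	_Qp_add (&c, &a, &b);

   with all three TFmode values passed by reference, as arranged by
   emit_soft_tfmode_libcall above.  */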
3730
3731 static void
3732 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3733 {
3734 const char *func;
3735
3736 gcc_assert (code == SQRT);
3737 func = "_Qp_sqrt";
3738
3739 emit_soft_tfmode_libcall (func, 2, operands);
3740 }
3741
3742 static void
3743 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3744 {
3745 const char *func;
3746
3747 switch (code)
3748 {
3749 case FLOAT_EXTEND:
3750 switch (GET_MODE (operands[1]))
3751 {
3752 case E_SFmode:
3753 func = "_Qp_stoq";
3754 break;
3755 case E_DFmode:
3756 func = "_Qp_dtoq";
3757 break;
3758 default:
3759 gcc_unreachable ();
3760 }
3761 break;
3762
3763 case FLOAT_TRUNCATE:
3764 switch (GET_MODE (operands[0]))
3765 {
3766 case E_SFmode:
3767 func = "_Qp_qtos";
3768 break;
3769 case E_DFmode:
3770 func = "_Qp_qtod";
3771 break;
3772 default:
3773 gcc_unreachable ();
3774 }
3775 break;
3776
3777 case FLOAT:
3778 switch (GET_MODE (operands[1]))
3779 {
3780 case E_SImode:
3781 func = "_Qp_itoq";
3782 if (TARGET_ARCH64)
3783 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3784 break;
3785 case E_DImode:
3786 func = "_Qp_xtoq";
3787 break;
3788 default:
3789 gcc_unreachable ();
3790 }
3791 break;
3792
3793 case UNSIGNED_FLOAT:
3794 switch (GET_MODE (operands[1]))
3795 {
3796 case E_SImode:
3797 func = "_Qp_uitoq";
3798 if (TARGET_ARCH64)
3799 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3800 break;
3801 case E_DImode:
3802 func = "_Qp_uxtoq";
3803 break;
3804 default:
3805 gcc_unreachable ();
3806 }
3807 break;
3808
3809 case FIX:
3810 switch (GET_MODE (operands[0]))
3811 {
3812 case E_SImode:
3813 func = "_Qp_qtoi";
3814 break;
3815 case E_DImode:
3816 func = "_Qp_qtox";
3817 break;
3818 default:
3819 gcc_unreachable ();
3820 }
3821 break;
3822
3823 case UNSIGNED_FIX:
3824 switch (GET_MODE (operands[0]))
3825 {
3826 case E_SImode:
3827 func = "_Qp_qtoui";
3828 break;
3829 case E_DImode:
3830 func = "_Qp_qtoux";
3831 break;
3832 default:
3833 gcc_unreachable ();
3834 }
3835 break;
3836
3837 default:
3838 gcc_unreachable ();
3839 }
3840
3841 emit_soft_tfmode_libcall (func, 2, operands);
3842 }
3843
3844 /* Expand a hard-float TFmode operation.  All arguments must be in
3845 registers. */
3846
3847 static void
3848 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3849 {
3850 rtx op, dest;
3851
3852 if (GET_RTX_CLASS (code) == RTX_UNARY)
3853 {
3854 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3855 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3856 }
3857 else
3858 {
3859 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3860 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3861 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3862 operands[1], operands[2]);
3863 }
3864
3865 if (register_operand (operands[0], VOIDmode))
3866 dest = operands[0];
3867 else
3868 dest = gen_reg_rtx (GET_MODE (operands[0]));
3869
3870 emit_insn (gen_rtx_SET (dest, op));
3871
3872 if (dest != operands[0])
3873 emit_move_insn (operands[0], dest);
3874 }
3875
3876 void
3877 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3878 {
3879 if (TARGET_HARD_QUAD)
3880 emit_hard_tfmode_operation (code, operands);
3881 else
3882 emit_soft_tfmode_binop (code, operands);
3883 }
3884
3885 void
3886 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3887 {
3888 if (TARGET_HARD_QUAD)
3889 emit_hard_tfmode_operation (code, operands);
3890 else
3891 emit_soft_tfmode_unop (code, operands);
3892 }
3893
3894 void
3895 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3896 {
3897 if (TARGET_HARD_QUAD)
3898 emit_hard_tfmode_operation (code, operands);
3899 else
3900 emit_soft_tfmode_cvt (code, operands);
3901 }
3902 \f
3903 /* Return nonzero if a branch/jump/call instruction will be emitting
3904 a nop into its delay slot. */
3905
3906 int
3907 empty_delay_slot (rtx_insn *insn)
3908 {
3909 rtx seq;
3910
3911 /* If no previous instruction (should not happen), return true. */
3912 if (PREV_INSN (insn) == NULL)
3913 return 1;
3914
3915 seq = NEXT_INSN (PREV_INSN (insn));
3916 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3917 return 0;
3918
3919 return 1;
3920 }
3921
3922 /* Return nonzero if we should emit a nop after a cbcond instruction.
3923 The cbcond instruction does not have a delay slot; however, there is
3924 a severe performance penalty if a control transfer appears right
3925 after a cbcond. Therefore we emit a nop when we detect this
3926 situation. */
3927
3928 int
3929 emit_cbcond_nop (rtx_insn *insn)
3930 {
3931 rtx next = next_active_insn (insn);
3932
3933 if (!next)
3934 return 1;
3935
3936 if (NONJUMP_INSN_P (next)
3937 && GET_CODE (PATTERN (next)) == SEQUENCE)
3938 next = XVECEXP (PATTERN (next), 0, 0);
3939 else if (CALL_P (next)
3940 && GET_CODE (PATTERN (next)) == PARALLEL)
3941 {
3942 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3943
3944 if (GET_CODE (delay) == RETURN)
3945 {
3946 /* It's a sibling call. Do not emit the nop if we're going
3947 to emit something other than the jump itself as the first
3948 instruction of the sibcall sequence. */
3949 if (sparc_leaf_function_p || TARGET_FLAT)
3950 return 0;
3951 }
3952 }
3953
3954 if (NONJUMP_INSN_P (next))
3955 return 0;
3956
3957 return 1;
3958 }
3959
3960 /* Return nonzero if TRIAL can go into the call delay slot. */
3961
3962 int
3963 eligible_for_call_delay (rtx_insn *trial)
3964 {
3965 rtx pat;
3966
3967 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3968 return 0;
3969
3970 /* Binutils allows
3971 call __tls_get_addr, %tgd_call (foo)
3972 add %l7, %o0, %o0, %tgd_add (foo)
3973 while Sun as/ld does not. */
3974 if (TARGET_GNU_TLS || !TARGET_TLS)
3975 return 1;
3976
3977 pat = PATTERN (trial);
3978
3979 /* We must reject tgd_add{32|64}, i.e.
3980 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3981 and tldm_add{32|64}, i.e.
3982 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3983 for Sun as/ld. */
3984 if (GET_CODE (pat) == SET
3985 && GET_CODE (SET_SRC (pat)) == PLUS)
3986 {
3987 rtx unspec = XEXP (SET_SRC (pat), 1);
3988
3989 if (GET_CODE (unspec) == UNSPEC
3990 && (XINT (unspec, 1) == UNSPEC_TLSGD
3991 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3992 return 0;
3993 }
3994
3995 return 1;
3996 }
3997
3998 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3999 instruction. RETURN_P is true if the v9 variant 'return' is to be
4000 considered in the test too.
4001
4002 TRIAL must be a SET whose destination is a REG appropriate for the
4003 'restore' instruction or, if RETURN_P is true, for the 'return'
4004 instruction. */
4005
4006 static int
4007 eligible_for_restore_insn (rtx trial, bool return_p)
4008 {
4009 rtx pat = PATTERN (trial);
4010 rtx src = SET_SRC (pat);
4011 bool src_is_freg = false;
4012 rtx src_reg;
4013
4014 /* Since we now can do moves between float and integer registers when
4015 VIS3 is enabled, we have to catch this case. We can allow such
4016 moves when doing a 'return' however. */
4017 src_reg = src;
4018 if (GET_CODE (src_reg) == SUBREG)
4019 src_reg = SUBREG_REG (src_reg);
4020 if (GET_CODE (src_reg) == REG
4021 && SPARC_FP_REG_P (REGNO (src_reg)))
4022 src_is_freg = true;
4023
4024 /* The 'restore src,%g0,dest' pattern for word mode and below. */
4025 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4026 && arith_operand (src, GET_MODE (src))
4027 && ! src_is_freg)
4028 {
4029 if (TARGET_ARCH64)
4030 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4031 else
4032 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
4033 }
4034
4035 /* The 'restore src,%g0,dest' pattern for double-word mode. */
4036 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4037 && arith_double_operand (src, GET_MODE (src))
4038 && ! src_is_freg)
4039 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4040
4041 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
4042 else if (! TARGET_FPU && register_operand (src, SFmode))
4043 return 1;
4044
4045 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
4046 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
4047 return 1;
4048
4049 /* If we have the 'return' instruction, anything that does not use
4050 local or output registers and can go into a delay slot wins. */
4051 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
4052 return 1;
4053
4054 /* The 'restore src1,src2,dest' pattern for SImode. */
4055 else if (GET_CODE (src) == PLUS
4056 && register_operand (XEXP (src, 0), SImode)
4057 && arith_operand (XEXP (src, 1), SImode))
4058 return 1;
4059
4060 /* The 'restore src1,src2,dest' pattern for DImode. */
4061 else if (GET_CODE (src) == PLUS
4062 && register_operand (XEXP (src, 0), DImode)
4063 && arith_double_operand (XEXP (src, 1), DImode))
4064 return 1;
4065
4066 /* The 'restore src1,%lo(src2),dest' pattern. */
4067 else if (GET_CODE (src) == LO_SUM
4068 && ! TARGET_CM_MEDMID
4069 && ((register_operand (XEXP (src, 0), SImode)
4070 && immediate_operand (XEXP (src, 1), SImode))
4071 || (TARGET_ARCH64
4072 && register_operand (XEXP (src, 0), DImode)
4073 && immediate_operand (XEXP (src, 1), DImode))))
4074 return 1;
4075
4076 /* The 'restore src,src,dest' pattern (a left shift by 1 is src + src). */
4077 else if (GET_CODE (src) == ASHIFT
4078 && (register_operand (XEXP (src, 0), SImode)
4079 || register_operand (XEXP (src, 0), DImode))
4080 && XEXP (src, 1) == const1_rtx)
4081 return 1;
4082
4083 return 0;
4084 }
4085
4086 /* Return nonzero if TRIAL can go into the function return's delay slot. */
4087
4088 int
4089 eligible_for_return_delay (rtx_insn *trial)
4090 {
4091 int regno;
4092 rtx pat;
4093
4094 /* If the function uses __builtin_eh_return, the eh_return machinery
4095 occupies the delay slot. */
4096 if (crtl->calls_eh_return)
4097 return 0;
4098
4099 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4100 return 0;
4101
4102 /* In the case of a leaf or flat function, anything can go into the slot. */
4103 if (sparc_leaf_function_p || TARGET_FLAT)
4104 return 1;
4105
4106 if (!NONJUMP_INSN_P (trial))
4107 return 0;
4108
4109 pat = PATTERN (trial);
4110 if (GET_CODE (pat) == PARALLEL)
4111 {
4112 int i;
4113
4114 if (! TARGET_V9)
4115 return 0;
4116 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
4117 {
4118 rtx expr = XVECEXP (pat, 0, i);
4119 if (GET_CODE (expr) != SET)
4120 return 0;
4121 if (GET_CODE (SET_DEST (expr)) != REG)
4122 return 0;
4123 regno = REGNO (SET_DEST (expr));
4124 if (regno >= 8 && regno < 24)
4125 return 0;
4126 }
4127 return !epilogue_renumber (&pat, 1);
4128 }
4129
4130 if (GET_CODE (pat) != SET)
4131 return 0;
4132
4133 if (GET_CODE (SET_DEST (pat)) != REG)
4134 return 0;
4135
4136 regno = REGNO (SET_DEST (pat));
4137
4138 /* Otherwise, only operations which can be done in tandem with
4139 a `restore' or `return' insn can go into the delay slot. */
4140 if (regno >= 8 && regno < 24)
4141 return 0;
4142
4143 /* If this instruction sets up a floating-point register and we have a return
4144 instruction, it can probably go in. But restore will not work
4145 with FP_REGS. */
4146 if (! SPARC_INT_REG_P (regno))
4147 return TARGET_V9 && !epilogue_renumber (&pat, 1);
4148
4149 return eligible_for_restore_insn (trial, true);
4150 }
4151
4152 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
4153
4154 int
4155 eligible_for_sibcall_delay (rtx_insn *trial)
4156 {
4157 rtx pat;
4158
4159 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4160 return 0;
4161
4162 if (!NONJUMP_INSN_P (trial))
4163 return 0;
4164
4165 pat = PATTERN (trial);
4166
4167 if (sparc_leaf_function_p || TARGET_FLAT)
4168 {
4169 /* If the tail call is done using the call instruction,
4170 we have to restore %o7 in the delay slot. */
4171 if (LEAF_SIBCALL_SLOT_RESERVED_P)
4172 return 0;
4173
4174 /* %g1 is used to build the function address */
4175 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
4176 return 0;
4177
4178 return 1;
4179 }
4180
4181 if (GET_CODE (pat) != SET)
4182 return 0;
4183
4184 /* Otherwise, only operations which can be done in tandem with
4185 a `restore' insn can go into the delay slot. */
4186 if (GET_CODE (SET_DEST (pat)) != REG
4187 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
4188 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
4189 return 0;
4190
4191 /* If it mentions %o7, it can't go in, because sibcall will clobber it
4192 in most cases. */
4193 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
4194 return 0;
4195
4196 return eligible_for_restore_insn (trial, false);
4197 }
4198 \f
4199 /* Determine if it's legal to put X into the constant pool. This
4200 is not possible if X contains the address of a symbol that is
4201 not constant (TLS) or not known at final link time (PIC). */
4202
4203 static bool
4204 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
4205 {
4206 switch (GET_CODE (x))
4207 {
4208 case CONST_INT:
4209 case CONST_WIDE_INT:
4210 case CONST_DOUBLE:
4211 case CONST_VECTOR:
4212 /* Accept all non-symbolic constants. */
4213 return false;
4214
4215 case LABEL_REF:
4216 /* Labels are OK iff we are non-PIC. */
4217 return flag_pic != 0;
4218
4219 case SYMBOL_REF:
4220 /* 'Naked' TLS symbol references are never OK,
4221 non-TLS symbols are OK iff we are non-PIC. */
4222 if (SYMBOL_REF_TLS_MODEL (x))
4223 return true;
4224 else
4225 return flag_pic != 0;
4226
4227 case CONST:
4228 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
4229 case PLUS:
4230 case MINUS:
4231 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
4232 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
4233 case UNSPEC:
4234 return true;
4235 default:
4236 gcc_unreachable ();
4237 }
4238 }
4239 \f
4240 /* Global Offset Table support. */
4241 static GTY(()) rtx got_helper_rtx = NULL_RTX;
4242 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
4243
4244 /* Return the SYMBOL_REF for the Global Offset Table. */
4245
4246 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
4247
4248 static rtx
4249 sparc_got (void)
4250 {
4251 if (!sparc_got_symbol)
4252 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
4253
4254 return sparc_got_symbol;
4255 }
4256
4257 /* Ensure that we are not using patterns that are not OK with PIC. */
4258
4259 int
4260 check_pic (int i)
4261 {
4262 rtx op;
4263
4264 switch (flag_pic)
4265 {
4266 case 1:
4267 op = recog_data.operand[i];
4268 gcc_assert (GET_CODE (op) != SYMBOL_REF
4269 && (GET_CODE (op) != CONST
4270 || (GET_CODE (XEXP (op, 0)) == MINUS
4271 && XEXP (XEXP (op, 0), 0) == sparc_got ()
4272 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
4273 /* fallthrough */
4274 case 2:
4275 default:
4276 return 1;
4277 }
4278 }
4279
4280 /* Return true if X is an address which needs a temporary register when
4281 reloaded while generating PIC code. */
4282
4283 int
4284 pic_address_needs_scratch (rtx x)
4285 {
4286 /* An address which is a symbolic operand plus a non-SMALL_INT needs a temp reg. */
4287 if (GET_CODE (x) == CONST
4288 && GET_CODE (XEXP (x, 0)) == PLUS
4289 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4291 && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
4292 return 1;
4293
4294 return 0;
4295 }
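
/* For instance (a hypothetical address): under -fpic,
   (const (plus (symbol_ref "foo") (const_int 8192))) needs a scratch
   register because 8192 does not fit in the 13-bit signed immediate
   range checked by SMALL_INT, whereas an offset of, say, 64 does.  */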
4296
4297 /* Determine if a given RTX is a valid constant. We already know this
4298 satisfies CONSTANT_P. */
4299
4300 static bool
4301 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4302 {
4303 switch (GET_CODE (x))
4304 {
4305 case CONST:
4306 case SYMBOL_REF:
4307 if (sparc_tls_referenced_p (x))
4308 return false;
4309 break;
4310
4311 case CONST_DOUBLE:
4312 /* Floating point constants are generally not ok.
4313 The only exceptions are 0.0 and all-ones with VIS.
4314 if (TARGET_VIS
4315 && SCALAR_FLOAT_MODE_P (mode)
4316 && (const_zero_operand (x, mode)
4317 || const_all_ones_operand (x, mode)))
4318 return true;
4319
4320 return false;
4321
4322 case CONST_VECTOR:
4323 /* Vector constants are generally not ok.
4324 The only exceptions are 0 and all-ones (-1) with VIS.
4325 if (TARGET_VIS
4326 && (const_zero_operand (x, mode)
4327 || const_all_ones_operand (x, mode)))
4328 return true;
4329
4330 return false;
4331
4332 default:
4333 break;
4334 }
4335
4336 return true;
4337 }
4338
4339 /* Determine if a given RTX is a valid constant address. */
4340
4341 bool
4342 constant_address_p (rtx x)
4343 {
4344 switch (GET_CODE (x))
4345 {
4346 case LABEL_REF:
4347 case CONST_INT:
4348 case HIGH:
4349 return true;
4350
4351 case CONST:
4352 if (flag_pic && pic_address_needs_scratch (x))
4353 return false;
4354 return sparc_legitimate_constant_p (Pmode, x);
4355
4356 case SYMBOL_REF:
4357 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4358
4359 default:
4360 return false;
4361 }
4362 }
4363
4364 /* Nonzero if the constant value X is a legitimate general operand
4365 when generating PIC code. It is given that flag_pic is on and
4366 that X satisfies CONSTANT_P. */
4367
4368 bool
4369 legitimate_pic_operand_p (rtx x)
4370 {
4371 if (pic_address_needs_scratch (x))
4372 return false;
4373 if (sparc_tls_referenced_p (x))
4374 return false;
4375 return true;
4376 }
4377
4378 /* Return true if X is a representation of the PIC register. */
4379
4380 static bool
4381 sparc_pic_register_p (rtx x)
4382 {
4383 if (!REG_P (x) || !pic_offset_table_rtx)
4384 return false;
4385
4386 if (x == pic_offset_table_rtx)
4387 return true;
4388
4389 if (!HARD_REGISTER_P (pic_offset_table_rtx)
4390 && (HARD_REGISTER_P (x) || lra_in_progress)
4391 && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
4392 return true;
4393
4394 return false;
4395 }
4396
4397 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4398 (CONST_INT_P (X) \
4399 && INTVAL (X) >= -0x1000 \
4400 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4401
4402 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4403 (CONST_INT_P (X) \
4404 && INTVAL (X) >= -0x1000 \
4405 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
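
/* A note on the bounds above (an interpretation, not part of the original
   comments): the basic -0x1000..0xfff window is the 13-bit signed
   immediate field of SPARC load/store instructions; GET_MODE_SIZE is
   subtracted so that every word of a multi-word access starting at the
   offset still lands inside that window.  The tighter 0xc00 bound for the
   OLO10 form presumably leaves room for the up-to-0x3ff value contributed
   by the %lo() part.  */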
4406
4407 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4408
4409 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4410 ordinarily. This changes a bit when generating PIC. */
4411
4412 static bool
4413 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4414 {
4415 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4416
4417 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4418 rs1 = addr;
4419 else if (GET_CODE (addr) == PLUS)
4420 {
4421 rs1 = XEXP (addr, 0);
4422 rs2 = XEXP (addr, 1);
4423
4424 /* Canonicalize: REG comes first; if there are no regs,
4425 LO_SUM comes first. */
4426 if (!REG_P (rs1)
4427 && GET_CODE (rs1) != SUBREG
4428 && (REG_P (rs2)
4429 || GET_CODE (rs2) == SUBREG
4430 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4431 {
4432 rs1 = XEXP (addr, 1);
4433 rs2 = XEXP (addr, 0);
4434 }
4435
4436 if ((flag_pic == 1
4437 && sparc_pic_register_p (rs1)
4438 && !REG_P (rs2)
4439 && GET_CODE (rs2) != SUBREG
4440 && GET_CODE (rs2) != LO_SUM
4441 && GET_CODE (rs2) != MEM
4442 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4443 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4444 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4445 || ((REG_P (rs1)
4446 || GET_CODE (rs1) == SUBREG)
4447 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4448 {
4449 imm1 = rs2;
4450 rs2 = NULL;
4451 }
4452 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4453 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4454 {
4455 /* We prohibit REG + REG for TFmode when there are no quad move insns
4456 and we consequently need to split. We do this because REG+REG
4457 is not an offsettable address. If we get the situation in reload
4458 where source and destination of a movtf pattern are both MEMs with
4459 REG+REG address, then only one of them gets converted to an
4460 offsettable address. */
4461 if (mode == TFmode
4462 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4463 return 0;
4464
4465 /* Likewise for TImode, but in all cases. */
4466 if (mode == TImode)
4467 return 0;
4468
4469 /* We prohibit REG + REG on ARCH32 if not optimizing for
4470 DFmode/DImode because then mem_min_alignment is likely to be zero
4471 after reload and the forced split would lack a matching splitter
4472 pattern. */
4473 if (TARGET_ARCH32 && !optimize
4474 && (mode == DFmode || mode == DImode))
4475 return 0;
4476 }
4477 else if (USE_AS_OFFSETABLE_LO10
4478 && GET_CODE (rs1) == LO_SUM
4479 && TARGET_ARCH64
4480 && ! TARGET_CM_MEDMID
4481 && RTX_OK_FOR_OLO10_P (rs2, mode))
4482 {
4483 rs2 = NULL;
4484 imm1 = XEXP (rs1, 1);
4485 rs1 = XEXP (rs1, 0);
4486 if (!CONSTANT_P (imm1)
4487 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4488 return 0;
4489 }
4490 }
4491 else if (GET_CODE (addr) == LO_SUM)
4492 {
4493 rs1 = XEXP (addr, 0);
4494 imm1 = XEXP (addr, 1);
4495
4496 if (!CONSTANT_P (imm1)
4497 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4498 return 0;
4499
4500 /* We can't allow TFmode in 32-bit mode, because an offset greater
4501 than the alignment (8) may cause the LO_SUM to overflow. */
4502 if (mode == TFmode && TARGET_ARCH32)
4503 return 0;
4504
4505 /* During reload, accept the HIGH+LO_SUM construct generated by
4506 sparc_legitimize_reload_address. */
4507 if (reload_in_progress
4508 && GET_CODE (rs1) == HIGH
4509 && XEXP (rs1, 0) == imm1)
4510 return 1;
4511 }
4512 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4513 return 1;
4514 else
4515 return 0;
4516
4517 if (GET_CODE (rs1) == SUBREG)
4518 rs1 = SUBREG_REG (rs1);
4519 if (!REG_P (rs1))
4520 return 0;
4521
4522 if (rs2)
4523 {
4524 if (GET_CODE (rs2) == SUBREG)
4525 rs2 = SUBREG_REG (rs2);
4526 if (!REG_P (rs2))
4527 return 0;
4528 }
4529
4530 if (strict)
4531 {
4532 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4533 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4534 return 0;
4535 }
4536 else
4537 {
4538 if ((! SPARC_INT_REG_P (REGNO (rs1))
4539 && REGNO (rs1) != FRAME_POINTER_REGNUM
4540 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4541 || (rs2
4542 && (! SPARC_INT_REG_P (REGNO (rs2))
4543 && REGNO (rs2) != FRAME_POINTER_REGNUM
4544 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4545 return 0;
4546 }
4547 return 1;
4548 }
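
/* Informally (a summary, not an exhaustive restatement of the code above),
   the accepted addresses are of the forms [reg], [reg + reg] and
   [reg + simm13], e.g. [%i0 + %i1] or [%i0 + 64], plus [%lo(sym) + reg]
   following a sethi, with extra restrictions for TFmode/TImode, PIC and
   TLS operands.  */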
4549
4550 /* Return the SYMBOL_REF for the tls_get_addr function. */
4551
4552 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4553
4554 static rtx
4555 sparc_tls_get_addr (void)
4556 {
4557 if (!sparc_tls_symbol)
4558 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4559
4560 return sparc_tls_symbol;
4561 }
4562
4563 /* Return the Global Offset Table to be used in TLS mode. */
4564
4565 static rtx
4566 sparc_tls_got (void)
4567 {
4568 /* In PIC mode, this is just the PIC offset table. */
4569 if (flag_pic)
4570 {
4571 crtl->uses_pic_offset_table = 1;
4572 return pic_offset_table_rtx;
4573 }
4574
4575 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4576 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4577 if (TARGET_SUN_TLS && TARGET_ARCH32)
4578 {
4579 load_got_register ();
4580 return global_offset_table_rtx;
4581 }
4582
4583 /* In all other cases, we load a new pseudo with the GOT symbol. */
4584 return copy_to_reg (sparc_got ());
4585 }
4586
4587 /* Return true if X contains a thread-local symbol. */
4588
4589 static bool
4590 sparc_tls_referenced_p (rtx x)
4591 {
4592 if (!TARGET_HAVE_TLS)
4593 return false;
4594
4595 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4596 x = XEXP (XEXP (x, 0), 0);
4597
4598 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4599 return true;
4600
4601 /* That's all we handle in sparc_legitimize_tls_address for now. */
4602 return false;
4603 }
4604
4605 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4606 this (thread-local) address. */
4607
4608 static rtx
4609 sparc_legitimize_tls_address (rtx addr)
4610 {
4611 rtx temp1, temp2, temp3, ret, o0, got;
4612 rtx_insn *insn;
4613
4614 gcc_assert (can_create_pseudo_p ());
4615
4616 if (GET_CODE (addr) == SYMBOL_REF)
4617 switch (SYMBOL_REF_TLS_MODEL (addr))
4618 {
4619 case TLS_MODEL_GLOBAL_DYNAMIC:
4620 start_sequence ();
4621 temp1 = gen_reg_rtx (SImode);
4622 temp2 = gen_reg_rtx (SImode);
4623 ret = gen_reg_rtx (Pmode);
4624 o0 = gen_rtx_REG (Pmode, 8);
4625 got = sparc_tls_got ();
4626 emit_insn (gen_tgd_hi22 (temp1, addr));
4627 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
4628 if (TARGET_ARCH32)
4629 {
4630 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
4631 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
4632 addr, const1_rtx));
4633 }
4634 else
4635 {
4636 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
4637 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
4638 addr, const1_rtx));
4639 }
4640 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4641 insn = get_insns ();
4642 end_sequence ();
4643 emit_libcall_block (insn, ret, o0, addr);
4644 break;
4645
4646 case TLS_MODEL_LOCAL_DYNAMIC:
4647 start_sequence ();
4648 temp1 = gen_reg_rtx (SImode);
4649 temp2 = gen_reg_rtx (SImode);
4650 temp3 = gen_reg_rtx (Pmode);
4651 ret = gen_reg_rtx (Pmode);
4652 o0 = gen_rtx_REG (Pmode, 8);
4653 got = sparc_tls_got ();
4654 emit_insn (gen_tldm_hi22 (temp1));
4655 emit_insn (gen_tldm_lo10 (temp2, temp1));
4656 if (TARGET_ARCH32)
4657 {
4658 emit_insn (gen_tldm_add32 (o0, got, temp2));
4659 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
4660 const1_rtx));
4661 }
4662 else
4663 {
4664 emit_insn (gen_tldm_add64 (o0, got, temp2));
4665 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
4666 const1_rtx));
4667 }
4668 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4669 insn = get_insns ();
4670 end_sequence ();
4671 emit_libcall_block (insn, temp3, o0,
4672 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4673 UNSPEC_TLSLD_BASE));
4674 temp1 = gen_reg_rtx (SImode);
4675 temp2 = gen_reg_rtx (SImode);
4676 emit_insn (gen_tldo_hix22 (temp1, addr));
4677 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
4678 if (TARGET_ARCH32)
4679 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
4680 else
4681 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
4682 break;
4683
4684 case TLS_MODEL_INITIAL_EXEC:
4685 temp1 = gen_reg_rtx (SImode);
4686 temp2 = gen_reg_rtx (SImode);
4687 temp3 = gen_reg_rtx (Pmode);
4688 got = sparc_tls_got ();
4689 emit_insn (gen_tie_hi22 (temp1, addr));
4690 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
4691 if (TARGET_ARCH32)
4692 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4693 else
4694 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4695 if (TARGET_SUN_TLS)
4696 {
4697 ret = gen_reg_rtx (Pmode);
4698 if (TARGET_ARCH32)
4699 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
4700 temp3, addr));
4701 else
4702 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
4703 temp3, addr));
4704 }
4705 else
4706 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4707 break;
4708
4709 case TLS_MODEL_LOCAL_EXEC:
4710 temp1 = gen_reg_rtx (Pmode);
4711 temp2 = gen_reg_rtx (Pmode);
4712 if (TARGET_ARCH32)
4713 {
4714 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
4715 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
4716 }
4717 else
4718 {
4719 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
4720 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
4721 }
4722 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4723 break;
4724
4725 default:
4726 gcc_unreachable ();
4727 }
4728
4729 else if (GET_CODE (addr) == CONST)
4730 {
4731 rtx base, offset;
4732
4733 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4734
4735 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4736 offset = XEXP (XEXP (addr, 0), 1);
4737
4738 base = force_operand (base, NULL_RTX);
4739 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4740 offset = force_reg (Pmode, offset);
4741 ret = gen_rtx_PLUS (Pmode, base, offset);
4742 }
4743
4744 else
4745 gcc_unreachable (); /* for now ... */
4746
4747 return ret;
4748 }
4749
4750 /* Legitimize PIC addresses. If the address is already position-independent,
4751 we return ORIG. Newly generated position-independent addresses go into a
4752 reg. This is REG if nonzero, otherwise we allocate register(s) as
4753 necessary. */
4754
4755 static rtx
4756 sparc_legitimize_pic_address (rtx orig, rtx reg)
4757 {
4758 if (GET_CODE (orig) == SYMBOL_REF
4759 /* See the comment in sparc_expand_move. */
4760 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4761 {
4762 bool gotdata_op = false;
4763 rtx pic_ref, address;
4764 rtx_insn *insn;
4765
4766 if (!reg)
4767 {
4768 gcc_assert (can_create_pseudo_p ());
4769 reg = gen_reg_rtx (Pmode);
4770 }
4771
4772 if (flag_pic == 2)
4773 {
4774 /* If not during reload, allocate another temp reg here for loading
4775 in the address, so that these instructions can be optimized
4776 properly. */
4777 rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
4778
4779 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4780 won't get confused into thinking that these two instructions
4781 are loading in the true address of the symbol. If in the
4782 future a PIC rtx exists, that should be used instead. */
4783 if (TARGET_ARCH64)
4784 {
4785 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4786 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4787 }
4788 else
4789 {
4790 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4791 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4792 }
4793
4794 address = temp_reg;
4795 gotdata_op = true;
4796 }
4797 else
4798 address = orig;
4799
4800 crtl->uses_pic_offset_table = 1;
4801 if (gotdata_op)
4802 {
4803 if (TARGET_ARCH64)
4804 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4805 pic_offset_table_rtx,
4806 address, orig));
4807 else
4808 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4809 pic_offset_table_rtx,
4810 address, orig));
4811 }
4812 else
4813 {
4814 pic_ref
4815 = gen_const_mem (Pmode,
4816 gen_rtx_PLUS (Pmode,
4817 pic_offset_table_rtx, address));
4818 insn = emit_move_insn (reg, pic_ref);
4819 }
4820
4821 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4822 by loop. */
4823 set_unique_reg_note (insn, REG_EQUAL, orig);
4824 return reg;
4825 }
4826 else if (GET_CODE (orig) == CONST)
4827 {
4828 rtx base, offset;
4829
4830 if (GET_CODE (XEXP (orig, 0)) == PLUS
4831 && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
4832 return orig;
4833
4834 if (!reg)
4835 {
4836 gcc_assert (can_create_pseudo_p ());
4837 reg = gen_reg_rtx (Pmode);
4838 }
4839
4840 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4841 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4842 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4843 base == reg ? NULL_RTX : reg);
4844
4845 if (GET_CODE (offset) == CONST_INT)
4846 {
4847 if (SMALL_INT (offset))
4848 return plus_constant (Pmode, base, INTVAL (offset));
4849 else if (can_create_pseudo_p ())
4850 offset = force_reg (Pmode, offset);
4851 else
4852 /* If we reach here, then something is seriously wrong. */
4853 gcc_unreachable ();
4854 }
4855 return gen_rtx_PLUS (Pmode, base, offset);
4856 }
4857 else if (GET_CODE (orig) == LABEL_REF)
4858 /* ??? We ought to be checking that the register is live instead, in case
4859 it is eliminated. */
4860 crtl->uses_pic_offset_table = 1;
4861
4862 return orig;
4863 }
4864
4865 /* Try machine-dependent ways of modifying an illegitimate address X
4866 to be legitimate. If we find one, return the new, valid address.
4867
4868 OLDX is the address as it was before break_out_memory_refs was called.
4869 In some cases it is useful to look at this to decide what needs to be done.
4870
4871 MODE is the mode of the operand pointed to by X.
4872
4873 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4874
4875 static rtx
4876 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4877 machine_mode mode)
4878 {
4879 rtx orig_x = x;
4880
4881 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4882 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4883 force_operand (XEXP (x, 0), NULL_RTX));
4884 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4885 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4886 force_operand (XEXP (x, 1), NULL_RTX));
4887 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4888 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4889 XEXP (x, 1));
4890 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4891 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4892 force_operand (XEXP (x, 1), NULL_RTX));
4893
4894 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4895 return x;
4896
4897 if (sparc_tls_referenced_p (x))
4898 x = sparc_legitimize_tls_address (x);
4899 else if (flag_pic)
4900 x = sparc_legitimize_pic_address (x, NULL_RTX);
4901 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4902 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4903 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4904 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
4905 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4906 copy_to_mode_reg (Pmode, XEXP (x, 0)));
4907 else if (GET_CODE (x) == SYMBOL_REF
4908 || GET_CODE (x) == CONST
4909 || GET_CODE (x) == LABEL_REF)
4910 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
4911
4912 return x;
4913 }
4914
4915 /* Delegitimize an address that was legitimized by the above function. */
4916
4917 static rtx
4918 sparc_delegitimize_address (rtx x)
4919 {
4920 x = delegitimize_mem_from_attrs (x);
4921
4922 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
4923 switch (XINT (XEXP (x, 1), 1))
4924 {
4925 case UNSPEC_MOVE_PIC:
4926 case UNSPEC_TLSLE:
4927 x = XVECEXP (XEXP (x, 1), 0, 0);
4928 gcc_assert (GET_CODE (x) == SYMBOL_REF);
4929 break;
4930 default:
4931 break;
4932 }
4933
4934 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
4935 if (GET_CODE (x) == MINUS
4936 && sparc_pic_register_p (XEXP (x, 0))
4937 && GET_CODE (XEXP (x, 1)) == LO_SUM
4938 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
4939 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
4940 {
4941 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
4942 gcc_assert (GET_CODE (x) == LABEL_REF
4943 || (GET_CODE (x) == CONST
4944 && GET_CODE (XEXP (x, 0)) == PLUS
4945 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
4946 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
4947 }
4948
4949 return x;
4950 }
4951
4952 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4953 replace the input X, or the original X if no replacement is called for.
4954 The output parameter *WIN is 1 if the calling macro should goto WIN,
4955 0 if it should not.
4956
4957 For SPARC, we wish to handle addresses by splitting them into
4958 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
4959 This cuts the number of extra insns by one.
4960
4961 Do nothing when generating PIC code and the address is a symbolic
4962 operand or requires a scratch register. */
4963
4964 rtx
4965 sparc_legitimize_reload_address (rtx x, machine_mode mode,
4966 int opnum, int type,
4967 int ind_levels ATTRIBUTE_UNUSED, int *win)
4968 {
4969 /* Decompose SImode constants into HIGH+LO_SUM. */
4970 if (CONSTANT_P (x)
4971 && (mode != TFmode || TARGET_ARCH64)
4972 && GET_MODE (x) == SImode
4973 && GET_CODE (x) != LO_SUM
4974 && GET_CODE (x) != HIGH
4975 && sparc_cmodel <= CM_MEDLOW
4976 && !(flag_pic
4977 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
4978 {
4979 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
4980 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4981 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4982 opnum, (enum reload_type)type);
4983 *win = 1;
4984 return x;
4985 }
4986
4987 /* We have to recognize what we have already generated above. */
4988 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
4989 {
4990 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4991 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4992 opnum, (enum reload_type)type);
4993 *win = 1;
4994 return x;
4995 }
4996
4997 *win = 0;
4998 return x;
4999 }
5000
5001 /* Return true if ADDR (a legitimate address expression)
5002 has an effect that depends on the machine mode it is used for.
5003
5004 In PIC mode,
5005
5006 (mem:HI [%l7+a])
5007
5008 is not equivalent to
5009
5010 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
5011
5012 because [%l7+a+1] is interpreted as the address of (a+1). */
5013
5014
5015 static bool
5016 sparc_mode_dependent_address_p (const_rtx addr,
5017 addr_space_t as ATTRIBUTE_UNUSED)
5018 {
5019 if (GET_CODE (addr) == PLUS
5020 && sparc_pic_register_p (XEXP (addr, 0))
5021 && symbolic_operand (XEXP (addr, 1), VOIDmode))
5022 return true;
5023
5024 return false;
5025 }
5026
5027 #ifdef HAVE_GAS_HIDDEN
5028 # define USE_HIDDEN_LINKONCE 1
5029 #else
5030 # define USE_HIDDEN_LINKONCE 0
5031 #endif
5032
5033 static void
5034 get_pc_thunk_name (char name[32], unsigned int regno)
5035 {
5036 const char *reg_name = reg_names[regno];
5037
5038 /* Skip the leading '%' as that cannot be used in a
5039 symbol name. */
5040 reg_name += 1;
5041
5042 if (USE_HIDDEN_LINKONCE)
5043 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
5044 else
5045 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
5046 }
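
/* For example, when the GOT register is %l7 and the assembler supports
   hidden link-once symbols, the name built above is
   "__sparc_get_pc_thunk.l7"; otherwise an internal LADDPC label is used
   instead.  */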
5047
5048 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
5049
5050 static rtx
5051 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2)
5052 {
5053 int orig_flag_pic = flag_pic;
5054 rtx insn;
5055
5056 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
5057 flag_pic = 0;
5058 if (TARGET_ARCH64)
5059 insn = gen_load_pcrel_symdi (op0, op1, op2, GEN_INT (REGNO (op0)));
5060 else
5061 insn = gen_load_pcrel_symsi (op0, op1, op2, GEN_INT (REGNO (op0)));
5062 flag_pic = orig_flag_pic;
5063
5064 return insn;
5065 }
5066
5067 /* Emit code to load the GOT register. */
5068
5069 void
5070 load_got_register (void)
5071 {
5072 if (!global_offset_table_rtx)
5073 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
5074
5075 if (TARGET_VXWORKS_RTP)
5076 emit_insn (gen_vxworks_load_got ());
5077 else
5078 {
5079 /* The GOT symbol is subject to a PC-relative relocation so we need a
5080 helper function to add the PC value and thus get the final value. */
5081 if (!got_helper_rtx)
5082 {
5083 char name[32];
5084 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
5085 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5086 }
5087
5088 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
5089 got_helper_rtx));
5090 }
5091 }
5092
5093 /* Emit a call instruction with the pattern given by PAT. ADDR is the
5094 address of the call target. */
5095
5096 void
5097 sparc_emit_call_insn (rtx pat, rtx addr)
5098 {
5099 rtx_insn *insn;
5100
5101 insn = emit_call_insn (pat);
5102
5103 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
5104 if (TARGET_VXWORKS_RTP
5105 && flag_pic
5106 && GET_CODE (addr) == SYMBOL_REF
5107 && (SYMBOL_REF_DECL (addr)
5108 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
5109 : !SYMBOL_REF_LOCAL_P (addr)))
5110 {
5111 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5112 crtl->uses_pic_offset_table = 1;
5113 }
5114 }
5115 \f
5116 /* Return 1 if RTX is a MEM which is known to be aligned to at
5117 least a DESIRED byte boundary. */
5118
5119 int
5120 mem_min_alignment (rtx mem, int desired)
5121 {
5122 rtx addr, base, offset;
5123
5124 /* If it's not a MEM we can't accept it. */
5125 if (GET_CODE (mem) != MEM)
5126 return 0;
5127
5128 /* Obviously... */
5129 if (!TARGET_UNALIGNED_DOUBLES
5130 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
5131 return 1;
5132
5133 /* ??? The rest of the function predates MEM_ALIGN so
5134 there is probably a bit of redundancy. */
5135 addr = XEXP (mem, 0);
5136 base = offset = NULL_RTX;
5137 if (GET_CODE (addr) == PLUS)
5138 {
5139 if (GET_CODE (XEXP (addr, 0)) == REG)
5140 {
5141 base = XEXP (addr, 0);
5142
5143 /* What we are saying here is that if the base
5144 REG is aligned properly, the compiler will make
5145 sure any REG based index upon it will be so
5146 as well. */
5147 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
5148 offset = XEXP (addr, 1);
5149 else
5150 offset = const0_rtx;
5151 }
5152 }
5153 else if (GET_CODE (addr) == REG)
5154 {
5155 base = addr;
5156 offset = const0_rtx;
5157 }
5158
5159 if (base != NULL_RTX)
5160 {
5161 int regno = REGNO (base);
5162
5163 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
5164 {
5165 /* Check if the compiler has recorded some information
5166 about the alignment of the base REG. If reload has
5167 completed, we already matched with proper alignments.
5168 If not running global_alloc, reload might give us
5169 an unaligned pointer to the local stack, though. */
5170 if (((cfun != 0
5171 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
5172 || (optimize && reload_completed))
5173 && (INTVAL (offset) & (desired - 1)) == 0)
5174 return 1;
5175 }
5176 else
5177 {
5178 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
5179 return 1;
5180 }
5181 }
5182 else if (! TARGET_UNALIGNED_DOUBLES
5183 || CONSTANT_P (addr)
5184 || GET_CODE (addr) == LO_SUM)
5185 {
5186 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
5187 is true, in which case we can only assume that an access is aligned if
5188 it is to a constant address, or the address involves a LO_SUM. */
5189 return 1;
5190 }
5191
5192 /* An obviously unaligned address. */
5193 return 0;
5194 }
5195
5196 \f
5197 /* Vectors to keep interesting information about registers where it can easily
5198 be got. We used to use the actual mode value as the bit number, but there
5199 are more than 32 modes now. Instead we use two tables: one indexed by
5200 hard register number, and one indexed by mode. */
5201
5202 /* The purpose of sparc_mode_class is to shrink the range of modes so that
5203 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
5204 mapped into one sparc_mode_class mode. */
5205
5206 enum sparc_mode_class {
5207 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
5208 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
5209 CC_MODE, CCFP_MODE
5210 };
5211
5212 /* Modes for single-word and smaller quantities. */
5213 #define S_MODES \
5214 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
5215
5216 /* Modes for double-word and smaller quantities. */
5217 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5218
5219 /* Modes for quad-word and smaller quantities. */
5220 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
5221
5222 /* Modes for 8-word and smaller quantities. */
5223 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
5224
5225 /* Modes for single-float quantities. */
5226 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
5227
5228 /* Modes for double-float and smaller quantities. */
5229 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5230
5231 /* Modes for quad-float and smaller quantities. */
5232 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
5233
5234 /* Modes for quad-float pairs and smaller quantities. */
5235 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
5236
5237 /* Modes for double-float only quantities. */
5238 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
5239
5240 /* Modes for quad-float and double-float only quantities. */
5241 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
5242
5243 /* Modes for quad-float pairs and double-float only quantities. */
5244 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
5245
5246 /* Modes for condition codes. */
5247 #define CC_MODES (1 << (int) CC_MODE)
5248 #define CCFP_MODES (1 << (int) CCFP_MODE)
5249
5250 /* Value is 1 if register/mode pair is acceptable on sparc.
5251
5252 The funny mixture of D and T modes is because integer operations
5253 do not specially operate on tetra quantities, so non-quad-aligned
5254 registers can hold quadword quantities (except %o4 and %i4 because
5255 they cross fixed registers).
5256
5257 ??? Note that, despite the settings, non-double-aligned parameter
5258 registers can hold double-word quantities in 32-bit mode. */
5259
5260 /* This points to either the 32-bit or the 64-bit version. */
5261 static const int *hard_regno_mode_classes;
5262
5263 static const int hard_32bit_mode_classes[] = {
5264 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5265 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5266 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5267 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5268
5269 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5270 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5271 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5272 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5273
5274 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5275 and none can hold SFmode/SImode values. */
5276 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5277 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5278 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5279 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5280
5281 /* %fcc[0123] */
5282 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5283
5284 /* %icc, %sfp, %gsr */
5285 CC_MODES, 0, D_MODES
5286 };
5287
5288 static const int hard_64bit_mode_classes[] = {
5289 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5290 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5291 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5292 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5293
5294 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5295 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5296 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5297 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5298
5299 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5300 and none can hold SFmode/SImode values. */
5301 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5302 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5303 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5304 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5305
5306 /* %fcc[0123] */
5307 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5308
5309 /* %icc, %sfp, %gsr */
5310 CC_MODES, 0, D_MODES
5311 };
5312
5313 static int sparc_mode_class [NUM_MACHINE_MODES];
5314
5315 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5316
5317 static void
5318 sparc_init_modes (void)
5319 {
5320 int i;
5321
5322 for (i = 0; i < NUM_MACHINE_MODES; i++)
5323 {
5324 machine_mode m = (machine_mode) i;
5325 unsigned int size = GET_MODE_SIZE (m);
5326
5327 switch (GET_MODE_CLASS (m))
5328 {
5329 case MODE_INT:
5330 case MODE_PARTIAL_INT:
5331 case MODE_COMPLEX_INT:
5332 if (size < 4)
5333 sparc_mode_class[i] = 1 << (int) H_MODE;
5334 else if (size == 4)
5335 sparc_mode_class[i] = 1 << (int) S_MODE;
5336 else if (size == 8)
5337 sparc_mode_class[i] = 1 << (int) D_MODE;
5338 else if (size == 16)
5339 sparc_mode_class[i] = 1 << (int) T_MODE;
5340 else if (size == 32)
5341 sparc_mode_class[i] = 1 << (int) O_MODE;
5342 else
5343 sparc_mode_class[i] = 0;
5344 break;
5345 case MODE_VECTOR_INT:
5346 if (size == 4)
5347 sparc_mode_class[i] = 1 << (int) SF_MODE;
5348 else if (size == 8)
5349 sparc_mode_class[i] = 1 << (int) DF_MODE;
5350 else
5351 sparc_mode_class[i] = 0;
5352 break;
5353 case MODE_FLOAT:
5354 case MODE_COMPLEX_FLOAT:
5355 if (size == 4)
5356 sparc_mode_class[i] = 1 << (int) SF_MODE;
5357 else if (size == 8)
5358 sparc_mode_class[i] = 1 << (int) DF_MODE;
5359 else if (size == 16)
5360 sparc_mode_class[i] = 1 << (int) TF_MODE;
5361 else if (size == 32)
5362 sparc_mode_class[i] = 1 << (int) OF_MODE;
5363 else
5364 sparc_mode_class[i] = 0;
5365 break;
5366 case MODE_CC:
5367 if (m == CCFPmode || m == CCFPEmode)
5368 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5369 else
5370 sparc_mode_class[i] = 1 << (int) CC_MODE;
5371 break;
5372 default:
5373 sparc_mode_class[i] = 0;
5374 break;
5375 }
5376 }
5377
5378 if (TARGET_ARCH64)
5379 hard_regno_mode_classes = hard_64bit_mode_classes;
5380 else
5381 hard_regno_mode_classes = hard_32bit_mode_classes;
5382
5383 /* Initialize the array used by REGNO_REG_CLASS. */
5384 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5385 {
5386 if (i < 16 && TARGET_V8PLUS)
5387 sparc_regno_reg_class[i] = I64_REGS;
5388 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5389 sparc_regno_reg_class[i] = GENERAL_REGS;
5390 else if (i < 64)
5391 sparc_regno_reg_class[i] = FP_REGS;
5392 else if (i < 96)
5393 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5394 else if (i < 100)
5395 sparc_regno_reg_class[i] = FPCC_REGS;
5396 else
5397 sparc_regno_reg_class[i] = NO_REGS;
5398 }
5399 }
5400 \f
5401 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5402
5403 static inline bool
5404 save_global_or_fp_reg_p (unsigned int regno,
5405 int leaf_function ATTRIBUTE_UNUSED)
5406 {
5407 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
5408 }
5409
5410 /* Return whether the return address register (%i7) is needed. */
5411
5412 static inline bool
5413 return_addr_reg_needed_p (int leaf_function)
5414 {
5415 /* If it is live, for example because of __builtin_return_address (0). */
5416 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5417 return true;
5418
5419 /* Otherwise, it is needed as save register if %o7 is clobbered. */
5420 if (!leaf_function
5421 /* Loading the GOT register clobbers %o7. */
5422 || crtl->uses_pic_offset_table
5423 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5424 return true;
5425
5426 return false;
5427 }
5428
5429 /* Return whether REGNO, a local or in register, must be saved/restored. */
5430
5431 static bool
5432 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5433 {
5434 /* General case: call-saved registers live at some point. */
5435 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
5436 return true;
5437
5438 /* Frame pointer register (%fp) if needed. */
5439 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5440 return true;
5441
5442 /* Return address register (%i7) if needed. */
5443 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5444 return true;
5445
5446 /* GOT register (%l7) if needed. */
5447 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
5448 return true;
5449
5450 /* If the function accesses prior frames, the frame pointer and the return
5451 address of the previous frame must be saved on the stack. */
5452 if (crtl->accesses_prior_frames
5453 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5454 return true;
5455
5456 return false;
5457 }
5458
5459 /* Compute the frame size required by the function. This function is called
5460 during the reload pass and also by sparc_expand_prologue. */
5461
5462 HOST_WIDE_INT
5463 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5464 {
5465 HOST_WIDE_INT frame_size, apparent_frame_size;
5466 int args_size, n_global_fp_regs = 0;
5467 bool save_local_in_regs_p = false;
5468 unsigned int i;
5469
5470 /* If the function allocates dynamic stack space, the dynamic offset is
5471 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5472 if (leaf_function && !cfun->calls_alloca)
5473 args_size = 0;
5474 else
5475 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5476
5477 /* Calculate space needed for global registers. */
5478 if (TARGET_ARCH64)
5479 {
5480 for (i = 0; i < 8; i++)
5481 if (save_global_or_fp_reg_p (i, 0))
5482 n_global_fp_regs += 2;
5483 }
5484 else
5485 {
5486 for (i = 0; i < 8; i += 2)
5487 if (save_global_or_fp_reg_p (i, 0)
5488 || save_global_or_fp_reg_p (i + 1, 0))
5489 n_global_fp_regs += 2;
5490 }
5491
5492 /* In the flat window model, find out which local and in registers need to
5493 be saved. We don't reserve space in the current frame for them as they
5494 will be spilled into the register window save area of the caller's frame.
5495 However, as soon as we use this register window save area, we must create
5496 that of the current frame to make it the live one. */
5497 if (TARGET_FLAT)
5498 for (i = 16; i < 32; i++)
5499 if (save_local_or_in_reg_p (i, leaf_function))
5500 {
5501 save_local_in_regs_p = true;
5502 break;
5503 }
5504
5505 /* Calculate space needed for FP registers. */
5506 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5507 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5508 n_global_fp_regs += 2;
5509
5510 if (size == 0
5511 && n_global_fp_regs == 0
5512 && args_size == 0
5513 && !save_local_in_regs_p)
5514 frame_size = apparent_frame_size = 0;
5515 else
5516 {
5517 /* Start from the apparent frame size. */
5518 apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
5519
5520 /* We need to add the size of the outgoing argument area. */
5521 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5522
5523 /* And that of the register window save area. */
5524 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5525
5526 /* Finally, bump to the appropriate alignment. */
5527 frame_size = SPARC_STACK_ALIGN (frame_size);
5528 }
5529
5530 /* Set up values for use in prologue and epilogue. */
5531 sparc_frame_size = frame_size;
5532 sparc_apparent_frame_size = apparent_frame_size;
5533 sparc_n_global_fp_regs = n_global_fp_regs;
5534 sparc_save_local_in_regs_p = save_local_in_regs_p;
5535
5536 return frame_size;
5537 }
5538
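/* A hedged illustration of the computation above (not authoritative, the
   exact constants come from the target macros): for a non-leaf function
   with 40 bytes of locals and no call-saved global/FP registers to spill,
   we would get

     apparent_frame_size = ROUND_UP (40, 8) + 0 * 4 = 40
     frame_size = 40 + ROUND_UP (args_size, 8) + FIRST_PARM_OFFSET (decl)
     frame_size = SPARC_STACK_ALIGN (frame_size)

   i.e. the frame always carries the fixed register window save area (via
   FIRST_PARM_OFFSET) on top of the rounded local and argument areas.  */
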
5539 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5540
5541 int
5542 sparc_initial_elimination_offset (int to)
5543 {
5544 int offset;
5545
5546 if (to == STACK_POINTER_REGNUM)
5547 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5548 else
5549 offset = 0;
5550
5551 offset += SPARC_STACK_BIAS;
5552 return offset;
5553 }
5554
5555 /* Output any necessary .register pseudo-ops. */
5556
5557 void
5558 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5559 {
5560 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
5561 int i;
5562
5563 if (TARGET_ARCH32)
5564 return;
5565
5566 /* Check if %g[2367] were used without
5567 .register being printed for them already. */
5568 for (i = 2; i < 8; i++)
5569 {
5570 if (df_regs_ever_live_p (i)
5571 && ! sparc_hard_reg_printed [i])
5572 {
5573 sparc_hard_reg_printed [i] = 1;
5574 /* %g7 is used as TLS base register, use #ignore
5575 for it instead of #scratch. */
5576 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5577 i == 7 ? "ignore" : "scratch");
5578 }
5579 if (i == 3) i = 5;
5580 }
5581 #endif
5582 }
5583
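/* For illustration only: if a 64-bit function uses %g2 and %g3, and %g7 as
   the TLS base register, the loop above would emit something like

	.register	%g2, #scratch
	.register	%g3, #scratch
	.register	%g7, #ignore

   at most once per register, thanks to sparc_hard_reg_printed.  */
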
5584 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5585
5586 #if PROBE_INTERVAL > 4096
5587 #error Cannot use indexed addressing mode for stack probing
5588 #endif
5589
5590 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5591 inclusive. These are offsets from the current stack pointer.
5592
5593 Note that we don't use the REG+REG addressing mode for the probes because
5594 of the stack bias in 64-bit mode. And it doesn't really buy us anything
5595    so the advantage of having a single code path wins here.  */
5596
5597 static void
5598 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5599 {
5600 rtx g1 = gen_rtx_REG (Pmode, 1);
5601
5602 /* See if we have a constant small number of probes to generate. If so,
5603 that's the easy case. */
5604 if (size <= PROBE_INTERVAL)
5605 {
5606 emit_move_insn (g1, GEN_INT (first));
5607 emit_insn (gen_rtx_SET (g1,
5608 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5609 emit_stack_probe (plus_constant (Pmode, g1, -size));
5610 }
5611
5612 /* The run-time loop is made up of 9 insns in the generic case while the
5613    compile-time loop is made up of 4+2*(n-2) insns, where n is the number of intervals.  */
5614 else if (size <= 4 * PROBE_INTERVAL)
5615 {
5616 HOST_WIDE_INT i;
5617
5618 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5619 emit_insn (gen_rtx_SET (g1,
5620 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5621 emit_stack_probe (g1);
5622
5623 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5624 it exceeds SIZE. If only two probes are needed, this will not
5625 generate any code. Then probe at FIRST + SIZE. */
5626 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5627 {
5628 emit_insn (gen_rtx_SET (g1,
5629 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5630 emit_stack_probe (g1);
5631 }
5632
5633 emit_stack_probe (plus_constant (Pmode, g1,
5634 (i - PROBE_INTERVAL) - size));
5635 }
5636
5637 /* Otherwise, do the same as above, but in a loop. Note that we must be
5638 extra careful with variables wrapping around because we might be at
5639 the very top (or the very bottom) of the address space and we have
5640 to be able to handle this case properly; in particular, we use an
5641 equality test for the loop condition. */
5642 else
5643 {
5644 HOST_WIDE_INT rounded_size;
5645 rtx g4 = gen_rtx_REG (Pmode, 4);
5646
5647 emit_move_insn (g1, GEN_INT (first));
5648
5649
5650 /* Step 1: round SIZE to the previous multiple of the interval. */
5651
5652 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5653 emit_move_insn (g4, GEN_INT (rounded_size));
5654
5655
5656 /* Step 2: compute initial and final value of the loop counter. */
5657
5658 /* TEST_ADDR = SP + FIRST. */
5659 emit_insn (gen_rtx_SET (g1,
5660 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5661
5662 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5663 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5664
5665
5666 /* Step 3: the loop
5667
5668 while (TEST_ADDR != LAST_ADDR)
5669 {
5670 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5671 probe at TEST_ADDR
5672 }
5673
5674 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5675 until it is equal to ROUNDED_SIZE. */
5676
5677 if (TARGET_ARCH64)
5678 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5679 else
5680 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5681
5682
5683 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5684 that SIZE is equal to ROUNDED_SIZE. */
5685
5686 if (size != rounded_size)
5687 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5688 }
5689
5690 /* Make sure nothing is scheduled before we are done. */
5691 emit_insn (gen_blockage ());
5692 }
5693
5694 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5695 absolute addresses. */
5696
5697 const char *
5698 output_probe_stack_range (rtx reg1, rtx reg2)
5699 {
5700 static int labelno = 0;
5701 char loop_lab[32];
5702 rtx xops[2];
5703
5704 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5705
5706 /* Loop. */
5707 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5708
5709 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5710 xops[0] = reg1;
5711 xops[1] = GEN_INT (-PROBE_INTERVAL);
5712 output_asm_insn ("add\t%0, %1, %0", xops);
5713
5714 /* Test if TEST_ADDR == LAST_ADDR. */
5715 xops[1] = reg2;
5716 output_asm_insn ("cmp\t%0, %1", xops);
5717
5718 /* Probe at TEST_ADDR and branch. */
5719 if (TARGET_ARCH64)
5720 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5721 else
5722 fputs ("\tbne\t", asm_out_file);
5723 assemble_name_raw (asm_out_file, loop_lab);
5724 fputc ('\n', asm_out_file);
5725 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5726 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5727
5728 return "";
5729 }
5730
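/* For illustration, assuming PROBE_INTERVAL == 4096, REG1 == %g1, REG2 == %g4
   and the 64-bit stack bias of 2047, the routine above emits a loop along
   the lines of

   .LPSRL0:
	add	%g1, -4096, %g1		! TEST_ADDR -= PROBE_INTERVAL
	cmp	%g1, %g4		! reached LAST_ADDR?
	bne,pt	%xcc, .LPSRL0
	 st	%g0, [%g1+2047]		! probe, in the delay slot

   the store to [TEST_ADDR + bias] being the actual probe.  */
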
5731 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5732 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5733 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5734 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5735 the action to be performed if it returns false. Return the new offset. */
5736
5737 typedef bool (*sorr_pred_t) (unsigned int, int);
5738 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5739
5740 static int
5741 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5742 int offset, int leaf_function, sorr_pred_t save_p,
5743 sorr_act_t action_true, sorr_act_t action_false)
5744 {
5745 unsigned int i;
5746 rtx mem;
5747 rtx_insn *insn;
5748
5749 if (TARGET_ARCH64 && high <= 32)
5750 {
5751 int fp_offset = -1;
5752
5753 for (i = low; i < high; i++)
5754 {
5755 if (save_p (i, leaf_function))
5756 {
5757 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5758 base, offset));
5759 if (action_true == SORR_SAVE)
5760 {
5761 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5762 RTX_FRAME_RELATED_P (insn) = 1;
5763 }
5764 else /* action_true == SORR_RESTORE */
5765 {
5766 /* The frame pointer must be restored last since its old
5767 value may be used as base address for the frame. This
5768 is problematic in 64-bit mode only because of the lack
5769 of double-word load instruction. */
5770 if (i == HARD_FRAME_POINTER_REGNUM)
5771 fp_offset = offset;
5772 else
5773 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5774 }
5775 offset += 8;
5776 }
5777 else if (action_false == SORR_ADVANCE)
5778 offset += 8;
5779 }
5780
5781 if (fp_offset >= 0)
5782 {
5783 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5784 emit_move_insn (hard_frame_pointer_rtx, mem);
5785 }
5786 }
5787 else
5788 {
5789 for (i = low; i < high; i += 2)
5790 {
5791 bool reg0 = save_p (i, leaf_function);
5792 bool reg1 = save_p (i + 1, leaf_function);
5793 machine_mode mode;
5794 int regno;
5795
5796 if (reg0 && reg1)
5797 {
5798 mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
5799 regno = i;
5800 }
5801 else if (reg0)
5802 {
5803 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5804 regno = i;
5805 }
5806 else if (reg1)
5807 {
5808 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5809 regno = i + 1;
5810 offset += 4;
5811 }
5812 else
5813 {
5814 if (action_false == SORR_ADVANCE)
5815 offset += 8;
5816 continue;
5817 }
5818
5819 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5820 if (action_true == SORR_SAVE)
5821 {
5822 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5823 RTX_FRAME_RELATED_P (insn) = 1;
5824 if (mode == DImode)
5825 {
5826 rtx set1, set2;
5827 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5828 offset));
5829 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5830 RTX_FRAME_RELATED_P (set1) = 1;
5831 mem
5832 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5833 offset + 4));
5834 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5835 RTX_FRAME_RELATED_P (set2) = 1;
5836 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5837 gen_rtx_PARALLEL (VOIDmode,
5838 gen_rtvec (2, set1, set2)));
5839 }
5840 }
5841 else /* action_true == SORR_RESTORE */
5842 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5843
5844 /* Bump and round down to double word
5845 in case we already bumped by 4. */
5846 offset = ROUND_DOWN (offset + 8, 8);
5847 }
5848 }
5849
5850 return offset;
5851 }
5852
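/* To illustrate the pairing logic above (a sketch only, the offsets are
   made up): in 32-bit mode, if both %l0 and %l1 must be saved they are
   handled as a single DImode access at a doubleword-aligned offset, e.g.

	std	%l0, [%fp-8]

   whereas if only the odd register of a pair is live, a single SImode
   access at offset + 4 is used instead, e.g.

	st	%l3, [%fp-12]

   and the running offset is then bumped by 8 and rounded down to the next
   doubleword boundary.  */
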
5853 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5854
5855 static rtx
5856 emit_adjust_base_to_offset (rtx base, int offset)
5857 {
5858 /* ??? This might be optimized a little as %g1 might already have a
5859 value close enough that a single add insn will do. */
5860 /* ??? Although, all of this is probably only a temporary fix because
5861 if %g1 can hold a function result, then sparc_expand_epilogue will
5862 lose (the result will be clobbered). */
5863 rtx new_base = gen_rtx_REG (Pmode, 1);
5864 emit_move_insn (new_base, GEN_INT (offset));
5865 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5866 return new_base;
5867 }
5868
5869 /* Emit code to save/restore call-saved global and FP registers. */
5870
5871 static void
5872 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5873 {
5874 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5875 {
5876 base = emit_adjust_base_to_offset (base, offset);
5877 offset = 0;
5878 }
5879
5880 offset
5881 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5882 save_global_or_fp_reg_p, action, SORR_NONE);
5883 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5884 save_global_or_fp_reg_p, action, SORR_NONE);
5885 }
5886
5887 /* Emit code to save/restore call-saved local and in registers. */
5888
5889 static void
5890 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5891 {
5892 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5893 {
5894 base = emit_adjust_base_to_offset (base, offset);
5895 offset = 0;
5896 }
5897
5898 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5899 save_local_or_in_reg_p, action, SORR_ADVANCE);
5900 }
5901
5902 /* Emit a window_save insn. */
5903
5904 static rtx_insn *
5905 emit_window_save (rtx increment)
5906 {
5907 rtx_insn *insn = emit_insn (gen_window_save (increment));
5908 RTX_FRAME_RELATED_P (insn) = 1;
5909
5910 /* The incoming return address (%o7) is saved in %i7. */
5911 add_reg_note (insn, REG_CFA_REGISTER,
5912 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5913 gen_rtx_REG (Pmode,
5914 INCOMING_RETURN_ADDR_REGNUM)));
5915
5916 /* The window save event. */
5917 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5918
5919 /* The CFA is %fp, the hard frame pointer. */
5920 add_reg_note (insn, REG_CFA_DEF_CFA,
5921 plus_constant (Pmode, hard_frame_pointer_rtx,
5922 INCOMING_FRAME_SP_OFFSET));
5923
5924 return insn;
5925 }
5926
5927 /* Generate an increment for the stack pointer. */
5928
5929 static rtx
5930 gen_stack_pointer_inc (rtx increment)
5931 {
5932 return gen_rtx_SET (stack_pointer_rtx,
5933 gen_rtx_PLUS (Pmode,
5934 stack_pointer_rtx,
5935 increment));
5936 }
5937
5938 /* Expand the function prologue. The prologue is responsible for reserving
5939 storage for the frame, saving the call-saved registers and loading the
5940 GOT register if needed. */
5941
5942 void
5943 sparc_expand_prologue (void)
5944 {
5945 HOST_WIDE_INT size;
5946 rtx_insn *insn;
5947
5948 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5949 on the final value of the flag means deferring the prologue/epilogue
5950 expansion until just before the second scheduling pass, which is too
5951 late to emit multiple epilogues or return insns.
5952
5953 Of course we are making the assumption that the value of the flag
5954 will not change between now and its final value. Of the three parts
5955 of the formula, only the last one can reasonably vary. Let's take a
5956    closer look, assuming that the first two are set to true
5957 (otherwise the last value is effectively silenced).
5958
5959 If only_leaf_regs_used returns false, the global predicate will also
5960 be false so the actual frame size calculated below will be positive.
5961 As a consequence, the save_register_window insn will be emitted in
5962 the instruction stream; now this insn explicitly references %fp
5963 which is not a leaf register so only_leaf_regs_used will always
5964 return false subsequently.
5965
5966 If only_leaf_regs_used returns true, we hope that the subsequent
5967 optimization passes won't cause non-leaf registers to pop up. For
5968 example, the regrename pass has special provisions to not rename to
5969 non-leaf registers in a leaf function. */
5970 sparc_leaf_function_p
5971 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5972
5973 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5974
5975 if (flag_stack_usage_info)
5976 current_function_static_stack_size = size;
5977
5978 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
5979 || flag_stack_clash_protection)
5980 {
5981 if (crtl->is_leaf && !cfun->calls_alloca)
5982 {
5983 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
5984 sparc_emit_probe_stack_range (get_stack_check_protect (),
5985 size - get_stack_check_protect ());
5986 }
5987 else if (size > 0)
5988 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
5989 }
5990
5991 if (size == 0)
5992 ; /* do nothing. */
5993 else if (sparc_leaf_function_p)
5994 {
5995 rtx size_int_rtx = GEN_INT (-size);
5996
5997 if (size <= 4096)
5998 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5999 else if (size <= 8192)
6000 {
6001 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6002 RTX_FRAME_RELATED_P (insn) = 1;
6003
6004 /* %sp is still the CFA register. */
6005 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6006 }
6007 else
6008 {
6009 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6010 emit_move_insn (size_rtx, size_int_rtx);
6011 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6012 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6013 gen_stack_pointer_inc (size_int_rtx));
6014 }
6015
6016 RTX_FRAME_RELATED_P (insn) = 1;
6017 }
6018 else
6019 {
6020 rtx size_int_rtx = GEN_INT (-size);
6021
6022 if (size <= 4096)
6023 emit_window_save (size_int_rtx);
6024 else if (size <= 8192)
6025 {
6026 emit_window_save (GEN_INT (-4096));
6027
6028 /* %sp is not the CFA register anymore. */
6029 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6030
6031 /* Make sure no %fp-based store is issued until after the frame is
6032 established. The offset between the frame pointer and the stack
6033 pointer is calculated relative to the value of the stack pointer
6034 at the end of the function prologue, and moving instructions that
6035 access the stack via the frame pointer between the instructions
6036 that decrement the stack pointer could result in accessing the
6037 register window save area, which is volatile. */
6038 emit_insn (gen_frame_blockage ());
6039 }
6040 else
6041 {
6042 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6043 emit_move_insn (size_rtx, size_int_rtx);
6044 emit_window_save (size_rtx);
6045 }
6046 }
6047
6048 if (sparc_leaf_function_p)
6049 {
6050 sparc_frame_base_reg = stack_pointer_rtx;
6051 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6052 }
6053 else
6054 {
6055 sparc_frame_base_reg = hard_frame_pointer_rtx;
6056 sparc_frame_base_offset = SPARC_STACK_BIAS;
6057 }
6058
6059 if (sparc_n_global_fp_regs > 0)
6060 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6061 sparc_frame_base_offset
6062 - sparc_apparent_frame_size,
6063 SORR_SAVE);
6064
6065 /* Advertise that the data calculated just above are now valid. */
6066 sparc_prologue_data_valid_p = true;
6067 }
6068
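/* To make the three size cases above concrete (a sketch only, the exact
   assembly depends on the insn patterns): the 13-bit signed immediate caps
   each adjustment at 4096, so a leaf function with an 8000-byte frame falls
   in the second case and allocates it in two steps, e.g.

	add	%sp, -4096, %sp
	add	%sp, -3904, %sp		! 4096 - 8000

   while a non-leaf function of the same size would use

	save	%sp, -4096, %sp
	add	%sp, -3904, %sp

   and frames larger than 8192 bytes go through a scratch register (%g1).  */
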
6069 /* Expand the function prologue in the flat window model.  The prologue is
6070 responsible for reserving storage for the frame, saving the call-saved
6071 registers and loading the GOT register if needed.  */
6072
6073 void
6074 sparc_flat_expand_prologue (void)
6075 {
6076 HOST_WIDE_INT size;
6077 rtx_insn *insn;
6078
6079 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
6080
6081 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6082
6083 if (flag_stack_usage_info)
6084 current_function_static_stack_size = size;
6085
6086 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6087 || flag_stack_clash_protection)
6088 {
6089 if (crtl->is_leaf && !cfun->calls_alloca)
6090 {
6091 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6092 sparc_emit_probe_stack_range (get_stack_check_protect (),
6093 size - get_stack_check_protect ());
6094 }
6095 else if (size > 0)
6096 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6097 }
6098
6099 if (sparc_save_local_in_regs_p)
6100 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
6101 SORR_SAVE);
6102
6103 if (size == 0)
6104 ; /* do nothing. */
6105 else
6106 {
6107 rtx size_int_rtx, size_rtx;
6108
6109 size_rtx = size_int_rtx = GEN_INT (-size);
6110
6111 /* We establish the frame (i.e. decrement the stack pointer) first, even
6112 if we use a frame pointer, because we cannot clobber any call-saved
6113 registers, including the frame pointer, if we haven't created a new
6114 register save area, for the sake of compatibility with the ABI. */
6115 if (size <= 4096)
6116 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6117 else if (size <= 8192 && !frame_pointer_needed)
6118 {
6119 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6120 RTX_FRAME_RELATED_P (insn) = 1;
6121 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6122 }
6123 else
6124 {
6125 size_rtx = gen_rtx_REG (Pmode, 1);
6126 emit_move_insn (size_rtx, size_int_rtx);
6127 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6128 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6129 gen_stack_pointer_inc (size_int_rtx));
6130 }
6131 RTX_FRAME_RELATED_P (insn) = 1;
6132
6133 /* Ensure nothing is scheduled until after the frame is established. */
6134 emit_insn (gen_blockage ());
6135
6136 if (frame_pointer_needed)
6137 {
6138 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
6139 gen_rtx_MINUS (Pmode,
6140 stack_pointer_rtx,
6141 size_rtx)));
6142 RTX_FRAME_RELATED_P (insn) = 1;
6143
6144 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6145 gen_rtx_SET (hard_frame_pointer_rtx,
6146 plus_constant (Pmode, stack_pointer_rtx,
6147 size)));
6148 }
6149
6150 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6151 {
6152 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
6153 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
6154
6155 insn = emit_move_insn (i7, o7);
6156 RTX_FRAME_RELATED_P (insn) = 1;
6157
6158 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
6159
6160 /* Prevent this instruction from ever being considered dead,
6161 even if this function has no epilogue. */
6162 emit_use (i7);
6163 }
6164 }
6165
6166 if (frame_pointer_needed)
6167 {
6168 sparc_frame_base_reg = hard_frame_pointer_rtx;
6169 sparc_frame_base_offset = SPARC_STACK_BIAS;
6170 }
6171 else
6172 {
6173 sparc_frame_base_reg = stack_pointer_rtx;
6174 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6175 }
6176
6177 if (sparc_n_global_fp_regs > 0)
6178 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6179 sparc_frame_base_offset
6180 - sparc_apparent_frame_size,
6181 SORR_SAVE);
6182
6183 /* Advertise that the data calculated just above are now valid. */
6184 sparc_prologue_data_valid_p = true;
6185 }
6186
6187 /* This function generates the assembly code for function entry, which boils
6188 down to emitting the necessary .register directives. */
6189
6190 static void
6191 sparc_asm_function_prologue (FILE *file)
6192 {
6193 /* Check that the assumption we made in sparc_expand_prologue is valid. */
6194 if (!TARGET_FLAT)
6195 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
6196
6197 sparc_output_scratch_registers (file);
6198 }
6199
6200 /* Expand the function epilogue, either normal or part of a sibcall.
6201 We emit all the instructions except the return or the call. */
6202
6203 void
6204 sparc_expand_epilogue (bool for_eh)
6205 {
6206 HOST_WIDE_INT size = sparc_frame_size;
6207
6208 if (cfun->calls_alloca)
6209 emit_insn (gen_frame_blockage ());
6210
6211 if (sparc_n_global_fp_regs > 0)
6212 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6213 sparc_frame_base_offset
6214 - sparc_apparent_frame_size,
6215 SORR_RESTORE);
6216
6217 if (size == 0 || for_eh)
6218 ; /* do nothing. */
6219 else if (sparc_leaf_function_p)
6220 {
6221 if (size <= 4096)
6222 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6223 else if (size <= 8192)
6224 {
6225 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6226 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6227 }
6228 else
6229 {
6230 rtx reg = gen_rtx_REG (Pmode, 1);
6231 emit_move_insn (reg, GEN_INT (size));
6232 emit_insn (gen_stack_pointer_inc (reg));
6233 }
6234 }
6235 }
6236
6237 /* Expand the function epilogue in the flat window model, either normal or
6238 part of a sibcall; we emit all the instructions except the return or the call.  */
6239
6240 void
6241 sparc_flat_expand_epilogue (bool for_eh)
6242 {
6243 HOST_WIDE_INT size = sparc_frame_size;
6244
6245 if (sparc_n_global_fp_regs > 0)
6246 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6247 sparc_frame_base_offset
6248 - sparc_apparent_frame_size,
6249 SORR_RESTORE);
6250
6251 /* If we have a frame pointer, we'll need both to restore it before the
6252 frame is destroyed and use its current value in destroying the frame.
6253 Since we don't have an atomic way to do that in the flat window model,
6254 we save the current value into a temporary register (%g1). */
6255 if (frame_pointer_needed && !for_eh)
6256 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
6257
6258 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6259 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
6260 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
6261
6262 if (sparc_save_local_in_regs_p)
6263 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
6264 sparc_frame_base_offset,
6265 SORR_RESTORE);
6266
6267 if (size == 0 || for_eh)
6268 ; /* do nothing. */
6269 else if (frame_pointer_needed)
6270 {
6271 /* Make sure the frame is destroyed after everything else is done. */
6272 emit_insn (gen_blockage ());
6273
6274 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
6275 }
6276 else
6277 {
6278 /* Likewise. */
6279 emit_insn (gen_blockage ());
6280
6281 if (size <= 4096)
6282 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6283 else if (size <= 8192)
6284 {
6285 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6286 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6287 }
6288 else
6289 {
6290 rtx reg = gen_rtx_REG (Pmode, 1);
6291 emit_move_insn (reg, GEN_INT (size));
6292 emit_insn (gen_stack_pointer_inc (reg));
6293 }
6294 }
6295 }
6296
6297 /* Return true if it is appropriate to emit `return' instructions in the
6298 body of a function. */
6299
6300 bool
6301 sparc_can_use_return_insn_p (void)
6302 {
6303 return sparc_prologue_data_valid_p
6304 && sparc_n_global_fp_regs == 0
6305 && TARGET_FLAT
6306 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6307 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
6308 }
6309
6310 /* This function generates the assembly code for function exit. */
6311
6312 static void
6313 sparc_asm_function_epilogue (FILE *file)
6314 {
6315 /* If the last two instructions of a function are "call foo; dslot;"
6316 the return address might point to the first instruction in the next
6317 function and we have to output a dummy nop for the sake of sane
6318 backtraces in such cases. This is pointless for sibling calls since
6319 the return address is explicitly adjusted. */
6320
6321 rtx_insn *insn = get_last_insn ();
6322
6323 rtx last_real_insn = prev_real_insn (insn);
6324 if (last_real_insn
6325 && NONJUMP_INSN_P (last_real_insn)
6326 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6327 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6328
6329 if (last_real_insn
6330 && CALL_P (last_real_insn)
6331 && !SIBLING_CALL_P (last_real_insn))
6332 fputs("\tnop\n", file);
6333
6334 sparc_output_deferred_case_vectors ();
6335 }
6336
6337 /* Output a 'restore' instruction. */
6338
6339 static void
6340 output_restore (rtx pat)
6341 {
6342 rtx operands[3];
6343
6344 if (! pat)
6345 {
6346 fputs ("\t restore\n", asm_out_file);
6347 return;
6348 }
6349
6350 gcc_assert (GET_CODE (pat) == SET);
6351
6352 operands[0] = SET_DEST (pat);
6353 pat = SET_SRC (pat);
6354
6355 switch (GET_CODE (pat))
6356 {
6357 case PLUS:
6358 operands[1] = XEXP (pat, 0);
6359 operands[2] = XEXP (pat, 1);
6360 output_asm_insn (" restore %r1, %2, %Y0", operands);
6361 break;
6362 case LO_SUM:
6363 operands[1] = XEXP (pat, 0);
6364 operands[2] = XEXP (pat, 1);
6365 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6366 break;
6367 case ASHIFT:
6368 operands[1] = XEXP (pat, 0);
6369 gcc_assert (XEXP (pat, 1) == const1_rtx);
6370 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6371 break;
6372 default:
6373 operands[1] = pat;
6374 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6375 break;
6376 }
6377 }
6378
6379 /* Output a return. */
6380
6381 const char *
6382 output_return (rtx_insn *insn)
6383 {
6384 if (crtl->calls_eh_return)
6385 {
6386 /* If the function uses __builtin_eh_return, the eh_return
6387 machinery occupies the delay slot. */
6388 gcc_assert (!final_sequence);
6389
6390 if (flag_delayed_branch)
6391 {
6392 if (!TARGET_FLAT && TARGET_V9)
6393 fputs ("\treturn\t%i7+8\n", asm_out_file);
6394 else
6395 {
6396 if (!TARGET_FLAT)
6397 fputs ("\trestore\n", asm_out_file);
6398
6399 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6400 }
6401
6402 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6403 }
6404 else
6405 {
6406 if (!TARGET_FLAT)
6407 fputs ("\trestore\n", asm_out_file);
6408
6409 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6410 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6411 }
6412 }
6413 else if (sparc_leaf_function_p || TARGET_FLAT)
6414 {
6415 /* This is a leaf or flat function so we don't have to bother restoring
6416 the register window, which frees us from dealing with the convoluted
6417 semantics of restore/return. We simply output the jump to the
6418 return address and the insn in the delay slot (if any). */
6419
6420 return "jmp\t%%o7+%)%#";
6421 }
6422 else
6423 {
6424 /* This is a regular function so we have to restore the register window.
6425 We may have a pending insn for the delay slot, which will be either
6426 combined with the 'restore' instruction or put in the delay slot of
6427 the 'return' instruction. */
6428
6429 if (final_sequence)
6430 {
6431 rtx_insn *delay;
6432 rtx pat;
6433
6434 delay = NEXT_INSN (insn);
6435 gcc_assert (delay);
6436
6437 pat = PATTERN (delay);
6438
6439 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6440 {
6441 epilogue_renumber (&pat, 0);
6442 return "return\t%%i7+%)%#";
6443 }
6444 else
6445 {
6446 output_asm_insn ("jmp\t%%i7+%)", NULL);
6447
6448 /* We're going to output the insn in the delay slot manually.
6449 Make sure to output its source location first. */
6450 PATTERN (delay) = gen_blockage ();
6451 INSN_CODE (delay) = -1;
6452 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6453 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6454
6455 output_restore (pat);
6456 }
6457 }
6458 else
6459 {
6460 /* The delay slot is empty. */
6461 if (TARGET_V9)
6462 return "return\t%%i7+%)\n\t nop";
6463 else if (flag_delayed_branch)
6464 return "jmp\t%%i7+%)\n\t restore";
6465 else
6466 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6467 }
6468 }
6469
6470 return "";
6471 }
6472
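/* For illustration, typical sequences produced above are (with %) being the
   return offset, usually 8):

     leaf or flat function:	jmp	%o7+8
				 <delay slot insn or nop>

     regular function, V9:	return	%i7+8
				 <delay slot insn or nop>

     regular function, pre-V9:	jmp	%i7+8
				 restore

   plus the __builtin_eh_return variants, which also adjust %sp by %g1.  */
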
6473 /* Output a sibling call. */
6474
6475 const char *
6476 output_sibcall (rtx_insn *insn, rtx call_operand)
6477 {
6478 rtx operands[1];
6479
6480 gcc_assert (flag_delayed_branch);
6481
6482 operands[0] = call_operand;
6483
6484 if (sparc_leaf_function_p || TARGET_FLAT)
6485 {
6486 /* This is a leaf or flat function so we don't have to bother restoring
6487 the register window. We simply output the jump to the function and
6488 the insn in the delay slot (if any). */
6489
6490 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6491
6492 if (final_sequence)
6493 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6494 operands);
6495 else
6496 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6497 it into a branch if possible.  */
6498 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6499 operands);
6500 }
6501 else
6502 {
6503 /* This is a regular function so we have to restore the register window.
6504 We may have a pending insn for the delay slot, which will be combined
6505 with the 'restore' instruction. */
6506
6507 output_asm_insn ("call\t%a0, 0", operands);
6508
6509 if (final_sequence)
6510 {
6511 rtx_insn *delay;
6512 rtx pat;
6513
6514 delay = NEXT_INSN (insn);
6515 gcc_assert (delay);
6516
6517 pat = PATTERN (delay);
6518
6519 /* We're going to output the insn in the delay slot manually.
6520 Make sure to output its source location first. */
6521 PATTERN (delay) = gen_blockage ();
6522 INSN_CODE (delay) = -1;
6523 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6524 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6525
6526 output_restore (pat);
6527 }
6528 else
6529 output_restore (NULL_RTX);
6530 }
6531
6532 return "";
6533 }
6534 \f
6535 /* Functions for handling argument passing.
6536
6537 For 32-bit, the first 6 args are normally in registers and the rest are
6538 pushed. Any arg that starts within the first 6 words is at least
6539 partially passed in a register unless its data type forbids it.
6540
6541 For 64-bit, the argument registers are laid out as an array of 16 elements
6542 and arguments are added sequentially. The first 6 int args and up to the
6543 first 16 fp args (depending on size) are passed in regs.
6544
6545 Slot Stack Integral Float Float in structure Double Long Double
6546 ---- ----- -------- ----- ------------------ ------ -----------
6547 15 [SP+248] %f31 %f30,%f31 %d30
6548 14 [SP+240] %f29 %f28,%f29 %d28 %q28
6549 13 [SP+232] %f27 %f26,%f27 %d26
6550 12 [SP+224] %f25 %f24,%f25 %d24 %q24
6551 11 [SP+216] %f23 %f22,%f23 %d22
6552 10 [SP+208] %f21 %f20,%f21 %d20 %q20
6553 9 [SP+200] %f19 %f18,%f19 %d18
6554 8 [SP+192] %f17 %f16,%f17 %d16 %q16
6555 7 [SP+184] %f15 %f14,%f15 %d14
6556 6 [SP+176] %f13 %f12,%f13 %d12 %q12
6557 5 [SP+168] %o5 %f11 %f10,%f11 %d10
6558 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
6559 3 [SP+152] %o3 %f7 %f6,%f7 %d6
6560 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
6561 1 [SP+136] %o1 %f3 %f2,%f3 %d2
6562 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
6563
6564 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
6565
6566 Integral arguments are always passed as 64-bit quantities appropriately
6567 extended.
6568
6569 Passing of floating point values is handled as follows.
6570 If a prototype is in scope:
6571 If the value is in a named argument (i.e. not a stdarg function or a
6572 value not part of the `...') then the value is passed in the appropriate
6573 fp reg.
6574 If the value is part of the `...' and is passed in one of the first 6
6575 slots then the value is passed in the appropriate int reg.
6576 If the value is part of the `...' and is not passed in one of the first 6
6577 slots then the value is passed in memory.
6578 If a prototype is not in scope:
6579 If the value is one of the first 6 arguments the value is passed in the
6580 appropriate integer reg and the appropriate fp reg.
6581 If the value is not one of the first 6 arguments the value is passed in
6582 the appropriate fp reg and in memory.
6583
6584
6585 Summary of the calling conventions implemented by GCC on the SPARC:
6586
6587 32-bit ABI:
6588 size argument return value
6589
6590 small integer <4 int. reg. int. reg.
6591 word 4 int. reg. int. reg.
6592 double word 8 int. reg. int. reg.
6593
6594 _Complex small integer <8 int. reg. int. reg.
6595 _Complex word 8 int. reg. int. reg.
6596 _Complex double word 16 memory int. reg.
6597
6598 vector integer <=8 int. reg. FP reg.
6599 vector integer >8 memory memory
6600
6601 float 4 int. reg. FP reg.
6602 double 8 int. reg. FP reg.
6603 long double 16 memory memory
6604
6605 _Complex float 8 memory FP reg.
6606 _Complex double 16 memory FP reg.
6607 _Complex long double 32 memory FP reg.
6608
6609 vector float any memory memory
6610
6611 aggregate any memory memory
6612
6613
6614
6615 64-bit ABI:
6616 size argument return value
6617
6618 small integer <8 int. reg. int. reg.
6619 word 8 int. reg. int. reg.
6620 double word 16 int. reg. int. reg.
6621
6622 _Complex small integer <16 int. reg. int. reg.
6623 _Complex word 16 int. reg. int. reg.
6624 _Complex double word 32 memory int. reg.
6625
6626 vector integer <=16 FP reg. FP reg.
6627 vector integer 16<s<=32 memory FP reg.
6628 vector integer >32 memory memory
6629
6630 float 4 FP reg. FP reg.
6631 double 8 FP reg. FP reg.
6632 long double 16 FP reg. FP reg.
6633
6634 _Complex float 8 FP reg. FP reg.
6635 _Complex double 16 FP reg. FP reg.
6636 _Complex long double 32 memory FP reg.
6637
6638 vector float <=16 FP reg. FP reg.
6639 vector float 16<s<=32 memory FP reg.
6640 vector float >32 memory memory
6641
6642 aggregate <=16 reg. reg.
6643 aggregate 16<s<=32 memory reg.
6644 aggregate >32 memory memory
6645
6646
6647
6648 Note #1: complex floating-point types follow the extended SPARC ABIs as
6649 implemented by the Sun compiler.
6650
6651 Note #2: integral vector types follow the scalar floating-point types
6652 conventions to match what is implemented by the Sun VIS SDK.
6653
6654 Note #3: floating-point vector types follow the aggregate types
6655 conventions. */
6656
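/* As an illustration of the 64-bit table above, for a hypothetical
   prototyped call

     double f (int i, double d, struct { float x; float y; } s);

   the arguments land in slots 0, 1 and 2 respectively, i.e. i is passed in
   %o0 (sign-extended to 64 bits), d in %d2, the two float fields of s in
   %f4 and %f5, and the double result comes back in %d0.  */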
6657
6658 /* Maximum number of int regs for args. */
6659 #define SPARC_INT_ARG_MAX 6
6660 /* Maximum number of fp regs for args. */
6661 #define SPARC_FP_ARG_MAX 16
6662 /* Number of words (partially) occupied for a given size in units. */
6663 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
6664
6665 /* Handle the INIT_CUMULATIVE_ARGS macro.
6666 Initialize a variable CUM of type CUMULATIVE_ARGS
6667 for a call to a function whose data type is FNTYPE.
6668 For a library call, FNTYPE is 0. */
6669
6670 void
6671 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6672 {
6673 cum->words = 0;
6674 cum->prototype_p = fntype && prototype_p (fntype);
6675 cum->libcall_p = !fntype;
6676 }
6677
6678 /* Handle promotion of pointer and integer arguments. */
6679
6680 static machine_mode
6681 sparc_promote_function_mode (const_tree type, machine_mode mode,
6682 int *punsignedp, const_tree, int)
6683 {
6684 if (type && POINTER_TYPE_P (type))
6685 {
6686 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6687 return Pmode;
6688 }
6689
6690 /* Integral arguments are passed as full words, as per the ABI. */
6691 if (GET_MODE_CLASS (mode) == MODE_INT
6692 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6693 return word_mode;
6694
6695 return mode;
6696 }
6697
6698 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6699
6700 static bool
6701 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6702 {
6703 return TARGET_ARCH64 ? true : false;
6704 }
6705
6706 /* Traverse the record TYPE recursively and call FUNC on its fields.
6707 NAMED is true if this is for a named parameter. DATA is passed
6708 to FUNC for each field. OFFSET is the starting position and
6709 PACKED is true if we are inside a packed record. */
6710
6711 template <typename T, void Func (const_tree, HOST_WIDE_INT, bool, T*)>
6712 static void
6713 traverse_record_type (const_tree type, bool named, T *data,
6714 HOST_WIDE_INT offset = 0, bool packed = false)
6715 {
6716 /* The ABI obviously doesn't specify how packed structures are passed.
6717 These are passed in integer regs if possible, otherwise memory. */
6718 if (!packed)
6719 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6720 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6721 {
6722 packed = true;
6723 break;
6724 }
6725
6726 /* Walk the real fields, but skip those with no size or a zero size.
6727 ??? Fields with variable offset are handled as having zero offset. */
6728 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6729 if (TREE_CODE (field) == FIELD_DECL)
6730 {
6731 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6732 continue;
6733
6734 HOST_WIDE_INT bitpos = offset;
6735 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6736 bitpos += int_bit_position (field);
6737
6738 tree field_type = TREE_TYPE (field);
6739 if (TREE_CODE (field_type) == RECORD_TYPE)
6740 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6741 packed);
6742 else
6743 {
6744 const bool fp_type
6745 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6746 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6747 data);
6748 }
6749 }
6750 }
6751
6752 /* Handle recursive register classifying for structure layout. */
6753
6754 typedef struct
6755 {
6756 bool fp_regs; /* true if field eligible to FP registers. */
6757 bool fp_regs_in_first_word; /* true if such field in first word. */
6758 } classify_data_t;
6759
6760 /* A subroutine of function_arg_slotno. Classify the field. */
6761
6762 inline void
6763 classify_registers (const_tree, HOST_WIDE_INT bitpos, bool fp,
6764 classify_data_t *data)
6765 {
6766 if (fp)
6767 {
6768 data->fp_regs = true;
6769 if (bitpos < BITS_PER_WORD)
6770 data->fp_regs_in_first_word = true;
6771 }
6772 }
6773
6774 /* Compute the slot number to pass an argument in.
6775 Return the slot number or -1 if passing on the stack.
6776
6777 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6778 the preceding args and about the function being called.
6779 MODE is the argument's machine mode.
6780 TYPE is the data type of the argument (as a tree).
6781 This is null for libcalls where that information may
6782 not be available.
6783 NAMED is nonzero if this argument is a named parameter
6784 (otherwise it is an extra parameter matching an ellipsis).
6785 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6786 *PREGNO records the register number to use if scalar type.
6787 *PPADDING records the amount of padding needed in words. */
6788
6789 static int
6790 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6791 const_tree type, bool named, bool incoming,
6792 int *pregno, int *ppadding)
6793 {
6794 int regbase = (incoming
6795 ? SPARC_INCOMING_INT_ARG_FIRST
6796 : SPARC_OUTGOING_INT_ARG_FIRST);
6797 int slotno = cum->words;
6798 enum mode_class mclass;
6799 int regno;
6800
6801 *ppadding = 0;
6802
6803 if (type && TREE_ADDRESSABLE (type))
6804 return -1;
6805
6806 if (TARGET_ARCH32
6807 && mode == BLKmode
6808 && type
6809 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
6810 return -1;
6811
6812 /* For SPARC64, objects requiring 16-byte alignment get it. */
6813 if (TARGET_ARCH64
6814 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6815 && (slotno & 1) != 0)
6816 slotno++, *ppadding = 1;
6817
6818 mclass = GET_MODE_CLASS (mode);
6819 if (type && TREE_CODE (type) == VECTOR_TYPE)
6820 {
6821 /* Vector types deserve special treatment because they are
6822 polymorphic wrt their mode, depending upon whether VIS
6823 instructions are enabled. */
6824 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6825 {
6826 /* The SPARC port defines no floating-point vector modes. */
6827 gcc_assert (mode == BLKmode);
6828 }
6829 else
6830 {
6831 /* Integral vector types should either have a vector
6832 mode or an integral mode, because we are guaranteed
6833 by pass_by_reference that their size is not greater
6834 than 16 bytes and TImode is 16-byte wide. */
6835 gcc_assert (mode != BLKmode);
6836
6837 /* Vector integers are handled like floats according to
6838 the Sun VIS SDK. */
6839 mclass = MODE_FLOAT;
6840 }
6841 }
6842
6843 switch (mclass)
6844 {
6845 case MODE_FLOAT:
6846 case MODE_COMPLEX_FLOAT:
6847 case MODE_VECTOR_INT:
6848 if (TARGET_ARCH64 && TARGET_FPU && named)
6849 {
6850 /* If all arg slots are filled, then must pass on stack. */
6851 if (slotno >= SPARC_FP_ARG_MAX)
6852 return -1;
6853
6854 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6855 /* Arguments filling only one single FP register are
6856 right-justified in the outer double FP register. */
6857 if (GET_MODE_SIZE (mode) <= 4)
6858 regno++;
6859 break;
6860 }
6861 /* fallthrough */
6862
6863 case MODE_INT:
6864 case MODE_COMPLEX_INT:
6865 /* If all arg slots are filled, then must pass on stack. */
6866 if (slotno >= SPARC_INT_ARG_MAX)
6867 return -1;
6868
6869 regno = regbase + slotno;
6870 break;
6871
6872 case MODE_RANDOM:
6873 if (mode == VOIDmode)
6874 /* MODE is VOIDmode when generating the actual call. */
6875 return -1;
6876
6877 gcc_assert (mode == BLKmode);
6878
6879 if (TARGET_ARCH32
6880 || !type
6881 || (TREE_CODE (type) != RECORD_TYPE
6882 && TREE_CODE (type) != VECTOR_TYPE))
6883 {
6884 /* If all arg slots are filled, then must pass on stack. */
6885 if (slotno >= SPARC_INT_ARG_MAX)
6886 return -1;
6887
6888 regno = regbase + slotno;
6889 }
6890 else /* TARGET_ARCH64 && type */
6891 {
6892 /* If all arg slots are filled, then must pass on stack. */
6893 if (slotno >= SPARC_FP_ARG_MAX)
6894 return -1;
6895
6896 if (TREE_CODE (type) == RECORD_TYPE)
6897 {
6898 classify_data_t data = { false, false };
6899 traverse_record_type<classify_data_t, classify_registers>
6900 (type, named, &data);
6901
6902 if (data.fp_regs)
6903 {
6904 /* If all FP slots are filled except for the last one and
6905 there is no FP field in the first word, then must pass
6906 on stack. */
6907 if (slotno >= SPARC_FP_ARG_MAX - 1
6908 && !data.fp_regs_in_first_word)
6909 return -1;
6910 }
6911 else
6912 {
6913 /* If all int slots are filled, then must pass on stack. */
6914 if (slotno >= SPARC_INT_ARG_MAX)
6915 return -1;
6916 }
6917 }
6918
6919 /* PREGNO isn't set since both int and FP regs can be used. */
6920 return slotno;
6921 }
6922 break;
6923
6924 default :
6925 gcc_unreachable ();
6926 }
6927
6928 *pregno = regno;
6929 return slotno;
6930 }
6931
6932 /* Handle recursive register counting/assigning for structure layout. */
6933
6934 typedef struct
6935 {
6936 int slotno; /* slot number of the argument. */
6937 int regbase; /* regno of the base register. */
6938 int intoffset; /* offset of the first pending integer field. */
6939 int nregs; /* number of words passed in registers. */
6940 bool stack; /* true if part of the argument is on the stack. */
6941 rtx ret; /* return expression being built. */
6942 } assign_data_t;
6943
6944 /* A subroutine of function_arg_record_value. Compute the number of integer
6945 registers to be assigned between PARMS->intoffset and BITPOS. Return
6946 true if at least one integer register is assigned or false otherwise. */
6947
6948 static bool
6949 compute_int_layout (HOST_WIDE_INT bitpos, assign_data_t *data, int *pnregs)
6950 {
6951 if (data->intoffset < 0)
6952 return false;
6953
6954 const int intoffset = data->intoffset;
6955 data->intoffset = -1;
6956
6957 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6958 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
6959 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
6960 int nregs = (endbit - startbit) / BITS_PER_WORD;
6961
6962 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
6963 {
6964 nregs = SPARC_INT_ARG_MAX - this_slotno;
6965
6966 /* We need to pass this field (partly) on the stack. */
6967 data->stack = 1;
6968 }
6969
6970 if (nregs <= 0)
6971 return false;
6972
6973 *pnregs = nregs;
6974 return true;
6975 }
6976
6977 /* A subroutine of function_arg_record_value. Compute the number and the mode
6978 of the FP registers to be assigned for FIELD. Return true if at least one
6979 FP register is assigned or false otherwise. */
6980
6981 static bool
6982 compute_fp_layout (const_tree field, HOST_WIDE_INT bitpos,
6983 assign_data_t *data,
6984 int *pnregs, machine_mode *pmode)
6985 {
6986 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
6987 machine_mode mode = DECL_MODE (field);
6988 int nregs, nslots;
6989
6990 /* Slots are counted as words while regs are counted as having the size of
6991 the (inner) mode. */
6992 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE && mode == BLKmode)
6993 {
6994 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6995 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6996 }
6997 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6998 {
6999 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7000 nregs = 2;
7001 }
7002 else
7003 nregs = 1;
7004
7005 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
7006
7007 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
7008 {
7009 nslots = SPARC_FP_ARG_MAX - this_slotno;
7010 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
7011
7012 /* We need to pass this field (partly) on the stack. */
7013 data->stack = 1;
7014
7015 if (nregs <= 0)
7016 return false;
7017 }
7018
7019 *pnregs = nregs;
7020 *pmode = mode;
7021 return true;
7022 }
7023
7024 /* A subroutine of function_arg_record_value. Count the number of registers
7025 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
7026
7027 inline void
7028 count_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
7029 assign_data_t *data)
7030 {
7031 if (fp)
7032 {
7033 int nregs;
7034 machine_mode mode;
7035
7036 if (compute_int_layout (bitpos, data, &nregs))
7037 data->nregs += nregs;
7038
7039 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
7040 data->nregs += nregs;
7041 }
7042 else
7043 {
7044 if (data->intoffset < 0)
7045 data->intoffset = bitpos;
7046 }
7047 }
7048
7049 /* A subroutine of function_arg_record_value. Assign the bits of the
7050 structure between PARMS->intoffset and BITPOS to integer registers. */
7051
7052 static void
7053 assign_int_registers (HOST_WIDE_INT bitpos, assign_data_t *data)
7054 {
7055 int intoffset = data->intoffset;
7056 machine_mode mode;
7057 int nregs;
7058
7059 if (!compute_int_layout (bitpos, data, &nregs))
7060 return;
7061
7062 /* If this is the trailing part of a word, only load that much into
7063 the register. Otherwise load the whole register. Note that in
7064 the latter case we may pick up unwanted bits. It's not a problem
7065 at the moment, but we may wish to revisit this.  */
7066 if (intoffset % BITS_PER_WORD != 0)
7067 mode = smallest_int_mode_for_size (BITS_PER_WORD
7068 - intoffset % BITS_PER_WORD);
7069 else
7070 mode = word_mode;
7071
7072 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7073 unsigned int regno = data->regbase + this_slotno;
7074 intoffset /= BITS_PER_UNIT;
7075
7076 do
7077 {
7078 rtx reg = gen_rtx_REG (mode, regno);
7079 XVECEXP (data->ret, 0, data->stack + data->nregs)
7080 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7081 data->nregs += 1;
7082 mode = word_mode;
7083 regno += 1;
7084 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
7085 }
7086 while (--nregs > 0);
7087 }
7088
7089 /* A subroutine of function_arg_record_value. Assign FIELD at position
7090 BITPOS to FP registers. */
7091
7092 static void
7093 assign_fp_registers (const_tree field, HOST_WIDE_INT bitpos,
7094 assign_data_t *data)
7095 {
7096 int nregs;
7097 machine_mode mode;
7098
7099 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
7100 return;
7101
7102 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7103 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
7104 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
7105 regno++;
7106 int pos = bitpos / BITS_PER_UNIT;
7107
7108 do
7109 {
7110 rtx reg = gen_rtx_REG (mode, regno);
7111 XVECEXP (data->ret, 0, data->stack + data->nregs)
7112 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
7113 data->nregs += 1;
7114 regno += GET_MODE_SIZE (mode) / 4;
7115 pos += GET_MODE_SIZE (mode);
7116 }
7117 while (--nregs > 0);
7118 }
7119
7120 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
7121 the structure between PARMS->intoffset and BITPOS to registers. */
7122
7123 inline void
7124 assign_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
7125 assign_data_t *data)
7126 {
7127 if (fp)
7128 {
7129 assign_int_registers (bitpos, data);
7130
7131 assign_fp_registers (field, bitpos, data);
7132 }
7133 else
7134 {
7135 if (data->intoffset < 0)
7136 data->intoffset = bitpos;
7137 }
7138 }
7139
7140 /* Used by function_arg and sparc_function_value_1 to implement the complex
7141 conventions of the 64-bit ABI for passing and returning structures.
7142 Return an expression valid as a return value for the FUNCTION_ARG
7143 and TARGET_FUNCTION_VALUE.
7144
7145 TYPE is the data type of the argument (as a tree).
7146 This is null for libcalls where that information may
7147 not be available.
7148 MODE is the argument's machine mode.
7149 SLOTNO is the index number of the argument's slot in the parameter array.
7150 NAMED is true if this argument is a named parameter
7151 (otherwise it is an extra parameter matching an ellipsis).
7152 REGBASE is the regno of the base register for the parameter array. */
7153
7154 static rtx
7155 function_arg_record_value (const_tree type, machine_mode mode,
7156 int slotno, bool named, int regbase)
7157 {
7158 HOST_WIDE_INT typesize = int_size_in_bytes (type);
7159 assign_data_t data;
7160 int nregs;
7161
7162 data.slotno = slotno;
7163 data.regbase = regbase;
7164
7165 /* Count how many registers we need. */
7166 data.nregs = 0;
7167 data.intoffset = 0;
7168 data.stack = false;
7169 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
7170
7171 /* Take into account pending integer fields. */
7172 if (compute_int_layout (typesize * BITS_PER_UNIT, &data, &nregs))
7173 data.nregs += nregs;
7174
7175 /* Allocate the vector and handle some annoying special cases. */
7176 nregs = data.nregs;
7177
7178 if (nregs == 0)
7179 {
7180 /* ??? Empty structure has no value? Duh? */
7181 if (typesize <= 0)
7182 {
7183 /* Though there's nothing really to store, return a word register
7184 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
7185 leads to breakage due to the fact that there are zero bytes to
7186 load. */
7187 return gen_rtx_REG (mode, regbase);
7188 }
7189
7190 /* ??? C++ has structures with no fields, and yet a size. Give up
7191 for now and pass everything back in integer registers. */
7192 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7193 if (nregs + slotno > SPARC_INT_ARG_MAX)
7194 nregs = SPARC_INT_ARG_MAX - slotno;
7195 }
7196
7197 gcc_assert (nregs > 0);
7198
7199 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
7200
7201 /* If at least one field must be passed on the stack, generate
7202 (parallel [(expr_list (nil) ...) ...]) so that all fields will
7203 also be passed on the stack. We can't do much better because the
7204 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
7205 of structures for which the fields passed exclusively in registers
7206 are not at the beginning of the structure. */
7207 if (data.stack)
7208 XVECEXP (data.ret, 0, 0)
7209 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7210
7211 /* Assign the registers. */
7212 data.nregs = 0;
7213 data.intoffset = 0;
7214 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
7215
7216 /* Assign pending integer fields. */
7217 assign_int_registers (typesize * BITS_PER_UNIT, &data);
7218
7219 gcc_assert (data.nregs == nregs);
7220
7221 return data.ret;
7222 }
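/* A sketch of the result for a hypothetical 16-byte structure passed in
   slot 0, e.g. struct { double d; long l; }: the double field is assigned
   to an FP register and the integer field to the next int register, giving
   roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
		(expr_list (reg:DI %o1) (const_int 8))])

   where the constants are the byte offsets of the pieces in memory.  If
   part of the record spills to the stack, an extra leading
   (expr_list (nil) (const_int 0)) is prepended as described above.  */
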
7223
7224 /* Used by function_arg and sparc_function_value_1 to implement the conventions
7225 of the 64-bit ABI for passing and returning unions.
7226 Return an expression valid as a return value for the FUNCTION_ARG
7227 and TARGET_FUNCTION_VALUE.
7228
7229 SIZE is the size in bytes of the union.
7230 MODE is the argument's machine mode.
7231 REGNO is the hard register the union will be passed in. */
7232
7233 static rtx
7234 function_arg_union_value (int size, machine_mode mode, int slotno,
7235 int regno)
7236 {
7237 int nwords = CEIL_NWORDS (size), i;
7238 rtx regs;
7239
7240 /* See comment in previous function for empty structures. */
7241 if (nwords == 0)
7242 return gen_rtx_REG (mode, regno);
7243
7244 if (slotno == SPARC_INT_ARG_MAX - 1)
7245 nwords = 1;
7246
7247 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
7248
7249 for (i = 0; i < nwords; i++)
7250 {
7251 /* Unions are passed left-justified. */
7252 XVECEXP (regs, 0, i)
7253 = gen_rtx_EXPR_LIST (VOIDmode,
7254 gen_rtx_REG (word_mode, regno),
7255 GEN_INT (UNITS_PER_WORD * i));
7256 regno++;
7257 }
7258
7259 return regs;
7260 }
7261
7262 /* Used by function_arg and sparc_function_value_1 to implement the conventions
7263 for passing and returning BLKmode vectors.
7264 Return an expression valid as a return value for the FUNCTION_ARG
7265 and TARGET_FUNCTION_VALUE.
7266
7267 SIZE is the size in bytes of the vector.
7268 REGNO is the FP hard register the vector will be passed in. */
7269
7270 static rtx
7271 function_arg_vector_value (int size, int regno)
7272 {
7273 const int nregs = MAX (1, size / 8);
7274 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
7275
7276 if (size < 8)
7277 XVECEXP (regs, 0, 0)
7278 = gen_rtx_EXPR_LIST (VOIDmode,
7279 gen_rtx_REG (SImode, regno),
7280 const0_rtx);
7281 else
7282 for (int i = 0; i < nregs; i++)
7283 XVECEXP (regs, 0, i)
7284 = gen_rtx_EXPR_LIST (VOIDmode,
7285 gen_rtx_REG (DImode, regno + 2*i),
7286 GEN_INT (i*8));
7287
7288 return regs;
7289 }
7290
7291 /* Determine where to put an argument to a function.
7292 Value is zero to push the argument on the stack,
7293 or a hard register in which to store the argument.
7294
7295 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7296 the preceding args and about the function being called.
7297 MODE is the argument's machine mode.
7298 TYPE is the data type of the argument (as a tree).
7299 This is null for libcalls where that information may
7300 not be available.
7301 NAMED is true if this argument is a named parameter
7302 (otherwise it is an extra parameter matching an ellipsis).
7303 INCOMING is false for TARGET_FUNCTION_ARG, true for
7304 TARGET_FUNCTION_INCOMING_ARG. */
7305
7306 static rtx
7307 sparc_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
7308 const_tree type, bool named, bool incoming)
7309 {
7310 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7311
7312 int regbase = (incoming
7313 ? SPARC_INCOMING_INT_ARG_FIRST
7314 : SPARC_OUTGOING_INT_ARG_FIRST);
7315 int slotno, regno, padding;
7316 enum mode_class mclass = GET_MODE_CLASS (mode);
7317
7318 slotno = function_arg_slotno (cum, mode, type, named, incoming,
7319 &regno, &padding);
7320 if (slotno == -1)
7321 return 0;
7322
7323 /* Vector types deserve special treatment because they are polymorphic wrt
7324 their mode, depending upon whether VIS instructions are enabled. */
7325 if (type && TREE_CODE (type) == VECTOR_TYPE)
7326 {
7327 HOST_WIDE_INT size = int_size_in_bytes (type);
7328 gcc_assert ((TARGET_ARCH32 && size <= 8)
7329 || (TARGET_ARCH64 && size <= 16));
7330
7331 if (mode == BLKmode)
7332 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST + 2*slotno);
7333
7334 mclass = MODE_FLOAT;
7335 }
7336
7337 if (TARGET_ARCH32)
7338 return gen_rtx_REG (mode, regno);
7339
7340 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7341 and are promoted to registers if possible. */
7342 if (type && TREE_CODE (type) == RECORD_TYPE)
7343 {
7344 HOST_WIDE_INT size = int_size_in_bytes (type);
7345 gcc_assert (size <= 16);
7346
7347 return function_arg_record_value (type, mode, slotno, named, regbase);
7348 }
7349
7350 /* Unions up to 16 bytes in size are passed in integer registers. */
7351 else if (type && TREE_CODE (type) == UNION_TYPE)
7352 {
7353 HOST_WIDE_INT size = int_size_in_bytes (type);
7354 gcc_assert (size <= 16);
7355
7356 return function_arg_union_value (size, mode, slotno, regno);
7357 }
7358
7359 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
7360 but also have the slot allocated for them.
7361 If no prototype is in scope fp values in register slots get passed
7362 in two places, either fp regs and int regs or fp regs and memory. */
7363 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7364 && SPARC_FP_REG_P (regno))
7365 {
7366 rtx reg = gen_rtx_REG (mode, regno);
7367 if (cum->prototype_p || cum->libcall_p)
7368 return reg;
7369 else
7370 {
7371 rtx v0, v1;
7372
7373 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7374 {
7375 int intreg;
7376
7377 /* For an incoming argument, we need not expose that the value is
7378 passed in both %f0 and %i0; doing so confuses other parts of the
7379 compiler and causes needless spilling even in the simplest cases. */
7380 if (incoming)
7381 return reg;
7382
7383 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7384 + (regno - SPARC_FP_ARG_FIRST) / 2);
7385
7386 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7387 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7388 const0_rtx);
7389 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7390 }
7391 else
7392 {
7393 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7394 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7395 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7396 }
7397 }
7398 }
7399
7400 /* All other aggregate types are passed in an integer register in a mode
7401 corresponding to the size of the type. */
7402 else if (type && AGGREGATE_TYPE_P (type))
7403 {
7404 HOST_WIDE_INT size = int_size_in_bytes (type);
7405 gcc_assert (size <= 16);
7406
7407 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7408 }
7409
7410 return gen_rtx_REG (mode, regno);
7411 }
7412
7413 /* Handle the TARGET_FUNCTION_ARG target hook. */
7414
7415 static rtx
7416 sparc_function_arg (cumulative_args_t cum, machine_mode mode,
7417 const_tree type, bool named)
7418 {
7419 return sparc_function_arg_1 (cum, mode, type, named, false);
7420 }
7421
7422 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7423
7424 static rtx
7425 sparc_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
7426 const_tree type, bool named)
7427 {
7428 return sparc_function_arg_1 (cum, mode, type, named, true);
7429 }
7430
7431 /* For SPARC64, objects requiring 16-byte alignment are passed that way. */
7432
7433 static unsigned int
7434 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7435 {
7436 return ((TARGET_ARCH64
7437 && (GET_MODE_ALIGNMENT (mode) == 128
7438 || (type && TYPE_ALIGN (type) == 128)))
7439 ? 128
7440 : PARM_BOUNDARY);
7441 }
7442
7443 /* For an arg passed partly in registers and partly in memory,
7444 this is the number of bytes of registers used.
7445 For args passed entirely in registers or entirely in memory, zero.
7446
7447 Any arg that starts in the first 6 regs but won't entirely fit in them
7448 needs partial registers on v8. On v9, structures with integer
7449 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7450 values that begin in the last fp reg [where "last fp reg" varies with the
7451 mode] will be split between that reg and memory. */
7452
7453 static int
7454 sparc_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
7455 tree type, bool named)
7456 {
7457 int slotno, regno, padding;
7458
7459 /* We pass false for incoming here; it doesn't matter. */
7460 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
7461 false, &regno, &padding);
7462
7463 if (slotno == -1)
7464 return 0;
7465
7466 if (TARGET_ARCH32)
7467 {
7468 if ((slotno + (mode == BLKmode
7469 ? CEIL_NWORDS (int_size_in_bytes (type))
7470 : CEIL_NWORDS (GET_MODE_SIZE (mode))))
7471 > SPARC_INT_ARG_MAX)
7472 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
7473 }
7474 else
7475 {
7476 /* We are guaranteed by pass_by_reference that the size of the
7477 argument is not greater than 16 bytes, so we only need to return
7478 one word if the argument is partially passed in registers. */
7479
7480 if (type && AGGREGATE_TYPE_P (type))
7481 {
7482 int size = int_size_in_bytes (type);
7483
7484 if (size > UNITS_PER_WORD
7485 && (slotno == SPARC_INT_ARG_MAX - 1
7486 || slotno == SPARC_FP_ARG_MAX - 1))
7487 return UNITS_PER_WORD;
7488 }
7489 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
7490 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7491 && ! (TARGET_FPU && named)))
7492 {
7493 /* The complex types are passed as packed types. */
7494 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7495 && slotno == SPARC_INT_ARG_MAX - 1)
7496 return UNITS_PER_WORD;
7497 }
7498 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7499 {
7500 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
7501 > SPARC_FP_ARG_MAX)
7502 return UNITS_PER_WORD;
7503 }
7504 }
7505
7506 return 0;
7507 }
7508
7509 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
7510 Specify whether to pass the argument by reference. */
7511
7512 static bool
7513 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
7514 machine_mode mode, const_tree type,
7515 bool named ATTRIBUTE_UNUSED)
7516 {
7517 if (TARGET_ARCH32)
7518 /* Original SPARC 32-bit ABI says that structures and unions,
7519 and quad-precision floats are passed by reference. For Pascal,
7520 also pass arrays by reference. All other base types are passed
7521 in registers.
7522
7523 Extended ABI (as implemented by the Sun compiler) says that all
7524 complex floats are passed by reference. Pass complex integers
7525 in registers up to 8 bytes. More generally, enforce the 2-word
7526 cap for passing arguments in registers.
7527
7528 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7529 integers are passed like floats of the same size, that is in
7530 registers up to 8 bytes. Pass all vector floats by reference
7531 like structures and unions. */
7532 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7533 || mode == SCmode
7534 /* Catch CDImode, TFmode, DCmode and TCmode. */
7535 || GET_MODE_SIZE (mode) > 8
7536 || (type
7537 && TREE_CODE (type) == VECTOR_TYPE
7538 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7539 else
7540 /* Original SPARC 64-bit ABI says that structures and unions
7541 smaller than 16 bytes are passed in registers, as well as
7542 all other base types.
7543
7544 Extended ABI (as implemented by the Sun compiler) says that
7545 complex floats are passed in registers up to 16 bytes. Pass
7546 all complex integers in registers up to 16 bytes. More generally,
7547 enforce the 2-word cap for passing arguments in registers.
7548
7549 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7550 integers are passed like floats of the same size, that is in
7551 registers (up to 16 bytes). Pass all vector floats like structures
7552 and unions. */
7553 return ((type
7554 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
7555 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
7556 /* Catch CTImode and TCmode. */
7557 || GET_MODE_SIZE (mode) > 16);
7558 }
7559
7560 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7561 Update the data in CUM to advance over an argument
7562 of mode MODE and data type TYPE.
7563 TYPE is null for libcalls where that information may not be available. */
7564
7565 static void
7566 sparc_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
7567 const_tree type, bool named)
7568 {
7569 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7570 int regno, padding;
7571
7572 /* We pass false for incoming here; it doesn't matter. */
7573 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7574
7575 /* If argument requires leading padding, add it. */
7576 cum->words += padding;
7577
7578 if (TARGET_ARCH32)
7579 cum->words += (mode == BLKmode
7580 ? CEIL_NWORDS (int_size_in_bytes (type))
7581 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7582 else
7583 {
7584 if (type && AGGREGATE_TYPE_P (type))
7585 {
7586 int size = int_size_in_bytes (type);
7587
7588 if (size <= 8)
7589 ++cum->words;
7590 else if (size <= 16)
7591 cum->words += 2;
7592 else /* passed by reference */
7593 ++cum->words;
7594 }
7595 else
7596 cum->words += (mode == BLKmode
7597 ? CEIL_NWORDS (int_size_in_bytes (type))
7598 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7599 }
7600 }
7601
7602 /* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI, structs
7603 are always stored left-justified in their argument slot. */
7604
7605 static pad_direction
7606 sparc_function_arg_padding (machine_mode mode, const_tree type)
7607 {
7608 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7609 return PAD_UPWARD;
7610
7611 /* Fall back to the default. */
7612 return default_function_arg_padding (mode, type);
7613 }
7614
7615 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7616 Specify whether to return the return value in memory. */
7617
7618 static bool
7619 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7620 {
7621 if (TARGET_ARCH32)
7622 /* Original SPARC 32-bit ABI says that structures and unions,
7623 and quad-precision floats are returned in memory. All other
7624 base types are returned in registers.
7625
7626 Extended ABI (as implemented by the Sun compiler) says that
7627 all complex floats are returned in registers (8 FP registers
7628 at most for '_Complex long double'). Return all complex integers
7629 in registers (4 at most for '_Complex long long').
7630
7631 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7632 integers are returned like floats of the same size, that is in
7633 registers up to 8 bytes and in memory otherwise. Return all
7634 vector floats in memory like structures and unions; note that
7635 they always have BLKmode like the latter. */
7636 return (TYPE_MODE (type) == BLKmode
7637 || TYPE_MODE (type) == TFmode
7638 || (TREE_CODE (type) == VECTOR_TYPE
7639 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7640 else
7641 /* Original SPARC 64-bit ABI says that structures and unions
7642 smaller than 32 bytes are returned in registers, as well as
7643 all other base types.
7644
7645 Extended ABI (as implemented by the Sun compiler) says that all
7646 complex floats are returned in registers (8 FP registers at most
7647 for '_Complex long double'). Return all complex integers in
7648 registers (4 at most for '_Complex TItype').
7649
7650 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7651 integers are returned like floats of the same size, that is in
7652 registers. Return all vector floats like structures and unions;
7653 note that they always have BLKmode like the latter. */
7654 return (TYPE_MODE (type) == BLKmode
7655 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
7656 }
7657
7658 /* Handle the TARGET_STRUCT_VALUE target hook.
7659 Return where to find the structure return value address. */
7660
7661 static rtx
7662 sparc_struct_value_rtx (tree fndecl, int incoming)
7663 {
7664 if (TARGET_ARCH64)
7665 return 0;
7666 else
7667 {
7668 rtx mem;
7669
7670 if (incoming)
7671 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7672 STRUCT_VALUE_OFFSET));
7673 else
7674 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7675 STRUCT_VALUE_OFFSET));
7676
7677 /* Only follow the SPARC ABI for fixed-size structure returns.
7678 Variable-size structure returns are handled per the normal
7679 procedures in GCC. This is enabled by -mstd-struct-return. */
7680 if (incoming == 2
7681 && sparc_std_struct_return
7682 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7683 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7684 {
7685 /* We must check and adjust the return address, since the caller
7686 may or may not actually provide the return object. */
7687 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7688 rtx scratch = gen_reg_rtx (SImode);
7689 rtx_code_label *endlab = gen_label_rtx ();
7690
7691 /* Calculate the return object size. */
7692 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7693 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7694 /* Construct a temporary return value. */
7695 rtx temp_val
7696 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7697
7698 /* Implement SPARC 32-bit psABI callee return struct checking:
7699
7700 Fetch the instruction where we will return to and see if
7701 it's an unimp instruction (the most significant 10 bits
7702 will be zero). */
7703 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7704 plus_constant (Pmode,
7705 ret_reg, 8)));
7706 /* Assume the size is valid and pre-adjust. */
7707 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7708 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7709 0, endlab);
7710 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7711 /* Write the address of the memory pointed to by temp_val into
7712 the memory pointed to by mem. */
7713 emit_move_insn (mem, XEXP (temp_val, 0));
7714 emit_label (endlab);
7715 }
7716
7717 return mem;
7718 }
7719 }
7720
7721 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7722 For v9, function return values are subject to the same rules as arguments,
7723 except that up to 32 bytes may be returned in registers. */
7724
7725 static rtx
7726 sparc_function_value_1 (const_tree type, machine_mode mode,
7727 bool outgoing)
7728 {
7729 /* Beware that the two values are swapped here wrt function_arg. */
7730 int regbase = (outgoing
7731 ? SPARC_INCOMING_INT_ARG_FIRST
7732 : SPARC_OUTGOING_INT_ARG_FIRST);
7733 enum mode_class mclass = GET_MODE_CLASS (mode);
7734 int regno;
7735
7736 /* Vector types deserve special treatment because they are polymorphic wrt
7737 their mode, depending upon whether VIS instructions are enabled. */
7738 if (type && TREE_CODE (type) == VECTOR_TYPE)
7739 {
7740 HOST_WIDE_INT size = int_size_in_bytes (type);
7741 gcc_assert ((TARGET_ARCH32 && size <= 8)
7742 || (TARGET_ARCH64 && size <= 32));
7743
7744 if (mode == BLKmode)
7745 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST);
7746
7747 mclass = MODE_FLOAT;
7748 }
7749
7750 if (TARGET_ARCH64 && type)
7751 {
7752 /* Structures up to 32 bytes in size are returned in registers. */
7753 if (TREE_CODE (type) == RECORD_TYPE)
7754 {
7755 HOST_WIDE_INT size = int_size_in_bytes (type);
7756 gcc_assert (size <= 32);
7757
7758 return function_arg_record_value (type, mode, 0, 1, regbase);
7759 }
7760
7761 /* Unions up to 32 bytes in size are returned in integer registers. */
7762 else if (TREE_CODE (type) == UNION_TYPE)
7763 {
7764 HOST_WIDE_INT size = int_size_in_bytes (type);
7765 gcc_assert (size <= 32);
7766
7767 return function_arg_union_value (size, mode, 0, regbase);
7768 }
7769
7770 /* Objects that require it are returned in FP registers. */
7771 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7772 ;
7773
7774 /* All other aggregate types are returned in an integer register in a
7775 mode corresponding to the size of the type. */
7776 else if (AGGREGATE_TYPE_P (type))
7777 {
7778 /* All other aggregate types are passed in an integer register
7779 in a mode corresponding to the size of the type. */
7780 HOST_WIDE_INT size = int_size_in_bytes (type);
7781 gcc_assert (size <= 32);
7782
7783 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7784
7785 /* ??? We probably should have made the same ABI change in
7786 3.4.0 as the one we made for unions. The latter was
7787 required by the SCD though, while the former is not
7788 specified, so we favored compatibility and efficiency.
7789
7790 Now we're stuck for aggregates larger than 16 bytes,
7791 because OImode vanished in the meantime. Let's not
7792 try to be unduly clever, and simply follow the ABI
7793 for unions in that case. */
7794 if (mode == BLKmode)
7795 return function_arg_union_value (size, mode, 0, regbase);
7796 else
7797 mclass = MODE_INT;
7798 }
7799
7800 /* We should only have pointer and integer types at this point. This
7801 must match sparc_promote_function_mode. */
7802 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7803 mode = word_mode;
7804 }
7805
7806 /* We should only have pointer and integer types at this point, except with
7807 -freg-struct-return. This must match sparc_promote_function_mode. */
7808 else if (TARGET_ARCH32
7809 && !(type && AGGREGATE_TYPE_P (type))
7810 && mclass == MODE_INT
7811 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7812 mode = word_mode;
7813
7814 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7815 regno = SPARC_FP_ARG_FIRST;
7816 else
7817 regno = regbase;
7818
7819 return gen_rtx_REG (mode, regno);
7820 }
7821
7822 /* Handle TARGET_FUNCTION_VALUE.
7823 On the SPARC, the value is found in the first "output" register, but the
7824 called function leaves it in the first "input" register. */
7825
7826 static rtx
7827 sparc_function_value (const_tree valtype,
7828 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7829 bool outgoing)
7830 {
7831 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7832 }
7833
7834 /* Handle TARGET_LIBCALL_VALUE. */
7835
7836 static rtx
7837 sparc_libcall_value (machine_mode mode,
7838 const_rtx fun ATTRIBUTE_UNUSED)
7839 {
7840 return sparc_function_value_1 (NULL_TREE, mode, false);
7841 }
7842
7843 /* Handle FUNCTION_VALUE_REGNO_P.
7844 On the SPARC, the first "output" reg is used for integer values, and the
7845 first floating point register is used for floating point values. */
7846
7847 static bool
7848 sparc_function_value_regno_p (const unsigned int regno)
7849 {
7850 return (regno == 8 || (TARGET_FPU && regno == 32));
7851 }
7852
7853 /* Do what is necessary for `va_start'. We look at the current function
7854 to determine if stdarg or varargs is used and return the address of
7855 the first unnamed parameter. */
7856
7857 static rtx
7858 sparc_builtin_saveregs (void)
7859 {
7860 int first_reg = crtl->args.info.words;
7861 rtx address;
7862 int regno;
7863
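  /* Dump the remaining incoming integer argument registers into their
     reserved slots in the argument save area, so that the variable
     arguments can later be read back from memory.  */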
7864 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7865 emit_move_insn (gen_rtx_MEM (word_mode,
7866 gen_rtx_PLUS (Pmode,
7867 frame_pointer_rtx,
7868 GEN_INT (FIRST_PARM_OFFSET (0)
7869 + (UNITS_PER_WORD
7870 * regno)))),
7871 gen_rtx_REG (word_mode,
7872 SPARC_INCOMING_INT_ARG_FIRST + regno));
7873
7874 address = gen_rtx_PLUS (Pmode,
7875 frame_pointer_rtx,
7876 GEN_INT (FIRST_PARM_OFFSET (0)
7877 + UNITS_PER_WORD * first_reg));
7878
7879 return address;
7880 }
7881
7882 /* Implement `va_start' for stdarg. */
7883
7884 static void
7885 sparc_va_start (tree valist, rtx nextarg)
7886 {
7887 nextarg = expand_builtin_saveregs ();
7888 std_expand_builtin_va_start (valist, nextarg);
7889 }
7890
7891 /* Implement `va_arg' for stdarg. */
7892
7893 static tree
7894 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7895 gimple_seq *post_p)
7896 {
7897 HOST_WIDE_INT size, rsize, align;
7898 tree addr, incr;
7899 bool indirect;
7900 tree ptrtype = build_pointer_type (type);
7901
7902 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7903 {
7904 indirect = true;
7905 size = rsize = UNITS_PER_WORD;
7906 align = 0;
7907 }
7908 else
7909 {
7910 indirect = false;
7911 size = int_size_in_bytes (type);
7912 rsize = ROUND_UP (size, UNITS_PER_WORD);
7913 align = 0;
7914
7915 if (TARGET_ARCH64)
7916 {
7917 /* For SPARC64, objects requiring 16-byte alignment get it. */
7918 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7919 align = 2 * UNITS_PER_WORD;
7920
7921 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7922 are left-justified in their slots. */
7923 if (AGGREGATE_TYPE_P (type))
7924 {
7925 if (size == 0)
7926 size = rsize = UNITS_PER_WORD;
7927 else
7928 size = rsize;
7929 }
7930 }
7931 }
7932
7933 incr = valist;
7934 if (align)
7935 {
7936 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7937 incr = fold_convert (sizetype, incr);
7938 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7939 size_int (-align));
7940 incr = fold_convert (ptr_type_node, incr);
7941 }
7942
7943 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7944 addr = incr;
7945
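  /* On a big-endian target, a value smaller than its word-sized slot
     occupies the least significant (highest-addressed) bytes of the
     slot, so step over the leading padding.  */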
7946 if (BYTES_BIG_ENDIAN && size < rsize)
7947 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7948
7949 if (indirect)
7950 {
7951 addr = fold_convert (build_pointer_type (ptrtype), addr);
7952 addr = build_va_arg_indirect_ref (addr);
7953 }
7954
7955 /* If the address isn't aligned properly for the type, we need a temporary.
7956 FIXME: This is inefficient; usually we can do this in registers. */
7957 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7958 {
7959 tree tmp = create_tmp_var (type, "va_arg_tmp");
7960 tree dest_addr = build_fold_addr_expr (tmp);
7961 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7962 3, dest_addr, addr, size_int (rsize));
7963 TREE_ADDRESSABLE (tmp) = 1;
7964 gimplify_and_add (copy, pre_p);
7965 addr = dest_addr;
7966 }
7967
7968 else
7969 addr = fold_convert (ptrtype, addr);
7970
7971 incr = fold_build_pointer_plus_hwi (incr, rsize);
7972 gimplify_assign (valist, incr, post_p);
7973
7974 return build_va_arg_indirect_ref (addr);
7975 }
7976 \f
7977 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7978 Specify whether the vector mode is supported by the hardware. */
7979
7980 static bool
7981 sparc_vector_mode_supported_p (machine_mode mode)
7982 {
7983 return TARGET_VIS && VECTOR_MODE_P (mode);
7984 }
7985 \f
7986 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7987
7988 static machine_mode
7989 sparc_preferred_simd_mode (scalar_mode mode)
7990 {
7991 if (TARGET_VIS)
7992 switch (mode)
7993 {
7994 case E_SImode:
7995 return V2SImode;
7996 case E_HImode:
7997 return V4HImode;
7998 case E_QImode:
7999 return V8QImode;
8000
8001 default:;
8002 }
8003
8004 return word_mode;
8005 }
8006 \f
8007 /* Return the string to output an unconditional branch to LABEL, which is
8008 the operand number of the label.
8009
8010 DEST is the destination insn (i.e. the label), INSN is the source. */
8011
8012 const char *
8013 output_ubranch (rtx dest, rtx_insn *insn)
8014 {
8015 static char string[64];
8016 bool v9_form = false;
8017 int delta;
8018 char *p;
8019
8020 /* Even if we are trying to use cbcond for this, evaluate
8021 whether we can use V9 branches as our backup plan. */
8022
8023 delta = 5000000;
8024 if (INSN_ADDRESSES_SET_P ())
8025 delta = (INSN_ADDRESSES (INSN_UID (dest))
8026 - INSN_ADDRESSES (INSN_UID (insn)));
8027
8028 /* Leave some instructions for "slop". */
8029 if (TARGET_V9 && delta >= -260000 && delta < 260000)
8030 v9_form = true;
8031
8032 if (TARGET_CBCOND)
8033 {
8034 bool emit_nop = emit_cbcond_nop (insn);
8035 bool far = false;
8036 const char *rval;
8037
8038 if (delta < -500 || delta > 500)
8039 far = true;
8040
8041 if (far)
8042 {
8043 if (v9_form)
8044 rval = "ba,a,pt\t%%xcc, %l0";
8045 else
8046 rval = "b,a\t%l0";
8047 }
8048 else
8049 {
8050 if (emit_nop)
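	  /* cwbe compares %g0 with itself and branches on equality, which
	     always holds, so this is effectively an unconditional branch
	     within cbcond's short range.  */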
8051 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
8052 else
8053 rval = "cwbe\t%%g0, %%g0, %l0";
8054 }
8055 return rval;
8056 }
8057
8058 if (v9_form)
8059 strcpy (string, "ba%*,pt\t%%xcc, ");
8060 else
8061 strcpy (string, "b%*\t");
8062
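  /* Append the operand suffix "%l0%(" by hand: %l0 prints the label,
     while the %( punctuation (see sparc_print_operand) emits a trailing
     nop when the delay slot is left unfilled; its companion %* above may
     emit an ",a" annul flag instead.  */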
8063 p = strchr (string, '\0');
8064 *p++ = '%';
8065 *p++ = 'l';
8066 *p++ = '0';
8067 *p++ = '%';
8068 *p++ = '(';
8069 *p = '\0';
8070
8071 return string;
8072 }
8073
8074 /* Return the string to output a conditional branch to LABEL, which is
8075 the operand number of the label. OP is the conditional expression.
8076 XEXP (OP, 0) is assumed to be a condition code register (integer or
8077 floating point) and its mode specifies what kind of comparison we made.
8078
8079 DEST is the destination insn (i.e. the label), INSN is the source.
8080
8081 REVERSED is nonzero if we should reverse the sense of the comparison.
8082
8083 ANNUL is nonzero if we should generate an annulling branch. */
8084
8085 const char *
8086 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
8087 rtx_insn *insn)
8088 {
8089 static char string[64];
8090 enum rtx_code code = GET_CODE (op);
8091 rtx cc_reg = XEXP (op, 0);
8092 machine_mode mode = GET_MODE (cc_reg);
8093 const char *labelno, *branch;
8094 int spaces = 8, far;
8095 char *p;
8096
8097 /* v9 branches are limited to +-1MB. If it is too far away,
8098 change
8099
8100 bne,pt %xcc, .LC30
8101
8102 to
8103
8104 be,pn %xcc, .+12
8105 nop
8106 ba .LC30
8107
8108 and
8109
8110 fbne,a,pn %fcc2, .LC29
8111
8112 to
8113
8114 fbe,pt %fcc2, .+16
8115 nop
8116 ba .LC29 */
8117
8118 far = TARGET_V9 && (get_attr_length (insn) >= 3);
8119 if (reversed ^ far)
8120 {
8121 /* Reversal of FP compares requires care -- an ordered compare
8122 becomes an unordered compare and vice versa. */
8123 if (mode == CCFPmode || mode == CCFPEmode)
8124 code = reverse_condition_maybe_unordered (code);
8125 else
8126 code = reverse_condition (code);
8127 }
8128
8129 /* Start by writing the branch condition. */
8130 if (mode == CCFPmode || mode == CCFPEmode)
8131 {
8132 switch (code)
8133 {
8134 case NE:
8135 branch = "fbne";
8136 break;
8137 case EQ:
8138 branch = "fbe";
8139 break;
8140 case GE:
8141 branch = "fbge";
8142 break;
8143 case GT:
8144 branch = "fbg";
8145 break;
8146 case LE:
8147 branch = "fble";
8148 break;
8149 case LT:
8150 branch = "fbl";
8151 break;
8152 case UNORDERED:
8153 branch = "fbu";
8154 break;
8155 case ORDERED:
8156 branch = "fbo";
8157 break;
8158 case UNGT:
8159 branch = "fbug";
8160 break;
8161 case UNLT:
8162 branch = "fbul";
8163 break;
8164 case UNEQ:
8165 branch = "fbue";
8166 break;
8167 case UNGE:
8168 branch = "fbuge";
8169 break;
8170 case UNLE:
8171 branch = "fbule";
8172 break;
8173 case LTGT:
8174 branch = "fblg";
8175 break;
8176 default:
8177 gcc_unreachable ();
8178 }
8179
8180 /* ??? !v9: FP branches cannot be preceded by another floating point
8181 insn. Because there is currently no concept of pre-delay slots,
8182 we can fix this only by always emitting a nop before a floating
8183 point branch. */
8184
8185 string[0] = '\0';
8186 if (! TARGET_V9)
8187 strcpy (string, "nop\n\t");
8188 strcat (string, branch);
8189 }
8190 else
8191 {
8192 switch (code)
8193 {
8194 case NE:
8195 if (mode == CCVmode || mode == CCXVmode)
8196 branch = "bvs";
8197 else
8198 branch = "bne";
8199 break;
8200 case EQ:
8201 if (mode == CCVmode || mode == CCXVmode)
8202 branch = "bvc";
8203 else
8204 branch = "be";
8205 break;
8206 case GE:
8207 if (mode == CCNZmode || mode == CCXNZmode)
8208 branch = "bpos";
8209 else
8210 branch = "bge";
8211 break;
8212 case GT:
8213 branch = "bg";
8214 break;
8215 case LE:
8216 branch = "ble";
8217 break;
8218 case LT:
8219 if (mode == CCNZmode || mode == CCXNZmode)
8220 branch = "bneg";
8221 else
8222 branch = "bl";
8223 break;
8224 case GEU:
8225 branch = "bgeu";
8226 break;
8227 case GTU:
8228 branch = "bgu";
8229 break;
8230 case LEU:
8231 branch = "bleu";
8232 break;
8233 case LTU:
8234 branch = "blu";
8235 break;
8236 default:
8237 gcc_unreachable ();
8238 }
8239 strcpy (string, branch);
8240 }
8241 spaces -= strlen (branch);
8242 p = strchr (string, '\0');
8243
8244 /* Now add the annulling, the label, and a possible nop. */
8245 if (annul && ! far)
8246 {
8247 strcpy (p, ",a");
8248 p += 2;
8249 spaces -= 2;
8250 }
8251
8252 if (TARGET_V9)
8253 {
8254 rtx note;
8255 int v8 = 0;
8256
8257 if (! far && insn && INSN_ADDRESSES_SET_P ())
8258 {
8259 int delta = (INSN_ADDRESSES (INSN_UID (dest))
8260 - INSN_ADDRESSES (INSN_UID (insn)));
8261 /* Leave some instructions for "slop". */
8262 if (delta < -260000 || delta >= 260000)
8263 v8 = 1;
8264 }
8265
8266 switch (mode)
8267 {
8268 case E_CCmode:
8269 case E_CCNZmode:
8270 case E_CCCmode:
8271 case E_CCVmode:
8272 labelno = "%%icc, ";
8273 if (v8)
8274 labelno = "";
8275 break;
8276 case E_CCXmode:
8277 case E_CCXNZmode:
8278 case E_CCXCmode:
8279 case E_CCXVmode:
8280 labelno = "%%xcc, ";
8281 gcc_assert (!v8);
8282 break;
8283 case E_CCFPmode:
8284 case E_CCFPEmode:
8285 {
8286 static char v9_fcc_labelno[] = "%%fccX, ";
8287 /* Set the char indicating the number of the fcc reg to use. */
8288 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
8289 labelno = v9_fcc_labelno;
8290 if (v8)
8291 {
8292 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8293 labelno = "";
8294 }
8295 }
8296 break;
8297 default:
8298 gcc_unreachable ();
8299 }
8300
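	  /* Append a ,pt/,pn prediction hint when a branch probability note
	     is available; the test is XORed with FAR because the condition
	     has been reversed for the far form of the branch.  */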
8301 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8302 {
8303 strcpy (p,
8304 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8305 >= profile_probability::even ()) ^ far)
8306 ? ",pt" : ",pn");
8307 p += 3;
8308 spaces -= 3;
8309 }
8310 }
8311 else
8312 labelno = "";
8313
8314 if (spaces > 0)
8315 *p++ = '\t';
8316 else
8317 *p++ = ' ';
8318 strcpy (p, labelno);
8319 p = strchr (p, '\0');
8320 if (far)
8321 {
8322 strcpy (p, ".+12\n\t nop\n\tb\t");
8323 /* Skip the next insn if requested or
8324 if we know that it will be a nop. */
8325 if (annul || ! final_sequence)
8326 p[3] = '6';
8327 p += 14;
8328 }
8329 *p++ = '%';
8330 *p++ = 'l';
8331 *p++ = label + '0';
8332 *p++ = '%';
8333 *p++ = '#';
8334 *p = '\0';
8335
8336 return string;
8337 }
8338
8339 /* Emit a library call comparison between floating point X and Y.
8340 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8341 Return the new operator to be used in the comparison sequence.
8342
8343 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8344 values as arguments instead of the TFmode registers themselves,
8345 which is why we cannot call emit_float_lib_cmp. */
8346
8347 rtx
8348 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8349 {
8350 const char *qpfunc;
8351 rtx slot0, slot1, result, tem, tem2, libfunc;
8352 machine_mode mode;
8353 enum rtx_code new_comparison;
8354
8355 switch (comparison)
8356 {
8357 case EQ:
8358 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8359 break;
8360
8361 case NE:
8362 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8363 break;
8364
8365 case GT:
8366 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8367 break;
8368
8369 case GE:
8370 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8371 break;
8372
8373 case LT:
8374 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8375 break;
8376
8377 case LE:
8378 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8379 break;
8380
8381 case ORDERED:
8382 case UNORDERED:
8383 case UNGT:
8384 case UNLT:
8385 case UNEQ:
8386 case UNGE:
8387 case UNLE:
8388 case LTGT:
8389 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8390 break;
8391
8392 default:
8393 gcc_unreachable ();
8394 }
8395
8396 if (TARGET_ARCH64)
8397 {
8398 if (MEM_P (x))
8399 {
8400 tree expr = MEM_EXPR (x);
8401 if (expr)
8402 mark_addressable (expr);
8403 slot0 = x;
8404 }
8405 else
8406 {
8407 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8408 emit_move_insn (slot0, x);
8409 }
8410
8411 if (MEM_P (y))
8412 {
8413 tree expr = MEM_EXPR (y);
8414 if (expr)
8415 mark_addressable (expr);
8416 slot1 = y;
8417 }
8418 else
8419 {
8420 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8421 emit_move_insn (slot1, y);
8422 }
8423
8424 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8425 emit_library_call (libfunc, LCT_NORMAL,
8426 DImode,
8427 XEXP (slot0, 0), Pmode,
8428 XEXP (slot1, 0), Pmode);
8429 mode = DImode;
8430 }
8431 else
8432 {
8433 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8434 emit_library_call (libfunc, LCT_NORMAL,
8435 SImode,
8436 x, TFmode, y, TFmode);
8437 mode = SImode;
8438 }
8439
8440
8441 /* Immediately move the result of the libcall into a pseudo
8442 register so reload doesn't clobber the value if it needs
8443 the return register for a spill reg. */
8444 result = gen_reg_rtx (mode);
8445 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8446
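  /* The comparison routines return a small integer: judging from the
     decoding below, 0 means equal, 1 less, 2 greater and 3 unordered.
     Map that encoding back onto the requested comparison.  */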
8447 switch (comparison)
8448 {
8449 default:
8450 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8451 case ORDERED:
8452 case UNORDERED:
8453 new_comparison = (comparison == UNORDERED ? EQ : NE);
8454 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8455 case UNGT:
8456 case UNGE:
8457 new_comparison = (comparison == UNGT ? GT : NE);
8458 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8459 case UNLE:
8460 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8461 case UNLT:
8462 tem = gen_reg_rtx (mode);
8463 if (TARGET_ARCH32)
8464 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8465 else
8466 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8467 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8468 case UNEQ:
8469 case LTGT:
8470 tem = gen_reg_rtx (mode);
8471 if (TARGET_ARCH32)
8472 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8473 else
8474 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8475 tem2 = gen_reg_rtx (mode);
8476 if (TARGET_ARCH32)
8477 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8478 else
8479 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8480 new_comparison = (comparison == UNEQ ? EQ : NE);
8481 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8482 }
8483
8484 gcc_unreachable ();
8485 }
8486
8487 /* Generate an unsigned DImode to FP conversion. This is the same code
8488 optabs would emit if we didn't have TFmode patterns. */
8489
8490 void
8491 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8492 {
8493 rtx i0, i1, f0, in, out;
8494
8495 out = operands[0];
8496 in = force_reg (DImode, operands[1]);
8497 rtx_code_label *neglab = gen_label_rtx ();
8498 rtx_code_label *donelab = gen_label_rtx ();
8499 i0 = gen_reg_rtx (DImode);
8500 i1 = gen_reg_rtx (DImode);
8501 f0 = gen_reg_rtx (mode);
8502
8503 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8504
8505 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8506 emit_jump_insn (gen_jump (donelab));
8507 emit_barrier ();
8508
8509 emit_label (neglab);
8510
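  /* The value has its top bit set, so it cannot be converted directly.
     Halve it with a logical shift but OR the discarded low bit back in
     so that rounding still sees it (round-to-odd), convert, and double
     the result.  Roughly, in C:  out = 2.0 * (double) ((in >> 1) | (in & 1));  */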
8511 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8512 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8513 emit_insn (gen_iordi3 (i0, i0, i1));
8514 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8515 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8516
8517 emit_label (donelab);
8518 }
8519
8520 /* Generate an FP to unsigned DImode conversion. This is the same code
8521 optabs would emit if we didn't have TFmode patterns. */
8522
8523 void
8524 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8525 {
8526 rtx i0, i1, f0, in, out, limit;
8527
8528 out = operands[0];
8529 in = force_reg (mode, operands[1]);
8530 rtx_code_label *neglab = gen_label_rtx ();
8531 rtx_code_label *donelab = gen_label_rtx ();
8532 i0 = gen_reg_rtx (DImode);
8533 i1 = gen_reg_rtx (DImode);
8534 limit = gen_reg_rtx (mode);
8535 f0 = gen_reg_rtx (mode);
8536
8537 emit_move_insn (limit,
8538 const_double_from_real_value (
8539 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8540 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8541
8542 emit_insn (gen_rtx_SET (out,
8543 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8544 emit_jump_insn (gen_jump (donelab));
8545 emit_barrier ();
8546
8547 emit_label (neglab);
8548
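  /* The value is at least 2^63, which does not fit in a signed fix.
     Subtract 2^63 in the FP domain, convert, then set the top bit of
     the integer result with the XOR below to add 2^63 back.  */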
8549 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8550 emit_insn (gen_rtx_SET (i0,
8551 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8552 emit_insn (gen_movdi (i1, const1_rtx));
8553 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8554 emit_insn (gen_xordi3 (out, i0, i1));
8555
8556 emit_label (donelab);
8557 }
8558
8559 /* Return the string to output a compare and branch instruction to DEST.
8560 DEST is the destination insn (i.e. the label), INSN is the source,
8561 and OP is the conditional expression. */
8562
8563 const char *
8564 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8565 {
8566 machine_mode mode = GET_MODE (XEXP (op, 0));
8567 enum rtx_code code = GET_CODE (op);
8568 const char *cond_str, *tmpl;
8569 int far, emit_nop, len;
8570 static char string[64];
8571 char size_char;
8572
8573 /* Compare and Branch is limited to +-2KB. If it is too far away,
8574 change
8575
8576 cxbne X, Y, .LC30
8577
8578 to
8579
8580 cxbe X, Y, .+16
8581 nop
8582 ba,pt xcc, .LC30
8583 nop */
8584
8585 len = get_attr_length (insn);
8586
8587 far = len == 4;
8588 emit_nop = len == 2;
8589
8590 if (far)
8591 code = reverse_condition (code);
8592
8593 size_char = ((mode == SImode) ? 'w' : 'x');
8594
8595 switch (code)
8596 {
8597 case NE:
8598 cond_str = "ne";
8599 break;
8600
8601 case EQ:
8602 cond_str = "e";
8603 break;
8604
8605 case GE:
8606 cond_str = "ge";
8607 break;
8608
8609 case GT:
8610 cond_str = "g";
8611 break;
8612
8613 case LE:
8614 cond_str = "le";
8615 break;
8616
8617 case LT:
8618 cond_str = "l";
8619 break;
8620
8621 case GEU:
8622 cond_str = "cc";
8623 break;
8624
8625 case GTU:
8626 cond_str = "gu";
8627 break;
8628
8629 case LEU:
8630 cond_str = "leu";
8631 break;
8632
8633 case LTU:
8634 cond_str = "cs";
8635 break;
8636
8637 default:
8638 gcc_unreachable ();
8639 }
8640
8641 if (far)
8642 {
8643 int veryfar = 1, delta;
8644
8645 if (INSN_ADDRESSES_SET_P ())
8646 {
8647 delta = (INSN_ADDRESSES (INSN_UID (dest))
8648 - INSN_ADDRESSES (INSN_UID (insn)));
8649 /* Leave some instructions for "slop". */
8650 if (delta >= -260000 && delta < 260000)
8651 veryfar = 0;
8652 }
8653
8654 if (veryfar)
8655 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8656 else
8657 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8658 }
8659 else
8660 {
8661 if (emit_nop)
8662 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8663 else
8664 tmpl = "c%cb%s\t%%1, %%2, %%3";
8665 }
8666
8667 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8668
8669 return string;
8670 }
8671
8672 /* Return the string to output a conditional branch to LABEL, testing
8673 register REG. LABEL is the operand number of the label; REG is the
8674 operand number of the reg. OP is the conditional expression. The mode
8675 of REG says what kind of comparison we made.
8676
8677 DEST is the destination insn (i.e. the label), INSN is the source.
8678
8679 REVERSED is nonzero if we should reverse the sense of the comparison.
8680
8681 ANNUL is nonzero if we should generate an annulling branch. */
8682
8683 const char *
8684 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8685 int annul, rtx_insn *insn)
8686 {
8687 static char string[64];
8688 enum rtx_code code = GET_CODE (op);
8689 machine_mode mode = GET_MODE (XEXP (op, 0));
8690 rtx note;
8691 int far;
8692 char *p;
8693
8694 /* Branches on a register are limited to +-128KB. If it is too far away,
8695 change
8696
8697 brnz,pt %g1, .LC30
8698
8699 to
8700
8701 brz,pn %g1, .+12
8702 nop
8703 ba,pt %xcc, .LC30
8704
8705 and
8706
8707 brgez,a,pn %o1, .LC29
8708
8709 to
8710
8711 brlz,pt %o1, .+16
8712 nop
8713 ba,pt %xcc, .LC29 */
8714
8715 far = get_attr_length (insn) >= 3;
8716
8717 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8718 if (reversed ^ far)
8719 code = reverse_condition (code);
8720
8721 /* Only 64-bit versions of these instructions exist. */
8722 gcc_assert (mode == DImode);
8723
8724 /* Start by writing the branch condition. */
8725
8726 switch (code)
8727 {
8728 case NE:
8729 strcpy (string, "brnz");
8730 break;
8731
8732 case EQ:
8733 strcpy (string, "brz");
8734 break;
8735
8736 case GE:
8737 strcpy (string, "brgez");
8738 break;
8739
8740 case LT:
8741 strcpy (string, "brlz");
8742 break;
8743
8744 case LE:
8745 strcpy (string, "brlez");
8746 break;
8747
8748 case GT:
8749 strcpy (string, "brgz");
8750 break;
8751
8752 default:
8753 gcc_unreachable ();
8754 }
8755
8756 p = strchr (string, '\0');
8757
8758 /* Now add the annulling, reg, label, and nop. */
8759 if (annul && ! far)
8760 {
8761 strcpy (p, ",a");
8762 p += 2;
8763 }
8764
8765 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8766 {
8767 strcpy (p,
8768 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8769 >= profile_probability::even ()) ^ far)
8770 ? ",pt" : ",pn");
8771 p += 3;
8772 }
8773
8774 *p = p < string + 8 ? '\t' : ' ';
8775 p++;
8776 *p++ = '%';
8777 *p++ = '0' + reg;
8778 *p++ = ',';
8779 *p++ = ' ';
8780 if (far)
8781 {
8782 int veryfar = 1, delta;
8783
8784 if (INSN_ADDRESSES_SET_P ())
8785 {
8786 delta = (INSN_ADDRESSES (INSN_UID (dest))
8787 - INSN_ADDRESSES (INSN_UID (insn)));
8788 /* Leave some instructions for "slop". */
8789 if (delta >= -260000 && delta < 260000)
8790 veryfar = 0;
8791 }
8792
8793 strcpy (p, ".+12\n\t nop\n\t");
8794 /* Skip the next insn if requested or
8795 if we know that it will be a nop. */
8796 if (annul || ! final_sequence)
8797 p[3] = '6';
8798 p += 12;
8799 if (veryfar)
8800 {
8801 strcpy (p, "b\t");
8802 p += 2;
8803 }
8804 else
8805 {
8806 strcpy (p, "ba,pt\t%%xcc, ");
8807 p += 13;
8808 }
8809 }
8810 *p++ = '%';
8811 *p++ = 'l';
8812 *p++ = '0' + label;
8813 *p++ = '%';
8814 *p++ = '#';
8815 *p = '\0';
8816
8817 return string;
8818 }
8819
8820 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8821 Such instructions cannot be used in the delay slot of the return insn on V9.
8822 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
8823 counterparts. */
8824
8825 static int
8826 epilogue_renumber (register rtx *where, int test)
8827 {
8828 register const char *fmt;
8829 register int i;
8830 register enum rtx_code code;
8831
8832 if (*where == 0)
8833 return 0;
8834
8835 code = GET_CODE (*where);
8836
8837 switch (code)
8838 {
8839 case REG:
8840 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8841 return 1;
8842 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8843 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8844 /* fallthrough */
8845 case SCRATCH:
8846 case CC0:
8847 case PC:
8848 case CONST_INT:
8849 case CONST_WIDE_INT:
8850 case CONST_DOUBLE:
8851 return 0;
8852
8853 /* Do not replace the frame pointer with the stack pointer because
8854 it can cause the delayed instruction to load below the stack.
8855 This occurs when instructions like:
8856
8857 (set (reg/i:SI 24 %i0)
8858 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8859 (const_int -20 [0xffffffec])) 0))
8860
8861 are in the return delay slot. */
8862 case PLUS:
8863 if (GET_CODE (XEXP (*where, 0)) == REG
8864 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8865 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8866 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8867 return 1;
8868 break;
8869
8870 case MEM:
8871 if (SPARC_STACK_BIAS
8872 && GET_CODE (XEXP (*where, 0)) == REG
8873 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8874 return 1;
8875 break;
8876
8877 default:
8878 break;
8879 }
8880
8881 fmt = GET_RTX_FORMAT (code);
8882
8883 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8884 {
8885 if (fmt[i] == 'E')
8886 {
8887 register int j;
8888 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8889 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8890 return 1;
8891 }
8892 else if (fmt[i] == 'e'
8893 && epilogue_renumber (&(XEXP (*where, i)), test))
8894 return 1;
8895 }
8896 return 0;
8897 }
8898 \f
8899 /* Leaf functions and non-leaf functions have different needs. */
8900
8901 static const int
8902 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8903
8904 static const int
8905 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8906
8907 static const int *const reg_alloc_orders[] = {
8908 reg_leaf_alloc_order,
8909 reg_nonleaf_alloc_order};
8910
8911 void
8912 order_regs_for_local_alloc (void)
8913 {
8914 static int last_order_nonleaf = 1;
8915
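  /* Hard register 15 is %o7, which a call instruction clobbers with the
     return address; if it is ever live, the current function makes calls
     and must use the non-leaf allocation order.  */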
8916 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8917 {
8918 last_order_nonleaf = !last_order_nonleaf;
8919 memcpy ((char *) reg_alloc_order,
8920 (const char *) reg_alloc_orders[last_order_nonleaf],
8921 FIRST_PSEUDO_REGISTER * sizeof (int));
8922 }
8923 }
8924 \f
8925 /* Return 1 if REG and MEM are legitimate enough to allow the various
8926 MEM<-->REG splits to be run. */
8927
8928 int
8929 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8930 {
8931 /* Punt if we are here by mistake. */
8932 gcc_assert (reload_completed);
8933
8934 /* We must have an offsettable memory reference. */
8935 if (!offsettable_memref_p (mem))
8936 return 0;
8937
8938 /* If we have legitimate args for ldd/std, we do not want
8939 the split to happen. */
8940 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8941 return 0;
8942
8943 /* Success. */
8944 return 1;
8945 }
8946
8947 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8948
8949 void
8950 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8951 {
8952 rtx high_part = gen_highpart (mode, dest);
8953 rtx low_part = gen_lowpart (mode, dest);
8954 rtx word0 = adjust_address (src, mode, 0);
8955 rtx word1 = adjust_address (src, mode, 4);
8956
8957 if (reg_overlap_mentioned_p (high_part, word1))
8958 {
8959 emit_move_insn_1 (low_part, word1);
8960 emit_move_insn_1 (high_part, word0);
8961 }
8962 else
8963 {
8964 emit_move_insn_1 (high_part, word0);
8965 emit_move_insn_1 (low_part, word1);
8966 }
8967 }
8968
8969 /* Split a MEM <-- REG move into a pair of moves in MODE. */
8970
8971 void
8972 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
8973 {
8974 rtx word0 = adjust_address (dest, mode, 0);
8975 rtx word1 = adjust_address (dest, mode, 4);
8976 rtx high_part = gen_highpart (mode, src);
8977 rtx low_part = gen_lowpart (mode, src);
8978
8979 emit_move_insn_1 (word0, high_part);
8980 emit_move_insn_1 (word1, low_part);
8981 }
8982
8983 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
8984
8985 int
8986 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
8987 {
8988 /* Punt if we are here by mistake. */
8989 gcc_assert (reload_completed);
8990
8991 if (GET_CODE (reg1) == SUBREG)
8992 reg1 = SUBREG_REG (reg1);
8993 if (GET_CODE (reg1) != REG)
8994 return 0;
8995 const int regno1 = REGNO (reg1);
8996
8997 if (GET_CODE (reg2) == SUBREG)
8998 reg2 = SUBREG_REG (reg2);
8999 if (GET_CODE (reg2) != REG)
9000 return 0;
9001 const int regno2 = REGNO (reg2);
9002
9003 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
9004 return 1;
9005
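  /* VIS3 adds direct moves between the integer and FP register files
     (e.g. movwtos/movstouw), so a value split across the two files can
     still be moved piecewise.  */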
9006 if (TARGET_VIS3)
9007 {
9008 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
9009 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
9010 return 1;
9011 }
9012
9013 return 0;
9014 }
9015
9016 /* Split a REG <--> REG move into a pair of moves in MODE. */
9017
9018 void
9019 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
9020 {
9021 rtx dest1 = gen_highpart (mode, dest);
9022 rtx dest2 = gen_lowpart (mode, dest);
9023 rtx src1 = gen_highpart (mode, src);
9024 rtx src2 = gen_lowpart (mode, src);
9025
9026 /* Now emit using the real source and destination we found, swapping
9027 the order if we detect overlap. */
9028 if (reg_overlap_mentioned_p (dest1, src2))
9029 {
9030 emit_move_insn_1 (dest2, src2);
9031 emit_move_insn_1 (dest1, src1);
9032 }
9033 else
9034 {
9035 emit_move_insn_1 (dest1, src1);
9036 emit_move_insn_1 (dest2, src2);
9037 }
9038 }
9039
9040 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
9041 This makes them candidates for using ldd and std insns.
9042
9043 Note reg1 and reg2 *must* be hard registers. */
9044
9045 int
9046 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
9047 {
9048 /* We might have been passed a SUBREG. */
9049 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
9050 return 0;
9051
9052 if (REGNO (reg1) % 2 != 0)
9053 return 0;
9054
9055 /* Integer ldd is deprecated in SPARC V9. */
9056 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
9057 return 0;
9058
9059 return (REGNO (reg1) == REGNO (reg2) - 1);
9060 }
9061
9062 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
9063 an ldd or std insn.
9064
9065 This can only happen when addr1 and addr2, the addresses in mem1
9066 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
9067 addr1 must also be aligned on a 64-bit boundary.
9068
9069 Also, if dependent_reg_rtx is not null, it should not be used to
9070 compute the address for mem1, i.e. we cannot optimize a sequence
9071 like:
9072 ld [%o0], %o0
9073 ld [%o0 + 4], %o1
9074 to
9075 ldd [%o0], %o0
9076 nor:
9077 ld [%g3 + 4], %g3
9078 ld [%g3], %g2
9079 to
9080 ldd [%g3], %g2
9081
9082 But, note that the transformation from:
9083 ld [%g2 + 4], %g3
9084 ld [%g2], %g2
9085 to
9086 ldd [%g2], %g2
9087 is perfectly fine. Thus, the peephole2 patterns always pass us
9088 the destination register of the first load, never the second one.
9089
9090 For stores we don't have a similar problem, so dependent_reg_rtx is
9091 NULL_RTX. */
9092
9093 int
9094 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
9095 {
9096 rtx addr1, addr2;
9097 unsigned int reg1;
9098 HOST_WIDE_INT offset1;
9099
9100 /* The mems cannot be volatile. */
9101 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
9102 return 0;
9103
9104 /* MEM1 should be aligned on a 64-bit boundary. */
9105 if (MEM_ALIGN (mem1) < 64)
9106 return 0;
9107
9108 addr1 = XEXP (mem1, 0);
9109 addr2 = XEXP (mem2, 0);
9110
9111 /* Extract a register number and offset (if used) from the first addr. */
9112 if (GET_CODE (addr1) == PLUS)
9113 {
9114 /* If not a REG, return zero. */
9115 if (GET_CODE (XEXP (addr1, 0)) != REG)
9116 return 0;
9117 else
9118 {
9119 reg1 = REGNO (XEXP (addr1, 0));
9120 /* The offset must be constant! */
9121 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
9122 return 0;
9123 offset1 = INTVAL (XEXP (addr1, 1));
9124 }
9125 }
9126 else if (GET_CODE (addr1) != REG)
9127 return 0;
9128 else
9129 {
9130 reg1 = REGNO (addr1);
9131 /* This was a simple (mem (reg)) expression. Offset is 0. */
9132 offset1 = 0;
9133 }
9134
9135 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
9136 if (GET_CODE (addr2) != PLUS)
9137 return 0;
9138
9139 if (GET_CODE (XEXP (addr2, 0)) != REG
9140 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
9141 return 0;
9142
9143 if (reg1 != REGNO (XEXP (addr2, 0)))
9144 return 0;
9145
9146 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
9147 return 0;
9148
9149 /* The first offset must be evenly divisible by 8 to ensure the
9150 address is 64-bit aligned. */
9151 if (offset1 % 8 != 0)
9152 return 0;
9153
9154 /* The offset for the second addr must be 4 more than the first addr. */
9155 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
9156 return 0;
9157
9158 /* All the tests passed. addr1 and addr2 are valid for ldd and std
9159 instructions. */
9160 return 1;
9161 }
9162
9163 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
9164
9165 rtx
9166 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
9167 {
9168 rtx x = widen_memory_access (mem1, mode, 0);
9169 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
9170 return x;
9171 }
9172
9173 /* Return 1 if reg is a pseudo, or is the first register in
9174 a hard register pair. This makes it suitable for use in
9175 ldd and std insns. */
9176
9177 int
9178 register_ok_for_ldd (rtx reg)
9179 {
9180 /* We might have been passed a SUBREG. */
9181 if (!REG_P (reg))
9182 return 0;
9183
9184 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
9185 return (REGNO (reg) % 2 == 0);
9186
9187 return 1;
9188 }
9189
9190 /* Return 1 if OP, a MEM, has an address which is known to be
9191 aligned to an 8-byte boundary. */
9192
9193 int
9194 memory_ok_for_ldd (rtx op)
9195 {
9196 /* In 64-bit mode, we assume that the address is word-aligned. */
9197 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
9198 return 0;
9199
9200 if (! can_create_pseudo_p ()
9201 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
9202 return 0;
9203
9204 return 1;
9205 }
9206 \f
9207 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
9208
9209 static bool
9210 sparc_print_operand_punct_valid_p (unsigned char code)
9211 {
9212 if (code == '#'
9213 || code == '*'
9214 || code == '('
9215 || code == ')'
9216 || code == '_'
9217 || code == '&')
9218 return true;
9219
9220 return false;
9221 }
9222
9223 /* Implement TARGET_PRINT_OPERAND.
9224 Print operand X (an rtx) in assembler syntax to file FILE.
9225 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
9226 For `%' followed by punctuation, CODE is the punctuation and X is null. */
9227
9228 static void
9229 sparc_print_operand (FILE *file, rtx x, int code)
9230 {
9231 const char *s;
9232
9233 switch (code)
9234 {
9235 case '#':
9236 /* Output an insn in a delay slot. */
9237 if (final_sequence)
9238 sparc_indent_opcode = 1;
9239 else
9240 fputs ("\n\t nop", file);
9241 return;
9242 case '*':
9243 /* Output an annul flag if there's nothing for the delay slot and we
9244 are optimizing. This is always used with '(' below.
9245 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
9246 this is a dbx bug. So, we only do this when optimizing.
9247 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
9248 Always emit a nop in case the next instruction is a branch. */
9249 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
9250 fputs (",a", file);
9251 return;
9252 case '(':
9253 /* Output a 'nop' if there's nothing for the delay slot and we are
9254 not optimizing. This is always used with '*' above. */
9255 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
9256 fputs ("\n\t nop", file);
9257 else if (final_sequence)
9258 sparc_indent_opcode = 1;
9259 return;
9260 case ')':
9261 /* Output the right displacement from the saved PC on function return.
9262 The caller may have placed an "unimp" insn immediately after the call
9263 so we have to account for it. This insn is used in the 32-bit ABI
9264 when calling a function that returns a non zero-sized structure. The
9265 64-bit ABI doesn't have it. Be careful to have this test be the same
9266 as that for the call. The exception is when sparc_std_struct_return
9267 is enabled, the psABI is followed exactly and the adjustment is made
9268 by the code in sparc_struct_value_rtx. The call emitted is the same
9269 when sparc_std_struct_return is enabled. */
9270 if (!TARGET_ARCH64
9271 && cfun->returns_struct
9272 && !sparc_std_struct_return
9273 && DECL_SIZE (DECL_RESULT (current_function_decl))
9274 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
9275 == INTEGER_CST
9276 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
9277 fputs ("12", file);
9278 else
9279 fputc ('8', file);
9280 return;
9281 case '_':
9282 /* Output the Embedded Medium/Anywhere code model base register. */
9283 fputs (EMBMEDANY_BASE_REG, file);
9284 return;
9285 case '&':
9286 /* Print some local dynamic TLS name. */
9287 if (const char *name = get_some_local_dynamic_name ())
9288 assemble_name (file, name);
9289 else
9290 output_operand_lossage ("'%%&' used without any "
9291 "local dynamic TLS references");
9292 return;
9293
9294 case 'Y':
9295 /* Adjust the operand to take into account a RESTORE operation. */
9296 if (GET_CODE (x) == CONST_INT)
9297 break;
9298 else if (GET_CODE (x) != REG)
9299 output_operand_lossage ("invalid %%Y operand");
9300 else if (REGNO (x) < 8)
9301 fputs (reg_names[REGNO (x)], file);
9302 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9303 fputs (reg_names[REGNO (x)-16], file);
9304 else
9305 output_operand_lossage ("invalid %%Y operand");
9306 return;
9307 case 'L':
9308 /* Print out the low order register name of a register pair. */
9309 if (WORDS_BIG_ENDIAN)
9310 fputs (reg_names[REGNO (x)+1], file);
9311 else
9312 fputs (reg_names[REGNO (x)], file);
9313 return;
9314 case 'H':
9315 /* Print out the high order register name of a register pair. */
9316 if (WORDS_BIG_ENDIAN)
9317 fputs (reg_names[REGNO (x)], file);
9318 else
9319 fputs (reg_names[REGNO (x)+1], file);
9320 return;
9321 case 'R':
9322 /* Print out the second register name of a register pair or quad.
9323 I.e., R (%o0) => %o1. */
9324 fputs (reg_names[REGNO (x)+1], file);
9325 return;
9326 case 'S':
9327 /* Print out the third register name of a register quad.
9328 I.e., S (%o0) => %o2. */
9329 fputs (reg_names[REGNO (x)+2], file);
9330 return;
9331 case 'T':
9332 /* Print out the fourth register name of a register quad.
9333 I.e., T (%o0) => %o3. */
9334 fputs (reg_names[REGNO (x)+3], file);
9335 return;
9336 case 'x':
9337 /* Print a condition code register. */
9338 if (REGNO (x) == SPARC_ICC_REG)
9339 {
9340 switch (GET_MODE (x))
9341 {
9342 case E_CCmode:
9343 case E_CCNZmode:
9344 case E_CCCmode:
9345 case E_CCVmode:
9346 s = "%icc";
9347 break;
9348 case E_CCXmode:
9349 case E_CCXNZmode:
9350 case E_CCXCmode:
9351 case E_CCXVmode:
9352 s = "%xcc";
9353 break;
9354 default:
9355 gcc_unreachable ();
9356 }
9357 fputs (s, file);
9358 }
9359 else
9360 /* %fccN register */
9361 fputs (reg_names[REGNO (x)], file);
9362 return;
9363 case 'm':
9364 /* Print the operand's address only. */
9365 output_address (GET_MODE (x), XEXP (x, 0));
9366 return;
9367 case 'r':
9368 /* In this case we need a register. Use %g0 if the
9369 operand is const0_rtx. */
9370 if (x == const0_rtx
9371 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9372 {
9373 fputs ("%g0", file);
9374 return;
9375 }
9376 else
9377 break;
9378
9379 case 'A':
9380 switch (GET_CODE (x))
9381 {
9382 case IOR:
9383 s = "or";
9384 break;
9385 case AND:
9386 s = "and";
9387 break;
9388 case XOR:
9389 s = "xor";
9390 break;
9391 default:
9392 output_operand_lossage ("invalid %%A operand");
9393 s = "";
9394 break;
9395 }
9396 fputs (s, file);
9397 return;
9398
9399 case 'B':
9400 switch (GET_CODE (x))
9401 {
9402 case IOR:
9403 s = "orn";
9404 break;
9405 case AND:
9406 s = "andn";
9407 break;
9408 case XOR:
9409 s = "xnor";
9410 break;
9411 default:
9412 output_operand_lossage ("invalid %%B operand");
9413 s = "";
9414 break;
9415 }
9416 fputs (s, file);
9417 return;
9418
9419 /* This is used by the conditional move instructions. */
9420 case 'C':
9421 {
9422 machine_mode mode = GET_MODE (XEXP (x, 0));
9423 switch (GET_CODE (x))
9424 {
9425 case NE:
9426 if (mode == CCVmode || mode == CCXVmode)
9427 s = "vs";
9428 else
9429 s = "ne";
9430 break;
9431 case EQ:
9432 if (mode == CCVmode || mode == CCXVmode)
9433 s = "vc";
9434 else
9435 s = "e";
9436 break;
9437 case GE:
9438 if (mode == CCNZmode || mode == CCXNZmode)
9439 s = "pos";
9440 else
9441 s = "ge";
9442 break;
9443 case GT:
9444 s = "g";
9445 break;
9446 case LE:
9447 s = "le";
9448 break;
9449 case LT:
9450 if (mode == CCNZmode || mode == CCXNZmode)
9451 s = "neg";
9452 else
9453 s = "l";
9454 break;
9455 case GEU:
9456 s = "geu";
9457 break;
9458 case GTU:
9459 s = "gu";
9460 break;
9461 case LEU:
9462 s = "leu";
9463 break;
9464 case LTU:
9465 s = "lu";
9466 break;
9467 case LTGT:
9468 s = "lg";
9469 break;
9470 case UNORDERED:
9471 s = "u";
9472 break;
9473 case ORDERED:
9474 s = "o";
9475 break;
9476 case UNLT:
9477 s = "ul";
9478 break;
9479 case UNLE:
9480 s = "ule";
9481 break;
9482 case UNGT:
9483 s = "ug";
9484 break;
9485 case UNGE:
9486 s = "uge"
9487 ; break;
9488 case UNEQ:
9489 s = "ue";
9490 break;
9491 default:
9492 output_operand_lossage ("invalid %%C operand");
9493 s = "";
9494 break;
9495 }
9496 fputs (s, file);
9497 return;
9498 }
9499
9500 /* These are used by the movr instruction pattern. */
9501 case 'D':
9502 {
9503 switch (GET_CODE (x))
9504 {
9505 case NE:
9506 s = "ne";
9507 break;
9508 case EQ:
9509 s = "e";
9510 break;
9511 case GE:
9512 s = "gez";
9513 break;
9514 case LT:
9515 s = "lz";
9516 break;
9517 case LE:
9518 s = "lez";
9519 break;
9520 case GT:
9521 s = "gz";
9522 break;
9523 default:
9524 output_operand_lossage ("invalid %%D operand");
9525 s = "";
9526 break;
9527 }
9528 fputs (s, file);
9529 return;
9530 }
9531
9532 case 'b':
9533 {
9534 /* Print a sign-extended character. */
9535 int i = trunc_int_for_mode (INTVAL (x), QImode);
9536 fprintf (file, "%d", i);
9537 return;
9538 }
9539
9540 case 'f':
9541 /* Operand must be a MEM; write its address. */
9542 if (GET_CODE (x) != MEM)
9543 output_operand_lossage ("invalid %%f operand");
9544 output_address (GET_MODE (x), XEXP (x, 0));
9545 return;
9546
9547 case 's':
9548 {
9549 /* Print a sign-extended 32-bit value. */
9550 HOST_WIDE_INT i;
9551 if (GET_CODE (x) == CONST_INT)
9552 i = INTVAL (x);
9553 else
9554 {
9555 output_operand_lossage ("invalid %%s operand");
9556 return;
9557 }
9558 i = trunc_int_for_mode (i, SImode);
9559 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9560 return;
9561 }
9562
9563 case 0:
9564 /* Do nothing special. */
9565 break;
9566
9567 default:
9568 /* Undocumented flag. */
9569 output_operand_lossage ("invalid operand output code");
9570 }
9571
9572 if (GET_CODE (x) == REG)
9573 fputs (reg_names[REGNO (x)], file);
9574 else if (GET_CODE (x) == MEM)
9575 {
9576 fputc ('[', file);
9577 /* Poor Sun assembler doesn't understand absolute addressing. */
9578 if (CONSTANT_P (XEXP (x, 0)))
9579 fputs ("%g0+", file);
9580 output_address (GET_MODE (x), XEXP (x, 0));
9581 fputc (']', file);
9582 }
9583 else if (GET_CODE (x) == HIGH)
9584 {
9585 fputs ("%hi(", file);
9586 output_addr_const (file, XEXP (x, 0));
9587 fputc (')', file);
9588 }
9589 else if (GET_CODE (x) == LO_SUM)
9590 {
9591 sparc_print_operand (file, XEXP (x, 0), 0);
9592 if (TARGET_CM_MEDMID)
9593 fputs ("+%l44(", file);
9594 else
9595 fputs ("+%lo(", file);
9596 output_addr_const (file, XEXP (x, 1));
9597 fputc (')', file);
9598 }
9599 else if (GET_CODE (x) == CONST_DOUBLE)
9600 output_operand_lossage ("floating-point constant not a valid immediate operand");
9601 else
9602 output_addr_const (file, x);
9603 }
9604
9605 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9606
9607 static void
9608 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9609 {
9610 register rtx base, index = 0;
9611 int offset = 0;
9612 register rtx addr = x;
9613
9614 if (REG_P (addr))
9615 fputs (reg_names[REGNO (addr)], file);
9616 else if (GET_CODE (addr) == PLUS)
9617 {
9618 if (CONST_INT_P (XEXP (addr, 0)))
9619 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9620 else if (CONST_INT_P (XEXP (addr, 1)))
9621 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9622 else
9623 base = XEXP (addr, 0), index = XEXP (addr, 1);
9624 if (GET_CODE (base) == LO_SUM)
9625 {
9626 gcc_assert (USE_AS_OFFSETABLE_LO10
9627 && TARGET_ARCH64
9628 && ! TARGET_CM_MEDMID);
9629 output_operand (XEXP (base, 0), 0);
9630 fputs ("+%lo(", file);
9631 output_address (VOIDmode, XEXP (base, 1));
9632 fprintf (file, ")+%d", offset);
9633 }
9634 else
9635 {
9636 fputs (reg_names[REGNO (base)], file);
9637 if (index == 0)
9638 fprintf (file, "%+d", offset);
9639 else if (REG_P (index))
9640 fprintf (file, "+%s", reg_names[REGNO (index)]);
9641 else if (GET_CODE (index) == SYMBOL_REF
9642 || GET_CODE (index) == LABEL_REF
9643 || GET_CODE (index) == CONST)
9644 fputc ('+', file), output_addr_const (file, index);
9645 else gcc_unreachable ();
9646 }
9647 }
9648 else if (GET_CODE (addr) == MINUS
9649 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9650 {
9651 output_addr_const (file, XEXP (addr, 0));
9652 fputs ("-(", file);
9653 output_addr_const (file, XEXP (addr, 1));
9654 fputs ("-.)", file);
9655 }
9656 else if (GET_CODE (addr) == LO_SUM)
9657 {
9658 output_operand (XEXP (addr, 0), 0);
9659 if (TARGET_CM_MEDMID)
9660 fputs ("+%l44(", file);
9661 else
9662 fputs ("+%lo(", file);
9663 output_address (VOIDmode, XEXP (addr, 1));
9664 fputc (')', file);
9665 }
9666 else if (flag_pic
9667 && GET_CODE (addr) == CONST
9668 && GET_CODE (XEXP (addr, 0)) == MINUS
9669 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9670 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9671 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9672 {
9673 addr = XEXP (addr, 0);
9674 output_addr_const (file, XEXP (addr, 0));
9675 /* Group the args of the second CONST in parentheses. */
9676 fputs ("-(", file);
9677 /* Skip past the second CONST--it does nothing for us. */
9678 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9679 /* Close the parenthesis. */
9680 fputc (')', file);
9681 }
9682 else
9683 {
9684 output_addr_const (file, addr);
9685 }
9686 }
9687 \f
9688 /* Target hook for assembling integer objects. The sparc version has
9689 special handling for aligned DI-mode objects. */
9690
9691 static bool
9692 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9693 {
9694 /* ??? We only output .xword's for symbols and only then in environments
9695 where the assembler can handle them. */
9696 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9697 {
9698 if (TARGET_V9)
9699 {
9700 assemble_integer_with_op ("\t.xword\t", x);
9701 return true;
9702 }
9703 else
9704 {
9705 assemble_aligned_integer (4, const0_rtx);
9706 assemble_aligned_integer (4, x);
9707 return true;
9708 }
9709 }
9710 return default_assemble_integer (x, size, aligned_p);
9711 }
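/* For example (illustrative only, assuming a TARGET_V9 assembler that
   accepts .xword): an aligned 8-byte reference to a symbol `foo' would
   be emitted as

       .xword  foo

   whereas without V9 the same object is split into two 4-byte pieces,
   a zero word followed by the word referencing the symbol.  */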
9712 \f
9713 /* Return the value of a code used in the .proc pseudo-op that says
9714 what kind of result this function returns. For non-C types, we pick
9715 the closest C type. */
9716
9717 #ifndef SHORT_TYPE_SIZE
9718 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9719 #endif
9720
9721 #ifndef INT_TYPE_SIZE
9722 #define INT_TYPE_SIZE BITS_PER_WORD
9723 #endif
9724
9725 #ifndef LONG_TYPE_SIZE
9726 #define LONG_TYPE_SIZE BITS_PER_WORD
9727 #endif
9728
9729 #ifndef LONG_LONG_TYPE_SIZE
9730 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9731 #endif
9732
9733 #ifndef FLOAT_TYPE_SIZE
9734 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9735 #endif
9736
9737 #ifndef DOUBLE_TYPE_SIZE
9738 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9739 #endif
9740
9741 #ifndef LONG_DOUBLE_TYPE_SIZE
9742 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9743 #endif
9744
9745 unsigned long
9746 sparc_type_code (register tree type)
9747 {
9748 register unsigned long qualifiers = 0;
9749 register unsigned shift;
9750
9751 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9752 setting more, since some assemblers will give an error for this. Also,
9753 we must be careful to avoid shifts of 32 bits or more to avoid getting
9754 unpredictable results. */
9755
9756 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9757 {
9758 switch (TREE_CODE (type))
9759 {
9760 case ERROR_MARK:
9761 return qualifiers;
9762
9763 case ARRAY_TYPE:
9764 qualifiers |= (3 << shift);
9765 break;
9766
9767 case FUNCTION_TYPE:
9768 case METHOD_TYPE:
9769 qualifiers |= (2 << shift);
9770 break;
9771
9772 case POINTER_TYPE:
9773 case REFERENCE_TYPE:
9774 case OFFSET_TYPE:
9775 qualifiers |= (1 << shift);
9776 break;
9777
9778 case RECORD_TYPE:
9779 return (qualifiers | 8);
9780
9781 case UNION_TYPE:
9782 case QUAL_UNION_TYPE:
9783 return (qualifiers | 9);
9784
9785 case ENUMERAL_TYPE:
9786 return (qualifiers | 10);
9787
9788 case VOID_TYPE:
9789 return (qualifiers | 16);
9790
9791 case INTEGER_TYPE:
9792 /* If this is a range type, consider it to be the underlying
9793 type. */
9794 if (TREE_TYPE (type) != 0)
9795 break;
9796
9797 /* Carefully distinguish all the standard types of C,
9798 without messing up if the language is not C. We do this by
9799 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9800 look at both the names and the above fields, but that's redundant.
9801 Any type whose size is between two C types will be considered
9802 to be the wider of the two types. Also, we do not have a
9803 special code to use for "long long", so anything wider than
9804 long is treated the same. Note that we can't distinguish
9805 between "int" and "long" in this code if they are the same
9806 size, but that's fine, since neither can the assembler. */
9807
9808 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9809 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9810
9811 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9812 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9813
9814 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9815 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9816
9817 else
9818 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9819
9820 case REAL_TYPE:
9821 /* If this is a range type, consider it to be the underlying
9822 type. */
9823 if (TREE_TYPE (type) != 0)
9824 break;
9825
9826 /* Carefully distinguish all the standard types of C,
9827 without messing up if the language is not C. */
9828
9829 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9830 return (qualifiers | 6);
9831
9832 else
9833 return (qualifiers | 7);
9834
9835 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9836 /* ??? We need to distinguish between double and float complex types,
9837 but I don't know how yet because I can't reach this code from
9838 existing front-ends. */
9839 return (qualifiers | 7); /* Who knows? */
9840
9841 case VECTOR_TYPE:
9842 case BOOLEAN_TYPE: /* Boolean truth value type. */
9843 case LANG_TYPE:
9844 case NULLPTR_TYPE:
9845 return qualifiers;
9846
9847 default:
9848 gcc_unreachable (); /* Not a type! */
9849 }
9850 }
9851
9852 return qualifiers;
9853 }
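/* A worked example (illustrative only, assuming a 32-bit `int'): for the
   type `int **', the two POINTER_TYPE levels contribute (1 << 6) and
   (1 << 8), and the underlying signed INTEGER_TYPE of INT_TYPE_SIZE
   precision contributes 4, giving

       0x40 | 0x100 | 4 = 0x144

   as the value returned by sparc_type_code.  */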
9854 \f
9855 /* Nested function support. */
9856
9857 /* Emit RTL insns to initialize the variable parts of a trampoline.
9858 FNADDR is an RTX for the address of the function's pure code.
9859 CXT is an RTX for the static chain value for the function.
9860
9861 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9862 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9863 (to store insns). This is a bit excessive. Perhaps a different
9864 mechanism would be better here.
9865
9866 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9867
9868 static void
9869 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9870 {
9871 /* SPARC 32-bit trampoline:
9872
9873 sethi %hi(fn), %g1
9874 sethi %hi(static), %g2
9875 jmp %g1+%lo(fn)
9876 or %g2, %lo(static), %g2
9877
9878 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9879 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9880 */
9881
9882 emit_move_insn
9883 (adjust_address (m_tramp, SImode, 0),
9884 expand_binop (SImode, ior_optab,
9885 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9886 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9887 NULL_RTX, 1, OPTAB_DIRECT));
9888
9889 emit_move_insn
9890 (adjust_address (m_tramp, SImode, 4),
9891 expand_binop (SImode, ior_optab,
9892 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9893 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9894 NULL_RTX, 1, OPTAB_DIRECT));
9895
9896 emit_move_insn
9897 (adjust_address (m_tramp, SImode, 8),
9898 expand_binop (SImode, ior_optab,
9899 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9900 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9901 NULL_RTX, 1, OPTAB_DIRECT));
9902
9903 emit_move_insn
9904 (adjust_address (m_tramp, SImode, 12),
9905 expand_binop (SImode, ior_optab,
9906 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9907 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9908 NULL_RTX, 1, OPTAB_DIRECT));
9909
9910 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9911 aligned on a 16 byte boundary so one flush clears it all. */
9912 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9913 if (sparc_cpu != PROCESSOR_ULTRASPARC
9914 && sparc_cpu != PROCESSOR_ULTRASPARC3
9915 && sparc_cpu != PROCESSOR_NIAGARA
9916 && sparc_cpu != PROCESSOR_NIAGARA2
9917 && sparc_cpu != PROCESSOR_NIAGARA3
9918 && sparc_cpu != PROCESSOR_NIAGARA4
9919 && sparc_cpu != PROCESSOR_NIAGARA7
9920 && sparc_cpu != PROCESSOR_M8)
9921 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9922
9923 /* Call __enable_execute_stack after writing onto the stack to make sure
9924 the stack address is accessible. */
9925 #ifdef HAVE_ENABLE_EXECUTE_STACK
9926 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9927 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9928 #endif
9929
9930 }
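/* A worked example (illustrative only): if the nested function lives at
   address 0x40001234, the first and third trampoline words become

       0x03000000 | (0x40001234 >> 10)    = 0x03100004   sethi %hi(fn), %g1
       0x81c06000 | (0x40001234 & 0x3ff)  = 0x81c06234   jmp   %g1+%lo(fn)

   with the static chain words formed in the same way from CXT.  */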
9931
9932 /* The 64-bit version is simpler because it makes more sense to load the
9933 values as "immediate" data out of the trampoline. It's also easier since
9934 we can read the PC without clobbering a register. */
9935
9936 static void
9937 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9938 {
9939 /* SPARC 64-bit trampoline:
9940
9941 rd %pc, %g1
9942 ldx [%g1+24], %g5
9943 jmp %g5
9944 ldx [%g1+16], %g5
9945 +16 bytes data
9946 */
9947
9948 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9949 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9950 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9951 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
9952 emit_move_insn (adjust_address (m_tramp, SImode, 8),
9953 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
9954 emit_move_insn (adjust_address (m_tramp, SImode, 12),
9955 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
9956 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
9957 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
9958 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
9959
9960 if (sparc_cpu != PROCESSOR_ULTRASPARC
9961 && sparc_cpu != PROCESSOR_ULTRASPARC3
9962 && sparc_cpu != PROCESSOR_NIAGARA
9963 && sparc_cpu != PROCESSOR_NIAGARA2
9964 && sparc_cpu != PROCESSOR_NIAGARA3
9965 && sparc_cpu != PROCESSOR_NIAGARA4
9966 && sparc_cpu != PROCESSOR_NIAGARA7
9967 && sparc_cpu != PROCESSOR_M8)
9968 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
9969
9970 /* Call __enable_execute_stack after writing onto the stack to make sure
9971 the stack address is accessible. */
9972 #ifdef HAVE_ENABLE_EXECUTE_STACK
9973 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9974 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9975 #endif
9976 }
9977
9978 /* Worker for TARGET_TRAMPOLINE_INIT. */
9979
9980 static void
9981 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9982 {
9983 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
9984 cxt = force_reg (Pmode, cxt);
9985 if (TARGET_ARCH64)
9986 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
9987 else
9988 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
9989 }
9990 \f
9991 /* Adjust the cost of a scheduling dependency. Return the new cost of
9992 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
9993
9994 static int
9995 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
9996 int cost)
9997 {
9998 enum attr_type insn_type;
9999
10000 if (recog_memoized (insn) < 0)
10001 return cost;
10002
10003 insn_type = get_attr_type (insn);
10004
10005 if (dep_type == 0)
10006 {
10007 /* Data dependency; DEP_INSN writes a register that INSN reads some
10008 cycles later. */
10009
10010 /* if a load, then the dependence must be on the memory address;
10011 add an extra "cycle". Note that the cost could be two cycles
10012 if the reg was written late in an instruction group; we cannot tell
10013 here. */
10014 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
10015 return cost + 3;
10016
10017 /* Get the delay only if the address of the store is the dependence. */
10018 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
10019 {
10020 rtx pat = PATTERN (insn);
10021 rtx dep_pat = PATTERN (dep_insn);
10022
10023 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10024 return cost; /* This should not happen! */
10025
10026 /* The dependency between the two instructions was on the data that
10027 is being stored. Assume that this implies that the address of the
10028 store is not dependent. */
10029 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10030 return cost;
10031
10032 return cost + 3; /* An approximation. */
10033 }
10034
10035 /* A shift instruction cannot receive its data from an instruction
10036 in the same cycle; add a one cycle penalty. */
10037 if (insn_type == TYPE_SHIFT)
10038 return cost + 3; /* Split before cascade into shift. */
10039 }
10040 else
10041 {
10042 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
10043 INSN writes some cycles later. */
10044
10045 /* These are only significant for the fpu unit; writing a fp reg before
10046 the fpu has finished with it stalls the processor. */
10047
10048 /* Reusing an integer register causes no problems. */
10049 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10050 return 0;
10051 }
10052
10053 return cost;
10054 }
10055
10056 static int
10057 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10058 int cost)
10059 {
10060 enum attr_type insn_type, dep_type;
10061 rtx pat = PATTERN (insn);
10062 rtx dep_pat = PATTERN (dep_insn);
10063
10064 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10065 return cost;
10066
10067 insn_type = get_attr_type (insn);
10068 dep_type = get_attr_type (dep_insn);
10069
10070 switch (dtype)
10071 {
10072 case 0:
10073 /* Data dependency; DEP_INSN writes a register that INSN reads some
10074 cycles later. */
10075
10076 switch (insn_type)
10077 {
10078 case TYPE_STORE:
10079 case TYPE_FPSTORE:
10080 /* Get the delay iff the address of the store is the dependence. */
10081 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10082 return cost;
10083
10084 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10085 return cost;
10086 return cost + 3;
10087
10088 case TYPE_LOAD:
10089 case TYPE_SLOAD:
10090 case TYPE_FPLOAD:
10091 /* If a load, then the dependence must be on the memory address. If
10092 the addresses aren't equal, then it might be a false dependency. */
10093 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
10094 {
10095 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
10096 || GET_CODE (SET_DEST (dep_pat)) != MEM
10097 || GET_CODE (SET_SRC (pat)) != MEM
10098 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
10099 XEXP (SET_SRC (pat), 0)))
10100 return cost + 2;
10101
10102 return cost + 8;
10103 }
10104 break;
10105
10106 case TYPE_BRANCH:
10107 /* Compare to branch latency is 0. There is no benefit from
10108 separating compare and branch. */
10109 if (dep_type == TYPE_COMPARE)
10110 return 0;
10111 /* Floating point compare to branch latency is less than
10112 compare to conditional move. */
10113 if (dep_type == TYPE_FPCMP)
10114 return cost - 1;
10115 break;
10116 default:
10117 break;
10118 }
10119 break;
10120
10121 case REG_DEP_ANTI:
10122 /* Anti-dependencies only penalize the fpu unit. */
10123 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10124 return 0;
10125 break;
10126
10127 default:
10128 break;
10129 }
10130
10131 return cost;
10132 }
10133
10134 static int
10135 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
10136 unsigned int)
10137 {
10138 switch (sparc_cpu)
10139 {
10140 case PROCESSOR_SUPERSPARC:
10141 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
10142 break;
10143 case PROCESSOR_HYPERSPARC:
10144 case PROCESSOR_SPARCLITE86X:
10145 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
10146 break;
10147 default:
10148 break;
10149 }
10150 return cost;
10151 }
10152
10153 static void
10154 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
10155 int sched_verbose ATTRIBUTE_UNUSED,
10156 int max_ready ATTRIBUTE_UNUSED)
10157 {}
10158
10159 static int
10160 sparc_use_sched_lookahead (void)
10161 {
10162 if (sparc_cpu == PROCESSOR_NIAGARA
10163 || sparc_cpu == PROCESSOR_NIAGARA2
10164 || sparc_cpu == PROCESSOR_NIAGARA3)
10165 return 0;
10166 if (sparc_cpu == PROCESSOR_NIAGARA4
10167 || sparc_cpu == PROCESSOR_NIAGARA7
10168 || sparc_cpu == PROCESSOR_M8)
10169 return 2;
10170 if (sparc_cpu == PROCESSOR_ULTRASPARC
10171 || sparc_cpu == PROCESSOR_ULTRASPARC3)
10172 return 4;
10173 if ((1 << sparc_cpu) &
10174 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
10175 (1 << PROCESSOR_SPARCLITE86X)))
10176 return 3;
10177 return 0;
10178 }
10179
10180 static int
10181 sparc_issue_rate (void)
10182 {
10183 switch (sparc_cpu)
10184 {
10185 case PROCESSOR_NIAGARA:
10186 case PROCESSOR_NIAGARA2:
10187 case PROCESSOR_NIAGARA3:
10188 default:
10189 return 1;
10190 case PROCESSOR_NIAGARA4:
10191 case PROCESSOR_NIAGARA7:
10192 case PROCESSOR_V9:
10193 /* Assume V9 processors are capable of at least dual-issue. */
10194 return 2;
10195 case PROCESSOR_SUPERSPARC:
10196 return 3;
10197 case PROCESSOR_HYPERSPARC:
10198 case PROCESSOR_SPARCLITE86X:
10199 return 2;
10200 case PROCESSOR_ULTRASPARC:
10201 case PROCESSOR_ULTRASPARC3:
10202 case PROCESSOR_M8:
10203 return 4;
10204 }
10205 }
10206
10207 static int
10208 set_extends (rtx_insn *insn)
10209 {
10210 register rtx pat = PATTERN (insn);
10211
10212 switch (GET_CODE (SET_SRC (pat)))
10213 {
10214 /* Load and some shift instructions zero extend. */
10215 case MEM:
10216 case ZERO_EXTEND:
10217 /* sethi clears the high bits.  */
10218 case HIGH:
10219 /* LO_SUM is used with sethi.  sethi cleared the high
10220 bits and the values used with lo_sum are positive.  */
10221 case LO_SUM:
10222 /* Store flag stores 0 or 1.  */
10223 case LT: case LTU:
10224 case GT: case GTU:
10225 case LE: case LEU:
10226 case GE: case GEU:
10227 case EQ:
10228 case NE:
10229 return 1;
10230 case AND:
10231 {
10232 rtx op0 = XEXP (SET_SRC (pat), 0);
10233 rtx op1 = XEXP (SET_SRC (pat), 1);
10234 if (GET_CODE (op1) == CONST_INT)
10235 return INTVAL (op1) >= 0;
10236 if (GET_CODE (op0) != REG)
10237 return 0;
10238 if (sparc_check_64 (op0, insn) == 1)
10239 return 1;
10240 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10241 }
10242 case IOR:
10243 case XOR:
10244 {
10245 rtx op0 = XEXP (SET_SRC (pat), 0);
10246 rtx op1 = XEXP (SET_SRC (pat), 1);
10247 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
10248 return 0;
10249 if (GET_CODE (op1) == CONST_INT)
10250 return INTVAL (op1) >= 0;
10251 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10252 }
10253 case LSHIFTRT:
10254 return GET_MODE (SET_SRC (pat)) == SImode;
10255 /* Positive integers leave the high bits zero. */
10256 case CONST_INT:
10257 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
10258 case ASHIFTRT:
10259 case SIGN_EXTEND:
10260 return - (GET_MODE (SET_SRC (pat)) == SImode);
10261 case REG:
10262 return sparc_check_64 (SET_SRC (pat), insn);
10263 default:
10264 return 0;
10265 }
10266 }
10267
10268 /* We _ought_ to have only one kind per function, but... */
10269 static GTY(()) rtx sparc_addr_diff_list;
10270 static GTY(()) rtx sparc_addr_list;
10271
10272 void
10273 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
10274 {
10275 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10276 if (diff)
10277 sparc_addr_diff_list
10278 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
10279 else
10280 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
10281 }
10282
10283 static void
10284 sparc_output_addr_vec (rtx vec)
10285 {
10286 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10287 int idx, vlen = XVECLEN (body, 0);
10288
10289 #ifdef ASM_OUTPUT_ADDR_VEC_START
10290 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10291 #endif
10292
10293 #ifdef ASM_OUTPUT_CASE_LABEL
10294 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10295 NEXT_INSN (lab));
10296 #else
10297 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10298 #endif
10299
10300 for (idx = 0; idx < vlen; idx++)
10301 {
10302 ASM_OUTPUT_ADDR_VEC_ELT
10303 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10304 }
10305
10306 #ifdef ASM_OUTPUT_ADDR_VEC_END
10307 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10308 #endif
10309 }
10310
10311 static void
10312 sparc_output_addr_diff_vec (rtx vec)
10313 {
10314 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10315 rtx base = XEXP (XEXP (body, 0), 0);
10316 int idx, vlen = XVECLEN (body, 1);
10317
10318 #ifdef ASM_OUTPUT_ADDR_VEC_START
10319 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10320 #endif
10321
10322 #ifdef ASM_OUTPUT_CASE_LABEL
10323 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10324 NEXT_INSN (lab));
10325 #else
10326 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10327 #endif
10328
10329 for (idx = 0; idx < vlen; idx++)
10330 {
10331 ASM_OUTPUT_ADDR_DIFF_ELT
10332 (asm_out_file,
10333 body,
10334 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10335 CODE_LABEL_NUMBER (base));
10336 }
10337
10338 #ifdef ASM_OUTPUT_ADDR_VEC_END
10339 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10340 #endif
10341 }
10342
10343 static void
10344 sparc_output_deferred_case_vectors (void)
10345 {
10346 rtx t;
10347 int align;
10348
10349 if (sparc_addr_list == NULL_RTX
10350 && sparc_addr_diff_list == NULL_RTX)
10351 return;
10352
10353 /* Align to cache line in the function's code section. */
10354 switch_to_section (current_function_section ());
10355
10356 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10357 if (align > 0)
10358 ASM_OUTPUT_ALIGN (asm_out_file, align);
10359
10360 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10361 sparc_output_addr_vec (XEXP (t, 0));
10362 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10363 sparc_output_addr_diff_vec (XEXP (t, 0));
10364
10365 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
10366 }
10367
10368 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10369 unknown. Return 1 if the high bits are zero, -1 if the register is
10370 sign extended. */
10371 int
10372 sparc_check_64 (rtx x, rtx_insn *insn)
10373 {
10374 /* If a register is set only once it is safe to ignore insns this
10375 code does not know how to handle. The loop will either recognize
10376 the single set and return the correct value or fail to recognize
10377 it and return 0. */
10378 int set_once = 0;
10379 rtx y = x;
10380
10381 gcc_assert (GET_CODE (x) == REG);
10382
10383 if (GET_MODE (x) == DImode)
10384 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10385
10386 if (flag_expensive_optimizations
10387 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10388 set_once = 1;
10389
10390 if (insn == 0)
10391 {
10392 if (set_once)
10393 insn = get_last_insn_anywhere ();
10394 else
10395 return 0;
10396 }
10397
10398 while ((insn = PREV_INSN (insn)))
10399 {
10400 switch (GET_CODE (insn))
10401 {
10402 case JUMP_INSN:
10403 case NOTE:
10404 break;
10405 case CODE_LABEL:
10406 case CALL_INSN:
10407 default:
10408 if (! set_once)
10409 return 0;
10410 break;
10411 case INSN:
10412 {
10413 rtx pat = PATTERN (insn);
10414 if (GET_CODE (pat) != SET)
10415 return 0;
10416 if (rtx_equal_p (x, SET_DEST (pat)))
10417 return set_extends (insn);
10418 if (y && rtx_equal_p (y, SET_DEST (pat)))
10419 return set_extends (insn);
10420 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10421 return 0;
10422 }
10423 }
10424 }
10425 return 0;
10426 }
10427
10428 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10429 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10430
10431 const char *
10432 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10433 {
10434 static char asm_code[60];
10435
10436 /* The scratch register is only required when the destination
10437 register is not a 64-bit global or out register. */
10438 if (which_alternative != 2)
10439 operands[3] = operands[0];
10440
10441 /* We can only shift by constants <= 63. */
10442 if (GET_CODE (operands[2]) == CONST_INT)
10443 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10444
10445 if (GET_CODE (operands[1]) == CONST_INT)
10446 {
10447 output_asm_insn ("mov\t%1, %3", operands);
10448 }
10449 else
10450 {
10451 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10452 if (sparc_check_64 (operands[1], insn) <= 0)
10453 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10454 output_asm_insn ("or\t%L1, %3, %3", operands);
10455 }
10456
10457 strcpy (asm_code, opcode);
10458
10459 if (which_alternative != 2)
10460 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10461 else
10462 return
10463 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
10464 }
10465 \f
10466 /* Output rtl to increment the profiler label LABELNO
10467 for profiling a function entry. */
10468
10469 void
10470 sparc_profile_hook (int labelno)
10471 {
10472 char buf[32];
10473 rtx lab, fun;
10474
10475 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10476 if (NO_PROFILE_COUNTERS)
10477 {
10478 emit_library_call (fun, LCT_NORMAL, VOIDmode);
10479 }
10480 else
10481 {
10482 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10483 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10484 emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
10485 }
10486 }
10487 \f
10488 #ifdef TARGET_SOLARIS
10489 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10490
10491 static void
10492 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10493 tree decl ATTRIBUTE_UNUSED)
10494 {
10495 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10496 {
10497 solaris_elf_asm_comdat_section (name, flags, decl);
10498 return;
10499 }
10500
10501 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10502
10503 if (!(flags & SECTION_DEBUG))
10504 fputs (",#alloc", asm_out_file);
10505 #if HAVE_GAS_SECTION_EXCLUDE
10506 if (flags & SECTION_EXCLUDE)
10507 fputs (",#exclude", asm_out_file);
10508 #endif
10509 if (flags & SECTION_WRITE)
10510 fputs (",#write", asm_out_file);
10511 if (flags & SECTION_TLS)
10512 fputs (",#tls", asm_out_file);
10513 if (flags & SECTION_CODE)
10514 fputs (",#execinstr", asm_out_file);
10515
10516 if (flags & SECTION_NOTYPE)
10517 ;
10518 else if (flags & SECTION_BSS)
10519 fputs (",#nobits", asm_out_file);
10520 else
10521 fputs (",#progbits", asm_out_file);
10522
10523 fputc ('\n', asm_out_file);
10524 }
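/* For example (illustrative only), a writable data section named
   ".data.foo" with no other special flags would come out as

       .section  ".data.foo",#alloc,#write,#progbits

   following the Solaris assembler's #-flag syntax handled above.  */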
10525 #endif /* TARGET_SOLARIS */
10526
10527 /* We do not allow indirect calls to be optimized into sibling calls.
10528
10529 We cannot use sibling calls when delayed branches are disabled
10530 because they will likely require the call delay slot to be filled.
10531
10532 Also, on SPARC 32-bit we cannot emit a sibling call when the
10533 current function returns a structure. This is because the "unimp
10534 after call" convention would cause the callee to return to the
10535 wrong place. The generic code already disallows cases where the
10536 function being called returns a structure.
10537
10538 It may seem strange how this last case could occur. Usually there
10539 is code after the call which jumps to epilogue code which dumps the
10540 return value into the struct return area. That ought to invalidate
10541 the sibling call right? Well, in the C++ case we can end up passing
10542 the pointer to the struct return area to a constructor (which returns
10543 void) and then nothing else happens. Such a sibling call would look
10544 valid without the added check here.
10545
10546 VxWorks PIC PLT entries require the global pointer to be initialized
10547 on entry. We therefore can't emit sibling calls to them. */
10548 static bool
10549 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10550 {
10551 return (decl
10552 && flag_delayed_branch
10553 && (TARGET_ARCH64 || ! cfun->returns_struct)
10554 && !(TARGET_VXWORKS_RTP
10555 && flag_pic
10556 && !targetm.binds_local_p (decl)));
10557 }
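/* An illustrative C++ fragment of the constructor case mentioned above
   (assumption: 32-bit ABI with -fdelayed-branch):

     struct S { S (); int x[4]; };
     S make (void) { return S (); }

   Here the only thing `make' does is pass its hidden struct-return
   pointer to the constructor, so the call looks like a valid tail call
   even though the "unimp after call" convention forbids it.  */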
10558 \f
10559 /* libfunc renaming. */
10560
10561 static void
10562 sparc_init_libfuncs (void)
10563 {
10564 if (TARGET_ARCH32)
10565 {
10566 /* Use the subroutines that Sun's library provides for integer
10567 multiply and divide. The `*' prevents an underscore from
10568 being prepended by the compiler. .umul is a little faster
10569 than .mul. */
10570 set_optab_libfunc (smul_optab, SImode, "*.umul");
10571 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10572 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10573 set_optab_libfunc (smod_optab, SImode, "*.rem");
10574 set_optab_libfunc (umod_optab, SImode, "*.urem");
10575
10576 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
10577 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10578 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10579 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10580 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10581 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10582
10583 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10584 is because with soft-float, the SFmode and DFmode sqrt
10585 instructions will be absent, and the compiler will notice and
10586 try to use the TFmode sqrt instruction for calls to the
10587 builtin function sqrt, but this fails. */
10588 if (TARGET_FPU)
10589 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10590
10591 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10592 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10593 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10594 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10595 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10596 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10597
10598 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10599 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10600 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10601 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10602
10603 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10604 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10605 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10606 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10607
10608 if (DITF_CONVERSION_LIBFUNCS)
10609 {
10610 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10611 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10612 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10613 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10614 }
10615
10616 if (SUN_CONVERSION_LIBFUNCS)
10617 {
10618 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10619 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10620 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10621 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10622 }
10623 }
10624 if (TARGET_ARCH64)
10625 {
10626 /* In the SPARC 64bit ABI, SImode multiply and divide functions
10627 do not exist in the library. Make sure the compiler does not
10628 emit calls to them by accident. (It should always use the
10629 hardware instructions.) */
10630 set_optab_libfunc (smul_optab, SImode, 0);
10631 set_optab_libfunc (sdiv_optab, SImode, 0);
10632 set_optab_libfunc (udiv_optab, SImode, 0);
10633 set_optab_libfunc (smod_optab, SImode, 0);
10634 set_optab_libfunc (umod_optab, SImode, 0);
10635
10636 if (SUN_INTEGER_MULTIPLY_64)
10637 {
10638 set_optab_libfunc (smul_optab, DImode, "__mul64");
10639 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10640 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10641 set_optab_libfunc (smod_optab, DImode, "__rem64");
10642 set_optab_libfunc (umod_optab, DImode, "__urem64");
10643 }
10644
10645 if (SUN_CONVERSION_LIBFUNCS)
10646 {
10647 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10648 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10649 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10650 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
10651 }
10652 }
10653 }
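/* For example (illustrative only, 32-bit SPARC): a `long double'
   addition such as

     long double f (long double a, long double b) { return a + b; }

   is not expanded inline but compiled into a call to _Q_add, one of the
   TFmode support routines named above by the 32-bit ABI.  */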
10654 \f
10655 /* SPARC builtins. */
10656 enum sparc_builtins
10657 {
10658 /* FPU builtins. */
10659 SPARC_BUILTIN_LDFSR,
10660 SPARC_BUILTIN_STFSR,
10661
10662 /* VIS 1.0 builtins. */
10663 SPARC_BUILTIN_FPACK16,
10664 SPARC_BUILTIN_FPACK32,
10665 SPARC_BUILTIN_FPACKFIX,
10666 SPARC_BUILTIN_FEXPAND,
10667 SPARC_BUILTIN_FPMERGE,
10668 SPARC_BUILTIN_FMUL8X16,
10669 SPARC_BUILTIN_FMUL8X16AU,
10670 SPARC_BUILTIN_FMUL8X16AL,
10671 SPARC_BUILTIN_FMUL8SUX16,
10672 SPARC_BUILTIN_FMUL8ULX16,
10673 SPARC_BUILTIN_FMULD8SUX16,
10674 SPARC_BUILTIN_FMULD8ULX16,
10675 SPARC_BUILTIN_FALIGNDATAV4HI,
10676 SPARC_BUILTIN_FALIGNDATAV8QI,
10677 SPARC_BUILTIN_FALIGNDATAV2SI,
10678 SPARC_BUILTIN_FALIGNDATADI,
10679 SPARC_BUILTIN_WRGSR,
10680 SPARC_BUILTIN_RDGSR,
10681 SPARC_BUILTIN_ALIGNADDR,
10682 SPARC_BUILTIN_ALIGNADDRL,
10683 SPARC_BUILTIN_PDIST,
10684 SPARC_BUILTIN_EDGE8,
10685 SPARC_BUILTIN_EDGE8L,
10686 SPARC_BUILTIN_EDGE16,
10687 SPARC_BUILTIN_EDGE16L,
10688 SPARC_BUILTIN_EDGE32,
10689 SPARC_BUILTIN_EDGE32L,
10690 SPARC_BUILTIN_FCMPLE16,
10691 SPARC_BUILTIN_FCMPLE32,
10692 SPARC_BUILTIN_FCMPNE16,
10693 SPARC_BUILTIN_FCMPNE32,
10694 SPARC_BUILTIN_FCMPGT16,
10695 SPARC_BUILTIN_FCMPGT32,
10696 SPARC_BUILTIN_FCMPEQ16,
10697 SPARC_BUILTIN_FCMPEQ32,
10698 SPARC_BUILTIN_FPADD16,
10699 SPARC_BUILTIN_FPADD16S,
10700 SPARC_BUILTIN_FPADD32,
10701 SPARC_BUILTIN_FPADD32S,
10702 SPARC_BUILTIN_FPSUB16,
10703 SPARC_BUILTIN_FPSUB16S,
10704 SPARC_BUILTIN_FPSUB32,
10705 SPARC_BUILTIN_FPSUB32S,
10706 SPARC_BUILTIN_ARRAY8,
10707 SPARC_BUILTIN_ARRAY16,
10708 SPARC_BUILTIN_ARRAY32,
10709
10710 /* VIS 2.0 builtins. */
10711 SPARC_BUILTIN_EDGE8N,
10712 SPARC_BUILTIN_EDGE8LN,
10713 SPARC_BUILTIN_EDGE16N,
10714 SPARC_BUILTIN_EDGE16LN,
10715 SPARC_BUILTIN_EDGE32N,
10716 SPARC_BUILTIN_EDGE32LN,
10717 SPARC_BUILTIN_BMASK,
10718 SPARC_BUILTIN_BSHUFFLEV4HI,
10719 SPARC_BUILTIN_BSHUFFLEV8QI,
10720 SPARC_BUILTIN_BSHUFFLEV2SI,
10721 SPARC_BUILTIN_BSHUFFLEDI,
10722
10723 /* VIS 3.0 builtins. */
10724 SPARC_BUILTIN_CMASK8,
10725 SPARC_BUILTIN_CMASK16,
10726 SPARC_BUILTIN_CMASK32,
10727 SPARC_BUILTIN_FCHKSM16,
10728 SPARC_BUILTIN_FSLL16,
10729 SPARC_BUILTIN_FSLAS16,
10730 SPARC_BUILTIN_FSRL16,
10731 SPARC_BUILTIN_FSRA16,
10732 SPARC_BUILTIN_FSLL32,
10733 SPARC_BUILTIN_FSLAS32,
10734 SPARC_BUILTIN_FSRL32,
10735 SPARC_BUILTIN_FSRA32,
10736 SPARC_BUILTIN_PDISTN,
10737 SPARC_BUILTIN_FMEAN16,
10738 SPARC_BUILTIN_FPADD64,
10739 SPARC_BUILTIN_FPSUB64,
10740 SPARC_BUILTIN_FPADDS16,
10741 SPARC_BUILTIN_FPADDS16S,
10742 SPARC_BUILTIN_FPSUBS16,
10743 SPARC_BUILTIN_FPSUBS16S,
10744 SPARC_BUILTIN_FPADDS32,
10745 SPARC_BUILTIN_FPADDS32S,
10746 SPARC_BUILTIN_FPSUBS32,
10747 SPARC_BUILTIN_FPSUBS32S,
10748 SPARC_BUILTIN_FUCMPLE8,
10749 SPARC_BUILTIN_FUCMPNE8,
10750 SPARC_BUILTIN_FUCMPGT8,
10751 SPARC_BUILTIN_FUCMPEQ8,
10752 SPARC_BUILTIN_FHADDS,
10753 SPARC_BUILTIN_FHADDD,
10754 SPARC_BUILTIN_FHSUBS,
10755 SPARC_BUILTIN_FHSUBD,
10756 SPARC_BUILTIN_FNHADDS,
10757 SPARC_BUILTIN_FNHADDD,
10758 SPARC_BUILTIN_UMULXHI,
10759 SPARC_BUILTIN_XMULX,
10760 SPARC_BUILTIN_XMULXHI,
10761
10762 /* VIS 4.0 builtins. */
10763 SPARC_BUILTIN_FPADD8,
10764 SPARC_BUILTIN_FPADDS8,
10765 SPARC_BUILTIN_FPADDUS8,
10766 SPARC_BUILTIN_FPADDUS16,
10767 SPARC_BUILTIN_FPCMPLE8,
10768 SPARC_BUILTIN_FPCMPGT8,
10769 SPARC_BUILTIN_FPCMPULE16,
10770 SPARC_BUILTIN_FPCMPUGT16,
10771 SPARC_BUILTIN_FPCMPULE32,
10772 SPARC_BUILTIN_FPCMPUGT32,
10773 SPARC_BUILTIN_FPMAX8,
10774 SPARC_BUILTIN_FPMAX16,
10775 SPARC_BUILTIN_FPMAX32,
10776 SPARC_BUILTIN_FPMAXU8,
10777 SPARC_BUILTIN_FPMAXU16,
10778 SPARC_BUILTIN_FPMAXU32,
10779 SPARC_BUILTIN_FPMIN8,
10780 SPARC_BUILTIN_FPMIN16,
10781 SPARC_BUILTIN_FPMIN32,
10782 SPARC_BUILTIN_FPMINU8,
10783 SPARC_BUILTIN_FPMINU16,
10784 SPARC_BUILTIN_FPMINU32,
10785 SPARC_BUILTIN_FPSUB8,
10786 SPARC_BUILTIN_FPSUBS8,
10787 SPARC_BUILTIN_FPSUBUS8,
10788 SPARC_BUILTIN_FPSUBUS16,
10789
10790 /* VIS 4.0B builtins. */
10791
10792 /* Note that all the DICTUNPACK* entries should be kept
10793 contiguous. */
10794 SPARC_BUILTIN_FIRST_DICTUNPACK,
10795 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10796 SPARC_BUILTIN_DICTUNPACK16,
10797 SPARC_BUILTIN_DICTUNPACK32,
10798 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10799
10800 /* Note that all the FPCMP*SHL entries should be kept
10801 contiguous. */
10802 SPARC_BUILTIN_FIRST_FPCMPSHL,
10803 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10804 SPARC_BUILTIN_FPCMPGT8SHL,
10805 SPARC_BUILTIN_FPCMPEQ8SHL,
10806 SPARC_BUILTIN_FPCMPNE8SHL,
10807 SPARC_BUILTIN_FPCMPLE16SHL,
10808 SPARC_BUILTIN_FPCMPGT16SHL,
10809 SPARC_BUILTIN_FPCMPEQ16SHL,
10810 SPARC_BUILTIN_FPCMPNE16SHL,
10811 SPARC_BUILTIN_FPCMPLE32SHL,
10812 SPARC_BUILTIN_FPCMPGT32SHL,
10813 SPARC_BUILTIN_FPCMPEQ32SHL,
10814 SPARC_BUILTIN_FPCMPNE32SHL,
10815 SPARC_BUILTIN_FPCMPULE8SHL,
10816 SPARC_BUILTIN_FPCMPUGT8SHL,
10817 SPARC_BUILTIN_FPCMPULE16SHL,
10818 SPARC_BUILTIN_FPCMPUGT16SHL,
10819 SPARC_BUILTIN_FPCMPULE32SHL,
10820 SPARC_BUILTIN_FPCMPUGT32SHL,
10821 SPARC_BUILTIN_FPCMPDE8SHL,
10822 SPARC_BUILTIN_FPCMPDE16SHL,
10823 SPARC_BUILTIN_FPCMPDE32SHL,
10824 SPARC_BUILTIN_FPCMPUR8SHL,
10825 SPARC_BUILTIN_FPCMPUR16SHL,
10826 SPARC_BUILTIN_FPCMPUR32SHL,
10827 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10828
10829 SPARC_BUILTIN_MAX
10830 };
10831
10832 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10833 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10834
10835 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10836 The instruction should require a constant operand of some sort. The
10837 function prints an error if OPVAL is not valid. */
10838
10839 static int
10840 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10841 {
10842 if (GET_CODE (opval) != CONST_INT)
10843 {
10844 error ("%qs expects a constant argument", insn_data[icode].name);
10845 return false;
10846 }
10847
10848 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10849 {
10850 error ("constant argument out of range for %qs", insn_data[icode].name);
10851 return false;
10852 }
10853 return true;
10854 }
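/* Illustrative sketch (assumption: a builtin whose last operand must be
   a literal constant, e.g. the VIS 4.0B dictunpack family): a call that
   passes a run-time value for that operand, say

     int n = get_index ();
     ... __builtin_vis_dictunpack8 (d, n) ...

   is diagnosed here instead of silently producing a bad instruction.  */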
10855
10856 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10857 function decl or NULL_TREE if the builtin was not added. */
10858
10859 static tree
10860 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10861 tree type)
10862 {
10863 tree t
10864 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10865
10866 if (t)
10867 {
10868 sparc_builtins[code] = t;
10869 sparc_builtins_icode[code] = icode;
10870 }
10871
10872 return t;
10873 }
10874
10875 /* Likewise, but also marks the function as "const". */
10876
10877 static tree
10878 def_builtin_const (const char *name, enum insn_code icode,
10879 enum sparc_builtins code, tree type)
10880 {
10881 tree t = def_builtin (name, icode, code, type);
10882
10883 if (t)
10884 TREE_READONLY (t) = 1;
10885
10886 return t;
10887 }
10888
10889 /* Implement the TARGET_INIT_BUILTINS target hook.
10890 Create builtin functions for special SPARC instructions. */
10891
10892 static void
10893 sparc_init_builtins (void)
10894 {
10895 if (TARGET_FPU)
10896 sparc_fpu_init_builtins ();
10897
10898 if (TARGET_VIS)
10899 sparc_vis_init_builtins ();
10900 }
10901
10902 /* Create builtin functions for FPU instructions. */
10903
10904 static void
10905 sparc_fpu_init_builtins (void)
10906 {
10907 tree ftype
10908 = build_function_type_list (void_type_node,
10909 build_pointer_type (unsigned_type_node), 0);
10910 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10911 SPARC_BUILTIN_LDFSR, ftype);
10912 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10913 SPARC_BUILTIN_STFSR, ftype);
10914 }
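/* Illustrative usage sketch (assumption: both builtins take a pointer to
   the caller's in-memory copy of the FSR, per the type built above):

     unsigned int fsr;
     __builtin_store_fsr (&fsr);      copy %fsr out to memory
     fsr &= ~0x1fu;                   e.g. clear the cexc field
     __builtin_load_fsr (&fsr);       reload %fsr from memory
*/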
10915
10916 /* Create builtin functions for VIS instructions. */
10917
10918 static void
10919 sparc_vis_init_builtins (void)
10920 {
10921 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
10922 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
10923 tree v4hi = build_vector_type (intHI_type_node, 4);
10924 tree v2hi = build_vector_type (intHI_type_node, 2);
10925 tree v2si = build_vector_type (intSI_type_node, 2);
10926 tree v1si = build_vector_type (intSI_type_node, 1);
10927
10928 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
10929 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
10930 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
10931 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
10932 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
10933 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
10934 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
10935 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
10936 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
10937 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
10938 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
10939 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
10940 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
10941 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
10942 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
10943 v8qi, v8qi,
10944 intDI_type_node, 0);
10945 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
10946 v8qi, v8qi, 0);
10947 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
10948 v8qi, v8qi, 0);
10949 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
10950 intSI_type_node, 0);
10951 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
10952 intSI_type_node, 0);
10953 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
10954 intSI_type_node, 0);
10955 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
10956 intDI_type_node,
10957 intDI_type_node, 0);
10958 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
10959 intSI_type_node,
10960 intSI_type_node, 0);
10961 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
10962 ptr_type_node,
10963 intSI_type_node, 0);
10964 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
10965 ptr_type_node,
10966 intDI_type_node, 0);
10967 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
10968 ptr_type_node,
10969 ptr_type_node, 0);
10970 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
10971 ptr_type_node,
10972 ptr_type_node, 0);
10973 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
10974 v4hi, v4hi, 0);
10975 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
10976 v2si, v2si, 0);
10977 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
10978 v4hi, v4hi, 0);
10979 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
10980 v2si, v2si, 0);
10981 tree void_ftype_di = build_function_type_list (void_type_node,
10982 intDI_type_node, 0);
10983 tree di_ftype_void = build_function_type_list (intDI_type_node,
10984 void_type_node, 0);
10985 tree void_ftype_si = build_function_type_list (void_type_node,
10986 intSI_type_node, 0);
10987 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
10988 float_type_node,
10989 float_type_node, 0);
10990 tree df_ftype_df_df = build_function_type_list (double_type_node,
10991 double_type_node,
10992 double_type_node, 0);
10993
10994 /* Packing and expanding vectors. */
10995 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
10996 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
10997 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
10998 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
10999 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
11000 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
11001 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
11002 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
11003 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
11004 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
11005
11006 /* Multiplications. */
11007 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
11008 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
11009 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
11010 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
11011 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
11012 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
11013 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
11014 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
11015 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
11016 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
11017 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
11018 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
11019 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
11020 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
11021
11022 /* Data aligning. */
11023 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
11024 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
11025 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
11026 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
11027 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
11028 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
11029 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
11030 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
11031
11032 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
11033 SPARC_BUILTIN_WRGSR, void_ftype_di);
11034 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
11035 SPARC_BUILTIN_RDGSR, di_ftype_void);
11036
11037 if (TARGET_ARCH64)
11038 {
11039 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
11040 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
11041 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
11042 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
11043 }
11044 else
11045 {
11046 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
11047 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
11048 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
11049 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
11050 }
11051
11052 /* Pixel distance. */
11053 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
11054 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
11055
11056 /* Edge handling. */
11057 if (TARGET_ARCH64)
11058 {
11059 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
11060 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
11061 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
11062 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
11063 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
11064 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
11065 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
11066 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
11067 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
11068 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
11069 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
11070 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
11071 }
11072 else
11073 {
11074 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
11075 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
11076 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
11077 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
11078 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
11079 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
11080 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
11081 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
11082 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
11083 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
11084 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
11085 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
11086 }
11087
11088 /* Pixel compare. */
11089 if (TARGET_ARCH64)
11090 {
11091 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
11092 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
11093 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
11094 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
11095 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
11096 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
11097 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
11098 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
11099 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
11100 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
11101 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
11102 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
11103 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
11104 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
11105 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
11106 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
11107 }
11108 else
11109 {
11110 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
11111 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
11112 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
11113 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
11114 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
11115 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
11116 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
11117 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
11118 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
11119 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
11120 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
11121 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
11122 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
11123 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
11124 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
11125 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
11126 }
11127
11128 /* Addition and subtraction. */
11129 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
11130 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
11131 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
11132 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
11133 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
11134 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
11135 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
11136 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
11137 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
11138 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
11139 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
11140 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
11141 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
11142 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
11143 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
11144 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
11145
11146 /* Three-dimensional array addressing. */
11147 if (TARGET_ARCH64)
11148 {
11149 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
11150 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
11151 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
11152 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
11153 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
11154 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
11155 }
11156 else
11157 {
11158 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
11159 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
11160 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
11161 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
11162 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
11163 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
11164 }
11165
11166 if (TARGET_VIS2)
11167 {
11168 /* Edge handling. */
11169 if (TARGET_ARCH64)
11170 {
11171 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
11172 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
11173 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
11174 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
11175 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
11176 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
11177 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
11178 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
11179 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
11180 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
11181 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
11182 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
11183 }
11184 else
11185 {
11186 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
11187 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
11188 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
11189 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
11190 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
11191 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
11192 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
11193 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
11194 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
11195 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
11196 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
11197 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
11198 }
11199
11200 /* Byte mask and shuffle. */
11201 if (TARGET_ARCH64)
11202 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
11203 SPARC_BUILTIN_BMASK, di_ftype_di_di);
11204 else
11205 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
11206 SPARC_BUILTIN_BMASK, si_ftype_si_si);
11207 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
11208 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
11209 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
11210 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
11211 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
11212 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
11213 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
11214 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
11215 }
11216
11217 if (TARGET_VIS3)
11218 {
11219 if (TARGET_ARCH64)
11220 {
11221 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
11222 SPARC_BUILTIN_CMASK8, void_ftype_di);
11223 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
11224 SPARC_BUILTIN_CMASK16, void_ftype_di);
11225 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
11226 SPARC_BUILTIN_CMASK32, void_ftype_di);
11227 }
11228 else
11229 {
11230 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
11231 SPARC_BUILTIN_CMASK8, void_ftype_si);
11232 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
11233 SPARC_BUILTIN_CMASK16, void_ftype_si);
11234 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
11235 SPARC_BUILTIN_CMASK32, void_ftype_si);
11236 }
11237
11238 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
11239 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
11240
11241 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
11242 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
11243 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
11244 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
11245 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
11246 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
11247 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
11248 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
11249 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
11250 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
11251 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
11252 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
11253 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
11254 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
11255 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
11256 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
11257
11258 if (TARGET_ARCH64)
11259 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
11260 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
11261 else
11262 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
11263 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
11264
11265 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
11266 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
11267 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
11268 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
11269 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
11270 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
11271
11272 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
11273 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
11274 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
11275 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
11276 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
11277 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
11278 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
11279 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
11280 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
11281 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
11282 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
11283 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
11284 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
11285 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
11286 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
11287 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
11288
11289 if (TARGET_ARCH64)
11290 {
11291 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
11292 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
11293 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
11294 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
11295 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
11296 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11297 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11298 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11299 }
11300 else
11301 {
11302 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11303 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11304 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11305 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11306 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11307 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11308 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11309 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11310 }
11311
11312 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11313 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11314 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11315 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11316 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11317 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11318 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11319 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11320 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11321 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11322 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11323 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11324
11325 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11326 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11327 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11328 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11329 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11330 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11331 }
11332
11333 if (TARGET_VIS4)
11334 {
11335 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11336 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11337 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11338 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11339 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11340 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11341 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11342 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11343
11344
11345 if (TARGET_ARCH64)
11346 {
11347 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11348 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11349 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11350 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11351 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11352 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11353 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11354 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11355 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11356 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11357 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11358 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11359 }
11360 else
11361 {
11362 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11363 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11364 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11365 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11366 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11367 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11368 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11369 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11370 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11371 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11372 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11373 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11374 }
11375
11376 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11377 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11378 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11379 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11380 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11381 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11382 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11383 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11384 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11385 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11386 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11387 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11388 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11389 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11390 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11391 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11392 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11393 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11394 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11395 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11396 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11397 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11398 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11399 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11400 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11401 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11402 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11403 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11404 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11405 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11406 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11407 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11408 }
11409
11410 if (TARGET_VIS4B)
11411 {
11412 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11413 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11414 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11415 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11416 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11417 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11418
11419 if (TARGET_ARCH64)
11420 {
11421 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11422 v8qi, v8qi,
11423 intSI_type_node, 0);
11424 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11425 v4hi, v4hi,
11426 intSI_type_node, 0);
11427 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11428 v2si, v2si,
11429 intSI_type_node, 0);
11430
11431 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11432 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11433 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11434 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11435 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11436 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11437 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11438 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11439
11440 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11441 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11442 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11443 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11444 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11445 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11446 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11447 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11448
11449 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11450 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11451 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11452 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11453 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11454 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11455 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11456 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11457
11458
11459 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11460 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11461 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11462 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11463
11464 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11465 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11466 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11467 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11468
11469 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11470 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11471 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11472 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11473
11474 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11475 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11476 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11477 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11478 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11479 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11480
11481 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11482 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11483 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11484 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11485 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11486 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11487
11488 }
11489 else
11490 {
11491 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11492 v8qi, v8qi,
11493 intSI_type_node, 0);
11494 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11495 v4hi, v4hi,
11496 intSI_type_node, 0);
11497 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11498 v2si, v2si,
11499 intSI_type_node, 0);
11500
11501 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11502 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11503 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11504 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11505 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11506 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11507 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11508 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11509
11510 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11511 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11512 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11513 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11514 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11515 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11516 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11517 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11518
11519 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11520 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11521 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11522 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11523 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11524 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11525 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11526 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11527
11528
11529 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11530 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11531 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11532 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11533
11534 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11535 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11536 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11537 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11538
11539 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11540 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11541 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11542 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11543
11544 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11545 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11546 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11547 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11548 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11549 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11550
11551 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11552 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11553 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11554 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11555 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11556 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
11557 }
11558 }
11559 }
11560
11561 /* Implement TARGET_BUILTIN_DECL hook. */
11562
11563 static tree
11564 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11565 {
11566 if (code >= SPARC_BUILTIN_MAX)
11567 return error_mark_node;
11568
11569 return sparc_builtins[code];
11570 }
11571
11572 /* Implement TARGET_EXPAND_BUILTIN hook. */
11573
11574 static rtx
11575 sparc_expand_builtin (tree exp, rtx target,
11576 rtx subtarget ATTRIBUTE_UNUSED,
11577 machine_mode tmode ATTRIBUTE_UNUSED,
11578 int ignore ATTRIBUTE_UNUSED)
11579 {
11580 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11581 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11582 enum insn_code icode = sparc_builtins_icode[code];
11583 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11584 call_expr_arg_iterator iter;
11585 int arg_count = 0;
11586 rtx pat, op[4];
11587 tree arg;
11588
11589 if (nonvoid)
11590 {
11591 machine_mode tmode = insn_data[icode].operand[0].mode;
11592 if (!target
11593 || GET_MODE (target) != tmode
11594 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11595 op[0] = gen_reg_rtx (tmode);
11596 else
11597 op[0] = target;
11598 }
11599 else
11600 op[0] = NULL_RTX;
11601
11602 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11603 {
11604 const struct insn_operand_data *insn_op;
11605 int idx;
11606
11607 if (arg == error_mark_node)
11608 return NULL_RTX;
11609
11610 arg_count++;
11611 idx = arg_count - !nonvoid;
11612 insn_op = &insn_data[icode].operand[idx];
11613 op[arg_count] = expand_normal (arg);
11614
11615 /* Some of the builtins require constant arguments. We check
11616 for this here. */
11617 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11618 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11619 && arg_count == 3)
11620 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11621 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11622 && arg_count == 2))
11623 {
11624 if (!check_constant_argument (icode, idx, op[arg_count]))
11625 return const0_rtx;
11626 }
11627
11628 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11629 {
11630 if (!address_operand (op[arg_count], SImode))
11631 {
11632 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11633 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11634 }
11635 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11636 }
11637
11638 else if (insn_op->mode == V1DImode
11639 && GET_MODE (op[arg_count]) == DImode)
11640 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11641
11642 else if (insn_op->mode == V1SImode
11643 && GET_MODE (op[arg_count]) == SImode)
11644 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11645
11646 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11647 insn_op->mode))
11648 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11649 }
11650
11651 switch (arg_count)
11652 {
11653 case 0:
11654 pat = GEN_FCN (icode) (op[0]);
11655 break;
11656 case 1:
11657 if (nonvoid)
11658 pat = GEN_FCN (icode) (op[0], op[1]);
11659 else
11660 pat = GEN_FCN (icode) (op[1]);
11661 break;
11662 case 2:
11663 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11664 break;
11665 case 3:
11666 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11667 break;
11668 default:
11669 gcc_unreachable ();
11670 }
11671
11672 if (!pat)
11673 return NULL_RTX;
11674
11675 emit_insn (pat);
11676
11677 return (nonvoid ? op[0] : const0_rtx);
11678 }
11679
11680 /* Return the upper 16 bits of the 8x16 multiplication. */
11681
11682 static int
11683 sparc_vis_mul8x16 (int e8, int e16)
11684 {
11685 return (e8 * e16 + 128) / 256;
11686 }
11687
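/* Worked example (illustration only): for e8 = 100 and e16 = 300 the product
   is 30000, and (30000 + 128) / 256 = 117, i.e. the 24-bit product scaled
   down by 256 with rounding. This is the scalar step used by the constant
   folding of the fmul8x16 family below. */
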
11688 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11689 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11690
11691 static void
11692 sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
11693 tree inner_type, tree cst0, tree cst1)
11694 {
11695 unsigned i, num = VECTOR_CST_NELTS (cst0);
11696 int scale;
11697
11698 switch (fncode)
11699 {
11700 case SPARC_BUILTIN_FMUL8X16:
11701 for (i = 0; i < num; ++i)
11702 {
11703 int val
11704 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11705 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11706 n_elts->quick_push (build_int_cst (inner_type, val));
11707 }
11708 break;
11709
11710 case SPARC_BUILTIN_FMUL8X16AU:
11711 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11712
11713 for (i = 0; i < num; ++i)
11714 {
11715 int val
11716 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11717 scale);
11718 n_elts->quick_push (build_int_cst (inner_type, val));
11719 }
11720 break;
11721
11722 case SPARC_BUILTIN_FMUL8X16AL:
11723 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11724
11725 for (i = 0; i < num; ++i)
11726 {
11727 int val
11728 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11729 scale);
11730 n_elts->quick_push (build_int_cst (inner_type, val));
11731 }
11732 break;
11733
11734 default:
11735 gcc_unreachable ();
11736 }
11737 }
11738
11739 /* Implement TARGET_FOLD_BUILTIN hook.
11740
11741 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11742 result of the function call is ignored. NULL_TREE is returned if the
11743 function could not be folded. */
11744
11745 static tree
11746 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11747 tree *args, bool ignore)
11748 {
11749 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11750 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11751 tree arg0, arg1, arg2;
11752
11753 if (ignore)
11754 switch (code)
11755 {
11756 case SPARC_BUILTIN_LDFSR:
11757 case SPARC_BUILTIN_STFSR:
11758 case SPARC_BUILTIN_ALIGNADDR:
11759 case SPARC_BUILTIN_WRGSR:
11760 case SPARC_BUILTIN_BMASK:
11761 case SPARC_BUILTIN_CMASK8:
11762 case SPARC_BUILTIN_CMASK16:
11763 case SPARC_BUILTIN_CMASK32:
11764 break;
11765
11766 default:
11767 return build_zero_cst (rtype);
11768 }
11769
11770 switch (code)
11771 {
11772 case SPARC_BUILTIN_FEXPAND:
11773 arg0 = args[0];
11774 STRIP_NOPS (arg0);
11775
11776 if (TREE_CODE (arg0) == VECTOR_CST)
11777 {
11778 tree inner_type = TREE_TYPE (rtype);
11779 unsigned i;
11780
11781 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11782 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11783 {
11784 unsigned HOST_WIDE_INT val
11785 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
11786 n_elts.quick_push (build_int_cst (inner_type, val << 4));
11787 }
11788 return n_elts.build ();
11789 }
11790 break;
11791
11792 case SPARC_BUILTIN_FMUL8X16:
11793 case SPARC_BUILTIN_FMUL8X16AU:
11794 case SPARC_BUILTIN_FMUL8X16AL:
11795 arg0 = args[0];
11796 arg1 = args[1];
11797 STRIP_NOPS (arg0);
11798 STRIP_NOPS (arg1);
11799
11800 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11801 {
11802 tree inner_type = TREE_TYPE (rtype);
11803 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11804 sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
11805 return n_elts.build ();
11806 }
11807 break;
11808
11809 case SPARC_BUILTIN_FPMERGE:
11810 arg0 = args[0];
11811 arg1 = args[1];
11812 STRIP_NOPS (arg0);
11813 STRIP_NOPS (arg1);
11814
11815 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11816 {
11817 tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
11818 unsigned i;
11819 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11820 {
11821 n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
11822 n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
11823 }
11824
11825 return n_elts.build ();
11826 }
11827 break;
11828
11829 case SPARC_BUILTIN_PDIST:
11830 case SPARC_BUILTIN_PDISTN:
11831 arg0 = args[0];
11832 arg1 = args[1];
11833 STRIP_NOPS (arg0);
11834 STRIP_NOPS (arg1);
11835 if (code == SPARC_BUILTIN_PDIST)
11836 {
11837 arg2 = args[2];
11838 STRIP_NOPS (arg2);
11839 }
11840 else
11841 arg2 = integer_zero_node;
11842
11843 if (TREE_CODE (arg0) == VECTOR_CST
11844 && TREE_CODE (arg1) == VECTOR_CST
11845 && TREE_CODE (arg2) == INTEGER_CST)
11846 {
11847 bool overflow = false;
11848 widest_int result = wi::to_widest (arg2);
11849 widest_int tmp;
11850 unsigned i;
11851
11852 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11853 {
11854 tree e0 = VECTOR_CST_ELT (arg0, i);
11855 tree e1 = VECTOR_CST_ELT (arg1, i);
11856
11857 bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11858
11859 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11860 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11861 if (wi::neg_p (tmp))
11862 tmp = wi::neg (tmp, &neg2_ovf);
11863 else
11864 neg2_ovf = false;
11865 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11866 overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
11867 }
11868
11869 gcc_assert (!overflow);
11870
11871 return wide_int_to_tree (rtype, result);
11872 }
11873
11874 default:
11875 break;
11876 }
11877
11878 return NULL_TREE;
11879 }
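
/* Illustrative sketch (not part of GCC): the PDIST/PDISTN folding above is
   the usual sum-of-absolute-differences, i.e. for constant operands it is
   equivalent to the scalar reference below (names are hypothetical):

     long long
     pdist_ref (const unsigned char a[8], const unsigned char b[8],
                long long acc)
     {
       for (int i = 0; i < 8; i++)
         acc += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
       return acc;
     }

   with ACC being 0 for PDISTN. */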
11880 \f
11881 /* ??? This duplicates information provided to the compiler by the
11882 ??? scheduler description. Some day, teach genautomata to output
11883 ??? the latencies and then CSE will just use that. */
11884
11885 static bool
11886 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11887 int opno ATTRIBUTE_UNUSED,
11888 int *total, bool speed ATTRIBUTE_UNUSED)
11889 {
11890 int code = GET_CODE (x);
11891 bool float_mode_p = FLOAT_MODE_P (mode);
11892
11893 switch (code)
11894 {
11895 case CONST_INT:
11896 if (SMALL_INT (x))
11897 *total = 0;
11898 else
11899 *total = 2;
11900 return true;
11901
11902 case CONST_WIDE_INT:
11903 *total = 0;
11904 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11905 *total += 2;
11906 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11907 *total += 2;
11908 return true;
11909
11910 case HIGH:
11911 *total = 2;
11912 return true;
11913
11914 case CONST:
11915 case LABEL_REF:
11916 case SYMBOL_REF:
11917 *total = 4;
11918 return true;
11919
11920 case CONST_DOUBLE:
11921 *total = 8;
11922 return true;
11923
11924 case MEM:
11925 /* If outer-code was a sign or zero extension, a cost
11926 of COSTS_N_INSNS (1) was already added in. This is
11927 why we are subtracting it back out. */
11928 if (outer_code == ZERO_EXTEND)
11929 {
11930 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
11931 }
11932 else if (outer_code == SIGN_EXTEND)
11933 {
11934 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
11935 }
11936 else if (float_mode_p)
11937 {
11938 *total = sparc_costs->float_load;
11939 }
11940 else
11941 {
11942 *total = sparc_costs->int_load;
11943 }
11944
11945 return true;
11946
11947 case PLUS:
11948 case MINUS:
11949 if (float_mode_p)
11950 *total = sparc_costs->float_plusminus;
11951 else
11952 *total = COSTS_N_INSNS (1);
11953 return false;
11954
11955 case FMA:
11956 {
11957 rtx sub;
11958
11959 gcc_assert (float_mode_p);
11960 *total = sparc_costs->float_mul;
11961
11962 sub = XEXP (x, 0);
11963 if (GET_CODE (sub) == NEG)
11964 sub = XEXP (sub, 0);
11965 *total += rtx_cost (sub, mode, FMA, 0, speed);
11966
11967 sub = XEXP (x, 2);
11968 if (GET_CODE (sub) == NEG)
11969 sub = XEXP (sub, 0);
11970 *total += rtx_cost (sub, mode, FMA, 2, speed);
11971 return true;
11972 }
11973
11974 case MULT:
11975 if (float_mode_p)
11976 *total = sparc_costs->float_mul;
11977 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
11978 *total = COSTS_N_INSNS (25);
11979 else
11980 {
11981 int bit_cost;
11982
11983 bit_cost = 0;
11984 if (sparc_costs->int_mul_bit_factor)
11985 {
11986 int nbits;
11987
11988 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
11989 {
11990 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
11991 for (nbits = 0; value != 0; value &= value - 1)
11992 nbits++;
11993 }
11994 else
11995 nbits = 7;
11996
11997 if (nbits < 3)
11998 nbits = 3;
11999 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
12000 bit_cost = COSTS_N_INSNS (bit_cost);
12001 }
12002
12003 if (mode == DImode || !TARGET_HARD_MUL)
12004 *total = sparc_costs->int_mulX + bit_cost;
12005 else
12006 *total = sparc_costs->int_mul + bit_cost;
12007 }
12008 return false;
12009
12010 case ASHIFT:
12011 case ASHIFTRT:
12012 case LSHIFTRT:
12013 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
12014 return false;
12015
12016 case DIV:
12017 case UDIV:
12018 case MOD:
12019 case UMOD:
12020 if (float_mode_p)
12021 {
12022 if (mode == DFmode)
12023 *total = sparc_costs->float_div_df;
12024 else
12025 *total = sparc_costs->float_div_sf;
12026 }
12027 else
12028 {
12029 if (mode == DImode)
12030 *total = sparc_costs->int_divX;
12031 else
12032 *total = sparc_costs->int_div;
12033 }
12034 return false;
12035
12036 case NEG:
12037 if (! float_mode_p)
12038 {
12039 *total = COSTS_N_INSNS (1);
12040 return false;
12041 }
12042 /* FALLTHRU */
12043
12044 case ABS:
12045 case FLOAT:
12046 case UNSIGNED_FLOAT:
12047 case FIX:
12048 case UNSIGNED_FIX:
12049 case FLOAT_EXTEND:
12050 case FLOAT_TRUNCATE:
12051 *total = sparc_costs->float_move;
12052 return false;
12053
12054 case SQRT:
12055 if (mode == DFmode)
12056 *total = sparc_costs->float_sqrt_df;
12057 else
12058 *total = sparc_costs->float_sqrt_sf;
12059 return false;
12060
12061 case COMPARE:
12062 if (float_mode_p)
12063 *total = sparc_costs->float_cmp;
12064 else
12065 *total = COSTS_N_INSNS (1);
12066 return false;
12067
12068 case IF_THEN_ELSE:
12069 if (float_mode_p)
12070 *total = sparc_costs->float_cmove;
12071 else
12072 *total = sparc_costs->int_cmove;
12073 return false;
12074
12075 case IOR:
12076 /* Handle the NAND vector patterns. */
12077 if (sparc_vector_mode_supported_p (mode)
12078 && GET_CODE (XEXP (x, 0)) == NOT
12079 && GET_CODE (XEXP (x, 1)) == NOT)
12080 {
12081 *total = COSTS_N_INSNS (1);
12082 return true;
12083 }
12084 else
12085 return false;
12086
12087 default:
12088 return false;
12089 }
12090 }
12091
12092 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
12093
12094 static inline bool
12095 general_or_i64_p (reg_class_t rclass)
12096 {
12097 return (rclass == GENERAL_REGS || rclass == I64_REGS);
12098 }
12099
12100 /* Implement TARGET_REGISTER_MOVE_COST. */
12101
12102 static int
12103 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
12104 reg_class_t from, reg_class_t to)
12105 {
12106 bool need_memory = false;
12107
12108 /* This helps postreload CSE to eliminate redundant comparisons. */
12109 if (from == NO_REGS || to == NO_REGS)
12110 return 100;
12111
12112 if (from == FPCC_REGS || to == FPCC_REGS)
12113 need_memory = true;
12114 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
12115 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
12116 {
12117 if (TARGET_VIS3)
12118 {
12119 int size = GET_MODE_SIZE (mode);
12120 if (size == 8 || size == 4)
12121 {
12122 if (! TARGET_ARCH32 || size == 4)
12123 return 4;
12124 else
12125 return 6;
12126 }
12127 }
12128 need_memory = true;
12129 }
12130
12131 if (need_memory)
12132 {
12133 if (sparc_cpu == PROCESSOR_ULTRASPARC
12134 || sparc_cpu == PROCESSOR_ULTRASPARC3
12135 || sparc_cpu == PROCESSOR_NIAGARA
12136 || sparc_cpu == PROCESSOR_NIAGARA2
12137 || sparc_cpu == PROCESSOR_NIAGARA3
12138 || sparc_cpu == PROCESSOR_NIAGARA4
12139 || sparc_cpu == PROCESSOR_NIAGARA7
12140 || sparc_cpu == PROCESSOR_M8)
12141 return 12;
12142
12143 return 6;
12144 }
12145
12146 return 2;
12147 }
12148
12149 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
12150 This is achieved by means of a manual dynamic stack space allocation in
12151 the current frame. We make the assumption that SEQ doesn't contain any
12152 function calls, with the possible exception of calls to the GOT helper. */
12153
12154 static void
12155 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
12156 {
12157 /* We must preserve the lowest 16 words for the register save area. */
12158 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
12159 /* We really need only 2 words of fresh stack space. */
12160 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
12161
12162 rtx slot
12163 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
12164 SPARC_STACK_BIAS + offset));
12165
12166 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
12167 emit_insn (gen_rtx_SET (slot, reg));
12168 if (reg2)
12169 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
12170 reg2));
12171 emit_insn (seq);
12172 if (reg2)
12173 emit_insn (gen_rtx_SET (reg2,
12174 adjust_address (slot, word_mode, UNITS_PER_WORD)));
12175 emit_insn (gen_rtx_SET (reg, slot));
12176 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
12177 }
12178
12179 /* Output the assembler code for a thunk function. THUNK_DECL is the
12180 declaration for the thunk function itself, FUNCTION is the decl for
12181 the target function. DELTA is an immediate constant offset to be
12182 added to THIS. If VCALL_OFFSET is nonzero, the word at address
12183 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
12184
12185 static void
12186 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12187 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12188 tree function)
12189 {
12190 rtx this_rtx, funexp;
12191 rtx_insn *insn;
12192 unsigned int int_arg_first;
12193
12194 reload_completed = 1;
12195 epilogue_completed = 1;
12196
12197 emit_note (NOTE_INSN_PROLOGUE_END);
12198
12199 if (TARGET_FLAT)
12200 {
12201 sparc_leaf_function_p = 1;
12202
12203 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12204 }
12205 else if (flag_delayed_branch)
12206 {
12207 /* We will emit a regular sibcall below, so we need to instruct
12208 output_sibcall that we are in a leaf function. */
12209 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
12210
12211 /* This will cause final.c to invoke leaf_renumber_regs so we
12212 must behave as if we were in a not-yet-leafified function. */
12213 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
12214 }
12215 else
12216 {
12217 /* We will emit the sibcall manually below, so we will need to
12218 manually spill non-leaf registers. */
12219 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
12220
12221 /* We really are in a leaf function. */
12222 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12223 }
12224
12225 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
12226 returns a structure, the structure return pointer is there instead. */
12227 if (TARGET_ARCH64
12228 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12229 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
12230 else
12231 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
12232
12233 /* Add DELTA. When possible use a plain add, otherwise load it into
12234 a register first. */
12235 if (delta)
12236 {
12237 rtx delta_rtx = GEN_INT (delta);
12238
12239 if (! SPARC_SIMM13_P (delta))
12240 {
12241 rtx scratch = gen_rtx_REG (Pmode, 1);
12242 emit_move_insn (scratch, delta_rtx);
12243 delta_rtx = scratch;
12244 }
12245
12246 /* THIS_RTX += DELTA. */
12247 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
12248 }
12249
12250 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
12251 if (vcall_offset)
12252 {
12253 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
12254 rtx scratch = gen_rtx_REG (Pmode, 1);
12255
12256 gcc_assert (vcall_offset < 0);
12257
12258 /* SCRATCH = *THIS_RTX. */
12259 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
12260
12261 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
12262 may not have any available scratch register at this point. */
12263 if (SPARC_SIMM13_P (vcall_offset))
12264 ;
12265 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
12266 else if (! fixed_regs[5]
12267 /* The below sequence is made up of at least 2 insns,
12268 while the default method may need only one. */
12269 && vcall_offset < -8192)
12270 {
12271 rtx scratch2 = gen_rtx_REG (Pmode, 5);
12272 emit_move_insn (scratch2, vcall_offset_rtx);
12273 vcall_offset_rtx = scratch2;
12274 }
12275 else
12276 {
12277 rtx increment = GEN_INT (-4096);
12278
12279 /* VCALL_OFFSET is a negative number whose typical range can be
12280 estimated as -32768..0 in 32-bit mode. In almost all cases
12281 it is therefore cheaper to emit multiple add insns than
12282 spilling and loading the constant into a register (at least
12283 6 insns). */
12284 while (! SPARC_SIMM13_P (vcall_offset))
12285 {
12286 emit_insn (gen_add2_insn (scratch, increment));
12287 vcall_offset += 4096;
12288 }
12289 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
12290 }
12291
12292 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
12293 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
12294 gen_rtx_PLUS (Pmode,
12295 scratch,
12296 vcall_offset_rtx)));
12297
12298 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
12299 emit_insn (gen_add2_insn (this_rtx, scratch));
12300 }
12301
12302 /* Generate a tail call to the target function. */
12303 if (! TREE_USED (function))
12304 {
12305 assemble_external (function);
12306 TREE_USED (function) = 1;
12307 }
12308 funexp = XEXP (DECL_RTL (function), 0);
12309
12310 if (flag_delayed_branch)
12311 {
12312 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12313 insn = emit_call_insn (gen_sibcall (funexp));
12314 SIBLING_CALL_P (insn) = 1;
12315 }
12316 else
12317 {
12318 /* The hoops we have to jump through in order to generate a sibcall
12319 without using delay slots... */
12320 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12321
12322 if (flag_pic)
12323 {
12324 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12325 start_sequence ();
12326 load_got_register (); /* clobbers %o7 */
12327 if (!TARGET_VXWORKS_RTP)
12328 pic_offset_table_rtx = global_offset_table_rtx;
12329 scratch = sparc_legitimize_pic_address (funexp, scratch);
12330 seq = get_insns ();
12331 end_sequence ();
12332 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12333 }
12334 else if (TARGET_ARCH32)
12335 {
12336 emit_insn (gen_rtx_SET (scratch,
12337 gen_rtx_HIGH (SImode, funexp)));
12338 emit_insn (gen_rtx_SET (scratch,
12339 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12340 }
12341 else /* TARGET_ARCH64 */
12342 {
12343 switch (sparc_cmodel)
12344 {
12345 case CM_MEDLOW:
12346 case CM_MEDMID:
12347 /* The destination can serve as a temporary. */
12348 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12349 break;
12350
12351 case CM_MEDANY:
12352 case CM_EMBMEDANY:
12353 /* The destination cannot serve as a temporary. */
12354 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12355 start_sequence ();
12356 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12357 seq = get_insns ();
12358 end_sequence ();
12359 emit_and_preserve (seq, spill_reg, 0);
12360 break;
12361
12362 default:
12363 gcc_unreachable ();
12364 }
12365 }
12366
12367 emit_jump_insn (gen_indirect_jump (scratch));
12368 }
12369
12370 emit_barrier ();
12371
12372 /* Run just enough of rest_of_compilation to get the insns emitted.
12373 There's not really enough bulk here to make other passes such as
12374 instruction scheduling worth while. Note that use_thunk calls
12375 assemble_start_function and assemble_end_function. */
12376 insn = get_insns ();
12377 shorten_branches (insn);
12378 final_start_function (insn, file, 1);
12379 final (insn, file, 1);
12380 final_end_function ();
12381
12382 reload_completed = 0;
12383 epilogue_completed = 0;
12384 }
12385
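/* Illustrative sketch (not part of GCC): conceptually the emitted thunk
   adjusts the incoming this pointer and then tail-calls FUNCTION, roughly
   as in the pseudo-C below (names are hypothetical; DELTA and VCALL_OFFSET
   stand for the constants described above):

     void *
     thunk_adjust_this (void *this_ptr)
     {
       this_ptr = (char *) this_ptr + DELTA;
       if (VCALL_OFFSET != 0)
         this_ptr = (char *) this_ptr
                    + *(ptrdiff_t *) (*(char **) this_ptr + VCALL_OFFSET);
       return this_ptr;  // then jump to FUNCTION with the adjusted pointer
     }
*/
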
12386 /* Return true if sparc_output_mi_thunk would be able to output the
12387 assembler code for the thunk function specified by the arguments
12388 it is passed, and false otherwise. */
12389 static bool
12390 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12391 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12392 HOST_WIDE_INT vcall_offset,
12393 const_tree function ATTRIBUTE_UNUSED)
12394 {
12395 /* Bound the loop used in the default method above. */
12396 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12397 }
12398
12399 /* How to allocate a 'struct machine_function'. */
12400
12401 static struct machine_function *
12402 sparc_init_machine_status (void)
12403 {
12404 return ggc_cleared_alloc<machine_function> ();
12405 }
12406
12407 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12408 We need to emit DTP-relative relocations. */
12409
12410 static void
12411 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12412 {
12413 switch (size)
12414 {
12415 case 4:
12416 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12417 break;
12418 case 8:
12419 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12420 break;
12421 default:
12422 gcc_unreachable ();
12423 }
12424 output_addr_const (file, x);
12425 fputs (")", file);
12426 }
12427
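/* For example (illustration only): a 4-byte DTP-relative reference to a TLS
   symbol "x" is emitted by the function above as

     .word %r_tls_dtpoff32(x)

   while the 8-byte case uses .xword and %r_tls_dtpoff64. */
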
12428 /* Do whatever processing is required at the end of a file. */
12429
12430 static void
12431 sparc_file_end (void)
12432 {
12433 /* If we need to emit the special GOT helper function, do so now. */
12434 if (got_helper_rtx)
12435 {
12436 const char *name = XSTR (got_helper_rtx, 0);
12437 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12438 #ifdef DWARF2_UNWIND_INFO
12439 bool do_cfi;
12440 #endif
12441
12442 if (USE_HIDDEN_LINKONCE)
12443 {
12444 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12445 get_identifier (name),
12446 build_function_type_list (void_type_node,
12447 NULL_TREE));
12448 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12449 NULL_TREE, void_type_node);
12450 TREE_PUBLIC (decl) = 1;
12451 TREE_STATIC (decl) = 1;
12452 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12453 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12454 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12455 resolve_unique_section (decl, 0, flag_function_sections);
12456 allocate_struct_function (decl, true);
12457 cfun->is_thunk = 1;
12458 current_function_decl = decl;
12459 init_varasm_status ();
12460 assemble_start_function (decl, name);
12461 }
12462 else
12463 {
12464 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12465 switch_to_section (text_section);
12466 if (align > 0)
12467 ASM_OUTPUT_ALIGN (asm_out_file, align);
12468 ASM_OUTPUT_LABEL (asm_out_file, name);
12469 }
12470
12471 #ifdef DWARF2_UNWIND_INFO
12472 do_cfi = dwarf2out_do_cfi_asm ();
12473 if (do_cfi)
12474 fprintf (asm_out_file, "\t.cfi_startproc\n");
12475 #endif
12476 if (flag_delayed_branch)
12477 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12478 reg_name, reg_name);
12479 else
12480 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12481 reg_name, reg_name);
12482 #ifdef DWARF2_UNWIND_INFO
12483 if (do_cfi)
12484 fprintf (asm_out_file, "\t.cfi_endproc\n");
12485 #endif
12486 }
12487
12488 if (NEED_INDICATE_EXEC_STACK)
12489 file_end_indicate_exec_stack ();
12490
12491 #ifdef TARGET_SOLARIS
12492 solaris_file_end ();
12493 #endif
12494 }
12495
12496 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12497 /* Implement TARGET_MANGLE_TYPE. */
12498
12499 static const char *
12500 sparc_mangle_type (const_tree type)
12501 {
12502 if (TARGET_ARCH32
12503 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12504 && TARGET_LONG_DOUBLE_128)
12505 return "g";
12506
12507 /* For all other types, use normal C++ mangling. */
12508 return NULL;
12509 }
12510 #endif
12511
12512 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12513 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12514 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
12515
12516 void
12517 sparc_emit_membar_for_model (enum memmodel model,
12518 int load_store, int before_after)
12519 {
12520 /* Bits for the MEMBAR mmask field. */
12521 const int LoadLoad = 1;
12522 const int StoreLoad = 2;
12523 const int LoadStore = 4;
12524 const int StoreStore = 8;
12525
12526 int mm = 0, implied = 0;
12527
12528 switch (sparc_memory_model)
12529 {
12530 case SMM_SC:
12531 /* Sequential Consistency. All memory transactions are immediately
12532 visible in sequential execution order. No barriers needed. */
12533 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12534 break;
12535
12536 case SMM_TSO:
12537 /* Total Store Ordering: all memory transactions with store semantics
12538 are followed by an implied StoreStore. */
12539 implied |= StoreStore;
12540
12541 /* If we're not looking for a raw barrier (before+after), then atomic
12542 operations get the benefit of being both load and store. */
12543 if (load_store == 3 && before_after == 1)
12544 implied |= StoreLoad;
12545 /* FALLTHRU */
12546
12547 case SMM_PSO:
12548 /* Partial Store Ordering: all memory transactions with load semantics
12549 are followed by an implied LoadLoad | LoadStore. */
12550 implied |= LoadLoad | LoadStore;
12551
12552 /* If we're not looking for a raw barrier (before+after), then atomic
12553 operations get the benefit of being both load and store. */
12554 if (load_store == 3 && before_after == 2)
12555 implied |= StoreLoad | StoreStore;
12556 /* FALLTHRU */
12557
12558 case SMM_RMO:
12559 /* Relaxed Memory Ordering: no implicit bits. */
12560 break;
12561
12562 default:
12563 gcc_unreachable ();
12564 }
12565
12566 if (before_after & 1)
12567 {
12568 if (is_mm_release (model) || is_mm_acq_rel (model)
12569 || is_mm_seq_cst (model))
12570 {
12571 if (load_store & 1)
12572 mm |= LoadLoad | StoreLoad;
12573 if (load_store & 2)
12574 mm |= LoadStore | StoreStore;
12575 }
12576 }
12577 if (before_after & 2)
12578 {
12579 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12580 || is_mm_seq_cst (model))
12581 {
12582 if (load_store & 1)
12583 mm |= LoadLoad | LoadStore;
12584 if (load_store & 2)
12585 mm |= StoreLoad | StoreStore;
12586 }
12587 }
12588
12589 /* Remove the bits implied by the system memory model. */
12590 mm &= ~implied;
12591
12592 /* For raw barriers (before+after), always emit a barrier.
12593 This will become a compile-time barrier if needed. */
12594 if (mm || before_after == 3)
12595 emit_insn (gen_membar (GEN_INT (mm)));
12596 }
12597
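/* Worked example (illustration only), assuming sparc_memory_model == SMM_TSO
   (the usual default): a full seq_cst fence corresponds to load_store == 3
   and before_after == 3. The TSO/PSO cases above make LoadLoad, LoadStore
   and StoreStore implicit (implied == 13), while the seq_cst handling
   requests all four bits (mm == 15); after masking, only StoreLoad survives,
   so the expansion is a single "membar #StoreLoad" -- the classic TSO
   fence. */
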
12598 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing a 32-bit
12599 compare and swap on the word containing the byte or half-word. */
12600
12601 static void
12602 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12603 rtx oldval, rtx newval)
12604 {
12605 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12606 rtx addr = gen_reg_rtx (Pmode);
12607 rtx off = gen_reg_rtx (SImode);
12608 rtx oldv = gen_reg_rtx (SImode);
12609 rtx newv = gen_reg_rtx (SImode);
12610 rtx oldvalue = gen_reg_rtx (SImode);
12611 rtx newvalue = gen_reg_rtx (SImode);
12612 rtx res = gen_reg_rtx (SImode);
12613 rtx resv = gen_reg_rtx (SImode);
12614 rtx memsi, val, mask, cc;
12615
12616 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12617
12618 if (Pmode != SImode)
12619 addr1 = gen_lowpart (SImode, addr1);
12620 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12621
12622 memsi = gen_rtx_MEM (SImode, addr);
12623 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12624 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12625
12626 val = copy_to_reg (memsi);
12627
12628 emit_insn (gen_rtx_SET (off,
12629 gen_rtx_XOR (SImode, off,
12630 GEN_INT (GET_MODE (mem) == QImode
12631 ? 3 : 2))));
12632
12633 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12634
12635 if (GET_MODE (mem) == QImode)
12636 mask = force_reg (SImode, GEN_INT (0xff));
12637 else
12638 mask = force_reg (SImode, GEN_INT (0xffff));
12639
12640 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12641
12642 emit_insn (gen_rtx_SET (val,
12643 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12644 val)));
12645
12646 oldval = gen_lowpart (SImode, oldval);
12647 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12648
12649 newval = gen_lowpart_common (SImode, newval);
12650 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12651
12652 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12653
12654 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12655
12656 rtx_code_label *end_label = gen_label_rtx ();
12657 rtx_code_label *loop_label = gen_label_rtx ();
12658 emit_label (loop_label);
12659
12660 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12661
12662 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12663
12664 emit_move_insn (bool_result, const1_rtx);
12665
12666 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12667
12668 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12669
12670 emit_insn (gen_rtx_SET (resv,
12671 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12672 res)));
12673
12674 emit_move_insn (bool_result, const0_rtx);
12675
12676 cc = gen_compare_reg_1 (NE, resv, val);
12677 emit_insn (gen_rtx_SET (val, resv));
12678
12679 /* Use cbranchcc4 to separate the compare and branch! */
12680 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12681 cc, const0_rtx, loop_label));
12682
12683 emit_label (end_label);
12684
12685 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12686
12687 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12688
12689 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
12690 }
12691
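/* Informally, the expansion above amounts to the following C-level sketch
   for a QImode MEM (illustration only: __cas_32 stands for the 32-bit CAS,
   p for the original address, and bool_result/result for the output
   operands; the shift computation reflects big-endian byte numbering):

     uint32_t *wordp = (uint32_t *) ((uintptr_t) p & -4);
     int shift = (((uintptr_t) p & 3) ^ 3) * 8;
     uint32_t mask = (uint32_t) 0xff << shift;
     uint32_t rest = *wordp & ~mask;
     uint32_t oldv = ((uint32_t) oldval << shift) & mask;
     uint32_t newv = ((uint32_t) newval << shift) & mask;
     uint32_t res;

     for (;;)
       {
         bool_result = 1;
         res = __cas_32 (wordp, oldv | rest, newv | rest);
         if (res == (oldv | rest))
           break;
         bool_result = 0;
         if ((res & ~mask) == rest)
           break;
         rest = res & ~mask;
       }
     result = (res & mask) >> shift;

   The first break is the success path; the second is a genuine failure
   (the byte itself differed); otherwise only unrelated bytes of the word
   changed and the loop retries with the freshly observed value.  */
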
12692 /* Expand code to perform a compare-and-swap. */
12693
12694 void
12695 sparc_expand_compare_and_swap (rtx operands[])
12696 {
12697 rtx bval, retval, mem, oldval, newval;
12698 machine_mode mode;
12699 enum memmodel model;
12700
12701 bval = operands[0];
12702 retval = operands[1];
12703 mem = operands[2];
12704 oldval = operands[3];
12705 newval = operands[4];
12706 model = (enum memmodel) INTVAL (operands[6]);
12707 mode = GET_MODE (mem);
12708
12709 sparc_emit_membar_for_model (model, 3, 1);
12710
12711 if (reg_overlap_mentioned_p (retval, oldval))
12712 oldval = copy_to_reg (oldval);
12713
12714 if (mode == QImode || mode == HImode)
12715 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12716 else
12717 {
12718 rtx (*gen) (rtx, rtx, rtx, rtx);
12719 rtx x;
12720
12721 if (mode == SImode)
12722 gen = gen_atomic_compare_and_swapsi_1;
12723 else
12724 gen = gen_atomic_compare_and_swapdi_1;
12725 emit_insn (gen (retval, mem, oldval, newval));
12726
12727 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12728 if (x != bval)
12729 convert_move (bval, x, 1);
12730 }
12731
12732 sparc_emit_membar_for_model (model, 3, 2);
12733 }
12734
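/* A loose usage note for the expander above: it sits behind the
   atomic_compare_and_swap pattern, i.e. it is what a call like

     __atomic_compare_exchange_n (&x, &expected, desired, 0,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

   ultimately reaches.  The CAS instruction itself provides the atomicity;
   the two sparc_emit_membar_for_model calls only add whatever ordering the
   requested memory model still needs around it.  */
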
12735 void
12736 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12737 {
12738 rtx t_1, t_2, t_3;
12739
12740 sel = gen_lowpart (DImode, sel);
12741 switch (vmode)
12742 {
12743 case E_V2SImode:
12744 /* inp = xxxxxxxAxxxxxxxB */
12745 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12746 NULL_RTX, 1, OPTAB_DIRECT);
12747 /* t_1 = ....xxxxxxxAxxx. */
12748 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12749 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12750 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12751 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12752 /* sel = .......B */
12753 /* t_1 = ...A.... */
12754 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12755 /* sel = ...A...B */
12756 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12757 /* sel = AAAABBBB * 4 */
12758 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12759 /* sel = { A*4, A*4+1, A*4+2, ... } */
12760 break;
12761
12762 case E_V4HImode:
12763 /* inp = xxxAxxxBxxxCxxxD */
12764 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12765 NULL_RTX, 1, OPTAB_DIRECT);
12766 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12767 NULL_RTX, 1, OPTAB_DIRECT);
12768 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12769 NULL_RTX, 1, OPTAB_DIRECT);
12770 /* t_1 = ..xxxAxxxBxxxCxx */
12771 /* t_2 = ....xxxAxxxBxxxC */
12772 /* t_3 = ......xxxAxxxBxx */
12773 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12774 GEN_INT (0x07),
12775 NULL_RTX, 1, OPTAB_DIRECT);
12776 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12777 GEN_INT (0x0700),
12778 NULL_RTX, 1, OPTAB_DIRECT);
12779 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12780 GEN_INT (0x070000),
12781 NULL_RTX, 1, OPTAB_DIRECT);
12782 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12783 GEN_INT (0x07000000),
12784 NULL_RTX, 1, OPTAB_DIRECT);
12785 /* sel = .......D */
12786 /* t_1 = .....C.. */
12787 /* t_2 = ...B.... */
12788 /* t_3 = .A...... */
12789 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12790 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12791 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12792 /* sel = .A.B.C.D */
12793 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12794 /* sel = AABBCCDD * 2 */
12795 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12796 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12797 break;
12798
12799 case E_V8QImode:
12800 /* input = xAxBxCxDxExFxGxH */
12801 sel = expand_simple_binop (DImode, AND, sel,
12802 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12803 | 0x0f0f0f0f),
12804 NULL_RTX, 1, OPTAB_DIRECT);
12805 /* sel = .A.B.C.D.E.F.G.H */
12806 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12807 NULL_RTX, 1, OPTAB_DIRECT);
12808 /* t_1 = ..A.B.C.D.E.F.G. */
12809 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12810 NULL_RTX, 1, OPTAB_DIRECT);
12811 /* sel = .AABBCCDDEEFFGGH */
12812 sel = expand_simple_binop (DImode, AND, sel,
12813 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12814 | 0xff00ff),
12815 NULL_RTX, 1, OPTAB_DIRECT);
12816 /* sel = ..AB..CD..EF..GH */
12817 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12818 NULL_RTX, 1, OPTAB_DIRECT);
12819 /* t_1 = ....AB..CD..EF.. */
12820 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12821 NULL_RTX, 1, OPTAB_DIRECT);
12822 /* sel = ..ABABCDCDEFEFGH */
12823 sel = expand_simple_binop (DImode, AND, sel,
12824 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12825 NULL_RTX, 1, OPTAB_DIRECT);
12826 /* sel = ....ABCD....EFGH */
12827 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12828 NULL_RTX, 1, OPTAB_DIRECT);
12829 /* t_1 = ........ABCD.... */
12830 sel = gen_lowpart (SImode, sel);
12831 t_1 = gen_lowpart (SImode, t_1);
12832 break;
12833
12834 default:
12835 gcc_unreachable ();
12836 }
12837
12838 /* Always perform the final addition/merge within the bmask insn. */
12839 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
12840 }
12841
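/* A worked example of the V4HImode case above (illustration only): for a
   selector vector { 5, 1, 6, 2 } the packed value becomes

     sel  = 0x05010602               .A.B.C.D
     sel *= 0x22   -->  0xaa22cc44   AABBCCDD * 2
     bmask adds 0x01010101  -->  0xab23cd45

   i.e. nibble pairs { 10,11, 2,3, 12,13, 4,5 }, which are exactly the byte
   positions of half-words 5, 1, 6 and 2 in the 16-byte concatenation of
   the two source operands consumed by the following BSHUFFLE.  */
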
12842 /* Implement TARGET_VEC_PERM_CONST. */
12843
12844 static bool
12845 sparc_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
12846 rtx op1, const vec_perm_indices &sel)
12847 {
12848 if (!TARGET_VIS2)
12849 return false;
12850
12851 /* All permutes are supported. */
12852 if (!target)
12853 return true;
12854
12855 /* Force target-independent code to convert constant permutations on other
12856 modes down to V8QI. Rely on this to avoid the complexity of the byte
12857 order of the permutation. */
12858 if (vmode != V8QImode)
12859 return false;
12860
12861 unsigned int i, mask;
12862 for (i = mask = 0; i < 8; ++i)
12863 mask |= (sel[i] & 0xf) << (28 - i*4);
12864 rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
12865
12866 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
12867 emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
12868 return true;
12869 }
12870
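/* Packing example for the loop above (illustrative): the identity selector
   { 0, 1, 2, 3, 4, 5, 6, 7 } becomes mask = 0x01234567, i.e. nibble i of
   the bmask names source byte i, so the BSHUFFLE copies op0 unchanged.  */
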
12871 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12872
12873 static bool
12874 sparc_frame_pointer_required (void)
12875 {
12876 /* If the stack pointer is dynamically modified in the function, it cannot
12877 serve as the frame pointer. */
12878 if (cfun->calls_alloca)
12879 return true;
12880
12881 /* If the function receives nonlocal gotos, it needs to save the frame
12882 pointer in the nonlocal_goto_save_area object. */
12883 if (cfun->has_nonlocal_label)
12884 return true;
12885
12886 /* In flat mode, that's it. */
12887 if (TARGET_FLAT)
12888 return false;
12889
12890 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12891 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12892 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12893 }
12894
12895 /* The way this is structured, we can't eliminate SFP in favor of SP
12896 if the frame pointer is required: we want to use the SFP->HFP elimination
12897 in that case. But the test in update_eliminables doesn't know we are
12898 assuming below that we only do the former elimination. */
12899
12900 static bool
12901 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
12902 {
12903 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
12904 }
12905
12906 /* Return the hard frame pointer directly to bypass the stack bias. */
12907
12908 static rtx
12909 sparc_builtin_setjmp_frame_value (void)
12910 {
12911 return hard_frame_pointer_rtx;
12912 }
12913
12914 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
12915 they won't be allocated. */
12916
12917 static void
12918 sparc_conditional_register_usage (void)
12919 {
12920 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
12921 {
12922 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12923 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12924 }
12925 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
12926 then honor it. */
12927 if (TARGET_ARCH32 && fixed_regs[5])
12928 fixed_regs[5] = 1;
12929 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
12930 fixed_regs[5] = 0;
12931 if (! TARGET_V9)
12932 {
12933 int regno;
12934 for (regno = SPARC_FIRST_V9_FP_REG;
12935 regno <= SPARC_LAST_V9_FP_REG;
12936 regno++)
12937 fixed_regs[regno] = 1;
12938 /* %fcc0 is used by v8 and v9. */
12939 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
12940 regno <= SPARC_LAST_V9_FCC_REG;
12941 regno++)
12942 fixed_regs[regno] = 1;
12943 }
12944 if (! TARGET_FPU)
12945 {
12946 int regno;
12947 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
12948 fixed_regs[regno] = 1;
12949 }
12950 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
12951 then honor it. Likewise with g3 and g4. */
12952 if (fixed_regs[2] == 2)
12953 fixed_regs[2] = ! TARGET_APP_REGS;
12954 if (fixed_regs[3] == 2)
12955 fixed_regs[3] = ! TARGET_APP_REGS;
12956 if (TARGET_ARCH32 && fixed_regs[4] == 2)
12957 fixed_regs[4] = ! TARGET_APP_REGS;
12958 else if (TARGET_CM_EMBMEDANY)
12959 fixed_regs[4] = 1;
12960 else if (fixed_regs[4] == 2)
12961 fixed_regs[4] = 0;
12962 if (TARGET_FLAT)
12963 {
12964 int regno;
12965 /* Disable leaf functions. */
12966 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
12967 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12968 leaf_reg_remap [regno] = regno;
12969 }
12970 if (TARGET_VIS)
12971 global_regs[SPARC_GSR_REG] = 1;
12972 }
12973
12974 /* Implement TARGET_USE_PSEUDO_PIC_REG. */
12975
12976 static bool
12977 sparc_use_pseudo_pic_reg (void)
12978 {
12979 return !TARGET_VXWORKS_RTP && flag_pic;
12980 }
12981
12982 /* Implement TARGET_INIT_PIC_REG. */
12983
12984 static void
12985 sparc_init_pic_reg (void)
12986 {
12987 edge entry_edge;
12988 rtx_insn *seq;
12989
12990 if (!crtl->uses_pic_offset_table)
12991 return;
12992
12993 start_sequence ();
12994 load_got_register ();
12995 if (!TARGET_VXWORKS_RTP)
12996 emit_move_insn (pic_offset_table_rtx, global_offset_table_rtx);
12997 seq = get_insns ();
12998 end_sequence ();
12999
13000 entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13001 insert_insn_on_edge (seq, entry_edge);
13002 commit_one_edge_insertion (entry_edge);
13003 }
13004
13005 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
13006
13007 - We can't load constants into FP registers.
13008 - We can't load FP constants into integer registers when soft-float,
13009 because there is no soft-float pattern with an r/F constraint.
13010 - We can't load FP constants into integer registers for TFmode unless
13011 it is 0.0L, because there is no movtf pattern with an r/F constraint.
13012 - Try and reload integer constants (symbolic or otherwise) back into
13013 registers directly, rather than having them dumped to memory. */
13014
13015 static reg_class_t
13016 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
13017 {
13018 machine_mode mode = GET_MODE (x);
13019 if (CONSTANT_P (x))
13020 {
13021 if (FP_REG_CLASS_P (rclass)
13022 || rclass == GENERAL_OR_FP_REGS
13023 || rclass == GENERAL_OR_EXTRA_FP_REGS
13024 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
13025 || (mode == TFmode && ! const_zero_operand (x, mode)))
13026 return NO_REGS;
13027
13028 if (GET_MODE_CLASS (mode) == MODE_INT)
13029 return GENERAL_REGS;
13030
13031 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13032 {
13033 if (! FP_REG_CLASS_P (rclass)
13034 || !(const_zero_operand (x, mode)
13035 || const_all_ones_operand (x, mode)))
13036 return NO_REGS;
13037 }
13038 }
13039
13040 if (TARGET_VIS3
13041 && ! TARGET_ARCH64
13042 && (rclass == EXTRA_FP_REGS
13043 || rclass == GENERAL_OR_EXTRA_FP_REGS))
13044 {
13045 int regno = true_regnum (x);
13046
13047 if (SPARC_INT_REG_P (regno))
13048 return (rclass == EXTRA_FP_REGS
13049 ? FP_REGS : GENERAL_OR_FP_REGS);
13050 }
13051
13052 return rclass;
13053 }
13054
13055 /* Return true if we use LRA instead of reload pass. */
13056
13057 static bool
13058 sparc_lra_p (void)
13059 {
13060 return TARGET_LRA;
13061 }
13062
13063 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
13064 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
13065
13066 const char *
13067 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
13068 {
13069 char mulstr[32];
13070
13071 gcc_assert (! TARGET_ARCH64);
13072
13073 if (sparc_check_64 (operands[1], insn) <= 0)
13074 output_asm_insn ("srl\t%L1, 0, %L1", operands);
13075 if (which_alternative == 1)
13076 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
13077 if (GET_CODE (operands[2]) == CONST_INT)
13078 {
13079 if (which_alternative == 1)
13080 {
13081 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13082 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
13083 output_asm_insn (mulstr, operands);
13084 return "srlx\t%L0, 32, %H0";
13085 }
13086 else
13087 {
13088 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13089 output_asm_insn ("or\t%L1, %3, %3", operands);
13090 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
13091 output_asm_insn (mulstr, operands);
13092 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13093 return "mov\t%3, %L0";
13094 }
13095 }
13096 else if (rtx_equal_p (operands[1], operands[2]))
13097 {
13098 if (which_alternative == 1)
13099 {
13100 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13101 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
13102 output_asm_insn (mulstr, operands);
13103 return "srlx\t%L0, 32, %H0";
13104 }
13105 else
13106 {
13107 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13108 output_asm_insn ("or\t%L1, %3, %3", operands);
13109 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
13110 output_asm_insn (mulstr, operands);
13111 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13112 return "mov\t%3, %L0";
13113 }
13114 }
13115 if (sparc_check_64 (operands[2], insn) <= 0)
13116 output_asm_insn ("srl\t%L2, 0, %L2", operands);
13117 if (which_alternative == 1)
13118 {
13119 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13120 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
13121 output_asm_insn ("or\t%L2, %L1, %L1", operands);
13122 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
13123 output_asm_insn (mulstr, operands);
13124 return "srlx\t%L0, 32, %H0";
13125 }
13126 else
13127 {
13128 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13129 output_asm_insn ("sllx\t%H2, 32, %4", operands);
13130 output_asm_insn ("or\t%L1, %3, %3", operands);
13131 output_asm_insn ("or\t%L2, %4, %4", operands);
13132 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
13133 output_asm_insn (mulstr, operands);
13134 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13135 return "mov\t%3, %L0";
13136 }
13137 }
13138
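/* As an illustration only, with OPCODE "mulx", two distinct register
   operands and the alternative that provides the scratch registers %3 and
   %4, the code above emits essentially

     sllx    %H1, 32, %3
     sllx    %H2, 32, %4
     or      %L1, %3, %3
     or      %L2, %4, %4
     mulx    %3, %4, %3
     srlx    %3, 32, %H0
     mov     %3, %L0

   preceded when necessary by srl instructions that re-zero-extend the low
   halves of the inputs.  */
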
13139 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13140 all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn. MODE
13141 and INNER_MODE are the modes describing TARGET. */
13142
13143 static void
13144 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
13145 machine_mode inner_mode)
13146 {
13147 rtx t1, final_insn, sel;
13148 int bmask;
13149
13150 t1 = gen_reg_rtx (mode);
13151
13152 elt = convert_modes (SImode, inner_mode, elt, true);
13153 emit_move_insn (gen_lowpart (SImode, t1), elt);
13154
13155 switch (mode)
13156 {
13157 case E_V2SImode:
13158 final_insn = gen_bshufflev2si_vis (target, t1, t1);
13159 bmask = 0x45674567;
13160 break;
13161 case E_V4HImode:
13162 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
13163 bmask = 0x67676767;
13164 break;
13165 case E_V8QImode:
13166 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
13167 bmask = 0x77777777;
13168 break;
13169 default:
13170 gcc_unreachable ();
13171 }
13172
13173 sel = force_reg (SImode, GEN_INT (bmask));
13174 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
13175 emit_insn (final_insn);
13176 }
13177
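/* Reading of the bmask constants above (illustrative): ELT is placed in the
   low 32-bit half of T1, so its bytes live at positions 4-7 of the 8-byte
   register.  0x77777777 replicates byte 7 into every result byte (V8QI),
   0x67676767 replicates the half-word in bytes 6-7 (V4HI), and 0x45674567
   replicates the word in bytes 4-7 (V2SI).  */
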
13178 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13179 all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn. */
13180
13181 static void
13182 vector_init_fpmerge (rtx target, rtx elt)
13183 {
13184 rtx t1, t2, t2_low, t3, t3_low;
13185
13186 t1 = gen_reg_rtx (V4QImode);
13187 elt = convert_modes (SImode, QImode, elt, true);
13188 emit_move_insn (gen_lowpart (SImode, t1), elt);
13189
13190 t2 = gen_reg_rtx (V8QImode);
13191 t2_low = gen_lowpart (V4QImode, t2);
13192 emit_insn (gen_fpmerge_vis (t2, t1, t1));
13193
13194 t3 = gen_reg_rtx (V8QImode);
13195 t3_low = gen_lowpart (V4QImode, t3);
13196 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
13197
13198 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
13199 }
13200
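/* A sketch of the replication above (illustration only, big-endian byte
   order), with the element x in the low byte of T1:

     t1     = 0 0 0 x
     t2     = fpmerge (t1, t1)         = 0 0 0 0 0 0 x x
     t3     = fpmerge (t2_low, t2_low) = 0 0 0 0 x x x x
     target = fpmerge (t3_low, t3_low) = x x x x x x x x

   Each FPMERGE interleaves its two 4-byte inputs, so the populated suffix
   doubles in length at every step.  */
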
13201 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13202 all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn. */
13203
13204 static void
13205 vector_init_faligndata (rtx target, rtx elt)
13206 {
13207 rtx t1 = gen_reg_rtx (V4HImode);
13208 int i;
13209
13210 elt = convert_modes (SImode, HImode, elt, true);
13211 emit_move_insn (gen_lowpart (SImode, t1), elt);
13212
13213 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
13214 force_reg (SImode, GEN_INT (6)),
13215 const0_rtx));
13216
13217 for (i = 0; i < 4; i++)
13218 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
13219 }
13220
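/* A sketch of the rotation above (illustration only): ALIGNADDR with offset
   6 sets the GSR alignment field to 6, so each FALIGNDATA extracts 8 bytes
   starting at byte 6 of the 16-byte concatenation T1:TARGET, i.e. it
   prepends the half-word element sitting in bytes 6-7 of T1 and shifts
   TARGET right by one half-word.  After four iterations every V4HI field
   of TARGET holds the element.  */
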
13221 /* Emit code to initialize TARGET to values for individual fields VALS. */
13222
13223 void
13224 sparc_expand_vector_init (rtx target, rtx vals)
13225 {
13226 const machine_mode mode = GET_MODE (target);
13227 const machine_mode inner_mode = GET_MODE_INNER (mode);
13228 const int n_elts = GET_MODE_NUNITS (mode);
13229 int i, n_var = 0;
13230 bool all_same = true;
13231 rtx mem;
13232
13233 for (i = 0; i < n_elts; i++)
13234 {
13235 rtx x = XVECEXP (vals, 0, i);
13236 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
13237 n_var++;
13238
13239 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
13240 all_same = false;
13241 }
13242
13243 if (n_var == 0)
13244 {
13245 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
13246 return;
13247 }
13248
13249 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
13250 {
13251 if (GET_MODE_SIZE (inner_mode) == 4)
13252 {
13253 emit_move_insn (gen_lowpart (SImode, target),
13254 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
13255 return;
13256 }
13257 else if (GET_MODE_SIZE (inner_mode) == 8)
13258 {
13259 emit_move_insn (gen_lowpart (DImode, target),
13260 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
13261 return;
13262 }
13263 }
13264 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
13265 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
13266 {
13267 emit_move_insn (gen_highpart (word_mode, target),
13268 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
13269 emit_move_insn (gen_lowpart (word_mode, target),
13270 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
13271 return;
13272 }
13273
13274 if (all_same && GET_MODE_SIZE (mode) == 8)
13275 {
13276 if (TARGET_VIS2)
13277 {
13278 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
13279 return;
13280 }
13281 if (mode == V8QImode)
13282 {
13283 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
13284 return;
13285 }
13286 if (mode == V4HImode)
13287 {
13288 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
13289 return;
13290 }
13291 }
13292
13293 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
13294 for (i = 0; i < n_elts; i++)
13295 emit_move_insn (adjust_address_nv (mem, inner_mode,
13296 i * GET_MODE_SIZE (inner_mode)),
13297 XVECEXP (vals, 0, i));
13298 emit_move_insn (target, mem);
13299 }
13300
13301 /* Implement TARGET_SECONDARY_RELOAD. */
13302
13303 static reg_class_t
13304 sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
13305 machine_mode mode, secondary_reload_info *sri)
13306 {
13307 enum reg_class rclass = (enum reg_class) rclass_i;
13308
13309 sri->icode = CODE_FOR_nothing;
13310 sri->extra_cost = 0;
13311
13312 /* We need a temporary when loading/storing a HImode/QImode value
13313 between memory and the FPU registers. This can happen when combine puts
13314 a paradoxical subreg in a float/fix conversion insn. */
13315 if (FP_REG_CLASS_P (rclass)
13316 && (mode == HImode || mode == QImode)
13317 && (GET_CODE (x) == MEM
13318 || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
13319 && true_regnum (x) == -1)))
13320 return GENERAL_REGS;
13321
13322 /* On 32-bit we need a temporary when loading/storing a DFmode value
13323 between unaligned memory and the upper FPU registers. */
13324 if (TARGET_ARCH32
13325 && rclass == EXTRA_FP_REGS
13326 && mode == DFmode
13327 && GET_CODE (x) == MEM
13328 && ! mem_min_alignment (x, 8))
13329 return FP_REGS;
13330
13331 if (((TARGET_CM_MEDANY
13332 && symbolic_operand (x, mode))
13333 || (TARGET_CM_EMBMEDANY
13334 && text_segment_operand (x, mode)))
13335 && ! flag_pic)
13336 {
13337 if (in_p)
13338 sri->icode = direct_optab_handler (reload_in_optab, mode);
13339 else
13340 sri->icode = direct_optab_handler (reload_out_optab, mode);
13341 return NO_REGS;
13342 }
13343
13344 if (TARGET_VIS3 && TARGET_ARCH32)
13345 {
13346 int regno = true_regnum (x);
13347
13348 /* When using VIS3 fp<-->int register moves, on 32-bit we have
13349 to move 8-byte values in 4-byte pieces. This only works via
13350 FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
13351 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
13352 an FP_REGS intermediate move. */
13353 if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
13354 || ((general_or_i64_p (rclass)
13355 || rclass == GENERAL_OR_FP_REGS)
13356 && SPARC_FP_REG_P (regno)))
13357 {
13358 sri->extra_cost = 2;
13359 return FP_REGS;
13360 }
13361 }
13362
13363 return NO_REGS;
13364 }
13365
13366 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
13367
13368 On SPARC when not VIS3 it is not possible to directly move data
13369 between GENERAL_REGS and FP_REGS. */
13370
13371 static bool
13372 sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
13373 reg_class_t class2)
13374 {
13375 return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
13376 && (! TARGET_VIS3
13377 || GET_MODE_SIZE (mode) > 8
13378 || GET_MODE_SIZE (mode) < 4));
13379 }
13380
13381 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
13382
13383 get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
13384 because the movsi and movsf patterns don't handle r/f moves.
13385 For v8 we copy the default definition. */
13386
13387 static machine_mode
13388 sparc_secondary_memory_needed_mode (machine_mode mode)
13389 {
13390 if (TARGET_ARCH64)
13391 {
13392 if (GET_MODE_BITSIZE (mode) < 32)
13393 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
13394 return mode;
13395 }
13396 else
13397 {
13398 if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
13399 return mode_for_size (BITS_PER_WORD,
13400 GET_MODE_CLASS (mode), 0).require ();
13401 return mode;
13402 }
13403 }
13404
13405 /* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
13406 OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
13407
13408 bool
13409 sparc_expand_conditional_move (machine_mode mode, rtx *operands)
13410 {
13411 enum rtx_code rc = GET_CODE (operands[1]);
13412 machine_mode cmp_mode;
13413 rtx cc_reg, dst, cmp;
13414
13415 cmp = operands[1];
13416 if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
13417 return false;
13418
13419 if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
13420 cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
13421
13422 cmp_mode = GET_MODE (XEXP (cmp, 0));
13423 rc = GET_CODE (cmp);
13424
13425 dst = operands[0];
13426 if (! rtx_equal_p (operands[2], dst)
13427 && ! rtx_equal_p (operands[3], dst))
13428 {
13429 if (reg_overlap_mentioned_p (dst, cmp))
13430 dst = gen_reg_rtx (mode);
13431
13432 emit_move_insn (dst, operands[3]);
13433 }
13434 else if (operands[2] == dst)
13435 {
13436 operands[2] = operands[3];
13437
13438 if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
13439 rc = reverse_condition_maybe_unordered (rc);
13440 else
13441 rc = reverse_condition (rc);
13442 }
13443
13444 if (XEXP (cmp, 1) == const0_rtx
13445 && GET_CODE (XEXP (cmp, 0)) == REG
13446 && cmp_mode == DImode
13447 && v9_regcmp_p (rc))
13448 cc_reg = XEXP (cmp, 0);
13449 else
13450 cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
13451
13452 cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
13453
13454 emit_insn (gen_rtx_SET (dst,
13455 gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
13456
13457 if (dst != operands[0])
13458 emit_move_insn (operands[0], dst);
13459
13460 return true;
13461 }
13462
13463 /* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
13464 into OPERANDS[0] in MODE, depending on the outcome of the comparison of
13465 OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
13466 FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
13467 code to be used for the condition mask. */
13468
13469 void
13470 sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
13471 {
13472 rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
13473 enum rtx_code code = GET_CODE (operands[3]);
13474
13475 mask = gen_reg_rtx (Pmode);
13476 cop0 = operands[4];
13477 cop1 = operands[5];
13478 if (code == LT || code == GE)
13479 {
13480 rtx t;
13481
13482 code = swap_condition (code);
13483 t = cop0; cop0 = cop1; cop1 = t;
13484 }
13485
13486 gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
13487
13488 fcmp = gen_rtx_UNSPEC (Pmode,
13489 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
13490 fcode);
13491
13492 cmask = gen_rtx_UNSPEC (DImode,
13493 gen_rtvec (2, mask, gsr),
13494 ccode);
13495
13496 bshuf = gen_rtx_UNSPEC (mode,
13497 gen_rtvec (3, operands[1], operands[2], gsr),
13498 UNSPEC_BSHUFFLE);
13499
13500 emit_insn (gen_rtx_SET (mask, fcmp));
13501 emit_insn (gen_rtx_SET (gsr, cmask));
13502
13503 emit_insn (gen_rtx_SET (operands[0], bshuf));
13504 }
13505
13506 /* On SPARC, any mode which naturally allocates into the float
13507 registers should return 4 here. */
13508
13509 unsigned int
13510 sparc_regmode_natural_size (machine_mode mode)
13511 {
13512 int size = UNITS_PER_WORD;
13513
13514 if (TARGET_ARCH64)
13515 {
13516 enum mode_class mclass = GET_MODE_CLASS (mode);
13517
13518 if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
13519 size = 4;
13520 }
13521
13522 return size;
13523 }
13524
13525 /* Implement TARGET_HARD_REGNO_NREGS.
13526
13527 On SPARC, ordinary registers hold 32 bits worth; this means both
13528 integer and floating point registers. On v9, integer regs hold 64
13529 bits worth; floating point regs hold 32 bits worth (this includes the
13530 new fp regs as even the odd ones are included in the hard register
13531 count). */
13532
13533 static unsigned int
13534 sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
13535 {
13536 if (regno == SPARC_GSR_REG)
13537 return 1;
13538 if (TARGET_ARCH64)
13539 {
13540 if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
13541 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
13542 return CEIL (GET_MODE_SIZE (mode), 4);
13543 }
13544 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
13545 }
13546
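/* Quick worked examples of the above (illustrative): on 64-bit, DImode
   occupies one integer register but CEIL (8, 4) = 2 floating-point
   registers, and TFmode needs 2 integer or 4 floating-point registers;
   on 32-bit everything is counted in 4-byte words.  */
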
13547 /* Implement TARGET_HARD_REGNO_MODE_OK.
13548
13549 ??? Because of the funny way we pass parameters we should allow certain
13550 ??? types of float/complex values to be in integer registers during
13551 ??? RTL generation. This only matters on arch32. */
13552
13553 static bool
13554 sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
13555 {
13556 return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
13557 }
13558
13559 /* Implement TARGET_MODES_TIEABLE_P.
13560
13561 For V9 we have to deal with the fact that only the lower 32 floating
13562 point registers are 32-bit addressable. */
13563
13564 static bool
13565 sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
13566 {
13567 enum mode_class mclass1, mclass2;
13568 unsigned short size1, size2;
13569
13570 if (mode1 == mode2)
13571 return true;
13572
13573 mclass1 = GET_MODE_CLASS (mode1);
13574 mclass2 = GET_MODE_CLASS (mode2);
13575 if (mclass1 != mclass2)
13576 return false;
13577
13578 if (! TARGET_V9)
13579 return true;
13580
13581 /* Classes are the same and we are V9 so we have to deal with upper
13582 vs. lower floating point registers. If one of the modes is a
13583 4-byte mode, and the other is not, we have to mark them as not
13584 tieable because only the lower 32 floating point registers are
13585 addressable 32 bits at a time.
13586
13587 We can't just test explicitly for SFmode, otherwise we won't
13588 cover the vector mode cases properly. */
13589
13590 if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
13591 return true;
13592
13593 size1 = GET_MODE_SIZE (mode1);
13594 size2 = GET_MODE_SIZE (mode2);
13595 if ((size1 > 4 && size2 == 4)
13596 || (size2 > 4 && size1 == 4))
13597 return false;
13598
13599 return true;
13600 }
13601
13602 /* Implement TARGET_CSTORE_MODE. */
13603
13604 static scalar_int_mode
13605 sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
13606 {
13607 return (TARGET_ARCH64 ? DImode : SImode);
13608 }
13609
13610 /* Return the compound expression made of T1 and T2. */
13611
13612 static inline tree
13613 compound_expr (tree t1, tree t2)
13614 {
13615 return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
13616 }
13617
13618 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13619
13620 static void
13621 sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13622 {
13623 if (!TARGET_FPU)
13624 return;
13625
13626 const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
13627 const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
13628
13629 /* We generate the equivalent of feholdexcept (&fenv_var):
13630
13631 unsigned int fenv_var;
13632 __builtin_store_fsr (&fenv_var);
13633
13634 unsigned int tmp1_var;
13635 tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
13636
13637 __builtin_load_fsr (&tmp1_var); */
13638
13639 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
13640 TREE_ADDRESSABLE (fenv_var) = 1;
13641 tree fenv_addr = build_fold_addr_expr (fenv_var);
13642 tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
13643 tree hold_stfsr
13644 = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
13645 build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);
13646
13647 tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
13648 TREE_ADDRESSABLE (tmp1_var) = 1;
13649 tree masked_fenv_var
13650 = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13651 build_int_cst (unsigned_type_node,
13652 ~(accrued_exception_mask | trap_enable_mask)));
13653 tree hold_mask
13654 = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
13655 NULL_TREE, NULL_TREE);
13656
13657 tree tmp1_addr = build_fold_addr_expr (tmp1_var);
13658 tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
13659 tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
13660
13661 *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
13662
13663 /* We reload the value of tmp1_var to clear the exceptions:
13664
13665 __builtin_load_fsr (&tmp1_var); */
13666
13667 *clear = build_call_expr (ldfsr, 1, tmp1_addr);
13668
13669 /* We generate the equivalent of feupdateenv (&fenv_var):
13670
13671 unsigned int tmp2_var;
13672 __builtin_store_fsr (&tmp2_var);
13673
13674 __builtin_load_fsr (&fenv_var);
13675
13676 if (SPARC_LOW_FE_EXCEPT_VALUES)
13677 tmp2_var >>= 5;
13678 __atomic_feraiseexcept ((int) tmp2_var); */
13679
13680 tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
13681 TREE_ADDRESSABLE (tmp2_var) = 1;
13682 tree tmp2_addr = build_fold_addr_expr (tmp2_var);
13683 tree update_stfsr
13684 = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
13685 build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);
13686
13687 tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
13688
13689 tree atomic_feraiseexcept
13690 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13691 tree update_call
13692 = build_call_expr (atomic_feraiseexcept, 1,
13693 fold_convert (integer_type_node, tmp2_var));
13694
13695 if (SPARC_LOW_FE_EXCEPT_VALUES)
13696 {
13697 tree shifted_tmp2_var
13698 = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
13699 build_int_cst (unsigned_type_node, 5));
13700 tree update_shift
13701 = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
13702 update_call = compound_expr (update_shift, update_call);
13703 }
13704
13705 *update
13706 = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
13707 }
13708
13709 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. Borrowed from the PA port.
13710
13711 SImode loads to floating-point registers are not zero-extended.
13712 The definition for LOAD_EXTEND_OP specifies that integer loads
13713 narrower than BITS_PER_WORD will be zero-extended. As a result,
13714 we inhibit changes from SImode unless they are to a mode that is
13715 identical in size.
13716
13717 Likewise for SFmode, since word-mode paradoxical subregs are
13718 problematic on big-endian architectures. */
13719
13720 static bool
13721 sparc_can_change_mode_class (machine_mode from, machine_mode to,
13722 reg_class_t rclass)
13723 {
13724 if (TARGET_ARCH64
13725 && GET_MODE_SIZE (from) == 4
13726 && GET_MODE_SIZE (to) != 4)
13727 return !reg_classes_intersect_p (rclass, FP_REGS);
13728 return true;
13729 }
13730
13731 /* Implement TARGET_CONSTANT_ALIGNMENT. */
13732
13733 static HOST_WIDE_INT
13734 sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
13735 {
13736 if (TREE_CODE (exp) == STRING_CST)
13737 return MAX (align, FASTEST_ALIGNMENT);
13738 return align;
13739 }
13740
13741 #include "gt-sparc.h"