1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com)
4 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
5 at Cygnus Support.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "memmodel.h"
31 #include "gimple.h"
32 #include "df.h"
33 #include "tm_p.h"
34 #include "stringpool.h"
35 #include "expmed.h"
36 #include "optabs.h"
37 #include "regs.h"
38 #include "emit-rtl.h"
39 #include "recog.h"
40 #include "diagnostic-core.h"
41 #include "alias.h"
42 #include "fold-const.h"
43 #include "stor-layout.h"
44 #include "calls.h"
45 #include "varasm.h"
46 #include "output.h"
47 #include "insn-attr.h"
48 #include "explow.h"
49 #include "expr.h"
50 #include "debug.h"
51 #include "common/common-target.h"
52 #include "gimplify.h"
53 #include "langhooks.h"
54 #include "reload.h"
55 #include "params.h"
56 #include "tree-pass.h"
57 #include "context.h"
58 #include "builtins.h"
59
60 /* This file should be included last. */
61 #include "target-def.h"
62
63 /* Processor costs */
64
65 struct processor_costs {
66 /* Integer load */
67 const int int_load;
68
69 /* Integer signed load */
70 const int int_sload;
71
72 /* Integer zeroed load */
73 const int int_zload;
74
75 /* Float load */
76 const int float_load;
77
78 /* fmov, fneg, fabs */
79 const int float_move;
80
81 /* fadd, fsub */
82 const int float_plusminus;
83
84 /* fcmp */
85 const int float_cmp;
86
87 /* fmov, fmovr */
88 const int float_cmove;
89
90 /* fmul */
91 const int float_mul;
92
93 /* fdivs */
94 const int float_div_sf;
95
96 /* fdivd */
97 const int float_div_df;
98
99 /* fsqrts */
100 const int float_sqrt_sf;
101
102 /* fsqrtd */
103 const int float_sqrt_df;
104
105 /* umul/smul */
106 const int int_mul;
107
108 /* mulX */
109 const int int_mulX;
110
111 /* integer multiply cost for each bit set past the most
112 significant 3, so the formula for multiply cost becomes:
113
114 if (rs1 < 0)
115 highest_bit = highest_clear_bit(rs1);
116 else
117 highest_bit = highest_set_bit(rs1);
118 if (highest_bit < 3)
119 highest_bit = 3;
120 cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
121
 122 	   A value of zero indicates that the multiply cost is fixed
 123 	   rather than variable (a worked example follows below).  */
124 const int int_mul_bit_factor;
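  /* Worked example (illustrative, not part of the original sources): for
     rs1 = 0x1234 the highest set bit is bit 12, so with
     int_mul_bit_factor == 2 (the UltraSPARC value below) the cost is
     int_mul + (12 - 3) / 2 = int_mul + 4 cost units.  */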
125
126 /* udiv/sdiv */
127 const int int_div;
128
129 /* divX */
130 const int int_divX;
131
132 /* movcc, movr */
133 const int int_cmove;
134
135 /* penalty for shifts, due to scheduling rules etc. */
136 const int shift_penalty;
137 };
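/* Note on units (added for clarity): every entry in the tables below uses
   COSTS_N_INSNS (n), the GCC macro that scales a count of n simple
   instructions into the cost units used by the middle end.  For example,
   ultrasparc_costs below gives fdivd a cost of COSTS_N_INSNS (23), i.e. a
   double-precision divide is modelled as roughly 23 times the cost of a
   simple ALU instruction.  */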
138
139 static const
140 struct processor_costs cypress_costs = {
141 COSTS_N_INSNS (2), /* int load */
142 COSTS_N_INSNS (2), /* int signed load */
143 COSTS_N_INSNS (2), /* int zeroed load */
144 COSTS_N_INSNS (2), /* float load */
145 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
146 COSTS_N_INSNS (5), /* fadd, fsub */
147 COSTS_N_INSNS (1), /* fcmp */
148 COSTS_N_INSNS (1), /* fmov, fmovr */
149 COSTS_N_INSNS (7), /* fmul */
150 COSTS_N_INSNS (37), /* fdivs */
151 COSTS_N_INSNS (37), /* fdivd */
152 COSTS_N_INSNS (63), /* fsqrts */
153 COSTS_N_INSNS (63), /* fsqrtd */
154 COSTS_N_INSNS (1), /* imul */
155 COSTS_N_INSNS (1), /* imulX */
156 0, /* imul bit factor */
157 COSTS_N_INSNS (1), /* idiv */
158 COSTS_N_INSNS (1), /* idivX */
159 COSTS_N_INSNS (1), /* movcc/movr */
160 0, /* shift penalty */
161 };
162
163 static const
164 struct processor_costs supersparc_costs = {
165 COSTS_N_INSNS (1), /* int load */
166 COSTS_N_INSNS (1), /* int signed load */
167 COSTS_N_INSNS (1), /* int zeroed load */
168 COSTS_N_INSNS (0), /* float load */
169 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
170 COSTS_N_INSNS (3), /* fadd, fsub */
171 COSTS_N_INSNS (3), /* fcmp */
172 COSTS_N_INSNS (1), /* fmov, fmovr */
173 COSTS_N_INSNS (3), /* fmul */
174 COSTS_N_INSNS (6), /* fdivs */
175 COSTS_N_INSNS (9), /* fdivd */
176 COSTS_N_INSNS (12), /* fsqrts */
177 COSTS_N_INSNS (12), /* fsqrtd */
178 COSTS_N_INSNS (4), /* imul */
179 COSTS_N_INSNS (4), /* imulX */
180 0, /* imul bit factor */
181 COSTS_N_INSNS (4), /* idiv */
182 COSTS_N_INSNS (4), /* idivX */
183 COSTS_N_INSNS (1), /* movcc/movr */
184 1, /* shift penalty */
185 };
186
187 static const
188 struct processor_costs hypersparc_costs = {
189 COSTS_N_INSNS (1), /* int load */
190 COSTS_N_INSNS (1), /* int signed load */
191 COSTS_N_INSNS (1), /* int zeroed load */
192 COSTS_N_INSNS (1), /* float load */
193 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
194 COSTS_N_INSNS (1), /* fadd, fsub */
195 COSTS_N_INSNS (1), /* fcmp */
196 COSTS_N_INSNS (1), /* fmov, fmovr */
197 COSTS_N_INSNS (1), /* fmul */
198 COSTS_N_INSNS (8), /* fdivs */
199 COSTS_N_INSNS (12), /* fdivd */
200 COSTS_N_INSNS (17), /* fsqrts */
201 COSTS_N_INSNS (17), /* fsqrtd */
202 COSTS_N_INSNS (17), /* imul */
203 COSTS_N_INSNS (17), /* imulX */
204 0, /* imul bit factor */
205 COSTS_N_INSNS (17), /* idiv */
206 COSTS_N_INSNS (17), /* idivX */
207 COSTS_N_INSNS (1), /* movcc/movr */
208 0, /* shift penalty */
209 };
210
211 static const
212 struct processor_costs leon_costs = {
213 COSTS_N_INSNS (1), /* int load */
214 COSTS_N_INSNS (1), /* int signed load */
215 COSTS_N_INSNS (1), /* int zeroed load */
216 COSTS_N_INSNS (1), /* float load */
217 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
218 COSTS_N_INSNS (1), /* fadd, fsub */
219 COSTS_N_INSNS (1), /* fcmp */
220 COSTS_N_INSNS (1), /* fmov, fmovr */
221 COSTS_N_INSNS (1), /* fmul */
222 COSTS_N_INSNS (15), /* fdivs */
223 COSTS_N_INSNS (15), /* fdivd */
224 COSTS_N_INSNS (23), /* fsqrts */
225 COSTS_N_INSNS (23), /* fsqrtd */
226 COSTS_N_INSNS (5), /* imul */
227 COSTS_N_INSNS (5), /* imulX */
228 0, /* imul bit factor */
229 COSTS_N_INSNS (5), /* idiv */
230 COSTS_N_INSNS (5), /* idivX */
231 COSTS_N_INSNS (1), /* movcc/movr */
232 0, /* shift penalty */
233 };
234
235 static const
236 struct processor_costs leon3_costs = {
237 COSTS_N_INSNS (1), /* int load */
238 COSTS_N_INSNS (1), /* int signed load */
239 COSTS_N_INSNS (1), /* int zeroed load */
240 COSTS_N_INSNS (1), /* float load */
241 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
242 COSTS_N_INSNS (1), /* fadd, fsub */
243 COSTS_N_INSNS (1), /* fcmp */
244 COSTS_N_INSNS (1), /* fmov, fmovr */
245 COSTS_N_INSNS (1), /* fmul */
246 COSTS_N_INSNS (14), /* fdivs */
247 COSTS_N_INSNS (15), /* fdivd */
248 COSTS_N_INSNS (22), /* fsqrts */
249 COSTS_N_INSNS (23), /* fsqrtd */
250 COSTS_N_INSNS (5), /* imul */
251 COSTS_N_INSNS (5), /* imulX */
252 0, /* imul bit factor */
253 COSTS_N_INSNS (35), /* idiv */
254 COSTS_N_INSNS (35), /* idivX */
255 COSTS_N_INSNS (1), /* movcc/movr */
256 0, /* shift penalty */
257 };
258
259 static const
260 struct processor_costs sparclet_costs = {
261 COSTS_N_INSNS (3), /* int load */
262 COSTS_N_INSNS (3), /* int signed load */
263 COSTS_N_INSNS (1), /* int zeroed load */
264 COSTS_N_INSNS (1), /* float load */
265 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
266 COSTS_N_INSNS (1), /* fadd, fsub */
267 COSTS_N_INSNS (1), /* fcmp */
268 COSTS_N_INSNS (1), /* fmov, fmovr */
269 COSTS_N_INSNS (1), /* fmul */
270 COSTS_N_INSNS (1), /* fdivs */
271 COSTS_N_INSNS (1), /* fdivd */
272 COSTS_N_INSNS (1), /* fsqrts */
273 COSTS_N_INSNS (1), /* fsqrtd */
274 COSTS_N_INSNS (5), /* imul */
275 COSTS_N_INSNS (5), /* imulX */
276 0, /* imul bit factor */
277 COSTS_N_INSNS (5), /* idiv */
278 COSTS_N_INSNS (5), /* idivX */
279 COSTS_N_INSNS (1), /* movcc/movr */
280 0, /* shift penalty */
281 };
282
283 static const
284 struct processor_costs ultrasparc_costs = {
285 COSTS_N_INSNS (2), /* int load */
286 COSTS_N_INSNS (3), /* int signed load */
287 COSTS_N_INSNS (2), /* int zeroed load */
288 COSTS_N_INSNS (2), /* float load */
289 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
290 COSTS_N_INSNS (4), /* fadd, fsub */
291 COSTS_N_INSNS (1), /* fcmp */
292 COSTS_N_INSNS (2), /* fmov, fmovr */
293 COSTS_N_INSNS (4), /* fmul */
294 COSTS_N_INSNS (13), /* fdivs */
295 COSTS_N_INSNS (23), /* fdivd */
296 COSTS_N_INSNS (13), /* fsqrts */
297 COSTS_N_INSNS (23), /* fsqrtd */
298 COSTS_N_INSNS (4), /* imul */
299 COSTS_N_INSNS (4), /* imulX */
300 2, /* imul bit factor */
301 COSTS_N_INSNS (37), /* idiv */
302 COSTS_N_INSNS (68), /* idivX */
303 COSTS_N_INSNS (2), /* movcc/movr */
304 2, /* shift penalty */
305 };
306
307 static const
308 struct processor_costs ultrasparc3_costs = {
309 COSTS_N_INSNS (2), /* int load */
310 COSTS_N_INSNS (3), /* int signed load */
311 COSTS_N_INSNS (3), /* int zeroed load */
312 COSTS_N_INSNS (2), /* float load */
313 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
314 COSTS_N_INSNS (4), /* fadd, fsub */
315 COSTS_N_INSNS (5), /* fcmp */
316 COSTS_N_INSNS (3), /* fmov, fmovr */
317 COSTS_N_INSNS (4), /* fmul */
318 COSTS_N_INSNS (17), /* fdivs */
319 COSTS_N_INSNS (20), /* fdivd */
320 COSTS_N_INSNS (20), /* fsqrts */
321 COSTS_N_INSNS (29), /* fsqrtd */
322 COSTS_N_INSNS (6), /* imul */
323 COSTS_N_INSNS (6), /* imulX */
324 0, /* imul bit factor */
325 COSTS_N_INSNS (40), /* idiv */
326 COSTS_N_INSNS (71), /* idivX */
327 COSTS_N_INSNS (2), /* movcc/movr */
328 0, /* shift penalty */
329 };
330
331 static const
332 struct processor_costs niagara_costs = {
333 COSTS_N_INSNS (3), /* int load */
334 COSTS_N_INSNS (3), /* int signed load */
335 COSTS_N_INSNS (3), /* int zeroed load */
336 COSTS_N_INSNS (9), /* float load */
337 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
338 COSTS_N_INSNS (8), /* fadd, fsub */
339 COSTS_N_INSNS (26), /* fcmp */
340 COSTS_N_INSNS (8), /* fmov, fmovr */
341 COSTS_N_INSNS (29), /* fmul */
342 COSTS_N_INSNS (54), /* fdivs */
343 COSTS_N_INSNS (83), /* fdivd */
344 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
345 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
346 COSTS_N_INSNS (11), /* imul */
347 COSTS_N_INSNS (11), /* imulX */
348 0, /* imul bit factor */
349 COSTS_N_INSNS (72), /* idiv */
350 COSTS_N_INSNS (72), /* idivX */
351 COSTS_N_INSNS (1), /* movcc/movr */
352 0, /* shift penalty */
353 };
354
355 static const
356 struct processor_costs niagara2_costs = {
357 COSTS_N_INSNS (3), /* int load */
358 COSTS_N_INSNS (3), /* int signed load */
359 COSTS_N_INSNS (3), /* int zeroed load */
360 COSTS_N_INSNS (3), /* float load */
361 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
362 COSTS_N_INSNS (6), /* fadd, fsub */
363 COSTS_N_INSNS (6), /* fcmp */
364 COSTS_N_INSNS (6), /* fmov, fmovr */
365 COSTS_N_INSNS (6), /* fmul */
366 COSTS_N_INSNS (19), /* fdivs */
367 COSTS_N_INSNS (33), /* fdivd */
368 COSTS_N_INSNS (19), /* fsqrts */
369 COSTS_N_INSNS (33), /* fsqrtd */
370 COSTS_N_INSNS (5), /* imul */
371 COSTS_N_INSNS (5), /* imulX */
372 0, /* imul bit factor */
373 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
374 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
375 COSTS_N_INSNS (1), /* movcc/movr */
376 0, /* shift penalty */
377 };
378
379 static const
380 struct processor_costs niagara3_costs = {
381 COSTS_N_INSNS (3), /* int load */
382 COSTS_N_INSNS (3), /* int signed load */
383 COSTS_N_INSNS (3), /* int zeroed load */
384 COSTS_N_INSNS (3), /* float load */
385 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
386 COSTS_N_INSNS (9), /* fadd, fsub */
387 COSTS_N_INSNS (9), /* fcmp */
388 COSTS_N_INSNS (9), /* fmov, fmovr */
389 COSTS_N_INSNS (9), /* fmul */
390 COSTS_N_INSNS (23), /* fdivs */
391 COSTS_N_INSNS (37), /* fdivd */
392 COSTS_N_INSNS (23), /* fsqrts */
393 COSTS_N_INSNS (37), /* fsqrtd */
394 COSTS_N_INSNS (9), /* imul */
395 COSTS_N_INSNS (9), /* imulX */
396 0, /* imul bit factor */
397 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
398 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
399 COSTS_N_INSNS (1), /* movcc/movr */
400 0, /* shift penalty */
401 };
402
403 static const
404 struct processor_costs niagara4_costs = {
405 COSTS_N_INSNS (5), /* int load */
406 COSTS_N_INSNS (5), /* int signed load */
407 COSTS_N_INSNS (5), /* int zeroed load */
408 COSTS_N_INSNS (5), /* float load */
409 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
410 COSTS_N_INSNS (11), /* fadd, fsub */
411 COSTS_N_INSNS (11), /* fcmp */
412 COSTS_N_INSNS (11), /* fmov, fmovr */
413 COSTS_N_INSNS (11), /* fmul */
414 COSTS_N_INSNS (24), /* fdivs */
415 COSTS_N_INSNS (37), /* fdivd */
416 COSTS_N_INSNS (24), /* fsqrts */
417 COSTS_N_INSNS (37), /* fsqrtd */
418 COSTS_N_INSNS (12), /* imul */
419 COSTS_N_INSNS (12), /* imulX */
420 0, /* imul bit factor */
421 COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
422 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
423 COSTS_N_INSNS (1), /* movcc/movr */
424 0, /* shift penalty */
425 };
426
427 static const
428 struct processor_costs niagara7_costs = {
429 COSTS_N_INSNS (5), /* int load */
430 COSTS_N_INSNS (5), /* int signed load */
431 COSTS_N_INSNS (5), /* int zeroed load */
432 COSTS_N_INSNS (5), /* float load */
433 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
434 COSTS_N_INSNS (11), /* fadd, fsub */
435 COSTS_N_INSNS (11), /* fcmp */
436 COSTS_N_INSNS (11), /* fmov, fmovr */
437 COSTS_N_INSNS (11), /* fmul */
438 COSTS_N_INSNS (24), /* fdivs */
439 COSTS_N_INSNS (37), /* fdivd */
440 COSTS_N_INSNS (24), /* fsqrts */
441 COSTS_N_INSNS (37), /* fsqrtd */
442 COSTS_N_INSNS (12), /* imul */
443 COSTS_N_INSNS (12), /* imulX */
444 0, /* imul bit factor */
445 COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
446 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
447 COSTS_N_INSNS (1), /* movcc/movr */
448 0, /* shift penalty */
449 };
450
451 static const
452 struct processor_costs m8_costs = {
453 COSTS_N_INSNS (3), /* int load */
454 COSTS_N_INSNS (3), /* int signed load */
455 COSTS_N_INSNS (3), /* int zeroed load */
456 COSTS_N_INSNS (3), /* float load */
457 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
458 COSTS_N_INSNS (9), /* fadd, fsub */
459 COSTS_N_INSNS (9), /* fcmp */
460 COSTS_N_INSNS (9), /* fmov, fmovr */
461 COSTS_N_INSNS (9), /* fmul */
462 COSTS_N_INSNS (26), /* fdivs */
463 COSTS_N_INSNS (30), /* fdivd */
464 COSTS_N_INSNS (33), /* fsqrts */
465 COSTS_N_INSNS (41), /* fsqrtd */
466 COSTS_N_INSNS (12), /* imul */
467 COSTS_N_INSNS (10), /* imulX */
468 0, /* imul bit factor */
469 COSTS_N_INSNS (57), /* udiv/sdiv */
470 COSTS_N_INSNS (30), /* udivx/sdivx */
471 COSTS_N_INSNS (1), /* movcc/movr */
472 0, /* shift penalty */
473 };
474
475 static const struct processor_costs *sparc_costs = &cypress_costs;
476
477 #ifdef HAVE_AS_RELAX_OPTION
478 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
479 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
 480    With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
 481    whether something branches between the sethi and the jmp.  */
482 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
483 #else
484 #define LEAF_SIBCALL_SLOT_RESERVED_P \
485 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
486 #endif
487
488 /* Vector to say how input registers are mapped to output registers.
489 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
490 eliminate it. You must use -fomit-frame-pointer to get that. */
491 char leaf_reg_remap[] =
492 { 0, 1, 2, 3, 4, 5, 6, 7,
493 -1, -1, -1, -1, -1, -1, 14, -1,
494 -1, -1, -1, -1, -1, -1, -1, -1,
495 8, 9, 10, 11, 12, 13, -1, 15,
496
497 32, 33, 34, 35, 36, 37, 38, 39,
498 40, 41, 42, 43, 44, 45, 46, 47,
499 48, 49, 50, 51, 52, 53, 54, 55,
500 56, 57, 58, 59, 60, 61, 62, 63,
501 64, 65, 66, 67, 68, 69, 70, 71,
502 72, 73, 74, 75, 76, 77, 78, 79,
503 80, 81, 82, 83, 84, 85, 86, 87,
504 88, 89, 90, 91, 92, 93, 94, 95,
505 96, 97, 98, 99, 100, 101, 102};
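/* Reading the table above (illustrative note): hard registers 24-31 are the
   %i registers and 8-15 the %o registers, so e.g. leaf_reg_remap[24] == 8
   remaps %i0 to %o0 and leaf_reg_remap[31] == 15 remaps %i7 to %o7 in a
   leaf function; an entry of -1 marks a register with no leaf-function
   counterpart (cf. sparc_leaf_regs below).  */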
506
507 /* Vector, indexed by hard register number, which contains 1
508 for a register that is allowable in a candidate for leaf
509 function treatment. */
510 char sparc_leaf_regs[] =
511 { 1, 1, 1, 1, 1, 1, 1, 1,
512 0, 0, 0, 0, 0, 0, 1, 0,
513 0, 0, 0, 0, 0, 0, 0, 0,
514 1, 1, 1, 1, 1, 1, 0, 1,
515 1, 1, 1, 1, 1, 1, 1, 1,
516 1, 1, 1, 1, 1, 1, 1, 1,
517 1, 1, 1, 1, 1, 1, 1, 1,
518 1, 1, 1, 1, 1, 1, 1, 1,
519 1, 1, 1, 1, 1, 1, 1, 1,
520 1, 1, 1, 1, 1, 1, 1, 1,
521 1, 1, 1, 1, 1, 1, 1, 1,
522 1, 1, 1, 1, 1, 1, 1, 1,
523 1, 1, 1, 1, 1, 1, 1};
524
525 struct GTY(()) machine_function
526 {
527 /* Size of the frame of the function. */
528 HOST_WIDE_INT frame_size;
529
530 /* Size of the frame of the function minus the register window save area
531 and the outgoing argument area. */
532 HOST_WIDE_INT apparent_frame_size;
533
534 /* Register we pretend the frame pointer is allocated to. Normally, this
535 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
536 record "offset" separately as it may be too big for (reg + disp). */
537 rtx frame_base_reg;
538 HOST_WIDE_INT frame_base_offset;
539
540 /* Number of global or FP registers to be saved (as 4-byte quantities). */
541 int n_global_fp_regs;
542
543 /* True if the current function is leaf and uses only leaf regs,
544 so that the SPARC leaf function optimization can be applied.
545 Private version of crtl->uses_only_leaf_regs, see
546 sparc_expand_prologue for the rationale. */
547 int leaf_function_p;
548
549 /* True if the prologue saves local or in registers. */
550 bool save_local_in_regs_p;
551
552 /* True if the data calculated by sparc_expand_prologue are valid. */
553 bool prologue_data_valid_p;
554 };
555
556 #define sparc_frame_size cfun->machine->frame_size
557 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
558 #define sparc_frame_base_reg cfun->machine->frame_base_reg
559 #define sparc_frame_base_offset cfun->machine->frame_base_offset
560 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
561 #define sparc_leaf_function_p cfun->machine->leaf_function_p
562 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
563 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
564
565 /* 1 if the next opcode is to be specially indented. */
566 int sparc_indent_opcode = 0;
567
568 static void sparc_option_override (void);
569 static void sparc_init_modes (void);
570 static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
571 const_tree, bool, bool, int *, int *);
572
573 static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
574 static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
575
576 static void sparc_emit_set_const32 (rtx, rtx);
577 static void sparc_emit_set_const64 (rtx, rtx);
578 static void sparc_output_addr_vec (rtx);
579 static void sparc_output_addr_diff_vec (rtx);
580 static void sparc_output_deferred_case_vectors (void);
581 static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
582 static bool sparc_legitimate_constant_p (machine_mode, rtx);
583 static rtx sparc_builtin_saveregs (void);
584 static int epilogue_renumber (rtx *, int);
585 static bool sparc_assemble_integer (rtx, unsigned int, int);
586 static int set_extends (rtx_insn *);
587 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
588 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
589 #ifdef TARGET_SOLARIS
590 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
591 tree) ATTRIBUTE_UNUSED;
592 #endif
593 static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
594 static int sparc_issue_rate (void);
595 static void sparc_sched_init (FILE *, int, int);
596 static int sparc_use_sched_lookahead (void);
597
598 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
599 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
600 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
601 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
602 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
603
604 static bool sparc_function_ok_for_sibcall (tree, tree);
605 static void sparc_init_libfuncs (void);
606 static void sparc_init_builtins (void);
607 static void sparc_fpu_init_builtins (void);
608 static void sparc_vis_init_builtins (void);
609 static tree sparc_builtin_decl (unsigned, bool);
610 static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
611 static tree sparc_fold_builtin (tree, int, tree *, bool);
612 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
613 HOST_WIDE_INT, tree);
614 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
615 HOST_WIDE_INT, const_tree);
616 static struct machine_function * sparc_init_machine_status (void);
617 static bool sparc_cannot_force_const_mem (machine_mode, rtx);
618 static rtx sparc_tls_get_addr (void);
619 static rtx sparc_tls_got (void);
620 static int sparc_register_move_cost (machine_mode,
621 reg_class_t, reg_class_t);
622 static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
623 static rtx sparc_function_value (const_tree, const_tree, bool);
624 static rtx sparc_libcall_value (machine_mode, const_rtx);
625 static bool sparc_function_value_regno_p (const unsigned int);
626 static rtx sparc_struct_value_rtx (tree, int);
627 static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
628 int *, const_tree, int);
629 static bool sparc_return_in_memory (const_tree, const_tree);
630 static bool sparc_strict_argument_naming (cumulative_args_t);
631 static void sparc_va_start (tree, rtx);
632 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
633 static bool sparc_vector_mode_supported_p (machine_mode);
634 static bool sparc_tls_referenced_p (rtx);
635 static rtx sparc_legitimize_tls_address (rtx);
636 static rtx sparc_legitimize_pic_address (rtx, rtx);
637 static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
638 static rtx sparc_delegitimize_address (rtx);
639 static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
640 static bool sparc_pass_by_reference (cumulative_args_t,
641 machine_mode, const_tree, bool);
642 static void sparc_function_arg_advance (cumulative_args_t,
643 machine_mode, const_tree, bool);
644 static rtx sparc_function_arg_1 (cumulative_args_t,
645 machine_mode, const_tree, bool, bool);
646 static rtx sparc_function_arg (cumulative_args_t,
647 machine_mode, const_tree, bool);
648 static rtx sparc_function_incoming_arg (cumulative_args_t,
649 machine_mode, const_tree, bool);
650 static unsigned int sparc_function_arg_boundary (machine_mode,
651 const_tree);
652 static int sparc_arg_partial_bytes (cumulative_args_t,
653 machine_mode, tree, bool);
654 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
655 static void sparc_file_end (void);
656 static bool sparc_frame_pointer_required (void);
657 static bool sparc_can_eliminate (const int, const int);
658 static rtx sparc_builtin_setjmp_frame_value (void);
659 static void sparc_conditional_register_usage (void);
660 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
661 static const char *sparc_mangle_type (const_tree);
662 #endif
663 static void sparc_trampoline_init (rtx, tree, rtx);
664 static machine_mode sparc_preferred_simd_mode (machine_mode);
665 static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
666 static bool sparc_lra_p (void);
667 static bool sparc_print_operand_punct_valid_p (unsigned char);
668 static void sparc_print_operand (FILE *, rtx, int);
669 static void sparc_print_operand_address (FILE *, machine_mode, rtx);
670 static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
671 machine_mode,
672 secondary_reload_info *);
673 static machine_mode sparc_cstore_mode (enum insn_code icode);
674 static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
675 static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
676 static unsigned int sparc_min_arithmetic_precision (void);
677 \f
678 #ifdef SUBTARGET_ATTRIBUTE_TABLE
679 /* Table of valid machine attributes. */
680 static const struct attribute_spec sparc_attribute_table[] =
681 {
682 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
683 do_diagnostic } */
684 SUBTARGET_ATTRIBUTE_TABLE,
685 { NULL, 0, 0, false, false, false, NULL, false }
686 };
687 #endif
688 \f
689 /* Option handling. */
690
691 /* Parsed value. */
692 enum cmodel sparc_cmodel;
693
694 char sparc_hard_reg_printed[8];
695
696 /* Initialize the GCC target structure. */
697
698 /* The default is to use .half rather than .short for aligned HI objects. */
699 #undef TARGET_ASM_ALIGNED_HI_OP
700 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
701
702 #undef TARGET_ASM_UNALIGNED_HI_OP
703 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
704 #undef TARGET_ASM_UNALIGNED_SI_OP
705 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
706 #undef TARGET_ASM_UNALIGNED_DI_OP
707 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
708
709 /* The target hook has to handle DI-mode values. */
710 #undef TARGET_ASM_INTEGER
711 #define TARGET_ASM_INTEGER sparc_assemble_integer
712
713 #undef TARGET_ASM_FUNCTION_PROLOGUE
714 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
715 #undef TARGET_ASM_FUNCTION_EPILOGUE
716 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
717
718 #undef TARGET_SCHED_ADJUST_COST
719 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
720 #undef TARGET_SCHED_ISSUE_RATE
721 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
722 #undef TARGET_SCHED_INIT
723 #define TARGET_SCHED_INIT sparc_sched_init
724 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
725 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
726
727 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
728 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
729
730 #undef TARGET_INIT_LIBFUNCS
731 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
732
733 #undef TARGET_LEGITIMIZE_ADDRESS
734 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
735 #undef TARGET_DELEGITIMIZE_ADDRESS
736 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
737 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
738 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
739
740 #undef TARGET_INIT_BUILTINS
741 #define TARGET_INIT_BUILTINS sparc_init_builtins
742 #undef TARGET_BUILTIN_DECL
743 #define TARGET_BUILTIN_DECL sparc_builtin_decl
744 #undef TARGET_EXPAND_BUILTIN
745 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
746 #undef TARGET_FOLD_BUILTIN
747 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
748
749 #if TARGET_TLS
750 #undef TARGET_HAVE_TLS
751 #define TARGET_HAVE_TLS true
752 #endif
753
754 #undef TARGET_CANNOT_FORCE_CONST_MEM
755 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
756
757 #undef TARGET_ASM_OUTPUT_MI_THUNK
758 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
759 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
760 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
761
762 #undef TARGET_RTX_COSTS
763 #define TARGET_RTX_COSTS sparc_rtx_costs
764 #undef TARGET_ADDRESS_COST
765 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
766 #undef TARGET_REGISTER_MOVE_COST
767 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
768
769 #undef TARGET_PROMOTE_FUNCTION_MODE
770 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
771
772 #undef TARGET_FUNCTION_VALUE
773 #define TARGET_FUNCTION_VALUE sparc_function_value
774 #undef TARGET_LIBCALL_VALUE
775 #define TARGET_LIBCALL_VALUE sparc_libcall_value
776 #undef TARGET_FUNCTION_VALUE_REGNO_P
777 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
778
779 #undef TARGET_STRUCT_VALUE_RTX
780 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
781 #undef TARGET_RETURN_IN_MEMORY
782 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
783 #undef TARGET_MUST_PASS_IN_STACK
784 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
785 #undef TARGET_PASS_BY_REFERENCE
786 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
787 #undef TARGET_ARG_PARTIAL_BYTES
788 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
789 #undef TARGET_FUNCTION_ARG_ADVANCE
790 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
791 #undef TARGET_FUNCTION_ARG
792 #define TARGET_FUNCTION_ARG sparc_function_arg
793 #undef TARGET_FUNCTION_INCOMING_ARG
794 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
795 #undef TARGET_FUNCTION_ARG_BOUNDARY
796 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
797
798 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
799 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
800 #undef TARGET_STRICT_ARGUMENT_NAMING
801 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
802
803 #undef TARGET_EXPAND_BUILTIN_VA_START
804 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
805 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
806 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
807
808 #undef TARGET_VECTOR_MODE_SUPPORTED_P
809 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
810
811 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
812 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
813
814 #ifdef SUBTARGET_INSERT_ATTRIBUTES
815 #undef TARGET_INSERT_ATTRIBUTES
816 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
817 #endif
818
819 #ifdef SUBTARGET_ATTRIBUTE_TABLE
820 #undef TARGET_ATTRIBUTE_TABLE
821 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
822 #endif
823
824 #undef TARGET_OPTION_OVERRIDE
825 #define TARGET_OPTION_OVERRIDE sparc_option_override
826
827 #ifdef TARGET_THREAD_SSP_OFFSET
828 #undef TARGET_STACK_PROTECT_GUARD
829 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
830 #endif
831
832 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
835 #endif
836
837 #undef TARGET_ASM_FILE_END
838 #define TARGET_ASM_FILE_END sparc_file_end
839
840 #undef TARGET_FRAME_POINTER_REQUIRED
841 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
842
843 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
844 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value
845
846 #undef TARGET_CAN_ELIMINATE
847 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
848
849 #undef TARGET_PREFERRED_RELOAD_CLASS
850 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
851
852 #undef TARGET_SECONDARY_RELOAD
853 #define TARGET_SECONDARY_RELOAD sparc_secondary_reload
854
855 #undef TARGET_CONDITIONAL_REGISTER_USAGE
856 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
857
858 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
859 #undef TARGET_MANGLE_TYPE
860 #define TARGET_MANGLE_TYPE sparc_mangle_type
861 #endif
862
863 #undef TARGET_LRA_P
864 #define TARGET_LRA_P sparc_lra_p
865
866 #undef TARGET_LEGITIMATE_ADDRESS_P
867 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
868
869 #undef TARGET_LEGITIMATE_CONSTANT_P
870 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
871
872 #undef TARGET_TRAMPOLINE_INIT
873 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
874
875 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
876 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
877 #undef TARGET_PRINT_OPERAND
878 #define TARGET_PRINT_OPERAND sparc_print_operand
879 #undef TARGET_PRINT_OPERAND_ADDRESS
880 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
881
882 /* The value stored by LDSTUB. */
883 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
884 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff
885
886 #undef TARGET_CSTORE_MODE
887 #define TARGET_CSTORE_MODE sparc_cstore_mode
888
889 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
890 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
891
892 #undef TARGET_FIXED_CONDITION_CODE_REGS
893 #define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs
894
895 #undef TARGET_MIN_ARITHMETIC_PRECISION
896 #define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision
897
898 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
899 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
900
901 struct gcc_target targetm = TARGET_INITIALIZER;
902
903 /* Return the memory reference contained in X if any, zero otherwise. */
904
905 static rtx
906 mem_ref (rtx x)
907 {
908 if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
909 x = XEXP (x, 0);
910
911 if (MEM_P (x))
912 return x;
913
914 return NULL_RTX;
915 }
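/* For example (descriptive note): given (zero_extend:DI (mem:SI ...)) this
   returns the inner MEM; the -mfix-ut699 workaround below relies on this
   to catch loads hidden behind a sign or zero extension.  */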
916
917 /* We use a machine specific pass to enable workarounds for errata.
918
919 We need to have the (essentially) final form of the insn stream in order
920 to properly detect the various hazards. Therefore, this machine specific
921 pass runs as late as possible. */
922
923 /* True if INSN is a md pattern or asm statement. */
924 #define USEFUL_INSN_P(INSN) \
925 (NONDEBUG_INSN_P (INSN) \
926 && GET_CODE (PATTERN (INSN)) != USE \
927 && GET_CODE (PATTERN (INSN)) != CLOBBER)
928
929 static unsigned int
930 sparc_do_work_around_errata (void)
931 {
932 rtx_insn *insn, *next;
933
934 /* Force all instructions to be split into their final form. */
935 split_all_insns_noflow ();
936
937 /* Now look for specific patterns in the insn stream. */
938 for (insn = get_insns (); insn; insn = next)
939 {
940 bool insert_nop = false;
941 rtx set;
942
943 /* Look into the instruction in a delay slot. */
944 if (NONJUMP_INSN_P (insn))
945 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
946 insn = seq->insn (1);
947
948 /* Look for either of these two sequences:
949
950 Sequence A:
951 1. store of word size or less (e.g. st / stb / sth / stf)
952 2. any single instruction that is not a load or store
953 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)
954
955 Sequence B:
956 1. store of double word size (e.g. std / stdf)
957 2. any store instruction (e.g. st / stb / sth / stf / std / stdf) */
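      /* An illustrative instance of sequence B (assumed example, not taken
	 from the errata document): a double-word store immediately followed
	 by another store, for which the code below emits a nop in between:

	     std  %o2, [%o0]
	     nop			! inserted by this workaround
	     st   %o4, [%o1]  */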
958 if (sparc_fix_b2bst
959 && NONJUMP_INSN_P (insn)
960 && (set = single_set (insn)) != NULL_RTX
961 && MEM_P (SET_DEST (set)))
962 {
963 /* Sequence B begins with a double-word store. */
964 bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
965 rtx_insn *after;
966 int i;
967
968 next = next_active_insn (insn);
969 if (!next)
970 break;
971
972 for (after = next, i = 0; i < 2; i++)
973 {
974 /* Skip empty assembly statements. */
975 if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
976 || (USEFUL_INSN_P (after)
 977 		      && (asm_noperands (PATTERN (after)) >= 0)
978 && !strcmp (decode_asm_operands (PATTERN (after),
979 NULL, NULL, NULL,
980 NULL, NULL), "")))
981 after = next_active_insn (after);
982 if (!after)
983 break;
984
985 /* If the insn is a branch, then it cannot be problematic. */
986 if (!NONJUMP_INSN_P (after)
987 || GET_CODE (PATTERN (after)) == SEQUENCE)
988 break;
989
990 /* Sequence B is only two instructions long. */
991 if (seq_b)
992 {
993 /* Add NOP if followed by a store. */
994 if ((set = single_set (after)) != NULL_RTX
995 && MEM_P (SET_DEST (set)))
996 insert_nop = true;
997
998 /* Otherwise it is ok. */
999 break;
1000 }
1001
1002 /* If the second instruction is a load or a store,
1003 then the sequence cannot be problematic. */
1004 if (i == 0)
1005 {
1006 if (((set = single_set (after)) != NULL_RTX)
1007 && (MEM_P (SET_DEST (set)) || MEM_P (SET_SRC (set))))
1008 break;
1009
1010 after = next_active_insn (after);
1011 if (!after)
1012 break;
1013 }
1014
1015 /* Add NOP if third instruction is a store. */
1016 if (i == 1
1017 && ((set = single_set (after)) != NULL_RTX)
1018 && MEM_P (SET_DEST (set)))
1019 insert_nop = true;
1020 }
1021 }
1022 else
1023 /* Look for a single-word load into an odd-numbered FP register. */
1024 if (sparc_fix_at697f
1025 && NONJUMP_INSN_P (insn)
1026 && (set = single_set (insn)) != NULL_RTX
1027 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1028 && MEM_P (SET_SRC (set))
1029 && REG_P (SET_DEST (set))
1030 && REGNO (SET_DEST (set)) > 31
1031 && REGNO (SET_DEST (set)) % 2 != 0)
1032 {
1033 /* The wrong dependency is on the enclosing double register. */
1034 const unsigned int x = REGNO (SET_DEST (set)) - 1;
1035 unsigned int src1, src2, dest;
1036 int code;
1037
1038 next = next_active_insn (insn);
1039 if (!next)
1040 break;
1041 /* If the insn is a branch, then it cannot be problematic. */
1042 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1043 continue;
1044
1045 extract_insn (next);
1046 code = INSN_CODE (next);
1047
1048 switch (code)
1049 {
1050 case CODE_FOR_adddf3:
1051 case CODE_FOR_subdf3:
1052 case CODE_FOR_muldf3:
1053 case CODE_FOR_divdf3:
1054 dest = REGNO (recog_data.operand[0]);
1055 src1 = REGNO (recog_data.operand[1]);
1056 src2 = REGNO (recog_data.operand[2]);
1057 if (src1 != src2)
1058 {
1059 /* Case [1-4]:
1060 ld [address], %fx+1
1061 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
1062 if ((src1 == x || src2 == x)
1063 && (dest == src1 || dest == src2))
1064 insert_nop = true;
1065 }
1066 else
1067 {
1068 /* Case 5:
1069 ld [address], %fx+1
1070 FPOPd %fx, %fx, %fx */
1071 if (src1 == x
1072 && dest == src1
1073 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
1074 insert_nop = true;
1075 }
1076 break;
1077
1078 case CODE_FOR_sqrtdf2:
1079 dest = REGNO (recog_data.operand[0]);
1080 src1 = REGNO (recog_data.operand[1]);
1081 /* Case 6:
1082 ld [address], %fx+1
1083 fsqrtd %fx, %fx */
1084 if (src1 == x && dest == src1)
1085 insert_nop = true;
1086 break;
1087
1088 default:
1089 break;
1090 }
1091 }
1092
1093 /* Look for a single-word load into an integer register. */
1094 else if (sparc_fix_ut699
1095 && NONJUMP_INSN_P (insn)
1096 && (set = single_set (insn)) != NULL_RTX
1097 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
1098 && mem_ref (SET_SRC (set)) != NULL_RTX
1099 && REG_P (SET_DEST (set))
1100 && REGNO (SET_DEST (set)) < 32)
1101 {
1102 /* There is no problem if the second memory access has a data
1103 dependency on the first single-cycle load. */
1104 rtx x = SET_DEST (set);
1105
1106 next = next_active_insn (insn);
1107 if (!next)
1108 break;
1109 /* If the insn is a branch, then it cannot be problematic. */
1110 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1111 continue;
1112
1113 /* Look for a second memory access to/from an integer register. */
1114 if ((set = single_set (next)) != NULL_RTX)
1115 {
1116 rtx src = SET_SRC (set);
1117 rtx dest = SET_DEST (set);
1118 rtx mem;
1119
1120 /* LDD is affected. */
1121 if ((mem = mem_ref (src)) != NULL_RTX
1122 && REG_P (dest)
1123 && REGNO (dest) < 32
1124 && !reg_mentioned_p (x, XEXP (mem, 0)))
1125 insert_nop = true;
1126
1127 /* STD is *not* affected. */
1128 else if (MEM_P (dest)
1129 && GET_MODE_SIZE (GET_MODE (dest)) <= 4
1130 && (src == CONST0_RTX (GET_MODE (dest))
1131 || (REG_P (src)
1132 && REGNO (src) < 32
1133 && REGNO (src) != REGNO (x)))
1134 && !reg_mentioned_p (x, XEXP (dest, 0)))
1135 insert_nop = true;
1136 }
1137 }
1138
1139 /* Look for a single-word load/operation into an FP register. */
1140 else if (sparc_fix_ut699
1141 && NONJUMP_INSN_P (insn)
1142 && (set = single_set (insn)) != NULL_RTX
1143 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1144 && REG_P (SET_DEST (set))
1145 && REGNO (SET_DEST (set)) > 31)
1146 {
1147 /* Number of instructions in the problematic window. */
1148 const int n_insns = 4;
1149 /* The problematic combination is with the sibling FP register. */
1150 const unsigned int x = REGNO (SET_DEST (set));
1151 const unsigned int y = x ^ 1;
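	  /* Illustrative shape of the problematic window (assumed example,
	     not from the errata document): a single-word load into %f2,
	     then one into its sibling %f3, then within the next few
	     instructions a single-word store from %f2; the code below
	     emits a nop right after the first load:

		 ld  [%o0], %f2
		 ld  [%o1], %f3
		 ...
		 st  %f2, [%o2]  */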
1152 rtx_insn *after;
1153 int i;
1154
1155 next = next_active_insn (insn);
1156 if (!next)
1157 break;
1158 /* If the insn is a branch, then it cannot be problematic. */
1159 if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
1160 continue;
1161
1162 /* Look for a second load/operation into the sibling FP register. */
1163 if (!((set = single_set (next)) != NULL_RTX
1164 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
1165 && REG_P (SET_DEST (set))
1166 && REGNO (SET_DEST (set)) == y))
1167 continue;
1168
1169 /* Look for a (possible) store from the FP register in the next N
1170 instructions, but bail out if it is again modified or if there
1171 is a store from the sibling FP register before this store. */
1172 for (after = next, i = 0; i < n_insns; i++)
1173 {
1174 bool branch_p;
1175
1176 after = next_active_insn (after);
1177 if (!after)
1178 break;
1179
1180 /* This is a branch with an empty delay slot. */
1181 if (!NONJUMP_INSN_P (after))
1182 {
1183 if (++i == n_insns)
1184 break;
1185 branch_p = true;
1186 after = NULL;
1187 }
1188 /* This is a branch with a filled delay slot. */
1189 else if (rtx_sequence *seq =
1190 dyn_cast <rtx_sequence *> (PATTERN (after)))
1191 {
1192 if (++i == n_insns)
1193 break;
1194 branch_p = true;
1195 after = seq->insn (1);
1196 }
1197 /* This is a regular instruction. */
1198 else
1199 branch_p = false;
1200
1201 if (after && (set = single_set (after)) != NULL_RTX)
1202 {
1203 const rtx src = SET_SRC (set);
1204 const rtx dest = SET_DEST (set);
1205 const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
1206
1207 /* If the FP register is again modified before the store,
1208 then the store isn't affected. */
1209 if (REG_P (dest)
1210 && (REGNO (dest) == x
1211 || (REGNO (dest) == y && size == 8)))
1212 break;
1213
1214 if (MEM_P (dest) && REG_P (src))
1215 {
1216 /* If there is a store from the sibling FP register
1217 before the store, then the store is not affected. */
1218 if (REGNO (src) == y || (REGNO (src) == x && size == 8))
1219 break;
1220
1221 /* Otherwise, the store is affected. */
1222 if (REGNO (src) == x && size == 4)
1223 {
1224 insert_nop = true;
1225 break;
1226 }
1227 }
1228 }
1229
1230 /* If we have a branch in the first M instructions, then we
1231 cannot see the (M+2)th instruction so we play safe. */
1232 if (branch_p && i <= (n_insns - 2))
1233 {
1234 insert_nop = true;
1235 break;
1236 }
1237 }
1238 }
1239
1240 else
1241 next = NEXT_INSN (insn);
1242
1243 if (insert_nop)
1244 emit_insn_before (gen_nop (), next);
1245 }
1246
1247 return 0;
1248 }
1249
1250 namespace {
1251
1252 const pass_data pass_data_work_around_errata =
1253 {
1254 RTL_PASS, /* type */
1255 "errata", /* name */
1256 OPTGROUP_NONE, /* optinfo_flags */
1257 TV_MACH_DEP, /* tv_id */
1258 0, /* properties_required */
1259 0, /* properties_provided */
1260 0, /* properties_destroyed */
1261 0, /* todo_flags_start */
1262 0, /* todo_flags_finish */
1263 };
1264
1265 class pass_work_around_errata : public rtl_opt_pass
1266 {
1267 public:
1268 pass_work_around_errata(gcc::context *ctxt)
1269 : rtl_opt_pass(pass_data_work_around_errata, ctxt)
1270 {}
1271
1272 /* opt_pass methods: */
1273 virtual bool gate (function *)
1274 {
1275 return sparc_fix_at697f || sparc_fix_ut699 || sparc_fix_b2bst;
1276 }
1277
1278 virtual unsigned int execute (function *)
1279 {
1280 return sparc_do_work_around_errata ();
1281 }
1282
1283 }; // class pass_work_around_errata
1284
1285 } // anon namespace
1286
1287 rtl_opt_pass *
1288 make_pass_work_around_errata (gcc::context *ctxt)
1289 {
1290 return new pass_work_around_errata (ctxt);
1291 }
1292
1293 /* Helpers for TARGET_DEBUG_OPTIONS. */
1294 static void
1295 dump_target_flag_bits (const int flags)
1296 {
1297 if (flags & MASK_64BIT)
1298 fprintf (stderr, "64BIT ");
1299 if (flags & MASK_APP_REGS)
1300 fprintf (stderr, "APP_REGS ");
1301 if (flags & MASK_FASTER_STRUCTS)
1302 fprintf (stderr, "FASTER_STRUCTS ");
1303 if (flags & MASK_FLAT)
1304 fprintf (stderr, "FLAT ");
1305 if (flags & MASK_FMAF)
1306 fprintf (stderr, "FMAF ");
1307 if (flags & MASK_FPU)
1308 fprintf (stderr, "FPU ");
1309 if (flags & MASK_HARD_QUAD)
1310 fprintf (stderr, "HARD_QUAD ");
1311 if (flags & MASK_POPC)
1312 fprintf (stderr, "POPC ");
1313 if (flags & MASK_PTR64)
1314 fprintf (stderr, "PTR64 ");
1315 if (flags & MASK_STACK_BIAS)
1316 fprintf (stderr, "STACK_BIAS ");
1317 if (flags & MASK_UNALIGNED_DOUBLES)
1318 fprintf (stderr, "UNALIGNED_DOUBLES ");
1319 if (flags & MASK_V8PLUS)
1320 fprintf (stderr, "V8PLUS ");
1321 if (flags & MASK_VIS)
1322 fprintf (stderr, "VIS ");
1323 if (flags & MASK_VIS2)
1324 fprintf (stderr, "VIS2 ");
1325 if (flags & MASK_VIS3)
1326 fprintf (stderr, "VIS3 ");
1327 if (flags & MASK_VIS4)
1328 fprintf (stderr, "VIS4 ");
1329 if (flags & MASK_VIS4B)
1330 fprintf (stderr, "VIS4B ");
1331 if (flags & MASK_CBCOND)
1332 fprintf (stderr, "CBCOND ");
1333 if (flags & MASK_DEPRECATED_V8_INSNS)
1334 fprintf (stderr, "DEPRECATED_V8_INSNS ");
1335 if (flags & MASK_SPARCLET)
1336 fprintf (stderr, "SPARCLET ");
1337 if (flags & MASK_SPARCLITE)
1338 fprintf (stderr, "SPARCLITE ");
1339 if (flags & MASK_V8)
1340 fprintf (stderr, "V8 ");
1341 if (flags & MASK_V9)
1342 fprintf (stderr, "V9 ");
1343 }
1344
1345 static void
1346 dump_target_flags (const char *prefix, const int flags)
1347 {
1348 fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
1349 dump_target_flag_bits (flags);
 1350   fprintf (stderr, "]\n");
1351 }
1352
1353 /* Validate and override various options, and do some machine dependent
1354 initialization. */
1355
1356 static void
1357 sparc_option_override (void)
1358 {
1359 static struct code_model {
1360 const char *const name;
1361 const enum cmodel value;
1362 } const cmodels[] = {
1363 { "32", CM_32 },
1364 { "medlow", CM_MEDLOW },
1365 { "medmid", CM_MEDMID },
1366 { "medany", CM_MEDANY },
1367 { "embmedany", CM_EMBMEDANY },
1368 { NULL, (enum cmodel) 0 }
1369 };
1370 const struct code_model *cmodel;
1371 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
1372 static struct cpu_default {
1373 const int cpu;
1374 const enum processor_type processor;
1375 } const cpu_default[] = {
1376 /* There must be one entry here for each TARGET_CPU value. */
1377 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
1378 { TARGET_CPU_v8, PROCESSOR_V8 },
1379 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
1380 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
1381 { TARGET_CPU_leon, PROCESSOR_LEON },
1382 { TARGET_CPU_leon3, PROCESSOR_LEON3 },
1383 { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
1384 { TARGET_CPU_sparclite, PROCESSOR_F930 },
1385 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
1386 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
1387 { TARGET_CPU_v9, PROCESSOR_V9 },
1388 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
1389 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
1390 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
1391 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
1392 { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
1393 { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
1394 { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
1395 { TARGET_CPU_m8, PROCESSOR_M8 },
1396 { -1, PROCESSOR_V7 }
1397 };
1398 const struct cpu_default *def;
1399 /* Table of values for -m{cpu,tune}=. This must match the order of
1400 the enum processor_type in sparc-opts.h. */
1401 static struct cpu_table {
1402 const char *const name;
1403 const int disable;
1404 const int enable;
1405 } const cpu_table[] = {
1406 { "v7", MASK_ISA, 0 },
1407 { "cypress", MASK_ISA, 0 },
1408 { "v8", MASK_ISA, MASK_V8 },
1409 /* TI TMS390Z55 supersparc */
1410 { "supersparc", MASK_ISA, MASK_V8 },
1411 { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
1412 { "leon", MASK_ISA, MASK_V8|MASK_LEON|MASK_FPU },
1413 { "leon3", MASK_ISA, MASK_V8|MASK_LEON3|MASK_FPU },
1414 { "leon3v7", MASK_ISA, MASK_LEON3|MASK_FPU },
1415 { "sparclite", MASK_ISA, MASK_SPARCLITE },
1416 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
1417 { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1418 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
1419 { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
1420 { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1421 { "sparclet", MASK_ISA, MASK_SPARCLET },
1422 /* TEMIC sparclet */
1423 { "tsc701", MASK_ISA, MASK_SPARCLET },
1424 { "v9", MASK_ISA, MASK_V9 },
1425 /* UltraSPARC I, II, IIi */
1426 { "ultrasparc", MASK_ISA,
1427 /* Although insns using %y are deprecated, it is a clear win. */
1428 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1429 /* UltraSPARC III */
1430 /* ??? Check if %y issue still holds true. */
1431 { "ultrasparc3", MASK_ISA,
1432 MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
1433 /* UltraSPARC T1 */
1434 { "niagara", MASK_ISA,
1435 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1436 /* UltraSPARC T2 */
1437 { "niagara2", MASK_ISA,
1438 MASK_V9|MASK_POPC|MASK_VIS2 },
1439 /* UltraSPARC T3 */
1440 { "niagara3", MASK_ISA,
1441 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
1442 /* UltraSPARC T4 */
1443 { "niagara4", MASK_ISA,
1444 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
1445 /* UltraSPARC M7 */
1446 { "niagara7", MASK_ISA,
1447 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
1448 /* UltraSPARC M8 */
1449 { "m8", MASK_ISA,
1450 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC
1451 |MASK_VIS4B }
1452 };
1453 const struct cpu_table *cpu;
1454 unsigned int i;
1455
1456 if (sparc_debug_string != NULL)
1457 {
1458 const char *q;
1459 char *p;
1460
1461 p = ASTRDUP (sparc_debug_string);
1462 while ((q = strtok (p, ",")) != NULL)
1463 {
1464 bool invert;
1465 int mask;
1466
1467 p = NULL;
1468 if (*q == '!')
1469 {
1470 invert = true;
1471 q++;
1472 }
1473 else
1474 invert = false;
1475
1476 if (! strcmp (q, "all"))
1477 mask = MASK_DEBUG_ALL;
1478 else if (! strcmp (q, "options"))
1479 mask = MASK_DEBUG_OPTIONS;
1480 else
1481 error ("unknown -mdebug-%s switch", q);
1482
1483 if (invert)
1484 sparc_debug &= ~mask;
1485 else
1486 sparc_debug |= mask;
1487 }
1488 }
1489
1490 if (TARGET_DEBUG_OPTIONS)
1491 {
 1492       dump_target_flags ("Initial target_flags", target_flags);
 1493       dump_target_flags ("target_flags_explicit", target_flags_explicit);
1494 }
1495
1496 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1497 SUBTARGET_OVERRIDE_OPTIONS;
1498 #endif
1499
1500 #ifndef SPARC_BI_ARCH
1501 /* Check for unsupported architecture size. */
1502 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1503 error ("%s is not supported by this configuration",
1504 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1505 #endif
1506
 1507   /* We force all 64-bit archs to use 128-bit long double.  */
1508 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1509 {
1510 error ("-mlong-double-64 not allowed with -m64");
1511 target_flags |= MASK_LONG_DOUBLE_128;
1512 }
1513
1514 /* Code model selection. */
1515 sparc_cmodel = SPARC_DEFAULT_CMODEL;
1516
1517 #ifdef SPARC_BI_ARCH
1518 if (TARGET_ARCH32)
1519 sparc_cmodel = CM_32;
1520 #endif
1521
1522 if (sparc_cmodel_string != NULL)
1523 {
1524 if (TARGET_ARCH64)
1525 {
1526 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
1527 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
1528 break;
1529 if (cmodel->name == NULL)
1530 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
1531 else
1532 sparc_cmodel = cmodel->value;
1533 }
1534 else
1535 error ("-mcmodel= is not supported on 32 bit systems");
1536 }
1537
1538 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1539 for (i = 8; i < 16; i++)
1540 if (!call_used_regs [i])
1541 {
1542 error ("-fcall-saved-REG is not supported for out registers");
1543 call_used_regs [i] = 1;
1544 }
1545
1546 /* Set the default CPU. */
1547 if (!global_options_set.x_sparc_cpu_and_features)
1548 {
1549 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1550 if (def->cpu == TARGET_CPU_DEFAULT)
1551 break;
1552 gcc_assert (def->cpu != -1);
1553 sparc_cpu_and_features = def->processor;
1554 }
1555
1556 if (!global_options_set.x_sparc_cpu)
1557 sparc_cpu = sparc_cpu_and_features;
1558
1559 cpu = &cpu_table[(int) sparc_cpu_and_features];
1560
1561 if (TARGET_DEBUG_OPTIONS)
1562 {
1563 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1564 fprintf (stderr, "sparc_cpu: %s\n",
1565 cpu_table[(int) sparc_cpu].name);
1566 dump_target_flags ("cpu->disable", cpu->disable);
1567 dump_target_flags ("cpu->enable", cpu->enable);
1568 }
1569
1570 target_flags &= ~cpu->disable;
1571 target_flags |= (cpu->enable
1572 #ifndef HAVE_AS_FMAF_HPC_VIS3
1573 & ~(MASK_FMAF | MASK_VIS3)
1574 #endif
1575 #ifndef HAVE_AS_SPARC4
1576 & ~MASK_CBCOND
1577 #endif
1578 #ifndef HAVE_AS_SPARC5_VIS4
1579 & ~(MASK_VIS4 | MASK_SUBXC)
1580 #endif
1581 #ifndef HAVE_AS_SPARC6
1582 & ~(MASK_VIS4B)
1583 #endif
1584 #ifndef HAVE_AS_LEON
1585 & ~(MASK_LEON | MASK_LEON3)
1586 #endif
1587 & ~(target_flags_explicit & MASK_FEATURES)
1588 );
1589
1590 /* -mvis2 implies -mvis. */
1591 if (TARGET_VIS2)
1592 target_flags |= MASK_VIS;
1593
1594 /* -mvis3 implies -mvis2 and -mvis. */
1595 if (TARGET_VIS3)
1596 target_flags |= MASK_VIS2 | MASK_VIS;
1597
1598 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1599 if (TARGET_VIS4)
1600 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1601
 1602   /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis.  */
1603 if (TARGET_VIS4B)
1604 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
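  /* Net effect of the four implications above: specifying just -mvis4b
     also turns on -mvis4, -mvis3, -mvis2 and -mvis.  */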
1605
1606 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b and -mfmaf if
1607 FPU is disabled. */
1608 if (! TARGET_FPU)
1609 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1610 | MASK_VIS4B | MASK_FMAF);
1611
1612 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1613 are available; -m64 also implies v9. */
1614 if (TARGET_VIS || TARGET_ARCH64)
1615 {
1616 target_flags |= MASK_V9;
1617 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1618 }
1619
1620 /* -mvis also implies -mv8plus on 32-bit. */
1621 if (TARGET_VIS && ! TARGET_ARCH64)
1622 target_flags |= MASK_V8PLUS;
1623
1624 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
1625 if (TARGET_V9 && TARGET_ARCH32)
1626 target_flags |= MASK_DEPRECATED_V8_INSNS;
1627
1628 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
1629 if (! TARGET_V9 || TARGET_ARCH64)
1630 target_flags &= ~MASK_V8PLUS;
1631
1632 /* Don't use stack biasing in 32 bit mode. */
1633 if (TARGET_ARCH32)
1634 target_flags &= ~MASK_STACK_BIAS;
1635
1636 /* Use LRA instead of reload, unless otherwise instructed. */
1637 if (!(target_flags_explicit & MASK_LRA))
1638 target_flags |= MASK_LRA;
1639
1640 /* Enable the back-to-back store errata workaround for LEON3FT. */
1641 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1642 sparc_fix_b2bst = 1;
1643
1644 /* Supply a default value for align_functions. */
1645 if (align_functions == 0)
1646 {
1647 if (sparc_cpu == PROCESSOR_ULTRASPARC
1648 || sparc_cpu == PROCESSOR_ULTRASPARC3
1649 || sparc_cpu == PROCESSOR_NIAGARA
1650 || sparc_cpu == PROCESSOR_NIAGARA2
1651 || sparc_cpu == PROCESSOR_NIAGARA3
1652 || sparc_cpu == PROCESSOR_NIAGARA4)
1653 align_functions = 32;
1654 else if (sparc_cpu == PROCESSOR_NIAGARA7
1655 || sparc_cpu == PROCESSOR_M8)
1656 align_functions = 64;
1657 }
1658
1659 /* Validate PCC_STRUCT_RETURN. */
1660 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
1661 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
1662
1663 /* Only use .uaxword when compiling for a 64-bit target. */
1664 if (!TARGET_ARCH64)
1665 targetm.asm_out.unaligned_op.di = NULL;
1666
1667 /* Do various machine dependent initializations. */
1668 sparc_init_modes ();
1669
1670 /* Set up function hooks. */
1671 init_machine_status = sparc_init_machine_status;
1672
1673 switch (sparc_cpu)
1674 {
1675 case PROCESSOR_V7:
1676 case PROCESSOR_CYPRESS:
1677 sparc_costs = &cypress_costs;
1678 break;
1679 case PROCESSOR_V8:
1680 case PROCESSOR_SPARCLITE:
1681 case PROCESSOR_SUPERSPARC:
1682 sparc_costs = &supersparc_costs;
1683 break;
1684 case PROCESSOR_F930:
1685 case PROCESSOR_F934:
1686 case PROCESSOR_HYPERSPARC:
1687 case PROCESSOR_SPARCLITE86X:
1688 sparc_costs = &hypersparc_costs;
1689 break;
1690 case PROCESSOR_LEON:
1691 sparc_costs = &leon_costs;
1692 break;
1693 case PROCESSOR_LEON3:
1694 case PROCESSOR_LEON3V7:
1695 sparc_costs = &leon3_costs;
1696 break;
1697 case PROCESSOR_SPARCLET:
1698 case PROCESSOR_TSC701:
1699 sparc_costs = &sparclet_costs;
1700 break;
1701 case PROCESSOR_V9:
1702 case PROCESSOR_ULTRASPARC:
1703 sparc_costs = &ultrasparc_costs;
1704 break;
1705 case PROCESSOR_ULTRASPARC3:
1706 sparc_costs = &ultrasparc3_costs;
1707 break;
1708 case PROCESSOR_NIAGARA:
1709 sparc_costs = &niagara_costs;
1710 break;
1711 case PROCESSOR_NIAGARA2:
1712 sparc_costs = &niagara2_costs;
1713 break;
1714 case PROCESSOR_NIAGARA3:
1715 sparc_costs = &niagara3_costs;
1716 break;
1717 case PROCESSOR_NIAGARA4:
1718 sparc_costs = &niagara4_costs;
1719 break;
1720 case PROCESSOR_NIAGARA7:
1721 sparc_costs = &niagara7_costs;
1722 break;
1723 case PROCESSOR_M8:
1724 sparc_costs = &m8_costs;
1725 break;
1726 case PROCESSOR_NATIVE:
1727 gcc_unreachable ();
1728 };
1729
1730 if (sparc_memory_model == SMM_DEFAULT)
1731 {
1732 /* Choose the memory model for the operating system. */
1733 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
1734 if (os_default != SMM_DEFAULT)
1735 sparc_memory_model = os_default;
1736 /* Choose the most relaxed model for the processor. */
1737 else if (TARGET_V9)
1738 sparc_memory_model = SMM_RMO;
1739 else if (TARGET_LEON3)
1740 sparc_memory_model = SMM_TSO;
1741 else if (TARGET_LEON)
1742 sparc_memory_model = SMM_SC;
1743 else if (TARGET_V8)
1744 sparc_memory_model = SMM_PSO;
1745 else
1746 sparc_memory_model = SMM_SC;
1747 }
1748
1749 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1750 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1751 target_flags |= MASK_LONG_DOUBLE_128;
1752 #endif
1753
1754 if (TARGET_DEBUG_OPTIONS)
1755 dump_target_flags ("Final target_flags", target_flags);
1756
1757 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
1758 can run at the same time. More important, it is the threshold
1759 defining when additional prefetches will be dropped by the
1760 hardware.
1761
1762 The UltraSPARC-III features a documented prefetch queue with a
1763 size of 8. Additional prefetches issued in the cpu are
1764 dropped.
1765
1766 Niagara processors are different. In these processors prefetches
1767 are handled much like regular loads. The L1 miss buffer is 32
1768 entries, but prefetches start getting affected when 30 entries
1769 become occupied. That occupation could be a mix of regular loads
1770 and prefetches though. And that buffer is shared by all threads.
1771 Once the threshold is reached, if the core is running a single
1772 thread the prefetch will retry. If more than one thread is
1773 running, the prefetch will be dropped.
1774
1775 All this makes it very difficult to determine how many
1776 prefetches can be issued simultaneously, even in a
1777 single-threaded program. Experimental results show that setting
1778 this parameter to 32 works well when the number of threads is not
1779 high. */
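/* In short, the value chosen below is 2 for PROCESSOR_ULTRASPARC and the
PROCESSOR_NIAGARA/2/3/4 processors, 8 for PROCESSOR_ULTRASPARC3, 32 for
PROCESSOR_NIAGARA7 and PROCESSOR_M8, and 3 for everything else. */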
1780 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
1781 ((sparc_cpu == PROCESSOR_ULTRASPARC
1782 || sparc_cpu == PROCESSOR_NIAGARA
1783 || sparc_cpu == PROCESSOR_NIAGARA2
1784 || sparc_cpu == PROCESSOR_NIAGARA3
1785 || sparc_cpu == PROCESSOR_NIAGARA4)
1786 ? 2
1787 : (sparc_cpu == PROCESSOR_ULTRASPARC3
1788 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
1789 || sparc_cpu == PROCESSOR_M8)
1790 ? 32 : 3))),
1791 global_options.x_param_values,
1792 global_options_set.x_param_values);
1793
1794 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
1795 bytes.
1796
1797 The Oracle SPARC Architecture (previously the UltraSPARC
1798 Architecture) specification states that when a PREFETCH[A]
1799 instruction is executed an implementation-specific amount of data
1800 is prefetched, and that it is at least 64 bytes long (aligned to
1801 at least 64 bytes).
1802
1803 However, this is not correct. The M7 (and implementations prior
1804 to that) does not guarantee a 64B prefetch into a cache if the
1805 line size is smaller. A single cache line is all that is ever
1806 prefetched. So for the M7, where the L1D$ has 32B lines and the
1807 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
1808 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
1809 is a read_n prefetch, which is the only type which allocates to
1810 the L1.) */
1811 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
1812 (sparc_cpu == PROCESSOR_M8
1813 ? 64 : 32),
1814 global_options.x_param_values,
1815 global_options_set.x_param_values);
1816
1817 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
1818 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
1819 Niagara processors feature an L1D$ of 16KB. */
1820 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
1821 ((sparc_cpu == PROCESSOR_ULTRASPARC
1822 || sparc_cpu == PROCESSOR_ULTRASPARC3
1823 || sparc_cpu == PROCESSOR_NIAGARA
1824 || sparc_cpu == PROCESSOR_NIAGARA2
1825 || sparc_cpu == PROCESSOR_NIAGARA3
1826 || sparc_cpu == PROCESSOR_NIAGARA4
1827 || sparc_cpu == PROCESSOR_NIAGARA7
1828 || sparc_cpu == PROCESSOR_M8)
1829 ? 16 : 64),
1830 global_options.x_param_values,
1831 global_options_set.x_param_values);
1832
1833
1834 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes. Note
1835 that 512 is the default in params.def. */
1836 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
1837 ((sparc_cpu == PROCESSOR_NIAGARA4
1838 || sparc_cpu == PROCESSOR_M8)
1839 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
1840 ? 256 : 512)),
1841 global_options.x_param_values,
1842 global_options_set.x_param_values);
1843
1844
1845 /* Disable save slot sharing for call-clobbered registers by default.
1846 The IRA sharing algorithm works on single registers only and this
1847 pessimizes for double floating-point registers. */
1848 if (!global_options_set.x_flag_ira_share_save_slots)
1849 flag_ira_share_save_slots = 0;
1850
1851 /* Only enable REE by default in 64-bit mode where it helps to eliminate
1852 redundant 32-to-64-bit extensions. */
1853 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
1854 flag_ree = 0;
1855 }
1856 \f
1857 /* Miscellaneous utilities. */
1858
1859 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
1860 or branch on register contents instructions. */
1861
1862 int
1863 v9_regcmp_p (enum rtx_code code)
1864 {
1865 return (code == EQ || code == NE || code == GE || code == LT
1866 || code == LE || code == GT);
1867 }
1868
1869 /* Nonzero if OP is a floating point constant which can
1870 be loaded into an integer register using a single
1871 sethi instruction. */
1872
1873 int
1874 fp_sethi_p (rtx op)
1875 {
1876 if (GET_CODE (op) == CONST_DOUBLE)
1877 {
1878 long i;
1879
1880 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
1881 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
1882 }
1883
1884 return 0;
1885 }
1886
1887 /* Nonzero if OP is a floating point constant which can
1888 be loaded into an integer register using a single
1889 mov instruction. */
1890
1891 int
1892 fp_mov_p (rtx op)
1893 {
1894 if (GET_CODE (op) == CONST_DOUBLE)
1895 {
1896 long i;
1897
1898 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
1899 return SPARC_SIMM13_P (i);
1900 }
1901
1902 return 0;
1903 }
1904
1905 /* Nonzero if OP is a floating point constant which can
1906 be loaded into an integer register using a high/losum
1907 instruction sequence. */
1908
1909 int
1910 fp_high_losum_p (rtx op)
1911 {
1912 /* The constraints calling this should only be in
1913 SFmode move insns, so any constant which cannot
1914 be moved using a single insn will do. */
1915 if (GET_CODE (op) == CONST_DOUBLE)
1916 {
1917 long i;
1918
1919 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
1920 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
1921 }
1922
1923 return 0;
1924 }
1925
1926 /* Return true if the address of LABEL can be loaded by means of the
1927 mov{si,di}_pic_label_ref patterns in PIC mode. */
1928
1929 static bool
1930 can_use_mov_pic_label_ref (rtx label)
1931 {
1932 /* VxWorks does not impose a fixed gap between segments; the run-time
1933 gap can be different from the object-file gap. We therefore can't
1934 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1935 are absolutely sure that X is in the same segment as the GOT.
1936 Unfortunately, the flexibility of linker scripts means that we
1937 can't be sure of that in general, so assume that GOT-relative
1938 accesses are never valid on VxWorks. */
1939 if (TARGET_VXWORKS_RTP)
1940 return false;
1941
1942 /* Similarly, if the label is non-local, it might end up being placed
1943 in a different section than the current one; now mov_pic_label_ref
1944 requires the label and the code to be in the same section. */
1945 if (LABEL_REF_NONLOCAL_P (label))
1946 return false;
1947
1948 /* Finally, if we are reordering basic blocks and partitioning into hot
1949 and cold sections, this might happen for any label. */
1950 if (flag_reorder_blocks_and_partition)
1951 return false;
1952
1953 return true;
1954 }
1955
1956 /* Expand a move instruction. Return true if all work is done. */
1957
1958 bool
1959 sparc_expand_move (machine_mode mode, rtx *operands)
1960 {
1961 /* Handle sets of MEM first. */
1962 if (GET_CODE (operands[0]) == MEM)
1963 {
1964 /* 0 is a register (or a pair of registers) on SPARC. */
1965 if (register_or_zero_operand (operands[1], mode))
1966 return false;
1967
1968 if (!reload_in_progress)
1969 {
1970 operands[0] = validize_mem (operands[0]);
1971 operands[1] = force_reg (mode, operands[1]);
1972 }
1973 }
1974
1975 /* Fixup TLS cases. */
1976 if (TARGET_HAVE_TLS
1977 && CONSTANT_P (operands[1])
1978 && sparc_tls_referenced_p (operands [1]))
1979 {
1980 operands[1] = sparc_legitimize_tls_address (operands[1]);
1981 return false;
1982 }
1983
1984 /* Fixup PIC cases. */
1985 if (flag_pic && CONSTANT_P (operands[1]))
1986 {
1987 if (pic_address_needs_scratch (operands[1]))
1988 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
1989
1990 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
1991 if (GET_CODE (operands[1]) == LABEL_REF
1992 && can_use_mov_pic_label_ref (operands[1]))
1993 {
1994 if (mode == SImode)
1995 {
1996 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1997 return true;
1998 }
1999
2000 if (mode == DImode)
2001 {
2002 gcc_assert (TARGET_ARCH64);
2003 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2004 return true;
2005 }
2006 }
2007
2008 if (symbolic_operand (operands[1], mode))
2009 {
2010 operands[1]
2011 = sparc_legitimize_pic_address (operands[1],
2012 reload_in_progress
2013 ? operands[0] : NULL_RTX);
2014 return false;
2015 }
2016 }
2017
2018 /* If we are trying to toss an integer constant into FP registers,
2019 or loading a FP or vector constant, force it into memory. */
2020 if (CONSTANT_P (operands[1])
2021 && REG_P (operands[0])
2022 && (SPARC_FP_REG_P (REGNO (operands[0]))
2023 || SCALAR_FLOAT_MODE_P (mode)
2024 || VECTOR_MODE_P (mode)))
2025 {
2026 /* emit_group_store will send such bogosity to us when it is
2027 not storing directly into memory. So fix this up to avoid
2028 crashes in output_constant_pool. */
2029 if (operands [1] == const0_rtx)
2030 operands[1] = CONST0_RTX (mode);
2031
2032 /* We can clear or set to all-ones FP registers if TARGET_VIS, and
2033 we can always do so for other registers. */
2034 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2035 && (const_zero_operand (operands[1], mode)
2036 || const_all_ones_operand (operands[1], mode)))
2037 return false;
2038
2039 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2040 /* We are able to build any SF constant in integer registers
2041 with at most 2 instructions. */
2042 && (mode == SFmode
2043 /* And any DF constant in integer registers if needed. */
2044 || (mode == DFmode && !can_create_pseudo_p ())))
2045 return false;
2046
2047 operands[1] = force_const_mem (mode, operands[1]);
2048 if (!reload_in_progress)
2049 operands[1] = validize_mem (operands[1]);
2050 return false;
2051 }
2052
2053 /* Accept non-constants and valid constants unmodified. */
2054 if (!CONSTANT_P (operands[1])
2055 || GET_CODE (operands[1]) == HIGH
2056 || input_operand (operands[1], mode))
2057 return false;
2058
2059 switch (mode)
2060 {
2061 case QImode:
2062 /* All QImode constants require only one insn, so proceed. */
2063 break;
2064
2065 case HImode:
2066 case SImode:
2067 sparc_emit_set_const32 (operands[0], operands[1]);
2068 return true;
2069
2070 case DImode:
2071 /* input_operand should have filtered out 32-bit mode. */
2072 sparc_emit_set_const64 (operands[0], operands[1]);
2073 return true;
2074
2075 case TImode:
2076 {
2077 rtx high, low;
2078 /* TImode isn't available in 32-bit mode. */
2079 split_double (operands[1], &high, &low);
2080 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2081 high));
2082 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2083 low));
2084 }
2085 return true;
2086
2087 default:
2088 gcc_unreachable ();
2089 }
2090
2091 return false;
2092 }
2093
2094 /* Load OP1, a 32-bit constant, into OP0, a register.
2095 We know it can't be done in one insn when we get
2096 here; the move expander guarantees this. */
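/* For illustration only (the constant is arbitrary): loading 0x12345678
means setting the upper 22 bits and then or-ing in the low 10 bits,
roughly

sethi %hi(0x12345678), %tmp ! %tmp = 0x12345400
or %tmp, 0x278, %reg ! add the low 10 bits

except that, for CONST_INTs, the code below emits the masked constant as
a plain move followed by an IOR, rather than a HIGH/LO_SUM pair, so that
CSE can simplify the intermediate value. */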
2097
2098 static void
2099 sparc_emit_set_const32 (rtx op0, rtx op1)
2100 {
2101 machine_mode mode = GET_MODE (op0);
2102 rtx temp = op0;
2103
2104 if (can_create_pseudo_p ())
2105 temp = gen_reg_rtx (mode);
2106
2107 if (GET_CODE (op1) == CONST_INT)
2108 {
2109 gcc_assert (!small_int_operand (op1, mode)
2110 && !const_high_operand (op1, mode));
2111
2112 /* Emit them as real moves instead of a HIGH/LO_SUM;
2113 this way CSE can see everything and reuse intermediate
2114 values if it wants. */
2115 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2116 & ~(HOST_WIDE_INT) 0x3ff)));
2117
2118 emit_insn (gen_rtx_SET (op0,
2119 gen_rtx_IOR (mode, temp,
2120 GEN_INT (INTVAL (op1) & 0x3ff))));
2121 }
2122 else
2123 {
2124 /* A symbol, emit in the traditional way. */
2125 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2126 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
2127 }
2128 }
2129
2130 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2131 If TEMP is nonzero, we are forbidden to use any other scratch
2132 registers. Otherwise, we are allowed to generate them as needed.
2133
2134 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2135 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2136
2137 void
2138 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2139 {
2140 rtx temp1, temp2, temp3, temp4, temp5;
2141 rtx ti_temp = 0;
2142
2143 if (temp && GET_MODE (temp) == TImode)
2144 {
2145 ti_temp = temp;
2146 temp = gen_rtx_REG (DImode, REGNO (temp));
2147 }
2148
2149 /* SPARC-V9 code-model support. */
2150 switch (sparc_cmodel)
2151 {
2152 case CM_MEDLOW:
2153 /* The range spanned by all instructions in the object is less
2154 than 2^31 bytes (2GB) and the distance from any instruction
2155 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2156 than 2^31 bytes (2GB).
2157
2158 The executable must be in the low 4TB of the virtual address
2159 space.
2160
2161 sethi %hi(symbol), %temp1
2162 or %temp1, %lo(symbol), %reg */
2163 if (temp)
2164 temp1 = temp; /* op0 is allowed. */
2165 else
2166 temp1 = gen_reg_rtx (DImode);
2167
2168 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2169 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2170 break;
2171
2172 case CM_MEDMID:
2173 /* The range spanned by all instructions in the object is less
2174 than 2^31 bytes (2GB) and the distance from any instruction
2175 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2176 than 2^31 bytes (2GB).
2177
2178 The executable must be in the low 16TB of the virtual address
2179 space.
2180
2181 sethi %h44(symbol), %temp1
2182 or %temp1, %m44(symbol), %temp2
2183 sllx %temp2, 12, %temp3
2184 or %temp3, %l44(symbol), %reg */
2185 if (temp)
2186 {
2187 temp1 = op0;
2188 temp2 = op0;
2189 temp3 = temp; /* op0 is allowed. */
2190 }
2191 else
2192 {
2193 temp1 = gen_reg_rtx (DImode);
2194 temp2 = gen_reg_rtx (DImode);
2195 temp3 = gen_reg_rtx (DImode);
2196 }
2197
2198 emit_insn (gen_seth44 (temp1, op1));
2199 emit_insn (gen_setm44 (temp2, temp1, op1));
2200 emit_insn (gen_rtx_SET (temp3,
2201 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2202 emit_insn (gen_setl44 (op0, temp3, op1));
2203 break;
2204
2205 case CM_MEDANY:
2206 /* The range spanned by all instructions in the object is less
2207 than 2^31 bytes (2GB) and the distance from any instruction
2208 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2209 than 2^31 bytes (2GB).
2210
2211 The executable can be placed anywhere in the virtual address
2212 space.
2213
2214 sethi %hh(symbol), %temp1
2215 sethi %lm(symbol), %temp2
2216 or %temp1, %hm(symbol), %temp3
2217 sllx %temp3, 32, %temp4
2218 or %temp4, %temp2, %temp5
2219 or %temp5, %lo(symbol), %reg */
2220 if (temp)
2221 {
2222 /* It is possible that one of the registers we got for operands[2]
2223 might coincide with that of operands[0] (which is why we made
2224 it TImode). Pick the other one to use as our scratch. */
2225 if (rtx_equal_p (temp, op0))
2226 {
2227 gcc_assert (ti_temp);
2228 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2229 }
2230 temp1 = op0;
2231 temp2 = temp; /* op0 is _not_ allowed, see above. */
2232 temp3 = op0;
2233 temp4 = op0;
2234 temp5 = op0;
2235 }
2236 else
2237 {
2238 temp1 = gen_reg_rtx (DImode);
2239 temp2 = gen_reg_rtx (DImode);
2240 temp3 = gen_reg_rtx (DImode);
2241 temp4 = gen_reg_rtx (DImode);
2242 temp5 = gen_reg_rtx (DImode);
2243 }
2244
2245 emit_insn (gen_sethh (temp1, op1));
2246 emit_insn (gen_setlm (temp2, op1));
2247 emit_insn (gen_sethm (temp3, temp1, op1));
2248 emit_insn (gen_rtx_SET (temp4,
2249 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2250 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2251 emit_insn (gen_setlo (op0, temp5, op1));
2252 break;
2253
2254 case CM_EMBMEDANY:
2255 /* Old old old backwards compatibility kruft here.
2256 Essentially it is MEDLOW with a fixed 64-bit
2257 virtual base added to all data segment addresses.
2258 Text-segment stuff is computed like MEDANY, we can't
2259 reuse the code above because the relocation knobs
2260 look different.
2261
2262 Data segment: sethi %hi(symbol), %temp1
2263 add %temp1, EMBMEDANY_BASE_REG, %temp2
2264 or %temp2, %lo(symbol), %reg */
2265 if (data_segment_operand (op1, GET_MODE (op1)))
2266 {
2267 if (temp)
2268 {
2269 temp1 = temp; /* op0 is allowed. */
2270 temp2 = op0;
2271 }
2272 else
2273 {
2274 temp1 = gen_reg_rtx (DImode);
2275 temp2 = gen_reg_rtx (DImode);
2276 }
2277
2278 emit_insn (gen_embmedany_sethi (temp1, op1));
2279 emit_insn (gen_embmedany_brsum (temp2, temp1));
2280 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2281 }
2282
2283 /* Text segment: sethi %uhi(symbol), %temp1
2284 sethi %hi(symbol), %temp2
2285 or %temp1, %ulo(symbol), %temp3
2286 sllx %temp3, 32, %temp4
2287 or %temp4, %temp2, %temp5
2288 or %temp5, %lo(symbol), %reg */
2289 else
2290 {
2291 if (temp)
2292 {
2293 /* It is possible that one of the registers we got for operands[2]
2294 might coincide with that of operands[0] (which is why we made
2295 it TImode). Pick the other one to use as our scratch. */
2296 if (rtx_equal_p (temp, op0))
2297 {
2298 gcc_assert (ti_temp);
2299 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2300 }
2301 temp1 = op0;
2302 temp2 = temp; /* op0 is _not_ allowed, see above. */
2303 temp3 = op0;
2304 temp4 = op0;
2305 temp5 = op0;
2306 }
2307 else
2308 {
2309 temp1 = gen_reg_rtx (DImode);
2310 temp2 = gen_reg_rtx (DImode);
2311 temp3 = gen_reg_rtx (DImode);
2312 temp4 = gen_reg_rtx (DImode);
2313 temp5 = gen_reg_rtx (DImode);
2314 }
2315
2316 emit_insn (gen_embmedany_textuhi (temp1, op1));
2317 emit_insn (gen_embmedany_texthi (temp2, op1));
2318 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2319 emit_insn (gen_rtx_SET (temp4,
2320 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2321 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2322 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2323 }
2324 break;
2325
2326 default:
2327 gcc_unreachable ();
2328 }
2329 }
2330
2331 /* These avoid problems when cross compiling. If we do not
2332 go through all this hair then the optimizer will see
2333 invalid REG_EQUAL notes or in some cases none at all. */
2334 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2335 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2336 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2337 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2338
2339 /* The optimizer is not to assume anything about exactly
2340 which bits are set for a HIGH; they are unspecified.
2341 Unfortunately this leads to many missed optimizations
2342 during CSE. We mask out the non-HIGH bits so the result
2343 matches a plain movdi, to alleviate this problem. */
2344 static rtx
2345 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2346 {
2347 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2348 }
2349
2350 static rtx
2351 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2352 {
2353 return gen_rtx_SET (dest, GEN_INT (val));
2354 }
2355
2356 static rtx
2357 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2358 {
2359 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2360 }
2361
2362 static rtx
2363 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2364 {
2365 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2366 }
2367
2368 /* Worker routines for 64-bit constant formation on arch64.
2369 One of the key things to do in these emissions is
2370 to create as many temp REGs as possible. This makes it
2371 possible for half-built constants to be reused when
2372 they are similar to values required later on.
2373 Without doing this, the optimizer cannot see such
2374 opportunities. */
2375
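/* Load a 64-bit constant whose upper 32 bits are all zeros (IS_NEG == 0)
or all ones (IS_NEG != 0): emit a sethi-style move of LOW_BITS
(complemented in the negative case) into TEMP, then an or, xor or one's
complement into OP0 to fix up the low 10 bits. */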
2376 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2377 unsigned HOST_WIDE_INT, int);
2378
2379 static void
2380 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2381 unsigned HOST_WIDE_INT low_bits, int is_neg)
2382 {
2383 unsigned HOST_WIDE_INT high_bits;
2384
2385 if (is_neg)
2386 high_bits = (~low_bits) & 0xffffffff;
2387 else
2388 high_bits = low_bits;
2389
2390 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2391 if (!is_neg)
2392 {
2393 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2394 }
2395 else
2396 {
2397 /* If we are XOR'ing with -1, then we should emit a one's complement
2398 instead. This way the combiner will notice logical operations
2399 such as ANDN later on and substitute. */
2400 if ((low_bits & 0x3ff) == 0x3ff)
2401 {
2402 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2403 }
2404 else
2405 {
2406 emit_insn (gen_rtx_SET (op0,
2407 gen_safe_XOR64 (temp,
2408 (-(HOST_WIDE_INT)0x400
2409 | (low_bits & 0x3ff)))));
2410 }
2411 }
2412 }
2413
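/* Build HIGH_BITS in TEMP (a sethi, plus an or of the low 10 bits when
they are nonzero, or a single move when the value fits in a simm13),
shift it left by SHIFT_COUNT into OP0, and finally or in LOW_IMMEDIATE
if it is nonzero. */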
2414 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2415 unsigned HOST_WIDE_INT, int);
2416
2417 static void
2418 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2419 unsigned HOST_WIDE_INT high_bits,
2420 unsigned HOST_WIDE_INT low_immediate,
2421 int shift_count)
2422 {
2423 rtx temp2 = op0;
2424
2425 if ((high_bits & 0xfffffc00) != 0)
2426 {
2427 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2428 if ((high_bits & ~0xfffffc00) != 0)
2429 emit_insn (gen_rtx_SET (op0,
2430 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2431 else
2432 temp2 = temp;
2433 }
2434 else
2435 {
2436 emit_insn (gen_safe_SET64 (temp, high_bits));
2437 temp2 = temp;
2438 }
2439
2440 /* Now shift it up into place. */
2441 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2442 GEN_INT (shift_count))));
2443
2444 /* If there is a low immediate piece, finish up by
2445 putting that in as well. */
2446 if (low_immediate != 0)
2447 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
2448 }
2449
2450 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2451 unsigned HOST_WIDE_INT);
2452
2453 /* Full 64-bit constant decomposition. Even though this is the
2454 'worst' case, we still optimize a few things away. */
2455 static void
2456 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2457 unsigned HOST_WIDE_INT high_bits,
2458 unsigned HOST_WIDE_INT low_bits)
2459 {
2460 rtx sub_temp = op0;
2461
2462 if (can_create_pseudo_p ())
2463 sub_temp = gen_reg_rtx (DImode);
2464
2465 if ((high_bits & 0xfffffc00) != 0)
2466 {
2467 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2468 if ((high_bits & ~0xfffffc00) != 0)
2469 emit_insn (gen_rtx_SET (sub_temp,
2470 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2471 else
2472 sub_temp = temp;
2473 }
2474 else
2475 {
2476 emit_insn (gen_safe_SET64 (temp, high_bits));
2477 sub_temp = temp;
2478 }
2479
2480 if (can_create_pseudo_p ())
2481 {
2482 rtx temp2 = gen_reg_rtx (DImode);
2483 rtx temp3 = gen_reg_rtx (DImode);
2484 rtx temp4 = gen_reg_rtx (DImode);
2485
2486 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2487 GEN_INT (32))));
2488
2489 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2490 if ((low_bits & ~0xfffffc00) != 0)
2491 {
2492 emit_insn (gen_rtx_SET (temp3,
2493 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2494 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2495 }
2496 else
2497 {
2498 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2499 }
2500 }
2501 else
2502 {
2503 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2504 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2505 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2506 int to_shift = 12;
2507
2508 /* We are in the middle of reload, so this is really
2509 painful. However we do still make an attempt to
2510 avoid emitting truly stupid code. */
2511 if (low1 != const0_rtx)
2512 {
2513 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2514 GEN_INT (to_shift))));
2515 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2516 sub_temp = op0;
2517 to_shift = 12;
2518 }
2519 else
2520 {
2521 to_shift += 12;
2522 }
2523 if (low2 != const0_rtx)
2524 {
2525 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2526 GEN_INT (to_shift))));
2527 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2528 sub_temp = op0;
2529 to_shift = 8;
2530 }
2531 else
2532 {
2533 to_shift += 8;
2534 }
2535 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2536 GEN_INT (to_shift))));
2537 if (low3 != const0_rtx)
2538 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2539 /* phew... */
2540 }
2541 }
2542
2543 /* Analyze a 64-bit constant for certain properties. */
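/* For example (an arbitrary value), the constant 0x0000000000fff000 has
lowest_bit_set == 12 and highest_bit_set == 23, and, since bits 12..23
are all ones, all_bits_between_are_set == 1. */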
2544 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2545 unsigned HOST_WIDE_INT,
2546 int *, int *, int *);
2547
2548 static void
2549 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2550 unsigned HOST_WIDE_INT low_bits,
2551 int *hbsp, int *lbsp, int *abbasp)
2552 {
2553 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2554 int i;
2555
2556 lowest_bit_set = highest_bit_set = -1;
2557 i = 0;
2558 do
2559 {
2560 if ((lowest_bit_set == -1)
2561 && ((low_bits >> i) & 1))
2562 lowest_bit_set = i;
2563 if ((highest_bit_set == -1)
2564 && ((high_bits >> (32 - i - 1)) & 1))
2565 highest_bit_set = (64 - i - 1);
2566 }
2567 while (++i < 32
2568 && ((highest_bit_set == -1)
2569 || (lowest_bit_set == -1)));
2570 if (i == 32)
2571 {
2572 i = 0;
2573 do
2574 {
2575 if ((lowest_bit_set == -1)
2576 && ((high_bits >> i) & 1))
2577 lowest_bit_set = i + 32;
2578 if ((highest_bit_set == -1)
2579 && ((low_bits >> (32 - i - 1)) & 1))
2580 highest_bit_set = 32 - i - 1;
2581 }
2582 while (++i < 32
2583 && ((highest_bit_set == -1)
2584 || (lowest_bit_set == -1)));
2585 }
2586 /* If there are no bits set this should have gone out
2587 as one instruction! */
2588 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2589 all_bits_between_are_set = 1;
2590 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2591 {
2592 if (i < 32)
2593 {
2594 if ((low_bits & (1 << i)) != 0)
2595 continue;
2596 }
2597 else
2598 {
2599 if ((high_bits & (1 << (i - 32))) != 0)
2600 continue;
2601 }
2602 all_bits_between_are_set = 0;
2603 break;
2604 }
2605 *hbsp = highest_bit_set;
2606 *lbsp = lowest_bit_set;
2607 *abbasp = all_bits_between_are_set;
2608 }
2609
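/* Return nonzero if the constant formed by HIGH_BITS:LOW_BITS can
(heuristically) be loaded in two instructions: the upper word is all
zeros or all ones, the set bits form a single run reaching bit 0 or
bit 63, or the run of set bits is narrow enough for a single sethi. */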
2610 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2611
2612 static int
2613 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2614 unsigned HOST_WIDE_INT low_bits)
2615 {
2616 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2617
2618 if (high_bits == 0
2619 || high_bits == 0xffffffff)
2620 return 1;
2621
2622 analyze_64bit_constant (high_bits, low_bits,
2623 &highest_bit_set, &lowest_bit_set,
2624 &all_bits_between_are_set);
2625
2626 if ((highest_bit_set == 63
2627 || lowest_bit_set == 0)
2628 && all_bits_between_are_set != 0)
2629 return 1;
2630
2631 if ((highest_bit_set - lowest_bit_set) < 21)
2632 return 1;
2633
2634 return 0;
2635 }
2636
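/* Collapse the run of set bits in HIGH_BITS:LOW_BITS into a "focus"
value: shift the 64-bit constant right so that bit LOWEST_BIT_SET lands
at bit 0, then shift the result left by SHIFT. */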
2637 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2638 unsigned HOST_WIDE_INT,
2639 int, int);
2640
2641 static unsigned HOST_WIDE_INT
2642 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2643 unsigned HOST_WIDE_INT low_bits,
2644 int lowest_bit_set, int shift)
2645 {
2646 HOST_WIDE_INT hi, lo;
2647
2648 if (lowest_bit_set < 32)
2649 {
2650 lo = (low_bits >> lowest_bit_set) << shift;
2651 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2652 }
2653 else
2654 {
2655 lo = 0;
2656 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2657 }
2658 gcc_assert (! (hi & lo));
2659 return (hi | lo);
2660 }
2661
2662 /* Here we are sure to be arch64 and this is an integer constant
2663 being loaded into a register. Emit the most efficient
2664 insn sequence possible. Detection of all the 1-insn cases
2665 has been done already. */
2666 static void
2667 sparc_emit_set_const64 (rtx op0, rtx op1)
2668 {
2669 unsigned HOST_WIDE_INT high_bits, low_bits;
2670 int lowest_bit_set, highest_bit_set;
2671 int all_bits_between_are_set;
2672 rtx temp = 0;
2673
2674 /* Sanity check that we know what we are working with. */
2675 gcc_assert (TARGET_ARCH64
2676 && (GET_CODE (op0) == SUBREG
2677 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2678
2679 if (! can_create_pseudo_p ())
2680 temp = op0;
2681
2682 if (GET_CODE (op1) != CONST_INT)
2683 {
2684 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2685 return;
2686 }
2687
2688 if (! temp)
2689 temp = gen_reg_rtx (DImode);
2690
2691 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2692 low_bits = (INTVAL (op1) & 0xffffffff);
2693
2694 /* low_bits bits 0 --> 31
2695 high_bits bits 32 --> 63 */
2696
2697 analyze_64bit_constant (high_bits, low_bits,
2698 &highest_bit_set, &lowest_bit_set,
2699 &all_bits_between_are_set);
2700
2701 /* First try for a 2-insn sequence. */
2702
2703 /* These situations are preferred because the optimizer can
2704 * do more things with them:
2705 * 1) mov -1, %reg
2706 * sllx %reg, shift, %reg
2707 * 2) mov -1, %reg
2708 * srlx %reg, shift, %reg
2709 * 3) mov some_small_const, %reg
2710 * sllx %reg, shift, %reg
2711 */
2712 if (((highest_bit_set == 63
2713 || lowest_bit_set == 0)
2714 && all_bits_between_are_set != 0)
2715 || ((highest_bit_set - lowest_bit_set) < 12))
2716 {
2717 HOST_WIDE_INT the_const = -1;
2718 int shift = lowest_bit_set;
2719
2720 if ((highest_bit_set != 63
2721 && lowest_bit_set != 0)
2722 || all_bits_between_are_set == 0)
2723 {
2724 the_const =
2725 create_simple_focus_bits (high_bits, low_bits,
2726 lowest_bit_set, 0);
2727 }
2728 else if (lowest_bit_set == 0)
2729 shift = -(63 - highest_bit_set);
2730
2731 gcc_assert (SPARC_SIMM13_P (the_const));
2732 gcc_assert (shift != 0);
2733
2734 emit_insn (gen_safe_SET64 (temp, the_const));
2735 if (shift > 0)
2736 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
2737 GEN_INT (shift))));
2738 else if (shift < 0)
2739 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
2740 GEN_INT (-shift))));
2741 return;
2742 }
2743
2744 /* Now a range of 22 or fewer bits set somewhere.
2745 * 1) sethi %hi(focus_bits), %reg
2746 * sllx %reg, shift, %reg
2747 * 2) sethi %hi(focus_bits), %reg
2748 * srlx %reg, shift, %reg
2749 */
2750 if ((highest_bit_set - lowest_bit_set) < 21)
2751 {
2752 unsigned HOST_WIDE_INT focus_bits =
2753 create_simple_focus_bits (high_bits, low_bits,
2754 lowest_bit_set, 10);
2755
2756 gcc_assert (SPARC_SETHI_P (focus_bits));
2757 gcc_assert (lowest_bit_set != 10);
2758
2759 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2760
2761 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2762 if (lowest_bit_set < 10)
2763 emit_insn (gen_rtx_SET (op0,
2764 gen_rtx_LSHIFTRT (DImode, temp,
2765 GEN_INT (10 - lowest_bit_set))));
2766 else if (lowest_bit_set > 10)
2767 emit_insn (gen_rtx_SET (op0,
2768 gen_rtx_ASHIFT (DImode, temp,
2769 GEN_INT (lowest_bit_set - 10))));
2770 return;
2771 }
2772
2773 /* 1) sethi %hi(low_bits), %reg
2774 * or %reg, %lo(low_bits), %reg
2775 * 2) sethi %hi(~low_bits), %reg
2776 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2777 */
2778 if (high_bits == 0
2779 || high_bits == 0xffffffff)
2780 {
2781 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2782 (high_bits == 0xffffffff));
2783 return;
2784 }
2785
2786 /* Now, try 3-insn sequences. */
2787
2788 /* 1) sethi %hi(high_bits), %reg
2789 * or %reg, %lo(high_bits), %reg
2790 * sllx %reg, 32, %reg
2791 */
2792 if (low_bits == 0)
2793 {
2794 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2795 return;
2796 }
2797
2798 /* We may be able to do something quick
2799 when the constant is negated, so try that. */
2800 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2801 (~low_bits) & 0xfffffc00))
2802 {
2803 /* NOTE: The trailing bits get XOR'd so we need the
2804 non-negated bits, not the negated ones. */
2805 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2806
2807 if ((((~high_bits) & 0xffffffff) == 0
2808 && ((~low_bits) & 0x80000000) == 0)
2809 || (((~high_bits) & 0xffffffff) == 0xffffffff
2810 && ((~low_bits) & 0x80000000) != 0))
2811 {
2812 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2813
2814 if ((SPARC_SETHI_P (fast_int)
2815 && (~high_bits & 0xffffffff) == 0)
2816 || SPARC_SIMM13_P (fast_int))
2817 emit_insn (gen_safe_SET64 (temp, fast_int));
2818 else
2819 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2820 }
2821 else
2822 {
2823 rtx negated_const;
2824 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2825 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2826 sparc_emit_set_const64 (temp, negated_const);
2827 }
2828
2829 /* If we are XOR'ing with -1, then we should emit a one's complement
2830 instead. This way the combiner will notice logical operations
2831 such as ANDN later on and substitute. */
2832 if (trailing_bits == 0x3ff)
2833 {
2834 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2835 }
2836 else
2837 {
2838 emit_insn (gen_rtx_SET (op0,
2839 gen_safe_XOR64 (temp,
2840 (-0x400 | trailing_bits))));
2841 }
2842 return;
2843 }
2844
2845 /* 1) sethi %hi(xxx), %reg
2846 * or %reg, %lo(xxx), %reg
2847 * sllx %reg, yyy, %reg
2848 *
2849 * ??? This is just a generalized version of the low_bits==0
2850 * thing above, FIXME...
2851 */
2852 if ((highest_bit_set - lowest_bit_set) < 32)
2853 {
2854 unsigned HOST_WIDE_INT focus_bits =
2855 create_simple_focus_bits (high_bits, low_bits,
2856 lowest_bit_set, 0);
2857
2858 /* We can't get here in this state. */
2859 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2860
2861 /* So what we know is that the set bits straddle the
2862 middle of the 64-bit word. */
2863 sparc_emit_set_const64_quick2 (op0, temp,
2864 focus_bits, 0,
2865 lowest_bit_set);
2866 return;
2867 }
2868
2869 /* 1) sethi %hi(high_bits), %reg
2870 * or %reg, %lo(high_bits), %reg
2871 * sllx %reg, 32, %reg
2872 * or %reg, low_bits, %reg
2873 */
2874 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
2875 {
2876 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2877 return;
2878 }
2879
2880 /* The easiest way, when all else fails, is full decomposition. */
2881 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2882 }
2883
2884 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
2885
2886 static bool
2887 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2888 {
2889 *p1 = SPARC_ICC_REG;
2890 *p2 = SPARC_FCC_REG;
2891 return true;
2892 }
2893
2894 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
2895
2896 static unsigned int
2897 sparc_min_arithmetic_precision (void)
2898 {
2899 return 32;
2900 }
2901
2902 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2903 return the mode to be used for the comparison. For floating-point,
2904 CCFP[E]mode is used. CCNZmode should be used when the first operand
2905 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2906 processing is needed. */
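/* For example (illustrative only), comparing the result of (plus:SI a b)
against zero yields CCNZmode, whereas an ordinary DImode register
comparison on TARGET_ARCH64 yields CCXmode. */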
2907
2908 machine_mode
2909 select_cc_mode (enum rtx_code op, rtx x, rtx y)
2910 {
2911 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2912 {
2913 switch (op)
2914 {
2915 case EQ:
2916 case NE:
2917 case UNORDERED:
2918 case ORDERED:
2919 case UNLT:
2920 case UNLE:
2921 case UNGT:
2922 case UNGE:
2923 case UNEQ:
2924 case LTGT:
2925 return CCFPmode;
2926
2927 case LT:
2928 case LE:
2929 case GT:
2930 case GE:
2931 return CCFPEmode;
2932
2933 default:
2934 gcc_unreachable ();
2935 }
2936 }
2937 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2938 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2939 && y == const0_rtx)
2940 {
2941 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2942 return CCXNZmode;
2943 else
2944 return CCNZmode;
2945 }
2946 else
2947 {
2948 /* This is for the cmp<mode>_sne pattern. */
2949 if (GET_CODE (x) == NOT && y == constm1_rtx)
2950 {
2951 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2952 return CCXCmode;
2953 else
2954 return CCCmode;
2955 }
2956
2957 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
2958 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
2959 {
2960 if (GET_CODE (y) == UNSPEC
2961 && (XINT (y, 1) == UNSPEC_ADDV
2962 || XINT (y, 1) == UNSPEC_SUBV
2963 || XINT (y, 1) == UNSPEC_NEGV))
2964 return CCVmode;
2965 else
2966 return CCCmode;
2967 }
2968
2969 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2970 return CCXmode;
2971 else
2972 return CCmode;
2973 }
2974 }
2975
2976 /* Emit the compare insn and return the CC reg for a CODE comparison
2977 with operands X and Y. */
2978
2979 static rtx
2980 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2981 {
2982 machine_mode mode;
2983 rtx cc_reg;
2984
2985 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2986 return x;
2987
2988 mode = SELECT_CC_MODE (code, x, y);
2989
2990 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2991 fcc regs (cse can't tell they're really call clobbered regs and will
2992 remove a duplicate comparison even if there is an intervening function
2993 call - it will then try to reload the cc reg via an int reg which is why
2994 we need the movcc patterns). It is possible to provide the movcc
2995 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2996 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2997 to tell cse that CCFPE mode registers (even pseudos) are call
2998 clobbered. */
2999
3000 /* ??? This is an experiment. Rather than making changes to cse which may
3001 or may not be easy/clean, we do our own cse. This is possible because
3002 we will generate hard registers. Cse knows they're call clobbered (it
3003 doesn't know the same thing about pseudos). If we guess wrong, no big
3004 deal, but if we win, great! */
3005
3006 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3007 #if 1 /* experiment */
3008 {
3009 int reg;
3010 /* We cycle through the registers to ensure they're all exercised. */
3011 static int next_fcc_reg = 0;
3012 /* Previous x,y for each fcc reg. */
3013 static rtx prev_args[4][2];
3014
3015 /* Scan prev_args for x,y. */
3016 for (reg = 0; reg < 4; reg++)
3017 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3018 break;
3019 if (reg == 4)
3020 {
3021 reg = next_fcc_reg;
3022 prev_args[reg][0] = x;
3023 prev_args[reg][1] = y;
3024 next_fcc_reg = (next_fcc_reg + 1) & 3;
3025 }
3026 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3027 }
3028 #else
3029 cc_reg = gen_reg_rtx (mode);
3030 #endif /* ! experiment */
3031 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3032 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3033 else
3034 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3035
3036 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
3037 will only result in an unrecognizable insn so no point in asserting. */
3038 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3039
3040 return cc_reg;
3041 }
3042
3043
3044 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3045
3046 rtx
3047 gen_compare_reg (rtx cmp)
3048 {
3049 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3050 }
3051
3052 /* This function is used for v9 only.
3053 DEST is the target of the Scc insn.
3054 CODE is the code for an Scc's comparison.
3055 X and Y are the values we compare.
3056
3057 This function is needed to turn
3058
3059 (set (reg:SI 110)
3060 (gt (reg:CCX 100 %icc)
3061 (const_int 0)))
3062 into
3063 (set (reg:SI 110)
3064 (gt:DI (reg:CCX 100 %icc)
3065 (const_int 0)))
3066
3067 IE: The instruction recognizer needs to see the mode of the comparison to
3068 find the right instruction. We could use "gt:DI" right in the
3069 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3070
3071 static int
3072 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3073 {
3074 if (! TARGET_ARCH64
3075 && (GET_MODE (x) == DImode
3076 || GET_MODE (dest) == DImode))
3077 return 0;
3078
3079 /* Try to use the movrCC insns. */
3080 if (TARGET_ARCH64
3081 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3082 && y == const0_rtx
3083 && v9_regcmp_p (compare_code))
3084 {
3085 rtx op0 = x;
3086 rtx temp;
3087
3088 /* Special case for op0 != 0. This can be done with one instruction if
3089 dest == x. */
3090
3091 if (compare_code == NE
3092 && GET_MODE (dest) == DImode
3093 && rtx_equal_p (op0, dest))
3094 {
3095 emit_insn (gen_rtx_SET (dest,
3096 gen_rtx_IF_THEN_ELSE (DImode,
3097 gen_rtx_fmt_ee (compare_code, DImode,
3098 op0, const0_rtx),
3099 const1_rtx,
3100 dest)));
3101 return 1;
3102 }
3103
3104 if (reg_overlap_mentioned_p (dest, op0))
3105 {
3106 /* Handle the case where dest == x.
3107 We "early clobber" the result. */
3108 op0 = gen_reg_rtx (GET_MODE (x));
3109 emit_move_insn (op0, x);
3110 }
3111
3112 emit_insn (gen_rtx_SET (dest, const0_rtx));
3113 if (GET_MODE (op0) != DImode)
3114 {
3115 temp = gen_reg_rtx (DImode);
3116 convert_move (temp, op0, 0);
3117 }
3118 else
3119 temp = op0;
3120 emit_insn (gen_rtx_SET (dest,
3121 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3122 gen_rtx_fmt_ee (compare_code, DImode,
3123 temp, const0_rtx),
3124 const1_rtx,
3125 dest)));
3126 return 1;
3127 }
3128 else
3129 {
3130 x = gen_compare_reg_1 (compare_code, x, y);
3131 y = const0_rtx;
3132
3133 emit_insn (gen_rtx_SET (dest, const0_rtx));
3134 emit_insn (gen_rtx_SET (dest,
3135 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3136 gen_rtx_fmt_ee (compare_code,
3137 GET_MODE (x), x, y),
3138 const1_rtx, dest)));
3139 return 1;
3140 }
3141 }
3142
3143
3144 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3145 without jumps using the addx/subx instructions. */
3146
3147 bool
3148 emit_scc_insn (rtx operands[])
3149 {
3150 rtx tem, x, y;
3151 enum rtx_code code;
3152 machine_mode mode;
3153
3154 /* The quad-word fp compare library routines all return nonzero to indicate
3155 true, which is different from the equivalent libgcc routines, so we must
3156 handle them specially here. */
3157 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3158 {
3159 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3160 GET_CODE (operands[1]));
3161 operands[2] = XEXP (operands[1], 0);
3162 operands[3] = XEXP (operands[1], 1);
3163 }
3164
3165 code = GET_CODE (operands[1]);
3166 x = operands[2];
3167 y = operands[3];
3168 mode = GET_MODE (x);
3169
3170 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3171 more applications). The exception to this is "reg != 0" which can
3172 be done in one instruction on v9 (so we do it). */
3173 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3174 {
3175 if (y != const0_rtx)
3176 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3177
3178 rtx pat = gen_rtx_SET (operands[0],
3179 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3180 x, const0_rtx));
3181
3182 /* If we can use addx/subx or addxc, add a clobber for CC. */
3183 if (mode == SImode || (code == NE && TARGET_VIS3))
3184 {
3185 rtx clobber
3186 = gen_rtx_CLOBBER (VOIDmode,
3187 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3188 SPARC_ICC_REG));
3189 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3190 }
3191
3192 emit_insn (pat);
3193 return true;
3194 }
3195
3196 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3197 if (TARGET_ARCH64
3198 && mode == DImode
3199 && !((code == LTU || code == GTU) && TARGET_VIS3)
3200 && gen_v9_scc (operands[0], code, x, y))
3201 return true;
3202
3203 /* We can do LTU and GEU using the addx/subx instructions too. And
3204 for GTU/LEU, if both operands are registers, swap them and fall
3205 back to the easy case. */
3206 if (code == GTU || code == LEU)
3207 {
3208 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3209 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3210 {
3211 tem = x;
3212 x = y;
3213 y = tem;
3214 code = swap_condition (code);
3215 }
3216 }
3217
3218 if (code == LTU || code == GEU)
3219 {
3220 emit_insn (gen_rtx_SET (operands[0],
3221 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3222 gen_compare_reg_1 (code, x, y),
3223 const0_rtx)));
3224 return true;
3225 }
3226
3227 /* All the possibilities to use addx/subx based sequences have been
3228 exhausted; try for a 3 instruction sequence using v9 conditional
3229 moves. */
3230 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3231 return true;
3232
3233 /* Nope, do branches. */
3234 return false;
3235 }
3236
3237 /* Emit a conditional jump insn for the v9 architecture using comparison code
3238 CODE and jump target LABEL.
3239 This function exists to take advantage of the v9 brxx insns. */
3240
3241 static void
3242 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3243 {
3244 emit_jump_insn (gen_rtx_SET (pc_rtx,
3245 gen_rtx_IF_THEN_ELSE (VOIDmode,
3246 gen_rtx_fmt_ee (code, GET_MODE (op0),
3247 op0, const0_rtx),
3248 gen_rtx_LABEL_REF (VOIDmode, label),
3249 pc_rtx)));
3250 }
3251
3252 /* Emit a conditional jump insn for the UA2011 architecture using
3253 comparison code CODE and jump target LABEL. This function exists
3254 to take advantage of the UA2011 Compare and Branch insns. */
3255
3256 static void
3257 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3258 {
3259 rtx if_then_else;
3260
3261 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3262 gen_rtx_fmt_ee(code, GET_MODE(op0),
3263 op0, op1),
3264 gen_rtx_LABEL_REF (VOIDmode, label),
3265 pc_rtx);
3266
3267 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3268 }
3269
3270 void
3271 emit_conditional_branch_insn (rtx operands[])
3272 {
3273 /* The quad-word fp compare library routines all return nonzero to indicate
3274 true, which is different from the equivalent libgcc routines, so we must
3275 handle them specially here. */
3276 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3277 {
3278 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3279 GET_CODE (operands[0]));
3280 operands[1] = XEXP (operands[0], 0);
3281 operands[2] = XEXP (operands[0], 1);
3282 }
3283
3284 /* If we can tell early on that the comparison is against a constant
3285 that won't fit in the 5-bit signed immediate field of a cbcond,
3286 use one of the other v9 conditional branch sequences. */
3287 if (TARGET_CBCOND
3288 && GET_CODE (operands[1]) == REG
3289 && (GET_MODE (operands[1]) == SImode
3290 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3291 && (GET_CODE (operands[2]) != CONST_INT
3292 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3293 {
3294 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3295 return;
3296 }
3297
3298 if (TARGET_ARCH64 && operands[2] == const0_rtx
3299 && GET_CODE (operands[1]) == REG
3300 && GET_MODE (operands[1]) == DImode)
3301 {
3302 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3303 return;
3304 }
3305
3306 operands[1] = gen_compare_reg (operands[0]);
3307 operands[2] = const0_rtx;
3308 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3309 operands[1], operands[2]);
3310 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3311 operands[3]));
3312 }
3313
3314
3315 /* Generate a DFmode part of a hard TFmode register.
3316 REG is the TFmode hard register, LOW is 1 for the
3317 low 64 bits of the register and 0 otherwise. */
3318
3319 rtx
3320 gen_df_reg (rtx reg, int low)
3321 {
3322 int regno = REGNO (reg);
3323
3324 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3325 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3326 return gen_rtx_REG (DFmode, regno);
3327 }
3328 \f
3329 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3330 Unlike normal calls, TFmode operands are passed by reference. It is
3331 assumed that no more than 3 operands are required. */
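/* For instance, a TFmode addition ends up as a call along the lines of
_Qp_add (&result, &x, &y), with each quad operand passed by address and
the result written through the first pointer (assuming the standard
_Qp_* emulation routines). */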
3332
3333 static void
3334 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3335 {
3336 rtx ret_slot = NULL, arg[3], func_sym;
3337 int i;
3338
3339 /* We only expect to be called for conversions, unary, and binary ops. */
3340 gcc_assert (nargs == 2 || nargs == 3);
3341
3342 for (i = 0; i < nargs; ++i)
3343 {
3344 rtx this_arg = operands[i];
3345 rtx this_slot;
3346
3347 /* TFmode arguments and return values are passed by reference. */
3348 if (GET_MODE (this_arg) == TFmode)
3349 {
3350 int force_stack_temp;
3351
3352 force_stack_temp = 0;
3353 if (TARGET_BUGGY_QP_LIB && i == 0)
3354 force_stack_temp = 1;
3355
3356 if (GET_CODE (this_arg) == MEM
3357 && ! force_stack_temp)
3358 {
3359 tree expr = MEM_EXPR (this_arg);
3360 if (expr)
3361 mark_addressable (expr);
3362 this_arg = XEXP (this_arg, 0);
3363 }
3364 else if (CONSTANT_P (this_arg)
3365 && ! force_stack_temp)
3366 {
3367 this_slot = force_const_mem (TFmode, this_arg);
3368 this_arg = XEXP (this_slot, 0);
3369 }
3370 else
3371 {
3372 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3373
3374 /* Operand 0 is the return value. We'll copy it out later. */
3375 if (i > 0)
3376 emit_move_insn (this_slot, this_arg);
3377 else
3378 ret_slot = this_slot;
3379
3380 this_arg = XEXP (this_slot, 0);
3381 }
3382 }
3383
3384 arg[i] = this_arg;
3385 }
3386
3387 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3388
3389 if (GET_MODE (operands[0]) == TFmode)
3390 {
3391 if (nargs == 2)
3392 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
3393 arg[0], GET_MODE (arg[0]),
3394 arg[1], GET_MODE (arg[1]));
3395 else
3396 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
3397 arg[0], GET_MODE (arg[0]),
3398 arg[1], GET_MODE (arg[1]),
3399 arg[2], GET_MODE (arg[2]));
3400
3401 if (ret_slot)
3402 emit_move_insn (operands[0], ret_slot);
3403 }
3404 else
3405 {
3406 rtx ret;
3407
3408 gcc_assert (nargs == 2);
3409
3410 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3411 GET_MODE (operands[0]), 1,
3412 arg[1], GET_MODE (arg[1]));
3413
3414 if (ret != operands[0])
3415 emit_move_insn (operands[0], ret);
3416 }
3417 }
3418
3419 /* Expand soft-float TFmode calls to SPARC ABI routines. */
3420
3421 static void
3422 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3423 {
3424 const char *func;
3425
3426 switch (code)
3427 {
3428 case PLUS:
3429 func = "_Qp_add";
3430 break;
3431 case MINUS:
3432 func = "_Qp_sub";
3433 break;
3434 case MULT:
3435 func = "_Qp_mul";
3436 break;
3437 case DIV:
3438 func = "_Qp_div";
3439 break;
3440 default:
3441 gcc_unreachable ();
3442 }
3443
3444 emit_soft_tfmode_libcall (func, 3, operands);
3445 }
3446
3447 static void
3448 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3449 {
3450 const char *func;
3451
3452 gcc_assert (code == SQRT);
3453 func = "_Qp_sqrt";
3454
3455 emit_soft_tfmode_libcall (func, 2, operands);
3456 }
3457
3458 static void
3459 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3460 {
3461 const char *func;
3462
3463 switch (code)
3464 {
3465 case FLOAT_EXTEND:
3466 switch (GET_MODE (operands[1]))
3467 {
3468 case SFmode:
3469 func = "_Qp_stoq";
3470 break;
3471 case DFmode:
3472 func = "_Qp_dtoq";
3473 break;
3474 default:
3475 gcc_unreachable ();
3476 }
3477 break;
3478
3479 case FLOAT_TRUNCATE:
3480 switch (GET_MODE (operands[0]))
3481 {
3482 case SFmode:
3483 func = "_Qp_qtos";
3484 break;
3485 case DFmode:
3486 func = "_Qp_qtod";
3487 break;
3488 default:
3489 gcc_unreachable ();
3490 }
3491 break;
3492
3493 case FLOAT:
3494 switch (GET_MODE (operands[1]))
3495 {
3496 case SImode:
3497 func = "_Qp_itoq";
3498 if (TARGET_ARCH64)
3499 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3500 break;
3501 case DImode:
3502 func = "_Qp_xtoq";
3503 break;
3504 default:
3505 gcc_unreachable ();
3506 }
3507 break;
3508
3509 case UNSIGNED_FLOAT:
3510 switch (GET_MODE (operands[1]))
3511 {
3512 case SImode:
3513 func = "_Qp_uitoq";
3514 if (TARGET_ARCH64)
3515 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3516 break;
3517 case DImode:
3518 func = "_Qp_uxtoq";
3519 break;
3520 default:
3521 gcc_unreachable ();
3522 }
3523 break;
3524
3525 case FIX:
3526 switch (GET_MODE (operands[0]))
3527 {
3528 case SImode:
3529 func = "_Qp_qtoi";
3530 break;
3531 case DImode:
3532 func = "_Qp_qtox";
3533 break;
3534 default:
3535 gcc_unreachable ();
3536 }
3537 break;
3538
3539 case UNSIGNED_FIX:
3540 switch (GET_MODE (operands[0]))
3541 {
3542 case SImode:
3543 func = "_Qp_qtoui";
3544 break;
3545 case DImode:
3546 func = "_Qp_qtoux";
3547 break;
3548 default:
3549 gcc_unreachable ();
3550 }
3551 break;
3552
3553 default:
3554 gcc_unreachable ();
3555 }
3556
3557 emit_soft_tfmode_libcall (func, 2, operands);
3558 }
3559
3560 /* Expand a hard-float TFmode operation. All arguments must be in
3561 registers. */
3562
3563 static void
3564 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3565 {
3566 rtx op, dest;
3567
3568 if (GET_RTX_CLASS (code) == RTX_UNARY)
3569 {
3570 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3571 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3572 }
3573 else
3574 {
3575 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3576 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3577 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3578 operands[1], operands[2]);
3579 }
3580
3581 if (register_operand (operands[0], VOIDmode))
3582 dest = operands[0];
3583 else
3584 dest = gen_reg_rtx (GET_MODE (operands[0]));
3585
3586 emit_insn (gen_rtx_SET (dest, op));
3587
3588 if (dest != operands[0])
3589 emit_move_insn (operands[0], dest);
3590 }
3591
3592 void
3593 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3594 {
3595 if (TARGET_HARD_QUAD)
3596 emit_hard_tfmode_operation (code, operands);
3597 else
3598 emit_soft_tfmode_binop (code, operands);
3599 }
3600
3601 void
3602 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3603 {
3604 if (TARGET_HARD_QUAD)
3605 emit_hard_tfmode_operation (code, operands);
3606 else
3607 emit_soft_tfmode_unop (code, operands);
3608 }
3609
3610 void
3611 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3612 {
3613 if (TARGET_HARD_QUAD)
3614 emit_hard_tfmode_operation (code, operands);
3615 else
3616 emit_soft_tfmode_cvt (code, operands);
3617 }
3618 \f
3619 /* Return nonzero if a branch/jump/call instruction will be emitting
3620 a nop into its delay slot. */
3621
3622 int
3623 empty_delay_slot (rtx_insn *insn)
3624 {
3625 rtx seq;
3626
3627 /* If no previous instruction (should not happen), return true. */
3628 if (PREV_INSN (insn) == NULL)
3629 return 1;
3630
3631 seq = NEXT_INSN (PREV_INSN (insn));
3632 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3633 return 0;
3634
3635 return 1;
3636 }
3637
3638 /* Return nonzero if we should emit a nop after a cbcond instruction.
3639 The cbcond instruction does not have a delay slot, however there is
3640 a severe performance penalty if a control transfer appears right
3641 after a cbcond. Therefore we emit a nop when we detect this
3642 situation. */
3643
3644 int
3645 emit_cbcond_nop (rtx_insn *insn)
3646 {
3647 rtx next = next_active_insn (insn);
3648
3649 if (!next)
3650 return 1;
3651
3652 if (NONJUMP_INSN_P (next)
3653 && GET_CODE (PATTERN (next)) == SEQUENCE)
3654 next = XVECEXP (PATTERN (next), 0, 0);
3655 else if (CALL_P (next)
3656 && GET_CODE (PATTERN (next)) == PARALLEL)
3657 {
3658 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3659
3660 if (GET_CODE (delay) == RETURN)
3661 {
3662 /* It's a sibling call. Do not emit the nop if we're going
3663 to emit something other than the jump itself as the first
3664 instruction of the sibcall sequence. */
3665 if (sparc_leaf_function_p || TARGET_FLAT)
3666 return 0;
3667 }
3668 }
3669
3670 if (NONJUMP_INSN_P (next))
3671 return 0;
3672
3673 return 1;
3674 }
3675
3676 /* Return nonzero if TRIAL can go into the call delay slot. */
3677
3678 int
3679 eligible_for_call_delay (rtx_insn *trial)
3680 {
3681 rtx pat;
3682
3683 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3684 return 0;
3685
3686 /* Binutils allows
3687 call __tls_get_addr, %tgd_call (foo)
3688 add %l7, %o0, %o0, %tgd_add (foo)
3689 while Sun as/ld does not. */
3690 if (TARGET_GNU_TLS || !TARGET_TLS)
3691 return 1;
3692
3693 pat = PATTERN (trial);
3694
3695 /* We must reject tgd_add{32|64}, i.e.
3696 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3697 and tldm_add{32|64}, i.e.
3698 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3699 for Sun as/ld. */
3700 if (GET_CODE (pat) == SET
3701 && GET_CODE (SET_SRC (pat)) == PLUS)
3702 {
3703 rtx unspec = XEXP (SET_SRC (pat), 1);
3704
3705 if (GET_CODE (unspec) == UNSPEC
3706 && (XINT (unspec, 1) == UNSPEC_TLSGD
3707 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3708 return 0;
3709 }
3710
3711 return 1;
3712 }
3713
3714 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3715 instruction. RETURN_P is true if the v9 variant 'return' is to be
3716 considered in the test too.
3717
3718 TRIAL must be a SET whose destination is a REG appropriate for the
3719 'restore' instruction or, if RETURN_P is true, for the 'return'
3720 instruction. */
3721
3722 static int
3723 eligible_for_restore_insn (rtx trial, bool return_p)
3724 {
3725 rtx pat = PATTERN (trial);
3726 rtx src = SET_SRC (pat);
3727 bool src_is_freg = false;
3728 rtx src_reg;
3729
3730 /* Since we can now do moves between float and integer registers when
3731 VIS3 is enabled, we have to catch this case. We can allow such
3732 moves when doing a 'return', however. */
3733 src_reg = src;
3734 if (GET_CODE (src_reg) == SUBREG)
3735 src_reg = SUBREG_REG (src_reg);
3736 if (GET_CODE (src_reg) == REG
3737 && SPARC_FP_REG_P (REGNO (src_reg)))
3738 src_is_freg = true;
3739
3740 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3741 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3742 && arith_operand (src, GET_MODE (src))
3743 && ! src_is_freg)
3744 {
3745 if (TARGET_ARCH64)
3746 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3747 else
3748 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3749 }
3750
3751 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3752 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3753 && arith_double_operand (src, GET_MODE (src))
3754 && ! src_is_freg)
3755 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3756
3757 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3758 else if (! TARGET_FPU && register_operand (src, SFmode))
3759 return 1;
3760
3761 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3762 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3763 return 1;
3764
3765 /* If we have the 'return' instruction, anything that does not use
3766 local or output registers and can go into a delay slot wins. */
3767 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
3768 return 1;
3769
3770 /* The 'restore src1,src2,dest' pattern for SImode. */
3771 else if (GET_CODE (src) == PLUS
3772 && register_operand (XEXP (src, 0), SImode)
3773 && arith_operand (XEXP (src, 1), SImode))
3774 return 1;
3775
3776 /* The 'restore src1,src2,dest' pattern for DImode. */
3777 else if (GET_CODE (src) == PLUS
3778 && register_operand (XEXP (src, 0), DImode)
3779 && arith_double_operand (XEXP (src, 1), DImode))
3780 return 1;
3781
3782 /* The 'restore src1,%lo(src2),dest' pattern. */
3783 else if (GET_CODE (src) == LO_SUM
3784 && ! TARGET_CM_MEDMID
3785 && ((register_operand (XEXP (src, 0), SImode)
3786 && immediate_operand (XEXP (src, 1), SImode))
3787 || (TARGET_ARCH64
3788 && register_operand (XEXP (src, 0), DImode)
3789 && immediate_operand (XEXP (src, 1), DImode))))
3790 return 1;
3791
3792 /* The 'restore src,src,dest' pattern. */
3793 else if (GET_CODE (src) == ASHIFT
3794 && (register_operand (XEXP (src, 0), SImode)
3795 || register_operand (XEXP (src, 0), DImode))
3796 && XEXP (src, 1) == const1_rtx)
3797 return 1;
3798
3799 return 0;
3800 }
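/* Informal example (added for clarity): a function ending in
   "return x + 1;" with x in %i0 can have its
   (set (reg %i0) (plus (reg %i0) (const_int 1))) combined into the
   epilogue as "ret" + "restore %i0, 1, %o0", which is the
   'restore src1,src2,dest' case checked above; the register-window
   shift performed by restore is what turns the callee's %i0 into the
   caller's %o0.  */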
3801
3802 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3803
3804 int
3805 eligible_for_return_delay (rtx_insn *trial)
3806 {
3807 int regno;
3808 rtx pat;
3809
3810 /* If the function uses __builtin_eh_return, the eh_return machinery
3811 occupies the delay slot. */
3812 if (crtl->calls_eh_return)
3813 return 0;
3814
3815 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3816 return 0;
3817
3818 /* In the case of a leaf or flat function, anything can go into the slot. */
3819 if (sparc_leaf_function_p || TARGET_FLAT)
3820 return 1;
3821
3822 if (!NONJUMP_INSN_P (trial))
3823 return 0;
3824
3825 pat = PATTERN (trial);
3826 if (GET_CODE (pat) == PARALLEL)
3827 {
3828 int i;
3829
3830 if (! TARGET_V9)
3831 return 0;
3832 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3833 {
3834 rtx expr = XVECEXP (pat, 0, i);
3835 if (GET_CODE (expr) != SET)
3836 return 0;
3837 if (GET_CODE (SET_DEST (expr)) != REG)
3838 return 0;
3839 regno = REGNO (SET_DEST (expr));
3840 if (regno >= 8 && regno < 24)
3841 return 0;
3842 }
3843 return !epilogue_renumber (&pat, 1);
3844 }
3845
3846 if (GET_CODE (pat) != SET)
3847 return 0;
3848
3849 if (GET_CODE (SET_DEST (pat)) != REG)
3850 return 0;
3851
3852 regno = REGNO (SET_DEST (pat));
3853
3854 /* Otherwise, only operations which can be done in tandem with
3855 a `restore' or `return' insn can go into the delay slot. */
3856 if (regno >= 8 && regno < 24)
3857 return 0;
3858
3859 /* If this instruction sets up a floating-point register and we have a return
3860 instruction, it can probably go in. But restore will not work
3861 with FP_REGS. */
3862 if (! SPARC_INT_REG_P (regno))
3863 return TARGET_V9 && !epilogue_renumber (&pat, 1);
3864
3865 return eligible_for_restore_insn (trial, true);
3866 }
3867
3868 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3869
3870 int
3871 eligible_for_sibcall_delay (rtx_insn *trial)
3872 {
3873 rtx pat;
3874
3875 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3876 return 0;
3877
3878 if (!NONJUMP_INSN_P (trial))
3879 return 0;
3880
3881 pat = PATTERN (trial);
3882
3883 if (sparc_leaf_function_p || TARGET_FLAT)
3884 {
3885 /* If the tail call is done using the call instruction,
3886 we have to restore %o7 in the delay slot. */
3887 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3888 return 0;
3889
3890 /* %g1 is used to build the function address. */
3891 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3892 return 0;
3893
3894 return 1;
3895 }
3896
3897 if (GET_CODE (pat) != SET)
3898 return 0;
3899
3900 /* Otherwise, only operations which can be done in tandem with
3901 a `restore' insn can go into the delay slot. */
3902 if (GET_CODE (SET_DEST (pat)) != REG
3903 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3904 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
3905 return 0;
3906
3907 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3908 in most cases. */
3909 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3910 return 0;
3911
3912 return eligible_for_restore_insn (trial, false);
3913 }
3914 \f
3915 /* Determine if it's legal to put X into the constant pool. This
3916 is not possible if X contains the address of a symbol that is
3917 not constant (TLS) or not known at final link time (PIC). */
3918
3919 static bool
3920 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
3921 {
3922 switch (GET_CODE (x))
3923 {
3924 case CONST_INT:
3925 case CONST_WIDE_INT:
3926 case CONST_DOUBLE:
3927 case CONST_VECTOR:
3928 /* Accept all non-symbolic constants. */
3929 return false;
3930
3931 case LABEL_REF:
3932 /* Labels are OK iff we are non-PIC. */
3933 return flag_pic != 0;
3934
3935 case SYMBOL_REF:
3936 /* 'Naked' TLS symbol references are never OK,
3937 non-TLS symbols are OK iff we are non-PIC. */
3938 if (SYMBOL_REF_TLS_MODEL (x))
3939 return true;
3940 else
3941 return flag_pic != 0;
3942
3943 case CONST:
3944 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3945 case PLUS:
3946 case MINUS:
3947 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3948 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3949 case UNSPEC:
3950 return true;
3951 default:
3952 gcc_unreachable ();
3953 }
3954 }
3955 \f
3956 /* Global Offset Table support. */
3957 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3958 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3959
3960 /* Return the SYMBOL_REF for the Global Offset Table. */
3961
3962 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3963
3964 static rtx
3965 sparc_got (void)
3966 {
3967 if (!sparc_got_symbol)
3968 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3969
3970 return sparc_got_symbol;
3971 }
3972
3973 /* Ensure that we are not using patterns that are not OK with PIC. */
3974
3975 int
3976 check_pic (int i)
3977 {
3978 rtx op;
3979
3980 switch (flag_pic)
3981 {
3982 case 1:
3983 op = recog_data.operand[i];
3984 gcc_assert (GET_CODE (op) != SYMBOL_REF
3985 && (GET_CODE (op) != CONST
3986 || (GET_CODE (XEXP (op, 0)) == MINUS
3987 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3988 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3989 /* fallthrough */
3990 case 2:
3991 default:
3992 return 1;
3993 }
3994 }
3995
3996 /* Return true if X is an address which needs a temporary register when
3997 reloaded while generating PIC code. */
3998
3999 int
4000 pic_address_needs_scratch (rtx x)
4001 {
4002 /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
4003 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
4004 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4005 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4006 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
4007 return 1;
4008
4009 return 0;
4010 }
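/* Illustrative example (added commentary): with flag_pic set, an address
   like (const (plus (symbol_ref "foo") (const_int 0x12345))) needs a
   scratch register because 0x12345 is outside the 13-bit signed range
   tested by SMALL_INT, whereas a small addend such as (const_int 8) can
   be handled without one.  */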
4011
4012 /* Determine if a given RTX is a valid constant. We already know this
4013 satisfies CONSTANT_P. */
4014
4015 static bool
4016 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4017 {
4018 switch (GET_CODE (x))
4019 {
4020 case CONST:
4021 case SYMBOL_REF:
4022 if (sparc_tls_referenced_p (x))
4023 return false;
4024 break;
4025
4026 case CONST_DOUBLE:
4027 /* Floating-point constants are generally not OK.
4028 The only exceptions are 0.0 and all-ones in VIS. */
4029 if (TARGET_VIS
4030 && SCALAR_FLOAT_MODE_P (mode)
4031 && (const_zero_operand (x, mode)
4032 || const_all_ones_operand (x, mode)))
4033 return true;
4034
4035 return false;
4036
4037 case CONST_VECTOR:
4038 /* Vector constants are generally not OK.
4039 The only exceptions are 0 and -1 in VIS. */
4040 if (TARGET_VIS
4041 && (const_zero_operand (x, mode)
4042 || const_all_ones_operand (x, mode)))
4043 return true;
4044
4045 return false;
4046
4047 default:
4048 break;
4049 }
4050
4051 return true;
4052 }
4053
4054 /* Determine if a given RTX is a valid constant address. */
4055
4056 bool
4057 constant_address_p (rtx x)
4058 {
4059 switch (GET_CODE (x))
4060 {
4061 case LABEL_REF:
4062 case CONST_INT:
4063 case HIGH:
4064 return true;
4065
4066 case CONST:
4067 if (flag_pic && pic_address_needs_scratch (x))
4068 return false;
4069 return sparc_legitimate_constant_p (Pmode, x);
4070
4071 case SYMBOL_REF:
4072 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4073
4074 default:
4075 return false;
4076 }
4077 }
4078
4079 /* Nonzero if the constant value X is a legitimate general operand
4080 when generating PIC code. It is given that flag_pic is on and
4081 that X satisfies CONSTANT_P. */
4082
4083 bool
4084 legitimate_pic_operand_p (rtx x)
4085 {
4086 if (pic_address_needs_scratch (x))
4087 return false;
4088 if (sparc_tls_referenced_p (x))
4089 return false;
4090 return true;
4091 }
4092
4093 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4094 (CONST_INT_P (X) \
4095 && INTVAL (X) >= -0x1000 \
4096 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4097
4098 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4099 (CONST_INT_P (X) \
4100 && INTVAL (X) >= -0x1000 \
4101 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
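/* Rationale for the bounds above (added commentary): a memory offset
   must fit in the 13-bit signed immediate field, i.e. [-0x1000, 0xfff],
   and the whole access has to stay within it, hence the GET_MODE_SIZE
   adjustment on the upper bound.  In the OLO10 case the offset is later
   combined with a %lo() relocation, which can contribute up to 0x3ff,
   so the upper bound is tightened to 0xc00 - size to keep the sum
   representable.  */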
4102
4103 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4104
4105 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4106 ordinarily. This changes a bit when generating PIC. */
4107
4108 static bool
4109 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4110 {
4111 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4112
4113 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4114 rs1 = addr;
4115 else if (GET_CODE (addr) == PLUS)
4116 {
4117 rs1 = XEXP (addr, 0);
4118 rs2 = XEXP (addr, 1);
4119
4120 /* Canonicalize: REG comes first; if there are no regs,
4121 LO_SUM comes first. */
4122 if (!REG_P (rs1)
4123 && GET_CODE (rs1) != SUBREG
4124 && (REG_P (rs2)
4125 || GET_CODE (rs2) == SUBREG
4126 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4127 {
4128 rs1 = XEXP (addr, 1);
4129 rs2 = XEXP (addr, 0);
4130 }
4131
4132 if ((flag_pic == 1
4133 && rs1 == pic_offset_table_rtx
4134 && !REG_P (rs2)
4135 && GET_CODE (rs2) != SUBREG
4136 && GET_CODE (rs2) != LO_SUM
4137 && GET_CODE (rs2) != MEM
4138 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4139 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4140 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4141 || ((REG_P (rs1)
4142 || GET_CODE (rs1) == SUBREG)
4143 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4144 {
4145 imm1 = rs2;
4146 rs2 = NULL;
4147 }
4148 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4149 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4150 {
4151 /* We prohibit REG + REG for TFmode when there are no quad move insns
4152 and we consequently need to split. We do this because REG+REG
4153 is not an offsettable address. If we get the situation in reload
4154 where source and destination of a movtf pattern are both MEMs with
4155 REG+REG address, then only one of them gets converted to an
4156 offsettable address. */
4157 if (mode == TFmode
4158 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4159 return 0;
4160
4161 /* Likewise for TImode, but in all cases. */
4162 if (mode == TImode)
4163 return 0;
4164
4165 /* We prohibit REG + REG on ARCH32 if not optimizing for
4166 DFmode/DImode because then mem_min_alignment is likely to be zero
4167 after reload and the forced split would lack a matching splitter
4168 pattern. */
4169 if (TARGET_ARCH32 && !optimize
4170 && (mode == DFmode || mode == DImode))
4171 return 0;
4172 }
4173 else if (USE_AS_OFFSETABLE_LO10
4174 && GET_CODE (rs1) == LO_SUM
4175 && TARGET_ARCH64
4176 && ! TARGET_CM_MEDMID
4177 && RTX_OK_FOR_OLO10_P (rs2, mode))
4178 {
4179 rs2 = NULL;
4180 imm1 = XEXP (rs1, 1);
4181 rs1 = XEXP (rs1, 0);
4182 if (!CONSTANT_P (imm1)
4183 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4184 return 0;
4185 }
4186 }
4187 else if (GET_CODE (addr) == LO_SUM)
4188 {
4189 rs1 = XEXP (addr, 0);
4190 imm1 = XEXP (addr, 1);
4191
4192 if (!CONSTANT_P (imm1)
4193 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4194 return 0;
4195
4196 /* We can't allow TFmode in 32-bit mode, because an offset greater
4197 than the alignment (8) may cause the LO_SUM to overflow. */
4198 if (mode == TFmode && TARGET_ARCH32)
4199 return 0;
4200
4201 /* During reload, accept the HIGH+LO_SUM construct generated by
4202 sparc_legitimize_reload_address. */
4203 if (reload_in_progress
4204 && GET_CODE (rs1) == HIGH
4205 && XEXP (rs1, 0) == imm1)
4206 return 1;
4207 }
4208 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4209 return 1;
4210 else
4211 return 0;
4212
4213 if (GET_CODE (rs1) == SUBREG)
4214 rs1 = SUBREG_REG (rs1);
4215 if (!REG_P (rs1))
4216 return 0;
4217
4218 if (rs2)
4219 {
4220 if (GET_CODE (rs2) == SUBREG)
4221 rs2 = SUBREG_REG (rs2);
4222 if (!REG_P (rs2))
4223 return 0;
4224 }
4225
4226 if (strict)
4227 {
4228 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4229 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4230 return 0;
4231 }
4232 else
4233 {
4234 if ((! SPARC_INT_REG_P (REGNO (rs1))
4235 && REGNO (rs1) != FRAME_POINTER_REGNUM
4236 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4237 || (rs2
4238 && (! SPARC_INT_REG_P (REGNO (rs2))
4239 && REGNO (rs2) != FRAME_POINTER_REGNUM
4240 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4241 return 0;
4242 }
4243 return 1;
4244 }
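/* A few examples of addresses accepted above (informal): (reg %i0),
   (plus (reg %i0) (reg %i1)), (plus (reg %fp) (const_int -8)) and
   (lo_sum (reg %g1) (symbol_ref "var")).  Conversely, REG+REG is
   rejected for TFmode without hard-quad support and for TImode
   altogether, as the inline comments explain.  */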
4245
4246 /* Return the SYMBOL_REF for the tls_get_addr function. */
4247
4248 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4249
4250 static rtx
4251 sparc_tls_get_addr (void)
4252 {
4253 if (!sparc_tls_symbol)
4254 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4255
4256 return sparc_tls_symbol;
4257 }
4258
4259 /* Return the Global Offset Table to be used in TLS mode. */
4260
4261 static rtx
4262 sparc_tls_got (void)
4263 {
4264 /* In PIC mode, this is just the PIC offset table. */
4265 if (flag_pic)
4266 {
4267 crtl->uses_pic_offset_table = 1;
4268 return pic_offset_table_rtx;
4269 }
4270
4271 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4272 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4273 if (TARGET_SUN_TLS && TARGET_ARCH32)
4274 {
4275 load_got_register ();
4276 return global_offset_table_rtx;
4277 }
4278
4279 /* In all other cases, we load a new pseudo with the GOT symbol. */
4280 return copy_to_reg (sparc_got ());
4281 }
4282
4283 /* Return true if X contains a thread-local symbol. */
4284
4285 static bool
4286 sparc_tls_referenced_p (rtx x)
4287 {
4288 if (!TARGET_HAVE_TLS)
4289 return false;
4290
4291 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4292 x = XEXP (XEXP (x, 0), 0);
4293
4294 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4295 return true;
4296
4297 /* That's all we handle in sparc_legitimize_tls_address for now. */
4298 return false;
4299 }
4300
4301 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4302 this (thread-local) address. */
4303
4304 static rtx
4305 sparc_legitimize_tls_address (rtx addr)
4306 {
4307 rtx temp1, temp2, temp3, ret, o0, got;
4308 rtx_insn *insn;
4309
4310 gcc_assert (can_create_pseudo_p ());
4311
4312 if (GET_CODE (addr) == SYMBOL_REF)
4313 switch (SYMBOL_REF_TLS_MODEL (addr))
4314 {
4315 case TLS_MODEL_GLOBAL_DYNAMIC:
4316 start_sequence ();
4317 temp1 = gen_reg_rtx (SImode);
4318 temp2 = gen_reg_rtx (SImode);
4319 ret = gen_reg_rtx (Pmode);
4320 o0 = gen_rtx_REG (Pmode, 8);
4321 got = sparc_tls_got ();
4322 emit_insn (gen_tgd_hi22 (temp1, addr));
4323 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
4324 if (TARGET_ARCH32)
4325 {
4326 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
4327 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
4328 addr, const1_rtx));
4329 }
4330 else
4331 {
4332 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
4333 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
4334 addr, const1_rtx));
4335 }
4336 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4337 insn = get_insns ();
4338 end_sequence ();
4339 emit_libcall_block (insn, ret, o0, addr);
4340 break;
4341
4342 case TLS_MODEL_LOCAL_DYNAMIC:
4343 start_sequence ();
4344 temp1 = gen_reg_rtx (SImode);
4345 temp2 = gen_reg_rtx (SImode);
4346 temp3 = gen_reg_rtx (Pmode);
4347 ret = gen_reg_rtx (Pmode);
4348 o0 = gen_rtx_REG (Pmode, 8);
4349 got = sparc_tls_got ();
4350 emit_insn (gen_tldm_hi22 (temp1));
4351 emit_insn (gen_tldm_lo10 (temp2, temp1));
4352 if (TARGET_ARCH32)
4353 {
4354 emit_insn (gen_tldm_add32 (o0, got, temp2));
4355 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
4356 const1_rtx));
4357 }
4358 else
4359 {
4360 emit_insn (gen_tldm_add64 (o0, got, temp2));
4361 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
4362 const1_rtx));
4363 }
4364 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4365 insn = get_insns ();
4366 end_sequence ();
4367 emit_libcall_block (insn, temp3, o0,
4368 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4369 UNSPEC_TLSLD_BASE));
4370 temp1 = gen_reg_rtx (SImode);
4371 temp2 = gen_reg_rtx (SImode);
4372 emit_insn (gen_tldo_hix22 (temp1, addr));
4373 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
4374 if (TARGET_ARCH32)
4375 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
4376 else
4377 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
4378 break;
4379
4380 case TLS_MODEL_INITIAL_EXEC:
4381 temp1 = gen_reg_rtx (SImode);
4382 temp2 = gen_reg_rtx (SImode);
4383 temp3 = gen_reg_rtx (Pmode);
4384 got = sparc_tls_got ();
4385 emit_insn (gen_tie_hi22 (temp1, addr));
4386 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
4387 if (TARGET_ARCH32)
4388 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4389 else
4390 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4391 if (TARGET_SUN_TLS)
4392 {
4393 ret = gen_reg_rtx (Pmode);
4394 if (TARGET_ARCH32)
4395 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
4396 temp3, addr));
4397 else
4398 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
4399 temp3, addr));
4400 }
4401 else
4402 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4403 break;
4404
4405 case TLS_MODEL_LOCAL_EXEC:
4406 temp1 = gen_reg_rtx (Pmode);
4407 temp2 = gen_reg_rtx (Pmode);
4408 if (TARGET_ARCH32)
4409 {
4410 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
4411 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
4412 }
4413 else
4414 {
4415 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
4416 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
4417 }
4418 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4419 break;
4420
4421 default:
4422 gcc_unreachable ();
4423 }
4424
4425 else if (GET_CODE (addr) == CONST)
4426 {
4427 rtx base, offset;
4428
4429 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4430
4431 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4432 offset = XEXP (XEXP (addr, 0), 1);
4433
4434 base = force_operand (base, NULL_RTX);
4435 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4436 offset = force_reg (Pmode, offset);
4437 ret = gen_rtx_PLUS (Pmode, base, offset);
4438 }
4439
4440 else
4441 gcc_unreachable (); /* for now ... */
4442
4443 return ret;
4444 }
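/* Rough sketch of the generated code (added illustration; the exact
   output comes from the gen_tgd_*, gen_tldm_*, gen_tie_* and gen_tle_*
   patterns in sparc.md).  For the local-exec model, for instance, the
   expansion above amounts to roughly

       sethi  %tle_hix22(sym), %tmp1
       xor    %tmp1, %tle_lox10(sym), %tmp2
       add    %g7, %tmp2, %result    ! %g7 is the thread pointer

   while the dynamic models additionally set up %o0 and call
   __tls_get_addr through the GOT obtained from sparc_tls_got.  */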
4445
4446 /* Legitimize PIC addresses. If the address is already position-independent,
4447 we return ORIG. Newly generated position-independent addresses go into a
4448 reg. This is REG if nonzero, otherwise we allocate register(s) as
4449 necessary. */
4450
4451 static rtx
4452 sparc_legitimize_pic_address (rtx orig, rtx reg)
4453 {
4454 bool gotdata_op = false;
4455
4456 if (GET_CODE (orig) == SYMBOL_REF
4457 /* See the comment in sparc_expand_move. */
4458 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4459 {
4460 rtx pic_ref, address;
4461 rtx_insn *insn;
4462
4463 if (reg == 0)
4464 {
4465 gcc_assert (can_create_pseudo_p ());
4466 reg = gen_reg_rtx (Pmode);
4467 }
4468
4469 if (flag_pic == 2)
4470 {
4471 /* If not during reload, allocate another temp reg here for loading
4472 in the address, so that these instructions can be optimized
4473 properly. */
4474 rtx temp_reg = (! can_create_pseudo_p ()
4475 ? reg : gen_reg_rtx (Pmode));
4476
4477 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4478 won't get confused into thinking that these two instructions
4479 are loading in the true address of the symbol. If in the
4480 future a PIC rtx exists, that should be used instead. */
4481 if (TARGET_ARCH64)
4482 {
4483 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4484 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4485 }
4486 else
4487 {
4488 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4489 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4490 }
4491 address = temp_reg;
4492 gotdata_op = true;
4493 }
4494 else
4495 address = orig;
4496
4497 crtl->uses_pic_offset_table = 1;
4498 if (gotdata_op)
4499 {
4500 if (TARGET_ARCH64)
4501 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4502 pic_offset_table_rtx,
4503 address, orig));
4504 else
4505 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4506 pic_offset_table_rtx,
4507 address, orig));
4508 }
4509 else
4510 {
4511 pic_ref
4512 = gen_const_mem (Pmode,
4513 gen_rtx_PLUS (Pmode,
4514 pic_offset_table_rtx, address));
4515 insn = emit_move_insn (reg, pic_ref);
4516 }
4517
4518 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4519 by loop. */
4520 set_unique_reg_note (insn, REG_EQUAL, orig);
4521 return reg;
4522 }
4523 else if (GET_CODE (orig) == CONST)
4524 {
4525 rtx base, offset;
4526
4527 if (GET_CODE (XEXP (orig, 0)) == PLUS
4528 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
4529 return orig;
4530
4531 if (reg == 0)
4532 {
4533 gcc_assert (can_create_pseudo_p ());
4534 reg = gen_reg_rtx (Pmode);
4535 }
4536
4537 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4538 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4539 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4540 base == reg ? NULL_RTX : reg);
4541
4542 if (GET_CODE (offset) == CONST_INT)
4543 {
4544 if (SMALL_INT (offset))
4545 return plus_constant (Pmode, base, INTVAL (offset));
4546 else if (can_create_pseudo_p ())
4547 offset = force_reg (Pmode, offset);
4548 else
4549 /* If we reach here, then something is seriously wrong. */
4550 gcc_unreachable ();
4551 }
4552 return gen_rtx_PLUS (Pmode, base, offset);
4553 }
4554 else if (GET_CODE (orig) == LABEL_REF)
4555 /* ??? We ought to be checking that the register is live instead, in case
4556 it is eliminated. */
4557 crtl->uses_pic_offset_table = 1;
4558
4559 return orig;
4560 }
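/* Roughly speaking (an added illustration, not a literal dump of the
   patterns), with -fPIC the flag_pic == 2 path above loads a global
   symbol's address as something like

       sethi  %hi(sym), %tmp         ! movsi/movdi_high_pic
       or     %tmp, %lo(sym), %tmp   ! ..._lo_sum_pic
       ld     [%l7 + %tmp], %reg     ! ..._pic_gotdata_op

   where %l7 holds the GOT pointer; with a GOTDATA-capable assembler the
   pieces carry %gdop_* relocations so the linker can relax the load for
   symbols that turn out to be local.  With -fpic the GOT slot is instead
   reached as a small constant offset from %l7.  */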
4561
4562 /* Try machine-dependent ways of modifying an illegitimate address X
4563 to be legitimate. If we find one, return the new, valid address.
4564
4565 OLDX is the address as it was before break_out_memory_refs was called.
4566 In some cases it is useful to look at this to decide what needs to be done.
4567
4568 MODE is the mode of the operand pointed to by X.
4569
4570 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4571
4572 static rtx
4573 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4574 machine_mode mode)
4575 {
4576 rtx orig_x = x;
4577
4578 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4579 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4580 force_operand (XEXP (x, 0), NULL_RTX));
4581 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4582 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4583 force_operand (XEXP (x, 1), NULL_RTX));
4584 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4585 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4586 XEXP (x, 1));
4587 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4588 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4589 force_operand (XEXP (x, 1), NULL_RTX));
4590
4591 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4592 return x;
4593
4594 if (sparc_tls_referenced_p (x))
4595 x = sparc_legitimize_tls_address (x);
4596 else if (flag_pic)
4597 x = sparc_legitimize_pic_address (x, NULL_RTX);
4598 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4599 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4600 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4601 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
4602 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4603 copy_to_mode_reg (Pmode, XEXP (x, 0)));
4604 else if (GET_CODE (x) == SYMBOL_REF
4605 || GET_CODE (x) == CONST
4606 || GET_CODE (x) == LABEL_REF)
4607 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
4608
4609 return x;
4610 }
4611
4612 /* Delegitimize an address that was legitimized by the above function. */
4613
4614 static rtx
4615 sparc_delegitimize_address (rtx x)
4616 {
4617 x = delegitimize_mem_from_attrs (x);
4618
4619 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
4620 switch (XINT (XEXP (x, 1), 1))
4621 {
4622 case UNSPEC_MOVE_PIC:
4623 case UNSPEC_TLSLE:
4624 x = XVECEXP (XEXP (x, 1), 0, 0);
4625 gcc_assert (GET_CODE (x) == SYMBOL_REF);
4626 break;
4627 default:
4628 break;
4629 }
4630
4631 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
4632 if (GET_CODE (x) == MINUS
4633 && REG_P (XEXP (x, 0))
4634 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
4635 && GET_CODE (XEXP (x, 1)) == LO_SUM
4636 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
4637 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
4638 {
4639 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
4640 gcc_assert (GET_CODE (x) == LABEL_REF);
4641 }
4642
4643 return x;
4644 }
4645
4646 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4647 replace the input X, or the original X if no replacement is called for.
4648 The output parameter *WIN is 1 if the calling macro should goto WIN,
4649 0 if it should not.
4650
4651 For SPARC, we wish to handle addresses by splitting them into
4652 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
4653 This cuts the number of extra insns by one.
4654
4655 Do nothing when generating PIC code and the address is a symbolic
4656 operand or requires a scratch register. */
4657
4658 rtx
4659 sparc_legitimize_reload_address (rtx x, machine_mode mode,
4660 int opnum, int type,
4661 int ind_levels ATTRIBUTE_UNUSED, int *win)
4662 {
4663 /* Decompose SImode constants into HIGH+LO_SUM. */
4664 if (CONSTANT_P (x)
4665 && (mode != TFmode || TARGET_ARCH64)
4666 && GET_MODE (x) == SImode
4667 && GET_CODE (x) != LO_SUM
4668 && GET_CODE (x) != HIGH
4669 && sparc_cmodel <= CM_MEDLOW
4670 && !(flag_pic
4671 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
4672 {
4673 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
4674 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4675 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4676 opnum, (enum reload_type)type);
4677 *win = 1;
4678 return x;
4679 }
4680
4681 /* We have to recognize what we have already generated above. */
4682 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
4683 {
4684 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4685 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4686 opnum, (enum reload_type)type);
4687 *win = 1;
4688 return x;
4689 }
4690
4691 *win = 0;
4692 return x;
4693 }
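/* Concretely (an added example; register names are illustrative), a
   non-PIC SImode reference to a static variable "var" ends up as

       sethi  %hi(var), %g1
       ld     [%g1 + %lo(var)], %o0

   i.e. the HIGH part is reloaded into a register while the LO_SUM stays
   inside the memory reference, which is the splitting described in the
   function comment above.  */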
4694
4695 /* Return true if ADDR (a legitimate address expression)
4696 has an effect that depends on the machine mode it is used for.
4697
4698 In PIC mode,
4699
4700 (mem:HI [%l7+a])
4701
4702 is not equivalent to
4703
4704 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
4705
4706 because [%l7+a+1] is interpreted as the address of (a+1). */
4707
4708
4709 static bool
4710 sparc_mode_dependent_address_p (const_rtx addr,
4711 addr_space_t as ATTRIBUTE_UNUSED)
4712 {
4713 if (flag_pic && GET_CODE (addr) == PLUS)
4714 {
4715 rtx op0 = XEXP (addr, 0);
4716 rtx op1 = XEXP (addr, 1);
4717 if (op0 == pic_offset_table_rtx
4718 && symbolic_operand (op1, VOIDmode))
4719 return true;
4720 }
4721
4722 return false;
4723 }
4724
4725 #ifdef HAVE_GAS_HIDDEN
4726 # define USE_HIDDEN_LINKONCE 1
4727 #else
4728 # define USE_HIDDEN_LINKONCE 0
4729 #endif
4730
4731 static void
4732 get_pc_thunk_name (char name[32], unsigned int regno)
4733 {
4734 const char *reg_name = reg_names[regno];
4735
4736 /* Skip the leading '%' as that cannot be used in a
4737 symbol name. */
4738 reg_name += 1;
4739
4740 if (USE_HIDDEN_LINKONCE)
4741 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4742 else
4743 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4744 }
4745
4746 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4747
4748 static rtx
4749 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4750 {
4751 int orig_flag_pic = flag_pic;
4752 rtx insn;
4753
4754 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4755 flag_pic = 0;
4756 if (TARGET_ARCH64)
4757 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4758 else
4759 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4760 flag_pic = orig_flag_pic;
4761
4762 return insn;
4763 }
4764
4765 /* Emit code to load the GOT register. */
4766
4767 void
4768 load_got_register (void)
4769 {
4770 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4771 if (!global_offset_table_rtx)
4772 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4773
4774 if (TARGET_VXWORKS_RTP)
4775 emit_insn (gen_vxworks_load_got ());
4776 else
4777 {
4778 /* The GOT symbol is subject to a PC-relative relocation so we need a
4779 helper function to add the PC value and thus get the final value. */
4780 if (!got_helper_rtx)
4781 {
4782 char name[32];
4783 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4784 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4785 }
4786
4787 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4788 got_helper_rtx,
4789 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4790 }
4791
4792 /* Need to emit this whether or not we obey regdecls,
4793 since setjmp/longjmp can cause life info to screw up.
4794 ??? In the case where we don't obey regdecls, this is not sufficient
4795 since we may not fall out the bottom. */
4796 emit_use (global_offset_table_rtx);
4797 }
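/* For reference (added note): outside of VxWorks RTP this typically
   materializes as the familiar sequence

       sethi  %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
       call   __sparc_get_pc_thunk.l7
        add   %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk adds its caller's PC (%o7) into %l7, leaving %l7
   pointing at the GOT.  */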
4798
4799 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4800 address of the call target. */
4801
4802 void
4803 sparc_emit_call_insn (rtx pat, rtx addr)
4804 {
4805 rtx_insn *insn;
4806
4807 insn = emit_call_insn (pat);
4808
4809 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4810 if (TARGET_VXWORKS_RTP
4811 && flag_pic
4812 && GET_CODE (addr) == SYMBOL_REF
4813 && (SYMBOL_REF_DECL (addr)
4814 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4815 : !SYMBOL_REF_LOCAL_P (addr)))
4816 {
4817 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4818 crtl->uses_pic_offset_table = 1;
4819 }
4820 }
4821 \f
4822 /* Return 1 if RTX is a MEM which is known to be aligned to at
4823 least a DESIRED byte boundary. */
4824
4825 int
4826 mem_min_alignment (rtx mem, int desired)
4827 {
4828 rtx addr, base, offset;
4829
4830 /* If it's not a MEM we can't accept it. */
4831 if (GET_CODE (mem) != MEM)
4832 return 0;
4833
4834 /* Obviously... */
4835 if (!TARGET_UNALIGNED_DOUBLES
4836 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4837 return 1;
4838
4839 /* ??? The rest of the function predates MEM_ALIGN so
4840 there is probably a bit of redundancy. */
4841 addr = XEXP (mem, 0);
4842 base = offset = NULL_RTX;
4843 if (GET_CODE (addr) == PLUS)
4844 {
4845 if (GET_CODE (XEXP (addr, 0)) == REG)
4846 {
4847 base = XEXP (addr, 0);
4848
4849 /* What we are saying here is that if the base
4850 REG is aligned properly, the compiler will make
4851 sure any REG based index upon it will be so
4852 as well. */
4853 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4854 offset = XEXP (addr, 1);
4855 else
4856 offset = const0_rtx;
4857 }
4858 }
4859 else if (GET_CODE (addr) == REG)
4860 {
4861 base = addr;
4862 offset = const0_rtx;
4863 }
4864
4865 if (base != NULL_RTX)
4866 {
4867 int regno = REGNO (base);
4868
4869 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4870 {
4871 /* Check if the compiler has recorded some information
4872 about the alignment of the base REG. If reload has
4873 completed, we already matched with proper alignments.
4874 If not running global_alloc, reload might give us
4875 an unaligned pointer to the local stack, though. */
4876 if (((cfun != 0
4877 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4878 || (optimize && reload_completed))
4879 && (INTVAL (offset) & (desired - 1)) == 0)
4880 return 1;
4881 }
4882 else
4883 {
4884 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4885 return 1;
4886 }
4887 }
4888 else if (! TARGET_UNALIGNED_DOUBLES
4889 || CONSTANT_P (addr)
4890 || GET_CODE (addr) == LO_SUM)
4891 {
4892 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4893 is true, in which case we can only assume that an access is aligned if
4894 it is to a constant address, or the address involves a LO_SUM. */
4895 return 1;
4896 }
4897
4898 /* An obviously unaligned address. */
4899 return 0;
4900 }
4901
4902 \f
4903 /* Vectors to keep interesting information about registers where it can easily
4904 be found. We used to use the actual mode value as the bit number, but there
4905 are more than 32 modes now. Instead we use two tables: one indexed by
4906 hard register number, and one indexed by mode. */
4907
4908 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4909 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4910 mapped into one sparc_mode_class mode. */
4911
4912 enum sparc_mode_class {
4913 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
4914 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4915 CC_MODE, CCFP_MODE
4916 };
4917
4918 /* Modes for single-word and smaller quantities. */
4919 #define S_MODES \
4920 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
4921
4922 /* Modes for double-word and smaller quantities. */
4923 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
4924
4925 /* Modes for quad-word and smaller quantities. */
4926 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4927
4928 /* Modes for 8-word and smaller quantities. */
4929 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4930
4931 /* Modes for single-float quantities. */
4932 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4933
4934 /* Modes for double-float and smaller quantities. */
4935 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
4936
4937 /* Modes for quad-float and smaller quantities. */
4938 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4939
4940 /* Modes for quad-float pairs and smaller quantities. */
4941 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4942
4943 /* Modes for double-float only quantities. */
4944 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4945
4946 /* Modes for quad-float and double-float only quantities. */
4947 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4948
4949 /* Modes for quad-float pairs and double-float only quantities. */
4950 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4951
4952 /* Modes for condition codes. */
4953 #define CC_MODES (1 << (int) CC_MODE)
4954 #define CCFP_MODES (1 << (int) CCFP_MODE)
4955
4956 /* Value is 1 if register/mode pair is acceptable on sparc.
4957
4958 The funny mixture of D and T modes is because integer operations
4959 do not specially operate on tetra quantities, so non-quad-aligned
4960 registers can hold quadword quantities (except %o4 and %i4 because
4961 they cross fixed registers).
4962
4963 ??? Note that, despite the settings, non-double-aligned parameter
4964 registers can hold double-word quantities in 32-bit mode. */
4965
4966 /* This points to either the 32 bit or the 64 bit version. */
4967 const int *hard_regno_mode_classes;
4968
4969 static const int hard_32bit_mode_classes[] = {
4970 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4971 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4972 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4973 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4974
4975 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4976 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4977 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4978 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4979
4980 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4981 and none can hold SFmode/SImode values. */
4982 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4983 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4984 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4985 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4986
4987 /* %fcc[0123] */
4988 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4989
4990 /* %icc, %sfp, %gsr */
4991 CC_MODES, 0, D_MODES
4992 };
4993
4994 static const int hard_64bit_mode_classes[] = {
4995 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4996 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4997 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4998 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4999
5000 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5001 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5002 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5003 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5004
5005 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5006 and none can hold SFmode/SImode values. */
5007 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5008 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5009 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5010 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5011
5012 /* %fcc[0123] */
5013 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5014
5015 /* %icc, %sfp, %gsr */
5016 CC_MODES, 0, D_MODES
5017 };
5018
5019 int sparc_mode_class [NUM_MACHINE_MODES];
5020
5021 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5022
5023 static void
5024 sparc_init_modes (void)
5025 {
5026 int i;
5027
5028 for (i = 0; i < NUM_MACHINE_MODES; i++)
5029 {
5030 machine_mode m = (machine_mode) i;
5031 unsigned int size = GET_MODE_SIZE (m);
5032
5033 switch (GET_MODE_CLASS (m))
5034 {
5035 case MODE_INT:
5036 case MODE_PARTIAL_INT:
5037 case MODE_COMPLEX_INT:
5038 if (size < 4)
5039 sparc_mode_class[i] = 1 << (int) H_MODE;
5040 else if (size == 4)
5041 sparc_mode_class[i] = 1 << (int) S_MODE;
5042 else if (size == 8)
5043 sparc_mode_class[i] = 1 << (int) D_MODE;
5044 else if (size == 16)
5045 sparc_mode_class[i] = 1 << (int) T_MODE;
5046 else if (size == 32)
5047 sparc_mode_class[i] = 1 << (int) O_MODE;
5048 else
5049 sparc_mode_class[i] = 0;
5050 break;
5051 case MODE_VECTOR_INT:
5052 if (size == 4)
5053 sparc_mode_class[i] = 1 << (int) SF_MODE;
5054 else if (size == 8)
5055 sparc_mode_class[i] = 1 << (int) DF_MODE;
5056 else
5057 sparc_mode_class[i] = 0;
5058 break;
5059 case MODE_FLOAT:
5060 case MODE_COMPLEX_FLOAT:
5061 if (size == 4)
5062 sparc_mode_class[i] = 1 << (int) SF_MODE;
5063 else if (size == 8)
5064 sparc_mode_class[i] = 1 << (int) DF_MODE;
5065 else if (size == 16)
5066 sparc_mode_class[i] = 1 << (int) TF_MODE;
5067 else if (size == 32)
5068 sparc_mode_class[i] = 1 << (int) OF_MODE;
5069 else
5070 sparc_mode_class[i] = 0;
5071 break;
5072 case MODE_CC:
5073 if (m == CCFPmode || m == CCFPEmode)
5074 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5075 else
5076 sparc_mode_class[i] = 1 << (int) CC_MODE;
5077 break;
5078 default:
5079 sparc_mode_class[i] = 0;
5080 break;
5081 }
5082 }
5083
5084 if (TARGET_ARCH64)
5085 hard_regno_mode_classes = hard_64bit_mode_classes;
5086 else
5087 hard_regno_mode_classes = hard_32bit_mode_classes;
5088
5089 /* Initialize the array used by REGNO_REG_CLASS. */
5090 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5091 {
5092 if (i < 16 && TARGET_V8PLUS)
5093 sparc_regno_reg_class[i] = I64_REGS;
5094 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5095 sparc_regno_reg_class[i] = GENERAL_REGS;
5096 else if (i < 64)
5097 sparc_regno_reg_class[i] = FP_REGS;
5098 else if (i < 96)
5099 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5100 else if (i < 100)
5101 sparc_regno_reg_class[i] = FPCC_REGS;
5102 else
5103 sparc_regno_reg_class[i] = NO_REGS;
5104 }
5105 }
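/* Example of how these tables are consulted (added note, assuming the
   usual HARD_REGNO_MODE_OK definition in sparc.h): asking whether %f0
   can hold a DFmode value boils down to testing the DF_MODE bit, which
   sparc_init_modes sets for 8-byte float modes, against the FP-register
   entry of hard_regno_mode_classes, i.e. roughly

       (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0  */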
5106 \f
5107 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5108
5109 static inline bool
5110 save_global_or_fp_reg_p (unsigned int regno,
5111 int leaf_function ATTRIBUTE_UNUSED)
5112 {
5113 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
5114 }
5115
5116 /* Return whether the return address register (%i7) is needed. */
5117
5118 static inline bool
5119 return_addr_reg_needed_p (int leaf_function)
5120 {
5121 /* If it is live, for example because of __builtin_return_address (0). */
5122 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5123 return true;
5124
5125 /* Otherwise, it is needed as a save register if %o7 is clobbered. */
5126 if (!leaf_function
5127 /* Loading the GOT register clobbers %o7. */
5128 || crtl->uses_pic_offset_table
5129 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5130 return true;
5131
5132 return false;
5133 }
5134
5135 /* Return whether REGNO, a local or in register, must be saved/restored. */
5136
5137 static bool
5138 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5139 {
5140 /* General case: call-saved registers live at some point. */
5141 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
5142 return true;
5143
5144 /* Frame pointer register (%fp) if needed. */
5145 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5146 return true;
5147
5148 /* Return address register (%i7) if needed. */
5149 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5150 return true;
5151
5152 /* GOT register (%l7) if needed. */
5153 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
5154 return true;
5155
5156 /* If the function accesses prior frames, the frame pointer and the return
5157 address of the previous frame must be saved on the stack. */
5158 if (crtl->accesses_prior_frames
5159 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5160 return true;
5161
5162 return false;
5163 }
5164
5165 /* Compute the frame size required by the function. This function is called
5166 during the reload pass and also by sparc_expand_prologue. */
5167
5168 HOST_WIDE_INT
5169 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5170 {
5171 HOST_WIDE_INT frame_size, apparent_frame_size;
5172 int args_size, n_global_fp_regs = 0;
5173 bool save_local_in_regs_p = false;
5174 unsigned int i;
5175
5176 /* If the function allocates dynamic stack space, the dynamic offset is
5177 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5178 if (leaf_function && !cfun->calls_alloca)
5179 args_size = 0;
5180 else
5181 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5182
5183 /* Calculate space needed for global registers. */
5184 if (TARGET_ARCH64)
5185 {
5186 for (i = 0; i < 8; i++)
5187 if (save_global_or_fp_reg_p (i, 0))
5188 n_global_fp_regs += 2;
5189 }
5190 else
5191 {
5192 for (i = 0; i < 8; i += 2)
5193 if (save_global_or_fp_reg_p (i, 0)
5194 || save_global_or_fp_reg_p (i + 1, 0))
5195 n_global_fp_regs += 2;
5196 }
5197
5198 /* In the flat window model, find out which local and in registers need to
5199 be saved. We don't reserve space in the current frame for them as they
5200 will be spilled into the register window save area of the caller's frame.
5201 However, as soon as we use this register window save area, we must create
5202 that of the current frame to make it the live one. */
5203 if (TARGET_FLAT)
5204 for (i = 16; i < 32; i++)
5205 if (save_local_or_in_reg_p (i, leaf_function))
5206 {
5207 save_local_in_regs_p = true;
5208 break;
5209 }
5210
5211 /* Calculate space needed for FP registers. */
5212 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5213 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5214 n_global_fp_regs += 2;
5215
5216 if (size == 0
5217 && n_global_fp_regs == 0
5218 && args_size == 0
5219 && !save_local_in_regs_p)
5220 frame_size = apparent_frame_size = 0;
5221 else
5222 {
5223 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
5224 apparent_frame_size = ROUND_UP (size - STARTING_FRAME_OFFSET, 8);
5225 apparent_frame_size += n_global_fp_regs * 4;
5226
5227 /* We need to add the size of the outgoing argument area. */
5228 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5229
5230 /* And that of the register window save area. */
5231 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5232
5233 /* Finally, bump to the appropriate alignment. */
5234 frame_size = SPARC_STACK_ALIGN (frame_size);
5235 }
5236
5237 /* Set up values for use in prologue and epilogue. */
5238 sparc_frame_size = frame_size;
5239 sparc_apparent_frame_size = apparent_frame_size;
5240 sparc_n_global_fp_regs = n_global_fp_regs;
5241 sparc_save_local_in_regs_p = save_local_in_regs_p;
5242
5243 return frame_size;
5244 }
5245
5246 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5247
5248 int
5249 sparc_initial_elimination_offset (int to)
5250 {
5251 int offset;
5252
5253 if (to == STACK_POINTER_REGNUM)
5254 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5255 else
5256 offset = 0;
5257
5258 offset += SPARC_STACK_BIAS;
5259 return offset;
5260 }
5261
5262 /* Output any necessary .register pseudo-ops. */
5263
5264 void
5265 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5266 {
5267 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
5268 int i;
5269
5270 if (TARGET_ARCH32)
5271 return;
5272
5273 /* Check if %g[2367] were used without
5274 .register being printed for them already. */
5275 for (i = 2; i < 8; i++)
5276 {
5277 if (df_regs_ever_live_p (i)
5278 && ! sparc_hard_reg_printed [i])
5279 {
5280 sparc_hard_reg_printed [i] = 1;
5281 /* %g7 is used as TLS base register, use #ignore
5282 for it instead of #scratch. */
5283 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5284 i == 7 ? "ignore" : "scratch");
5285 }
5286 if (i == 3) i = 5;
5287 }
5288 #endif
5289 }
5290
5291 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5292
5293 #if PROBE_INTERVAL > 4096
5294 #error Cannot use indexed addressing mode for stack probing
5295 #endif
5296
5297 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5298 inclusive. These are offsets from the current stack pointer.
5299
5300 Note that we don't use the REG+REG addressing mode for the probes because
5301 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
5302 so the advantages of having a single code path win here. */
5303
5304 static void
5305 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5306 {
5307 rtx g1 = gen_rtx_REG (Pmode, 1);
5308
5309 /* See if we have a constant small number of probes to generate. If so,
5310 that's the easy case. */
5311 if (size <= PROBE_INTERVAL)
5312 {
5313 emit_move_insn (g1, GEN_INT (first));
5314 emit_insn (gen_rtx_SET (g1,
5315 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5316 emit_stack_probe (plus_constant (Pmode, g1, -size));
5317 }
5318
5319 /* The run-time loop is made up of 9 insns in the generic case while the
5320 compile-time loop is made up of 4+2*(n-2) insns, where n is the number of intervals. */
5321 else if (size <= 4 * PROBE_INTERVAL)
5322 {
5323 HOST_WIDE_INT i;
5324
5325 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5326 emit_insn (gen_rtx_SET (g1,
5327 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5328 emit_stack_probe (g1);
5329
5330 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5331 it exceeds SIZE. If only two probes are needed, this will not
5332 generate any code. Then probe at FIRST + SIZE. */
5333 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5334 {
5335 emit_insn (gen_rtx_SET (g1,
5336 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5337 emit_stack_probe (g1);
5338 }
5339
5340 emit_stack_probe (plus_constant (Pmode, g1,
5341 (i - PROBE_INTERVAL) - size));
5342 }
5343
5344 /* Otherwise, do the same as above, but in a loop. Note that we must be
5345 extra careful with variables wrapping around because we might be at
5346 the very top (or the very bottom) of the address space and we have
5347 to be able to handle this case properly; in particular, we use an
5348 equality test for the loop condition. */
5349 else
5350 {
5351 HOST_WIDE_INT rounded_size;
5352 rtx g4 = gen_rtx_REG (Pmode, 4);
5353
5354 emit_move_insn (g1, GEN_INT (first));
5355
5356
5357 /* Step 1: round SIZE to the previous multiple of the interval. */
5358
5359 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5360 emit_move_insn (g4, GEN_INT (rounded_size));
5361
5362
5363 /* Step 2: compute initial and final value of the loop counter. */
5364
5365 /* TEST_ADDR = SP + FIRST. */
5366 emit_insn (gen_rtx_SET (g1,
5367 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5368
5369 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5370 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5371
5372
5373 /* Step 3: the loop
5374
5375 while (TEST_ADDR != LAST_ADDR)
5376 {
5377 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5378 probe at TEST_ADDR
5379 }
5380
5381 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5382 until it is equal to ROUNDED_SIZE. */
5383
5384 if (TARGET_ARCH64)
5385 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5386 else
5387 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5388
5389
5390 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5391 that SIZE is equal to ROUNDED_SIZE. */
5392
5393 if (size != rounded_size)
5394 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5395 }
5396
5397 /* Make sure nothing is scheduled before we are done. */
5398 emit_insn (gen_blockage ());
5399 }
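/* Worked example (added commentary): with the default 4096-byte
   PROBE_INTERVAL, a SIZE of 12288 takes the second branch above and
   probes at FIRST + 4096, FIRST + 8192 and FIRST + 12288 below the
   stack pointer, the last probe coinciding with FIRST + SIZE; anything
   larger than four intervals falls through to the run-time loop.  */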
5400
5401 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5402 absolute addresses. */
5403
5404 const char *
5405 output_probe_stack_range (rtx reg1, rtx reg2)
5406 {
5407 static int labelno = 0;
5408 char loop_lab[32];
5409 rtx xops[2];
5410
5411 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5412
5413 /* Loop. */
5414 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5415
5416 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5417 xops[0] = reg1;
5418 xops[1] = GEN_INT (-PROBE_INTERVAL);
5419 output_asm_insn ("add\t%0, %1, %0", xops);
5420
5421 /* Test if TEST_ADDR == LAST_ADDR. */
5422 xops[1] = reg2;
5423 output_asm_insn ("cmp\t%0, %1", xops);
5424
5425 /* Probe at TEST_ADDR and branch. */
5426 if (TARGET_ARCH64)
5427 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5428 else
5429 fputs ("\tbne\t", asm_out_file);
5430 assemble_name_raw (asm_out_file, loop_lab);
5431 fputc ('\n', asm_out_file);
5432 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5433 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5434
5435 return "";
5436 }
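/* As emitted by sparc_emit_probe_stack_range above (with %g1 and %g4),
   the loop printed here comes out roughly as, in 64-bit code:

   .LPSRL0:
       add     %g1, -4096, %g1
       cmp     %g1, %g4
       bne,pt  %xcc, .LPSRL0
        st     %g0, [%g1+2047]

   with the probing store in the branch delay slot; 32-bit code uses a
   plain "bne" and a zero stack bias.  (Added illustration.)  */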
5437
5438 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5439 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5440 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5441 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5442 the action to be performed if it returns false. Return the new offset. */
5443
5444 typedef bool (*sorr_pred_t) (unsigned int, int);
5445 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5446
5447 static int
5448 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5449 int offset, int leaf_function, sorr_pred_t save_p,
5450 sorr_act_t action_true, sorr_act_t action_false)
5451 {
5452 unsigned int i;
5453 rtx mem;
5454 rtx_insn *insn;
5455
5456 if (TARGET_ARCH64 && high <= 32)
5457 {
5458 int fp_offset = -1;
5459
5460 for (i = low; i < high; i++)
5461 {
5462 if (save_p (i, leaf_function))
5463 {
5464 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5465 base, offset));
5466 if (action_true == SORR_SAVE)
5467 {
5468 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5469 RTX_FRAME_RELATED_P (insn) = 1;
5470 }
5471 else /* action_true == SORR_RESTORE */
5472 {
5473 /* The frame pointer must be restored last since its old
5474 value may be used as the base address for the frame. This
5475 is problematic in 64-bit mode only because of the lack
5476 of a double-word load instruction. */
5477 if (i == HARD_FRAME_POINTER_REGNUM)
5478 fp_offset = offset;
5479 else
5480 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5481 }
5482 offset += 8;
5483 }
5484 else if (action_false == SORR_ADVANCE)
5485 offset += 8;
5486 }
5487
5488 if (fp_offset >= 0)
5489 {
5490 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5491 emit_move_insn (hard_frame_pointer_rtx, mem);
5492 }
5493 }
5494 else
5495 {
5496 for (i = low; i < high; i += 2)
5497 {
5498 bool reg0 = save_p (i, leaf_function);
5499 bool reg1 = save_p (i + 1, leaf_function);
5500 machine_mode mode;
5501 int regno;
5502
5503 if (reg0 && reg1)
5504 {
5505 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
5506 regno = i;
5507 }
5508 else if (reg0)
5509 {
5510 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
5511 regno = i;
5512 }
5513 else if (reg1)
5514 {
5515 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
5516 regno = i + 1;
5517 offset += 4;
5518 }
5519 else
5520 {
5521 if (action_false == SORR_ADVANCE)
5522 offset += 8;
5523 continue;
5524 }
5525
5526 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5527 if (action_true == SORR_SAVE)
5528 {
5529 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5530 RTX_FRAME_RELATED_P (insn) = 1;
5531 if (mode == DImode)
5532 {
5533 rtx set1, set2;
5534 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5535 offset));
5536 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5537 RTX_FRAME_RELATED_P (set1) = 1;
5538 mem
5539 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5540 offset + 4));
5541 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5542 RTX_FRAME_RELATED_P (set2) = 1;
5543 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5544 gen_rtx_PARALLEL (VOIDmode,
5545 gen_rtvec (2, set1, set2)));
5546 }
5547 }
5548 else /* action_true == SORR_RESTORE */
5549 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5550
5551 /* Bump and round down to double word
5552 in case we already bumped by 4. */
5553 offset = ROUND_DOWN (offset + 8, 8);
5554 }
5555 }
5556
5557 return offset;
5558 }
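
Editorial sketch, not part of the original source: the pairing decision made by the second loop above, pulled out into a standalone helper for clarity. Here save0/save1 stand for the save_p results for registers i and i+1, is_int for SPARC_INT_REG_P (i), and the returned strings merely name the access mode chosen.

static const char *
model_pair_choice (int i, int save0, int save1, int is_int,
                   int *regno, int *offset_bump)
{
  *regno = i;
  *offset_bump = 0;

  if (save0 && save1)
    return is_int ? "DImode" : "DFmode";   /* one double-word save/restore  */
  if (save0)
    return is_int ? "SImode" : "SFmode";   /* low register of the pair only */
  if (save1)
    {
      *regno = i + 1;                      /* high register only, stored    */
      *offset_bump = 4;                    /* 4 bytes into the slot         */
      return is_int ? "SImode" : "SFmode";
    }
  return "none";                           /* pair skipped entirely         */
}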
5559
5560 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5561
5562 static rtx
5563 emit_adjust_base_to_offset (rtx base, int offset)
5564 {
5565 /* ??? This might be optimized a little as %g1 might already have a
5566 value close enough that a single add insn will do. */
5567 /* ??? Although, all of this is probably only a temporary fix because
5568 if %g1 can hold a function result, then sparc_expand_epilogue will
5569 lose (the result will be clobbered). */
5570 rtx new_base = gen_rtx_REG (Pmode, 1);
5571 emit_move_insn (new_base, GEN_INT (offset));
5572 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5573 return new_base;
5574 }
5575
5576 /* Emit code to save/restore call-saved global and FP registers. */
5577
5578 static void
5579 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5580 {
5581 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5582 {
5583 base = emit_adjust_base_to_offset (base, offset);
5584 offset = 0;
5585 }
5586
5587 offset
5588 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5589 save_global_or_fp_reg_p, action, SORR_NONE);
5590 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5591 save_global_or_fp_reg_p, action, SORR_NONE);
5592 }
5593
5594 /* Emit code to save/restore call-saved local and in registers. */
5595
5596 static void
5597 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5598 {
5599 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5600 {
5601 base = emit_adjust_base_to_offset (base, offset);
5602 offset = 0;
5603 }
5604
5605 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5606 save_local_or_in_reg_p, action, SORR_ADVANCE);
5607 }
5608
5609 /* Emit a window_save insn. */
5610
5611 static rtx_insn *
5612 emit_window_save (rtx increment)
5613 {
5614 rtx_insn *insn = emit_insn (gen_window_save (increment));
5615 RTX_FRAME_RELATED_P (insn) = 1;
5616
5617 /* The incoming return address (%o7) is saved in %i7. */
5618 add_reg_note (insn, REG_CFA_REGISTER,
5619 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5620 gen_rtx_REG (Pmode,
5621 INCOMING_RETURN_ADDR_REGNUM)));
5622
5623 /* The window save event. */
5624 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5625
5626 /* The CFA is %fp, the hard frame pointer. */
5627 add_reg_note (insn, REG_CFA_DEF_CFA,
5628 plus_constant (Pmode, hard_frame_pointer_rtx,
5629 INCOMING_FRAME_SP_OFFSET));
5630
5631 return insn;
5632 }
5633
5634 /* Generate an increment for the stack pointer. */
5635
5636 static rtx
5637 gen_stack_pointer_inc (rtx increment)
5638 {
5639 return gen_rtx_SET (stack_pointer_rtx,
5640 gen_rtx_PLUS (Pmode,
5641 stack_pointer_rtx,
5642 increment));
5643 }
5644
5645 /* Expand the function prologue. The prologue is responsible for reserving
5646 storage for the frame, saving the call-saved registers and loading the
5647 GOT register if needed. */
5648
5649 void
5650 sparc_expand_prologue (void)
5651 {
5652 HOST_WIDE_INT size;
5653 rtx_insn *insn;
5654
5655 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5656 on the final value of the flag means deferring the prologue/epilogue
5657 expansion until just before the second scheduling pass, which is too
5658 late to emit multiple epilogues or return insns.
5659
5660 Of course we are making the assumption that the value of the flag
5661 will not change between now and its final value. Of the three parts
5662 of the formula, only the last one can reasonably vary. Let's take a
5663 closer look, after assuming that the first two are set to true
5664 (otherwise the last value is effectively silenced).
5665
5666 If only_leaf_regs_used returns false, the global predicate will also
5667 be false so the actual frame size calculated below will be positive.
5668 As a consequence, the save_register_window insn will be emitted in
5669 the instruction stream; now this insn explicitly references %fp
5670 which is not a leaf register so only_leaf_regs_used will always
5671 return false subsequently.
5672
5673 If only_leaf_regs_used returns true, we hope that the subsequent
5674 optimization passes won't cause non-leaf registers to pop up. For
5675 example, the regrename pass has special provisions to not rename to
5676 non-leaf registers in a leaf function. */
5677 sparc_leaf_function_p
5678 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5679
5680 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5681
5682 if (flag_stack_usage_info)
5683 current_function_static_stack_size = size;
5684
5685 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5686 {
5687 if (crtl->is_leaf && !cfun->calls_alloca)
5688 {
5689 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
5690 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
5691 size - STACK_CHECK_PROTECT);
5692 }
5693 else if (size > 0)
5694 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5695 }
5696
5697 if (size == 0)
5698 ; /* do nothing. */
5699 else if (sparc_leaf_function_p)
5700 {
5701 rtx size_int_rtx = GEN_INT (-size);
5702
5703 if (size <= 4096)
5704 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5705 else if (size <= 8192)
5706 {
5707 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5708 RTX_FRAME_RELATED_P (insn) = 1;
5709
5710 /* %sp is still the CFA register. */
5711 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5712 }
5713 else
5714 {
5715 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5716 emit_move_insn (size_rtx, size_int_rtx);
5717 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5718 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5719 gen_stack_pointer_inc (size_int_rtx));
5720 }
5721
5722 RTX_FRAME_RELATED_P (insn) = 1;
5723 }
5724 else
5725 {
5726 rtx size_int_rtx = GEN_INT (-size);
5727
5728 if (size <= 4096)
5729 emit_window_save (size_int_rtx);
5730 else if (size <= 8192)
5731 {
5732 emit_window_save (GEN_INT (-4096));
5733
5734 /* %sp is not the CFA register anymore. */
5735 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5736
5737 /* Make sure no %fp-based store is issued until after the frame is
5738 established. The offset between the frame pointer and the stack
5739 pointer is calculated relative to the value of the stack pointer
5740 at the end of the function prologue, and moving instructions that
5741 access the stack via the frame pointer between the instructions
5742 that decrement the stack pointer could result in accessing the
5743 register window save area, which is volatile. */
5744 emit_insn (gen_frame_blockage ());
5745 }
5746 else
5747 {
5748 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5749 emit_move_insn (size_rtx, size_int_rtx);
5750 emit_window_save (size_rtx);
5751 }
5752 }
5753
5754 if (sparc_leaf_function_p)
5755 {
5756 sparc_frame_base_reg = stack_pointer_rtx;
5757 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5758 }
5759 else
5760 {
5761 sparc_frame_base_reg = hard_frame_pointer_rtx;
5762 sparc_frame_base_offset = SPARC_STACK_BIAS;
5763 }
5764
5765 if (sparc_n_global_fp_regs > 0)
5766 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5767 sparc_frame_base_offset
5768 - sparc_apparent_frame_size,
5769 SORR_SAVE);
5770
5771 /* Load the GOT register if needed. */
5772 if (crtl->uses_pic_offset_table)
5773 load_got_register ();
5774
5775 /* Advertise that the data calculated just above are now valid. */
5776 sparc_prologue_data_valid_p = true;
5777 }
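
Editorial note, not part of the original source: the 4096 and 8192 thresholds above reflect the 13-bit signed immediate field of the SPARC add/save instructions (range -4096..4095). A rough illustration for the leaf-function path, with instruction spellings kept approximate:

/* size = 3000   ->  one decrement:   add %sp, -3000, %sp
   size = 6000   ->  two decrements:  add %sp, -4096, %sp
                                      add %sp, -1904, %sp
   size = 20000  ->  the constant is first loaded into %g1 and then added,
                     with a REG_FRAME_RELATED_EXPR note describing the net
                     stack adjustment for the unwinder.  */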
5778
5779 /* Expand the function prologue for the flat register window model. The
5780 prologue is responsible for reserving storage for the frame, saving the
5781 call-saved registers and loading the GOT register if needed. */
5782
5783 void
5784 sparc_flat_expand_prologue (void)
5785 {
5786 HOST_WIDE_INT size;
5787 rtx_insn *insn;
5788
5789 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
5790
5791 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5792
5793 if (flag_stack_usage_info)
5794 current_function_static_stack_size = size;
5795
5796 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5797 {
5798 if (crtl->is_leaf && !cfun->calls_alloca)
5799 {
5800 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
5801 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
5802 size - STACK_CHECK_PROTECT);
5803 }
5804 else if (size > 0)
5805 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5806 }
5807
5808 if (sparc_save_local_in_regs_p)
5809 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5810 SORR_SAVE);
5811
5812 if (size == 0)
5813 ; /* do nothing. */
5814 else
5815 {
5816 rtx size_int_rtx, size_rtx;
5817
5818 size_rtx = size_int_rtx = GEN_INT (-size);
5819
5820 /* We establish the frame (i.e. decrement the stack pointer) first, even
5821 if we use a frame pointer, because we cannot clobber any call-saved
5822 registers, including the frame pointer, if we haven't created a new
5823 register save area, for the sake of compatibility with the ABI. */
5824 if (size <= 4096)
5825 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5826 else if (size <= 8192 && !frame_pointer_needed)
5827 {
5828 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5829 RTX_FRAME_RELATED_P (insn) = 1;
5830 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5831 }
5832 else
5833 {
5834 size_rtx = gen_rtx_REG (Pmode, 1);
5835 emit_move_insn (size_rtx, size_int_rtx);
5836 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5837 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5838 gen_stack_pointer_inc (size_int_rtx));
5839 }
5840 RTX_FRAME_RELATED_P (insn) = 1;
5841
5842 /* Ensure nothing is scheduled until after the frame is established. */
5843 emit_insn (gen_blockage ());
5844
5845 if (frame_pointer_needed)
5846 {
5847 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
5848 gen_rtx_MINUS (Pmode,
5849 stack_pointer_rtx,
5850 size_rtx)));
5851 RTX_FRAME_RELATED_P (insn) = 1;
5852
5853 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5854 gen_rtx_SET (hard_frame_pointer_rtx,
5855 plus_constant (Pmode, stack_pointer_rtx,
5856 size)));
5857 }
5858
5859 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5860 {
5861 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5862 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5863
5864 insn = emit_move_insn (i7, o7);
5865 RTX_FRAME_RELATED_P (insn) = 1;
5866
5867 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
5868
5869 /* Prevent this instruction from ever being considered dead,
5870 even if this function has no epilogue. */
5871 emit_use (i7);
5872 }
5873 }
5874
5875 if (frame_pointer_needed)
5876 {
5877 sparc_frame_base_reg = hard_frame_pointer_rtx;
5878 sparc_frame_base_offset = SPARC_STACK_BIAS;
5879 }
5880 else
5881 {
5882 sparc_frame_base_reg = stack_pointer_rtx;
5883 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5884 }
5885
5886 if (sparc_n_global_fp_regs > 0)
5887 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5888 sparc_frame_base_offset
5889 - sparc_apparent_frame_size,
5890 SORR_SAVE);
5891
5892 /* Load the GOT register if needed. */
5893 if (crtl->uses_pic_offset_table)
5894 load_got_register ();
5895
5896 /* Advertise that the data calculated just above are now valid. */
5897 sparc_prologue_data_valid_p = true;
5898 }
5899
5900 /* This function generates the assembly code for function entry, which boils
5901 down to emitting the necessary .register directives. */
5902
5903 static void
5904 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5905 {
5906 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5907 if (!TARGET_FLAT)
5908 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
5909
5910 sparc_output_scratch_registers (file);
5911 }
5912
5913 /* Expand the function epilogue, either normal or part of a sibcall.
5914 We emit all the instructions except the return or the call. */
5915
5916 void
5917 sparc_expand_epilogue (bool for_eh)
5918 {
5919 HOST_WIDE_INT size = sparc_frame_size;
5920
5921 if (cfun->calls_alloca)
5922 emit_insn (gen_frame_blockage ());
5923
5924 if (sparc_n_global_fp_regs > 0)
5925 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5926 sparc_frame_base_offset
5927 - sparc_apparent_frame_size,
5928 SORR_RESTORE);
5929
5930 if (size == 0 || for_eh)
5931 ; /* do nothing. */
5932 else if (sparc_leaf_function_p)
5933 {
5934 if (size <= 4096)
5935 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
5936 else if (size <= 8192)
5937 {
5938 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5939 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
5940 }
5941 else
5942 {
5943 rtx reg = gen_rtx_REG (Pmode, 1);
5944 emit_move_insn (reg, GEN_INT (size));
5945 emit_insn (gen_stack_pointer_inc (reg));
5946 }
5947 }
5948 }
5949
5950 /* Expand the function epilogue, either normal or part of a sibcall.
5951 We emit all the instructions except the return or the call. */
5952
5953 void
5954 sparc_flat_expand_epilogue (bool for_eh)
5955 {
5956 HOST_WIDE_INT size = sparc_frame_size;
5957
5958 if (sparc_n_global_fp_regs > 0)
5959 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5960 sparc_frame_base_offset
5961 - sparc_apparent_frame_size,
5962 SORR_RESTORE);
5963
5964 /* If we have a frame pointer, we'll need both to restore it before the
5965 frame is destroyed and to use its current value in destroying the frame.
5966 Since we don't have an atomic way to do that in the flat window model,
5967 we save the current value into a temporary register (%g1). */
5968 if (frame_pointer_needed && !for_eh)
5969 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5970
5971 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5972 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5973 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5974
5975 if (sparc_save_local_in_regs_p)
5976 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5977 sparc_frame_base_offset,
5978 SORR_RESTORE);
5979
5980 if (size == 0 || for_eh)
5981 ; /* do nothing. */
5982 else if (frame_pointer_needed)
5983 {
5984 /* Make sure the frame is destroyed after everything else is done. */
5985 emit_insn (gen_blockage ());
5986
5987 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5988 }
5989 else
5990 {
5991 /* Likewise. */
5992 emit_insn (gen_blockage ());
5993
5994 if (size <= 4096)
5995 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
5996 else if (size <= 8192)
5997 {
5998 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5999 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6000 }
6001 else
6002 {
6003 rtx reg = gen_rtx_REG (Pmode, 1);
6004 emit_move_insn (reg, GEN_INT (size));
6005 emit_insn (gen_stack_pointer_inc (reg));
6006 }
6007 }
6008 }
6009
6010 /* Return true if it is appropriate to emit `return' instructions in the
6011 body of a function. */
6012
6013 bool
6014 sparc_can_use_return_insn_p (void)
6015 {
6016 return sparc_prologue_data_valid_p
6017 && sparc_n_global_fp_regs == 0
6018 && TARGET_FLAT
6019 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6020 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
6021 }
6022
6023 /* This function generates the assembly code for function exit. */
6024
6025 static void
6026 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6027 {
6028 /* If the last two instructions of a function are "call foo; dslot;"
6029 the return address might point to the first instruction in the next
6030 function and we have to output a dummy nop for the sake of sane
6031 backtraces in such cases. This is pointless for sibling calls since
6032 the return address is explicitly adjusted. */
6033
6034 rtx_insn *insn = get_last_insn ();
6035
6036 rtx last_real_insn = prev_real_insn (insn);
6037 if (last_real_insn
6038 && NONJUMP_INSN_P (last_real_insn)
6039 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6040 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6041
6042 if (last_real_insn
6043 && CALL_P (last_real_insn)
6044 && !SIBLING_CALL_P (last_real_insn))
6045 fputs("\tnop\n", file);
6046
6047 sparc_output_deferred_case_vectors ();
6048 }
6049
6050 /* Output a 'restore' instruction. */
6051
6052 static void
6053 output_restore (rtx pat)
6054 {
6055 rtx operands[3];
6056
6057 if (! pat)
6058 {
6059 fputs ("\t restore\n", asm_out_file);
6060 return;
6061 }
6062
6063 gcc_assert (GET_CODE (pat) == SET);
6064
6065 operands[0] = SET_DEST (pat);
6066 pat = SET_SRC (pat);
6067
6068 switch (GET_CODE (pat))
6069 {
6070 case PLUS:
6071 operands[1] = XEXP (pat, 0);
6072 operands[2] = XEXP (pat, 1);
6073 output_asm_insn (" restore %r1, %2, %Y0", operands);
6074 break;
6075 case LO_SUM:
6076 operands[1] = XEXP (pat, 0);
6077 operands[2] = XEXP (pat, 1);
6078 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6079 break;
6080 case ASHIFT:
6081 operands[1] = XEXP (pat, 0);
6082 gcc_assert (XEXP (pat, 1) == const1_rtx);
6083 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6084 break;
6085 default:
6086 operands[1] = pat;
6087 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6088 break;
6089 }
6090 }
6091
6092 /* Output a return. */
6093
6094 const char *
6095 output_return (rtx_insn *insn)
6096 {
6097 if (crtl->calls_eh_return)
6098 {
6099 /* If the function uses __builtin_eh_return, the eh_return
6100 machinery occupies the delay slot. */
6101 gcc_assert (!final_sequence);
6102
6103 if (flag_delayed_branch)
6104 {
6105 if (!TARGET_FLAT && TARGET_V9)
6106 fputs ("\treturn\t%i7+8\n", asm_out_file);
6107 else
6108 {
6109 if (!TARGET_FLAT)
6110 fputs ("\trestore\n", asm_out_file);
6111
6112 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6113 }
6114
6115 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6116 }
6117 else
6118 {
6119 if (!TARGET_FLAT)
6120 fputs ("\trestore\n", asm_out_file);
6121
6122 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6123 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6124 }
6125 }
6126 else if (sparc_leaf_function_p || TARGET_FLAT)
6127 {
6128 /* This is a leaf or flat function so we don't have to bother restoring
6129 the register window, which frees us from dealing with the convoluted
6130 semantics of restore/return. We simply output the jump to the
6131 return address and the insn in the delay slot (if any). */
6132
6133 return "jmp\t%%o7+%)%#";
6134 }
6135 else
6136 {
6137 /* This is a regular function so we have to restore the register window.
6138 We may have a pending insn for the delay slot, which will be either
6139 combined with the 'restore' instruction or put in the delay slot of
6140 the 'return' instruction. */
6141
6142 if (final_sequence)
6143 {
6144 rtx delay, pat;
6145
6146 delay = NEXT_INSN (insn);
6147 gcc_assert (delay);
6148
6149 pat = PATTERN (delay);
6150
6151 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6152 {
6153 epilogue_renumber (&pat, 0);
6154 return "return\t%%i7+%)%#";
6155 }
6156 else
6157 {
6158 output_asm_insn ("jmp\t%%i7+%)", NULL);
6159 output_restore (pat);
6160 PATTERN (delay) = gen_blockage ();
6161 INSN_CODE (delay) = -1;
6162 }
6163 }
6164 else
6165 {
6166 /* The delay slot is empty. */
6167 if (TARGET_V9)
6168 return "return\t%%i7+%)\n\t nop";
6169 else if (flag_delayed_branch)
6170 return "jmp\t%%i7+%)\n\t restore";
6171 else
6172 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6173 }
6174 }
6175
6176 return "";
6177 }
6178
6179 /* Output a sibling call. */
6180
6181 const char *
6182 output_sibcall (rtx_insn *insn, rtx call_operand)
6183 {
6184 rtx operands[1];
6185
6186 gcc_assert (flag_delayed_branch);
6187
6188 operands[0] = call_operand;
6189
6190 if (sparc_leaf_function_p || TARGET_FLAT)
6191 {
6192 /* This is a leaf or flat function so we don't have to bother restoring
6193 the register window. We simply output the jump to the function and
6194 the insn in the delay slot (if any). */
6195
6196 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6197
6198 if (final_sequence)
6199 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6200 operands);
6201 else
6202 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6203 it into a branch if possible. */
6204 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6205 operands);
6206 }
6207 else
6208 {
6209 /* This is a regular function so we have to restore the register window.
6210 We may have a pending insn for the delay slot, which will be combined
6211 with the 'restore' instruction. */
6212
6213 output_asm_insn ("call\t%a0, 0", operands);
6214
6215 if (final_sequence)
6216 {
6217 rtx_insn *delay = NEXT_INSN (insn);
6218 gcc_assert (delay);
6219
6220 output_restore (PATTERN (delay));
6221
6222 PATTERN (delay) = gen_blockage ();
6223 INSN_CODE (delay) = -1;
6224 }
6225 else
6226 output_restore (NULL_RTX);
6227 }
6228
6229 return "";
6230 }
6231 \f
6232 /* Functions for handling argument passing.
6233
6234 For 32-bit, the first 6 args are normally in registers and the rest are
6235 pushed. Any arg that starts within the first 6 words is at least
6236 partially passed in a register unless its data type forbids it.
6237
6238 For 64-bit, the argument registers are laid out as an array of 16 elements
6239 and arguments are added sequentially. The first 6 int args and up to the
6240 first 16 fp args (depending on size) are passed in regs.
6241
6242 Slot Stack Integral Float Float in structure Double Long Double
6243 ---- ----- -------- ----- ------------------ ------ -----------
6244 15 [SP+248] %f31 %f30,%f31 %d30
6245 14 [SP+240] %f29 %f28,%f29 %d28 %q28
6246 13 [SP+232] %f27 %f26,%f27 %d26
6247 12 [SP+224] %f25 %f24,%f25 %d24 %q24
6248 11 [SP+216] %f23 %f22,%f23 %d22
6249 10 [SP+208] %f21 %f20,%f21 %d20 %q20
6250 9 [SP+200] %f19 %f18,%f19 %d18
6251 8 [SP+192] %f17 %f16,%f17 %d16 %q16
6252 7 [SP+184] %f15 %f14,%f15 %d14
6253 6 [SP+176] %f13 %f12,%f13 %d12 %q12
6254 5 [SP+168] %o5 %f11 %f10,%f11 %d10
6255 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
6256 3 [SP+152] %o3 %f7 %f6,%f7 %d6
6257 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
6258 1 [SP+136] %o1 %f3 %f2,%f3 %d2
6259 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
6260
6261 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
6262
6263 Integral arguments are always passed as 64-bit quantities appropriately
6264 extended.
6265
6266 Passing of floating point values is handled as follows.
6267 If a prototype is in scope:
6268 If the value is in a named argument (i.e. not a stdarg function or a
6269 value not part of the `...') then the value is passed in the appropriate
6270 fp reg.
6271 If the value is part of the `...' and is passed in one of the first 6
6272 slots then the value is passed in the appropriate int reg.
6273 If the value is part of the `...' and is not passed in one of the first 6
6274 slots then the value is passed in memory.
6275 If a prototype is not in scope:
6276 If the value is one of the first 6 arguments the value is passed in the
6277 appropriate integer reg and the appropriate fp reg.
6278 If the value is not one of the first 6 arguments the value is passed in
6279 the appropriate fp reg and in memory.
6280
6281
6282 Summary of the calling conventions implemented by GCC on the SPARC:
6283
6284 32-bit ABI:
6285 size argument return value
6286
6287 small integer <4 int. reg. int. reg.
6288 word 4 int. reg. int. reg.
6289 double word 8 int. reg. int. reg.
6290
6291 _Complex small integer <8 int. reg. int. reg.
6292 _Complex word 8 int. reg. int. reg.
6293 _Complex double word 16 memory int. reg.
6294
6295 vector integer <=8 int. reg. FP reg.
6296 vector integer >8 memory memory
6297
6298 float 4 int. reg. FP reg.
6299 double 8 int. reg. FP reg.
6300 long double 16 memory memory
6301
6302 _Complex float 8 memory FP reg.
6303 _Complex double 16 memory FP reg.
6304 _Complex long double 32 memory FP reg.
6305
6306 vector float any memory memory
6307
6308 aggregate any memory memory
6309
6310
6311
6312 64-bit ABI:
6313 size argument return value
6314
6315 small integer <8 int. reg. int. reg.
6316 word 8 int. reg. int. reg.
6317 double word 16 int. reg. int. reg.
6318
6319 _Complex small integer <16 int. reg. int. reg.
6320 _Complex word 16 int. reg. int. reg.
6321 _Complex double word 32 memory int. reg.
6322
6323 vector integer <=16 FP reg. FP reg.
6324 vector integer 16<s<=32 memory FP reg.
6325 vector integer >32 memory memory
6326
6327 float 4 FP reg. FP reg.
6328 double 8 FP reg. FP reg.
6329 long double 16 FP reg. FP reg.
6330
6331 _Complex float 8 FP reg. FP reg.
6332 _Complex double 16 FP reg. FP reg.
6333 _Complex long double 32 memory FP reg.
6334
6335 vector float <=16 FP reg. FP reg.
6336 vector float 16<s<=32 memory FP reg.
6337 vector float >32 memory memory
6338
6339 aggregate <=16 reg. reg.
6340 aggregate 16<s<=32 memory reg.
6341 aggregate >32 memory memory
6342
6343
6344
6345 Note #1: complex floating-point types follow the extended SPARC ABIs as
6346 implemented by the Sun compiler.
6347
6348 Note #2: integral vector types follow the scalar floating-point types
6349 conventions to match what is implemented by the Sun VIS SDK.
6350
6351 Note #3: floating-point vector types follow the aggregate types
6352 conventions. */
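
Editorial example, not part of the original source: applying the 64-bit table and rules above to a hypothetical prototyped call. The struct type and argument names are made up for illustration.

/* struct S { float x; float y; };
   void f (int a, double b, struct S c, long d);

   slot 0: a -> %o0         (integral argument, extended to 64 bits)
   slot 1: b -> %d2         (named FP argument)
   slot 2: c -> %f4, %f5    (floats of a small record go in FP registers)
   slot 3: d -> %o3

   Had f not been prototyped, b would be passed in both %o1 and %d2, as
   described in the "If a prototype is not in scope" rules above.  */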
6353
6354
6355 /* Maximum number of int regs for args. */
6356 #define SPARC_INT_ARG_MAX 6
6357 /* Maximum number of fp regs for args. */
6358 #define SPARC_FP_ARG_MAX 16
6359 /* Number of words (partially) occupied for a given size in units. */
6360 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
6361
6362 /* Handle the INIT_CUMULATIVE_ARGS macro.
6363 Initialize a variable CUM of type CUMULATIVE_ARGS
6364 for a call to a function whose data type is FNTYPE.
6365 For a library call, FNTYPE is 0. */
6366
6367 void
6368 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6369 {
6370 cum->words = 0;
6371 cum->prototype_p = fntype && prototype_p (fntype);
6372 cum->libcall_p = !fntype;
6373 }
6374
6375 /* Handle promotion of pointer and integer arguments. */
6376
6377 static machine_mode
6378 sparc_promote_function_mode (const_tree type, machine_mode mode,
6379 int *punsignedp, const_tree, int)
6380 {
6381 if (type && POINTER_TYPE_P (type))
6382 {
6383 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6384 return Pmode;
6385 }
6386
6387 /* Integral arguments are passed as full words, as per the ABI. */
6388 if (GET_MODE_CLASS (mode) == MODE_INT
6389 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6390 return word_mode;
6391
6392 return mode;
6393 }
6394
6395 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6396
6397 static bool
6398 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6399 {
6400 return TARGET_ARCH64 ? true : false;
6401 }
6402
6403 /* Traverse the record TYPE recursively and call FUNC on its fields.
6404 NAMED is true if this is for a named parameter. DATA is passed
6405 to FUNC for each field. OFFSET is the starting position and
6406 PACKED is true if we are inside a packed record. */
6407
6408 template <typename T, void Func (const_tree, HOST_WIDE_INT, bool, T*)>
6409 static void
6410 traverse_record_type (const_tree type, bool named, T *data,
6411 HOST_WIDE_INT offset = 0, bool packed = false)
6412 {
6413 /* The ABI obviously doesn't specify how packed structures are passed.
6414 These are passed in integer regs if possible, otherwise memory. */
6415 if (!packed)
6416 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6417 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6418 {
6419 packed = true;
6420 break;
6421 }
6422
6423 /* Walk the real fields, but skip those with no size or a zero size.
6424 ??? Fields with variable offset are handled as having zero offset. */
6425 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6426 if (TREE_CODE (field) == FIELD_DECL)
6427 {
6428 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6429 continue;
6430
6431 HOST_WIDE_INT bitpos = offset;
6432 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6433 bitpos += int_bit_position (field);
6434
6435 tree field_type = TREE_TYPE (field);
6436 if (TREE_CODE (field_type) == RECORD_TYPE)
6437 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6438 packed);
6439 else
6440 {
6441 const bool fp_type
6442 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6443 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6444 data);
6445 }
6446 }
6447 }
6448
6449 /* Handle recursive register classifying for structure layout. */
6450
6451 typedef struct
6452 {
6453 bool fp_regs; /* true if field eligible for FP registers. */
6454 bool fp_regs_in_first_word; /* true if such a field is in the first word. */
6455 } classify_data_t;
6456
6457 /* A subroutine of function_arg_slotno. Classify the field. */
6458
6459 inline void
6460 classify_registers (const_tree, HOST_WIDE_INT bitpos, bool fp,
6461 classify_data_t *data)
6462 {
6463 if (fp)
6464 {
6465 data->fp_regs = true;
6466 if (bitpos < BITS_PER_WORD)
6467 data->fp_regs_in_first_word = true;
6468 }
6469 }
6470
6471 /* Compute the slot number to pass an argument in.
6472 Return the slot number or -1 if passing on the stack.
6473
6474 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6475 the preceding args and about the function being called.
6476 MODE is the argument's machine mode.
6477 TYPE is the data type of the argument (as a tree).
6478 This is null for libcalls where that information may
6479 not be available.
6480 NAMED is nonzero if this argument is a named parameter
6481 (otherwise it is an extra parameter matching an ellipsis).
6482 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6483 *PREGNO records the register number to use if scalar type.
6484 *PPADDING records the amount of padding needed in words. */
6485
6486 static int
6487 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6488 const_tree type, bool named, bool incoming,
6489 int *pregno, int *ppadding)
6490 {
6491 int regbase = (incoming
6492 ? SPARC_INCOMING_INT_ARG_FIRST
6493 : SPARC_OUTGOING_INT_ARG_FIRST);
6494 int slotno = cum->words;
6495 enum mode_class mclass;
6496 int regno;
6497
6498 *ppadding = 0;
6499
6500 if (type && TREE_ADDRESSABLE (type))
6501 return -1;
6502
6503 if (TARGET_ARCH32
6504 && mode == BLKmode
6505 && type
6506 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
6507 return -1;
6508
6509 /* For SPARC64, objects requiring 16-byte alignment get it. */
6510 if (TARGET_ARCH64
6511 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6512 && (slotno & 1) != 0)
6513 slotno++, *ppadding = 1;
6514
6515 mclass = GET_MODE_CLASS (mode);
6516 if (type && TREE_CODE (type) == VECTOR_TYPE)
6517 {
6518 /* Vector types deserve special treatment because they are
6519 polymorphic wrt their mode, depending upon whether VIS
6520 instructions are enabled. */
6521 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6522 {
6523 /* The SPARC port defines no floating-point vector modes. */
6524 gcc_assert (mode == BLKmode);
6525 }
6526 else
6527 {
6528 /* Integral vector types should either have a vector
6529 mode or an integral mode, because we are guaranteed
6530 by pass_by_reference that their size is not greater
6531 than 16 bytes and TImode is 16-byte wide. */
6532 gcc_assert (mode != BLKmode);
6533
6534 /* Vector integers are handled like floats according to
6535 the Sun VIS SDK. */
6536 mclass = MODE_FLOAT;
6537 }
6538 }
6539
6540 switch (mclass)
6541 {
6542 case MODE_FLOAT:
6543 case MODE_COMPLEX_FLOAT:
6544 case MODE_VECTOR_INT:
6545 if (TARGET_ARCH64 && TARGET_FPU && named)
6546 {
6547 /* If all arg slots are filled, then must pass on stack. */
6548 if (slotno >= SPARC_FP_ARG_MAX)
6549 return -1;
6550
6551 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6552 /* Arguments filling only a single FP register are
6553 right-justified in the outer double FP register. */
6554 if (GET_MODE_SIZE (mode) <= 4)
6555 regno++;
6556 break;
6557 }
6558 /* fallthrough */
6559
6560 case MODE_INT:
6561 case MODE_COMPLEX_INT:
6562 /* If all arg slots are filled, then must pass on stack. */
6563 if (slotno >= SPARC_INT_ARG_MAX)
6564 return -1;
6565
6566 regno = regbase + slotno;
6567 break;
6568
6569 case MODE_RANDOM:
6570 if (mode == VOIDmode)
6571 /* MODE is VOIDmode when generating the actual call. */
6572 return -1;
6573
6574 gcc_assert (mode == BLKmode);
6575
6576 if (TARGET_ARCH32
6577 || !type
6578 || (TREE_CODE (type) != RECORD_TYPE
6579 && TREE_CODE (type) != VECTOR_TYPE))
6580 {
6581 /* If all arg slots are filled, then must pass on stack. */
6582 if (slotno >= SPARC_INT_ARG_MAX)
6583 return -1;
6584
6585 regno = regbase + slotno;
6586 }
6587 else /* TARGET_ARCH64 && type */
6588 {
6589 /* If all arg slots are filled, then must pass on stack. */
6590 if (slotno >= SPARC_FP_ARG_MAX)
6591 return -1;
6592
6593 if (TREE_CODE (type) == RECORD_TYPE)
6594 {
6595 classify_data_t data = { false, false };
6596 traverse_record_type<classify_data_t, classify_registers>
6597 (type, named, &data);
6598
6599 if (data.fp_regs)
6600 {
6601 /* If all FP slots are filled except for the last one and
6602 there is no FP field in the first word, then must pass
6603 on stack. */
6604 if (slotno >= SPARC_FP_ARG_MAX - 1
6605 && !data.fp_regs_in_first_word)
6606 return -1;
6607 }
6608 else
6609 {
6610 /* If all int slots are filled, then must pass on stack. */
6611 if (slotno >= SPARC_INT_ARG_MAX)
6612 return -1;
6613 }
6614 }
6615
6616 /* PREGNO isn't set since both int and FP regs can be used. */
6617 return slotno;
6618 }
6619 break;
6620
6621 default :
6622 gcc_unreachable ();
6623 }
6624
6625 *pregno = regno;
6626 return slotno;
6627 }
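
Editorial example, not part of the original source: the 16-byte alignment bump above in action, for a hypothetical prototype.

/* void f (int a, long double b);   64-bit ABI, TFmode is 16-byte aligned

   a takes slot 0 (%o0).  b would start at slot 1, which is odd, so slotno
   is bumped to 2 and *ppadding is set to 1; b is then passed in %q4, in
   agreement with the Long Double column of the table further up.  */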
6628
6629 /* Handle recursive register counting/assigning for structure layout. */
6630
6631 typedef struct
6632 {
6633 int slotno; /* slot number of the argument. */
6634 int regbase; /* regno of the base register. */
6635 int intoffset; /* offset of the first pending integer field. */
6636 int nregs; /* number of words passed in registers. */
6637 bool stack; /* true if part of the argument is on the stack. */
6638 rtx ret; /* return expression being built. */
6639 } assign_data_t;
6640
6641 /* A subroutine of function_arg_record_value. Compute the number of integer
6642 registers to be assigned between PARMS->intoffset and BITPOS. Return
6643 true if at least one integer register is assigned or false otherwise. */
6644
6645 static bool
6646 compute_int_layout (HOST_WIDE_INT bitpos, assign_data_t *data, int *pnregs)
6647 {
6648 if (data->intoffset < 0)
6649 return false;
6650
6651 const int intoffset = data->intoffset;
6652 data->intoffset = -1;
6653
6654 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6655 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
6656 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
6657 int nregs = (endbit - startbit) / BITS_PER_WORD;
6658
6659 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
6660 {
6661 nregs = SPARC_INT_ARG_MAX - this_slotno;
6662
6663 /* We need to pass this field (partly) on the stack. */
6664 data->stack = 1;
6665 }
6666
6667 if (nregs <= 0)
6668 return false;
6669
6670 *pnregs = nregs;
6671 return true;
6672 }
6673
6674 /* A subroutine of function_arg_record_value. Compute the number and the mode
6675 of the FP registers to be assigned for FIELD. Return true if at least one
6676 FP register is assigned or false otherwise. */
6677
6678 static bool
6679 compute_fp_layout (const_tree field, HOST_WIDE_INT bitpos,
6680 assign_data_t *data,
6681 int *pnregs, machine_mode *pmode)
6682 {
6683 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
6684 machine_mode mode = DECL_MODE (field);
6685 int nregs, nslots;
6686
6687 /* Slots are counted as words while regs are counted as having the size of
6688 the (inner) mode. */
6689 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE && mode == BLKmode)
6690 {
6691 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6692 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6693 }
6694 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6695 {
6696 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6697 nregs = 2;
6698 }
6699 else
6700 nregs = 1;
6701
6702 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
6703
6704 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
6705 {
6706 nslots = SPARC_FP_ARG_MAX - this_slotno;
6707 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
6708
6709 /* We need to pass this field (partly) on the stack. */
6710 data->stack = 1;
6711
6712 if (nregs <= 0)
6713 return false;
6714 }
6715
6716 *pnregs = nregs;
6717 *pmode = mode;
6718 return true;
6719 }
6720
6721 /* A subroutine of function_arg_record_value. Count the number of registers
6722 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
6723
6724 inline void
6725 count_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
6726 assign_data_t *data)
6727 {
6728 if (fp)
6729 {
6730 int nregs;
6731 machine_mode mode;
6732
6733 if (compute_int_layout (bitpos, data, &nregs))
6734 data->nregs += nregs;
6735
6736 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
6737 data->nregs += nregs;
6738 }
6739 else
6740 {
6741 if (data->intoffset < 0)
6742 data->intoffset = bitpos;
6743 }
6744 }
6745
6746 /* A subroutine of function_arg_record_value. Assign the bits of the
6747 structure between PARMS->intoffset and BITPOS to integer registers. */
6748
6749 static void
6750 assign_int_registers (HOST_WIDE_INT bitpos, assign_data_t *data)
6751 {
6752 int intoffset = data->intoffset;
6753 machine_mode mode;
6754 int nregs;
6755
6756 if (!compute_int_layout (bitpos, data, &nregs))
6757 return;
6758
6759 /* If this is the trailing part of a word, only load that much into
6760 the register. Otherwise load the whole register. Note that in
6761 the latter case we may pick up unwanted bits. It's not a problem
6762 at the moment but we may wish to revisit this. */
6763 if (intoffset % BITS_PER_WORD != 0)
6764 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
6765 MODE_INT);
6766 else
6767 mode = word_mode;
6768
6769 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6770 unsigned int regno = data->regbase + this_slotno;
6771 intoffset /= BITS_PER_UNIT;
6772
6773 do
6774 {
6775 rtx reg = gen_rtx_REG (mode, regno);
6776 XVECEXP (data->ret, 0, data->stack + data->nregs)
6777 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6778 data->nregs += 1;
6779 mode = word_mode;
6780 regno += 1;
6781 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
6782 }
6783 while (--nregs > 0);
6784 }
6785
6786 /* A subroutine of function_arg_record_value. Assign FIELD at position
6787 BITPOS to FP registers. */
6788
6789 static void
6790 assign_fp_registers (const_tree field, HOST_WIDE_INT bitpos,
6791 assign_data_t *data)
6792 {
6793 int nregs;
6794 machine_mode mode;
6795
6796 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
6797 return;
6798
6799 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
6800 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6801 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6802 regno++;
6803 int pos = bitpos / BITS_PER_UNIT;
6804
6805 do
6806 {
6807 rtx reg = gen_rtx_REG (mode, regno);
6808 XVECEXP (data->ret, 0, data->stack + data->nregs)
6809 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6810 data->nregs += 1;
6811 regno += GET_MODE_SIZE (mode) / 4;
6812 pos += GET_MODE_SIZE (mode);
6813 }
6814 while (--nregs > 0);
6815 }
6816
6817 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
6818 the structure between PARMS->intoffset and BITPOS to registers. */
6819
6820 inline void
6821 assign_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
6822 assign_data_t *data)
6823 {
6824 if (fp)
6825 {
6826 assign_int_registers (bitpos, data);
6827
6828 assign_fp_registers (field, bitpos, data);
6829 }
6830 else
6831 {
6832 if (data->intoffset < 0)
6833 data->intoffset = bitpos;
6834 }
6835 }
6836
6837 /* Used by function_arg and sparc_function_value_1 to implement the complex
6838 conventions of the 64-bit ABI for passing and returning structures.
6839 Return an expression valid as a return value for the FUNCTION_ARG
6840 and TARGET_FUNCTION_VALUE.
6841
6842 TYPE is the data type of the argument (as a tree).
6843 This is null for libcalls where that information may
6844 not be available.
6845 MODE is the argument's machine mode.
6846 SLOTNO is the index number of the argument's slot in the parameter array.
6847 NAMED is true if this argument is a named parameter
6848 (otherwise it is an extra parameter matching an ellipsis).
6849 REGBASE is the regno of the base register for the parameter array. */
6850
6851 static rtx
6852 function_arg_record_value (const_tree type, machine_mode mode,
6853 int slotno, bool named, int regbase)
6854 {
6855 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6856 assign_data_t data;
6857 int nregs;
6858
6859 data.slotno = slotno;
6860 data.regbase = regbase;
6861
6862 /* Count how many registers we need. */
6863 data.nregs = 0;
6864 data.intoffset = 0;
6865 data.stack = false;
6866 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
6867
6868 /* Take into account pending integer fields. */
6869 if (compute_int_layout (typesize * BITS_PER_UNIT, &data, &nregs))
6870 data.nregs += nregs;
6871
6872 /* Allocate the vector and handle some annoying special cases. */
6873 nregs = data.nregs;
6874
6875 if (nregs == 0)
6876 {
6877 /* ??? Empty structure has no value? Duh? */
6878 if (typesize <= 0)
6879 {
6880 /* Though there's nothing really to store, return a word register
6881 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6882 leads to breakage due to the fact that there are zero bytes to
6883 load. */
6884 return gen_rtx_REG (mode, regbase);
6885 }
6886
6887 /* ??? C++ has structures with no fields, and yet a size. Give up
6888 for now and pass everything back in integer registers. */
6889 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6890 if (nregs + slotno > SPARC_INT_ARG_MAX)
6891 nregs = SPARC_INT_ARG_MAX - slotno;
6892 }
6893
6894 gcc_assert (nregs > 0);
6895
6896 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
6897
6898 /* If at least one field must be passed on the stack, generate
6899 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6900 also be passed on the stack. We can't do much better because the
6901 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6902 of structures for which the fields passed exclusively in registers
6903 are not at the beginning of the structure. */
6904 if (data.stack)
6905 XVECEXP (data.ret, 0, 0)
6906 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6907
6908 /* Assign the registers. */
6909 data.nregs = 0;
6910 data.intoffset = 0;
6911 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
6912
6913 /* Assign pending integer fields. */
6914 assign_int_registers (typesize * BITS_PER_UNIT, &data);
6915
6916 gcc_assert (data.nregs == nregs);
6917
6918 return data.ret;
6919 }
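
Editorial example, not part of the original source: the shape of the PARALLEL built above for a hypothetical 16-byte record passed as an outgoing, prototyped, named argument in slot 0 (register names shown instead of raw register numbers).

/* struct S { double d; long l; };

   (parallel [(expr_list (reg:DF %d0) (const_int 0))
              (expr_list (reg:DI %o1) (const_int 8))])

   The FP field travels in the first FP argument register and the integer
   field in the second integer argument register, at byte offset 8 within
   the record.  If any field had spilled to the stack, a leading
   (expr_list (nil) (const_int 0)) entry would be added as described above. */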
6920
6921 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6922 of the 64-bit ABI for passing and returning unions.
6923 Return an expression valid as a return value for the FUNCTION_ARG
6924 and TARGET_FUNCTION_VALUE.
6925
6926 SIZE is the size in bytes of the union.
6927 MODE is the argument's machine mode.
6928 REGNO is the hard register the union will be passed in. */
6929
6930 static rtx
6931 function_arg_union_value (int size, machine_mode mode, int slotno,
6932 int regno)
6933 {
6934 int nwords = CEIL_NWORDS (size), i;
6935 rtx regs;
6936
6937 /* See comment in previous function for empty structures. */
6938 if (nwords == 0)
6939 return gen_rtx_REG (mode, regno);
6940
6941 if (slotno == SPARC_INT_ARG_MAX - 1)
6942 nwords = 1;
6943
6944 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6945
6946 for (i = 0; i < nwords; i++)
6947 {
6948 /* Unions are passed left-justified. */
6949 XVECEXP (regs, 0, i)
6950 = gen_rtx_EXPR_LIST (VOIDmode,
6951 gen_rtx_REG (word_mode, regno),
6952 GEN_INT (UNITS_PER_WORD * i));
6953 regno++;
6954 }
6955
6956 return regs;
6957 }
6958
6959 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6960 for passing and returning BLKmode vectors.
6961 Return an expression valid as a return value for the FUNCTION_ARG
6962 and TARGET_FUNCTION_VALUE.
6963
6964 SIZE is the size in bytes of the vector.
6965 REGNO is the FP hard register the vector will be passed in. */
6966
6967 static rtx
6968 function_arg_vector_value (int size, int regno)
6969 {
6970 const int nregs = MAX (1, size / 8);
6971 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6972
6973 if (size < 8)
6974 XVECEXP (regs, 0, 0)
6975 = gen_rtx_EXPR_LIST (VOIDmode,
6976 gen_rtx_REG (SImode, regno),
6977 const0_rtx);
6978 else
6979 for (int i = 0; i < nregs; i++)
6980 XVECEXP (regs, 0, i)
6981 = gen_rtx_EXPR_LIST (VOIDmode,
6982 gen_rtx_REG (DImode, regno + 2*i),
6983 GEN_INT (i*8));
6984
6985 return regs;
6986 }
6987
6988 /* Determine where to put an argument to a function.
6989 Value is zero to push the argument on the stack,
6990 or a hard register in which to store the argument.
6991
6992 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6993 the preceding args and about the function being called.
6994 MODE is the argument's machine mode.
6995 TYPE is the data type of the argument (as a tree).
6996 This is null for libcalls where that information may
6997 not be available.
6998 NAMED is true if this argument is a named parameter
6999 (otherwise it is an extra parameter matching an ellipsis).
7000 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7001 TARGET_FUNCTION_INCOMING_ARG. */
7002
7003 static rtx
7004 sparc_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
7005 const_tree type, bool named, bool incoming)
7006 {
7007 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7008
7009 int regbase = (incoming
7010 ? SPARC_INCOMING_INT_ARG_FIRST
7011 : SPARC_OUTGOING_INT_ARG_FIRST);
7012 int slotno, regno, padding;
7013 enum mode_class mclass = GET_MODE_CLASS (mode);
7014
7015 slotno = function_arg_slotno (cum, mode, type, named, incoming,
7016 &regno, &padding);
7017 if (slotno == -1)
7018 return 0;
7019
7020 /* Vector types deserve special treatment because they are polymorphic wrt
7021 their mode, depending upon whether VIS instructions are enabled. */
7022 if (type && TREE_CODE (type) == VECTOR_TYPE)
7023 {
7024 HOST_WIDE_INT size = int_size_in_bytes (type);
7025 gcc_assert ((TARGET_ARCH32 && size <= 8)
7026 || (TARGET_ARCH64 && size <= 16));
7027
7028 if (mode == BLKmode)
7029 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST + 2*slotno);
7030
7031 mclass = MODE_FLOAT;
7032 }
7033
7034 if (TARGET_ARCH32)
7035 return gen_rtx_REG (mode, regno);
7036
7037 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7038 and are promoted to registers if possible. */
7039 if (type && TREE_CODE (type) == RECORD_TYPE)
7040 {
7041 HOST_WIDE_INT size = int_size_in_bytes (type);
7042 gcc_assert (size <= 16);
7043
7044 return function_arg_record_value (type, mode, slotno, named, regbase);
7045 }
7046
7047 /* Unions up to 16 bytes in size are passed in integer registers. */
7048 else if (type && TREE_CODE (type) == UNION_TYPE)
7049 {
7050 HOST_WIDE_INT size = int_size_in_bytes (type);
7051 gcc_assert (size <= 16);
7052
7053 return function_arg_union_value (size, mode, slotno, regno);
7054 }
7055
7056 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
7057 but also have the slot allocated for them.
7058 If no prototype is in scope, fp values in register slots get passed
7059 in two places, either fp regs and int regs or fp regs and memory. */
7060 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7061 && SPARC_FP_REG_P (regno))
7062 {
7063 rtx reg = gen_rtx_REG (mode, regno);
7064 if (cum->prototype_p || cum->libcall_p)
7065 return reg;
7066 else
7067 {
7068 rtx v0, v1;
7069
7070 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7071 {
7072 int intreg;
7073
7074 /* On incoming, we don't need to know that the value
7075 is passed in %f0 and %i0, and it confuses other parts
7076 causing needless spillage even in the simplest cases. */
7077 if (incoming)
7078 return reg;
7079
7080 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7081 + (regno - SPARC_FP_ARG_FIRST) / 2);
7082
7083 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7084 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7085 const0_rtx);
7086 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7087 }
7088 else
7089 {
7090 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7091 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7092 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7093 }
7094 }
7095 }
7096
7097 /* All other aggregate types are passed in an integer register in a mode
7098 corresponding to the size of the type. */
7099 else if (type && AGGREGATE_TYPE_P (type))
7100 {
7101 HOST_WIDE_INT size = int_size_in_bytes (type);
7102 gcc_assert (size <= 16);
7103
7104 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7105 }
7106
7107 return gen_rtx_REG (mode, regno);
7108 }
7109
7110 /* Handle the TARGET_FUNCTION_ARG target hook. */
7111
7112 static rtx
7113 sparc_function_arg (cumulative_args_t cum, machine_mode mode,
7114 const_tree type, bool named)
7115 {
7116 return sparc_function_arg_1 (cum, mode, type, named, false);
7117 }
7118
7119 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7120
7121 static rtx
7122 sparc_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
7123 const_tree type, bool named)
7124 {
7125 return sparc_function_arg_1 (cum, mode, type, named, true);
7126 }
7127
7128 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
7129
7130 static unsigned int
7131 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7132 {
7133 return ((TARGET_ARCH64
7134 && (GET_MODE_ALIGNMENT (mode) == 128
7135 || (type && TYPE_ALIGN (type) == 128)))
7136 ? 128
7137 : PARM_BOUNDARY);
7138 }
7139
7140 /* For an arg passed partly in registers and partly in memory,
7141 this is the number of bytes of registers used.
7142 For args passed entirely in registers or entirely in memory, zero.
7143
7144 Any arg that starts in the first 6 regs but won't entirely fit in them
7145 needs partial registers on v8. On v9, structures with integer
7146 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7147 values that begin in the last fp reg [where "last fp reg" varies with the
7148 mode] will be split between that reg and memory. */
7149
7150 static int
7151 sparc_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
7152 tree type, bool named)
7153 {
7154 int slotno, regno, padding;
7155
7156 /* We pass false for incoming here; it doesn't matter. */
7157 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
7158 false, &regno, &padding);
7159
7160 if (slotno == -1)
7161 return 0;
7162
7163 if (TARGET_ARCH32)
7164 {
7165 if ((slotno + (mode == BLKmode
7166 ? CEIL_NWORDS (int_size_in_bytes (type))
7167 : CEIL_NWORDS (GET_MODE_SIZE (mode))))
7168 > SPARC_INT_ARG_MAX)
7169 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
7170 }
7171 else
7172 {
7173 /* We are guaranteed by pass_by_reference that the size of the
7174 argument is not greater than 16 bytes, so we only need to return
7175 one word if the argument is partially passed in registers. */
7176
7177 if (type && AGGREGATE_TYPE_P (type))
7178 {
7179 int size = int_size_in_bytes (type);
7180
7181 if (size > UNITS_PER_WORD
7182 && (slotno == SPARC_INT_ARG_MAX - 1
7183 || slotno == SPARC_FP_ARG_MAX - 1))
7184 return UNITS_PER_WORD;
7185 }
7186 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
7187 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7188 && ! (TARGET_FPU && named)))
7189 {
7190 /* The complex types are passed as packed types. */
7191 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7192 && slotno == SPARC_INT_ARG_MAX - 1)
7193 return UNITS_PER_WORD;
7194 }
7195 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7196 {
7197 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
7198 > SPARC_FP_ARG_MAX)
7199 return UNITS_PER_WORD;
7200 }
7201 }
7202
7203 return 0;
7204 }
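
Editorial example, not part of the original source: under the 64-bit ABI, a hypothetical 16-byte aggregate whose first word lands in the last integer slot is the typical case where this hook returns a nonzero value.

/* struct S { long a; long b; };   16 bytes
   void f (long x0, long x1, long x2, long x3, long x4, struct S s);

   s starts in slot 5, so size > UNITS_PER_WORD and slotno is
   SPARC_INT_ARG_MAX - 1: the function returns UNITS_PER_WORD (8).  The
   first half of s is passed in %o5 and the second half at [SP+176],
   matching the slot table earlier in this file.  */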
7205
7206 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
7207 Specify whether to pass the argument by reference. */
7208
7209 static bool
7210 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
7211 machine_mode mode, const_tree type,
7212 bool named ATTRIBUTE_UNUSED)
7213 {
7214 if (TARGET_ARCH32)
7215 /* Original SPARC 32-bit ABI says that structures and unions,
7216 and quad-precision floats are passed by reference. For Pascal,
7217 also pass arrays by reference. All other base types are passed
7218 in registers.
7219
7220 Extended ABI (as implemented by the Sun compiler) says that all
7221 complex floats are passed by reference. Pass complex integers
7222 in registers up to 8 bytes. More generally, enforce the 2-word
7223 cap for passing arguments in registers.
7224
7225 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7226 integers are passed like floats of the same size, that is in
7227 registers up to 8 bytes. Pass all vector floats by reference
7228 like structure and unions. */
7229 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7230 || mode == SCmode
7231 /* Catch CDImode, TFmode, DCmode and TCmode. */
7232 || GET_MODE_SIZE (mode) > 8
7233 || (type
7234 && TREE_CODE (type) == VECTOR_TYPE
7235 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7236 else
7237 /* Original SPARC 64-bit ABI says that structures and unions
7238 smaller than 16 bytes are passed in registers, as well as
7239 all other base types.
7240
7241 Extended ABI (as implemented by the Sun compiler) says that
7242 complex floats are passed in registers up to 16 bytes. Pass
7243 all complex integers in registers up to 16 bytes. More generally,
7244 enforce the 2-word cap for passing arguments in registers.
7245
7246 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7247 integers are passed like floats of the same size, that is in
7248 registers (up to 16 bytes). Pass all vector floats like structure
7249 and unions. */
7250 return ((type
7251 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
7252 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
7253 /* Catch CTImode and TCmode. */
7254 || GET_MODE_SIZE (mode) > 16);
7255 }
7256
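/* For illustration, assuming the usual type/mode mappings, a few common
   C types and how the checks above classify them:

     32-bit ABI:
       struct { int a, b; }      passed by reference (aggregate)
       _Complex float            passed by reference (SCmode)
       128-bit long double       passed by reference (TFmode, 16 > 8 bytes)
       long long                 passed directly (8 bytes, none of the above)

     64-bit ABI:
       struct { int a[3]; }      passed directly (aggregate of 12 <= 16 bytes)
       struct { int a[8]; }      passed by reference (aggregate of 32 > 16 bytes)
       128-bit long double       passed directly (TFmode, exactly 16 bytes)
       _Complex long double      passed by reference (TCmode, 32 > 16 bytes)  */
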
7257 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7258 Update the data in CUM to advance over an argument
7259 of mode MODE and data type TYPE.
7260 TYPE is null for libcalls where that information may not be available. */
7261
7262 static void
7263 sparc_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
7264 const_tree type, bool named)
7265 {
7266 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7267 int regno, padding;
7268
7269 /* We pass false for incoming here; it doesn't matter. */
7270 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7271
7272 /* If argument requires leading padding, add it. */
7273 cum->words += padding;
7274
7275 if (TARGET_ARCH32)
7276 cum->words += (mode == BLKmode
7277 ? CEIL_NWORDS (int_size_in_bytes (type))
7278 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7279 else
7280 {
7281 if (type && AGGREGATE_TYPE_P (type))
7282 {
7283 int size = int_size_in_bytes (type);
7284
7285 if (size <= 8)
7286 ++cum->words;
7287 else if (size <= 16)
7288 cum->words += 2;
7289 else /* passed by reference */
7290 ++cum->words;
7291 }
7292 else
7293 cum->words += (mode == BLKmode
7294 ? CEIL_NWORDS (int_size_in_bytes (type))
7295 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7296 }
7297 }
7298
7299 /* Handle the FUNCTION_ARG_PADDING macro.
7300 For the 64-bit ABI, structs are always stored left-shifted in their
7301 argument slot. */
7302
7303 enum direction
7304 function_arg_padding (machine_mode mode, const_tree type)
7305 {
7306 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7307 return upward;
7308
7309 /* Fall back to the default. */
7310 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
7311 }
7312
7313 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7314 Specify whether to return the return value in memory. */
7315
7316 static bool
7317 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7318 {
7319 if (TARGET_ARCH32)
7320 /* Original SPARC 32-bit ABI says that structures and unions,
7321 and quad-precision floats are returned in memory. All other
7322 base types are returned in registers.
7323
7324 Extended ABI (as implemented by the Sun compiler) says that
7325 all complex floats are returned in registers (8 FP registers
7326 at most for '_Complex long double'). Return all complex integers
7327 in registers (4 at most for '_Complex long long').
7328
7329 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7330 integers are returned like floats of the same size, that is in
7331 registers up to 8 bytes and in memory otherwise. Return all
7332 vector floats in memory like structure and unions; note that
7333 they always have BLKmode like the latter. */
7334 return (TYPE_MODE (type) == BLKmode
7335 || TYPE_MODE (type) == TFmode
7336 || (TREE_CODE (type) == VECTOR_TYPE
7337 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7338 else
7339 /* Original SPARC 64-bit ABI says that structures and unions
7340 smaller than 32 bytes are returned in registers, as well as
7341 all other base types.
7342
7343 Extended ABI (as implemented by the Sun compiler) says that all
7344 complex floats are returned in registers (8 FP registers at most
7345 for '_Complex long double'). Return all complex integers in
7346 registers (4 at most for '_Complex TItype').
7347
7348 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7349 integers are returned like floats of the same size, that is in
7350 registers. Return all vector floats like structure and unions;
7351 note that they always have BLKmode like the latter. */
7352 return (TYPE_MODE (type) == BLKmode
7353 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
7354 }
7355
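/* For illustration, assuming the usual mode mappings: under the 32-bit ABI
   any struct or union (BLKmode) and a 128-bit 'long double' (TFmode) come
   back in memory, while '_Complex double' (DCmode, 16 bytes) is still
   returned in registers; under the 64-bit ABI only BLKmode objects larger
   than 32 bytes go to memory, so e.g. a 24-byte struct is returned in
   registers and a 40-byte struct in memory.  */
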
7356 /* Handle the TARGET_STRUCT_VALUE target hook.
7357 Return where to find the structure return value address. */
7358
7359 static rtx
7360 sparc_struct_value_rtx (tree fndecl, int incoming)
7361 {
7362 if (TARGET_ARCH64)
7363 return 0;
7364 else
7365 {
7366 rtx mem;
7367
7368 if (incoming)
7369 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7370 STRUCT_VALUE_OFFSET));
7371 else
7372 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7373 STRUCT_VALUE_OFFSET));
7374
7375 /* Only follow the SPARC ABI for fixed-size structure returns.
7376 Variable size structure returns are handled per the normal
7377 procedures in GCC. This is enabled by -mstd-struct-return. */
7378 if (incoming == 2
7379 && sparc_std_struct_return
7380 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7381 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7382 {
7383 /* We must check and adjust the return address, as it is optional
7384 as to whether the return object is really provided. */
7385 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7386 rtx scratch = gen_reg_rtx (SImode);
7387 rtx_code_label *endlab = gen_label_rtx ();
7388
7389 /* Calculate the return object size. */
7390 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7391 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7392 /* Construct a temporary return value. */
7393 rtx temp_val
7394 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7395
7396 /* Implement SPARC 32-bit psABI callee return struct checking:
7397
7398 Fetch the instruction where we will return to and see if
7399 it's an unimp instruction (the most significant 10 bits
7400 will be zero). */
7401 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7402 plus_constant (Pmode,
7403 ret_reg, 8)));
7404 /* Assume the size is valid and pre-adjust. */
7405 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7406 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7407 0, endlab);
7408 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7409 /* Write the address of the memory pointed to by temp_val into
7410 the memory pointed to by mem. */
7411 emit_move_insn (mem, XEXP (temp_val, 0));
7412 emit_label (endlab);
7413 }
7414
7415 return mem;
7416 }
7417 }
7418
7419 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7420 For v9, function return values are subject to the same rules as arguments,
7421 except that up to 32 bytes may be returned in registers. */
7422
7423 static rtx
7424 sparc_function_value_1 (const_tree type, machine_mode mode,
7425 bool outgoing)
7426 {
7427 /* Beware that the two values are swapped here wrt function_arg. */
7428 int regbase = (outgoing
7429 ? SPARC_INCOMING_INT_ARG_FIRST
7430 : SPARC_OUTGOING_INT_ARG_FIRST);
7431 enum mode_class mclass = GET_MODE_CLASS (mode);
7432 int regno;
7433
7434 /* Vector types deserve special treatment because they are polymorphic wrt
7435 their mode, depending upon whether VIS instructions are enabled. */
7436 if (type && TREE_CODE (type) == VECTOR_TYPE)
7437 {
7438 HOST_WIDE_INT size = int_size_in_bytes (type);
7439 gcc_assert ((TARGET_ARCH32 && size <= 8)
7440 || (TARGET_ARCH64 && size <= 32));
7441
7442 if (mode == BLKmode)
7443 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST);
7444
7445 mclass = MODE_FLOAT;
7446 }
7447
7448 if (TARGET_ARCH64 && type)
7449 {
7450 /* Structures up to 32 bytes in size are returned in registers. */
7451 if (TREE_CODE (type) == RECORD_TYPE)
7452 {
7453 HOST_WIDE_INT size = int_size_in_bytes (type);
7454 gcc_assert (size <= 32);
7455
7456 return function_arg_record_value (type, mode, 0, 1, regbase);
7457 }
7458
7459 /* Unions up to 32 bytes in size are returned in integer registers. */
7460 else if (TREE_CODE (type) == UNION_TYPE)
7461 {
7462 HOST_WIDE_INT size = int_size_in_bytes (type);
7463 gcc_assert (size <= 32);
7464
7465 return function_arg_union_value (size, mode, 0, regbase);
7466 }
7467
7468 /* Objects that require it are returned in FP registers. */
7469 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7470 ;
7471
7472 /* All other aggregate types are returned in an integer register in a
7473 mode corresponding to the size of the type. */
7474 else if (AGGREGATE_TYPE_P (type))
7475 {
7476 /* All other aggregate types are passed in an integer register
7477 in a mode corresponding to the size of the type. */
7478 HOST_WIDE_INT size = int_size_in_bytes (type);
7479 gcc_assert (size <= 32);
7480
7481 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7482
7483 /* ??? We probably should have made the same ABI change in
7484 3.4.0 as the one we made for unions. The latter was
7485 required by the SCD though, while the former is not
7486 specified, so we favored compatibility and efficiency.
7487
7488 Now we're stuck for aggregates larger than 16 bytes,
7489 because OImode vanished in the meantime. Let's not
7490 try to be unduly clever, and simply follow the ABI
7491 for unions in that case. */
7492 if (mode == BLKmode)
7493 return function_arg_union_value (size, mode, 0, regbase);
7494 else
7495 mclass = MODE_INT;
7496 }
7497
7498 /* We should only have pointer and integer types at this point. This
7499 must match sparc_promote_function_mode. */
7500 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7501 mode = word_mode;
7502 }
7503
7504 /* We should only have pointer and integer types at this point, except with
7505 -freg-struct-return. This must match sparc_promote_function_mode. */
7506 else if (TARGET_ARCH32
7507 && !(type && AGGREGATE_TYPE_P (type))
7508 && mclass == MODE_INT
7509 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7510 mode = word_mode;
7511
7512 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7513 regno = SPARC_FP_ARG_FIRST;
7514 else
7515 regno = regbase;
7516
7517 return gen_rtx_REG (mode, regno);
7518 }
7519
7520 /* Handle TARGET_FUNCTION_VALUE.
7521 On the SPARC, the value is found in the first "output" register, but the
7522 called function leaves it in the first "input" register. */
7523
7524 static rtx
7525 sparc_function_value (const_tree valtype,
7526 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7527 bool outgoing)
7528 {
7529 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7530 }
7531
7532 /* Handle TARGET_LIBCALL_VALUE. */
7533
7534 static rtx
7535 sparc_libcall_value (machine_mode mode,
7536 const_rtx fun ATTRIBUTE_UNUSED)
7537 {
7538 return sparc_function_value_1 (NULL_TREE, mode, false);
7539 }
7540
7541 /* Handle FUNCTION_VALUE_REGNO_P.
7542 On the SPARC, the first "output" reg is used for integer values, and the
7543 first floating point register is used for floating point values. */
7544
7545 static bool
7546 sparc_function_value_regno_p (const unsigned int regno)
7547 {
7548 return (regno == 8 || (TARGET_FPU && regno == 32));
7549 }
7550
7551 /* Do what is necessary for `va_start'. We look at the current function
7552 to determine if stdarg or varargs is used and return the address of
7553 the first unnamed parameter. */
7554
7555 static rtx
7556 sparc_builtin_saveregs (void)
7557 {
7558 int first_reg = crtl->args.info.words;
7559 rtx address;
7560 int regno;
7561
7562 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7563 emit_move_insn (gen_rtx_MEM (word_mode,
7564 gen_rtx_PLUS (Pmode,
7565 frame_pointer_rtx,
7566 GEN_INT (FIRST_PARM_OFFSET (0)
7567 + (UNITS_PER_WORD
7568 * regno)))),
7569 gen_rtx_REG (word_mode,
7570 SPARC_INCOMING_INT_ARG_FIRST + regno));
7571
7572 address = gen_rtx_PLUS (Pmode,
7573 frame_pointer_rtx,
7574 GEN_INT (FIRST_PARM_OFFSET (0)
7575 + UNITS_PER_WORD * first_reg));
7576
7577 return address;
7578 }
7579
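/* For example, if two argument words are already named
   (crtl->args.info.words == 2), the loop above spills the remaining
   incoming argument registers -- %i2 through %i5, assuming the usual
   SPARC_INT_ARG_MAX of 6 -- into their reserved stack slots, and the
   address returned points at the slot of the first unnamed word.  */
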
7580 /* Implement `va_start' for stdarg. */
7581
7582 static void
7583 sparc_va_start (tree valist, rtx nextarg)
7584 {
7585 nextarg = expand_builtin_saveregs ();
7586 std_expand_builtin_va_start (valist, nextarg);
7587 }
7588
7589 /* Implement `va_arg' for stdarg. */
7590
7591 static tree
7592 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7593 gimple_seq *post_p)
7594 {
7595 HOST_WIDE_INT size, rsize, align;
7596 tree addr, incr;
7597 bool indirect;
7598 tree ptrtype = build_pointer_type (type);
7599
7600 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7601 {
7602 indirect = true;
7603 size = rsize = UNITS_PER_WORD;
7604 align = 0;
7605 }
7606 else
7607 {
7608 indirect = false;
7609 size = int_size_in_bytes (type);
7610 rsize = ROUND_UP (size, UNITS_PER_WORD);
7611 align = 0;
7612
7613 if (TARGET_ARCH64)
7614 {
7615 /* For SPARC64, objects requiring 16-byte alignment get it. */
7616 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7617 align = 2 * UNITS_PER_WORD;
7618
7619 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7620 are left-justified in their slots. */
7621 if (AGGREGATE_TYPE_P (type))
7622 {
7623 if (size == 0)
7624 size = rsize = UNITS_PER_WORD;
7625 else
7626 size = rsize;
7627 }
7628 }
7629 }
7630
7631 incr = valist;
7632 if (align)
7633 {
7634 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7635 incr = fold_convert (sizetype, incr);
7636 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7637 size_int (-align));
7638 incr = fold_convert (ptr_type_node, incr);
7639 }
7640
7641 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7642 addr = incr;
7643
7644 if (BYTES_BIG_ENDIAN && size < rsize)
7645 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7646
7647 if (indirect)
7648 {
7649 addr = fold_convert (build_pointer_type (ptrtype), addr);
7650 addr = build_va_arg_indirect_ref (addr);
7651 }
7652
7653 /* If the address isn't aligned properly for the type, we need a temporary.
7654 FIXME: This is inefficient, usually we can do this in registers. */
7655 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7656 {
7657 tree tmp = create_tmp_var (type, "va_arg_tmp");
7658 tree dest_addr = build_fold_addr_expr (tmp);
7659 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7660 3, dest_addr, addr, size_int (rsize));
7661 TREE_ADDRESSABLE (tmp) = 1;
7662 gimplify_and_add (copy, pre_p);
7663 addr = dest_addr;
7664 }
7665
7666 else
7667 addr = fold_convert (ptrtype, addr);
7668
7669 incr = fold_build_pointer_plus_hwi (incr, rsize);
7670 gimplify_assign (valist, incr, post_p);
7671
7672 return build_va_arg_indirect_ref (addr);
7673 }
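
/* As a worked example of the logic above on TARGET_ARCH64: for a va_arg of
   a 16-byte type that requires 16-byte alignment, 'incr' is first rounded
   up to a multiple of 16, the value is read from that address (size equals
   rsize here, so no big-endian adjustment is made), and 'valist' is then
   advanced by rsize, i.e. 16 bytes.  */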
7674 \f
7675 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7676 Specify whether the vector mode is supported by the hardware. */
7677
7678 static bool
7679 sparc_vector_mode_supported_p (machine_mode mode)
7680 {
7681 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
7682 }
7683 \f
7684 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7685
7686 static machine_mode
7687 sparc_preferred_simd_mode (machine_mode mode)
7688 {
7689 if (TARGET_VIS)
7690 switch (mode)
7691 {
7692 case SImode:
7693 return V2SImode;
7694 case HImode:
7695 return V4HImode;
7696 case QImode:
7697 return V8QImode;
7698
7699 default:;
7700 }
7701
7702 return word_mode;
7703 }
7704 \f
7705 /* Return the string to output an unconditional branch to LABEL, which is
7706 the operand number of the label.
7707
7708 DEST is the destination insn (i.e. the label), INSN is the source. */
7709
7710 const char *
7711 output_ubranch (rtx dest, rtx_insn *insn)
7712 {
7713 static char string[64];
7714 bool v9_form = false;
7715 int delta;
7716 char *p;
7717
7718 /* Even if we are trying to use cbcond for this, evaluate
7719 whether we can use V9 branches as our backup plan. */
7720
7721 delta = 5000000;
7722 if (INSN_ADDRESSES_SET_P ())
7723 delta = (INSN_ADDRESSES (INSN_UID (dest))
7724 - INSN_ADDRESSES (INSN_UID (insn)));
7725
7726 /* Leave some instructions for "slop". */
7727 if (TARGET_V9 && delta >= -260000 && delta < 260000)
7728 v9_form = true;
7729
7730 if (TARGET_CBCOND)
7731 {
7732 bool emit_nop = emit_cbcond_nop (insn);
7733 bool far = false;
7734 const char *rval;
7735
7736 if (delta < -500 || delta > 500)
7737 far = true;
7738
7739 if (far)
7740 {
7741 if (v9_form)
7742 rval = "ba,a,pt\t%%xcc, %l0";
7743 else
7744 rval = "b,a\t%l0";
7745 }
7746 else
7747 {
7748 if (emit_nop)
7749 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
7750 else
7751 rval = "cwbe\t%%g0, %%g0, %l0";
7752 }
7753 return rval;
7754 }
7755
7756 if (v9_form)
7757 strcpy (string, "ba%*,pt\t%%xcc, ");
7758 else
7759 strcpy (string, "b%*\t");
7760
7761 p = strchr (string, '\0');
7762 *p++ = '%';
7763 *p++ = 'l';
7764 *p++ = '0';
7765 *p++ = '%';
7766 *p++ = '(';
7767 *p = '\0';
7768
7769 return string;
7770 }
7771
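/* For instance, with cbcond available and a nearby target this returns
   "cwbe\t%%g0, %%g0, %l0" (a compare-and-branch on %g0 == %g0, hence always
   taken), while the plain V9 form built below is "ba%*,pt\t%%xcc, %l0%(",
   where the '%*' and '%(' punctuation codes add the annul flag or a
   delay-slot nop as handled in sparc_print_operand.  */
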
7772 /* Return the string to output a conditional branch to LABEL, which is
7773 the operand number of the label. OP is the conditional expression.
7774 XEXP (OP, 0) is assumed to be a condition code register (integer or
7775 floating point) and its mode specifies what kind of comparison we made.
7776
7777 DEST is the destination insn (i.e. the label), INSN is the source.
7778
7779 REVERSED is nonzero if we should reverse the sense of the comparison.
7780
7781 ANNUL is nonzero if we should generate an annulling branch. */
7782
7783 const char *
7784 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7785 rtx_insn *insn)
7786 {
7787 static char string[64];
7788 enum rtx_code code = GET_CODE (op);
7789 rtx cc_reg = XEXP (op, 0);
7790 machine_mode mode = GET_MODE (cc_reg);
7791 const char *labelno, *branch;
7792 int spaces = 8, far;
7793 char *p;
7794
7795 /* v9 branches are limited to +-1MB. If it is too far away,
7796 change
7797
7798 bne,pt %xcc, .LC30
7799
7800 to
7801
7802 be,pn %xcc, .+12
7803 nop
7804 ba .LC30
7805
7806 and
7807
7808 fbne,a,pn %fcc2, .LC29
7809
7810 to
7811
7812 fbe,pt %fcc2, .+16
7813 nop
7814 ba .LC29 */
7815
7816 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7817 if (reversed ^ far)
7818 {
7819 /* Reversal of FP compares takes care -- an ordered compare
7820 becomes an unordered compare and vice versa. */
7821 if (mode == CCFPmode || mode == CCFPEmode)
7822 code = reverse_condition_maybe_unordered (code);
7823 else
7824 code = reverse_condition (code);
7825 }
7826
7827 /* Start by writing the branch condition. */
7828 if (mode == CCFPmode || mode == CCFPEmode)
7829 {
7830 switch (code)
7831 {
7832 case NE:
7833 branch = "fbne";
7834 break;
7835 case EQ:
7836 branch = "fbe";
7837 break;
7838 case GE:
7839 branch = "fbge";
7840 break;
7841 case GT:
7842 branch = "fbg";
7843 break;
7844 case LE:
7845 branch = "fble";
7846 break;
7847 case LT:
7848 branch = "fbl";
7849 break;
7850 case UNORDERED:
7851 branch = "fbu";
7852 break;
7853 case ORDERED:
7854 branch = "fbo";
7855 break;
7856 case UNGT:
7857 branch = "fbug";
7858 break;
7859 case UNLT:
7860 branch = "fbul";
7861 break;
7862 case UNEQ:
7863 branch = "fbue";
7864 break;
7865 case UNGE:
7866 branch = "fbuge";
7867 break;
7868 case UNLE:
7869 branch = "fbule";
7870 break;
7871 case LTGT:
7872 branch = "fblg";
7873 break;
7874 default:
7875 gcc_unreachable ();
7876 }
7877
7878 /* ??? !v9: FP branches cannot be preceded by another floating point
7879 insn. Because there is currently no concept of pre-delay slots,
7880 we can fix this only by always emitting a nop before a floating
7881 point branch. */
7882
7883 string[0] = '\0';
7884 if (! TARGET_V9)
7885 strcpy (string, "nop\n\t");
7886 strcat (string, branch);
7887 }
7888 else
7889 {
7890 switch (code)
7891 {
7892 case NE:
7893 if (mode == CCVmode || mode == CCXVmode)
7894 branch = "bvs";
7895 else
7896 branch = "bne";
7897 break;
7898 case EQ:
7899 if (mode == CCVmode || mode == CCXVmode)
7900 branch = "bvc";
7901 else
7902 branch = "be";
7903 break;
7904 case GE:
7905 if (mode == CCNZmode || mode == CCXNZmode)
7906 branch = "bpos";
7907 else
7908 branch = "bge";
7909 break;
7910 case GT:
7911 branch = "bg";
7912 break;
7913 case LE:
7914 branch = "ble";
7915 break;
7916 case LT:
7917 if (mode == CCNZmode || mode == CCXNZmode)
7918 branch = "bneg";
7919 else
7920 branch = "bl";
7921 break;
7922 case GEU:
7923 branch = "bgeu";
7924 break;
7925 case GTU:
7926 branch = "bgu";
7927 break;
7928 case LEU:
7929 branch = "bleu";
7930 break;
7931 case LTU:
7932 branch = "blu";
7933 break;
7934 default:
7935 gcc_unreachable ();
7936 }
7937 strcpy (string, branch);
7938 }
7939 spaces -= strlen (branch);
7940 p = strchr (string, '\0');
7941
7942 /* Now add the annulling, the label, and a possible noop. */
7943 if (annul && ! far)
7944 {
7945 strcpy (p, ",a");
7946 p += 2;
7947 spaces -= 2;
7948 }
7949
7950 if (TARGET_V9)
7951 {
7952 rtx note;
7953 int v8 = 0;
7954
7955 if (! far && insn && INSN_ADDRESSES_SET_P ())
7956 {
7957 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7958 - INSN_ADDRESSES (INSN_UID (insn)));
7959 /* Leave some instructions for "slop". */
7960 if (delta < -260000 || delta >= 260000)
7961 v8 = 1;
7962 }
7963
7964 switch (mode)
7965 {
7966 case CCmode:
7967 case CCNZmode:
7968 case CCCmode:
7969 case CCVmode:
7970 labelno = "%%icc, ";
7971 if (v8)
7972 labelno = "";
7973 break;
7974 case CCXmode:
7975 case CCXNZmode:
7976 case CCXCmode:
7977 case CCXVmode:
7978 labelno = "%%xcc, ";
7979 gcc_assert (!v8);
7980 break;
7981 case CCFPmode:
7982 case CCFPEmode:
7983 {
7984 static char v9_fcc_labelno[] = "%%fccX, ";
7985 /* Set the char indicating the number of the fcc reg to use. */
7986 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7987 labelno = v9_fcc_labelno;
7988 if (v8)
7989 {
7990 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7991 labelno = "";
7992 }
7993 }
7994 break;
7995 default:
7996 gcc_unreachable ();
7997 }
7998
7999 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8000 {
8001 strcpy (p,
8002 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8003 >= profile_probability::even ()) ^ far)
8004 ? ",pt" : ",pn");
8005 p += 3;
8006 spaces -= 3;
8007 }
8008 }
8009 else
8010 labelno = "";
8011
8012 if (spaces > 0)
8013 *p++ = '\t';
8014 else
8015 *p++ = ' ';
8016 strcpy (p, labelno);
8017 p = strchr (p, '\0');
8018 if (far)
8019 {
8020 strcpy (p, ".+12\n\t nop\n\tb\t");
8021 /* Skip the next insn if requested or
8022 if we know that it will be a nop. */
8023 if (annul || ! final_sequence)
8024 p[3] = '6';
8025 p += 14;
8026 }
8027 *p++ = '%';
8028 *p++ = 'l';
8029 *p++ = label + '0';
8030 *p++ = '%';
8031 *p++ = '#';
8032 *p = '\0';
8033
8034 return string;
8035 }
8036
8037 /* Emit a library call comparison between floating point X and Y.
8038 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8039 Return the new operator to be used in the comparison sequence.
8040
8041 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8042 values as arguments instead of the TFmode registers themselves,
8043 which is why we cannot call emit_float_lib_cmp. */
8044
8045 rtx
8046 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8047 {
8048 const char *qpfunc;
8049 rtx slot0, slot1, result, tem, tem2, libfunc;
8050 machine_mode mode;
8051 enum rtx_code new_comparison;
8052
8053 switch (comparison)
8054 {
8055 case EQ:
8056 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8057 break;
8058
8059 case NE:
8060 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8061 break;
8062
8063 case GT:
8064 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8065 break;
8066
8067 case GE:
8068 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8069 break;
8070
8071 case LT:
8072 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8073 break;
8074
8075 case LE:
8076 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8077 break;
8078
8079 case ORDERED:
8080 case UNORDERED:
8081 case UNGT:
8082 case UNLT:
8083 case UNEQ:
8084 case UNGE:
8085 case UNLE:
8086 case LTGT:
8087 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8088 break;
8089
8090 default:
8091 gcc_unreachable ();
8092 }
8093
8094 if (TARGET_ARCH64)
8095 {
8096 if (MEM_P (x))
8097 {
8098 tree expr = MEM_EXPR (x);
8099 if (expr)
8100 mark_addressable (expr);
8101 slot0 = x;
8102 }
8103 else
8104 {
8105 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8106 emit_move_insn (slot0, x);
8107 }
8108
8109 if (MEM_P (y))
8110 {
8111 tree expr = MEM_EXPR (y);
8112 if (expr)
8113 mark_addressable (expr);
8114 slot1 = y;
8115 }
8116 else
8117 {
8118 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8119 emit_move_insn (slot1, y);
8120 }
8121
8122 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8123 emit_library_call (libfunc, LCT_NORMAL,
8124 DImode, 2,
8125 XEXP (slot0, 0), Pmode,
8126 XEXP (slot1, 0), Pmode);
8127 mode = DImode;
8128 }
8129 else
8130 {
8131 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8132 emit_library_call (libfunc, LCT_NORMAL,
8133 SImode, 2,
8134 x, TFmode, y, TFmode);
8135 mode = SImode;
8136 }
8137
8138
8139 /* Immediately move the result of the libcall into a pseudo
8140 register so reload doesn't clobber the value if it needs
8141 the return register for a spill reg. */
8142 result = gen_reg_rtx (mode);
8143 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8144
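  /* For the unordered-aware comparisons, which go through _Qp_cmp/_Q_cmp,
     the tests below are consistent with a result of 0 for "equal", 1 for
     "less", 2 for "greater" and 3 for "unordered"; e.g. for UNLT we test
     (result & 1) != 0, which holds exactly for "less" and "unordered".  */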
8145 switch (comparison)
8146 {
8147 default:
8148 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8149 case ORDERED:
8150 case UNORDERED:
8151 new_comparison = (comparison == UNORDERED ? EQ : NE);
8152 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8153 case UNGT:
8154 case UNGE:
8155 new_comparison = (comparison == UNGT ? GT : NE);
8156 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8157 case UNLE:
8158 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8159 case UNLT:
8160 tem = gen_reg_rtx (mode);
8161 if (TARGET_ARCH32)
8162 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8163 else
8164 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8165 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8166 case UNEQ:
8167 case LTGT:
8168 tem = gen_reg_rtx (mode);
8169 if (TARGET_ARCH32)
8170 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8171 else
8172 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8173 tem2 = gen_reg_rtx (mode);
8174 if (TARGET_ARCH32)
8175 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8176 else
8177 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8178 new_comparison = (comparison == UNEQ ? EQ : NE);
8179 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8180 }
8181
8182 gcc_unreachable ();
8183 }
8184
8185 /* Generate an unsigned DImode to FP conversion. This is the same code
8186 optabs would emit if we didn't have TFmode patterns. */
8187
8188 void
8189 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8190 {
8191 rtx i0, i1, f0, in, out;
8192
8193 out = operands[0];
8194 in = force_reg (DImode, operands[1]);
8195 rtx_code_label *neglab = gen_label_rtx ();
8196 rtx_code_label *donelab = gen_label_rtx ();
8197 i0 = gen_reg_rtx (DImode);
8198 i1 = gen_reg_rtx (DImode);
8199 f0 = gen_reg_rtx (mode);
8200
8201 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8202
8203 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8204 emit_jump_insn (gen_jump (donelab));
8205 emit_barrier ();
8206
8207 emit_label (neglab);
8208
8209 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8210 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8211 emit_insn (gen_iordi3 (i0, i0, i1));
8212 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8213 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8214
8215 emit_label (donelab);
8216 }
8217
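/* In plain C the expansion above amounts roughly to the following sketch,
   where 'u' is the unsigned DImode input and 'd' the floating-point result;
   values that do not fit a signed conversion are halved with the low bit
   kept sticky, converted, and then doubled:

     if ((long long) u >= 0)
       d = (double) (long long) u;
     else
       {
         long long half = (long long) ((u >> 1) | (u & 1));
         d = (double) half;
         d = d + d;
       }
*/
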
8218 /* Generate an FP to unsigned DImode conversion. This is the same code
8219 optabs would emit if we didn't have TFmode patterns. */
8220
8221 void
8222 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8223 {
8224 rtx i0, i1, f0, in, out, limit;
8225
8226 out = operands[0];
8227 in = force_reg (mode, operands[1]);
8228 rtx_code_label *neglab = gen_label_rtx ();
8229 rtx_code_label *donelab = gen_label_rtx ();
8230 i0 = gen_reg_rtx (DImode);
8231 i1 = gen_reg_rtx (DImode);
8232 limit = gen_reg_rtx (mode);
8233 f0 = gen_reg_rtx (mode);
8234
8235 emit_move_insn (limit,
8236 const_double_from_real_value (
8237 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8238 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8239
8240 emit_insn (gen_rtx_SET (out,
8241 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8242 emit_jump_insn (gen_jump (donelab));
8243 emit_barrier ();
8244
8245 emit_label (neglab);
8246
8247 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8248 emit_insn (gen_rtx_SET (i0,
8249 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8250 emit_insn (gen_movdi (i1, const1_rtx));
8251 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8252 emit_insn (gen_xordi3 (out, i0, i1));
8253
8254 emit_label (donelab);
8255 }
8256
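/* The rough C equivalent, where 'd' is the floating-point input and 'u'
   the unsigned DImode result; values at or above 2^63 are reduced by 2^63
   before the signed conversion and the top bit is restored with the xor:

     if (d < 9223372036854775808.0)
       u = (unsigned long long) (long long) d;
     else
       u = (unsigned long long) (long long) (d - 9223372036854775808.0)
           ^ (1ULL << 63);
*/
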
8257 /* Return the string to output a compare and branch instruction to DEST.
8258 DEST is the destination insn (i.e. the label), INSN is the source,
8259 and OP is the conditional expression. */
8260
8261 const char *
8262 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8263 {
8264 machine_mode mode = GET_MODE (XEXP (op, 0));
8265 enum rtx_code code = GET_CODE (op);
8266 const char *cond_str, *tmpl;
8267 int far, emit_nop, len;
8268 static char string[64];
8269 char size_char;
8270
8271 /* Compare and Branch is limited to +-2KB. If it is too far away,
8272 change
8273
8274 cxbne X, Y, .LC30
8275
8276 to
8277
8278 cxbe X, Y, .+16
8279 nop
8280 ba,pt xcc, .LC30
8281 nop */
8282
8283 len = get_attr_length (insn);
8284
8285 far = len == 4;
8286 emit_nop = len == 2;
8287
8288 if (far)
8289 code = reverse_condition (code);
8290
8291 size_char = ((mode == SImode) ? 'w' : 'x');
8292
8293 switch (code)
8294 {
8295 case NE:
8296 cond_str = "ne";
8297 break;
8298
8299 case EQ:
8300 cond_str = "e";
8301 break;
8302
8303 case GE:
8304 cond_str = "ge";
8305 break;
8306
8307 case GT:
8308 cond_str = "g";
8309 break;
8310
8311 case LE:
8312 cond_str = "le";
8313 break;
8314
8315 case LT:
8316 cond_str = "l";
8317 break;
8318
8319 case GEU:
8320 cond_str = "cc";
8321 break;
8322
8323 case GTU:
8324 cond_str = "gu";
8325 break;
8326
8327 case LEU:
8328 cond_str = "leu";
8329 break;
8330
8331 case LTU:
8332 cond_str = "cs";
8333 break;
8334
8335 default:
8336 gcc_unreachable ();
8337 }
8338
8339 if (far)
8340 {
8341 int veryfar = 1, delta;
8342
8343 if (INSN_ADDRESSES_SET_P ())
8344 {
8345 delta = (INSN_ADDRESSES (INSN_UID (dest))
8346 - INSN_ADDRESSES (INSN_UID (insn)));
8347 /* Leave some instructions for "slop". */
8348 if (delta >= -260000 && delta < 260000)
8349 veryfar = 0;
8350 }
8351
8352 if (veryfar)
8353 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8354 else
8355 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8356 }
8357 else
8358 {
8359 if (emit_nop)
8360 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8361 else
8362 tmpl = "c%cb%s\t%%1, %%2, %%3";
8363 }
8364
8365 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8366
8367 return string;
8368 }
8369
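/* For example, an SImode equality test with a nearby target and no trailing
   nop comes out as "cwbe\t%1, %2, %3" (size_char 'w', cond_str "e"), while a
   DImode unsigned less-than gives "cxbcs\t%1, %2, %3".  */
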
8370 /* Return the string to output a conditional branch to LABEL, testing
8371 register REG. LABEL is the operand number of the label; REG is the
8372 operand number of the reg. OP is the conditional expression. The mode
8373 of REG says what kind of comparison we made.
8374
8375 DEST is the destination insn (i.e. the label), INSN is the source.
8376
8377 REVERSED is nonzero if we should reverse the sense of the comparison.
8378
8379 ANNUL is nonzero if we should generate an annulling branch. */
8380
8381 const char *
8382 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8383 int annul, rtx_insn *insn)
8384 {
8385 static char string[64];
8386 enum rtx_code code = GET_CODE (op);
8387 machine_mode mode = GET_MODE (XEXP (op, 0));
8388 rtx note;
8389 int far;
8390 char *p;
8391
8392 /* Branches on register are limited to +-128KB. If it is too far away,
8393 change
8394
8395 brnz,pt %g1, .LC30
8396
8397 to
8398
8399 brz,pn %g1, .+12
8400 nop
8401 ba,pt %xcc, .LC30
8402
8403 and
8404
8405 brgez,a,pn %o1, .LC29
8406
8407 to
8408
8409 brlz,pt %o1, .+16
8410 nop
8411 ba,pt %xcc, .LC29 */
8412
8413 far = get_attr_length (insn) >= 3;
8414
8415 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8416 if (reversed ^ far)
8417 code = reverse_condition (code);
8418
8419 /* Only 64 bit versions of these instructions exist. */
8420 gcc_assert (mode == DImode);
8421
8422 /* Start by writing the branch condition. */
8423
8424 switch (code)
8425 {
8426 case NE:
8427 strcpy (string, "brnz");
8428 break;
8429
8430 case EQ:
8431 strcpy (string, "brz");
8432 break;
8433
8434 case GE:
8435 strcpy (string, "brgez");
8436 break;
8437
8438 case LT:
8439 strcpy (string, "brlz");
8440 break;
8441
8442 case LE:
8443 strcpy (string, "brlez");
8444 break;
8445
8446 case GT:
8447 strcpy (string, "brgz");
8448 break;
8449
8450 default:
8451 gcc_unreachable ();
8452 }
8453
8454 p = strchr (string, '\0');
8455
8456 /* Now add the annulling, reg, label, and nop. */
8457 if (annul && ! far)
8458 {
8459 strcpy (p, ",a");
8460 p += 2;
8461 }
8462
8463 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8464 {
8465 strcpy (p,
8466 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8467 >= profile_probability::even ()) ^ far)
8468 ? ",pt" : ",pn");
8469 p += 3;
8470 }
8471
8472 *p = p < string + 8 ? '\t' : ' ';
8473 p++;
8474 *p++ = '%';
8475 *p++ = '0' + reg;
8476 *p++ = ',';
8477 *p++ = ' ';
8478 if (far)
8479 {
8480 int veryfar = 1, delta;
8481
8482 if (INSN_ADDRESSES_SET_P ())
8483 {
8484 delta = (INSN_ADDRESSES (INSN_UID (dest))
8485 - INSN_ADDRESSES (INSN_UID (insn)));
8486 /* Leave some instructions for "slop". */
8487 if (delta >= -260000 && delta < 260000)
8488 veryfar = 0;
8489 }
8490
8491 strcpy (p, ".+12\n\t nop\n\t");
8492 /* Skip the next insn if requested or
8493 if we know that it will be a nop. */
8494 if (annul || ! final_sequence)
8495 p[3] = '6';
8496 p += 12;
8497 if (veryfar)
8498 {
8499 strcpy (p, "b\t");
8500 p += 2;
8501 }
8502 else
8503 {
8504 strcpy (p, "ba,pt\t%%xcc, ");
8505 p += 13;
8506 }
8507 }
8508 *p++ = '%';
8509 *p++ = 'l';
8510 *p++ = '0' + label;
8511 *p++ = '%';
8512 *p++ = '#';
8513 *p = '\0';
8514
8515 return string;
8516 }
8517
8518 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8519 Such instructions cannot be used in the delay slot of the return insn on v9.
8520 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
8521 */
8522
8523 static int
8524 epilogue_renumber (register rtx *where, int test)
8525 {
8526 register const char *fmt;
8527 register int i;
8528 register enum rtx_code code;
8529
8530 if (*where == 0)
8531 return 0;
8532
8533 code = GET_CODE (*where);
8534
8535 switch (code)
8536 {
8537 case REG:
8538 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8539 return 1;
8540 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8541 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8542 /* fallthrough */
8543 case SCRATCH:
8544 case CC0:
8545 case PC:
8546 case CONST_INT:
8547 case CONST_WIDE_INT:
8548 case CONST_DOUBLE:
8549 return 0;
8550
8551 /* Do not replace the frame pointer with the stack pointer because
8552 it can cause the delayed instruction to load below the stack.
8553 This occurs when instructions like:
8554
8555 (set (reg/i:SI 24 %i0)
8556 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8557 (const_int -20 [0xffffffec])) 0))
8558
8559 are in the return delayed slot. */
8560 case PLUS:
8561 if (GET_CODE (XEXP (*where, 0)) == REG
8562 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8563 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8564 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8565 return 1;
8566 break;
8567
8568 case MEM:
8569 if (SPARC_STACK_BIAS
8570 && GET_CODE (XEXP (*where, 0)) == REG
8571 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8572 return 1;
8573 break;
8574
8575 default:
8576 break;
8577 }
8578
8579 fmt = GET_RTX_FORMAT (code);
8580
8581 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8582 {
8583 if (fmt[i] == 'E')
8584 {
8585 register int j;
8586 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8587 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8588 return 1;
8589 }
8590 else if (fmt[i] == 'e'
8591 && epilogue_renumber (&(XEXP (*where, i)), test))
8592 return 1;
8593 }
8594 return 0;
8595 }
8596 \f
8597 /* Leaf functions and non-leaf functions have different needs. */
8598
8599 static const int
8600 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8601
8602 static const int
8603 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8604
8605 static const int *const reg_alloc_orders[] = {
8606 reg_leaf_alloc_order,
8607 reg_nonleaf_alloc_order};
8608
8609 void
8610 order_regs_for_local_alloc (void)
8611 {
8612 static int last_order_nonleaf = 1;
8613
8614 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8615 {
8616 last_order_nonleaf = !last_order_nonleaf;
8617 memcpy ((char *) reg_alloc_order,
8618 (const char *) reg_alloc_orders[last_order_nonleaf],
8619 FIRST_PSEUDO_REGISTER * sizeof (int));
8620 }
8621 }
8622 \f
8623 /* Return 1 if REG and MEM are legitimate enough to allow the various
8624 MEM<-->REG splits to be run. */
8625
8626 int
8627 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8628 {
8629 /* Punt if we are here by mistake. */
8630 gcc_assert (reload_completed);
8631
8632 /* We must have an offsettable memory reference. */
8633 if (!offsettable_memref_p (mem))
8634 return 0;
8635
8636 /* If we have legitimate args for ldd/std, we do not want
8637 the split to happen. */
8638 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8639 return 0;
8640
8641 /* Success. */
8642 return 1;
8643 }
8644
8645 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8646
8647 void
8648 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8649 {
8650 rtx high_part = gen_highpart (mode, dest);
8651 rtx low_part = gen_lowpart (mode, dest);
8652 rtx word0 = adjust_address (src, mode, 0);
8653 rtx word1 = adjust_address (src, mode, 4);
8654
8655 if (reg_overlap_mentioned_p (high_part, word1))
8656 {
8657 emit_move_insn_1 (low_part, word1);
8658 emit_move_insn_1 (high_part, word0);
8659 }
8660 else
8661 {
8662 emit_move_insn_1 (high_part, word0);
8663 emit_move_insn_1 (low_part, word1);
8664 }
8665 }
8666
8667 /* Split a MEM <-- REG move into a pair of moves in MODE. */
8668
8669 void
8670 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
8671 {
8672 rtx word0 = adjust_address (dest, mode, 0);
8673 rtx word1 = adjust_address (dest, mode, 4);
8674 rtx high_part = gen_highpart (mode, src);
8675 rtx low_part = gen_lowpart (mode, src);
8676
8677 emit_move_insn_1 (word0, high_part);
8678 emit_move_insn_1 (word1, low_part);
8679 }
8680
8681 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
8682
8683 int
8684 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
8685 {
8686 /* Punt if we are here by mistake. */
8687 gcc_assert (reload_completed);
8688
8689 if (GET_CODE (reg1) == SUBREG)
8690 reg1 = SUBREG_REG (reg1);
8691 if (GET_CODE (reg1) != REG)
8692 return 0;
8693 const int regno1 = REGNO (reg1);
8694
8695 if (GET_CODE (reg2) == SUBREG)
8696 reg2 = SUBREG_REG (reg2);
8697 if (GET_CODE (reg2) != REG)
8698 return 0;
8699 const int regno2 = REGNO (reg2);
8700
8701 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
8702 return 1;
8703
8704 if (TARGET_VIS3)
8705 {
8706 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
8707 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
8708 return 1;
8709 }
8710
8711 return 0;
8712 }
8713
8714 /* Split a REG <--> REG move into a pair of moves in MODE. */
8715
8716 void
8717 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
8718 {
8719 rtx dest1 = gen_highpart (mode, dest);
8720 rtx dest2 = gen_lowpart (mode, dest);
8721 rtx src1 = gen_highpart (mode, src);
8722 rtx src2 = gen_lowpart (mode, src);
8723
8724 /* Now emit using the real source and destination we found, swapping
8725 the order if we detect overlap. */
8726 if (reg_overlap_mentioned_p (dest1, src2))
8727 {
8728 emit_move_insn_1 (dest2, src2);
8729 emit_move_insn_1 (dest1, src1);
8730 }
8731 else
8732 {
8733 emit_move_insn_1 (dest1, src1);
8734 emit_move_insn_1 (dest2, src2);
8735 }
8736 }
8737
8738 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
8739 This makes them candidates for using ldd and std insns.
8740
8741 Note reg1 and reg2 *must* be hard registers. */
8742
8743 int
8744 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
8745 {
8746 /* We might have been passed a SUBREG. */
8747 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
8748 return 0;
8749
8750 if (REGNO (reg1) % 2 != 0)
8751 return 0;
8752
8753 /* Integer ldd is deprecated in SPARC V9 */
8754 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
8755 return 0;
8756
8757 return (REGNO (reg1) == REGNO (reg2) - 1);
8758 }
8759
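/* E.g. the pair (%f4, %f5) qualifies -- even first register, consecutive
   numbers -- whereas (%f5, %f6) does not, and on V9 an integer pair such as
   (%o0, %o1) is rejected because integer ldd is deprecated there.  */
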
8760 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
8761 an ldd or std insn.
8762
8763 This can only happen when addr1 and addr2, the addresses in mem1
8764 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
8765 addr1 must also be aligned on a 64-bit boundary.
8766
8767 Also, if dependent_reg_rtx is not null, it should not be used to
8768 compute the address for mem1, i.e. we cannot optimize a sequence
8769 like:
8770 ld [%o0], %o0
8771 ld [%o0 + 4], %o1
8772 to
8773 ldd [%o0], %o0
8774 nor:
8775 ld [%g3 + 4], %g3
8776 ld [%g3], %g2
8777 to
8778 ldd [%g3], %g2
8779
8780 But, note that the transformation from:
8781 ld [%g2 + 4], %g3
8782 ld [%g2], %g2
8783 to
8784 ldd [%g2], %g2
8785 is perfectly fine. Thus, the peephole2 patterns always pass us
8786 the destination register of the first load, never the second one.
8787
8788 For stores we don't have a similar problem, so dependent_reg_rtx is
8789 NULL_RTX. */
8790
8791 int
8792 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
8793 {
8794 rtx addr1, addr2;
8795 unsigned int reg1;
8796 HOST_WIDE_INT offset1;
8797
8798 /* The mems cannot be volatile. */
8799 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
8800 return 0;
8801
8802 /* MEM1 should be aligned on a 64-bit boundary. */
8803 if (MEM_ALIGN (mem1) < 64)
8804 return 0;
8805
8806 addr1 = XEXP (mem1, 0);
8807 addr2 = XEXP (mem2, 0);
8808
8809 /* Extract a register number and offset (if used) from the first addr. */
8810 if (GET_CODE (addr1) == PLUS)
8811 {
8812 /* If not a REG, return zero. */
8813 if (GET_CODE (XEXP (addr1, 0)) != REG)
8814 return 0;
8815 else
8816 {
8817 reg1 = REGNO (XEXP (addr1, 0));
8818 /* The offset must be constant! */
8819 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
8820 return 0;
8821 offset1 = INTVAL (XEXP (addr1, 1));
8822 }
8823 }
8824 else if (GET_CODE (addr1) != REG)
8825 return 0;
8826 else
8827 {
8828 reg1 = REGNO (addr1);
8829 /* This was a simple (mem (reg)) expression. Offset is 0. */
8830 offset1 = 0;
8831 }
8832
8833 /* Make sure the second address is of the form (plus (reg) (const_int)). */
8834 if (GET_CODE (addr2) != PLUS)
8835 return 0;
8836
8837 if (GET_CODE (XEXP (addr2, 0)) != REG
8838 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
8839 return 0;
8840
8841 if (reg1 != REGNO (XEXP (addr2, 0)))
8842 return 0;
8843
8844 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
8845 return 0;
8846
8847 /* The first offset must be evenly divisible by 8 to ensure the
8848 address is 64 bit aligned. */
8849 if (offset1 % 8 != 0)
8850 return 0;
8851
8852 /* The offset for the second addr must be 4 more than the first addr. */
8853 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
8854 return 0;
8855
8856 /* All the tests passed. addr1 and addr2 are valid for ldd and std
8857 instructions. */
8858 return 1;
8859 }
8860
8861 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
8862
8863 rtx
8864 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
8865 {
8866 rtx x = widen_memory_access (mem1, mode, 0);
8867 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
8868 return x;
8869 }
8870
8871 /* Return 1 if reg is a pseudo, or is the first register in
8872 a hard register pair. This makes it suitable for use in
8873 ldd and std insns. */
8874
8875 int
8876 register_ok_for_ldd (rtx reg)
8877 {
8878 /* We might have been passed a SUBREG. */
8879 if (!REG_P (reg))
8880 return 0;
8881
8882 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
8883 return (REGNO (reg) % 2 == 0);
8884
8885 return 1;
8886 }
8887
8888 /* Return 1 if OP, a MEM, has an address which is known to be
8889 aligned to an 8-byte boundary. */
8890
8891 int
8892 memory_ok_for_ldd (rtx op)
8893 {
8894 /* In 64-bit mode, we assume that the address is word-aligned. */
8895 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
8896 return 0;
8897
8898 if (! can_create_pseudo_p ()
8899 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
8900 return 0;
8901
8902 return 1;
8903 }
8904 \f
8905 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8906
8907 static bool
8908 sparc_print_operand_punct_valid_p (unsigned char code)
8909 {
8910 if (code == '#'
8911 || code == '*'
8912 || code == '('
8913 || code == ')'
8914 || code == '_'
8915 || code == '&')
8916 return true;
8917
8918 return false;
8919 }
8920
8921 /* Implement TARGET_PRINT_OPERAND.
8922 Print operand X (an rtx) in assembler syntax to file FILE.
8923 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8924 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8925
8926 static void
8927 sparc_print_operand (FILE *file, rtx x, int code)
8928 {
8929 const char *s;
8930
8931 switch (code)
8932 {
8933 case '#':
8934 /* Output an insn in a delay slot. */
8935 if (final_sequence)
8936 sparc_indent_opcode = 1;
8937 else
8938 fputs ("\n\t nop", file);
8939 return;
8940 case '*':
8941 /* Output an annul flag if there's nothing for the delay slot and we
8942 are optimizing. This is always used with '(' below.
8943 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8944 this is a dbx bug. So, we only do this when optimizing.
8945 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8946 Always emit a nop in case the next instruction is a branch. */
8947 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8948 fputs (",a", file);
8949 return;
8950 case '(':
8951 /* Output a 'nop' if there's nothing for the delay slot and we are
8952 not optimizing. This is always used with '*' above. */
8953 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8954 fputs ("\n\t nop", file);
8955 else if (final_sequence)
8956 sparc_indent_opcode = 1;
8957 return;
8958 case ')':
8959 /* Output the right displacement from the saved PC on function return.
8960 The caller may have placed an "unimp" insn immediately after the call
8961 so we have to account for it. This insn is used in the 32-bit ABI
8962 when calling a function that returns a non zero-sized structure. The
8963 64-bit ABI doesn't have it. Be careful to have this test be the same
8964 as that for the call. The exception is when sparc_std_struct_return
8965 is enabled, the psABI is followed exactly and the adjustment is made
8966 by the code in sparc_struct_value_rtx. The call emitted is the same
8967 when sparc_std_struct_return is enabled. */
8968 if (!TARGET_ARCH64
8969 && cfun->returns_struct
8970 && !sparc_std_struct_return
8971 && DECL_SIZE (DECL_RESULT (current_function_decl))
8972 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8973 == INTEGER_CST
8974 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8975 fputs ("12", file);
8976 else
8977 fputc ('8', file);
8978 return;
8979 case '_':
8980 /* Output the Embedded Medium/Anywhere code model base register. */
8981 fputs (EMBMEDANY_BASE_REG, file);
8982 return;
8983 case '&':
8984 /* Print some local dynamic TLS name. */
8985 if (const char *name = get_some_local_dynamic_name ())
8986 assemble_name (file, name);
8987 else
8988 output_operand_lossage ("'%%&' used without any "
8989 "local dynamic TLS references");
8990 return;
8991
8992 case 'Y':
8993 /* Adjust the operand to take into account a RESTORE operation. */
8994 if (GET_CODE (x) == CONST_INT)
8995 break;
8996 else if (GET_CODE (x) != REG)
8997 output_operand_lossage ("invalid %%Y operand");
8998 else if (REGNO (x) < 8)
8999 fputs (reg_names[REGNO (x)], file);
9000 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9001 fputs (reg_names[REGNO (x)-16], file);
9002 else
9003 output_operand_lossage ("invalid %%Y operand");
9004 return;
9005 case 'L':
9006 /* Print out the low order register name of a register pair. */
9007 if (WORDS_BIG_ENDIAN)
9008 fputs (reg_names[REGNO (x)+1], file);
9009 else
9010 fputs (reg_names[REGNO (x)], file);
9011 return;
9012 case 'H':
9013 /* Print out the high order register name of a register pair. */
9014 if (WORDS_BIG_ENDIAN)
9015 fputs (reg_names[REGNO (x)], file);
9016 else
9017 fputs (reg_names[REGNO (x)+1], file);
9018 return;
9019 case 'R':
9020 /* Print out the second register name of a register pair or quad.
9021 I.e., R (%o0) => %o1. */
9022 fputs (reg_names[REGNO (x)+1], file);
9023 return;
9024 case 'S':
9025 /* Print out the third register name of a register quad.
9026 I.e., S (%o0) => %o2. */
9027 fputs (reg_names[REGNO (x)+2], file);
9028 return;
9029 case 'T':
9030 /* Print out the fourth register name of a register quad.
9031 I.e., T (%o0) => %o3. */
9032 fputs (reg_names[REGNO (x)+3], file);
9033 return;
9034 case 'x':
9035 /* Print a condition code register. */
9036 if (REGNO (x) == SPARC_ICC_REG)
9037 {
9038 switch (GET_MODE (x))
9039 {
9040 case CCmode:
9041 case CCNZmode:
9042 case CCCmode:
9043 case CCVmode:
9044 s = "%icc";
9045 break;
9046 case CCXmode:
9047 case CCXNZmode:
9048 case CCXCmode:
9049 case CCXVmode:
9050 s = "%xcc";
9051 break;
9052 default:
9053 gcc_unreachable ();
9054 }
9055 fputs (s, file);
9056 }
9057 else
9058 /* %fccN register */
9059 fputs (reg_names[REGNO (x)], file);
9060 return;
9061 case 'm':
9062 /* Print the operand's address only. */
9063 output_address (GET_MODE (x), XEXP (x, 0));
9064 return;
9065 case 'r':
9066 /* In this case we need a register. Use %g0 if the
9067 operand is const0_rtx. */
9068 if (x == const0_rtx
9069 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9070 {
9071 fputs ("%g0", file);
9072 return;
9073 }
9074 else
9075 break;
9076
9077 case 'A':
9078 switch (GET_CODE (x))
9079 {
9080 case IOR:
9081 s = "or";
9082 break;
9083 case AND:
9084 s = "and";
9085 break;
9086 case XOR:
9087 s = "xor";
9088 break;
9089 default:
9090 output_operand_lossage ("invalid %%A operand");
9091 s = "";
9092 break;
9093 }
9094 fputs (s, file);
9095 return;
9096
9097 case 'B':
9098 switch (GET_CODE (x))
9099 {
9100 case IOR:
9101 s = "orn";
9102 break;
9103 case AND:
9104 s = "andn";
9105 break;
9106 case XOR:
9107 s = "xnor";
9108 break;
9109 default:
9110 output_operand_lossage ("invalid %%B operand");
9111 s = "";
9112 break;
9113 }
9114 fputs (s, file);
9115 return;
9116
9117 /* This is used by the conditional move instructions. */
9118 case 'C':
9119 {
9120 machine_mode mode = GET_MODE (XEXP (x, 0));
9121 switch (GET_CODE (x))
9122 {
9123 case NE:
9124 if (mode == CCVmode || mode == CCXVmode)
9125 s = "vs";
9126 else
9127 s = "ne";
9128 break;
9129 case EQ:
9130 if (mode == CCVmode || mode == CCXVmode)
9131 s = "vc";
9132 else
9133 s = "e";
9134 break;
9135 case GE:
9136 if (mode == CCNZmode || mode == CCXNZmode)
9137 s = "pos";
9138 else
9139 s = "ge";
9140 break;
9141 case GT:
9142 s = "g";
9143 break;
9144 case LE:
9145 s = "le";
9146 break;
9147 case LT:
9148 if (mode == CCNZmode || mode == CCXNZmode)
9149 s = "neg";
9150 else
9151 s = "l";
9152 break;
9153 case GEU:
9154 s = "geu";
9155 break;
9156 case GTU:
9157 s = "gu";
9158 break;
9159 case LEU:
9160 s = "leu";
9161 break;
9162 case LTU:
9163 s = "lu";
9164 break;
9165 case LTGT:
9166 s = "lg";
9167 break;
9168 case UNORDERED:
9169 s = "u";
9170 break;
9171 case ORDERED:
9172 s = "o";
9173 break;
9174 case UNLT:
9175 s = "ul";
9176 break;
9177 case UNLE:
9178 s = "ule";
9179 break;
9180 case UNGT:
9181 s = "ug";
9182 break;
9183 case UNGE:
9184 s = "uge";
9185 break;
9186 case UNEQ:
9187 s = "ue";
9188 break;
9189 default:
9190 output_operand_lossage ("invalid %%C operand");
9191 s = "";
9192 break;
9193 }
9194 fputs (s, file);
9195 return;
9196 }
9197
9198 /* These are used by the movr instruction pattern. */
9199 case 'D':
9200 {
9201 switch (GET_CODE (x))
9202 {
9203 case NE:
9204 s = "ne";
9205 break;
9206 case EQ:
9207 s = "e";
9208 break;
9209 case GE:
9210 s = "gez";
9211 break;
9212 case LT:
9213 s = "lz";
9214 break;
9215 case LE:
9216 s = "lez";
9217 break;
9218 case GT:
9219 s = "gz";
9220 break;
9221 default:
9222 output_operand_lossage ("invalid %%D operand");
9223 s = "";
9224 break;
9225 }
9226 fputs (s, file);
9227 return;
9228 }
9229
9230 case 'b':
9231 {
9232 /* Print a sign-extended character. */
9233 int i = trunc_int_for_mode (INTVAL (x), QImode);
9234 fprintf (file, "%d", i);
9235 return;
9236 }
9237
9238 case 'f':
9239 /* Operand must be a MEM; write its address. */
9240 if (GET_CODE (x) != MEM)
9241 output_operand_lossage ("invalid %%f operand");
9242 output_address (GET_MODE (x), XEXP (x, 0));
9243 return;
9244
9245 case 's':
9246 {
9247 /* Print a sign-extended 32-bit value. */
9248 HOST_WIDE_INT i;
9249 if (GET_CODE(x) == CONST_INT)
9250 i = INTVAL (x);
9251 else
9252 {
9253 output_operand_lossage ("invalid %%s operand");
9254 return;
9255 }
9256 i = trunc_int_for_mode (i, SImode);
9257 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9258 return;
9259 }
9260
9261 case 0:
9262 /* Do nothing special. */
9263 break;
9264
9265 default:
9266 /* Undocumented flag. */
9267 output_operand_lossage ("invalid operand output code");
9268 }
9269
9270 if (GET_CODE (x) == REG)
9271 fputs (reg_names[REGNO (x)], file);
9272 else if (GET_CODE (x) == MEM)
9273 {
9274 fputc ('[', file);
9275 /* Poor Sun assembler doesn't understand absolute addressing. */
9276 if (CONSTANT_P (XEXP (x, 0)))
9277 fputs ("%g0+", file);
9278 output_address (GET_MODE (x), XEXP (x, 0));
9279 fputc (']', file);
9280 }
9281 else if (GET_CODE (x) == HIGH)
9282 {
9283 fputs ("%hi(", file);
9284 output_addr_const (file, XEXP (x, 0));
9285 fputc (')', file);
9286 }
9287 else if (GET_CODE (x) == LO_SUM)
9288 {
9289 sparc_print_operand (file, XEXP (x, 0), 0);
9290 if (TARGET_CM_MEDMID)
9291 fputs ("+%l44(", file);
9292 else
9293 fputs ("+%lo(", file);
9294 output_addr_const (file, XEXP (x, 1));
9295 fputc (')', file);
9296 }
9297 else if (GET_CODE (x) == CONST_DOUBLE)
9298 output_operand_lossage ("floating-point constant not a valid immediate operand");
9299 else
9300 output_addr_const (file, x);
9301 }
9302
9303 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9304
9305 static void
9306 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9307 {
9308 register rtx base, index = 0;
9309 int offset = 0;
9310 register rtx addr = x;
9311
9312 if (REG_P (addr))
9313 fputs (reg_names[REGNO (addr)], file);
9314 else if (GET_CODE (addr) == PLUS)
9315 {
9316 if (CONST_INT_P (XEXP (addr, 0)))
9317 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9318 else if (CONST_INT_P (XEXP (addr, 1)))
9319 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9320 else
9321 base = XEXP (addr, 0), index = XEXP (addr, 1);
9322 if (GET_CODE (base) == LO_SUM)
9323 {
9324 gcc_assert (USE_AS_OFFSETABLE_LO10
9325 && TARGET_ARCH64
9326 && ! TARGET_CM_MEDMID);
9327 output_operand (XEXP (base, 0), 0);
9328 fputs ("+%lo(", file);
9329 output_address (VOIDmode, XEXP (base, 1));
9330 fprintf (file, ")+%d", offset);
9331 }
9332 else
9333 {
9334 fputs (reg_names[REGNO (base)], file);
9335 if (index == 0)
9336 fprintf (file, "%+d", offset);
9337 else if (REG_P (index))
9338 fprintf (file, "+%s", reg_names[REGNO (index)]);
9339 else if (GET_CODE (index) == SYMBOL_REF
9340 || GET_CODE (index) == LABEL_REF
9341 || GET_CODE (index) == CONST)
9342 fputc ('+', file), output_addr_const (file, index);
9343 else gcc_unreachable ();
9344 }
9345 }
9346 else if (GET_CODE (addr) == MINUS
9347 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9348 {
9349 output_addr_const (file, XEXP (addr, 0));
9350 fputs ("-(", file);
9351 output_addr_const (file, XEXP (addr, 1));
9352 fputs ("-.)", file);
9353 }
9354 else if (GET_CODE (addr) == LO_SUM)
9355 {
9356 output_operand (XEXP (addr, 0), 0);
9357 if (TARGET_CM_MEDMID)
9358 fputs ("+%l44(", file);
9359 else
9360 fputs ("+%lo(", file);
9361 output_address (VOIDmode, XEXP (addr, 1));
9362 fputc (')', file);
9363 }
9364 else if (flag_pic
9365 && GET_CODE (addr) == CONST
9366 && GET_CODE (XEXP (addr, 0)) == MINUS
9367 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9368 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9369 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9370 {
9371 addr = XEXP (addr, 0);
9372 output_addr_const (file, XEXP (addr, 0));
9373 /* Group the args of the second CONST in parentheses. */
9374 fputs ("-(", file);
9375 /* Skip past the second CONST--it does nothing for us. */
9376 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9377 /* Close the parenthesis. */
9378 fputc (')', file);
9379 }
9380 else
9381 {
9382 output_addr_const (file, addr);
9383 }
9384 }
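
/* For illustration: an address of the form (plus (reg %fp) (const_int -8))
   is printed as "%fp-8", which the MEM case of sparc_print_operand wraps
   into "[%fp-8]"; a register-plus-register address such as
   (plus (reg %g1) (reg %g2)) comes out as "%g1+%g2".  */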
9385 \f
9386 /* Target hook for assembling integer objects. The sparc version has
9387 special handling for aligned DI-mode objects. */
9388
9389 static bool
9390 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9391 {
9392 /* ??? We only output .xword's for symbols and only then in environments
9393 where the assembler can handle them. */
9394 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9395 {
9396 if (TARGET_V9)
9397 {
9398 assemble_integer_with_op ("\t.xword\t", x);
9399 return true;
9400 }
9401 else
9402 {
9403 assemble_aligned_integer (4, const0_rtx);
9404 assemble_aligned_integer (4, x);
9405 return true;
9406 }
9407 }
9408 return default_assemble_integer (x, size, aligned_p);
9409 }
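
/* For example, an aligned 8-byte object initialized with the address of a
   symbol "foo" (a hypothetical name) takes the V9 path above and is emitted
   as

         .xword  foo

   whereas the non-V9 path splits it into two aligned 4-byte pieces, a zero
   word followed by the symbol, so the 64-bit slot holds the 32-bit address
   zero-extended (big-endian, high word first).  */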
9410 \f
9411 /* Return the value of a code used in the .proc pseudo-op that says
9412 what kind of result this function returns. For non-C types, we pick
9413 the closest C type. */
9414
9415 #ifndef SHORT_TYPE_SIZE
9416 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9417 #endif
9418
9419 #ifndef INT_TYPE_SIZE
9420 #define INT_TYPE_SIZE BITS_PER_WORD
9421 #endif
9422
9423 #ifndef LONG_TYPE_SIZE
9424 #define LONG_TYPE_SIZE BITS_PER_WORD
9425 #endif
9426
9427 #ifndef LONG_LONG_TYPE_SIZE
9428 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9429 #endif
9430
9431 #ifndef FLOAT_TYPE_SIZE
9432 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9433 #endif
9434
9435 #ifndef DOUBLE_TYPE_SIZE
9436 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9437 #endif
9438
9439 #ifndef LONG_DOUBLE_TYPE_SIZE
9440 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9441 #endif
9442
9443 unsigned long
9444 sparc_type_code (register tree type)
9445 {
9446 register unsigned long qualifiers = 0;
9447 register unsigned shift;
9448
9449 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9450 setting more, since some assemblers will give an error for this. Also,
9451 we must be careful to avoid shifts of 32 bits or more to avoid getting
9452 unpredictable results. */
9453
9454 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9455 {
9456 switch (TREE_CODE (type))
9457 {
9458 case ERROR_MARK:
9459 return qualifiers;
9460
9461 case ARRAY_TYPE:
9462 qualifiers |= (3 << shift);
9463 break;
9464
9465 case FUNCTION_TYPE:
9466 case METHOD_TYPE:
9467 qualifiers |= (2 << shift);
9468 break;
9469
9470 case POINTER_TYPE:
9471 case REFERENCE_TYPE:
9472 case OFFSET_TYPE:
9473 qualifiers |= (1 << shift);
9474 break;
9475
9476 case RECORD_TYPE:
9477 return (qualifiers | 8);
9478
9479 case UNION_TYPE:
9480 case QUAL_UNION_TYPE:
9481 return (qualifiers | 9);
9482
9483 case ENUMERAL_TYPE:
9484 return (qualifiers | 10);
9485
9486 case VOID_TYPE:
9487 return (qualifiers | 16);
9488
9489 case INTEGER_TYPE:
9490 /* If this is a range type, consider it to be the underlying
9491 type. */
9492 if (TREE_TYPE (type) != 0)
9493 break;
9494
9495 /* Carefully distinguish all the standard types of C,
9496 without messing up if the language is not C. We do this by
9497 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9498 look at both the names and the above fields, but that's redundant.
9499 Any type whose size is between two C types will be considered
9500 to be the wider of the two types. Also, we do not have a
9501 special code to use for "long long", so anything wider than
9502 long is treated the same. Note that we can't distinguish
9503 between "int" and "long" in this code if they are the same
9504 size, but that's fine, since neither can the assembler. */
9505
9506 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9507 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9508
9509 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9510 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9511
9512 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9513 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9514
9515 else
9516 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9517
9518 case REAL_TYPE:
9519 /* If this is a range type, consider it to be the underlying
9520 type. */
9521 if (TREE_TYPE (type) != 0)
9522 break;
9523
9524 /* Carefully distinguish all the standard types of C,
9525 without messing up if the language is not C. */
9526
9527 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9528 return (qualifiers | 6);
9529
9530 else
9531 return (qualifiers | 7);
9532
9533 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9534 /* ??? We need to distinguish between double and float complex types,
9535 but I don't know how yet because I can't reach this code from
9536 existing front-ends. */
9537 return (qualifiers | 7); /* Who knows? */
9538
9539 case VECTOR_TYPE:
9540 case BOOLEAN_TYPE: /* Boolean truth value type. */
9541 case LANG_TYPE:
9542 case NULLPTR_TYPE:
9543 return qualifiers;
9544
9545 default:
9546 gcc_unreachable (); /* Not a type! */
9547 }
9548 }
9549
9550 return qualifiers;
9551 }
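
/* Worked example, assuming the usual 32-bit int and 16-bit short: for a
   declaration "int **p", the first two iterations see POINTER_TYPE and set
   the 2-bit code 1 at shifts 6 and 8, and the third iteration reaches
   INTEGER_TYPE with TYPE_PRECISION equal to INT_TYPE_SIZE, returning base
   code 4, so the result is (1 << 6) | (1 << 8) | 4 == 0x144.  Similarly
   "unsigned short *" yields (1 << 6) | 13 == 0x4d.  */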
9552 \f
9553 /* Nested function support. */
9554
9555 /* Emit RTL insns to initialize the variable parts of a trampoline.
9556 FNADDR is an RTX for the address of the function's pure code.
9557 CXT is an RTX for the static chain value for the function.
9558
9559 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9560 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9561 (to store insns). This is a bit excessive. Perhaps a different
9562 mechanism would be better here.
9563
9564 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9565
9566 static void
9567 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9568 {
9569 /* SPARC 32-bit trampoline:
9570
9571 sethi %hi(fn), %g1
9572 sethi %hi(static), %g2
9573 jmp %g1+%lo(fn)
9574 or %g2, %lo(static), %g2
9575
9576 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9577 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9578 */
9579
9580 emit_move_insn
9581 (adjust_address (m_tramp, SImode, 0),
9582 expand_binop (SImode, ior_optab,
9583 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9584 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9585 NULL_RTX, 1, OPTAB_DIRECT));
9586
9587 emit_move_insn
9588 (adjust_address (m_tramp, SImode, 4),
9589 expand_binop (SImode, ior_optab,
9590 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9591 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9592 NULL_RTX, 1, OPTAB_DIRECT));
9593
9594 emit_move_insn
9595 (adjust_address (m_tramp, SImode, 8),
9596 expand_binop (SImode, ior_optab,
9597 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9598 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9599 NULL_RTX, 1, OPTAB_DIRECT));
9600
9601 emit_move_insn
9602 (adjust_address (m_tramp, SImode, 12),
9603 expand_binop (SImode, ior_optab,
9604 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9605 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9606 NULL_RTX, 1, OPTAB_DIRECT));
9607
9608 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9609 aligned on a 16-byte boundary so one flush clears it all. */
9610 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9611 if (sparc_cpu != PROCESSOR_ULTRASPARC
9612 && sparc_cpu != PROCESSOR_ULTRASPARC3
9613 && sparc_cpu != PROCESSOR_NIAGARA
9614 && sparc_cpu != PROCESSOR_NIAGARA2
9615 && sparc_cpu != PROCESSOR_NIAGARA3
9616 && sparc_cpu != PROCESSOR_NIAGARA4
9617 && sparc_cpu != PROCESSOR_NIAGARA7
9618 && sparc_cpu != PROCESSOR_M8)
9619 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9620
9621 /* Call __enable_execute_stack after writing onto the stack to make sure
9622 the stack address is accessible. */
9623 #ifdef HAVE_ENABLE_EXECUTE_STACK
9624 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9625 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
9626 #endif
9627
9628 }
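
/* Spelling out the words assembled above against the layout comment: the
   word at offset 0 is 0x03000000 | (fn >> 10), i.e. "sethi %hi(fn), %g1"
   (op=00, rd=%g1, op2=100, imm22 = fn >> 10); offset 4 is
   0x05000000 | (cxt >> 10), i.e. "sethi %hi(static), %g2"; offset 8 is
   0x81c06000 | (fn & 0x3ff), i.e. "jmp %g1+%lo(fn)"; and offset 12 is
   0x8410a000 | (cxt & 0x3ff), i.e. "or %g2, %lo(static), %g2".  */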
9629
9630 /* The 64-bit version is simpler because it makes more sense to load the
9631 values as "immediate" data out of the trampoline. It's also easier since
9632 we can read the PC without clobbering a register. */
9633
9634 static void
9635 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9636 {
9637 /* SPARC 64-bit trampoline:
9638
9639 rd %pc, %g1
9640 ldx [%g1+24], %g5
9641 jmp %g5
9642 ldx [%g1+16], %g5
9643 +16 bytes data
9644 */
9645
9646 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9647 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9648 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9649 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
9650 emit_move_insn (adjust_address (m_tramp, SImode, 8),
9651 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
9652 emit_move_insn (adjust_address (m_tramp, SImode, 12),
9653 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
9654 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
9655 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
9656 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
9657
9658 if (sparc_cpu != PROCESSOR_ULTRASPARC
9659 && sparc_cpu != PROCESSOR_ULTRASPARC3
9660 && sparc_cpu != PROCESSOR_NIAGARA
9661 && sparc_cpu != PROCESSOR_NIAGARA2
9662 && sparc_cpu != PROCESSOR_NIAGARA3
9663 && sparc_cpu != PROCESSOR_NIAGARA4
9664 && sparc_cpu != PROCESSOR_NIAGARA7
9665 && sparc_cpu != PROCESSOR_M8)
9666 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
9667
9668 /* Call __enable_execute_stack after writing onto the stack to make sure
9669 the stack address is accessible. */
9670 #ifdef HAVE_ENABLE_EXECUTE_STACK
9671 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9672 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
9673 #endif
9674 }
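
/* The four constant words above decode as the instructions listed in the
   layout comment: 0x83414000 is "rd %pc, %g1", 0xca586018 is
   "ldx [%g1+24], %g5", 0x81c14000 is "jmp %g5" and 0xca586010 is
   "ldx [%g1+16], %g5".  The static chain and the function address are the
   16 bytes of data stored at offsets 16 and 24, which the two loads pick
   up at run time.  */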
9675
9676 /* Worker for TARGET_TRAMPOLINE_INIT. */
9677
9678 static void
9679 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9680 {
9681 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
9682 cxt = force_reg (Pmode, cxt);
9683 if (TARGET_ARCH64)
9684 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
9685 else
9686 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
9687 }
9688 \f
9689 /* Adjust the cost of a scheduling dependency. Return the new cost of
9690 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
9691
9692 static int
9693 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
9694 int cost)
9695 {
9696 enum attr_type insn_type;
9697
9698 if (recog_memoized (insn) < 0)
9699 return cost;
9700
9701 insn_type = get_attr_type (insn);
9702
9703 if (dep_type == 0)
9704 {
9705 /* Data dependency; DEP_INSN writes a register that INSN reads some
9706 cycles later. */
9707
9708 /* If a load, then the dependence must be on the memory address;
9709 add an extra "cycle". Note that the cost could be two cycles
9710 if the reg was written late in an instruction group; we cannot tell
9711 here. */
9712 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
9713 return cost + 3;
9714
9715 /* Get the delay only if the address of the store is the dependence. */
9716 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
9717 {
9718 rtx pat = PATTERN(insn);
9719 rtx dep_pat = PATTERN (dep_insn);
9720
9721 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
9722 return cost; /* This should not happen! */
9723
9724 /* The dependency between the two instructions was on the data that
9725 is being stored. Assume that this implies that the address of the
9726 store is not dependent. */
9727 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
9728 return cost;
9729
9730 return cost + 3; /* An approximation. */
9731 }
9732
9733 /* A shift instruction cannot receive its data from an instruction
9734 in the same cycle; add a one cycle penalty. */
9735 if (insn_type == TYPE_SHIFT)
9736 return cost + 3; /* Split before cascade into shift. */
9737 }
9738 else
9739 {
9740 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
9741 INSN writes some cycles later. */
9742
9743 /* These are only significant for the fpu unit; writing an fp reg before
9744 the fpu has finished with it stalls the processor. */
9745
9746 /* Reusing an integer register causes no problems. */
9747 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
9748 return 0;
9749 }
9750
9751 return cost;
9752 }
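
/* As a concrete reading of the rules above, assuming the scheduler's
   incoming COST is 1: a load whose address depends on the immediately
   preceding instruction is charged 1 + 3 = 4; a store whose data (rather
   than its address) comes from the dependency keeps the cost of 1; and an
   anti- or output-dependency on an integer ALU or shift instruction is
   treated as free (cost 0).  */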
9753
9754 static int
9755 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
9756 int cost)
9757 {
9758 enum attr_type insn_type, dep_type;
9759 rtx pat = PATTERN(insn);
9760 rtx dep_pat = PATTERN (dep_insn);
9761
9762 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
9763 return cost;
9764
9765 insn_type = get_attr_type (insn);
9766 dep_type = get_attr_type (dep_insn);
9767
9768 switch (dtype)
9769 {
9770 case 0:
9771 /* Data dependency; DEP_INSN writes a register that INSN reads some
9772 cycles later. */
9773
9774 switch (insn_type)
9775 {
9776 case TYPE_STORE:
9777 case TYPE_FPSTORE:
9778 /* Get the delay iff the address of the store is the dependence. */
9779 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
9780 return cost;
9781
9782 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
9783 return cost;
9784 return cost + 3;
9785
9786 case TYPE_LOAD:
9787 case TYPE_SLOAD:
9788 case TYPE_FPLOAD:
9789 /* If a load, then the dependence must be on the memory address. If
9790 the addresses aren't equal, then it might be a false dependency. */
9791 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
9792 {
9793 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
9794 || GET_CODE (SET_DEST (dep_pat)) != MEM
9795 || GET_CODE (SET_SRC (pat)) != MEM
9796 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
9797 XEXP (SET_SRC (pat), 0)))
9798 return cost + 2;
9799
9800 return cost + 8;
9801 }
9802 break;
9803
9804 case TYPE_BRANCH:
9805 /* Compare to branch latency is 0. There is no benefit from
9806 separating compare and branch. */
9807 if (dep_type == TYPE_COMPARE)
9808 return 0;
9809 /* Floating point compare to branch latency is less than
9810 compare to conditional move. */
9811 if (dep_type == TYPE_FPCMP)
9812 return cost - 1;
9813 break;
9814 default:
9815 break;
9816 }
9817 break;
9818
9819 case REG_DEP_ANTI:
9820 /* Anti-dependencies only penalize the fpu unit. */
9821 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
9822 return 0;
9823 break;
9824
9825 default:
9826 break;
9827 }
9828
9829 return cost;
9830 }
9831
9832 static int
9833 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
9834 unsigned int)
9835 {
9836 switch (sparc_cpu)
9837 {
9838 case PROCESSOR_SUPERSPARC:
9839 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
9840 break;
9841 case PROCESSOR_HYPERSPARC:
9842 case PROCESSOR_SPARCLITE86X:
9843 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
9844 break;
9845 default:
9846 break;
9847 }
9848 return cost;
9849 }
9850
9851 static void
9852 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
9853 int sched_verbose ATTRIBUTE_UNUSED,
9854 int max_ready ATTRIBUTE_UNUSED)
9855 {}
9856
9857 static int
9858 sparc_use_sched_lookahead (void)
9859 {
9860 if (sparc_cpu == PROCESSOR_NIAGARA
9861 || sparc_cpu == PROCESSOR_NIAGARA2
9862 || sparc_cpu == PROCESSOR_NIAGARA3)
9863 return 0;
9864 if (sparc_cpu == PROCESSOR_NIAGARA4
9865 || sparc_cpu == PROCESSOR_NIAGARA7
9866 || sparc_cpu == PROCESSOR_M8)
9867 return 2;
9868 if (sparc_cpu == PROCESSOR_ULTRASPARC
9869 || sparc_cpu == PROCESSOR_ULTRASPARC3)
9870 return 4;
9871 if ((1 << sparc_cpu) &
9872 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
9873 (1 << PROCESSOR_SPARCLITE86X)))
9874 return 3;
9875 return 0;
9876 }
9877
9878 static int
9879 sparc_issue_rate (void)
9880 {
9881 switch (sparc_cpu)
9882 {
9883 case PROCESSOR_NIAGARA:
9884 case PROCESSOR_NIAGARA2:
9885 case PROCESSOR_NIAGARA3:
9886 default:
9887 return 1;
9888 case PROCESSOR_NIAGARA4:
9889 case PROCESSOR_NIAGARA7:
9890 case PROCESSOR_V9:
9891 /* Assume V9 processors are capable of at least dual-issue. */
9892 return 2;
9893 case PROCESSOR_SUPERSPARC:
9894 return 3;
9895 case PROCESSOR_HYPERSPARC:
9896 case PROCESSOR_SPARCLITE86X:
9897 return 2;
9898 case PROCESSOR_ULTRASPARC:
9899 case PROCESSOR_ULTRASPARC3:
9900 case PROCESSOR_M8:
9901 return 4;
9902 }
9903 }
9904
9905 static int
9906 set_extends (rtx_insn *insn)
9907 {
9908 register rtx pat = PATTERN (insn);
9909
9910 switch (GET_CODE (SET_SRC (pat)))
9911 {
9912 /* Load and some shift instructions zero extend. */
9913 case MEM:
9914 case ZERO_EXTEND:
9915 /* sethi clears the high bits */
9916 case HIGH:
9917 /* LO_SUM is used with sethi. sethi cleared the high
9918 bits and the values used with lo_sum are positive. */
9919 case LO_SUM:
9920 /* Store flag stores 0 or 1 */
9921 case LT: case LTU:
9922 case GT: case GTU:
9923 case LE: case LEU:
9924 case GE: case GEU:
9925 case EQ:
9926 case NE:
9927 return 1;
9928 case AND:
9929 {
9930 rtx op0 = XEXP (SET_SRC (pat), 0);
9931 rtx op1 = XEXP (SET_SRC (pat), 1);
9932 if (GET_CODE (op1) == CONST_INT)
9933 return INTVAL (op1) >= 0;
9934 if (GET_CODE (op0) != REG)
9935 return 0;
9936 if (sparc_check_64 (op0, insn) == 1)
9937 return 1;
9938 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9939 }
9940 case IOR:
9941 case XOR:
9942 {
9943 rtx op0 = XEXP (SET_SRC (pat), 0);
9944 rtx op1 = XEXP (SET_SRC (pat), 1);
9945 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
9946 return 0;
9947 if (GET_CODE (op1) == CONST_INT)
9948 return INTVAL (op1) >= 0;
9949 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9950 }
9951 case LSHIFTRT:
9952 return GET_MODE (SET_SRC (pat)) == SImode;
9953 /* Positive integers leave the high bits zero. */
9954 case CONST_INT:
9955 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
9956 case ASHIFTRT:
9957 case SIGN_EXTEND:
9958 return - (GET_MODE (SET_SRC (pat)) == SImode);
9959 case REG:
9960 return sparc_check_64 (SET_SRC (pat), insn);
9961 default:
9962 return 0;
9963 }
9964 }
9965
9966 /* We _ought_ to have only one kind per function, but... */
9967 static GTY(()) rtx sparc_addr_diff_list;
9968 static GTY(()) rtx sparc_addr_list;
9969
9970 void
9971 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
9972 {
9973 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9974 if (diff)
9975 sparc_addr_diff_list
9976 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
9977 else
9978 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
9979 }
9980
9981 static void
9982 sparc_output_addr_vec (rtx vec)
9983 {
9984 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9985 int idx, vlen = XVECLEN (body, 0);
9986
9987 #ifdef ASM_OUTPUT_ADDR_VEC_START
9988 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9989 #endif
9990
9991 #ifdef ASM_OUTPUT_CASE_LABEL
9992 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9993 NEXT_INSN (lab));
9994 #else
9995 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9996 #endif
9997
9998 for (idx = 0; idx < vlen; idx++)
9999 {
10000 ASM_OUTPUT_ADDR_VEC_ELT
10001 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10002 }
10003
10004 #ifdef ASM_OUTPUT_ADDR_VEC_END
10005 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10006 #endif
10007 }
10008
10009 static void
10010 sparc_output_addr_diff_vec (rtx vec)
10011 {
10012 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10013 rtx base = XEXP (XEXP (body, 0), 0);
10014 int idx, vlen = XVECLEN (body, 1);
10015
10016 #ifdef ASM_OUTPUT_ADDR_VEC_START
10017 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10018 #endif
10019
10020 #ifdef ASM_OUTPUT_CASE_LABEL
10021 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10022 NEXT_INSN (lab));
10023 #else
10024 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10025 #endif
10026
10027 for (idx = 0; idx < vlen; idx++)
10028 {
10029 ASM_OUTPUT_ADDR_DIFF_ELT
10030 (asm_out_file,
10031 body,
10032 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10033 CODE_LABEL_NUMBER (base));
10034 }
10035
10036 #ifdef ASM_OUTPUT_ADDR_VEC_END
10037 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10038 #endif
10039 }
10040
10041 static void
10042 sparc_output_deferred_case_vectors (void)
10043 {
10044 rtx t;
10045 int align;
10046
10047 if (sparc_addr_list == NULL_RTX
10048 && sparc_addr_diff_list == NULL_RTX)
10049 return;
10050
10051 /* Align to cache line in the function's code section. */
10052 switch_to_section (current_function_section ());
10053
10054 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10055 if (align > 0)
10056 ASM_OUTPUT_ALIGN (asm_out_file, align);
10057
10058 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10059 sparc_output_addr_vec (XEXP (t, 0));
10060 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10061 sparc_output_addr_diff_vec (XEXP (t, 0));
10062
10063 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
10064 }
10065
10066 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10067 unknown. Return 1 if the high bits are zero, -1 if the register is
10068 sign extended. */
10069 int
10070 sparc_check_64 (rtx x, rtx_insn *insn)
10071 {
10072 /* If a register is set only once, it is safe to ignore insns this
10073 code does not know how to handle. The loop will either recognize
10074 the single set and return the correct value or fail to recognize
10075 it and return 0. */
10076 int set_once = 0;
10077 rtx y = x;
10078
10079 gcc_assert (GET_CODE (x) == REG);
10080
10081 if (GET_MODE (x) == DImode)
10082 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10083
10084 if (flag_expensive_optimizations
10085 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10086 set_once = 1;
10087
10088 if (insn == 0)
10089 {
10090 if (set_once)
10091 insn = get_last_insn_anywhere ();
10092 else
10093 return 0;
10094 }
10095
10096 while ((insn = PREV_INSN (insn)))
10097 {
10098 switch (GET_CODE (insn))
10099 {
10100 case JUMP_INSN:
10101 case NOTE:
10102 break;
10103 case CODE_LABEL:
10104 case CALL_INSN:
10105 default:
10106 if (! set_once)
10107 return 0;
10108 break;
10109 case INSN:
10110 {
10111 rtx pat = PATTERN (insn);
10112 if (GET_CODE (pat) != SET)
10113 return 0;
10114 if (rtx_equal_p (x, SET_DEST (pat)))
10115 return set_extends (insn);
10116 if (y && rtx_equal_p (y, SET_DEST (pat)))
10117 return set_extends (insn);
10118 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10119 return 0;
10120 }
10121 }
10122 }
10123 return 0;
10124 }
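
/* Example: if the walk above finds that the only reaching definition of
   the register is "sethi %hi(sym), %reg" (a HIGH source in set_extends),
   it returns 1, meaning the upper 32 bits are known to be zero; if the
   defining SET is an SImode sign extension or arithmetic right shift,
   set_extends yields -1 instead.  */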
10125
10126 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10127 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10128
10129 const char *
10130 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10131 {
10132 static char asm_code[60];
10133
10134 /* The scratch register is only required when the destination
10135 register is not a 64-bit global or out register. */
10136 if (which_alternative != 2)
10137 operands[3] = operands[0];
10138
10139 /* We can only shift by constants <= 63. */
10140 if (GET_CODE (operands[2]) == CONST_INT)
10141 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10142
10143 if (GET_CODE (operands[1]) == CONST_INT)
10144 {
10145 output_asm_insn ("mov\t%1, %3", operands);
10146 }
10147 else
10148 {
10149 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10150 if (sparc_check_64 (operands[1], insn) <= 0)
10151 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10152 output_asm_insn ("or\t%L1, %3, %3", operands);
10153 }
10154
10155 strcpy (asm_code, opcode);
10156
10157 if (which_alternative != 2)
10158 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10159 else
10160 return
10161 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
10162 }
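
/* For instance, with OPCODE "sllx" and a register source, the code above
   first glues the two 32-bit halves of operand 1 into one 64-bit value:

         sllx    %H1, 32, %3
         srl     %L1, 0, %L1     ! skipped if the low word is known zero-extended
         or      %L1, %3, %3

   and then does the wide shift in a single instruction before splitting
   the result back into the high and low words of operand 0, e.g. for the
   alternative with the separate scratch register:

         sllx    %3, %2, %3
         srlx    %3, 32, %H0
         mov     %3, %L0
*/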
10163 \f
10164 /* Output rtl to increment the profiler label LABELNO
10165 for profiling a function entry. */
10166
10167 void
10168 sparc_profile_hook (int labelno)
10169 {
10170 char buf[32];
10171 rtx lab, fun;
10172
10173 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10174 if (NO_PROFILE_COUNTERS)
10175 {
10176 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
10177 }
10178 else
10179 {
10180 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10181 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10182 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
10183 }
10184 }
10185 \f
10186 #ifdef TARGET_SOLARIS
10187 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10188
10189 static void
10190 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10191 tree decl ATTRIBUTE_UNUSED)
10192 {
10193 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10194 {
10195 solaris_elf_asm_comdat_section (name, flags, decl);
10196 return;
10197 }
10198
10199 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10200
10201 if (!(flags & SECTION_DEBUG))
10202 fputs (",#alloc", asm_out_file);
10203 if (flags & SECTION_WRITE)
10204 fputs (",#write", asm_out_file);
10205 if (flags & SECTION_TLS)
10206 fputs (",#tls", asm_out_file);
10207 if (flags & SECTION_CODE)
10208 fputs (",#execinstr", asm_out_file);
10209
10210 if (flags & SECTION_NOTYPE)
10211 ;
10212 else if (flags & SECTION_BSS)
10213 fputs (",#nobits", asm_out_file);
10214 else
10215 fputs (",#progbits", asm_out_file);
10216
10217 fputc ('\n', asm_out_file);
10218 }
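
/* For example, a writable data section named ".mydata" (a hypothetical
   name), with SECTION_WRITE set and neither SECTION_DEBUG nor SECTION_BSS,
   is emitted as

         .section        ".mydata",#alloc,#write,#progbits

   using the Solaris assembler's #-style section flags rather than the
   usual "aw",@progbits GNU syntax.  */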
10219 #endif /* TARGET_SOLARIS */
10220
10221 /* We do not allow indirect calls to be optimized into sibling calls.
10222
10223 We cannot use sibling calls when delayed branches are disabled
10224 because they will likely require the call delay slot to be filled.
10225
10226 Also, on SPARC 32-bit we cannot emit a sibling call when the
10227 current function returns a structure. This is because the "unimp
10228 after call" convention would cause the callee to return to the
10229 wrong place. The generic code already disallows cases where the
10230 function being called returns a structure.
10231
10232 It may seem strange how this last case could occur. Usually there
10233 is code after the call which jumps to epilogue code which dumps the
10234 return value into the struct return area. That ought to invalidate
10235 the sibling call right? Well, in the C++ case we can end up passing
10236 the pointer to the struct return area to a constructor (which returns
10237 void) and then nothing else happens. Such a sibling call would look
10238 valid without the added check here.
10239
10240 VxWorks PIC PLT entries require the global pointer to be initialized
10241 on entry. We therefore can't emit sibling calls to them. */
10242 static bool
10243 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10244 {
10245 return (decl
10246 && flag_delayed_branch
10247 && (TARGET_ARCH64 || ! cfun->returns_struct)
10248 && !(TARGET_VXWORKS_RTP
10249 && flag_pic
10250 && !targetm.binds_local_p (decl)));
10251 }
10252 \f
10253 /* libfunc renaming. */
10254
10255 static void
10256 sparc_init_libfuncs (void)
10257 {
10258 if (TARGET_ARCH32)
10259 {
10260 /* Use the subroutines that Sun's library provides for integer
10261 multiply and divide. The `*' prevents an underscore from
10262 being prepended by the compiler. .umul is a little faster
10263 than .mul. */
10264 set_optab_libfunc (smul_optab, SImode, "*.umul");
10265 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10266 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10267 set_optab_libfunc (smod_optab, SImode, "*.rem");
10268 set_optab_libfunc (umod_optab, SImode, "*.urem");
10269
10270 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
10271 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10272 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10273 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10274 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10275 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10276
10277 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10278 is because with soft-float, the SFmode and DFmode sqrt
10279 instructions will be absent, and the compiler will notice and
10280 try to use the TFmode sqrt instruction for calls to the
10281 builtin function sqrt, but this fails. */
10282 if (TARGET_FPU)
10283 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10284
10285 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10286 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10287 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10288 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10289 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10290 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10291
10292 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10293 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10294 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10295 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10296
10297 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10298 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10299 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10300 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10301
10302 if (DITF_CONVERSION_LIBFUNCS)
10303 {
10304 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10305 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10306 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10307 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10308 }
10309
10310 if (SUN_CONVERSION_LIBFUNCS)
10311 {
10312 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10313 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10314 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10315 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10316 }
10317 }
10318 if (TARGET_ARCH64)
10319 {
10320 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
10321 do not exist in the library. Make sure the compiler does not
10322 emit calls to them by accident. (It should always use the
10323 hardware instructions.) */
10324 set_optab_libfunc (smul_optab, SImode, 0);
10325 set_optab_libfunc (sdiv_optab, SImode, 0);
10326 set_optab_libfunc (udiv_optab, SImode, 0);
10327 set_optab_libfunc (smod_optab, SImode, 0);
10328 set_optab_libfunc (umod_optab, SImode, 0);
10329
10330 if (SUN_INTEGER_MULTIPLY_64)
10331 {
10332 set_optab_libfunc (smul_optab, DImode, "__mul64");
10333 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10334 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10335 set_optab_libfunc (smod_optab, DImode, "__rem64");
10336 set_optab_libfunc (umod_optab, DImode, "__urem64");
10337 }
10338
10339 if (SUN_CONVERSION_LIBFUNCS)
10340 {
10341 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10342 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10343 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10344 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
10345 }
10346 }
10347 }
10348 \f
10349 /* SPARC builtins. */
10350 enum sparc_builtins
10351 {
10352 /* FPU builtins. */
10353 SPARC_BUILTIN_LDFSR,
10354 SPARC_BUILTIN_STFSR,
10355
10356 /* VIS 1.0 builtins. */
10357 SPARC_BUILTIN_FPACK16,
10358 SPARC_BUILTIN_FPACK32,
10359 SPARC_BUILTIN_FPACKFIX,
10360 SPARC_BUILTIN_FEXPAND,
10361 SPARC_BUILTIN_FPMERGE,
10362 SPARC_BUILTIN_FMUL8X16,
10363 SPARC_BUILTIN_FMUL8X16AU,
10364 SPARC_BUILTIN_FMUL8X16AL,
10365 SPARC_BUILTIN_FMUL8SUX16,
10366 SPARC_BUILTIN_FMUL8ULX16,
10367 SPARC_BUILTIN_FMULD8SUX16,
10368 SPARC_BUILTIN_FMULD8ULX16,
10369 SPARC_BUILTIN_FALIGNDATAV4HI,
10370 SPARC_BUILTIN_FALIGNDATAV8QI,
10371 SPARC_BUILTIN_FALIGNDATAV2SI,
10372 SPARC_BUILTIN_FALIGNDATADI,
10373 SPARC_BUILTIN_WRGSR,
10374 SPARC_BUILTIN_RDGSR,
10375 SPARC_BUILTIN_ALIGNADDR,
10376 SPARC_BUILTIN_ALIGNADDRL,
10377 SPARC_BUILTIN_PDIST,
10378 SPARC_BUILTIN_EDGE8,
10379 SPARC_BUILTIN_EDGE8L,
10380 SPARC_BUILTIN_EDGE16,
10381 SPARC_BUILTIN_EDGE16L,
10382 SPARC_BUILTIN_EDGE32,
10383 SPARC_BUILTIN_EDGE32L,
10384 SPARC_BUILTIN_FCMPLE16,
10385 SPARC_BUILTIN_FCMPLE32,
10386 SPARC_BUILTIN_FCMPNE16,
10387 SPARC_BUILTIN_FCMPNE32,
10388 SPARC_BUILTIN_FCMPGT16,
10389 SPARC_BUILTIN_FCMPGT32,
10390 SPARC_BUILTIN_FCMPEQ16,
10391 SPARC_BUILTIN_FCMPEQ32,
10392 SPARC_BUILTIN_FPADD16,
10393 SPARC_BUILTIN_FPADD16S,
10394 SPARC_BUILTIN_FPADD32,
10395 SPARC_BUILTIN_FPADD32S,
10396 SPARC_BUILTIN_FPSUB16,
10397 SPARC_BUILTIN_FPSUB16S,
10398 SPARC_BUILTIN_FPSUB32,
10399 SPARC_BUILTIN_FPSUB32S,
10400 SPARC_BUILTIN_ARRAY8,
10401 SPARC_BUILTIN_ARRAY16,
10402 SPARC_BUILTIN_ARRAY32,
10403
10404 /* VIS 2.0 builtins. */
10405 SPARC_BUILTIN_EDGE8N,
10406 SPARC_BUILTIN_EDGE8LN,
10407 SPARC_BUILTIN_EDGE16N,
10408 SPARC_BUILTIN_EDGE16LN,
10409 SPARC_BUILTIN_EDGE32N,
10410 SPARC_BUILTIN_EDGE32LN,
10411 SPARC_BUILTIN_BMASK,
10412 SPARC_BUILTIN_BSHUFFLEV4HI,
10413 SPARC_BUILTIN_BSHUFFLEV8QI,
10414 SPARC_BUILTIN_BSHUFFLEV2SI,
10415 SPARC_BUILTIN_BSHUFFLEDI,
10416
10417 /* VIS 3.0 builtins. */
10418 SPARC_BUILTIN_CMASK8,
10419 SPARC_BUILTIN_CMASK16,
10420 SPARC_BUILTIN_CMASK32,
10421 SPARC_BUILTIN_FCHKSM16,
10422 SPARC_BUILTIN_FSLL16,
10423 SPARC_BUILTIN_FSLAS16,
10424 SPARC_BUILTIN_FSRL16,
10425 SPARC_BUILTIN_FSRA16,
10426 SPARC_BUILTIN_FSLL32,
10427 SPARC_BUILTIN_FSLAS32,
10428 SPARC_BUILTIN_FSRL32,
10429 SPARC_BUILTIN_FSRA32,
10430 SPARC_BUILTIN_PDISTN,
10431 SPARC_BUILTIN_FMEAN16,
10432 SPARC_BUILTIN_FPADD64,
10433 SPARC_BUILTIN_FPSUB64,
10434 SPARC_BUILTIN_FPADDS16,
10435 SPARC_BUILTIN_FPADDS16S,
10436 SPARC_BUILTIN_FPSUBS16,
10437 SPARC_BUILTIN_FPSUBS16S,
10438 SPARC_BUILTIN_FPADDS32,
10439 SPARC_BUILTIN_FPADDS32S,
10440 SPARC_BUILTIN_FPSUBS32,
10441 SPARC_BUILTIN_FPSUBS32S,
10442 SPARC_BUILTIN_FUCMPLE8,
10443 SPARC_BUILTIN_FUCMPNE8,
10444 SPARC_BUILTIN_FUCMPGT8,
10445 SPARC_BUILTIN_FUCMPEQ8,
10446 SPARC_BUILTIN_FHADDS,
10447 SPARC_BUILTIN_FHADDD,
10448 SPARC_BUILTIN_FHSUBS,
10449 SPARC_BUILTIN_FHSUBD,
10450 SPARC_BUILTIN_FNHADDS,
10451 SPARC_BUILTIN_FNHADDD,
10452 SPARC_BUILTIN_UMULXHI,
10453 SPARC_BUILTIN_XMULX,
10454 SPARC_BUILTIN_XMULXHI,
10455
10456 /* VIS 4.0 builtins. */
10457 SPARC_BUILTIN_FPADD8,
10458 SPARC_BUILTIN_FPADDS8,
10459 SPARC_BUILTIN_FPADDUS8,
10460 SPARC_BUILTIN_FPADDUS16,
10461 SPARC_BUILTIN_FPCMPLE8,
10462 SPARC_BUILTIN_FPCMPGT8,
10463 SPARC_BUILTIN_FPCMPULE16,
10464 SPARC_BUILTIN_FPCMPUGT16,
10465 SPARC_BUILTIN_FPCMPULE32,
10466 SPARC_BUILTIN_FPCMPUGT32,
10467 SPARC_BUILTIN_FPMAX8,
10468 SPARC_BUILTIN_FPMAX16,
10469 SPARC_BUILTIN_FPMAX32,
10470 SPARC_BUILTIN_FPMAXU8,
10471 SPARC_BUILTIN_FPMAXU16,
10472 SPARC_BUILTIN_FPMAXU32,
10473 SPARC_BUILTIN_FPMIN8,
10474 SPARC_BUILTIN_FPMIN16,
10475 SPARC_BUILTIN_FPMIN32,
10476 SPARC_BUILTIN_FPMINU8,
10477 SPARC_BUILTIN_FPMINU16,
10478 SPARC_BUILTIN_FPMINU32,
10479 SPARC_BUILTIN_FPSUB8,
10480 SPARC_BUILTIN_FPSUBS8,
10481 SPARC_BUILTIN_FPSUBUS8,
10482 SPARC_BUILTIN_FPSUBUS16,
10483
10484 /* VIS 4.0B builtins. */
10485
10486 /* Note that all the DICTUNPACK* entries should be kept
10487 contiguous. */
10488 SPARC_BUILTIN_FIRST_DICTUNPACK,
10489 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10490 SPARC_BUILTIN_DICTUNPACK16,
10491 SPARC_BUILTIN_DICTUNPACK32,
10492 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10493
10494 /* Note that all the FPCMP*SHL entries should be kept
10495 contiguous. */
10496 SPARC_BUILTIN_FIRST_FPCMPSHL,
10497 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10498 SPARC_BUILTIN_FPCMPGT8SHL,
10499 SPARC_BUILTIN_FPCMPEQ8SHL,
10500 SPARC_BUILTIN_FPCMPNE8SHL,
10501 SPARC_BUILTIN_FPCMPLE16SHL,
10502 SPARC_BUILTIN_FPCMPGT16SHL,
10503 SPARC_BUILTIN_FPCMPEQ16SHL,
10504 SPARC_BUILTIN_FPCMPNE16SHL,
10505 SPARC_BUILTIN_FPCMPLE32SHL,
10506 SPARC_BUILTIN_FPCMPGT32SHL,
10507 SPARC_BUILTIN_FPCMPEQ32SHL,
10508 SPARC_BUILTIN_FPCMPNE32SHL,
10509 SPARC_BUILTIN_FPCMPULE8SHL,
10510 SPARC_BUILTIN_FPCMPUGT8SHL,
10511 SPARC_BUILTIN_FPCMPULE16SHL,
10512 SPARC_BUILTIN_FPCMPUGT16SHL,
10513 SPARC_BUILTIN_FPCMPULE32SHL,
10514 SPARC_BUILTIN_FPCMPUGT32SHL,
10515 SPARC_BUILTIN_FPCMPDE8SHL,
10516 SPARC_BUILTIN_FPCMPDE16SHL,
10517 SPARC_BUILTIN_FPCMPDE32SHL,
10518 SPARC_BUILTIN_FPCMPUR8SHL,
10519 SPARC_BUILTIN_FPCMPUR16SHL,
10520 SPARC_BUILTIN_FPCMPUR32SHL,
10521 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10522
10523 SPARC_BUILTIN_MAX
10524 };
10525
10526 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10527 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10528
10529 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10530 The instruction should require a constant operand of some sort. The
10531 function prints an error if OPVAL is not valid. */
10532
10533 static int
10534 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10535 {
10536 if (GET_CODE (opval) != CONST_INT)
10537 {
10538 error ("%qs expects a constant argument", insn_data[icode].name);
10539 return false;
10540 }
10541
10542 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10543 {
10544 error ("constant argument out of range for %qs", insn_data[icode].name);
10545 return false;
10546 }
10547 return true;
10548 }
10549
10550 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10551 function decl or NULL_TREE if the builtin was not added. */
10552
10553 static tree
10554 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10555 tree type)
10556 {
10557 tree t
10558 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10559
10560 if (t)
10561 {
10562 sparc_builtins[code] = t;
10563 sparc_builtins_icode[code] = icode;
10564 }
10565
10566 return t;
10567 }
10568
10569 /* Likewise, but also marks the function as "const". */
10570
10571 static tree
10572 def_builtin_const (const char *name, enum insn_code icode,
10573 enum sparc_builtins code, tree type)
10574 {
10575 tree t = def_builtin (name, icode, code, type);
10576
10577 if (t)
10578 TREE_READONLY (t) = 1;
10579
10580 return t;
10581 }
10582
10583 /* Implement the TARGET_INIT_BUILTINS target hook.
10584 Create builtin functions for special SPARC instructions. */
10585
10586 static void
10587 sparc_init_builtins (void)
10588 {
10589 if (TARGET_FPU)
10590 sparc_fpu_init_builtins ();
10591
10592 if (TARGET_VIS)
10593 sparc_vis_init_builtins ();
10594 }
10595
10596 /* Create builtin functions for FPU instructions. */
10597
10598 static void
10599 sparc_fpu_init_builtins (void)
10600 {
10601 tree ftype
10602 = build_function_type_list (void_type_node,
10603 build_pointer_type (unsigned_type_node), 0);
10604 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10605 SPARC_BUILTIN_LDFSR, ftype);
10606 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10607 SPARC_BUILTIN_STFSR, ftype);
10608 }
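
/* A minimal usage sketch for the two builtins above (user-level code with
   a hypothetical variable name): both take a pointer to an unsigned int
   holding an image of the %fsr register, so a read-modify-write of the
   FPU state looks like

         unsigned int fsr;
         __builtin_store_fsr (&fsr);    -- emits "st %fsr, [addr]"
         fsr &= ~0x1f;                  -- illustrative mask only
         __builtin_load_fsr (&fsr);     -- emits "ld [addr], %fsr"

   with the exact bit layout of %fsr defined by the SPARC architecture
   manual, not by this file.  */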
10609
10610 /* Create builtin functions for VIS instructions. */
10611
10612 static void
10613 sparc_vis_init_builtins (void)
10614 {
10615 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
10616 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
10617 tree v4hi = build_vector_type (intHI_type_node, 4);
10618 tree v2hi = build_vector_type (intHI_type_node, 2);
10619 tree v2si = build_vector_type (intSI_type_node, 2);
10620 tree v1si = build_vector_type (intSI_type_node, 1);
10621
10622 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
10623 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
10624 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
10625 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
10626 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
10627 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
10628 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
10629 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
10630 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
10631 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
10632 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
10633 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
10634 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
10635 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
10636 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
10637 v8qi, v8qi,
10638 intDI_type_node, 0);
10639 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
10640 v8qi, v8qi, 0);
10641 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
10642 v8qi, v8qi, 0);
10643 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
10644 intSI_type_node, 0);
10645 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
10646 intSI_type_node, 0);
10647 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
10648 intDI_type_node, 0);
10649 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
10650 intDI_type_node,
10651 intDI_type_node, 0);
10652 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
10653 intSI_type_node,
10654 intSI_type_node, 0);
10655 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
10656 ptr_type_node,
10657 intSI_type_node, 0);
10658 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
10659 ptr_type_node,
10660 intDI_type_node, 0);
10661 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
10662 ptr_type_node,
10663 ptr_type_node, 0);
10664 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
10665 ptr_type_node,
10666 ptr_type_node, 0);
10667 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
10668 v4hi, v4hi, 0);
10669 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
10670 v2si, v2si, 0);
10671 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
10672 v4hi, v4hi, 0);
10673 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
10674 v2si, v2si, 0);
10675 tree void_ftype_di = build_function_type_list (void_type_node,
10676 intDI_type_node, 0);
10677 tree di_ftype_void = build_function_type_list (intDI_type_node,
10678 void_type_node, 0);
10679 tree void_ftype_si = build_function_type_list (void_type_node,
10680 intSI_type_node, 0);
10681 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
10682 float_type_node,
10683 float_type_node, 0);
10684 tree df_ftype_df_df = build_function_type_list (double_type_node,
10685 double_type_node,
10686 double_type_node, 0);
10687
10688 /* Packing and expanding vectors. */
10689 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
10690 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
10691 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
10692 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
10693 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
10694 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
10695 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
10696 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
10697 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
10698 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
10699
10700 /* Multiplications. */
10701 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
10702 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
10703 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
10704 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
10705 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
10706 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
10707 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
10708 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
10709 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
10710 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
10711 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
10712 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
10713 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
10714 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
10715
10716 /* Data aligning. */
10717 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
10718 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
10719 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
10720 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
10721 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
10722 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
10723 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
10724 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
10725
10726 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
10727 SPARC_BUILTIN_WRGSR, void_ftype_di);
10728 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
10729 SPARC_BUILTIN_RDGSR, di_ftype_void);
10730
10731 if (TARGET_ARCH64)
10732 {
10733 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
10734 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
10735 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
10736 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
10737 }
10738 else
10739 {
10740 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
10741 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
10742 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
10743 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
10744 }
10745
10746 /* Pixel distance. */
10747 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
10748 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
10749
10750 /* Edge handling. */
10751 if (TARGET_ARCH64)
10752 {
10753 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
10754 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
10755 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
10756 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
10757 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
10758 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
10759 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
10760 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
10761 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
10762 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
10763 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
10764 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
10765 }
10766 else
10767 {
10768 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
10769 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
10770 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
10771 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
10772 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
10773 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
10774 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
10775 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
10776 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
10777 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
10778 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
10779 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
10780 }
10781
10782 /* Pixel compare. */
10783 if (TARGET_ARCH64)
10784 {
10785 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
10786 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
10787 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
10788 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
10789 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
10790 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
10791 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
10792 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
10793 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
10794 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
10795 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
10796 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
10797 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
10798 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
10799 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
10800 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
10801 }
10802 else
10803 {
10804 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
10805 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
10806 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
10807 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
10808 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
10809 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
10810 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
10811 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
10812 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
10813 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
10814 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
10815 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
10816 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
10817 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
10818 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
10819 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
10820 }
10821
10822 /* Addition and subtraction. */
10823 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
10824 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
10825 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
10826 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
10827 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
10828 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
10829 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
10830 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
10831 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
10832 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
10833 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
10834 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
10835 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
10836 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
10837 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
10838 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
10839
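/* Usage sketch for the arithmetic builtins above (user-level code; the
   vector typedefs follow the ones used in the GCC documentation for VIS,
   e.g. "typedef short v4hi __attribute__ ((vector_size (8)));"):

         v4hi
         add4 (v4hi a, v4hi b)
         {
           return __builtin_vis_fpadd16 (a, b);
         }

   compiles to a single fpadd16 instruction when VIS is enabled (-mvis).  */
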
10840 /* Three-dimensional array addressing. */
10841 if (TARGET_ARCH64)
10842 {
10843 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
10844 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
10845 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
10846 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
10847 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
10848 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
10849 }
10850 else
10851 {
10852 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
10853 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
10854 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
10855 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
10856 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
10857 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
10858 }
10859
10860 if (TARGET_VIS2)
10861 {
10862 /* Edge handling. */
10863 if (TARGET_ARCH64)
10864 {
10865 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
10866 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
10867 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
10868 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
10869 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
10870 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
10871 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
10872 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
10873 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
10874 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
10875 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
10876 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
10877 }
10878 else
10879 {
10880 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
10881 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
10882 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
10883 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
10884 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
10885 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
10886 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
10887 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
10888 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
10889 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
10890 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
10891 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
10892 }
10893
10894 /* Byte mask and shuffle. */
10895 if (TARGET_ARCH64)
10896 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
10897 SPARC_BUILTIN_BMASK, di_ftype_di_di);
10898 else
10899 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
10900 SPARC_BUILTIN_BMASK, si_ftype_si_si);
10901 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
10902 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
10903 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
10904 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
10905 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
10906 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
10907 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
10908 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
10909 }
10910
10911 if (TARGET_VIS3)
10912 {
10913 if (TARGET_ARCH64)
10914 {
10915 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
10916 SPARC_BUILTIN_CMASK8, void_ftype_di);
10917 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
10918 SPARC_BUILTIN_CMASK16, void_ftype_di);
10919 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
10920 SPARC_BUILTIN_CMASK32, void_ftype_di);
10921 }
10922 else
10923 {
10924 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
10925 SPARC_BUILTIN_CMASK8, void_ftype_si);
10926 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
10927 SPARC_BUILTIN_CMASK16, void_ftype_si);
10928 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
10929 SPARC_BUILTIN_CMASK32, void_ftype_si);
10930 }
10931
10932 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
10933 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
10934
10935 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
10936 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
10937 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
10938 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
10939 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
10940 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
10941 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
10942 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
10943 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
10944 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
10945 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
10946 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
10947 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
10948 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
10949 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
10950 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
10951
10952 if (TARGET_ARCH64)
10953 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
10954 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
10955 else
10956 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
10957 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
10958
10959 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
10960 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
10961 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
10962 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
10963 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
10964 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
10965
10966 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
10967 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
10968 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
10969 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
10970 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
10971 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
10972 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
10973 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
10974 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
10975 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
10976 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
10977 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
10978 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
10979 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
10980 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
10981 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
10982
10983 if (TARGET_ARCH64)
10984 {
10985 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
10986 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
10987 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
10988 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
10989 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
10990 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
10991 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
10992 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
10993 }
10994 else
10995 {
10996 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
10997 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
10998 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
10999 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11000 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11001 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11002 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11003 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11004 }
11005
11006 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11007 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11008 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11009 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11010 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11011 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11012 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11013 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11014 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11015 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11016 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11017 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11018
11019 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11020 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11021 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11022 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11023 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11024 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11025 }
11026
11027 if (TARGET_VIS4)
11028 {
11029 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11030 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11031 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11032 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11033 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11034 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11035 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11036 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11037
11038
11039 if (TARGET_ARCH64)
11040 {
11041 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11042 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11043 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11044 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11045 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11046 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11047 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11048 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11049 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11050 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11051 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11052 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11053 }
11054 else
11055 {
11056 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11057 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11058 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11059 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11060 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11061 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11062 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11063 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11064 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11065 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11066 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11067 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11068 }
11069
11070 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11071 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11072 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11073 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11074 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11075 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11076 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11077 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11078 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11079 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11080 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11081 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11082 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11083 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11084 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11085 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11086 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11087 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11088 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11089 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11090 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11091 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11092 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11093 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11094 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11095 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11096 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11097 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11098 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11099 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11100 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11101 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11102 }
11103
11104 if (TARGET_VIS4B)
11105 {
11106 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11107 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11108 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11109 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11110 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11111 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11112
11113 if (TARGET_ARCH64)
11114 {
11115 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11116 v8qi, v8qi,
11117 intSI_type_node, 0);
11118 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11119 v4hi, v4hi,
11120 intSI_type_node, 0);
11121 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11122 v2si, v2si,
11123 intSI_type_node, 0);
11124
11125 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11126 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11127 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11128 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11129 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11130 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11131 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11132 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11133
11134 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11135 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11136 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11137 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11138 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11139 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11140 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11141 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11142
11143 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11144 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11145 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11146 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11147 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11148 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11149 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11150 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11151
11152
11153 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11154 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11155 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11156 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11157
11158 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11159 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11160 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11161 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11162
11163 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11164 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11165 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11166 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11167
11168 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11169 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11170 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11171 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11172 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11173 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11174
11175 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11176 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11177 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11178 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11179 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11180 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11181
11182 }
11183 else
11184 {
11185 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11186 v8qi, v8qi,
11187 intSI_type_node, 0);
11188 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11189 v4hi, v4hi,
11190 intSI_type_node, 0);
11191 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11192 v2si, v2si,
11193 intSI_type_node, 0);
11194
11195 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11196 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11197 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11198 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11199 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11200 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11201 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11202 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11203
11204 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11205 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11206 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11207 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11208 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11209 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11210 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11211 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11212
11213 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11214 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11215 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11216 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11217 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11218 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11219 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11220 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11221
11222
11223 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11224 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11225 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11226 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11227
11228 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11229 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11230 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11231 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11232
11233 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11234 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11235 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11236 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11237
11238 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11239 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11240 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11241 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11242 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11243 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11244
11245 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11246 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11247 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11248 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11249 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11250 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
11251 }
11252 }
11253 }
11254
11255 /* Implement TARGET_BUILTIN_DECL hook. */
11256
11257 static tree
11258 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11259 {
11260 if (code >= SPARC_BUILTIN_MAX)
11261 return error_mark_node;
11262
11263 return sparc_builtins[code];
11264 }
11265
11266 /* Implement TARGET_EXPAND_BUILTIN hook. */
11267
11268 static rtx
11269 sparc_expand_builtin (tree exp, rtx target,
11270 rtx subtarget ATTRIBUTE_UNUSED,
11271 machine_mode tmode ATTRIBUTE_UNUSED,
11272 int ignore ATTRIBUTE_UNUSED)
11273 {
11274 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11275 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11276 enum insn_code icode = sparc_builtins_icode[code];
11277 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11278 call_expr_arg_iterator iter;
11279 int arg_count = 0;
11280 rtx pat, op[4];
11281 tree arg;
11282
11283 if (nonvoid)
11284 {
11285 machine_mode tmode = insn_data[icode].operand[0].mode;
11286 if (!target
11287 || GET_MODE (target) != tmode
11288 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11289 op[0] = gen_reg_rtx (tmode);
11290 else
11291 op[0] = target;
11292 }
11293
11294 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11295 {
11296 const struct insn_operand_data *insn_op;
11297 int idx;
11298
11299 if (arg == error_mark_node)
11300 return NULL_RTX;
11301
11302 arg_count++;
11303 idx = arg_count - !nonvoid;
11304 insn_op = &insn_data[icode].operand[idx];
11305 op[arg_count] = expand_normal (arg);
11306
11307 /* Some of the builtins require constant arguments. We check
11308 for this here. */
11309 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11310 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11311 && arg_count == 3)
11312 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11313 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11314 && arg_count == 2))
11315 {
11316 if (!check_constant_argument (icode, idx, op[arg_count]))
11317 return const0_rtx;
11318 }
11319
11320 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11321 {
11322 if (!address_operand (op[arg_count], SImode))
11323 {
11324 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11325 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11326 }
11327 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11328 }
11329
11330 else if (insn_op->mode == V1DImode
11331 && GET_MODE (op[arg_count]) == DImode)
11332 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11333
11334 else if (insn_op->mode == V1SImode
11335 && GET_MODE (op[arg_count]) == SImode)
11336 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11337
11338 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11339 insn_op->mode))
11340 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11341 }
11342
11343 switch (arg_count)
11344 {
11345 case 0:
11346 pat = GEN_FCN (icode) (op[0]);
11347 break;
11348 case 1:
11349 if (nonvoid)
11350 pat = GEN_FCN (icode) (op[0], op[1]);
11351 else
11352 pat = GEN_FCN (icode) (op[1]);
11353 break;
11354 case 2:
11355 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11356 break;
11357 case 3:
11358 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11359 break;
11360 default:
11361 gcc_unreachable ();
11362 }
11363
11364 if (!pat)
11365 return NULL_RTX;
11366
11367 emit_insn (pat);
11368
11369 return (nonvoid ? op[0] : const0_rtx);
11370 }
11371
11372 /* Return the upper 16 bits of the 8x16 multiplication. */
11373
11374 static int
11375 sparc_vis_mul8x16 (int e8, int e16)
11376 {
11377 return (e8 * e16 + 128) / 256;
11378 }
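/* Editorial illustration (not part of the original sources): the expression
   (e8 * e16 + 128) / 256 drops the low 8 bits of the 24-bit product with
   rounding to nearest for non-negative operands, e.g. e8 = 100 and
   e16 = 300 give (30000 + 128) / 256 = 117.  */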
11379
11380 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11381 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11382
11383 static void
11384 sparc_handle_vis_mul8x16 (tree *n_elts, enum sparc_builtins fncode,
11385 tree inner_type, tree cst0, tree cst1)
11386 {
11387 unsigned i, num = VECTOR_CST_NELTS (cst0);
11388 int scale;
11389
11390 switch (fncode)
11391 {
11392 case SPARC_BUILTIN_FMUL8X16:
11393 for (i = 0; i < num; ++i)
11394 {
11395 int val
11396 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11397 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11398 n_elts[i] = build_int_cst (inner_type, val);
11399 }
11400 break;
11401
11402 case SPARC_BUILTIN_FMUL8X16AU:
11403 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11404
11405 for (i = 0; i < num; ++i)
11406 {
11407 int val
11408 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11409 scale);
11410 n_elts[i] = build_int_cst (inner_type, val);
11411 }
11412 break;
11413
11414 case SPARC_BUILTIN_FMUL8X16AL:
11415 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11416
11417 for (i = 0; i < num; ++i)
11418 {
11419 int val
11420 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11421 scale);
11422 n_elts[i] = build_int_cst (inner_type, val);
11423 }
11424 break;
11425
11426 default:
11427 gcc_unreachable ();
11428 }
11429 }
11430
11431 /* Implement TARGET_FOLD_BUILTIN hook.
11432
11433 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11434 result of the function call is ignored. NULL_TREE is returned if the
11435 function could not be folded. */
11436
11437 static tree
11438 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11439 tree *args, bool ignore)
11440 {
11441 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11442 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11443 tree arg0, arg1, arg2;
11444
11445 if (ignore)
11446 switch (code)
11447 {
11448 case SPARC_BUILTIN_LDFSR:
11449 case SPARC_BUILTIN_STFSR:
11450 case SPARC_BUILTIN_ALIGNADDR:
11451 case SPARC_BUILTIN_WRGSR:
11452 case SPARC_BUILTIN_BMASK:
11453 case SPARC_BUILTIN_CMASK8:
11454 case SPARC_BUILTIN_CMASK16:
11455 case SPARC_BUILTIN_CMASK32:
11456 break;
11457
11458 default:
11459 return build_zero_cst (rtype);
11460 }
11461
11462 switch (code)
11463 {
11464 case SPARC_BUILTIN_FEXPAND:
11465 arg0 = args[0];
11466 STRIP_NOPS (arg0);
11467
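/* Illustrative note (editorial addition): folding a constant FEXPAND
   simply shifts each byte element left by 4, e.g. the byte 0x12 becomes
   the 16-bit fixed-point value 0x120, as done below.  */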
11468 if (TREE_CODE (arg0) == VECTOR_CST)
11469 {
11470 tree inner_type = TREE_TYPE (rtype);
11471 tree *n_elts;
11472 unsigned i;
11473
11474 n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
11475 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11476 n_elts[i] = build_int_cst (inner_type,
11477 TREE_INT_CST_LOW
11478 (VECTOR_CST_ELT (arg0, i)) << 4);
11479 return build_vector (rtype, n_elts);
11480 }
11481 break;
11482
11483 case SPARC_BUILTIN_FMUL8X16:
11484 case SPARC_BUILTIN_FMUL8X16AU:
11485 case SPARC_BUILTIN_FMUL8X16AL:
11486 arg0 = args[0];
11487 arg1 = args[1];
11488 STRIP_NOPS (arg0);
11489 STRIP_NOPS (arg1);
11490
11491 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11492 {
11493 tree inner_type = TREE_TYPE (rtype);
11494 tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
11495 sparc_handle_vis_mul8x16 (n_elts, code, inner_type, arg0, arg1);
11496 return build_vector (rtype, n_elts);
11497 }
11498 break;
11499
11500 case SPARC_BUILTIN_FPMERGE:
11501 arg0 = args[0];
11502 arg1 = args[1];
11503 STRIP_NOPS (arg0);
11504 STRIP_NOPS (arg1);
11505
11506 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11507 {
11508 tree *n_elts = XALLOCAVEC (tree, 2 * VECTOR_CST_NELTS (arg0));
11509 unsigned i;
11510 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11511 {
11512 n_elts[2*i] = VECTOR_CST_ELT (arg0, i);
11513 n_elts[2*i+1] = VECTOR_CST_ELT (arg1, i);
11514 }
11515
11516 return build_vector (rtype, n_elts);
11517 }
11518 break;
11519
11520 case SPARC_BUILTIN_PDIST:
11521 case SPARC_BUILTIN_PDISTN:
11522 arg0 = args[0];
11523 arg1 = args[1];
11524 STRIP_NOPS (arg0);
11525 STRIP_NOPS (arg1);
11526 if (code == SPARC_BUILTIN_PDIST)
11527 {
11528 arg2 = args[2];
11529 STRIP_NOPS (arg2);
11530 }
11531 else
11532 arg2 = integer_zero_node;
11533
11534 if (TREE_CODE (arg0) == VECTOR_CST
11535 && TREE_CODE (arg1) == VECTOR_CST
11536 && TREE_CODE (arg2) == INTEGER_CST)
11537 {
11538 bool overflow = false;
11539 widest_int result = wi::to_widest (arg2);
11540 widest_int tmp;
11541 unsigned i;
11542
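/* Descriptive comment (editorial addition): the loop below folds
   PDIST/PDISTN as a sum of absolute differences, starting RESULT at ARG2
   (zero for PDISTN) and accumulating |e0 - e1| for each pair of byte
   elements.  */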
11543 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11544 {
11545 tree e0 = VECTOR_CST_ELT (arg0, i);
11546 tree e1 = VECTOR_CST_ELT (arg1, i);
11547
11548 bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11549
11550 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11551 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11552 if (wi::neg_p (tmp))
11553 tmp = wi::neg (tmp, &neg2_ovf);
11554 else
11555 neg2_ovf = false;
11556 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11557 overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
11558 }
11559
11560 gcc_assert (!overflow);
11561
11562 return wide_int_to_tree (rtype, result);
11563 }
11564
11565 default:
11566 break;
11567 }
11568
11569 return NULL_TREE;
11570 }
11571 \f
11572 /* ??? This duplicates information provided to the compiler by the
11573 ??? scheduler description. Some day, teach genautomata to output
11574 ??? the latencies and then CSE will just use that. */
11575
11576 static bool
11577 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11578 int opno ATTRIBUTE_UNUSED,
11579 int *total, bool speed ATTRIBUTE_UNUSED)
11580 {
11581 int code = GET_CODE (x);
11582 bool float_mode_p = FLOAT_MODE_P (mode);
11583
11584 switch (code)
11585 {
11586 case CONST_INT:
11587 if (SMALL_INT (x))
11588 *total = 0;
11589 else
11590 *total = 2;
11591 return true;
11592
11593 case CONST_WIDE_INT:
11594 *total = 0;
11595 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11596 *total += 2;
11597 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11598 *total += 2;
11599 return true;
11600
11601 case HIGH:
11602 *total = 2;
11603 return true;
11604
11605 case CONST:
11606 case LABEL_REF:
11607 case SYMBOL_REF:
11608 *total = 4;
11609 return true;
11610
11611 case CONST_DOUBLE:
11612 *total = 8;
11613 return true;
11614
11615 case MEM:
11616 /* If outer-code was a sign or zero extension, a cost
11617 of COSTS_N_INSNS (1) was already added in. This is
11618 why we are subtracting it back out. */
11619 if (outer_code == ZERO_EXTEND)
11620 {
11621 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
11622 }
11623 else if (outer_code == SIGN_EXTEND)
11624 {
11625 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
11626 }
11627 else if (float_mode_p)
11628 {
11629 *total = sparc_costs->float_load;
11630 }
11631 else
11632 {
11633 *total = sparc_costs->int_load;
11634 }
11635
11636 return true;
11637
11638 case PLUS:
11639 case MINUS:
11640 if (float_mode_p)
11641 *total = sparc_costs->float_plusminus;
11642 else
11643 *total = COSTS_N_INSNS (1);
11644 return false;
11645
11646 case FMA:
11647 {
11648 rtx sub;
11649
11650 gcc_assert (float_mode_p);
11651 *total = sparc_costs->float_mul;
11652
11653 sub = XEXP (x, 0);
11654 if (GET_CODE (sub) == NEG)
11655 sub = XEXP (sub, 0);
11656 *total += rtx_cost (sub, mode, FMA, 0, speed);
11657
11658 sub = XEXP (x, 2);
11659 if (GET_CODE (sub) == NEG)
11660 sub = XEXP (sub, 0);
11661 *total += rtx_cost (sub, mode, FMA, 2, speed);
11662 return true;
11663 }
11664
11665 case MULT:
11666 if (float_mode_p)
11667 *total = sparc_costs->float_mul;
11668 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
11669 *total = COSTS_N_INSNS (25);
11670 else
11671 {
11672 int bit_cost;
11673
11674 bit_cost = 0;
11675 if (sparc_costs->int_mul_bit_factor)
11676 {
11677 int nbits;
11678
11679 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
11680 {
11681 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
11682 for (nbits = 0; value != 0; value &= value - 1)
11683 nbits++;
11684 }
11685 else
11686 nbits = 7;
11687
11688 if (nbits < 3)
11689 nbits = 3;
11690 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
11691 bit_cost = COSTS_N_INSNS (bit_cost);
11692 }
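/* Illustrative example (editorial addition): for a CONST_INT multiplier
   with 9 bits set and int_mul_bit_factor == 2, the extra cost computed
   above is COSTS_N_INSNS ((9 - 3) / 2) = COSTS_N_INSNS (3), added to
   int_mul or int_mulX below.  */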
11693
11694 if (mode == DImode || !TARGET_HARD_MUL)
11695 *total = sparc_costs->int_mulX + bit_cost;
11696 else
11697 *total = sparc_costs->int_mul + bit_cost;
11698 }
11699 return false;
11700
11701 case ASHIFT:
11702 case ASHIFTRT:
11703 case LSHIFTRT:
11704 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
11705 return false;
11706
11707 case DIV:
11708 case UDIV:
11709 case MOD:
11710 case UMOD:
11711 if (float_mode_p)
11712 {
11713 if (mode == DFmode)
11714 *total = sparc_costs->float_div_df;
11715 else
11716 *total = sparc_costs->float_div_sf;
11717 }
11718 else
11719 {
11720 if (mode == DImode)
11721 *total = sparc_costs->int_divX;
11722 else
11723 *total = sparc_costs->int_div;
11724 }
11725 return false;
11726
11727 case NEG:
11728 if (! float_mode_p)
11729 {
11730 *total = COSTS_N_INSNS (1);
11731 return false;
11732 }
11733 /* FALLTHRU */
11734
11735 case ABS:
11736 case FLOAT:
11737 case UNSIGNED_FLOAT:
11738 case FIX:
11739 case UNSIGNED_FIX:
11740 case FLOAT_EXTEND:
11741 case FLOAT_TRUNCATE:
11742 *total = sparc_costs->float_move;
11743 return false;
11744
11745 case SQRT:
11746 if (mode == DFmode)
11747 *total = sparc_costs->float_sqrt_df;
11748 else
11749 *total = sparc_costs->float_sqrt_sf;
11750 return false;
11751
11752 case COMPARE:
11753 if (float_mode_p)
11754 *total = sparc_costs->float_cmp;
11755 else
11756 *total = COSTS_N_INSNS (1);
11757 return false;
11758
11759 case IF_THEN_ELSE:
11760 if (float_mode_p)
11761 *total = sparc_costs->float_cmove;
11762 else
11763 *total = sparc_costs->int_cmove;
11764 return false;
11765
11766 case IOR:
11767 /* Handle the NAND vector patterns. */
11768 if (sparc_vector_mode_supported_p (mode)
11769 && GET_CODE (XEXP (x, 0)) == NOT
11770 && GET_CODE (XEXP (x, 1)) == NOT)
11771 {
11772 *total = COSTS_N_INSNS (1);
11773 return true;
11774 }
11775 else
11776 return false;
11777
11778 default:
11779 return false;
11780 }
11781 }
11782
11783 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
11784
11785 static inline bool
11786 general_or_i64_p (reg_class_t rclass)
11787 {
11788 return (rclass == GENERAL_REGS || rclass == I64_REGS);
11789 }
11790
11791 /* Implement TARGET_REGISTER_MOVE_COST. */
11792
11793 static int
11794 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
11795 reg_class_t from, reg_class_t to)
11796 {
11797 bool need_memory = false;
11798
11799 /* This helps postreload CSE to eliminate redundant comparisons. */
11800 if (from == NO_REGS || to == NO_REGS)
11801 return 100;
11802
11803 if (from == FPCC_REGS || to == FPCC_REGS)
11804 need_memory = true;
11805 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
11806 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
11807 {
11808 if (TARGET_VIS3)
11809 {
11810 int size = GET_MODE_SIZE (mode);
11811 if (size == 8 || size == 4)
11812 {
11813 if (! TARGET_ARCH32 || size == 4)
11814 return 4;
11815 else
11816 return 6;
11817 }
11818 }
11819 need_memory = true;
11820 }
11821
11822 if (need_memory)
11823 {
11824 if (sparc_cpu == PROCESSOR_ULTRASPARC
11825 || sparc_cpu == PROCESSOR_ULTRASPARC3
11826 || sparc_cpu == PROCESSOR_NIAGARA
11827 || sparc_cpu == PROCESSOR_NIAGARA2
11828 || sparc_cpu == PROCESSOR_NIAGARA3
11829 || sparc_cpu == PROCESSOR_NIAGARA4
11830 || sparc_cpu == PROCESSOR_NIAGARA7
11831 || sparc_cpu == PROCESSOR_M8)
11832 return 12;
11833
11834 return 6;
11835 }
11836
11837 return 2;
11838 }
11839
11840 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
11841 This is achieved by means of a manual dynamic stack space allocation in
11842 the current frame. We make the assumption that SEQ doesn't contain any
11843 function calls, with the possible exception of calls to the GOT helper. */
11844
11845 static void
11846 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
11847 {
11848 /* We must preserve the lowest 16 words for the register save area. */
11849 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
11850 /* We really need only 2 words of fresh stack space. */
11851 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
11852
11853 rtx slot
11854 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
11855 SPARC_STACK_BIAS + offset));
11856
11857 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
11858 emit_insn (gen_rtx_SET (slot, reg));
11859 if (reg2)
11860 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
11861 reg2));
11862 emit_insn (seq);
11863 if (reg2)
11864 emit_insn (gen_rtx_SET (reg2,
11865 adjust_address (slot, word_mode, UNITS_PER_WORD)));
11866 emit_insn (gen_rtx_SET (reg, slot));
11867 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
11868 }
11869
11870 /* Output the assembler code for a thunk function. THUNK_DECL is the
11871 declaration for the thunk function itself, FUNCTION is the decl for
11872 the target function. DELTA is an immediate constant offset to be
11873 added to THIS. If VCALL_OFFSET is nonzero, the word at address
11874 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
11875
11876 static void
11877 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11878 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11879 tree function)
11880 {
11881 rtx this_rtx, funexp;
11882 rtx_insn *insn;
11883 unsigned int int_arg_first;
11884
11885 reload_completed = 1;
11886 epilogue_completed = 1;
11887
11888 emit_note (NOTE_INSN_PROLOGUE_END);
11889
11890 if (TARGET_FLAT)
11891 {
11892 sparc_leaf_function_p = 1;
11893
11894 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
11895 }
11896 else if (flag_delayed_branch)
11897 {
11898 /* We will emit a regular sibcall below, so we need to instruct
11899 output_sibcall that we are in a leaf function. */
11900 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
11901
11902 /* This will cause final.c to invoke leaf_renumber_regs so we
11903 must behave as if we were in a not-yet-leafified function. */
11904 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
11905 }
11906 else
11907 {
11908 /* We will emit the sibcall manually below, so we will need to
11909 manually spill non-leaf registers. */
11910 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
11911
11912 /* We really are in a leaf function. */
11913 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
11914 }
11915
11916 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
11917 returns a structure, the structure return pointer is there instead. */
11918 if (TARGET_ARCH64
11919 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11920 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
11921 else
11922 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
11923
11924 /* Add DELTA. When possible use a plain add, otherwise load it into
11925 a register first. */
11926 if (delta)
11927 {
11928 rtx delta_rtx = GEN_INT (delta);
11929
11930 if (! SPARC_SIMM13_P (delta))
11931 {
11932 rtx scratch = gen_rtx_REG (Pmode, 1);
11933 emit_move_insn (scratch, delta_rtx);
11934 delta_rtx = scratch;
11935 }
11936
11937 /* THIS_RTX += DELTA. */
11938 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
11939 }
11940
11941 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
11942 if (vcall_offset)
11943 {
11944 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
11945 rtx scratch = gen_rtx_REG (Pmode, 1);
11946
11947 gcc_assert (vcall_offset < 0);
11948
11949 /* SCRATCH = *THIS_RTX. */
11950 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
11951
11952 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
11953 may not have any available scratch register at this point. */
11954 if (SPARC_SIMM13_P (vcall_offset))
11955 ;
11956 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
11957 else if (! fixed_regs[5]
11958 /* The below sequence is made up of at least 2 insns,
11959 while the default method may need only one. */
11960 && vcall_offset < -8192)
11961 {
11962 rtx scratch2 = gen_rtx_REG (Pmode, 5);
11963 emit_move_insn (scratch2, vcall_offset_rtx);
11964 vcall_offset_rtx = scratch2;
11965 }
11966 else
11967 {
11968 rtx increment = GEN_INT (-4096);
11969
11970 /* VCALL_OFFSET is a negative number whose typical range can be
11971 estimated as -32768..0 in 32-bit mode. In almost all cases
11972 it is therefore cheaper to emit multiple add insns than
11973 spilling and loading the constant into a register (at least
11974 6 insns). */
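/* Worked example (editorial addition): if VCALL_OFFSET were -20000, the
   loop below would emit four adds of -4096, leaving -3616, which fits in
   the 13-bit signed immediate.  */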
11975 while (! SPARC_SIMM13_P (vcall_offset))
11976 {
11977 emit_insn (gen_add2_insn (scratch, increment));
11978 vcall_offset += 4096;
11979 }
11980 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
11981 }
11982
11983 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
11984 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
11985 gen_rtx_PLUS (Pmode,
11986 scratch,
11987 vcall_offset_rtx)));
11988
11989 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
11990 emit_insn (gen_add2_insn (this_rtx, scratch));
11991 }
11992
11993 /* Generate a tail call to the target function. */
11994 if (! TREE_USED (function))
11995 {
11996 assemble_external (function);
11997 TREE_USED (function) = 1;
11998 }
11999 funexp = XEXP (DECL_RTL (function), 0);
12000
12001 if (flag_delayed_branch)
12002 {
12003 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12004 insn = emit_call_insn (gen_sibcall (funexp));
12005 SIBLING_CALL_P (insn) = 1;
12006 }
12007 else
12008 {
12009 /* The hoops we have to jump through in order to generate a sibcall
12010 without using delay slots... */
12011 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12012
12013 if (flag_pic)
12014 {
12015 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12016 start_sequence ();
12017 load_got_register (); /* clobbers %o7 */
12018 scratch = sparc_legitimize_pic_address (funexp, scratch);
12019 seq = get_insns ();
12020 end_sequence ();
12021 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12022 }
12023 else if (TARGET_ARCH32)
12024 {
12025 emit_insn (gen_rtx_SET (scratch,
12026 gen_rtx_HIGH (SImode, funexp)));
12027 emit_insn (gen_rtx_SET (scratch,
12028 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12029 }
12030 else /* TARGET_ARCH64 */
12031 {
12032 switch (sparc_cmodel)
12033 {
12034 case CM_MEDLOW:
12035 case CM_MEDMID:
12036 /* The destination can serve as a temporary. */
12037 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12038 break;
12039
12040 case CM_MEDANY:
12041 case CM_EMBMEDANY:
12042 /* The destination cannot serve as a temporary. */
12043 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12044 start_sequence ();
12045 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12046 seq = get_insns ();
12047 end_sequence ();
12048 emit_and_preserve (seq, spill_reg, 0);
12049 break;
12050
12051 default:
12052 gcc_unreachable ();
12053 }
12054 }
12055
12056 emit_jump_insn (gen_indirect_jump (scratch));
12057 }
12058
12059 emit_barrier ();
12060
12061 /* Run just enough of rest_of_compilation to get the insns emitted.
12062 There's not really enough bulk here to make other passes such as
12063 instruction scheduling worth while. Note that use_thunk calls
12064 assemble_start_function and assemble_end_function. */
12065 insn = get_insns ();
12066 shorten_branches (insn);
12067 final_start_function (insn, file, 1);
12068 final (insn, file, 1);
12069 final_end_function ();
12070
12071 reload_completed = 0;
12072 epilogue_completed = 0;
12073 }
12074
12075 /* Return true if sparc_output_mi_thunk would be able to output the
12076 assembler code for the thunk function specified by the arguments
12077 it is passed, and false otherwise. */
12078 static bool
12079 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12080 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12081 HOST_WIDE_INT vcall_offset,
12082 const_tree function ATTRIBUTE_UNUSED)
12083 {
12084 /* Bound the loop used in the default method above. */
12085 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12086 }
12087
12088 /* How to allocate a 'struct machine_function'. */
12089
12090 static struct machine_function *
12091 sparc_init_machine_status (void)
12092 {
12093 return ggc_cleared_alloc<machine_function> ();
12094 }
12095
12096 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12097 We need to emit DTP-relative relocations. */
12098
12099 static void
12100 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12101 {
12102 switch (size)
12103 {
12104 case 4:
12105 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12106 break;
12107 case 8:
12108 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12109 break;
12110 default:
12111 gcc_unreachable ();
12112 }
12113 output_addr_const (file, x);
12114 fputs (")", file);
12115 }
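/* For illustration (editorial addition): with SIZE == 4 and X a SYMBOL_REF
   for a hypothetical symbol "sym", the routine above emits
   .word %r_tls_dtpoff32(sym)
   into the assembly stream.  */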
12116
12117 /* Do whatever processing is required at the end of a file. */
12118
12119 static void
12120 sparc_file_end (void)
12121 {
12122 /* If we need to emit the special GOT helper function, do so now. */
12123 if (got_helper_rtx)
12124 {
12125 const char *name = XSTR (got_helper_rtx, 0);
12126 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12127 #ifdef DWARF2_UNWIND_INFO
12128 bool do_cfi;
12129 #endif
12130
12131 if (USE_HIDDEN_LINKONCE)
12132 {
12133 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12134 get_identifier (name),
12135 build_function_type_list (void_type_node,
12136 NULL_TREE));
12137 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12138 NULL_TREE, void_type_node);
12139 TREE_PUBLIC (decl) = 1;
12140 TREE_STATIC (decl) = 1;
12141 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12142 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12143 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12144 resolve_unique_section (decl, 0, flag_function_sections);
12145 allocate_struct_function (decl, true);
12146 cfun->is_thunk = 1;
12147 current_function_decl = decl;
12148 init_varasm_status ();
12149 assemble_start_function (decl, name);
12150 }
12151 else
12152 {
12153 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12154 switch_to_section (text_section);
12155 if (align > 0)
12156 ASM_OUTPUT_ALIGN (asm_out_file, align);
12157 ASM_OUTPUT_LABEL (asm_out_file, name);
12158 }
12159
12160 #ifdef DWARF2_UNWIND_INFO
12161 do_cfi = dwarf2out_do_cfi_asm ();
12162 if (do_cfi)
12163 fprintf (asm_out_file, "\t.cfi_startproc\n");
12164 #endif
12165 if (flag_delayed_branch)
12166 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12167 reg_name, reg_name);
12168 else
12169 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12170 reg_name, reg_name);
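/* Editorial illustration derived from the format strings above: assuming
   the GOT register is %l7, the delayed-branch variant of the stub is
   "jmp %o7+8" followed by "add %o7, %l7, %l7" in the delay slot.  */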
12171 #ifdef DWARF2_UNWIND_INFO
12172 if (do_cfi)
12173 fprintf (asm_out_file, "\t.cfi_endproc\n");
12174 #endif
12175 }
12176
12177 if (NEED_INDICATE_EXEC_STACK)
12178 file_end_indicate_exec_stack ();
12179
12180 #ifdef TARGET_SOLARIS
12181 solaris_file_end ();
12182 #endif
12183 }
12184
12185 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12186 /* Implement TARGET_MANGLE_TYPE. */
12187
12188 static const char *
12189 sparc_mangle_type (const_tree type)
12190 {
12191 if (TARGET_ARCH32
12192 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12193 && TARGET_LONG_DOUBLE_128)
12194 return "g";
12195
12196 /* For all other types, use normal C++ mangling. */
12197 return NULL;
12198 }
12199 #endif
12200
12201 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12202 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12203 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
12204
12205 void
12206 sparc_emit_membar_for_model (enum memmodel model,
12207 int load_store, int before_after)
12208 {
12209 /* Bits for the MEMBAR mmask field. */
12210 const int LoadLoad = 1;
12211 const int StoreLoad = 2;
12212 const int LoadStore = 4;
12213 const int StoreStore = 8;
12214
12215 int mm = 0, implied = 0;
12216
12217 switch (sparc_memory_model)
12218 {
12219 case SMM_SC:
12220 /* Sequential Consistency. All memory transactions are immediately
12221 visible in sequential execution order. No barriers needed. */
12222 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12223 break;
12224
12225 case SMM_TSO:
12226 /* Total Store Ordering: all memory transactions with store semantics
12227 are followed by an implied StoreStore. */
12228 implied |= StoreStore;
12229
12230 /* If we're not looking for a raw barrier (before+after), then atomic
12231 operations get the benefit of being both load and store. */
12232 if (load_store == 3 && before_after == 1)
12233 implied |= StoreLoad;
12234 /* FALLTHRU */
12235
12236 case SMM_PSO:
12237 /* Partial Store Ordering: all memory transactions with load semantics
12238 are followed by an implied LoadLoad | LoadStore. */
12239 implied |= LoadLoad | LoadStore;
12240
12241 /* If we're not looking for a raw barrier (before+after), then atomic
12242 operations get the benefit of being both load and store. */
12243 if (load_store == 3 && before_after == 2)
12244 implied |= StoreLoad | StoreStore;
12245 /* FALLTHRU */
12246
12247 case SMM_RMO:
12248 /* Relaxed Memory Ordering: no implicit bits. */
12249 break;
12250
12251 default:
12252 gcc_unreachable ();
12253 }
12254
12255 if (before_after & 1)
12256 {
12257 if (is_mm_release (model) || is_mm_acq_rel (model)
12258 || is_mm_seq_cst (model))
12259 {
12260 if (load_store & 1)
12261 mm |= LoadLoad | StoreLoad;
12262 if (load_store & 2)
12263 mm |= LoadStore | StoreStore;
12264 }
12265 }
12266 if (before_after & 2)
12267 {
12268 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12269 || is_mm_seq_cst (model))
12270 {
12271 if (load_store & 1)
12272 mm |= LoadLoad | LoadStore;
12273 if (load_store & 2)
12274 mm |= StoreLoad | StoreStore;
12275 }
12276 }
12277
12278 /* Remove the bits implied by the system memory model. */
12279 mm &= ~implied;
12280
12281 /* For raw barriers (before+after), always emit a barrier.
12282 This will become a compile-time barrier if needed. */
12283 if (mm || before_after == 3)
12284 emit_insn (gen_membar (GEN_INT (mm)));
12285 }
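
/* Worked example (editorial addition, not in the original sources): for
   load_store == 3 and before_after == 3 with a seq_cst model under SMM_TSO,
   the code above sets all four bits in MM, the implied set
   LoadLoad | LoadStore | StoreStore is then removed, and a single
   membar #StoreLoad (mmask 2) is emitted.  */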
12286
12287 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing a 32-bit
12288 compare and swap on the word containing the byte or half-word. */
12289
12290 static void
12291 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12292 rtx oldval, rtx newval)
12293 {
12294 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12295 rtx addr = gen_reg_rtx (Pmode);
12296 rtx off = gen_reg_rtx (SImode);
12297 rtx oldv = gen_reg_rtx (SImode);
12298 rtx newv = gen_reg_rtx (SImode);
12299 rtx oldvalue = gen_reg_rtx (SImode);
12300 rtx newvalue = gen_reg_rtx (SImode);
12301 rtx res = gen_reg_rtx (SImode);
12302 rtx resv = gen_reg_rtx (SImode);
12303 rtx memsi, val, mask, cc;
12304
12305 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12306
12307 if (Pmode != SImode)
12308 addr1 = gen_lowpart (SImode, addr1);
12309 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12310
12311 memsi = gen_rtx_MEM (SImode, addr);
12312 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12313 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12314
12315 val = copy_to_reg (memsi);
12316
12317 emit_insn (gen_rtx_SET (off,
12318 gen_rtx_XOR (SImode, off,
12319 GEN_INT (GET_MODE (mem) == QImode
12320 ? 3 : 2))));
12321
12322 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12323
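/* Worked example (editorial addition): for a QImode access whose address
   has low bits 01, OFF starts as 1, the XOR with 3 gives 2 and the shift by
   3 gives 16; on big-endian SPARC the byte thus sits in bits 23..16 of the
   aligned word, which is where the mask built below is shifted to.  */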
12324 if (GET_MODE (mem) == QImode)
12325 mask = force_reg (SImode, GEN_INT (0xff));
12326 else
12327 mask = force_reg (SImode, GEN_INT (0xffff));
12328
12329 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12330
12331 emit_insn (gen_rtx_SET (val,
12332 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12333 val)));
12334
12335 oldval = gen_lowpart (SImode, oldval);
12336 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12337
12338 newval = gen_lowpart_common (SImode, newval);
12339 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12340
12341 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12342
12343 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12344
12345 rtx_code_label *end_label = gen_label_rtx ();
12346 rtx_code_label *loop_label = gen_label_rtx ();
12347 emit_label (loop_label);
12348
12349 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12350
12351 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12352
12353 emit_move_insn (bool_result, const1_rtx);
12354
12355 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12356
12357 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12358
12359 emit_insn (gen_rtx_SET (resv,
12360 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12361 res)));
12362
12363 emit_move_insn (bool_result, const0_rtx);
12364
12365 cc = gen_compare_reg_1 (NE, resv, val);
12366 emit_insn (gen_rtx_SET (val, resv));
12367
12368 /* Use cbranchcc4 to separate the compare and branch! */
12369 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12370 cc, const0_rtx, loop_label));
12371
12372 emit_label (end_label);
12373
12374 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12375
12376 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12377
12378 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
12379 }
12380
12381 /* Expand code to perform a compare-and-swap. */
12382
12383 void
12384 sparc_expand_compare_and_swap (rtx operands[])
12385 {
12386 rtx bval, retval, mem, oldval, newval;
12387 machine_mode mode;
12388 enum memmodel model;
12389
12390 bval = operands[0];
12391 retval = operands[1];
12392 mem = operands[2];
12393 oldval = operands[3];
12394 newval = operands[4];
12395 model = (enum memmodel) INTVAL (operands[6]);
12396 mode = GET_MODE (mem);
12397
12398 sparc_emit_membar_for_model (model, 3, 1);
12399
12400 if (reg_overlap_mentioned_p (retval, oldval))
12401 oldval = copy_to_reg (oldval);
12402
12403 if (mode == QImode || mode == HImode)
12404 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12405 else
12406 {
12407 rtx (*gen) (rtx, rtx, rtx, rtx);
12408 rtx x;
12409
12410 if (mode == SImode)
12411 gen = gen_atomic_compare_and_swapsi_1;
12412 else
12413 gen = gen_atomic_compare_and_swapdi_1;
12414 emit_insn (gen (retval, mem, oldval, newval));
12415
12416 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12417 if (x != bval)
12418 convert_move (bval, x, 1);
12419 }
12420
12421 sparc_emit_membar_for_model (model, 3, 2);
12422 }
12423
12424 void
12425 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12426 {
12427 rtx t_1, t_2, t_3;
12428
12429 sel = gen_lowpart (DImode, sel);
12430 switch (vmode)
12431 {
12432 case V2SImode:
12433 /* inp = xxxxxxxAxxxxxxxB */
12434 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12435 NULL_RTX, 1, OPTAB_DIRECT);
12436 /* t_1 = ....xxxxxxxAxxxx */
12437 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12438 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12439 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12440 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12441 /* sel = .......B */
12442 /* t_1 = ...A.... */
12443 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12444 /* sel = ...A...B */
12445 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12446 /* sel = AAAABBBB * 4 */
12447 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12448 /* sel = { A*4, A*4+1, A*4+2, ... } */
12449 break;
12450
12451 case V4HImode:
12452 /* inp = xxxAxxxBxxxCxxxD */
12453 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12454 NULL_RTX, 1, OPTAB_DIRECT);
12455 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12456 NULL_RTX, 1, OPTAB_DIRECT);
12457 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12458 NULL_RTX, 1, OPTAB_DIRECT);
12459 /* t_1 = ..xxxAxxxBxxxCxx */
12460 /* t_2 = ....xxxAxxxBxxxC */
12461 /* t_3 = ......xxxAxxxBxx */
12462 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12463 GEN_INT (0x07),
12464 NULL_RTX, 1, OPTAB_DIRECT);
12465 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12466 GEN_INT (0x0700),
12467 NULL_RTX, 1, OPTAB_DIRECT);
12468 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12469 GEN_INT (0x070000),
12470 NULL_RTX, 1, OPTAB_DIRECT);
12471 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12472 GEN_INT (0x07000000),
12473 NULL_RTX, 1, OPTAB_DIRECT);
12474 /* sel = .......D */
12475 /* t_1 = .....C.. */
12476 /* t_2 = ...B.... */
12477 /* t_3 = .A...... */
12478 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12479 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12480 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12481 /* sel = .A.B.C.D */
12482 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12483 /* sel = AABBCCDD * 2 */
12484 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12485 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12486 break;
12487
12488 case V8QImode:
12489 /* input = xAxBxCxDxExFxGxH */
12490 sel = expand_simple_binop (DImode, AND, sel,
12491 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12492 | 0x0f0f0f0f),
12493 NULL_RTX, 1, OPTAB_DIRECT);
12494 /* sel = .A.B.C.D.E.F.G.H */
12495 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12496 NULL_RTX, 1, OPTAB_DIRECT);
12497 /* t_1 = ..A.B.C.D.E.F.G. */
12498 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12499 NULL_RTX, 1, OPTAB_DIRECT);
12500 /* sel = .AABBCCDDEEFFGGH */
12501 sel = expand_simple_binop (DImode, AND, sel,
12502 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12503 | 0xff00ff),
12504 NULL_RTX, 1, OPTAB_DIRECT);
12505 /* sel = ..AB..CD..EF..GH */
12506 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12507 NULL_RTX, 1, OPTAB_DIRECT);
12508 /* t_1 = ....AB..CD..EF.. */
12509 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12510 NULL_RTX, 1, OPTAB_DIRECT);
12511 /* sel = ..ABABCDCDEFEFGH */
12512 sel = expand_simple_binop (DImode, AND, sel,
12513 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12514 NULL_RTX, 1, OPTAB_DIRECT);
12515 /* sel = ....ABCD....EFGH */
12516 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12517 NULL_RTX, 1, OPTAB_DIRECT);
12518 /* t_1 = ........ABCD.... */
12519 sel = gen_lowpart (SImode, sel);
12520 t_1 = gen_lowpart (SImode, t_1);
12521 break;
12522
12523 default:
12524 gcc_unreachable ();
12525 }
12526
12527 /* Always perform the final addition/merge within the bmask insn. */
12528 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
12529 }
12530
12531 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12532
12533 static bool
12534 sparc_frame_pointer_required (void)
12535 {
12536 /* If the stack pointer is dynamically modified in the function, it cannot
12537 serve as the frame pointer. */
12538 if (cfun->calls_alloca)
12539 return true;
12540
12541 /* If the function receives nonlocal gotos, it needs to save the frame
12542 pointer in the nonlocal_goto_save_area object. */
12543 if (cfun->has_nonlocal_label)
12544 return true;
12545
12546 /* In flat mode, that's it. */
12547 if (TARGET_FLAT)
12548 return false;
12549
12550 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12551 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12552 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12553 }
12554
12555 /* The way this is structured, we can't eliminate SFP in favor of SP
12556 if the frame pointer is required: we want to use the SFP->HFP elimination
12557 in that case. But the test in update_eliminables doesn't know we are
12558 assuming below that we only do the former elimination. */
12559
12560 static bool
12561 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
12562 {
12563 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
12564 }
12565
12566 /* Return the hard frame pointer directly to bypass the stack bias. */
12567
12568 static rtx
12569 sparc_builtin_setjmp_frame_value (void)
12570 {
12571 return hard_frame_pointer_rtx;
12572 }
12573
12574 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
12575 they won't be allocated. */
12576
12577 static void
12578 sparc_conditional_register_usage (void)
12579 {
12580 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
12581 {
12582 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12583 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12584 }
12585 /* If the user has passed -f{fixed,call-{used,saved}}-g5 */
12586 /* then honor it. */
12587 if (TARGET_ARCH32 && fixed_regs[5])
12588 fixed_regs[5] = 1;
12589 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
12590 fixed_regs[5] = 0;
12591 if (! TARGET_V9)
12592 {
12593 int regno;
12594 for (regno = SPARC_FIRST_V9_FP_REG;
12595 regno <= SPARC_LAST_V9_FP_REG;
12596 regno++)
12597 fixed_regs[regno] = 1;
12598 /* %fcc0 is used by v8 and v9. */
12599 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
12600 regno <= SPARC_LAST_V9_FCC_REG;
12601 regno++)
12602 fixed_regs[regno] = 1;
12603 }
12604 if (! TARGET_FPU)
12605 {
12606 int regno;
12607 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
12608 fixed_regs[regno] = 1;
12609 }
12610 /* If the user has passed -f{fixed,call-{used,saved}}-g2 */
12611 /* then honor it. Likewise with g3 and g4. */
12612 if (fixed_regs[2] == 2)
12613 fixed_regs[2] = ! TARGET_APP_REGS;
12614 if (fixed_regs[3] == 2)
12615 fixed_regs[3] = ! TARGET_APP_REGS;
12616 if (TARGET_ARCH32 && fixed_regs[4] == 2)
12617 fixed_regs[4] = ! TARGET_APP_REGS;
12618 else if (TARGET_CM_EMBMEDANY)
12619 fixed_regs[4] = 1;
12620 else if (fixed_regs[4] == 2)
12621 fixed_regs[4] = 0;
12622 if (TARGET_FLAT)
12623 {
12624 int regno;
12625 /* Disable leaf functions. */
12626 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
12627 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12628 leaf_reg_remap [regno] = regno;
12629 }
12630 if (TARGET_VIS)
12631 global_regs[SPARC_GSR_REG] = 1;
12632 }
12633
12634 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
12635
12636 - We can't load constants into FP registers.
12637 - We can't load FP constants into integer registers when soft-float,
12638 because there is no soft-float pattern with a r/F constraint.
12639 - We can't load FP constants into integer registers for TFmode unless
12640 it is 0.0L, because there is no movtf pattern with a r/F constraint.
12641 - Try to reload integer constants (symbolic or otherwise) back into
12642 registers directly, rather than having them dumped to memory. */
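/* For instance (a sketch): a SYMBOL_REF in Pmode asked for FP_REGS or
   GENERAL_OR_FP_REGS is refused (NO_REGS), whereas the same operand asked
   for GENERAL_REGS is explicitly kept in GENERAL_REGS rather than being
   dumped to memory, per the last rule above.  */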
12643
12644 static reg_class_t
12645 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
12646 {
12647 machine_mode mode = GET_MODE (x);
12648 if (CONSTANT_P (x))
12649 {
12650 if (FP_REG_CLASS_P (rclass)
12651 || rclass == GENERAL_OR_FP_REGS
12652 || rclass == GENERAL_OR_EXTRA_FP_REGS
12653 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
12654 || (mode == TFmode && ! const_zero_operand (x, mode)))
12655 return NO_REGS;
12656
12657 if (GET_MODE_CLASS (mode) == MODE_INT)
12658 return GENERAL_REGS;
12659
12660 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12661 {
12662 if (! FP_REG_CLASS_P (rclass)
12663 || !(const_zero_operand (x, mode)
12664 || const_all_ones_operand (x, mode)))
12665 return NO_REGS;
12666 }
12667 }
12668
12669 if (TARGET_VIS3
12670 && ! TARGET_ARCH64
12671 && (rclass == EXTRA_FP_REGS
12672 || rclass == GENERAL_OR_EXTRA_FP_REGS))
12673 {
12674 int regno = true_regnum (x);
12675
12676 if (SPARC_INT_REG_P (regno))
12677 return (rclass == EXTRA_FP_REGS
12678 ? FP_REGS : GENERAL_OR_FP_REGS);
12679 }
12680
12681 return rclass;
12682 }
12683
12684 /* Return true if we use LRA instead of the reload pass. */
12685
12686 static bool
12687 sparc_lra_p (void)
12688 {
12689 return TARGET_LRA;
12690 }
12691
12692 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
12693 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
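/* As a sketch of the common case (register alternative 0, two distinct
   non-constant inputs, OPCODE "mulx"), the sequence assembled below is
   roughly the following, where %H/%L denote the high/low words of a
   64-bit register pair and %3/%4 are the scratch operands:

	srl	%L1, 0, %L1	! zero-extend low words unless already
	srl	%L2, 0, %L2	!  known to be zero-extended
	sllx	%H1, 32, %3	! rebuild the first 64-bit value in %3
	sllx	%H2, 32, %4	! rebuild the second 64-bit value in %4
	or	%L1, %3, %3
	or	%L2, %4, %4
	mulx	%3, %4, %3	! 64-bit multiply
	srlx	%3, 32, %H0	! split the product back into a pair
	mov	%3, %L0  */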
12694
12695 const char *
12696 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
12697 {
12698 char mulstr[32];
12699
12700 gcc_assert (! TARGET_ARCH64);
12701
12702 if (sparc_check_64 (operands[1], insn) <= 0)
12703 output_asm_insn ("srl\t%L1, 0, %L1", operands);
12704 if (which_alternative == 1)
12705 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
12706 if (GET_CODE (operands[2]) == CONST_INT)
12707 {
12708 if (which_alternative == 1)
12709 {
12710 output_asm_insn ("or\t%L1, %H1, %H1", operands);
12711 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
12712 output_asm_insn (mulstr, operands);
12713 return "srlx\t%L0, 32, %H0";
12714 }
12715 else
12716 {
12717 output_asm_insn ("sllx\t%H1, 32, %3", operands);
12718 output_asm_insn ("or\t%L1, %3, %3", operands);
12719 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
12720 output_asm_insn (mulstr, operands);
12721 output_asm_insn ("srlx\t%3, 32, %H0", operands);
12722 return "mov\t%3, %L0";
12723 }
12724 }
12725 else if (rtx_equal_p (operands[1], operands[2]))
12726 {
12727 if (which_alternative == 1)
12728 {
12729 output_asm_insn ("or\t%L1, %H1, %H1", operands);
12730 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
12731 output_asm_insn (mulstr, operands);
12732 return "srlx\t%L0, 32, %H0";
12733 }
12734 else
12735 {
12736 output_asm_insn ("sllx\t%H1, 32, %3", operands);
12737 output_asm_insn ("or\t%L1, %3, %3", operands);
12738 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
12739 output_asm_insn (mulstr, operands);
12740 output_asm_insn ("srlx\t%3, 32, %H0", operands);
12741 return "mov\t%3, %L0";
12742 }
12743 }
12744 if (sparc_check_64 (operands[2], insn) <= 0)
12745 output_asm_insn ("srl\t%L2, 0, %L2", operands);
12746 if (which_alternative == 1)
12747 {
12748 output_asm_insn ("or\t%L1, %H1, %H1", operands);
12749 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
12750 output_asm_insn ("or\t%L2, %L1, %L1", operands);
12751 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
12752 output_asm_insn (mulstr, operands);
12753 return "srlx\t%L0, 32, %H0";
12754 }
12755 else
12756 {
12757 output_asm_insn ("sllx\t%H1, 32, %3", operands);
12758 output_asm_insn ("sllx\t%H2, 32, %4", operands);
12759 output_asm_insn ("or\t%L1, %3, %3", operands);
12760 output_asm_insn ("or\t%L2, %4, %4", operands);
12761 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
12762 output_asm_insn (mulstr, operands);
12763 output_asm_insn ("srlx\t%3, 32, %H0", operands);
12764 return "mov\t%3, %L0";
12765 }
12766 }
12767
12768 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
12769 all fields of TARGET to ELT by means of the VIS2 BSHUFFLE insn. MODE
12770 and INNER_MODE are the modes describing TARGET. */
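/* A sketch of the trick: the element is placed in the low 32 bits of T1,
   and BMASK loads the GSR mask field with a constant whose nibbles each
   select one byte of the 16-byte concatenation {T1, T1}.  E.g. for V4HImode
   the element occupies bytes 6-7 of T1, and the mask 0x67676767 makes
   BSHUFFLE copy that byte pair into all four halfwords of TARGET.  */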
12771
12772 static void
12773 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
12774 machine_mode inner_mode)
12775 {
12776 rtx t1, final_insn, sel;
12777 int bmask;
12778
12779 t1 = gen_reg_rtx (mode);
12780
12781 elt = convert_modes (SImode, inner_mode, elt, true);
12782 emit_move_insn (gen_lowpart (SImode, t1), elt);
12783
12784 switch (mode)
12785 {
12786 case V2SImode:
12787 final_insn = gen_bshufflev2si_vis (target, t1, t1);
12788 bmask = 0x45674567;
12789 break;
12790 case V4HImode:
12791 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
12792 bmask = 0x67676767;
12793 break;
12794 case V8QImode:
12795 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
12796 bmask = 0x77777777;
12797 break;
12798 default:
12799 gcc_unreachable ();
12800 }
12801
12802 sel = force_reg (SImode, GEN_INT (bmask));
12803 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
12804 emit_insn (final_insn);
12805 }
12806
12807 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
12808 all fields of TARGET to ELT in V8QI by means of the VIS FPMERGE insn. */
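/* A sketch of how the byte propagates (big-endian byte order, E = ELT):
   T1 = { 0, 0, 0, E }; the first FPMERGE interleaves T1 with itself,
   giving T2 = { 0, 0, 0, 0, 0, 0, E, E }, whose low half is { 0, 0, E, E };
   the second merge gives T3 with low half { E, E, E, E }; and the third
   merge of that half with itself fills all eight bytes of TARGET with E.  */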
12809
12810 static void
12811 vector_init_fpmerge (rtx target, rtx elt)
12812 {
12813 rtx t1, t2, t2_low, t3, t3_low;
12814
12815 t1 = gen_reg_rtx (V4QImode);
12816 elt = convert_modes (SImode, QImode, elt, true);
12817 emit_move_insn (gen_lowpart (SImode, t1), elt);
12818
12819 t2 = gen_reg_rtx (V8QImode);
12820 t2_low = gen_lowpart (V4QImode, t2);
12821 emit_insn (gen_fpmerge_vis (t2, t1, t1));
12822
12823 t3 = gen_reg_rtx (V8QImode);
12824 t3_low = gen_lowpart (V4QImode, t3);
12825 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
12826
12827 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
12828 }
12829
12830 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
12831 all fields of TARGET to ELT in V4HI by means of the VIS FALIGNDATA insn. */
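/* A sketch: the element ends up in bytes 6-7 of T1, and ALIGNADDR with an
   address of 6 sets the GSR alignment field to 6.  Each FALIGNDATA then
   extracts 8 bytes starting at offset 6 from the 16-byte concatenation
   {T1, TARGET}, i.e. it shifts the element halfword in from the left, so
   four iterations fill all four halfwords of TARGET.  */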
12832
12833 static void
12834 vector_init_faligndata (rtx target, rtx elt)
12835 {
12836 rtx t1 = gen_reg_rtx (V4HImode);
12837 int i;
12838
12839 elt = convert_modes (SImode, HImode, elt, true);
12840 emit_move_insn (gen_lowpart (SImode, t1), elt);
12841
12842 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
12843 force_reg (SImode, GEN_INT (6)),
12844 const0_rtx));
12845
12846 for (i = 0; i < 4; i++)
12847 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
12848 }
12849
12850 /* Emit code to initialize TARGET to values for individual fields VALS. */
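/* The strategy, in decreasing order of preference: an all-constant vector
   becomes a single CONST_VECTOR move; a single-element vector, or a
   two-element vector of word-sized elements, is handled with plain word
   moves; an 8-byte splat of one variable element uses the VIS helpers
   above; anything else is assembled in a stack temporary and loaded from
   there.  E.g. a V4HImode vector of four copies of a variable short goes
   through vector_init_bshuffle when VIS2 is available.  */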
12851
12852 void
12853 sparc_expand_vector_init (rtx target, rtx vals)
12854 {
12855 const machine_mode mode = GET_MODE (target);
12856 const machine_mode inner_mode = GET_MODE_INNER (mode);
12857 const int n_elts = GET_MODE_NUNITS (mode);
12858 int i, n_var = 0;
12859 bool all_same = true;
12860 rtx mem;
12861
12862 for (i = 0; i < n_elts; i++)
12863 {
12864 rtx x = XVECEXP (vals, 0, i);
12865 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
12866 n_var++;
12867
12868 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
12869 all_same = false;
12870 }
12871
12872 if (n_var == 0)
12873 {
12874 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
12875 return;
12876 }
12877
12878 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
12879 {
12880 if (GET_MODE_SIZE (inner_mode) == 4)
12881 {
12882 emit_move_insn (gen_lowpart (SImode, target),
12883 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
12884 return;
12885 }
12886 else if (GET_MODE_SIZE (inner_mode) == 8)
12887 {
12888 emit_move_insn (gen_lowpart (DImode, target),
12889 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
12890 return;
12891 }
12892 }
12893 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
12894 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
12895 {
12896 emit_move_insn (gen_highpart (word_mode, target),
12897 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
12898 emit_move_insn (gen_lowpart (word_mode, target),
12899 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
12900 return;
12901 }
12902
12903 if (all_same && GET_MODE_SIZE (mode) == 8)
12904 {
12905 if (TARGET_VIS2)
12906 {
12907 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
12908 return;
12909 }
12910 if (mode == V8QImode)
12911 {
12912 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
12913 return;
12914 }
12915 if (mode == V4HImode)
12916 {
12917 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
12918 return;
12919 }
12920 }
12921
12922 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
12923 for (i = 0; i < n_elts; i++)
12924 emit_move_insn (adjust_address_nv (mem, inner_mode,
12925 i * GET_MODE_SIZE (inner_mode)),
12926 XVECEXP (vals, 0, i));
12927 emit_move_insn (target, mem);
12928 }
12929
12930 /* Implement TARGET_SECONDARY_RELOAD. */
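/* As a summary of the cases handled below: a GENERAL_REGS scratch for
   QImode/HImode transfers involving the FP registers, an FP_REGS scratch
   for unaligned DFmode accesses to the upper FP registers on 32-bit,
   special reload patterns for MEDANY/EMBMEDANY symbolic addresses without
   -fpic, and an FP_REGS intermediate for moves between the upper FP
   registers and the integer registers with VIS3 on 32-bit.  */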
12931
12932 static reg_class_t
12933 sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
12934 machine_mode mode, secondary_reload_info *sri)
12935 {
12936 enum reg_class rclass = (enum reg_class) rclass_i;
12937
12938 sri->icode = CODE_FOR_nothing;
12939 sri->extra_cost = 0;
12940
12941 /* We need a temporary when loading/storing a HImode/QImode value
12942 between memory and the FPU registers. This can happen when combine puts
12943 a paradoxical subreg in a float/fix conversion insn. */
12944 if (FP_REG_CLASS_P (rclass)
12945 && (mode == HImode || mode == QImode)
12946 && (GET_CODE (x) == MEM
12947 || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
12948 && true_regnum (x) == -1)))
12949 return GENERAL_REGS;
12950
12951 /* On 32-bit we need a temporary when loading/storing a DFmode value
12952 between unaligned memory and the upper FPU registers. */
12953 if (TARGET_ARCH32
12954 && rclass == EXTRA_FP_REGS
12955 && mode == DFmode
12956 && GET_CODE (x) == MEM
12957 && ! mem_min_alignment (x, 8))
12958 return FP_REGS;
12959
12960 if (((TARGET_CM_MEDANY
12961 && symbolic_operand (x, mode))
12962 || (TARGET_CM_EMBMEDANY
12963 && text_segment_operand (x, mode)))
12964 && ! flag_pic)
12965 {
12966 if (in_p)
12967 sri->icode = direct_optab_handler (reload_in_optab, mode);
12968 else
12969 sri->icode = direct_optab_handler (reload_out_optab, mode);
12970 return NO_REGS;
12971 }
12972
12973 if (TARGET_VIS3 && TARGET_ARCH32)
12974 {
12975 int regno = true_regnum (x);
12976
12977 /* When using VIS3 fp<-->int register moves, on 32-bit we have
12978 to move 8-byte values in 4-byte pieces. This only works via
12979 FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
12980 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
12981 an FP_REGS intermediate move. */
12982 if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
12983 || ((general_or_i64_p (rclass)
12984 || rclass == GENERAL_OR_FP_REGS)
12985 && SPARC_FP_REG_P (regno)))
12986 {
12987 sri->extra_cost = 2;
12988 return FP_REGS;
12989 }
12990 }
12991
12992 return NO_REGS;
12993 }
12994
12995 /* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
12996 OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
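/* As a rough sketch, for an SImode "dst = (a < b) ? x : y" on V9 the
   expansion below first copies y into dst (unless one of the arms already
   lives there), emits the comparison into a CC register, and then issues a
   conditional move of x guarded by that comparison, e.g.

	mov	y, dst
	cmp	a, b
	movl	%icc, x, dst

   When x happens to be dst, the condition is reversed and y is
   conditionally moved instead.  */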
12997
12998 bool
12999 sparc_expand_conditional_move (machine_mode mode, rtx *operands)
13000 {
13001 enum rtx_code rc = GET_CODE (operands[1]);
13002 machine_mode cmp_mode;
13003 rtx cc_reg, dst, cmp;
13004
13005 cmp = operands[1];
13006 if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
13007 return false;
13008
13009 if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
13010 cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
13011
13012 cmp_mode = GET_MODE (XEXP (cmp, 0));
13013 rc = GET_CODE (cmp);
13014
13015 dst = operands[0];
13016 if (! rtx_equal_p (operands[2], dst)
13017 && ! rtx_equal_p (operands[3], dst))
13018 {
13019 if (reg_overlap_mentioned_p (dst, cmp))
13020 dst = gen_reg_rtx (mode);
13021
13022 emit_move_insn (dst, operands[3]);
13023 }
13024 else if (operands[2] == dst)
13025 {
13026 operands[2] = operands[3];
13027
13028 if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
13029 rc = reverse_condition_maybe_unordered (rc);
13030 else
13031 rc = reverse_condition (rc);
13032 }
13033
13034 if (XEXP (cmp, 1) == const0_rtx
13035 && GET_CODE (XEXP (cmp, 0)) == REG
13036 && cmp_mode == DImode
13037 && v9_regcmp_p (rc))
13038 cc_reg = XEXP (cmp, 0);
13039 else
13040 cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
13041
13042 cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
13043
13044 emit_insn (gen_rtx_SET (dst,
13045 gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
13046
13047 if (dst != operands[0])
13048 emit_move_insn (operands[0], dst);
13049
13050 return true;
13051 }
13052
13053 /* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
13054 into OPERANDS[0] in MODE, depending on the outcome of the comparison of
13055 OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
13056 FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
13057 code to be used for the condition mask. */
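/* A sketch of the three-step expansion below: a VIS partitioned compare
   (the FCODE unspec, e.g. fcmpgt16) produces a bit mask in an integer
   register, the CCODE unspec (a VIS3 cmask insn) turns that mask into a
   byte-shuffle pattern in the GSR, and a final BSHUFFLE picks each element
   of the result from OPERANDS[1] or OPERANDS[2] accordingly.  */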
13058
13059 void
13060 sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
13061 {
13062 rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
13063 enum rtx_code code = GET_CODE (operands[3]);
13064
13065 mask = gen_reg_rtx (Pmode);
13066 cop0 = operands[4];
13067 cop1 = operands[5];
13068 if (code == LT || code == GE)
13069 {
13070 rtx t;
13071
13072 code = swap_condition (code);
13073 t = cop0; cop0 = cop1; cop1 = t;
13074 }
13075
13076 gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
13077
13078 fcmp = gen_rtx_UNSPEC (Pmode,
13079 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
13080 fcode);
13081
13082 cmask = gen_rtx_UNSPEC (DImode,
13083 gen_rtvec (2, mask, gsr),
13084 ccode);
13085
13086 bshuf = gen_rtx_UNSPEC (mode,
13087 gen_rtvec (3, operands[1], operands[2], gsr),
13088 UNSPEC_BSHUFFLE);
13089
13090 emit_insn (gen_rtx_SET (mask, fcmp));
13091 emit_insn (gen_rtx_SET (gsr, cmask));
13092
13093 emit_insn (gen_rtx_SET (operands[0], bshuf));
13094 }
13095
13096 /* On SPARC, return 4 for any mode that naturally allocates into the
13097 float registers.  */
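/* So, as examples: under -m32 every mode yields 4; under -m64 integer
   modes such as SImode and DImode yield 8 (UNITS_PER_WORD), while DFmode,
   SFmode and the VIS vector modes yield 4.  */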
13098
13099 unsigned int
13100 sparc_regmode_natural_size (machine_mode mode)
13101 {
13102 int size = UNITS_PER_WORD;
13103
13104 if (TARGET_ARCH64)
13105 {
13106 enum mode_class mclass = GET_MODE_CLASS (mode);
13107
13108 if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
13109 size = 4;
13110 }
13111
13112 return size;
13113 }
13114
13115 /* Return TRUE if it is a good idea to tie two pseudo registers
13116 when one has mode MODE1 and one has mode MODE2.
13117 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
13118 for any hard reg, then this must be FALSE for correct output.
13119
13120 For V9 we have to deal with the fact that only the lower 32 floating
13121 point registers are 32-bit addressable. */
13122
13123 bool
13124 sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
13125 {
13126 enum mode_class mclass1, mclass2;
13127 unsigned short size1, size2;
13128
13129 if (mode1 == mode2)
13130 return true;
13131
13132 mclass1 = GET_MODE_CLASS (mode1);
13133 mclass2 = GET_MODE_CLASS (mode2);
13134 if (mclass1 != mclass2)
13135 return false;
13136
13137 if (! TARGET_V9)
13138 return true;
13139
13140 /* Classes are the same and we are V9 so we have to deal with upper
13141 vs. lower floating point registers. If one of the modes is a
13142 4-byte mode, and the other is not, we have to mark them as not
13143 tieable because only the lower 32 floating point registers are
13144 addressable 32 bits at a time.
13145
13146 We can't just test explicitly for SFmode, otherwise we won't
13147 cover the vector mode cases properly. */
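/* For instance, SFmode (4 bytes) and DFmode (8 bytes) end up not tieable
   here, whereas two 8-byte modes of the same class, such as V8QImode and
   V4HImode, remain tieable.  */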
13148
13149 if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
13150 return true;
13151
13152 size1 = GET_MODE_SIZE (mode1);
13153 size2 = GET_MODE_SIZE (mode2);
13154 if ((size1 > 4 && size2 == 4)
13155 || (size2 > 4 && size1 == 4))
13156 return false;
13157
13158 return true;
13159 }
13160
13161 /* Implement TARGET_CSTORE_MODE. */
13162
13163 static machine_mode
13164 sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
13165 {
13166 return (TARGET_ARCH64 ? DImode : SImode);
13167 }
13168
13169 /* Return the compound expression made of T1 and T2. */
13170
13171 static inline tree
13172 compound_expr (tree t1, tree t2)
13173 {
13174 return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
13175 }
13176
13177 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13178
13179 static void
13180 sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13181 {
13182 if (!TARGET_FPU)
13183 return;
13184
13185 const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
13186 const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
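/* These pick out the accrued-exception field (aexc, FSR bits 9:5) and the
   trap-enable mask (TEM, FSR bits 27:23) of the %fsr register.  */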
13187
13188 /* We generate the equivalent of feholdexcept (&fenv_var):
13189
13190 unsigned int fenv_var;
13191 __builtin_store_fsr (&fenv_var);
13192
13193 unsigned int tmp1_var;
13194 tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
13195
13196 __builtin_load_fsr (&tmp1_var); */
13197
13198 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
13199 TREE_ADDRESSABLE (fenv_var) = 1;
13200 tree fenv_addr = build_fold_addr_expr (fenv_var);
13201 tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
13202 tree hold_stfsr
13203 = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
13204 build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);
13205
13206 tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
13207 TREE_ADDRESSABLE (tmp1_var) = 1;
13208 tree masked_fenv_var
13209 = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13210 build_int_cst (unsigned_type_node,
13211 ~(accrued_exception_mask | trap_enable_mask)));
13212 tree hold_mask
13213 = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
13214 NULL_TREE, NULL_TREE);
13215
13216 tree tmp1_addr = build_fold_addr_expr (tmp1_var);
13217 tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
13218 tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
13219
13220 *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
13221
13222 /* We reload the value of tmp1_var to clear the exceptions:
13223
13224 __builtin_load_fsr (&tmp1_var); */
13225
13226 *clear = build_call_expr (ldfsr, 1, tmp1_addr);
13227
13228 /* We generate the equivalent of feupdateenv (&fenv_var):
13229
13230 unsigned int tmp2_var;
13231 __builtin_store_fsr (&tmp2_var);
13232
13233 __builtin_load_fsr (&fenv_var);
13234
13235 if (SPARC_LOW_FE_EXCEPT_VALUES)
13236 tmp2_var >>= 5;
13237 __atomic_feraiseexcept ((int) tmp2_var); */
13238
13239 tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
13240 TREE_ADDRESSABLE (tmp2_var) = 1;
13241 tree tmp2_addr = build_fold_addr_expr (tmp2_var);
13242 tree update_stfsr
13243 = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
13244 build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);
13245
13246 tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
13247
13248 tree atomic_feraiseexcept
13249 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13250 tree update_call
13251 = build_call_expr (atomic_feraiseexcept, 1,
13252 fold_convert (integer_type_node, tmp2_var));
13253
13254 if (SPARC_LOW_FE_EXCEPT_VALUES)
13255 {
13256 tree shifted_tmp2_var
13257 = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
13258 build_int_cst (unsigned_type_node, 5));
13259 tree update_shift
13260 = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
13261 update_call = compound_expr (update_shift, update_call);
13262 }
13263
13264 *update
13265 = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
13266 }
13267
13268 #include "gt-sparc.h"