/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "dwarf2out.h"
#include "opts.h"

/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;

  /* Integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     not variable.  */
  const int int_mul_bit_factor;

  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
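
/* Worked example (commentary added here, not in the original sources):
   with int_mul == COSTS_N_INSNS (4) and int_mul_bit_factor == 2, as in
   the ultrasparc entry below, a multiply whose rs1 operand has its
   highest set bit at position 11 costs
   COSTS_N_INSNS (4) + (11 - 3) / 2, i.e. the base cost plus 4 units.  */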

static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
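
/* For instance (commentary added here, not in the original sources):
   entry 24 above maps %i0 to hard register 8 (%o0).  A leaf function
   never executes a save instruction, so what its body calls the in
   registers physically live in the caller's out registers.  */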

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};

struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size		cfun->machine->frame_size
#define sparc_apparent_frame_size	cfun->machine->apparent_frame_size
#define sparc_frame_base_reg		cfun->machine->frame_base_reg
#define sparc_frame_base_offset		cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs		cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p		cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p	cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p	cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static void sparc_reorg (void);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   enum machine_mode, secondary_reload_info *);
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

struct gcc_target targetm = TARGET_INITIALIZER;

static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_LITTLE_ENDIAN)
    fprintf (stderr, "LITTLE_ENDIAN ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { "leon", MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
  };
  const struct cpu_table *cpu;
  unsigned int i;
  int fpu;

  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
	{
	  bool invert;
	  int mask;

	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (! strcmp (q, "all"))
	    mask = MASK_DEBUG_ALL;
	  else if (! strcmp (q, "options"))
	    mask = MASK_DEBUG_OPTIONS;
	  else
	    error ("unknown -mdebug-%s switch", q);

	  if (invert)
	    sparc_debug &= ~mask;
	  else
	    sparc_debug |= mask;
	}
    }

  if (TARGET_DEBUG_OPTIONS)
    {
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
    {
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      fprintf (stderr, "sparc_cpu: %s\n",
	       cpu_table[(int) sparc_cpu].name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);
    }

  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
		   & ~(MASK_FMAF | MASK_VIS3)
#endif
		   );

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* -mvis2 implies -mvis.  */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis.  */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit.  */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    };

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
}
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
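
/* Commentary added here: these six codes correspond to the V9
   instructions that test a register against zero, i.e. brz, brlez, brlz,
   brnz, brgz, brgez for branches and the matching movr variants for
   conditional moves.  */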

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}

/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partitioning into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
	 always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && (const_zero_operand (operands[1], mode)
	      || const_all_ones_operand (operands[1], mode)))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && ! can_create_pseudo_p ())))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */
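/* Illustrative example (commentary added here): for op1 == 0x12345678
   the CONST_INT path below first moves 0x12345400 (the sethi part) into
   TEMP and then IORs in the low 10 bits, 0x278.  */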

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp = op0;

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly which bits are
   set for a HIGH; they are unspecified.  Unfortunately this leads to
   many missed optimizations during CSE.  We mask out the non-HIGH bits
   and match a plain movdi, to alleviate this problem.  */
1696 static rtx
1697 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1698 {
1699 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1700 }
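/* As a concrete illustration (the value is arbitrary, not from the
   sources): gen_safe_HIGH64 (dest, 0x12345678) emits a SET of the
   constant 0x12345400, i.e. the value with its low 10 bits cleared,
   which is exactly what a sethi instruction produces.  */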
1701
1702 static rtx
1703 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1704 {
1705 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1706 }
1707
1708 static rtx
1709 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1710 {
1711 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1712 }
1713
1714 static rtx
1715 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1716 {
1717 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1718 }
1719
1720 /* Worker routines for 64-bit constant formation on arch64.
1721 One of the key things to do in these emissions is
1722 to create as many temp REGs as possible. This makes it
1723 possible for half-built constants to be reused when
1724 similar values are required later on.
1725 Without doing this, the optimizer cannot see such
1726 opportunities. */
1727
1728 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1729 unsigned HOST_WIDE_INT, int);
1730
1731 static void
1732 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1733 unsigned HOST_WIDE_INT low_bits, int is_neg)
1734 {
1735 unsigned HOST_WIDE_INT high_bits;
1736
1737 if (is_neg)
1738 high_bits = (~low_bits) & 0xffffffff;
1739 else
1740 high_bits = low_bits;
1741
1742 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1743 if (!is_neg)
1744 {
1745 emit_insn (gen_rtx_SET (VOIDmode, op0,
1746 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1747 }
1748 else
1749 {
1750 /* If we are XOR'ing with -1, then we should emit a one's complement
1751 instead. This way the combiner will notice logical operations
1752 such as ANDN later on and substitute. */
1753 if ((low_bits & 0x3ff) == 0x3ff)
1754 {
1755 emit_insn (gen_rtx_SET (VOIDmode, op0,
1756 gen_rtx_NOT (DImode, temp)));
1757 }
1758 else
1759 {
1760 emit_insn (gen_rtx_SET (VOIDmode, op0,
1761 gen_safe_XOR64 (temp,
1762 (-(HOST_WIDE_INT)0x400
1763 | (low_bits & 0x3ff)))));
1764 }
1765 }
1766 }
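/* A sketch of why the is_neg case above works (derived from the masks
   in the code, with illustrative reasoning): after the sethi, TEMP
   holds bits 10-31 of ~low_bits and zeros everywhere else.  The XOR
   constant -0x400 | low10 sign-extends to all-ones in bits 10-63, so
   the XOR flips bits 10-31 back to low_bits, turns bits 32-63 into
   all-ones, and copies the low 10 bits in directly -- leaving op0
   with the 64-bit sign extension of the negative 32-bit value.
   Note also that in the !is_neg case high_bits == low_bits, so
   OR'ing (high_bits & 0x3ff) is the same as OR'ing the low 10 bits
   of the value.  */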
1767
1768 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1769 unsigned HOST_WIDE_INT, int);
1770
1771 static void
1772 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1773 unsigned HOST_WIDE_INT high_bits,
1774 unsigned HOST_WIDE_INT low_immediate,
1775 int shift_count)
1776 {
1777 rtx temp2 = op0;
1778
1779 if ((high_bits & 0xfffffc00) != 0)
1780 {
1781 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1782 if ((high_bits & ~0xfffffc00) != 0)
1783 emit_insn (gen_rtx_SET (VOIDmode, op0,
1784 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1785 else
1786 temp2 = temp;
1787 }
1788 else
1789 {
1790 emit_insn (gen_safe_SET64 (temp, high_bits));
1791 temp2 = temp;
1792 }
1793
1794 /* Now shift it up into place. */
1795 emit_insn (gen_rtx_SET (VOIDmode, op0,
1796 gen_rtx_ASHIFT (DImode, temp2,
1797 GEN_INT (shift_count))));
1798
1799 /* If there is a low immediate part, finish up by
1800 putting that in as well. */
1801 if (low_immediate != 0)
1802 emit_insn (gen_rtx_SET (VOIDmode, op0,
1803 gen_safe_OR64 (op0, low_immediate)));
1804 }
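/* Worked example with illustrative values:
   sparc_emit_set_const64_quick2 (op0, temp, 0x12345678, 0x123, 32)
   emits roughly
     sethi %hi(0x12345678), %temp
     or    %temp, 0x278, %op0
     sllx  %op0, 32, %op0
     or    %op0, 0x123, %op0
   leaving op0 holding 0x1234567800000123.  */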
1805
1806 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1807 unsigned HOST_WIDE_INT);
1808
1809 /* Full 64-bit constant decomposition. Even though this is the
1810 'worst' case, we still optimize a few things away. */
1811 static void
1812 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1813 unsigned HOST_WIDE_INT high_bits,
1814 unsigned HOST_WIDE_INT low_bits)
1815 {
1816 rtx sub_temp = op0;
1817
1818 if (can_create_pseudo_p ())
1819 sub_temp = gen_reg_rtx (DImode);
1820
1821 if ((high_bits & 0xfffffc00) != 0)
1822 {
1823 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1824 if ((high_bits & ~0xfffffc00) != 0)
1825 emit_insn (gen_rtx_SET (VOIDmode,
1826 sub_temp,
1827 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1828 else
1829 sub_temp = temp;
1830 }
1831 else
1832 {
1833 emit_insn (gen_safe_SET64 (temp, high_bits));
1834 sub_temp = temp;
1835 }
1836
1837 if (can_create_pseudo_p ())
1838 {
1839 rtx temp2 = gen_reg_rtx (DImode);
1840 rtx temp3 = gen_reg_rtx (DImode);
1841 rtx temp4 = gen_reg_rtx (DImode);
1842
1843 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1844 gen_rtx_ASHIFT (DImode, sub_temp,
1845 GEN_INT (32))));
1846
1847 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1848 if ((low_bits & ~0xfffffc00) != 0)
1849 {
1850 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1851 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1852 emit_insn (gen_rtx_SET (VOIDmode, op0,
1853 gen_rtx_PLUS (DImode, temp4, temp3)));
1854 }
1855 else
1856 {
1857 emit_insn (gen_rtx_SET (VOIDmode, op0,
1858 gen_rtx_PLUS (DImode, temp4, temp2)));
1859 }
1860 }
1861 else
1862 {
1863 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1864 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1865 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1866 int to_shift = 12;
1867
1868 /* We are in the middle of reload, so this is really
1869 painful. However, we still make an attempt to
1870 avoid emitting truly stupid code. */
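/* To spell out the decomposition above: SUB_TEMP still holds
   HIGH_BITS in the low word at this point.  LOW1 is bits 31-20 of
   low_bits, LOW2 bits 19-8, and LOW3 bits 7-0; the shifts below
   total 12+12+8 = 32, moving HIGH_BITS into the top word while
   the or's fill in the low word 12, 12 and 8 bits at a time.  */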
1871 if (low1 != const0_rtx)
1872 {
1873 emit_insn (gen_rtx_SET (VOIDmode, op0,
1874 gen_rtx_ASHIFT (DImode, sub_temp,
1875 GEN_INT (to_shift))));
1876 emit_insn (gen_rtx_SET (VOIDmode, op0,
1877 gen_rtx_IOR (DImode, op0, low1)));
1878 sub_temp = op0;
1879 to_shift = 12;
1880 }
1881 else
1882 {
1883 to_shift += 12;
1884 }
1885 if (low2 != const0_rtx)
1886 {
1887 emit_insn (gen_rtx_SET (VOIDmode, op0,
1888 gen_rtx_ASHIFT (DImode, sub_temp,
1889 GEN_INT (to_shift))));
1890 emit_insn (gen_rtx_SET (VOIDmode, op0,
1891 gen_rtx_IOR (DImode, op0, low2)));
1892 sub_temp = op0;
1893 to_shift = 8;
1894 }
1895 else
1896 {
1897 to_shift += 8;
1898 }
1899 emit_insn (gen_rtx_SET (VOIDmode, op0,
1900 gen_rtx_ASHIFT (DImode, sub_temp,
1901 GEN_INT (to_shift))));
1902 if (low3 != const0_rtx)
1903 emit_insn (gen_rtx_SET (VOIDmode, op0,
1904 gen_rtx_IOR (DImode, op0, low3)));
1905 /* phew... */
1906 }
1907 }
1908
1909 /* Analyze a 64-bit constant for certain properties. */
1910 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1911 unsigned HOST_WIDE_INT,
1912 int *, int *, int *);
1913
1914 static void
1915 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1916 unsigned HOST_WIDE_INT low_bits,
1917 int *hbsp, int *lbsp, int *abbasp)
1918 {
1919 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1920 int i;
1921
1922 lowest_bit_set = highest_bit_set = -1;
1923 i = 0;
1924 do
1925 {
1926 if ((lowest_bit_set == -1)
1927 && ((low_bits >> i) & 1))
1928 lowest_bit_set = i;
1929 if ((highest_bit_set == -1)
1930 && ((high_bits >> (32 - i - 1)) & 1))
1931 highest_bit_set = (64 - i - 1);
1932 }
1933 while (++i < 32
1934 && ((highest_bit_set == -1)
1935 || (lowest_bit_set == -1)));
1936 if (i == 32)
1937 {
1938 i = 0;
1939 do
1940 {
1941 if ((lowest_bit_set == -1)
1942 && ((high_bits >> i) & 1))
1943 lowest_bit_set = i + 32;
1944 if ((highest_bit_set == -1)
1945 && ((low_bits >> (32 - i - 1)) & 1))
1946 highest_bit_set = 32 - i - 1;
1947 }
1948 while (++i < 32
1949 && ((highest_bit_set == -1)
1950 || (lowest_bit_set == -1)));
1951 }
1952 /* If there are no bits set this should have gone out
1953 as one instruction! */
1954 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1955 all_bits_between_are_set = 1;
1956 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1957 {
1958 if (i < 32)
1959 {
1960 if ((low_bits & (1 << i)) != 0)
1961 continue;
1962 }
1963 else
1964 {
1965 if ((high_bits & (1 << (i - 32))) != 0)
1966 continue;
1967 }
1968 all_bits_between_are_set = 0;
1969 break;
1970 }
1971 *hbsp = highest_bit_set;
1972 *lbsp = lowest_bit_set;
1973 *abbasp = all_bits_between_are_set;
1974 }
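/* Worked example (illustrative): for the constant 0x3f0000000,
   high_bits is 0x3 and low_bits is 0xf0000000, so the loops above
   compute lowest_bit_set == 28, highest_bit_set == 33 and
   all_bits_between_are_set == 1.  */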
1975
1976 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1977
1978 static int
1979 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1980 unsigned HOST_WIDE_INT low_bits)
1981 {
1982 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1983
1984 if (high_bits == 0
1985 || high_bits == 0xffffffff)
1986 return 1;
1987
1988 analyze_64bit_constant (high_bits, low_bits,
1989 &highest_bit_set, &lowest_bit_set,
1990 &all_bits_between_are_set);
1991
1992 if ((highest_bit_set == 63
1993 || lowest_bit_set == 0)
1994 && all_bits_between_are_set != 0)
1995 return 1;
1996
1997 if ((highest_bit_set - lowest_bit_set) < 21)
1998 return 1;
1999
2000 return 0;
2001 }
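/* Note that the < 21 test above admits spans of at most 21 bits,
   while the sethi field used for such spans is 22 bits wide
   (bits 10-31), so the test is safe and, if anything, conservative
   by one bit.  */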
2002
2003 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2004 unsigned HOST_WIDE_INT,
2005 int, int);
2006
2007 static unsigned HOST_WIDE_INT
2008 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2009 unsigned HOST_WIDE_INT low_bits,
2010 int lowest_bit_set, int shift)
2011 {
2012 HOST_WIDE_INT hi, lo;
2013
2014 if (lowest_bit_set < 32)
2015 {
2016 lo = (low_bits >> lowest_bit_set) << shift;
2017 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2018 }
2019 else
2020 {
2021 lo = 0;
2022 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2023 }
2024 gcc_assert (! (hi & lo));
2025 return (hi | lo);
2026 }
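/* Worked example (illustrative): with high_bits == 0,
   low_bits == 0x0ff00000, lowest_bit_set == 20 and shift == 10,
   the result is (0x0ff00000 >> 20) << 10 == 0x3fc00, i.e. the
   eight interesting bits repositioned to start at bit 10, where
   a sethi can reach them.  */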
2027
2028 /* Here we are sure to be arch64 and this is an integer constant
2029 being loaded into a register. Emit the most efficient
2030 insn sequence possible. Detection of all the 1-insn cases
2031 has been done already. */
2032 static void
2033 sparc_emit_set_const64 (rtx op0, rtx op1)
2034 {
2035 unsigned HOST_WIDE_INT high_bits, low_bits;
2036 int lowest_bit_set, highest_bit_set;
2037 int all_bits_between_are_set;
2038 rtx temp = 0;
2039
2040 /* Sanity check that we know what we are working with. */
2041 gcc_assert (TARGET_ARCH64
2042 && (GET_CODE (op0) == SUBREG
2043 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2044
2045 if (! can_create_pseudo_p ())
2046 temp = op0;
2047
2048 if (GET_CODE (op1) != CONST_INT)
2049 {
2050 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2051 return;
2052 }
2053
2054 if (! temp)
2055 temp = gen_reg_rtx (DImode);
2056
2057 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2058 low_bits = (INTVAL (op1) & 0xffffffff);
2059
2060 /* low_bits bits 0 --> 31
2061 high_bits bits 32 --> 63 */
2062
2063 analyze_64bit_constant (high_bits, low_bits,
2064 &highest_bit_set, &lowest_bit_set,
2065 &all_bits_between_are_set);
2066
2067 /* First try for a 2-insn sequence. */
2068
2069 /* These situations are preferred because the optimizer can
2070 * do more things with them:
2071 * 1) mov -1, %reg
2072 * sllx %reg, shift, %reg
2073 * 2) mov -1, %reg
2074 * srlx %reg, shift, %reg
2075 * 3) mov some_small_const, %reg
2076 * sllx %reg, shift, %reg
2077 */
2078 if (((highest_bit_set == 63
2079 || lowest_bit_set == 0)
2080 && all_bits_between_are_set != 0)
2081 || ((highest_bit_set - lowest_bit_set) < 12))
2082 {
2083 HOST_WIDE_INT the_const = -1;
2084 int shift = lowest_bit_set;
2085
2086 if ((highest_bit_set != 63
2087 && lowest_bit_set != 0)
2088 || all_bits_between_are_set == 0)
2089 {
2090 the_const =
2091 create_simple_focus_bits (high_bits, low_bits,
2092 lowest_bit_set, 0);
2093 }
2094 else if (lowest_bit_set == 0)
2095 shift = -(63 - highest_bit_set);
2096
2097 gcc_assert (SPARC_SIMM13_P (the_const));
2098 gcc_assert (shift != 0);
2099
2100 emit_insn (gen_safe_SET64 (temp, the_const));
2101 if (shift > 0)
2102 emit_insn (gen_rtx_SET (VOIDmode,
2103 op0,
2104 gen_rtx_ASHIFT (DImode,
2105 temp,
2106 GEN_INT (shift))));
2107 else if (shift < 0)
2108 emit_insn (gen_rtx_SET (VOIDmode,
2109 op0,
2110 gen_rtx_LSHIFTRT (DImode,
2111 temp,
2112 GEN_INT (-shift))));
2113 return;
2114 }
2115
2116 /* Now a range of 21 or fewer bits set somewhere.
2117 * 1) sethi %hi(focus_bits), %reg
2118 * sllx %reg, shift, %reg
2119 * 2) sethi %hi(focus_bits), %reg
2120 * srlx %reg, shift, %reg
2121 */
2122 if ((highest_bit_set - lowest_bit_set) < 21)
2123 {
2124 unsigned HOST_WIDE_INT focus_bits =
2125 create_simple_focus_bits (high_bits, low_bits,
2126 lowest_bit_set, 10);
2127
2128 gcc_assert (SPARC_SETHI_P (focus_bits));
2129 gcc_assert (lowest_bit_set != 10);
2130
2131 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2132
2133 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2134 if (lowest_bit_set < 10)
2135 emit_insn (gen_rtx_SET (VOIDmode,
2136 op0,
2137 gen_rtx_LSHIFTRT (DImode, temp,
2138 GEN_INT (10 - lowest_bit_set))));
2139 else if (lowest_bit_set > 10)
2140 emit_insn (gen_rtx_SET (VOIDmode,
2141 op0,
2142 gen_rtx_ASHIFT (DImode, temp,
2143 GEN_INT (lowest_bit_set - 10))));
2144 return;
2145 }
2146
2147 /* 1) sethi %hi(low_bits), %reg
2148 * or %reg, %lo(low_bits), %reg
2149 * 2) sethi %hi(~low_bits), %reg
2150 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2151 */
2152 if (high_bits == 0
2153 || high_bits == 0xffffffff)
2154 {
2155 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2156 (high_bits == 0xffffffff));
2157 return;
2158 }
2159
2160 /* Now, try 3-insn sequences. */
2161
2162 /* 1) sethi %hi(high_bits), %reg
2163 * or %reg, %lo(high_bits), %reg
2164 * sllx %reg, 32, %reg
2165 */
2166 if (low_bits == 0)
2167 {
2168 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2169 return;
2170 }
2171
2172 /* We may be able to do something quick
2173 when the constant is negated, so try that. */
2174 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2175 (~low_bits) & 0xfffffc00))
2176 {
2177 /* NOTE: The trailing bits get XOR'd so we need the
2178 non-negated bits, not the negated ones. */
2179 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2180
2181 if ((((~high_bits) & 0xffffffff) == 0
2182 && ((~low_bits) & 0x80000000) == 0)
2183 || (((~high_bits) & 0xffffffff) == 0xffffffff
2184 && ((~low_bits) & 0x80000000) != 0))
2185 {
2186 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2187
2188 if ((SPARC_SETHI_P (fast_int)
2189 && (~high_bits & 0xffffffff) == 0)
2190 || SPARC_SIMM13_P (fast_int))
2191 emit_insn (gen_safe_SET64 (temp, fast_int));
2192 else
2193 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2194 }
2195 else
2196 {
2197 rtx negated_const;
2198 negated_const = GEN_INT (((~low_bits) & 0xfffffc00)
2199 | (((HOST_WIDE_INT) ((~high_bits) & 0xffffffff)) << 32));
2200 sparc_emit_set_const64 (temp, negated_const);
2201 }
2202
2203 /* If we are XOR'ing with -1, then we should emit a one's complement
2204 instead. This way the combiner will notice logical operations
2205 such as ANDN later on and substitute. */
2206 if (trailing_bits == 0x3ff)
2207 {
2208 emit_insn (gen_rtx_SET (VOIDmode, op0,
2209 gen_rtx_NOT (DImode, temp)));
2210 }
2211 else
2212 {
2213 emit_insn (gen_rtx_SET (VOIDmode,
2214 op0,
2215 gen_safe_XOR64 (temp,
2216 (-0x400 | trailing_bits))));
2217 }
2218 return;
2219 }
2220
2221 /* 1) sethi %hi(xxx), %reg
2222 * or %reg, %lo(xxx), %reg
2223 * sllx %reg, yyy, %reg
2224 *
2225 * ??? This is just a generalized version of the low_bits==0
2226 * thing above, FIXME...
2227 */
2228 if ((highest_bit_set - lowest_bit_set) < 32)
2229 {
2230 unsigned HOST_WIDE_INT focus_bits =
2231 create_simple_focus_bits (high_bits, low_bits,
2232 lowest_bit_set, 0);
2233
2234 /* We can't get here with both halves in the same word; those cases were handled above. */
2235 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2236
2237 /* So what we know is that the set bits straddle the
2238 middle of the 64-bit word. */
2239 sparc_emit_set_const64_quick2 (op0, temp,
2240 focus_bits, 0,
2241 lowest_bit_set);
2242 return;
2243 }
2244
2245 /* 1) sethi %hi(high_bits), %reg
2246 * or %reg, %lo(high_bits), %reg
2247 * sllx %reg, 32, %reg
2248 * or %reg, low_bits, %reg
2249 */
2250 if (SPARC_SIMM13_P (low_bits)
2251 && ((int) low_bits > 0))
2252 {
2253 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2254 return;
2255 }
2256
2257 /* The easiest way when all else fails, is full decomposition. */
2258 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2259 }
2260 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2261
2262 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2263 return the mode to be used for the comparison. For floating-point,
2264 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2265 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2266 processing is needed. */
2267
2268 enum machine_mode
2269 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2270 {
2271 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2272 {
2273 switch (op)
2274 {
2275 case EQ:
2276 case NE:
2277 case UNORDERED:
2278 case ORDERED:
2279 case UNLT:
2280 case UNLE:
2281 case UNGT:
2282 case UNGE:
2283 case UNEQ:
2284 case LTGT:
2285 return CCFPmode;
2286
2287 case LT:
2288 case LE:
2289 case GT:
2290 case GE:
2291 return CCFPEmode;
2292
2293 default:
2294 gcc_unreachable ();
2295 }
2296 }
2297 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2298 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2299 {
2300 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2301 return CCX_NOOVmode;
2302 else
2303 return CC_NOOVmode;
2304 }
2305 else
2306 {
2307 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2308 return CCXmode;
2309 else
2310 return CCmode;
2311 }
2312 }
2313
2314 /* Emit the compare insn and return the CC reg for a CODE comparison
2315 with operands X and Y. */
2316
2317 static rtx
2318 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2319 {
2320 enum machine_mode mode;
2321 rtx cc_reg;
2322
2323 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2324 return x;
2325
2326 mode = SELECT_CC_MODE (code, x, y);
2327
2328 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2329 fcc regs (cse can't tell they're really call clobbered regs and will
2330 remove a duplicate comparison even if there is an intervening function
2331 call - it will then try to reload the cc reg via an int reg which is why
2332 we need the movcc patterns). It is possible to provide the movcc
2333 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2334 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2335 to tell cse that CCFPE mode registers (even pseudos) are call
2336 clobbered. */
2337
2338 /* ??? This is an experiment. Rather than making changes to cse which may
2339 or may not be easy/clean, we do our own cse. This is possible because
2340 we will generate hard registers. Cse knows they're call clobbered (it
2341 doesn't know the same thing about pseudos). If we guess wrong, no big
2342 deal, but if we win, great! */
2343
2344 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2345 #if 1 /* experiment */
2346 {
2347 int reg;
2348 /* We cycle through the registers to ensure they're all exercised. */
2349 static int next_fcc_reg = 0;
2350 /* Previous x,y for each fcc reg. */
2351 static rtx prev_args[4][2];
2352
2353 /* Scan prev_args for x,y. */
2354 for (reg = 0; reg < 4; reg++)
2355 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2356 break;
2357 if (reg == 4)
2358 {
2359 reg = next_fcc_reg;
2360 prev_args[reg][0] = x;
2361 prev_args[reg][1] = y;
2362 next_fcc_reg = (next_fcc_reg + 1) & 3;
2363 }
2364 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2365 }
2366 #else
2367 cc_reg = gen_reg_rtx (mode);
2368 #endif /* ! experiment */
2369 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2370 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2371 else
2372 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2373
2374 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2375 will only result in an unrecognizable insn so no point in asserting. */
2376 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2377
2378 return cc_reg;
2379 }
2380
2381
2382 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2383
2384 rtx
2385 gen_compare_reg (rtx cmp)
2386 {
2387 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2388 }
2389
2390 /* This function is used for v9 only.
2391 DEST is the target of the Scc insn.
2392 CODE is the code for an Scc's comparison.
2393 X and Y are the values we compare.
2394
2395 This function is needed to turn
2396
2397 (set (reg:SI 110)
2398 (gt (reg:CCX 100 %icc)
2399 (const_int 0)))
2400 into
2401 (set (reg:SI 110)
2402 (gt:DI (reg:CCX 100 %icc)
2403 (const_int 0)))
2404
2405 I.e. the instruction recognizer needs to see the mode of the comparison to
2406 find the right instruction. We could use "gt:DI" right in the
2407 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2408
2409 static int
2410 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2411 {
2412 if (! TARGET_ARCH64
2413 && (GET_MODE (x) == DImode
2414 || GET_MODE (dest) == DImode))
2415 return 0;
2416
2417 /* Try to use the movrCC insns. */
2418 if (TARGET_ARCH64
2419 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2420 && y == const0_rtx
2421 && v9_regcmp_p (compare_code))
2422 {
2423 rtx op0 = x;
2424 rtx temp;
2425
2426 /* Special case for op0 != 0. This can be done with one instruction if
2427 dest == x. */
2428
2429 if (compare_code == NE
2430 && GET_MODE (dest) == DImode
2431 && rtx_equal_p (op0, dest))
2432 {
2433 emit_insn (gen_rtx_SET (VOIDmode, dest,
2434 gen_rtx_IF_THEN_ELSE (DImode,
2435 gen_rtx_fmt_ee (compare_code, DImode,
2436 op0, const0_rtx),
2437 const1_rtx,
2438 dest)));
2439 return 1;
2440 }
2441
2442 if (reg_overlap_mentioned_p (dest, op0))
2443 {
2444 /* Handle the case where dest == x.
2445 We "early clobber" the result. */
2446 op0 = gen_reg_rtx (GET_MODE (x));
2447 emit_move_insn (op0, x);
2448 }
2449
2450 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2451 if (GET_MODE (op0) != DImode)
2452 {
2453 temp = gen_reg_rtx (DImode);
2454 convert_move (temp, op0, 0);
2455 }
2456 else
2457 temp = op0;
2458 emit_insn (gen_rtx_SET (VOIDmode, dest,
2459 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2460 gen_rtx_fmt_ee (compare_code, DImode,
2461 temp, const0_rtx),
2462 const1_rtx,
2463 dest)));
2464 return 1;
2465 }
2466 else
2467 {
2468 x = gen_compare_reg_1 (compare_code, x, y);
2469 y = const0_rtx;
2470
2471 gcc_assert (GET_MODE (x) != CC_NOOVmode
2472 && GET_MODE (x) != CCX_NOOVmode);
2473
2474 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2475 emit_insn (gen_rtx_SET (VOIDmode, dest,
2476 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2477 gen_rtx_fmt_ee (compare_code,
2478 GET_MODE (x), x, y),
2479 const1_rtx, dest)));
2480 return 1;
2481 }
2482 }
2483
2484
2485 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2486 without jumps using the addx/subx instructions. */
2487
2488 bool
2489 emit_scc_insn (rtx operands[])
2490 {
2491 rtx tem;
2492 rtx x;
2493 rtx y;
2494 enum rtx_code code;
2495
2496 /* The quad-word fp compare library routines all return nonzero to indicate
2497 true, which is different from the equivalent libgcc routines, so we must
2498 handle them specially here. */
2499 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2500 {
2501 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2502 GET_CODE (operands[1]));
2503 operands[2] = XEXP (operands[1], 0);
2504 operands[3] = XEXP (operands[1], 1);
2505 }
2506
2507 code = GET_CODE (operands[1]);
2508 x = operands[2];
2509 y = operands[3];
2510
2511 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2512 more applications). The exception to this is "reg != 0" which can
2513 be done in one instruction on v9 (so we do it). */
2514 if (code == EQ)
2515 {
2516 if (GET_MODE (x) == SImode)
2517 {
2518 rtx pat = gen_seqsi_special (operands[0], x, y);
2519 emit_insn (pat);
2520 return true;
2521 }
2522 else if (GET_MODE (x) == DImode)
2523 {
2524 rtx pat = gen_seqdi_special (operands[0], x, y);
2525 emit_insn (pat);
2526 return true;
2527 }
2528 }
2529
2530 if (code == NE)
2531 {
2532 if (GET_MODE (x) == SImode)
2533 {
2534 rtx pat = gen_snesi_special (operands[0], x, y);
2535 emit_insn (pat);
2536 return true;
2537 }
2538 else if (GET_MODE (x) == DImode)
2539 {
2540 rtx pat = gen_snedi_special (operands[0], x, y);
2541 emit_insn (pat);
2542 return true;
2543 }
2544 }
2545
2546 /* For the rest, on v9 we can use conditional moves. */
2547
2548 if (TARGET_V9)
2549 {
2550 if (gen_v9_scc (operands[0], code, x, y))
2551 return true;
2552 }
2553
2554 /* We can do LTU and GEU using the addx/subx instructions too. And
2555 for GTU/LEU, if both operands are registers, swap them and fall
2556 back to the easy case. */
2557 if (code == GTU || code == LEU)
2558 {
2559 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2560 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2561 {
2562 tem = x;
2563 x = y;
2564 y = tem;
2565 code = swap_condition (code);
2566 }
2567 }
2568
2569 if (code == LTU || code == GEU)
2570 {
2571 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2572 gen_rtx_fmt_ee (code, SImode,
2573 gen_compare_reg_1 (code, x, y),
2574 const0_rtx)));
2575 return true;
2576 }
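/* A sketch of why this works, based on the usual SPARC idiom
   (illustrative, not lifted from the machine description): the
   subcc emitted for the comparison leaves the carry bit C equal
   to (x <u y), so (ltu cc 0) can be rendered as
   "addx %g0, 0, dest" (dest = C) and (geu cc 0) as
   "subx %g0, -1, dest" (dest = 0 - (-1) - C = 1 - C).  */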
2577
2578 /* Nope, do branches. */
2579 return false;
2580 }
2581
2582 /* Emit a conditional jump insn for the v9 architecture using comparison code
2583 CODE and jump target LABEL.
2584 This function exists to take advantage of the v9 brxx insns. */
2585
2586 static void
2587 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2588 {
2589 emit_jump_insn (gen_rtx_SET (VOIDmode,
2590 pc_rtx,
2591 gen_rtx_IF_THEN_ELSE (VOIDmode,
2592 gen_rtx_fmt_ee (code, GET_MODE (op0),
2593 op0, const0_rtx),
2594 gen_rtx_LABEL_REF (VOIDmode, label),
2595 pc_rtx)));
2596 }
2597
2598 void
2599 emit_conditional_branch_insn (rtx operands[])
2600 {
2601 /* The quad-word fp compare library routines all return nonzero to indicate
2602 true, which is different from the equivalent libgcc routines, so we must
2603 handle them specially here. */
2604 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2605 {
2606 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2607 GET_CODE (operands[0]));
2608 operands[1] = XEXP (operands[0], 0);
2609 operands[2] = XEXP (operands[0], 1);
2610 }
2611
2612 if (TARGET_ARCH64 && operands[2] == const0_rtx
2613 && GET_CODE (operands[1]) == REG
2614 && GET_MODE (operands[1]) == DImode)
2615 {
2616 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2617 return;
2618 }
2619
2620 operands[1] = gen_compare_reg (operands[0]);
2621 operands[2] = const0_rtx;
2622 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2623 operands[1], operands[2]);
2624 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2625 operands[3]));
2626 }
2627
2628
2629 /* Generate a DFmode part of a hard TFmode register.
2630 REG is the TFmode hard register, LOW is 1 for the
2631 low 64 bits of the register and 0 otherwise. */
2633 rtx
2634 gen_df_reg (rtx reg, int low)
2635 {
2636 int regno = REGNO (reg);
2637
2638 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2639 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
2640 return gen_rtx_REG (DFmode, regno);
2641 }
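/* For example (SPARC is big-endian, so WORDS_BIG_ENDIAN is set):
   for a TFmode value living in %f0-%f3, this returns %f0 for the
   high half (LOW == 0) and %f2 for the low half (LOW == 1), FP
   registers advancing by 2 per DFmode step.  */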
2642 \f
2643 /* Generate a call to FUNC_NAME with OPERANDS. Operand 0 is the return value.
2644 Unlike normal calls, TFmode operands are passed by reference. It is
2645 assumed that no more than 3 operands are required. */
2646
2647 static void
2648 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2649 {
2650 rtx ret_slot = NULL, arg[3], func_sym;
2651 int i;
2652
2653 /* We only expect to be called for conversions, unary, and binary ops. */
2654 gcc_assert (nargs == 2 || nargs == 3);
2655
2656 for (i = 0; i < nargs; ++i)
2657 {
2658 rtx this_arg = operands[i];
2659 rtx this_slot;
2660
2661 /* TFmode arguments and return values are passed by reference. */
2662 if (GET_MODE (this_arg) == TFmode)
2663 {
2664 int force_stack_temp;
2665
2666 force_stack_temp = 0;
2667 if (TARGET_BUGGY_QP_LIB && i == 0)
2668 force_stack_temp = 1;
2669
2670 if (GET_CODE (this_arg) == MEM
2671 && ! force_stack_temp)
2672 this_arg = XEXP (this_arg, 0);
2673 else if (CONSTANT_P (this_arg)
2674 && ! force_stack_temp)
2675 {
2676 this_slot = force_const_mem (TFmode, this_arg);
2677 this_arg = XEXP (this_slot, 0);
2678 }
2679 else
2680 {
2681 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2682
2683 /* Operand 0 is the return value. We'll copy it out later. */
2684 if (i > 0)
2685 emit_move_insn (this_slot, this_arg);
2686 else
2687 ret_slot = this_slot;
2688
2689 this_arg = XEXP (this_slot, 0);
2690 }
2691 }
2692
2693 arg[i] = this_arg;
2694 }
2695
2696 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2697
2698 if (GET_MODE (operands[0]) == TFmode)
2699 {
2700 if (nargs == 2)
2701 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2702 arg[0], GET_MODE (arg[0]),
2703 arg[1], GET_MODE (arg[1]));
2704 else
2705 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2706 arg[0], GET_MODE (arg[0]),
2707 arg[1], GET_MODE (arg[1]),
2708 arg[2], GET_MODE (arg[2]));
2709
2710 if (ret_slot)
2711 emit_move_insn (operands[0], ret_slot);
2712 }
2713 else
2714 {
2715 rtx ret;
2716
2717 gcc_assert (nargs == 2);
2718
2719 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2720 GET_MODE (operands[0]), 1,
2721 arg[1], GET_MODE (arg[1]));
2722
2723 if (ret != operands[0])
2724 emit_move_insn (operands[0], ret);
2725 }
2726 }
2727
2728 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2729
2730 static void
2731 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2732 {
2733 const char *func;
2734
2735 switch (code)
2736 {
2737 case PLUS:
2738 func = "_Qp_add";
2739 break;
2740 case MINUS:
2741 func = "_Qp_sub";
2742 break;
2743 case MULT:
2744 func = "_Qp_mul";
2745 break;
2746 case DIV:
2747 func = "_Qp_div";
2748 break;
2749 default:
2750 gcc_unreachable ();
2751 }
2752
2753 emit_soft_tfmode_libcall (func, 3, operands);
2754 }
2755
2756 static void
2757 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2758 {
2759 const char *func;
2760
2761 gcc_assert (code == SQRT);
2762 func = "_Qp_sqrt";
2763
2764 emit_soft_tfmode_libcall (func, 2, operands);
2765 }
2766
2767 static void
2768 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2769 {
2770 const char *func;
2771
2772 switch (code)
2773 {
2774 case FLOAT_EXTEND:
2775 switch (GET_MODE (operands[1]))
2776 {
2777 case SFmode:
2778 func = "_Qp_stoq";
2779 break;
2780 case DFmode:
2781 func = "_Qp_dtoq";
2782 break;
2783 default:
2784 gcc_unreachable ();
2785 }
2786 break;
2787
2788 case FLOAT_TRUNCATE:
2789 switch (GET_MODE (operands[0]))
2790 {
2791 case SFmode:
2792 func = "_Qp_qtos";
2793 break;
2794 case DFmode:
2795 func = "_Qp_qtod";
2796 break;
2797 default:
2798 gcc_unreachable ();
2799 }
2800 break;
2801
2802 case FLOAT:
2803 switch (GET_MODE (operands[1]))
2804 {
2805 case SImode:
2806 func = "_Qp_itoq";
2807 if (TARGET_ARCH64)
2808 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2809 break;
2810 case DImode:
2811 func = "_Qp_xtoq";
2812 break;
2813 default:
2814 gcc_unreachable ();
2815 }
2816 break;
2817
2818 case UNSIGNED_FLOAT:
2819 switch (GET_MODE (operands[1]))
2820 {
2821 case SImode:
2822 func = "_Qp_uitoq";
2823 if (TARGET_ARCH64)
2824 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2825 break;
2826 case DImode:
2827 func = "_Qp_uxtoq";
2828 break;
2829 default:
2830 gcc_unreachable ();
2831 }
2832 break;
2833
2834 case FIX:
2835 switch (GET_MODE (operands[0]))
2836 {
2837 case SImode:
2838 func = "_Qp_qtoi";
2839 break;
2840 case DImode:
2841 func = "_Qp_qtox";
2842 break;
2843 default:
2844 gcc_unreachable ();
2845 }
2846 break;
2847
2848 case UNSIGNED_FIX:
2849 switch (GET_MODE (operands[0]))
2850 {
2851 case SImode:
2852 func = "_Qp_qtoui";
2853 break;
2854 case DImode:
2855 func = "_Qp_qtoux";
2856 break;
2857 default:
2858 gcc_unreachable ();
2859 }
2860 break;
2861
2862 default:
2863 gcc_unreachable ();
2864 }
2865
2866 emit_soft_tfmode_libcall (func, 2, operands);
2867 }
2868
2869 /* Expand a hard-float TFmode operation. All arguments must be in
2870 registers. */
2871
2872 static void
2873 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2874 {
2875 rtx op, dest;
2876
2877 if (GET_RTX_CLASS (code) == RTX_UNARY)
2878 {
2879 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2880 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2881 }
2882 else
2883 {
2884 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2885 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2886 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2887 operands[1], operands[2]);
2888 }
2889
2890 if (register_operand (operands[0], VOIDmode))
2891 dest = operands[0];
2892 else
2893 dest = gen_reg_rtx (GET_MODE (operands[0]));
2894
2895 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2896
2897 if (dest != operands[0])
2898 emit_move_insn (operands[0], dest);
2899 }
2900
2901 void
2902 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2903 {
2904 if (TARGET_HARD_QUAD)
2905 emit_hard_tfmode_operation (code, operands);
2906 else
2907 emit_soft_tfmode_binop (code, operands);
2908 }
2909
2910 void
2911 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2912 {
2913 if (TARGET_HARD_QUAD)
2914 emit_hard_tfmode_operation (code, operands);
2915 else
2916 emit_soft_tfmode_unop (code, operands);
2917 }
2918
2919 void
2920 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2921 {
2922 if (TARGET_HARD_QUAD)
2923 emit_hard_tfmode_operation (code, operands);
2924 else
2925 emit_soft_tfmode_cvt (code, operands);
2926 }
2927 \f
2928 /* Return nonzero if a branch/jump/call instruction will be emitting
2929 a nop into its delay slot. */
2930
2931 int
2932 empty_delay_slot (rtx insn)
2933 {
2934 rtx seq;
2935
2936 /* If no previous instruction (should not happen), return true. */
2937 if (PREV_INSN (insn) == NULL)
2938 return 1;
2939
2940 seq = NEXT_INSN (PREV_INSN (insn));
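/* After delay-slot filling, a branch and the insn in its slot are
   wrapped together in a SEQUENCE, so finding one here means the
   slot is occupied.  */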
2941 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2942 return 0;
2943
2944 return 1;
2945 }
2946
2947 /* Return nonzero if TRIAL can go into the call delay slot. */
2948
2949 int
2950 tls_call_delay (rtx trial)
2951 {
2952 rtx pat;
2953
2954 /* Binutils allows
2955 call __tls_get_addr, %tgd_call (foo)
2956 add %l7, %o0, %o0, %tgd_add (foo)
2957 while Sun as/ld does not. */
2958 if (TARGET_GNU_TLS || !TARGET_TLS)
2959 return 1;
2960
2961 pat = PATTERN (trial);
2962
2963 /* We must reject tgd_add{32|64}, i.e.
2964 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2965 and tldm_add{32|64}, i.e.
2966 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2967 for Sun as/ld. */
2968 if (GET_CODE (pat) == SET
2969 && GET_CODE (SET_SRC (pat)) == PLUS)
2970 {
2971 rtx unspec = XEXP (SET_SRC (pat), 1);
2972
2973 if (GET_CODE (unspec) == UNSPEC
2974 && (XINT (unspec, 1) == UNSPEC_TLSGD
2975 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2976 return 0;
2977 }
2978
2979 return 1;
2980 }
2981
2982 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2983 instruction. RETURN_P is true if the v9 variant 'return' is to be
2984 considered in the test too.
2985
2986 TRIAL must be a SET whose destination is a REG appropriate for the
2987 'restore' instruction or, if RETURN_P is true, for the 'return'
2988 instruction. */
2989
2990 static int
2991 eligible_for_restore_insn (rtx trial, bool return_p)
2992 {
2993 rtx pat = PATTERN (trial);
2994 rtx src = SET_SRC (pat);
2995 bool src_is_freg = false;
2996 rtx src_reg;
2997
2998 /* Since we can now do moves between float and integer registers when
2999 VIS3 is enabled, we have to catch this case. We can allow such
3000 moves when doing a 'return', however. */
3001 src_reg = src;
3002 if (GET_CODE (src_reg) == SUBREG)
3003 src_reg = SUBREG_REG (src_reg);
3004 if (GET_CODE (src_reg) == REG
3005 && SPARC_FP_REG_P (REGNO (src_reg)))
3006 src_is_freg = true;
3007
3008 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3009 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3010 && arith_operand (src, GET_MODE (src))
3011 && ! src_is_freg)
3012 {
3013 if (TARGET_ARCH64)
3014 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3015 else
3016 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3017 }
3018
3019 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3020 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3021 && arith_double_operand (src, GET_MODE (src))
3022 && ! src_is_freg)
3023 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3024
3025 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3026 else if (! TARGET_FPU && register_operand (src, SFmode))
3027 return 1;
3028
3029 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3030 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3031 return 1;
3032
3033 /* If we have the 'return' instruction, anything that does not use
3034 local or output registers and can go into a delay slot wins. */
3035 else if (return_p
3036 && TARGET_V9
3037 && !epilogue_renumber (&pat, 1)
3038 && get_attr_in_uncond_branch_delay (trial)
3039 == IN_UNCOND_BRANCH_DELAY_TRUE)
3040 return 1;
3041
3042 /* The 'restore src1,src2,dest' pattern for SImode. */
3043 else if (GET_CODE (src) == PLUS
3044 && register_operand (XEXP (src, 0), SImode)
3045 && arith_operand (XEXP (src, 1), SImode))
3046 return 1;
3047
3048 /* The 'restore src1,src2,dest' pattern for DImode. */
3049 else if (GET_CODE (src) == PLUS
3050 && register_operand (XEXP (src, 0), DImode)
3051 && arith_double_operand (XEXP (src, 1), DImode))
3052 return 1;
3053
3054 /* The 'restore src1,%lo(src2),dest' pattern. */
3055 else if (GET_CODE (src) == LO_SUM
3056 && ! TARGET_CM_MEDMID
3057 && ((register_operand (XEXP (src, 0), SImode)
3058 && immediate_operand (XEXP (src, 1), SImode))
3059 || (TARGET_ARCH64
3060 && register_operand (XEXP (src, 0), DImode)
3061 && immediate_operand (XEXP (src, 1), DImode))))
3062 return 1;
3063
3064 /* The 'restore src,src,dest' pattern. */
3065 else if (GET_CODE (src) == ASHIFT
3066 && (register_operand (XEXP (src, 0), SImode)
3067 || register_operand (XEXP (src, 0), DImode))
3068 && XEXP (src, 1) == const1_rtx)
3069 return 1;
3070
3071 return 0;
3072 }
3073
3074 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3075
3076 int
3077 eligible_for_return_delay (rtx trial)
3078 {
3079 int regno;
3080 rtx pat;
3081
3082 if (GET_CODE (trial) != INSN)
3083 return 0;
3084
3085 if (get_attr_length (trial) != 1)
3086 return 0;
3087
3088 /* If the function uses __builtin_eh_return, the eh_return machinery
3089 occupies the delay slot. */
3090 if (crtl->calls_eh_return)
3091 return 0;
3092
3093 /* In the case of a leaf or flat function, anything can go into the slot. */
3094 if (sparc_leaf_function_p || TARGET_FLAT)
3095 return
3096 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
3097
3098 pat = PATTERN (trial);
3099 if (GET_CODE (pat) == PARALLEL)
3100 {
3101 int i;
3102
3103 if (! TARGET_V9)
3104 return 0;
3105 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3106 {
3107 rtx expr = XVECEXP (pat, 0, i);
3108 if (GET_CODE (expr) != SET)
3109 return 0;
3110 if (GET_CODE (SET_DEST (expr)) != REG)
3111 return 0;
3112 regno = REGNO (SET_DEST (expr));
3113 if (regno >= 8 && regno < 24)
3114 return 0;
3115 }
3116 return !epilogue_renumber (&pat, 1)
3117 && (get_attr_in_uncond_branch_delay (trial)
3118 == IN_UNCOND_BRANCH_DELAY_TRUE);
3119 }
3120
3121 if (GET_CODE (pat) != SET)
3122 return 0;
3123
3124 if (GET_CODE (SET_DEST (pat)) != REG)
3125 return 0;
3126
3127 regno = REGNO (SET_DEST (pat));
3128
3129 /* Otherwise, only operations which can be done in tandem with
3130 a `restore' or `return' insn can go into the delay slot. */
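/* Hard registers 8-23 are the %o and %l registers, which are
   remapped by the register window shift that `restore' and
   `return' perform, hence the rejection below.  */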
3131 if (regno >= 8 && regno < 24)
3132 return 0;
3133
3134 /* If this instruction sets up a floating-point register and we have a return
3135 instruction, it can probably go in. But restore will not work
3136 with FP_REGS. */
3137 if (! SPARC_INT_REG_P (regno))
3138 return (TARGET_V9
3139 && !epilogue_renumber (&pat, 1)
3140 && get_attr_in_uncond_branch_delay (trial)
3141 == IN_UNCOND_BRANCH_DELAY_TRUE);
3142
3143 return eligible_for_restore_insn (trial, true);
3144 }
3145
3146 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3147
3148 int
3149 eligible_for_sibcall_delay (rtx trial)
3150 {
3151 rtx pat;
3152
3153 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3154 return 0;
3155
3156 if (get_attr_length (trial) != 1)
3157 return 0;
3158
3159 pat = PATTERN (trial);
3160
3161 if (sparc_leaf_function_p || TARGET_FLAT)
3162 {
3163 /* If the tail call is done using the call instruction,
3164 we have to restore %o7 in the delay slot. */
3165 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3166 return 0;
3167
3168 /* %g1 is used to build the function address. */
3169 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3170 return 0;
3171
3172 return 1;
3173 }
3174
3175 /* Otherwise, only operations which can be done in tandem with
3176 a `restore' insn can go into the delay slot. */
3177 if (GET_CODE (SET_DEST (pat)) != REG
3178 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3179 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
3180 return 0;
3181
3182 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3183 in most cases. */
3184 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3185 return 0;
3186
3187 return eligible_for_restore_insn (trial, false);
3188 }
3189 \f
3190 /* Determine if it's legal to put X into the constant pool. This
3191 is not possible if X contains the address of a symbol that is
3192 not constant (TLS) or not known at final link time (PIC). */
3193
3194 static bool
3195 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3196 {
3197 switch (GET_CODE (x))
3198 {
3199 case CONST_INT:
3200 case CONST_DOUBLE:
3201 case CONST_VECTOR:
3202 /* Accept all non-symbolic constants. */
3203 return false;
3204
3205 case LABEL_REF:
3206 /* Labels are OK iff we are non-PIC. */
3207 return flag_pic != 0;
3208
3209 case SYMBOL_REF:
3210 /* 'Naked' TLS symbol references are never OK;
3211 non-TLS symbols are OK iff we are non-PIC. */
3212 if (SYMBOL_REF_TLS_MODEL (x))
3213 return true;
3214 else
3215 return flag_pic != 0;
3216
3217 case CONST:
3218 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3219 case PLUS:
3220 case MINUS:
3221 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3222 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3223 case UNSPEC:
3224 return true;
3225 default:
3226 gcc_unreachable ();
3227 }
3228 }
3229 \f
3230 /* Global Offset Table support. */
3231 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3232 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3233
3234 /* Return the SYMBOL_REF for the Global Offset Table. */
3235
3236 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3237
3238 static rtx
3239 sparc_got (void)
3240 {
3241 if (!sparc_got_symbol)
3242 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3243
3244 return sparc_got_symbol;
3245 }
3246
3247 /* Ensure that we are not using patterns that are not OK with PIC. */
3248
3249 int
3250 check_pic (int i)
3251 {
3252 rtx op;
3253
3254 switch (flag_pic)
3255 {
3256 case 1:
3257 op = recog_data.operand[i];
3258 gcc_assert (GET_CODE (op) != SYMBOL_REF
3259 && (GET_CODE (op) != CONST
3260 || (GET_CODE (XEXP (op, 0)) == MINUS
3261 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3262 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
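/* Fall through.  */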
3263 case 2:
3264 default:
3265 return 1;
3266 }
3267 }
3268
3269 /* Return true if X is an address which needs a temporary register when
3270 reloaded while generating PIC code. */
3271
3272 int
3273 pic_address_needs_scratch (rtx x)
3274 {
3275 /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
3276 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3277 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3278 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3279 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3280 return 1;
3281
3282 return 0;
3283 }
3284
3285 /* Determine if a given RTX is a valid constant. We already know this
3286 satisfies CONSTANT_P. */
3287
3288 static bool
3289 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3290 {
3291 switch (GET_CODE (x))
3292 {
3293 case CONST:
3294 case SYMBOL_REF:
3295 if (sparc_tls_referenced_p (x))
3296 return false;
3297 break;
3298
3299 case CONST_DOUBLE:
3300 if (GET_MODE (x) == VOIDmode)
3301 return true;
3302
3303 /* Floating-point constants are generally not OK.
3304 The only exceptions are 0.0 and all-ones in VIS. */
3305 if (TARGET_VIS
3306 && SCALAR_FLOAT_MODE_P (mode)
3307 && (const_zero_operand (x, mode)
3308 || const_all_ones_operand (x, mode)))
3309 return true;
3310
3311 return false;
3312
3313 case CONST_VECTOR:
3314 /* Vector constants are generally not OK.
3315 The only exceptions are 0 and -1 in VIS. */
3316 if (TARGET_VIS
3317 && (const_zero_operand (x, mode)
3318 || const_all_ones_operand (x, mode)))
3319 return true;
3320
3321 return false;
3322
3323 default:
3324 break;
3325 }
3326
3327 return true;
3328 }
3329
3330 /* Determine if a given RTX is a valid constant address. */
3331
3332 bool
3333 constant_address_p (rtx x)
3334 {
3335 switch (GET_CODE (x))
3336 {
3337 case LABEL_REF:
3338 case CONST_INT:
3339 case HIGH:
3340 return true;
3341
3342 case CONST:
3343 if (flag_pic && pic_address_needs_scratch (x))
3344 return false;
3345 return sparc_legitimate_constant_p (Pmode, x);
3346
3347 case SYMBOL_REF:
3348 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3349
3350 default:
3351 return false;
3352 }
3353 }
3354
3355 /* Nonzero if the constant value X is a legitimate general operand
3356 when generating PIC code. It is given that flag_pic is on and
3357 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3358
3359 bool
3360 legitimate_pic_operand_p (rtx x)
3361 {
3362 if (pic_address_needs_scratch (x))
3363 return false;
3364 if (sparc_tls_referenced_p (x))
3365 return false;
3366 return true;
3367 }
3368
3369 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3370 (CONST_INT_P (X) \
3371 && INTVAL (X) >= -0x1000 \
3372 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3373
3374 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3375 (CONST_INT_P (X) \
3376 && INTVAL (X) >= -0x1000 \
3377 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
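/* Both macros enforce the simm13 offset range [-0x1000, 0x1000),
   with the upper bound shrunk by the mode size so that the last
   byte of the access is still addressable.  The OLO10 variant
   keeps a further 0x400 bytes of headroom, since the %lo() part
   of the enclosing LO_SUM can contribute up to 0x3ff on top of
   the offset (our reading of the code).  */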
3378
3379 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3380
3381 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3382 ordinarily. This changes a bit when generating PIC. */
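/* Concretely (illustrative assembly): [%l0 + %l1] for REG+REG,
   [%l0 + 42] for REG+simm13, and the [%l0 + %lo(sym)] half of a
   sethi/or pair for LO_SUM.  */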
3383
3384 static bool
3385 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3386 {
3387 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3388
3389 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3390 rs1 = addr;
3391 else if (GET_CODE (addr) == PLUS)
3392 {
3393 rs1 = XEXP (addr, 0);
3394 rs2 = XEXP (addr, 1);
3395
3396 /* Canonicalize. REG comes first; if there are no regs,
3397 LO_SUM comes first. */
3398 if (!REG_P (rs1)
3399 && GET_CODE (rs1) != SUBREG
3400 && (REG_P (rs2)
3401 || GET_CODE (rs2) == SUBREG
3402 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3403 {
3404 rs1 = XEXP (addr, 1);
3405 rs2 = XEXP (addr, 0);
3406 }
3407
3408 if ((flag_pic == 1
3409 && rs1 == pic_offset_table_rtx
3410 && !REG_P (rs2)
3411 && GET_CODE (rs2) != SUBREG
3412 && GET_CODE (rs2) != LO_SUM
3413 && GET_CODE (rs2) != MEM
3414 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3415 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3416 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3417 || ((REG_P (rs1)
3418 || GET_CODE (rs1) == SUBREG)
3419 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3420 {
3421 imm1 = rs2;
3422 rs2 = NULL;
3423 }
3424 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3425 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3426 {
3427 /* We prohibit REG + REG for TFmode when there are no quad move insns
3428 and we consequently need to split. We do this because REG+REG
3429 is not an offsettable address. If we get the situation in reload
3430 where source and destination of a movtf pattern are both MEMs with
3431 REG+REG address, then only one of them gets converted to an
3432 offsettable address. */
3433 if (mode == TFmode
3434 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3435 return 0;
3436
3437 /* We prohibit REG + REG on ARCH32 if not optimizing for
3438 DFmode/DImode because then mem_min_alignment is likely to be zero
3439 after reload and the forced split would lack a matching splitter
3440 pattern. */
3441 if (TARGET_ARCH32 && !optimize
3442 && (mode == DFmode || mode == DImode))
3443 return 0;
3444 }
3445 else if (USE_AS_OFFSETABLE_LO10
3446 && GET_CODE (rs1) == LO_SUM
3447 && TARGET_ARCH64
3448 && ! TARGET_CM_MEDMID
3449 && RTX_OK_FOR_OLO10_P (rs2, mode))
3450 {
3451 rs2 = NULL;
3452 imm1 = XEXP (rs1, 1);
3453 rs1 = XEXP (rs1, 0);
3454 if (!CONSTANT_P (imm1)
3455 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3456 return 0;
3457 }
3458 }
3459 else if (GET_CODE (addr) == LO_SUM)
3460 {
3461 rs1 = XEXP (addr, 0);
3462 imm1 = XEXP (addr, 1);
3463
3464 if (!CONSTANT_P (imm1)
3465 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3466 return 0;
3467
3468 /* We can't allow TFmode in 32-bit mode, because an offset greater
3469 than the alignment (8) may cause the LO_SUM to overflow. */
3470 if (mode == TFmode && TARGET_ARCH32)
3471 return 0;
3472 }
3473 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3474 return 1;
3475 else
3476 return 0;
3477
3478 if (GET_CODE (rs1) == SUBREG)
3479 rs1 = SUBREG_REG (rs1);
3480 if (!REG_P (rs1))
3481 return 0;
3482
3483 if (rs2)
3484 {
3485 if (GET_CODE (rs2) == SUBREG)
3486 rs2 = SUBREG_REG (rs2);
3487 if (!REG_P (rs2))
3488 return 0;
3489 }
3490
3491 if (strict)
3492 {
3493 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3494 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3495 return 0;
3496 }
3497 else
3498 {
3499 if ((! SPARC_INT_REG_P (REGNO (rs1))
3500 && REGNO (rs1) != FRAME_POINTER_REGNUM
3501 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3502 || (rs2
3503 && (! SPARC_INT_REG_P (REGNO (rs2))
3504 && REGNO (rs2) != FRAME_POINTER_REGNUM
3505 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3506 return 0;
3507 }
3508 return 1;
3509 }
3510
3511 /* Return the SYMBOL_REF for the tls_get_addr function. */
3512
3513 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3514
3515 static rtx
3516 sparc_tls_get_addr (void)
3517 {
3518 if (!sparc_tls_symbol)
3519 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3520
3521 return sparc_tls_symbol;
3522 }
3523
3524 /* Return the Global Offset Table to be used in TLS mode. */
3525
3526 static rtx
3527 sparc_tls_got (void)
3528 {
3529 /* In PIC mode, this is just the PIC offset table. */
3530 if (flag_pic)
3531 {
3532 crtl->uses_pic_offset_table = 1;
3533 return pic_offset_table_rtx;
3534 }
3535
3536 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3537 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3538 if (TARGET_SUN_TLS && TARGET_ARCH32)
3539 {
3540 load_got_register ();
3541 return global_offset_table_rtx;
3542 }
3543
3544 /* In all other cases, we load a new pseudo with the GOT symbol. */
3545 return copy_to_reg (sparc_got ());
3546 }
3547
3548 /* Return true if X contains a thread-local symbol. */
3549
3550 static bool
3551 sparc_tls_referenced_p (rtx x)
3552 {
3553 if (!TARGET_HAVE_TLS)
3554 return false;
3555
3556 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3557 x = XEXP (XEXP (x, 0), 0);
3558
3559 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3560 return true;
3561
3562 /* That's all we handle in sparc_legitimize_tls_address for now. */
3563 return false;
3564 }
3565
3566 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3567 this (thread-local) address. */
3568
3569 static rtx
3570 sparc_legitimize_tls_address (rtx addr)
3571 {
3572 rtx temp1, temp2, temp3, ret, o0, got, insn;
3573
3574 gcc_assert (can_create_pseudo_p ());
3575
3576 if (GET_CODE (addr) == SYMBOL_REF)
3577 switch (SYMBOL_REF_TLS_MODEL (addr))
3578 {
3579 case TLS_MODEL_GLOBAL_DYNAMIC:
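/* The sequence built here corresponds roughly to (32-bit GNU as
   syntax, cf. the tls_call_delay comment above; a sketch, not
   from the sources):
     sethi %tgd_hi22(sym), %temp1
     add   %temp1, %tgd_lo10(sym), %temp2
     add   %got, %temp2, %o0, %tgd_add(sym)
     call  __tls_get_addr, %tgd_call(sym)  */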
3580 start_sequence ();
3581 temp1 = gen_reg_rtx (SImode);
3582 temp2 = gen_reg_rtx (SImode);
3583 ret = gen_reg_rtx (Pmode);
3584 o0 = gen_rtx_REG (Pmode, 8);
3585 got = sparc_tls_got ();
3586 emit_insn (gen_tgd_hi22 (temp1, addr));
3587 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3588 if (TARGET_ARCH32)
3589 {
3590 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3591 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3592 addr, const1_rtx));
3593 }
3594 else
3595 {
3596 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3597 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3598 addr, const1_rtx));
3599 }
3600 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3601 insn = get_insns ();
3602 end_sequence ();
3603 emit_libcall_block (insn, ret, o0, addr);
3604 break;
3605
3606 case TLS_MODEL_LOCAL_DYNAMIC:
3607 start_sequence ();
3608 temp1 = gen_reg_rtx (SImode);
3609 temp2 = gen_reg_rtx (SImode);
3610 temp3 = gen_reg_rtx (Pmode);
3611 ret = gen_reg_rtx (Pmode);
3612 o0 = gen_rtx_REG (Pmode, 8);
3613 got = sparc_tls_got ();
3614 emit_insn (gen_tldm_hi22 (temp1));
3615 emit_insn (gen_tldm_lo10 (temp2, temp1));
3616 if (TARGET_ARCH32)
3617 {
3618 emit_insn (gen_tldm_add32 (o0, got, temp2));
3619 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3620 const1_rtx));
3621 }
3622 else
3623 {
3624 emit_insn (gen_tldm_add64 (o0, got, temp2));
3625 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3626 const1_rtx));
3627 }
3628 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3629 insn = get_insns ();
3630 end_sequence ();
3631 emit_libcall_block (insn, temp3, o0,
3632 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3633 UNSPEC_TLSLD_BASE));
3634 temp1 = gen_reg_rtx (SImode);
3635 temp2 = gen_reg_rtx (SImode);
3636 emit_insn (gen_tldo_hix22 (temp1, addr));
3637 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3638 if (TARGET_ARCH32)
3639 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3640 else
3641 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3642 break;
3643
3644 case TLS_MODEL_INITIAL_EXEC:
3645 temp1 = gen_reg_rtx (SImode);
3646 temp2 = gen_reg_rtx (SImode);
3647 temp3 = gen_reg_rtx (Pmode);
3648 got = sparc_tls_got ();
3649 emit_insn (gen_tie_hi22 (temp1, addr));
3650 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3651 if (TARGET_ARCH32)
3652 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3653 else
3654 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3655 if (TARGET_SUN_TLS)
3656 {
3657 ret = gen_reg_rtx (Pmode);
3658 if (TARGET_ARCH32)
3659 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3660 temp3, addr));
3661 else
3662 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3663 temp3, addr));
3664 }
3665 else
3666 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3667 break;
3668
3669 case TLS_MODEL_LOCAL_EXEC:
3670 temp1 = gen_reg_rtx (Pmode);
3671 temp2 = gen_reg_rtx (Pmode);
3672 if (TARGET_ARCH32)
3673 {
3674 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3675 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3676 }
3677 else
3678 {
3679 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3680 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3681 }
3682 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3683 break;
3684
3685 default:
3686 gcc_unreachable ();
3687 }
3688
3689 else if (GET_CODE (addr) == CONST)
3690 {
3691 rtx base, offset;
3692
3693 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3694
3695 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3696 offset = XEXP (XEXP (addr, 0), 1);
3697
3698 base = force_operand (base, NULL_RTX);
3699 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3700 offset = force_reg (Pmode, offset);
3701 ret = gen_rtx_PLUS (Pmode, base, offset);
3702 }
3703
3704 else
3705 gcc_unreachable (); /* for now ... */
3706
3707 return ret;
3708 }
3709
3710 /* Legitimize PIC addresses. If the address is already position-independent,
3711 we return ORIG. Newly generated position-independent addresses go into a
3712 reg. This is REG if nonzero, otherwise we allocate register(s) as
3713 necessary. */
3714
3715 static rtx
3716 sparc_legitimize_pic_address (rtx orig, rtx reg)
3717 {
3718 bool gotdata_op = false;
3719
3720 if (GET_CODE (orig) == SYMBOL_REF
3721 /* See the comment in sparc_expand_move. */
3722 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3723 {
3724 rtx pic_ref, address;
3725 rtx insn;
3726
3727 if (reg == 0)
3728 {
3729 gcc_assert (can_create_pseudo_p ());
3730 reg = gen_reg_rtx (Pmode);
3731 }
3732
3733 if (flag_pic == 2)
3734 {
3735 /* If not during reload, allocate another temp reg here for loading
3736 in the address, so that these instructions can be optimized
3737 properly. */
3738 rtx temp_reg = (! can_create_pseudo_p ()
3739 ? reg : gen_reg_rtx (Pmode));
3740
3741 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3742 won't get confused into thinking that these two instructions
3743 are loading in the true address of the symbol. If in the
3744 future a PIC rtx exists, that should be used instead. */
3745 if (TARGET_ARCH64)
3746 {
3747 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3748 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3749 }
3750 else
3751 {
3752 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3753 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3754 }
3755 address = temp_reg;
3756 gotdata_op = true;
3757 }
3758 else
3759 address = orig;
3760
3761 crtl->uses_pic_offset_table = 1;
3762 if (gotdata_op)
3763 {
3764 if (TARGET_ARCH64)
3765 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3766 pic_offset_table_rtx,
3767 address, orig));
3768 else
3769 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3770 pic_offset_table_rtx,
3771 address, orig));
3772 }
3773 else
3774 {
3775 pic_ref
3776 = gen_const_mem (Pmode,
3777 gen_rtx_PLUS (Pmode,
3778 pic_offset_table_rtx, address));
3779 insn = emit_move_insn (reg, pic_ref);
3780 }
3781
      /* Put a REG_EQUAL note on this insn, so that it can be optimized
         by the loop optimizer.  */
3784 set_unique_reg_note (insn, REG_EQUAL, orig);
3785 return reg;
3786 }
3787 else if (GET_CODE (orig) == CONST)
3788 {
3789 rtx base, offset;
3790
3791 if (GET_CODE (XEXP (orig, 0)) == PLUS
3792 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3793 return orig;
3794
3795 if (reg == 0)
3796 {
3797 gcc_assert (can_create_pseudo_p ());
3798 reg = gen_reg_rtx (Pmode);
3799 }
3800
3801 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3802 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3803 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3804 base == reg ? NULL_RTX : reg);
3805
3806 if (GET_CODE (offset) == CONST_INT)
3807 {
3808 if (SMALL_INT (offset))
3809 return plus_constant (base, INTVAL (offset));
3810 else if (can_create_pseudo_p ())
3811 offset = force_reg (Pmode, offset);
3812 else
3813 /* If we reach here, then something is seriously wrong. */
3814 gcc_unreachable ();
3815 }
3816 return gen_rtx_PLUS (Pmode, base, offset);
3817 }
3818 else if (GET_CODE (orig) == LABEL_REF)
3819 /* ??? We ought to be checking that the register is live instead, in case
3820 it is eliminated. */
3821 crtl->uses_pic_offset_table = 1;
3822
3823 return orig;
3824 }
3825
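/* Illustrative example (an editor's sketch): with -fPIC (flag_pic == 2)
   on 32-bit SPARC, the function above is expected to emit a GOT load
   roughly of the form

	sethi	%hi(sym), %g1
	or	%g1, %lo(sym), %g1
	ld	[%l7 + %g1], %o0	! %l7 = PIC register

   using the %gdop_* relocations instead when the assembler supports
   GOT data operations.  */
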
3826 /* Try machine-dependent ways of modifying an illegitimate address X
3827 to be legitimate. If we find one, return the new, valid address.
3828
3829 OLDX is the address as it was before break_out_memory_refs was called.
3830 In some cases it is useful to look at this to decide what needs to be done.
3831
3832 MODE is the mode of the operand pointed to by X.
3833
3834 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3835
3836 static rtx
3837 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3838 enum machine_mode mode)
3839 {
3840 rtx orig_x = x;
3841
3842 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3843 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3844 force_operand (XEXP (x, 0), NULL_RTX));
3845 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3846 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3847 force_operand (XEXP (x, 1), NULL_RTX));
3848 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3849 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3850 XEXP (x, 1));
3851 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3852 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3853 force_operand (XEXP (x, 1), NULL_RTX));
3854
3855 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3856 return x;
3857
3858 if (sparc_tls_referenced_p (x))
3859 x = sparc_legitimize_tls_address (x);
3860 else if (flag_pic)
3861 x = sparc_legitimize_pic_address (x, NULL_RTX);
3862 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3863 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3864 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3865 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3866 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3867 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3868 else if (GET_CODE (x) == SYMBOL_REF
3869 || GET_CODE (x) == CONST
3870 || GET_CODE (x) == LABEL_REF)
3871 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3872
3873 return x;
3874 }
3875
3876 /* Delegitimize an address that was legitimized by the above function. */
3877
3878 static rtx
3879 sparc_delegitimize_address (rtx x)
3880 {
3881 x = delegitimize_mem_from_attrs (x);
3882
3883 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3884 switch (XINT (XEXP (x, 1), 1))
3885 {
3886 case UNSPEC_MOVE_PIC:
3887 case UNSPEC_TLSLE:
3888 x = XVECEXP (XEXP (x, 1), 0, 0);
3889 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3890 break;
3891 default:
3892 break;
3893 }
3894
3895 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3896 if (GET_CODE (x) == MINUS
3897 && REG_P (XEXP (x, 0))
3898 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3899 && GET_CODE (XEXP (x, 1)) == LO_SUM
3900 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3901 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3902 {
3903 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3904 gcc_assert (GET_CODE (x) == LABEL_REF);
3905 }
3906
3907 return x;
3908 }
3909
3910 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3911 replace the input X, or the original X if no replacement is called for.
3912 The output parameter *WIN is 1 if the calling macro should goto WIN,
3913 0 if it should not.
3914
3915 For SPARC, we wish to handle addresses by splitting them into
3916 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3917 This cuts the number of extra insns by one.
3918
3919 Do nothing when generating PIC code and the address is a symbolic
3920 operand or requires a scratch register. */
3921
3922 rtx
3923 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3924 int opnum, int type,
3925 int ind_levels ATTRIBUTE_UNUSED, int *win)
3926 {
3927 /* Decompose SImode constants into HIGH+LO_SUM. */
3928 if (CONSTANT_P (x)
3929 && (mode != TFmode || TARGET_ARCH64)
3930 && GET_MODE (x) == SImode
3931 && GET_CODE (x) != LO_SUM
3932 && GET_CODE (x) != HIGH
3933 && sparc_cmodel <= CM_MEDLOW
3934 && !(flag_pic
3935 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3936 {
3937 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3938 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3939 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3940 opnum, (enum reload_type)type);
3941 *win = 1;
3942 return x;
3943 }
3944
3945 /* We have to recognize what we have already generated above. */
3946 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3947 {
3948 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3949 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3950 opnum, (enum reload_type)type);
3951 *win = 1;
3952 return x;
3953 }
3954
3955 *win = 0;
3956 return x;
3957 }
3958
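/* Illustrative example (an editor's sketch): splitting an address into a
   HIGH+LO_SUM pair as done above keeps the LO_SUM inside the memory
   reference, so a load from a constant address takes two insns:

	sethi	%hi(addr), %g1
	ld	[%g1 + %lo(addr)], %o0

   instead of materializing the full address first and then loading.  */
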
3959 /* Return true if ADDR (a legitimate address expression)
3960 has an effect that depends on the machine mode it is used for.
3961
3962 In PIC mode,
3963
3964 (mem:HI [%l7+a])
3965
3966 is not equivalent to
3967
3968 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3969
3970 because [%l7+a+1] is interpreted as the address of (a+1). */
3971
3972
3973 static bool
3974 sparc_mode_dependent_address_p (const_rtx addr)
3975 {
3976 if (flag_pic && GET_CODE (addr) == PLUS)
3977 {
3978 rtx op0 = XEXP (addr, 0);
3979 rtx op1 = XEXP (addr, 1);
3980 if (op0 == pic_offset_table_rtx
3981 && symbolic_operand (op1, VOIDmode))
3982 return true;
3983 }
3984
3985 return false;
3986 }
3987
3988 #ifdef HAVE_GAS_HIDDEN
3989 # define USE_HIDDEN_LINKONCE 1
3990 #else
3991 # define USE_HIDDEN_LINKONCE 0
3992 #endif
3993
3994 static void
3995 get_pc_thunk_name (char name[32], unsigned int regno)
3996 {
3997 const char *reg_name = reg_names[regno];
3998
3999 /* Skip the leading '%' as that cannot be used in a
4000 symbol name. */
4001 reg_name += 1;
4002
4003 if (USE_HIDDEN_LINKONCE)
4004 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4005 else
4006 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4007 }
4008
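/* For example, with USE_HIDDEN_LINKONCE and REGNO being the PIC register
   %l7, the function above produces the name "__sparc_get_pc_thunk.l7".  */
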
4009 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4010
4011 static rtx
4012 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4013 {
4014 int orig_flag_pic = flag_pic;
4015 rtx insn;
4016
4017 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4018 flag_pic = 0;
4019 if (TARGET_ARCH64)
4020 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4021 else
4022 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4023 flag_pic = orig_flag_pic;
4024
4025 return insn;
4026 }
4027
4028 /* Emit code to load the GOT register. */
4029
4030 void
4031 load_got_register (void)
4032 {
4033 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4034 if (!global_offset_table_rtx)
4035 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4036
4037 if (TARGET_VXWORKS_RTP)
4038 emit_insn (gen_vxworks_load_got ());
4039 else
4040 {
4041 /* The GOT symbol is subject to a PC-relative relocation so we need a
4042 helper function to add the PC value and thus get the final value. */
4043 if (!got_helper_rtx)
4044 {
4045 char name[32];
4046 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4047 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4048 }
4049
4050 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4051 got_helper_rtx,
4052 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4053 }
4054
4055 /* Need to emit this whether or not we obey regdecls,
4056 since setjmp/longjmp can cause life info to screw up.
4057 ??? In the case where we don't obey regdecls, this is not sufficient
4058 since we may not fall out the bottom. */
4059 emit_use (global_offset_table_rtx);
4060 }
4061
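/* Illustrative example (an editor's sketch, not lifted from compiler
   output): on 32-bit SPARC the load_pcrel_sym sequence emitted above is
   expected to expand to the classic PIC register setup

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk returns with the PC of the call added into %l7.  */
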
4062 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4063 address of the call target. */
4064
4065 void
4066 sparc_emit_call_insn (rtx pat, rtx addr)
4067 {
4068 rtx insn;
4069
4070 insn = emit_call_insn (pat);
4071
4072 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4073 if (TARGET_VXWORKS_RTP
4074 && flag_pic
4075 && GET_CODE (addr) == SYMBOL_REF
4076 && (SYMBOL_REF_DECL (addr)
4077 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4078 : !SYMBOL_REF_LOCAL_P (addr)))
4079 {
4080 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4081 crtl->uses_pic_offset_table = 1;
4082 }
4083 }
4084 \f
4085 /* Return 1 if RTX is a MEM which is known to be aligned to at
4086 least a DESIRED byte boundary. */
4087
4088 int
4089 mem_min_alignment (rtx mem, int desired)
4090 {
4091 rtx addr, base, offset;
4092
4093 /* If it's not a MEM we can't accept it. */
4094 if (GET_CODE (mem) != MEM)
4095 return 0;
4096
4097 /* Obviously... */
4098 if (!TARGET_UNALIGNED_DOUBLES
4099 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4100 return 1;
4101
4102 /* ??? The rest of the function predates MEM_ALIGN so
4103 there is probably a bit of redundancy. */
4104 addr = XEXP (mem, 0);
4105 base = offset = NULL_RTX;
4106 if (GET_CODE (addr) == PLUS)
4107 {
4108 if (GET_CODE (XEXP (addr, 0)) == REG)
4109 {
4110 base = XEXP (addr, 0);
4111
4112 /* What we are saying here is that if the base
4113 REG is aligned properly, the compiler will make
4114 sure any REG based index upon it will be so
4115 as well. */
4116 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4117 offset = XEXP (addr, 1);
4118 else
4119 offset = const0_rtx;
4120 }
4121 }
4122 else if (GET_CODE (addr) == REG)
4123 {
4124 base = addr;
4125 offset = const0_rtx;
4126 }
4127
4128 if (base != NULL_RTX)
4129 {
4130 int regno = REGNO (base);
4131
4132 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4133 {
          /* Check if the compiler has recorded some information
             about the alignment of the base REG.  If reload has
             completed, we already matched with proper alignments.
             If not running global_alloc, reload might give us
             an unaligned pointer to the local stack though.  */
4139 if (((cfun != 0
4140 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4141 || (optimize && reload_completed))
4142 && (INTVAL (offset) & (desired - 1)) == 0)
4143 return 1;
4144 }
4145 else
4146 {
4147 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4148 return 1;
4149 }
4150 }
4151 else if (! TARGET_UNALIGNED_DOUBLES
4152 || CONSTANT_P (addr)
4153 || GET_CODE (addr) == LO_SUM)
4154 {
4155 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4156 is true, in which case we can only assume that an access is aligned if
4157 it is to a constant address, or the address involves a LO_SUM. */
4158 return 1;
4159 }
4160
4161 /* An obviously unaligned address. */
4162 return 0;
4163 }
4164
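/* Illustrative usage (an editor's sketch): mem_min_alignment (mem, 8)
   returns 1 for a MEM such as (mem:DF (plus (reg %fp) (const_int -16)))
   on 32-bit (no stack bias), since the frame pointer is doubleword
   aligned and -16 is a multiple of 8.  */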
4165 \f
4166 /* Vectors to keep interesting information about registers where it can easily
4167 be got. We used to use the actual mode value as the bit number, but there
4168 are more than 32 modes now. Instead we use two tables: one indexed by
4169 hard register number, and one indexed by mode. */
4170
4171 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4172 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4173 mapped into one sparc_mode_class mode. */
4174
4175 enum sparc_mode_class {
4176 S_MODE, D_MODE, T_MODE, O_MODE,
4177 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4178 CC_MODE, CCFP_MODE
4179 };
4180
4181 /* Modes for single-word and smaller quantities. */
4182 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4183
4184 /* Modes for double-word and smaller quantities. */
4185 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4186
4187 /* Modes for quad-word and smaller quantities. */
4188 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4189
4190 /* Modes for 8-word and smaller quantities. */
4191 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4192
4193 /* Modes for single-float quantities. We must allow any single word or
4194 smaller quantity. This is because the fix/float conversion instructions
4195 take integer inputs/outputs from the float registers. */
4196 #define SF_MODES (S_MODES)
4197
4198 /* Modes for double-float and smaller quantities. */
4199 #define DF_MODES (D_MODES)
4200
4201 /* Modes for quad-float and smaller quantities. */
4202 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4203
4204 /* Modes for quad-float pairs and smaller quantities. */
4205 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4206
4207 /* Modes for double-float only quantities. */
4208 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4209
4210 /* Modes for quad-float and double-float only quantities. */
4211 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4212
4213 /* Modes for quad-float pairs and double-float only quantities. */
4214 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4215
4216 /* Modes for condition codes. */
4217 #define CC_MODES (1 << (int) CC_MODE)
4218 #define CCFP_MODES (1 << (int) CCFP_MODE)
4219
4220 /* Value is 1 if register/mode pair is acceptable on sparc.
4221 The funny mixture of D and T modes is because integer operations
4222 do not specially operate on tetra quantities, so non-quad-aligned
4223 registers can hold quadword quantities (except %o4 and %i4 because
4224 they cross fixed registers). */
4225
4226 /* This points to either the 32 bit or the 64 bit version. */
4227 const int *hard_regno_mode_classes;
4228
4229 static const int hard_32bit_mode_classes[] = {
4230 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4231 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4232 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4233 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4234
4235 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4236 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4237 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4238 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4239
4240 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4241 and none can hold SFmode/SImode values. */
4242 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4243 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4244 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4245 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4246
4247 /* %fcc[0123] */
4248 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4249
4250 /* %icc, %sfp, %gsr */
4251 CC_MODES, 0, D_MODES
4252 };
4253
4254 static const int hard_64bit_mode_classes[] = {
4255 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4256 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4257 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4258 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4259
4260 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4261 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4262 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4263 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4264
4265 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4266 and none can hold SFmode/SImode values. */
4267 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4268 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4269 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4270 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4271
4272 /* %fcc[0123] */
4273 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4274
4275 /* %icc, %sfp, %gsr */
4276 CC_MODES, 0, D_MODES
4277 };
4278
4279 int sparc_mode_class [NUM_MACHINE_MODES];
4280
4281 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4282
4283 static void
4284 sparc_init_modes (void)
4285 {
4286 int i;
4287
4288 for (i = 0; i < NUM_MACHINE_MODES; i++)
4289 {
4290 switch (GET_MODE_CLASS (i))
4291 {
4292 case MODE_INT:
4293 case MODE_PARTIAL_INT:
4294 case MODE_COMPLEX_INT:
4295 if (GET_MODE_SIZE (i) <= 4)
4296 sparc_mode_class[i] = 1 << (int) S_MODE;
4297 else if (GET_MODE_SIZE (i) == 8)
4298 sparc_mode_class[i] = 1 << (int) D_MODE;
4299 else if (GET_MODE_SIZE (i) == 16)
4300 sparc_mode_class[i] = 1 << (int) T_MODE;
4301 else if (GET_MODE_SIZE (i) == 32)
4302 sparc_mode_class[i] = 1 << (int) O_MODE;
4303 else
4304 sparc_mode_class[i] = 0;
4305 break;
4306 case MODE_VECTOR_INT:
4307 if (GET_MODE_SIZE (i) <= 4)
4308 sparc_mode_class[i] = 1 << (int)SF_MODE;
4309 else if (GET_MODE_SIZE (i) == 8)
4310 sparc_mode_class[i] = 1 << (int)DF_MODE;
4311 break;
4312 case MODE_FLOAT:
4313 case MODE_COMPLEX_FLOAT:
4314 if (GET_MODE_SIZE (i) <= 4)
4315 sparc_mode_class[i] = 1 << (int) SF_MODE;
4316 else if (GET_MODE_SIZE (i) == 8)
4317 sparc_mode_class[i] = 1 << (int) DF_MODE;
4318 else if (GET_MODE_SIZE (i) == 16)
4319 sparc_mode_class[i] = 1 << (int) TF_MODE;
4320 else if (GET_MODE_SIZE (i) == 32)
4321 sparc_mode_class[i] = 1 << (int) OF_MODE;
4322 else
4323 sparc_mode_class[i] = 0;
4324 break;
4325 case MODE_CC:
4326 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4327 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4328 else
4329 sparc_mode_class[i] = 1 << (int) CC_MODE;
4330 break;
4331 default:
4332 sparc_mode_class[i] = 0;
4333 break;
4334 }
4335 }
4336
4337 if (TARGET_ARCH64)
4338 hard_regno_mode_classes = hard_64bit_mode_classes;
4339 else
4340 hard_regno_mode_classes = hard_32bit_mode_classes;
4341
4342 /* Initialize the array used by REGNO_REG_CLASS. */
4343 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4344 {
4345 if (i < 16 && TARGET_V8PLUS)
4346 sparc_regno_reg_class[i] = I64_REGS;
4347 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4348 sparc_regno_reg_class[i] = GENERAL_REGS;
4349 else if (i < 64)
4350 sparc_regno_reg_class[i] = FP_REGS;
4351 else if (i < 96)
4352 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4353 else if (i < 100)
4354 sparc_regno_reg_class[i] = FPCC_REGS;
4355 else
4356 sparc_regno_reg_class[i] = NO_REGS;
4357 }
4358 }
4359 \f
4360 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4361
4362 static inline bool
4363 save_global_or_fp_reg_p (unsigned int regno,
4364 int leaf_function ATTRIBUTE_UNUSED)
4365 {
4366 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4367 }
4368
4369 /* Return whether the return address register (%i7) is needed. */
4370
4371 static inline bool
4372 return_addr_reg_needed_p (int leaf_function)
4373 {
4374 /* If it is live, for example because of __builtin_return_address (0). */
4375 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4376 return true;
4377
4378 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4379 if (!leaf_function
4380 /* Loading the GOT register clobbers %o7. */
4381 || crtl->uses_pic_offset_table
4382 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4383 return true;
4384
4385 return false;
4386 }
4387
4388 /* Return whether REGNO, a local or in register, must be saved/restored. */
4389
4390 static bool
4391 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4392 {
4393 /* General case: call-saved registers live at some point. */
4394 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4395 return true;
4396
4397 /* Frame pointer register (%fp) if needed. */
4398 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4399 return true;
4400
4401 /* Return address register (%i7) if needed. */
4402 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4403 return true;
4404
4405 /* GOT register (%l7) if needed. */
4406 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4407 return true;
4408
4409 /* If the function accesses prior frames, the frame pointer and the return
4410 address of the previous frame must be saved on the stack. */
4411 if (crtl->accesses_prior_frames
4412 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4413 return true;
4414
4415 return false;
4416 }
4417
4418 /* Compute the frame size required by the function. This function is called
4419 during the reload pass and also by sparc_expand_prologue. */
4420
4421 HOST_WIDE_INT
4422 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4423 {
4424 HOST_WIDE_INT frame_size, apparent_frame_size;
4425 int args_size, n_global_fp_regs = 0;
4426 bool save_local_in_regs_p = false;
4427 unsigned int i;
4428
4429 /* If the function allocates dynamic stack space, the dynamic offset is
4430 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4431 if (leaf_function && !cfun->calls_alloca)
4432 args_size = 0;
4433 else
4434 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4435
4436 /* Calculate space needed for global registers. */
  if (TARGET_ARCH64)
    {
      for (i = 0; i < 8; i++)
        if (save_global_or_fp_reg_p (i, 0))
          n_global_fp_regs += 2;
    }
  else
    {
      for (i = 0; i < 8; i += 2)
        if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
          n_global_fp_regs += 2;
    }
4445
4446 /* In the flat window model, find out which local and in registers need to
4447 be saved. We don't reserve space in the current frame for them as they
4448 will be spilled into the register window save area of the caller's frame.
4449 However, as soon as we use this register window save area, we must create
4450 that of the current frame to make it the live one. */
4451 if (TARGET_FLAT)
4452 for (i = 16; i < 32; i++)
4453 if (save_local_or_in_reg_p (i, leaf_function))
4454 {
4455 save_local_in_regs_p = true;
4456 break;
4457 }
4458
4459 /* Calculate space needed for FP registers. */
4460 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4461 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4462 n_global_fp_regs += 2;
4463
4464 if (size == 0
4465 && n_global_fp_regs == 0
4466 && args_size == 0
4467 && !save_local_in_regs_p)
4468 frame_size = apparent_frame_size = 0;
4469 else
4470 {
4471 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4472 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4473 apparent_frame_size += n_global_fp_regs * 4;
4474
4475 /* We need to add the size of the outgoing argument area. */
4476 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4477
4478 /* And that of the register window save area. */
4479 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4480
4481 /* Finally, bump to the appropriate alignment. */
4482 frame_size = SPARC_STACK_ALIGN (frame_size);
4483 }
4484
4485 /* Set up values for use in prologue and epilogue. */
4486 sparc_frame_size = frame_size;
4487 sparc_apparent_frame_size = apparent_frame_size;
4488 sparc_n_global_fp_regs = n_global_fp_regs;
4489 sparc_save_local_in_regs_p = save_local_in_regs_p;
4490
4491 return frame_size;
4492 }
4493
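/* Worked example (an editor's illustration): (x + 7) & -8 rounds x up to
   the next multiple of 8, e.g. 20 -> 24 and 24 -> 24.  So, ignoring
   STARTING_FRAME_OFFSET, a 20-byte frame with no saved FP/global regs
   and no outgoing args yields apparent_frame_size == 24, to which the
   register window save area and the final SPARC_STACK_ALIGN rounding
   are then applied.  */
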
4494 /* Output any necessary .register pseudo-ops. */
4495
4496 void
4497 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4498 {
4499 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4500 int i;
4501
4502 if (TARGET_ARCH32)
4503 return;
4504
4505 /* Check if %g[2367] were used without
4506 .register being printed for them already. */
4507 for (i = 2; i < 8; i++)
4508 {
4509 if (df_regs_ever_live_p (i)
4510 && ! sparc_hard_reg_printed [i])
4511 {
4512 sparc_hard_reg_printed [i] = 1;
4513 /* %g7 is used as TLS base register, use #ignore
4514 for it instead of #scratch. */
4515 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4516 i == 7 ? "ignore" : "scratch");
4517 }
4518 if (i == 3) i = 5;
4519 }
4520 #endif
4521 }
4522
4523 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4524
4525 #if PROBE_INTERVAL > 4096
4526 #error Cannot use indexed addressing mode for stack probing
4527 #endif
4528
4529 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4530 inclusive. These are offsets from the current stack pointer.
4531
   Note that we don't use the REG+REG addressing mode for the probes because
   of the stack bias in 64-bit mode.  And it doesn't really buy us anything
   anyway, so the advantage of having a single code path wins here.  */
4535
4536 static void
4537 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4538 {
4539 rtx g1 = gen_rtx_REG (Pmode, 1);
4540
4541 /* See if we have a constant small number of probes to generate. If so,
4542 that's the easy case. */
4543 if (size <= PROBE_INTERVAL)
4544 {
4545 emit_move_insn (g1, GEN_INT (first));
4546 emit_insn (gen_rtx_SET (VOIDmode, g1,
4547 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4548 emit_stack_probe (plus_constant (g1, -size));
4549 }
4550
  /* The run-time loop is made up of 10 insns in the generic case while the
     compile-time loop is made up of 4+2*(n-2) insns for n intervals.  */
4553 else if (size <= 5 * PROBE_INTERVAL)
4554 {
4555 HOST_WIDE_INT i;
4556
4557 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4558 emit_insn (gen_rtx_SET (VOIDmode, g1,
4559 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4560 emit_stack_probe (g1);
4561
4562 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4563 it exceeds SIZE. If only two probes are needed, this will not
4564 generate any code. Then probe at FIRST + SIZE. */
4565 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4566 {
4567 emit_insn (gen_rtx_SET (VOIDmode, g1,
4568 plus_constant (g1, -PROBE_INTERVAL)));
4569 emit_stack_probe (g1);
4570 }
4571
4572 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4573 }
4574
4575 /* Otherwise, do the same as above, but in a loop. Note that we must be
4576 extra careful with variables wrapping around because we might be at
4577 the very top (or the very bottom) of the address space and we have
4578 to be able to handle this case properly; in particular, we use an
4579 equality test for the loop condition. */
4580 else
4581 {
4582 HOST_WIDE_INT rounded_size;
4583 rtx g4 = gen_rtx_REG (Pmode, 4);
4584
4585 emit_move_insn (g1, GEN_INT (first));
4586
4587
4588 /* Step 1: round SIZE to the previous multiple of the interval. */
4589
4590 rounded_size = size & -PROBE_INTERVAL;
4591 emit_move_insn (g4, GEN_INT (rounded_size));
4592
4593
4594 /* Step 2: compute initial and final value of the loop counter. */
4595
4596 /* TEST_ADDR = SP + FIRST. */
4597 emit_insn (gen_rtx_SET (VOIDmode, g1,
4598 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4599
4600 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4601 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4602
4603
4604 /* Step 3: the loop
4605
4606 while (TEST_ADDR != LAST_ADDR)
4607 {
4608 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4609 probe at TEST_ADDR
4610 }
4611
4612 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4613 until it is equal to ROUNDED_SIZE. */
4614
      if (TARGET_ARCH64)
4616 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4617 else
4618 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4619
4620
4621 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4622 that SIZE is equal to ROUNDED_SIZE. */
4623
4624 if (size != rounded_size)
4625 emit_stack_probe (plus_constant (g4, rounded_size - size));
4626 }
4627
4628 /* Make sure nothing is scheduled before we are done. */
4629 emit_insn (gen_blockage ());
4630 }
4631
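/* Worked example (an editor's illustration): with PROBE_INTERVAL == 4096,
   FIRST == 4096 and SIZE == 8192, the second case above emits probes at
   SP - 8192 (= FIRST + PROBE_INTERVAL) and SP - 12288 (= FIRST + SIZE);
   the loop between them generates no code since only two probes are
   needed.  */
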
4632 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4633 absolute addresses. */
4634
4635 const char *
4636 output_probe_stack_range (rtx reg1, rtx reg2)
4637 {
4638 static int labelno = 0;
4639 char loop_lab[32], end_lab[32];
4640 rtx xops[2];
4641
4642 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4643 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4644
4645 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4646
4647 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4648 xops[0] = reg1;
4649 xops[1] = reg2;
4650 output_asm_insn ("cmp\t%0, %1", xops);
4651 if (TARGET_ARCH64)
4652 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4653 else
4654 fputs ("\tbe\t", asm_out_file);
4655 assemble_name_raw (asm_out_file, end_lab);
4656 fputc ('\n', asm_out_file);
4657
4658 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4659 xops[1] = GEN_INT (-PROBE_INTERVAL);
4660 output_asm_insn (" add\t%0, %1, %0", xops);
4661
4662 /* Probe at TEST_ADDR and branch. */
4663 if (TARGET_ARCH64)
4664 fputs ("\tba,pt\t%xcc,", asm_out_file);
4665 else
4666 fputs ("\tba\t", asm_out_file);
4667 assemble_name_raw (asm_out_file, loop_lab);
4668 fputc ('\n', asm_out_file);
4669 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4670 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4671
4672 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4673
4674 return "";
4675 }
4676
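/* Illustrative output (an editor's sketch) of the function below on
   32-bit SPARC, with PROBE_INTERVAL == 4096 and no stack bias:

   .LPSRL0:
	cmp	%g1, %g4
	be	.LPSRE0
	 add	%g1, -4096, %g1
	ba	.LPSRL0
	 st	%g0, [%g1+0]
   .LPSRE0:

   The add and the store sit in the delay slots of the branches.  */
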
4677 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4678 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4679 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4680 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4681 the action to be performed if it returns false. Return the new offset. */
4682
4683 typedef bool (*sorr_pred_t) (unsigned int, int);
4684 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4685
4686 static int
4687 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4688 int offset, int leaf_function, sorr_pred_t save_p,
4689 sorr_act_t action_true, sorr_act_t action_false)
4690 {
4691 unsigned int i;
4692 rtx mem, insn;
4693
4694 if (TARGET_ARCH64 && high <= 32)
4695 {
4696 int fp_offset = -1;
4697
4698 for (i = low; i < high; i++)
4699 {
4700 if (save_p (i, leaf_function))
4701 {
4702 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4703 if (action_true == SORR_SAVE)
4704 {
4705 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4706 RTX_FRAME_RELATED_P (insn) = 1;
4707 }
4708 else /* action_true == SORR_RESTORE */
4709 {
4710 /* The frame pointer must be restored last since its old
4711 value may be used as base address for the frame. This
4712 is problematic in 64-bit mode only because of the lack
4713 of double-word load instruction. */
4714 if (i == HARD_FRAME_POINTER_REGNUM)
4715 fp_offset = offset;
4716 else
4717 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4718 }
4719 offset += 8;
4720 }
4721 else if (action_false == SORR_ADVANCE)
4722 offset += 8;
4723 }
4724
4725 if (fp_offset >= 0)
4726 {
4727 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
4728 emit_move_insn (hard_frame_pointer_rtx, mem);
4729 }
4730 }
4731 else
4732 {
4733 for (i = low; i < high; i += 2)
4734 {
4735 bool reg0 = save_p (i, leaf_function);
4736 bool reg1 = save_p (i + 1, leaf_function);
4737 enum machine_mode mode;
4738 int regno;
4739
4740 if (reg0 && reg1)
4741 {
4742 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
4743 regno = i;
4744 }
4745 else if (reg0)
4746 {
4747 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4748 regno = i;
4749 }
4750 else if (reg1)
4751 {
4752 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4753 regno = i + 1;
4754 offset += 4;
4755 }
4756 else
4757 {
4758 if (action_false == SORR_ADVANCE)
4759 offset += 8;
4760 continue;
4761 }
4762
4763 mem = gen_frame_mem (mode, plus_constant (base, offset));
4764 if (action_true == SORR_SAVE)
4765 {
4766 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4767 RTX_FRAME_RELATED_P (insn) = 1;
4768 if (mode == DImode)
4769 {
4770 rtx set1, set2;
4771 mem = gen_frame_mem (SImode, plus_constant (base, offset));
4772 set1 = gen_rtx_SET (VOIDmode, mem,
4773 gen_rtx_REG (SImode, regno));
4774 RTX_FRAME_RELATED_P (set1) = 1;
4775 mem
4776 = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4777 set2 = gen_rtx_SET (VOIDmode, mem,
4778 gen_rtx_REG (SImode, regno + 1));
4779 RTX_FRAME_RELATED_P (set2) = 1;
4780 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4781 gen_rtx_PARALLEL (VOIDmode,
4782 gen_rtvec (2, set1, set2)));
4783 }
4784 }
4785 else /* action_true == SORR_RESTORE */
4786 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4787
4788 /* Always preserve double-word alignment. */
4789 offset = (offset + 8) & -8;
4790 }
4791 }
4792
4793 return offset;
4794 }
4795
4796 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4797
4798 static rtx
4799 emit_adjust_base_to_offset (rtx base, int offset)
4800 {
4801 /* ??? This might be optimized a little as %g1 might already have a
4802 value close enough that a single add insn will do. */
4803 /* ??? Although, all of this is probably only a temporary fix because
4804 if %g1 can hold a function result, then sparc_expand_epilogue will
4805 lose (the result will be clobbered). */
4806 rtx new_base = gen_rtx_REG (Pmode, 1);
4807 emit_move_insn (new_base, GEN_INT (offset));
4808 emit_insn (gen_rtx_SET (VOIDmode,
4809 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4810 return new_base;
4811 }
4812
4813 /* Emit code to save/restore call-saved global and FP registers. */
4814
4815 static void
4816 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4817 {
4818 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4819 {
4820 base = emit_adjust_base_to_offset (base, offset);
4821 offset = 0;
4822 }
4823
4824 offset
4825 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4826 save_global_or_fp_reg_p, action, SORR_NONE);
4827 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4828 save_global_or_fp_reg_p, action, SORR_NONE);
4829 }
4830
4831 /* Emit code to save/restore call-saved local and in registers. */
4832
4833 static void
4834 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4835 {
4836 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4837 {
4838 base = emit_adjust_base_to_offset (base, offset);
4839 offset = 0;
4840 }
4841
4842 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4843 save_local_or_in_reg_p, action, SORR_ADVANCE);
4844 }
4845
4846 /* Emit a window_save insn. */
4847
4848 static rtx
4849 emit_window_save (rtx increment)
4850 {
4851 rtx insn = emit_insn (gen_window_save (increment));
4852 RTX_FRAME_RELATED_P (insn) = 1;
4853
4854 /* The incoming return address (%o7) is saved in %i7. */
4855 add_reg_note (insn, REG_CFA_REGISTER,
4856 gen_rtx_SET (VOIDmode,
4857 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4858 gen_rtx_REG (Pmode,
4859 INCOMING_RETURN_ADDR_REGNUM)));
4860
4861 /* The window save event. */
4862 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4863
4864 /* The CFA is %fp, the hard frame pointer. */
4865 add_reg_note (insn, REG_CFA_DEF_CFA,
4866 plus_constant (hard_frame_pointer_rtx,
4867 INCOMING_FRAME_SP_OFFSET));
4868
4869 return insn;
4870 }
4871
4872 /* Generate an increment for the stack pointer. */
4873
4874 static rtx
4875 gen_stack_pointer_inc (rtx increment)
4876 {
4877 return gen_rtx_SET (VOIDmode,
4878 stack_pointer_rtx,
4879 gen_rtx_PLUS (Pmode,
4880 stack_pointer_rtx,
4881 increment));
4882 }
4883
4884 /* Generate a decrement for the stack pointer. */
4885
4886 static rtx
4887 gen_stack_pointer_dec (rtx decrement)
4888 {
4889 return gen_rtx_SET (VOIDmode,
4890 stack_pointer_rtx,
4891 gen_rtx_MINUS (Pmode,
4892 stack_pointer_rtx,
4893 decrement));
4894 }
4895
4896 /* Expand the function prologue. The prologue is responsible for reserving
4897 storage for the frame, saving the call-saved registers and loading the
4898 GOT register if needed. */
4899
4900 void
4901 sparc_expand_prologue (void)
4902 {
4903 HOST_WIDE_INT size;
4904 rtx insn;
4905
4906 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4907 on the final value of the flag means deferring the prologue/epilogue
4908 expansion until just before the second scheduling pass, which is too
4909 late to emit multiple epilogues or return insns.
4910
4911 Of course we are making the assumption that the value of the flag
4912 will not change between now and its final value. Of the three parts
     of the formula, only the last one can reasonably vary.  Let's take a
     closer look, after assuming that the first two are true
     (otherwise the last value is effectively silenced).
4916
4917 If only_leaf_regs_used returns false, the global predicate will also
4918 be false so the actual frame size calculated below will be positive.
4919 As a consequence, the save_register_window insn will be emitted in
4920 the instruction stream; now this insn explicitly references %fp
4921 which is not a leaf register so only_leaf_regs_used will always
4922 return false subsequently.
4923
4924 If only_leaf_regs_used returns true, we hope that the subsequent
4925 optimization passes won't cause non-leaf registers to pop up. For
4926 example, the regrename pass has special provisions to not rename to
4927 non-leaf registers in a leaf function. */
4928 sparc_leaf_function_p
4929 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4930
4931 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4932
4933 if (flag_stack_usage_info)
4934 current_function_static_stack_size = size;
4935
4936 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4937 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4938
4939 if (size == 0)
4940 ; /* do nothing. */
4941 else if (sparc_leaf_function_p)
4942 {
4943 rtx size_int_rtx = GEN_INT (-size);
4944
4945 if (size <= 4096)
4946 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4947 else if (size <= 8192)
4948 {
4949 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4950 /* %sp is still the CFA register. */
4951 RTX_FRAME_RELATED_P (insn) = 1;
4952 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4953 }
4954 else
4955 {
4956 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4957 emit_move_insn (size_rtx, size_int_rtx);
4958 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4959 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4960 gen_stack_pointer_inc (size_int_rtx));
4961 }
4962
4963 RTX_FRAME_RELATED_P (insn) = 1;
4964 }
4965 else
4966 {
4967 rtx size_int_rtx = GEN_INT (-size);
4968
4969 if (size <= 4096)
4970 emit_window_save (size_int_rtx);
4971 else if (size <= 8192)
4972 {
4973 emit_window_save (GEN_INT (-4096));
4974 /* %sp is not the CFA register anymore. */
4975 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4976 }
4977 else
4978 {
4979 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4980 emit_move_insn (size_rtx, size_int_rtx);
4981 emit_window_save (size_rtx);
4982 }
4983 }
4984
4985 if (sparc_leaf_function_p)
4986 {
4987 sparc_frame_base_reg = stack_pointer_rtx;
4988 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4989 }
4990 else
4991 {
4992 sparc_frame_base_reg = hard_frame_pointer_rtx;
4993 sparc_frame_base_offset = SPARC_STACK_BIAS;
4994 }
4995
4996 if (sparc_n_global_fp_regs > 0)
4997 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4998 sparc_frame_base_offset
4999 - sparc_apparent_frame_size,
5000 SORR_SAVE);
5001
5002 /* Load the GOT register if needed. */
5003 if (crtl->uses_pic_offset_table)
5004 load_got_register ();
5005
5006 /* Advertise that the data calculated just above are now valid. */
5007 sparc_prologue_data_valid_p = true;
5008 }
5009
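/* Illustrative example (an editor's sketch): for a regular (non-flat,
   non-leaf) function with a small frame, the prologue above boils down
   to a single window_save insn, e.g.

	save	%sp, -96, %sp

   with -96 being the minimum 32-bit frame, followed by the FP/global
   register saves and the GOT setup when they are needed.  */
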
5010 /* Expand the function prologue. The prologue is responsible for reserving
5011 storage for the frame, saving the call-saved registers and loading the
5012 GOT register if needed. */
5013
5014 void
5015 sparc_flat_expand_prologue (void)
5016 {
5017 HOST_WIDE_INT size;
5018 rtx insn;
5019
5020 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
5021
5022 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5023
5024 if (flag_stack_usage_info)
5025 current_function_static_stack_size = size;
5026
5027 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5028 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5029
5030 if (sparc_save_local_in_regs_p)
5031 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5032 SORR_SAVE);
5033
5034 if (size == 0)
5035 ; /* do nothing. */
5036 else
5037 {
5038 rtx size_int_rtx, size_rtx;
5039
5040 size_rtx = size_int_rtx = GEN_INT (-size);
5041
5042 /* We establish the frame (i.e. decrement the stack pointer) first, even
5043 if we use a frame pointer, because we cannot clobber any call-saved
5044 registers, including the frame pointer, if we haven't created a new
5045 register save area, for the sake of compatibility with the ABI. */
5046 if (size <= 4096)
5047 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5048 else if (size <= 8192 && !frame_pointer_needed)
5049 {
5050 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5051 RTX_FRAME_RELATED_P (insn) = 1;
5052 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5053 }
5054 else
5055 {
5056 size_rtx = gen_rtx_REG (Pmode, 1);
5057 emit_move_insn (size_rtx, size_int_rtx);
5058 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5059 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5060 gen_stack_pointer_inc (size_int_rtx));
5061 }
5062 RTX_FRAME_RELATED_P (insn) = 1;
5063
5064 /* Ensure nothing is scheduled until after the frame is established. */
5065 emit_insn (gen_blockage ());
5066
5067 if (frame_pointer_needed)
5068 {
5069 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5070 gen_rtx_MINUS (Pmode,
5071 stack_pointer_rtx,
5072 size_rtx)));
5073 RTX_FRAME_RELATED_P (insn) = 1;
5074
5075 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5076 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5077 plus_constant (stack_pointer_rtx,
5078 size)));
5079 }
5080
5081 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5082 {
5083 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5084 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5085
5086 insn = emit_move_insn (i7, o7);
5087 RTX_FRAME_RELATED_P (insn) = 1;
5088
5089 add_reg_note (insn, REG_CFA_REGISTER,
5090 gen_rtx_SET (VOIDmode, i7, o7));
5091
5092 /* Prevent this instruction from ever being considered dead,
5093 even if this function has no epilogue. */
5094 emit_insn (gen_rtx_USE (VOIDmode, i7));
5095 }
5096 }
5097
5098 if (frame_pointer_needed)
5099 {
5100 sparc_frame_base_reg = hard_frame_pointer_rtx;
5101 sparc_frame_base_offset = SPARC_STACK_BIAS;
5102 }
5103 else
5104 {
5105 sparc_frame_base_reg = stack_pointer_rtx;
5106 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5107 }
5108
5109 if (sparc_n_global_fp_regs > 0)
5110 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5111 sparc_frame_base_offset
5112 - sparc_apparent_frame_size,
5113 SORR_SAVE);
5114
5115 /* Load the GOT register if needed. */
5116 if (crtl->uses_pic_offset_table)
5117 load_got_register ();
5118
5119 /* Advertise that the data calculated just above are now valid. */
5120 sparc_prologue_data_valid_p = true;
5121 }
5122
5123 /* This function generates the assembly code for function entry, which boils
5124 down to emitting the necessary .register directives. */
5125
5126 static void
5127 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5128 {
5129 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5130 if (!TARGET_FLAT)
5131 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
5132
5133 sparc_output_scratch_registers (file);
5134 }
5135
5136 /* Expand the function epilogue, either normal or part of a sibcall.
5137 We emit all the instructions except the return or the call. */
5138
5139 void
5140 sparc_expand_epilogue (bool for_eh)
5141 {
5142 HOST_WIDE_INT size = sparc_frame_size;
5143
5144 if (sparc_n_global_fp_regs > 0)
5145 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5146 sparc_frame_base_offset
5147 - sparc_apparent_frame_size,
5148 SORR_RESTORE);
5149
5150 if (size == 0 || for_eh)
5151 ; /* do nothing. */
5152 else if (sparc_leaf_function_p)
5153 {
5154 if (size <= 4096)
5155 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5156 else if (size <= 8192)
5157 {
5158 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5159 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5160 }
5161 else
5162 {
5163 rtx reg = gen_rtx_REG (Pmode, 1);
5164 emit_move_insn (reg, GEN_INT (-size));
5165 emit_insn (gen_stack_pointer_dec (reg));
5166 }
5167 }
5168 }
5169
5170 /* Expand the function epilogue, either normal or part of a sibcall.
5171 We emit all the instructions except the return or the call. */
5172
5173 void
5174 sparc_flat_expand_epilogue (bool for_eh)
5175 {
5176 HOST_WIDE_INT size = sparc_frame_size;
5177
5178 if (sparc_n_global_fp_regs > 0)
5179 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5180 sparc_frame_base_offset
5181 - sparc_apparent_frame_size,
5182 SORR_RESTORE);
5183
  /* If we have a frame pointer, we need both to restore it before the
     frame is destroyed and to use its current value in destroying the
     frame.  Since we don't have an atomic way to do that in the flat
     window model, we save the current value into a temporary register
     (%g1).  */
5188 if (frame_pointer_needed && !for_eh)
5189 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5190
5191 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5192 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5193 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5194
5195 if (sparc_save_local_in_regs_p)
5196 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5197 sparc_frame_base_offset,
5198 SORR_RESTORE);
5199
5200 if (size == 0 || for_eh)
5201 ; /* do nothing. */
5202 else if (frame_pointer_needed)
5203 {
5204 /* Make sure the frame is destroyed after everything else is done. */
5205 emit_insn (gen_blockage ());
5206
5207 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5208 }
5209 else
5210 {
5211 /* Likewise. */
5212 emit_insn (gen_blockage ());
5213
5214 if (size <= 4096)
5215 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5216 else if (size <= 8192)
5217 {
5218 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5219 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5220 }
5221 else
5222 {
5223 rtx reg = gen_rtx_REG (Pmode, 1);
5224 emit_move_insn (reg, GEN_INT (-size));
5225 emit_insn (gen_stack_pointer_dec (reg));
5226 }
5227 }
5228 }
5229
5230 /* Return true if it is appropriate to emit `return' instructions in the
5231 body of a function. */
5232
5233 bool
5234 sparc_can_use_return_insn_p (void)
5235 {
  return sparc_prologue_data_valid_p
         && sparc_n_global_fp_regs == 0
         && (TARGET_FLAT
             ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
             : (sparc_frame_size == 0 || !sparc_leaf_function_p));
5241 }
5242
5243 /* This function generates the assembly code for function exit. */
5244
5245 static void
5246 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5247 {
5248 /* If the last two instructions of a function are "call foo; dslot;"
5249 the return address might point to the first instruction in the next
5250 function and we have to output a dummy nop for the sake of sane
5251 backtraces in such cases. This is pointless for sibling calls since
5252 the return address is explicitly adjusted. */
5253
5254 rtx insn, last_real_insn;
5255
5256 insn = get_last_insn ();
5257
5258 last_real_insn = prev_real_insn (insn);
5259 if (last_real_insn
5260 && GET_CODE (last_real_insn) == INSN
5261 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5262 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5263
5264 if (last_real_insn
5265 && CALL_P (last_real_insn)
5266 && !SIBLING_CALL_P (last_real_insn))
5267 fputs("\tnop\n", file);
5268
5269 sparc_output_deferred_case_vectors ();
5270 }
5271
5272 /* Output a 'restore' instruction. */
5273
5274 static void
5275 output_restore (rtx pat)
5276 {
5277 rtx operands[3];
5278
5279 if (! pat)
5280 {
5281 fputs ("\t restore\n", asm_out_file);
5282 return;
5283 }
5284
5285 gcc_assert (GET_CODE (pat) == SET);
5286
5287 operands[0] = SET_DEST (pat);
5288 pat = SET_SRC (pat);
5289
5290 switch (GET_CODE (pat))
5291 {
5292 case PLUS:
5293 operands[1] = XEXP (pat, 0);
5294 operands[2] = XEXP (pat, 1);
5295 output_asm_insn (" restore %r1, %2, %Y0", operands);
5296 break;
5297 case LO_SUM:
5298 operands[1] = XEXP (pat, 0);
5299 operands[2] = XEXP (pat, 1);
5300 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5301 break;
5302 case ASHIFT:
5303 operands[1] = XEXP (pat, 0);
5304 gcc_assert (XEXP (pat, 1) == const1_rtx);
5305 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5306 break;
5307 default:
5308 operands[1] = pat;
5309 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5310 break;
5311 }
5312 }
5313
5314 /* Output a return. */
5315
5316 const char *
5317 output_return (rtx insn)
5318 {
5319 if (crtl->calls_eh_return)
5320 {
5321 /* If the function uses __builtin_eh_return, the eh_return
5322 machinery occupies the delay slot. */
5323 gcc_assert (!final_sequence);
5324
5325 if (flag_delayed_branch)
5326 {
5327 if (!TARGET_FLAT && TARGET_V9)
5328 fputs ("\treturn\t%i7+8\n", asm_out_file);
5329 else
5330 {
5331 if (!TARGET_FLAT)
5332 fputs ("\trestore\n", asm_out_file);
5333
5334 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5335 }
5336
5337 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5338 }
5339 else
5340 {
5341 if (!TARGET_FLAT)
5342 fputs ("\trestore\n", asm_out_file);
5343
5344 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5345 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5346 }
5347 }
5348 else if (sparc_leaf_function_p || TARGET_FLAT)
5349 {
5350 /* This is a leaf or flat function so we don't have to bother restoring
5351 the register window, which frees us from dealing with the convoluted
5352 semantics of restore/return. We simply output the jump to the
5353 return address and the insn in the delay slot (if any). */
5354
5355 return "jmp\t%%o7+%)%#";
5356 }
5357 else
5358 {
5359 /* This is a regular function so we have to restore the register window.
5360 We may have a pending insn for the delay slot, which will be either
5361 combined with the 'restore' instruction or put in the delay slot of
5362 the 'return' instruction. */
5363
5364 if (final_sequence)
5365 {
5366 rtx delay, pat;
5367
5368 delay = NEXT_INSN (insn);
5369 gcc_assert (delay);
5370
5371 pat = PATTERN (delay);
5372
5373 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5374 {
5375 epilogue_renumber (&pat, 0);
5376 return "return\t%%i7+%)%#";
5377 }
5378 else
5379 {
5380 output_asm_insn ("jmp\t%%i7+%)", NULL);
5381 output_restore (pat);
5382 PATTERN (delay) = gen_blockage ();
5383 INSN_CODE (delay) = -1;
5384 }
5385 }
5386 else
5387 {
5388 /* The delay slot is empty. */
5389 if (TARGET_V9)
5390 return "return\t%%i7+%)\n\t nop";
5391 else if (flag_delayed_branch)
5392 return "jmp\t%%i7+%)\n\t restore";
5393 else
5394 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5395 }
5396 }
5397
5398 return "";
5399 }
5400
5401 /* Output a sibling call. */
5402
5403 const char *
5404 output_sibcall (rtx insn, rtx call_operand)
5405 {
5406 rtx operands[1];
5407
5408 gcc_assert (flag_delayed_branch);
5409
5410 operands[0] = call_operand;
5411
5412 if (sparc_leaf_function_p || TARGET_FLAT)
5413 {
5414 /* This is a leaf or flat function so we don't have to bother restoring
5415 the register window. We simply output the jump to the function and
5416 the insn in the delay slot (if any). */
5417
5418 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5419
5420 if (final_sequence)
5421 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5422 operands);
5423 else
        /* Use `or' with rs2 %%g0 instead of `mov', so that as/ld can
           optimize it into a branch if possible.  */
5426 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5427 operands);
5428 }
5429 else
5430 {
5431 /* This is a regular function so we have to restore the register window.
5432 We may have a pending insn for the delay slot, which will be combined
5433 with the 'restore' instruction. */
5434
5435 output_asm_insn ("call\t%a0, 0", operands);
5436
5437 if (final_sequence)
5438 {
5439 rtx delay = NEXT_INSN (insn);
5440 gcc_assert (delay);
5441
5442 output_restore (PATTERN (delay));
5443
5444 PATTERN (delay) = gen_blockage ();
5445 INSN_CODE (delay) = -1;
5446 }
5447 else
5448 output_restore (NULL_RTX);
5449 }
5450
5451 return "";
5452 }
5453 \f
5454 /* Functions for handling argument passing.
5455
5456 For 32-bit, the first 6 args are normally in registers and the rest are
5457 pushed. Any arg that starts within the first 6 words is at least
5458 partially passed in a register unless its data type forbids.
5459
5460 For 64-bit, the argument registers are laid out as an array of 16 elements
5461 and arguments are added sequentially. The first 6 int args and up to the
5462 first 16 fp args (depending on size) are passed in regs.
5463
5464 Slot Stack Integral Float Float in structure Double Long Double
5465 ---- ----- -------- ----- ------------------ ------ -----------
5466 15 [SP+248] %f31 %f30,%f31 %d30
5467 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5468 13 [SP+232] %f27 %f26,%f27 %d26
5469 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5470 11 [SP+216] %f23 %f22,%f23 %d22
5471 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5472 9 [SP+200] %f19 %f18,%f19 %d18
5473 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5474 7 [SP+184] %f15 %f14,%f15 %d14
5475 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5476 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5477 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5478 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5479 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5480 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5481 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5482
5483 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5484
5485 Integral arguments are always passed as 64-bit quantities appropriately
5486 extended.
5487
5488 Passing of floating point values is handled as follows.
5489 If a prototype is in scope:
5490 If the value is in a named argument (i.e. not a stdarg function or a
5491 value not part of the `...') then the value is passed in the appropriate
5492 fp reg.
5493 If the value is part of the `...' and is passed in one of the first 6
5494 slots then the value is passed in the appropriate int reg.
5495 If the value is part of the `...' and is not passed in one of the first 6
5496 slots then the value is passed in memory.
5497 If a prototype is not in scope:
5498 If the value is one of the first 6 arguments the value is passed in the
5499 appropriate integer reg and the appropriate fp reg.
5500 If the value is not one of the first 6 arguments the value is passed in
5501 the appropriate fp reg and in memory.
5502
5503
5504 Summary of the calling conventions implemented by GCC on the SPARC:
5505
5506 32-bit ABI:
5507 size argument return value
5508
5509 small integer <4 int. reg. int. reg.
5510 word 4 int. reg. int. reg.
5511 double word 8 int. reg. int. reg.
5512
5513 _Complex small integer <8 int. reg. int. reg.
5514 _Complex word 8 int. reg. int. reg.
5515 _Complex double word 16 memory int. reg.
5516
5517 vector integer <=8 int. reg. FP reg.
5518 vector integer >8 memory memory
5519
5520 float 4 int. reg. FP reg.
5521 double 8 int. reg. FP reg.
5522 long double 16 memory memory
5523
5524 _Complex float 8 memory FP reg.
5525 _Complex double 16 memory FP reg.
5526 _Complex long double 32 memory FP reg.
5527
5528 vector float any memory memory
5529
5530 aggregate any memory memory
5531
5532
5533
5534 64-bit ABI:
5535 size argument return value
5536
5537 small integer <8 int. reg. int. reg.
5538 word 8 int. reg. int. reg.
5539 double word 16 int. reg. int. reg.
5540
5541 _Complex small integer <16 int. reg. int. reg.
5542 _Complex word 16 int. reg. int. reg.
5543 _Complex double word 32 memory int. reg.
5544
5545 vector integer <=16 FP reg. FP reg.
5546 vector integer 16<s<=32 memory FP reg.
5547 vector integer >32 memory memory
5548
5549 float 4 FP reg. FP reg.
5550 double 8 FP reg. FP reg.
5551 long double 16 FP reg. FP reg.
5552
5553 _Complex float 8 FP reg. FP reg.
5554 _Complex double 16 FP reg. FP reg.
5555 _Complex long double 32 memory FP reg.
5556
5557 vector float <=16 FP reg. FP reg.
5558 vector float 16<s<=32 memory FP reg.
5559 vector float >32 memory memory
5560
5561 aggregate <=16 reg. reg.
5562 aggregate 16<s<=32 memory reg.
5563 aggregate >32 memory memory
5564
5565
5566
5567 Note #1: complex floating-point types follow the extended SPARC ABIs as
5568 implemented by the Sun compiler.
5569
5570 Note #2: integral vector types follow the scalar floating-point types
5571 conventions to match what is implemented by the Sun VIS SDK.
5572
5573 Note #3: floating-point vector types follow the aggregate types
5574 conventions. */
5575
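/* Worked example (illustrative, derived from the table above): for a
   64-bit call

     f (int i, double d, struct { float x; float y; } s)

   I occupies slot 0 and is passed sign-extended in %o0, D occupies
   slot 1 and is passed in %d2, and S occupies slot 2 with its two
   floats promoted to %f4 and %f5, exactly as the table prescribes.  */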
5576
5577 /* Maximum number of int regs for args. */
5578 #define SPARC_INT_ARG_MAX 6
5579 /* Maximum number of fp regs for args. */
5580 #define SPARC_FP_ARG_MAX 16
5581
5582 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
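/* For instance (illustrative arithmetic only): with UNITS_PER_WORD == 8,
   ROUND_ADVANCE (13) == (13 + 7) / 8 == 2 argument words and
   ROUND_ADVANCE (8) == 1, while with UNITS_PER_WORD == 4,
   ROUND_ADVANCE (8) == 2.  */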
5583
5584 /* Handle the INIT_CUMULATIVE_ARGS macro.
5585 Initialize a variable CUM of type CUMULATIVE_ARGS
5586 for a call to a function whose data type is FNTYPE.
5587 For a library call, FNTYPE is 0. */
5588
5589 void
5590 init_cumulative_args (struct sparc_args *cum, tree fntype,
5591 rtx libname ATTRIBUTE_UNUSED,
5592 tree fndecl ATTRIBUTE_UNUSED)
5593 {
5594 cum->words = 0;
5595 cum->prototype_p = fntype && prototype_p (fntype);
5596 cum->libcall_p = fntype == 0;
5597 }
5598
5599 /* Handle promotion of pointer and integer arguments. */
5600
5601 static enum machine_mode
5602 sparc_promote_function_mode (const_tree type,
5603 enum machine_mode mode,
5604 int *punsignedp,
5605 const_tree fntype ATTRIBUTE_UNUSED,
5606 int for_return ATTRIBUTE_UNUSED)
5607 {
5608 if (type != NULL_TREE && POINTER_TYPE_P (type))
5609 {
5610 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5611 return Pmode;
5612 }
5613
5614 /* Integral arguments are passed as full words, as per the ABI. */
5615 if (GET_MODE_CLASS (mode) == MODE_INT
5616 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5617 return word_mode;
5618
5619 return mode;
5620 }
5621
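/* A minimal standalone model (illustrative, not GCC code) of the
   promotion rule implemented above, assuming UNITS_PER_WORD == 8:
   sub-word integral values widen to a full word, everything else
   keeps its natural size.  */
#if 0
static unsigned int
promoted_arg_size (unsigned int size, int integral_p)
{
  const unsigned int word_size = 8;	/* UNITS_PER_WORD on SPARC64.  */
  if (integral_p && size < word_size)
    return word_size;			/* e.g. short: 2 -> 8 bytes.  */
  return size;				/* doubles, pointers, etc.  */
}
#endif
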
5622 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5623
5624 static bool
5625 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5626 {
5627 return TARGET_ARCH64 ? true : false;
5628 }
5629
5630 /* Scan the record type TYPE and return the following predicates:
5631 - INTREGS_P: the record contains at least one field or sub-field
5632 that is eligible for promotion in integer registers.
5633 - FP_REGS_P: the record contains at least one field or sub-field
5634 that is eligible for promotion in floating-point registers.
5635 - PACKED_P: the record contains at least one field that is packed.
5636
5637 Sub-fields are not taken into account for the PACKED_P predicate. */
5638
5639 static void
5640 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5641 int *packed_p)
5642 {
5643 tree field;
5644
5645 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5646 {
5647 if (TREE_CODE (field) == FIELD_DECL)
5648 {
5649 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5650 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5651 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5652 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5653 && TARGET_FPU)
5654 *fpregs_p = 1;
5655 else
5656 *intregs_p = 1;
5657
5658 if (packed_p && DECL_PACKED (field))
5659 *packed_p = 1;
5660 }
5661 }
5662 }
5663
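/* Example (illustrative): scanning

     struct { int i; struct { double d; } inner; }

   sets *INTREGS_P for the int field and *FPREGS_P for the nested
   double; *PACKED_P would only be set by a packed field at the top
   level, since sub-fields are ignored for that predicate.  */
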
5664 /* Compute the slot number to pass an argument in.
5665 Return the slot number or -1 if passing on the stack.
5666
5667 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5668 the preceding args and about the function being called.
5669 MODE is the argument's machine mode.
5670 TYPE is the data type of the argument (as a tree).
5671 This is null for libcalls where that information may
5672 not be available.
5673 NAMED is nonzero if this argument is a named parameter
5674 (otherwise it is an extra parameter matching an ellipsis).
5675 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5676 *PREGNO records the register number to use if scalar type.
5677 *PPADDING records the amount of padding needed in words. */
5678
5679 static int
5680 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5681 const_tree type, bool named, bool incoming_p,
5682 int *pregno, int *ppadding)
5683 {
5684 int regbase = (incoming_p
5685 ? SPARC_INCOMING_INT_ARG_FIRST
5686 : SPARC_OUTGOING_INT_ARG_FIRST);
5687 int slotno = cum->words;
5688 enum mode_class mclass;
5689 int regno;
5690
5691 *ppadding = 0;
5692
5693 if (type && TREE_ADDRESSABLE (type))
5694 return -1;
5695
5696 if (TARGET_ARCH32
5697 && mode == BLKmode
5698 && type
5699 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5700 return -1;
5701
5702 /* For SPARC64, objects requiring 16-byte alignment get it. */
5703 if (TARGET_ARCH64
5704 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5705 && (slotno & 1) != 0)
5706 slotno++, *ppadding = 1;
5707
5708 mclass = GET_MODE_CLASS (mode);
5709 if (type && TREE_CODE (type) == VECTOR_TYPE)
5710 {
5711 /* Vector types deserve special treatment because they are
5712 polymorphic wrt their mode, depending upon whether VIS
5713 instructions are enabled. */
5714 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5715 {
5716 /* The SPARC port defines no floating-point vector modes. */
5717 gcc_assert (mode == BLKmode);
5718 }
5719 else
5720 {
5721 /* Integral vector types should either have a vector
5722 mode or an integral mode, because we are guaranteed
5723 by pass_by_reference that their size is not greater
5724 than 16 bytes and TImode is 16-byte wide. */
5725 gcc_assert (mode != BLKmode);
5726
5727 /* Vector integers are handled like floats according to
5728 the Sun VIS SDK. */
5729 mclass = MODE_FLOAT;
5730 }
5731 }
5732
5733 switch (mclass)
5734 {
5735 case MODE_FLOAT:
5736 case MODE_COMPLEX_FLOAT:
5737 case MODE_VECTOR_INT:
5738 if (TARGET_ARCH64 && TARGET_FPU && named)
5739 {
5740 if (slotno >= SPARC_FP_ARG_MAX)
5741 return -1;
5742 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5743 /* Arguments filling only one single FP register are
5744 right-justified in the outer double FP register. */
5745 if (GET_MODE_SIZE (mode) <= 4)
5746 regno++;
5747 break;
5748 }
5749 /* fallthrough */
5750
5751 case MODE_INT:
5752 case MODE_COMPLEX_INT:
5753 if (slotno >= SPARC_INT_ARG_MAX)
5754 return -1;
5755 regno = regbase + slotno;
5756 break;
5757
5758 case MODE_RANDOM:
5759 if (mode == VOIDmode)
5760 /* MODE is VOIDmode when generating the actual call. */
5761 return -1;
5762
5763 gcc_assert (mode == BLKmode);
5764
5765 if (TARGET_ARCH32
5766 || !type
5767 || (TREE_CODE (type) != VECTOR_TYPE
5768 && TREE_CODE (type) != RECORD_TYPE))
5769 {
5770 if (slotno >= SPARC_INT_ARG_MAX)
5771 return -1;
5772 regno = regbase + slotno;
5773 }
5774 else /* TARGET_ARCH64 && type */
5775 {
5776 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5777
5778 /* First see what kinds of registers we would need. */
5779 if (TREE_CODE (type) == VECTOR_TYPE)
5780 fpregs_p = 1;
5781 else
5782 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5783
5784 /* The ABI obviously doesn't specify how packed structures
5785 are passed. These are defined to be passed in int regs
5786 if possible, otherwise memory. */
5787 if (packed_p || !named)
5788 fpregs_p = 0, intregs_p = 1;
5789
5790 /* If all arg slots are filled, then must pass on stack. */
5791 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5792 return -1;
5793
5794 /* If there are only int args and all int arg slots are filled,
5795 then must pass on stack. */
5796 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5797 return -1;
5798
5799 /* Note that even if all int arg slots are filled, fp members may
5800 still be passed in regs if such regs are available.
5801 *PREGNO isn't set because there may be more than one, it's up
5802 to the caller to compute them. */
5803 return slotno;
5804 }
5805 break;
5806
5807 default :
5808 gcc_unreachable ();
5809 }
5810
5811 *pregno = regno;
5812 return slotno;
5813 }
5814
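/* A simplified standalone model (illustrative, not GCC code) of the
   scalar part of function_arg_slotno above: on 64-bit, named FP
   scalars may use any of the SPARC_FP_ARG_MAX (16) slots, other
   scalars only the SPARC_INT_ARG_MAX (6) slots, and -1 stands for
   passing on the stack.  */
#if 0
static int
model_scalar_slotno (int slotno, int fp_p, int named_p, int arch64_p)
{
  if (fp_p && arch64_p && named_p)
    return slotno < 16 ? slotno : -1;	/* FP slots 0..15.  */
  return slotno < 6 ? slotno : -1;	/* int slots 0..5.  */
}
#endif
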
5815 /* Handle recursive register counting for structure field layout. */
5816
5817 struct function_arg_record_value_parms
5818 {
5819 rtx ret; /* return expression being built. */
5820 int slotno; /* slot number of the argument. */
5821 int named; /* whether the argument is named. */
5822 int regbase; /* regno of the base register. */
5823 int stack; /* 1 if part of the argument is on the stack. */
5824 int intoffset; /* offset of the first pending integer field. */
5825 unsigned int nregs; /* number of words passed in registers. */
5826 };
5827
5828 static void function_arg_record_value_3
5829 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5830 static void function_arg_record_value_2
5831 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5832 static void function_arg_record_value_1
5833 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5834 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5835 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5836
5837 /* A subroutine of function_arg_record_value. Traverse the structure
5838 recursively and determine how many registers will be required. */
5839
5840 static void
5841 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5842 struct function_arg_record_value_parms *parms,
5843 bool packed_p)
5844 {
5845 tree field;
5846
5847 /* We need to compute how many registers are needed so we can
5848 allocate the PARALLEL but before we can do that we need to know
5849 whether there are any packed fields. The ABI obviously doesn't
5850 specify how structures are passed in this case, so they are
5851 defined to be passed in int regs if possible, otherwise memory,
5852 regardless of whether there are fp values present. */
5853
5854 if (! packed_p)
5855 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5856 {
5857 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5858 {
5859 packed_p = true;
5860 break;
5861 }
5862 }
5863
5864 /* Compute how many registers we need. */
5865 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5866 {
5867 if (TREE_CODE (field) == FIELD_DECL)
5868 {
5869 HOST_WIDE_INT bitpos = startbitpos;
5870
5871 if (DECL_SIZE (field) != 0)
5872 {
5873 if (integer_zerop (DECL_SIZE (field)))
5874 continue;
5875
5876 if (host_integerp (bit_position (field), 1))
5877 bitpos += int_bit_position (field);
5878 }
5879
5880 /* ??? FIXME: else assume zero offset. */
5881
5882 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5883 function_arg_record_value_1 (TREE_TYPE (field),
5884 bitpos,
5885 parms,
5886 packed_p);
5887 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5888 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5889 && TARGET_FPU
5890 && parms->named
5891 && ! packed_p)
5892 {
5893 if (parms->intoffset != -1)
5894 {
5895 unsigned int startbit, endbit;
5896 int intslots, this_slotno;
5897
5898 startbit = parms->intoffset & -BITS_PER_WORD;
5899 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5900
5901 intslots = (endbit - startbit) / BITS_PER_WORD;
5902 this_slotno = parms->slotno + parms->intoffset
5903 / BITS_PER_WORD;
5904
5905 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5906 {
5907 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5908 /* We need to pass this field on the stack. */
5909 parms->stack = 1;
5910 }
5911
5912 parms->nregs += intslots;
5913 parms->intoffset = -1;
5914 }
5915
5916 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5917 If it weren't true we wouldn't be here. */
5918 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5919 && DECL_MODE (field) == BLKmode)
5920 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5921 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5922 parms->nregs += 2;
5923 else
5924 parms->nregs += 1;
5925 }
5926 else
5927 {
5928 if (parms->intoffset == -1)
5929 parms->intoffset = bitpos;
5930 }
5931 }
5932 }
5933 }
5934
5935 /* A subroutine of function_arg_record_value. Assign the bits of the
5936 structure between parms->intoffset and bitpos to integer registers. */
5937
5938 static void
5939 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5940 struct function_arg_record_value_parms *parms)
5941 {
5942 enum machine_mode mode;
5943 unsigned int regno;
5944 unsigned int startbit, endbit;
5945 int this_slotno, intslots, intoffset;
5946 rtx reg;
5947
5948 if (parms->intoffset == -1)
5949 return;
5950
5951 intoffset = parms->intoffset;
5952 parms->intoffset = -1;
5953
5954 startbit = intoffset & -BITS_PER_WORD;
5955 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5956 intslots = (endbit - startbit) / BITS_PER_WORD;
5957 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5958
5959 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5960 if (intslots <= 0)
5961 return;
5962
5963 /* If this is the trailing part of a word, only load that much into
5964 the register. Otherwise load the whole register. Note that in
5965 the latter case we may pick up unwanted bits. That's not a problem
5966 at the moment, but we may wish to revisit it. */
5967
5968 if (intoffset % BITS_PER_WORD != 0)
5969 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5970 MODE_INT);
5971 else
5972 mode = word_mode;
5973
5974 intoffset /= BITS_PER_UNIT;
5975 do
5976 {
5977 regno = parms->regbase + this_slotno;
5978 reg = gen_rtx_REG (mode, regno);
5979 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5980 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5981
5982 this_slotno += 1;
5983 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5984 mode = word_mode;
5985 parms->nregs += 1;
5986 intslots -= 1;
5987 }
5988 while (intslots > 0);
5989 }
5990
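/* Worked example (illustrative) of the arithmetic above, with
   BITS_PER_WORD == 64: for intoffset == 32 and bitpos == 128 we get
   startbit == 0, endbit == 128 and thus intslots == 2; since
   intoffset is not word-aligned, the first register is loaded in
   SImode (the trailing 32 bits of word 0) and the second in full
   word_mode.  */
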
5991 /* A subroutine of function_arg_record_value. Traverse the structure
5992 recursively and assign bits to floating point registers. Track which
5993 bits in between need integer registers; invoke function_arg_record_value_3
5994 to make that happen. */
5995
5996 static void
5997 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5998 struct function_arg_record_value_parms *parms,
5999 bool packed_p)
6000 {
6001 tree field;
6002
6003 if (! packed_p)
6004 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6005 {
6006 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6007 {
6008 packed_p = true;
6009 break;
6010 }
6011 }
6012
6013 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6014 {
6015 if (TREE_CODE (field) == FIELD_DECL)
6016 {
6017 HOST_WIDE_INT bitpos = startbitpos;
6018
6019 if (DECL_SIZE (field) != 0)
6020 {
6021 if (integer_zerop (DECL_SIZE (field)))
6022 continue;
6023
6024 if (host_integerp (bit_position (field), 1))
6025 bitpos += int_bit_position (field);
6026 }
6027
6028 /* ??? FIXME: else assume zero offset. */
6029
6030 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6031 function_arg_record_value_2 (TREE_TYPE (field),
6032 bitpos,
6033 parms,
6034 packed_p);
6035 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6036 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6037 && TARGET_FPU
6038 && parms->named
6039 && ! packed_p)
6040 {
6041 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
6042 int regno, nregs, pos;
6043 enum machine_mode mode = DECL_MODE (field);
6044 rtx reg;
6045
6046 function_arg_record_value_3 (bitpos, parms);
6047
6048 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6049 && mode == BLKmode)
6050 {
6051 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6052 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6053 }
6054 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6055 {
6056 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6057 nregs = 2;
6058 }
6059 else
6060 nregs = 1;
6061
6062 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6063 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6064 regno++;
6065 reg = gen_rtx_REG (mode, regno);
6066 pos = bitpos / BITS_PER_UNIT;
6067 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6068 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6069 parms->nregs += 1;
6070 while (--nregs > 0)
6071 {
6072 regno += GET_MODE_SIZE (mode) / 4;
6073 reg = gen_rtx_REG (mode, regno);
6074 pos += GET_MODE_SIZE (mode);
6075 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6076 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6077 parms->nregs += 1;
6078 }
6079 }
6080 else
6081 {
6082 if (parms->intoffset == -1)
6083 parms->intoffset = bitpos;
6084 }
6085 }
6086 }
6087 }
6088
6089 /* Used by function_arg and sparc_function_value_1 to implement the complex
6090 conventions of the 64-bit ABI for passing and returning structures.
6091 Return an expression valid as a return value for the FUNCTION_ARG
6092 and TARGET_FUNCTION_VALUE.
6093
6094 TYPE is the data type of the argument (as a tree).
6095 This is null for libcalls where that information may
6096 not be available.
6097 MODE is the argument's machine mode.
6098 SLOTNO is the index number of the argument's slot in the parameter array.
6099 NAMED is nonzero if this argument is a named parameter
6100 (otherwise it is an extra parameter matching an ellipsis).
6101 REGBASE is the regno of the base register for the parameter array. */
6102
6103 static rtx
6104 function_arg_record_value (const_tree type, enum machine_mode mode,
6105 int slotno, int named, int regbase)
6106 {
6107 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6108 struct function_arg_record_value_parms parms;
6109 unsigned int nregs;
6110
6111 parms.ret = NULL_RTX;
6112 parms.slotno = slotno;
6113 parms.named = named;
6114 parms.regbase = regbase;
6115 parms.stack = 0;
6116
6117 /* Compute how many registers we need. */
6118 parms.nregs = 0;
6119 parms.intoffset = 0;
6120 function_arg_record_value_1 (type, 0, &parms, false);
6121
6122 /* Take into account pending integer fields. */
6123 if (parms.intoffset != -1)
6124 {
6125 unsigned int startbit, endbit;
6126 int intslots, this_slotno;
6127
6128 startbit = parms.intoffset & -BITS_PER_WORD;
6129 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6130 intslots = (endbit - startbit) / BITS_PER_WORD;
6131 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
6132
6133 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6134 {
6135 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6136 /* We need to pass this field on the stack. */
6137 parms.stack = 1;
6138 }
6139
6140 parms.nregs += intslots;
6141 }
6142 nregs = parms.nregs;
6143
6144 /* Allocate the vector and handle some annoying special cases. */
6145 if (nregs == 0)
6146 {
6147 /* ??? Empty structure has no value? Duh? */
6148 if (typesize <= 0)
6149 {
6150 /* Though there's nothing really to store, return a word register
6151 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6152 leads to breakage because there are zero bytes to load. */
6154 return gen_rtx_REG (mode, regbase);
6155 }
6156 else
6157 {
6158 /* ??? C++ has structures with no fields, and yet a size. Give up
6159 for now and pass everything back in integer registers. */
6160 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6161 }
6162 if (nregs + slotno > SPARC_INT_ARG_MAX)
6163 nregs = SPARC_INT_ARG_MAX - slotno;
6164 }
6165 gcc_assert (nregs != 0);
6166
6167 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6168
6169 /* If at least one field must be passed on the stack, generate
6170 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6171 also be passed on the stack. We can't do much better because the
6172 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6173 of structures for which the fields passed exclusively in registers
6174 are not at the beginning of the structure. */
6175 if (parms.stack)
6176 XVECEXP (parms.ret, 0, 0)
6177 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6178
6179 /* Fill in the entries. */
6180 parms.nregs = 0;
6181 parms.intoffset = 0;
6182 function_arg_record_value_2 (type, 0, &parms, false);
6183 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6184
6185 gcc_assert (parms.nregs == nregs);
6186
6187 return parms.ret;
6188 }
6189
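/* Example (illustrative): under the 64-bit ABI, a 16-byte

     struct { long l; double d; }

   starting in slot 0 comes back as

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
		(expr_list (reg:DF %f2) (const_int 8))])

   i.e. the integer word in %o0 and the double in the FP register of
   slot 1, matching the table further up.  */
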
6190 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6191 of the 64-bit ABI for passing and returning unions.
6192 Return an expression valid as a return value for the FUNCTION_ARG
6193 and TARGET_FUNCTION_VALUE.
6194
6195 SIZE is the size in bytes of the union.
6196 MODE is the argument's machine mode.
6197 SLOTNO is the index number of the union's slot in the parameter array.
6198 REGNO is the hard register the union will be passed in. */
6198
6199 static rtx
6200 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6201 int regno)
6202 {
6203 int nwords = ROUND_ADVANCE (size), i;
6204 rtx regs;
6205
6206 /* See comment in previous function for empty structures. */
6207 if (nwords == 0)
6208 return gen_rtx_REG (mode, regno);
6209
6210 if (slotno == SPARC_INT_ARG_MAX - 1)
6211 nwords = 1;
6212
6213 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6214
6215 for (i = 0; i < nwords; i++)
6216 {
6217 /* Unions are passed left-justified. */
6218 XVECEXP (regs, 0, i)
6219 = gen_rtx_EXPR_LIST (VOIDmode,
6220 gen_rtx_REG (word_mode, regno),
6221 GEN_INT (UNITS_PER_WORD * i));
6222 regno++;
6223 }
6224
6225 return regs;
6226 }
6227
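/* Example (illustrative): a 12-byte union starting in slot 0 is
   rounded up to 2 words and becomes

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
		(expr_list (reg:DI %o1) (const_int 8))])

   left-justified as noted above; had it started in the last slot
   (slot 5), only the single word in %o5 would be used.  */
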
6228 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6229 for passing and returning large (BLKmode) vectors.
6230 Return an expression valid as a return value for the FUNCTION_ARG
6231 and TARGET_FUNCTION_VALUE.
6232
6233 SIZE is the size in bytes of the vector (at least 8 bytes).
6234 REGNO is the FP hard register the vector will be passed in. */
6235
6236 static rtx
6237 function_arg_vector_value (int size, int regno)
6238 {
6239 int i, nregs = size / 8;
6240 rtx regs;
6241
6242 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6243
6244 for (i = 0; i < nregs; i++)
6245 {
6246 XVECEXP (regs, 0, i)
6247 = gen_rtx_EXPR_LIST (VOIDmode,
6248 gen_rtx_REG (DImode, regno + 2*i),
6249 GEN_INT (i*8));
6250 }
6251
6252 return regs;
6253 }
6254
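/* Example (illustrative): a 16-byte vector starting at FP register
   REGNO is split into two DImode halves,

     (parallel [(expr_list (reg:DI REGNO) (const_int 0))
		(expr_list (reg:DI REGNO+2) (const_int 8))]).  */
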
6255 /* Determine where to put an argument to a function.
6256 Value is zero to push the argument on the stack,
6257 or a hard register in which to store the argument.
6258
6259 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6260 the preceding args and about the function being called.
6261 MODE is the argument's machine mode.
6262 TYPE is the data type of the argument (as a tree).
6263 This is null for libcalls where that information may
6264 not be available.
6265 NAMED is true if this argument is a named parameter
6266 (otherwise it is an extra parameter matching an ellipsis).
6267 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6268 TARGET_FUNCTION_INCOMING_ARG. */
6269
6270 static rtx
6271 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6272 const_tree type, bool named, bool incoming_p)
6273 {
6274 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6275
6276 int regbase = (incoming_p
6277 ? SPARC_INCOMING_INT_ARG_FIRST
6278 : SPARC_OUTGOING_INT_ARG_FIRST);
6279 int slotno, regno, padding;
6280 enum mode_class mclass = GET_MODE_CLASS (mode);
6281
6282 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6283 &regno, &padding);
6284 if (slotno == -1)
6285 return 0;
6286
6287 /* Vector types deserve special treatment because they are polymorphic wrt
6288 their mode, depending upon whether VIS instructions are enabled. */
6289 if (type && TREE_CODE (type) == VECTOR_TYPE)
6290 {
6291 HOST_WIDE_INT size = int_size_in_bytes (type);
6292 gcc_assert ((TARGET_ARCH32 && size <= 8)
6293 || (TARGET_ARCH64 && size <= 16));
6294
6295 if (mode == BLKmode)
6296 return function_arg_vector_value (size,
6297 SPARC_FP_ARG_FIRST + 2*slotno);
6298 else
6299 mclass = MODE_FLOAT;
6300 }
6301
6302 if (TARGET_ARCH32)
6303 return gen_rtx_REG (mode, regno);
6304
6305 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6306 and are promoted to registers if possible. */
6307 if (type && TREE_CODE (type) == RECORD_TYPE)
6308 {
6309 HOST_WIDE_INT size = int_size_in_bytes (type);
6310 gcc_assert (size <= 16);
6311
6312 return function_arg_record_value (type, mode, slotno, named, regbase);
6313 }
6314
6315 /* Unions up to 16 bytes in size are passed in integer registers. */
6316 else if (type && TREE_CODE (type) == UNION_TYPE)
6317 {
6318 HOST_WIDE_INT size = int_size_in_bytes (type);
6319 gcc_assert (size <= 16);
6320
6321 return function_arg_union_value (size, mode, slotno, regno);
6322 }
6323
6324 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6325 but also have the slot allocated for them.
6326 If no prototype is in scope fp values in register slots get passed
6327 in two places, either fp regs and int regs or fp regs and memory. */
6328 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6329 && SPARC_FP_REG_P (regno))
6330 {
6331 rtx reg = gen_rtx_REG (mode, regno);
6332 if (cum->prototype_p || cum->libcall_p)
6333 {
6334 /* "* 2" because fp reg numbers are recorded in 4 byte
6335 quantities. */
6336 #if 0
6337 /* ??? This will cause the value to be passed in the fp reg and
6338 in the stack. When a prototype exists we want to pass the
6339 value in the reg but reserve space on the stack. That's an
6340 optimization, and is deferred [for a bit]. */
6341 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6342 return gen_rtx_PARALLEL (mode,
6343 gen_rtvec (2,
6344 gen_rtx_EXPR_LIST (VOIDmode,
6345 NULL_RTX, const0_rtx),
6346 gen_rtx_EXPR_LIST (VOIDmode,
6347 reg, const0_rtx)));
6348 else
6349 #else
6350 /* ??? It seems that passing back a register even when past
6351 the area declared by REG_PARM_STACK_SPACE will allocate
6352 space appropriately, and will not copy the data onto the
6353 stack, exactly as we desire.
6354
6355 This is due to locate_and_pad_parm being called in
6356 expand_call whenever reg_parm_stack_space > 0, which
6357 while beneficial to our example here, would seem to be
6358 in error from what had been intended. Ho hum... -- r~ */
6359 #endif
6360 return reg;
6361 }
6362 else
6363 {
6364 rtx v0, v1;
6365
6366 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6367 {
6368 int intreg;
6369
6370 /* On incoming, we don't need to know that the value
6371 is passed in %f0 and %i0, and it confuses other parts
6372 causing needless spillage even in the simplest cases. */
6373 if (incoming_p)
6374 return reg;
6375
6376 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6377 + (regno - SPARC_FP_ARG_FIRST) / 2);
6378
6379 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6380 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6381 const0_rtx);
6382 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6383 }
6384 else
6385 {
6386 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6387 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6388 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6389 }
6390 }
6391 }
6392
6393 /* All other aggregate types are passed in an integer register in a mode
6394 corresponding to the size of the type. */
6395 else if (type && AGGREGATE_TYPE_P (type))
6396 {
6397 HOST_WIDE_INT size = int_size_in_bytes (type);
6398 gcc_assert (size <= 16);
6399
6400 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6401 }
6402
6403 return gen_rtx_REG (mode, regno);
6404 }
6405
6406 /* Handle the TARGET_FUNCTION_ARG target hook. */
6407
6408 static rtx
6409 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6410 const_tree type, bool named)
6411 {
6412 return sparc_function_arg_1 (cum, mode, type, named, false);
6413 }
6414
6415 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6416
6417 static rtx
6418 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6419 const_tree type, bool named)
6420 {
6421 return sparc_function_arg_1 (cum, mode, type, named, true);
6422 }
6423
6424 /* For sparc64, objects requiring 16-byte alignment are passed that way. */
6425
6426 static unsigned int
6427 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6428 {
6429 return ((TARGET_ARCH64
6430 && (GET_MODE_ALIGNMENT (mode) == 128
6431 || (type && TYPE_ALIGN (type) == 128)))
6432 ? 128
6433 : PARM_BOUNDARY);
6434 }
6435
6436 /* For an arg passed partly in registers and partly in memory,
6437 this is the number of bytes of registers used.
6438 For args passed entirely in registers or entirely in memory, zero.
6439
6440 Any arg that starts in the first 6 regs but won't entirely fit in them
6441 needs partial registers on v8. On v9, structures with integer
6442 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6443 values that begin in the last fp reg [where "last fp reg" varies with the
6444 mode] will be split between that reg and memory. */
6445
6446 static int
6447 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6448 tree type, bool named)
6449 {
6450 int slotno, regno, padding;
6451
6452 /* We pass false for incoming_p here; it doesn't matter. */
6453 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6454 false, &regno, &padding);
6455
6456 if (slotno == -1)
6457 return 0;
6458
6459 if (TARGET_ARCH32)
6460 {
6461 if ((slotno + (mode == BLKmode
6462 ? ROUND_ADVANCE (int_size_in_bytes (type))
6463 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6464 > SPARC_INT_ARG_MAX)
6465 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6466 }
6467 else
6468 {
6469 /* We are guaranteed by pass_by_reference that the size of the
6470 argument is not greater than 16 bytes, so we only need to return
6471 one word if the argument is partially passed in registers. */
6472
6473 if (type && AGGREGATE_TYPE_P (type))
6474 {
6475 int size = int_size_in_bytes (type);
6476
6477 if (size > UNITS_PER_WORD
6478 && slotno == SPARC_INT_ARG_MAX - 1)
6479 return UNITS_PER_WORD;
6480 }
6481 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6482 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6483 && ! (TARGET_FPU && named)))
6484 {
6485 /* The complex types are passed as packed types. */
6486 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6487 && slotno == SPARC_INT_ARG_MAX - 1)
6488 return UNITS_PER_WORD;
6489 }
6490 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6491 {
6492 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6493 > SPARC_FP_ARG_MAX)
6494 return UNITS_PER_WORD;
6495 }
6496 }
6497
6498 return 0;
6499 }
6500
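/* Worked example (illustrative): on 32-bit, a BLKmode argument of 12
   bytes starting in slot 5 needs ROUND_ADVANCE (12) == 3 words, which
   overflows the 6 int slots, so (6 - 5) * 4 == 4 bytes travel in %o5
   and the remainder goes to memory.  */
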
6501 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6502 Specify whether to pass the argument by reference. */
6503
6504 static bool
6505 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6506 enum machine_mode mode, const_tree type,
6507 bool named ATTRIBUTE_UNUSED)
6508 {
6509 if (TARGET_ARCH32)
6510 /* Original SPARC 32-bit ABI says that structures and unions,
6511 and quad-precision floats are passed by reference. For Pascal,
6512 also pass arrays by reference. All other base types are passed
6513 in registers.
6514
6515 Extended ABI (as implemented by the Sun compiler) says that all
6516 complex floats are passed by reference. Pass complex integers
6517 in registers up to 8 bytes. More generally, enforce the 2-word
6518 cap for passing arguments in registers.
6519
6520 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6521 integers are passed like floats of the same size, that is in
6522 registers up to 8 bytes. Pass all vector floats by reference
6523 like structure and unions. */
6524 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6525 || mode == SCmode
6526 /* Catch CDImode, TFmode, DCmode and TCmode. */
6527 || GET_MODE_SIZE (mode) > 8
6528 || (type
6529 && TREE_CODE (type) == VECTOR_TYPE
6530 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6531 else
6532 /* Original SPARC 64-bit ABI says that structures and unions
6533 smaller than 16 bytes are passed in registers, as well as
6534 all other base types.
6535
6536 Extended ABI (as implemented by the Sun compiler) says that
6537 complex floats are passed in registers up to 16 bytes. Pass
6538 all complex integers in registers up to 16 bytes. More generally,
6539 enforce the 2-word cap for passing arguments in registers.
6540
6541 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6542 integers are passed like floats of the same size, that is in
6543 registers (up to 16 bytes). Pass all vector floats like structure
6544 and unions. */
6545 return ((type
6546 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6547 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6548 /* Catch CTImode and TCmode. */
6549 || GET_MODE_SIZE (mode) > 16);
6550 }
6551
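/* Examples (illustrative): on 32-bit, TFmode (16 bytes) and DCmode
   (16 bytes) exceed the 8-byte cap and are passed by reference, as is
   SCmode explicitly, while an 8-byte complex int still travels in
   registers; on 64-bit the cap doubles, so TFmode is passed by value
   and only 32-byte objects such as TCmode go by reference.  */
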
6552 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6553 Update the data in CUM to advance over an argument
6554 of mode MODE and data type TYPE.
6555 TYPE is null for libcalls where that information may not be available. */
6556
6557 static void
6558 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6559 const_tree type, bool named)
6560 {
6561 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6562 int regno, padding;
6563
6564 /* We pass false for incoming_p here; it doesn't matter. */
6565 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6566
6567 /* If argument requires leading padding, add it. */
6568 cum->words += padding;
6569
6570 if (TARGET_ARCH32)
6571 {
6572 cum->words += (mode != BLKmode
6573 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6574 : ROUND_ADVANCE (int_size_in_bytes (type)));
6575 }
6576 else
6577 {
6578 if (type && AGGREGATE_TYPE_P (type))
6579 {
6580 int size = int_size_in_bytes (type);
6581
6582 if (size <= 8)
6583 ++cum->words;
6584 else if (size <= 16)
6585 cum->words += 2;
6586 else /* passed by reference */
6587 ++cum->words;
6588 }
6589 else
6590 {
6591 cum->words += (mode != BLKmode
6592 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6593 : ROUND_ADVANCE (int_size_in_bytes (type)));
6594 }
6595 }
6596 }
6597
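/* Example (illustrative): on 64-bit, a 12-byte aggregate advances CUM
   by 2 words; on 32-bit, a double (8 bytes with UNITS_PER_WORD == 4)
   also advances it by 2 words.  */
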
6598 /* Handle the FUNCTION_ARG_PADDING macro.
6599 For the 64-bit ABI, structs are always stored left-justified in their
6600 argument slot. */
6601
6602 enum direction
6603 function_arg_padding (enum machine_mode mode, const_tree type)
6604 {
6605 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6606 return upward;
6607
6608 /* Fall back to the default. */
6609 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6610 }
6611
6612 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6613 Specify whether to return the return value in memory. */
6614
6615 static bool
6616 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6617 {
6618 if (TARGET_ARCH32)
6619 /* Original SPARC 32-bit ABI says that structures and unions,
6620 and quad-precision floats are returned in memory. All other
6621 base types are returned in registers.
6622
6623 Extended ABI (as implemented by the Sun compiler) says that
6624 all complex floats are returned in registers (8 FP registers
6625 at most for '_Complex long double'). Return all complex integers
6626 in registers (4 at most for '_Complex long long').
6627
6628 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6629 integers are returned like floats of the same size, that is in
6630 registers up to 8 bytes and in memory otherwise. Return all
6631 vector floats in memory like structure and unions; note that
6632 they always have BLKmode like the latter. */
6633 return (TYPE_MODE (type) == BLKmode
6634 || TYPE_MODE (type) == TFmode
6635 || (TREE_CODE (type) == VECTOR_TYPE
6636 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6637 else
6638 /* Original SPARC 64-bit ABI says that structures and unions
6639 smaller than 32 bytes are returned in registers, as well as
6640 all other base types.
6641
6642 Extended ABI (as implemented by the Sun compiler) says that all
6643 complex floats are returned in registers (8 FP registers at most
6644 for '_Complex long double'). Return all complex integers in
6645 registers (4 at most for '_Complex TItype').
6646
6647 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6648 integers are returned like floats of the same size, that is in
6649 registers. Return all vector floats like structure and unions;
6650 note that they always have BLKmode like the latter. */
6651 return (TYPE_MODE (type) == BLKmode
6652 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6653 }
6654
6655 /* Handle the TARGET_STRUCT_VALUE target hook.
6656 Return where to find the structure return value address. */
6657
6658 static rtx
6659 sparc_struct_value_rtx (tree fndecl, int incoming)
6660 {
6661 if (TARGET_ARCH64)
6662 return 0;
6663 else
6664 {
6665 rtx mem;
6666
6667 if (incoming)
6668 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6669 STRUCT_VALUE_OFFSET));
6670 else
6671 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6672 STRUCT_VALUE_OFFSET));
6673
6674 /* Only follow the SPARC ABI for fixed-size structure returns.
6675 Variable size structure returns are handled per the normal
6676 procedures in GCC. This is enabled by -mstd-struct-return. */
6677 if (incoming == 2
6678 && sparc_std_struct_return
6679 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6680 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6681 {
6682 /* We must check and adjust the return address, as it is
6683 optional as to whether the return object is really
6684 provided. */
6685 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6686 rtx scratch = gen_reg_rtx (SImode);
6687 rtx endlab = gen_label_rtx ();
6688
6689 /* Calculate the return object size. */
6690 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6691 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6692 /* Construct a temporary return value. */
6693 rtx temp_val
6694 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6695
6696 /* Implement SPARC 32-bit psABI callee return struct checking:
6697
6698 Fetch the instruction where we will return to and see if
6699 it's an unimp instruction (the most significant 10 bits
6700 will be zero). */
6701 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6702 plus_constant (ret_reg, 8)));
6703 /* Assume the size is valid and pre-adjust. */
6704 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6705 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6706 0, endlab);
6707 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6708 /* Write the address of the memory pointed to by temp_val into
6709 the memory pointed to by mem. */
6710 emit_move_insn (mem, XEXP (temp_val, 0));
6711 emit_label (endlab);
6712 }
6713
6714 return mem;
6715 }
6716 }
6717
6718 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6719 For v9, function return values are subject to the same rules as arguments,
6720 except that up to 32 bytes may be returned in registers. */
6721
6722 static rtx
6723 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6724 bool outgoing)
6725 {
6726 /* Beware that the two values are swapped here wrt function_arg. */
6727 int regbase = (outgoing
6728 ? SPARC_INCOMING_INT_ARG_FIRST
6729 : SPARC_OUTGOING_INT_ARG_FIRST);
6730 enum mode_class mclass = GET_MODE_CLASS (mode);
6731 int regno;
6732
6733 /* Vector types deserve special treatment because they are polymorphic wrt
6734 their mode, depending upon whether VIS instructions are enabled. */
6735 if (type && TREE_CODE (type) == VECTOR_TYPE)
6736 {
6737 HOST_WIDE_INT size = int_size_in_bytes (type);
6738 gcc_assert ((TARGET_ARCH32 && size <= 8)
6739 || (TARGET_ARCH64 && size <= 32));
6740
6741 if (mode == BLKmode)
6742 return function_arg_vector_value (size,
6743 SPARC_FP_ARG_FIRST);
6744 else
6745 mclass = MODE_FLOAT;
6746 }
6747
6748 if (TARGET_ARCH64 && type)
6749 {
6750 /* Structures up to 32 bytes in size are returned in registers. */
6751 if (TREE_CODE (type) == RECORD_TYPE)
6752 {
6753 HOST_WIDE_INT size = int_size_in_bytes (type);
6754 gcc_assert (size <= 32);
6755
6756 return function_arg_record_value (type, mode, 0, 1, regbase);
6757 }
6758
6759 /* Unions up to 32 bytes in size are returned in integer registers. */
6760 else if (TREE_CODE (type) == UNION_TYPE)
6761 {
6762 HOST_WIDE_INT size = int_size_in_bytes (type);
6763 gcc_assert (size <= 32);
6764
6765 return function_arg_union_value (size, mode, 0, regbase);
6766 }
6767
6768 /* Objects that require it are returned in FP registers. */
6769 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6770 ;
6771
6772 /* All other aggregate types are returned in an integer register in a
6773 mode corresponding to the size of the type. */
6774 else if (AGGREGATE_TYPE_P (type))
6775 {
6776 /* All other aggregate types are passed in an integer register
6777 in a mode corresponding to the size of the type. */
6778 HOST_WIDE_INT size = int_size_in_bytes (type);
6779 gcc_assert (size <= 32);
6780
6781 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6782
6783 /* ??? We probably should have made the same ABI change in
6784 3.4.0 as the one we made for unions. The latter was
6785 required by the SCD though, while the former is not
6786 specified, so we favored compatibility and efficiency.
6787
6788 Now we're stuck for aggregates larger than 16 bytes,
6789 because OImode vanished in the meantime. Let's not
6790 try to be unduly clever, and simply follow the ABI
6791 for unions in that case. */
6792 if (mode == BLKmode)
6793 return function_arg_union_value (size, mode, 0, regbase);
6794 else
6795 mclass = MODE_INT;
6796 }
6797
6798 /* We should only have pointer and integer types at this point. This
6799 must match sparc_promote_function_mode. */
6800 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6801 mode = word_mode;
6802 }
6803
6804 /* We should only have pointer and integer types at this point. This must
6805 match sparc_promote_function_mode. */
6806 else if (TARGET_ARCH32
6807 && mclass == MODE_INT
6808 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6809 mode = word_mode;
6810
6811 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6812 regno = SPARC_FP_ARG_FIRST;
6813 else
6814 regno = regbase;
6815
6816 return gen_rtx_REG (mode, regno);
6817 }
6818
6819 /* Handle TARGET_FUNCTION_VALUE.
6820 On the SPARC, the value is found in the first "output" register, but the
6821 called function leaves it in the first "input" register. */
6822
6823 static rtx
6824 sparc_function_value (const_tree valtype,
6825 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6826 bool outgoing)
6827 {
6828 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6829 }
6830
6831 /* Handle TARGET_LIBCALL_VALUE. */
6832
6833 static rtx
6834 sparc_libcall_value (enum machine_mode mode,
6835 const_rtx fun ATTRIBUTE_UNUSED)
6836 {
6837 return sparc_function_value_1 (NULL_TREE, mode, false);
6838 }
6839
6840 /* Handle FUNCTION_VALUE_REGNO_P.
6841 On the SPARC, the first "output" reg is used for integer values, and the
6842 first floating point register is used for floating point values. */
6843
6844 static bool
6845 sparc_function_value_regno_p (const unsigned int regno)
6846 {
6847 return (regno == 8 || regno == 32);
6848 }
6849
6850 /* Do what is necessary for `va_start'. We look at the current function
6851 to determine if stdarg or varargs is used and return the address of
6852 the first unnamed parameter. */
6853
6854 static rtx
6855 sparc_builtin_saveregs (void)
6856 {
6857 int first_reg = crtl->args.info.words;
6858 rtx address;
6859 int regno;
6860
6861 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6862 emit_move_insn (gen_rtx_MEM (word_mode,
6863 gen_rtx_PLUS (Pmode,
6864 frame_pointer_rtx,
6865 GEN_INT (FIRST_PARM_OFFSET (0)
6866 + (UNITS_PER_WORD
6867 * regno)))),
6868 gen_rtx_REG (word_mode,
6869 SPARC_INCOMING_INT_ARG_FIRST + regno));
6870
6871 address = gen_rtx_PLUS (Pmode,
6872 frame_pointer_rtx,
6873 GEN_INT (FIRST_PARM_OFFSET (0)
6874 + UNITS_PER_WORD * first_reg));
6875
6876 return address;
6877 }
6878
6879 /* Implement `va_start' for stdarg. */
6880
6881 static void
6882 sparc_va_start (tree valist, rtx nextarg)
6883 {
6884 nextarg = expand_builtin_saveregs ();
6885 std_expand_builtin_va_start (valist, nextarg);
6886 }
6887
6888 /* Implement `va_arg' for stdarg. */
6889
6890 static tree
6891 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6892 gimple_seq *post_p)
6893 {
6894 HOST_WIDE_INT size, rsize, align;
6895 tree addr, incr;
6896 bool indirect;
6897 tree ptrtype = build_pointer_type (type);
6898
6899 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6900 {
6901 indirect = true;
6902 size = rsize = UNITS_PER_WORD;
6903 align = 0;
6904 }
6905 else
6906 {
6907 indirect = false;
6908 size = int_size_in_bytes (type);
6909 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6910 align = 0;
6911
6912 if (TARGET_ARCH64)
6913 {
6914 /* For SPARC64, objects requiring 16-byte alignment get it. */
6915 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6916 align = 2 * UNITS_PER_WORD;
6917
6918 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6919 are left-justified in their slots. */
6920 if (AGGREGATE_TYPE_P (type))
6921 {
6922 if (size == 0)
6923 size = rsize = UNITS_PER_WORD;
6924 else
6925 size = rsize;
6926 }
6927 }
6928 }
6929
6930 incr = valist;
6931 if (align)
6932 {
6933 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6934 incr = fold_convert (sizetype, incr);
6935 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6936 size_int (-align));
6937 incr = fold_convert (ptr_type_node, incr);
6938 }
6939
6940 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6941 addr = incr;
6942
6943 if (BYTES_BIG_ENDIAN && size < rsize)
6944 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6945
6946 if (indirect)
6947 {
6948 addr = fold_convert (build_pointer_type (ptrtype), addr);
6949 addr = build_va_arg_indirect_ref (addr);
6950 }
6951
6952 /* If the address isn't aligned properly for the type, we need a temporary.
6953 FIXME: This is inefficient; usually we can do this in registers. */
6954 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6955 {
6956 tree tmp = create_tmp_var (type, "va_arg_tmp");
6957 tree dest_addr = build_fold_addr_expr (tmp);
6958 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
6959 3, dest_addr, addr, size_int (rsize));
6960 TREE_ADDRESSABLE (tmp) = 1;
6961 gimplify_and_add (copy, pre_p);
6962 addr = dest_addr;
6963 }
6964
6965 else
6966 addr = fold_convert (ptrtype, addr);
6967
6968 incr = fold_build_pointer_plus_hwi (incr, rsize);
6969 gimplify_assign (valist, incr, post_p);
6970
6971 return build_va_arg_indirect_ref (addr);
6972 }
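
/* Worked example (illustrative): fetching a 16-byte-aligned type on
   SPARC64 sets ALIGN to 16 above, so the va_list pointer is first
   rounded up via (ptr + 15) & -16, the object is read, and the
   pointer is then advanced by RSIZE, the size rounded up to a
   multiple of UNITS_PER_WORD.  */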
6973 \f
6974 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6975 Specify whether the vector mode is supported by the hardware. */
6976
6977 static bool
6978 sparc_vector_mode_supported_p (enum machine_mode mode)
6979 {
6980 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
6981 }
6982 \f
6983 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6984
6985 static enum machine_mode
6986 sparc_preferred_simd_mode (enum machine_mode mode)
6987 {
6988 if (TARGET_VIS)
6989 switch (mode)
6990 {
6991 case SImode:
6992 return V2SImode;
6993 case HImode:
6994 return V4HImode;
6995 case QImode:
6996 return V8QImode;
6997
6998 default:;
6999 }
7000
7001 return word_mode;
7002 }
7003 \f
7004 /* Return the string to output an unconditional branch to LABEL, which is
7005 the operand number of the label.
7006
7007 DEST is the destination insn (i.e. the label), INSN is the source. */
7008
7009 const char *
7010 output_ubranch (rtx dest, int label, rtx insn)
7011 {
7012 static char string[64];
7013 bool v9_form = false;
7014 char *p;
7015
7016 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
7017 {
7018 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7019 - INSN_ADDRESSES (INSN_UID (insn)));
7020 /* Leave some instructions for "slop". */
7021 if (delta >= -260000 && delta < 260000)
7022 v9_form = true;
7023 }
7024
7025 if (v9_form)
7026 strcpy (string, "ba%*,pt\t%%xcc, ");
7027 else
7028 strcpy (string, "b%*\t");
7029
7030 p = strchr (string, '\0');
7031 *p++ = '%';
7032 *p++ = 'l';
7033 *p++ = '0' + label;
7034 *p++ = '%';
7035 *p++ = '(';
7036 *p = '\0';
7037
7038 return string;
7039 }
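
/* Note that lengths, and hence INSN_ADDRESSES, are counted in
   instruction words on SPARC, so the +-260000 window above amounts
   to roughly +-1MB, comfortably inside the V9 branch displacement
   range discussed before output_cbranch below.  */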
7040
7041 /* Return the string to output a conditional branch to LABEL, which is
7042 the operand number of the label. OP is the conditional expression.
7043 XEXP (OP, 0) is assumed to be a condition code register (integer or
7044 floating point) and its mode specifies what kind of comparison we made.
7045
7046 DEST is the destination insn (i.e. the label), INSN is the source.
7047
7048 REVERSED is nonzero if we should reverse the sense of the comparison.
7049
7050 ANNUL is nonzero if we should generate an annulling branch. */
7051
7052 const char *
7053 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7054 rtx insn)
7055 {
7056 static char string[64];
7057 enum rtx_code code = GET_CODE (op);
7058 rtx cc_reg = XEXP (op, 0);
7059 enum machine_mode mode = GET_MODE (cc_reg);
7060 const char *labelno, *branch;
7061 int spaces = 8, far;
7062 char *p;
7063
7064 /* v9 branches are limited to +-1MB. If it is too far away,
7065 change
7066
7067 bne,pt %xcc, .LC30
7068
7069 to
7070
7071 be,pn %xcc, .+12
7072 nop
7073 ba .LC30
7074
7075 and
7076
7077 fbne,a,pn %fcc2, .LC29
7078
7079 to
7080
7081 fbe,pt %fcc2, .+16
7082 nop
7083 ba .LC29 */
7084
7085 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7086 if (reversed ^ far)
7087 {
7088 /* Reversal of FP compares takes care -- an ordered compare
7089 becomes an unordered compare and vice versa. */
7090 if (mode == CCFPmode || mode == CCFPEmode)
7091 code = reverse_condition_maybe_unordered (code);
7092 else
7093 code = reverse_condition (code);
7094 }
7095
7096 /* Start by writing the branch condition. */
7097 if (mode == CCFPmode || mode == CCFPEmode)
7098 {
7099 switch (code)
7100 {
7101 case NE:
7102 branch = "fbne";
7103 break;
7104 case EQ:
7105 branch = "fbe";
7106 break;
7107 case GE:
7108 branch = "fbge";
7109 break;
7110 case GT:
7111 branch = "fbg";
7112 break;
7113 case LE:
7114 branch = "fble";
7115 break;
7116 case LT:
7117 branch = "fbl";
7118 break;
7119 case UNORDERED:
7120 branch = "fbu";
7121 break;
7122 case ORDERED:
7123 branch = "fbo";
7124 break;
7125 case UNGT:
7126 branch = "fbug";
7127 break;
7128 case UNLT:
7129 branch = "fbul";
7130 break;
7131 case UNEQ:
7132 branch = "fbue";
7133 break;
7134 case UNGE:
7135 branch = "fbuge";
7136 break;
7137 case UNLE:
7138 branch = "fbule";
7139 break;
7140 case LTGT:
7141 branch = "fblg";
7142 break;
7143
7144 default:
7145 gcc_unreachable ();
7146 }
7147
7148 /* ??? !v9: FP branches cannot be preceded by another floating point
7149 insn. Because there is currently no concept of pre-delay slots,
7150 we can fix this only by always emitting a nop before a floating
7151 point branch. */
7152
7153 string[0] = '\0';
7154 if (! TARGET_V9)
7155 strcpy (string, "nop\n\t");
7156 strcat (string, branch);
7157 }
7158 else
7159 {
7160 switch (code)
7161 {
7162 case NE:
7163 branch = "bne";
7164 break;
7165 case EQ:
7166 branch = "be";
7167 break;
7168 case GE:
7169 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7170 branch = "bpos";
7171 else
7172 branch = "bge";
7173 break;
7174 case GT:
7175 branch = "bg";
7176 break;
7177 case LE:
7178 branch = "ble";
7179 break;
7180 case LT:
7181 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7182 branch = "bneg";
7183 else
7184 branch = "bl";
7185 break;
7186 case GEU:
7187 branch = "bgeu";
7188 break;
7189 case GTU:
7190 branch = "bgu";
7191 break;
7192 case LEU:
7193 branch = "bleu";
7194 break;
7195 case LTU:
7196 branch = "blu";
7197 break;
7198
7199 default:
7200 gcc_unreachable ();
7201 }
7202 strcpy (string, branch);
7203 }
7204 spaces -= strlen (branch);
7205 p = strchr (string, '\0');
7206
7207 /* Now add the annulling, the label, and a possible noop. */
7208 if (annul && ! far)
7209 {
7210 strcpy (p, ",a");
7211 p += 2;
7212 spaces -= 2;
7213 }
7214
7215 if (TARGET_V9)
7216 {
7217 rtx note;
7218 int v8 = 0;
7219
7220 if (! far && insn && INSN_ADDRESSES_SET_P ())
7221 {
7222 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7223 - INSN_ADDRESSES (INSN_UID (insn)));
7224 /* Leave some instructions for "slop". */
7225 if (delta < -260000 || delta >= 260000)
7226 v8 = 1;
7227 }
7228
7229 if (mode == CCFPmode || mode == CCFPEmode)
7230 {
7231 static char v9_fcc_labelno[] = "%%fccX, ";
7232 /* Set the char indicating the number of the fcc reg to use. */
7233 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7234 labelno = v9_fcc_labelno;
7235 if (v8)
7236 {
7237 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7238 labelno = "";
7239 }
7240 }
7241 else if (mode == CCXmode || mode == CCX_NOOVmode)
7242 {
7243 labelno = "%%xcc, ";
7244 gcc_assert (! v8);
7245 }
7246 else
7247 {
7248 labelno = "%%icc, ";
7249 if (v8)
7250 labelno = "";
7251 }
7252
7253 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7254 {
7255 strcpy (p,
7256 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7257 ? ",pt" : ",pn");
7258 p += 3;
7259 spaces -= 3;
7260 }
7261 }
7262 else
7263 labelno = "";
7264
7265 if (spaces > 0)
7266 *p++ = '\t';
7267 else
7268 *p++ = ' ';
7269 strcpy (p, labelno);
7270 p = strchr (p, '\0');
7271 if (far)
7272 {
7273 strcpy (p, ".+12\n\t nop\n\tb\t");
7274 /* Skip the next insn if requested or
7275 if we know that it will be a nop. */
7276 if (annul || ! final_sequence)
7277 p[3] = '6';
7278 p += 14;
7279 }
7280 *p++ = '%';
7281 *p++ = 'l';
7282 *p++ = label + '0';
7283 *p++ = '%';
7284 *p++ = '#';
7285 *p = '\0';
7286
7287 return string;
7288 }
7289
7290 /* Emit a library call comparison between floating point X and Y.
7291 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7292 Return the new operator to be used in the comparison sequence.
7293
7294 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7295 values as arguments instead of the TFmode registers themselves;
7296 that's why we cannot call emit_float_lib_cmp. */
7297
7298 rtx
7299 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7300 {
7301 const char *qpfunc;
7302 rtx slot0, slot1, result, tem, tem2, libfunc;
7303 enum machine_mode mode;
7304 enum rtx_code new_comparison;
7305
7306 switch (comparison)
7307 {
7308 case EQ:
7309 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7310 break;
7311
7312 case NE:
7313 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7314 break;
7315
7316 case GT:
7317 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7318 break;
7319
7320 case GE:
7321 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7322 break;
7323
7324 case LT:
7325 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7326 break;
7327
7328 case LE:
7329 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7330 break;
7331
7332 case ORDERED:
7333 case UNORDERED:
7334 case UNGT:
7335 case UNLT:
7336 case UNEQ:
7337 case UNGE:
7338 case UNLE:
7339 case LTGT:
7340 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7341 break;
7342
7343 default:
7344 gcc_unreachable ();
7345 }
7346
7347 if (TARGET_ARCH64)
7348 {
7349 if (MEM_P (x))
7350 slot0 = x;
7351 else
7352 {
7353 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7354 emit_move_insn (slot0, x);
7355 }
7356
7357 if (MEM_P (y))
7358 slot1 = y;
7359 else
7360 {
7361 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7362 emit_move_insn (slot1, y);
7363 }
7364
7365 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7366 emit_library_call (libfunc, LCT_NORMAL,
7367 DImode, 2,
7368 XEXP (slot0, 0), Pmode,
7369 XEXP (slot1, 0), Pmode);
7370 mode = DImode;
7371 }
7372 else
7373 {
7374 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7375 emit_library_call (libfunc, LCT_NORMAL,
7376 SImode, 2,
7377 x, TFmode, y, TFmode);
7378 mode = SImode;
7379 }
7380
7381
7382 /* Immediately move the result of the libcall into a pseudo
7383 register so reload doesn't clobber the value if it needs
7384 the return register for a spill reg. */
7385 result = gen_reg_rtx (mode);
7386 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7387
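  /* The switch below assumes the libcall's result encoding:
     0 = equal, 1 = less, 2 = greater, 3 = unordered.  For example,
     UNLT tests (result & 1) != 0, which selects exactly {1, 3},
     i.e. less-than or unordered.  */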
7388 switch (comparison)
7389 {
7390 default:
7391 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7392 case ORDERED:
7393 case UNORDERED:
7394 new_comparison = (comparison == UNORDERED ? EQ : NE);
7395 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
7396 case UNGT:
7397 case UNGE:
7398 new_comparison = (comparison == UNGT ? GT : NE);
7399 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7400 case UNLE:
7401 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7402 case UNLT:
7403 tem = gen_reg_rtx (mode);
7404 if (TARGET_ARCH32)
7405 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7406 else
7407 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7408 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7409 case UNEQ:
7410 case LTGT:
7411 tem = gen_reg_rtx (mode);
7412 if (TARGET_ARCH32)
7413 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7414 else
7415 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7416 tem2 = gen_reg_rtx (mode);
7417 if (TARGET_ARCH32)
7418 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7419 else
7420 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7421 new_comparison = (comparison == UNEQ ? EQ : NE);
7422 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7423 }
7424
7425 gcc_unreachable ();
7426 }
7427
7428 /* Generate an unsigned DImode to FP conversion. This is the same code
7429 optabs would emit if we didn't have TFmode patterns. */
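/* Roughly, in C terms (an illustrative sketch only, for unsigned
   long long x):

     if ((long long) x >= 0)
       result = (double) x;
     else
       result = 2.0 * (double) ((x >> 1) | (x & 1));

   Or-ing the shifted-out bit back in keeps it as a "sticky" bit, so
   halving, converting and doubling still rounds correctly.  */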
7430
7431 void
7432 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7433 {
7434 rtx neglab, donelab, i0, i1, f0, in, out;
7435
7436 out = operands[0];
7437 in = force_reg (DImode, operands[1]);
7438 neglab = gen_label_rtx ();
7439 donelab = gen_label_rtx ();
7440 i0 = gen_reg_rtx (DImode);
7441 i1 = gen_reg_rtx (DImode);
7442 f0 = gen_reg_rtx (mode);
7443
7444 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7445
7446 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7447 emit_jump_insn (gen_jump (donelab));
7448 emit_barrier ();
7449
7450 emit_label (neglab);
7451
7452 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7453 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7454 emit_insn (gen_iordi3 (i0, i0, i1));
7455 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7456 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7457
7458 emit_label (donelab);
7459 }
7460
7461 /* Generate an FP to unsigned DImode conversion. This is the same code
7462 optabs would emit if we didn't have TFmode patterns. */
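/* Roughly, in C terms (an illustrative sketch only):

     if (x < 9223372036854775808.0)        i.e. x < 2^63
       result = (long long) x;
     else
       result = (long long) (x - 9223372036854775808.0) ^ (1ULL << 63);
*/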
7463
7464 void
7465 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7466 {
7467 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7468
7469 out = operands[0];
7470 in = force_reg (mode, operands[1]);
7471 neglab = gen_label_rtx ();
7472 donelab = gen_label_rtx ();
7473 i0 = gen_reg_rtx (DImode);
7474 i1 = gen_reg_rtx (DImode);
7475 limit = gen_reg_rtx (mode);
7476 f0 = gen_reg_rtx (mode);
7477
7478 emit_move_insn (limit,
7479 CONST_DOUBLE_FROM_REAL_VALUE (
7480 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7481 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7482
7483 emit_insn (gen_rtx_SET (VOIDmode,
7484 out,
7485 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7486 emit_jump_insn (gen_jump (donelab));
7487 emit_barrier ();
7488
7489 emit_label (neglab);
7490
7491 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7492 emit_insn (gen_rtx_SET (VOIDmode,
7493 i0,
7494 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7495 emit_insn (gen_movdi (i1, const1_rtx));
7496 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7497 emit_insn (gen_xordi3 (out, i0, i1));
7498
7499 emit_label (donelab);
7500 }
7501
7502 /* Return the string to output a conditional branch to LABEL, testing
7503 register REG. LABEL is the operand number of the label; REG is the
7504 operand number of the reg. OP is the conditional expression. The mode
7505 of REG says what kind of comparison we made.
7506
7507 DEST is the destination insn (i.e. the label), INSN is the source.
7508
7509 REVERSED is nonzero if we should reverse the sense of the comparison.
7510
7511 ANNUL is nonzero if we should generate an annulling branch. */
7512
7513 const char *
7514 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7515 int annul, rtx insn)
7516 {
7517 static char string[64];
7518 enum rtx_code code = GET_CODE (op);
7519 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7520 rtx note;
7521 int far;
7522 char *p;
7523
7524 /* Branches on a register are limited to +-128KB. If the target is too
7525 far away, change
7526
7527 brnz,pt %g1, .LC30
7528
7529 to
7530
7531 brz,pn %g1, .+12
7532 nop
7533 ba,pt %xcc, .LC30
7534
7535 and
7536
7537 brgez,a,pn %o1, .LC29
7538
7539 to
7540
7541 brlz,pt %o1, .+16
7542 nop
7543 ba,pt %xcc, .LC29 */
7544
7545 far = get_attr_length (insn) >= 3;
7546
7547 /* These are integer branches on a register, so we can always reverse the code. */
7548 if (reversed ^ far)
7549 code = reverse_condition (code);
7550
7551 /* Only 64-bit versions of these instructions exist. */
7552 gcc_assert (mode == DImode);
7553
7554 /* Start by writing the branch condition. */
7555
7556 switch (code)
7557 {
7558 case NE:
7559 strcpy (string, "brnz");
7560 break;
7561
7562 case EQ:
7563 strcpy (string, "brz");
7564 break;
7565
7566 case GE:
7567 strcpy (string, "brgez");
7568 break;
7569
7570 case LT:
7571 strcpy (string, "brlz");
7572 break;
7573
7574 case LE:
7575 strcpy (string, "brlez");
7576 break;
7577
7578 case GT:
7579 strcpy (string, "brgz");
7580 break;
7581
7582 default:
7583 gcc_unreachable ();
7584 }
7585
7586 p = strchr (string, '\0');
7587
7588 /* Now add the annulling, reg, label, and nop. */
7589 if (annul && ! far)
7590 {
7591 strcpy (p, ",a");
7592 p += 2;
7593 }
7594
7595 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7596 {
7597 strcpy (p,
7598 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7599 ? ",pt" : ",pn");
7600 p += 3;
7601 }
7602
7603 *p = p < string + 8 ? '\t' : ' ';
7604 p++;
7605 *p++ = '%';
7606 *p++ = '0' + reg;
7607 *p++ = ',';
7608 *p++ = ' ';
7609 if (far)
7610 {
7611 int veryfar = 1, delta;
7612
7613 if (INSN_ADDRESSES_SET_P ())
7614 {
7615 delta = (INSN_ADDRESSES (INSN_UID (dest))
7616 - INSN_ADDRESSES (INSN_UID (insn)));
7617 /* Leave some instructions for "slop". */
7618 if (delta >= -260000 && delta < 260000)
7619 veryfar = 0;
7620 }
7621
7622 strcpy (p, ".+12\n\t nop\n\t");
7623 /* Skip the next insn if requested or
7624 if we know that it will be a nop. */
7625 if (annul || ! final_sequence)
7626 p[3] = '6';
7627 p += 12;
7628 if (veryfar)
7629 {
7630 strcpy (p, "b\t");
7631 p += 2;
7632 }
7633 else
7634 {
7635 strcpy (p, "ba,pt\t%%xcc, ");
7636 p += 13;
7637 }
7638 }
7639 *p++ = '%';
7640 *p++ = 'l';
7641 *p++ = '0' + label;
7642 *p++ = '%';
7643 *p++ = '#';
7644 *p = '\0';
7645
7646 return string;
7647 }
7648
7649 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7650 Such instructions cannot be used in the delay slot of the return insn on V9.
7651 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7652 */
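/* For example (illustrative), if the delay slot of the return insn
   contains "mov %i1, %i0", the renaming pass (TEST == 0) rewrites it
   to "mov %o1, %o0", since the delay slot executes in the caller's
   register window once the window has been restored.  */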
7653
7654 static int
7655 epilogue_renumber (register rtx *where, int test)
7656 {
7657 register const char *fmt;
7658 register int i;
7659 register enum rtx_code code;
7660
7661 if (*where == 0)
7662 return 0;
7663
7664 code = GET_CODE (*where);
7665
7666 switch (code)
7667 {
7668 case REG:
7669 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7670 return 1;
7671 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7672 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7673 case SCRATCH:
7674 case CC0:
7675 case PC:
7676 case CONST_INT:
7677 case CONST_DOUBLE:
7678 return 0;
7679
7680 /* Do not replace the frame pointer with the stack pointer because
7681 it can cause the delayed instruction to load below the stack.
7682 This occurs when instructions like:
7683
7684 (set (reg/i:SI 24 %i0)
7685 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7686 (const_int -20 [0xffffffec])) 0))
7687
7688 are in the return delayed slot. */
7689 case PLUS:
7690 if (GET_CODE (XEXP (*where, 0)) == REG
7691 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7692 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7693 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7694 return 1;
7695 break;
7696
7697 case MEM:
7698 if (SPARC_STACK_BIAS
7699 && GET_CODE (XEXP (*where, 0)) == REG
7700 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7701 return 1;
7702 break;
7703
7704 default:
7705 break;
7706 }
7707
7708 fmt = GET_RTX_FORMAT (code);
7709
7710 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7711 {
7712 if (fmt[i] == 'E')
7713 {
7714 register int j;
7715 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7716 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7717 return 1;
7718 }
7719 else if (fmt[i] == 'e'
7720 && epilogue_renumber (&(XEXP (*where, i)), test))
7721 return 1;
7722 }
7723 return 0;
7724 }
7725 \f
7726 /* Leaf functions and non-leaf functions have different needs. */
7727
7728 static const int
7729 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7730
7731 static const int
7732 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7733
7734 static const int *const reg_alloc_orders[] = {
7735 reg_leaf_alloc_order,
7736 reg_nonleaf_alloc_order};
7737
7738 void
7739 order_regs_for_local_alloc (void)
7740 {
7741 static int last_order_nonleaf = 1;
7742
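  /* Hard register 15 is %o7, which holds the return address of a call,
     so it has been live exactly when the current function is not a
     leaf.  */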
7743 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7744 {
7745 last_order_nonleaf = !last_order_nonleaf;
7746 memcpy ((char *) reg_alloc_order,
7747 (const char *) reg_alloc_orders[last_order_nonleaf],
7748 FIRST_PSEUDO_REGISTER * sizeof (int));
7749 }
7750 }
7751 \f
7752 /* Return 1 if REG and MEM are legitimate enough to allow the various
7753 mem<-->reg splits to be run. */
7754
7755 int
7756 sparc_splitdi_legitimate (rtx reg, rtx mem)
7757 {
7758 /* Punt if we are here by mistake. */
7759 gcc_assert (reload_completed);
7760
7761 /* We must have an offsettable memory reference. */
7762 if (! offsettable_memref_p (mem))
7763 return 0;
7764
7765 /* If we have legitimate args for ldd/std, we do not want
7766 the split to happen. */
7767 if ((REGNO (reg) % 2) == 0
7768 && mem_min_alignment (mem, 8))
7769 return 0;
7770
7771 /* Success. */
7772 return 1;
7773 }
7774
7775 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
7776
7777 int
7778 sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
7779 {
7780 int regno1, regno2;
7781
7782 if (GET_CODE (reg1) == SUBREG)
7783 reg1 = SUBREG_REG (reg1);
7784 if (GET_CODE (reg1) != REG)
7785 return 0;
7786 regno1 = REGNO (reg1);
7787
7788 if (GET_CODE (reg2) == SUBREG)
7789 reg2 = SUBREG_REG (reg2);
7790 if (GET_CODE (reg2) != REG)
7791 return 0;
7792 regno2 = REGNO (reg2);
7793
7794 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
7795 return 1;
7796
7797 if (TARGET_VIS3)
7798 {
7799 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
7800 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
7801 return 1;
7802 }
7803
7804 return 0;
7805 }
7806
7807 /* Return 1 if x and y are some kind of REG and they refer to
7808 different hard registers. This test is guaranteed to be
7809 run after reload. */
7810
7811 int
7812 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7813 {
7814 if (GET_CODE (x) != REG)
7815 return 0;
7816 if (GET_CODE (y) != REG)
7817 return 0;
7818 if (REGNO (x) == REGNO (y))
7819 return 0;
7820 return 1;
7821 }
7822
7823 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7824 This makes them candidates for using ldd and std insns.
7825
7826 Note reg1 and reg2 *must* be hard registers. */
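/* For example, the pair %o0/%o1 (hard regs 8 and 9) qualifies, but
   %o1/%o2 does not, since the first register number must be even.  */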
7827
7828 int
7829 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7830 {
7831 /* We might have been passed a SUBREG. */
7832 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7833 return 0;
7834
7835 if (REGNO (reg1) % 2 != 0)
7836 return 0;
7837
7838 /* Integer ldd is deprecated in SPARC V9. */
7839 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
7840 return 0;
7841
7842 return (REGNO (reg1) == REGNO (reg2) - 1);
7843 }
7844
7845 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7846 an ldd or std insn.
7847
7848 This can only happen when addr1 and addr2, the addresses in mem1
7849 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7850 addr1 must also be aligned on a 64-bit boundary.
7851
7852 Also, if dependent_reg_rtx is not null, it should not be used to
7853 compute the address for mem1, i.e. we cannot optimize a sequence
7854 like:
7855 ld [%o0], %o0
7856 ld [%o0 + 4], %o1
7857 to
7858 ldd [%o0], %o0
7859 nor:
7860 ld [%g3 + 4], %g3
7861 ld [%g3], %g2
7862 to
7863 ldd [%g3], %g2
7864
7865 But, note that the transformation from:
7866 ld [%g2 + 4], %g3
7867 ld [%g2], %g2
7868 to
7869 ldd [%g2], %g2
7870 is perfectly fine. Thus, the peephole2 patterns always pass us
7871 the destination register of the first load, never the second one.
7872
7873 For stores we don't have a similar problem, so dependent_reg_rtx is
7874 NULL_RTX. */
7875
7876 int
7877 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7878 {
7879 rtx addr1, addr2;
7880 unsigned int reg1;
7881 HOST_WIDE_INT offset1;
7882
7883 /* The mems cannot be volatile. */
7884 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7885 return 0;
7886
7887 /* MEM1 should be aligned on a 64-bit boundary. */
7888 if (MEM_ALIGN (mem1) < 64)
7889 return 0;
7890
7891 addr1 = XEXP (mem1, 0);
7892 addr2 = XEXP (mem2, 0);
7893
7894 /* Extract a register number and offset (if used) from the first addr. */
7895 if (GET_CODE (addr1) == PLUS)
7896 {
7897 /* If not a REG, return zero. */
7898 if (GET_CODE (XEXP (addr1, 0)) != REG)
7899 return 0;
7900 else
7901 {
7902 reg1 = REGNO (XEXP (addr1, 0));
7903 /* The offset must be constant! */
7904 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7905 return 0;
7906 offset1 = INTVAL (XEXP (addr1, 1));
7907 }
7908 }
7909 else if (GET_CODE (addr1) != REG)
7910 return 0;
7911 else
7912 {
7913 reg1 = REGNO (addr1);
7914 /* This was a simple (mem (reg)) expression. Offset is 0. */
7915 offset1 = 0;
7916 }
7917
7918 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
7919 if (GET_CODE (addr2) != PLUS)
7920 return 0;
7921
7922 if (GET_CODE (XEXP (addr2, 0)) != REG
7923 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7924 return 0;
7925
7926 if (reg1 != REGNO (XEXP (addr2, 0)))
7927 return 0;
7928
7929 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7930 return 0;
7931
7932 /* The first offset must be evenly divisible by 8 to ensure the
7933 address is 64 bit aligned. */
7934 if (offset1 % 8 != 0)
7935 return 0;
7936
7937 /* The offset for the second addr must be 4 more than the first addr. */
7938 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7939 return 0;
7940
7941 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7942 instructions. */
7943 return 1;
7944 }
7945
7946 /* Return 1 if reg is a pseudo, or is the first register in
7947 a hard register pair. This makes it suitable for use in
7948 ldd and std insns. */
7949
7950 int
7951 register_ok_for_ldd (rtx reg)
7952 {
7953 /* We might have been passed a SUBREG. */
7954 if (!REG_P (reg))
7955 return 0;
7956
7957 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7958 return (REGNO (reg) % 2 == 0);
7959
7960 return 1;
7961 }
7962
7963 /* Return 1 if OP is a memory whose address is known to be
7964 aligned to 8-byte boundary, or a pseudo during reload.
7965 This makes it suitable for use in ldd and std insns. */
7966
7967 int
7968 memory_ok_for_ldd (rtx op)
7969 {
7970 if (MEM_P (op))
7971 {
7972 /* In 64-bit mode, we assume that the address is word-aligned. */
7973 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7974 return 0;
7975
7976 if (! can_create_pseudo_p ()
7977 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7978 return 0;
7979 }
7980 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7981 {
7982 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7983 return 0;
7984 }
7985 else
7986 return 0;
7987
7988 return 1;
7989 }
7990 \f
7991 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7992
7993 static bool
7994 sparc_print_operand_punct_valid_p (unsigned char code)
7995 {
7996 if (code == '#'
7997 || code == '*'
7998 || code == '('
7999 || code == ')'
8000 || code == '_'
8001 || code == '&')
8002 return true;
8003
8004 return false;
8005 }
8006
8007 /* Implement TARGET_PRINT_OPERAND.
8008 Print operand X (an rtx) in assembler syntax to file FILE.
8009 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8010 For `%' followed by punctuation, CODE is the punctuation and X is null. */
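/* For example (illustrative), with a register pair starting at %o0 as
   operand 0, "%H0" and "%L0" in an output template print the high and
   low words of the pair, and "%r0" prints "%g0" whenever operand 0 is
   a zero constant.  */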
8011
8012 static void
8013 sparc_print_operand (FILE *file, rtx x, int code)
8014 {
8015 switch (code)
8016 {
8017 case '#':
8018 /* Output an insn in a delay slot. */
8019 if (final_sequence)
8020 sparc_indent_opcode = 1;
8021 else
8022 fputs ("\n\t nop", file);
8023 return;
8024 case '*':
8025 /* Output an annul flag if there's nothing for the delay slot and we
8026 are optimizing. This is always used with '(' below.
8027 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8028 this is a dbx bug. So, we only do this when optimizing.
8029 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8030 Always emit a nop in case the next instruction is a branch. */
8031 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8032 fputs (",a", file);
8033 return;
8034 case '(':
8035 /* Output a 'nop' if there's nothing for the delay slot and we are
8036 not optimizing. This is always used with '*' above. */
8037 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8038 fputs ("\n\t nop", file);
8039 else if (final_sequence)
8040 sparc_indent_opcode = 1;
8041 return;
8042 case ')':
8043 /* Output the right displacement from the saved PC on function return.
8044 The caller may have placed an "unimp" insn immediately after the call
8045 so we have to account for it. This insn is used in the 32-bit ABI
8046 when calling a function that returns a non zero-sized structure. The
8047 64-bit ABI doesn't have it. Be careful to have this test be the same
8048 as that for the call. The exception is when sparc_std_struct_return
8049 is enabled, the psABI is followed exactly and the adjustment is made
8050 by the code in sparc_struct_value_rtx. The call emitted is the same
8051 when sparc_std_struct_return is enabled. */
8052 if (!TARGET_ARCH64
8053 && cfun->returns_struct
8054 && !sparc_std_struct_return
8055 && DECL_SIZE (DECL_RESULT (current_function_decl))
8056 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8057 == INTEGER_CST
8058 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8059 fputs ("12", file);
8060 else
8061 fputc ('8', file);
8062 return;
8063 case '_':
8064 /* Output the Embedded Medium/Anywhere code model base register. */
8065 fputs (EMBMEDANY_BASE_REG, file);
8066 return;
8067 case '&':
8068 /* Print some local dynamic TLS name. */
8069 assemble_name (file, get_some_local_dynamic_name ());
8070 return;
8071
8072 case 'Y':
8073 /* Adjust the operand to take into account a RESTORE operation. */
8074 if (GET_CODE (x) == CONST_INT)
8075 break;
8076 else if (GET_CODE (x) != REG)
8077 output_operand_lossage ("invalid %%Y operand");
8078 else if (REGNO (x) < 8)
8079 fputs (reg_names[REGNO (x)], file);
8080 else if (REGNO (x) >= 24 && REGNO (x) < 32)
8081 fputs (reg_names[REGNO (x)-16], file);
8082 else
8083 output_operand_lossage ("invalid %%Y operand");
8084 return;
8085 case 'L':
8086 /* Print out the low order register name of a register pair. */
8087 if (WORDS_BIG_ENDIAN)
8088 fputs (reg_names[REGNO (x)+1], file);
8089 else
8090 fputs (reg_names[REGNO (x)], file);
8091 return;
8092 case 'H':
8093 /* Print out the high order register name of a register pair. */
8094 if (WORDS_BIG_ENDIAN)
8095 fputs (reg_names[REGNO (x)], file);
8096 else
8097 fputs (reg_names[REGNO (x)+1], file);
8098 return;
8099 case 'R':
8100 /* Print out the second register name of a register pair or quad.
8101 I.e., R (%o0) => %o1. */
8102 fputs (reg_names[REGNO (x)+1], file);
8103 return;
8104 case 'S':
8105 /* Print out the third register name of a register quad.
8106 I.e., S (%o0) => %o2. */
8107 fputs (reg_names[REGNO (x)+2], file);
8108 return;
8109 case 'T':
8110 /* Print out the fourth register name of a register quad.
8111 I.e., T (%o0) => %o3. */
8112 fputs (reg_names[REGNO (x)+3], file);
8113 return;
8114 case 'x':
8115 /* Print a condition code register. */
8116 if (REGNO (x) == SPARC_ICC_REG)
8117 {
8118 /* We don't handle CC[X]_NOOVmode because they're not supposed
8119 to occur here. */
8120 if (GET_MODE (x) == CCmode)
8121 fputs ("%icc", file);
8122 else if (GET_MODE (x) == CCXmode)
8123 fputs ("%xcc", file);
8124 else
8125 gcc_unreachable ();
8126 }
8127 else
8128 /* %fccN register */
8129 fputs (reg_names[REGNO (x)], file);
8130 return;
8131 case 'm':
8132 /* Print the operand's address only. */
8133 output_address (XEXP (x, 0));
8134 return;
8135 case 'r':
8136 /* In this case we need a register. Use %g0 if the
8137 operand is const0_rtx. */
8138 if (x == const0_rtx
8139 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
8140 {
8141 fputs ("%g0", file);
8142 return;
8143 }
8144 else
8145 break;
8146
8147 case 'A':
8148 switch (GET_CODE (x))
8149 {
8150 case IOR: fputs ("or", file); break;
8151 case AND: fputs ("and", file); break;
8152 case XOR: fputs ("xor", file); break;
8153 default: output_operand_lossage ("invalid %%A operand");
8154 }
8155 return;
8156
8157 case 'B':
8158 switch (GET_CODE (x))
8159 {
8160 case IOR: fputs ("orn", file); break;
8161 case AND: fputs ("andn", file); break;
8162 case XOR: fputs ("xnor", file); break;
8163 default: output_operand_lossage ("invalid %%B operand");
8164 }
8165 return;
8166
8167 /* These are used by the conditional move instructions. */
8168 case 'c' :
8169 case 'C':
8170 {
8171 enum rtx_code rc = GET_CODE (x);
8172
8173 if (code == 'c')
8174 {
8175 enum machine_mode mode = GET_MODE (XEXP (x, 0));
8176 if (mode == CCFPmode || mode == CCFPEmode)
8177 rc = reverse_condition_maybe_unordered (GET_CODE (x));
8178 else
8179 rc = reverse_condition (GET_CODE (x));
8180 }
8181 switch (rc)
8182 {
8183 case NE: fputs ("ne", file); break;
8184 case EQ: fputs ("e", file); break;
8185 case GE: fputs ("ge", file); break;
8186 case GT: fputs ("g", file); break;
8187 case LE: fputs ("le", file); break;
8188 case LT: fputs ("l", file); break;
8189 case GEU: fputs ("geu", file); break;
8190 case GTU: fputs ("gu", file); break;
8191 case LEU: fputs ("leu", file); break;
8192 case LTU: fputs ("lu", file); break;
8193 case LTGT: fputs ("lg", file); break;
8194 case UNORDERED: fputs ("u", file); break;
8195 case ORDERED: fputs ("o", file); break;
8196 case UNLT: fputs ("ul", file); break;
8197 case UNLE: fputs ("ule", file); break;
8198 case UNGT: fputs ("ug", file); break;
8199 case UNGE: fputs ("uge", file); break;
8200 case UNEQ: fputs ("ue", file); break;
8201 default: output_operand_lossage (code == 'c'
8202 ? "invalid %%c operand"
8203 : "invalid %%C operand");
8204 }
8205 return;
8206 }
8207
8208 /* These are used by the movr instruction pattern. */
8209 case 'd':
8210 case 'D':
8211 {
8212 enum rtx_code rc = (code == 'd'
8213 ? reverse_condition (GET_CODE (x))
8214 : GET_CODE (x));
8215 switch (rc)
8216 {
8217 case NE: fputs ("ne", file); break;
8218 case EQ: fputs ("e", file); break;
8219 case GE: fputs ("gez", file); break;
8220 case LT: fputs ("lz", file); break;
8221 case LE: fputs ("lez", file); break;
8222 case GT: fputs ("gz", file); break;
8223 default: output_operand_lossage (code == 'd'
8224 ? "invalid %%d operand"
8225 : "invalid %%D operand");
8226 }
8227 return;
8228 }
8229
8230 case 'b':
8231 {
8232 /* Print a sign-extended character. */
8233 int i = trunc_int_for_mode (INTVAL (x), QImode);
8234 fprintf (file, "%d", i);
8235 return;
8236 }
8237
8238 case 'f':
8239 /* Operand must be a MEM; write its address. */
8240 if (GET_CODE (x) != MEM)
8241 output_operand_lossage ("invalid %%f operand");
8242 output_address (XEXP (x, 0));
8243 return;
8244
8245 case 's':
8246 {
8247 /* Print a sign-extended 32-bit value. */
8248 HOST_WIDE_INT i;
8249 if (GET_CODE (x) == CONST_INT)
8250 i = INTVAL (x);
8251 else if (GET_CODE (x) == CONST_DOUBLE)
8252 i = CONST_DOUBLE_LOW (x);
8253 else
8254 {
8255 output_operand_lossage ("invalid %%s operand");
8256 return;
8257 }
8258 i = trunc_int_for_mode (i, SImode);
8259 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8260 return;
8261 }
8262
8263 case 0:
8264 /* Do nothing special. */
8265 break;
8266
8267 default:
8268 /* Undocumented flag. */
8269 output_operand_lossage ("invalid operand output code");
8270 }
8271
8272 if (GET_CODE (x) == REG)
8273 fputs (reg_names[REGNO (x)], file);
8274 else if (GET_CODE (x) == MEM)
8275 {
8276 fputc ('[', file);
8277 /* Poor Sun assembler doesn't understand absolute addressing. */
8278 if (CONSTANT_P (XEXP (x, 0)))
8279 fputs ("%g0+", file);
8280 output_address (XEXP (x, 0));
8281 fputc (']', file);
8282 }
8283 else if (GET_CODE (x) == HIGH)
8284 {
8285 fputs ("%hi(", file);
8286 output_addr_const (file, XEXP (x, 0));
8287 fputc (')', file);
8288 }
8289 else if (GET_CODE (x) == LO_SUM)
8290 {
8291 sparc_print_operand (file, XEXP (x, 0), 0);
8292 if (TARGET_CM_MEDMID)
8293 fputs ("+%l44(", file);
8294 else
8295 fputs ("+%lo(", file);
8296 output_addr_const (file, XEXP (x, 1));
8297 fputc (')', file);
8298 }
8299 else if (GET_CODE (x) == CONST_DOUBLE
8300 && (GET_MODE (x) == VOIDmode
8301 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8302 {
8303 if (CONST_DOUBLE_HIGH (x) == 0)
8304 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8305 else if (CONST_DOUBLE_HIGH (x) == -1
8306 && CONST_DOUBLE_LOW (x) < 0)
8307 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8308 else
8309 output_operand_lossage ("long long constant not a valid immediate operand");
8310 }
8311 else if (GET_CODE (x) == CONST_DOUBLE)
8312 output_operand_lossage ("floating point constant not a valid immediate operand");
8313 else { output_addr_const (file, x); }
8314 }
8315
8316 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8317
8318 static void
8319 sparc_print_operand_address (FILE *file, rtx x)
8320 {
8321 register rtx base, index = 0;
8322 int offset = 0;
8323 register rtx addr = x;
8324
8325 if (REG_P (addr))
8326 fputs (reg_names[REGNO (addr)], file);
8327 else if (GET_CODE (addr) == PLUS)
8328 {
8329 if (CONST_INT_P (XEXP (addr, 0)))
8330 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8331 else if (CONST_INT_P (XEXP (addr, 1)))
8332 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8333 else
8334 base = XEXP (addr, 0), index = XEXP (addr, 1);
8335 if (GET_CODE (base) == LO_SUM)
8336 {
8337 gcc_assert (USE_AS_OFFSETABLE_LO10
8338 && TARGET_ARCH64
8339 && ! TARGET_CM_MEDMID);
8340 output_operand (XEXP (base, 0), 0);
8341 fputs ("+%lo(", file);
8342 output_address (XEXP (base, 1));
8343 fprintf (file, ")+%d", offset);
8344 }
8345 else
8346 {
8347 fputs (reg_names[REGNO (base)], file);
8348 if (index == 0)
8349 fprintf (file, "%+d", offset);
8350 else if (REG_P (index))
8351 fprintf (file, "+%s", reg_names[REGNO (index)]);
8352 else if (GET_CODE (index) == SYMBOL_REF
8353 || GET_CODE (index) == LABEL_REF
8354 || GET_CODE (index) == CONST)
8355 fputc ('+', file), output_addr_const (file, index);
8356 else gcc_unreachable ();
8357 }
8358 }
8359 else if (GET_CODE (addr) == MINUS
8360 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8361 {
8362 output_addr_const (file, XEXP (addr, 0));
8363 fputs ("-(", file);
8364 output_addr_const (file, XEXP (addr, 1));
8365 fputs ("-.)", file);
8366 }
8367 else if (GET_CODE (addr) == LO_SUM)
8368 {
8369 output_operand (XEXP (addr, 0), 0);
8370 if (TARGET_CM_MEDMID)
8371 fputs ("+%l44(", file);
8372 else
8373 fputs ("+%lo(", file);
8374 output_address (XEXP (addr, 1));
8375 fputc (')', file);
8376 }
8377 else if (flag_pic
8378 && GET_CODE (addr) == CONST
8379 && GET_CODE (XEXP (addr, 0)) == MINUS
8380 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8381 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8382 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8383 {
8384 addr = XEXP (addr, 0);
8385 output_addr_const (file, XEXP (addr, 0));
8386 /* Group the args of the second CONST in parenthesis. */
8387 fputs ("-(", file);
8388 /* Skip past the second CONST--it does nothing for us. */
8389 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8390 /* Close the parenthesis. */
8391 fputc (')', file);
8392 }
8393 else
8394 {
8395 output_addr_const (file, addr);
8396 }
8397 }
8398 \f
8399 /* Target hook for assembling integer objects. The sparc version has
8400 special handling for aligned DI-mode objects. */
8401
8402 static bool
8403 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8404 {
8405 /* ??? We only output .xword's for symbols and only then in environments
8406 where the assembler can handle them. */
8407 if (aligned_p && size == 8
8408 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8409 {
8410 if (TARGET_V9)
8411 {
8412 assemble_integer_with_op ("\t.xword\t", x);
8413 return true;
8414 }
8415 else
8416 {
8417 assemble_aligned_integer (4, const0_rtx);
8418 assemble_aligned_integer (4, x);
8419 return true;
8420 }
8421 }
8422 return default_assemble_integer (x, size, aligned_p);
8423 }
8424 \f
8425 /* Return the value of a code used in the .proc pseudo-op that says
8426 what kind of result this function returns. For non-C types, we pick
8427 the closest C type. */
8428
8429 #ifndef SHORT_TYPE_SIZE
8430 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8431 #endif
8432
8433 #ifndef INT_TYPE_SIZE
8434 #define INT_TYPE_SIZE BITS_PER_WORD
8435 #endif
8436
8437 #ifndef LONG_TYPE_SIZE
8438 #define LONG_TYPE_SIZE BITS_PER_WORD
8439 #endif
8440
8441 #ifndef LONG_LONG_TYPE_SIZE
8442 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8443 #endif
8444
8445 #ifndef FLOAT_TYPE_SIZE
8446 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8447 #endif
8448
8449 #ifndef DOUBLE_TYPE_SIZE
8450 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8451 #endif
8452
8453 #ifndef LONG_DOUBLE_TYPE_SIZE
8454 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8455 #endif
8456
8457 unsigned long
8458 sparc_type_code (register tree type)
8459 {
8460 register unsigned long qualifiers = 0;
8461 register unsigned shift;
8462
8463 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8464 setting more, since some assemblers will give an error for this. Also,
8465 we must be careful to avoid shifts of 32 bits or more to avoid getting
8466 unpredictable results. */
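/* For example (illustrative), for the C type "int *" the loop below
   records one pointer level as (1 << 6) and then, reaching the signed
   "int" leaf, returns (1 << 6) | 4 = 0x44.  */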
8467
8468 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8469 {
8470 switch (TREE_CODE (type))
8471 {
8472 case ERROR_MARK:
8473 return qualifiers;
8474
8475 case ARRAY_TYPE:
8476 qualifiers |= (3 << shift);
8477 break;
8478
8479 case FUNCTION_TYPE:
8480 case METHOD_TYPE:
8481 qualifiers |= (2 << shift);
8482 break;
8483
8484 case POINTER_TYPE:
8485 case REFERENCE_TYPE:
8486 case OFFSET_TYPE:
8487 qualifiers |= (1 << shift);
8488 break;
8489
8490 case RECORD_TYPE:
8491 return (qualifiers | 8);
8492
8493 case UNION_TYPE:
8494 case QUAL_UNION_TYPE:
8495 return (qualifiers | 9);
8496
8497 case ENUMERAL_TYPE:
8498 return (qualifiers | 10);
8499
8500 case VOID_TYPE:
8501 return (qualifiers | 16);
8502
8503 case INTEGER_TYPE:
8504 /* If this is a range type, consider it to be the underlying
8505 type. */
8506 if (TREE_TYPE (type) != 0)
8507 break;
8508
8509 /* Carefully distinguish all the standard types of C,
8510 without messing up if the language is not C. We do this by
8511 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8512 look at both the names and the above fields, but that's redundant.
8513 Any type whose size is between two C types will be considered
8514 to be the wider of the two types. Also, we do not have a
8515 special code to use for "long long", so anything wider than
8516 long is treated the same. Note that we can't distinguish
8517 between "int" and "long" in this code if they are the same
8518 size, but that's fine, since neither can the assembler. */
8519
8520 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8521 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8522
8523 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8524 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8525
8526 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8527 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8528
8529 else
8530 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8531
8532 case REAL_TYPE:
8533 /* If this is a range type, consider it to be the underlying
8534 type. */
8535 if (TREE_TYPE (type) != 0)
8536 break;
8537
8538 /* Carefully distinguish all the standard types of C,
8539 without messing up if the language is not C. */
8540
8541 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8542 return (qualifiers | 6);
8543
8544 else
8545 return (qualifiers | 7);
8546
8547 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8548 /* ??? We need to distinguish between double and float complex types,
8549 but I don't know how yet because I can't reach this code from
8550 existing front-ends. */
8551 return (qualifiers | 7); /* Who knows? */
8552
8553 case VECTOR_TYPE:
8554 case BOOLEAN_TYPE: /* Boolean truth value type. */
8555 case LANG_TYPE:
8556 case NULLPTR_TYPE:
8557 return qualifiers;
8558
8559 default:
8560 gcc_unreachable (); /* Not a type! */
8561 }
8562 }
8563
8564 return qualifiers;
8565 }
8566 \f
8567 /* Nested function support. */
8568
8569 /* Emit RTL insns to initialize the variable parts of a trampoline.
8570 FNADDR is an RTX for the address of the function's pure code.
8571 CXT is an RTX for the static chain value for the function.
8572
8573 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8574 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8575 (to store insns). This is a bit excessive. Perhaps a different
8576 mechanism would be better here.
8577
8578 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8579
8580 static void
8581 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8582 {
8583 /* SPARC 32-bit trampoline:
8584
8585 sethi %hi(fn), %g1
8586 sethi %hi(static), %g2
8587 jmp %g1+%lo(fn)
8588 or %g2, %lo(static), %g2
8589
8590 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8591 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
8592 */
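  /* As a worked example (illustrative): for fn = 0x40001234, the first
     word stored below is 0x03000000 | (0x40001234 >> 10) = 0x03100004,
     i.e. "sethi %hi(0x40001234), %g1", and the jump word is
     0x81c06000 | (0x40001234 & 0x3ff) = 0x81c06234, i.e.
     "jmp %g1+%lo(0x40001234)".  */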
8593
8594 emit_move_insn
8595 (adjust_address (m_tramp, SImode, 0),
8596 expand_binop (SImode, ior_optab,
8597 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8598 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8599 NULL_RTX, 1, OPTAB_DIRECT));
8600
8601 emit_move_insn
8602 (adjust_address (m_tramp, SImode, 4),
8603 expand_binop (SImode, ior_optab,
8604 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8605 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8606 NULL_RTX, 1, OPTAB_DIRECT));
8607
8608 emit_move_insn
8609 (adjust_address (m_tramp, SImode, 8),
8610 expand_binop (SImode, ior_optab,
8611 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8612 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8613 NULL_RTX, 1, OPTAB_DIRECT));
8614
8615 emit_move_insn
8616 (adjust_address (m_tramp, SImode, 12),
8617 expand_binop (SImode, ior_optab,
8618 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8619 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8620 NULL_RTX, 1, OPTAB_DIRECT));
8621
8622 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8623 aligned on a 16 byte boundary so one flush clears it all. */
8624 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8625 if (sparc_cpu != PROCESSOR_ULTRASPARC
8626 && sparc_cpu != PROCESSOR_ULTRASPARC3
8627 && sparc_cpu != PROCESSOR_NIAGARA
8628 && sparc_cpu != PROCESSOR_NIAGARA2
8629 && sparc_cpu != PROCESSOR_NIAGARA3
8630 && sparc_cpu != PROCESSOR_NIAGARA4)
8631 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8632
8633 /* Call __enable_execute_stack after writing onto the stack to make sure
8634 the stack address is accessible. */
8635 #ifdef HAVE_ENABLE_EXECUTE_STACK
8636 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8637 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8638 #endif
8639
8640 }
8641
8642 /* The 64-bit version is simpler because it makes more sense to load the
8643 values as "immediate" data out of the trampoline. It's also easier since
8644 we can read the PC without clobbering a register. */
8645
8646 static void
8647 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8648 {
8649 /* SPARC 64-bit trampoline:
8650
8651 rd %pc, %g1
8652 ldx [%g1+24], %g5
8653 jmp %g5
8654 ldx [%g1+16], %g5
8655 +16 bytes data
8656 */
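  /* The trampoline is thus 32 bytes: 16 bytes of code, the static
     chain at offset 16 and the function address at offset 24; the
     "ldx [%g1+16]" and "ldx [%g1+24]" above read them relative to the
     "rd %pc" result.  */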
8657
8658 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8659 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8660 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8661 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8662 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8663 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8664 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8665 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8666 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8667 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8668 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8669
8670 if (sparc_cpu != PROCESSOR_ULTRASPARC
8671 && sparc_cpu != PROCESSOR_ULTRASPARC3
8672 && sparc_cpu != PROCESSOR_NIAGARA
8673 && sparc_cpu != PROCESSOR_NIAGARA2
8674 && sparc_cpu != PROCESSOR_NIAGARA3
8675 && sparc_cpu != PROCESSOR_NIAGARA4)
8676 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8677
8678 /* Call __enable_execute_stack after writing onto the stack to make sure
8679 the stack address is accessible. */
8680 #ifdef HAVE_ENABLE_EXECUTE_STACK
8681 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8682 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8683 #endif
8684 }
8685
8686 /* Worker for TARGET_TRAMPOLINE_INIT. */
8687
8688 static void
8689 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8690 {
8691 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8692 cxt = force_reg (Pmode, cxt);
8693 if (TARGET_ARCH64)
8694 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8695 else
8696 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8697 }
8698 \f
8699 /* Adjust the cost of a scheduling dependency. Return the new cost of
8700 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8701
8702 static int
8703 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8704 {
8705 enum attr_type insn_type;
8706
8707 if (! recog_memoized (insn))
8708 return 0;
8709
8710 insn_type = get_attr_type (insn);
8711
8712 if (REG_NOTE_KIND (link) == 0)
8713 {
8714 /* Data dependency; DEP_INSN writes a register that INSN reads some
8715 cycles later. */
8716
8717 /* If a load, then the dependence must be on the memory address;
8718 add an extra "cycle". Note that the cost could be two cycles
8719 if the reg was written late in an instruction group; we cannot tell
8720 here. */
8721 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8722 return cost + 3;
8723
8724 /* Get the delay only if the address of the store is the dependence. */
8725 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8726 {
8727 rtx pat = PATTERN (insn);
8728 rtx dep_pat = PATTERN (dep_insn);
8729
8730 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8731 return cost; /* This should not happen! */
8732
8733 /* The dependency between the two instructions was on the data that
8734 is being stored. Assume that this implies that the address of the
8735 store is not dependent. */
8736 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8737 return cost;
8738
8739 return cost + 3; /* An approximation. */
8740 }
8741
8742 /* A shift instruction cannot receive its data from an instruction
8743 in the same cycle; add a one cycle penalty. */
8744 if (insn_type == TYPE_SHIFT)
8745 return cost + 3; /* Split before cascade into shift. */
8746 }
8747 else
8748 {
8749 /* Anti- or output-dependency; DEP_INSN reads/writes a register that
8750 INSN writes some cycles later. */
8751
8752 /* These are only significant for the fpu unit; writing a fp reg before
8753 the fpu has finished with it stalls the processor. */
8754
8755 /* Reusing an integer register causes no problems. */
8756 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8757 return 0;
8758 }
8759
8760 return cost;
8761 }
8762
8763 static int
8764 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8765 {
8766 enum attr_type insn_type, dep_type;
8767 rtx pat = PATTERN (insn);
8768 rtx dep_pat = PATTERN (dep_insn);
8769
8770 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8771 return cost;
8772
8773 insn_type = get_attr_type (insn);
8774 dep_type = get_attr_type (dep_insn);
8775
8776 switch (REG_NOTE_KIND (link))
8777 {
8778 case 0:
8779 /* Data dependency; DEP_INSN writes a register that INSN reads some
8780 cycles later. */
8781
8782 switch (insn_type)
8783 {
8784 case TYPE_STORE:
8785 case TYPE_FPSTORE:
8786 /* Get the delay iff the address of the store is the dependence. */
8787 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8788 return cost;
8789
8790 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8791 return cost;
8792 return cost + 3;
8793
8794 case TYPE_LOAD:
8795 case TYPE_SLOAD:
8796 case TYPE_FPLOAD:
8797 /* If a load, then the dependence must be on the memory address. If
8798 the addresses aren't equal, then it might be a false dependency. */
8799 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8800 {
8801 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8802 || GET_CODE (SET_DEST (dep_pat)) != MEM
8803 || GET_CODE (SET_SRC (pat)) != MEM
8804 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8805 XEXP (SET_SRC (pat), 0)))
8806 return cost + 2;
8807
8808 return cost + 8;
8809 }
8810 break;
8811
8812 case TYPE_BRANCH:
8813 /* Compare to branch latency is 0. There is no benefit from
8814 separating compare and branch. */
8815 if (dep_type == TYPE_COMPARE)
8816 return 0;
8817 /* Floating point compare to branch latency is less than
8818 compare to conditional move. */
8819 if (dep_type == TYPE_FPCMP)
8820 return cost - 1;
8821 break;
8822 default:
8823 break;
8824 }
8825 break;
8826
8827 case REG_DEP_ANTI:
8828 /* Anti-dependencies only penalize the fpu unit. */
8829 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8830 return 0;
8831 break;
8832
8833 default:
8834 break;
8835 }
8836
8837 return cost;
8838 }
8839
8840 static int
8841 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8842 {
8843 switch (sparc_cpu)
8844 {
8845 case PROCESSOR_SUPERSPARC:
8846 cost = supersparc_adjust_cost (insn, link, dep, cost);
8847 break;
8848 case PROCESSOR_HYPERSPARC:
8849 case PROCESSOR_SPARCLITE86X:
8850 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8851 break;
8852 default:
8853 break;
8854 }
8855 return cost;
8856 }
8857
8858 static void
8859 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8860 int sched_verbose ATTRIBUTE_UNUSED,
8861 int max_ready ATTRIBUTE_UNUSED)
8862 {}
8863
8864 static int
8865 sparc_use_sched_lookahead (void)
8866 {
8867 if (sparc_cpu == PROCESSOR_NIAGARA
8868 || sparc_cpu == PROCESSOR_NIAGARA2
8869 || sparc_cpu == PROCESSOR_NIAGARA3
8870 || sparc_cpu == PROCESSOR_NIAGARA4)
8871 return 0;
8872 if (sparc_cpu == PROCESSOR_ULTRASPARC
8873 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8874 return 4;
8875 if ((1 << sparc_cpu) &
8876 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8877 (1 << PROCESSOR_SPARCLITE86X)))
8878 return 3;
8879 return 0;
8880 }
8881
8882 static int
8883 sparc_issue_rate (void)
8884 {
8885 switch (sparc_cpu)
8886 {
8887 case PROCESSOR_NIAGARA:
8888 case PROCESSOR_NIAGARA2:
8889 case PROCESSOR_NIAGARA3:
8890 case PROCESSOR_NIAGARA4:
8891 default:
8892 return 1;
8893 case PROCESSOR_V9:
8894 /* Assume V9 processors are capable of at least dual-issue. */
8895 return 2;
8896 case PROCESSOR_SUPERSPARC:
8897 return 3;
8898 case PROCESSOR_HYPERSPARC:
8899 case PROCESSOR_SPARCLITE86X:
8900 return 2;
8901 case PROCESSOR_ULTRASPARC:
8902 case PROCESSOR_ULTRASPARC3:
8903 return 4;
8904 }
8905 }
8906
8907 static int
8908 set_extends (rtx insn)
8909 {
8910 register rtx pat = PATTERN (insn);
8911
8912 switch (GET_CODE (SET_SRC (pat)))
8913 {
8914 /* Load and some shift instructions zero extend. */
8915 case MEM:
8916 case ZERO_EXTEND:
8917 /* sethi clears the high bits. */
8918 case HIGH:
8919 /* LO_SUM is used with sethi. sethi cleared the high
8920 bits and the values used with lo_sum are positive. */
8921 case LO_SUM:
8922 /* Store flag stores 0 or 1. */
8923 case LT: case LTU:
8924 case GT: case GTU:
8925 case LE: case LEU:
8926 case GE: case GEU:
8927 case EQ:
8928 case NE:
8929 return 1;
8930 case AND:
8931 {
8932 rtx op0 = XEXP (SET_SRC (pat), 0);
8933 rtx op1 = XEXP (SET_SRC (pat), 1);
8934 if (GET_CODE (op1) == CONST_INT)
8935 return INTVAL (op1) >= 0;
8936 if (GET_CODE (op0) != REG)
8937 return 0;
8938 if (sparc_check_64 (op0, insn) == 1)
8939 return 1;
8940 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8941 }
8942 case IOR:
8943 case XOR:
8944 {
8945 rtx op0 = XEXP (SET_SRC (pat), 0);
8946 rtx op1 = XEXP (SET_SRC (pat), 1);
8947 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8948 return 0;
8949 if (GET_CODE (op1) == CONST_INT)
8950 return INTVAL (op1) >= 0;
8951 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8952 }
8953 case LSHIFTRT:
8954 return GET_MODE (SET_SRC (pat)) == SImode;
8955 /* Positive integers leave the high bits zero. */
8956 case CONST_DOUBLE:
8957 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8958 case CONST_INT:
8959 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8960 case ASHIFTRT:
8961 case SIGN_EXTEND:
8962 return - (GET_MODE (SET_SRC (pat)) == SImode);
8963 case REG:
8964 return sparc_check_64 (SET_SRC (pat), insn);
8965 default:
8966 return 0;
8967 }
8968 }
8969
8970 /* We _ought_ to have only one kind per function, but... */
8971 static GTY(()) rtx sparc_addr_diff_list;
8972 static GTY(()) rtx sparc_addr_list;
8973
8974 void
8975 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8976 {
8977 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8978 if (diff)
8979 sparc_addr_diff_list
8980 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8981 else
8982 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8983 }
8984
8985 static void
8986 sparc_output_addr_vec (rtx vec)
8987 {
8988 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8989 int idx, vlen = XVECLEN (body, 0);
8990
8991 #ifdef ASM_OUTPUT_ADDR_VEC_START
8992 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8993 #endif
8994
8995 #ifdef ASM_OUTPUT_CASE_LABEL
8996 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8997 NEXT_INSN (lab));
8998 #else
8999 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9000 #endif
9001
9002 for (idx = 0; idx < vlen; idx++)
9003 {
9004 ASM_OUTPUT_ADDR_VEC_ELT
9005 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9006 }
9007
9008 #ifdef ASM_OUTPUT_ADDR_VEC_END
9009 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9010 #endif
9011 }
9012
9013 static void
9014 sparc_output_addr_diff_vec (rtx vec)
9015 {
9016 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9017 rtx base = XEXP (XEXP (body, 0), 0);
9018 int idx, vlen = XVECLEN (body, 1);
9019
9020 #ifdef ASM_OUTPUT_ADDR_VEC_START
9021 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9022 #endif
9023
9024 #ifdef ASM_OUTPUT_CASE_LABEL
9025 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9026 NEXT_INSN (lab));
9027 #else
9028 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9029 #endif
9030
9031 for (idx = 0; idx < vlen; idx++)
9032 {
9033 ASM_OUTPUT_ADDR_DIFF_ELT
9034 (asm_out_file,
9035 body,
9036 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
9037 CODE_LABEL_NUMBER (base));
9038 }
9039
9040 #ifdef ASM_OUTPUT_ADDR_VEC_END
9041 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9042 #endif
9043 }
9044
9045 static void
9046 sparc_output_deferred_case_vectors (void)
9047 {
9048 rtx t;
9049 int align;
9050
9051 if (sparc_addr_list == NULL_RTX
9052 && sparc_addr_diff_list == NULL_RTX)
9053 return;
9054
9055 /* Align to cache line in the function's code section. */
9056 switch_to_section (current_function_section ());
9057
9058 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9059 if (align > 0)
9060 ASM_OUTPUT_ALIGN (asm_out_file, align);
9061
9062 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
9063 sparc_output_addr_vec (XEXP (t, 0));
9064 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
9065 sparc_output_addr_diff_vec (XEXP (t, 0));
9066
9067 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
9068 }
9069
9070 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
9071 unknown. Return 1 if the high bits are zero, -1 if the register is
9072 sign extended. */
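/* For example (illustrative), after "sethi %hi(sym), %g1" the upper 32
   bits of %g1 are known to be zero, so this returns 1 for %g1, while
   after an SImode "sra" the register is known to be sign extended and
   the function returns -1.  */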
9073 int
9074 sparc_check_64 (rtx x, rtx insn)
9075 {
9076 /* If a register is set only once it is safe to ignore insns this
9077 code does not know how to handle. The loop will either recognize
9078 the single set and return the correct value or fail to recognize
9079 it and return 0. */
9080 int set_once = 0;
9081 rtx y = x;
9082
9083 gcc_assert (GET_CODE (x) == REG);
9084
9085 if (GET_MODE (x) == DImode)
9086 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
9087
9088 if (flag_expensive_optimizations
9089 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
9090 set_once = 1;
9091
9092 if (insn == 0)
9093 {
9094 if (set_once)
9095 insn = get_last_insn_anywhere ();
9096 else
9097 return 0;
9098 }
9099
9100 while ((insn = PREV_INSN (insn)))
9101 {
9102 switch (GET_CODE (insn))
9103 {
9104 case JUMP_INSN:
9105 case NOTE:
9106 break;
9107 case CODE_LABEL:
9108 case CALL_INSN:
9109 default:
9110 if (! set_once)
9111 return 0;
9112 break;
9113 case INSN:
9114 {
9115 rtx pat = PATTERN (insn);
9116 if (GET_CODE (pat) != SET)
9117 return 0;
9118 if (rtx_equal_p (x, SET_DEST (pat)))
9119 return set_extends (insn);
9120 if (y && rtx_equal_p (y, SET_DEST (pat)))
9121 return set_extends (insn);
9122 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
9123 return 0;
9124 }
9125 }
9126 }
9127 return 0;
9128 }
9129
9130 /* Returns assembly code to perform a DImode shift using
9131 a 64-bit global or out register on SPARC-V8+. */
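/* For example (illustrative), when the destination needs the scratch
   register (alternative 2) and OPCODE is "sllx", the sequence emitted
   below is roughly:

	sllx	%H1, 32, %3
	srl	%L1, 0, %L1
	or	%L1, %3, %3
	sllx	%3, %2, %3
	srlx	%3, 32, %H0
	mov	%3, %L0
*/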
9132 const char *
9133 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
9134 {
9135 static char asm_code[60];
9136
9137 /* The scratch register is only required when the destination
9138 register is not a 64-bit global or out register. */
9139 if (which_alternative != 2)
9140 operands[3] = operands[0];
9141
9142 /* We can only shift by constants <= 63. */
9143 if (GET_CODE (operands[2]) == CONST_INT)
9144 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
9145
9146 if (GET_CODE (operands[1]) == CONST_INT)
9147 {
9148 output_asm_insn ("mov\t%1, %3", operands);
9149 }
9150 else
9151 {
9152 output_asm_insn ("sllx\t%H1, 32, %3", operands);
9153 if (sparc_check_64 (operands[1], insn) <= 0)
9154 output_asm_insn ("srl\t%L1, 0, %L1", operands);
9155 output_asm_insn ("or\t%L1, %3, %3", operands);
9156 }
9157
9158 strcpy (asm_code, opcode);
9159
9160 if (which_alternative != 2)
9161 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
9162 else
9163 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
9164 }
9165 \f
9166 /* Output rtl to increment the profiler label LABELNO
9167 for profiling a function entry. */
9168
9169 void
9170 sparc_profile_hook (int labelno)
9171 {
9172 char buf[32];
9173 rtx lab, fun;
9174
9175 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
9176 if (NO_PROFILE_COUNTERS)
9177 {
9178 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
9179 }
9180 else
9181 {
9182 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9183 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
9184 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
9185 }
9186 }
9187 \f
9188 #ifdef TARGET_SOLARIS
9189 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9190
9191 static void
9192 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9193 tree decl ATTRIBUTE_UNUSED)
9194 {
9195 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
9196 {
9197 solaris_elf_asm_comdat_section (name, flags, decl);
9198 return;
9199 }
9200
9201 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9202
9203 if (!(flags & SECTION_DEBUG))
9204 fputs (",#alloc", asm_out_file);
9205 if (flags & SECTION_WRITE)
9206 fputs (",#write", asm_out_file);
9207 if (flags & SECTION_TLS)
9208 fputs (",#tls", asm_out_file);
9209 if (flags & SECTION_CODE)
9210 fputs (",#execinstr", asm_out_file);
9211
9212 /* ??? Handle SECTION_BSS. */
9213
9214 fputc ('\n', asm_out_file);
9215 }
9216 #endif /* TARGET_SOLARIS */
9217
9218 /* We do not allow indirect calls to be optimized into sibling calls.
9219
9220 We cannot use sibling calls when delayed branches are disabled
9221 because they will likely require the call delay slot to be filled.
9222
9223 Also, on SPARC 32-bit we cannot emit a sibling call when the
9224 current function returns a structure. This is because the "unimp
9225 after call" convention would cause the callee to return to the
9226 wrong place. The generic code already disallows cases where the
9227 function being called returns a structure.
9228
9229 It may seem strange how this last case could occur. Usually there
9230 is code after the call which jumps to epilogue code which dumps the
9231 return value into the struct return area. That ought to invalidate
9232 the sibling call right? Well, in the C++ case we can end up passing
9233 the pointer to the struct return area to a constructor (which returns
9234 void) and then nothing else happens. Such a sibling call would look
9235 valid without the added check here.
9236
9237 VxWorks PIC PLT entries require the global pointer to be initialized
9238 on entry. We therefore can't emit sibling calls to them. */
9239 static bool
9240 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9241 {
9242 return (decl
9243 && flag_delayed_branch
9244 && (TARGET_ARCH64 || ! cfun->returns_struct)
9245 && !(TARGET_VXWORKS_RTP
9246 && flag_pic
9247 && !targetm.binds_local_p (decl)));
9248 }
9249 \f
9250 /* libfunc renaming. */
9251
9252 static void
9253 sparc_init_libfuncs (void)
9254 {
9255 if (TARGET_ARCH32)
9256 {
9257 /* Use the subroutines that Sun's library provides for integer
9258 multiply and divide. The `*' prevents an underscore from
9259 being prepended by the compiler. .umul is a little faster
9260 than .mul. */
9261 set_optab_libfunc (smul_optab, SImode, "*.umul");
9262 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9263 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9264 set_optab_libfunc (smod_optab, SImode, "*.rem");
9265 set_optab_libfunc (umod_optab, SImode, "*.urem");
9266
9267 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
9268 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9269 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9270 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9271 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9272 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9273
9274 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
9275 is because with soft-float, the SFmode and DFmode sqrt
9276 instructions will be absent, and the compiler will notice and
9277 try to use the TFmode sqrt instruction for calls to the
9278 builtin function sqrt, but this fails. */
9279 if (TARGET_FPU)
9280 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9281
9282 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9283 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9284 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9285 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9286 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9287 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9288
9289 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9290 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9291 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9292 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9293
9294 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9295 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9296 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9297 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9298
9299 if (DITF_CONVERSION_LIBFUNCS)
9300 {
9301 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9302 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9303 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9304 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9305 }
9306
9307 if (SUN_CONVERSION_LIBFUNCS)
9308 {
9309 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9310 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9311 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9312 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9313 }
9314 }
9315 if (TARGET_ARCH64)
9316 {
      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
	 do not exist in the library.  Make sure the compiler does not
	 emit calls to them by accident.  (It should always use the
	 hardware instructions.)  */
9321 set_optab_libfunc (smul_optab, SImode, 0);
9322 set_optab_libfunc (sdiv_optab, SImode, 0);
9323 set_optab_libfunc (udiv_optab, SImode, 0);
9324 set_optab_libfunc (smod_optab, SImode, 0);
9325 set_optab_libfunc (umod_optab, SImode, 0);
9326
9327 if (SUN_INTEGER_MULTIPLY_64)
9328 {
9329 set_optab_libfunc (smul_optab, DImode, "__mul64");
9330 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9331 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9332 set_optab_libfunc (smod_optab, DImode, "__rem64");
9333 set_optab_libfunc (umod_optab, DImode, "__urem64");
9334 }
9335
9336 if (SUN_CONVERSION_LIBFUNCS)
9337 {
9338 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9339 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9340 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9341 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
9342 }
9343 }
9344 }
9345 \f
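/* Add a builtin function NAME implemented by the insn pattern identified
   by CODE, with function prototype TYPE.  */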
static tree
def_builtin (const char *name, int code, tree type)
{
  return add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
			       NULL_TREE);
}
9351
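/* Like def_builtin, but also mark the resulting decl TREE_READONLY so
   that calls to the builtin are treated as const and can be CSEd.  */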
static tree
def_builtin_const (const char *name, int code, tree type)
{
  tree t = def_builtin (name, code, type);

  if (t)
    TREE_READONLY (t) = 1;

  return t;
}
9361
9362 /* Implement the TARGET_INIT_BUILTINS target hook.
9363 Create builtin functions for special SPARC instructions. */
9364
9365 static void
9366 sparc_init_builtins (void)
9367 {
9368 if (TARGET_VIS)
9369 sparc_vis_init_builtins ();
9370 }
9371
9372 /* Create builtin functions for VIS 1.0 instructions. */
9373
9374 static void
9375 sparc_vis_init_builtins (void)
9376 {
9377 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9378 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9379 tree v4hi = build_vector_type (intHI_type_node, 4);
9380 tree v2hi = build_vector_type (intHI_type_node, 2);
9381 tree v2si = build_vector_type (intSI_type_node, 2);
9382 tree v1si = build_vector_type (intSI_type_node, 1);
9383
9384 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9385 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9386 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9387 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9388 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9389 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9390 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9391 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9392 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9393 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9394 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9395 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9396 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
9397 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
9398 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9399 v8qi, v8qi,
9400 intDI_type_node, 0);
9401 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
9402 v8qi, v8qi, 0);
9403 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
9404 v8qi, v8qi, 0);
9405 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9406 intDI_type_node,
9407 intDI_type_node, 0);
9408 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
9409 intSI_type_node,
9410 intSI_type_node, 0);
9411 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9412 ptr_type_node,
9413 intSI_type_node, 0);
9414 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9415 ptr_type_node,
9416 intDI_type_node, 0);
9417 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
9418 ptr_type_node,
9419 ptr_type_node, 0);
9420 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
9421 ptr_type_node,
9422 ptr_type_node, 0);
9423 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
9424 v4hi, v4hi, 0);
9425 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
9426 v2si, v2si, 0);
9427 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
9428 v4hi, v4hi, 0);
9429 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
9430 v2si, v2si, 0);
9431 tree void_ftype_di = build_function_type_list (void_type_node,
9432 intDI_type_node, 0);
9433 tree di_ftype_void = build_function_type_list (intDI_type_node,
9434 void_type_node, 0);
9435 tree void_ftype_si = build_function_type_list (void_type_node,
9436 intSI_type_node, 0);
9437 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
9438 float_type_node,
9439 float_type_node, 0);
9440 tree df_ftype_df_df = build_function_type_list (double_type_node,
9441 double_type_node,
9442 double_type_node, 0);
9443
9444 /* Packing and expanding vectors. */
9445 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
9446 v4qi_ftype_v4hi);
9447 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9448 v8qi_ftype_v2si_v8qi);
9449 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9450 v2hi_ftype_v2si);
9451 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
9452 v4hi_ftype_v4qi);
9453 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9454 v8qi_ftype_v4qi_v4qi);
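
  /* A hypothetical usage sketch (user code, shown for illustration only;
     it is not part of this file): with -mvis, e.g.

	 typedef short v4hi __attribute__ ((vector_size (8)));
	 typedef unsigned char v4qi __attribute__ ((vector_size (4)));

	 v4qi pack (v4hi x) { return __builtin_vis_fpack16 (x); }

     should expand to a single fpack16 instruction.  */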
9455
9456 /* Multiplications. */
9457 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9458 v4hi_ftype_v4qi_v4hi);
9459 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9460 v4hi_ftype_v4qi_v2hi);
9461 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9462 v4hi_ftype_v4qi_v2hi);
9463 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9464 v4hi_ftype_v8qi_v4hi);
9465 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9466 v4hi_ftype_v8qi_v4hi);
9467 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9468 v2si_ftype_v4qi_v2hi);
9469 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9470 v2si_ftype_v4qi_v2hi);
9471
9472 /* Data aligning. */
9473 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9474 v4hi_ftype_v4hi_v4hi);
9475 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9476 v8qi_ftype_v8qi_v8qi);
9477 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9478 v2si_ftype_v2si_v2si);
9479 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
9480 di_ftype_di_di);
9481
9482 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
9483 void_ftype_di);
9484 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
9485 di_ftype_void);
9486
9487 if (TARGET_ARCH64)
9488 {
9489 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9490 ptr_ftype_ptr_di);
9491 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
9492 ptr_ftype_ptr_di);
9493 }
9494 else
9495 {
9496 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9497 ptr_ftype_ptr_si);
9498 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
9499 ptr_ftype_ptr_si);
9500 }
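
  /* Typical use pairs these two: alignaddr computes the aligned address
     and latches the byte offset in the GSR register, after which
     faligndata extracts the misaligned vector from two adjacent aligned
     loads.  */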
9501
9502 /* Pixel distance. */
9503 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9504 di_ftype_v8qi_v8qi_di);
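  /* Note that pdist accumulates: the sum of the absolute differences of
     the eight byte pairs is added to the 64-bit third operand, which is
     also the result.  */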
9505
9506 /* Edge handling. */
9507 if (TARGET_ARCH64)
9508 {
9509 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
9510 di_ftype_ptr_ptr);
9511 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
9512 di_ftype_ptr_ptr);
9513 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
9514 di_ftype_ptr_ptr);
9515 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
9516 di_ftype_ptr_ptr);
9517 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
9518 di_ftype_ptr_ptr);
9519 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
9520 di_ftype_ptr_ptr);
9521 if (TARGET_VIS2)
9522 {
9523 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
9524 di_ftype_ptr_ptr);
9525 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
9526 di_ftype_ptr_ptr);
9527 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
9528 di_ftype_ptr_ptr);
9529 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
9530 di_ftype_ptr_ptr);
9531 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
9532 di_ftype_ptr_ptr);
9533 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
9534 di_ftype_ptr_ptr);
9535 }
9536 }
9537 else
9538 {
9539 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
9540 si_ftype_ptr_ptr);
9541 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
9542 si_ftype_ptr_ptr);
9543 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
9544 si_ftype_ptr_ptr);
9545 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
9546 si_ftype_ptr_ptr);
9547 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
9548 si_ftype_ptr_ptr);
9549 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
9550 si_ftype_ptr_ptr);
9551 if (TARGET_VIS2)
9552 {
9553 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
9554 si_ftype_ptr_ptr);
9555 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
9556 si_ftype_ptr_ptr);
9557 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
9558 si_ftype_ptr_ptr);
9559 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
9560 si_ftype_ptr_ptr);
9561 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
9562 si_ftype_ptr_ptr);
9563 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
9564 si_ftype_ptr_ptr);
9565 }
9566 }
9567
9568 /* Pixel compare. */
9569 if (TARGET_ARCH64)
9570 {
9571 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
9572 di_ftype_v4hi_v4hi);
9573 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
9574 di_ftype_v2si_v2si);
9575 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
9576 di_ftype_v4hi_v4hi);
9577 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
9578 di_ftype_v2si_v2si);
9579 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
9580 di_ftype_v4hi_v4hi);
9581 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
9582 di_ftype_v2si_v2si);
9583 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
9584 di_ftype_v4hi_v4hi);
9585 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
9586 di_ftype_v2si_v2si);
9587 }
9588 else
9589 {
9590 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
9591 si_ftype_v4hi_v4hi);
9592 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
9593 si_ftype_v2si_v2si);
9594 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
9595 si_ftype_v4hi_v4hi);
9596 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
9597 si_ftype_v2si_v2si);
9598 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
9599 si_ftype_v4hi_v4hi);
9600 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
9601 si_ftype_v2si_v2si);
9602 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
9603 si_ftype_v4hi_v4hi);
9604 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
9605 si_ftype_v2si_v2si);
9606 }
9607
9608 /* Addition and subtraction. */
9609 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
9610 v4hi_ftype_v4hi_v4hi);
9611 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
9612 v2hi_ftype_v2hi_v2hi);
9613 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
9614 v2si_ftype_v2si_v2si);
9615 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
9616 v1si_ftype_v1si_v1si);
9617 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
9618 v4hi_ftype_v4hi_v4hi);
9619 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
9620 v2hi_ftype_v2hi_v2hi);
9621 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
9622 v2si_ftype_v2si_v2si);
9623 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
9624 v1si_ftype_v1si_v1si);
9625
9626 /* Three-dimensional array addressing. */
9627 if (TARGET_ARCH64)
9628 {
9629 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
9630 di_ftype_di_di);
9631 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
9632 di_ftype_di_di);
9633 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
9634 di_ftype_di_di);
9635 }
9636 else
9637 {
9638 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
9639 si_ftype_si_si);
9640 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
9641 si_ftype_si_si);
9642 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
9643 si_ftype_si_si);
9644 }
9645
9646 if (TARGET_VIS2)
9647 {
      /* Byte mask and shuffle.  */
9649 if (TARGET_ARCH64)
9650 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
9651 di_ftype_di_di);
9652 else
9653 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
9654 si_ftype_si_si);
9655 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
9656 v4hi_ftype_v4hi_v4hi);
9657 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
9658 v8qi_ftype_v8qi_v8qi);
9659 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
9660 v2si_ftype_v2si_v2si);
9661 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
9662 di_ftype_di_di);
9663 }
9664
9665 if (TARGET_VIS3)
9666 {
9667 if (TARGET_ARCH64)
9668 {
9669 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
9670 void_ftype_di);
9671 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
9672 void_ftype_di);
9673 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
9674 void_ftype_di);
9675 }
9676 else
9677 {
9678 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
9679 void_ftype_si);
9680 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
9681 void_ftype_si);
9682 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
9683 void_ftype_si);
9684 }
9685
9686 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
9687 v4hi_ftype_v4hi_v4hi);
9688
9689 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
9690 v4hi_ftype_v4hi_v4hi);
9691 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
9692 v4hi_ftype_v4hi_v4hi);
9693 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
9694 v4hi_ftype_v4hi_v4hi);
9695 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
9696 v4hi_ftype_v4hi_v4hi);
9697 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
9698 v2si_ftype_v2si_v2si);
9699 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
9700 v2si_ftype_v2si_v2si);
9701 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
9702 v2si_ftype_v2si_v2si);
9703 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
9704 v2si_ftype_v2si_v2si);
9705
9706 if (TARGET_ARCH64)
9707 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
9708 di_ftype_v8qi_v8qi);
9709 else
9710 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
9711 si_ftype_v8qi_v8qi);
9712
9713 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
9714 v4hi_ftype_v4hi_v4hi);
9715 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
9716 di_ftype_di_di);
9717 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
9718 di_ftype_di_di);
9719
9720 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
9721 v4hi_ftype_v4hi_v4hi);
9722 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
9723 v2hi_ftype_v2hi_v2hi);
9724 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
9725 v4hi_ftype_v4hi_v4hi);
9726 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
9727 v2hi_ftype_v2hi_v2hi);
9728 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
9729 v2si_ftype_v2si_v2si);
9730 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
9731 v1si_ftype_v1si_v1si);
9732 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
9733 v2si_ftype_v2si_v2si);
9734 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
9735 v1si_ftype_v1si_v1si);
9736
9737 if (TARGET_ARCH64)
9738 {
9739 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
9740 di_ftype_v8qi_v8qi);
9741 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
9742 di_ftype_v8qi_v8qi);
9743 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
9744 di_ftype_v8qi_v8qi);
9745 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
9746 di_ftype_v8qi_v8qi);
9747 }
9748 else
9749 {
9750 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
9751 si_ftype_v8qi_v8qi);
9752 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
9753 si_ftype_v8qi_v8qi);
9754 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
9755 si_ftype_v8qi_v8qi);
9756 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
9757 si_ftype_v8qi_v8qi);
9758 }
9759
9760 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
9761 sf_ftype_sf_sf);
9762 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
9763 df_ftype_df_df);
9764 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
9765 sf_ftype_sf_sf);
9766 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
9767 df_ftype_df_df);
9768 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
9769 sf_ftype_sf_sf);
9770 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
9771 df_ftype_df_df);
9772
9773 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
9774 di_ftype_di_di);
9775 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
9776 di_ftype_di_di);
9777 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
9778 di_ftype_di_di);
9779 }
9780 }
9781
9782 /* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */
9784
9785 static rtx
9786 sparc_expand_builtin (tree exp, rtx target,
9787 rtx subtarget ATTRIBUTE_UNUSED,
9788 enum machine_mode tmode ATTRIBUTE_UNUSED,
9789 int ignore ATTRIBUTE_UNUSED)
9790 {
9791 tree arg;
9792 call_expr_arg_iterator iter;
9793 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9794 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9795 rtx pat, op[4];
9796 int arg_count = 0;
9797 bool nonvoid;
9798
9799 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9800
9801 if (nonvoid)
9802 {
9803 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9804 if (!target
9805 || GET_MODE (target) != tmode
9806 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9807 op[0] = gen_reg_rtx (tmode);
9808 else
9809 op[0] = target;
9810 }
9811 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9812 {
9813 const struct insn_operand_data *insn_op;
9814 int idx;
9815
9816 if (arg == error_mark_node)
9817 return NULL_RTX;
9818
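      /* Operand 0 of the insn is the destination when the builtin returns
	 a value, so its source operands start at index 1; for a void
	 builtin there is no destination and they start at index 0.  */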
9819 arg_count++;
9820 idx = arg_count - !nonvoid;
9821 insn_op = &insn_data[icode].operand[idx];
9822 op[arg_count] = expand_normal (arg);
9823
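      /* A few patterns want a single-element vector mode (V1DI/V1SI)
	 where the builtin's prototype uses the plain scalar mode; view
	 the scalar as the vector in that case.  */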
9824 if (insn_op->mode == V1DImode
9825 && GET_MODE (op[arg_count]) == DImode)
9826 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
9827 else if (insn_op->mode == V1SImode
9828 && GET_MODE (op[arg_count]) == SImode)
9829 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
9830
9831 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
9832 insn_op->mode))
9833 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
9834 }
9835
9836 switch (arg_count)
9837 {
9838 case 0:
9839 pat = GEN_FCN (icode) (op[0]);
9840 break;
9841 case 1:
9842 if (nonvoid)
9843 pat = GEN_FCN (icode) (op[0], op[1]);
9844 else
9845 pat = GEN_FCN (icode) (op[1]);
9846 break;
9847 case 2:
9848 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9849 break;
9850 case 3:
9851 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9852 break;
9853 default:
9854 gcc_unreachable ();
9855 }
9856
9857 if (!pat)
9858 return NULL_RTX;
9859
9860 emit_insn (pat);
9861
9862 if (nonvoid)
9863 return op[0];
9864 else
9865 return const0_rtx;
9866 }
9867
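/* Compute one element of an fmul8x16-style multiply: the 8-bit operand E8
   times the 16-bit operand E16, rounded and scaled down by 256, keeping
   the upper 16 bits of the 24-bit product.  For example,
   (16 * 100 + 128) / 256 = 6.  */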
9868 static int
9869 sparc_vis_mul8x16 (int e8, int e16)
9870 {
9871 return (e8 * e16 + 128) / 256;
9872 }
9873
/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as
   specified by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists
   must be integer constants.  A tree list with the results of the
   multiplications is returned, and each element in the list is of
   INNER_TYPE.  */
9878
9879 static tree
9880 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
9881 {
9882 tree n_elts = NULL_TREE;
9883 int scale;
9884
9885 switch (fncode)
9886 {
9887 case CODE_FOR_fmul8x16_vis:
9888 for (; elts0 && elts1;
9889 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9890 {
9891 int val
9892 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9893 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
9894 n_elts = tree_cons (NULL_TREE,
9895 build_int_cst (inner_type, val),
9896 n_elts);
9897 }
9898 break;
9899
9900 case CODE_FOR_fmul8x16au_vis:
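      /* The "au" variant multiplies every element by the upper 16-bit
	 half of the second operand, which is the first element of the
	 constant list here.  */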
9901 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9902
9903 for (; elts0; elts0 = TREE_CHAIN (elts0))
9904 {
9905 int val
9906 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9907 scale);
9908 n_elts = tree_cons (NULL_TREE,
9909 build_int_cst (inner_type, val),
9910 n_elts);
9911 }
9912 break;
9913
9914 case CODE_FOR_fmul8x16al_vis:
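      /* The "al" variant multiplies every element by the lower 16-bit
	 half of the second operand, i.e. the second element of the
	 constant list.  */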
9915 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
9916
9917 for (; elts0; elts0 = TREE_CHAIN (elts0))
9918 {
9919 int val
9920 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9921 scale);
9922 n_elts = tree_cons (NULL_TREE,
9923 build_int_cst (inner_type, val),
9924 n_elts);
9925 }
9926 break;
9927
9928 default:
9929 gcc_unreachable ();
9930 }
9931
  return nreverse (n_elts);
}

/* Handle TARGET_FOLD_BUILTIN target hook.
9936 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
9937 result of the function call is ignored. NULL_TREE is returned if the
9938 function could not be folded. */
9939
9940 static tree
9941 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
9942 tree *args, bool ignore)
9943 {
9944 tree arg0, arg1, arg2;
9945 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
9946 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
9947
9948 if (ignore)
9949 {
9950 /* Note that a switch statement instead of the sequence of tests would
9951 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
9952 and that would yield multiple alternatives with identical values. */
9953 if (icode == CODE_FOR_alignaddrsi_vis
9954 || icode == CODE_FOR_alignaddrdi_vis
9955 || icode == CODE_FOR_wrgsr_vis
9956 || icode == CODE_FOR_bmasksi_vis
9957 || icode == CODE_FOR_bmaskdi_vis
9958 || icode == CODE_FOR_cmask8si_vis
9959 || icode == CODE_FOR_cmask8di_vis
9960 || icode == CODE_FOR_cmask16si_vis
9961 || icode == CODE_FOR_cmask16di_vis
9962 || icode == CODE_FOR_cmask32si_vis
9963 || icode == CODE_FOR_cmask32di_vis)
9964 ;
9965 else
9966 return build_zero_cst (rtype);
9967 }
9968
9969 switch (icode)
9970 {
9971 case CODE_FOR_fexpand_vis:
9972 arg0 = args[0];
9973 STRIP_NOPS (arg0);
9974
9975 if (TREE_CODE (arg0) == VECTOR_CST)
9976 {
9977 tree inner_type = TREE_TYPE (rtype);
9978 tree elts = TREE_VECTOR_CST_ELTS (arg0);
9979 tree n_elts = NULL_TREE;
9980
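	  /* fexpand converts each unsigned 8-bit pixel into a 16-bit
	     fixed-point value by shifting it left by 4 bits.  */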
9981 for (; elts; elts = TREE_CHAIN (elts))
9982 {
9983 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
9984 n_elts = tree_cons (NULL_TREE,
9985 build_int_cst (inner_type, val),
9986 n_elts);
9987 }
9988 return build_vector (rtype, nreverse (n_elts));
9989 }
9990 break;
9991
9992 case CODE_FOR_fmul8x16_vis:
9993 case CODE_FOR_fmul8x16au_vis:
9994 case CODE_FOR_fmul8x16al_vis:
9995 arg0 = args[0];
9996 arg1 = args[1];
9997 STRIP_NOPS (arg0);
9998 STRIP_NOPS (arg1);
9999
10000 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10001 {
10002 tree inner_type = TREE_TYPE (rtype);
10003 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10004 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10005 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
10006 elts1);
10007
10008 return build_vector (rtype, n_elts);
10009 }
10010 break;
10011
10012 case CODE_FOR_fpmerge_vis:
10013 arg0 = args[0];
10014 arg1 = args[1];
10015 STRIP_NOPS (arg0);
10016 STRIP_NOPS (arg1);
10017
10018 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10019 {
10020 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10021 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10022 tree n_elts = NULL_TREE;
10023
10024 for (; elts0 && elts1;
10025 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
10026 {
10027 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
10028 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
10029 }
10030
10031 return build_vector (rtype, nreverse (n_elts));
10032 }
10033 break;
10034
10035 case CODE_FOR_pdist_vis:
10036 arg0 = args[0];
10037 arg1 = args[1];
10038 arg2 = args[2];
10039 STRIP_NOPS (arg0);
10040 STRIP_NOPS (arg1);
10041 STRIP_NOPS (arg2);
10042
10043 if (TREE_CODE (arg0) == VECTOR_CST
10044 && TREE_CODE (arg1) == VECTOR_CST
10045 && TREE_CODE (arg2) == INTEGER_CST)
10046 {
10047 int overflow = 0;
10048 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
10049 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
10050 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10051 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10052
10053 for (; elts0 && elts1;
10054 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
10055 {
10056 unsigned HOST_WIDE_INT
10057 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
10058 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
10059 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
10060 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
10061
10062 unsigned HOST_WIDE_INT l;
10063 HOST_WIDE_INT h;
10064
10065 overflow |= neg_double (low1, high1, &l, &h);
10066 overflow |= add_double (low0, high0, l, h, &l, &h);
10067 if (h < 0)
10068 overflow |= neg_double (l, h, &l, &h);
10069
10070 overflow |= add_double (low, high, l, h, &low, &high);
10071 }
10072
10073 gcc_assert (overflow == 0);
10074
10075 return build_int_cst_wide (rtype, low, high);
10076 }
10077
10078 default:
10079 break;
10080 }
10081
10082 return NULL_TREE;
10083 }
10084 \f
10085 /* ??? This duplicates information provided to the compiler by the
10086 ??? scheduler description. Some day, teach genautomata to output
10087 ??? the latencies and then CSE will just use that. */
10088
10089 static bool
10090 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
10091 int *total, bool speed ATTRIBUTE_UNUSED)
10092 {
10093 enum machine_mode mode = GET_MODE (x);
10094 bool float_mode_p = FLOAT_MODE_P (mode);
10095
10096 switch (code)
10097 {
10098 case CONST_INT:
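      /* Constants in the 13-bit signed immediate range (simm13) can be
	 encoded directly in the instruction and are free.  */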
10099 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
10100 {
10101 *total = 0;
10102 return true;
10103 }
10104 /* FALLTHRU */
10105
10106 case HIGH:
10107 *total = 2;
10108 return true;
10109
10110 case CONST:
10111 case LABEL_REF:
10112 case SYMBOL_REF:
10113 *total = 4;
10114 return true;
10115
10116 case CONST_DOUBLE:
10117 if (GET_MODE (x) == VOIDmode
10118 && ((CONST_DOUBLE_HIGH (x) == 0
10119 && CONST_DOUBLE_LOW (x) < 0x1000)
10120 || (CONST_DOUBLE_HIGH (x) == -1
10121 && CONST_DOUBLE_LOW (x) < 0
10122 && CONST_DOUBLE_LOW (x) >= -0x1000)))
10123 *total = 0;
10124 else
10125 *total = 8;
10126 return true;
10127
10128 case MEM:
10129 /* If outer-code was a sign or zero extension, a cost
10130 of COSTS_N_INSNS (1) was already added in. This is
10131 why we are subtracting it back out. */
10132 if (outer_code == ZERO_EXTEND)
10133 {
10134 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
10135 }
10136 else if (outer_code == SIGN_EXTEND)
10137 {
10138 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
10139 }
10140 else if (float_mode_p)
10141 {
10142 *total = sparc_costs->float_load;
10143 }
10144 else
10145 {
10146 *total = sparc_costs->int_load;
10147 }
10148
10149 return true;
10150
10151 case PLUS:
10152 case MINUS:
10153 if (float_mode_p)
10154 *total = sparc_costs->float_plusminus;
10155 else
10156 *total = COSTS_N_INSNS (1);
10157 return false;
10158
10159 case FMA:
10160 {
10161 rtx sub;
10162
10163 gcc_assert (float_mode_p);
10164 *total = sparc_costs->float_mul;
10165
10166 sub = XEXP (x, 0);
10167 if (GET_CODE (sub) == NEG)
10168 sub = XEXP (sub, 0);
10169 *total += rtx_cost (sub, FMA, 0, speed);
10170
10171 sub = XEXP (x, 2);
10172 if (GET_CODE (sub) == NEG)
10173 sub = XEXP (sub, 0);
10174 *total += rtx_cost (sub, FMA, 2, speed);
10175 return true;
10176 }
10177
10178 case MULT:
10179 if (float_mode_p)
10180 *total = sparc_costs->float_mul;
10181 else if (! TARGET_HARD_MUL)
10182 *total = COSTS_N_INSNS (25);
10183 else
10184 {
10185 int bit_cost;
10186
10187 bit_cost = 0;
10188 if (sparc_costs->int_mul_bit_factor)
10189 {
10190 int nbits;
10191
10192 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
10193 {
10194 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
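	      /* Count the set bits by clearing the lowest set bit on each
		 iteration (Kernighan's trick): one pass per 1 bit.  */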
10195 for (nbits = 0; value != 0; value &= value - 1)
10196 nbits++;
10197 }
10198 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10199 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10200 {
10201 rtx x1 = XEXP (x, 1);
10202 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10203 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10204
10205 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10206 nbits++;
10207 for (; value2 != 0; value2 &= value2 - 1)
10208 nbits++;
10209 }
10210 else
10211 nbits = 7;
10212
10213 if (nbits < 3)
10214 nbits = 3;
10215 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10216 bit_cost = COSTS_N_INSNS (bit_cost);
10217 }
10218
10219 if (mode == DImode)
10220 *total = sparc_costs->int_mulX + bit_cost;
10221 else
10222 *total = sparc_costs->int_mul + bit_cost;
10223 }
10224 return false;
10225
10226 case ASHIFT:
10227 case ASHIFTRT:
10228 case LSHIFTRT:
10229 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10230 return false;
10231
10232 case DIV:
10233 case UDIV:
10234 case MOD:
10235 case UMOD:
10236 if (float_mode_p)
10237 {
10238 if (mode == DFmode)
10239 *total = sparc_costs->float_div_df;
10240 else
10241 *total = sparc_costs->float_div_sf;
10242 }
10243 else
10244 {
10245 if (mode == DImode)
10246 *total = sparc_costs->int_divX;
10247 else
10248 *total = sparc_costs->int_div;
10249 }
10250 return false;
10251
10252 case NEG:
10253 if (! float_mode_p)
10254 {
10255 *total = COSTS_N_INSNS (1);
10256 return false;
10257 }
10258 /* FALLTHRU */
10259
10260 case ABS:
10261 case FLOAT:
10262 case UNSIGNED_FLOAT:
10263 case FIX:
10264 case UNSIGNED_FIX:
10265 case FLOAT_EXTEND:
10266 case FLOAT_TRUNCATE:
10267 *total = sparc_costs->float_move;
10268 return false;
10269
10270 case SQRT:
10271 if (mode == DFmode)
10272 *total = sparc_costs->float_sqrt_df;
10273 else
10274 *total = sparc_costs->float_sqrt_sf;
10275 return false;
10276
10277 case COMPARE:
10278 if (float_mode_p)
10279 *total = sparc_costs->float_cmp;
10280 else
10281 *total = COSTS_N_INSNS (1);
10282 return false;
10283
10284 case IF_THEN_ELSE:
10285 if (float_mode_p)
10286 *total = sparc_costs->float_cmove;
10287 else
10288 *total = sparc_costs->int_cmove;
10289 return false;
10290
10291 case IOR:
10292 /* Handle the NAND vector patterns. */
10293 if (sparc_vector_mode_supported_p (GET_MODE (x))
10294 && GET_CODE (XEXP (x, 0)) == NOT
10295 && GET_CODE (XEXP (x, 1)) == NOT)
10296 {
10297 *total = COSTS_N_INSNS (1);
10298 return true;
10299 }
10300 else
10301 return false;
10302
10303 default:
10304 return false;
10305 }
10306 }
10307
10308 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10309
10310 static inline bool
10311 general_or_i64_p (reg_class_t rclass)
10312 {
10313 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10314 }
10315
10316 /* Implement TARGET_REGISTER_MOVE_COST. */
10317
10318 static int
10319 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10320 reg_class_t from, reg_class_t to)
10321 {
10322 bool need_memory = false;
10323
10324 if (from == FPCC_REGS || to == FPCC_REGS)
10325 need_memory = true;
10326 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10327 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
10328 {
10329 if (TARGET_VIS3)
10330 {
10331 int size = GET_MODE_SIZE (mode);
10332 if (size == 8 || size == 4)
10333 {
10334 if (! TARGET_ARCH32 || size == 4)
10335 return 4;
10336 else
10337 return 6;
10338 }
10339 }
10340 need_memory = true;
10341 }
10342
10343 if (need_memory)
10344 {
10345 if (sparc_cpu == PROCESSOR_ULTRASPARC
10346 || sparc_cpu == PROCESSOR_ULTRASPARC3
10347 || sparc_cpu == PROCESSOR_NIAGARA
10348 || sparc_cpu == PROCESSOR_NIAGARA2
10349 || sparc_cpu == PROCESSOR_NIAGARA3
10350 || sparc_cpu == PROCESSOR_NIAGARA4)
10351 return 12;
10352
10353 return 6;
10354 }
10355
10356 return 2;
10357 }
10358
10359 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10360 This is achieved by means of a manual dynamic stack space allocation in
10361 the current frame. We make the assumption that SEQ doesn't contain any
10362 function calls, with the possible exception of calls to the GOT helper. */
10363
10364 static void
10365 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10366 {
10367 /* We must preserve the lowest 16 words for the register save area. */
10368 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10369 /* We really need only 2 words of fresh stack space. */
10370 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10371
10372 rtx slot
10373 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
10374 SPARC_STACK_BIAS + offset));
10375
10376 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
10377 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10378 if (reg2)
10379 emit_insn (gen_rtx_SET (VOIDmode,
10380 adjust_address (slot, word_mode, UNITS_PER_WORD),
10381 reg2));
10382 emit_insn (seq);
10383 if (reg2)
10384 emit_insn (gen_rtx_SET (VOIDmode,
10385 reg2,
10386 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10387 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10388 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10389 }
10390
10391 /* Output the assembler code for a thunk function. THUNK_DECL is the
10392 declaration for the thunk function itself, FUNCTION is the decl for
10393 the target function. DELTA is an immediate constant offset to be
10394 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10395 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10396
10397 static void
10398 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10399 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10400 tree function)
10401 {
10402 rtx this_rtx, insn, funexp;
10403 unsigned int int_arg_first;
10404
10405 reload_completed = 1;
10406 epilogue_completed = 1;
10407
10408 emit_note (NOTE_INSN_PROLOGUE_END);
10409
10410 if (TARGET_FLAT)
10411 {
10412 sparc_leaf_function_p = 1;
10413
10414 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10415 }
10416 else if (flag_delayed_branch)
10417 {
10418 /* We will emit a regular sibcall below, so we need to instruct
10419 output_sibcall that we are in a leaf function. */
10420 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
10421
10422 /* This will cause final.c to invoke leaf_renumber_regs so we
10423 must behave as if we were in a not-yet-leafified function. */
10424 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
10425 }
10426 else
10427 {
10428 /* We will emit the sibcall manually below, so we will need to
10429 manually spill non-leaf registers. */
10430 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
10431
10432 /* We really are in a leaf function. */
10433 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10434 }
10435
10436 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10437 returns a structure, the structure return pointer is there instead. */
10438 if (TARGET_ARCH64
10439 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10440 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
10441 else
10442 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
10443
10444 /* Add DELTA. When possible use a plain add, otherwise load it into
10445 a register first. */
10446 if (delta)
10447 {
10448 rtx delta_rtx = GEN_INT (delta);
10449
10450 if (! SPARC_SIMM13_P (delta))
10451 {
10452 rtx scratch = gen_rtx_REG (Pmode, 1);
10453 emit_move_insn (scratch, delta_rtx);
10454 delta_rtx = scratch;
10455 }
10456
10457 /* THIS_RTX += DELTA. */
10458 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
10459 }
10460
10461 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10462 if (vcall_offset)
10463 {
10464 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10465 rtx scratch = gen_rtx_REG (Pmode, 1);
10466
10467 gcc_assert (vcall_offset < 0);
10468
10469 /* SCRATCH = *THIS_RTX. */
10470 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
10471
10472 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10473 may not have any available scratch register at this point. */
10474 if (SPARC_SIMM13_P (vcall_offset))
10475 ;
10476 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
10477 else if (! fixed_regs[5]
10478 /* The below sequence is made up of at least 2 insns,
10479 while the default method may need only one. */
10480 && vcall_offset < -8192)
10481 {
10482 rtx scratch2 = gen_rtx_REG (Pmode, 5);
10483 emit_move_insn (scratch2, vcall_offset_rtx);
10484 vcall_offset_rtx = scratch2;
10485 }
10486 else
10487 {
10488 rtx increment = GEN_INT (-4096);
10489
10490 /* VCALL_OFFSET is a negative number whose typical range can be
10491 estimated as -32768..0 in 32-bit mode. In almost all cases
10492 it is therefore cheaper to emit multiple add insns than
10493 spilling and loading the constant into a register (at least
10494 6 insns). */
10495 while (! SPARC_SIMM13_P (vcall_offset))
10496 {
10497 emit_insn (gen_add2_insn (scratch, increment));
10498 vcall_offset += 4096;
10499 }
10500 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
10501 }
10502
10503 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10504 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
10505 gen_rtx_PLUS (Pmode,
10506 scratch,
10507 vcall_offset_rtx)));
10508
10509 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10510 emit_insn (gen_add2_insn (this_rtx, scratch));
10511 }
10512
10513 /* Generate a tail call to the target function. */
10514 if (! TREE_USED (function))
10515 {
10516 assemble_external (function);
10517 TREE_USED (function) = 1;
10518 }
10519 funexp = XEXP (DECL_RTL (function), 0);
10520
10521 if (flag_delayed_branch)
10522 {
10523 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10524 insn = emit_call_insn (gen_sibcall (funexp));
10525 SIBLING_CALL_P (insn) = 1;
10526 }
10527 else
10528 {
10529 /* The hoops we have to jump through in order to generate a sibcall
10530 without using delay slots... */
10531 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
10532
10533 if (flag_pic)
10534 {
10535 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
10536 start_sequence ();
10537 load_got_register (); /* clobbers %o7 */
10538 scratch = sparc_legitimize_pic_address (funexp, scratch);
10539 seq = get_insns ();
10540 end_sequence ();
10541 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
10542 }
10543 else if (TARGET_ARCH32)
10544 {
10545 emit_insn (gen_rtx_SET (VOIDmode,
10546 scratch,
10547 gen_rtx_HIGH (SImode, funexp)));
10548 emit_insn (gen_rtx_SET (VOIDmode,
10549 scratch,
10550 gen_rtx_LO_SUM (SImode, scratch, funexp)));
10551 }
10552 else /* TARGET_ARCH64 */
10553 {
10554 switch (sparc_cmodel)
10555 {
10556 case CM_MEDLOW:
10557 case CM_MEDMID:
10558 /* The destination can serve as a temporary. */
10559 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
10560 break;
10561
10562 case CM_MEDANY:
10563 case CM_EMBMEDANY:
10564 /* The destination cannot serve as a temporary. */
10565 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
10566 start_sequence ();
10567 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
10568 seq = get_insns ();
10569 end_sequence ();
10570 emit_and_preserve (seq, spill_reg, 0);
10571 break;
10572
10573 default:
10574 gcc_unreachable ();
10575 }
10576 }
10577
10578 emit_jump_insn (gen_indirect_jump (scratch));
10579 }
10580
10581 emit_barrier ();
10582
10583 /* Run just enough of rest_of_compilation to get the insns emitted.
10584 There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
10586 assemble_start_function and assemble_end_function. */
10587 insn = get_insns ();
10588 insn_locators_alloc ();
10589 shorten_branches (insn);
10590 final_start_function (insn, file, 1);
10591 final (insn, file, 1);
10592 final_end_function ();
10593
10594 reload_completed = 0;
10595 epilogue_completed = 0;
10596 }
10597
10598 /* Return true if sparc_output_mi_thunk would be able to output the
10599 assembler code for the thunk function specified by the arguments
10600 it is passed, and false otherwise. */
10601 static bool
10602 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
10603 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
10604 HOST_WIDE_INT vcall_offset,
10605 const_tree function ATTRIBUTE_UNUSED)
10606 {
10607 /* Bound the loop used in the default method above. */
10608 return (vcall_offset >= -32768 || ! fixed_regs[5]);
10609 }
10610
/* We use the machine-specific reorg pass to enable workarounds for errata.  */
10612
10613 static void
10614 sparc_reorg (void)
10615 {
10616 rtx insn, next;
10617
10618 /* The only erratum we handle for now is that of the AT697F processor. */
10619 if (!sparc_fix_at697f)
10620 return;
10621
10622 /* We need to have the (essentially) final form of the insn stream in order
10623 to properly detect the various hazards. Run delay slot scheduling. */
10624 if (optimize > 0 && flag_delayed_branch)
10625 dbr_schedule (get_insns ());
10626
10627 /* Now look for specific patterns in the insn stream. */
10628 for (insn = get_insns (); insn; insn = next)
10629 {
10630 bool insert_nop = false;
10631 rtx set;
10632
10633 /* Look for a single-word load into an odd-numbered FP register. */
10634 if (NONJUMP_INSN_P (insn)
10635 && (set = single_set (insn)) != NULL_RTX
10636 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
10637 && MEM_P (SET_SRC (set))
10638 && REG_P (SET_DEST (set))
10639 && REGNO (SET_DEST (set)) > 31
10640 && REGNO (SET_DEST (set)) % 2 != 0)
10641 {
10642 /* The wrong dependency is on the enclosing double register. */
10643 unsigned int x = REGNO (SET_DEST (set)) - 1;
10644 unsigned int src1, src2, dest;
10645 int code;
10646
10647 /* If the insn has a delay slot, then it cannot be problematic. */
10648 next = next_active_insn (insn);
10649 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
10650 code = -1;
10651 else
10652 {
10653 extract_insn (next);
10654 code = INSN_CODE (next);
10655 }
10656
10657 switch (code)
10658 {
10659 case CODE_FOR_adddf3:
10660 case CODE_FOR_subdf3:
10661 case CODE_FOR_muldf3:
10662 case CODE_FOR_divdf3:
10663 dest = REGNO (recog_data.operand[0]);
10664 src1 = REGNO (recog_data.operand[1]);
10665 src2 = REGNO (recog_data.operand[2]);
10666 if (src1 != src2)
10667 {
10668 /* Case [1-4]:
10669 ld [address], %fx+1
10670 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10671 if ((src1 == x || src2 == x)
10672 && (dest == src1 || dest == src2))
10673 insert_nop = true;
10674 }
10675 else
10676 {
10677 /* Case 5:
10678 ld [address], %fx+1
10679 FPOPd %fx, %fx, %fx */
10680 if (src1 == x
10681 && dest == src1
10682 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
10683 insert_nop = true;
10684 }
10685 break;
10686
10687 case CODE_FOR_sqrtdf2:
10688 dest = REGNO (recog_data.operand[0]);
10689 src1 = REGNO (recog_data.operand[1]);
10690 /* Case 6:
10691 ld [address], %fx+1
10692 fsqrtd %fx, %fx */
10693 if (src1 == x && dest == src1)
10694 insert_nop = true;
10695 break;
10696
10697 default:
10698 break;
10699 }
10700 }
10701 else
10702 next = NEXT_INSN (insn);
10703
10704 if (insert_nop)
10705 emit_insn_after (gen_nop (), insn);
10706 }
10707 }
10708
10709 /* How to allocate a 'struct machine_function'. */
10710
10711 static struct machine_function *
10712 sparc_init_machine_status (void)
10713 {
10714 return ggc_alloc_cleared_machine_function ();
10715 }
10716
10717 /* Locate some local-dynamic symbol still in use by this function
10718 so that we can print its name in local-dynamic base patterns. */
10719
10720 static const char *
10721 get_some_local_dynamic_name (void)
10722 {
10723 rtx insn;
10724
10725 if (cfun->machine->some_ld_name)
10726 return cfun->machine->some_ld_name;
10727
10728 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10729 if (INSN_P (insn)
10730 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10731 return cfun->machine->some_ld_name;
10732
10733 gcc_unreachable ();
10734 }
10735
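/* Helper for get_some_local_dynamic_name: called via for_each_rtx, stop
   the walk at the first SYMBOL_REF with local-dynamic TLS model and
   record its name.  */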
10736 static int
10737 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10738 {
10739 rtx x = *px;
10740
10741 if (x
10742 && GET_CODE (x) == SYMBOL_REF
10743 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10744 {
10745 cfun->machine->some_ld_name = XSTR (x, 0);
10746 return 1;
10747 }
10748
10749 return 0;
10750 }
10751
10752 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10753 We need to emit DTP-relative relocations. */
10754
10755 static void
10756 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
10757 {
10758 switch (size)
10759 {
10760 case 4:
10761 fputs ("\t.word\t%r_tls_dtpoff32(", file);
10762 break;
10763 case 8:
10764 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
10765 break;
10766 default:
10767 gcc_unreachable ();
10768 }
10769 output_addr_const (file, x);
10770 fputs (")", file);
10771 }
10772
10773 /* Do whatever processing is required at the end of a file. */
10774
10775 static void
10776 sparc_file_end (void)
10777 {
10778 /* If we need to emit the special GOT helper function, do so now. */
10779 if (got_helper_rtx)
10780 {
10781 const char *name = XSTR (got_helper_rtx, 0);
10782 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
10783 #ifdef DWARF2_UNWIND_INFO
10784 bool do_cfi;
10785 #endif
10786
10787 if (USE_HIDDEN_LINKONCE)
10788 {
10789 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
10790 get_identifier (name),
10791 build_function_type_list (void_type_node,
10792 NULL_TREE));
10793 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
10794 NULL_TREE, void_type_node);
10795 TREE_STATIC (decl) = 1;
10796 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
10797 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
10798 DECL_VISIBILITY_SPECIFIED (decl) = 1;
10799 resolve_unique_section (decl, 0, flag_function_sections);
10800 allocate_struct_function (decl, true);
10801 cfun->is_thunk = 1;
10802 current_function_decl = decl;
10803 init_varasm_status ();
10804 assemble_start_function (decl, name);
10805 }
10806 else
10807 {
10808 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10809 switch_to_section (text_section);
10810 if (align > 0)
10811 ASM_OUTPUT_ALIGN (asm_out_file, align);
10812 ASM_OUTPUT_LABEL (asm_out_file, name);
10813 }
10814
10815 #ifdef DWARF2_UNWIND_INFO
10816 do_cfi = dwarf2out_do_cfi_asm ();
10817 if (do_cfi)
10818 fprintf (asm_out_file, "\t.cfi_startproc\n");
10819 #endif
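      /* The helper adds %o7 (the address of the call site) into the GOT
	 register; with delayed branches the add executes in the delay
	 slot of the return jmp, otherwise the add comes first and the
	 delay slot holds a nop.  */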
10820 if (flag_delayed_branch)
10821 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10822 reg_name, reg_name);
10823 else
10824 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10825 reg_name, reg_name);
10826 #ifdef DWARF2_UNWIND_INFO
10827 if (do_cfi)
10828 fprintf (asm_out_file, "\t.cfi_endproc\n");
10829 #endif
10830 }
10831
10832 if (NEED_INDICATE_EXEC_STACK)
10833 file_end_indicate_exec_stack ();
10834
10835 #ifdef TARGET_SOLARIS
10836 solaris_file_end ();
10837 #endif
10838 }
10839
10840 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10841 /* Implement TARGET_MANGLE_TYPE. */
10842
10843 static const char *
10844 sparc_mangle_type (const_tree type)
10845 {
10846 if (!TARGET_64BIT
10847 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10848 && TARGET_LONG_DOUBLE_128)
10849 return "g";
10850
10851 /* For all other types, use normal C++ mangling. */
10852 return NULL;
10853 }
10854 #endif
10855
/* Expand code to perform an 8-bit or 16-bit compare and swap by doing
   a 32-bit compare and swap on the word containing the byte or
   half-word.  */
10858
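/* A rough sketch of the generated sequence, in C-like pseudocode (CAS
   stands for the 32-bit compare-and-swap instruction; the shift reflects
   SPARC's big-endian byte numbering):

     word  = addr & -4;
     shift = ((addr & 3) ^ (QImode ? 3 : 2)) * 8;
     mask  = (QImode ? 0xff : 0xffff) << shift;
     val   = *word & ~mask;	      // the bytes we must not modify
     oldv  = (oldval << shift) & mask;
     newv  = (newval << shift) & mask;
   loop:
     res = CAS (word, oldv | val, newv | val);
     if (res == (oldv | val))
       goto done;		      // swap succeeded
     if ((res & ~mask) != val)
       { val = res & ~mask; goto loop; }  // only neighbors changed, retry
   done:
     result = (res & mask) >> shift;  */
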
10859 void
10860 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
10861 {
10862 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10863 rtx addr = gen_reg_rtx (Pmode);
10864 rtx off = gen_reg_rtx (SImode);
10865 rtx oldv = gen_reg_rtx (SImode);
10866 rtx newv = gen_reg_rtx (SImode);
10867 rtx oldvalue = gen_reg_rtx (SImode);
10868 rtx newvalue = gen_reg_rtx (SImode);
10869 rtx res = gen_reg_rtx (SImode);
10870 rtx resv = gen_reg_rtx (SImode);
10871 rtx memsi, val, mask, end_label, loop_label, cc;
10872
10873 emit_insn (gen_rtx_SET (VOIDmode, addr,
10874 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10875
10876 if (Pmode != SImode)
10877 addr1 = gen_lowpart (SImode, addr1);
10878 emit_insn (gen_rtx_SET (VOIDmode, off,
10879 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10880
10881 memsi = gen_rtx_MEM (SImode, addr);
10882 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10883 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10884
10885 val = force_reg (SImode, memsi);
10886
10887 emit_insn (gen_rtx_SET (VOIDmode, off,
10888 gen_rtx_XOR (SImode, off,
10889 GEN_INT (GET_MODE (mem) == QImode
10890 ? 3 : 2))));
10891
10892 emit_insn (gen_rtx_SET (VOIDmode, off,
10893 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10894
10895 if (GET_MODE (mem) == QImode)
10896 mask = force_reg (SImode, GEN_INT (0xff));
10897 else
10898 mask = force_reg (SImode, GEN_INT (0xffff));
10899
10900 emit_insn (gen_rtx_SET (VOIDmode, mask,
10901 gen_rtx_ASHIFT (SImode, mask, off)));
10902
10903 emit_insn (gen_rtx_SET (VOIDmode, val,
10904 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10905 val)));
10906
10907 oldval = gen_lowpart (SImode, oldval);
10908 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10909 gen_rtx_ASHIFT (SImode, oldval, off)));
10910
10911 newval = gen_lowpart_common (SImode, newval);
10912 emit_insn (gen_rtx_SET (VOIDmode, newv,
10913 gen_rtx_ASHIFT (SImode, newval, off)));
10914
10915 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10916 gen_rtx_AND (SImode, oldv, mask)));
10917
10918 emit_insn (gen_rtx_SET (VOIDmode, newv,
10919 gen_rtx_AND (SImode, newv, mask)));
10920
10921 end_label = gen_label_rtx ();
10922 loop_label = gen_label_rtx ();
10923 emit_label (loop_label);
10924
10925 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
10926 gen_rtx_IOR (SImode, oldv, val)));
10927
10928 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
10929 gen_rtx_IOR (SImode, newv, val)));
10930
10931 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
10932
10933 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
10934
10935 emit_insn (gen_rtx_SET (VOIDmode, resv,
10936 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10937 res)));
10938
10939 cc = gen_compare_reg_1 (NE, resv, val);
10940 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
10941
10942 /* Use cbranchcc4 to separate the compare and branch! */
10943 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
10944 cc, const0_rtx, loop_label));
10945
10946 emit_label (end_label);
10947
10948 emit_insn (gen_rtx_SET (VOIDmode, res,
10949 gen_rtx_AND (SImode, res, mask)));
10950
10951 emit_insn (gen_rtx_SET (VOIDmode, res,
10952 gen_rtx_LSHIFTRT (SImode, res, off)));
10953
10954 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
10955 }
10956
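/* Lower the vector-permute selector SEL for mode VMODE into the byte-index
   mask used by the VIS2 bshuffle instruction, emitting the bmask insn that
   performs the final addition and deposits the mask in the GSR register.  */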
10957 void
10958 sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
10959 {
10960 rtx t_1, t_2, t_3;
10961
10962 sel = gen_lowpart (DImode, sel);
10963 switch (vmode)
10964 {
10965 case V2SImode:
10966 /* inp = xxxxxxxAxxxxxxxB */
10967 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
10968 NULL_RTX, 1, OPTAB_DIRECT);
10969 /* t_1 = ....xxxxxxxAxxx. */
10970 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
10971 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
10972 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
10973 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
10974 /* sel = .......B */
10975 /* t_1 = ...A.... */
10976 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
10977 /* sel = ...A...B */
10978 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
10979 /* sel = AAAABBBB * 4 */
10980 t_1 = force_reg (SImode, GEN_INT (0x01230123));
10981 /* sel = { A*4, A*4+1, A*4+2, ... } */
10982 break;
10983
10984 case V4HImode:
10985 /* inp = xxxAxxxBxxxCxxxD */
10986 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
10987 NULL_RTX, 1, OPTAB_DIRECT);
10988 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
10989 NULL_RTX, 1, OPTAB_DIRECT);
10990 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
10991 NULL_RTX, 1, OPTAB_DIRECT);
10992 /* t_1 = ..xxxAxxxBxxxCxx */
10993 /* t_2 = ....xxxAxxxBxxxC */
10994 /* t_3 = ......xxxAxxxBxx */
10995 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
10996 GEN_INT (0x07),
10997 NULL_RTX, 1, OPTAB_DIRECT);
10998 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
10999 GEN_INT (0x0700),
11000 NULL_RTX, 1, OPTAB_DIRECT);
11001 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
11002 GEN_INT (0x070000),
11003 NULL_RTX, 1, OPTAB_DIRECT);
11004 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
11005 GEN_INT (0x07000000),
11006 NULL_RTX, 1, OPTAB_DIRECT);
11007 /* sel = .......D */
11008 /* t_1 = .....C.. */
11009 /* t_2 = ...B.... */
11010 /* t_3 = .A...... */
11011 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
11012 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
11013 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
11014 /* sel = .A.B.C.D */
11015 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
11016 /* sel = AABBCCDD * 2 */
11017 t_1 = force_reg (SImode, GEN_INT (0x01010101));
11018 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
11019 break;
11020
11021 case V8QImode:
11022 /* input = xAxBxCxDxExFxGxH */
11023 sel = expand_simple_binop (DImode, AND, sel,
11024 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
11025 | 0x0f0f0f0f),
11026 NULL_RTX, 1, OPTAB_DIRECT);
11027 /* sel = .A.B.C.D.E.F.G.H */
11028 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
11029 NULL_RTX, 1, OPTAB_DIRECT);
11030 /* t_1 = ..A.B.C.D.E.F.G. */
11031 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11032 NULL_RTX, 1, OPTAB_DIRECT);
11033 /* sel = .AABBCCDDEEFFGGH */
11034 sel = expand_simple_binop (DImode, AND, sel,
11035 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
11036 | 0xff00ff),
11037 NULL_RTX, 1, OPTAB_DIRECT);
11038 /* sel = ..AB..CD..EF..GH */
11039 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11040 NULL_RTX, 1, OPTAB_DIRECT);
11041 /* t_1 = ....AB..CD..EF.. */
11042 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11043 NULL_RTX, 1, OPTAB_DIRECT);
11044 /* sel = ..ABABCDCDEFEFGH */
11045 sel = expand_simple_binop (DImode, AND, sel,
11046 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
11047 NULL_RTX, 1, OPTAB_DIRECT);
11048 /* sel = ....ABCD....EFGH */
11049 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11050 NULL_RTX, 1, OPTAB_DIRECT);
11051 /* t_1 = ........ABCD.... */
11052 sel = gen_lowpart (SImode, sel);
11053 t_1 = gen_lowpart (SImode, t_1);
11054 break;
11055
11056 default:
11057 gcc_unreachable ();
11058 }
11059
11060 /* Always perform the final addition/merge within the bmask insn. */
11061 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
11062 }
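
/* A worked example for the V4HImode case above (illustrative only): for
   the selector { 1, 2, 3, 0 }, i.e. A=1 B=2 C=3 D=0, the shift/mask steps
   gather the indices into .A.B.C.D = 0x01020300; the multiply by 0x22
   doubles and duplicates each index, giving 0x22446600; and the addition
   of 0x01010101 done by the bmask insn yields 0x23456701, i.e. the eight
   4-bit byte indices { 2,3, 4,5, 6,7, 0,1 } that a subsequent bshuffle
   needs in order to pick out halfwords 1, 2, 3 and 0.  */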

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(current_function_is_leaf && only_leaf_regs_used ());
}
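
/* For instance (illustrative), a function such as

     void f (int n)
     {
       char *p = __builtin_alloca (n);
       g (p);
     }

   modifies %sp at run time, so cfun->calls_alloca is set and the frame
   pointer is forced by the first test above.  */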

/* The way this is structured, we can't eliminate the soft frame pointer
   (SFP) in favor of the stack pointer (SP) if the frame pointer is
   required: we want to use the SFP->HFP (hard frame pointer) elimination
   in that case.  But the test in update_eliminables doesn't know that we
   assume below that only the SFP->HFP elimination is performed then.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}
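
/* Concretely, the eliminations in play are the pairs SFP -> SP and
   SFP -> HFP from the target's ELIMINABLE_REGS; the test above lets any
   elimination into HFP through, and everything else only when no frame
   pointer is required.  */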

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}
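
/* For reference: in the 64-bit ABI the value kept in %fp is the actual
   frame address minus the stack bias (2047), so frame accesses are made
   through %fp + 2047 + offset.  By returning hard_frame_pointer_rtx
   rather than a bias-adjusted value, the builtin setjmp buffer records
   the raw register value, which the receiver can restore into %fp
   unchanged.  */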

/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  In particular, if
   !TARGET_FPU, make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5,
     then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
           regno <= SPARC_LAST_V9_FP_REG;
           regno++)
        fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
           regno <= SPARC_LAST_V9_FCC_REG;
           regno++)
        fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
        fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2,
     then honor it.  Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs[SPARC_GSR_REG] = 1;
}
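
/* A note on the tests against 2 above: entries whose status depends on
   target flags (such as %g2-%g5) start out as the placeholder value 2 in
   sparc.h's FIXED_REGISTERS table.  An explicit -ffixed-REG or
   -fcall-used-REG option overwrites the entry with 1 or 0 before this
   hook runs, so an entry still equal to 2 means the user expressed no
   preference and the configuration default applies.  */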

/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with an r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with an r/F constraint.
   - Try to reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  enum machine_mode mode = GET_MODE (x);
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
          || rclass == GENERAL_OR_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS
          || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
          || (mode == TFmode && ! const_zero_operand (x, mode)))
        return NO_REGS;

      if (GET_MODE_CLASS (mode) == MODE_INT)
        return GENERAL_REGS;

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          if (! FP_REG_CLASS_P (rclass)
              || !(const_zero_operand (x, mode)
                   || const_all_ones_operand (x, mode)))
            return NO_REGS;
        }
    }

  if (TARGET_VIS3
      && ! TARGET_ARCH64
      && (rclass == EXTRA_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS))
    {
      int regno = true_regnum (x);

      if (SPARC_INT_REG_P (regno))
        return (rclass == EXTRA_FP_REGS
                ? FP_REGS : GENERAL_OR_FP_REGS);
    }

  return rclass;
}
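
/* For example (illustrative): a request to reload (const_int 5) into
   FP_REGS returns NO_REGS above, since no SPARC instruction can load an
   immediate into a float register; the reload pass then materializes the
   constant in a general register or in memory instead.  */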

/* Output the assembler code for a v8plus 64-bit multiply: NAME is the
   multiply mnemonic, and the 64-bit operands, living in pairs of 32-bit
   registers, are first assembled into single 64-bit registers with
   sllx/or before the multiply and split back afterwards with srlx.  */

const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *name)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%2, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%2, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%3, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", name);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", name);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}
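
/* For instance (illustrative, assuming NAME is "mulx" and all operands
   are distinct registers), the final arm above emits:

     sllx  %H1, 32, %3    ! pack the two halves of operand 1
     sllx  %H2, 32, %4    ! pack the two halves of operand 2
     or    %L1, %3, %3
     or    %L2, %4, %4
     mulx  %3, %4, %3     ! single 64-bit multiply
     srlx  %3, 32, %H0    ! split the product back into a register pair
     mov   %3, %L0  */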

/* Expand the initialization of vector TARGET from the elements in VALS.  */

void
sparc_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  rtx mem;

  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++;
    }

  /* If every element is constant, emit a single vector constant move.  */
  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  /* Otherwise, fall back to storing the elements into a stack temporary
     one by one and loading the whole vector back.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
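
/* E.g. (illustrative): for a V4HImode TARGET and VALS { a, b, 3, 4 } with
   non-constant a and b, n_var is 2, so the four halfwords are stored one
   by one into an 8-byte stack slot and the vector is then loaded back
   with a single move.  */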

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}
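
/* For example (illustrative): on 32-bit VIS3, moving a DImode value
   between an integer register pair and %f32 (EXTRA_FP_REGS) cannot be
   done directly, because the 4-byte int<->fp moves used to split the
   value only address FP_REGS; the hook therefore requests an FP_REGS
   intermediate and charges a small extra cost.  */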

#include "gt-sparc.h"