/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "gimple.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "opts.h"
/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;

  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     not variable.  (A sketch of this computation follows the
     struct definition below.)  */
  const int int_mul_bit_factor;

  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
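
/* A minimal sketch of the variable multiply-cost formula above, for
   illustration only (not part of the build).  It assumes GCC's usual
   COSTS_N_INSNS scaling from rtl.h and hypothetical highest_set_bit /
   highest_clear_bit helpers; the real computation lives in the rtx
   cost hooks below.  */
#if 0
static int
example_int_mul_cost (const struct processor_costs *costs,
                      HOST_WIDE_INT rs1, bool xversion)
{
  int highest_bit;
  int base = xversion ? costs->int_mulX : costs->int_mul;

  /* A zero bit factor means the multiply cost is fixed.  */
  if (costs->int_mul_bit_factor == 0)
    return base;

  if (rs1 < 0)
    highest_bit = highest_clear_bit (rs1);   /* hypothetical helper */
  else
    highest_bit = highest_set_bit (rs1);     /* hypothetical helper */
  if (highest_bit < 3)
    highest_bit = 3;

  return base + (highest_bit - 3) / costs->int_mul_bit_factor;
}
#endif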

static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether anything branches between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
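
/* A minimal sketch (not built) of how this table is meant to be read:
   index by hard register number; a nonnegative entry is the register
   actually used when the function is emitted as a leaf function, and
   -1 marks a register that must not appear.  E.g. the incoming
   registers %i0-%i5 (24-29) remap to the outgoing %o0-%o5 (8-13)
   because no register window is allocated.  The hard register
   numbering is assumed from sparc.h.  */
#if 0
static int
example_leaf_renumber (int regno)
{
  int mapped = leaf_reg_remap[regno];
  gcc_assert (mapped >= 0);  /* -1 regs are rejected beforehand.  */
  return mapped;
}
#endif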

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};

struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size cfun->machine->frame_size
#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
#define sparc_frame_base_reg cfun->machine->frame_base_reg
#define sparc_frame_base_offset cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
                                                 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static void sparc_handle_vis_mul8x16 (tree *, int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, const_tree);
static void sparc_reorg (void);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
                                     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
                                                      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (cumulative_args_t,
                                     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
                                        enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
                                 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
                               enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
                                        enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
                                                 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
                                    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
                                           enum machine_mode,
                                           secondary_reload_info *);
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
      do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

struct gcc_target targetm = TARGET_INITIALIZER;

static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { "leon", MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
  };
  const struct cpu_table *cpu;
  unsigned int i;
  int fpu;

  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
        {
          bool invert;
          int mask;

          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (! strcmp (q, "all"))
            mask = MASK_DEBUG_ALL;
          else if (! strcmp (q, "options"))
            mask = MASK_DEBUG_OPTIONS;
          else
            error ("unknown -mdebug-%s switch", q);

          if (invert)
            sparc_debug &= ~mask;
          else
            sparc_debug |= mask;
        }
    }

  if (TARGET_DEBUG_OPTIONS)
    {
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
        error ("-fcall-saved-REG is not supported for out registers");
        call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
        if (def->cpu == TARGET_CPU_DEFAULT)
          break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
    {
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      fprintf (stderr, "sparc_cpu: %s\n",
               cpu_table[(int) sparc_cpu].name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);
    }

  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
                   & ~(MASK_FMAF | MASK_VIS3)
#endif
                   );

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* -mvis2 implies -mvis */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2
          || sparc_cpu == PROCESSOR_NIAGARA3
          || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara4_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    };

  if (sparc_memory_model == SMM_DEFAULT)
    {
      /* Choose the memory model for the operating system.  */
      enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
      if (os_default != SMM_DEFAULT)
        sparc_memory_model = os_default;
      /* Choose the most relaxed model for the processor.  */
      else if (TARGET_V9)
        sparc_memory_model = SMM_RMO;
      else if (TARGET_V8)
        sparc_memory_model = SMM_PSO;
      else
        sparc_memory_model = SMM_SC;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         ((sparc_cpu == PROCESSOR_ULTRASPARC
                           || sparc_cpu == PROCESSOR_NIAGARA
                           || sparc_cpu == PROCESSOR_NIAGARA2
                           || sparc_cpu == PROCESSOR_NIAGARA3
                           || sparc_cpu == PROCESSOR_NIAGARA4)
                          ? 2
                          : (sparc_cpu == PROCESSOR_ULTRASPARC3
                             ? 8 : 3)),
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         ((sparc_cpu == PROCESSOR_ULTRASPARC
                           || sparc_cpu == PROCESSOR_ULTRASPARC3
                           || sparc_cpu == PROCESSOR_NIAGARA
                           || sparc_cpu == PROCESSOR_NIAGARA2
                           || sparc_cpu == PROCESSOR_NIAGARA3
                           || sparc_cpu == PROCESSOR_NIAGARA4)
                          ? 64 : 32),
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
}
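
/* A standalone sketch (not built) of the -mdebug= parsing idiom used in
   sparc_option_override above: comma-separated keywords, each optionally
   prefixed with '!' to clear the corresponding mask instead of setting
   it.  The mask values here are made up for illustration.  */
#if 0
#include <stdio.h>
#include <string.h>

#define EX_DEBUG_OPTIONS 0x1     /* hypothetical */
#define EX_DEBUG_ALL     (~0)

static int
example_parse_debug (char *p)
{
  int flags = 0;
  const char *q;

  while ((q = strtok (p, ",")) != NULL)
    {
      int invert = 0, mask = 0;

      p = NULL;  /* subsequent strtok calls continue on the same string */
      if (*q == '!')
        {
          invert = 1;
          q++;
        }
      if (!strcmp (q, "all"))
        mask = EX_DEBUG_ALL;
      else if (!strcmp (q, "options"))
        mask = EX_DEBUG_OPTIONS;
      else
        fprintf (stderr, "unknown switch %s\n", q);

      if (invert)
        flags &= ~mask;
      else
        flags |= mask;
    }
  return flags;
}
#endif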
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
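
/* A minimal sketch (not built) of the three-way classification the
   predicates above implement for the 32-bit image of an SF constant.
   It assumes SPARC_SIMM13_P accepts signed 13-bit immediates and
   SPARC_SETHI_P accepts values whose only set bits lie in the sethi
   field (bits 10-31).  */
#if 0
enum example_fp_load_kind { EX_LOAD_MOV, EX_LOAD_SETHI, EX_LOAD_HIGH_LOSUM };

static enum example_fp_load_kind
example_classify_bits (long i)
{
  if (SPARC_SIMM13_P (i))
    return EX_LOAD_MOV;        /* fits a 13-bit signed immediate: one mov */
  if (SPARC_SETHI_P (i))
    return EX_LOAD_SETHI;      /* low 10 bits clear: one sethi */
  return EX_LOAD_HIGH_LOSUM;   /* needs a sethi + or (high/losum) pair */
}
#endif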

/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partitioning into
     hot and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
          && can_use_mov_pic_label_ref (operands[1]))
        {
          if (mode == SImode)
            {
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              return true;
            }

          if (mode == DImode)
            {
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
              return true;
            }
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1]
            = sparc_legitimize_pic_address (operands[1],
                                            reload_in_progress
                                            ? operands[0] : NULL_RTX);
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
         always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && (const_zero_operand (operands[1], mode)
              || const_all_ones_operand (operands[1], mode)))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && ! can_create_pseudo_p ())))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here; the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp = op0;

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM; this way
         CSE can see everything and reuse intermediate values if it
         wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
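
/* A standalone sketch (not built) of the constant split performed
   above: the sethi field gets bits 10-31 and the or/lo_sum field gets
   bits 0-9, so reassembling the two parts must give back the original
   32-bit value.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
example_check_const32_split (uint32_t val)
{
  uint32_t high = val & ~(uint32_t) 0x3ff;  /* sethi %hi(val) */
  uint32_t low  = val & 0x3ff;              /* or/%lo(val)    */
  assert ((high | low) == val);
}
#endif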

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;
    case CM_EMBMEDANY:
      /* Old old old backwards compatibility cruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY; we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi  %hi(symbol), %temp1
                        add    %temp1, EMBMEDANY_BASE_REG, %temp2
                        or     %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi  %uhi(symbol), %temp1
                        sethi  %hi(symbol), %temp2
                        or     %temp1, %ulo(symbol), %temp3
                        sllx   %temp3, 32, %temp4
                        or     %temp4, %temp2, %temp5
                        or     %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}
1717
1718 #if HOST_BITS_PER_WIDE_INT == 32
1719 static void
1720 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1721 {
1722 gcc_unreachable ();
1723 }
1724 #else
1725 /* These avoid problems when cross-compiling. If we do not
1726 go through all this hair, then the optimizer will see
1727 invalid REG_EQUAL notes, or in some cases none at all. */
1728 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1729 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1730 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1731 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1732
1733 /* The optimizer is not allowed to assume anything about exactly
1734 which bits are set for a HIGH; they are unspecified.
1735 Unfortunately this leads to many missed optimizations
1736 during CSE. So we mask out the non-HIGH bits and match
1737 a plain movdi instead, to alleviate this problem. */
1738 static rtx
1739 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1740 {
1741 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1742 }
1743
1744 static rtx
1745 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1746 {
1747 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1748 }
1749
1750 static rtx
1751 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1752 {
1753 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1754 }
1755
1756 static rtx
1757 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1758 {
1759 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1760 }
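
/* A minimal illustration of these helpers (a sketch, not built; TMP
   and DEST stand for hypothetical DImode register rtxes):  */
#if 0
  /* Like "sethi %hi(0xdeadbeef), %tmp": only bits 31..10 are set, so
     the value recorded is 0xdeadbeef & ~0x3ff == 0xdeadbc00.  */
  emit_insn (gen_safe_HIGH64 (tmp, 0xdeadbeef));
  /* Like "or %tmp, 0x2ef, %dest": fill in the low 10 bits.  */
  emit_insn (gen_rtx_SET (VOIDmode, dest,
                          gen_safe_OR64 (tmp, 0xdeadbeef & 0x3ff)));
#endif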
1761
1762 /* Worker routines for 64-bit constant formation on arch64.
1763 A key goal of these emissions is to create as many
1764 temporary REGs as possible. This makes it possible for
1765 half-built constants to be reused when similar values
1766 are required later on. Without doing this, the
1767 optimizer cannot see such opportunities. */
1769
1770 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1771 unsigned HOST_WIDE_INT, int);
1772
1773 static void
1774 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1775 unsigned HOST_WIDE_INT low_bits, int is_neg)
1776 {
1777 unsigned HOST_WIDE_INT high_bits;
1778
1779 if (is_neg)
1780 high_bits = (~low_bits) & 0xffffffff;
1781 else
1782 high_bits = low_bits;
1783
1784 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1785 if (!is_neg)
1786 {
1787 emit_insn (gen_rtx_SET (VOIDmode, op0,
1788 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1789 }
1790 else
1791 {
1792 /* If we are XOR'ing with -1, then we should emit a one's complement
1793 instead. This way the combiner will notice logical operations
1794 such as ANDN later on and substitute. */
1795 if ((low_bits & 0x3ff) == 0x3ff)
1796 {
1797 emit_insn (gen_rtx_SET (VOIDmode, op0,
1798 gen_rtx_NOT (DImode, temp)));
1799 }
1800 else
1801 {
1802 emit_insn (gen_rtx_SET (VOIDmode, op0,
1803 gen_safe_XOR64 (temp,
1804 (-(HOST_WIDE_INT)0x400
1805 | (low_bits & 0x3ff)))));
1806 }
1807 }
1808 }
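
/* For illustration, the sequences quick1 produces (a sketch with
   made-up values and register names):

     0x0000000012345678  ->  sethi %hi(0x12345678), %temp
                             or    %temp, 0x278, %op0

     0xffffffff87654321  ->  sethi %hi(~0x87654321), %temp
                             xor   %temp, (-0x400 | 0x321), %op0  */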
1809
1810 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1811 unsigned HOST_WIDE_INT, int);
1812
1813 static void
1814 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1815 unsigned HOST_WIDE_INT high_bits,
1816 unsigned HOST_WIDE_INT low_immediate,
1817 int shift_count)
1818 {
1819 rtx temp2 = op0;
1820
1821 if ((high_bits & 0xfffffc00) != 0)
1822 {
1823 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1824 if ((high_bits & ~0xfffffc00) != 0)
1825 emit_insn (gen_rtx_SET (VOIDmode, op0,
1826 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1827 else
1828 temp2 = temp;
1829 }
1830 else
1831 {
1832 emit_insn (gen_safe_SET64 (temp, high_bits));
1833 temp2 = temp;
1834 }
1835
1836 /* Now shift it up into place. */
1837 emit_insn (gen_rtx_SET (VOIDmode, op0,
1838 gen_rtx_ASHIFT (DImode, temp2,
1839 GEN_INT (shift_count))));
1840
1841 /* If there is a low immediate piece, finish up by
1842 putting that in as well. */
1843 if (low_immediate != 0)
1844 emit_insn (gen_rtx_SET (VOIDmode, op0,
1845 gen_safe_OR64 (op0, low_immediate)));
1846 }
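
/* Sketch of a typical quick2 expansion (illustrative values): for
   high_bits = 0x12345678, low_immediate = 0x123, shift_count = 32

     sethi %hi(0x12345678), %temp
     or    %temp, 0x278, %op0
     sllx  %op0, 32, %op0
     or    %op0, 0x123, %op0  */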
1847
1848 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1849 unsigned HOST_WIDE_INT);
1850
1851 /* Full 64-bit constant decomposition. Even though this is the
1852 'worst' case, we still optimize a few things away. */
1853 static void
1854 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1855 unsigned HOST_WIDE_INT high_bits,
1856 unsigned HOST_WIDE_INT low_bits)
1857 {
1858 rtx sub_temp = op0;
1859
1860 if (can_create_pseudo_p ())
1861 sub_temp = gen_reg_rtx (DImode);
1862
1863 if ((high_bits & 0xfffffc00) != 0)
1864 {
1865 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1866 if ((high_bits & ~0xfffffc00) != 0)
1867 emit_insn (gen_rtx_SET (VOIDmode,
1868 sub_temp,
1869 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1870 else
1871 sub_temp = temp;
1872 }
1873 else
1874 {
1875 emit_insn (gen_safe_SET64 (temp, high_bits));
1876 sub_temp = temp;
1877 }
1878
1879 if (can_create_pseudo_p ())
1880 {
1881 rtx temp2 = gen_reg_rtx (DImode);
1882 rtx temp3 = gen_reg_rtx (DImode);
1883 rtx temp4 = gen_reg_rtx (DImode);
1884
1885 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1886 gen_rtx_ASHIFT (DImode, sub_temp,
1887 GEN_INT (32))));
1888
1889 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1890 if ((low_bits & ~0xfffffc00) != 0)
1891 {
1892 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1893 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1894 emit_insn (gen_rtx_SET (VOIDmode, op0,
1895 gen_rtx_PLUS (DImode, temp4, temp3)));
1896 }
1897 else
1898 {
1899 emit_insn (gen_rtx_SET (VOIDmode, op0,
1900 gen_rtx_PLUS (DImode, temp4, temp2)));
1901 }
1902 }
1903 else
1904 {
1905 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1906 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1907 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1908 int to_shift = 12;
1909
1910 /* We are in the middle of reload, so this is really
1911 painful. However, we still make an attempt to
1912 avoid emitting truly stupid code. */
1913 if (low1 != const0_rtx)
1914 {
1915 emit_insn (gen_rtx_SET (VOIDmode, op0,
1916 gen_rtx_ASHIFT (DImode, sub_temp,
1917 GEN_INT (to_shift))));
1918 emit_insn (gen_rtx_SET (VOIDmode, op0,
1919 gen_rtx_IOR (DImode, op0, low1)));
1920 sub_temp = op0;
1921 to_shift = 12;
1922 }
1923 else
1924 {
1925 to_shift += 12;
1926 }
1927 if (low2 != const0_rtx)
1928 {
1929 emit_insn (gen_rtx_SET (VOIDmode, op0,
1930 gen_rtx_ASHIFT (DImode, sub_temp,
1931 GEN_INT (to_shift))));
1932 emit_insn (gen_rtx_SET (VOIDmode, op0,
1933 gen_rtx_IOR (DImode, op0, low2)));
1934 sub_temp = op0;
1935 to_shift = 8;
1936 }
1937 else
1938 {
1939 to_shift += 8;
1940 }
1941 emit_insn (gen_rtx_SET (VOIDmode, op0,
1942 gen_rtx_ASHIFT (DImode, sub_temp,
1943 GEN_INT (to_shift))));
1944 if (low3 != const0_rtx)
1945 emit_insn (gen_rtx_SET (VOIDmode, op0,
1946 gen_rtx_IOR (DImode, op0, low3)));
1947 /* phew... */
1948 }
1949 }
1950
1951 /* Analyze a 64-bit constant for certain properties. */
1952 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1953 unsigned HOST_WIDE_INT,
1954 int *, int *, int *);
1955
1956 static void
1957 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1958 unsigned HOST_WIDE_INT low_bits,
1959 int *hbsp, int *lbsp, int *abbasp)
1960 {
1961 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1962 int i;
1963
1964 lowest_bit_set = highest_bit_set = -1;
1965 i = 0;
1966 do
1967 {
1968 if ((lowest_bit_set == -1)
1969 && ((low_bits >> i) & 1))
1970 lowest_bit_set = i;
1971 if ((highest_bit_set == -1)
1972 && ((high_bits >> (32 - i - 1)) & 1))
1973 highest_bit_set = (64 - i - 1);
1974 }
1975 while (++i < 32
1976 && ((highest_bit_set == -1)
1977 || (lowest_bit_set == -1)));
1978 if (i == 32)
1979 {
1980 i = 0;
1981 do
1982 {
1983 if ((lowest_bit_set == -1)
1984 && ((high_bits >> i) & 1))
1985 lowest_bit_set = i + 32;
1986 if ((highest_bit_set == -1)
1987 && ((low_bits >> (32 - i - 1)) & 1))
1988 highest_bit_set = 32 - i - 1;
1989 }
1990 while (++i < 32
1991 && ((highest_bit_set == -1)
1992 || (lowest_bit_set == -1)));
1993 }
1994 /* If there are no bits set, this should have gone out
1995 as one instruction! */
1996 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1997 all_bits_between_are_set = 1;
1998 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1999 {
2000 if (i < 32)
2001 {
2002 if ((low_bits & (1 << i)) != 0)
2003 continue;
2004 }
2005 else
2006 {
2007 if ((high_bits & (1 << (i - 32))) != 0)
2008 continue;
2009 }
2010 all_bits_between_are_set = 0;
2011 break;
2012 }
2013 *hbsp = highest_bit_set;
2014 *lbsp = lowest_bit_set;
2015 *abbasp = all_bits_between_are_set;
2016 }
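
/* Worked examples (for illustration): 0x0000000000ff0000 yields
   lowest_bit_set = 16, highest_bit_set = 23 and
   all_bits_between_are_set = 1, while 0x0000000000ff00f0 yields
   lowest_bit_set = 4, highest_bit_set = 23 and
   all_bits_between_are_set = 0 because of the gap in bits 8-15.  */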
2017
2018 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2019
2020 static int
2021 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2022 unsigned HOST_WIDE_INT low_bits)
2023 {
2024 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2025
2026 if (high_bits == 0
2027 || high_bits == 0xffffffff)
2028 return 1;
2029
2030 analyze_64bit_constant (high_bits, low_bits,
2031 &highest_bit_set, &lowest_bit_set,
2032 &all_bits_between_are_set);
2033
2034 if ((highest_bit_set == 63
2035 || lowest_bit_set == 0)
2036 && all_bits_between_are_set != 0)
2037 return 1;
2038
2039 if ((highest_bit_set - lowest_bit_set) < 21)
2040 return 1;
2041
2042 return 0;
2043 }
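
/* For example: 0xfff0000000000000 (a solid run of bits touching
   bit 63, loadable as "mov -1; sllx") and 0x00000000001ff000
   (highest_bit_set - lowest_bit_set = 8 < 21, loadable as
   "sethi; sllx") both classify as 2-insn constants, whereas
   0x8000000000000001 does not.  */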
2044
2045 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2046 unsigned HOST_WIDE_INT,
2047 int, int);
2048
2049 static unsigned HOST_WIDE_INT
2050 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2051 unsigned HOST_WIDE_INT low_bits,
2052 int lowest_bit_set, int shift)
2053 {
2054 HOST_WIDE_INT hi, lo;
2055
2056 if (lowest_bit_set < 32)
2057 {
2058 lo = (low_bits >> lowest_bit_set) << shift;
2059 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2060 }
2061 else
2062 {
2063 lo = 0;
2064 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2065 }
2066 gcc_assert (! (hi & lo));
2067 return (hi | lo);
2068 }
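
/* E.g. with high_bits = 0, low_bits = 0x00ff0000 and
   lowest_bit_set = 16, this returns 0xff for shift 0 and 0x3fc00
   for shift 10, i.e. the group of set bits right-justified at
   position SHIFT.  */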
2069
2070 /* Here we know we are on arch64 and that an integer constant
2071 is being loaded into a register. Emit the most efficient
2072 insn sequence possible. Detection of all the 1-insn cases
2073 has been done already. */
2074 static void
2075 sparc_emit_set_const64 (rtx op0, rtx op1)
2076 {
2077 unsigned HOST_WIDE_INT high_bits, low_bits;
2078 int lowest_bit_set, highest_bit_set;
2079 int all_bits_between_are_set;
2080 rtx temp = 0;
2081
2082 /* Sanity check that we know what we are working with. */
2083 gcc_assert (TARGET_ARCH64
2084 && (GET_CODE (op0) == SUBREG
2085 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2086
2087 if (! can_create_pseudo_p ())
2088 temp = op0;
2089
2090 if (GET_CODE (op1) != CONST_INT)
2091 {
2092 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2093 return;
2094 }
2095
2096 if (! temp)
2097 temp = gen_reg_rtx (DImode);
2098
2099 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2100 low_bits = (INTVAL (op1) & 0xffffffff);
2101
2102 /* low_bits bits 0 --> 31
2103 high_bits bits 32 --> 63 */
2104
2105 analyze_64bit_constant (high_bits, low_bits,
2106 &highest_bit_set, &lowest_bit_set,
2107 &all_bits_between_are_set);
2108
2109 /* First try for a 2-insn sequence. */
2110
2111 /* These situations are preferred because the optimizer can
2112 * do more things with them:
2113 * 1) mov -1, %reg
2114 * sllx %reg, shift, %reg
2115 * 2) mov -1, %reg
2116 * srlx %reg, shift, %reg
2117 * 3) mov some_small_const, %reg
2118 * sllx %reg, shift, %reg
2119 */
2120 if (((highest_bit_set == 63
2121 || lowest_bit_set == 0)
2122 && all_bits_between_are_set != 0)
2123 || ((highest_bit_set - lowest_bit_set) < 12))
2124 {
2125 HOST_WIDE_INT the_const = -1;
2126 int shift = lowest_bit_set;
2127
2128 if ((highest_bit_set != 63
2129 && lowest_bit_set != 0)
2130 || all_bits_between_are_set == 0)
2131 {
2132 the_const =
2133 create_simple_focus_bits (high_bits, low_bits,
2134 lowest_bit_set, 0);
2135 }
2136 else if (lowest_bit_set == 0)
2137 shift = -(63 - highest_bit_set);
2138
2139 gcc_assert (SPARC_SIMM13_P (the_const));
2140 gcc_assert (shift != 0);
2141
2142 emit_insn (gen_safe_SET64 (temp, the_const));
2143 if (shift > 0)
2144 emit_insn (gen_rtx_SET (VOIDmode,
2145 op0,
2146 gen_rtx_ASHIFT (DImode,
2147 temp,
2148 GEN_INT (shift))));
2149 else if (shift < 0)
2150 emit_insn (gen_rtx_SET (VOIDmode,
2151 op0,
2152 gen_rtx_LSHIFTRT (DImode,
2153 temp,
2154 GEN_INT (-shift))));
2155 return;
2156 }
2157
2158 /* Now handle a range of 22 or fewer bits set somewhere.
2159 * 1) sethi %hi(focus_bits), %reg
2160 * sllx %reg, shift, %reg
2161 * 2) sethi %hi(focus_bits), %reg
2162 * srlx %reg, shift, %reg
2163 */
2164 if ((highest_bit_set - lowest_bit_set) < 21)
2165 {
2166 unsigned HOST_WIDE_INT focus_bits =
2167 create_simple_focus_bits (high_bits, low_bits,
2168 lowest_bit_set, 10);
2169
2170 gcc_assert (SPARC_SETHI_P (focus_bits));
2171 gcc_assert (lowest_bit_set != 10);
2172
2173 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2174
2175 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2176 if (lowest_bit_set < 10)
2177 emit_insn (gen_rtx_SET (VOIDmode,
2178 op0,
2179 gen_rtx_LSHIFTRT (DImode, temp,
2180 GEN_INT (10 - lowest_bit_set))));
2181 else if (lowest_bit_set > 10)
2182 emit_insn (gen_rtx_SET (VOIDmode,
2183 op0,
2184 gen_rtx_ASHIFT (DImode, temp,
2185 GEN_INT (lowest_bit_set - 10))));
2186 return;
2187 }
2188
2189 /* 1) sethi %hi(low_bits), %reg
2190 * or %reg, %lo(low_bits), %reg
2191 * 2) sethi %hi(~low_bits), %reg
2192 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2193 */
2194 if (high_bits == 0
2195 || high_bits == 0xffffffff)
2196 {
2197 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2198 (high_bits == 0xffffffff));
2199 return;
2200 }
2201
2202 /* Now, try 3-insn sequences. */
2203
2204 /* 1) sethi %hi(high_bits), %reg
2205 * or %reg, %lo(high_bits), %reg
2206 * sllx %reg, 32, %reg
2207 */
2208 if (low_bits == 0)
2209 {
2210 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2211 return;
2212 }
2213
2214 /* We may be able to do something quick
2215 when the constant is negated, so try that. */
2216 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2217 (~low_bits) & 0xfffffc00))
2218 {
2219 /* NOTE: The trailing bits get XOR'd so we need the
2220 non-negated bits, not the negated ones. */
2221 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2222
2223 if ((((~high_bits) & 0xffffffff) == 0
2224 && ((~low_bits) & 0x80000000) == 0)
2225 || (((~high_bits) & 0xffffffff) == 0xffffffff
2226 && ((~low_bits) & 0x80000000) != 0))
2227 {
2228 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2229
2230 if ((SPARC_SETHI_P (fast_int)
2231 && (~high_bits & 0xffffffff) == 0)
2232 || SPARC_SIMM13_P (fast_int))
2233 emit_insn (gen_safe_SET64 (temp, fast_int));
2234 else
2235 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2236 }
2237 else
2238 {
2239 rtx negated_const;
2240 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2241 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2242 sparc_emit_set_const64 (temp, negated_const);
2243 }
2244
2245 /* If we are XOR'ing with -1, then we should emit a one's complement
2246 instead. This way the combiner will notice logical operations
2247 such as ANDN later on and substitute. */
2248 if (trailing_bits == 0x3ff)
2249 {
2250 emit_insn (gen_rtx_SET (VOIDmode, op0,
2251 gen_rtx_NOT (DImode, temp)));
2252 }
2253 else
2254 {
2255 emit_insn (gen_rtx_SET (VOIDmode,
2256 op0,
2257 gen_safe_XOR64 (temp,
2258 (-0x400 | trailing_bits))));
2259 }
2260 return;
2261 }
2262
2263 /* 1) sethi %hi(xxx), %reg
2264 * or %reg, %lo(xxx), %reg
2265 * sllx %reg, yyy, %reg
2266 *
2267 * ??? This is just a generalized version of the low_bits==0
2268 * thing above, FIXME...
2269 */
2270 if ((highest_bit_set - lowest_bit_set) < 32)
2271 {
2272 unsigned HOST_WIDE_INT focus_bits =
2273 create_simple_focus_bits (high_bits, low_bits,
2274 lowest_bit_set, 0);
2275
2276 /* We can't get here in this state. */
2277 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2278
2279 /* So what we know is that the set bits straddle the
2280 middle of the 64-bit word. */
2281 sparc_emit_set_const64_quick2 (op0, temp,
2282 focus_bits, 0,
2283 lowest_bit_set);
2284 return;
2285 }
2286
2287 /* 1) sethi %hi(high_bits), %reg
2288 * or %reg, %lo(high_bits), %reg
2289 * sllx %reg, 32, %reg
2290 * or %reg, low_bits, %reg
2291 */
2292 if (SPARC_SIMM13_P (low_bits)
2293 && ((int) low_bits > 0))
2294 {
2295 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2296 return;
2297 }
2298
2299 /* The easiest way, when all else fails, is full decomposition. */
2300 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2301 }
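
/* End-to-end sketch of the 2-insn path above (illustrative):
   0xfff0000000000000 becomes "mov -1, %temp; sllx %temp, 52, %op0"
   and 0x00000000000fffff becomes "mov -1, %temp; srlx %temp, 44, %op0".  */
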
2302 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2303
2304 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2305 return the mode to be used for the comparison. For floating-point,
2306 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2307 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2308 processing is needed. */
2309
2310 enum machine_mode
2311 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2312 {
2313 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2314 {
2315 switch (op)
2316 {
2317 case EQ:
2318 case NE:
2319 case UNORDERED:
2320 case ORDERED:
2321 case UNLT:
2322 case UNLE:
2323 case UNGT:
2324 case UNGE:
2325 case UNEQ:
2326 case LTGT:
2327 return CCFPmode;
2328
2329 case LT:
2330 case LE:
2331 case GT:
2332 case GE:
2333 return CCFPEmode;
2334
2335 default:
2336 gcc_unreachable ();
2337 }
2338 }
2339 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2340 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2341 {
2342 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2343 return CCX_NOOVmode;
2344 else
2345 return CC_NOOVmode;
2346 }
2347 else
2348 {
2349 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2350 return CCXmode;
2351 else
2352 return CCmode;
2353 }
2354 }
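
/* For example (illustrative): an SFmode EQ or UNLT comparison selects
   CCFPmode, an SFmode GT selects CCFPEmode (the compare must signal on
   NaN), a DImode PLUS compared against zero on arch64 selects
   CCX_NOOVmode, and a plain SImode compare selects CCmode.  */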
2355
2356 /* Emit the compare insn and return the CC reg for a CODE comparison
2357 with operands X and Y. */
2358
2359 static rtx
2360 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2361 {
2362 enum machine_mode mode;
2363 rtx cc_reg;
2364
2365 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2366 return x;
2367
2368 mode = SELECT_CC_MODE (code, x, y);
2369
2370 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2371 fcc regs (cse can't tell they're really call clobbered regs and will
2372 remove a duplicate comparison even if there is an intervening function
2373 call - it will then try to reload the cc reg via an int reg which is why
2374 we need the movcc patterns). It is possible to provide the movcc
2375 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2376 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2377 to tell cse that CCFPE mode registers (even pseudos) are call
2378 clobbered. */
2379
2380 /* ??? This is an experiment. Rather than making changes to cse which may
2381 or may not be easy/clean, we do our own cse. This is possible because
2382 we will generate hard registers. Cse knows they're call clobbered (it
2383 doesn't know the same thing about pseudos). If we guess wrong, no big
2384 deal, but if we win, great! */
2385
2386 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2387 #if 1 /* experiment */
2388 {
2389 int reg;
2390 /* We cycle through the registers to ensure they're all exercised. */
2391 static int next_fcc_reg = 0;
2392 /* Previous x,y for each fcc reg. */
2393 static rtx prev_args[4][2];
2394
2395 /* Scan prev_args for x,y. */
2396 for (reg = 0; reg < 4; reg++)
2397 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2398 break;
2399 if (reg == 4)
2400 {
2401 reg = next_fcc_reg;
2402 prev_args[reg][0] = x;
2403 prev_args[reg][1] = y;
2404 next_fcc_reg = (next_fcc_reg + 1) & 3;
2405 }
2406 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2407 }
2408 #else
2409 cc_reg = gen_reg_rtx (mode);
2410 #endif /* ! experiment */
2411 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2412 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2413 else
2414 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2415
2416 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2417 will only result in an unrecognizable insn, so there is no point in asserting. */
2418 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2419
2420 return cc_reg;
2421 }
2422
2423
2424 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2425
2426 rtx
2427 gen_compare_reg (rtx cmp)
2428 {
2429 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2430 }
2431
2432 /* This function is used for v9 only.
2433 DEST is the target of the Scc insn.
2434 CODE is the code for an Scc's comparison.
2435 X and Y are the values we compare.
2436
2437 This function is needed to turn
2438
2439 (set (reg:SI 110)
2440 (gt (reg:CCX 100 %icc)
2441 (const_int 0)))
2442 into
2443 (set (reg:SI 110)
2444 (gt:DI (reg:CCX 100 %icc)
2445 (const_int 0)))
2446
2447 I.e., the instruction recognizer needs to see the mode of the comparison to
2448 find the right instruction. We could use "gt:DI" right in the
2449 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2450
2451 static int
2452 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2453 {
2454 if (! TARGET_ARCH64
2455 && (GET_MODE (x) == DImode
2456 || GET_MODE (dest) == DImode))
2457 return 0;
2458
2459 /* Try to use the movrCC insns. */
2460 if (TARGET_ARCH64
2461 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2462 && y == const0_rtx
2463 && v9_regcmp_p (compare_code))
2464 {
2465 rtx op0 = x;
2466 rtx temp;
2467
2468 /* Special case for op0 != 0. This can be done with one instruction if
2469 dest == x. */
2470
2471 if (compare_code == NE
2472 && GET_MODE (dest) == DImode
2473 && rtx_equal_p (op0, dest))
2474 {
2475 emit_insn (gen_rtx_SET (VOIDmode, dest,
2476 gen_rtx_IF_THEN_ELSE (DImode,
2477 gen_rtx_fmt_ee (compare_code, DImode,
2478 op0, const0_rtx),
2479 const1_rtx,
2480 dest)));
2481 return 1;
2482 }
2483
2484 if (reg_overlap_mentioned_p (dest, op0))
2485 {
2486 /* Handle the case where dest == x.
2487 We "early clobber" the result. */
2488 op0 = gen_reg_rtx (GET_MODE (x));
2489 emit_move_insn (op0, x);
2490 }
2491
2492 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2493 if (GET_MODE (op0) != DImode)
2494 {
2495 temp = gen_reg_rtx (DImode);
2496 convert_move (temp, op0, 0);
2497 }
2498 else
2499 temp = op0;
2500 emit_insn (gen_rtx_SET (VOIDmode, dest,
2501 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2502 gen_rtx_fmt_ee (compare_code, DImode,
2503 temp, const0_rtx),
2504 const1_rtx,
2505 dest)));
2506 return 1;
2507 }
2508 else
2509 {
2510 x = gen_compare_reg_1 (compare_code, x, y);
2511 y = const0_rtx;
2512
2513 gcc_assert (GET_MODE (x) != CC_NOOVmode
2514 && GET_MODE (x) != CCX_NOOVmode);
2515
2516 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2517 emit_insn (gen_rtx_SET (VOIDmode, dest,
2518 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2519 gen_rtx_fmt_ee (compare_code,
2520 GET_MODE (x), x, y),
2521 const1_rtx, dest)));
2522 return 1;
2523 }
2524 }
2525
2526
2527 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2528 without jumps using the addx/subx instructions. */
2529
2530 bool
2531 emit_scc_insn (rtx operands[])
2532 {
2533 rtx tem;
2534 rtx x;
2535 rtx y;
2536 enum rtx_code code;
2537
2538 /* The quad-word fp compare library routines all return nonzero to indicate
2539 true, which is different from the equivalent libgcc routines, so we must
2540 handle them specially here. */
2541 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2542 {
2543 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2544 GET_CODE (operands[1]));
2545 operands[2] = XEXP (operands[1], 0);
2546 operands[3] = XEXP (operands[1], 1);
2547 }
2548
2549 code = GET_CODE (operands[1]);
2550 x = operands[2];
2551 y = operands[3];
2552
2553 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2554 more applications). The exception to this is "reg != 0" which can
2555 be done in one instruction on v9 (so we do it). */
2556 if (code == EQ)
2557 {
2558 if (GET_MODE (x) == SImode)
2559 {
2560 rtx pat = gen_seqsi_special (operands[0], x, y);
2561 emit_insn (pat);
2562 return true;
2563 }
2564 else if (GET_MODE (x) == DImode)
2565 {
2566 rtx pat = gen_seqdi_special (operands[0], x, y);
2567 emit_insn (pat);
2568 return true;
2569 }
2570 }
2571
2572 if (code == NE)
2573 {
2574 if (GET_MODE (x) == SImode)
2575 {
2576 rtx pat = gen_snesi_special (operands[0], x, y);
2577 emit_insn (pat);
2578 return true;
2579 }
2580 else if (GET_MODE (x) == DImode)
2581 {
2582 rtx pat;
2583 if (TARGET_VIS3)
2584 pat = gen_snedi_special_vis3 (operands[0], x, y);
2585 else
2586 pat = gen_snedi_special (operands[0], x, y);
2587 emit_insn (pat);
2588 return true;
2589 }
2590 }
2591
2592 if (TARGET_V9
2593 && TARGET_ARCH64
2594 && GET_MODE (x) == DImode
2595 && !(TARGET_VIS3
2596 && (code == GTU || code == LTU))
2597 && gen_v9_scc (operands[0], code, x, y))
2598 return true;
2599
2600 /* We can do LTU and GEU using the addx/subx instructions too. And
2601 for GTU/LEU, if both operands are registers, swap them and fall
2602 back to the easy case. */
2603 if (code == GTU || code == LEU)
2604 {
2605 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2606 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2607 {
2608 tem = x;
2609 x = y;
2610 y = tem;
2611 code = swap_condition (code);
2612 }
2613 }
2614
2615 if (code == LTU
2616 || (!TARGET_VIS3 && code == GEU))
2617 {
2618 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2619 gen_rtx_fmt_ee (code, SImode,
2620 gen_compare_reg_1 (code, x, y),
2621 const0_rtx)));
2622 return true;
2623 }
2624
2625 /* All the possibilities for using addx/subx-based sequences have been
2626 exhausted; try for a 3-instruction sequence using v9 conditional
2627 moves. */
2628 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
2629 return true;
2630
2631 /* Nope, do branches. */
2632 return false;
2633 }
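
/* Sketch of the jumpless sequences this enables (hypothetical
   registers, not literal output):

     sltu %o0, %o1:   subcc %o0, %o1, %g0   ! carry = (%o0 < %o1) unsigned
                      addx  %g0, 0, %dest   ! dest = carry

     sne  %o0:        subcc %g0, %o0, %g0   ! carry = (%o0 != 0)
                      addx  %g0, 0, %dest  */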
2634
2635 /* Emit a conditional jump insn for the v9 architecture using comparison code
2636 CODE and jump target LABEL.
2637 This function exists to take advantage of the v9 brxx insns. */
2638
2639 static void
2640 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2641 {
2642 emit_jump_insn (gen_rtx_SET (VOIDmode,
2643 pc_rtx,
2644 gen_rtx_IF_THEN_ELSE (VOIDmode,
2645 gen_rtx_fmt_ee (code, GET_MODE (op0),
2646 op0, const0_rtx),
2647 gen_rtx_LABEL_REF (VOIDmode, label),
2648 pc_rtx)));
2649 }
2650
2651 void
2652 emit_conditional_branch_insn (rtx operands[])
2653 {
2654 /* The quad-word fp compare library routines all return nonzero to indicate
2655 true, which is different from the equivalent libgcc routines, so we must
2656 handle them specially here. */
2657 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2658 {
2659 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2660 GET_CODE (operands[0]));
2661 operands[1] = XEXP (operands[0], 0);
2662 operands[2] = XEXP (operands[0], 1);
2663 }
2664
2665 if (TARGET_ARCH64 && operands[2] == const0_rtx
2666 && GET_CODE (operands[1]) == REG
2667 && GET_MODE (operands[1]) == DImode)
2668 {
2669 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2670 return;
2671 }
2672
2673 operands[1] = gen_compare_reg (operands[0]);
2674 operands[2] = const0_rtx;
2675 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2676 operands[1], operands[2]);
2677 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2678 operands[3]));
2679 }
2680
2681
2682 /* Generate a DFmode part of a hard TFmode register.
2683 REG is the TFmode hard register, LOW is 1 for the
2684 low 64 bits of the register and 0 otherwise.
2685 */
2686 rtx
2687 gen_df_reg (rtx reg, int low)
2688 {
2689 int regno = REGNO (reg);
2690
2691 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2692 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
2693 return gen_rtx_REG (DFmode, regno);
2694 }
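
/* For example, with big-endian word order the TFmode register %f0
   has its low 64 bits in %f2: gen_df_reg (reg, 1) returns %f2 and
   gen_df_reg (reg, 0) returns %f0. Integer registers on arch64 step
   by 1 instead of 2, since each holds a full DImode word.  */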
2695 \f
2696 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2697 Unlike normal calls, TFmode operands are passed by reference. It is
2698 assumed that no more than 3 operands are required. */
2699
2700 static void
2701 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2702 {
2703 rtx ret_slot = NULL, arg[3], func_sym;
2704 int i;
2705
2706 /* We only expect to be called for conversions, unary, and binary ops. */
2707 gcc_assert (nargs == 2 || nargs == 3);
2708
2709 for (i = 0; i < nargs; ++i)
2710 {
2711 rtx this_arg = operands[i];
2712 rtx this_slot;
2713
2714 /* TFmode arguments and return values are passed by reference. */
2715 if (GET_MODE (this_arg) == TFmode)
2716 {
2717 int force_stack_temp;
2718
2719 force_stack_temp = 0;
2720 if (TARGET_BUGGY_QP_LIB && i == 0)
2721 force_stack_temp = 1;
2722
2723 if (GET_CODE (this_arg) == MEM
2724 && ! force_stack_temp)
2725 {
2726 tree expr = MEM_EXPR (this_arg);
2727 if (expr)
2728 mark_addressable (expr);
2729 this_arg = XEXP (this_arg, 0);
2730 }
2731 else if (CONSTANT_P (this_arg)
2732 && ! force_stack_temp)
2733 {
2734 this_slot = force_const_mem (TFmode, this_arg);
2735 this_arg = XEXP (this_slot, 0);
2736 }
2737 else
2738 {
2739 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
2740
2741 /* Operand 0 is the return value. We'll copy it out later. */
2742 if (i > 0)
2743 emit_move_insn (this_slot, this_arg);
2744 else
2745 ret_slot = this_slot;
2746
2747 this_arg = XEXP (this_slot, 0);
2748 }
2749 }
2750
2751 arg[i] = this_arg;
2752 }
2753
2754 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2755
2756 if (GET_MODE (operands[0]) == TFmode)
2757 {
2758 if (nargs == 2)
2759 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2760 arg[0], GET_MODE (arg[0]),
2761 arg[1], GET_MODE (arg[1]));
2762 else
2763 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2764 arg[0], GET_MODE (arg[0]),
2765 arg[1], GET_MODE (arg[1]),
2766 arg[2], GET_MODE (arg[2]));
2767
2768 if (ret_slot)
2769 emit_move_insn (operands[0], ret_slot);
2770 }
2771 else
2772 {
2773 rtx ret;
2774
2775 gcc_assert (nargs == 2);
2776
2777 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2778 GET_MODE (operands[0]), 1,
2779 arg[1], GET_MODE (arg[1]));
2780
2781 if (ret != operands[0])
2782 emit_move_insn (operands[0], ret);
2783 }
2784 }
2785
2786 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2787
2788 static void
2789 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2790 {
2791 const char *func;
2792
2793 switch (code)
2794 {
2795 case PLUS:
2796 func = "_Qp_add";
2797 break;
2798 case MINUS:
2799 func = "_Qp_sub";
2800 break;
2801 case MULT:
2802 func = "_Qp_mul";
2803 break;
2804 case DIV:
2805 func = "_Qp_div";
2806 break;
2807 default:
2808 gcc_unreachable ();
2809 }
2810
2811 emit_soft_tfmode_libcall (func, 3, operands);
2812 }
2813
2814 static void
2815 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2816 {
2817 const char *func;
2818
2819 gcc_assert (code == SQRT);
2820 func = "_Qp_sqrt";
2821
2822 emit_soft_tfmode_libcall (func, 2, operands);
2823 }
2824
2825 static void
2826 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2827 {
2828 const char *func;
2829
2830 switch (code)
2831 {
2832 case FLOAT_EXTEND:
2833 switch (GET_MODE (operands[1]))
2834 {
2835 case SFmode:
2836 func = "_Qp_stoq";
2837 break;
2838 case DFmode:
2839 func = "_Qp_dtoq";
2840 break;
2841 default:
2842 gcc_unreachable ();
2843 }
2844 break;
2845
2846 case FLOAT_TRUNCATE:
2847 switch (GET_MODE (operands[0]))
2848 {
2849 case SFmode:
2850 func = "_Qp_qtos";
2851 break;
2852 case DFmode:
2853 func = "_Qp_qtod";
2854 break;
2855 default:
2856 gcc_unreachable ();
2857 }
2858 break;
2859
2860 case FLOAT:
2861 switch (GET_MODE (operands[1]))
2862 {
2863 case SImode:
2864 func = "_Qp_itoq";
2865 if (TARGET_ARCH64)
2866 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2867 break;
2868 case DImode:
2869 func = "_Qp_xtoq";
2870 break;
2871 default:
2872 gcc_unreachable ();
2873 }
2874 break;
2875
2876 case UNSIGNED_FLOAT:
2877 switch (GET_MODE (operands[1]))
2878 {
2879 case SImode:
2880 func = "_Qp_uitoq";
2881 if (TARGET_ARCH64)
2882 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2883 break;
2884 case DImode:
2885 func = "_Qp_uxtoq";
2886 break;
2887 default:
2888 gcc_unreachable ();
2889 }
2890 break;
2891
2892 case FIX:
2893 switch (GET_MODE (operands[0]))
2894 {
2895 case SImode:
2896 func = "_Qp_qtoi";
2897 break;
2898 case DImode:
2899 func = "_Qp_qtox";
2900 break;
2901 default:
2902 gcc_unreachable ();
2903 }
2904 break;
2905
2906 case UNSIGNED_FIX:
2907 switch (GET_MODE (operands[0]))
2908 {
2909 case SImode:
2910 func = "_Qp_qtoui";
2911 break;
2912 case DImode:
2913 func = "_Qp_qtoux";
2914 break;
2915 default:
2916 gcc_unreachable ();
2917 }
2918 break;
2919
2920 default:
2921 gcc_unreachable ();
2922 }
2923
2924 emit_soft_tfmode_libcall (func, 2, operands);
2925 }
2926
2927 /* Expand a hard-float TFmode operation. All arguments must be in
2928 registers. */
2929
2930 static void
2931 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2932 {
2933 rtx op, dest;
2934
2935 if (GET_RTX_CLASS (code) == RTX_UNARY)
2936 {
2937 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2938 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2939 }
2940 else
2941 {
2942 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2943 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2944 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2945 operands[1], operands[2]);
2946 }
2947
2948 if (register_operand (operands[0], VOIDmode))
2949 dest = operands[0];
2950 else
2951 dest = gen_reg_rtx (GET_MODE (operands[0]));
2952
2953 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2954
2955 if (dest != operands[0])
2956 emit_move_insn (operands[0], dest);
2957 }
2958
2959 void
2960 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2961 {
2962 if (TARGET_HARD_QUAD)
2963 emit_hard_tfmode_operation (code, operands);
2964 else
2965 emit_soft_tfmode_binop (code, operands);
2966 }
2967
2968 void
2969 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2970 {
2971 if (TARGET_HARD_QUAD)
2972 emit_hard_tfmode_operation (code, operands);
2973 else
2974 emit_soft_tfmode_unop (code, operands);
2975 }
2976
2977 void
2978 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2979 {
2980 if (TARGET_HARD_QUAD)
2981 emit_hard_tfmode_operation (code, operands);
2982 else
2983 emit_soft_tfmode_cvt (code, operands);
2984 }
2985 \f
2986 /* Return nonzero if a branch/jump/call instruction will have
2987 a nop emitted into its delay slot. */
2988
2989 int
2990 empty_delay_slot (rtx insn)
2991 {
2992 rtx seq;
2993
2994 /* If no previous instruction (should not happen), return true. */
2995 if (PREV_INSN (insn) == NULL)
2996 return 1;
2997
2998 seq = NEXT_INSN (PREV_INSN (insn));
2999 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3000 return 0;
3001
3002 return 1;
3003 }
3004
3005 /* Return nonzero if TRIAL can go into the call delay slot. */
3006
3007 int
3008 tls_call_delay (rtx trial)
3009 {
3010 rtx pat;
3011
3012 /* Binutils allows
3013 call __tls_get_addr, %tgd_call (foo)
3014 add %l7, %o0, %o0, %tgd_add (foo)
3015 while Sun as/ld does not. */
3016 if (TARGET_GNU_TLS || !TARGET_TLS)
3017 return 1;
3018
3019 pat = PATTERN (trial);
3020
3021 /* We must reject tgd_add{32|64}, i.e.
3022 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3023 and tldm_add{32|64}, i.e.
3024 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3025 for Sun as/ld. */
3026 if (GET_CODE (pat) == SET
3027 && GET_CODE (SET_SRC (pat)) == PLUS)
3028 {
3029 rtx unspec = XEXP (SET_SRC (pat), 1);
3030
3031 if (GET_CODE (unspec) == UNSPEC
3032 && (XINT (unspec, 1) == UNSPEC_TLSGD
3033 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3034 return 0;
3035 }
3036
3037 return 1;
3038 }
3039
3040 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3041 instruction. RETURN_P is true if the v9 variant 'return' is to be
3042 considered in the test too.
3043
3044 TRIAL must be a SET whose destination is a REG appropriate for the
3045 'restore' instruction or, if RETURN_P is true, for the 'return'
3046 instruction. */
3047
3048 static int
3049 eligible_for_restore_insn (rtx trial, bool return_p)
3050 {
3051 rtx pat = PATTERN (trial);
3052 rtx src = SET_SRC (pat);
3053 bool src_is_freg = false;
3054 rtx src_reg;
3055
3056 /* Since we can now do moves between float and integer registers when
3057 VIS3 is enabled, we have to catch this case. We can allow such
3058 moves when doing a 'return', however. */
3059 src_reg = src;
3060 if (GET_CODE (src_reg) == SUBREG)
3061 src_reg = SUBREG_REG (src_reg);
3062 if (GET_CODE (src_reg) == REG
3063 && SPARC_FP_REG_P (REGNO (src_reg)))
3064 src_is_freg = true;
3065
3066 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3067 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3068 && arith_operand (src, GET_MODE (src))
3069 && ! src_is_freg)
3070 {
3071 if (TARGET_ARCH64)
3072 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3073 else
3074 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3075 }
3076
3077 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3078 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3079 && arith_double_operand (src, GET_MODE (src))
3080 && ! src_is_freg)
3081 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3082
3083 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3084 else if (! TARGET_FPU && register_operand (src, SFmode))
3085 return 1;
3086
3087 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3088 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3089 return 1;
3090
3091 /* If we have the 'return' instruction, anything that does not use
3092 local or output registers and can go into a delay slot wins. */
3093 else if (return_p
3094 && TARGET_V9
3095 && !epilogue_renumber (&pat, 1)
3096 && get_attr_in_uncond_branch_delay (trial)
3097 == IN_UNCOND_BRANCH_DELAY_TRUE)
3098 return 1;
3099
3100 /* The 'restore src1,src2,dest' pattern for SImode. */
3101 else if (GET_CODE (src) == PLUS
3102 && register_operand (XEXP (src, 0), SImode)
3103 && arith_operand (XEXP (src, 1), SImode))
3104 return 1;
3105
3106 /* The 'restore src1,src2,dest' pattern for DImode. */
3107 else if (GET_CODE (src) == PLUS
3108 && register_operand (XEXP (src, 0), DImode)
3109 && arith_double_operand (XEXP (src, 1), DImode))
3110 return 1;
3111
3112 /* The 'restore src1,%lo(src2),dest' pattern. */
3113 else if (GET_CODE (src) == LO_SUM
3114 && ! TARGET_CM_MEDMID
3115 && ((register_operand (XEXP (src, 0), SImode)
3116 && immediate_operand (XEXP (src, 1), SImode))
3117 || (TARGET_ARCH64
3118 && register_operand (XEXP (src, 0), DImode)
3119 && immediate_operand (XEXP (src, 1), DImode))))
3120 return 1;
3121
3122 /* The 'restore src,src,dest' pattern. */
3123 else if (GET_CODE (src) == ASHIFT
3124 && (register_operand (XEXP (src, 0), SImode)
3125 || register_operand (XEXP (src, 0), DImode))
3126 && XEXP (src, 1) == const1_rtx)
3127 return 1;
3128
3129 return 0;
3130 }
3131
3132 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3133
3134 int
3135 eligible_for_return_delay (rtx trial)
3136 {
3137 int regno;
3138 rtx pat;
3139
3140 if (GET_CODE (trial) != INSN)
3141 return 0;
3142
3143 if (get_attr_length (trial) != 1)
3144 return 0;
3145
3146 /* If the function uses __builtin_eh_return, the eh_return machinery
3147 occupies the delay slot. */
3148 if (crtl->calls_eh_return)
3149 return 0;
3150
3151 /* In the case of a leaf or flat function, anything can go into the slot. */
3152 if (sparc_leaf_function_p || TARGET_FLAT)
3153 return
3154 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
3155
3156 pat = PATTERN (trial);
3157 if (GET_CODE (pat) == PARALLEL)
3158 {
3159 int i;
3160
3161 if (! TARGET_V9)
3162 return 0;
3163 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3164 {
3165 rtx expr = XVECEXP (pat, 0, i);
3166 if (GET_CODE (expr) != SET)
3167 return 0;
3168 if (GET_CODE (SET_DEST (expr)) != REG)
3169 return 0;
3170 regno = REGNO (SET_DEST (expr));
3171 if (regno >= 8 && regno < 24)
3172 return 0;
3173 }
3174 return !epilogue_renumber (&pat, 1)
3175 && (get_attr_in_uncond_branch_delay (trial)
3176 == IN_UNCOND_BRANCH_DELAY_TRUE);
3177 }
3178
3179 if (GET_CODE (pat) != SET)
3180 return 0;
3181
3182 if (GET_CODE (SET_DEST (pat)) != REG)
3183 return 0;
3184
3185 regno = REGNO (SET_DEST (pat));
3186
3187 /* Otherwise, only operations which can be done in tandem with
3188 a `restore' or `return' insn can go into the delay slot. */
3189 if (regno >= 8 && regno < 24)
3190 return 0;
3191
3192 /* If this instruction sets up a floating-point register and we have a
3193 'return' instruction, it can probably go in. But 'restore' will not
3194 work with FP_REGS. */
3195 if (! SPARC_INT_REG_P (regno))
3196 return (TARGET_V9
3197 && !epilogue_renumber (&pat, 1)
3198 && get_attr_in_uncond_branch_delay (trial)
3199 == IN_UNCOND_BRANCH_DELAY_TRUE);
3200
3201 return eligible_for_restore_insn (trial, true);
3202 }
3203
3204 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3205
3206 int
3207 eligible_for_sibcall_delay (rtx trial)
3208 {
3209 rtx pat;
3210
3211 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3212 return 0;
3213
3214 if (get_attr_length (trial) != 1)
3215 return 0;
3216
3217 pat = PATTERN (trial);
3218
3219 if (sparc_leaf_function_p || TARGET_FLAT)
3220 {
3221 /* If the tail call is done using the call instruction,
3222 we have to restore %o7 in the delay slot. */
3223 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3224 return 0;
3225
3226 /* %g1 is used to build the function address. */
3227 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3228 return 0;
3229
3230 return 1;
3231 }
3232
3233 /* Otherwise, only operations which can be done in tandem with
3234 a `restore' insn can go into the delay slot. */
3235 if (GET_CODE (SET_DEST (pat)) != REG
3236 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3237 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
3238 return 0;
3239
3240 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3241 in most cases. */
3242 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3243 return 0;
3244
3245 return eligible_for_restore_insn (trial, false);
3246 }
3247 \f
3248 /* Determine if it's legal to put X into the constant pool. This
3249 is not possible if X contains the address of a symbol that is
3250 not constant (TLS) or not known at final link time (PIC). */
3251
3252 static bool
3253 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3254 {
3255 switch (GET_CODE (x))
3256 {
3257 case CONST_INT:
3258 case CONST_DOUBLE:
3259 case CONST_VECTOR:
3260 /* Accept all non-symbolic constants. */
3261 return false;
3262
3263 case LABEL_REF:
3264 /* Labels are OK iff we are non-PIC. */
3265 return flag_pic != 0;
3266
3267 case SYMBOL_REF:
3268 /* 'Naked' TLS symbol references are never OK,
3269 non-TLS symbols are OK iff we are non-PIC. */
3270 if (SYMBOL_REF_TLS_MODEL (x))
3271 return true;
3272 else
3273 return flag_pic != 0;
3274
3275 case CONST:
3276 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3277 case PLUS:
3278 case MINUS:
3279 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3280 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3281 case UNSPEC:
3282 return true;
3283 default:
3284 gcc_unreachable ();
3285 }
3286 }
3287 \f
3288 /* Global Offset Table support. */
3289 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3290 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3291
3292 /* Return the SYMBOL_REF for the Global Offset Table. */
3293
3294 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3295
3296 static rtx
3297 sparc_got (void)
3298 {
3299 if (!sparc_got_symbol)
3300 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3301
3302 return sparc_got_symbol;
3303 }
3304
3305 /* Ensure that we are not using patterns that are not OK with PIC. */
3306
3307 int
3308 check_pic (int i)
3309 {
3310 rtx op;
3311
3312 switch (flag_pic)
3313 {
3314 case 1:
3315 op = recog_data.operand[i];
3316 gcc_assert (GET_CODE (op) != SYMBOL_REF
3317 && (GET_CODE (op) != CONST
3318 || (GET_CODE (XEXP (op, 0)) == MINUS
3319 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3320 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
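      /* FALLTHRU */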
3321 case 2:
3322 default:
3323 return 1;
3324 }
3325 }
3326
3327 /* Return true if X is an address which needs a temporary register when
3328 reloaded while generating PIC code. */
3329
3330 int
3331 pic_address_needs_scratch (rtx x)
3332 {
3333 /* An address which is a symbol plus a non-SMALL_INT constant needs a temp reg. */
3334 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3335 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3336 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3337 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3338 return 1;
3339
3340 return 0;
3341 }
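
/* For example (sketch): "sym + 8" fits in a signed 13-bit immediate
   and needs no scratch register, while "sym + 0x2000" does need one,
   since 0x2000 lies outside the SMALL_INT range [-0x1000, 0xfff].  */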
3342
3343 /* Determine if a given RTX is a valid constant. We already know this
3344 satisfies CONSTANT_P. */
3345
3346 static bool
3347 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3348 {
3349 switch (GET_CODE (x))
3350 {
3351 case CONST:
3352 case SYMBOL_REF:
3353 if (sparc_tls_referenced_p (x))
3354 return false;
3355 break;
3356
3357 case CONST_DOUBLE:
3358 if (GET_MODE (x) == VOIDmode)
3359 return true;
3360
3361 /* Floating-point constants are generally not OK.
3362 The only exceptions are 0.0 and all-ones, with VIS. */
3363 if (TARGET_VIS
3364 && SCALAR_FLOAT_MODE_P (mode)
3365 && (const_zero_operand (x, mode)
3366 || const_all_ones_operand (x, mode)))
3367 return true;
3368
3369 return false;
3370
3371 case CONST_VECTOR:
3372 /* Vector constants are generally not OK.
3373 The only exceptions are 0 and -1, with VIS. */
3374 if (TARGET_VIS
3375 && (const_zero_operand (x, mode)
3376 || const_all_ones_operand (x, mode)))
3377 return true;
3378
3379 return false;
3380
3381 default:
3382 break;
3383 }
3384
3385 return true;
3386 }
3387
3388 /* Determine if a given RTX is a valid constant address. */
3389
3390 bool
3391 constant_address_p (rtx x)
3392 {
3393 switch (GET_CODE (x))
3394 {
3395 case LABEL_REF:
3396 case CONST_INT:
3397 case HIGH:
3398 return true;
3399
3400 case CONST:
3401 if (flag_pic && pic_address_needs_scratch (x))
3402 return false;
3403 return sparc_legitimate_constant_p (Pmode, x);
3404
3405 case SYMBOL_REF:
3406 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3407
3408 default:
3409 return false;
3410 }
3411 }
3412
3413 /* Nonzero if the constant value X is a legitimate general operand
3414 when generating PIC code. It is given that flag_pic is on and
3415 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3416
3417 bool
3418 legitimate_pic_operand_p (rtx x)
3419 {
3420 if (pic_address_needs_scratch (x))
3421 return false;
3422 if (sparc_tls_referenced_p (x))
3423 return false;
3424 return true;
3425 }
3426
3427 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3428 (CONST_INT_P (X) \
3429 && INTVAL (X) >= -0x1000 \
3430 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3431
3432 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3433 (CONST_INT_P (X) \
3434 && INTVAL (X) >= -0x1000 \
3435 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
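
/* Worked example: for DImode (8 bytes) RTX_OK_FOR_OFFSET_P accepts
   offsets in [-0x1000, 0xff7], keeping OFFSET + 8 inside the signed
   13-bit range so that an access split into word moves still has
   in-range addresses. The OLO10 variant is tighter because the %lo()
   part of a LO_SUM can add up to 0x3ff on top of the offset.  */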
3436
3437 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3438
3439 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3440 ordinarily. This changes a bit when generating PIC. */
3441
3442 static bool
3443 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3444 {
3445 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3446
3447 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3448 rs1 = addr;
3449 else if (GET_CODE (addr) == PLUS)
3450 {
3451 rs1 = XEXP (addr, 0);
3452 rs2 = XEXP (addr, 1);
3453
3454 /* Canonicalize: REG comes first; if there are no REGs,
3455 LO_SUM comes first. */
3456 if (!REG_P (rs1)
3457 && GET_CODE (rs1) != SUBREG
3458 && (REG_P (rs2)
3459 || GET_CODE (rs2) == SUBREG
3460 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3461 {
3462 rs1 = XEXP (addr, 1);
3463 rs2 = XEXP (addr, 0);
3464 }
3465
3466 if ((flag_pic == 1
3467 && rs1 == pic_offset_table_rtx
3468 && !REG_P (rs2)
3469 && GET_CODE (rs2) != SUBREG
3470 && GET_CODE (rs2) != LO_SUM
3471 && GET_CODE (rs2) != MEM
3472 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3473 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3474 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3475 || ((REG_P (rs1)
3476 || GET_CODE (rs1) == SUBREG)
3477 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3478 {
3479 imm1 = rs2;
3480 rs2 = NULL;
3481 }
3482 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3483 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3484 {
3485 /* We prohibit REG + REG for TFmode when there are no quad move insns
3486 and we consequently need to split. We do this because REG+REG
3487 is not an offsettable address. If we get the situation in reload
3488 where source and destination of a movtf pattern are both MEMs with
3489 REG+REG address, then only one of them gets converted to an
3490 offsettable address. */
3491 if (mode == TFmode
3492 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
3493 return 0;
3494
3495 /* We prohibit REG + REG on ARCH32 if not optimizing for
3496 DFmode/DImode because then mem_min_alignment is likely to be zero
3497 after reload and the forced split would lack a matching splitter
3498 pattern. */
3499 if (TARGET_ARCH32 && !optimize
3500 && (mode == DFmode || mode == DImode))
3501 return 0;
3502 }
3503 else if (USE_AS_OFFSETABLE_LO10
3504 && GET_CODE (rs1) == LO_SUM
3505 && TARGET_ARCH64
3506 && ! TARGET_CM_MEDMID
3507 && RTX_OK_FOR_OLO10_P (rs2, mode))
3508 {
3509 rs2 = NULL;
3510 imm1 = XEXP (rs1, 1);
3511 rs1 = XEXP (rs1, 0);
3512 if (!CONSTANT_P (imm1)
3513 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3514 return 0;
3515 }
3516 }
3517 else if (GET_CODE (addr) == LO_SUM)
3518 {
3519 rs1 = XEXP (addr, 0);
3520 imm1 = XEXP (addr, 1);
3521
3522 if (!CONSTANT_P (imm1)
3523 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3524 return 0;
3525
3526 /* We can't allow TFmode in 32-bit mode, because an offset greater
3527 than the alignment (8) may cause the LO_SUM to overflow. */
3528 if (mode == TFmode && TARGET_ARCH32)
3529 return 0;
3530 }
3531 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3532 return 1;
3533 else
3534 return 0;
3535
3536 if (GET_CODE (rs1) == SUBREG)
3537 rs1 = SUBREG_REG (rs1);
3538 if (!REG_P (rs1))
3539 return 0;
3540
3541 if (rs2)
3542 {
3543 if (GET_CODE (rs2) == SUBREG)
3544 rs2 = SUBREG_REG (rs2);
3545 if (!REG_P (rs2))
3546 return 0;
3547 }
3548
3549 if (strict)
3550 {
3551 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3552 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3553 return 0;
3554 }
3555 else
3556 {
3557 if ((! SPARC_INT_REG_P (REGNO (rs1))
3558 && REGNO (rs1) != FRAME_POINTER_REGNUM
3559 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3560 || (rs2
3561 && (! SPARC_INT_REG_P (REGNO (rs2))
3562 && REGNO (rs2) != FRAME_POINTER_REGNUM
3563 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3564 return 0;
3565 }
3566 return 1;
3567 }
3568
3569 /* Return the SYMBOL_REF for the tls_get_addr function. */
3570
3571 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3572
3573 static rtx
3574 sparc_tls_get_addr (void)
3575 {
3576 if (!sparc_tls_symbol)
3577 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3578
3579 return sparc_tls_symbol;
3580 }
3581
3582 /* Return the Global Offset Table to be used in TLS mode. */
3583
3584 static rtx
3585 sparc_tls_got (void)
3586 {
3587 /* In PIC mode, this is just the PIC offset table. */
3588 if (flag_pic)
3589 {
3590 crtl->uses_pic_offset_table = 1;
3591 return pic_offset_table_rtx;
3592 }
3593
3594 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3595 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3596 if (TARGET_SUN_TLS && TARGET_ARCH32)
3597 {
3598 load_got_register ();
3599 return global_offset_table_rtx;
3600 }
3601
3602 /* In all other cases, we load a new pseudo with the GOT symbol. */
3603 return copy_to_reg (sparc_got ());
3604 }
3605
3606 /* Return true if X contains a thread-local symbol. */
3607
3608 static bool
3609 sparc_tls_referenced_p (rtx x)
3610 {
3611 if (!TARGET_HAVE_TLS)
3612 return false;
3613
3614 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3615 x = XEXP (XEXP (x, 0), 0);
3616
3617 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3618 return true;
3619
3620 /* That's all we handle in sparc_legitimize_tls_address for now. */
3621 return false;
3622 }
3623
3624 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3625 this (thread-local) address. */
3626
3627 static rtx
3628 sparc_legitimize_tls_address (rtx addr)
3629 {
3630 rtx temp1, temp2, temp3, ret, o0, got, insn;
3631
3632 gcc_assert (can_create_pseudo_p ());
3633
3634 if (GET_CODE (addr) == SYMBOL_REF)
3635 switch (SYMBOL_REF_TLS_MODEL (addr))
3636 {
3637 case TLS_MODEL_GLOBAL_DYNAMIC:
3638 start_sequence ();
3639 temp1 = gen_reg_rtx (SImode);
3640 temp2 = gen_reg_rtx (SImode);
3641 ret = gen_reg_rtx (Pmode);
3642 o0 = gen_rtx_REG (Pmode, 8);
3643 got = sparc_tls_got ();
3644 emit_insn (gen_tgd_hi22 (temp1, addr));
3645 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3646 if (TARGET_ARCH32)
3647 {
3648 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3649 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3650 addr, const1_rtx));
3651 }
3652 else
3653 {
3654 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3655 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3656 addr, const1_rtx));
3657 }
3658 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3659 insn = get_insns ();
3660 end_sequence ();
3661 emit_libcall_block (insn, ret, o0, addr);
3662 break;
3663
3664 case TLS_MODEL_LOCAL_DYNAMIC:
3665 start_sequence ();
3666 temp1 = gen_reg_rtx (SImode);
3667 temp2 = gen_reg_rtx (SImode);
3668 temp3 = gen_reg_rtx (Pmode);
3669 ret = gen_reg_rtx (Pmode);
3670 o0 = gen_rtx_REG (Pmode, 8);
3671 got = sparc_tls_got ();
3672 emit_insn (gen_tldm_hi22 (temp1));
3673 emit_insn (gen_tldm_lo10 (temp2, temp1));
3674 if (TARGET_ARCH32)
3675 {
3676 emit_insn (gen_tldm_add32 (o0, got, temp2));
3677 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3678 const1_rtx));
3679 }
3680 else
3681 {
3682 emit_insn (gen_tldm_add64 (o0, got, temp2));
3683 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3684 const1_rtx));
3685 }
3686 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3687 insn = get_insns ();
3688 end_sequence ();
3689 emit_libcall_block (insn, temp3, o0,
3690 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3691 UNSPEC_TLSLD_BASE));
3692 temp1 = gen_reg_rtx (SImode);
3693 temp2 = gen_reg_rtx (SImode);
3694 emit_insn (gen_tldo_hix22 (temp1, addr));
3695 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3696 if (TARGET_ARCH32)
3697 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3698 else
3699 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3700 break;
3701
3702 case TLS_MODEL_INITIAL_EXEC:
3703 temp1 = gen_reg_rtx (SImode);
3704 temp2 = gen_reg_rtx (SImode);
3705 temp3 = gen_reg_rtx (Pmode);
3706 got = sparc_tls_got ();
3707 emit_insn (gen_tie_hi22 (temp1, addr));
3708 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3709 if (TARGET_ARCH32)
3710 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3711 else
3712 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3713 if (TARGET_SUN_TLS)
3714 {
3715 ret = gen_reg_rtx (Pmode);
3716 if (TARGET_ARCH32)
3717 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3718 temp3, addr));
3719 else
3720 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3721 temp3, addr));
3722 }
3723 else
3724 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3725 break;
3726
3727 case TLS_MODEL_LOCAL_EXEC:
3728 temp1 = gen_reg_rtx (Pmode);
3729 temp2 = gen_reg_rtx (Pmode);
3730 if (TARGET_ARCH32)
3731 {
3732 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3733 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3734 }
3735 else
3736 {
3737 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3738 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3739 }
3740 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3741 break;
3742
3743 default:
3744 gcc_unreachable ();
3745 }
3746
3747 else if (GET_CODE (addr) == CONST)
3748 {
3749 rtx base, offset;
3750
3751 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3752
3753 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3754 offset = XEXP (XEXP (addr, 0), 1);
3755
3756 base = force_operand (base, NULL_RTX);
3757 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3758 offset = force_reg (Pmode, offset);
3759 ret = gen_rtx_PLUS (Pmode, base, offset);
3760 }
3761
3762 else
3763 gcc_unreachable (); /* for now ... */
3764
3765 return ret;
3766 }
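
/* For concreteness, a hedged sketch of what the global dynamic sequence
   built above boils down to on 32-bit, using the relocation operators of
   the SPARC TLS ABI (the exact register choices are up to the allocator):

	sethi	%tgd_hi22(sym), %g1
	add	%g1, %tgd_lo10(sym), %g1
	add	%l7, %g1, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   The local exec case needs no call at all and reduces to an offset from
   the thread pointer (%g7):

	sethi	%tle_hix22(sym), %g1
	xor	%g1, %tle_lox10(sym), %g1
	add	%g7, %g1, %o0  */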
3767
3768 /* Legitimize PIC addresses. If the address is already position-independent,
3769 we return ORIG. Newly generated position-independent addresses go into a
3770 reg. This is REG if nonzero, otherwise we allocate register(s) as
3771 necessary. */
3772
3773 static rtx
3774 sparc_legitimize_pic_address (rtx orig, rtx reg)
3775 {
3776 bool gotdata_op = false;
3777
3778 if (GET_CODE (orig) == SYMBOL_REF
3779 /* See the comment in sparc_expand_move. */
3780 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3781 {
3782 rtx pic_ref, address;
3783 rtx insn;
3784
3785 if (reg == 0)
3786 {
3787 gcc_assert (can_create_pseudo_p ());
3788 reg = gen_reg_rtx (Pmode);
3789 }
3790
3791 if (flag_pic == 2)
3792 {
3793 /* If not during reload, allocate another temp reg here for loading
3794 in the address, so that these instructions can be optimized
3795 properly. */
3796 rtx temp_reg = (! can_create_pseudo_p ()
3797 ? reg : gen_reg_rtx (Pmode));
3798
3799 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3800 won't get confused into thinking that these two instructions
3801 are loading in the true address of the symbol. If in the
3802 future a PIC rtx exists, that should be used instead. */
3803 if (TARGET_ARCH64)
3804 {
3805 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3806 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3807 }
3808 else
3809 {
3810 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3811 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3812 }
3813 address = temp_reg;
3814 gotdata_op = true;
3815 }
3816 else
3817 address = orig;
3818
3819 crtl->uses_pic_offset_table = 1;
3820 if (gotdata_op)
3821 {
3822 if (TARGET_ARCH64)
3823 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3824 pic_offset_table_rtx,
3825 address, orig));
3826 else
3827 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3828 pic_offset_table_rtx,
3829 address, orig));
3830 }
3831 else
3832 {
3833 pic_ref
3834 = gen_const_mem (Pmode,
3835 gen_rtx_PLUS (Pmode,
3836 pic_offset_table_rtx, address));
3837 insn = emit_move_insn (reg, pic_ref);
3838 }
3839
3840       /* Put a REG_EQUAL note on this insn, so that it can be optimized
3841          by the loop optimizer.  */
3842 set_unique_reg_note (insn, REG_EQUAL, orig);
3843 return reg;
3844 }
3845 else if (GET_CODE (orig) == CONST)
3846 {
3847 rtx base, offset;
3848
3849 if (GET_CODE (XEXP (orig, 0)) == PLUS
3850 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3851 return orig;
3852
3853 if (reg == 0)
3854 {
3855 gcc_assert (can_create_pseudo_p ());
3856 reg = gen_reg_rtx (Pmode);
3857 }
3858
3859 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3860 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3861 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3862 base == reg ? NULL_RTX : reg);
3863
3864 if (GET_CODE (offset) == CONST_INT)
3865 {
3866 if (SMALL_INT (offset))
3867 return plus_constant (Pmode, base, INTVAL (offset));
3868 else if (can_create_pseudo_p ())
3869 offset = force_reg (Pmode, offset);
3870 else
3871 /* If we reach here, then something is seriously wrong. */
3872 gcc_unreachable ();
3873 }
3874 return gen_rtx_PLUS (Pmode, base, offset);
3875 }
3876 else if (GET_CODE (orig) == LABEL_REF)
3877 /* ??? We ought to be checking that the register is live instead, in case
3878 it is eliminated. */
3879 crtl->uses_pic_offset_table = 1;
3880
3881 return orig;
3882 }
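
/* A hedged illustration of the -fPIC (flag_pic == 2) path above: with the
   gotdata operations, loading the address of a global `sym' comes out
   roughly as

	sethi	%gdop_hix22(sym), %g1
	xor	%g1, %gdop_lox10(sym), %g1
	ld	[%l7 + %g1], %g2, %gdop(sym)

   where %l7 holds the GOT pointer.  The %gdop annotation lets the linker
   relax the load into an add when `sym' turns out to bind locally.  */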
3883
3884 /* Try machine-dependent ways of modifying an illegitimate address X
3885 to be legitimate. If we find one, return the new, valid address.
3886
3887 OLDX is the address as it was before break_out_memory_refs was called.
3888 In some cases it is useful to look at this to decide what needs to be done.
3889
3890 MODE is the mode of the operand pointed to by X.
3891
3892 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3893
3894 static rtx
3895 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3896 enum machine_mode mode)
3897 {
3898 rtx orig_x = x;
3899
3900 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3901 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3902 force_operand (XEXP (x, 0), NULL_RTX));
3903 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3904 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3905 force_operand (XEXP (x, 1), NULL_RTX));
3906 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3907 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3908 XEXP (x, 1));
3909 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3910 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3911 force_operand (XEXP (x, 1), NULL_RTX));
3912
3913 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3914 return x;
3915
3916 if (sparc_tls_referenced_p (x))
3917 x = sparc_legitimize_tls_address (x);
3918 else if (flag_pic)
3919 x = sparc_legitimize_pic_address (x, NULL_RTX);
3920 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3921 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3922 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3923 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3924 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3925 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3926 else if (GET_CODE (x) == SYMBOL_REF
3927 || GET_CODE (x) == CONST
3928 || GET_CODE (x) == LABEL_REF)
3929 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3930
3931 return x;
3932 }
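
/* Example of the transformations above (a sketch in RTL terms): an
   address of the form

     (plus (mult (reg X) (reg Y)) (reg Z))

   is rewritten by forcing the MULT into a fresh pseudo T, yielding the
   hardware-supported REG+REG form (plus (reg Z) (reg T)).  */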
3933
3934 /* Delegitimize an address that was legitimized by the above function. */
3935
3936 static rtx
3937 sparc_delegitimize_address (rtx x)
3938 {
3939 x = delegitimize_mem_from_attrs (x);
3940
3941 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3942 switch (XINT (XEXP (x, 1), 1))
3943 {
3944 case UNSPEC_MOVE_PIC:
3945 case UNSPEC_TLSLE:
3946 x = XVECEXP (XEXP (x, 1), 0, 0);
3947 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3948 break;
3949 default:
3950 break;
3951 }
3952
3953 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3954 if (GET_CODE (x) == MINUS
3955 && REG_P (XEXP (x, 0))
3956 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3957 && GET_CODE (XEXP (x, 1)) == LO_SUM
3958 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3959 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3960 {
3961 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3962 gcc_assert (GET_CODE (x) == LABEL_REF);
3963 }
3964
3965 return x;
3966 }
3967
3968 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3969 replace the input X, or the original X if no replacement is called for.
3970 The output parameter *WIN is 1 if the calling macro should goto WIN,
3971 0 if it should not.
3972
3973 For SPARC, we wish to handle addresses by splitting them into
3974 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3975 This cuts the number of extra insns by one.
3976
3977 Do nothing when generating PIC code and the address is a symbolic
3978 operand or requires a scratch register. */
3979
3980 rtx
3981 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3982 int opnum, int type,
3983 int ind_levels ATTRIBUTE_UNUSED, int *win)
3984 {
3985 /* Decompose SImode constants into HIGH+LO_SUM. */
3986 if (CONSTANT_P (x)
3987 && (mode != TFmode || TARGET_ARCH64)
3988 && GET_MODE (x) == SImode
3989 && GET_CODE (x) != LO_SUM
3990 && GET_CODE (x) != HIGH
3991 && sparc_cmodel <= CM_MEDLOW
3992 && !(flag_pic
3993 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3994 {
3995 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3996 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3997 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3998 opnum, (enum reload_type)type);
3999 *win = 1;
4000 return x;
4001 }
4002
4003 /* We have to recognize what we have already generated above. */
4004 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
4005 {
4006 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4007 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4008 opnum, (enum reload_type)type);
4009 *win = 1;
4010 return x;
4011 }
4012
4013 *win = 0;
4014 return x;
4015 }
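
/* Illustration (hedged): reloading the SImode constant address `sym'
   produces (lo_sum (high (symbol_ref sym)) (symbol_ref sym)) and pushes
   a reload for the HIGH part only, so the final code keeps the LO_SUM
   inside the memory reference:

	sethi	%hi(sym), %g1
	ld	[%g1 + %lo(sym)], %o0

   which is one insn shorter than materializing the full address first.  */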
4016
4017 /* Return true if ADDR (a legitimate address expression)
4018 has an effect that depends on the machine mode it is used for.
4019
4020 In PIC mode,
4021
4022 (mem:HI [%l7+a])
4023
4024 is not equivalent to
4025
4026 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
4027
4028 because [%l7+a+1] is interpreted as the address of (a+1). */
4029
4030
4031 static bool
4032 sparc_mode_dependent_address_p (const_rtx addr)
4033 {
4034 if (flag_pic && GET_CODE (addr) == PLUS)
4035 {
4036 rtx op0 = XEXP (addr, 0);
4037 rtx op1 = XEXP (addr, 1);
4038 if (op0 == pic_offset_table_rtx
4039 && symbolic_operand (op1, VOIDmode))
4040 return true;
4041 }
4042
4043 return false;
4044 }
4045
4046 #ifdef HAVE_GAS_HIDDEN
4047 # define USE_HIDDEN_LINKONCE 1
4048 #else
4049 # define USE_HIDDEN_LINKONCE 0
4050 #endif
4051
4052 static void
4053 get_pc_thunk_name (char name[32], unsigned int regno)
4054 {
4055 const char *reg_name = reg_names[regno];
4056
4057 /* Skip the leading '%' as that cannot be used in a
4058 symbol name. */
4059 reg_name += 1;
4060
4061 if (USE_HIDDEN_LINKONCE)
4062 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4063 else
4064 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4065 }
4066
4067 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4068
4069 static rtx
4070 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4071 {
4072 int orig_flag_pic = flag_pic;
4073 rtx insn;
4074
4075 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4076 flag_pic = 0;
4077 if (TARGET_ARCH64)
4078 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4079 else
4080 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4081 flag_pic = orig_flag_pic;
4082
4083 return insn;
4084 }
4085
4086 /* Emit code to load the GOT register. */
4087
4088 void
4089 load_got_register (void)
4090 {
4091 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4092 if (!global_offset_table_rtx)
4093 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4094
4095 if (TARGET_VXWORKS_RTP)
4096 emit_insn (gen_vxworks_load_got ());
4097 else
4098 {
4099 /* The GOT symbol is subject to a PC-relative relocation so we need a
4100 helper function to add the PC value and thus get the final value. */
4101 if (!got_helper_rtx)
4102 {
4103 char name[32];
4104 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4105 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4106 }
4107
4108 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4109 got_helper_rtx,
4110 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4111 }
4112
4113 /* Need to emit this whether or not we obey regdecls,
4114 since setjmp/longjmp can invalidate the register liveness info.
4115 ??? In the case where we don't obey regdecls, this is not sufficient
4116 since we may not fall out the bottom. */
4117 emit_use (global_offset_table_rtx);
4118 }
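
/* For reference, on 32-bit SVR4 the sequence emitted above comes out
   roughly as (a sketch; the thunk name is built by get_pc_thunk_name):

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   with the thunk containing `jmp %o7+8; add %o7, %l7, %l7', so that %l7
   ends up pointing at the GOT.  The -4/+4 adjustments account for the PC
   being taken at the call instruction rather than at the sethi.  */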
4119
4120 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4121 address of the call target. */
4122
4123 void
4124 sparc_emit_call_insn (rtx pat, rtx addr)
4125 {
4126 rtx insn;
4127
4128 insn = emit_call_insn (pat);
4129
4130 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4131 if (TARGET_VXWORKS_RTP
4132 && flag_pic
4133 && GET_CODE (addr) == SYMBOL_REF
4134 && (SYMBOL_REF_DECL (addr)
4135 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4136 : !SYMBOL_REF_LOCAL_P (addr)))
4137 {
4138 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4139 crtl->uses_pic_offset_table = 1;
4140 }
4141 }
4142 \f
4143 /* Return 1 if RTX is a MEM which is known to be aligned to at
4144 least a DESIRED byte boundary. */
4145
4146 int
4147 mem_min_alignment (rtx mem, int desired)
4148 {
4149 rtx addr, base, offset;
4150
4151 /* If it's not a MEM we can't accept it. */
4152 if (GET_CODE (mem) != MEM)
4153 return 0;
4154
4155 /* Obviously... */
4156 if (!TARGET_UNALIGNED_DOUBLES
4157 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4158 return 1;
4159
4160 /* ??? The rest of the function predates MEM_ALIGN so
4161 there is probably a bit of redundancy. */
4162 addr = XEXP (mem, 0);
4163 base = offset = NULL_RTX;
4164 if (GET_CODE (addr) == PLUS)
4165 {
4166 if (GET_CODE (XEXP (addr, 0)) == REG)
4167 {
4168 base = XEXP (addr, 0);
4169
4170 /* What we are saying here is that if the base
4171 REG is aligned properly, the compiler will make
4172 sure any REG based index upon it will be so
4173 as well. */
4174 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4175 offset = XEXP (addr, 1);
4176 else
4177 offset = const0_rtx;
4178 }
4179 }
4180 else if (GET_CODE (addr) == REG)
4181 {
4182 base = addr;
4183 offset = const0_rtx;
4184 }
4185
4186 if (base != NULL_RTX)
4187 {
4188 int regno = REGNO (base);
4189
4190 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4191 {
4192 /* Check if the compiler has recorded some information
4193 about the alignment of the base REG. If reload has
4194 completed, we already matched with proper alignments.
4195 If global_alloc has not been run, however, reload might
4196 give us an unaligned pointer to the local stack.  */
4197 if (((cfun != 0
4198 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4199 || (optimize && reload_completed))
4200 && (INTVAL (offset) & (desired - 1)) == 0)
4201 return 1;
4202 }
4203 else
4204 {
4205 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4206 return 1;
4207 }
4208 }
4209 else if (! TARGET_UNALIGNED_DOUBLES
4210 || CONSTANT_P (addr)
4211 || GET_CODE (addr) == LO_SUM)
4212 {
4213 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4214 is true, in which case we can only assume that an access is aligned if
4215 it is to a constant address, or the address involves a LO_SUM. */
4216 return 1;
4217 }
4218
4219 /* An obviously unaligned address. */
4220 return 0;
4221 }
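
/* Usage sketch: mem_min_alignment (mem, 8) answers whether MEM may be
   accessed with ldd/std.  For instance, a MEM at [%fp-16] passes the
   (offset - SPARC_STACK_BIAS) & 7 test (with a zero bias), whereas one
   at [%fp-12] does not and must be accessed as two word loads.  */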
4222
4223 \f
4224 /* Vectors to keep interesting information about registers where it can
4225 easily be looked up.  We used to use the actual mode value as the bit
4226 number, but there are more than 32 modes now.  Instead we use two tables:
4227 one indexed by hard register number, and one indexed by mode.  */
4228
4229 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4230 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4231 mapped into one sparc_mode_class mode. */
4232
4233 enum sparc_mode_class {
4234 S_MODE, D_MODE, T_MODE, O_MODE,
4235 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4236 CC_MODE, CCFP_MODE
4237 };
4238
4239 /* Modes for single-word and smaller quantities. */
4240 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4241
4242 /* Modes for double-word and smaller quantities. */
4243 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4244
4245 /* Modes for quad-word and smaller quantities. */
4246 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4247
4248 /* Modes for 8-word and smaller quantities. */
4249 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4250
4251 /* Modes for single-float quantities. We must allow any single word or
4252 smaller quantity. This is because the fix/float conversion instructions
4253 take integer inputs/outputs from the float registers. */
4254 #define SF_MODES (S_MODES)
4255
4256 /* Modes for double-float and smaller quantities. */
4257 #define DF_MODES (D_MODES)
4258
4259 /* Modes for quad-float and smaller quantities. */
4260 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4261
4262 /* Modes for quad-float pairs and smaller quantities. */
4263 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4264
4265 /* Modes for double-float only quantities. */
4266 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4267
4268 /* Modes for quad-float and double-float only quantities. */
4269 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4270
4271 /* Modes for quad-float pairs and double-float only quantities. */
4272 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4273
4274 /* Modes for condition codes. */
4275 #define CC_MODES (1 << (int) CC_MODE)
4276 #define CCFP_MODES (1 << (int) CCFP_MODE)
4277
4278 /* Value is 1 if the register/mode pair is acceptable on SPARC.
4279 The funny mixture of D and T modes is because integer operations
4280 do not operate specially on tetra-word quantities, so non-quad-aligned
4281 registers can hold quad-word quantities (except %o4 and %i4, because
4282 a quantity there would cross fixed registers).  */
4283
4284 /* This points to either the 32 bit or the 64 bit version. */
4285 const int *hard_regno_mode_classes;
4286
4287 static const int hard_32bit_mode_classes[] = {
4288 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4289 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4290 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4291 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4292
4293 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4294 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4295 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4296 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4297
4298 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4299 and none can hold SFmode/SImode values. */
4300 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4301 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4302 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4303 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4304
4305 /* %fcc[0123] */
4306 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4307
4308 /* %icc, %sfp, %gsr */
4309 CC_MODES, 0, D_MODES
4310 };
4311
4312 static const int hard_64bit_mode_classes[] = {
4313 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4314 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4315 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4316 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4317
4318 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4319 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4320 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4321 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4322
4323 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4324 and none can hold SFmode/SImode values. */
4325 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4326 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4327 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4328 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4329
4330 /* %fcc[0123] */
4331 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4332
4333 /* %icc, %sfp, %gsr */
4334 CC_MODES, 0, D_MODES
4335 };
4336
4337 int sparc_mode_class [NUM_MACHINE_MODES];
4338
4339 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4340
4341 static void
4342 sparc_init_modes (void)
4343 {
4344 int i;
4345
4346 for (i = 0; i < NUM_MACHINE_MODES; i++)
4347 {
4348 switch (GET_MODE_CLASS (i))
4349 {
4350 case MODE_INT:
4351 case MODE_PARTIAL_INT:
4352 case MODE_COMPLEX_INT:
4353 if (GET_MODE_SIZE (i) <= 4)
4354 sparc_mode_class[i] = 1 << (int) S_MODE;
4355 else if (GET_MODE_SIZE (i) == 8)
4356 sparc_mode_class[i] = 1 << (int) D_MODE;
4357 else if (GET_MODE_SIZE (i) == 16)
4358 sparc_mode_class[i] = 1 << (int) T_MODE;
4359 else if (GET_MODE_SIZE (i) == 32)
4360 sparc_mode_class[i] = 1 << (int) O_MODE;
4361 else
4362 sparc_mode_class[i] = 0;
4363 break;
4364 case MODE_VECTOR_INT:
4365 if (GET_MODE_SIZE (i) <= 4)
4366 sparc_mode_class[i] = 1 << (int)SF_MODE;
4367 else if (GET_MODE_SIZE (i) == 8)
4368 sparc_mode_class[i] = 1 << (int)DF_MODE;
4369 break;
4370 case MODE_FLOAT:
4371 case MODE_COMPLEX_FLOAT:
4372 if (GET_MODE_SIZE (i) <= 4)
4373 sparc_mode_class[i] = 1 << (int) SF_MODE;
4374 else if (GET_MODE_SIZE (i) == 8)
4375 sparc_mode_class[i] = 1 << (int) DF_MODE;
4376 else if (GET_MODE_SIZE (i) == 16)
4377 sparc_mode_class[i] = 1 << (int) TF_MODE;
4378 else if (GET_MODE_SIZE (i) == 32)
4379 sparc_mode_class[i] = 1 << (int) OF_MODE;
4380 else
4381 sparc_mode_class[i] = 0;
4382 break;
4383 case MODE_CC:
4384 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4385 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4386 else
4387 sparc_mode_class[i] = 1 << (int) CC_MODE;
4388 break;
4389 default:
4390 sparc_mode_class[i] = 0;
4391 break;
4392 }
4393 }
4394
4395 if (TARGET_ARCH64)
4396 hard_regno_mode_classes = hard_64bit_mode_classes;
4397 else
4398 hard_regno_mode_classes = hard_32bit_mode_classes;
4399
4400 /* Initialize the array used by REGNO_REG_CLASS. */
4401 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4402 {
4403 if (i < 16 && TARGET_V8PLUS)
4404 sparc_regno_reg_class[i] = I64_REGS;
4405 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4406 sparc_regno_reg_class[i] = GENERAL_REGS;
4407 else if (i < 64)
4408 sparc_regno_reg_class[i] = FP_REGS;
4409 else if (i < 96)
4410 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4411 else if (i < 100)
4412 sparc_regno_reg_class[i] = FPCC_REGS;
4413 else
4414 sparc_regno_reg_class[i] = NO_REGS;
4415 }
4416 }
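
/* The two tables are consulted jointly: a (regno, mode) pair is valid
   precisely when the bit for the mode's class is present in the
   register's mask, i.e. the test behind HARD_REGNO_MODE_OK is essentially

     (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0

   For example, DImode maps to D_MODE, whose bit appears in the T_MODES
   and D_MODES entries used for (most) even-numbered integer registers in
   the 32-bit table above, but not in the S_MODES entries used for the
   odd-numbered ones.  */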
4417 \f
4418 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4419
4420 static inline bool
4421 save_global_or_fp_reg_p (unsigned int regno,
4422 int leaf_function ATTRIBUTE_UNUSED)
4423 {
4424 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4425 }
4426
4427 /* Return whether the return address register (%i7) is needed. */
4428
4429 static inline bool
4430 return_addr_reg_needed_p (int leaf_function)
4431 {
4432 /* If it is live, for example because of __builtin_return_address (0). */
4433 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4434 return true;
4435
4436 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4437 if (!leaf_function
4438 /* Loading the GOT register clobbers %o7. */
4439 || crtl->uses_pic_offset_table
4440 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4441 return true;
4442
4443 return false;
4444 }
4445
4446 /* Return whether REGNO, a local or in register, must be saved/restored. */
4447
4448 static bool
4449 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4450 {
4451 /* General case: call-saved registers live at some point. */
4452 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4453 return true;
4454
4455 /* Frame pointer register (%fp) if needed. */
4456 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4457 return true;
4458
4459 /* Return address register (%i7) if needed. */
4460 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4461 return true;
4462
4463 /* GOT register (%l7) if needed. */
4464 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4465 return true;
4466
4467 /* If the function accesses prior frames, the frame pointer and the return
4468 address of the previous frame must be saved on the stack. */
4469 if (crtl->accesses_prior_frames
4470 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4471 return true;
4472
4473 return false;
4474 }
4475
4476 /* Compute the frame size required by the function. This function is called
4477 during the reload pass and also by sparc_expand_prologue. */
4478
4479 HOST_WIDE_INT
4480 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4481 {
4482 HOST_WIDE_INT frame_size, apparent_frame_size;
4483 int args_size, n_global_fp_regs = 0;
4484 bool save_local_in_regs_p = false;
4485 unsigned int i;
4486
4487 /* If the function allocates dynamic stack space, the dynamic offset is
4488 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4489 if (leaf_function && !cfun->calls_alloca)
4490 args_size = 0;
4491 else
4492 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4493
4494 /* Calculate space needed for global registers. */
4495 if (TARGET_ARCH64)
     {
4496 for (i = 0; i < 8; i++)
4497 if (save_global_or_fp_reg_p (i, 0))
4498 n_global_fp_regs += 2;
     }
4499 else
4500 for (i = 0; i < 8; i += 2)
4501 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4502 n_global_fp_regs += 2;
4503
4504 /* In the flat window model, find out which local and in registers need to
4505 be saved. We don't reserve space in the current frame for them as they
4506 will be spilled into the register window save area of the caller's frame.
4507 However, as soon as we use this register window save area, we must create
4508 that of the current frame to make it the live one. */
4509 if (TARGET_FLAT)
4510 for (i = 16; i < 32; i++)
4511 if (save_local_or_in_reg_p (i, leaf_function))
4512 {
4513 save_local_in_regs_p = true;
4514 break;
4515 }
4516
4517 /* Calculate space needed for FP registers. */
4518 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4519 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4520 n_global_fp_regs += 2;
4521
4522 if (size == 0
4523 && n_global_fp_regs == 0
4524 && args_size == 0
4525 && !save_local_in_regs_p)
4526 frame_size = apparent_frame_size = 0;
4527 else
4528 {
4529 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4530 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4531 apparent_frame_size += n_global_fp_regs * 4;
4532
4533 /* We need to add the size of the outgoing argument area. */
4534 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4535
4536 /* And that of the register window save area. */
4537 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4538
4539 /* Finally, bump to the appropriate alignment. */
4540 frame_size = SPARC_STACK_ALIGN (frame_size);
4541 }
4542
4543 /* Set up values for use in prologue and epilogue. */
4544 sparc_frame_size = frame_size;
4545 sparc_apparent_frame_size = apparent_frame_size;
4546 sparc_n_global_fp_regs = n_global_fp_regs;
4547 sparc_save_local_in_regs_p = save_local_in_regs_p;
4548
4549 return frame_size;
4550 }
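
/* Worked example with hypothetical numbers: on 32-bit, with 40 bytes of
   locals, no global/FP register saves, args_size = 20, and taking
   STARTING_FRAME_OFFSET as 0 purely for illustration:

     apparent_frame_size = (40 + 7) & -8           = 40
     frame_size = 40 + ((20 + 7) & -8)             = 64
     frame_size += FIRST_PARM_OFFSET (cfun->decl)

   after which SPARC_STACK_ALIGN rounds the total up to the stack
   alignment required by the ABI.  */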
4551
4552 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
4553
4554 int
4555 sparc_initial_elimination_offset (int to)
4556 {
4557 int offset;
4558
4559 if (to == STACK_POINTER_REGNUM)
4560 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
4561 else
4562 offset = 0;
4563
4564 offset += SPARC_STACK_BIAS;
4565 return offset;
4566 }
4567
4568 /* Output any necessary .register pseudo-ops. */
4569
4570 void
4571 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4572 {
4573 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4574 int i;
4575
4576 if (TARGET_ARCH32)
4577 return;
4578
4579 /* Check if %g[2367] were used without
4580 .register being printed for them already. */
4581 for (i = 2; i < 8; i++)
4582 {
4583 if (df_regs_ever_live_p (i)
4584 && ! sparc_hard_reg_printed [i])
4585 {
4586 sparc_hard_reg_printed [i] = 1;
4587 /* %g7 is used as TLS base register, use #ignore
4588 for it instead of #scratch. */
4589 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4590 i == 7 ? "ignore" : "scratch");
4591 }
4592 if (i == 3) i = 5;
4593 }
4594 #endif
4595 }
4596
4597 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4598
4599 #if PROBE_INTERVAL > 4096
4600 #error Cannot use indexed addressing mode for stack probing
4601 #endif
4602
4603 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4604 inclusive. These are offsets from the current stack pointer.
4605
4606 Note that we don't use the REG+REG addressing mode for the probes because
4607 of the stack bias in 64-bit mode. And it doesn't really buy us anything
4608 so the advantage of having a single code path wins here.  */
4609
4610 static void
4611 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4612 {
4613 rtx g1 = gen_rtx_REG (Pmode, 1);
4614
4615 /* See if we have a constant small number of probes to generate. If so,
4616 that's the easy case. */
4617 if (size <= PROBE_INTERVAL)
4618 {
4619 emit_move_insn (g1, GEN_INT (first));
4620 emit_insn (gen_rtx_SET (VOIDmode, g1,
4621 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4622 emit_stack_probe (plus_constant (Pmode, g1, -size));
4623 }
4624
4625 /* The run-time loop is made up of 10 insns in the generic case while the
4626 compile-time loop is made up of 4+2*(n-2) insns for n intervals.  */
4627 else if (size <= 5 * PROBE_INTERVAL)
4628 {
4629 HOST_WIDE_INT i;
4630
4631 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4632 emit_insn (gen_rtx_SET (VOIDmode, g1,
4633 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4634 emit_stack_probe (g1);
4635
4636 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4637 it exceeds SIZE. If only two probes are needed, this will not
4638 generate any code. Then probe at FIRST + SIZE. */
4639 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4640 {
4641 emit_insn (gen_rtx_SET (VOIDmode, g1,
4642 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
4643 emit_stack_probe (g1);
4644 }
4645
4646 emit_stack_probe (plus_constant (Pmode, g1,
4647 (i - PROBE_INTERVAL) - size));
4648 }
4649
4650 /* Otherwise, do the same as above, but in a loop. Note that we must be
4651 extra careful with variables wrapping around because we might be at
4652 the very top (or the very bottom) of the address space and we have
4653 to be able to handle this case properly; in particular, we use an
4654 equality test for the loop condition. */
4655 else
4656 {
4657 HOST_WIDE_INT rounded_size;
4658 rtx g4 = gen_rtx_REG (Pmode, 4);
4659
4660 emit_move_insn (g1, GEN_INT (first));
4661
4662
4663 /* Step 1: round SIZE to the previous multiple of the interval. */
4664
4665 rounded_size = size & -PROBE_INTERVAL;
4666 emit_move_insn (g4, GEN_INT (rounded_size));
4667
4668
4669 /* Step 2: compute initial and final value of the loop counter. */
4670
4671 /* TEST_ADDR = SP + FIRST. */
4672 emit_insn (gen_rtx_SET (VOIDmode, g1,
4673 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4674
4675 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4676 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4677
4678
4679 /* Step 3: the loop
4680
4681 while (TEST_ADDR != LAST_ADDR)
4682 {
4683 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4684 probe at TEST_ADDR
4685 }
4686
4687 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4688 until it is equal to ROUNDED_SIZE. */
4689
4690 if (TARGET_ARCH64)
4691 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4692 else
4693 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4694
4695
4696 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4697 that SIZE is equal to ROUNDED_SIZE. */
4698
4699 if (size != rounded_size)
4700 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
4701 }
4702
4703 /* Make sure nothing is scheduled before we are done. */
4704 emit_insn (gen_blockage ());
4705 }
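
/* Example: for FIRST = 0 and SIZE = 12288 with PROBE_INTERVAL = 4096,
   the second branch above applies (12288 <= 5 * 4096) and fully unrolled
   probes are emitted at %sp - 4096, %sp - 8192 (one iteration of the
   compile-time loop) and finally %sp - 12288; no run-time loop is
   generated.  */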
4706
4707 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4708 absolute addresses. */
4709
4710 const char *
4711 output_probe_stack_range (rtx reg1, rtx reg2)
4712 {
4713 static int labelno = 0;
4714 char loop_lab[32], end_lab[32];
4715 rtx xops[2];
4716
4717 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4718 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4719
4720 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4721
4722 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4723 xops[0] = reg1;
4724 xops[1] = reg2;
4725 output_asm_insn ("cmp\t%0, %1", xops);
4726 if (TARGET_ARCH64)
4727 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4728 else
4729 fputs ("\tbe\t", asm_out_file);
4730 assemble_name_raw (asm_out_file, end_lab);
4731 fputc ('\n', asm_out_file);
4732
4733 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4734 xops[1] = GEN_INT (-PROBE_INTERVAL);
4735 output_asm_insn (" add\t%0, %1, %0", xops);
4736
4737 /* Probe at TEST_ADDR and branch. */
4738 if (TARGET_ARCH64)
4739 fputs ("\tba,pt\t%xcc,", asm_out_file);
4740 else
4741 fputs ("\tba\t", asm_out_file);
4742 assemble_name_raw (asm_out_file, loop_lab);
4743 fputc ('\n', asm_out_file);
4744 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4745 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4746
4747 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4748
4749 return "";
4750 }
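
/* The loop emitted above looks like this (a sketch with TEST_ADDR in %g1
   and LAST_ADDR in %g4, as in the caller, and a zero stack bias):

   .LPSRL0:
	cmp	%g1, %g4
	be	.LPSRE0
	 add	%g1, -4096, %g1
	ba	.LPSRL0
	 st	%g0, [%g1+0]
   .LPSRE0:

   The decrement sits in the delay slot of the exit branch and the probe
   store in the delay slot of the back branch.  */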
4751
4752 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4753 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4754 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4755 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4756 the action to be performed if it returns false. Return the new offset. */
4757
4758 typedef bool (*sorr_pred_t) (unsigned int, int);
4759 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4760
4761 static int
4762 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4763 int offset, int leaf_function, sorr_pred_t save_p,
4764 sorr_act_t action_true, sorr_act_t action_false)
4765 {
4766 unsigned int i;
4767 rtx mem, insn;
4768
4769 if (TARGET_ARCH64 && high <= 32)
4770 {
4771 int fp_offset = -1;
4772
4773 for (i = low; i < high; i++)
4774 {
4775 if (save_p (i, leaf_function))
4776 {
4777 mem = gen_frame_mem (DImode, plus_constant (Pmode,
4778 base, offset));
4779 if (action_true == SORR_SAVE)
4780 {
4781 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4782 RTX_FRAME_RELATED_P (insn) = 1;
4783 }
4784 else /* action_true == SORR_RESTORE */
4785 {
4786 /* The frame pointer must be restored last since its old
4787 value may be used as base address for the frame. This
4788 is problematic in 64-bit mode only because of the lack
4789 of a double-word load instruction.  */
4790 if (i == HARD_FRAME_POINTER_REGNUM)
4791 fp_offset = offset;
4792 else
4793 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4794 }
4795 offset += 8;
4796 }
4797 else if (action_false == SORR_ADVANCE)
4798 offset += 8;
4799 }
4800
4801 if (fp_offset >= 0)
4802 {
4803 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
4804 emit_move_insn (hard_frame_pointer_rtx, mem);
4805 }
4806 }
4807 else
4808 {
4809 for (i = low; i < high; i += 2)
4810 {
4811 bool reg0 = save_p (i, leaf_function);
4812 bool reg1 = save_p (i + 1, leaf_function);
4813 enum machine_mode mode;
4814 int regno;
4815
4816 if (reg0 && reg1)
4817 {
4818 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
4819 regno = i;
4820 }
4821 else if (reg0)
4822 {
4823 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4824 regno = i;
4825 }
4826 else if (reg1)
4827 {
4828 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4829 regno = i + 1;
4830 offset += 4;
4831 }
4832 else
4833 {
4834 if (action_false == SORR_ADVANCE)
4835 offset += 8;
4836 continue;
4837 }
4838
4839 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
4840 if (action_true == SORR_SAVE)
4841 {
4842 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4843 RTX_FRAME_RELATED_P (insn) = 1;
4844 if (mode == DImode)
4845 {
4846 rtx set1, set2;
4847 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
4848 offset));
4849 set1 = gen_rtx_SET (VOIDmode, mem,
4850 gen_rtx_REG (SImode, regno));
4851 RTX_FRAME_RELATED_P (set1) = 1;
4852 mem
4853 = gen_frame_mem (SImode, plus_constant (Pmode, base,
4854 offset + 4));
4855 set2 = gen_rtx_SET (VOIDmode, mem,
4856 gen_rtx_REG (SImode, regno + 1));
4857 RTX_FRAME_RELATED_P (set2) = 1;
4858 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4859 gen_rtx_PARALLEL (VOIDmode,
4860 gen_rtvec (2, set1, set2)));
4861 }
4862 }
4863 else /* action_true == SORR_RESTORE */
4864 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4865
4866 /* Always preserve double-word alignment. */
4867 offset = (offset + 8) & -8;
4868 }
4869 }
4870
4871 return offset;
4872 }
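
/* E.g. when two adjacent 32-bit windowed registers are both live, they
   are saved as a single DImode (std) move; the REG_FRAME_RELATED_EXPR
   note built above then describes that move to the unwinder as a
   PARALLEL of two SImode stores, since the CFI machinery tracks each
   register individually.  */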
4873
4874 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4875
4876 static rtx
4877 emit_adjust_base_to_offset (rtx base, int offset)
4878 {
4879 /* ??? This might be optimized a little as %g1 might already have a
4880 value close enough that a single add insn will do. */
4881 /* ??? Although, all of this is probably only a temporary fix because
4882 if %g1 can hold a function result, then sparc_expand_epilogue will
4883 lose (the result will be clobbered). */
4884 rtx new_base = gen_rtx_REG (Pmode, 1);
4885 emit_move_insn (new_base, GEN_INT (offset));
4886 emit_insn (gen_rtx_SET (VOIDmode,
4887 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4888 return new_base;
4889 }
4890
4891 /* Emit code to save/restore call-saved global and FP registers. */
4892
4893 static void
4894 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4895 {
4896 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4897 {
4898 base = emit_adjust_base_to_offset (base, offset);
4899 offset = 0;
4900 }
4901
4902 offset
4903 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4904 save_global_or_fp_reg_p, action, SORR_NONE);
4905 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4906 save_global_or_fp_reg_p, action, SORR_NONE);
4907 }
4908
4909 /* Emit code to save/restore call-saved local and in registers. */
4910
4911 static void
4912 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4913 {
4914 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4915 {
4916 base = emit_adjust_base_to_offset (base, offset);
4917 offset = 0;
4918 }
4919
4920 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4921 save_local_or_in_reg_p, action, SORR_ADVANCE);
4922 }
4923
4924 /* Emit a window_save insn. */
4925
4926 static rtx
4927 emit_window_save (rtx increment)
4928 {
4929 rtx insn = emit_insn (gen_window_save (increment));
4930 RTX_FRAME_RELATED_P (insn) = 1;
4931
4932 /* The incoming return address (%o7) is saved in %i7. */
4933 add_reg_note (insn, REG_CFA_REGISTER,
4934 gen_rtx_SET (VOIDmode,
4935 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4936 gen_rtx_REG (Pmode,
4937 INCOMING_RETURN_ADDR_REGNUM)));
4938
4939 /* The window save event. */
4940 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4941
4942 /* The CFA is %fp, the hard frame pointer. */
4943 add_reg_note (insn, REG_CFA_DEF_CFA,
4944 plus_constant (Pmode, hard_frame_pointer_rtx,
4945 INCOMING_FRAME_SP_OFFSET));
4946
4947 return insn;
4948 }
4949
4950 /* Generate an increment for the stack pointer. */
4951
4952 static rtx
4953 gen_stack_pointer_inc (rtx increment)
4954 {
4955 return gen_rtx_SET (VOIDmode,
4956 stack_pointer_rtx,
4957 gen_rtx_PLUS (Pmode,
4958 stack_pointer_rtx,
4959 increment));
4960 }
4961
4962 /* Generate a decrement for the stack pointer. */
4963
4964 static rtx
4965 gen_stack_pointer_dec (rtx decrement)
4966 {
4967 return gen_rtx_SET (VOIDmode,
4968 stack_pointer_rtx,
4969 gen_rtx_MINUS (Pmode,
4970 stack_pointer_rtx,
4971 decrement));
4972 }
4973
4974 /* Expand the function prologue. The prologue is responsible for reserving
4975 storage for the frame, saving the call-saved registers and loading the
4976 GOT register if needed. */
4977
4978 void
4979 sparc_expand_prologue (void)
4980 {
4981 HOST_WIDE_INT size;
4982 rtx insn;
4983
4984 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
4985 on the final value of the flag means deferring the prologue/epilogue
4986 expansion until just before the second scheduling pass, which is too
4987 late to emit multiple epilogues or return insns.
4988
4989 Of course we are making the assumption that the value of the flag
4990 will not change between now and its final value. Of the three parts
4991 of the formula, only the last one can reasonably vary.  Let's take a
4992 closer look, assuming that the first two are true (otherwise the last
4993 value is effectively silenced).
4994
4995 If only_leaf_regs_used returns false, the global predicate will also
4996 be false so the actual frame size calculated below will be positive.
4997 As a consequence, the save_register_window insn will be emitted in
4998 the instruction stream; now this insn explicitly references %fp
4999 which is not a leaf register so only_leaf_regs_used will always
5000 return false subsequently.
5001
5002 If only_leaf_regs_used returns true, we hope that the subsequent
5003 optimization passes won't cause non-leaf registers to pop up. For
5004 example, the regrename pass has special provisions to not rename to
5005 non-leaf registers in a leaf function. */
5006 sparc_leaf_function_p
5007 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5008
5009 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5010
5011 if (flag_stack_usage_info)
5012 current_function_static_stack_size = size;
5013
5014 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5015 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5016
5017 if (size == 0)
5018 ; /* do nothing. */
5019 else if (sparc_leaf_function_p)
5020 {
5021 rtx size_int_rtx = GEN_INT (-size);
5022
5023 if (size <= 4096)
5024 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5025 else if (size <= 8192)
5026 {
5027 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5028 RTX_FRAME_RELATED_P (insn) = 1;
5029
5030 /* %sp is still the CFA register. */
5031 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5032 }
5033 else
5034 {
5035 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5036 emit_move_insn (size_rtx, size_int_rtx);
5037 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5038 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5039 gen_stack_pointer_inc (size_int_rtx));
5040 }
5041
5042 RTX_FRAME_RELATED_P (insn) = 1;
5043 }
5044 else
5045 {
5046 rtx size_int_rtx = GEN_INT (-size);
5047
5048 if (size <= 4096)
5049 emit_window_save (size_int_rtx);
5050 else if (size <= 8192)
5051 {
5052 emit_window_save (GEN_INT (-4096));
5053
5054 /* %sp is not the CFA register anymore. */
5055 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5056
5057 /* Make sure no %fp-based store is issued until after the frame is
5058 established. The offset between the frame pointer and the stack
5059 pointer is calculated relative to the value of the stack pointer
5060 at the end of the function prologue, and moving instructions that
5061 access the stack via the frame pointer between the instructions
5062 that decrement the stack pointer could result in accessing the
5063 register window save area, which is volatile. */
5064 emit_insn (gen_frame_blockage ());
5065 }
5066 else
5067 {
5068 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5069 emit_move_insn (size_rtx, size_int_rtx);
5070 emit_window_save (size_rtx);
5071 }
5072 }
5073
5074 if (sparc_leaf_function_p)
5075 {
5076 sparc_frame_base_reg = stack_pointer_rtx;
5077 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5078 }
5079 else
5080 {
5081 sparc_frame_base_reg = hard_frame_pointer_rtx;
5082 sparc_frame_base_offset = SPARC_STACK_BIAS;
5083 }
5084
5085 if (sparc_n_global_fp_regs > 0)
5086 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5087 sparc_frame_base_offset
5088 - sparc_apparent_frame_size,
5089 SORR_SAVE);
5090
5091 /* Load the GOT register if needed. */
5092 if (crtl->uses_pic_offset_table)
5093 load_got_register ();
5094
5095 /* Advertise that the data calculated just above are now valid. */
5096 sparc_prologue_data_valid_p = true;
5097 }
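
/* Note on the 4096/8192 split above: the immediate field of SPARC
   arithmetic instructions is a signed 13-bit quantity (simm13, range
   -4096..4095), so -size fits in a single `save' or `add' only when
   size <= 4096; up to 8192, two adjustments suffice; beyond that the
   amount must first be loaded into the scratch register %g1.  A
   non-leaf function with a 96-byte frame thus simply gets

	save	%sp, -96, %sp  */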
5098
5099 /* Expand the function prologue for the flat window model.  Like the
5100 regular variant above, it reserves storage for the frame, saves the
5101 call-saved registers and loads the GOT register if needed.  */
5102
5103 void
5104 sparc_flat_expand_prologue (void)
5105 {
5106 HOST_WIDE_INT size;
5107 rtx insn;
5108
5109 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
5110
5111 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5112
5113 if (flag_stack_usage_info)
5114 current_function_static_stack_size = size;
5115
5116 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5117 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5118
5119 if (sparc_save_local_in_regs_p)
5120 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5121 SORR_SAVE);
5122
5123 if (size == 0)
5124 ; /* do nothing. */
5125 else
5126 {
5127 rtx size_int_rtx, size_rtx;
5128
5129 size_rtx = size_int_rtx = GEN_INT (-size);
5130
5131 /* We establish the frame (i.e. decrement the stack pointer) first, even
5132 if we use a frame pointer, because we cannot clobber any call-saved
5133 registers, including the frame pointer, if we haven't created a new
5134 register save area, for the sake of compatibility with the ABI. */
5135 if (size <= 4096)
5136 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5137 else if (size <= 8192 && !frame_pointer_needed)
5138 {
5139 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5140 RTX_FRAME_RELATED_P (insn) = 1;
5141 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5142 }
5143 else
5144 {
5145 size_rtx = gen_rtx_REG (Pmode, 1);
5146 emit_move_insn (size_rtx, size_int_rtx);
5147 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5148 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5149 gen_stack_pointer_inc (size_int_rtx));
5150 }
5151 RTX_FRAME_RELATED_P (insn) = 1;
5152
5153 /* Ensure nothing is scheduled until after the frame is established. */
5154 emit_insn (gen_blockage ());
5155
5156 if (frame_pointer_needed)
5157 {
5158 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5159 gen_rtx_MINUS (Pmode,
5160 stack_pointer_rtx,
5161 size_rtx)));
5162 RTX_FRAME_RELATED_P (insn) = 1;
5163
5164 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5165 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5166 plus_constant (Pmode, stack_pointer_rtx,
5167 size)));
5168 }
5169
5170 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5171 {
5172 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5173 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5174
5175 insn = emit_move_insn (i7, o7);
5176 RTX_FRAME_RELATED_P (insn) = 1;
5177
5178 add_reg_note (insn, REG_CFA_REGISTER,
5179 gen_rtx_SET (VOIDmode, i7, o7));
5180
5181 /* Prevent this instruction from ever being considered dead,
5182 even if this function has no epilogue. */
5183 emit_use (i7);
5184 }
5185 }
5186
5187 if (frame_pointer_needed)
5188 {
5189 sparc_frame_base_reg = hard_frame_pointer_rtx;
5190 sparc_frame_base_offset = SPARC_STACK_BIAS;
5191 }
5192 else
5193 {
5194 sparc_frame_base_reg = stack_pointer_rtx;
5195 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5196 }
5197
5198 if (sparc_n_global_fp_regs > 0)
5199 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5200 sparc_frame_base_offset
5201 - sparc_apparent_frame_size,
5202 SORR_SAVE);
5203
5204 /* Load the GOT register if needed. */
5205 if (crtl->uses_pic_offset_table)
5206 load_got_register ();
5207
5208 /* Advertise that the data calculated just above are now valid. */
5209 sparc_prologue_data_valid_p = true;
5210 }
5211
5212 /* This function generates the assembly code for function entry, which boils
5213 down to emitting the necessary .register directives. */
5214
5215 static void
5216 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5217 {
5218 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5219 if (!TARGET_FLAT)
5220 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
5221
5222 sparc_output_scratch_registers (file);
5223 }
5224
5225 /* Expand the function epilogue, either normal or part of a sibcall.
5226 We emit all the instructions except the return or the call. */
5227
5228 void
5229 sparc_expand_epilogue (bool for_eh)
5230 {
5231 HOST_WIDE_INT size = sparc_frame_size;
5232
5233 if (sparc_n_global_fp_regs > 0)
5234 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5235 sparc_frame_base_offset
5236 - sparc_apparent_frame_size,
5237 SORR_RESTORE);
5238
5239 if (size == 0 || for_eh)
5240 ; /* do nothing. */
5241 else if (sparc_leaf_function_p)
5242 {
5243 if (size <= 4096)
5244 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5245 else if (size <= 8192)
5246 {
5247 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5248 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5249 }
5250 else
5251 {
5252 rtx reg = gen_rtx_REG (Pmode, 1);
5253 emit_move_insn (reg, GEN_INT (-size));
5254 emit_insn (gen_stack_pointer_dec (reg));
5255 }
5256 }
5257 }
5258
5259 /* Expand the function epilogue for the flat window model, either normal or
5260 part of a sibcall.  We emit all the instructions except the return/call.  */
5261
5262 void
5263 sparc_flat_expand_epilogue (bool for_eh)
5264 {
5265 HOST_WIDE_INT size = sparc_frame_size;
5266
5267 if (sparc_n_global_fp_regs > 0)
5268 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5269 sparc_frame_base_offset
5270 - sparc_apparent_frame_size,
5271 SORR_RESTORE);
5272
5273 /* If we have a frame pointer, we need both to restore it before the
5274 frame is destroyed and to use its current value in destroying the frame.
5275 Since we don't have an atomic way to do that in the flat window model,
5276 we save the current value into a temporary register (%g1). */
5277 if (frame_pointer_needed && !for_eh)
5278 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5279
5280 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5281 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5282 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5283
5284 if (sparc_save_local_in_regs_p)
5285 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5286 sparc_frame_base_offset,
5287 SORR_RESTORE);
5288
5289 if (size == 0 || for_eh)
5290 ; /* do nothing. */
5291 else if (frame_pointer_needed)
5292 {
5293 /* Make sure the frame is destroyed after everything else is done. */
5294 emit_insn (gen_blockage ());
5295
5296 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5297 }
5298 else
5299 {
5300 /* Likewise. */
5301 emit_insn (gen_blockage ());
5302
5303 if (size <= 4096)
5304 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5305 else if (size <= 8192)
5306 {
5307 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5308 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5309 }
5310 else
5311 {
5312 rtx reg = gen_rtx_REG (Pmode, 1);
5313 emit_move_insn (reg, GEN_INT (-size));
5314 emit_insn (gen_stack_pointer_dec (reg));
5315 }
5316 }
5317 }
5318
5319 /* Return true if it is appropriate to emit `return' instructions in the
5320 body of a function. */
5321
5322 bool
5323 sparc_can_use_return_insn_p (void)
5324 {
5325 return sparc_prologue_data_valid_p
5326 && sparc_n_global_fp_regs == 0
5327 && TARGET_FLAT
5328 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
5329 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
5330 }
5331
5332 /* This function generates the assembly code for function exit. */
5333
5334 static void
5335 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5336 {
5337 /* If the last two instructions of a function are "call foo; dslot;"
5338 the return address might point to the first instruction in the next
5339 function and we have to output a dummy nop for the sake of sane
5340 backtraces in such cases. This is pointless for sibling calls since
5341 the return address is explicitly adjusted. */
5342
5343 rtx insn, last_real_insn;
5344
5345 insn = get_last_insn ();
5346
5347 last_real_insn = prev_real_insn (insn);
5348 if (last_real_insn
5349 && GET_CODE (last_real_insn) == INSN
5350 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5351 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5352
5353 if (last_real_insn
5354 && CALL_P (last_real_insn)
5355 && !SIBLING_CALL_P (last_real_insn))
5356 fputs("\tnop\n", file);
5357
5358 sparc_output_deferred_case_vectors ();
5359 }
5360
5361 /* Output a 'restore' instruction. */
5362
5363 static void
5364 output_restore (rtx pat)
5365 {
5366 rtx operands[3];
5367
5368 if (! pat)
5369 {
5370 fputs ("\t restore\n", asm_out_file);
5371 return;
5372 }
5373
5374 gcc_assert (GET_CODE (pat) == SET);
5375
5376 operands[0] = SET_DEST (pat);
5377 pat = SET_SRC (pat);
5378
5379 switch (GET_CODE (pat))
5380 {
5381 case PLUS:
5382 operands[1] = XEXP (pat, 0);
5383 operands[2] = XEXP (pat, 1);
5384 output_asm_insn (" restore %r1, %2, %Y0", operands);
5385 break;
5386 case LO_SUM:
5387 operands[1] = XEXP (pat, 0);
5388 operands[2] = XEXP (pat, 1);
5389 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5390 break;
5391 case ASHIFT:
5392 operands[1] = XEXP (pat, 0);
5393 gcc_assert (XEXP (pat, 1) == const1_rtx);
5394 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5395 break;
5396 default:
5397 operands[1] = pat;
5398 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5399 break;
5400 }
5401 }
5402
5403 /* Output a return. */
5404
5405 const char *
5406 output_return (rtx insn)
5407 {
5408 if (crtl->calls_eh_return)
5409 {
5410 /* If the function uses __builtin_eh_return, the eh_return
5411 machinery occupies the delay slot. */
5412 gcc_assert (!final_sequence);
5413
5414 if (flag_delayed_branch)
5415 {
5416 if (!TARGET_FLAT && TARGET_V9)
5417 fputs ("\treturn\t%i7+8\n", asm_out_file);
5418 else
5419 {
5420 if (!TARGET_FLAT)
5421 fputs ("\trestore\n", asm_out_file);
5422
5423 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5424 }
5425
5426 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5427 }
5428 else
5429 {
5430 if (!TARGET_FLAT)
5431 fputs ("\trestore\n", asm_out_file);
5432
5433 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5434 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5435 }
5436 }
5437 else if (sparc_leaf_function_p || TARGET_FLAT)
5438 {
5439 /* This is a leaf or flat function so we don't have to bother restoring
5440 the register window, which frees us from dealing with the convoluted
5441 semantics of restore/return. We simply output the jump to the
5442 return address and the insn in the delay slot (if any). */
5443
5444 return "jmp\t%%o7+%)%#";
5445 }
5446 else
5447 {
5448 /* This is a regular function so we have to restore the register window.
5449 We may have a pending insn for the delay slot, which will be either
5450 combined with the 'restore' instruction or put in the delay slot of
5451 the 'return' instruction. */
5452
5453 if (final_sequence)
5454 {
5455 rtx delay, pat;
5456
5457 delay = NEXT_INSN (insn);
5458 gcc_assert (delay);
5459
5460 pat = PATTERN (delay);
5461
5462 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5463 {
5464 epilogue_renumber (&pat, 0);
5465 return "return\t%%i7+%)%#";
5466 }
5467 else
5468 {
5469 output_asm_insn ("jmp\t%%i7+%)", NULL);
5470 output_restore (pat);
5471 PATTERN (delay) = gen_blockage ();
5472 INSN_CODE (delay) = -1;
5473 }
5474 }
5475 else
5476 {
5477 /* The delay slot is empty. */
5478 if (TARGET_V9)
5479 return "return\t%%i7+%)\n\t nop";
5480 else if (flag_delayed_branch)
5481 return "jmp\t%%i7+%)\n\t restore";
5482 else
5483 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5484 }
5485 }
5486
5487 return "";
5488 }
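
/* Typical outputs (hedged sketch): a V9 regular function with an empty
   delay slot yields "return %i7+8; nop" while the pre-V9 form is
   "jmp %i7+8; restore".  The %) operand code prints the return offset,
   8 in the normal case or 12 when a 32-bit caller of a function
   returning a structure must skip the unimp insn after the call; %#
   emits a nop when the delay slot is unfilled.  */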
5489
5490 /* Output a sibling call. */
5491
5492 const char *
5493 output_sibcall (rtx insn, rtx call_operand)
5494 {
5495 rtx operands[1];
5496
5497 gcc_assert (flag_delayed_branch);
5498
5499 operands[0] = call_operand;
5500
5501 if (sparc_leaf_function_p || TARGET_FLAT)
5502 {
5503 /* This is a leaf or flat function so we don't have to bother restoring
5504 the register window. We simply output the jump to the function and
5505 the insn in the delay slot (if any). */
5506
5507 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5508
5509 if (final_sequence)
5510 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5511 operands);
5512 else
5513 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5514 it into a branch if possible. */
5515 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5516 operands);
5517 }
5518 else
5519 {
5520 /* This is a regular function so we have to restore the register window.
5521 We may have a pending insn for the delay slot, which will be combined
5522 with the 'restore' instruction. */
5523
5524 output_asm_insn ("call\t%a0, 0", operands);
5525
5526 if (final_sequence)
5527 {
5528 rtx delay = NEXT_INSN (insn);
5529 gcc_assert (delay);
5530
5531 output_restore (PATTERN (delay));
5532
5533 PATTERN (delay) = gen_blockage ();
5534 INSN_CODE (delay) = -1;
5535 }
5536 else
5537 output_restore (NULL_RTX);
5538 }
5539
5540 return "";
5541 }
5542 \f
5543 /* Functions for handling argument passing.
5544
5545 For 32-bit, the first 6 args are normally in registers and the rest are
5546 pushed. Any arg that starts within the first 6 words is at least
5547 partially passed in a register unless its data type forbids it.
5548
5549 For 64-bit, the argument registers are laid out as an array of 16 elements
5550 and arguments are added sequentially. The first 6 int args and up to the
5551 first 16 fp args (depending on size) are passed in regs.
5552
5553 Slot Stack Integral Float Float in structure Double Long Double
5554 ---- ----- -------- ----- ------------------ ------ -----------
5555 15 [SP+248] %f31 %f30,%f31 %d30
5556 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5557 13 [SP+232] %f27 %f26,%f27 %d26
5558 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5559 11 [SP+216] %f23 %f22,%f23 %d22
5560 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5561 9 [SP+200] %f19 %f18,%f19 %d18
5562 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5563 7 [SP+184] %f15 %f14,%f15 %d14
5564 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5565 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5566 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5567 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5568 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5569 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5570 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5571
5572 Here SP = %sp if -mno-stack-bias, or %sp+stack_bias otherwise.
5573
5574 Integral arguments are always passed as 64-bit quantities appropriately
5575 extended.
5576
5577 Passing of floating point values is handled as follows.
5578 If a prototype is in scope:
5579 If the value is a named argument (i.e. the function is not a stdarg
5580 function, or the value is not part of the `...') then the value is
5581 passed in the appropriate fp reg.
5582 If the value is part of the `...' and is passed in one of the first 6
5583 slots then the value is passed in the appropriate int reg.
5584 If the value is part of the `...' and is not passed in one of the first 6
5585 slots then the value is passed in memory.
5586 If a prototype is not in scope:
5587 If the value is one of the first 6 arguments the value is passed in the
5588 appropriate integer reg and the appropriate fp reg.
5589 If the value is not one of the first 6 arguments the value is passed in
5590 the appropriate fp reg and in memory.
5591
5592
5593 Summary of the calling conventions implemented by GCC on the SPARC:
5594
5595 32-bit ABI:
5596 size argument return value
5597
5598 small integer <4 int. reg. int. reg.
5599 word 4 int. reg. int. reg.
5600 double word 8 int. reg. int. reg.
5601
5602 _Complex small integer <8 int. reg. int. reg.
5603 _Complex word 8 int. reg. int. reg.
5604 _Complex double word 16 memory int. reg.
5605
5606 vector integer <=8 int. reg. FP reg.
5607 vector integer >8 memory memory
5608
5609 float 4 int. reg. FP reg.
5610 double 8 int. reg. FP reg.
5611 long double 16 memory memory
5612
5613 _Complex float 8 memory FP reg.
5614 _Complex double 16 memory FP reg.
5615 _Complex long double 32 memory FP reg.
5616
5617 vector float any memory memory
5618
5619 aggregate any memory memory
5620
5621
5622
5623 64-bit ABI:
5624 size argument return value
5625
5626 small integer <8 int. reg. int. reg.
5627 word 8 int. reg. int. reg.
5628 double word 16 int. reg. int. reg.
5629
5630 _Complex small integer <16 int. reg. int. reg.
5631 _Complex word 16 int. reg. int. reg.
5632 _Complex double word 32 memory int. reg.
5633
5634 vector integer <=16 FP reg. FP reg.
5635 vector integer 16<s<=32 memory FP reg.
5636 vector integer >32 memory memory
5637
5638 float 4 FP reg. FP reg.
5639 double 8 FP reg. FP reg.
5640 long double 16 FP reg. FP reg.
5641
5642 _Complex float 8 FP reg. FP reg.
5643 _Complex double 16 FP reg. FP reg.
5644 _Complex long double 32 memory FP reg.
5645
5646 vector float <=16 FP reg. FP reg.
5647 vector float 16<s<=32 memory FP reg.
5648 vector float >32 memory memory
5649
5650 aggregate <=16 reg. reg.
5651 aggregate 16<s<=32 memory reg.
5652 aggregate >32 memory memory
5653
5654
5655
5656 Note #1: complex floating-point types follow the extended SPARC ABIs as
5657 implemented by the Sun compiler.
5658
5659 Note #2: integral vector types follow the scalar floating-point types
5660 conventions to match what is implemented by the Sun VIS SDK.
5661
5662 Note #3: floating-point vector types follow the aggregate types
5663 conventions. */
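
/* As a worked example of the 64-bit slot table above (hypothetical
   prototype, for illustration only):

     struct pt { float x; float y; };
     extern void f (int i, float g, struct pt p);

   With a prototype in scope, I is passed in %o0 (slot 0), G in %f3
   (slot 1; a single float only fills half of the double register and
   is right-justified in it), and P's fields in %f4 and %f5 (slot 2,
   per the "Float in structure" column).  */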
5664
5665
5666 /* Maximum number of int regs for args. */
5667 #define SPARC_INT_ARG_MAX 6
5668 /* Maximum number of fp regs for args. */
5669 #define SPARC_FP_ARG_MAX 16
5670
5671 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
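
/* E.g. with UNITS_PER_WORD == 8, ROUND_ADVANCE (12) is 2: a 12-byte
   argument advances the slot counter by two words.  */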
5672
5673 /* Handle the INIT_CUMULATIVE_ARGS macro.
5674 Initialize a variable CUM of type CUMULATIVE_ARGS
5675 for a call to a function whose data type is FNTYPE.
5676 For a library call, FNTYPE is 0. */
5677
5678 void
5679 init_cumulative_args (struct sparc_args *cum, tree fntype,
5680 rtx libname ATTRIBUTE_UNUSED,
5681 tree fndecl ATTRIBUTE_UNUSED)
5682 {
5683 cum->words = 0;
5684 cum->prototype_p = fntype && prototype_p (fntype);
5685 cum->libcall_p = fntype == 0;
5686 }
5687
5688 /* Handle promotion of pointer and integer arguments. */
5689
5690 static enum machine_mode
5691 sparc_promote_function_mode (const_tree type,
5692 enum machine_mode mode,
5693 int *punsignedp,
5694 const_tree fntype ATTRIBUTE_UNUSED,
5695 int for_return ATTRIBUTE_UNUSED)
5696 {
5697 if (type != NULL_TREE && POINTER_TYPE_P (type))
5698 {
5699 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5700 return Pmode;
5701 }
5702
5703 /* Integral arguments are passed as full words, as per the ABI. */
5704 if (GET_MODE_CLASS (mode) == MODE_INT
5705 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5706 return word_mode;
5707
5708 return mode;
5709 }
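
/* A minimal illustration (hypothetical declaration): for

     extern long g (short s, char *p);

   S has HImode, smaller than a word, so it is promoted to word_mode
   and passed sign-extended to a full register, while P is promoted to
   Pmode with POINTERS_EXTEND_UNSIGNED selecting the extension.  */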
5710
5711 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5712
5713 static bool
5714 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5715 {
5716 return TARGET_ARCH64 ? true : false;
5717 }
5718
5719 /* Scan the record type TYPE and return the following predicates:
5720 - INTREGS_P: the record contains at least one field or sub-field
5721 that is eligible for promotion in integer registers.
5722 - FPREGS_P: the record contains at least one field or sub-field
5723 that is eligible for promotion in floating-point registers.
5724 - PACKED_P: the record contains at least one field that is packed.
5725
5726 Sub-fields are not taken into account for the PACKED_P predicate. */
5727
5728 static void
5729 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5730 int *packed_p)
5731 {
5732 tree field;
5733
5734 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5735 {
5736 if (TREE_CODE (field) == FIELD_DECL)
5737 {
5738 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5739 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5740 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5741 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5742 && TARGET_FPU)
5743 *fpregs_p = 1;
5744 else
5745 *intregs_p = 1;
5746
5747 if (packed_p && DECL_PACKED (field))
5748 *packed_p = 1;
5749 }
5750 }
5751 }
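
/* E.g. scanning the hypothetical record

     struct { int i; double d; };

   sets both *INTREGS_P (for I) and *FPREGS_P (for D, provided
   TARGET_FPU); if either field carried the packed attribute,
   *PACKED_P would be set as well.  */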
5752
5753 /* Compute the slot number to pass an argument in.
5754 Return the slot number or -1 if passing on the stack.
5755
5756 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5757 the preceding args and about the function being called.
5758 MODE is the argument's machine mode.
5759 TYPE is the data type of the argument (as a tree).
5760 This is null for libcalls where that information may
5761 not be available.
5762 NAMED is nonzero if this argument is a named parameter
5763 (otherwise it is an extra parameter matching an ellipsis).
5764 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5765 *PREGNO records the register number to use if scalar type.
5766 *PPADDING records the amount of padding needed in words. */
5767
5768 static int
5769 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5770 const_tree type, bool named, bool incoming_p,
5771 int *pregno, int *ppadding)
5772 {
5773 int regbase = (incoming_p
5774 ? SPARC_INCOMING_INT_ARG_FIRST
5775 : SPARC_OUTGOING_INT_ARG_FIRST);
5776 int slotno = cum->words;
5777 enum mode_class mclass;
5778 int regno;
5779
5780 *ppadding = 0;
5781
5782 if (type && TREE_ADDRESSABLE (type))
5783 return -1;
5784
5785 if (TARGET_ARCH32
5786 && mode == BLKmode
5787 && type
5788 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5789 return -1;
5790
5791 /* For SPARC64, objects requiring 16-byte alignment get it. */
5792 if (TARGET_ARCH64
5793 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5794 && (slotno & 1) != 0)
5795 slotno++, *ppadding = 1;
5796
5797 mclass = GET_MODE_CLASS (mode);
5798 if (type && TREE_CODE (type) == VECTOR_TYPE)
5799 {
5800 /* Vector types deserve special treatment because they are
5801 polymorphic wrt their mode, depending upon whether VIS
5802 instructions are enabled. */
5803 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5804 {
5805 /* The SPARC port defines no floating-point vector modes. */
5806 gcc_assert (mode == BLKmode);
5807 }
5808 else
5809 {
5810 /* Integral vector types should either have a vector
5811 mode or an integral mode, because we are guaranteed
5812 by pass_by_reference that their size is not greater
5813 than 16 bytes and TImode is 16-byte wide. */
5814 gcc_assert (mode != BLKmode);
5815
5816 /* Vector integers are handled like floats according to
5817 the Sun VIS SDK. */
5818 mclass = MODE_FLOAT;
5819 }
5820 }
5821
5822 switch (mclass)
5823 {
5824 case MODE_FLOAT:
5825 case MODE_COMPLEX_FLOAT:
5826 case MODE_VECTOR_INT:
5827 if (TARGET_ARCH64 && TARGET_FPU && named)
5828 {
5829 if (slotno >= SPARC_FP_ARG_MAX)
5830 return -1;
5831 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5832 /* Arguments filling only one single FP register are
5833 right-justified in the outer double FP register. */
5834 if (GET_MODE_SIZE (mode) <= 4)
5835 regno++;
5836 break;
5837 }
5838 /* fallthrough */
5839
5840 case MODE_INT:
5841 case MODE_COMPLEX_INT:
5842 if (slotno >= SPARC_INT_ARG_MAX)
5843 return -1;
5844 regno = regbase + slotno;
5845 break;
5846
5847 case MODE_RANDOM:
5848 if (mode == VOIDmode)
5849 /* MODE is VOIDmode when generating the actual call. */
5850 return -1;
5851
5852 gcc_assert (mode == BLKmode);
5853
5854 if (TARGET_ARCH32
5855 || !type
5856 || (TREE_CODE (type) != VECTOR_TYPE
5857 && TREE_CODE (type) != RECORD_TYPE))
5858 {
5859 if (slotno >= SPARC_INT_ARG_MAX)
5860 return -1;
5861 regno = regbase + slotno;
5862 }
5863 else /* TARGET_ARCH64 && type */
5864 {
5865 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5866
5867 /* First see what kinds of registers we would need. */
5868 if (TREE_CODE (type) == VECTOR_TYPE)
5869 fpregs_p = 1;
5870 else
5871 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5872
5873 /* The ABI obviously doesn't specify how packed structures
5874 are passed. These are defined to be passed in int regs
5875 if possible, otherwise memory. */
5876 if (packed_p || !named)
5877 fpregs_p = 0, intregs_p = 1;
5878
5879 /* If all arg slots are filled, then must pass on stack. */
5880 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5881 return -1;
5882
5883 /* If there are only int args and all int arg slots are filled,
5884 then must pass on stack. */
5885 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5886 return -1;
5887
5888 /* Note that even if all int arg slots are filled, fp members may
5889 still be passed in regs if such regs are available.
5890 *PREGNO isn't set because there may be more than one, it's up
5891 to the caller to compute them. */
5892 return slotno;
5893 }
5894 break;
5895
5896 default:
5897 gcc_unreachable ();
5898 }
5899
5900 *pregno = regno;
5901 return slotno;
5902 }
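
/* For example, for the hypothetical 64-bit prototype

     extern void f (int a, long double q);

   A takes slot 0, and Q (TFmode, 16-byte alignment) may not start in
   the odd slot 1: the alignment code above bumps it to slot 2 and
   records one word of padding, so Q lands in %q4, matching the Long
   Double column of the slot table.  */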
5903
5904 /* Handle recursive register counting for structure field layout. */
5905
5906 struct function_arg_record_value_parms
5907 {
5908 rtx ret; /* return expression being built. */
5909 int slotno; /* slot number of the argument. */
5910 int named; /* whether the argument is named. */
5911 int regbase; /* regno of the base register. */
5912 int stack; /* 1 if part of the argument is on the stack. */
5913 int intoffset; /* offset of the first pending integer field. */
5914 unsigned int nregs; /* number of words passed in registers. */
5915 };
5916
5917 static void function_arg_record_value_3
5918 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5919 static void function_arg_record_value_2
5920 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5921 static void function_arg_record_value_1
5922 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5923 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5924 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5925
5926 /* A subroutine of function_arg_record_value. Traverse the structure
5927 recursively and determine how many registers will be required. */
5928
5929 static void
5930 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5931 struct function_arg_record_value_parms *parms,
5932 bool packed_p)
5933 {
5934 tree field;
5935
5936 /* We need to compute how many registers are needed so we can
5937 allocate the PARALLEL but before we can do that we need to know
5938 whether there are any packed fields. The ABI obviously doesn't
5939 specify how structures are passed in this case, so they are
5940 defined to be passed in int regs if possible, otherwise memory,
5941 regardless of whether there are fp values present. */
5942
5943 if (! packed_p)
5944 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5945 {
5946 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5947 {
5948 packed_p = true;
5949 break;
5950 }
5951 }
5952
5953 /* Compute how many registers we need. */
5954 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5955 {
5956 if (TREE_CODE (field) == FIELD_DECL)
5957 {
5958 HOST_WIDE_INT bitpos = startbitpos;
5959
5960 if (DECL_SIZE (field) != 0)
5961 {
5962 if (integer_zerop (DECL_SIZE (field)))
5963 continue;
5964
5965 if (host_integerp (bit_position (field), 1))
5966 bitpos += int_bit_position (field);
5967 }
5968
5969 /* ??? FIXME: else assume zero offset. */
5970
5971 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5972 function_arg_record_value_1 (TREE_TYPE (field),
5973 bitpos,
5974 parms,
5975 packed_p);
5976 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5977 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5978 && TARGET_FPU
5979 && parms->named
5980 && ! packed_p)
5981 {
5982 if (parms->intoffset != -1)
5983 {
5984 unsigned int startbit, endbit;
5985 int intslots, this_slotno;
5986
5987 startbit = parms->intoffset & -BITS_PER_WORD;
5988 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5989
5990 intslots = (endbit - startbit) / BITS_PER_WORD;
5991 this_slotno = parms->slotno + parms->intoffset
5992 / BITS_PER_WORD;
5993
5994 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5995 {
5996 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5997 /* We need to pass this field on the stack. */
5998 parms->stack = 1;
5999 }
6000
6001 parms->nregs += intslots;
6002 parms->intoffset = -1;
6003 }
6004
6005 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
6006 If it wasn't true we wouldn't be here. */
6007 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6008 && DECL_MODE (field) == BLKmode)
6009 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6010 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6011 parms->nregs += 2;
6012 else
6013 parms->nregs += 1;
6014 }
6015 else
6016 {
6017 if (parms->intoffset == -1)
6018 parms->intoffset = bitpos;
6019 }
6020 }
6021 }
6022 }
6023
6024 /* A subroutine of function_arg_record_value. Assign the bits of the
6025 structure between parms->intoffset and bitpos to integer registers. */
6026
6027 static void
6028 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
6029 struct function_arg_record_value_parms *parms)
6030 {
6031 enum machine_mode mode;
6032 unsigned int regno;
6033 unsigned int startbit, endbit;
6034 int this_slotno, intslots, intoffset;
6035 rtx reg;
6036
6037 if (parms->intoffset == -1)
6038 return;
6039
6040 intoffset = parms->intoffset;
6041 parms->intoffset = -1;
6042
6043 startbit = intoffset & -BITS_PER_WORD;
6044 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6045 intslots = (endbit - startbit) / BITS_PER_WORD;
6046 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
6047
6048 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
6049 if (intslots <= 0)
6050 return;
6051
6052 /* If this is the trailing part of a word, only load that much into
6053 the register. Otherwise load the whole register. Note that in
6054 the latter case we may pick up unwanted bits. It's not a problem
6055 at the moment, but we may wish to revisit this. */
6056
6057 if (intoffset % BITS_PER_WORD != 0)
6058 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
6059 MODE_INT);
6060 else
6061 mode = word_mode;
6062
6063 intoffset /= BITS_PER_UNIT;
6064 do
6065 {
6066 regno = parms->regbase + this_slotno;
6067 reg = gen_rtx_REG (mode, regno);
6068 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6069 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6070
6071 this_slotno += 1;
6072 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
6073 mode = word_mode;
6074 parms->nregs += 1;
6075 intslots -= 1;
6076 }
6077 while (intslots > 0);
6078 }
6079
6080 /* A subroutine of function_arg_record_value. Traverse the structure
6081 recursively and assign bits to floating point registers. Track which
6082 bits in between need integer registers; invoke function_arg_record_value_3
6083 to make that happen. */
6084
6085 static void
6086 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
6087 struct function_arg_record_value_parms *parms,
6088 bool packed_p)
6089 {
6090 tree field;
6091
6092 if (! packed_p)
6093 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6094 {
6095 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6096 {
6097 packed_p = true;
6098 break;
6099 }
6100 }
6101
6102 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6103 {
6104 if (TREE_CODE (field) == FIELD_DECL)
6105 {
6106 HOST_WIDE_INT bitpos = startbitpos;
6107
6108 if (DECL_SIZE (field) != 0)
6109 {
6110 if (integer_zerop (DECL_SIZE (field)))
6111 continue;
6112
6113 if (host_integerp (bit_position (field), 1))
6114 bitpos += int_bit_position (field);
6115 }
6116
6117 /* ??? FIXME: else assume zero offset. */
6118
6119 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6120 function_arg_record_value_2 (TREE_TYPE (field),
6121 bitpos,
6122 parms,
6123 packed_p);
6124 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6125 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6126 && TARGET_FPU
6127 && parms->named
6128 && ! packed_p)
6129 {
6130 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
6131 int regno, nregs, pos;
6132 enum machine_mode mode = DECL_MODE (field);
6133 rtx reg;
6134
6135 function_arg_record_value_3 (bitpos, parms);
6136
6137 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6138 && mode == BLKmode)
6139 {
6140 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6141 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6142 }
6143 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6144 {
6145 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6146 nregs = 2;
6147 }
6148 else
6149 nregs = 1;
6150
6151 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6152 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6153 regno++;
6154 reg = gen_rtx_REG (mode, regno);
6155 pos = bitpos / BITS_PER_UNIT;
6156 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6157 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6158 parms->nregs += 1;
6159 while (--nregs > 0)
6160 {
6161 regno += GET_MODE_SIZE (mode) / 4;
6162 reg = gen_rtx_REG (mode, regno);
6163 pos += GET_MODE_SIZE (mode);
6164 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6165 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6166 parms->nregs += 1;
6167 }
6168 }
6169 else
6170 {
6171 if (parms->intoffset == -1)
6172 parms->intoffset = bitpos;
6173 }
6174 }
6175 }
6176 }
6177
6178 /* Used by function_arg and sparc_function_value_1 to implement the complex
6179 conventions of the 64-bit ABI for passing and returning structures.
6180 Return an expression valid as a return value for the FUNCTION_ARG
6181 and TARGET_FUNCTION_VALUE.
6182
6183 TYPE is the data type of the argument (as a tree).
6184 This is null for libcalls where that information may
6185 not be available.
6186 MODE is the argument's machine mode.
6187 SLOTNO is the index number of the argument's slot in the parameter array.
6188 NAMED is nonzero if this argument is a named parameter
6189 (otherwise it is an extra parameter matching an ellipsis).
6190 REGBASE is the regno of the base register for the parameter array. */
6191
6192 static rtx
6193 function_arg_record_value (const_tree type, enum machine_mode mode,
6194 int slotno, int named, int regbase)
6195 {
6196 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6197 struct function_arg_record_value_parms parms;
6198 unsigned int nregs;
6199
6200 parms.ret = NULL_RTX;
6201 parms.slotno = slotno;
6202 parms.named = named;
6203 parms.regbase = regbase;
6204 parms.stack = 0;
6205
6206 /* Compute how many registers we need. */
6207 parms.nregs = 0;
6208 parms.intoffset = 0;
6209 function_arg_record_value_1 (type, 0, &parms, false);
6210
6211 /* Take into account pending integer fields. */
6212 if (parms.intoffset != -1)
6213 {
6214 unsigned int startbit, endbit;
6215 int intslots, this_slotno;
6216
6217 startbit = parms.intoffset & -BITS_PER_WORD;
6218 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6219 intslots = (endbit - startbit) / BITS_PER_WORD;
6220 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
6221
6222 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6223 {
6224 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6225 /* We need to pass this field on the stack. */
6226 parms.stack = 1;
6227 }
6228
6229 parms.nregs += intslots;
6230 }
6231 nregs = parms.nregs;
6232
6233 /* Allocate the vector and handle some annoying special cases. */
6234 if (nregs == 0)
6235 {
6236 /* ??? Empty structure has no value? Duh? */
6237 if (typesize <= 0)
6238 {
6239 /* Though there's nothing really to store, return a word register
6240 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6241 leads to breakage due to the fact that there are zero bytes to
6242 load. */
6243 return gen_rtx_REG (mode, regbase);
6244 }
6245 else
6246 {
6247 /* ??? C++ has structures with no fields, and yet a size. Give up
6248 for now and pass everything back in integer registers. */
6249 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6250 }
6251 if (nregs + slotno > SPARC_INT_ARG_MAX)
6252 nregs = SPARC_INT_ARG_MAX - slotno;
6253 }
6254 gcc_assert (nregs != 0);
6255
6256 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6257
6258 /* If at least one field must be passed on the stack, generate
6259 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6260 also be passed on the stack. We can't do much better because the
6261 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6262 of structures for which the fields passed exclusively in registers
6263 are not at the beginning of the structure. */
6264 if (parms.stack)
6265 XVECEXP (parms.ret, 0, 0)
6266 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6267
6268 /* Fill in the entries. */
6269 parms.nregs = 0;
6270 parms.intoffset = 0;
6271 function_arg_record_value_2 (type, 0, &parms, false);
6272 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6273
6274 gcc_assert (parms.nregs == nregs);
6275
6276 return parms.ret;
6277 }
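
/* As an illustrative example (hypothetical type, outgoing argument in
   slot 0), the 16-byte record

     struct { double d; long l; };

   is returned as the PARALLEL

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   D being assigned to an FP register by function_arg_record_value_2
   and the pending integer field L to %o1 by
   function_arg_record_value_3.  */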
6278
6279 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6280 of the 64-bit ABI for passing and returning unions.
6281 Return an expression valid as a return value for the FUNCTION_ARG
6282 and TARGET_FUNCTION_VALUE.
6283
6284 SIZE is the size in bytes of the union.
6285 MODE is the argument's machine mode. SLOTNO is the slot number.
6286 REGNO is the hard register the union will be passed in. */
6287
6288 static rtx
6289 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6290 int regno)
6291 {
6292 int nwords = ROUND_ADVANCE (size), i;
6293 rtx regs;
6294
6295 /* See comment in previous function for empty structures. */
6296 if (nwords == 0)
6297 return gen_rtx_REG (mode, regno);
6298
6299 if (slotno == SPARC_INT_ARG_MAX - 1)
6300 nwords = 1;
6301
6302 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6303
6304 for (i = 0; i < nwords; i++)
6305 {
6306 /* Unions are passed left-justified. */
6307 XVECEXP (regs, 0, i)
6308 = gen_rtx_EXPR_LIST (VOIDmode,
6309 gen_rtx_REG (word_mode, regno),
6310 GEN_INT (UNITS_PER_WORD * i));
6311 regno++;
6312 }
6313
6314 return regs;
6315 }
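
/* E.g. a hypothetical 12-byte union passed in slot 0 under the 64-bit
   ABI yields

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. two left-justified word registers; had it started in the last
   slot (slot 5), only %o5 would be used.  */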
6316
6317 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6318 for passing and returning large (BLKmode) vectors.
6319 Return an expression valid as a return value for the FUNCTION_ARG
6320 and TARGET_FUNCTION_VALUE.
6321
6322 SIZE is the size in bytes of the vector (at least 8 bytes).
6323 REGNO is the FP hard register the vector will be passed in. */
6324
6325 static rtx
6326 function_arg_vector_value (int size, int regno)
6327 {
6328 int i, nregs = size / 8;
6329 rtx regs;
6330
6331 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6332
6333 for (i = 0; i < nregs; i++)
6334 {
6335 XVECEXP (regs, 0, i)
6336 = gen_rtx_EXPR_LIST (VOIDmode,
6337 gen_rtx_REG (DImode, regno + 2*i),
6338 GEN_INT (i*8));
6339 }
6340
6341 return regs;
6342 }
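
/* E.g. a hypothetical 16-byte vector passed starting at %f0 yields

     (parallel [(expr_list (reg:DI %f0) (const_int 0))
                (expr_list (reg:DI %f2) (const_int 8))])

   i.e. two DImode chunks in consecutive even FP register pairs.  */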
6343
6344 /* Determine where to put an argument to a function.
6345 Value is zero to push the argument on the stack,
6346 or a hard register in which to store the argument.
6347
6348 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6349 the preceding args and about the function being called.
6350 MODE is the argument's machine mode.
6351 TYPE is the data type of the argument (as a tree).
6352 This is null for libcalls where that information may
6353 not be available.
6354 NAMED is true if this argument is a named parameter
6355 (otherwise it is an extra parameter matching an ellipsis).
6356 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6357 TARGET_FUNCTION_INCOMING_ARG. */
6358
6359 static rtx
6360 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6361 const_tree type, bool named, bool incoming_p)
6362 {
6363 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6364
6365 int regbase = (incoming_p
6366 ? SPARC_INCOMING_INT_ARG_FIRST
6367 : SPARC_OUTGOING_INT_ARG_FIRST);
6368 int slotno, regno, padding;
6369 enum mode_class mclass = GET_MODE_CLASS (mode);
6370
6371 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6372 &regno, &padding);
6373 if (slotno == -1)
6374 return 0;
6375
6376 /* Vector types deserve special treatment because they are polymorphic wrt
6377 their mode, depending upon whether VIS instructions are enabled. */
6378 if (type && TREE_CODE (type) == VECTOR_TYPE)
6379 {
6380 HOST_WIDE_INT size = int_size_in_bytes (type);
6381 gcc_assert ((TARGET_ARCH32 && size <= 8)
6382 || (TARGET_ARCH64 && size <= 16));
6383
6384 if (mode == BLKmode)
6385 return function_arg_vector_value (size,
6386 SPARC_FP_ARG_FIRST + 2*slotno);
6387 else
6388 mclass = MODE_FLOAT;
6389 }
6390
6391 if (TARGET_ARCH32)
6392 return gen_rtx_REG (mode, regno);
6393
6394 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6395 and are promoted to registers if possible. */
6396 if (type && TREE_CODE (type) == RECORD_TYPE)
6397 {
6398 HOST_WIDE_INT size = int_size_in_bytes (type);
6399 gcc_assert (size <= 16);
6400
6401 return function_arg_record_value (type, mode, slotno, named, regbase);
6402 }
6403
6404 /* Unions up to 16 bytes in size are passed in integer registers. */
6405 else if (type && TREE_CODE (type) == UNION_TYPE)
6406 {
6407 HOST_WIDE_INT size = int_size_in_bytes (type);
6408 gcc_assert (size <= 16);
6409
6410 return function_arg_union_value (size, mode, slotno, regno);
6411 }
6412
6413 /* V9 fp args in reg slots beyond the int reg slots are passed in regs,
6414 but the slot is still allocated for them.
6415 If no prototype is in scope, fp values in register slots are passed
6416 in two places: either fp regs and int regs, or fp regs and memory. */
6417 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6418 && SPARC_FP_REG_P (regno))
6419 {
6420 rtx reg = gen_rtx_REG (mode, regno);
6421 if (cum->prototype_p || cum->libcall_p)
6422 {
6423 /* "* 2" because fp reg numbers are recorded in 4 byte
6424 quantities. */
6425 #if 0
6426 /* ??? This will cause the value to be passed in the fp reg and
6427 in the stack. When a prototype exists we want to pass the
6428 value in the reg but reserve space on the stack. That's an
6429 optimization, and is deferred [for a bit]. */
6430 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6431 return gen_rtx_PARALLEL (mode,
6432 gen_rtvec (2,
6433 gen_rtx_EXPR_LIST (VOIDmode,
6434 NULL_RTX, const0_rtx),
6435 gen_rtx_EXPR_LIST (VOIDmode,
6436 reg, const0_rtx)));
6437 else
6438 #else
6439 /* ??? It seems that passing back a register even when past
6440 the area declared by REG_PARM_STACK_SPACE will allocate
6441 space appropriately, and will not copy the data onto the
6442 stack, exactly as we desire.
6443
6444 This is due to locate_and_pad_parm being called in
6445 expand_call whenever reg_parm_stack_space > 0, which
6446 while beneficial to our example here, would seem to be
6447 in error from what had been intended. Ho hum... -- r~ */
6448 #endif
6449 return reg;
6450 }
6451 else
6452 {
6453 rtx v0, v1;
6454
6455 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6456 {
6457 int intreg;
6458
6459 /* On incoming, we don't need to know that the value
6460 is passed in %f0 and %i0, and it confuses other parts
6461 causing needless spillage even in the simplest cases. */
6462 if (incoming_p)
6463 return reg;
6464
6465 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6466 + (regno - SPARC_FP_ARG_FIRST) / 2);
6467
6468 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6469 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6470 const0_rtx);
6471 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6472 }
6473 else
6474 {
6475 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6476 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6477 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6478 }
6479 }
6480 }
6481
6482 /* All other aggregate types are passed in an integer register in a mode
6483 corresponding to the size of the type. */
6484 else if (type && AGGREGATE_TYPE_P (type))
6485 {
6486 HOST_WIDE_INT size = int_size_in_bytes (type);
6487 gcc_assert (size <= 16);
6488
6489 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6490 }
6491
6492 return gen_rtx_REG (mode, regno);
6493 }
6494
6495 /* Handle the TARGET_FUNCTION_ARG target hook. */
6496
6497 static rtx
6498 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6499 const_tree type, bool named)
6500 {
6501 return sparc_function_arg_1 (cum, mode, type, named, false);
6502 }
6503
6504 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6505
6506 static rtx
6507 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6508 const_tree type, bool named)
6509 {
6510 return sparc_function_arg_1 (cum, mode, type, named, true);
6511 }
6512
6513 /* For sparc64, objects requiring 16-byte alignment are passed that way. */
6514
6515 static unsigned int
6516 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6517 {
6518 return ((TARGET_ARCH64
6519 && (GET_MODE_ALIGNMENT (mode) == 128
6520 || (type && TYPE_ALIGN (type) == 128)))
6521 ? 128
6522 : PARM_BOUNDARY);
6523 }
6524
6525 /* For an arg passed partly in registers and partly in memory,
6526 this is the number of bytes of registers used.
6527 For args passed entirely in registers or entirely in memory, zero.
6528
6529 Any arg that starts in the first 6 regs but won't entirely fit in them
6530 needs partial registers on v8. On v9, structures with integer
6531 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6532 values that begin in the last fp reg [where "last fp reg" varies with the
6533 mode] will be split between that reg and memory. */
6534
6535 static int
6536 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6537 tree type, bool named)
6538 {
6539 int slotno, regno, padding;
6540
6541 /* We pass false for incoming_p here; it doesn't matter. */
6542 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6543 false, &regno, &padding);
6544
6545 if (slotno == -1)
6546 return 0;
6547
6548 if (TARGET_ARCH32)
6549 {
6550 if ((slotno + (mode == BLKmode
6551 ? ROUND_ADVANCE (int_size_in_bytes (type))
6552 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6553 > SPARC_INT_ARG_MAX)
6554 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6555 }
6556 else
6557 {
6558 /* We are guaranteed by pass_by_reference that the size of the
6559 argument is not greater than 16 bytes, so we only need to return
6560 one word if the argument is partially passed in registers. */
6561
6562 if (type && AGGREGATE_TYPE_P (type))
6563 {
6564 int size = int_size_in_bytes (type);
6565
6566 if (size > UNITS_PER_WORD
6567 && slotno == SPARC_INT_ARG_MAX - 1)
6568 return UNITS_PER_WORD;
6569 }
6570 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6571 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6572 && ! (TARGET_FPU && named)))
6573 {
6574 /* The complex types are passed as packed types. */
6575 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6576 && slotno == SPARC_INT_ARG_MAX - 1)
6577 return UNITS_PER_WORD;
6578 }
6579 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6580 {
6581 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6582 > SPARC_FP_ARG_MAX)
6583 return UNITS_PER_WORD;
6584 }
6585 }
6586
6587 return 0;
6588 }
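
/* For example, under the 32-bit ABI a hypothetical `double' argument
   assigned slot 5 needs two words but only one register slot remains:
   5 + 2 > SPARC_INT_ARG_MAX, so the code above reports
   (6 - 5) * 4 = 4 bytes passed in %o5, the other half going to
   memory.  */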
6589
6590 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6591 Specify whether to pass the argument by reference. */
6592
6593 static bool
6594 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6595 enum machine_mode mode, const_tree type,
6596 bool named ATTRIBUTE_UNUSED)
6597 {
6598 if (TARGET_ARCH32)
6599 /* Original SPARC 32-bit ABI says that structures, unions and
6600 quad-precision floats are passed by reference. For Pascal,
6601 also pass arrays by reference. All other base types are passed
6602 in registers.
6603
6604 Extended ABI (as implemented by the Sun compiler) says that all
6605 complex floats are passed by reference. Pass complex integers
6606 in registers up to 8 bytes. More generally, enforce the 2-word
6607 cap for passing arguments in registers.
6608
6609 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6610 integers are passed like floats of the same size, that is in
6611 registers up to 8 bytes. Pass all vector floats by reference
6612 like structures and unions. */
6613 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6614 || mode == SCmode
6615 /* Catch CDImode, TFmode, DCmode and TCmode. */
6616 || GET_MODE_SIZE (mode) > 8
6617 || (type
6618 && TREE_CODE (type) == VECTOR_TYPE
6619 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6620 else
6621 /* Original SPARC 64-bit ABI says that structures and unions
6622 smaller than 16 bytes are passed in registers, as well as
6623 all other base types.
6624
6625 Extended ABI (as implemented by the Sun compiler) says that
6626 complex floats are passed in registers up to 16 bytes. Pass
6627 all complex integers in registers up to 16 bytes. More generally,
6628 enforce the 2-word cap for passing arguments in registers.
6629
6630 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6631 integers are passed like floats of the same size, that is in
6632 registers (up to 16 bytes). Pass all vector floats like structures
6633 and unions. */
6634 return ((type
6635 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6636 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6637 /* Catch CTImode and TCmode. */
6638 || GET_MODE_SIZE (mode) > 16);
6639 }
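
/* To illustrate the 32-bit rules with hypothetical examples: a struct,
   a `long double' (TFmode, 16 bytes) and a `_Complex float' (SCmode)
   are all passed by reference, whereas a `long long' (DImode, 8 bytes)
   still travels in registers since it fits the 2-word cap.  */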
6640
6641 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6642 Update the data in CUM to advance over an argument
6643 of mode MODE and data type TYPE.
6644 TYPE is null for libcalls where that information may not be available. */
6645
6646 static void
6647 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6648 const_tree type, bool named)
6649 {
6650 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6651 int regno, padding;
6652
6653 /* We pass false for incoming_p here; it doesn't matter. */
6654 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6655
6656 /* If argument requires leading padding, add it. */
6657 cum->words += padding;
6658
6659 if (TARGET_ARCH32)
6660 {
6661 cum->words += (mode != BLKmode
6662 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6663 : ROUND_ADVANCE (int_size_in_bytes (type)));
6664 }
6665 else
6666 {
6667 if (type && AGGREGATE_TYPE_P (type))
6668 {
6669 int size = int_size_in_bytes (type);
6670
6671 if (size <= 8)
6672 ++cum->words;
6673 else if (size <= 16)
6674 cum->words += 2;
6675 else /* passed by reference */
6676 ++cum->words;
6677 }
6678 else
6679 {
6680 cum->words += (mode != BLKmode
6681 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6682 : ROUND_ADVANCE (int_size_in_bytes (type)));
6683 }
6684 }
6685 }
6686
6687 /* Handle the FUNCTION_ARG_PADDING macro.
6688 For the 64-bit ABI, structs are always stored left-shifted in their
6689 argument slot. */
6690
6691 enum direction
6692 function_arg_padding (enum machine_mode mode, const_tree type)
6693 {
6694 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6695 return upward;
6696
6697 /* Fall back to the default. */
6698 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6699 }
6700
6701 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6702 Specify whether to return the return value in memory. */
6703
6704 static bool
6705 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6706 {
6707 if (TARGET_ARCH32)
6708 /* Original SPARC 32-bit ABI says that structures, unions and
6709 quad-precision floats are returned in memory. All other
6710 base types are returned in registers.
6711
6712 Extended ABI (as implemented by the Sun compiler) says that
6713 all complex floats are returned in registers (8 FP registers
6714 at most for '_Complex long double'). Return all complex integers
6715 in registers (4 at most for '_Complex long long').
6716
6717 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6718 integers are returned like floats of the same size, that is in
6719 registers up to 8 bytes and in memory otherwise. Return all
6720 vector floats in memory like structures and unions; note that
6721 they always have BLKmode like the latter. */
6722 return (TYPE_MODE (type) == BLKmode
6723 || TYPE_MODE (type) == TFmode
6724 || (TREE_CODE (type) == VECTOR_TYPE
6725 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6726 else
6727 /* Original SPARC 64-bit ABI says that structures and unions
6728 smaller than 32 bytes are returned in registers, as well as
6729 all other base types.
6730
6731 Extended ABI (as implemented by the Sun compiler) says that all
6732 complex floats are returned in registers (8 FP registers at most
6733 for '_Complex long double'). Return all complex integers in
6734 registers (4 at most for '_Complex TItype').
6735
6736 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6737 integers are returned like floats of the same size, that is in
6738 registers. Return all vector floats like structures and unions;
6739 note that they always have BLKmode like the latter. */
6740 return (TYPE_MODE (type) == BLKmode
6741 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6742 }
6743
6744 /* Handle the TARGET_STRUCT_VALUE target hook.
6745 Return where to find the structure return value address. */
6746
6747 static rtx
6748 sparc_struct_value_rtx (tree fndecl, int incoming)
6749 {
6750 if (TARGET_ARCH64)
6751 return 0;
6752 else
6753 {
6754 rtx mem;
6755
6756 if (incoming)
6757 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
6758 STRUCT_VALUE_OFFSET));
6759 else
6760 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
6761 STRUCT_VALUE_OFFSET));
6762
6763 /* Only follow the SPARC ABI for fixed-size structure returns.
6764 Variable size structure returns are handled per the normal
6765 procedures in GCC. This is enabled by -mstd-struct-return. */
6766 if (incoming == 2
6767 && sparc_std_struct_return
6768 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6769 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6770 {
6771 /* We must check and adjust the return address, since it is
6772 optional whether the return object is actually
6773 provided. */
6774 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6775 rtx scratch = gen_reg_rtx (SImode);
6776 rtx endlab = gen_label_rtx ();
6777
6778 /* Calculate the return object size. */
6779 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6780 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6781 /* Construct a temporary return value. */
6782 rtx temp_val
6783 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6784
6785 /* Implement SPARC 32-bit psABI callee return struct checking:
6786
6787 Fetch the instruction where we will return to and see if
6788 it's an unimp instruction (the most significant 10 bits
6789 will be zero). */
6790 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6791 plus_constant (Pmode,
6792 ret_reg, 8)));
6793 /* Assume the size is valid and pre-adjust. */
6794 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6795 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6796 0, endlab);
6797 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6798 /* Write the address of the memory pointed to by temp_val into
6799 the memory pointed to by mem. */
6800 emit_move_insn (mem, XEXP (temp_val, 0));
6801 emit_label (endlab);
6802 }
6803
6804 return mem;
6805 }
6806 }
6807
6808 /* Handle the TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE target hooks.
6809 For v9, function return values are subject to the same rules as arguments,
6810 except that up to 32 bytes may be returned in registers. */
6811
6812 static rtx
6813 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6814 bool outgoing)
6815 {
6816 /* Beware that the two values are swapped here wrt function_arg. */
6817 int regbase = (outgoing
6818 ? SPARC_INCOMING_INT_ARG_FIRST
6819 : SPARC_OUTGOING_INT_ARG_FIRST);
6820 enum mode_class mclass = GET_MODE_CLASS (mode);
6821 int regno;
6822
6823 /* Vector types deserve special treatment because they are polymorphic wrt
6824 their mode, depending upon whether VIS instructions are enabled. */
6825 if (type && TREE_CODE (type) == VECTOR_TYPE)
6826 {
6827 HOST_WIDE_INT size = int_size_in_bytes (type);
6828 gcc_assert ((TARGET_ARCH32 && size <= 8)
6829 || (TARGET_ARCH64 && size <= 32));
6830
6831 if (mode == BLKmode)
6832 return function_arg_vector_value (size,
6833 SPARC_FP_ARG_FIRST);
6834 else
6835 mclass = MODE_FLOAT;
6836 }
6837
6838 if (TARGET_ARCH64 && type)
6839 {
6840 /* Structures up to 32 bytes in size are returned in registers. */
6841 if (TREE_CODE (type) == RECORD_TYPE)
6842 {
6843 HOST_WIDE_INT size = int_size_in_bytes (type);
6844 gcc_assert (size <= 32);
6845
6846 return function_arg_record_value (type, mode, 0, 1, regbase);
6847 }
6848
6849 /* Unions up to 32 bytes in size are returned in integer registers. */
6850 else if (TREE_CODE (type) == UNION_TYPE)
6851 {
6852 HOST_WIDE_INT size = int_size_in_bytes (type);
6853 gcc_assert (size <= 32);
6854
6855 return function_arg_union_value (size, mode, 0, regbase);
6856 }
6857
6858 /* Objects that require it are returned in FP registers. */
6859 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6860 ;
6861
6862 /* All other aggregate types are returned in an integer register in a
6863 mode corresponding to the size of the type. */
6864 else if (AGGREGATE_TYPE_P (type))
6865 {
6866 /* All other aggregate types are passed in an integer register
6867 in a mode corresponding to the size of the type. */
6868 HOST_WIDE_INT size = int_size_in_bytes (type);
6869 gcc_assert (size <= 32);
6870
6871 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6872
6873 /* ??? We probably should have made the same ABI change in
6874 3.4.0 as the one we made for unions. The latter was
6875 required by the SCD though, while the former is not
6876 specified, so we favored compatibility and efficiency.
6877
6878 Now we're stuck for aggregates larger than 16 bytes,
6879 because OImode vanished in the meantime. Let's not
6880 try to be unduly clever, and simply follow the ABI
6881 for unions in that case. */
6882 if (mode == BLKmode)
6883 return function_arg_union_value (size, mode, 0, regbase);
6884 else
6885 mclass = MODE_INT;
6886 }
6887
6888 /* We should only have pointer and integer types at this point. This
6889 must match sparc_promote_function_mode. */
6890 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6891 mode = word_mode;
6892 }
6893
6894 /* We should only have pointer and integer types at this point. This must
6895 match sparc_promote_function_mode. */
6896 else if (TARGET_ARCH32
6897 && mclass == MODE_INT
6898 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6899 mode = word_mode;
6900
6901 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6902 regno = SPARC_FP_ARG_FIRST;
6903 else
6904 regno = regbase;
6905
6906 return gen_rtx_REG (mode, regno);
6907 }
6908
6909 /* Handle TARGET_FUNCTION_VALUE.
6910 On the SPARC, the value is found in the first "output" register, but the
6911 called function leaves it in the first "input" register. */
6912
6913 static rtx
6914 sparc_function_value (const_tree valtype,
6915 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6916 bool outgoing)
6917 {
6918 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6919 }
6920
6921 /* Handle TARGET_LIBCALL_VALUE. */
6922
6923 static rtx
6924 sparc_libcall_value (enum machine_mode mode,
6925 const_rtx fun ATTRIBUTE_UNUSED)
6926 {
6927 return sparc_function_value_1 (NULL_TREE, mode, false);
6928 }
6929
6930 /* Handle TARGET_FUNCTION_VALUE_REGNO_P.
6931 On the SPARC, the first "output" reg is used for integer values, and the
6932 first floating point register is used for floating point values. */
6933
6934 static bool
6935 sparc_function_value_regno_p (const unsigned int regno)
6936 {
6937 return (regno == 8 || regno == 32);
6938 }
6939
6940 /* Do what is necessary for `va_start'. We look at the current function
6941 to determine if stdarg or varargs is used and return the address of
6942 the first unnamed parameter. */
6943
6944 static rtx
6945 sparc_builtin_saveregs (void)
6946 {
6947 int first_reg = crtl->args.info.words;
6948 rtx address;
6949 int regno;
6950
6951 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6952 emit_move_insn (gen_rtx_MEM (word_mode,
6953 gen_rtx_PLUS (Pmode,
6954 frame_pointer_rtx,
6955 GEN_INT (FIRST_PARM_OFFSET (0)
6956 + (UNITS_PER_WORD
6957 * regno)))),
6958 gen_rtx_REG (word_mode,
6959 SPARC_INCOMING_INT_ARG_FIRST + regno));
6960
6961 address = gen_rtx_PLUS (Pmode,
6962 frame_pointer_rtx,
6963 GEN_INT (FIRST_PARM_OFFSET (0)
6964 + UNITS_PER_WORD * first_reg));
6965
6966 return address;
6967 }
6968
6969 /* Implement `va_start' for stdarg. */
6970
6971 static void
6972 sparc_va_start (tree valist, rtx nextarg)
6973 {
6974 nextarg = expand_builtin_saveregs ();
6975 std_expand_builtin_va_start (valist, nextarg);
6976 }
6977
6978 /* Implement `va_arg' for stdarg. */
6979
6980 static tree
6981 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6982 gimple_seq *post_p)
6983 {
6984 HOST_WIDE_INT size, rsize, align;
6985 tree addr, incr;
6986 bool indirect;
6987 tree ptrtype = build_pointer_type (type);
6988
6989 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6990 {
6991 indirect = true;
6992 size = rsize = UNITS_PER_WORD;
6993 align = 0;
6994 }
6995 else
6996 {
6997 indirect = false;
6998 size = int_size_in_bytes (type);
6999 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7000 align = 0;
7001
7002 if (TARGET_ARCH64)
7003 {
7004 /* For SPARC64, objects requiring 16-byte alignment get it. */
7005 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7006 align = 2 * UNITS_PER_WORD;
7007
7008 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7009 are left-justified in their slots. */
7010 if (AGGREGATE_TYPE_P (type))
7011 {
7012 if (size == 0)
7013 size = rsize = UNITS_PER_WORD;
7014 else
7015 size = rsize;
7016 }
7017 }
7018 }
7019
7020 incr = valist;
7021 if (align)
7022 {
7023 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7024 incr = fold_convert (sizetype, incr);
7025 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7026 size_int (-align));
7027 incr = fold_convert (ptr_type_node, incr);
7028 }
7029
7030 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7031 addr = incr;
7032
7033 if (BYTES_BIG_ENDIAN && size < rsize)
7034 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7035
7036 if (indirect)
7037 {
7038 addr = fold_convert (build_pointer_type (ptrtype), addr);
7039 addr = build_va_arg_indirect_ref (addr);
7040 }
7041
7042 /* If the address isn't aligned properly for the type, we need a temporary.
7043 FIXME: This is inefficient; usually we can do this in registers. */
7044 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7045 {
7046 tree tmp = create_tmp_var (type, "va_arg_tmp");
7047 tree dest_addr = build_fold_addr_expr (tmp);
7048 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7049 3, dest_addr, addr, size_int (rsize));
7050 TREE_ADDRESSABLE (tmp) = 1;
7051 gimplify_and_add (copy, pre_p);
7052 addr = dest_addr;
7053 }
7054
7055 else
7056 addr = fold_convert (ptrtype, addr);
7057
7058 incr = fold_build_pointer_plus_hwi (incr, rsize);
7059 gimplify_assign (valist, incr, post_p);
7060
7061 return build_va_arg_indirect_ref (addr);
7062 }
7063 \f
7064 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7065 Specify whether the vector mode is supported by the hardware. */
7066
7067 static bool
7068 sparc_vector_mode_supported_p (enum machine_mode mode)
7069 {
7070 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
7071 }
7072 \f
7073 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7074
7075 static enum machine_mode
7076 sparc_preferred_simd_mode (enum machine_mode mode)
7077 {
7078 if (TARGET_VIS)
7079 switch (mode)
7080 {
7081 case SImode:
7082 return V2SImode;
7083 case HImode:
7084 return V4HImode;
7085 case QImode:
7086 return V8QImode;
7087
7088 default:;
7089 }
7090
7091 return word_mode;
7092 }
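
/* E.g. with VIS enabled, vectorizing a loop over `char' data prefers
   V8QImode (eight elements in one double FP register); without VIS
   the vectorizer gets word_mode back, i.e. no preferred SIMD mode.  */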
7093 \f
7094 /* Return the string to output an unconditional branch to LABEL, which is
7095 the operand number of the label.
7096
7097 DEST is the destination insn (i.e. the label), INSN is the source. */
7098
7099 const char *
7100 output_ubranch (rtx dest, int label, rtx insn)
7101 {
7102 static char string[64];
7103 bool v9_form = false;
7104 char *p;
7105
7106 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
7107 {
7108 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7109 - INSN_ADDRESSES (INSN_UID (insn)));
7110 /* Leave some instructions for "slop". */
7111 if (delta >= -260000 && delta < 260000)
7112 v9_form = true;
7113 }
7114
7115 if (v9_form)
7116 strcpy (string, "ba%*,pt\t%%xcc, ");
7117 else
7118 strcpy (string, "b%*\t");
7119
7120 p = strchr (string, '\0');
7121 *p++ = '%';
7122 *p++ = 'l';
7123 *p++ = '0' + label;
7124 *p++ = '%';
7125 *p++ = '(';
7126 *p = '\0';
7127
7128 return string;
7129 }
7130
7131 /* Return the string to output a conditional branch to LABEL, which is
7132 the operand number of the label. OP is the conditional expression.
7133 XEXP (OP, 0) is assumed to be a condition code register (integer or
7134 floating point) and its mode specifies what kind of comparison we made.
7135
7136 DEST is the destination insn (i.e. the label), INSN is the source.
7137
7138 REVERSED is nonzero if we should reverse the sense of the comparison.
7139
7140 ANNUL is nonzero if we should generate an annulling branch. */
7141
7142 const char *
7143 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7144 rtx insn)
7145 {
7146 static char string[64];
7147 enum rtx_code code = GET_CODE (op);
7148 rtx cc_reg = XEXP (op, 0);
7149 enum machine_mode mode = GET_MODE (cc_reg);
7150 const char *labelno, *branch;
7151 int spaces = 8, far;
7152 char *p;
7153
7154 /* v9 branches are limited to +-1MB. If it is too far away,
7155 change
7156
7157 bne,pt %xcc, .LC30
7158
7159 to
7160
7161 be,pn %xcc, .+12
7162 nop
7163 ba .LC30
7164
7165 and
7166
7167 fbne,a,pn %fcc2, .LC29
7168
7169 to
7170
7171 fbe,pt %fcc2, .+16
7172 nop
7173 ba .LC29 */
7174
7175 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7176 if (reversed ^ far)
7177 {
7178 /* Reversal of FP compares requires care -- an ordered compare
7179 becomes an unordered compare and vice versa. */
7180 if (mode == CCFPmode || mode == CCFPEmode)
7181 code = reverse_condition_maybe_unordered (code);
7182 else
7183 code = reverse_condition (code);
7184 }
7185
7186 /* Start by writing the branch condition. */
7187 if (mode == CCFPmode || mode == CCFPEmode)
7188 {
7189 switch (code)
7190 {
7191 case NE:
7192 branch = "fbne";
7193 break;
7194 case EQ:
7195 branch = "fbe";
7196 break;
7197 case GE:
7198 branch = "fbge";
7199 break;
7200 case GT:
7201 branch = "fbg";
7202 break;
7203 case LE:
7204 branch = "fble";
7205 break;
7206 case LT:
7207 branch = "fbl";
7208 break;
7209 case UNORDERED:
7210 branch = "fbu";
7211 break;
7212 case ORDERED:
7213 branch = "fbo";
7214 break;
7215 case UNGT:
7216 branch = "fbug";
7217 break;
7218 case UNLT:
7219 branch = "fbul";
7220 break;
7221 case UNEQ:
7222 branch = "fbue";
7223 break;
7224 case UNGE:
7225 branch = "fbuge";
7226 break;
7227 case UNLE:
7228 branch = "fbule";
7229 break;
7230 case LTGT:
7231 branch = "fblg";
7232 break;
7233
7234 default:
7235 gcc_unreachable ();
7236 }
7237
7238 /* ??? !v9: FP branches cannot be preceded by another floating point
7239 insn. Because there is currently no concept of pre-delay slots,
7240 we can fix this only by always emitting a nop before a floating
7241 point branch. */
7242
7243 string[0] = '\0';
7244 if (! TARGET_V9)
7245 strcpy (string, "nop\n\t");
7246 strcat (string, branch);
7247 }
7248 else
7249 {
7250 switch (code)
7251 {
7252 case NE:
7253 branch = "bne";
7254 break;
7255 case EQ:
7256 branch = "be";
7257 break;
7258 case GE:
7259 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7260 branch = "bpos";
7261 else
7262 branch = "bge";
7263 break;
7264 case GT:
7265 branch = "bg";
7266 break;
7267 case LE:
7268 branch = "ble";
7269 break;
7270 case LT:
7271 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7272 branch = "bneg";
7273 else
7274 branch = "bl";
7275 break;
7276 case GEU:
7277 branch = "bgeu";
7278 break;
7279 case GTU:
7280 branch = "bgu";
7281 break;
7282 case LEU:
7283 branch = "bleu";
7284 break;
7285 case LTU:
7286 branch = "blu";
7287 break;
7288
7289 default:
7290 gcc_unreachable ();
7291 }
7292 strcpy (string, branch);
7293 }
7294 spaces -= strlen (branch);
7295 p = strchr (string, '\0');
7296
7297 /* Now add the annulling, the label, and a possible nop. */
7298 if (annul && ! far)
7299 {
7300 strcpy (p, ",a");
7301 p += 2;
7302 spaces -= 2;
7303 }
7304
7305 if (TARGET_V9)
7306 {
7307 rtx note;
7308 int v8 = 0;
7309
7310 if (! far && insn && INSN_ADDRESSES_SET_P ())
7311 {
7312 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7313 - INSN_ADDRESSES (INSN_UID (insn)));
7314 /* Leave some instructions for "slop". */
7315 if (delta < -260000 || delta >= 260000)
7316 v8 = 1;
7317 }
7318
7319 if (mode == CCFPmode || mode == CCFPEmode)
7320 {
7321 static char v9_fcc_labelno[] = "%%fccX, ";
7322 /* Set the char indicating the number of the fcc reg to use. */
7323 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7324 labelno = v9_fcc_labelno;
7325 if (v8)
7326 {
7327 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7328 labelno = "";
7329 }
7330 }
7331 else if (mode == CCXmode || mode == CCX_NOOVmode)
7332 {
7333 labelno = "%%xcc, ";
7334 gcc_assert (! v8);
7335 }
7336 else
7337 {
7338 labelno = "%%icc, ";
7339 if (v8)
7340 labelno = "";
7341 }
7342
7343 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7344 {
7345 strcpy (p,
7346 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7347 ? ",pt" : ",pn");
7348 p += 3;
7349 spaces -= 3;
7350 }
7351 }
7352 else
7353 labelno = "";
7354
7355 if (spaces > 0)
7356 *p++ = '\t';
7357 else
7358 *p++ = ' ';
7359 strcpy (p, labelno);
7360 p = strchr (p, '\0');
7361 if (far)
7362 {
7363 strcpy (p, ".+12\n\t nop\n\tb\t");
7364 /* Skip the next insn if requested or
7365 if we know that it will be a nop. */
7366 if (annul || ! final_sequence)
7367 p[3] = '6';
7368 p += 14;
7369 }
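/* Finish with the %lN label operand and the %# punctuation code, which
   expands to a nop when the delay slot is empty (see the '#' case in
   sparc_print_operand). */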
7370 *p++ = '%';
7371 *p++ = 'l';
7372 *p++ = label + '0';
7373 *p++ = '%';
7374 *p++ = '#';
7375 *p = '\0';
7376
7377 return string;
7378 }
7379
7380 /* Emit a library call comparison between floating point X and Y.
7381 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7382 Return the new operator to be used in the comparison sequence.
7383
7384 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7385 values as arguments instead of the TFmode registers themselves;
7386 that is why we cannot call emit_float_lib_cmp. */
7387
7388 rtx
7389 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7390 {
7391 const char *qpfunc;
7392 rtx slot0, slot1, result, tem, tem2, libfunc;
7393 enum machine_mode mode;
7394 enum rtx_code new_comparison;
7395
7396 switch (comparison)
7397 {
7398 case EQ:
7399 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7400 break;
7401
7402 case NE:
7403 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7404 break;
7405
7406 case GT:
7407 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7408 break;
7409
7410 case GE:
7411 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7412 break;
7413
7414 case LT:
7415 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7416 break;
7417
7418 case LE:
7419 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7420 break;
7421
7422 case ORDERED:
7423 case UNORDERED:
7424 case UNGT:
7425 case UNLT:
7426 case UNEQ:
7427 case UNGE:
7428 case UNLE:
7429 case LTGT:
7430 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7431 break;
7432
7433 default:
7434 gcc_unreachable ();
7435 }
7436
7437 if (TARGET_ARCH64)
7438 {
7439 if (MEM_P (x))
7440 {
7441 tree expr = MEM_EXPR (x);
7442 if (expr)
7443 mark_addressable (expr);
7444 slot0 = x;
7445 }
7446 else
7447 {
7448 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
7449 emit_move_insn (slot0, x);
7450 }
7451
7452 if (MEM_P (y))
7453 {
7454 tree expr = MEM_EXPR (y);
7455 if (expr)
7456 mark_addressable (expr);
7457 slot1 = y;
7458 }
7459 else
7460 {
7461 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
7462 emit_move_insn (slot1, y);
7463 }
7464
7465 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7466 emit_library_call (libfunc, LCT_NORMAL,
7467 DImode, 2,
7468 XEXP (slot0, 0), Pmode,
7469 XEXP (slot1, 0), Pmode);
7470 mode = DImode;
7471 }
7472 else
7473 {
7474 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7475 emit_library_call (libfunc, LCT_NORMAL,
7476 SImode, 2,
7477 x, TFmode, y, TFmode);
7478 mode = SImode;
7479 }
7480
7481
7482 /* Immediately move the result of the libcall into a pseudo
7483 register so reload doesn't clobber the value if it needs
7484 the return register for a spill reg. */
7485 result = gen_reg_rtx (mode);
7486 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7487
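/* The _Q_cmp/_Qp_cmp routines do not return a simple truth value: as
   the decoding below relies on, they encode the relation of X and Y as
   0 (equal), 1 (less than), 2 (greater than) or 3 (unordered).  Each
   unordered comparison is derived from that encoding, e.g. UNLT holds
   for results 1 and 3, hence the (result & 1) test, while UNEQ holds
   for results 0 and 3, hence the ((result + 1) & 2) == 0 test. */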
7488 switch (comparison)
7489 {
7490 default:
7491 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7492 case ORDERED:
7493 case UNORDERED:
7494 new_comparison = (comparison == UNORDERED ? EQ : NE);
7495 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
7496 case UNGT:
7497 case UNGE:
7498 new_comparison = (comparison == UNGT ? GT : NE);
7499 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7500 case UNLE:
7501 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7502 case UNLT:
7503 tem = gen_reg_rtx (mode);
7504 if (TARGET_ARCH32)
7505 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7506 else
7507 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7508 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7509 case UNEQ:
7510 case LTGT:
7511 tem = gen_reg_rtx (mode);
7512 if (TARGET_ARCH32)
7513 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7514 else
7515 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7516 tem2 = gen_reg_rtx (mode);
7517 if (TARGET_ARCH32)
7518 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7519 else
7520 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7521 new_comparison = (comparison == UNEQ ? EQ : NE);
7522 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7523 }
7524
7525 gcc_unreachable ();
7526 }
7527
7528 /* Generate an unsigned DImode to FP conversion. This is the same code
7529 optabs would emit if we didn't have TFmode patterns. */
7530
7531 void
7532 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7533 {
7534 rtx neglab, donelab, i0, i1, f0, in, out;
7535
7536 out = operands[0];
7537 in = force_reg (DImode, operands[1]);
7538 neglab = gen_label_rtx ();
7539 donelab = gen_label_rtx ();
7540 i0 = gen_reg_rtx (DImode);
7541 i1 = gen_reg_rtx (DImode);
7542 f0 = gen_reg_rtx (mode);
7543
7544 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7545
7546 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7547 emit_jump_insn (gen_jump (donelab));
7548 emit_barrier ();
7549
7550 emit_label (neglab);
7551
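/* IN has its high bit set, so it would be mishandled by a signed
   DImode conversion.  The standard trick, sketched here for clarity:
   halve the value while keeping the discarded low bit as a sticky
   rounding bit, i0 = (in >> 1) | (in & 1), convert that, and double
   the result.  The sticky bit makes the final rounding match what a
   direct unsigned conversion would have produced. */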
7552 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7553 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7554 emit_insn (gen_iordi3 (i0, i0, i1));
7555 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7556 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7557
7558 emit_label (donelab);
7559 }
7560
7561 /* Generate an FP to unsigned DImode conversion. This is the same code
7562 optabs would emit if we didn't have TFmode patterns. */
7563
7564 void
7565 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7566 {
7567 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7568
7569 out = operands[0];
7570 in = force_reg (mode, operands[1]);
7571 neglab = gen_label_rtx ();
7572 donelab = gen_label_rtx ();
7573 i0 = gen_reg_rtx (DImode);
7574 i1 = gen_reg_rtx (DImode);
7575 limit = gen_reg_rtx (mode);
7576 f0 = gen_reg_rtx (mode);
7577
7578 emit_move_insn (limit,
7579 CONST_DOUBLE_FROM_REAL_VALUE (
7580 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7581 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7582
7583 emit_insn (gen_rtx_SET (VOIDmode,
7584 out,
7585 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7586 emit_jump_insn (gen_jump (donelab));
7587 emit_barrier ();
7588
7589 emit_label (neglab);
7590
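/* IN is at least 2^63, out of signed DImode range.  Subtract 2^63
   before converting, then flip bit 63 of the integer result to add
   the 2^63 back: (DImode) (in - 2^63) ^ (1 << 63) is the unsigned
   result. */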
7591 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7592 emit_insn (gen_rtx_SET (VOIDmode,
7593 i0,
7594 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7595 emit_insn (gen_movdi (i1, const1_rtx));
7596 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7597 emit_insn (gen_xordi3 (out, i0, i1));
7598
7599 emit_label (donelab);
7600 }
7601
7602 /* Return the string to output a conditional branch to LABEL, testing
7603 register REG. LABEL is the operand number of the label; REG is the
7604 operand number of the reg. OP is the conditional expression. The mode
7605 of REG says what kind of comparison we made.
7606
7607 DEST is the destination insn (i.e. the label), INSN is the source.
7608
7609 REVERSED is nonzero if we should reverse the sense of the comparison.
7610
7611 ANNUL is nonzero if we should generate an annulling branch. */
7612
7613 const char *
7614 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7615 int annul, rtx insn)
7616 {
7617 static char string[64];
7618 enum rtx_code code = GET_CODE (op);
7619 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7620 rtx note;
7621 int far;
7622 char *p;
7623
7624 /* Branches on register contents are limited to +-128KB. If the target
7625 is too far away, change
7626
7627 brnz,pt %g1, .LC30
7628
7629 to
7630
7631 brz,pn %g1, .+12
7632 nop
7633 ba,pt %xcc, .LC30
7634
7635 and
7636
7637 brgez,a,pn %o1, .LC29
7638
7639 to
7640
7641 brlz,pt %o1, .+16
7642 nop
7643 ba,pt %xcc, .LC29 */
7644
7645 far = get_attr_length (insn) >= 3;
7646
7647 /* These are integer branches on register contents, never floating-point, so we can always just reverse the code. */
7648 if (reversed ^ far)
7649 code = reverse_condition (code);
7650
7651 /* Only 64-bit versions of these instructions exist. */
7652 gcc_assert (mode == DImode);
7653
7654 /* Start by writing the branch condition. */
7655
7656 switch (code)
7657 {
7658 case NE:
7659 strcpy (string, "brnz");
7660 break;
7661
7662 case EQ:
7663 strcpy (string, "brz");
7664 break;
7665
7666 case GE:
7667 strcpy (string, "brgez");
7668 break;
7669
7670 case LT:
7671 strcpy (string, "brlz");
7672 break;
7673
7674 case LE:
7675 strcpy (string, "brlez");
7676 break;
7677
7678 case GT:
7679 strcpy (string, "brgz");
7680 break;
7681
7682 default:
7683 gcc_unreachable ();
7684 }
7685
7686 p = strchr (string, '\0');
7687
7688 /* Now add the annulling, reg, label, and nop. */
7689 if (annul && ! far)
7690 {
7691 strcpy (p, ",a");
7692 p += 2;
7693 }
7694
7695 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7696 {
7697 strcpy (p,
7698 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7699 ? ",pt" : ",pn");
7700 p += 3;
7701 }
7702
7703 *p = p < string + 8 ? '\t' : ' ';
7704 p++;
7705 *p++ = '%';
7706 *p++ = '0' + reg;
7707 *p++ = ',';
7708 *p++ = ' ';
7709 if (far)
7710 {
7711 int veryfar = 1, delta;
7712
7713 if (INSN_ADDRESSES_SET_P ())
7714 {
7715 delta = (INSN_ADDRESSES (INSN_UID (dest))
7716 - INSN_ADDRESSES (INSN_UID (insn)));
7717 /* Leave some instructions for "slop". */
7718 if (delta >= -260000 && delta < 260000)
7719 veryfar = 0;
7720 }
7721
7722 strcpy (p, ".+12\n\t nop\n\t");
7723 /* Skip the next insn if requested or
7724 if we know that it will be a nop. */
7725 if (annul || ! final_sequence)
7726 p[3] = '6';
7727 p += 12;
7728 if (veryfar)
7729 {
7730 strcpy (p, "b\t");
7731 p += 2;
7732 }
7733 else
7734 {
7735 strcpy (p, "ba,pt\t%%xcc, ");
7736 p += 13;
7737 }
7738 }
7739 *p++ = '%';
7740 *p++ = 'l';
7741 *p++ = '0' + label;
7742 *p++ = '%';
7743 *p++ = '#';
7744 *p = '\0';
7745
7746 return string;
7747 }
7748
7749 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7750 Such instructions cannot be used in the delay slot of the return insn on V9.
7751 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7752 */
7753
7754 static int
7755 epilogue_renumber (register rtx *where, int test)
7756 {
7757 register const char *fmt;
7758 register int i;
7759 register enum rtx_code code;
7760
7761 if (*where == 0)
7762 return 0;
7763
7764 code = GET_CODE (*where);
7765
7766 switch (code)
7767 {
7768 case REG:
7769 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7770 return 1;
7771 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7772 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
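/* Fall through. */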
7773 case SCRATCH:
7774 case CC0:
7775 case PC:
7776 case CONST_INT:
7777 case CONST_DOUBLE:
7778 return 0;
7779
7780 /* Do not replace the frame pointer with the stack pointer because
7781 it can cause the delayed instruction to load below the stack.
7782 This occurs when instructions like:
7783
7784 (set (reg/i:SI 24 %i0)
7785 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7786 (const_int -20 [0xffffffec])) 0))
7787
7788 are in the return delay slot. */
7789 case PLUS:
7790 if (GET_CODE (XEXP (*where, 0)) == REG
7791 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7792 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7793 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7794 return 1;
7795 break;
7796
7797 case MEM:
7798 if (SPARC_STACK_BIAS
7799 && GET_CODE (XEXP (*where, 0)) == REG
7800 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7801 return 1;
7802 break;
7803
7804 default:
7805 break;
7806 }
7807
7808 fmt = GET_RTX_FORMAT (code);
7809
7810 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7811 {
7812 if (fmt[i] == 'E')
7813 {
7814 register int j;
7815 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7816 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7817 return 1;
7818 }
7819 else if (fmt[i] == 'e'
7820 && epilogue_renumber (&(XEXP (*where, i)), test))
7821 return 1;
7822 }
7823 return 0;
7824 }
7825 \f
7826 /* Leaf functions and non-leaf functions have different needs. */
7827
7828 static const int
7829 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7830
7831 static const int
7832 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7833
7834 static const int *const reg_alloc_orders[] = {
7835 reg_leaf_alloc_order,
7836 reg_nonleaf_alloc_order};
7837
7838 void
7839 order_regs_for_local_alloc (void)
7840 {
7841 static int last_order_nonleaf = 1;
7842
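/* Hard register 15 is %o7, which every CALL instruction clobbers, so
   its liveness is a convenient proxy for whether the current function
   is a leaf. */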
7843 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7844 {
7845 last_order_nonleaf = !last_order_nonleaf;
7846 memcpy ((char *) reg_alloc_order,
7847 (const char *) reg_alloc_orders[last_order_nonleaf],
7848 FIRST_PSEUDO_REGISTER * sizeof (int));
7849 }
7850 }
7851 \f
7852 /* Return 1 if REG and MEM are legitimate enough to allow the various
7853 mem<-->reg splits to be run. */
7854
7855 int
7856 sparc_splitdi_legitimate (rtx reg, rtx mem)
7857 {
7858 /* Punt if we are here by mistake. */
7859 gcc_assert (reload_completed);
7860
7861 /* We must have an offsettable memory reference. */
7862 if (! offsettable_memref_p (mem))
7863 return 0;
7864
7865 /* If we have legitimate args for ldd/std, we do not want
7866 the split to happen. */
7867 if ((REGNO (reg) % 2) == 0
7868 && mem_min_alignment (mem, 8))
7869 return 0;
7870
7871 /* Success. */
7872 return 1;
7873 }
7874
7875 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
7876
7877 int
7878 sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
7879 {
7880 int regno1, regno2;
7881
7882 if (GET_CODE (reg1) == SUBREG)
7883 reg1 = SUBREG_REG (reg1);
7884 if (GET_CODE (reg1) != REG)
7885 return 0;
7886 regno1 = REGNO (reg1);
7887
7888 if (GET_CODE (reg2) == SUBREG)
7889 reg2 = SUBREG_REG (reg2);
7890 if (GET_CODE (reg2) != REG)
7891 return 0;
7892 regno2 = REGNO (reg2);
7893
7894 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
7895 return 1;
7896
7897 if (TARGET_VIS3)
7898 {
7899 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
7900 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
7901 return 1;
7902 }
7903
7904 return 0;
7905 }
7906
7907 /* Return 1 if x and y are some kind of REG and they refer to
7908 different hard registers. This test is guaranteed to be
7909 run after reload. */
7910
7911 int
7912 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7913 {
7914 if (GET_CODE (x) != REG)
7915 return 0;
7916 if (GET_CODE (y) != REG)
7917 return 0;
7918 if (REGNO (x) == REGNO (y))
7919 return 0;
7920 return 1;
7921 }
7922
7923 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7924 This makes them candidates for using ldd and std insns.
7925
7926 Note reg1 and reg2 *must* be hard registers. */
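/* For example, (%o0, %o1) -- hard registers 8 and 9 -- qualify on V8,
   whereas (%o1, %o2) do not, because %o1 has an odd register number. */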
7927
7928 int
7929 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7930 {
7931 /* We might have been passed a SUBREG. */
7932 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7933 return 0;
7934
7935 if (REGNO (reg1) % 2 != 0)
7936 return 0;
7937
7938 /* Integer ldd is deprecated in SPARC V9. */
7939 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
7940 return 0;
7941
7942 return (REGNO (reg1) == REGNO (reg2) - 1);
7943 }
7944
7945 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7946 an ldd or std insn.
7947
7948 This can only happen when addr1 and addr2, the addresses in mem1
7949 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7950 addr1 must also be aligned on a 64-bit boundary.
7951
7952 Also, if dependent_reg_rtx is not null, it must not be used to
7953 compute the address for mem1, i.e. we cannot optimize a sequence
7954 like:
7955 ld [%o0], %o0
7956 ld [%o0 + 4], %o1
7957 to
7958 ldd [%o0], %o0
7959 nor:
7960 ld [%g3 + 4], %g3
7961 ld [%g3], %g2
7962 to
7963 ldd [%g3], %g2
7964
7965 But, note that the transformation from:
7966 ld [%g2 + 4], %g3
7967 ld [%g2], %g2
7968 to
7969 ldd [%g2], %g2
7970 is perfectly fine. Thus, the peephole2 patterns always pass us
7971 the destination register of the first load, never the second one.
7972
7973 For stores we don't have a similar problem, so dependent_reg_rtx is
7974 NULL_RTX. */
7975
7976 int
7977 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7978 {
7979 rtx addr1, addr2;
7980 unsigned int reg1;
7981 HOST_WIDE_INT offset1;
7982
7983 /* The mems cannot be volatile. */
7984 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7985 return 0;
7986
7987 /* MEM1 should be aligned on a 64-bit boundary. */
7988 if (MEM_ALIGN (mem1) < 64)
7989 return 0;
7990
7991 addr1 = XEXP (mem1, 0);
7992 addr2 = XEXP (mem2, 0);
7993
7994 /* Extract a register number and offset (if used) from the first addr. */
7995 if (GET_CODE (addr1) == PLUS)
7996 {
7997 /* If not a REG, return zero. */
7998 if (GET_CODE (XEXP (addr1, 0)) != REG)
7999 return 0;
8000 else
8001 {
8002 reg1 = REGNO (XEXP (addr1, 0));
8003 /* The offset must be constant! */
8004 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
8005 return 0;
8006 offset1 = INTVAL (XEXP (addr1, 1));
8007 }
8008 }
8009 else if (GET_CODE (addr1) != REG)
8010 return 0;
8011 else
8012 {
8013 reg1 = REGNO (addr1);
8014 /* This was a simple (mem (reg)) expression. Offset is 0. */
8015 offset1 = 0;
8016 }
8017
8018 /* Make sure the second address is of the form (plus (reg) (const_int)). */
8019 if (GET_CODE (addr2) != PLUS)
8020 return 0;
8021
8022 if (GET_CODE (XEXP (addr2, 0)) != REG
8023 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
8024 return 0;
8025
8026 if (reg1 != REGNO (XEXP (addr2, 0)))
8027 return 0;
8028
8029 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
8030 return 0;
8031
8032 /* The first offset must be evenly divisible by 8 to ensure the
8033 address is 64-bit aligned. */
8034 if (offset1 % 8 != 0)
8035 return 0;
8036
8037 /* The offset for the second addr must be 4 more than the first addr. */
8038 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
8039 return 0;
8040
8041 /* All the tests passed. addr1 and addr2 are valid for ldd and std
8042 instructions. */
8043 return 1;
8044 }
8045
8046 /* Return 1 if reg is a pseudo, or is the first register in
8047 a hard register pair. This makes it suitable for use in
8048 ldd and std insns. */
8049
8050 int
8051 register_ok_for_ldd (rtx reg)
8052 {
8053 /* We might have been passed a SUBREG. */
8054 if (!REG_P (reg))
8055 return 0;
8056
8057 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
8058 return (REGNO (reg) % 2 == 0);
8059
8060 return 1;
8061 }
8062
8063 /* Return 1 if OP is a memory whose address is known to be
8064 aligned to 8-byte boundary, or a pseudo during reload.
8065 This makes it suitable for use in ldd and std insns. */
8066
8067 int
8068 memory_ok_for_ldd (rtx op)
8069 {
8070 if (MEM_P (op))
8071 {
8072 /* In 64-bit mode, we assume that the address is word-aligned. */
8073 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
8074 return 0;
8075
8076 if (! can_create_pseudo_p ()
8077 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
8078 return 0;
8079 }
8080 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
8081 {
8082 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
8083 return 0;
8084 }
8085 else
8086 return 0;
8087
8088 return 1;
8089 }
8090 \f
8091 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8092
8093 static bool
8094 sparc_print_operand_punct_valid_p (unsigned char code)
8095 {
8096 if (code == '#'
8097 || code == '*'
8098 || code == '('
8099 || code == ')'
8100 || code == '_'
8101 || code == '&')
8102 return true;
8103
8104 return false;
8105 }
8106
8107 /* Implement TARGET_PRINT_OPERAND.
8108 Print operand X (an rtx) in assembler syntax to file FILE.
8109 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8110 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8111
8112 static void
8113 sparc_print_operand (FILE *file, rtx x, int code)
8114 {
8115 switch (code)
8116 {
8117 case '#':
8118 /* Output an insn in a delay slot. */
8119 if (final_sequence)
8120 sparc_indent_opcode = 1;
8121 else
8122 fputs ("\n\t nop", file);
8123 return;
8124 case '*':
8125 /* Output an annul flag if there's nothing for the delay slot and we
8126 are optimizing. This is always used with '(' below.
8127 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8128 this is a dbx bug. So, we only do this when optimizing.
8129 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8130 Always emit a nop in case the next instruction is a branch. */
8131 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8132 fputs (",a", file);
8133 return;
8134 case '(':
8135 /* Output a 'nop' if there's nothing for the delay slot and we are
8136 not optimizing. This is always used with '*' above. */
8137 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8138 fputs ("\n\t nop", file);
8139 else if (final_sequence)
8140 sparc_indent_opcode = 1;
8141 return;
8142 case ')':
8143 /* Output the right displacement from the saved PC on function return.
8144 The caller may have placed an "unimp" insn immediately after the call
8145 so we have to account for it. This insn is used in the 32-bit ABI
8146 when calling a function that returns a non-zero-sized structure. The
8147 64-bit ABI doesn't have it. Be careful to have this test be the same
8148 as that for the call. The exception is when sparc_std_struct_return
8149 is enabled, the psABI is followed exactly and the adjustment is made
8150 by the code in sparc_struct_value_rtx. The call emitted is the same
8151 when sparc_std_struct_return is enabled. */
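/* An illustration (not taken from these sources): the caller emits

     call foo
      nop
     unimp 8 ! the operand encodes the size of the structure

   and the callee must return with "jmp %i7+12" instead of the usual
   "jmp %i7+8" so that the unimp word is skipped. */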
8152 if (!TARGET_ARCH64
8153 && cfun->returns_struct
8154 && !sparc_std_struct_return
8155 && DECL_SIZE (DECL_RESULT (current_function_decl))
8156 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8157 == INTEGER_CST
8158 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8159 fputs ("12", file);
8160 else
8161 fputc ('8', file);
8162 return;
8163 case '_':
8164 /* Output the Embedded Medium/Anywhere code model base register. */
8165 fputs (EMBMEDANY_BASE_REG, file);
8166 return;
8167 case '&':
8168 /* Print some local dynamic TLS name. */
8169 assemble_name (file, get_some_local_dynamic_name ());
8170 return;
8171
8172 case 'Y':
8173 /* Adjust the operand to take into account a RESTORE operation. */
8174 if (GET_CODE (x) == CONST_INT)
8175 break;
8176 else if (GET_CODE (x) != REG)
8177 output_operand_lossage ("invalid %%Y operand");
8178 else if (REGNO (x) < 8)
8179 fputs (reg_names[REGNO (x)], file);
8180 else if (REGNO (x) >= 24 && REGNO (x) < 32)
8181 fputs (reg_names[REGNO (x)-16], file);
8182 else
8183 output_operand_lossage ("invalid %%Y operand");
8184 return;
8185 case 'L':
8186 /* Print out the low order register name of a register pair. */
8187 if (WORDS_BIG_ENDIAN)
8188 fputs (reg_names[REGNO (x)+1], file);
8189 else
8190 fputs (reg_names[REGNO (x)], file);
8191 return;
8192 case 'H':
8193 /* Print out the high order register name of a register pair. */
8194 if (WORDS_BIG_ENDIAN)
8195 fputs (reg_names[REGNO (x)], file);
8196 else
8197 fputs (reg_names[REGNO (x)+1], file);
8198 return;
8199 case 'R':
8200 /* Print out the second register name of a register pair or quad.
8201 I.e., R (%o0) => %o1. */
8202 fputs (reg_names[REGNO (x)+1], file);
8203 return;
8204 case 'S':
8205 /* Print out the third register name of a register quad.
8206 I.e., S (%o0) => %o2. */
8207 fputs (reg_names[REGNO (x)+2], file);
8208 return;
8209 case 'T':
8210 /* Print out the fourth register name of a register quad.
8211 I.e., T (%o0) => %o3. */
8212 fputs (reg_names[REGNO (x)+3], file);
8213 return;
8214 case 'x':
8215 /* Print a condition code register. */
8216 if (REGNO (x) == SPARC_ICC_REG)
8217 {
8218 /* We don't handle CC[X]_NOOVmode because they're not supposed
8219 to occur here. */
8220 if (GET_MODE (x) == CCmode)
8221 fputs ("%icc", file);
8222 else if (GET_MODE (x) == CCXmode)
8223 fputs ("%xcc", file);
8224 else
8225 gcc_unreachable ();
8226 }
8227 else
8228 /* %fccN register */
8229 fputs (reg_names[REGNO (x)], file);
8230 return;
8231 case 'm':
8232 /* Print the operand's address only. */
8233 output_address (XEXP (x, 0));
8234 return;
8235 case 'r':
8236 /* In this case we need a register. Use %g0 if the
8237 operand is const0_rtx. */
8238 if (x == const0_rtx
8239 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
8240 {
8241 fputs ("%g0", file);
8242 return;
8243 }
8244 else
8245 break;
8246
8247 case 'A':
8248 switch (GET_CODE (x))
8249 {
8250 case IOR: fputs ("or", file); break;
8251 case AND: fputs ("and", file); break;
8252 case XOR: fputs ("xor", file); break;
8253 default: output_operand_lossage ("invalid %%A operand");
8254 }
8255 return;
8256
8257 case 'B':
8258 switch (GET_CODE (x))
8259 {
8260 case IOR: fputs ("orn", file); break;
8261 case AND: fputs ("andn", file); break;
8262 case XOR: fputs ("xnor", file); break;
8263 default: output_operand_lossage ("invalid %%B operand");
8264 }
8265 return;
8266
8267 /* This is used by the conditional move instructions. */
8268 case 'C':
8269 {
8270 enum rtx_code rc = GET_CODE (x);
8271
8272 switch (rc)
8273 {
8274 case NE: fputs ("ne", file); break;
8275 case EQ: fputs ("e", file); break;
8276 case GE: fputs ("ge", file); break;
8277 case GT: fputs ("g", file); break;
8278 case LE: fputs ("le", file); break;
8279 case LT: fputs ("l", file); break;
8280 case GEU: fputs ("geu", file); break;
8281 case GTU: fputs ("gu", file); break;
8282 case LEU: fputs ("leu", file); break;
8283 case LTU: fputs ("lu", file); break;
8284 case LTGT: fputs ("lg", file); break;
8285 case UNORDERED: fputs ("u", file); break;
8286 case ORDERED: fputs ("o", file); break;
8287 case UNLT: fputs ("ul", file); break;
8288 case UNLE: fputs ("ule", file); break;
8289 case UNGT: fputs ("ug", file); break;
8290 case UNGE: fputs ("uge", file); break;
8291 case UNEQ: fputs ("ue", file); break;
8292 default: output_operand_lossage ("invalid %%C operand");
8293 }
8294 return;
8295 }
8296
8297 /* This is used by the movr instruction pattern. */
8298 case 'D':
8299 {
8300 enum rtx_code rc = GET_CODE (x);
8301 switch (rc)
8302 {
8303 case NE: fputs ("ne", file); break;
8304 case EQ: fputs ("e", file); break;
8305 case GE: fputs ("gez", file); break;
8306 case LT: fputs ("lz", file); break;
8307 case LE: fputs ("lez", file); break;
8308 case GT: fputs ("gz", file); break;
8309 default: output_operand_lossage ("invalid %%D operand");
8310 }
8311 return;
8312 }
8313
8314 case 'b':
8315 {
8316 /* Print a sign-extended character. */
8317 int i = trunc_int_for_mode (INTVAL (x), QImode);
8318 fprintf (file, "%d", i);
8319 return;
8320 }
8321
8322 case 'f':
8323 /* Operand must be a MEM; write its address. */
8324 if (GET_CODE (x) != MEM)
8325 output_operand_lossage ("invalid %%f operand");
8326 output_address (XEXP (x, 0));
8327 return;
8328
8329 case 's':
8330 {
8331 /* Print a sign-extended 32-bit value. */
8332 HOST_WIDE_INT i;
8333 if (GET_CODE (x) == CONST_INT)
8334 i = INTVAL (x);
8335 else if (GET_CODE (x) == CONST_DOUBLE)
8336 i = CONST_DOUBLE_LOW (x);
8337 else
8338 {
8339 output_operand_lossage ("invalid %%s operand");
8340 return;
8341 }
8342 i = trunc_int_for_mode (i, SImode);
8343 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8344 return;
8345 }
8346
8347 case 0:
8348 /* Do nothing special. */
8349 break;
8350
8351 default:
8352 /* Undocumented flag. */
8353 output_operand_lossage ("invalid operand output code");
8354 }
8355
8356 if (GET_CODE (x) == REG)
8357 fputs (reg_names[REGNO (x)], file);
8358 else if (GET_CODE (x) == MEM)
8359 {
8360 fputc ('[', file);
8361 /* Poor Sun assembler doesn't understand absolute addressing. */
8362 if (CONSTANT_P (XEXP (x, 0)))
8363 fputs ("%g0+", file);
8364 output_address (XEXP (x, 0));
8365 fputc (']', file);
8366 }
8367 else if (GET_CODE (x) == HIGH)
8368 {
8369 fputs ("%hi(", file);
8370 output_addr_const (file, XEXP (x, 0));
8371 fputc (')', file);
8372 }
8373 else if (GET_CODE (x) == LO_SUM)
8374 {
8375 sparc_print_operand (file, XEXP (x, 0), 0);
8376 if (TARGET_CM_MEDMID)
8377 fputs ("+%l44(", file);
8378 else
8379 fputs ("+%lo(", file);
8380 output_addr_const (file, XEXP (x, 1));
8381 fputc (')', file);
8382 }
8383 else if (GET_CODE (x) == CONST_DOUBLE
8384 && (GET_MODE (x) == VOIDmode
8385 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8386 {
8387 if (CONST_DOUBLE_HIGH (x) == 0)
8388 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8389 else if (CONST_DOUBLE_HIGH (x) == -1
8390 && CONST_DOUBLE_LOW (x) < 0)
8391 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8392 else
8393 output_operand_lossage ("long long constant not a valid immediate operand");
8394 }
8395 else if (GET_CODE (x) == CONST_DOUBLE)
8396 output_operand_lossage ("floating point constant not a valid immediate operand");
8397 else { output_addr_const (file, x); }
8398 }
8399
8400 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8401
8402 static void
8403 sparc_print_operand_address (FILE *file, rtx x)
8404 {
8405 register rtx base, index = 0;
8406 int offset = 0;
8407 register rtx addr = x;
8408
8409 if (REG_P (addr))
8410 fputs (reg_names[REGNO (addr)], file);
8411 else if (GET_CODE (addr) == PLUS)
8412 {
8413 if (CONST_INT_P (XEXP (addr, 0)))
8414 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8415 else if (CONST_INT_P (XEXP (addr, 1)))
8416 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8417 else
8418 base = XEXP (addr, 0), index = XEXP (addr, 1);
8419 if (GET_CODE (base) == LO_SUM)
8420 {
8421 gcc_assert (USE_AS_OFFSETABLE_LO10
8422 && TARGET_ARCH64
8423 && ! TARGET_CM_MEDMID);
8424 output_operand (XEXP (base, 0), 0);
8425 fputs ("+%lo(", file);
8426 output_address (XEXP (base, 1));
8427 fprintf (file, ")+%d", offset);
8428 }
8429 else
8430 {
8431 fputs (reg_names[REGNO (base)], file);
8432 if (index == 0)
8433 fprintf (file, "%+d", offset);
8434 else if (REG_P (index))
8435 fprintf (file, "+%s", reg_names[REGNO (index)]);
8436 else if (GET_CODE (index) == SYMBOL_REF
8437 || GET_CODE (index) == LABEL_REF
8438 || GET_CODE (index) == CONST)
8439 fputc ('+', file), output_addr_const (file, index);
8440 else gcc_unreachable ();
8441 }
8442 }
8443 else if (GET_CODE (addr) == MINUS
8444 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8445 {
8446 output_addr_const (file, XEXP (addr, 0));
8447 fputs ("-(", file);
8448 output_addr_const (file, XEXP (addr, 1));
8449 fputs ("-.)", file);
8450 }
8451 else if (GET_CODE (addr) == LO_SUM)
8452 {
8453 output_operand (XEXP (addr, 0), 0);
8454 if (TARGET_CM_MEDMID)
8455 fputs ("+%l44(", file);
8456 else
8457 fputs ("+%lo(", file);
8458 output_address (XEXP (addr, 1));
8459 fputc (')', file);
8460 }
8461 else if (flag_pic
8462 && GET_CODE (addr) == CONST
8463 && GET_CODE (XEXP (addr, 0)) == MINUS
8464 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8465 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8466 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8467 {
8468 addr = XEXP (addr, 0);
8469 output_addr_const (file, XEXP (addr, 0));
8470 /* Group the args of the second CONST in parenthesis. */
8471 fputs ("-(", file);
8472 /* Skip past the second CONST--it does nothing for us. */
8473 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8474 /* Close the parenthesis. */
8475 fputc (')', file);
8476 }
8477 else
8478 {
8479 output_addr_const (file, addr);
8480 }
8481 }
8482 \f
8483 /* Target hook for assembling integer objects. The sparc version has
8484 special handling for aligned DI-mode objects. */
8485
8486 static bool
8487 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8488 {
8489 /* ??? We only output .xword's for symbols and only then in environments
8490 where the assembler can handle them. */
8491 if (aligned_p && size == 8
8492 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8493 {
8494 if (TARGET_V9)
8495 {
8496 assemble_integer_with_op ("\t.xword\t", x);
8497 return true;
8498 }
8499 else
8500 {
8501 assemble_aligned_integer (4, const0_rtx);
8502 assemble_aligned_integer (4, x);
8503 return true;
8504 }
8505 }
8506 return default_assemble_integer (x, size, aligned_p);
8507 }
8508 \f
8509 /* Return the value of a code used in the .proc pseudo-op that says
8510 what kind of result this function returns. For non-C types, we pick
8511 the closest C type. */
8512
8513 #ifndef SHORT_TYPE_SIZE
8514 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8515 #endif
8516
8517 #ifndef INT_TYPE_SIZE
8518 #define INT_TYPE_SIZE BITS_PER_WORD
8519 #endif
8520
8521 #ifndef LONG_TYPE_SIZE
8522 #define LONG_TYPE_SIZE BITS_PER_WORD
8523 #endif
8524
8525 #ifndef LONG_LONG_TYPE_SIZE
8526 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8527 #endif
8528
8529 #ifndef FLOAT_TYPE_SIZE
8530 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8531 #endif
8532
8533 #ifndef DOUBLE_TYPE_SIZE
8534 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8535 #endif
8536
8537 #ifndef LONG_DOUBLE_TYPE_SIZE
8538 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8539 #endif
8540
8541 unsigned long
8542 sparc_type_code (register tree type)
8543 {
8544 register unsigned long qualifiers = 0;
8545 register unsigned shift;
8546
8547 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8548 setting more, since some assemblers will give an error for this. Also,
8549 we must be careful to avoid shifts of 32 bits or more to avoid getting
8550 unpredictable results. */
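/* A worked example: for the C type "int *", the first iteration sees
   the POINTER_TYPE and sets (1 << 6), and the next iteration returns
   from the INTEGER_TYPE case with 0x40 | 4 = 0x44 (signed, precision
   INT_TYPE_SIZE). */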
8551
8552 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8553 {
8554 switch (TREE_CODE (type))
8555 {
8556 case ERROR_MARK:
8557 return qualifiers;
8558
8559 case ARRAY_TYPE:
8560 qualifiers |= (3 << shift);
8561 break;
8562
8563 case FUNCTION_TYPE:
8564 case METHOD_TYPE:
8565 qualifiers |= (2 << shift);
8566 break;
8567
8568 case POINTER_TYPE:
8569 case REFERENCE_TYPE:
8570 case OFFSET_TYPE:
8571 qualifiers |= (1 << shift);
8572 break;
8573
8574 case RECORD_TYPE:
8575 return (qualifiers | 8);
8576
8577 case UNION_TYPE:
8578 case QUAL_UNION_TYPE:
8579 return (qualifiers | 9);
8580
8581 case ENUMERAL_TYPE:
8582 return (qualifiers | 10);
8583
8584 case VOID_TYPE:
8585 return (qualifiers | 16);
8586
8587 case INTEGER_TYPE:
8588 /* If this is a range type, consider it to be the underlying
8589 type. */
8590 if (TREE_TYPE (type) != 0)
8591 break;
8592
8593 /* Carefully distinguish all the standard types of C,
8594 without messing up if the language is not C. We do this by
8595 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8596 look at both the names and the above fields, but that's redundant.
8597 Any type whose size is between two C types will be considered
8598 to be the wider of the two types. Also, we do not have a
8599 special code to use for "long long", so anything wider than
8600 long is treated the same. Note that we can't distinguish
8601 between "int" and "long" in this code if they are the same
8602 size, but that's fine, since neither can the assembler. */
8603
8604 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8605 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8606
8607 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8608 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8609
8610 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8611 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8612
8613 else
8614 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8615
8616 case REAL_TYPE:
8617 /* If this is a range type, consider it to be the underlying
8618 type. */
8619 if (TREE_TYPE (type) != 0)
8620 break;
8621
8622 /* Carefully distinguish all the standard types of C,
8623 without messing up if the language is not C. */
8624
8625 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8626 return (qualifiers | 6);
8627
8628 else
8629 return (qualifiers | 7);
8630
8631 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8632 /* ??? We need to distinguish between double and float complex types,
8633 but I don't know how yet because I can't reach this code from
8634 existing front-ends. */
8635 return (qualifiers | 7); /* Who knows? */
8636
8637 case VECTOR_TYPE:
8638 case BOOLEAN_TYPE: /* Boolean truth value type. */
8639 case LANG_TYPE:
8640 case NULLPTR_TYPE:
8641 return qualifiers;
8642
8643 default:
8644 gcc_unreachable (); /* Not a type! */
8645 }
8646 }
8647
8648 return qualifiers;
8649 }
8650 \f
8651 /* Nested function support. */
8652
8653 /* Emit RTL insns to initialize the variable parts of a trampoline.
8654 FNADDR is an RTX for the address of the function's pure code.
8655 CXT is an RTX for the static chain value for the function.
8656
8657 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8658 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8659 (to store insns). This is a bit excessive. Perhaps a different
8660 mechanism would be better here.
8661
8662 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8663
8664 static void
8665 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8666 {
8667 /* SPARC 32-bit trampoline:
8668
8669 sethi %hi(fn), %g1
8670 sethi %hi(static), %g2
8671 jmp %g1+%lo(fn)
8672 or %g2, %lo(static), %g2
8673
8674 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8675 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
8676 */
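/* Each store below ORs an address field into one of the opcode
   templates above:

     word 0: 0x03000000 | (fnaddr >> 10) sethi %hi(fn), %g1
     word 1: 0x05000000 | (cxt >> 10) sethi %hi(static), %g2
     word 2: 0x81c06000 | (fnaddr & 0x3ff) jmp %g1+%lo(fn)
     word 3: 0x8410a000 | (cxt & 0x3ff) or %g2, %lo(static), %g2 */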
8677
8678 emit_move_insn
8679 (adjust_address (m_tramp, SImode, 0),
8680 expand_binop (SImode, ior_optab,
8681 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8682 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8683 NULL_RTX, 1, OPTAB_DIRECT));
8684
8685 emit_move_insn
8686 (adjust_address (m_tramp, SImode, 4),
8687 expand_binop (SImode, ior_optab,
8688 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8689 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8690 NULL_RTX, 1, OPTAB_DIRECT));
8691
8692 emit_move_insn
8693 (adjust_address (m_tramp, SImode, 8),
8694 expand_binop (SImode, ior_optab,
8695 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8696 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8697 NULL_RTX, 1, OPTAB_DIRECT));
8698
8699 emit_move_insn
8700 (adjust_address (m_tramp, SImode, 12),
8701 expand_binop (SImode, ior_optab,
8702 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8703 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8704 NULL_RTX, 1, OPTAB_DIRECT));
8705
8706 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8707 aligned on a 16-byte boundary, so one flush clears it all. */
8708 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8709 if (sparc_cpu != PROCESSOR_ULTRASPARC
8710 && sparc_cpu != PROCESSOR_ULTRASPARC3
8711 && sparc_cpu != PROCESSOR_NIAGARA
8712 && sparc_cpu != PROCESSOR_NIAGARA2
8713 && sparc_cpu != PROCESSOR_NIAGARA3
8714 && sparc_cpu != PROCESSOR_NIAGARA4)
8715 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8716
8717 /* Call __enable_execute_stack after writing onto the stack to make sure
8718 the stack address is accessible. */
8719 #ifdef HAVE_ENABLE_EXECUTE_STACK
8720 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8721 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8722 #endif
8723
8724 }
8725
8726 /* The 64-bit version is simpler because it makes more sense to load the
8727 values as "immediate" data out of the trampoline. It's also easier since
8728 we can read the PC without clobbering a register. */
8729
8730 static void
8731 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8732 {
8733 /* SPARC 64-bit trampoline:
8734
8735 rd %pc, %g1
8736 ldx [%g1+24], %g5
8737 jmp %g5
8738 ldx [%g1+16], %g5
8739 +16 bytes data
8740 */
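/* The four constants below are the fixed instruction words of this
   template, in order: 0x83414000 is "rd %pc, %g1", 0xca586018 is
   "ldx [%g1+24], %g5", 0x81c14000 is "jmp %g5" and 0xca586010 is the
   delay-slot "ldx [%g1+16], %g5". */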
8741
8742 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8743 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8744 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8745 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8746 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8747 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8748 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8749 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8750 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8751 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8752 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8753
8754 if (sparc_cpu != PROCESSOR_ULTRASPARC
8755 && sparc_cpu != PROCESSOR_ULTRASPARC3
8756 && sparc_cpu != PROCESSOR_NIAGARA
8757 && sparc_cpu != PROCESSOR_NIAGARA2
8758 && sparc_cpu != PROCESSOR_NIAGARA3
8759 && sparc_cpu != PROCESSOR_NIAGARA4)
8760 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8761
8762 /* Call __enable_execute_stack after writing onto the stack to make sure
8763 the stack address is accessible. */
8764 #ifdef HAVE_ENABLE_EXECUTE_STACK
8765 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8766 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8767 #endif
8768 }
8769
8770 /* Worker for TARGET_TRAMPOLINE_INIT. */
8771
8772 static void
8773 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8774 {
8775 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8776 cxt = force_reg (Pmode, cxt);
8777 if (TARGET_ARCH64)
8778 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8779 else
8780 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8781 }
8782 \f
8783 /* Adjust the cost of a scheduling dependency. Return the new cost of
8784 a dependency LINK of INSN on DEP_INSN. COST is the current cost. */
8785
8786 static int
8787 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8788 {
8789 enum attr_type insn_type;
8790
8791 if (! recog_memoized (insn))
8792 return 0;
8793
8794 insn_type = get_attr_type (insn);
8795
8796 if (REG_NOTE_KIND (link) == 0)
8797 {
8798 /* Data dependency; DEP_INSN writes a register that INSN reads some
8799 cycles later. */
8800
8801 /* If a load, then the dependence must be on the memory address;
8802 add an extra "cycle". Note that the cost could be two cycles
8803 if the reg was written late in an instruction group; we cannot tell
8804 here. */
8805 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8806 return cost + 3;
8807
8808 /* Get the delay only if the address of the store is the dependence. */
8809 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8810 {
8811 rtx pat = PATTERN (insn);
8812 rtx dep_pat = PATTERN (dep_insn);
8813
8814 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8815 return cost; /* This should not happen! */
8816
8817 /* The dependency between the two instructions was on the data that
8818 is being stored. Assume that this implies that the address of the
8819 store is not dependent. */
8820 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8821 return cost;
8822
8823 return cost + 3; /* An approximation. */
8824 }
8825
8826 /* A shift instruction cannot receive its data from an instruction
8827 in the same cycle; add a one cycle penalty. */
8828 if (insn_type == TYPE_SHIFT)
8829 return cost + 3; /* Split before cascade into shift. */
8830 }
8831 else
8832 {
8833 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8834 INSN writes some cycles later. */
8835
8836 /* These are only significant for the fpu unit; writing a fp reg before
8837 the fpu has finished with it stalls the processor. */
8838
8839 /* Reusing an integer register causes no problems. */
8840 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8841 return 0;
8842 }
8843
8844 return cost;
8845 }
8846
8847 static int
8848 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8849 {
8850 enum attr_type insn_type, dep_type;
8851 rtx pat = PATTERN (insn);
8852 rtx dep_pat = PATTERN (dep_insn);
8853
8854 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8855 return cost;
8856
8857 insn_type = get_attr_type (insn);
8858 dep_type = get_attr_type (dep_insn);
8859
8860 switch (REG_NOTE_KIND (link))
8861 {
8862 case 0:
8863 /* Data dependency; DEP_INSN writes a register that INSN reads some
8864 cycles later. */
8865
8866 switch (insn_type)
8867 {
8868 case TYPE_STORE:
8869 case TYPE_FPSTORE:
8870 /* Get the delay iff the address of the store is the dependence. */
8871 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8872 return cost;
8873
8874 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8875 return cost;
8876 return cost + 3;
8877
8878 case TYPE_LOAD:
8879 case TYPE_SLOAD:
8880 case TYPE_FPLOAD:
8881 /* If a load, then the dependence must be on the memory address. If
8882 the addresses aren't equal, then it might be a false dependency. */
8883 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8884 {
8885 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8886 || GET_CODE (SET_DEST (dep_pat)) != MEM
8887 || GET_CODE (SET_SRC (pat)) != MEM
8888 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8889 XEXP (SET_SRC (pat), 0)))
8890 return cost + 2;
8891
8892 return cost + 8;
8893 }
8894 break;
8895
8896 case TYPE_BRANCH:
8897 /* Compare to branch latency is 0. There is no benefit from
8898 separating compare and branch. */
8899 if (dep_type == TYPE_COMPARE)
8900 return 0;
8901 /* Floating point compare to branch latency is less than
8902 compare to conditional move. */
8903 if (dep_type == TYPE_FPCMP)
8904 return cost - 1;
8905 break;
8906 default:
8907 break;
8908 }
8909 break;
8910
8911 case REG_DEP_ANTI:
8912 /* Anti-dependencies only penalize the fpu unit. */
8913 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8914 return 0;
8915 break;
8916
8917 default:
8918 break;
8919 }
8920
8921 return cost;
8922 }
8923
8924 static int
8925 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8926 {
8927 switch (sparc_cpu)
8928 {
8929 case PROCESSOR_SUPERSPARC:
8930 cost = supersparc_adjust_cost (insn, link, dep, cost);
8931 break;
8932 case PROCESSOR_HYPERSPARC:
8933 case PROCESSOR_SPARCLITE86X:
8934 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8935 break;
8936 default:
8937 break;
8938 }
8939 return cost;
8940 }
8941
8942 static void
8943 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8944 int sched_verbose ATTRIBUTE_UNUSED,
8945 int max_ready ATTRIBUTE_UNUSED)
8946 {}
8947
8948 static int
8949 sparc_use_sched_lookahead (void)
8950 {
8951 if (sparc_cpu == PROCESSOR_NIAGARA
8952 || sparc_cpu == PROCESSOR_NIAGARA2
8953 || sparc_cpu == PROCESSOR_NIAGARA3)
8954 return 0;
8955 if (sparc_cpu == PROCESSOR_NIAGARA4)
8956 return 2;
8957 if (sparc_cpu == PROCESSOR_ULTRASPARC
8958 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8959 return 4;
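/* Test membership in the remaining three-processor set with a single
   bit-mask comparison. */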
8960 if ((1 << sparc_cpu) &
8961 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8962 (1 << PROCESSOR_SPARCLITE86X)))
8963 return 3;
8964 return 0;
8965 }
8966
8967 static int
8968 sparc_issue_rate (void)
8969 {
8970 switch (sparc_cpu)
8971 {
8972 case PROCESSOR_NIAGARA:
8973 case PROCESSOR_NIAGARA2:
8974 case PROCESSOR_NIAGARA3:
8975 default:
8976 return 1;
8977 case PROCESSOR_NIAGARA4:
8978 case PROCESSOR_V9:
8979 /* Assume V9 processors are capable of at least dual-issue. */
8980 return 2;
8981 case PROCESSOR_SUPERSPARC:
8982 return 3;
8983 case PROCESSOR_HYPERSPARC:
8984 case PROCESSOR_SPARCLITE86X:
8985 return 2;
8986 case PROCESSOR_ULTRASPARC:
8987 case PROCESSOR_ULTRASPARC3:
8988 return 4;
8989 }
8990 }
8991
8992 static int
8993 set_extends (rtx insn)
8994 {
8995 register rtx pat = PATTERN (insn);
8996
8997 switch (GET_CODE (SET_SRC (pat)))
8998 {
8999 /* Load and some shift instructions zero-extend. */
9000 case MEM:
9001 case ZERO_EXTEND:
9002 /* sethi clears the high bits. */
9003 case HIGH:
9004 /* LO_SUM is used with sethi; sethi clears the high
9005 bits and the values used with lo_sum are positive. */
9006 case LO_SUM:
9007 /* A store-flag instruction stores 0 or 1. */
9008 case LT: case LTU:
9009 case GT: case GTU:
9010 case LE: case LEU:
9011 case GE: case GEU:
9012 case EQ:
9013 case NE:
9014 return 1;
9015 case AND:
9016 {
9017 rtx op0 = XEXP (SET_SRC (pat), 0);
9018 rtx op1 = XEXP (SET_SRC (pat), 1);
9019 if (GET_CODE (op1) == CONST_INT)
9020 return INTVAL (op1) >= 0;
9021 if (GET_CODE (op0) != REG)
9022 return 0;
9023 if (sparc_check_64 (op0, insn) == 1)
9024 return 1;
9025 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9026 }
9027 case IOR:
9028 case XOR:
9029 {
9030 rtx op0 = XEXP (SET_SRC (pat), 0);
9031 rtx op1 = XEXP (SET_SRC (pat), 1);
9032 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
9033 return 0;
9034 if (GET_CODE (op1) == CONST_INT)
9035 return INTVAL (op1) >= 0;
9036 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9037 }
9038 case LSHIFTRT:
9039 return GET_MODE (SET_SRC (pat)) == SImode;
9040 /* Positive integers leave the high bits zero. */
9041 case CONST_DOUBLE:
9042 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
9043 case CONST_INT:
9044 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
9045 case ASHIFTRT:
9046 case SIGN_EXTEND:
9047 return - (GET_MODE (SET_SRC (pat)) == SImode);
9048 case REG:
9049 return sparc_check_64 (SET_SRC (pat), insn);
9050 default:
9051 return 0;
9052 }
9053 }
9054
9055 /* We _ought_ to have only one kind per function, but... */
9056 static GTY(()) rtx sparc_addr_diff_list;
9057 static GTY(()) rtx sparc_addr_list;
9058
9059 void
9060 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
9061 {
9062 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9063 if (diff)
9064 sparc_addr_diff_list
9065 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
9066 else
9067 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
9068 }
9069
9070 static void
9071 sparc_output_addr_vec (rtx vec)
9072 {
9073 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9074 int idx, vlen = XVECLEN (body, 0);
9075
9076 #ifdef ASM_OUTPUT_ADDR_VEC_START
9077 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9078 #endif
9079
9080 #ifdef ASM_OUTPUT_CASE_LABEL
9081 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9082 NEXT_INSN (lab));
9083 #else
9084 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9085 #endif
9086
9087 for (idx = 0; idx < vlen; idx++)
9088 {
9089 ASM_OUTPUT_ADDR_VEC_ELT
9090 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9091 }
9092
9093 #ifdef ASM_OUTPUT_ADDR_VEC_END
9094 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9095 #endif
9096 }
9097
9098 static void
9099 sparc_output_addr_diff_vec (rtx vec)
9100 {
9101 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9102 rtx base = XEXP (XEXP (body, 0), 0);
9103 int idx, vlen = XVECLEN (body, 1);
9104
9105 #ifdef ASM_OUTPUT_ADDR_VEC_START
9106 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9107 #endif
9108
9109 #ifdef ASM_OUTPUT_CASE_LABEL
9110 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9111 NEXT_INSN (lab));
9112 #else
9113 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9114 #endif
9115
9116 for (idx = 0; idx < vlen; idx++)
9117 {
9118 ASM_OUTPUT_ADDR_DIFF_ELT
9119 (asm_out_file,
9120 body,
9121 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
9122 CODE_LABEL_NUMBER (base));
9123 }
9124
9125 #ifdef ASM_OUTPUT_ADDR_VEC_END
9126 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9127 #endif
9128 }
9129
9130 static void
9131 sparc_output_deferred_case_vectors (void)
9132 {
9133 rtx t;
9134 int align;
9135
9136 if (sparc_addr_list == NULL_RTX
9137 && sparc_addr_diff_list == NULL_RTX)
9138 return;
9139
9140 /* Align to cache line in the function's code section. */
9141 switch_to_section (current_function_section ());
9142
9143 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9144 if (align > 0)
9145 ASM_OUTPUT_ALIGN (asm_out_file, align);
9146
9147 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
9148 sparc_output_addr_vec (XEXP (t, 0));
9149 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
9150 sparc_output_addr_diff_vec (XEXP (t, 0));
9151
9152 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
9153 }
9154
9155 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
9156 unknown. Return 1 if the high bits are zero, -1 if the register is
9157 sign extended. */
9158 int
9159 sparc_check_64 (rtx x, rtx insn)
9160 {
9161 /* If a register is set only once it is safe to ignore insns this
9162 code does not know how to handle. The loop will either recognize
9163 the single set and return the correct value or fail to recognize
9164 it and return 0. */
9165 int set_once = 0;
9166 rtx y = x;
9167
9168 gcc_assert (GET_CODE (x) == REG);
9169
9170 if (GET_MODE (x) == DImode)
9171 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
9172
9173 if (flag_expensive_optimizations
9174 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
9175 set_once = 1;
9176
9177 if (insn == 0)
9178 {
9179 if (set_once)
9180 insn = get_last_insn_anywhere ();
9181 else
9182 return 0;
9183 }
9184
9185 while ((insn = PREV_INSN (insn)))
9186 {
9187 switch (GET_CODE (insn))
9188 {
9189 case JUMP_INSN:
9190 case NOTE:
9191 break;
9192 case CODE_LABEL:
9193 case CALL_INSN:
9194 default:
9195 if (! set_once)
9196 return 0;
9197 break;
9198 case INSN:
9199 {
9200 rtx pat = PATTERN (insn);
9201 if (GET_CODE (pat) != SET)
9202 return 0;
9203 if (rtx_equal_p (x, SET_DEST (pat)))
9204 return set_extends (insn);
9205 if (y && rtx_equal_p (y, SET_DEST (pat)))
9206 return set_extends (insn);
9207 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
9208 return 0;
9209 }
9210 }
9211 }
9212 return 0;
9213 }
9214
9215 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
9216 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
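/* With the scratch-register alternative, the routine assembles a
   sequence along these lines (illustrative, with sllx as OPCODE):

     sllx %H1, 32, %3 ! high word into bits 63-32 of the scratch
     srl %L1, 0, %L1 ! zero-extend the low word if need be
     or %L1, %3, %3 ! whole 64-bit value now in %3
     sllx %3, %2, %3 ! the shift itself
     srlx %3, 32, %H0 ! split the result back into a pair
     mov %3, %L0 */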
9217
9218 const char *
9219 output_v8plus_shift (rtx insn, rtx *operands, const char *opcode)
9220 {
9221 static char asm_code[60];
9222
9223 /* The scratch register is only required when the destination
9224 register is not a 64-bit global or out register. */
9225 if (which_alternative != 2)
9226 operands[3] = operands[0];
9227
9228 /* We can only shift by constants <= 63. */
9229 if (GET_CODE (operands[2]) == CONST_INT)
9230 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
9231
9232 if (GET_CODE (operands[1]) == CONST_INT)
9233 {
9234 output_asm_insn ("mov\t%1, %3", operands);
9235 }
9236 else
9237 {
9238 output_asm_insn ("sllx\t%H1, 32, %3", operands);
9239 if (sparc_check_64 (operands[1], insn) <= 0)
9240 output_asm_insn ("srl\t%L1, 0, %L1", operands);
9241 output_asm_insn ("or\t%L1, %3, %3", operands);
9242 }
9243
9244 strcpy (asm_code, opcode);
9245
9246 if (which_alternative != 2)
9247 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
9248 else
9249 return
9250 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
9251 }
9252 \f
9253 /* Output rtl to increment the profiler label LABELNO
9254 for profiling a function entry. */
9255
9256 void
9257 sparc_profile_hook (int labelno)
9258 {
9259 char buf[32];
9260 rtx lab, fun;
9261
9262 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
9263 if (NO_PROFILE_COUNTERS)
9264 {
9265 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
9266 }
9267 else
9268 {
9269 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9270 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
9271 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
9272 }
9273 }
9274 \f
9275 #ifdef TARGET_SOLARIS
9276 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9277
9278 static void
9279 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9280 tree decl ATTRIBUTE_UNUSED)
9281 {
9282 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
9283 {
9284 solaris_elf_asm_comdat_section (name, flags, decl);
9285 return;
9286 }
9287
9288 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9289
9290 if (!(flags & SECTION_DEBUG))
9291 fputs (",#alloc", asm_out_file);
9292 if (flags & SECTION_WRITE)
9293 fputs (",#write", asm_out_file);
9294 if (flags & SECTION_TLS)
9295 fputs (",#tls", asm_out_file);
9296 if (flags & SECTION_CODE)
9297 fputs (",#execinstr", asm_out_file);
9298
9299 /* ??? Handle SECTION_BSS. */
9300
9301 fputc ('\n', asm_out_file);
9302 }
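/* For example, a writable TLS data section comes out as:

	.section "name",#alloc,#write,#tls  */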
9303 #endif /* TARGET_SOLARIS */
9304
9305 /* We do not allow indirect calls to be optimized into sibling calls.
9306
9307 We cannot use sibling calls when delayed branches are disabled
9308 because they will likely require the call delay slot to be filled.
9309
9310 Also, on SPARC 32-bit we cannot emit a sibling call when the
9311 current function returns a structure. This is because the "unimp
9312 after call" convention would cause the callee to return to the
9313 wrong place. The generic code already disallows cases where the
9314 function being called returns a structure.
9315
9316 It may seem strange how this last case could occur. Usually there
9317 is code after the call which jumps to epilogue code which dumps the
9318 return value into the struct return area. That ought to invalidate
9319 the sibling call, right? Well, in the C++ case we can end up passing
9320 the pointer to the struct return area to a constructor (which returns
9321 void) and then nothing else happens. Such a sibling call would look
9322 valid without the added check here.
9323
9324 VxWorks PIC PLT entries require the global pointer to be initialized
9325 on entry. We therefore can't emit sibling calls to them. */
9326 static bool
9327 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9328 {
9329 return (decl
9330 && flag_delayed_branch
9331 && (TARGET_ARCH64 || ! cfun->returns_struct)
9332 && !(TARGET_VXWORKS_RTP
9333 && flag_pic
9334 && !targetm.binds_local_p (decl)));
9335 }
9336 \f
9337 /* libfunc renaming. */
9338
9339 static void
9340 sparc_init_libfuncs (void)
9341 {
9342 if (TARGET_ARCH32)
9343 {
9344 /* Use the subroutines that Sun's library provides for integer
9345 multiply and divide. The `*' prevents an underscore from
9346 being prepended by the compiler. .umul is a little faster
9347 than .mul. */
9348 set_optab_libfunc (smul_optab, SImode, "*.umul");
9349 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9350 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9351 set_optab_libfunc (smod_optab, SImode, "*.rem");
9352 set_optab_libfunc (umod_optab, SImode, "*.urem");
9353
9354 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
9355 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9356 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9357 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9358 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9359 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9360
9361 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
9362 is because with soft-float the SFmode and DFmode sqrt
9363 instructions are absent, so the compiler will notice and try
9364 to use the TFmode sqrt instruction for calls to the builtin
9365 function sqrt, which fails. */
9366 if (TARGET_FPU)
9367 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9368
9369 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9370 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9371 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9372 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9373 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9374 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9375
9376 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9377 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9378 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9379 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9380
9381 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9382 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9383 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9384 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9385
9386 if (DITF_CONVERSION_LIBFUNCS)
9387 {
9388 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9389 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9390 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9391 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9392 }
9393
9394 if (SUN_CONVERSION_LIBFUNCS)
9395 {
9396 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9397 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9398 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9399 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9400 }
9401 }
9402 if (TARGET_ARCH64)
9403 {
9404 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
9405 do not exist in the library. Make sure the compiler does not
9406 emit calls to them by accident. (It should always use the
9407 hardware instructions.) */
9408 set_optab_libfunc (smul_optab, SImode, 0);
9409 set_optab_libfunc (sdiv_optab, SImode, 0);
9410 set_optab_libfunc (udiv_optab, SImode, 0);
9411 set_optab_libfunc (smod_optab, SImode, 0);
9412 set_optab_libfunc (umod_optab, SImode, 0);
9413
9414 if (SUN_INTEGER_MULTIPLY_64)
9415 {
9416 set_optab_libfunc (smul_optab, DImode, "__mul64");
9417 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9418 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9419 set_optab_libfunc (smod_optab, DImode, "__rem64");
9420 set_optab_libfunc (umod_optab, DImode, "__urem64");
9421 }
9422
9423 if (SUN_CONVERSION_LIBFUNCS)
9424 {
9425 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9426 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9427 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9428 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
9429 }
9430 }
9431 }
9432 \f
9433 static tree def_builtin (const char *name, int code, tree type)
9434 {
9435 return add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
9436 NULL_TREE);
9437 }
9438
9439 static tree def_builtin_const (const char *name, int code, tree type)
9440 {
9441 tree t = def_builtin (name, code, type);
9442
9443 if (t)
9444 TREE_READONLY (t) = 1;
9445
9446 return t;
9447 }
9448
9449 /* Implement the TARGET_INIT_BUILTINS target hook.
9450 Create builtin functions for special SPARC instructions. */
9451
9452 static void
9453 sparc_init_builtins (void)
9454 {
9455 if (TARGET_VIS)
9456 sparc_vis_init_builtins ();
9457 }
9458
9459 /* Create builtin functions for VIS 1.0 instructions. */
9460
9461 static void
9462 sparc_vis_init_builtins (void)
9463 {
9464 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9465 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9466 tree v4hi = build_vector_type (intHI_type_node, 4);
9467 tree v2hi = build_vector_type (intHI_type_node, 2);
9468 tree v2si = build_vector_type (intSI_type_node, 2);
9469 tree v1si = build_vector_type (intSI_type_node, 1);
9470
9471 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9472 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9473 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9474 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9475 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9476 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9477 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9478 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9479 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9480 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9481 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9482 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9483 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
9484 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
9485 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9486 v8qi, v8qi,
9487 intDI_type_node, 0);
9488 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
9489 v8qi, v8qi, 0);
9490 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
9491 v8qi, v8qi, 0);
9492 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9493 intDI_type_node,
9494 intDI_type_node, 0);
9495 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
9496 intSI_type_node,
9497 intSI_type_node, 0);
9498 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9499 ptr_type_node,
9500 intSI_type_node, 0);
9501 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9502 ptr_type_node,
9503 intDI_type_node, 0);
9504 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
9505 ptr_type_node,
9506 ptr_type_node, 0);
9507 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
9508 ptr_type_node,
9509 ptr_type_node, 0);
9510 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
9511 v4hi, v4hi, 0);
9512 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
9513 v2si, v2si, 0);
9514 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
9515 v4hi, v4hi, 0);
9516 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
9517 v2si, v2si, 0);
9518 tree void_ftype_di = build_function_type_list (void_type_node,
9519 intDI_type_node, 0);
9520 tree di_ftype_void = build_function_type_list (intDI_type_node,
9521 void_type_node, 0);
9522 tree void_ftype_si = build_function_type_list (void_type_node,
9523 intSI_type_node, 0);
9524 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
9525 float_type_node,
9526 float_type_node, 0);
9527 tree df_ftype_df_df = build_function_type_list (double_type_node,
9528 double_type_node,
9529 double_type_node, 0);
9530
9531 /* Packing and expanding vectors. */
9532 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
9533 v4qi_ftype_v4hi);
9534 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9535 v8qi_ftype_v2si_v8qi);
9536 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9537 v2hi_ftype_v2si);
9538 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
9539 v4hi_ftype_v4qi);
9540 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9541 v8qi_ftype_v4qi_v4qi);
9542
9543 /* Multiplications. */
9544 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9545 v4hi_ftype_v4qi_v4hi);
9546 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9547 v4hi_ftype_v4qi_v2hi);
9548 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9549 v4hi_ftype_v4qi_v2hi);
9550 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9551 v4hi_ftype_v8qi_v4hi);
9552 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9553 v4hi_ftype_v8qi_v4hi);
9554 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9555 v2si_ftype_v4qi_v2hi);
9556 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9557 v2si_ftype_v4qi_v2hi);
9558
9559 /* Data aligning. */
9560 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9561 v4hi_ftype_v4hi_v4hi);
9562 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9563 v8qi_ftype_v8qi_v8qi);
9564 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9565 v2si_ftype_v2si_v2si);
9566 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
9567 di_ftype_di_di);
9568
9569 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
9570 void_ftype_di);
9571 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
9572 di_ftype_void);
9573
9574 if (TARGET_ARCH64)
9575 {
9576 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9577 ptr_ftype_ptr_di);
9578 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
9579 ptr_ftype_ptr_di);
9580 }
9581 else
9582 {
9583 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9584 ptr_ftype_ptr_si);
9585 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
9586 ptr_ftype_ptr_si);
9587 }
9588
9589 /* Pixel distance. */
9590 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9591 di_ftype_v8qi_v8qi_di);
9592
9593 /* Edge handling. */
9594 if (TARGET_ARCH64)
9595 {
9596 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
9597 di_ftype_ptr_ptr);
9598 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
9599 di_ftype_ptr_ptr);
9600 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
9601 di_ftype_ptr_ptr);
9602 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
9603 di_ftype_ptr_ptr);
9604 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
9605 di_ftype_ptr_ptr);
9606 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
9607 di_ftype_ptr_ptr);
9608 if (TARGET_VIS2)
9609 {
9610 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
9611 di_ftype_ptr_ptr);
9612 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
9613 di_ftype_ptr_ptr);
9614 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
9615 di_ftype_ptr_ptr);
9616 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
9617 di_ftype_ptr_ptr);
9618 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
9619 di_ftype_ptr_ptr);
9620 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
9621 di_ftype_ptr_ptr);
9622 }
9623 }
9624 else
9625 {
9626 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
9627 si_ftype_ptr_ptr);
9628 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
9629 si_ftype_ptr_ptr);
9630 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
9631 si_ftype_ptr_ptr);
9632 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
9633 si_ftype_ptr_ptr);
9634 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
9635 si_ftype_ptr_ptr);
9636 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
9637 si_ftype_ptr_ptr);
9638 if (TARGET_VIS2)
9639 {
9640 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
9641 si_ftype_ptr_ptr);
9642 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
9643 si_ftype_ptr_ptr);
9644 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
9645 si_ftype_ptr_ptr);
9646 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
9647 si_ftype_ptr_ptr);
9648 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
9649 si_ftype_ptr_ptr);
9650 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
9651 si_ftype_ptr_ptr);
9652 }
9653 }
9654
9655 /* Pixel compare. */
9656 if (TARGET_ARCH64)
9657 {
9658 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
9659 di_ftype_v4hi_v4hi);
9660 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
9661 di_ftype_v2si_v2si);
9662 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
9663 di_ftype_v4hi_v4hi);
9664 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
9665 di_ftype_v2si_v2si);
9666 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
9667 di_ftype_v4hi_v4hi);
9668 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
9669 di_ftype_v2si_v2si);
9670 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
9671 di_ftype_v4hi_v4hi);
9672 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
9673 di_ftype_v2si_v2si);
9674 }
9675 else
9676 {
9677 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
9678 si_ftype_v4hi_v4hi);
9679 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
9680 si_ftype_v2si_v2si);
9681 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
9682 si_ftype_v4hi_v4hi);
9683 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
9684 si_ftype_v2si_v2si);
9685 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
9686 si_ftype_v4hi_v4hi);
9687 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
9688 si_ftype_v2si_v2si);
9689 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
9690 si_ftype_v4hi_v4hi);
9691 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
9692 si_ftype_v2si_v2si);
9693 }
9694
9695 /* Addition and subtraction. */
9696 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
9697 v4hi_ftype_v4hi_v4hi);
9698 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
9699 v2hi_ftype_v2hi_v2hi);
9700 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
9701 v2si_ftype_v2si_v2si);
9702 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
9703 v1si_ftype_v1si_v1si);
9704 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
9705 v4hi_ftype_v4hi_v4hi);
9706 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
9707 v2hi_ftype_v2hi_v2hi);
9708 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
9709 v2si_ftype_v2si_v2si);
9710 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
9711 v1si_ftype_v1si_v1si);
9712
9713 /* Three-dimensional array addressing. */
9714 if (TARGET_ARCH64)
9715 {
9716 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
9717 di_ftype_di_di);
9718 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
9719 di_ftype_di_di);
9720 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
9721 di_ftype_di_di);
9722 }
9723 else
9724 {
9725 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
9726 si_ftype_si_si);
9727 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
9728 si_ftype_si_si);
9729 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
9730 si_ftype_si_si);
9731 }
9732
9733 if (TARGET_VIS2)
9734 {
9735 /* Byte mask and shuffle */
9736 if (TARGET_ARCH64)
9737 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
9738 di_ftype_di_di);
9739 else
9740 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
9741 si_ftype_si_si);
9742 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
9743 v4hi_ftype_v4hi_v4hi);
9744 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
9745 v8qi_ftype_v8qi_v8qi);
9746 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
9747 v2si_ftype_v2si_v2si);
9748 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
9749 di_ftype_di_di);
9750 }
9751
9752 if (TARGET_VIS3)
9753 {
9754 if (TARGET_ARCH64)
9755 {
9756 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
9757 void_ftype_di);
9758 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
9759 void_ftype_di);
9760 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
9761 void_ftype_di);
9762 }
9763 else
9764 {
9765 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
9766 void_ftype_si);
9767 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
9768 void_ftype_si);
9769 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
9770 void_ftype_si);
9771 }
9772
9773 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
9774 v4hi_ftype_v4hi_v4hi);
9775
9776 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
9777 v4hi_ftype_v4hi_v4hi);
9778 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
9779 v4hi_ftype_v4hi_v4hi);
9780 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
9781 v4hi_ftype_v4hi_v4hi);
9782 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
9783 v4hi_ftype_v4hi_v4hi);
9784 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
9785 v2si_ftype_v2si_v2si);
9786 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
9787 v2si_ftype_v2si_v2si);
9788 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
9789 v2si_ftype_v2si_v2si);
9790 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
9791 v2si_ftype_v2si_v2si);
9792
9793 if (TARGET_ARCH64)
9794 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
9795 di_ftype_v8qi_v8qi);
9796 else
9797 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
9798 si_ftype_v8qi_v8qi);
9799
9800 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
9801 v4hi_ftype_v4hi_v4hi);
9802 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
9803 di_ftype_di_di);
9804 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
9805 di_ftype_di_di);
9806
9807 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
9808 v4hi_ftype_v4hi_v4hi);
9809 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
9810 v2hi_ftype_v2hi_v2hi);
9811 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
9812 v4hi_ftype_v4hi_v4hi);
9813 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
9814 v2hi_ftype_v2hi_v2hi);
9815 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
9816 v2si_ftype_v2si_v2si);
9817 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
9818 v1si_ftype_v1si_v1si);
9819 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
9820 v2si_ftype_v2si_v2si);
9821 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
9822 v1si_ftype_v1si_v1si);
9823
9824 if (TARGET_ARCH64)
9825 {
9826 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
9827 di_ftype_v8qi_v8qi);
9828 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
9829 di_ftype_v8qi_v8qi);
9830 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
9831 di_ftype_v8qi_v8qi);
9832 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
9833 di_ftype_v8qi_v8qi);
9834 }
9835 else
9836 {
9837 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
9838 si_ftype_v8qi_v8qi);
9839 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
9840 si_ftype_v8qi_v8qi);
9841 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
9842 si_ftype_v8qi_v8qi);
9843 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
9844 si_ftype_v8qi_v8qi);
9845 }
9846
9847 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
9848 sf_ftype_sf_sf);
9849 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
9850 df_ftype_df_df);
9851 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
9852 sf_ftype_sf_sf);
9853 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
9854 df_ftype_df_df);
9855 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
9856 sf_ftype_sf_sf);
9857 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
9858 df_ftype_df_df);
9859
9860 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
9861 di_ftype_di_di);
9862 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
9863 di_ftype_di_di);
9864 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
9865 di_ftype_di_di);
9866 }
9867 }
9868
9869 /* Handle TARGET_EXPAND_BUILTIN target hook.
9870 Expand builtin functions for SPARC intrinsics. */
9871
9872 static rtx
9873 sparc_expand_builtin (tree exp, rtx target,
9874 rtx subtarget ATTRIBUTE_UNUSED,
9875 enum machine_mode tmode ATTRIBUTE_UNUSED,
9876 int ignore ATTRIBUTE_UNUSED)
9877 {
9878 tree arg;
9879 call_expr_arg_iterator iter;
9880 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9881 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9882 rtx pat, op[4];
9883 int arg_count = 0;
9884 bool nonvoid;
9885
9886 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9887
9888 if (nonvoid)
9889 {
9890 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9891 if (!target
9892 || GET_MODE (target) != tmode
9893 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9894 op[0] = gen_reg_rtx (tmode);
9895 else
9896 op[0] = target;
9897 }
9898 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9899 {
9900 const struct insn_operand_data *insn_op;
9901 int idx;
9902
9903 if (arg == error_mark_node)
9904 return NULL_RTX;
9905
9906 arg_count++;
9907 idx = arg_count - !nonvoid;
9908 insn_op = &insn_data[icode].operand[idx];
9909 op[arg_count] = expand_normal (arg);
9910
9911 if (insn_op->mode == V1DImode
9912 && GET_MODE (op[arg_count]) == DImode)
9913 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
9914 else if (insn_op->mode == V1SImode
9915 && GET_MODE (op[arg_count]) == SImode)
9916 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
9917
9918 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
9919 insn_op->mode))
9920 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
9921 }
9922
9923 switch (arg_count)
9924 {
9925 case 0:
9926 pat = GEN_FCN (icode) (op[0]);
9927 break;
9928 case 1:
9929 if (nonvoid)
9930 pat = GEN_FCN (icode) (op[0], op[1]);
9931 else
9932 pat = GEN_FCN (icode) (op[1]);
9933 break;
9934 case 2:
9935 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9936 break;
9937 case 3:
9938 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9939 break;
9940 default:
9941 gcc_unreachable ();
9942 }
9943
9944 if (!pat)
9945 return NULL_RTX;
9946
9947 emit_insn (pat);
9948
9949 if (nonvoid)
9950 return op[0];
9951 else
9952 return const0_rtx;
9953 }
9954
9955 static int
9956 sparc_vis_mul8x16 (int e8, int e16)
9957 {
9958 return (e8 * e16 + 128) / 256;
9959 }
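/* The "+ 128" rounds the scaled product to the nearest integer for
   nonnegative products; e.g. e8 == 128 (0.5 as a fraction of 256)
   and e16 == 1000 give (128 * 1000 + 128) / 256 == 500.  */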
9960
9961 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
9962 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
9963
9964 static void
9965 sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
9966 tree cst0, tree cst1)
9967 {
9968 unsigned i, num = VECTOR_CST_NELTS (cst0);
9969 int scale;
9970
9971 switch (fncode)
9972 {
9973 case CODE_FOR_fmul8x16_vis:
9974 for (i = 0; i < num; ++i)
9975 {
9976 int val
9977 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
9978 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
9979 n_elts[i] = build_int_cst (inner_type, val);
9980 }
9981 break;
9982
9983 case CODE_FOR_fmul8x16au_vis:
9984 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
9985
9986 for (i = 0; i < num; ++i)
9987 {
9988 int val
9989 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
9990 scale);
9991 n_elts[i] = build_int_cst (inner_type, val);
9992 }
9993 break;
9994
9995 case CODE_FOR_fmul8x16al_vis:
9996 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
9997
9998 for (i = 0; i < num; ++i)
9999 {
10000 int val
10001 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
10002 scale);
10003 n_elts[i] = build_int_cst (inner_type, val);
10004 }
10005 break;
10006
10007 default:
10008 gcc_unreachable ();
10009 }
10010 }
10011
10012 /* Handle TARGET_FOLD_BUILTIN target hook.
10013 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
10014 result of the function call is ignored. NULL_TREE is returned if the
10015 function could not be folded. */
10016
10017 static tree
10018 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
10019 tree *args, bool ignore)
10020 {
10021 tree arg0, arg1, arg2;
10022 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
10023 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
10024
10025 if (ignore)
10026 {
10027 /* Note that a switch statement instead of the sequence of tests would
10028 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
10029 and that would yield multiple alternatives with identical values. */
10030 if (icode == CODE_FOR_alignaddrsi_vis
10031 || icode == CODE_FOR_alignaddrdi_vis
10032 || icode == CODE_FOR_wrgsr_vis
10033 || icode == CODE_FOR_bmasksi_vis
10034 || icode == CODE_FOR_bmaskdi_vis
10035 || icode == CODE_FOR_cmask8si_vis
10036 || icode == CODE_FOR_cmask8di_vis
10037 || icode == CODE_FOR_cmask16si_vis
10038 || icode == CODE_FOR_cmask16di_vis
10039 || icode == CODE_FOR_cmask32si_vis
10040 || icode == CODE_FOR_cmask32di_vis)
10041 ;
10042 else
10043 return build_zero_cst (rtype);
10044 }
10045
10046 switch (icode)
10047 {
10048 case CODE_FOR_fexpand_vis:
10049 arg0 = args[0];
10050 STRIP_NOPS (arg0);
10051
10052 if (TREE_CODE (arg0) == VECTOR_CST)
10053 {
10054 tree inner_type = TREE_TYPE (rtype);
10055 tree *n_elts;
10056 unsigned i;
10057
10058 n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
10059 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10060 n_elts[i] = build_int_cst (inner_type,
10061 TREE_INT_CST_LOW
10062 (VECTOR_CST_ELT (arg0, i)) << 4);
10063 return build_vector (rtype, n_elts);
10064 }
10065 break;
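	/* For instance, folding __builtin_vis_fexpand on the constant
	   vector {1, 2, 3, 4} yields {16, 32, 48, 64}: each element is
	   shifted left by 4 as above.  */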
10066
10067 case CODE_FOR_fmul8x16_vis:
10068 case CODE_FOR_fmul8x16au_vis:
10069 case CODE_FOR_fmul8x16al_vis:
10070 arg0 = args[0];
10071 arg1 = args[1];
10072 STRIP_NOPS (arg0);
10073 STRIP_NOPS (arg1);
10074
10075 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10076 {
10077 tree inner_type = TREE_TYPE (rtype);
10078 tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
10079 sparc_handle_vis_mul8x16 (n_elts, icode, inner_type, arg0, arg1);
10080 return build_vector (rtype, n_elts);
10081 }
10082 break;
10083
10084 case CODE_FOR_fpmerge_vis:
10085 arg0 = args[0];
10086 arg1 = args[1];
10087 STRIP_NOPS (arg0);
10088 STRIP_NOPS (arg1);
10089
10090 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10091 {
10092 tree *n_elts = XALLOCAVEC (tree, 2 * VECTOR_CST_NELTS (arg0));
10093 unsigned i;
10094 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10095 {
10096 n_elts[2*i] = VECTOR_CST_ELT (arg0, i);
10097 n_elts[2*i+1] = VECTOR_CST_ELT (arg1, i);
10098 }
10099
10100 return build_vector (rtype, n_elts);
10101 }
10102 break;
10103
10104 case CODE_FOR_pdist_vis:
10105 arg0 = args[0];
10106 arg1 = args[1];
10107 arg2 = args[2];
10108 STRIP_NOPS (arg0);
10109 STRIP_NOPS (arg1);
10110 STRIP_NOPS (arg2);
10111
10112 if (TREE_CODE (arg0) == VECTOR_CST
10113 && TREE_CODE (arg1) == VECTOR_CST
10114 && TREE_CODE (arg2) == INTEGER_CST)
10115 {
10116 int overflow = 0;
10117 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
10118 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
10119 unsigned i;
10120
10121 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10122 {
10123 unsigned HOST_WIDE_INT
10124 low0 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i)),
10125 low1 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg1, i));
10126 HOST_WIDE_INT
10127 high0 = TREE_INT_CST_HIGH (VECTOR_CST_ELT (arg0, i));
10128 HOST_WIDE_INT
10129 high1 = TREE_INT_CST_HIGH (VECTOR_CST_ELT (arg1, i));
10130
10131 unsigned HOST_WIDE_INT l;
10132 HOST_WIDE_INT h;
10133
10134 overflow |= neg_double (low1, high1, &l, &h);
10135 overflow |= add_double (low0, high0, l, h, &l, &h);
10136 if (h < 0)
10137 overflow |= neg_double (l, h, &l, &h);
10138
10139 overflow |= add_double (low, high, l, h, &low, &high);
10140 }
10141
10142 gcc_assert (overflow == 0);
10143
10144 return build_int_cst_wide (rtype, low, high);
10145 }
10146
10147 default:
10148 break;
10149 }
10150
10151 return NULL_TREE;
10152 }
10153 \f
10154 /* ??? This duplicates information provided to the compiler by the
10155 ??? scheduler description. Some day, teach genautomata to output
10156 ??? the latencies and then CSE will just use that. */
10157
10158 static bool
10159 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
10160 int *total, bool speed ATTRIBUTE_UNUSED)
10161 {
10162 enum machine_mode mode = GET_MODE (x);
10163 bool float_mode_p = FLOAT_MODE_P (mode);
10164
10165 switch (code)
10166 {
10167 case CONST_INT:
10168 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
10169 {
10170 *total = 0;
10171 return true;
10172 }
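	/* These are the constants that fit in the signed 13-bit
	   immediate field of most SPARC instructions: 4095 is free
	   here, while 4096 falls through and is costed like HIGH.  */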
10173 /* FALLTHRU */
10174
10175 case HIGH:
10176 *total = 2;
10177 return true;
10178
10179 case CONST:
10180 case LABEL_REF:
10181 case SYMBOL_REF:
10182 *total = 4;
10183 return true;
10184
10185 case CONST_DOUBLE:
10186 if (GET_MODE (x) == VOIDmode
10187 && ((CONST_DOUBLE_HIGH (x) == 0
10188 && CONST_DOUBLE_LOW (x) < 0x1000)
10189 || (CONST_DOUBLE_HIGH (x) == -1
10190 && CONST_DOUBLE_LOW (x) < 0
10191 && CONST_DOUBLE_LOW (x) >= -0x1000)))
10192 *total = 0;
10193 else
10194 *total = 8;
10195 return true;
10196
10197 case MEM:
10198 /* If outer-code was a sign or zero extension, a cost
10199 of COSTS_N_INSNS (1) was already added in. This is
10200 why we are subtracting it back out. */
10201 if (outer_code == ZERO_EXTEND)
10202 {
10203 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
10204 }
10205 else if (outer_code == SIGN_EXTEND)
10206 {
10207 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
10208 }
10209 else if (float_mode_p)
10210 {
10211 *total = sparc_costs->float_load;
10212 }
10213 else
10214 {
10215 *total = sparc_costs->int_load;
10216 }
10217
10218 return true;
10219
10220 case PLUS:
10221 case MINUS:
10222 if (float_mode_p)
10223 *total = sparc_costs->float_plusminus;
10224 else
10225 *total = COSTS_N_INSNS (1);
10226 return false;
10227
10228 case FMA:
10229 {
10230 rtx sub;
10231
10232 gcc_assert (float_mode_p);
10233 *total = sparc_costs->float_mul;
10234
10235 sub = XEXP (x, 0);
10236 if (GET_CODE (sub) == NEG)
10237 sub = XEXP (sub, 0);
10238 *total += rtx_cost (sub, FMA, 0, speed);
10239
10240 sub = XEXP (x, 2);
10241 if (GET_CODE (sub) == NEG)
10242 sub = XEXP (sub, 0);
10243 *total += rtx_cost (sub, FMA, 2, speed);
10244 return true;
10245 }
10246
10247 case MULT:
10248 if (float_mode_p)
10249 *total = sparc_costs->float_mul;
10250 else if (! TARGET_HARD_MUL)
10251 *total = COSTS_N_INSNS (25);
10252 else
10253 {
10254 int bit_cost;
10255
10256 bit_cost = 0;
10257 if (sparc_costs->int_mul_bit_factor)
10258 {
10259 int nbits;
10260
10261 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
10262 {
10263 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
10264 for (nbits = 0; value != 0; value &= value - 1)
10265 nbits++;
10266 }
10267 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10268 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10269 {
10270 rtx x1 = XEXP (x, 1);
10271 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10272 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10273
10274 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10275 nbits++;
10276 for (; value2 != 0; value2 &= value2 - 1)
10277 nbits++;
10278 }
10279 else
10280 nbits = 7;
10281
10282 if (nbits < 3)
10283 nbits = 3;
10284 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10285 bit_cost = COSTS_N_INSNS (bit_cost);
10286 }
10287
10288 if (mode == DImode)
10289 *total = sparc_costs->int_mulX + bit_cost;
10290 else
10291 *total = sparc_costs->int_mul + bit_cost;
10292 }
10293 return false;
10294
10295 case ASHIFT:
10296 case ASHIFTRT:
10297 case LSHIFTRT:
10298 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10299 return false;
10300
10301 case DIV:
10302 case UDIV:
10303 case MOD:
10304 case UMOD:
10305 if (float_mode_p)
10306 {
10307 if (mode == DFmode)
10308 *total = sparc_costs->float_div_df;
10309 else
10310 *total = sparc_costs->float_div_sf;
10311 }
10312 else
10313 {
10314 if (mode == DImode)
10315 *total = sparc_costs->int_divX;
10316 else
10317 *total = sparc_costs->int_div;
10318 }
10319 return false;
10320
10321 case NEG:
10322 if (! float_mode_p)
10323 {
10324 *total = COSTS_N_INSNS (1);
10325 return false;
10326 }
10327 /* FALLTHRU */
10328
10329 case ABS:
10330 case FLOAT:
10331 case UNSIGNED_FLOAT:
10332 case FIX:
10333 case UNSIGNED_FIX:
10334 case FLOAT_EXTEND:
10335 case FLOAT_TRUNCATE:
10336 *total = sparc_costs->float_move;
10337 return false;
10338
10339 case SQRT:
10340 if (mode == DFmode)
10341 *total = sparc_costs->float_sqrt_df;
10342 else
10343 *total = sparc_costs->float_sqrt_sf;
10344 return false;
10345
10346 case COMPARE:
10347 if (float_mode_p)
10348 *total = sparc_costs->float_cmp;
10349 else
10350 *total = COSTS_N_INSNS (1);
10351 return false;
10352
10353 case IF_THEN_ELSE:
10354 if (float_mode_p)
10355 *total = sparc_costs->float_cmove;
10356 else
10357 *total = sparc_costs->int_cmove;
10358 return false;
10359
10360 case IOR:
10361 /* Handle the NAND vector patterns. */
10362 if (sparc_vector_mode_supported_p (GET_MODE (x))
10363 && GET_CODE (XEXP (x, 0)) == NOT
10364 && GET_CODE (XEXP (x, 1)) == NOT)
10365 {
10366 *total = COSTS_N_INSNS (1);
10367 return true;
10368 }
10369 else
10370 return false;
10371
10372 default:
10373 return false;
10374 }
10375 }
10376
10377 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10378
10379 static inline bool
10380 general_or_i64_p (reg_class_t rclass)
10381 {
10382 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10383 }
10384
10385 /* Implement TARGET_REGISTER_MOVE_COST. */
10386
10387 static int
10388 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10389 reg_class_t from, reg_class_t to)
10390 {
10391 bool need_memory = false;
10392
10393 if (from == FPCC_REGS || to == FPCC_REGS)
10394 need_memory = true;
10395 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10396 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
10397 {
10398 if (TARGET_VIS3)
10399 {
10400 int size = GET_MODE_SIZE (mode);
10401 if (size == 8 || size == 4)
10402 {
10403 if (! TARGET_ARCH32 || size == 4)
10404 return 4;
10405 else
10406 return 6;
10407 }
10408 }
10409 need_memory = true;
10410 }
10411
10412 if (need_memory)
10413 {
10414 if (sparc_cpu == PROCESSOR_ULTRASPARC
10415 || sparc_cpu == PROCESSOR_ULTRASPARC3
10416 || sparc_cpu == PROCESSOR_NIAGARA
10417 || sparc_cpu == PROCESSOR_NIAGARA2
10418 || sparc_cpu == PROCESSOR_NIAGARA3
10419 || sparc_cpu == PROCESSOR_NIAGARA4)
10420 return 12;
10421
10422 return 6;
10423 }
10424
10425 return 2;
10426 }
10427
10428 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10429 This is achieved by means of a manual dynamic stack space allocation in
10430 the current frame. We make the assumption that SEQ doesn't contain any
10431 function calls, with the possible exception of calls to the GOT helper. */
10432
10433 static void
10434 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10435 {
10436 /* We must preserve the lowest 16 words for the register save area. */
10437 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10438 /* We really need only 2 words of fresh stack space. */
10439 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10440
10441 rtx slot
10442 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
10443 SPARC_STACK_BIAS + offset));
10444
10445 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
10446 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10447 if (reg2)
10448 emit_insn (gen_rtx_SET (VOIDmode,
10449 adjust_address (slot, word_mode, UNITS_PER_WORD),
10450 reg2));
10451 emit_insn (seq);
10452 if (reg2)
10453 emit_insn (gen_rtx_SET (VOIDmode,
10454 reg2,
10455 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10456 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10457 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10458 }
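/* As a concrete sketch, in 32-bit mode (UNITS_PER_WORD == 4 and a
   zero SPARC_STACK_BIAS) the 16-word register save area occupies
   %sp..%sp+63, so REG is saved at %sp+64 and REG2, when present,
   at %sp+68.  */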
10459
10460 /* Output the assembler code for a thunk function. THUNK_DECL is the
10461 declaration for the thunk function itself, FUNCTION is the decl for
10462 the target function. DELTA is an immediate constant offset to be
10463 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10464 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10465
10466 static void
10467 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10468 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10469 tree function)
10470 {
10471 rtx this_rtx, insn, funexp;
10472 unsigned int int_arg_first;
10473
10474 reload_completed = 1;
10475 epilogue_completed = 1;
10476
10477 emit_note (NOTE_INSN_PROLOGUE_END);
10478
10479 if (TARGET_FLAT)
10480 {
10481 sparc_leaf_function_p = 1;
10482
10483 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10484 }
10485 else if (flag_delayed_branch)
10486 {
10487 /* We will emit a regular sibcall below, so we need to instruct
10488 output_sibcall that we are in a leaf function. */
10489 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
10490
10491 /* This will cause final.c to invoke leaf_renumber_regs so we
10492 must behave as if we were in a not-yet-leafified function. */
10493 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
10494 }
10495 else
10496 {
10497 /* We will emit the sibcall manually below, so we will need to
10498 manually spill non-leaf registers. */
10499 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
10500
10501 /* We really are in a leaf function. */
10502 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10503 }
10504
10505 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10506 returns a structure, the structure return pointer is there instead. */
10507 if (TARGET_ARCH64
10508 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10509 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
10510 else
10511 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
10512
10513 /* Add DELTA. When possible use a plain add, otherwise load it into
10514 a register first. */
10515 if (delta)
10516 {
10517 rtx delta_rtx = GEN_INT (delta);
10518
10519 if (! SPARC_SIMM13_P (delta))
10520 {
10521 rtx scratch = gen_rtx_REG (Pmode, 1);
10522 emit_move_insn (scratch, delta_rtx);
10523 delta_rtx = scratch;
10524 }
10525
10526 /* THIS_RTX += DELTA. */
10527 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
10528 }
10529
10530 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10531 if (vcall_offset)
10532 {
10533 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10534 rtx scratch = gen_rtx_REG (Pmode, 1);
10535
10536 gcc_assert (vcall_offset < 0);
10537
10538 /* SCRATCH = *THIS_RTX. */
10539 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
10540
10541 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10542 may not have any available scratch register at this point. */
10543 if (SPARC_SIMM13_P (vcall_offset))
10544 ;
10545 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
10546 else if (! fixed_regs[5]
10547 /* The below sequence is made up of at least 2 insns,
10548 while the default method may need only one. */
10549 && vcall_offset < -8192)
10550 {
10551 rtx scratch2 = gen_rtx_REG (Pmode, 5);
10552 emit_move_insn (scratch2, vcall_offset_rtx);
10553 vcall_offset_rtx = scratch2;
10554 }
10555 else
10556 {
10557 rtx increment = GEN_INT (-4096);
10558
10559 /* VCALL_OFFSET is a negative number whose typical range can be
10560 estimated as -32768..0 in 32-bit mode. In almost all cases
10561 it is therefore cheaper to emit multiple add insns than
10562 spilling and loading the constant into a register (at least
10563 6 insns). */
10564 while (! SPARC_SIMM13_P (vcall_offset))
10565 {
10566 emit_insn (gen_add2_insn (scratch, increment));
10567 vcall_offset += 4096;
10568 }
10569 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
10570 }
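	  /* E.g. for VCALL_OFFSET == -32768, seven adds of -4096 are
	     emitted and the residual -4096, which does satisfy
	     SPARC_SIMM13_P, becomes the displacement of the load below.  */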
10571
10572 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10573 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
10574 gen_rtx_PLUS (Pmode,
10575 scratch,
10576 vcall_offset_rtx)));
10577
10578 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10579 emit_insn (gen_add2_insn (this_rtx, scratch));
10580 }
10581
10582 /* Generate a tail call to the target function. */
10583 if (! TREE_USED (function))
10584 {
10585 assemble_external (function);
10586 TREE_USED (function) = 1;
10587 }
10588 funexp = XEXP (DECL_RTL (function), 0);
10589
10590 if (flag_delayed_branch)
10591 {
10592 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10593 insn = emit_call_insn (gen_sibcall (funexp));
10594 SIBLING_CALL_P (insn) = 1;
10595 }
10596 else
10597 {
10598 /* The hoops we have to jump through in order to generate a sibcall
10599 without using delay slots... */
10600 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
10601
10602 if (flag_pic)
10603 {
10604 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
10605 start_sequence ();
10606 load_got_register (); /* clobbers %o7 */
10607 scratch = sparc_legitimize_pic_address (funexp, scratch);
10608 seq = get_insns ();
10609 end_sequence ();
10610 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
10611 }
10612 else if (TARGET_ARCH32)
10613 {
10614 emit_insn (gen_rtx_SET (VOIDmode,
10615 scratch,
10616 gen_rtx_HIGH (SImode, funexp)));
10617 emit_insn (gen_rtx_SET (VOIDmode,
10618 scratch,
10619 gen_rtx_LO_SUM (SImode, scratch, funexp)));
10620 }
10621 else /* TARGET_ARCH64 */
10622 {
10623 switch (sparc_cmodel)
10624 {
10625 case CM_MEDLOW:
10626 case CM_MEDMID:
10627 /* The destination can serve as a temporary. */
10628 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
10629 break;
10630
10631 case CM_MEDANY:
10632 case CM_EMBMEDANY:
10633 /* The destination cannot serve as a temporary. */
10634 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
10635 start_sequence ();
10636 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
10637 seq = get_insns ();
10638 end_sequence ();
10639 emit_and_preserve (seq, spill_reg, 0);
10640 break;
10641
10642 default:
10643 gcc_unreachable ();
10644 }
10645 }
10646
10647 emit_jump_insn (gen_indirect_jump (scratch));
10648 }
10649
10650 emit_barrier ();
10651
10652 /* Run just enough of rest_of_compilation to get the insns emitted.
10653 There's not really enough bulk here to make other passes such as
10654 instruction scheduling worthwhile. Note that use_thunk calls
10655 assemble_start_function and assemble_end_function. */
10656 insn = get_insns ();
10657 insn_locators_alloc ();
10658 shorten_branches (insn);
10659 final_start_function (insn, file, 1);
10660 final (insn, file, 1);
10661 final_end_function ();
10662
10663 reload_completed = 0;
10664 epilogue_completed = 0;
10665 }
10666
10667 /* Return true if sparc_output_mi_thunk would be able to output the
10668 assembler code for the thunk function specified by the arguments
10669 it is passed, and false otherwise. */
10670 static bool
10671 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
10672 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
10673 HOST_WIDE_INT vcall_offset,
10674 const_tree function ATTRIBUTE_UNUSED)
10675 {
10676 /* Bound the loop used in the default method above. */
10677 return (vcall_offset >= -32768 || ! fixed_regs[5]);
10678 }
10679
10680 /* We use the machine-specific reorg pass to enable workarounds for errata. */
10681
10682 static void
10683 sparc_reorg (void)
10684 {
10685 rtx insn, next;
10686
10687 /* The only erratum we handle for now is that of the AT697F processor. */
10688 if (!sparc_fix_at697f)
10689 return;
10690
10691 /* We need to have the (essentially) final form of the insn stream in order
10692 to properly detect the various hazards. Run delay slot scheduling. */
10693 if (optimize > 0 && flag_delayed_branch)
10694 {
10695 cleanup_barriers ();
10696 dbr_schedule (get_insns ());
10697 }
10698
10699 /* Now look for specific patterns in the insn stream. */
10700 for (insn = get_insns (); insn; insn = next)
10701 {
10702 bool insert_nop = false;
10703 rtx set;
10704
10705 /* Look for a single-word load into an odd-numbered FP register. */
10706 if (NONJUMP_INSN_P (insn)
10707 && (set = single_set (insn)) != NULL_RTX
10708 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
10709 && MEM_P (SET_SRC (set))
10710 && REG_P (SET_DEST (set))
10711 && REGNO (SET_DEST (set)) > 31
10712 && REGNO (SET_DEST (set)) % 2 != 0)
10713 {
10714 /* The wrong dependency is on the enclosing double register. */
10715 unsigned int x = REGNO (SET_DEST (set)) - 1;
10716 unsigned int src1, src2, dest;
10717 int code;
10718
10719 /* If the insn has a delay slot, then it cannot be problematic. */
10720 next = next_active_insn (insn);
10721 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
10722 code = -1;
10723 else
10724 {
10725 extract_insn (next);
10726 code = INSN_CODE (next);
10727 }
10728
10729 switch (code)
10730 {
10731 case CODE_FOR_adddf3:
10732 case CODE_FOR_subdf3:
10733 case CODE_FOR_muldf3:
10734 case CODE_FOR_divdf3:
10735 dest = REGNO (recog_data.operand[0]);
10736 src1 = REGNO (recog_data.operand[1]);
10737 src2 = REGNO (recog_data.operand[2]);
10738 if (src1 != src2)
10739 {
10740 /* Case [1-4]:
10741 ld [address], %fx+1
10742 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10743 if ((src1 == x || src2 == x)
10744 && (dest == src1 || dest == src2))
10745 insert_nop = true;
10746 }
10747 else
10748 {
10749 /* Case 5:
10750 ld [address], %fx+1
10751 FPOPd %fx, %fx, %fx */
10752 if (src1 == x
10753 && dest == src1
10754 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
10755 insert_nop = true;
10756 }
10757 break;
10758
10759 case CODE_FOR_sqrtdf2:
10760 dest = REGNO (recog_data.operand[0]);
10761 src1 = REGNO (recog_data.operand[1]);
10762 /* Case 6:
10763 ld [address], %fx+1
10764 fsqrtd %fx, %fx */
10765 if (src1 == x && dest == src1)
10766 insert_nop = true;
10767 break;
10768
10769 default:
10770 break;
10771 }
10772 }
10773 else
10774 next = NEXT_INSN (insn);
10775
10776 if (insert_nop)
10777 emit_insn_after (gen_nop (), insn);
10778 }
10779 }
10780
10781 /* How to allocate a 'struct machine_function'. */
10782
10783 static struct machine_function *
10784 sparc_init_machine_status (void)
10785 {
10786 return ggc_alloc_cleared_machine_function ();
10787 }
10788
10789 /* Locate some local-dynamic symbol still in use by this function
10790 so that we can print its name in local-dynamic base patterns. */
10791
10792 static const char *
10793 get_some_local_dynamic_name (void)
10794 {
10795 rtx insn;
10796
10797 if (cfun->machine->some_ld_name)
10798 return cfun->machine->some_ld_name;
10799
10800 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10801 if (INSN_P (insn)
10802 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10803 return cfun->machine->some_ld_name;
10804
10805 gcc_unreachable ();
10806 }
10807
10808 static int
10809 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10810 {
10811 rtx x = *px;
10812
10813 if (x
10814 && GET_CODE (x) == SYMBOL_REF
10815 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10816 {
10817 cfun->machine->some_ld_name = XSTR (x, 0);
10818 return 1;
10819 }
10820
10821 return 0;
10822 }
10823
10824 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10825 We need to emit DTP-relative relocations. */
10826
10827 static void
10828 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
10829 {
10830 switch (size)
10831 {
10832 case 4:
10833 fputs ("\t.word\t%r_tls_dtpoff32(", file);
10834 break;
10835 case 8:
10836 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
10837 break;
10838 default:
10839 gcc_unreachable ();
10840 }
10841 output_addr_const (file, x);
10842 fputs (")", file);
10843 }
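/* So a 4-byte DTP-relative entry for a symbol x is emitted as

	.word	%r_tls_dtpoff32(x)  */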
10844
10845 /* Do whatever processing is required at the end of a file. */
10846
10847 static void
10848 sparc_file_end (void)
10849 {
10850 /* If we need to emit the special GOT helper function, do so now. */
10851 if (got_helper_rtx)
10852 {
10853 const char *name = XSTR (got_helper_rtx, 0);
10854 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
10855 #ifdef DWARF2_UNWIND_INFO
10856 bool do_cfi;
10857 #endif
10858
10859 if (USE_HIDDEN_LINKONCE)
10860 {
10861 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
10862 get_identifier (name),
10863 build_function_type_list (void_type_node,
10864 NULL_TREE));
10865 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
10866 NULL_TREE, void_type_node);
10867 TREE_PUBLIC (decl) = 1;
10868 TREE_STATIC (decl) = 1;
10869 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
10870 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
10871 DECL_VISIBILITY_SPECIFIED (decl) = 1;
10872 resolve_unique_section (decl, 0, flag_function_sections);
10873 allocate_struct_function (decl, true);
10874 cfun->is_thunk = 1;
10875 current_function_decl = decl;
10876 init_varasm_status ();
10877 assemble_start_function (decl, name);
10878 }
10879 else
10880 {
10881 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10882 switch_to_section (text_section);
10883 if (align > 0)
10884 ASM_OUTPUT_ALIGN (asm_out_file, align);
10885 ASM_OUTPUT_LABEL (asm_out_file, name);
10886 }
10887
10888 #ifdef DWARF2_UNWIND_INFO
10889 do_cfi = dwarf2out_do_cfi_asm ();
10890 if (do_cfi)
10891 fprintf (asm_out_file, "\t.cfi_startproc\n");
10892 #endif
10893 if (flag_delayed_branch)
10894 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10895 reg_name, reg_name);
10896 else
10897 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10898 reg_name, reg_name);
10899 #ifdef DWARF2_UNWIND_INFO
10900 if (do_cfi)
10901 fprintf (asm_out_file, "\t.cfi_endproc\n");
10902 #endif
10903 }
10904
10905 if (NEED_INDICATE_EXEC_STACK)
10906 file_end_indicate_exec_stack ();
10907
10908 #ifdef TARGET_SOLARIS
10909 solaris_file_end ();
10910 #endif
10911 }
10912
10913 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10914 /* Implement TARGET_MANGLE_TYPE. */
10915
10916 static const char *
10917 sparc_mangle_type (const_tree type)
10918 {
10919 if (!TARGET_64BIT
10920 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10921 && TARGET_LONG_DOUBLE_128)
10922 return "g";
10923
10924 /* For all other types, use normal C++ mangling. */
10925 return NULL;
10926 }
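/* E.g. a C++ function f (long double) then mangles as _Z1fg instead
   of the default _Z1fe.  */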
10927 #endif
10928
10929 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
10930 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks
10931 where bit 0 indicates that X is true, and bit 1 indicates that Y is true. */
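/* For example, the compare-and-swap expander below calls
   sparc_emit_membar_for_model (model, 3, 1): LOAD_STORE == 3 says the
   operation both loads and stores, and BEFORE_AFTER == 1 requests the
   barrier before the operation only.  */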
10932
10933 void
10934 sparc_emit_membar_for_model (enum memmodel model,
10935 int load_store, int before_after)
10936 {
10937 /* Bits for the MEMBAR mmask field. */
10938 const int LoadLoad = 1;
10939 const int StoreLoad = 2;
10940 const int LoadStore = 4;
10941 const int StoreStore = 8;
10942
10943 int mm = 0, implied = 0;
10944
10945 switch (sparc_memory_model)
10946 {
10947 case SMM_SC:
10948 /* Sequential Consistency. All memory transactions are immediately
10949 visible in sequential execution order. No barriers needed. */
10950 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
10951 break;
10952
10953 case SMM_TSO:
10954 /* Total Store Ordering: all memory transactions with store semantics
10955 are followed by an implied StoreStore. */
10956 implied |= StoreStore;
10957 /* FALLTHRU */
10958
10959 case SMM_PSO:
10960 /* Partial Store Ordering: all memory transactions with load semantics
10961 are followed by an implied LoadLoad | LoadStore. */
10962 implied |= LoadLoad | LoadStore;
10963
10964 /* If we're not looking for a raw barrier (before+after), then atomic
10965 operations get the benefit of being both load and store. */
10966 if (load_store == 3 && before_after == 2)
10967 implied |= StoreLoad | StoreStore;
10968 /* FALLTHRU */
10969
10970 case SMM_RMO:
10971 /* Relaxed Memory Ordering: no implicit bits. */
10972 break;
10973
10974 default:
10975 gcc_unreachable ();
10976 }
10977
10978 if (before_after & 1)
10979 {
10980 if (model == MEMMODEL_ACQUIRE
10981 || model == MEMMODEL_ACQ_REL
10982 || model == MEMMODEL_SEQ_CST)
10983 {
10984 if (load_store & 1)
10985 mm |= LoadLoad | LoadStore;
10986 if (load_store & 2)
10987 mm |= StoreLoad | StoreStore;
10988 }
10989 }
10990 if (before_after & 2)
10991 {
10992 if (model == MEMMODEL_RELEASE
10993 || model == MEMMODEL_ACQ_REL
10994 || model == MEMMODEL_SEQ_CST)
10995 {
10996 if (load_store & 1)
10997 mm |= LoadLoad | StoreLoad;
10998 if (load_store & 2)
10999 mm |= LoadStore | StoreStore;
11000 }
11001 }
11002
11003 /* Remove the bits implied by the system memory model. */
11004 mm &= ~implied;
11005
11006 /* For raw barriers (before+after), always emit a barrier.
11007 This will become a compile-time barrier if needed. */
11008 if (mm || before_after == 3)
11009 emit_insn (gen_membar (GEN_INT (mm)));
11010 }
11011
11012 /* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing a
11013 32-bit compare-and-swap on the word containing the byte or half-word. */
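/* In outline, for a QImode MEM the expansion below computes

	addr = addr1 & -4;                      containing word
	off  = ((addr1 & 3) ^ 3) << 3;          big-endian bit offset
	mask = 0xff << off;
	val  = *addr & ~mask;

   and then loops on a word-sized CAS between ((oldval << off) & mask) | val
   and ((newval << off) & mask) | val, restarting with an updated VAL
   whenever only the bytes outside MASK have changed underneath us.  */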
11014
11015 static void
11016 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
11017 rtx oldval, rtx newval)
11018 {
11019 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
11020 rtx addr = gen_reg_rtx (Pmode);
11021 rtx off = gen_reg_rtx (SImode);
11022 rtx oldv = gen_reg_rtx (SImode);
11023 rtx newv = gen_reg_rtx (SImode);
11024 rtx oldvalue = gen_reg_rtx (SImode);
11025 rtx newvalue = gen_reg_rtx (SImode);
11026 rtx res = gen_reg_rtx (SImode);
11027 rtx resv = gen_reg_rtx (SImode);
11028 rtx memsi, val, mask, end_label, loop_label, cc;
11029
11030 emit_insn (gen_rtx_SET (VOIDmode, addr,
11031 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
11032
11033 if (Pmode != SImode)
11034 addr1 = gen_lowpart (SImode, addr1);
11035 emit_insn (gen_rtx_SET (VOIDmode, off,
11036 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
11037
11038 memsi = gen_rtx_MEM (SImode, addr);
11039 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
11040 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
11041
11042 val = copy_to_reg (memsi);
11043
11044 emit_insn (gen_rtx_SET (VOIDmode, off,
11045 gen_rtx_XOR (SImode, off,
11046 GEN_INT (GET_MODE (mem) == QImode
11047 ? 3 : 2))));
11048
11049 emit_insn (gen_rtx_SET (VOIDmode, off,
11050 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
11051
11052 if (GET_MODE (mem) == QImode)
11053 mask = force_reg (SImode, GEN_INT (0xff));
11054 else
11055 mask = force_reg (SImode, GEN_INT (0xffff));
11056
11057 emit_insn (gen_rtx_SET (VOIDmode, mask,
11058 gen_rtx_ASHIFT (SImode, mask, off)));
11059
11060 emit_insn (gen_rtx_SET (VOIDmode, val,
11061 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
11062 val)));
11063
11064 oldval = gen_lowpart (SImode, oldval);
11065 emit_insn (gen_rtx_SET (VOIDmode, oldv,
11066 gen_rtx_ASHIFT (SImode, oldval, off)));
11067
11068 newval = gen_lowpart_common (SImode, newval);
11069 emit_insn (gen_rtx_SET (VOIDmode, newv,
11070 gen_rtx_ASHIFT (SImode, newval, off)));
11071
11072 emit_insn (gen_rtx_SET (VOIDmode, oldv,
11073 gen_rtx_AND (SImode, oldv, mask)));
11074
11075 emit_insn (gen_rtx_SET (VOIDmode, newv,
11076 gen_rtx_AND (SImode, newv, mask)));
11077
11078 end_label = gen_label_rtx ();
11079 loop_label = gen_label_rtx ();
11080 emit_label (loop_label);
11081
11082 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
11083 gen_rtx_IOR (SImode, oldv, val)));
11084
11085 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
11086 gen_rtx_IOR (SImode, newv, val)));
11087
11088 emit_move_insn (bool_result, const1_rtx);
11089
11090 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
11091
11092 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
11093
11094 emit_insn (gen_rtx_SET (VOIDmode, resv,
11095 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
11096 res)));
11097
11098 emit_move_insn (bool_result, const0_rtx);
11099
11100 cc = gen_compare_reg_1 (NE, resv, val);
11101 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
11102
11103 /* Use cbranchcc4 to separate the compare and branch! */
11104 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
11105 cc, const0_rtx, loop_label));
11106
11107 emit_label (end_label);
11108
11109 emit_insn (gen_rtx_SET (VOIDmode, res,
11110 gen_rtx_AND (SImode, res, mask)));
11111
11112 emit_insn (gen_rtx_SET (VOIDmode, res,
11113 gen_rtx_LSHIFTRT (SImode, res, off)));
11114
11115 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
11116 }
11117
11118 /* Expand code to perform a compare-and-swap. */
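/* Orientation note: the operands follow the standard
   atomic_compare_and_swap<mode> layout, roughly

     operands[0]  boolean success flag (output)
     operands[1]  the value actually loaded (output)
     operands[2]  the memory location
     operands[3]  the expected (old) value
     operands[4]  the desired (new) value
     operands[6]  the success memory model

   which is how the assignments just below should be read.  */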
11119
11120 void
11121 sparc_expand_compare_and_swap (rtx operands[])
11122 {
11123 rtx bval, retval, mem, oldval, newval;
11124 enum machine_mode mode;
11125 enum memmodel model;
11126
11127 bval = operands[0];
11128 retval = operands[1];
11129 mem = operands[2];
11130 oldval = operands[3];
11131 newval = operands[4];
11132 model = (enum memmodel) INTVAL (operands[6]);
11133 mode = GET_MODE (mem);
11134
11135 sparc_emit_membar_for_model (model, 3, 1);
11136
11137 if (reg_overlap_mentioned_p (retval, oldval))
11138 oldval = copy_to_reg (oldval);
11139
11140 if (mode == QImode || mode == HImode)
11141 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
11142 else
11143 {
11144 rtx (*gen) (rtx, rtx, rtx, rtx);
11145 rtx x;
11146
11147 if (mode == SImode)
11148 gen = gen_atomic_compare_and_swapsi_1;
11149 else
11150 gen = gen_atomic_compare_and_swapdi_1;
11151 emit_insn (gen (retval, mem, oldval, newval));
11152
11153 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
11154 if (x != bval)
11155 convert_move (bval, x, 1);
11156 }
11157
11158 sparc_emit_membar_for_model (model, 3, 2);
11159 }
11160
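/* Expand the VEC_PERM selector SEL, a vector of VMODE element indices,
   into the byte-granular mask expected by the VIS BMASK instruction.
   Each element index is scaled to a byte index (x4 for V2SImode, x2
   for V4HImode) and spread out so consecutive mask bytes address the
   consecutive bytes of the selected element; the remaining merge or
   per-byte offset addition is folded into the bmask insn itself.  */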
11161 void
11162 sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
11163 {
11164 rtx t_1, t_2, t_3;
11165
11166 sel = gen_lowpart (DImode, sel);
11167 switch (vmode)
11168 {
11169 case V2SImode:
11170 /* inp = xxxxxxxAxxxxxxxB */
11171 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11172 NULL_RTX, 1, OPTAB_DIRECT);
11173 /* t_1 = ....xxxxxxxAxxx. */
11174 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
11175 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
11176 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
11177 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
11178 /* sel = .......B */
11179 /* t_1 = ...A.... */
11180 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
11181 /* sel = ...A...B */
11182 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
11183 /* sel = AAAABBBB * 4 */
11184 t_1 = force_reg (SImode, GEN_INT (0x01230123));
11185 /* sel = { A*4, A*4+1, A*4+2, ... } */
11186 break;
11187
11188 case V4HImode:
11189 /* inp = xxxAxxxBxxxCxxxD */
11190 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11191 NULL_RTX, 1, OPTAB_DIRECT);
11192 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11193 NULL_RTX, 1, OPTAB_DIRECT);
11194 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
11195 NULL_RTX, 1, OPTAB_DIRECT);
11196 /* t_1 = ..xxxAxxxBxxxCxx */
11197 /* t_2 = ....xxxAxxxBxxxC */
11198 /* t_3 = ......xxxAxxxBxx */
11199 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
11200 GEN_INT (0x07),
11201 NULL_RTX, 1, OPTAB_DIRECT);
11202 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
11203 GEN_INT (0x0700),
11204 NULL_RTX, 1, OPTAB_DIRECT);
11205 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
11206 GEN_INT (0x070000),
11207 NULL_RTX, 1, OPTAB_DIRECT);
11208 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
11209 GEN_INT (0x07000000),
11210 NULL_RTX, 1, OPTAB_DIRECT);
11211 /* sel = .......D */
11212 /* t_1 = .....C.. */
11213 /* t_2 = ...B.... */
11214 /* t_3 = .A...... */
11215 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
11216 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
11217 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
11218 /* sel = .A.B.C.D */
11219 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
11220 /* sel = AABBCCDD * 2 */
11221 t_1 = force_reg (SImode, GEN_INT (0x01010101));
11222 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
11223 break;
11224
11225 case V8QImode:
11226 /* input = xAxBxCxDxExFxGxH */
11227 sel = expand_simple_binop (DImode, AND, sel,
11228 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
11229 | 0x0f0f0f0f),
11230 NULL_RTX, 1, OPTAB_DIRECT);
11231 /* sel = .A.B.C.D.E.F.G.H */
11232 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
11233 NULL_RTX, 1, OPTAB_DIRECT);
11234 /* t_1 = ..A.B.C.D.E.F.G. */
11235 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11236 NULL_RTX, 1, OPTAB_DIRECT);
11237 /* sel = .AABBCCDDEEFFGGH */
11238 sel = expand_simple_binop (DImode, AND, sel,
11239 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
11240 | 0xff00ff),
11241 NULL_RTX, 1, OPTAB_DIRECT);
11242 /* sel = ..AB..CD..EF..GH */
11243 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11244 NULL_RTX, 1, OPTAB_DIRECT);
11245 /* t_1 = ....AB..CD..EF.. */
11246 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11247 NULL_RTX, 1, OPTAB_DIRECT);
11248 /* sel = ..ABABCDCDEFEFGH */
11249 sel = expand_simple_binop (DImode, AND, sel,
11250 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
11251 NULL_RTX, 1, OPTAB_DIRECT);
11252 /* sel = ....ABCD....EFGH */
11253 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11254 NULL_RTX, 1, OPTAB_DIRECT);
11255 /* t_1 = ........ABCD.... */
11256 sel = gen_lowpart (SImode, sel);
11257 t_1 = gen_lowpart (SImode, t_1);
11258 break;
11259
11260 default:
11261 gcc_unreachable ();
11262 }
11263
11264 /* Always perform the final addition/merge within the bmask insn. */
11265 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
11266 }
11267
11268 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
11269
11270 static bool
11271 sparc_frame_pointer_required (void)
11272 {
11273 /* If the stack pointer is dynamically modified in the function, it cannot
11274 serve as the frame pointer. */
11275 if (cfun->calls_alloca)
11276 return true;
11277
11278 /* If the function receives nonlocal gotos, it needs to save the frame
11279 pointer in the nonlocal_goto_save_area object. */
11280 if (cfun->has_nonlocal_label)
11281 return true;
11282
11283 /* In flat mode, that's it. */
11284 if (TARGET_FLAT)
11285 return false;
11286
11287 /* Otherwise, the frame pointer is required if the function isn't leaf. */
11288 return !(crtl->is_leaf && only_leaf_regs_used ());
11289 }
11290
11291 /* The way this is structured, we can't eliminate SFP in favor of SP
11292 if the frame pointer is required: we want to use the SFP->HFP elimination
11293 in that case. But the test in update_eliminables doesn't know that we
11294 assume below that only the former elimination is performed. */
11295
11296 static bool
11297 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
11298 {
11299 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
11300 }
11301
11302 /* Return the hard frame pointer directly to bypass the stack bias. */
11303
11304 static rtx
11305 sparc_builtin_setjmp_frame_value (void)
11306 {
11307 return hard_frame_pointer_rtx;
11308 }
11309
11310 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
11311 they won't be allocated. */
11312
11313 static void
11314 sparc_conditional_register_usage (void)
11315 {
11316 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
11317 {
11318 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
11319 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
11320 }
11321 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
11322 then honor it. */
11323 if (TARGET_ARCH32 && fixed_regs[5])
11324 fixed_regs[5] = 1;
11325 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
11326 fixed_regs[5] = 0;
11327 if (! TARGET_V9)
11328 {
11329 int regno;
11330 for (regno = SPARC_FIRST_V9_FP_REG;
11331 regno <= SPARC_LAST_V9_FP_REG;
11332 regno++)
11333 fixed_regs[regno] = 1;
11334 /* %fcc0 is used by v8 and v9. */
11335 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
11336 regno <= SPARC_LAST_V9_FCC_REG;
11337 regno++)
11338 fixed_regs[regno] = 1;
11339 }
11340 if (! TARGET_FPU)
11341 {
11342 int regno;
11343 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
11344 fixed_regs[regno] = 1;
11345 }
11346 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
11347 then honor it. Likewise with g3 and g4. */
11348 if (fixed_regs[2] == 2)
11349 fixed_regs[2] = ! TARGET_APP_REGS;
11350 if (fixed_regs[3] == 2)
11351 fixed_regs[3] = ! TARGET_APP_REGS;
11352 if (TARGET_ARCH32 && fixed_regs[4] == 2)
11353 fixed_regs[4] = ! TARGET_APP_REGS;
11354 else if (TARGET_CM_EMBMEDANY)
11355 fixed_regs[4] = 1;
11356 else if (fixed_regs[4] == 2)
11357 fixed_regs[4] = 0;
11358 if (TARGET_FLAT)
11359 {
11360 int regno;
11361 /* Disable leaf functions. */
11362 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
11363 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
11364 leaf_reg_remap [regno] = regno;
11365 }
11366 if (TARGET_VIS)
11367 global_regs[SPARC_GSR_REG] = 1;
11368 }
11369
11370 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
11371
11372 - We can't load constants into FP registers.
11373 - We can't load FP constants into integer registers when soft-float,
11374 because there is no soft-float pattern with a r/F constraint.
11375 - We can't load FP constants into integer registers for TFmode unless
11376 it is 0.0L, because there is no movtf pattern with a r/F constraint.
11377 - Try to reload integer constants (symbolic or otherwise) back into
11378 registers directly, rather than having them dumped to memory. */
11379
11380 static reg_class_t
11381 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
11382 {
11383 enum machine_mode mode = GET_MODE (x);
11384 if (CONSTANT_P (x))
11385 {
11386 if (FP_REG_CLASS_P (rclass)
11387 || rclass == GENERAL_OR_FP_REGS
11388 || rclass == GENERAL_OR_EXTRA_FP_REGS
11389 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
11390 || (mode == TFmode && ! const_zero_operand (x, mode)))
11391 return NO_REGS;
11392
11393 if (GET_MODE_CLASS (mode) == MODE_INT)
11394 return GENERAL_REGS;
11395
11396 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
11397 {
11398 if (! FP_REG_CLASS_P (rclass)
11399 || !(const_zero_operand (x, mode)
11400 || const_all_ones_operand (x, mode)))
11401 return NO_REGS;
11402 }
11403 }
11404
11405 if (TARGET_VIS3
11406 && ! TARGET_ARCH64
11407 && (rclass == EXTRA_FP_REGS
11408 || rclass == GENERAL_OR_EXTRA_FP_REGS))
11409 {
11410 int regno = true_regnum (x);
11411
11412 if (SPARC_INT_REG_P (regno))
11413 return (rclass == EXTRA_FP_REGS
11414 ? FP_REGS : GENERAL_OR_FP_REGS);
11415 }
11416
11417 return rclass;
11418 }
11419
11420 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
11421 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
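/* As a rough illustration, the register-input path below
   (which_alternative == 0, with %3/%4 as scratch registers and
   OPCODE being e.g. "mulx") expands to something like:

     srl   %L1, 0, %L1       ! zero-extend low halves if needed
     srl   %L2, 0, %L2
     sllx  %H1, 32, %3       ! glue %H1:%L1 into one 64-bit temp
     sllx  %H2, 32, %4       ! likewise for operand 2
     or    %L1, %3, %3
     or    %L2, %4, %4
     mulx  %3, %4, %3        ! the actual 64-bit multiply
     srlx  %3, 32, %H0       ! split the product back into a pair
     mov   %3, %L0

   i.e. V8+ keeps 64-bit values in 32-bit register pairs, so the
   halves are merged, multiplied with a 64-bit opcode, and split.  */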
11422
11423 const char *
11424 output_v8plus_mult (rtx insn, rtx *operands, const char *opcode)
11425 {
11426 char mulstr[32];
11427
11428 gcc_assert (! TARGET_ARCH64);
11429
11430 if (sparc_check_64 (operands[1], insn) <= 0)
11431 output_asm_insn ("srl\t%L1, 0, %L1", operands);
11432 if (which_alternative == 1)
11433 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
11434 if (GET_CODE (operands[2]) == CONST_INT)
11435 {
11436 if (which_alternative == 1)
11437 {
11438 output_asm_insn ("or\t%L1, %H1, %H1", operands);
11439 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
11440 output_asm_insn (mulstr, operands);
11441 return "srlx\t%L0, 32, %H0";
11442 }
11443 else
11444 {
11445 output_asm_insn ("sllx\t%H1, 32, %3", operands);
11446 output_asm_insn ("or\t%L1, %3, %3", operands);
11447 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
11448 output_asm_insn (mulstr, operands);
11449 output_asm_insn ("srlx\t%3, 32, %H0", operands);
11450 return "mov\t%3, %L0";
11451 }
11452 }
11453 else if (rtx_equal_p (operands[1], operands[2]))
11454 {
11455 if (which_alternative == 1)
11456 {
11457 output_asm_insn ("or\t%L1, %H1, %H1", operands);
11458 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
11459 output_asm_insn (mulstr, operands);
11460 return "srlx\t%L0, 32, %H0";
11461 }
11462 else
11463 {
11464 output_asm_insn ("sllx\t%H1, 32, %3", operands);
11465 output_asm_insn ("or\t%L1, %3, %3", operands);
11466 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
11467 output_asm_insn (mulstr, operands);
11468 output_asm_insn ("srlx\t%3, 32, %H0", operands);
11469 return "mov\t%3, %L0";
11470 }
11471 }
11472 if (sparc_check_64 (operands[2], insn) <= 0)
11473 output_asm_insn ("srl\t%L2, 0, %L2", operands);
11474 if (which_alternative == 1)
11475 {
11476 output_asm_insn ("or\t%L1, %H1, %H1", operands);
11477 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
11478 output_asm_insn ("or\t%L2, %L1, %L1", operands);
11479 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
11480 output_asm_insn (mulstr, operands);
11481 return "srlx\t%L0, 32, %H0";
11482 }
11483 else
11484 {
11485 output_asm_insn ("sllx\t%H1, 32, %3", operands);
11486 output_asm_insn ("sllx\t%H2, 32, %4", operands);
11487 output_asm_insn ("or\t%L1, %3, %3", operands);
11488 output_asm_insn ("or\t%L2, %4, %4", operands);
11489 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
11490 output_asm_insn (mulstr, operands);
11491 output_asm_insn ("srlx\t%3, 32, %H0", operands);
11492 return "mov\t%3, %L0";
11493 }
11494 }
11495
11496 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
11497 all fields of TARGET to ELT by means of the VIS2 BSHUFFLE insn. MODE
11498 and INNER_MODE are the modes describing TARGET. */
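/* A note on the bmask constants chosen below: each nibble of the GSR
   mask selects one byte of the 16-byte concatenation of the two
   BSHUFFLE sources (here T1 twice).  ELT sits in the low 32 bits of
   T1, i.e. bytes 4-7 on this big-endian target, so 0x45674567
   replicates the low word (V2SImode), 0x67676767 replicates the
   half-word in bytes 6-7 (V4HImode), and 0x77777777 replicates the
   lowest byte (V8QImode).  */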
11499
11500 static void
11501 vector_init_bshuffle (rtx target, rtx elt, enum machine_mode mode,
11502 enum machine_mode inner_mode)
11503 {
11504 rtx t1, final_insn;
11505 int bmask;
11506
11507 t1 = gen_reg_rtx (mode);
11508
11509 elt = convert_modes (SImode, inner_mode, elt, true);
11510 emit_move_insn (gen_lowpart(SImode, t1), elt);
11511
11512 switch (mode)
11513 {
11514 case V2SImode:
11515 final_insn = gen_bshufflev2si_vis (target, t1, t1);
11516 bmask = 0x45674567;
11517 break;
11518 case V4HImode:
11519 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
11520 bmask = 0x67676767;
11521 break;
11522 case V8QImode:
11523 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
11524 bmask = 0x77777777;
11525 break;
11526 default:
11527 gcc_unreachable ();
11528 }
11529
11530 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), CONST0_RTX (SImode),
11531 force_reg (SImode, GEN_INT (bmask))));
11532 emit_insn (final_insn);
11533 }
11534
11535 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
11536 all fields of TARGET to ELT in V8QImode by means of the VIS FPMERGE insn. */
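/* Sketch of the replication below, writing T1's four bytes as
   <a b c d> with ELT in byte d:

     fpmerge (t1, t1)          -> t2 = <a a b b c c d d>
     fpmerge (t2_low, t2_low)  -> t3 = <c c c c d d d d>
     fpmerge (t3_low, t3_low)  ->      <d d d d d d d d>

   since FPMERGE interleaves the bytes of its two 4-byte inputs, each
   pass through the low half doubles the multiplicity of d.  */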
11537
11538 static void
11539 vector_init_fpmerge (rtx target, rtx elt)
11540 {
11541 rtx t1, t2, t2_low, t3, t3_low;
11542
11543 t1 = gen_reg_rtx (V4QImode);
11544 elt = convert_modes (SImode, QImode, elt, true);
11545 emit_move_insn (gen_lowpart (SImode, t1), elt);
11546
11547 t2 = gen_reg_rtx (V8QImode);
11548 t2_low = gen_lowpart (V4QImode, t2);
11549 emit_insn (gen_fpmerge_vis (t2, t1, t1));
11550
11551 t3 = gen_reg_rtx (V8QImode);
11552 t3_low = gen_lowpart (V4QImode, t3);
11553 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
11554
11555 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
11556 }
11557
11558 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
11559 all fields of TARGET to ELT in V4HImode by means of the VIS FALIGNDATA insn. */
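/* Sketch of the loop below: ALIGNADDR sets the GSR align offset to 6,
   so each FALIGNDATA (t1, target) extracts bytes 6..13 of the 16-byte
   concatenation t1:target, i.e. the half-word ELT (bytes 6-7 of T1 on
   this big-endian target) followed by the first six bytes of TARGET.
   Each iteration therefore shifts ELT in from the left; after four
   iterations every half-word of TARGET equals ELT.  */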
11560
11561 static void
11562 vector_init_faligndata (rtx target, rtx elt)
11563 {
11564 rtx t1 = gen_reg_rtx (V4HImode);
11565 int i;
11566
11567 elt = convert_modes (SImode, HImode, elt, true);
11568 emit_move_insn (gen_lowpart (SImode, t1), elt);
11569
11570 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
11571 force_reg (SImode, GEN_INT (6)),
11572 const0_rtx));
11573
11574 for (i = 0; i < 4; i++)
11575 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
11576 }
11577
11578 /* Emit code to initialize TARGET to values for individual fields VALS. */
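/* The strategy ladder, reading the body below top to bottom:
   all-constant vectors become a single CONST_VECTOR move;
   single-element vector modes are moved as a plain SImode/DImode
   scalar; a two-word vector is built with a highpart/lowpart pair of
   word moves; a uniform 64-bit vector is splatted with BSHUFFLE
   (VIS2), FPMERGE (V8QImode) or FALIGNDATA (V4HImode); everything
   else goes through a stack temporary, one element at a time.  */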
11579
11580 void
11581 sparc_expand_vector_init (rtx target, rtx vals)
11582 {
11583 const enum machine_mode mode = GET_MODE (target);
11584 const enum machine_mode inner_mode = GET_MODE_INNER (mode);
11585 const int n_elts = GET_MODE_NUNITS (mode);
11586 int i, n_var = 0;
11587 bool all_same;
11588 rtx mem;
11589
11590 all_same = true;
11591 for (i = 0; i < n_elts; i++)
11592 {
11593 rtx x = XVECEXP (vals, 0, i);
11594 if (!CONSTANT_P (x))
11595 n_var++;
11596
11597 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
11598 all_same = false;
11599 }
11600
11601 if (n_var == 0)
11602 {
11603 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
11604 return;
11605 }
11606
11607 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
11608 {
11609 if (GET_MODE_SIZE (inner_mode) == 4)
11610 {
11611 emit_move_insn (gen_lowpart (SImode, target),
11612 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
11613 return;
11614 }
11615 else if (GET_MODE_SIZE (inner_mode) == 8)
11616 {
11617 emit_move_insn (gen_lowpart (DImode, target),
11618 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
11619 return;
11620 }
11621 }
11622 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
11623 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
11624 {
11625 emit_move_insn (gen_highpart (word_mode, target),
11626 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
11627 emit_move_insn (gen_lowpart (word_mode, target),
11628 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
11629 return;
11630 }
11631
11632 if (all_same && GET_MODE_SIZE (mode) == 8)
11633 {
11634 if (TARGET_VIS2)
11635 {
11636 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
11637 return;
11638 }
11639 if (mode == V8QImode)
11640 {
11641 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
11642 return;
11643 }
11644 if (mode == V4HImode)
11645 {
11646 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
11647 return;
11648 }
11649 }
11650
11651 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
11652 for (i = 0; i < n_elts; i++)
11653 emit_move_insn (adjust_address_nv (mem, inner_mode,
11654 i * GET_MODE_SIZE (inner_mode)),
11655 XVECEXP (vals, 0, i));
11656 emit_move_insn (target, mem);
11657 }
11658
11659 /* Implement TARGET_SECONDARY_RELOAD. */
11660
11661 static reg_class_t
11662 sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
11663 enum machine_mode mode, secondary_reload_info *sri)
11664 {
11665 enum reg_class rclass = (enum reg_class) rclass_i;
11666
11667 sri->icode = CODE_FOR_nothing;
11668 sri->extra_cost = 0;
11669
11670 /* We need a temporary when loading/storing a HImode/QImode value
11671 between memory and the FPU registers. This can happen when combine puts
11672 a paradoxical subreg in a float/fix conversion insn. */
11673 if (FP_REG_CLASS_P (rclass)
11674 && (mode == HImode || mode == QImode)
11675 && (GET_CODE (x) == MEM
11676 || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
11677 && true_regnum (x) == -1)))
11678 return GENERAL_REGS;
11679
11680 /* On 32-bit we need a temporary when loading/storing a DFmode value
11681 between unaligned memory and the upper FPU registers. */
11682 if (TARGET_ARCH32
11683 && rclass == EXTRA_FP_REGS
11684 && mode == DFmode
11685 && GET_CODE (x) == MEM
11686 && ! mem_min_alignment (x, 8))
11687 return FP_REGS;
11688
11689 if (((TARGET_CM_MEDANY
11690 && symbolic_operand (x, mode))
11691 || (TARGET_CM_EMBMEDANY
11692 && text_segment_operand (x, mode)))
11693 && ! flag_pic)
11694 {
11695 if (in_p)
11696 sri->icode = direct_optab_handler (reload_in_optab, mode);
11697 else
11698 sri->icode = direct_optab_handler (reload_out_optab, mode);
11699 return NO_REGS;
11700 }
11701
11702 if (TARGET_VIS3 && TARGET_ARCH32)
11703 {
11704 int regno = true_regnum (x);
11705
11706 /* When using VIS3 fp<-->int register moves, on 32-bit we have
11707 to move 8-byte values in 4-byte pieces. This only works via
11708 FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
11709 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
11710 an FP_REGS intermediate move. */
11711 if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
11712 || ((general_or_i64_p (rclass)
11713 || rclass == GENERAL_OR_FP_REGS)
11714 && SPARC_FP_REG_P (regno)))
11715 {
11716 sri->extra_cost = 2;
11717 return FP_REGS;
11718 }
11719 }
11720
11721 return NO_REGS;
11722 }
11723
11724 /* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
11725 OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
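/* For example, for

     dst = (a < b) ? x : y;   with a, b in SImode registers,

   this emits a compare that sets %icc, moves Y into DST, and then a
   single V9 conditional move of X into DST guarded by the condition,
   while the special case below skips the compare entirely for DImode
   register-versus-zero tests, which map onto MOVR.  */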
11726
11727 bool
11728 sparc_expand_conditional_move (enum machine_mode mode, rtx *operands)
11729 {
11730 enum rtx_code rc = GET_CODE (operands[1]);
11731 enum machine_mode cmp_mode;
11732 rtx cc_reg, dst, cmp;
11733
11734 cmp = operands[1];
11735 if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
11736 return false;
11737
11738 if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
11739 cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
11740
11741 cmp_mode = GET_MODE (XEXP (cmp, 0));
11742 rc = GET_CODE (cmp);
11743
11744 dst = operands[0];
11745 if (! rtx_equal_p (operands[2], dst)
11746 && ! rtx_equal_p (operands[3], dst))
11747 {
11748 if (reg_overlap_mentioned_p (dst, cmp))
11749 dst = gen_reg_rtx (mode);
11750
11751 emit_move_insn (dst, operands[3]);
11752 }
11753 else if (operands[2] == dst)
11754 {
11755 operands[2] = operands[3];
11756
11757 if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
11758 rc = reverse_condition_maybe_unordered (rc);
11759 else
11760 rc = reverse_condition (rc);
11761 }
11762
11763 if (XEXP (cmp, 1) == const0_rtx
11764 && GET_CODE (XEXP (cmp, 0)) == REG
11765 && cmp_mode == DImode
11766 && v9_regcmp_p (rc))
11767 cc_reg = XEXP (cmp, 0);
11768 else
11769 cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
11770
11771 cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
11772
11773 emit_insn (gen_rtx_SET (VOIDmode, dst,
11774 gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
11775
11776 if (dst != operands[0])
11777 emit_move_insn (operands[0], dst);
11778
11779 return true;
11780 }
11781
11782 /* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
11783 into OPERANDS[0] in MODE, depending on the outcome of the comparison of
11784 OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
11785 FCODE is the unspec code to be used for OPERANDS[3] and CCODE is the
11786 unspec code to be used for the condition mask. */
11787
11788 void
11789 sparc_expand_vcond (enum machine_mode mode, rtx *operands, int ccode, int fcode)
11790 {
11791 rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
11792 enum rtx_code code = GET_CODE (operands[3]);
11793
11794 mask = gen_reg_rtx (Pmode);
11795 cop0 = operands[4];
11796 cop1 = operands[5];
11797 if (code == LT || code == GE)
11798 {
11799 rtx t;
11800
11801 code = swap_condition (code);
11802 t = cop0; cop0 = cop1; cop1 = t;
11803 }
11804
11805 gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
11806
11807 fcmp = gen_rtx_UNSPEC (Pmode,
11808 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
11809 fcode);
11810
11811 cmask = gen_rtx_UNSPEC (DImode,
11812 gen_rtvec (2, mask, gsr),
11813 ccode);
11814
11815 bshuf = gen_rtx_UNSPEC (mode,
11816 gen_rtvec (3, operands[1], operands[2], gsr),
11817 UNSPEC_BSHUFFLE);
11818
11819 emit_insn (gen_rtx_SET (VOIDmode, mask, fcmp));
11820 emit_insn (gen_rtx_SET (VOIDmode, gsr, cmask));
11821
11822 emit_insn (gen_rtx_SET (VOIDmode, operands[0], bshuf));
11823 }
11824
11825 /* On SPARC, any mode that naturally allocates into the float
11826 registers should return 4 here. */
11827
11828 unsigned int
11829 sparc_regmode_natural_size (enum machine_mode mode)
11830 {
11831 int size = UNITS_PER_WORD;
11832
11833 if (TARGET_ARCH64)
11834 {
11835 enum mode_class mclass = GET_MODE_CLASS (mode);
11836
11837 if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
11838 size = 4;
11839 }
11840
11841 return size;
11842 }
11843
11844 /* Return TRUE if it is a good idea to tie two pseudo registers
11845 when one has mode MODE1 and one has mode MODE2.
11846 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
11847 for any hard reg, then this must be FALSE for correct output.
11848
11849 For V9 we have to deal with the fact that only the lower 32 floating
11850 point registers are 32-bit addressable. */
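/* Concretely: on V9, SFmode and DFmode are not tieable, because a
   DFmode pseudo may land in %f32-%f62 where no 32-bit access exists;
   DFmode and TFmode remain tieable since neither is a 4-byte mode.  */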
11851
11852 bool
11853 sparc_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
11854 {
11855 enum mode_class mclass1, mclass2;
11856 unsigned short size1, size2;
11857
11858 if (mode1 == mode2)
11859 return true;
11860
11861 mclass1 = GET_MODE_CLASS (mode1);
11862 mclass2 = GET_MODE_CLASS (mode2);
11863 if (mclass1 != mclass2)
11864 return false;
11865
11866 if (! TARGET_V9)
11867 return true;
11868
11869 /* Classes are the same and we are V9 so we have to deal with upper
11870 vs. lower floating point registers. If one of the modes is a
11871 4-byte mode, and the other is not, we have to mark them as not
11872 tieable because only the lower 32 floating point registers are
11873 addressable 32 bits at a time.
11874
11875 We can't just test explicitly for SFmode, otherwise we won't
11876 cover the vector mode cases properly. */
11877
11878 if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
11879 return true;
11880
11881 size1 = GET_MODE_SIZE (mode1);
11882 size2 = GET_MODE_SIZE (mode2);
11883 if ((size1 > 4 && size2 == 4)
11884 || (size2 > 4 && size1 == 4))
11885 return false;
11886
11887 return true;
11888 }
11889
11890 #include "gt-sparc.h"