1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
6 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
7 at Cygnus Support.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
14 any later version.
15
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "tree.h"
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "insn-codes.h"
37 #include "conditions.h"
38 #include "output.h"
39 #include "insn-attr.h"
40 #include "flags.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "recog.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "tm_p.h"
48 #include "debug.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "cfglayout.h"
52 #include "tree-gimple.h"
53 #include "langhooks.h"
54 #include "params.h"
55 #include "df.h"
56
57 /* Processor costs */
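/* Note: COSTS_N_INSNS (N) expresses the cost of N single-cycle
   instructions, so every entry in the tables below is relative to
   the cost of one fast insn.  */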
58 static const
59 struct processor_costs cypress_costs = {
60 COSTS_N_INSNS (2), /* int load */
61 COSTS_N_INSNS (2), /* int signed load */
62 COSTS_N_INSNS (2), /* int zeroed load */
63 COSTS_N_INSNS (2), /* float load */
64 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
65 COSTS_N_INSNS (5), /* fadd, fsub */
66 COSTS_N_INSNS (1), /* fcmp */
67 COSTS_N_INSNS (1), /* fmov, fmovr */
68 COSTS_N_INSNS (7), /* fmul */
69 COSTS_N_INSNS (37), /* fdivs */
70 COSTS_N_INSNS (37), /* fdivd */
71 COSTS_N_INSNS (63), /* fsqrts */
72 COSTS_N_INSNS (63), /* fsqrtd */
73 COSTS_N_INSNS (1), /* imul */
74 COSTS_N_INSNS (1), /* imulX */
75 0, /* imul bit factor */
76 COSTS_N_INSNS (1), /* idiv */
77 COSTS_N_INSNS (1), /* idivX */
78 COSTS_N_INSNS (1), /* movcc/movr */
79 0, /* shift penalty */
80 };
81
82 static const
83 struct processor_costs supersparc_costs = {
84 COSTS_N_INSNS (1), /* int load */
85 COSTS_N_INSNS (1), /* int signed load */
86 COSTS_N_INSNS (1), /* int zeroed load */
87 COSTS_N_INSNS (0), /* float load */
88 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
89 COSTS_N_INSNS (3), /* fadd, fsub */
90 COSTS_N_INSNS (3), /* fcmp */
91 COSTS_N_INSNS (1), /* fmov, fmovr */
92 COSTS_N_INSNS (3), /* fmul */
93 COSTS_N_INSNS (6), /* fdivs */
94 COSTS_N_INSNS (9), /* fdivd */
95 COSTS_N_INSNS (12), /* fsqrts */
96 COSTS_N_INSNS (12), /* fsqrtd */
97 COSTS_N_INSNS (4), /* imul */
98 COSTS_N_INSNS (4), /* imulX */
99 0, /* imul bit factor */
100 COSTS_N_INSNS (4), /* idiv */
101 COSTS_N_INSNS (4), /* idivX */
102 COSTS_N_INSNS (1), /* movcc/movr */
103 1, /* shift penalty */
104 };
105
106 static const
107 struct processor_costs hypersparc_costs = {
108 COSTS_N_INSNS (1), /* int load */
109 COSTS_N_INSNS (1), /* int signed load */
110 COSTS_N_INSNS (1), /* int zeroed load */
111 COSTS_N_INSNS (1), /* float load */
112 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
113 COSTS_N_INSNS (1), /* fadd, fsub */
114 COSTS_N_INSNS (1), /* fcmp */
115 COSTS_N_INSNS (1), /* fmov, fmovr */
116 COSTS_N_INSNS (1), /* fmul */
117 COSTS_N_INSNS (8), /* fdivs */
118 COSTS_N_INSNS (12), /* fdivd */
119 COSTS_N_INSNS (17), /* fsqrts */
120 COSTS_N_INSNS (17), /* fsqrtd */
121 COSTS_N_INSNS (17), /* imul */
122 COSTS_N_INSNS (17), /* imulX */
123 0, /* imul bit factor */
124 COSTS_N_INSNS (17), /* idiv */
125 COSTS_N_INSNS (17), /* idivX */
126 COSTS_N_INSNS (1), /* movcc/movr */
127 0, /* shift penalty */
128 };
129
130 static const
131 struct processor_costs sparclet_costs = {
132 COSTS_N_INSNS (3), /* int load */
133 COSTS_N_INSNS (3), /* int signed load */
134 COSTS_N_INSNS (1), /* int zeroed load */
135 COSTS_N_INSNS (1), /* float load */
136 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
137 COSTS_N_INSNS (1), /* fadd, fsub */
138 COSTS_N_INSNS (1), /* fcmp */
139 COSTS_N_INSNS (1), /* fmov, fmovr */
140 COSTS_N_INSNS (1), /* fmul */
141 COSTS_N_INSNS (1), /* fdivs */
142 COSTS_N_INSNS (1), /* fdivd */
143 COSTS_N_INSNS (1), /* fsqrts */
144 COSTS_N_INSNS (1), /* fsqrtd */
145 COSTS_N_INSNS (5), /* imul */
146 COSTS_N_INSNS (5), /* imulX */
147 0, /* imul bit factor */
148 COSTS_N_INSNS (5), /* idiv */
149 COSTS_N_INSNS (5), /* idivX */
150 COSTS_N_INSNS (1), /* movcc/movr */
151 0, /* shift penalty */
152 };
153
154 static const
155 struct processor_costs ultrasparc_costs = {
156 COSTS_N_INSNS (2), /* int load */
157 COSTS_N_INSNS (3), /* int signed load */
158 COSTS_N_INSNS (2), /* int zeroed load */
159 COSTS_N_INSNS (2), /* float load */
160 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
161 COSTS_N_INSNS (4), /* fadd, fsub */
162 COSTS_N_INSNS (1), /* fcmp */
163 COSTS_N_INSNS (2), /* fmov, fmovr */
164 COSTS_N_INSNS (4), /* fmul */
165 COSTS_N_INSNS (13), /* fdivs */
166 COSTS_N_INSNS (23), /* fdivd */
167 COSTS_N_INSNS (13), /* fsqrts */
168 COSTS_N_INSNS (23), /* fsqrtd */
169 COSTS_N_INSNS (4), /* imul */
170 COSTS_N_INSNS (4), /* imulX */
171 2, /* imul bit factor */
172 COSTS_N_INSNS (37), /* idiv */
173 COSTS_N_INSNS (68), /* idivX */
174 COSTS_N_INSNS (2), /* movcc/movr */
175 2, /* shift penalty */
176 };
177
178 static const
179 struct processor_costs ultrasparc3_costs = {
180 COSTS_N_INSNS (2), /* int load */
181 COSTS_N_INSNS (3), /* int signed load */
182 COSTS_N_INSNS (3), /* int zeroed load */
183 COSTS_N_INSNS (2), /* float load */
184 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
185 COSTS_N_INSNS (4), /* fadd, fsub */
186 COSTS_N_INSNS (5), /* fcmp */
187 COSTS_N_INSNS (3), /* fmov, fmovr */
188 COSTS_N_INSNS (4), /* fmul */
189 COSTS_N_INSNS (17), /* fdivs */
190 COSTS_N_INSNS (20), /* fdivd */
191 COSTS_N_INSNS (20), /* fsqrts */
192 COSTS_N_INSNS (29), /* fsqrtd */
193 COSTS_N_INSNS (6), /* imul */
194 COSTS_N_INSNS (6), /* imulX */
195 0, /* imul bit factor */
196 COSTS_N_INSNS (40), /* idiv */
197 COSTS_N_INSNS (71), /* idivX */
198 COSTS_N_INSNS (2), /* movcc/movr */
199 0, /* shift penalty */
200 };
201
202 static const
203 struct processor_costs niagara_costs = {
204 COSTS_N_INSNS (3), /* int load */
205 COSTS_N_INSNS (3), /* int signed load */
206 COSTS_N_INSNS (3), /* int zeroed load */
207 COSTS_N_INSNS (9), /* float load */
208 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
209 COSTS_N_INSNS (8), /* fadd, fsub */
210 COSTS_N_INSNS (26), /* fcmp */
211 COSTS_N_INSNS (8), /* fmov, fmovr */
212 COSTS_N_INSNS (29), /* fmul */
213 COSTS_N_INSNS (54), /* fdivs */
214 COSTS_N_INSNS (83), /* fdivd */
215 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
216 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
217 COSTS_N_INSNS (11), /* imul */
218 COSTS_N_INSNS (11), /* imulX */
219 0, /* imul bit factor */
220 COSTS_N_INSNS (72), /* idiv */
221 COSTS_N_INSNS (72), /* idivX */
222 COSTS_N_INSNS (1), /* movcc/movr */
223 0, /* shift penalty */
224 };
225
226 const struct processor_costs *sparc_costs = &cypress_costs;
227
228 #ifdef HAVE_AS_RELAX_OPTION
229 /* If 'as' and 'ld' relax tail call insns into a branch-always, always use
230 "or %o7,%g0,X; call Y; or X,%g0,%o7" so that it can be optimized.
231 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
232 anything branches between the sethi and the jmp. */
233 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
234 #else
235 #define LEAF_SIBCALL_SLOT_RESERVED_P \
236 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
237 #endif
238
239 /* Global variables for machine-dependent things. */
240
241 /* Size of frame. Need to know this to emit return insns from leaf procedures.
242 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
243 reload pass. This is important as the value is later used for scheduling
244 (to see what can go in a delay slot).
245 APPARENT_FSIZE is the size of the stack less the register save area and less
246 the outgoing argument area. It is used when saving call preserved regs. */
247 static HOST_WIDE_INT apparent_fsize;
248 static HOST_WIDE_INT actual_fsize;
249
250 /* Number of live general or floating point registers that need to be
251 saved (counted as 4-byte quantities). */
252 static int num_gfregs;
253
254 /* The alias set for prologue/epilogue register save/restore. */
255 static GTY(()) int sparc_sr_alias_set;
256
257 /* The alias set for the structure return value. */
258 static GTY(()) int struct_value_alias_set;
259
260 /* Save the operands last given to a compare for use when we
261 generate a scc or bcc insn. */
262 rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;
263
264 /* Vector to say how input registers are mapped to output registers.
265 HARD_FRAME_POINTER_REGNUM cannot be eliminated via this remapping;
266 you must use -fomit-frame-pointer to get that. */
267 char leaf_reg_remap[] =
268 { 0, 1, 2, 3, 4, 5, 6, 7,
269 -1, -1, -1, -1, -1, -1, 14, -1,
270 -1, -1, -1, -1, -1, -1, -1, -1,
271 8, 9, 10, 11, 12, 13, -1, 15,
272
273 32, 33, 34, 35, 36, 37, 38, 39,
274 40, 41, 42, 43, 44, 45, 46, 47,
275 48, 49, 50, 51, 52, 53, 54, 55,
276 56, 57, 58, 59, 60, 61, 62, 63,
277 64, 65, 66, 67, 68, 69, 70, 71,
278 72, 73, 74, 75, 76, 77, 78, 79,
279 80, 81, 82, 83, 84, 85, 86, 87,
280 88, 89, 90, 91, 92, 93, 94, 95,
281 96, 97, 98, 99, 100};
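/* For example, incoming argument register %i0 (hard reg 24) is remapped
   to outgoing %o0 (hard reg 8) by the table above; entries of -1 mark
   registers that have no leaf-function remapping.  */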
282
283 /* Vector, indexed by hard register number, which contains 1
284 for a register that is allowable in a candidate for leaf
285 function treatment. */
286 char sparc_leaf_regs[] =
287 { 1, 1, 1, 1, 1, 1, 1, 1,
288 0, 0, 0, 0, 0, 0, 1, 0,
289 0, 0, 0, 0, 0, 0, 0, 0,
290 1, 1, 1, 1, 1, 1, 0, 1,
291 1, 1, 1, 1, 1, 1, 1, 1,
292 1, 1, 1, 1, 1, 1, 1, 1,
293 1, 1, 1, 1, 1, 1, 1, 1,
294 1, 1, 1, 1, 1, 1, 1, 1,
295 1, 1, 1, 1, 1, 1, 1, 1,
296 1, 1, 1, 1, 1, 1, 1, 1,
297 1, 1, 1, 1, 1, 1, 1, 1,
298 1, 1, 1, 1, 1, 1, 1, 1,
299 1, 1, 1, 1, 1};
300
301 struct machine_function GTY(())
302 {
303 /* Some local-dynamic TLS symbol name. */
304 const char *some_ld_name;
305
306 /* True if the current function is leaf and uses only leaf regs,
307 so that the SPARC leaf function optimization can be applied.
308 Private version of current_function_uses_only_leaf_regs, see
309 sparc_expand_prologue for the rationale. */
310 int leaf_function_p;
311
312 /* True if the data calculated by sparc_expand_prologue are valid. */
313 bool prologue_data_valid_p;
314 };
315
316 #define sparc_leaf_function_p cfun->machine->leaf_function_p
317 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
318
319 /* Register we pretend the frame pointer is allocated to.
320 Normally, this is %fp, but if we are in a leaf procedure, this
321 is %sp+"something". We record "something" separately as it may
322 be too big for reg+constant addressing. */
323 static rtx frame_base_reg;
324 static HOST_WIDE_INT frame_base_offset;
325
326 /* 1 if the next opcode is to be specially indented. */
327 int sparc_indent_opcode = 0;
328
329 static bool sparc_handle_option (size_t, const char *, int);
330 static void sparc_init_modes (void);
331 static void scan_record_type (tree, int *, int *, int *);
332 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
333 tree, int, int, int *, int *);
334
335 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
336 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
337
338 static void sparc_output_addr_vec (rtx);
339 static void sparc_output_addr_diff_vec (rtx);
340 static void sparc_output_deferred_case_vectors (void);
341 static rtx sparc_builtin_saveregs (void);
342 static int epilogue_renumber (rtx *, int);
343 static bool sparc_assemble_integer (rtx, unsigned int, int);
344 static int set_extends (rtx);
345 static void emit_pic_helper (void);
346 static void load_pic_register (bool);
347 static int save_or_restore_regs (int, int, rtx, int, int);
348 static void emit_save_or_restore_regs (int);
349 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
350 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
351 #ifdef OBJECT_FORMAT_ELF
352 static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
353 #endif
354
355 static int sparc_adjust_cost (rtx, rtx, rtx, int);
356 static int sparc_issue_rate (void);
357 static void sparc_sched_init (FILE *, int, int);
358 static int sparc_use_sched_lookahead (void);
359
360 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
361 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
362 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
363 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
364 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
365
366 static bool sparc_function_ok_for_sibcall (tree, tree);
367 static void sparc_init_libfuncs (void);
368 static void sparc_init_builtins (void);
369 static void sparc_vis_init_builtins (void);
370 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
371 static tree sparc_fold_builtin (tree, tree, bool);
372 static int sparc_vis_mul8x16 (int, int);
373 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
374 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
375 HOST_WIDE_INT, tree);
376 static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
377 HOST_WIDE_INT, tree);
378 static struct machine_function * sparc_init_machine_status (void);
379 static bool sparc_cannot_force_const_mem (rtx);
380 static rtx sparc_tls_get_addr (void);
381 static rtx sparc_tls_got (void);
382 static const char *get_some_local_dynamic_name (void);
383 static int get_some_local_dynamic_name_1 (rtx *, void *);
384 static bool sparc_rtx_costs (rtx, int, int, int *);
385 static bool sparc_promote_prototypes (tree);
386 static rtx sparc_struct_value_rtx (tree, int);
387 static bool sparc_return_in_memory (tree, tree);
388 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
389 static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
390 static bool sparc_vector_mode_supported_p (enum machine_mode);
391 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
392 enum machine_mode, tree, bool);
393 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
394 enum machine_mode, tree, bool);
395 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
396 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
397 static void sparc_file_end (void);
398 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
399 static const char *sparc_mangle_fundamental_type (tree);
400 #endif
401 #ifdef SUBTARGET_ATTRIBUTE_TABLE
402 const struct attribute_spec sparc_attribute_table[];
403 #endif
404 \f
405 /* Option handling. */
406
407 /* Code model parsed from -mcmodel=. */
408 enum cmodel sparc_cmodel;
409
410 char sparc_hard_reg_printed[8];
411
412 struct sparc_cpu_select sparc_select[] =
413 {
414 /* switch, name, tune, arch */
415 { (char *)0, "default", 1, 1 },
416 { (char *)0, "-mcpu=", 1, 1 },
417 { (char *)0, "-mtune=", 1, 0 },
418 { 0, 0, 0, 0 }
419 };
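/* As the set_tune_p/set_arch_p columns above indicate, -mcpu= selects
   both the instruction set and the scheduling model, whereas -mtune=
   affects scheduling only.  */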
420
421 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
422 enum processor_type sparc_cpu;
423
424 /* Whether an FPU option was specified. */
425 static bool fpu_option_set = false;
426
427 /* Initialize the GCC target structure. */
428
429 /* The sparc default is to use .half rather than .short for aligned
430 HI objects. Use .word instead of .long on non-ELF systems. */
431 #undef TARGET_ASM_ALIGNED_HI_OP
432 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
433 #ifndef OBJECT_FORMAT_ELF
434 #undef TARGET_ASM_ALIGNED_SI_OP
435 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
436 #endif
437
438 #undef TARGET_ASM_UNALIGNED_HI_OP
439 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
440 #undef TARGET_ASM_UNALIGNED_SI_OP
441 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
442 #undef TARGET_ASM_UNALIGNED_DI_OP
443 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
444
445 /* The target hook has to handle DI-mode values. */
446 #undef TARGET_ASM_INTEGER
447 #define TARGET_ASM_INTEGER sparc_assemble_integer
448
449 #undef TARGET_ASM_FUNCTION_PROLOGUE
450 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
451 #undef TARGET_ASM_FUNCTION_EPILOGUE
452 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
453
454 #undef TARGET_SCHED_ADJUST_COST
455 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
456 #undef TARGET_SCHED_ISSUE_RATE
457 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
458 #undef TARGET_SCHED_INIT
459 #define TARGET_SCHED_INIT sparc_sched_init
460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
462
463 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
464 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
465
466 #undef TARGET_INIT_LIBFUNCS
467 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
468 #undef TARGET_INIT_BUILTINS
469 #define TARGET_INIT_BUILTINS sparc_init_builtins
470
471 #undef TARGET_EXPAND_BUILTIN
472 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
473 #undef TARGET_FOLD_BUILTIN
474 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
475
476 #if TARGET_TLS
477 #undef TARGET_HAVE_TLS
478 #define TARGET_HAVE_TLS true
479 #endif
480
481 #undef TARGET_CANNOT_FORCE_CONST_MEM
482 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
483
484 #undef TARGET_ASM_OUTPUT_MI_THUNK
485 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
486 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
487 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
488
489 #undef TARGET_RTX_COSTS
490 #define TARGET_RTX_COSTS sparc_rtx_costs
491 #undef TARGET_ADDRESS_COST
492 #define TARGET_ADDRESS_COST hook_int_rtx_0
493
494 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
495 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
496 test for this value. */
497 #undef TARGET_PROMOTE_FUNCTION_ARGS
498 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
499
500 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
501 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
502 test for this value. */
503 #undef TARGET_PROMOTE_FUNCTION_RETURN
504 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
505
506 #undef TARGET_PROMOTE_PROTOTYPES
507 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
508
509 #undef TARGET_STRUCT_VALUE_RTX
510 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
511 #undef TARGET_RETURN_IN_MEMORY
512 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
513 #undef TARGET_MUST_PASS_IN_STACK
514 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
515 #undef TARGET_PASS_BY_REFERENCE
516 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
517 #undef TARGET_ARG_PARTIAL_BYTES
518 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
519
520 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
521 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
522 #undef TARGET_STRICT_ARGUMENT_NAMING
523 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
524
525 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
526 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
527
528 #undef TARGET_VECTOR_MODE_SUPPORTED_P
529 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
530
531 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
532 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
533
534 #ifdef SUBTARGET_INSERT_ATTRIBUTES
535 #undef TARGET_INSERT_ATTRIBUTES
536 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
537 #endif
538
539 #ifdef SUBTARGET_ATTRIBUTE_TABLE
540 #undef TARGET_ATTRIBUTE_TABLE
541 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
542 #endif
543
544 #undef TARGET_RELAXED_ORDERING
545 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
546
547 #undef TARGET_DEFAULT_TARGET_FLAGS
548 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
549 #undef TARGET_HANDLE_OPTION
550 #define TARGET_HANDLE_OPTION sparc_handle_option
551
552 #if TARGET_GNU_TLS
553 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
554 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
555 #endif
556
557 #undef TARGET_ASM_FILE_END
558 #define TARGET_ASM_FILE_END sparc_file_end
559
560 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
561 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
562 #define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
563 #endif
564
565 struct gcc_target targetm = TARGET_INITIALIZER;
566
567 /* Implement TARGET_HANDLE_OPTION. */
568
569 static bool
570 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
571 {
572 switch (code)
573 {
574 case OPT_mfpu:
575 case OPT_mhard_float:
576 case OPT_msoft_float:
577 fpu_option_set = true;
578 break;
579
580 case OPT_mcpu_:
581 sparc_select[1].string = arg;
582 break;
583
584 case OPT_mtune_:
585 sparc_select[2].string = arg;
586 break;
587 }
588
589 return true;
590 }
591
592 /* Validate and override various options, and do some machine dependent
593 initialization. */
594
595 void
596 sparc_override_options (void)
597 {
598 static struct code_model {
599 const char *const name;
600 const int value;
601 } const cmodels[] = {
602 { "32", CM_32 },
603 { "medlow", CM_MEDLOW },
604 { "medmid", CM_MEDMID },
605 { "medany", CM_MEDANY },
606 { "embmedany", CM_EMBMEDANY },
607 { 0, 0 }
608 };
609 const struct code_model *cmodel;
610 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
611 static struct cpu_default {
612 const int cpu;
613 const char *const name;
614 } const cpu_default[] = {
615 /* There must be one entry here for each TARGET_CPU value. */
616 { TARGET_CPU_sparc, "cypress" },
617 { TARGET_CPU_sparclet, "tsc701" },
618 { TARGET_CPU_sparclite, "f930" },
619 { TARGET_CPU_v8, "v8" },
620 { TARGET_CPU_hypersparc, "hypersparc" },
621 { TARGET_CPU_sparclite86x, "sparclite86x" },
622 { TARGET_CPU_supersparc, "supersparc" },
623 { TARGET_CPU_v9, "v9" },
624 { TARGET_CPU_ultrasparc, "ultrasparc" },
625 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
626 { TARGET_CPU_niagara, "niagara" },
627 { 0, 0 }
628 };
629 const struct cpu_default *def;
630 /* Table of values for -m{cpu,tune}=. */
631 static struct cpu_table {
632 const char *const name;
633 const enum processor_type processor;
634 const int disable;
635 const int enable;
636 } const cpu_table[] = {
637 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
638 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
639 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
640 /* TI TMS390Z55 supersparc */
641 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
642 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
643 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.
644 The Fujitsu MB86934 is a more recent sparclite chip, with an FPU. */
645 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
646 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
647 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
648 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
649 MASK_SPARCLITE },
650 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
651 /* TEMIC sparclet */
652 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
653 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
654 /* TI ultrasparc I, II, IIi */
655 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
656 /* Although insns using %y are deprecated, they are a clear win on current
657 ultrasparcs. */
658 |MASK_DEPRECATED_V8_INSNS},
659 /* TI ultrasparc III */
660 /* ??? Check if %y issue still holds true in ultra3. */
661 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
662 /* UltraSPARC T1 */
663 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
664 { 0, 0, 0, 0 }
665 };
666 const struct cpu_table *cpu;
667 const struct sparc_cpu_select *sel;
668 int fpu;
669
670 #ifndef SPARC_BI_ARCH
671 /* Check for unsupported architecture size. */
672 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
673 error ("%s is not supported by this configuration",
674 DEFAULT_ARCH32_P ? "-m64" : "-m32");
675 #endif
676
677 /* We force all 64-bit archs to use 128-bit long double. */
678 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
679 {
680 error ("-mlong-double-64 not allowed with -m64");
681 target_flags |= MASK_LONG_DOUBLE_128;
682 }
683
684 /* Code model selection. */
685 sparc_cmodel = SPARC_DEFAULT_CMODEL;
686
687 #ifdef SPARC_BI_ARCH
688 if (TARGET_ARCH32)
689 sparc_cmodel = CM_32;
690 #endif
691
692 if (sparc_cmodel_string != NULL)
693 {
694 if (TARGET_ARCH64)
695 {
696 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
697 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
698 break;
699 if (cmodel->name == NULL)
700 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
701 else
702 sparc_cmodel = cmodel->value;
703 }
704 else
705 error ("-mcmodel= is not supported on 32 bit systems");
706 }
707
708 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
709
710 /* Set the default CPU. */
711 for (def = &cpu_default[0]; def->name; ++def)
712 if (def->cpu == TARGET_CPU_DEFAULT)
713 break;
714 gcc_assert (def->name);
715 sparc_select[0].string = def->name;
716
717 for (sel = &sparc_select[0]; sel->name; ++sel)
718 {
719 if (sel->string)
720 {
721 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
722 if (! strcmp (sel->string, cpu->name))
723 {
724 if (sel->set_tune_p)
725 sparc_cpu = cpu->processor;
726
727 if (sel->set_arch_p)
728 {
729 target_flags &= ~cpu->disable;
730 target_flags |= cpu->enable;
731 }
732 break;
733 }
734
735 if (! cpu->name)
736 error ("bad value (%s) for %s switch", sel->string, sel->name);
737 }
738 }
739
740 /* If -mfpu or -mno-fpu was explicitly used, don't override with
741 the processor default. */
742 if (fpu_option_set)
743 target_flags = (target_flags & ~MASK_FPU) | fpu;
744
745 /* Don't allow -mvis if FPU is disabled. */
746 if (! TARGET_FPU)
747 target_flags &= ~MASK_VIS;
748
749 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
750 are available.
751 -m64 also implies v9. */
752 if (TARGET_VIS || TARGET_ARCH64)
753 {
754 target_flags |= MASK_V9;
755 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
756 }
757
758 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
759 if (TARGET_V9 && TARGET_ARCH32)
760 target_flags |= MASK_DEPRECATED_V8_INSNS;
761
762 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
763 if (! TARGET_V9 || TARGET_ARCH64)
764 target_flags &= ~MASK_V8PLUS;
765
766 /* Don't use stack biasing in 32 bit mode. */
767 if (TARGET_ARCH32)
768 target_flags &= ~MASK_STACK_BIAS;
769
770 /* Supply a default value for align_functions. */
771 if (align_functions == 0
772 && (sparc_cpu == PROCESSOR_ULTRASPARC
773 || sparc_cpu == PROCESSOR_ULTRASPARC3
774 || sparc_cpu == PROCESSOR_NIAGARA))
775 align_functions = 32;
776
777 /* Validate PCC_STRUCT_RETURN. */
778 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
779 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
780
781 /* Only use .uaxword when compiling for a 64-bit target. */
782 if (!TARGET_ARCH64)
783 targetm.asm_out.unaligned_op.di = NULL;
784
785 /* Do various machine dependent initializations. */
786 sparc_init_modes ();
787
788 /* Acquire unique alias sets for our private stuff. */
789 sparc_sr_alias_set = new_alias_set ();
790 struct_value_alias_set = new_alias_set ();
791
792 /* Set up function hooks. */
793 init_machine_status = sparc_init_machine_status;
794
795 switch (sparc_cpu)
796 {
797 case PROCESSOR_V7:
798 case PROCESSOR_CYPRESS:
799 sparc_costs = &cypress_costs;
800 break;
801 case PROCESSOR_V8:
802 case PROCESSOR_SPARCLITE:
803 case PROCESSOR_SUPERSPARC:
804 sparc_costs = &supersparc_costs;
805 break;
806 case PROCESSOR_F930:
807 case PROCESSOR_F934:
808 case PROCESSOR_HYPERSPARC:
809 case PROCESSOR_SPARCLITE86X:
810 sparc_costs = &hypersparc_costs;
811 break;
812 case PROCESSOR_SPARCLET:
813 case PROCESSOR_TSC701:
814 sparc_costs = &sparclet_costs;
815 break;
816 case PROCESSOR_V9:
817 case PROCESSOR_ULTRASPARC:
818 sparc_costs = &ultrasparc_costs;
819 break;
820 case PROCESSOR_ULTRASPARC3:
821 sparc_costs = &ultrasparc3_costs;
822 break;
823 case PROCESSOR_NIAGARA:
824 sparc_costs = &niagara_costs;
825 break;
826 }
827
828 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
829 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
830 target_flags |= MASK_LONG_DOUBLE_128;
831 #endif
832
833 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
834 set_param_value ("simultaneous-prefetches",
835 ((sparc_cpu == PROCESSOR_ULTRASPARC
836 || sparc_cpu == PROCESSOR_NIAGARA)
837 ? 2
838 : (sparc_cpu == PROCESSOR_ULTRASPARC3
839 ? 8 : 3)));
840 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
841 set_param_value ("l1-cache-line-size",
842 ((sparc_cpu == PROCESSOR_ULTRASPARC
843 || sparc_cpu == PROCESSOR_ULTRASPARC3
844 || sparc_cpu == PROCESSOR_NIAGARA)
845 ? 64 : 32));
846 }
847 \f
848 #ifdef SUBTARGET_ATTRIBUTE_TABLE
849 /* Table of valid machine attributes. */
850 const struct attribute_spec sparc_attribute_table[] =
851 {
852 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
853 SUBTARGET_ATTRIBUTE_TABLE,
854 { NULL, 0, 0, false, false, false, NULL }
855 };
856 #endif
857 \f
858 /* Miscellaneous utilities. */
859
860 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
861 or branch on register contents instructions. */
862
863 int
864 v9_regcmp_p (enum rtx_code code)
865 {
866 return (code == EQ || code == NE || code == GE || code == LT
867 || code == LE || code == GT);
868 }
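/* These six codes are exactly the zero-comparisons that the V9
   branch/move on register contents instructions can encode:
   rz, rnz, rgez, rlz, rlez and rgz.  */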
869
870 /* Nonzero if OP is a floating point constant which can
871 be loaded into an integer register using a single
872 sethi instruction. */
873
874 int
875 fp_sethi_p (rtx op)
876 {
877 if (GET_CODE (op) == CONST_DOUBLE)
878 {
879 REAL_VALUE_TYPE r;
880 long i;
881
882 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
883 REAL_VALUE_TO_TARGET_SINGLE (r, i);
884 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
885 }
886
887 return 0;
888 }
889
890 /* Nonzero if OP is a floating point constant which can
891 be loaded into an integer register using a single
892 mov instruction. */
893
894 int
895 fp_mov_p (rtx op)
896 {
897 if (GET_CODE (op) == CONST_DOUBLE)
898 {
899 REAL_VALUE_TYPE r;
900 long i;
901
902 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
903 REAL_VALUE_TO_TARGET_SINGLE (r, i);
904 return SPARC_SIMM13_P (i);
905 }
906
907 return 0;
908 }
909
910 /* Nonzero if OP is a floating point constant which can
911 be loaded into an integer register using a high/losum
912 instruction sequence. */
913
914 int
915 fp_high_losum_p (rtx op)
916 {
917 /* The constraints calling this should only be in
918 SFmode move insns, so any constant which cannot
919 be moved using a single insn will do. */
920 if (GET_CODE (op) == CONST_DOUBLE)
921 {
922 REAL_VALUE_TYPE r;
923 long i;
924
925 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
926 REAL_VALUE_TO_TARGET_SINGLE (r, i);
927 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
928 }
929
930 return 0;
931 }
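/* Taken together, the three predicates above partition all SFmode
   constants by their 32-bit image I:
     fp_mov_p         <=>  SPARC_SIMM13_P (I)              (single mov)
     fp_sethi_p       <=>  !SIMM13 && SPARC_SETHI_P (I)    (single sethi)
     fp_high_losum_p  <=>  !SIMM13 && !SETHI               (sethi + or)  */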
932
933 /* Expand a move instruction. Return true if all work is done. */
934
935 bool
936 sparc_expand_move (enum machine_mode mode, rtx *operands)
937 {
938 /* Handle sets of MEM first. */
939 if (GET_CODE (operands[0]) == MEM)
940 {
941 /* The constant 0 is a register (%g0, or a pair of registers) on SPARC. */
942 if (register_or_zero_operand (operands[1], mode))
943 return false;
944
945 if (!reload_in_progress)
946 {
947 operands[0] = validize_mem (operands[0]);
948 operands[1] = force_reg (mode, operands[1]);
949 }
950 }
951
952 /* Fixup TLS cases. */
953 if (TARGET_HAVE_TLS
954 && CONSTANT_P (operands[1])
955 && GET_CODE (operands[1]) != HIGH
956 && sparc_tls_referenced_p (operands [1]))
957 {
958 rtx sym = operands[1];
959 rtx addend = NULL;
960
961 if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
962 {
963 addend = XEXP (XEXP (sym, 0), 1);
964 sym = XEXP (XEXP (sym, 0), 0);
965 }
966
967 gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
968
969 sym = legitimize_tls_address (sym);
970 if (addend)
971 {
972 sym = gen_rtx_PLUS (mode, sym, addend);
973 sym = force_operand (sym, operands[0]);
974 }
975 operands[1] = sym;
976 }
977
978 /* Fixup PIC cases. */
979 if (flag_pic && CONSTANT_P (operands[1]))
980 {
981 if (pic_address_needs_scratch (operands[1]))
982 operands[1] = legitimize_pic_address (operands[1], mode, 0);
983
984 /* VxWorks does not impose a fixed gap between segments; the run-time
985 gap can be different from the object-file gap. We therefore can't
986 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
987 are absolutely sure that X is in the same segment as the GOT.
988 Unfortunately, the flexibility of linker scripts means that we
989 can't be sure of that in general, so assume that _G_O_T_-relative
990 accesses are never valid on VxWorks. */
991 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
992 {
993 if (mode == SImode)
994 {
995 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
996 return true;
997 }
998
999 if (mode == DImode)
1000 {
1001 gcc_assert (TARGET_ARCH64);
1002 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1003 return true;
1004 }
1005 }
1006
1007 if (symbolic_operand (operands[1], mode))
1008 {
1009 operands[1] = legitimize_pic_address (operands[1],
1010 mode,
1011 (reload_in_progress ?
1012 operands[0] :
1013 NULL_RTX));
1014 return false;
1015 }
1016 }
1017
1018 /* If we are trying to toss an integer constant into FP registers,
1019 or loading an FP or vector constant, force it into memory. */
1020 if (CONSTANT_P (operands[1])
1021 && REG_P (operands[0])
1022 && (SPARC_FP_REG_P (REGNO (operands[0]))
1023 || SCALAR_FLOAT_MODE_P (mode)
1024 || VECTOR_MODE_P (mode)))
1025 {
1026 /* emit_group_store will send such bogosity to us when it is
1027 not storing directly into memory. So fix this up to avoid
1028 crashes in output_constant_pool. */
1029 if (operands [1] == const0_rtx)
1030 operands[1] = CONST0_RTX (mode);
1031
1032 /* We can clear FP registers if TARGET_VIS; other regs can always be cleared. */
1033 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1034 && const_zero_operand (operands[1], mode))
1035 return false;
1036
1037 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1038 /* We are able to build any SF constant in integer registers
1039 with at most 2 instructions. */
1040 && (mode == SFmode
1041 /* And any DF constant in integer registers. */
1042 || (mode == DFmode
1043 && (reload_completed || reload_in_progress))))
1044 return false;
1045
1046 operands[1] = force_const_mem (mode, operands[1]);
1047 if (!reload_in_progress)
1048 operands[1] = validize_mem (operands[1]);
1049 return false;
1050 }
1051
1052 /* Accept non-constants and valid constants unmodified. */
1053 if (!CONSTANT_P (operands[1])
1054 || GET_CODE (operands[1]) == HIGH
1055 || input_operand (operands[1], mode))
1056 return false;
1057
1058 switch (mode)
1059 {
1060 case QImode:
1061 /* All QImode constants require only one insn, so proceed. */
1062 break;
1063
1064 case HImode:
1065 case SImode:
1066 sparc_emit_set_const32 (operands[0], operands[1]);
1067 return true;
1068
1069 case DImode:
1070 /* input_operand should have filtered out 32-bit mode. */
1071 sparc_emit_set_const64 (operands[0], operands[1]);
1072 return true;
1073
1074 default:
1075 gcc_unreachable ();
1076 }
1077
1078 return false;
1079 }
1080
1081 /* Load OP1, a 32-bit constant, into OP0, a register.
1082 We know it can't be done in one insn when we get
1083 here; the move expander guarantees this. */
1084
1085 void
1086 sparc_emit_set_const32 (rtx op0, rtx op1)
1087 {
1088 enum machine_mode mode = GET_MODE (op0);
1089 rtx temp;
1090
1091 if (reload_in_progress || reload_completed)
1092 temp = op0;
1093 else
1094 temp = gen_reg_rtx (mode);
1095
1096 if (GET_CODE (op1) == CONST_INT)
1097 {
1098 gcc_assert (!small_int_operand (op1, mode)
1099 && !const_high_operand (op1, mode));
1100
1101 /* Emit them as real moves instead of a HIGH/LO_SUM,
1102 so that CSE can see everything and reuse intermediate
1103 values if it wants. */
1104 emit_insn (gen_rtx_SET (VOIDmode, temp,
1105 GEN_INT (INTVAL (op1)
1106 & ~(HOST_WIDE_INT)0x3ff)));
1107
1108 emit_insn (gen_rtx_SET (VOIDmode,
1109 op0,
1110 gen_rtx_IOR (mode, temp,
1111 GEN_INT (INTVAL (op1) & 0x3ff))));
1112 }
1113 else
1114 {
1115 /* A symbol, emit in the traditional way. */
1116 emit_insn (gen_rtx_SET (VOIDmode, temp,
1117 gen_rtx_HIGH (mode, op1)));
1118 emit_insn (gen_rtx_SET (VOIDmode,
1119 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1120 }
1121 }
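/* Worked example: for op1 == 0x12345678 the CONST_INT path above emits

       sethi   %hi(0x12345678), %temp  ! %temp = 0x12345400
       or      %temp, 0x278, %op0      ! 0x12345400 | 0x278 = 0x12345678

   since %lo() covers the low 10 bits (0x12345678 & 0x3ff == 0x278).  */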
1122
1123 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1124 If TEMP is nonzero, we are forbidden to use any other scratch
1125 registers. Otherwise, we are allowed to generate them as needed.
1126
1127 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1128 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1129
1130 void
1131 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1132 {
1133 rtx temp1, temp2, temp3, temp4, temp5;
1134 rtx ti_temp = 0;
1135
1136 if (temp && GET_MODE (temp) == TImode)
1137 {
1138 ti_temp = temp;
1139 temp = gen_rtx_REG (DImode, REGNO (temp));
1140 }
1141
1142 /* SPARC-V9 code-model support. */
1143 switch (sparc_cmodel)
1144 {
1145 case CM_MEDLOW:
1146 /* The range spanned by all instructions in the object is less
1147 than 2^31 bytes (2GB) and the distance from any instruction
1148 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1149 than 2^31 bytes (2GB).
1150
1151 The executable must be in the low 4TB of the virtual address
1152 space.
1153
1154 sethi %hi(symbol), %temp1
1155 or %temp1, %lo(symbol), %reg */
1156 if (temp)
1157 temp1 = temp; /* op0 is allowed. */
1158 else
1159 temp1 = gen_reg_rtx (DImode);
1160
1161 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1162 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1163 break;
1164
1165 case CM_MEDMID:
1166 /* The range spanned by all instructions in the object is less
1167 than 2^31 bytes (2GB) and the distance from any instruction
1168 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1169 than 2^31 bytes (2GB).
1170
1171 The executable must be in the low 16TB of the virtual address
1172 space.
1173
1174 sethi %h44(symbol), %temp1
1175 or %temp1, %m44(symbol), %temp2
1176 sllx %temp2, 12, %temp3
1177 or %temp3, %l44(symbol), %reg */
1178 if (temp)
1179 {
1180 temp1 = op0;
1181 temp2 = op0;
1182 temp3 = temp; /* op0 is allowed. */
1183 }
1184 else
1185 {
1186 temp1 = gen_reg_rtx (DImode);
1187 temp2 = gen_reg_rtx (DImode);
1188 temp3 = gen_reg_rtx (DImode);
1189 }
1190
1191 emit_insn (gen_seth44 (temp1, op1));
1192 emit_insn (gen_setm44 (temp2, temp1, op1));
1193 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1194 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1195 emit_insn (gen_setl44 (op0, temp3, op1));
1196 break;
1197
1198 case CM_MEDANY:
1199 /* The range spanned by all instructions in the object is less
1200 than 2^31 bytes (2GB) and the distance from any instruction
1201 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1202 than 2^31 bytes (2GB).
1203
1204 The executable can be placed anywhere in the virtual address
1205 space.
1206
1207 sethi %hh(symbol), %temp1
1208 sethi %lm(symbol), %temp2
1209 or %temp1, %hm(symbol), %temp3
1210 sllx %temp3, 32, %temp4
1211 or %temp4, %temp2, %temp5
1212 or %temp5, %lo(symbol), %reg */
1213 if (temp)
1214 {
1215 /* It is possible that one of the registers we got for operands[2]
1216 might coincide with that of operands[0] (which is why we made
1217 it TImode). Pick the other one to use as our scratch. */
1218 if (rtx_equal_p (temp, op0))
1219 {
1220 gcc_assert (ti_temp);
1221 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1222 }
1223 temp1 = op0;
1224 temp2 = temp; /* op0 is _not_ allowed, see above. */
1225 temp3 = op0;
1226 temp4 = op0;
1227 temp5 = op0;
1228 }
1229 else
1230 {
1231 temp1 = gen_reg_rtx (DImode);
1232 temp2 = gen_reg_rtx (DImode);
1233 temp3 = gen_reg_rtx (DImode);
1234 temp4 = gen_reg_rtx (DImode);
1235 temp5 = gen_reg_rtx (DImode);
1236 }
1237
1238 emit_insn (gen_sethh (temp1, op1));
1239 emit_insn (gen_setlm (temp2, op1));
1240 emit_insn (gen_sethm (temp3, temp1, op1));
1241 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1242 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1243 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1244 gen_rtx_PLUS (DImode, temp4, temp2)));
1245 emit_insn (gen_setlo (op0, temp5, op1));
1246 break;
1247
1248 case CM_EMBMEDANY:
1249 /* Old old old backwards compatibility kruft here.
1250 Essentially it is MEDLOW with a fixed 64-bit
1251 virtual base added to all data segment addresses.
1252 Text-segment stuff is computed like MEDANY, we can't
1253 reuse the code above because the relocation knobs
1254 look different.
1255
1256 Data segment: sethi %hi(symbol), %temp1
1257 add %temp1, EMBMEDANY_BASE_REG, %temp2
1258 or %temp2, %lo(symbol), %reg */
1259 if (data_segment_operand (op1, GET_MODE (op1)))
1260 {
1261 if (temp)
1262 {
1263 temp1 = temp; /* op0 is allowed. */
1264 temp2 = op0;
1265 }
1266 else
1267 {
1268 temp1 = gen_reg_rtx (DImode);
1269 temp2 = gen_reg_rtx (DImode);
1270 }
1271
1272 emit_insn (gen_embmedany_sethi (temp1, op1));
1273 emit_insn (gen_embmedany_brsum (temp2, temp1));
1274 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1275 }
1276
1277 /* Text segment: sethi %uhi(symbol), %temp1
1278 sethi %hi(symbol), %temp2
1279 or %temp1, %ulo(symbol), %temp3
1280 sllx %temp3, 32, %temp4
1281 or %temp4, %temp2, %temp5
1282 or %temp5, %lo(symbol), %reg */
1283 else
1284 {
1285 if (temp)
1286 {
1287 /* It is possible that one of the registers we got for operands[2]
1288 might coincide with that of operands[0] (which is why we made
1289 it TImode). Pick the other one to use as our scratch. */
1290 if (rtx_equal_p (temp, op0))
1291 {
1292 gcc_assert (ti_temp);
1293 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1294 }
1295 temp1 = op0;
1296 temp2 = temp; /* op0 is _not_ allowed, see above. */
1297 temp3 = op0;
1298 temp4 = op0;
1299 temp5 = op0;
1300 }
1301 else
1302 {
1303 temp1 = gen_reg_rtx (DImode);
1304 temp2 = gen_reg_rtx (DImode);
1305 temp3 = gen_reg_rtx (DImode);
1306 temp4 = gen_reg_rtx (DImode);
1307 temp5 = gen_reg_rtx (DImode);
1308 }
1309
1310 emit_insn (gen_embmedany_textuhi (temp1, op1));
1311 emit_insn (gen_embmedany_texthi (temp2, op1));
1312 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1313 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1314 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1315 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1316 gen_rtx_PLUS (DImode, temp4, temp2)));
1317 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1318 }
1319 break;
1320
1321 default:
1322 gcc_unreachable ();
1323 }
1324 }
1325
1326 #if HOST_BITS_PER_WIDE_INT == 32
1327 void
1328 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1329 {
1330 gcc_unreachable ();
1331 }
1332 #else
1333 /* These avoid problems when cross compiling. If we do not
1334 go through all this hair then the optimizer will see
1335 invalid REG_EQUAL notes or in some cases none at all. */
1336 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1337 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1338 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1339 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1340
1341 /* The optimizer is not allowed to assume anything about exactly
1342 which bits are set for a HIGH; they are unspecified.
1343 Unfortunately this leads to many missed optimizations
1344 during CSE. So we mask out the non-HIGH bits and match
1345 a plain movdi, to alleviate this problem. */
1346 static rtx
1347 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1348 {
1349 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1350 }
1351
1352 static rtx
1353 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1354 {
1355 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1356 }
1357
1358 static rtx
1359 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1360 {
1361 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1362 }
1363
1364 static rtx
1365 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1366 {
1367 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1368 }
1369
1370 /* Worker routines for 64-bit constant formation on arch64.
1371 One of the key things to do in these emissions is to
1372 create as many temp REGs as possible. This makes it
1373 possible for half-built constants to be reused later
1374 when similar values are required again.
1375 Without doing this, the optimizer cannot see such
1376 opportunities. */
1377
1378 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1379 unsigned HOST_WIDE_INT, int);
1380
1381 static void
1382 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1383 unsigned HOST_WIDE_INT low_bits, int is_neg)
1384 {
1385 unsigned HOST_WIDE_INT high_bits;
1386
1387 if (is_neg)
1388 high_bits = (~low_bits) & 0xffffffff;
1389 else
1390 high_bits = low_bits;
1391
1392 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1393 if (!is_neg)
1394 {
1395 emit_insn (gen_rtx_SET (VOIDmode, op0,
1396 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1397 }
1398 else
1399 {
1400 /* If we are XOR'ing with -1, then we should emit a one's complement
1401 instead. This way the combiner will notice logical operations
1402 such as ANDN later on and substitute. */
1403 if ((low_bits & 0x3ff) == 0x3ff)
1404 {
1405 emit_insn (gen_rtx_SET (VOIDmode, op0,
1406 gen_rtx_NOT (DImode, temp)));
1407 }
1408 else
1409 {
1410 emit_insn (gen_rtx_SET (VOIDmode, op0,
1411 gen_safe_XOR64 (temp,
1412 (-(HOST_WIDE_INT)0x400
1413 | (low_bits & 0x3ff)))));
1414 }
1415 }
1416 }
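/* Illustrative example of the is_neg path above: to load -4096
   (0xfffffffffffff000), high_bits = ~0xfffff000 & 0xffffffff = 0xfff,
   so we emit

       sethi   %hi(0xfff), %temp      ! %temp = 0xc00
       xor     %temp, -0x400, %op0    ! 0xc00 ^ 0xfffffffffffffc00

   which gives 0xfffffffffffff000, the sign-extended XOR constant
   supplying all of the upper one bits.  */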
1417
1418 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1419 unsigned HOST_WIDE_INT, int);
1420
1421 static void
1422 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1423 unsigned HOST_WIDE_INT high_bits,
1424 unsigned HOST_WIDE_INT low_immediate,
1425 int shift_count)
1426 {
1427 rtx temp2 = op0;
1428
1429 if ((high_bits & 0xfffffc00) != 0)
1430 {
1431 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1432 if ((high_bits & ~0xfffffc00) != 0)
1433 emit_insn (gen_rtx_SET (VOIDmode, op0,
1434 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1435 else
1436 temp2 = temp;
1437 }
1438 else
1439 {
1440 emit_insn (gen_safe_SET64 (temp, high_bits));
1441 temp2 = temp;
1442 }
1443
1444 /* Now shift it up into place. */
1445 emit_insn (gen_rtx_SET (VOIDmode, op0,
1446 gen_rtx_ASHIFT (DImode, temp2,
1447 GEN_INT (shift_count))));
1448
1449 /* If there is a low immediate part piece, finish up by
1450 putting that in as well. */
1451 if (low_immediate != 0)
1452 emit_insn (gen_rtx_SET (VOIDmode, op0,
1453 gen_safe_OR64 (op0, low_immediate)));
1454 }
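/* Illustrative example: loading 0x1234567800000000 via
   sparc_emit_set_const64_quick2 (op0, temp, 0x12345678, 0, 32) emits

       sethi   %hi(0x12345678), %temp  ! 0x12345400
       or      %temp, 0x278, %op0      ! 0x12345678
       sllx    %op0, 32, %op0          ! 0x1234567800000000  */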
1455
1456 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1457 unsigned HOST_WIDE_INT);
1458
1459 /* Full 64-bit constant decomposition. Even though this is the
1460 'worst' case, we still optimize a few things away. */
1461 static void
1462 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1463 unsigned HOST_WIDE_INT high_bits,
1464 unsigned HOST_WIDE_INT low_bits)
1465 {
1466 rtx sub_temp;
1467
1468 if (reload_in_progress || reload_completed)
1469 sub_temp = op0;
1470 else
1471 sub_temp = gen_reg_rtx (DImode);
1472
1473 if ((high_bits & 0xfffffc00) != 0)
1474 {
1475 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1476 if ((high_bits & ~0xfffffc00) != 0)
1477 emit_insn (gen_rtx_SET (VOIDmode,
1478 sub_temp,
1479 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1480 else
1481 sub_temp = temp;
1482 }
1483 else
1484 {
1485 emit_insn (gen_safe_SET64 (temp, high_bits));
1486 sub_temp = temp;
1487 }
1488
1489 if (!reload_in_progress && !reload_completed)
1490 {
1491 rtx temp2 = gen_reg_rtx (DImode);
1492 rtx temp3 = gen_reg_rtx (DImode);
1493 rtx temp4 = gen_reg_rtx (DImode);
1494
1495 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1496 gen_rtx_ASHIFT (DImode, sub_temp,
1497 GEN_INT (32))));
1498
1499 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1500 if ((low_bits & ~0xfffffc00) != 0)
1501 {
1502 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1503 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1504 emit_insn (gen_rtx_SET (VOIDmode, op0,
1505 gen_rtx_PLUS (DImode, temp4, temp3)));
1506 }
1507 else
1508 {
1509 emit_insn (gen_rtx_SET (VOIDmode, op0,
1510 gen_rtx_PLUS (DImode, temp4, temp2)));
1511 }
1512 }
1513 else
1514 {
1515 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1516 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1517 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1518 int to_shift = 12;
1519
1520 /* We are in the middle of reload, so this is really
1521 painful. However we do still make an attempt to
1522 avoid emitting truly stupid code. */
1523 if (low1 != const0_rtx)
1524 {
1525 emit_insn (gen_rtx_SET (VOIDmode, op0,
1526 gen_rtx_ASHIFT (DImode, sub_temp,
1527 GEN_INT (to_shift))));
1528 emit_insn (gen_rtx_SET (VOIDmode, op0,
1529 gen_rtx_IOR (DImode, op0, low1)));
1530 sub_temp = op0;
1531 to_shift = 12;
1532 }
1533 else
1534 {
1535 to_shift += 12;
1536 }
1537 if (low2 != const0_rtx)
1538 {
1539 emit_insn (gen_rtx_SET (VOIDmode, op0,
1540 gen_rtx_ASHIFT (DImode, sub_temp,
1541 GEN_INT (to_shift))));
1542 emit_insn (gen_rtx_SET (VOIDmode, op0,
1543 gen_rtx_IOR (DImode, op0, low2)));
1544 sub_temp = op0;
1545 to_shift = 8;
1546 }
1547 else
1548 {
1549 to_shift += 8;
1550 }
1551 emit_insn (gen_rtx_SET (VOIDmode, op0,
1552 gen_rtx_ASHIFT (DImode, sub_temp,
1553 GEN_INT (to_shift))));
1554 if (low3 != const0_rtx)
1555 emit_insn (gen_rtx_SET (VOIDmode, op0,
1556 gen_rtx_IOR (DImode, op0, low3)));
1557 /* phew... */
1558 }
1559 }
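/* Outside of reload the code above thus emits at most six insns:
   sethi/or to build high_bits, sllx by 32, sethi/or to build
   low_bits in a second register, and a final add.  */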
1560
1561 /* Analyze a 64-bit constant for certain properties. */
1562 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1563 unsigned HOST_WIDE_INT,
1564 int *, int *, int *);
1565
1566 static void
1567 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1568 unsigned HOST_WIDE_INT low_bits,
1569 int *hbsp, int *lbsp, int *abbasp)
1570 {
1571 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1572 int i;
1573
1574 lowest_bit_set = highest_bit_set = -1;
1575 i = 0;
1576 do
1577 {
1578 if ((lowest_bit_set == -1)
1579 && ((low_bits >> i) & 1))
1580 lowest_bit_set = i;
1581 if ((highest_bit_set == -1)
1582 && ((high_bits >> (32 - i - 1)) & 1))
1583 highest_bit_set = (64 - i - 1);
1584 }
1585 while (++i < 32
1586 && ((highest_bit_set == -1)
1587 || (lowest_bit_set == -1)));
1588 if (i == 32)
1589 {
1590 i = 0;
1591 do
1592 {
1593 if ((lowest_bit_set == -1)
1594 && ((high_bits >> i) & 1))
1595 lowest_bit_set = i + 32;
1596 if ((highest_bit_set == -1)
1597 && ((low_bits >> (32 - i - 1)) & 1))
1598 highest_bit_set = 32 - i - 1;
1599 }
1600 while (++i < 32
1601 && ((highest_bit_set == -1)
1602 || (lowest_bit_set == -1)));
1603 }
1604 /* If there are no bits set this should have gone out
1605 as one instruction! */
1606 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1607 all_bits_between_are_set = 1;
1608 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1609 {
1610 if (i < 32)
1611 {
1612 if ((low_bits & (1 << i)) != 0)
1613 continue;
1614 }
1615 else
1616 {
1617 if ((high_bits & (1 << (i - 32))) != 0)
1618 continue;
1619 }
1620 all_bits_between_are_set = 0;
1621 break;
1622 }
1623 *hbsp = highest_bit_set;
1624 *lbsp = lowest_bit_set;
1625 *abbasp = all_bits_between_are_set;
1626 }
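/* Worked example: for 0x0000000000ff0000 (high_bits == 0,
   low_bits == 0x00ff0000) the loops above compute
   lowest_bit_set == 16, highest_bit_set == 23 and
   all_bits_between_are_set == 1.  */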
1627
1628 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1629
1630 static int
1631 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1632 unsigned HOST_WIDE_INT low_bits)
1633 {
1634 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1635
1636 if (high_bits == 0
1637 || high_bits == 0xffffffff)
1638 return 1;
1639
1640 analyze_64bit_constant (high_bits, low_bits,
1641 &highest_bit_set, &lowest_bit_set,
1642 &all_bits_between_are_set);
1643
1644 if ((highest_bit_set == 63
1645 || lowest_bit_set == 0)
1646 && all_bits_between_are_set != 0)
1647 return 1;
1648
1649 if ((highest_bit_set - lowest_bit_set) < 21)
1650 return 1;
1651
1652 return 0;
1653 }
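/* E.g. any constant whose upper 32 bits are all zeros or all ones is
   2-insn material, as is any contiguous run of bits touching bit 0 or
   bit 63, or any value whose set bits span fewer than 21 positions
   (sethi plus a shift).  */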
1654
1655 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1656 unsigned HOST_WIDE_INT,
1657 int, int);
1658
1659 static unsigned HOST_WIDE_INT
1660 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1661 unsigned HOST_WIDE_INT low_bits,
1662 int lowest_bit_set, int shift)
1663 {
1664 HOST_WIDE_INT hi, lo;
1665
1666 if (lowest_bit_set < 32)
1667 {
1668 lo = (low_bits >> lowest_bit_set) << shift;
1669 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1670 }
1671 else
1672 {
1673 lo = 0;
1674 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1675 }
1676 gcc_assert (! (hi & lo));
1677 return (hi | lo);
1678 }
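/* Worked example: with high_bits == 0, low_bits == 0x00ff0000,
   lowest_bit_set == 16 and shift == 10, this returns
   (0x00ff0000 >> 16) << 10 == 0x3fc00, i.e. the set bits moved into
   the 22-bit sethi window that starts at bit 10.  */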
1679
1680 /* Here we are sure to be arch64 and this is an integer constant
1681 being loaded into a register. Emit the most efficient
1682 insn sequence possible. Detection of all the 1-insn cases
1683 has been done already. */
1684 void
1685 sparc_emit_set_const64 (rtx op0, rtx op1)
1686 {
1687 unsigned HOST_WIDE_INT high_bits, low_bits;
1688 int lowest_bit_set, highest_bit_set;
1689 int all_bits_between_are_set;
1690 rtx temp = 0;
1691
1692 /* Sanity check that we know what we are working with. */
1693 gcc_assert (TARGET_ARCH64
1694 && (GET_CODE (op0) == SUBREG
1695 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1696
1697 if (reload_in_progress || reload_completed)
1698 temp = op0;
1699
1700 if (GET_CODE (op1) != CONST_INT)
1701 {
1702 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1703 return;
1704 }
1705
1706 if (! temp)
1707 temp = gen_reg_rtx (DImode);
1708
1709 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1710 low_bits = (INTVAL (op1) & 0xffffffff);
1711
1712 /* low_bits bits 0 --> 31
1713 high_bits bits 32 --> 63 */
1714
1715 analyze_64bit_constant (high_bits, low_bits,
1716 &highest_bit_set, &lowest_bit_set,
1717 &all_bits_between_are_set);
1718
1719 /* First try for a 2-insn sequence. */
1720
1721 /* These situations are preferred because the optimizer can
1722 * do more things with them:
1723 * 1) mov -1, %reg
1724 * sllx %reg, shift, %reg
1725 * 2) mov -1, %reg
1726 * srlx %reg, shift, %reg
1727 * 3) mov some_small_const, %reg
1728 * sllx %reg, shift, %reg
1729 */
1730 if (((highest_bit_set == 63
1731 || lowest_bit_set == 0)
1732 && all_bits_between_are_set != 0)
1733 || ((highest_bit_set - lowest_bit_set) < 12))
1734 {
1735 HOST_WIDE_INT the_const = -1;
1736 int shift = lowest_bit_set;
1737
1738 if ((highest_bit_set != 63
1739 && lowest_bit_set != 0)
1740 || all_bits_between_are_set == 0)
1741 {
1742 the_const =
1743 create_simple_focus_bits (high_bits, low_bits,
1744 lowest_bit_set, 0);
1745 }
1746 else if (lowest_bit_set == 0)
1747 shift = -(63 - highest_bit_set);
1748
1749 gcc_assert (SPARC_SIMM13_P (the_const));
1750 gcc_assert (shift != 0);
1751
1752 emit_insn (gen_safe_SET64 (temp, the_const));
1753 if (shift > 0)
1754 emit_insn (gen_rtx_SET (VOIDmode,
1755 op0,
1756 gen_rtx_ASHIFT (DImode,
1757 temp,
1758 GEN_INT (shift))));
1759 else if (shift < 0)
1760 emit_insn (gen_rtx_SET (VOIDmode,
1761 op0,
1762 gen_rtx_LSHIFTRT (DImode,
1763 temp,
1764 GEN_INT (-shift))));
1765 return;
1766 }
1767
1768 /* Now a range of 22 or fewer bits set somewhere.
1769 * 1) sethi %hi(focus_bits), %reg
1770 * sllx %reg, shift, %reg
1771 * 2) sethi %hi(focus_bits), %reg
1772 * srlx %reg, shift, %reg
1773 */
1774 if ((highest_bit_set - lowest_bit_set) < 21)
1775 {
1776 unsigned HOST_WIDE_INT focus_bits =
1777 create_simple_focus_bits (high_bits, low_bits,
1778 lowest_bit_set, 10);
1779
1780 gcc_assert (SPARC_SETHI_P (focus_bits));
1781 gcc_assert (lowest_bit_set != 10);
1782
1783 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1784
1785 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1786 if (lowest_bit_set < 10)
1787 emit_insn (gen_rtx_SET (VOIDmode,
1788 op0,
1789 gen_rtx_LSHIFTRT (DImode, temp,
1790 GEN_INT (10 - lowest_bit_set))));
1791 else if (lowest_bit_set > 10)
1792 emit_insn (gen_rtx_SET (VOIDmode,
1793 op0,
1794 gen_rtx_ASHIFT (DImode, temp,
1795 GEN_INT (lowest_bit_set - 10))));
1796 return;
1797 }
1798
1799 /* 1) sethi %hi(low_bits), %reg
1800 * or %reg, %lo(low_bits), %reg
1801 * 2) sethi %hi(~low_bits), %reg
1802 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1803 */
1804 if (high_bits == 0
1805 || high_bits == 0xffffffff)
1806 {
1807 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1808 (high_bits == 0xffffffff));
1809 return;
1810 }
1811
1812 /* Now, try 3-insn sequences. */
1813
1814 /* 1) sethi %hi(high_bits), %reg
1815 * or %reg, %lo(high_bits), %reg
1816 * sllx %reg, 32, %reg
1817 */
1818 if (low_bits == 0)
1819 {
1820 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1821 return;
1822 }
1823
1824 /* We may be able to do something quick
1825 when the constant is negated, so try that. */
1826 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1827 (~low_bits) & 0xfffffc00))
1828 {
1829 /* NOTE: The trailing bits get XOR'd so we need the
1830 non-negated bits, not the negated ones. */
1831 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1832
1833 if ((((~high_bits) & 0xffffffff) == 0
1834 && ((~low_bits) & 0x80000000) == 0)
1835 || (((~high_bits) & 0xffffffff) == 0xffffffff
1836 && ((~low_bits) & 0x80000000) != 0))
1837 {
1838 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1839
1840 if ((SPARC_SETHI_P (fast_int)
1841 && (~high_bits & 0xffffffff) == 0)
1842 || SPARC_SIMM13_P (fast_int))
1843 emit_insn (gen_safe_SET64 (temp, fast_int));
1844 else
1845 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1846 }
1847 else
1848 {
1849 rtx negated_const;
1850 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1851 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1852 sparc_emit_set_const64 (temp, negated_const);
1853 }
1854
1855 /* If we are XOR'ing with -1, then we should emit a one's complement
1856 instead. This way the combiner will notice logical operations
1857 such as ANDN later on and substitute. */
1858 if (trailing_bits == 0x3ff)
1859 {
1860 emit_insn (gen_rtx_SET (VOIDmode, op0,
1861 gen_rtx_NOT (DImode, temp)));
1862 }
1863 else
1864 {
1865 emit_insn (gen_rtx_SET (VOIDmode,
1866 op0,
1867 gen_safe_XOR64 (temp,
1868 (-0x400 | trailing_bits))));
1869 }
1870 return;
1871 }
1872
1873 /* 1) sethi %hi(xxx), %reg
1874 * or %reg, %lo(xxx), %reg
1875 * sllx %reg, yyy, %reg
1876 *
1877 * ??? This is just a generalized version of the low_bits==0
1878 * thing above, FIXME...
1879 */
1880 if ((highest_bit_set - lowest_bit_set) < 32)
1881 {
1882 unsigned HOST_WIDE_INT focus_bits =
1883 create_simple_focus_bits (high_bits, low_bits,
1884 lowest_bit_set, 0);
1885
1886 /* We can't get here in this state. */
1887 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1888
1889 /* So what we know is that the set bits straddle the
1890 middle of the 64-bit word. */
1891 sparc_emit_set_const64_quick2 (op0, temp,
1892 focus_bits, 0,
1893 lowest_bit_set);
1894 return;
1895 }
1896
1897 /* 1) sethi %hi(high_bits), %reg
1898 * or %reg, %lo(high_bits), %reg
1899 * sllx %reg, 32, %reg
1900 * or %reg, low_bits, %reg
1901 */
1902 if (SPARC_SIMM13_P(low_bits)
1903 && ((int)low_bits > 0))
1904 {
1905 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1906 return;
1907 }
1908
1909 /* The easiest way when all else fails, is full decomposition. */
1910 #if 0
1911 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1912 high_bits, low_bits, ~high_bits, ~low_bits);
1913 #endif
1914 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1915 }
1916 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
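
/* Illustrative sketch (not part of the compiler): a standalone model of
   the 2-insn "mov small_const, %reg; sllx %reg, shift, %reg" case that
   sparc_emit_set_const64 handles above.  The helper name and types are
   hypothetical, and it only models sub-case 3 of the comment in that
   function.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
model_mov_sllx (uint64_t c, int64_t *the_const, int *shift)
{
  int lowest = 0, highest = 63;

  assert (c != 0);
  while (!((c >> lowest) & 1))
    lowest++;
  while (!((c >> highest) & 1))
    highest--;
  /* The span must be narrow enough for a simm13 (span < 12).  */
  assert (highest - lowest < 12);
  *the_const = (int64_t) (c >> lowest);	/* focus bits moved down to bit 0 */
  *shift = lowest;
}

/* E.g. c = 0x0000001ff0000000 gives lowest = 28, highest = 36, so the
   emitted sequence is "mov 0x1ff, %reg; sllx %reg, 28, %reg".  */
#endif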
1917
1918 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1919 return the mode to be used for the comparison. For floating-point,
1920 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1921 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1922 processing is needed. */
1923
1924 enum machine_mode
1925 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1926 {
1927 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1928 {
1929 switch (op)
1930 {
1931 case EQ:
1932 case NE:
1933 case UNORDERED:
1934 case ORDERED:
1935 case UNLT:
1936 case UNLE:
1937 case UNGT:
1938 case UNGE:
1939 case UNEQ:
1940 case LTGT:
1941 return CCFPmode;
1942
1943 case LT:
1944 case LE:
1945 case GT:
1946 case GE:
1947 return CCFPEmode;
1948
1949 default:
1950 gcc_unreachable ();
1951 }
1952 }
1953 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1954 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1955 {
1956 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1957 return CCX_NOOVmode;
1958 else
1959 return CC_NOOVmode;
1960 }
1961 else
1962 {
1963 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1964 return CCXmode;
1965 else
1966 return CCmode;
1967 }
1968 }
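
/* A minimal sketch (illustrative, not compiler code) of the FP split in
   select_cc_mode above: the ordered relational codes must use the
   signaling compare (CCFPEmode, i.e. fcmpe), because IEEE requires the
   invalid exception when such operands are unordered; equality and the
   UN* codes get by with the quiet compare (CCFPmode, i.e. fcmp).  */
#if 0
static int
needs_signaling_fp_compare (enum rtx_code op)
{
  return op == LT || op == LE || op == GT || op == GE;
}
#endif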
1969
1970 /* X and Y are two things to compare using CODE. Emit the compare insn and
1971 return the rtx for the cc reg in the proper mode. */
1972
1973 rtx
1974 gen_compare_reg (enum rtx_code code)
1975 {
1976 rtx x = sparc_compare_op0;
1977 rtx y = sparc_compare_op1;
1978 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1979 rtx cc_reg;
1980
1981 if (sparc_compare_emitted != NULL_RTX)
1982 {
1983 cc_reg = sparc_compare_emitted;
1984 sparc_compare_emitted = NULL_RTX;
1985 return cc_reg;
1986 }
1987
1988 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1989 fcc regs (cse can't tell they're really call clobbered regs and will
1990 remove a duplicate comparison even if there is an intervening function
1991 call - it will then try to reload the cc reg via an int reg which is why
1992 we need the movcc patterns). It is possible to provide the movcc
1993 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
1994 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
1995 to tell cse that CCFPE mode registers (even pseudos) are call
1996 clobbered. */
1997
1998 /* ??? This is an experiment. Rather than making changes to cse which may
1999 or may not be easy/clean, we do our own cse. This is possible because
2000 we will generate hard registers. Cse knows they're call clobbered (it
2001 doesn't know the same thing about pseudos). If we guess wrong, no big
2002 deal, but if we win, great! */
2003
2004 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2005 #if 1 /* experiment */
2006 {
2007 int reg;
2008 /* We cycle through the registers to ensure they're all exercised. */
2009 static int next_fcc_reg = 0;
2010 /* Previous x,y for each fcc reg. */
2011 static rtx prev_args[4][2];
2012
2013 /* Scan prev_args for x,y. */
2014 for (reg = 0; reg < 4; reg++)
2015 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2016 break;
2017 if (reg == 4)
2018 {
2019 reg = next_fcc_reg;
2020 prev_args[reg][0] = x;
2021 prev_args[reg][1] = y;
2022 next_fcc_reg = (next_fcc_reg + 1) & 3;
2023 }
2024 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2025 }
2026 #else
2027 cc_reg = gen_reg_rtx (mode);
2028 #endif /* ! experiment */
2029 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2030 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2031 else
2032 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2033
2034 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2035 gen_rtx_COMPARE (mode, x, y)));
2036
2037 return cc_reg;
2038 }
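
/* The "do our own cse" experiment above, lifted out as a standalone
   sketch for clarity (illustrative only; the helper name is made up):
   remember the operands last assigned to each of the four %fcc
   registers and reuse a register whose operands match, so cse can
   delete the duplicate compare; otherwise claim the next register
   round-robin.  */
#if 0
static int
pick_fcc_reg (void *x, void *y)
{
  static void *prev_args[4][2];
  static int next_fcc_reg;
  int reg;

  for (reg = 0; reg < 4; reg++)
    if (prev_args[reg][0] == x && prev_args[reg][1] == y)
      return reg;	/* Hit: reuse it, the duplicate compare can die.  */

  reg = next_fcc_reg;
  prev_args[reg][0] = x;
  prev_args[reg][1] = y;
  next_fcc_reg = (next_fcc_reg + 1) & 3;
  return reg;
}
#endif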
2039
2040 /* This function is used for v9 only.
2041 CODE is the code for an Scc's comparison.
2042 OPERANDS[0] is the target of the Scc insn.
2043 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2044 been generated yet).
2045
2046 This function is needed to turn
2047
2048 (set (reg:SI 110)
2049 (gt (reg:CCX 100 %icc)
2050 (const_int 0)))
2051 into
2052 (set (reg:SI 110)
2053 (gt:DI (reg:CCX 100 %icc)
2054 (const_int 0)))
2055
2056 I.e., the instruction recognizer needs to see the mode of the comparison to
2057 find the right instruction. We could use "gt:DI" right in the
2058 define_expand, but leaving it out allows us to handle DI, SI, etc.
2059
2060 We refer to the global sparc compare operands sparc_compare_op0 and
2061 sparc_compare_op1. */
2062
2063 int
2064 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2065 {
2066 if (! TARGET_ARCH64
2067 && (GET_MODE (sparc_compare_op0) == DImode
2068 || GET_MODE (operands[0]) == DImode))
2069 return 0;
2070
2071 /* Try to use the movrCC insns. */
2072 if (TARGET_ARCH64
2073 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2074 && sparc_compare_op1 == const0_rtx
2075 && v9_regcmp_p (compare_code))
2076 {
2077 rtx op0 = sparc_compare_op0;
2078 rtx temp;
2079
2080 /* Special case for op0 != 0. This can be done with one instruction if
2081 operands[0] == sparc_compare_op0. */
2082
2083 if (compare_code == NE
2084 && GET_MODE (operands[0]) == DImode
2085 && rtx_equal_p (op0, operands[0]))
2086 {
2087 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2088 gen_rtx_IF_THEN_ELSE (DImode,
2089 gen_rtx_fmt_ee (compare_code, DImode,
2090 op0, const0_rtx),
2091 const1_rtx,
2092 operands[0])));
2093 return 1;
2094 }
2095
2096 if (reg_overlap_mentioned_p (operands[0], op0))
2097 {
2098 /* Handle the case where operands[0] == sparc_compare_op0.
2099 We "early clobber" the result. */
2100 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2101 emit_move_insn (op0, sparc_compare_op0);
2102 }
2103
2104 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2105 if (GET_MODE (op0) != DImode)
2106 {
2107 temp = gen_reg_rtx (DImode);
2108 convert_move (temp, op0, 0);
2109 }
2110 else
2111 temp = op0;
2112 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2113 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2114 gen_rtx_fmt_ee (compare_code, DImode,
2115 temp, const0_rtx),
2116 const1_rtx,
2117 operands[0])));
2118 return 1;
2119 }
2120 else
2121 {
2122 operands[1] = gen_compare_reg (compare_code);
2123
2124 switch (GET_MODE (operands[1]))
2125 {
2126 case CCmode :
2127 case CCXmode :
2128 case CCFPEmode :
2129 case CCFPmode :
2130 break;
2131 default :
2132 gcc_unreachable ();
2133 }
2134 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2135 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2136 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2137 gen_rtx_fmt_ee (compare_code,
2138 GET_MODE (operands[1]),
2139 operands[1], const0_rtx),
2140 const1_rtx, operands[0])));
2141 return 1;
2142 }
2143 }
2144
2145 /* Emit a conditional jump insn for the v9 architecture using comparison code
2146 CODE and jump target LABEL.
2147 This function exists to take advantage of the v9 brxx insns. */
2148
2149 void
2150 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2151 {
2152 gcc_assert (sparc_compare_emitted == NULL_RTX);
2153 emit_jump_insn (gen_rtx_SET (VOIDmode,
2154 pc_rtx,
2155 gen_rtx_IF_THEN_ELSE (VOIDmode,
2156 gen_rtx_fmt_ee (code, GET_MODE (op0),
2157 op0, const0_rtx),
2158 gen_rtx_LABEL_REF (VOIDmode, label),
2159 pc_rtx)));
2160 }
2161
2162 /* Generate a DFmode part of a hard TFmode register.
2163 REG is the TFmode hard register, LOW is 1 for the
2164 low 64 bits of the register and 0 otherwise.
2165 */
2166 rtx
2167 gen_df_reg (rtx reg, int low)
2168 {
2169 int regno = REGNO (reg);
2170
2171 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2172 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2173 return gen_rtx_REG (DFmode, regno);
2174 }
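
/* Worked example for gen_df_reg (SPARC is big-endian, so the low half
   lives at the higher register number): a TFmode value in %f0 has its
   high DFmode part in %f0 and its low part in %f2; on arch64, a TFmode
   value in an integer register pair has its low part one register up.
   A sketch of the regno arithmetic, illustrative only:  */
#if 0
static int
df_part_regno (int regno, int low, int arch64)
{
  if (low)	/* Big-endian: the low word is the "later" register.  */
    regno += (arch64 && regno < 32) ? 1 : 2;
  return regno;
}
#endif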
2175 \f
2176 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2177 Unlike normal calls, TFmode operands are passed by reference. It is
2178 assumed that no more than 3 operands are required. */
2179
2180 static void
2181 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2182 {
2183 rtx ret_slot = NULL, arg[3], func_sym;
2184 int i;
2185
2186 /* We only expect to be called for conversions, unary, and binary ops. */
2187 gcc_assert (nargs == 2 || nargs == 3);
2188
2189 for (i = 0; i < nargs; ++i)
2190 {
2191 rtx this_arg = operands[i];
2192 rtx this_slot;
2193
2194 /* TFmode arguments and return values are passed by reference. */
2195 if (GET_MODE (this_arg) == TFmode)
2196 {
2197 int force_stack_temp;
2198
2199 force_stack_temp = 0;
2200 if (TARGET_BUGGY_QP_LIB && i == 0)
2201 force_stack_temp = 1;
2202
2203 if (GET_CODE (this_arg) == MEM
2204 && ! force_stack_temp)
2205 this_arg = XEXP (this_arg, 0);
2206 else if (CONSTANT_P (this_arg)
2207 && ! force_stack_temp)
2208 {
2209 this_slot = force_const_mem (TFmode, this_arg);
2210 this_arg = XEXP (this_slot, 0);
2211 }
2212 else
2213 {
2214 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2215
2216 /* Operand 0 is the return value. We'll copy it out later. */
2217 if (i > 0)
2218 emit_move_insn (this_slot, this_arg);
2219 else
2220 ret_slot = this_slot;
2221
2222 this_arg = XEXP (this_slot, 0);
2223 }
2224 }
2225
2226 arg[i] = this_arg;
2227 }
2228
2229 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2230
2231 if (GET_MODE (operands[0]) == TFmode)
2232 {
2233 if (nargs == 2)
2234 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2235 arg[0], GET_MODE (arg[0]),
2236 arg[1], GET_MODE (arg[1]));
2237 else
2238 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2239 arg[0], GET_MODE (arg[0]),
2240 arg[1], GET_MODE (arg[1]),
2241 arg[2], GET_MODE (arg[2]));
2242
2243 if (ret_slot)
2244 emit_move_insn (operands[0], ret_slot);
2245 }
2246 else
2247 {
2248 rtx ret;
2249
2250 gcc_assert (nargs == 2);
2251
2252 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2253 GET_MODE (operands[0]), 1,
2254 arg[1], GET_MODE (arg[1]));
2255
2256 if (ret != operands[0])
2257 emit_move_insn (operands[0], ret);
2258 }
2259 }
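
/* What the pass-by-reference convention above amounts to at the source
   level, sketched in C (illustrative; the prototype follows the shape of
   the SPARC soft-quad routines, result pointer first, but check the ABI
   document for the authoritative signatures):  */
#if 0
extern void _Qp_add (long double *result, long double *a, long double *b);

static long double
model_tf_add (long double x, long double y)
{
  long double r;

  _Qp_add (&r, &x, &y);	/* Operands and result passed by reference.  */
  return r;
}
#endif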
2260
2261 /* Expand soft-float TFmode calls to SPARC ABI routines.  */
2262
2263 static void
2264 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2265 {
2266 const char *func;
2267
2268 switch (code)
2269 {
2270 case PLUS:
2271 func = "_Qp_add";
2272 break;
2273 case MINUS:
2274 func = "_Qp_sub";
2275 break;
2276 case MULT:
2277 func = "_Qp_mul";
2278 break;
2279 case DIV:
2280 func = "_Qp_div";
2281 break;
2282 default:
2283 gcc_unreachable ();
2284 }
2285
2286 emit_soft_tfmode_libcall (func, 3, operands);
2287 }
2288
2289 static void
2290 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2291 {
2292 const char *func;
2293
2294 gcc_assert (code == SQRT);
2295 func = "_Qp_sqrt";
2296
2297 emit_soft_tfmode_libcall (func, 2, operands);
2298 }
2299
2300 static void
2301 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2302 {
2303 const char *func;
2304
2305 switch (code)
2306 {
2307 case FLOAT_EXTEND:
2308 switch (GET_MODE (operands[1]))
2309 {
2310 case SFmode:
2311 func = "_Qp_stoq";
2312 break;
2313 case DFmode:
2314 func = "_Qp_dtoq";
2315 break;
2316 default:
2317 gcc_unreachable ();
2318 }
2319 break;
2320
2321 case FLOAT_TRUNCATE:
2322 switch (GET_MODE (operands[0]))
2323 {
2324 case SFmode:
2325 func = "_Qp_qtos";
2326 break;
2327 case DFmode:
2328 func = "_Qp_qtod";
2329 break;
2330 default:
2331 gcc_unreachable ();
2332 }
2333 break;
2334
2335 case FLOAT:
2336 switch (GET_MODE (operands[1]))
2337 {
2338 case SImode:
2339 func = "_Qp_itoq";
2340 break;
2341 case DImode:
2342 func = "_Qp_xtoq";
2343 break;
2344 default:
2345 gcc_unreachable ();
2346 }
2347 break;
2348
2349 case UNSIGNED_FLOAT:
2350 switch (GET_MODE (operands[1]))
2351 {
2352 case SImode:
2353 func = "_Qp_uitoq";
2354 break;
2355 case DImode:
2356 func = "_Qp_uxtoq";
2357 break;
2358 default:
2359 gcc_unreachable ();
2360 }
2361 break;
2362
2363 case FIX:
2364 switch (GET_MODE (operands[0]))
2365 {
2366 case SImode:
2367 func = "_Qp_qtoi";
2368 break;
2369 case DImode:
2370 func = "_Qp_qtox";
2371 break;
2372 default:
2373 gcc_unreachable ();
2374 }
2375 break;
2376
2377 case UNSIGNED_FIX:
2378 switch (GET_MODE (operands[0]))
2379 {
2380 case SImode:
2381 func = "_Qp_qtoui";
2382 break;
2383 case DImode:
2384 func = "_Qp_qtoux";
2385 break;
2386 default:
2387 gcc_unreachable ();
2388 }
2389 break;
2390
2391 default:
2392 gcc_unreachable ();
2393 }
2394
2395 emit_soft_tfmode_libcall (func, 2, operands);
2396 }
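
/* Usage shape of the conversion routines selected above, sketched in C
   (illustrative; as with _Qp_add, verify signatures against the ABI):
   the TFmode operand is passed by reference while the integer result is
   returned directly.  */
#if 0
extern int _Qp_qtoi (long double *a);

static int
model_tf_to_int (long double x)
{
  return _Qp_qtoi (&x);	/* FIX: TFmode -> SImode.  */
}
#endif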
2397
2398 /* Expand a hard-float TFmode operation.  All arguments must be in
2399 registers. */
2400
2401 static void
2402 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2403 {
2404 rtx op, dest;
2405
2406 if (GET_RTX_CLASS (code) == RTX_UNARY)
2407 {
2408 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2409 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2410 }
2411 else
2412 {
2413 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2414 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2415 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2416 operands[1], operands[2]);
2417 }
2418
2419 if (register_operand (operands[0], VOIDmode))
2420 dest = operands[0];
2421 else
2422 dest = gen_reg_rtx (GET_MODE (operands[0]));
2423
2424 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2425
2426 if (dest != operands[0])
2427 emit_move_insn (operands[0], dest);
2428 }
2429
2430 void
2431 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2432 {
2433 if (TARGET_HARD_QUAD)
2434 emit_hard_tfmode_operation (code, operands);
2435 else
2436 emit_soft_tfmode_binop (code, operands);
2437 }
2438
2439 void
2440 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2441 {
2442 if (TARGET_HARD_QUAD)
2443 emit_hard_tfmode_operation (code, operands);
2444 else
2445 emit_soft_tfmode_unop (code, operands);
2446 }
2447
2448 void
2449 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2450 {
2451 if (TARGET_HARD_QUAD)
2452 emit_hard_tfmode_operation (code, operands);
2453 else
2454 emit_soft_tfmode_cvt (code, operands);
2455 }
2456 \f
2457 /* Return nonzero if a branch/jump/call instruction will be emitting
2458 a nop into its delay slot.  */
2459
2460 int
2461 empty_delay_slot (rtx insn)
2462 {
2463 rtx seq;
2464
2465 /* If there is no previous instruction (should not happen), return true.  */
2466 if (PREV_INSN (insn) == NULL)
2467 return 1;
2468
2469 seq = NEXT_INSN (PREV_INSN (insn));
2470 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2471 return 0;
2472
2473 return 1;
2474 }
2475
2476 /* Return nonzero if TRIAL can go into the call delay slot. */
2477
2478 int
2479 tls_call_delay (rtx trial)
2480 {
2481 rtx pat;
2482
2483 /* Binutils allows
2484 call __tls_get_addr, %tgd_call (foo)
2485 add %l7, %o0, %o0, %tgd_add (foo)
2486 while Sun as/ld does not. */
2487 if (TARGET_GNU_TLS || !TARGET_TLS)
2488 return 1;
2489
2490 pat = PATTERN (trial);
2491
2492 /* We must reject tgd_add{32|64}, i.e.
2493 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2494 and tldm_add{32|64}, i.e.
2495 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2496 for Sun as/ld. */
2497 if (GET_CODE (pat) == SET
2498 && GET_CODE (SET_SRC (pat)) == PLUS)
2499 {
2500 rtx unspec = XEXP (SET_SRC (pat), 1);
2501
2502 if (GET_CODE (unspec) == UNSPEC
2503 && (XINT (unspec, 1) == UNSPEC_TLSGD
2504 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2505 return 0;
2506 }
2507
2508 return 1;
2509 }
2510
2511 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2512 instruction. RETURN_P is true if the v9 variant 'return' is to be
2513 considered in the test too.
2514
2515 TRIAL must be a SET whose destination is a REG appropriate for the
2516 'restore' instruction or, if RETURN_P is true, for the 'return'
2517 instruction. */
2518
2519 static int
2520 eligible_for_restore_insn (rtx trial, bool return_p)
2521 {
2522 rtx pat = PATTERN (trial);
2523 rtx src = SET_SRC (pat);
2524
2525 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2526 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2527 && arith_operand (src, GET_MODE (src)))
2528 {
2529 if (TARGET_ARCH64)
2530 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2531 else
2532 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2533 }
2534
2535 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2536 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2537 && arith_double_operand (src, GET_MODE (src)))
2538 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2539
2540 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2541 else if (! TARGET_FPU && register_operand (src, SFmode))
2542 return 1;
2543
2544 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2545 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2546 return 1;
2547
2548 /* If we have the 'return' instruction, anything that does not use
2549 local or output registers and can go into a delay slot wins. */
2550 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2551 && (get_attr_in_uncond_branch_delay (trial)
2552 == IN_UNCOND_BRANCH_DELAY_TRUE))
2553 return 1;
2554
2555 /* The 'restore src1,src2,dest' pattern for SImode. */
2556 else if (GET_CODE (src) == PLUS
2557 && register_operand (XEXP (src, 0), SImode)
2558 && arith_operand (XEXP (src, 1), SImode))
2559 return 1;
2560
2561 /* The 'restore src1,src2,dest' pattern for DImode. */
2562 else if (GET_CODE (src) == PLUS
2563 && register_operand (XEXP (src, 0), DImode)
2564 && arith_double_operand (XEXP (src, 1), DImode))
2565 return 1;
2566
2567 /* The 'restore src1,%lo(src2),dest' pattern. */
2568 else if (GET_CODE (src) == LO_SUM
2569 && ! TARGET_CM_MEDMID
2570 && ((register_operand (XEXP (src, 0), SImode)
2571 && immediate_operand (XEXP (src, 1), SImode))
2572 || (TARGET_ARCH64
2573 && register_operand (XEXP (src, 0), DImode)
2574 && immediate_operand (XEXP (src, 1), DImode))))
2575 return 1;
2576
2577 /* The 'restore src,src,dest' pattern. */
2578 else if (GET_CODE (src) == ASHIFT
2579 && (register_operand (XEXP (src, 0), SImode)
2580 || register_operand (XEXP (src, 0), DImode))
2581 && XEXP (src, 1) == const1_rtx)
2582 return 1;
2583
2584 return 0;
2585 }
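
/* Concretely, the patterns accepted above let the delay-slot filler fold
   a final computation into the restore.  Illustrative assembly for
   "return x + y;" in a function with a register window:

	ret
	 restore %i0, %i1, %o0	! the add is done by restore itself

   instead of a separate add followed by a plain "ret; restore".  */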
2586
2587 /* Return nonzero if TRIAL can go into the function return's
2588 delay slot. */
2589
2590 int
2591 eligible_for_return_delay (rtx trial)
2592 {
2593 rtx pat;
2594
2595 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2596 return 0;
2597
2598 if (get_attr_length (trial) != 1)
2599 return 0;
2600
2601 /* If there are any call-saved registers, we should scan TRIAL to see
2602 whether it references any of them.  For now, just play it safe.  */
2603 if (num_gfregs)
2604 return 0;
2605
2606 /* If the function uses __builtin_eh_return, the eh_return machinery
2607 occupies the delay slot. */
2608 if (current_function_calls_eh_return)
2609 return 0;
2610
2611 /* In the case of a true leaf function, anything can go into the slot. */
2612 if (sparc_leaf_function_p)
2613 return get_attr_in_uncond_branch_delay (trial)
2614 == IN_UNCOND_BRANCH_DELAY_TRUE;
2615
2616 pat = PATTERN (trial);
2617
2618 /* Otherwise, only operations which can be done in tandem with
2619 a `restore' or `return' insn can go into the delay slot. */
2620 if (GET_CODE (SET_DEST (pat)) != REG
2621 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2622 return 0;
2623
2624 /* If this instruction sets up a floating-point register and we have a return
2625 instruction, it can probably go in. But restore will not work
2626 with FP_REGS. */
2627 if (REGNO (SET_DEST (pat)) >= 32)
2628 return (TARGET_V9
2629 && ! epilogue_renumber (&pat, 1)
2630 && (get_attr_in_uncond_branch_delay (trial)
2631 == IN_UNCOND_BRANCH_DELAY_TRUE));
2632
2633 return eligible_for_restore_insn (trial, true);
2634 }
2635
2636 /* Return nonzero if TRIAL can go into the sibling call's
2637 delay slot. */
2638
2639 int
2640 eligible_for_sibcall_delay (rtx trial)
2641 {
2642 rtx pat;
2643
2644 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2645 return 0;
2646
2647 if (get_attr_length (trial) != 1)
2648 return 0;
2649
2650 pat = PATTERN (trial);
2651
2652 if (sparc_leaf_function_p)
2653 {
2654 /* If the tail call is done using the call instruction,
2655 we have to restore %o7 in the delay slot. */
2656 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2657 return 0;
2658
2659 /* %g1 is used to build the function address.  */
2660 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2661 return 0;
2662
2663 return 1;
2664 }
2665
2666 /* Otherwise, only operations which can be done in tandem with
2667 a `restore' insn can go into the delay slot. */
2668 if (GET_CODE (SET_DEST (pat)) != REG
2669 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2670 || REGNO (SET_DEST (pat)) >= 32)
2671 return 0;
2672
2673 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2674 in most cases. */
2675 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2676 return 0;
2677
2678 return eligible_for_restore_insn (trial, false);
2679 }
2680
2681 int
2682 short_branch (int uid1, int uid2)
2683 {
2684 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2685
2686 /* Leave a few words of "slop". */
2687 if (delta >= -1023 && delta <= 1022)
2688 return 1;
2689
2690 return 0;
2691 }
2692
2693 /* Return nonzero if REG is not used after INSN.
2694 We assume REG is a reload reg, and therefore does
2695 not live past labels or calls or jumps. */
2696 int
2697 reg_unused_after (rtx reg, rtx insn)
2698 {
2699 enum rtx_code code, prev_code = UNKNOWN;
2700
2701 while ((insn = NEXT_INSN (insn)))
2702 {
2703 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2704 return 1;
2705
2706 code = GET_CODE (insn);
2707 if (GET_CODE (insn) == CODE_LABEL)
2708 return 1;
2709
2710 if (INSN_P (insn))
2711 {
2712 rtx set = single_set (insn);
2713 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2714 if (set && in_src)
2715 return 0;
2716 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2717 return 1;
2718 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2719 return 0;
2720 }
2721 prev_code = code;
2722 }
2723 return 1;
2724 }
2725 \f
2726 /* Determine if it's legal to put X into the constant pool. This
2727 is not possible if X contains the address of a symbol that is
2728 not constant (TLS) or not known at final link time (PIC). */
2729
2730 static bool
2731 sparc_cannot_force_const_mem (rtx x)
2732 {
2733 switch (GET_CODE (x))
2734 {
2735 case CONST_INT:
2736 case CONST_DOUBLE:
2737 case CONST_VECTOR:
2738 /* Accept all non-symbolic constants. */
2739 return false;
2740
2741 case LABEL_REF:
2742 /* Labels are OK iff we are non-PIC. */
2743 return flag_pic != 0;
2744
2745 case SYMBOL_REF:
2746 /* 'Naked' TLS symbol references are never OK;
2747 non-TLS symbols are OK iff we are non-PIC. */
2748 if (SYMBOL_REF_TLS_MODEL (x))
2749 return true;
2750 else
2751 return flag_pic != 0;
2752
2753 case CONST:
2754 return sparc_cannot_force_const_mem (XEXP (x, 0));
2755 case PLUS:
2756 case MINUS:
2757 return sparc_cannot_force_const_mem (XEXP (x, 0))
2758 || sparc_cannot_force_const_mem (XEXP (x, 1));
2759 case UNSPEC:
2760 return true;
2761 default:
2762 gcc_unreachable ();
2763 }
2764 }
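
/* Examples of the classification above (a sketch, using the RTL notation
   of the surrounding comments): a plain (const_int 42) is always
   poolable; under -fpic, (symbol_ref "x") is not, and neither is
   (const (plus (symbol_ref "x") (const_int 8))), which recurses through
   the CONST and PLUS cases down to the SYMBOL_REF case.  */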
2765 \f
2766 /* PIC support. */
2767 static GTY(()) char pic_helper_symbol_name[256];
2768 static GTY(()) rtx pic_helper_symbol;
2769 static GTY(()) bool pic_helper_emitted_p = false;
2770 static GTY(()) rtx global_offset_table;
2771
2772 /* Ensure that we are not using patterns that are not OK with PIC. */
2773
2774 int
2775 check_pic (int i)
2776 {
2777 switch (flag_pic)
2778 {
2779 case 1:
2780 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2781 && (GET_CODE (recog_data.operand[i]) != CONST
2782 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2783 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2784 == global_offset_table)
2785 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2786 == CONST))));
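      /* Fall through.  */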
2787 case 2:
2788 default:
2789 return 1;
2790 }
2791 }
2792
2793 /* Return true if X is an address which needs a temporary register when
2794 reloaded while generating PIC code. */
2795
2796 int
2797 pic_address_needs_scratch (rtx x)
2798 {
2799 /* A symbolic address plus a non-SMALL_INT constant needs a temp reg.  */
2800 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2801 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2802 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2803 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2804 return 1;
2805
2806 return 0;
2807 }
2808
2809 /* Determine if a given RTX is a valid constant. We already know this
2810 satisfies CONSTANT_P. */
2811
2812 bool
2813 legitimate_constant_p (rtx x)
2814 {
2815 rtx inner;
2816
2817 switch (GET_CODE (x))
2818 {
2819 case SYMBOL_REF:
2820 /* TLS symbols are not constant. */
2821 if (SYMBOL_REF_TLS_MODEL (x))
2822 return false;
2823 break;
2824
2825 case CONST:
2826 inner = XEXP (x, 0);
2827
2828 /* Offsets of TLS symbols are never valid.
2829 Discourage CSE from creating them. */
2830 if (GET_CODE (inner) == PLUS
2831 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2832 return false;
2833 break;
2834
2835 case CONST_DOUBLE:
2836 if (GET_MODE (x) == VOIDmode)
2837 return true;
2838
2839 /* Floating point constants are generally not ok.
2840 The only exception is 0.0 in VIS. */
2841 if (TARGET_VIS
2842 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2843 && const_zero_operand (x, GET_MODE (x)))
2844 return true;
2845
2846 return false;
2847
2848 case CONST_VECTOR:
2849 /* Vector constants are generally not ok.
2850 The only exception is 0 in VIS. */
2851 if (TARGET_VIS
2852 && const_zero_operand (x, GET_MODE (x)))
2853 return true;
2854
2855 return false;
2856
2857 default:
2858 break;
2859 }
2860
2861 return true;
2862 }
2863
2864 /* Determine if a given RTX is a valid constant address. */
2865
2866 bool
2867 constant_address_p (rtx x)
2868 {
2869 switch (GET_CODE (x))
2870 {
2871 case LABEL_REF:
2872 case CONST_INT:
2873 case HIGH:
2874 return true;
2875
2876 case CONST:
2877 if (flag_pic && pic_address_needs_scratch (x))
2878 return false;
2879 return legitimate_constant_p (x);
2880
2881 case SYMBOL_REF:
2882 return !flag_pic && legitimate_constant_p (x);
2883
2884 default:
2885 return false;
2886 }
2887 }
2888
2889 /* Nonzero if the constant value X is a legitimate general operand
2890 when generating PIC code. It is given that flag_pic is on and
2891 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2892
2893 bool
2894 legitimate_pic_operand_p (rtx x)
2895 {
2896 if (pic_address_needs_scratch (x))
2897 return false;
2898 if (SPARC_SYMBOL_REF_TLS_P (x)
2899 || (GET_CODE (x) == CONST
2900 && GET_CODE (XEXP (x, 0)) == PLUS
2901 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2902 return false;
2903 return true;
2904 }
2905
2906 /* Return nonzero if ADDR is a valid memory address.
2907 STRICT specifies whether strict register checking applies. */
2908
2909 int
2910 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2911 {
2912 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2913
2914 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2915 rs1 = addr;
2916 else if (GET_CODE (addr) == PLUS)
2917 {
2918 rs1 = XEXP (addr, 0);
2919 rs2 = XEXP (addr, 1);
2920
2921 /* Canonicalize: REG comes first; if there are no REGs,
2922 LO_SUM comes first. */
2923 if (!REG_P (rs1)
2924 && GET_CODE (rs1) != SUBREG
2925 && (REG_P (rs2)
2926 || GET_CODE (rs2) == SUBREG
2927 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2928 {
2929 rs1 = XEXP (addr, 1);
2930 rs2 = XEXP (addr, 0);
2931 }
2932
2933 if ((flag_pic == 1
2934 && rs1 == pic_offset_table_rtx
2935 && !REG_P (rs2)
2936 && GET_CODE (rs2) != SUBREG
2937 && GET_CODE (rs2) != LO_SUM
2938 && GET_CODE (rs2) != MEM
2939 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2940 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2941 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2942 || ((REG_P (rs1)
2943 || GET_CODE (rs1) == SUBREG)
2944 && RTX_OK_FOR_OFFSET_P (rs2)))
2945 {
2946 imm1 = rs2;
2947 rs2 = NULL;
2948 }
2949 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2950 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2951 {
2952 /* We prohibit REG + REG for TFmode when there are no quad move insns
2953 and we consequently need to split. We do this because REG+REG
2954 is not an offsettable address. If we get the situation in reload
2955 where source and destination of a movtf pattern are both MEMs with
2956 REG+REG address, then only one of them gets converted to an
2957 offsettable address. */
2958 if (mode == TFmode
2959 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2960 return 0;
2961
2962 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
2963 optimizing, because then mem_min_alignment is likely to be zero
2964 after reload and the forced split would lack a matching splitter
2965 pattern. */
2966 if (TARGET_ARCH32 && !optimize
2967 && (mode == DFmode || mode == DImode))
2968 return 0;
2969 }
2970 else if (USE_AS_OFFSETABLE_LO10
2971 && GET_CODE (rs1) == LO_SUM
2972 && TARGET_ARCH64
2973 && ! TARGET_CM_MEDMID
2974 && RTX_OK_FOR_OLO10_P (rs2))
2975 {
2976 rs2 = NULL;
2977 imm1 = XEXP (rs1, 1);
2978 rs1 = XEXP (rs1, 0);
2979 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2980 return 0;
2981 }
2982 }
2983 else if (GET_CODE (addr) == LO_SUM)
2984 {
2985 rs1 = XEXP (addr, 0);
2986 imm1 = XEXP (addr, 1);
2987
2988 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2989 return 0;
2990
2991 /* We can't allow TFmode in 32-bit mode, because an offset greater
2992 than the alignment (8) may cause the LO_SUM to overflow. */
2993 if (mode == TFmode && TARGET_ARCH32)
2994 return 0;
2995 }
2996 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2997 return 1;
2998 else
2999 return 0;
3000
3001 if (GET_CODE (rs1) == SUBREG)
3002 rs1 = SUBREG_REG (rs1);
3003 if (!REG_P (rs1))
3004 return 0;
3005
3006 if (rs2)
3007 {
3008 if (GET_CODE (rs2) == SUBREG)
3009 rs2 = SUBREG_REG (rs2);
3010 if (!REG_P (rs2))
3011 return 0;
3012 }
3013
3014 if (strict)
3015 {
3016 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3017 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3018 return 0;
3019 }
3020 else
3021 {
3022 if ((REGNO (rs1) >= 32
3023 && REGNO (rs1) != FRAME_POINTER_REGNUM
3024 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3025 || (rs2
3026 && (REGNO (rs2) >= 32
3027 && REGNO (rs2) != FRAME_POINTER_REGNUM
3028 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3029 return 0;
3030 }
3031 return 1;
3032 }
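
/* The address forms accepted above, summarized with illustrative
   assembly (a sketch, not exhaustive):

	REG			ld  [%o0], ...
	REG + SMALL_INT		ld  [%o0 + 64], ...
	REG + REG		ld  [%o0 + %o1], ...	! not TFmode, see above
	LO_SUM (REG, imm)	ld  [%o0 + %lo(sym)], ...
	small CONST_INT		ld  [42], ...  */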
3033
3034 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3035
3036 static GTY(()) rtx sparc_tls_symbol;
3037
3038 static rtx
3039 sparc_tls_get_addr (void)
3040 {
3041 if (!sparc_tls_symbol)
3042 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3043
3044 return sparc_tls_symbol;
3045 }
3046
3047 static rtx
3048 sparc_tls_got (void)
3049 {
3050 rtx temp;
3051 if (flag_pic)
3052 {
3053 current_function_uses_pic_offset_table = 1;
3054 return pic_offset_table_rtx;
3055 }
3056
3057 if (!global_offset_table)
3058 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3059 temp = gen_reg_rtx (Pmode);
3060 emit_move_insn (temp, global_offset_table);
3061 return temp;
3062 }
3063
3064 /* Return 1 if *X is a thread-local symbol. */
3065
3066 static int
3067 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3068 {
3069 return SPARC_SYMBOL_REF_TLS_P (*x);
3070 }
3071
3072 /* Return 1 if X contains a thread-local symbol. */
3073
3074 bool
3075 sparc_tls_referenced_p (rtx x)
3076 {
3077 if (!TARGET_HAVE_TLS)
3078 return false;
3079
3080 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3081 }
3082
3083 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3084 this (thread-local) address. */
3085
3086 rtx
3087 legitimize_tls_address (rtx addr)
3088 {
3089 rtx temp1, temp2, temp3, ret, o0, got, insn;
3090
3091 gcc_assert (! no_new_pseudos);
3092
3093 if (GET_CODE (addr) == SYMBOL_REF)
3094 switch (SYMBOL_REF_TLS_MODEL (addr))
3095 {
3096 case TLS_MODEL_GLOBAL_DYNAMIC:
3097 start_sequence ();
3098 temp1 = gen_reg_rtx (SImode);
3099 temp2 = gen_reg_rtx (SImode);
3100 ret = gen_reg_rtx (Pmode);
3101 o0 = gen_rtx_REG (Pmode, 8);
3102 got = sparc_tls_got ();
3103 emit_insn (gen_tgd_hi22 (temp1, addr));
3104 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3105 if (TARGET_ARCH32)
3106 {
3107 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3108 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3109 addr, const1_rtx));
3110 }
3111 else
3112 {
3113 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3114 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3115 addr, const1_rtx));
3116 }
3117 CALL_INSN_FUNCTION_USAGE (insn)
3118 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3119 CALL_INSN_FUNCTION_USAGE (insn));
3120 insn = get_insns ();
3121 end_sequence ();
3122 emit_libcall_block (insn, ret, o0, addr);
3123 break;
3124
3125 case TLS_MODEL_LOCAL_DYNAMIC:
3126 start_sequence ();
3127 temp1 = gen_reg_rtx (SImode);
3128 temp2 = gen_reg_rtx (SImode);
3129 temp3 = gen_reg_rtx (Pmode);
3130 ret = gen_reg_rtx (Pmode);
3131 o0 = gen_rtx_REG (Pmode, 8);
3132 got = sparc_tls_got ();
3133 emit_insn (gen_tldm_hi22 (temp1));
3134 emit_insn (gen_tldm_lo10 (temp2, temp1));
3135 if (TARGET_ARCH32)
3136 {
3137 emit_insn (gen_tldm_add32 (o0, got, temp2));
3138 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3139 const1_rtx));
3140 }
3141 else
3142 {
3143 emit_insn (gen_tldm_add64 (o0, got, temp2));
3144 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3145 const1_rtx));
3146 }
3147 CALL_INSN_FUNCTION_USAGE (insn)
3148 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3149 CALL_INSN_FUNCTION_USAGE (insn));
3150 insn = get_insns ();
3151 end_sequence ();
3152 emit_libcall_block (insn, temp3, o0,
3153 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3154 UNSPEC_TLSLD_BASE));
3155 temp1 = gen_reg_rtx (SImode);
3156 temp2 = gen_reg_rtx (SImode);
3157 emit_insn (gen_tldo_hix22 (temp1, addr));
3158 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3159 if (TARGET_ARCH32)
3160 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3161 else
3162 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3163 break;
3164
3165 case TLS_MODEL_INITIAL_EXEC:
3166 temp1 = gen_reg_rtx (SImode);
3167 temp2 = gen_reg_rtx (SImode);
3168 temp3 = gen_reg_rtx (Pmode);
3169 got = sparc_tls_got ();
3170 emit_insn (gen_tie_hi22 (temp1, addr));
3171 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3172 if (TARGET_ARCH32)
3173 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3174 else
3175 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3176 if (TARGET_SUN_TLS)
3177 {
3178 ret = gen_reg_rtx (Pmode);
3179 if (TARGET_ARCH32)
3180 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3181 temp3, addr));
3182 else
3183 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3184 temp3, addr));
3185 }
3186 else
3187 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3188 break;
3189
3190 case TLS_MODEL_LOCAL_EXEC:
3191 temp1 = gen_reg_rtx (Pmode);
3192 temp2 = gen_reg_rtx (Pmode);
3193 if (TARGET_ARCH32)
3194 {
3195 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3196 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3197 }
3198 else
3199 {
3200 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3201 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3202 }
3203 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3204 break;
3205
3206 default:
3207 gcc_unreachable ();
3208 }
3209
3210 else
3211 gcc_unreachable (); /* for now ... */
3212
3213 return ret;
3214 }
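
/* For reference, the global-dynamic sequence constructed above
   corresponds to assembly of roughly this shape (32-bit case,
   illustrative; see the SPARC TLS ABI for the authoritative form):

	sethi	%tgd_hi22(sym), %l1
	add	%l1, %tgd_lo10(sym), %l2
	add	%l7, %l2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop					*/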
3215
3216
3217 /* Legitimize PIC addresses. If the address is already position-independent,
3218 we return ORIG. Newly generated position-independent addresses go into a
3219 reg. This is REG if nonzero, otherwise we allocate register(s) as
3220 necessary. */
3221
3222 rtx
3223 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3224 rtx reg)
3225 {
3226 if (GET_CODE (orig) == SYMBOL_REF
3227 /* See the comment in sparc_expand_move. */
3228 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3229 {
3230 rtx pic_ref, address;
3231 rtx insn;
3232
3233 if (reg == 0)
3234 {
3235 gcc_assert (! reload_in_progress && ! reload_completed);
3236 reg = gen_reg_rtx (Pmode);
3237 }
3238
3239 if (flag_pic == 2)
3240 {
3241 /* If not during reload, allocate another temp reg here for loading
3242 in the address, so that these instructions can be optimized
3243 properly. */
3244 rtx temp_reg = ((reload_in_progress || reload_completed)
3245 ? reg : gen_reg_rtx (Pmode));
3246
3247 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3248 won't get confused into thinking that these two instructions
3249 are loading in the true address of the symbol. If in the
3250 future a PIC rtx exists, that should be used instead. */
3251 if (TARGET_ARCH64)
3252 {
3253 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3254 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3255 }
3256 else
3257 {
3258 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3259 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3260 }
3261 address = temp_reg;
3262 }
3263 else
3264 address = orig;
3265
3266 pic_ref = gen_const_mem (Pmode,
3267 gen_rtx_PLUS (Pmode,
3268 pic_offset_table_rtx, address));
3269 current_function_uses_pic_offset_table = 1;
3270 insn = emit_move_insn (reg, pic_ref);
3271 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3272 by the loop optimizer.  */
3273 set_unique_reg_note (insn, REG_EQUAL, orig);
3274 return reg;
3275 }
3276 else if (GET_CODE (orig) == CONST)
3277 {
3278 rtx base, offset;
3279
3280 if (GET_CODE (XEXP (orig, 0)) == PLUS
3281 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3282 return orig;
3283
3284 if (reg == 0)
3285 {
3286 gcc_assert (! reload_in_progress && ! reload_completed);
3287 reg = gen_reg_rtx (Pmode);
3288 }
3289
3290 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3291 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3292 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3293 base == reg ? 0 : reg);
3294
3295 if (GET_CODE (offset) == CONST_INT)
3296 {
3297 if (SMALL_INT (offset))
3298 return plus_constant (base, INTVAL (offset));
3299 else if (! reload_in_progress && ! reload_completed)
3300 offset = force_reg (Pmode, offset);
3301 else
3302 /* If we reach here, then something is seriously wrong. */
3303 gcc_unreachable ();
3304 }
3305 return gen_rtx_PLUS (Pmode, base, offset);
3306 }
3307 else if (GET_CODE (orig) == LABEL_REF)
3308 /* ??? Why do we do this? */
3309 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3310 the register is live instead, in case it is eliminated. */
3311 current_function_uses_pic_offset_table = 1;
3312
3313 return orig;
3314 }
3315
3316 /* Try machine-dependent ways of modifying an illegitimate address X
3317 to be legitimate. If we find one, return the new, valid address.
3318
3319 OLDX is the address as it was before break_out_memory_refs was called.
3320 In some cases it is useful to look at this to decide what needs to be done.
3321
3322 MODE is the mode of the operand pointed to by X. */
3323
3324 rtx
3325 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3326 {
3327 rtx orig_x = x;
3328
3329 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3330 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3331 force_operand (XEXP (x, 0), NULL_RTX));
3332 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3333 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3334 force_operand (XEXP (x, 1), NULL_RTX));
3335 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3336 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3337 XEXP (x, 1));
3338 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3339 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3340 force_operand (XEXP (x, 1), NULL_RTX));
3341
3342 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3343 return x;
3344
3345 if (SPARC_SYMBOL_REF_TLS_P (x))
3346 x = legitimize_tls_address (x);
3347 else if (flag_pic)
3348 x = legitimize_pic_address (x, mode, 0);
3349 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3350 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3351 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3352 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3353 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3354 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3355 else if (GET_CODE (x) == SYMBOL_REF
3356 || GET_CODE (x) == CONST
3357 || GET_CODE (x) == LABEL_REF)
3358 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3359 return x;
3360 }
3361
3362 /* Emit the special PIC helper function. */
3363
3364 static void
3365 emit_pic_helper (void)
3366 {
3367 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3368 int align;
3369
3370 switch_to_section (text_section);
3371
3372 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3373 if (align > 0)
3374 ASM_OUTPUT_ALIGN (asm_out_file, align);
3375 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3376 if (flag_delayed_branch)
3377 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3378 pic_name, pic_name);
3379 else
3380 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3381 pic_name, pic_name);
3382
3383 pic_helper_emitted_p = true;
3384 }
3385
3386 /* Emit code to load the PIC register. */
3387
3388 static void
3389 load_pic_register (bool delay_pic_helper)
3390 {
3391 int orig_flag_pic = flag_pic;
3392
3393 if (TARGET_VXWORKS_RTP)
3394 {
3395 emit_insn (gen_vxworks_load_got ());
3396 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3397 return;
3398 }
3399
3400 /* If we haven't initialized the special PIC symbols, do so now. */
3401 if (!pic_helper_symbol_name[0])
3402 {
3403 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3404 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3405 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3406 }
3407
3408 /* If we haven't emitted the special PIC helper function, do so now unless
3409 we are requested to delay it. */
3410 if (!delay_pic_helper && !pic_helper_emitted_p)
3411 emit_pic_helper ();
3412
3413 flag_pic = 0;
3414 if (TARGET_ARCH64)
3415 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3416 pic_helper_symbol));
3417 else
3418 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3419 pic_helper_symbol));
3420 flag_pic = orig_flag_pic;
3421
3422 /* Need to emit this whether or not we obey regdecls,
3423 since setjmp/longjmp can cause life info to screw up.
3424 ??? In the case where we don't obey regdecls, this is not sufficient
3425 since we may not fall out the bottom. */
3426 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3427 }
3428
3429 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3430 address of the call target. */
3431
3432 void
3433 sparc_emit_call_insn (rtx pat, rtx addr)
3434 {
3435 rtx insn;
3436
3437 insn = emit_call_insn (pat);
3438
3439 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3440 if (TARGET_VXWORKS_RTP
3441 && flag_pic
3442 && GET_CODE (addr) == SYMBOL_REF
3443 && (SYMBOL_REF_DECL (addr)
3444 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3445 : !SYMBOL_REF_LOCAL_P (addr)))
3446 {
3447 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3448 current_function_uses_pic_offset_table = 1;
3449 }
3450 }
3451 \f
3452 /* Return 1 if RTX is a MEM which is known to be aligned to at
3453 least a DESIRED byte boundary. */
3454
3455 int
3456 mem_min_alignment (rtx mem, int desired)
3457 {
3458 rtx addr, base, offset;
3459
3460 /* If it's not a MEM we can't accept it. */
3461 if (GET_CODE (mem) != MEM)
3462 return 0;
3463
3464 /* Obviously... */
3465 if (!TARGET_UNALIGNED_DOUBLES
3466 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3467 return 1;
3468
3469 /* ??? The rest of the function predates MEM_ALIGN so
3470 there is probably a bit of redundancy. */
3471 addr = XEXP (mem, 0);
3472 base = offset = NULL_RTX;
3473 if (GET_CODE (addr) == PLUS)
3474 {
3475 if (GET_CODE (XEXP (addr, 0)) == REG)
3476 {
3477 base = XEXP (addr, 0);
3478
3479 /* What we are saying here is that if the base
3480 REG is properly aligned, the compiler will make
3481 sure any REG-based index off of it will be
3482 aligned as well.  */
3483 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3484 offset = XEXP (addr, 1);
3485 else
3486 offset = const0_rtx;
3487 }
3488 }
3489 else if (GET_CODE (addr) == REG)
3490 {
3491 base = addr;
3492 offset = const0_rtx;
3493 }
3494
3495 if (base != NULL_RTX)
3496 {
3497 int regno = REGNO (base);
3498
3499 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3500 {
3501 /* Check if the compiler has recorded some information
3502 about the alignment of the base REG. If reload has
3503 completed, we already matched with proper alignments.
3504 If not running global_alloc, reload might give us
3505 an unaligned pointer to the local stack, though.  */
3506 if (((cfun != 0
3507 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3508 || (optimize && reload_completed))
3509 && (INTVAL (offset) & (desired - 1)) == 0)
3510 return 1;
3511 }
3512 else
3513 {
3514 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3515 return 1;
3516 }
3517 }
3518 else if (! TARGET_UNALIGNED_DOUBLES
3519 || CONSTANT_P (addr)
3520 || GET_CODE (addr) == LO_SUM)
3521 {
3522 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3523 is true, in which case we can only assume that an access is aligned if
3524 it is to a constant address, or the address involves a LO_SUM. */
3525 return 1;
3526 }
3527
3528 /* An obviously unaligned address. */
3529 return 0;
3530 }
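
/* The alignment tests above reduce to a power-of-two mask check.  A
   minimal standalone model (illustrative only):  */
#if 0
static int
offset_ok_for_alignment (long offset, int desired)
{
  /* DESIRED must be a power of two; OFFSET is acceptable iff its low
     bits are clear.  */
  return (offset & (desired - 1)) == 0;
}

/* offset_ok_for_alignment (24, 8) == 1, offset_ok_for_alignment (20, 8) == 0.  */
#endif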
3531
3532 \f
3533 /* Vectors to keep interesting information about registers where it can easily
3534 be found.  We used to use the actual mode value as the bit number, but there
3535 are more than 32 modes now. Instead we use two tables: one indexed by
3536 hard register number, and one indexed by mode. */
3537
3538 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3539 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3540 mapped into one sparc_mode_class mode. */
3541
3542 enum sparc_mode_class {
3543 S_MODE, D_MODE, T_MODE, O_MODE,
3544 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3545 CC_MODE, CCFP_MODE
3546 };
3547
3548 /* Modes for single-word and smaller quantities. */
3549 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3550
3551 /* Modes for double-word and smaller quantities. */
3552 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3553
3554 /* Modes for quad-word and smaller quantities. */
3555 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3556
3557 /* Modes for 8-word and smaller quantities. */
3558 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3559
3560 /* Modes for single-float quantities. We must allow any single word or
3561 smaller quantity. This is because the fix/float conversion instructions
3562 take integer inputs/outputs from the float registers. */
3563 #define SF_MODES (S_MODES)
3564
3565 /* Modes for double-float and smaller quantities. */
3566 #define DF_MODES (S_MODES | D_MODES)
3567
3568 /* Modes for double-float only quantities. */
3569 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3570
3571 /* Modes for quad-float only quantities. */
3572 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3573
3574 /* Modes for quad-float and smaller quantities. */
3575 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3576
3577 /* Modes for quad-float and double-float quantities. */
3578 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3579
3580 /* Modes for quad-float pair only quantities. */
3581 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3582
3583 /* Modes for quad-float pairs and smaller quantities. */
3584 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3585
3586 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3587
3588 /* Modes for condition codes. */
3589 #define CC_MODES (1 << (int) CC_MODE)
3590 #define CCFP_MODES (1 << (int) CCFP_MODE)
3591
3592 /* Value is 1 if register/mode pair is acceptable on sparc.
3593 The funny mixture of D and T modes is because integer operations
3594 do not specially operate on tetra quantities, so non-quad-aligned
3595 registers can hold quadword quantities (except %o4 and %i4 because
3596 they cross fixed registers). */
3597
3598 /* This points to either the 32 bit or the 64 bit version. */
3599 const int *hard_regno_mode_classes;
3600
3601 static const int hard_32bit_mode_classes[] = {
3602 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3603 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3604 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3605 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3606
3607 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3608 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3609 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3610 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3611
3612 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3613 and none can hold SFmode/SImode values. */
3614 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3615 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3616 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3617 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3618
3619 /* %fcc[0123] */
3620 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3621
3622 /* %icc */
3623 CC_MODES
3624 };
3625
3626 static const int hard_64bit_mode_classes[] = {
3627 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3628 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3629 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3630 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3631
3632 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3633 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3634 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3635 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3636
3637 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3638 and none can hold SFmode/SImode values. */
3639 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3640 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3641 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3642 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3643
3644 /* %fcc[0123] */
3645 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3646
3647 /* %icc */
3648 CC_MODES
3649 };
3650
3651 int sparc_mode_class [NUM_MACHINE_MODES];
3652
3653 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3654
3655 static void
3656 sparc_init_modes (void)
3657 {
3658 int i;
3659
3660 for (i = 0; i < NUM_MACHINE_MODES; i++)
3661 {
3662 switch (GET_MODE_CLASS (i))
3663 {
3664 case MODE_INT:
3665 case MODE_PARTIAL_INT:
3666 case MODE_COMPLEX_INT:
3667 if (GET_MODE_SIZE (i) <= 4)
3668 sparc_mode_class[i] = 1 << (int) S_MODE;
3669 else if (GET_MODE_SIZE (i) == 8)
3670 sparc_mode_class[i] = 1 << (int) D_MODE;
3671 else if (GET_MODE_SIZE (i) == 16)
3672 sparc_mode_class[i] = 1 << (int) T_MODE;
3673 else if (GET_MODE_SIZE (i) == 32)
3674 sparc_mode_class[i] = 1 << (int) O_MODE;
3675 else
3676 sparc_mode_class[i] = 0;
3677 break;
3678 case MODE_VECTOR_INT:
3679 if (GET_MODE_SIZE (i) <= 4)
3680 sparc_mode_class[i] = 1 << (int)SF_MODE;
3681 else if (GET_MODE_SIZE (i) == 8)
3682 sparc_mode_class[i] = 1 << (int)DF_MODE;
3683 break;
3684 case MODE_FLOAT:
3685 case MODE_COMPLEX_FLOAT:
3686 if (GET_MODE_SIZE (i) <= 4)
3687 sparc_mode_class[i] = 1 << (int) SF_MODE;
3688 else if (GET_MODE_SIZE (i) == 8)
3689 sparc_mode_class[i] = 1 << (int) DF_MODE;
3690 else if (GET_MODE_SIZE (i) == 16)
3691 sparc_mode_class[i] = 1 << (int) TF_MODE;
3692 else if (GET_MODE_SIZE (i) == 32)
3693 sparc_mode_class[i] = 1 << (int) OF_MODE;
3694 else
3695 sparc_mode_class[i] = 0;
3696 break;
3697 case MODE_CC:
3698 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3699 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3700 else
3701 sparc_mode_class[i] = 1 << (int) CC_MODE;
3702 break;
3703 default:
3704 sparc_mode_class[i] = 0;
3705 break;
3706 }
3707 }
3708
3709 if (TARGET_ARCH64)
3710 hard_regno_mode_classes = hard_64bit_mode_classes;
3711 else
3712 hard_regno_mode_classes = hard_32bit_mode_classes;
3713
3714 /* Initialize the array used by REGNO_REG_CLASS. */
3715 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3716 {
3717 if (i < 16 && TARGET_V8PLUS)
3718 sparc_regno_reg_class[i] = I64_REGS;
3719 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3720 sparc_regno_reg_class[i] = GENERAL_REGS;
3721 else if (i < 64)
3722 sparc_regno_reg_class[i] = FP_REGS;
3723 else if (i < 96)
3724 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3725 else if (i < 100)
3726 sparc_regno_reg_class[i] = FPCC_REGS;
3727 else
3728 sparc_regno_reg_class[i] = NO_REGS;
3729 }
3730 }
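
/* An illustrative sketch of how the tables above are consumed (this
   comment adds no code): HARD_REGNO_MODE_OK tests
   (hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0, so
   once sparc_init_modes has classed DImode as D_MODE, any hard
   register whose table entry includes D_MODES accepts it, while the
   odd-numbered FP registers above %f32, whose entries are 0, accept
   nothing.  */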
3731 \f
3732 /* Compute the frame size required by the function. This function is called
3733 during the reload pass and also by sparc_expand_prologue. */
3734
3735 HOST_WIDE_INT
3736 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3737 {
3738 int outgoing_args_size = (current_function_outgoing_args_size
3739 + REG_PARM_STACK_SPACE (current_function_decl));
3740 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3741 int i;
3742
3743 if (TARGET_ARCH64)
3744 {
3745 for (i = 0; i < 8; i++)
3746 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3747 n_regs += 2;
3748 }
3749 else
3750 {
3751 for (i = 0; i < 8; i += 2)
3752 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3753 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3754 n_regs += 2;
3755 }
3756
3757 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3758 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3759 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3760 n_regs += 2;
3761
3762 /* Set up values for use in prologue and epilogue. */
3763 num_gfregs = n_regs;
3764
3765 if (leaf_function_p
3766 && n_regs == 0
3767 && size == 0
3768 && current_function_outgoing_args_size == 0)
3769 actual_fsize = apparent_fsize = 0;
3770 else
3771 {
3772 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3773 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3774 apparent_fsize += n_regs * 4;
3775 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3776 }
3777
3778 /* Make sure nothing can clobber our register windows.
3779 If a SAVE must be done, or there is a stack-local variable,
3780 the register window area must be allocated. */
3781 if (! leaf_function_p || size > 0)
3782 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3783
3784 return SPARC_STACK_ALIGN (actual_fsize);
3785 }
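
/* A worked example with illustrative values only: for a 32-bit
   non-leaf function with SIZE = 20 bytes of locals, two live
   call-saved FP register pairs (n_regs = 4), and taking
   STARTING_FRAME_OFFSET as 0 for simplicity, we would get
   apparent_fsize = ((20 + 7) & -8) + 4 * 4 = 40; actual_fsize then
   adds the rounded outgoing args area plus FIRST_PARM_OFFSET for the
   register window, the whole rounded up by SPARC_STACK_ALIGN.  */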
3786
3787 /* Output any necessary .register pseudo-ops. */
3788
3789 void
3790 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3791 {
3792 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3793 int i;
3794
3795 if (TARGET_ARCH32)
3796 return;
3797
3798 /* Check if %g[2367] were used without
3799 .register being printed for them already. */
3800 for (i = 2; i < 8; i++)
3801 {
3802 if (df_regs_ever_live_p (i)
3803 && ! sparc_hard_reg_printed [i])
3804 {
3805 sparc_hard_reg_printed [i] = 1;
3806 /* %g7 is used as TLS base register, use #ignore
3807 for it instead of #scratch. */
3808 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3809 i == 7 ? "ignore" : "scratch");
3810 }
3811 if (i == 3) i = 5;
3812 }
3813 #endif
3814 }
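
/* For instance, a 64-bit unit whose code touches %g2 and %g7 would
   get output along these lines (illustrative):

        .register       %g2, #scratch
        .register       %g7, #ignore  */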
3815
3816 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3817 as needed. LOW should be double-word aligned for 32-bit registers.
3818 Return the new OFFSET. */
3819
3820 #define SORR_SAVE 0
3821 #define SORR_RESTORE 1
3822
3823 static int
3824 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3825 {
3826 rtx mem, insn;
3827 int i;
3828
3829 if (TARGET_ARCH64 && high <= 32)
3830 {
3831 for (i = low; i < high; i++)
3832 {
3833 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3834 {
3835 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3836 set_mem_alias_set (mem, sparc_sr_alias_set);
3837 if (action == SORR_SAVE)
3838 {
3839 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3840 RTX_FRAME_RELATED_P (insn) = 1;
3841 }
3842 else /* action == SORR_RESTORE */
3843 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3844 offset += 8;
3845 }
3846 }
3847 }
3848 else
3849 {
3850 for (i = low; i < high; i += 2)
3851 {
3852 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
3853 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
3854 enum machine_mode mode;
3855 int regno;
3856
3857 if (reg0 && reg1)
3858 {
3859 mode = i < 32 ? DImode : DFmode;
3860 regno = i;
3861 }
3862 else if (reg0)
3863 {
3864 mode = i < 32 ? SImode : SFmode;
3865 regno = i;
3866 }
3867 else if (reg1)
3868 {
3869 mode = i < 32 ? SImode : SFmode;
3870 regno = i + 1;
3871 offset += 4;
3872 }
3873 else
3874 continue;
3875
3876 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3877 set_mem_alias_set (mem, sparc_sr_alias_set);
3878 if (action == SORR_SAVE)
3879 {
3880 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3881 RTX_FRAME_RELATED_P (insn) = 1;
3882 }
3883 else /* action == SORR_RESTORE */
3884 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3885
3886 /* Always preserve double-word alignment. */
3887 offset = (offset + 7) & -8;
3888 }
3889 }
3890
3891 return offset;
3892 }
3893
3894 /* Emit code to save or restore call-saved registers. */
3895
3896 static void
3897 emit_save_or_restore_regs (int action)
3898 {
3899 HOST_WIDE_INT offset;
3900 rtx base;
3901
3902 offset = frame_base_offset - apparent_fsize;
3903
3904 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3905 {
3906 /* ??? This might be optimized a little as %g1 might already have a
3907 value close enough that a single add insn will do. */
3908 /* ??? Although, all of this is probably only a temporary fix
3909 because if %g1 can hold a function result, then
3910 sparc_expand_epilogue will lose (the result will be
3911 clobbered). */
3912 base = gen_rtx_REG (Pmode, 1);
3913 emit_move_insn (base, GEN_INT (offset));
3914 emit_insn (gen_rtx_SET (VOIDmode,
3915 base,
3916 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3917 offset = 0;
3918 }
3919 else
3920 base = frame_base_reg;
3921
3922 offset = save_or_restore_regs (0, 8, base, offset, action);
3923 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3924 }
3925
3926 /* Generate a save_register_window insn. */
3927
3928 static rtx
3929 gen_save_register_window (rtx increment)
3930 {
3931 if (TARGET_ARCH64)
3932 return gen_save_register_windowdi (increment);
3933 else
3934 return gen_save_register_windowsi (increment);
3935 }
3936
3937 /* Generate an increment for the stack pointer. */
3938
3939 static rtx
3940 gen_stack_pointer_inc (rtx increment)
3941 {
3942 return gen_rtx_SET (VOIDmode,
3943 stack_pointer_rtx,
3944 gen_rtx_PLUS (Pmode,
3945 stack_pointer_rtx,
3946 increment));
3947 }
3948
3949 /* Generate a decrement for the stack pointer. */
3950
3951 static rtx
3952 gen_stack_pointer_dec (rtx decrement)
3953 {
3954 return gen_rtx_SET (VOIDmode,
3955 stack_pointer_rtx,
3956 gen_rtx_MINUS (Pmode,
3957 stack_pointer_rtx,
3958 decrement));
3959 }
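
/* For example, gen_stack_pointer_inc (GEN_INT (-96)) builds
   (set (reg %sp) (plus (reg %sp) (const_int -96))), which typically
   assembles to "add %sp, -96, %sp"; the _dec variant uses MINUS, so
   passing a negative decrement deallocates (illustrative).  */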
3960
3961 /* Expand the function prologue. The prologue is responsible for reserving
3962 storage for the frame, saving the call-saved registers and loading the
3963 PIC register if needed. */
3964
3965 void
3966 sparc_expand_prologue (void)
3967 {
3968 rtx insn;
3969 int i;
3970
3971 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3972 on the final value of the flag means deferring the prologue/epilogue
3973 expansion until just before the second scheduling pass, which is too
3974 late to emit multiple epilogues or return insns.
3975
3976 Of course we are making the assumption that the value of the flag
3977 will not change between now and its final value. Of the three parts
3978 of the formula, only the last one can reasonably vary. Let's take a
3979 closer look, after assuming that the first two are true
3980 (otherwise the last value is effectively silenced).
3981
3982 If only_leaf_regs_used returns false, the global predicate will also
3983 be false so the actual frame size calculated below will be positive.
3984 As a consequence, the save_register_window insn will be emitted in
3985 the instruction stream; now this insn explicitly references %fp
3986 which is not a leaf register so only_leaf_regs_used will always
3987 return false subsequently.
3988
3989 If only_leaf_regs_used returns true, we hope that the subsequent
3990 optimization passes won't cause non-leaf registers to pop up. For
3991 example, the regrename pass has special provisions to not rename to
3992 non-leaf registers in a leaf function. */
3993 sparc_leaf_function_p
3994 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3995
3996 /* Need to use actual_fsize, since we are also allocating
3997 space for our callee (and our own register save area). */
3998 actual_fsize
3999 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4000
4001 /* Advertise that the data calculated just above are now valid. */
4002 sparc_prologue_data_valid_p = true;
4003
4004 if (sparc_leaf_function_p)
4005 {
4006 frame_base_reg = stack_pointer_rtx;
4007 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4008 }
4009 else
4010 {
4011 frame_base_reg = hard_frame_pointer_rtx;
4012 frame_base_offset = SPARC_STACK_BIAS;
4013 }
4014
4015 if (actual_fsize == 0)
4016 /* do nothing. */ ;
4017 else if (sparc_leaf_function_p)
4018 {
4019 if (actual_fsize <= 4096)
4020 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4021 else if (actual_fsize <= 8192)
4022 {
4023 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4024 /* %sp is still the CFA register. */
4025 RTX_FRAME_RELATED_P (insn) = 1;
4026 insn
4027 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4028 }
4029 else
4030 {
4031 rtx reg = gen_rtx_REG (Pmode, 1);
4032 emit_move_insn (reg, GEN_INT (-actual_fsize));
4033 insn = emit_insn (gen_stack_pointer_inc (reg));
4034 REG_NOTES (insn) =
4035 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4036 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
4037 REG_NOTES (insn));
4038 }
4039
4040 RTX_FRAME_RELATED_P (insn) = 1;
4041 }
4042 else
4043 {
4044 if (actual_fsize <= 4096)
4045 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4046 else if (actual_fsize <= 8192)
4047 {
4048 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4049 /* %sp is not the CFA register anymore. */
4050 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4051 }
4052 else
4053 {
4054 rtx reg = gen_rtx_REG (Pmode, 1);
4055 emit_move_insn (reg, GEN_INT (-actual_fsize));
4056 insn = emit_insn (gen_save_register_window (reg));
4057 }
4058
4059 RTX_FRAME_RELATED_P (insn) = 1;
4060 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4061 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4062 }
4063
4064 if (num_gfregs)
4065 emit_save_or_restore_regs (SORR_SAVE);
4066
4067 /* Load the PIC register if needed. */
4068 if (flag_pic && current_function_uses_pic_offset_table)
4069 load_pic_register (false);
4070 }
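
/* By way of illustration, the three size cases above emit assembly
   roughly like this for a non-leaf function (exact output depends on
   the target and options):

        save    %sp, -192, %sp          ! actual_fsize <= 4096

        save    %sp, -4096, %sp         ! 4096 < actual_fsize <= 8192
        add     %sp, 4096-actual_fsize, %sp

        sethi   %hi(-actual_fsize), %g1 ! actual_fsize > 8192
        or      %g1, %lo(-actual_fsize), %g1
        save    %sp, %g1, %sp  */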
4071
4072 /* This function generates the assembly code for function entry, which boils
4073 down to emitting the necessary .register directives. */
4074
4075 static void
4076 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4077 {
4078 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4079 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4080
4081 sparc_output_scratch_registers (file);
4082 }
4083
4084 /* Expand the function epilogue, either normal or part of a sibcall.
4085 We emit all the instructions except the return or the call. */
4086
4087 void
4088 sparc_expand_epilogue (void)
4089 {
4090 if (num_gfregs)
4091 emit_save_or_restore_regs (SORR_RESTORE);
4092
4093 if (actual_fsize == 0)
4094 /* do nothing. */ ;
4095 else if (sparc_leaf_function_p)
4096 {
4097 if (actual_fsize <= 4096)
4098 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4099 else if (actual_fsize <= 8192)
4100 {
4101 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4102 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4103 }
4104 else
4105 {
4106 rtx reg = gen_rtx_REG (Pmode, 1);
4107 emit_move_insn (reg, GEN_INT (-actual_fsize));
4108 emit_insn (gen_stack_pointer_dec (reg));
4109 }
4110 }
4111 }
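
/* For a leaf function with actual_fsize of 96, the first case above
   emits gen_stack_pointer_dec (GEN_INT (-96)), i.e. "%sp = %sp - -96",
   deallocating the frame; the return itself is emitted later by
   output_return (illustrative).  */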
4112
4113 /* Return true if it is appropriate to emit `return' instructions in the
4114 body of a function. */
4115
4116 bool
4117 sparc_can_use_return_insn_p (void)
4118 {
4119 return sparc_prologue_data_valid_p
4120 && (actual_fsize == 0 || !sparc_leaf_function_p);
4121 }
4122
4123 /* This function generates the assembly code for function exit. */
4124
4125 static void
4126 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4127 {
4128 /* If code does not drop into the epilogue, we still have to output
4129 a dummy nop for the sake of sane backtraces. Otherwise, if the
4130 last two instructions of a function were "call foo; dslot;" this
4131 can make the return PC of foo (i.e. address of call instruction
4132 plus 8) point to the first instruction in the next function. */
4133
4134 rtx insn, last_real_insn;
4135
4136 insn = get_last_insn ();
4137
4138 last_real_insn = prev_real_insn (insn);
4139 if (last_real_insn
4140 && GET_CODE (last_real_insn) == INSN
4141 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4142 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4143
4144 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4145 fputs("\tnop\n", file);
4146
4147 sparc_output_deferred_case_vectors ();
4148 }
4149
4150 /* Output a 'restore' instruction. */
4151
4152 static void
4153 output_restore (rtx pat)
4154 {
4155 rtx operands[3];
4156
4157 if (! pat)
4158 {
4159 fputs ("\t restore\n", asm_out_file);
4160 return;
4161 }
4162
4163 gcc_assert (GET_CODE (pat) == SET);
4164
4165 operands[0] = SET_DEST (pat);
4166 pat = SET_SRC (pat);
4167
4168 switch (GET_CODE (pat))
4169 {
4170 case PLUS:
4171 operands[1] = XEXP (pat, 0);
4172 operands[2] = XEXP (pat, 1);
4173 output_asm_insn (" restore %r1, %2, %Y0", operands);
4174 break;
4175 case LO_SUM:
4176 operands[1] = XEXP (pat, 0);
4177 operands[2] = XEXP (pat, 1);
4178 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4179 break;
4180 case ASHIFT:
4181 operands[1] = XEXP (pat, 0);
4182 gcc_assert (XEXP (pat, 1) == const1_rtx);
4183 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4184 break;
4185 default:
4186 operands[1] = pat;
4187 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4188 break;
4189 }
4190 }
4191
4192 /* Output a return. */
4193
4194 const char *
4195 output_return (rtx insn)
4196 {
4197 if (sparc_leaf_function_p)
4198 {
4199 /* This is a leaf function so we don't have to bother restoring the
4200 register window, which frees us from dealing with the convoluted
4201 semantics of restore/return. We simply output the jump to the
4202 return address and the insn in the delay slot (if any). */
4203
4204 gcc_assert (! current_function_calls_eh_return);
4205
4206 return "jmp\t%%o7+%)%#";
4207 }
4208 else
4209 {
4210 /* This is a regular function so we have to restore the register window.
4211 We may have a pending insn for the delay slot, which will be either
4212 combined with the 'restore' instruction or put in the delay slot of
4213 the 'return' instruction. */
4214
4215 if (current_function_calls_eh_return)
4216 {
4217 /* If the function uses __builtin_eh_return, the eh_return
4218 machinery occupies the delay slot. */
4219 gcc_assert (! final_sequence);
4220
4221 if (! flag_delayed_branch)
4222 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4223
4224 if (TARGET_V9)
4225 fputs ("\treturn\t%i7+8\n", asm_out_file);
4226 else
4227 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4228
4229 if (flag_delayed_branch)
4230 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4231 else
4232 fputs ("\t nop\n", asm_out_file);
4233 }
4234 else if (final_sequence)
4235 {
4236 rtx delay, pat;
4237
4238 delay = NEXT_INSN (insn);
4239 gcc_assert (delay);
4240
4241 pat = PATTERN (delay);
4242
4243 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4244 {
4245 epilogue_renumber (&pat, 0);
4246 return "return\t%%i7+%)%#";
4247 }
4248 else
4249 {
4250 output_asm_insn ("jmp\t%%i7+%)", NULL);
4251 output_restore (pat);
4252 PATTERN (delay) = gen_blockage ();
4253 INSN_CODE (delay) = -1;
4254 }
4255 }
4256 else
4257 {
4258 /* The delay slot is empty. */
4259 if (TARGET_V9)
4260 return "return\t%%i7+%)\n\t nop";
4261 else if (flag_delayed_branch)
4262 return "jmp\t%%i7+%)\n\t restore";
4263 else
4264 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4265 }
4266 }
4267
4268 return "";
4269 }
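
/* Concretely, the empty-delay-slot case prints e.g. "return %i7+8"
   followed by a nop on V9, or "jmp %i7+8 / restore" otherwise; the
   offset comes from the %) directive, normally 8, or 12 when a 32-bit
   struct-returning call must skip the unimp word (illustrative).  */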
4270
4271 /* Output a sibling call. */
4272
4273 const char *
4274 output_sibcall (rtx insn, rtx call_operand)
4275 {
4276 rtx operands[1];
4277
4278 gcc_assert (flag_delayed_branch);
4279
4280 operands[0] = call_operand;
4281
4282 if (sparc_leaf_function_p)
4283 {
4284 /* This is a leaf function so we don't have to bother restoring the
4285 register window. We simply output the jump to the function and
4286 the insn in the delay slot (if any). */
4287
4288 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4289
4290 if (final_sequence)
4291 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4292 operands);
4293 else
4294 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4295 it into a branch if possible. */
4296 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4297 operands);
4298 }
4299 else
4300 {
4301 /* This is a regular function so we have to restore the register window.
4302 We may have a pending insn for the delay slot, which will be combined
4303 with the 'restore' instruction. */
4304
4305 output_asm_insn ("call\t%a0, 0", operands);
4306
4307 if (final_sequence)
4308 {
4309 rtx delay = NEXT_INSN (insn);
4310 gcc_assert (delay);
4311
4312 output_restore (PATTERN (delay));
4313
4314 PATTERN (delay) = gen_blockage ();
4315 INSN_CODE (delay) = -1;
4316 }
4317 else
4318 output_restore (NULL_RTX);
4319 }
4320
4321 return "";
4322 }
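
/* The non-leaf path above thus prints, for a hypothetical callee foo,
   "call foo, 0" followed either by a bare " restore" or by one merged
   with the pending delay-slot SET via output_restore above
   (illustrative).  */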
4323 \f
4324 /* Functions for handling argument passing.
4325
4326 For 32-bit, the first 6 args are normally in registers and the rest are
4327 pushed. Any arg that starts within the first 6 words is at least
4328 partially passed in a register unless its data type forbids it.
4329
4330 For 64-bit, the argument registers are laid out as an array of 16 elements
4331 and arguments are added sequentially. The first 6 int args and up to the
4332 first 16 fp args (depending on size) are passed in regs.
4333
4334 Slot Stack Integral Float Float in structure Double Long Double
4335 ---- ----- -------- ----- ------------------ ------ -----------
4336 15 [SP+248] %f31 %f30,%f31 %d30
4337 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4338 13 [SP+232] %f27 %f26,%f27 %d26
4339 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4340 11 [SP+216] %f23 %f22,%f23 %d22
4341 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4342 9 [SP+200] %f19 %f18,%f19 %d18
4343 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4344 7 [SP+184] %f15 %f14,%f15 %d14
4345 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4346 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4347 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4348 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4349 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4350 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4351 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4352
4353 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4354
4355 Integral arguments are always passed as 64-bit quantities appropriately
4356 extended.
4357
4358 Passing of floating point values is handled as follows.
4359 If a prototype is in scope:
4360 If the value is in a named argument (i.e. not a stdarg function or a
4361 value not part of the `...') then the value is passed in the appropriate
4362 fp reg.
4363 If the value is part of the `...' and is passed in one of the first 6
4364 slots then the value is passed in the appropriate int reg.
4365 If the value is part of the `...' and is not passed in one of the first 6
4366 slots then the value is passed in memory.
4367 If a prototype is not in scope:
4368 If the value is one of the first 6 arguments the value is passed in the
4369 appropriate integer reg and the appropriate fp reg.
4370 If the value is not one of the first 6 arguments the value is passed in
4371 the appropriate fp reg and in memory.
4372
4373
4374 Summary of the calling conventions implemented by GCC on SPARC:
4375
4376 32-bit ABI:
4377 size argument return value
4378
4379 small integer <4 int. reg. int. reg.
4380 word 4 int. reg. int. reg.
4381 double word 8 int. reg. int. reg.
4382
4383 _Complex small integer <8 int. reg. int. reg.
4384 _Complex word 8 int. reg. int. reg.
4385 _Complex double word 16 memory int. reg.
4386
4387 vector integer <=8 int. reg. FP reg.
4388 vector integer >8 memory memory
4389
4390 float 4 int. reg. FP reg.
4391 double 8 int. reg. FP reg.
4392 long double 16 memory memory
4393
4394 _Complex float 8 memory FP reg.
4395 _Complex double 16 memory FP reg.
4396 _Complex long double 32 memory FP reg.
4397
4398 vector float any memory memory
4399
4400 aggregate any memory memory
4401
4402
4403
4404 64-bit ABI:
4405 size argument return value
4406
4407 small integer <8 int. reg. int. reg.
4408 word 8 int. reg. int. reg.
4409 double word 16 int. reg. int. reg.
4410
4411 _Complex small integer <16 int. reg. int. reg.
4412 _Complex word 16 int. reg. int. reg.
4413 _Complex double word 32 memory int. reg.
4414
4415 vector integer <=16 FP reg. FP reg.
4416 vector integer 16<s<=32 memory FP reg.
4417 vector integer >32 memory memory
4418
4419 float 4 FP reg. FP reg.
4420 double 8 FP reg. FP reg.
4421 long double 16 FP reg. FP reg.
4422
4423 _Complex float 8 FP reg. FP reg.
4424 _Complex double 16 FP reg. FP reg.
4425 _Complex long double 32 memory FP reg.
4426
4427 vector float <=16 FP reg. FP reg.
4428 vector float 16<s<=32 memory FP reg.
4429 vector float >32 memory memory
4430
4431 aggregate <=16 reg. reg.
4432 aggregate 16<s<=32 memory reg.
4433 aggregate >32 memory memory
4434
4435
4436
4437 Note #1: complex floating-point types follow the extended SPARC ABIs as
4438 implemented by the Sun compiler.
4439
4440 Note #2: integral vector types follow the scalar floating-point types
4441 conventions to match what is implemented by the Sun VIS SDK.
4442
4443 Note #3: floating-point vector types follow the aggregate types
4444 conventions. */
4445
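/* A concrete reading of the 64-bit table above, for a hypothetical
   prototype "void f (int a, double b, float c)": slot 0 carries a in
   %o0, slot 1 carries b in %d2, and slot 2 carries c right-justified
   in %f5, matching rows 0-2 of the slot table (illustrative).  */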
4446
4447 /* Maximum number of int regs for args. */
4448 #define SPARC_INT_ARG_MAX 6
4449 /* Maximum number of fp regs for args. */
4450 #define SPARC_FP_ARG_MAX 16
4451
4452 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4453
4454 /* Handle the INIT_CUMULATIVE_ARGS macro.
4455 Initialize a variable CUM of type CUMULATIVE_ARGS
4456 for a call to a function whose data type is FNTYPE.
4457 For a library call, FNTYPE is 0. */
4458
4459 void
4460 init_cumulative_args (struct sparc_args *cum, tree fntype,
4461 rtx libname ATTRIBUTE_UNUSED,
4462 tree fndecl ATTRIBUTE_UNUSED)
4463 {
4464 cum->words = 0;
4465 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4466 cum->libcall_p = fntype == 0;
4467 }
4468
4469 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4470 When a prototype says `char' or `short', really pass an `int'. */
4471
4472 static bool
4473 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4474 {
4475 return TARGET_ARCH32 ? true : false;
4476 }
4477
4478 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4479
4480 static bool
4481 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4482 {
4483 return TARGET_ARCH64 ? true : false;
4484 }
4485
4486 /* Scan the record type TYPE and return the following predicates:
4487 - INTREGS_P: the record contains at least one field or sub-field
4488 that is eligible for promotion in integer registers.
4489 - FPREGS_P: the record contains at least one field or sub-field
4490 that is eligible for promotion in floating-point registers.
4491 - PACKED_P: the record contains at least one field that is packed.
4492
4493 Sub-fields are not taken into account for the PACKED_P predicate. */
4494
4495 static void
4496 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4497 {
4498 tree field;
4499
4500 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4501 {
4502 if (TREE_CODE (field) == FIELD_DECL)
4503 {
4504 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4505 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4506 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4507 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4508 && TARGET_FPU)
4509 *fpregs_p = 1;
4510 else
4511 *intregs_p = 1;
4512
4513 if (packed_p && DECL_PACKED (field))
4514 *packed_p = 1;
4515 }
4516 }
4517 }
4518
4519 /* Compute the slot number to pass an argument in.
4520 Return the slot number or -1 if passing on the stack.
4521
4522 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4523 the preceding args and about the function being called.
4524 MODE is the argument's machine mode.
4525 TYPE is the data type of the argument (as a tree).
4526 This is null for libcalls where that information may
4527 not be available.
4528 NAMED is nonzero if this argument is a named parameter
4529 (otherwise it is an extra parameter matching an ellipsis).
4530 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4531 *PREGNO records the register number to use if scalar type.
4532 *PPADDING records the amount of padding needed in words. */
4533
4534 static int
4535 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4536 tree type, int named, int incoming_p,
4537 int *pregno, int *ppadding)
4538 {
4539 int regbase = (incoming_p
4540 ? SPARC_INCOMING_INT_ARG_FIRST
4541 : SPARC_OUTGOING_INT_ARG_FIRST);
4542 int slotno = cum->words;
4543 enum mode_class mclass;
4544 int regno;
4545
4546 *ppadding = 0;
4547
4548 if (type && TREE_ADDRESSABLE (type))
4549 return -1;
4550
4551 if (TARGET_ARCH32
4552 && mode == BLKmode
4553 && type
4554 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4555 return -1;
4556
4557 /* For SPARC64, objects requiring 16-byte alignment get it. */
4558 if (TARGET_ARCH64
4559 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4560 && (slotno & 1) != 0)
4561 slotno++, *ppadding = 1;
4562
4563 mclass = GET_MODE_CLASS (mode);
4564 if (type && TREE_CODE (type) == VECTOR_TYPE)
4565 {
4566 /* Vector types deserve special treatment because they are
4567 polymorphic wrt their mode, depending upon whether VIS
4568 instructions are enabled. */
4569 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4570 {
4571 /* The SPARC port defines no floating-point vector modes. */
4572 gcc_assert (mode == BLKmode);
4573 }
4574 else
4575 {
4576 /* Integral vector types should either have a vector
4577 mode or an integral mode, because we are guaranteed
4578 by pass_by_reference that their size is not greater
4579 than 16 bytes and TImode is 16-byte wide. */
4580 gcc_assert (mode != BLKmode);
4581
4582 /* Vector integers are handled like floats according to
4583 the Sun VIS SDK. */
4584 mclass = MODE_FLOAT;
4585 }
4586 }
4587
4588 switch (mclass)
4589 {
4590 case MODE_FLOAT:
4591 case MODE_COMPLEX_FLOAT:
4592 if (TARGET_ARCH64 && TARGET_FPU && named)
4593 {
4594 if (slotno >= SPARC_FP_ARG_MAX)
4595 return -1;
4596 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4597 /* Arguments filling only a single FP register are
4598 right-justified in the outer double FP register. */
4599 if (GET_MODE_SIZE (mode) <= 4)
4600 regno++;
4601 break;
4602 }
4603 /* fallthrough */
4604
4605 case MODE_INT:
4606 case MODE_COMPLEX_INT:
4607 if (slotno >= SPARC_INT_ARG_MAX)
4608 return -1;
4609 regno = regbase + slotno;
4610 break;
4611
4612 case MODE_RANDOM:
4613 if (mode == VOIDmode)
4614 /* MODE is VOIDmode when generating the actual call. */
4615 return -1;
4616
4617 gcc_assert (mode == BLKmode);
4618
4619 if (TARGET_ARCH32
4620 || !type
4621 || (TREE_CODE (type) != VECTOR_TYPE
4622 && TREE_CODE (type) != RECORD_TYPE))
4623 {
4624 if (slotno >= SPARC_INT_ARG_MAX)
4625 return -1;
4626 regno = regbase + slotno;
4627 }
4628 else /* TARGET_ARCH64 && type */
4629 {
4630 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4631
4632 /* First see what kinds of registers we would need. */
4633 if (TREE_CODE (type) == VECTOR_TYPE)
4634 fpregs_p = 1;
4635 else
4636 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4637
4638 /* The ABI obviously doesn't specify how packed structures
4639 are passed. These are defined to be passed in int regs
4640 if possible, otherwise memory. */
4641 if (packed_p || !named)
4642 fpregs_p = 0, intregs_p = 1;
4643
4644 /* If all arg slots are filled, then must pass on stack. */
4645 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4646 return -1;
4647
4648 /* If there are only int args and all int arg slots are filled,
4649 then must pass on stack. */
4650 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4651 return -1;
4652
4653 /* Note that even if all int arg slots are filled, fp members may
4654 still be passed in regs if such regs are available.
4655 *PREGNO isn't set because there may be more than one, it's up
4656 to the caller to compute them. */
4657 return slotno;
4658 }
4659 break;
4660
4661 default:
4662 gcc_unreachable ();
4663 }
4664
4665 *pregno = regno;
4666 return slotno;
4667 }
4668
4669 /* Handle recursive register counting for structure field layout. */
4670
4671 struct function_arg_record_value_parms
4672 {
4673 rtx ret; /* return expression being built. */
4674 int slotno; /* slot number of the argument. */
4675 int named; /* whether the argument is named. */
4676 int regbase; /* regno of the base register. */
4677 int stack; /* 1 if part of the argument is on the stack. */
4678 int intoffset; /* offset of the first pending integer field. */
4679 unsigned int nregs; /* number of words passed in registers. */
4680 };
4681
4682 static void function_arg_record_value_3
4683 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4684 static void function_arg_record_value_2
4685 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4686 static void function_arg_record_value_1
4687 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4688 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4689 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4690
4691 /* A subroutine of function_arg_record_value. Traverse the structure
4692 recursively and determine how many registers will be required. */
4693
4694 static void
4695 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4696 struct function_arg_record_value_parms *parms,
4697 bool packed_p)
4698 {
4699 tree field;
4700
4701 /* We need to compute how many registers are needed so we can
4702 allocate the PARALLEL, but before we can do that we need to know
4703 whether there are any packed fields. The ABI obviously doesn't
4704 specify how structures are passed in this case, so they are
4705 defined to be passed in int regs if possible, otherwise memory,
4706 regardless of whether there are fp values present. */
4707
4708 if (! packed_p)
4709 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4710 {
4711 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4712 {
4713 packed_p = true;
4714 break;
4715 }
4716 }
4717
4718 /* Compute how many registers we need. */
4719 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4720 {
4721 if (TREE_CODE (field) == FIELD_DECL)
4722 {
4723 HOST_WIDE_INT bitpos = startbitpos;
4724
4725 if (DECL_SIZE (field) != 0)
4726 {
4727 if (integer_zerop (DECL_SIZE (field)))
4728 continue;
4729
4730 if (host_integerp (bit_position (field), 1))
4731 bitpos += int_bit_position (field);
4732 }
4733
4734 /* ??? FIXME: else assume zero offset. */
4735
4736 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4737 function_arg_record_value_1 (TREE_TYPE (field),
4738 bitpos,
4739 parms,
4740 packed_p);
4741 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4742 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4743 && TARGET_FPU
4744 && parms->named
4745 && ! packed_p)
4746 {
4747 if (parms->intoffset != -1)
4748 {
4749 unsigned int startbit, endbit;
4750 int intslots, this_slotno;
4751
4752 startbit = parms->intoffset & -BITS_PER_WORD;
4753 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4754
4755 intslots = (endbit - startbit) / BITS_PER_WORD;
4756 this_slotno = parms->slotno + parms->intoffset
4757 / BITS_PER_WORD;
4758
4759 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4760 {
4761 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4762 /* We need to pass this field on the stack. */
4763 parms->stack = 1;
4764 }
4765
4766 parms->nregs += intslots;
4767 parms->intoffset = -1;
4768 }
4769
4770 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4771 If it wasn't true we wouldn't be here. */
4772 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4773 && DECL_MODE (field) == BLKmode)
4774 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4775 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4776 parms->nregs += 2;
4777 else
4778 parms->nregs += 1;
4779 }
4780 else
4781 {
4782 if (parms->intoffset == -1)
4783 parms->intoffset = bitpos;
4784 }
4785 }
4786 }
4787 }
4788
4789 /* A subroutine of function_arg_record_value. Assign the bits of the
4790 structure between parms->intoffset and bitpos to integer registers. */
4791
4792 static void
4793 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4794 struct function_arg_record_value_parms *parms)
4795 {
4796 enum machine_mode mode;
4797 unsigned int regno;
4798 unsigned int startbit, endbit;
4799 int this_slotno, intslots, intoffset;
4800 rtx reg;
4801
4802 if (parms->intoffset == -1)
4803 return;
4804
4805 intoffset = parms->intoffset;
4806 parms->intoffset = -1;
4807
4808 startbit = intoffset & -BITS_PER_WORD;
4809 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4810 intslots = (endbit - startbit) / BITS_PER_WORD;
4811 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4812
4813 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4814 if (intslots <= 0)
4815 return;
4816
4817 /* If this is the trailing part of a word, only load that much into
4818 the register. Otherwise load the whole register. Note that in
4819 the latter case we may pick up unwanted bits. It's not a problem
4820 at the moment, but we may wish to revisit this. */
4821
4822 if (intoffset % BITS_PER_WORD != 0)
4823 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4824 MODE_INT);
4825 else
4826 mode = word_mode;
4827
4828 intoffset /= BITS_PER_UNIT;
4829 do
4830 {
4831 regno = parms->regbase + this_slotno;
4832 reg = gen_rtx_REG (mode, regno);
4833 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4834 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4835
4836 this_slotno += 1;
4837 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4838 mode = word_mode;
4839 parms->nregs += 1;
4840 intslots -= 1;
4841 }
4842 while (intslots > 0);
4843 }
4844
4845 /* A subroutine of function_arg_record_value. Traverse the structure
4846 recursively and assign bits to floating point registers. Track which
4847 bits in between need integer registers; invoke function_arg_record_value_3
4848 to make that happen. */
4849
4850 static void
4851 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4852 struct function_arg_record_value_parms *parms,
4853 bool packed_p)
4854 {
4855 tree field;
4856
4857 if (! packed_p)
4858 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4859 {
4860 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4861 {
4862 packed_p = true;
4863 break;
4864 }
4865 }
4866
4867 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4868 {
4869 if (TREE_CODE (field) == FIELD_DECL)
4870 {
4871 HOST_WIDE_INT bitpos = startbitpos;
4872
4873 if (DECL_SIZE (field) != 0)
4874 {
4875 if (integer_zerop (DECL_SIZE (field)))
4876 continue;
4877
4878 if (host_integerp (bit_position (field), 1))
4879 bitpos += int_bit_position (field);
4880 }
4881
4882 /* ??? FIXME: else assume zero offset. */
4883
4884 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4885 function_arg_record_value_2 (TREE_TYPE (field),
4886 bitpos,
4887 parms,
4888 packed_p);
4889 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4890 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4891 && TARGET_FPU
4892 && parms->named
4893 && ! packed_p)
4894 {
4895 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4896 int regno, nregs, pos;
4897 enum machine_mode mode = DECL_MODE (field);
4898 rtx reg;
4899
4900 function_arg_record_value_3 (bitpos, parms);
4901
4902 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4903 && mode == BLKmode)
4904 {
4905 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4906 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4907 }
4908 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4909 {
4910 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4911 nregs = 2;
4912 }
4913 else
4914 nregs = 1;
4915
4916 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4917 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4918 regno++;
4919 reg = gen_rtx_REG (mode, regno);
4920 pos = bitpos / BITS_PER_UNIT;
4921 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4922 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4923 parms->nregs += 1;
4924 while (--nregs > 0)
4925 {
4926 regno += GET_MODE_SIZE (mode) / 4;
4927 reg = gen_rtx_REG (mode, regno);
4928 pos += GET_MODE_SIZE (mode);
4929 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4930 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4931 parms->nregs += 1;
4932 }
4933 }
4934 else
4935 {
4936 if (parms->intoffset == -1)
4937 parms->intoffset = bitpos;
4938 }
4939 }
4940 }
4941 }
4942
4943 /* Used by function_arg and function_value to implement the complex
4944 conventions of the 64-bit ABI for passing and returning structures.
4945 Return an expression valid as a return value for the two macros
4946 FUNCTION_ARG and FUNCTION_VALUE.
4947
4948 TYPE is the data type of the argument (as a tree).
4949 This is null for libcalls where that information may
4950 not be available.
4951 MODE is the argument's machine mode.
4952 SLOTNO is the index number of the argument's slot in the parameter array.
4953 NAMED is nonzero if this argument is a named parameter
4954 (otherwise it is an extra parameter matching an ellipsis).
4955 REGBASE is the regno of the base register for the parameter array. */
4956
4957 static rtx
4958 function_arg_record_value (tree type, enum machine_mode mode,
4959 int slotno, int named, int regbase)
4960 {
4961 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4962 struct function_arg_record_value_parms parms;
4963 unsigned int nregs;
4964
4965 parms.ret = NULL_RTX;
4966 parms.slotno = slotno;
4967 parms.named = named;
4968 parms.regbase = regbase;
4969 parms.stack = 0;
4970
4971 /* Compute how many registers we need. */
4972 parms.nregs = 0;
4973 parms.intoffset = 0;
4974 function_arg_record_value_1 (type, 0, &parms, false);
4975
4976 /* Take into account pending integer fields. */
4977 if (parms.intoffset != -1)
4978 {
4979 unsigned int startbit, endbit;
4980 int intslots, this_slotno;
4981
4982 startbit = parms.intoffset & -BITS_PER_WORD;
4983 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4984 intslots = (endbit - startbit) / BITS_PER_WORD;
4985 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4986
4987 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4988 {
4989 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4990 /* We need to pass this field on the stack. */
4991 parms.stack = 1;
4992 }
4993
4994 parms.nregs += intslots;
4995 }
4996 nregs = parms.nregs;
4997
4998 /* Allocate the vector and handle some annoying special cases. */
4999 if (nregs == 0)
5000 {
5001 /* ??? Empty structure has no value? Duh? */
5002 if (typesize <= 0)
5003 {
5004 /* Though there's nothing really to store, return a word register
5005 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5006 leads to breakage due to the fact that there are zero bytes to
5007 load. */
5008 return gen_rtx_REG (mode, regbase);
5009 }
5010 else
5011 {
5012 /* ??? C++ has structures with no fields, and yet a size. Give up
5013 for now and pass everything back in integer registers. */
5014 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5015 }
5016 if (nregs + slotno > SPARC_INT_ARG_MAX)
5017 nregs = SPARC_INT_ARG_MAX - slotno;
5018 }
5019 gcc_assert (nregs != 0);
5020
5021 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5022
5023 /* If at least one field must be passed on the stack, generate
5024 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5025 also be passed on the stack. We can't do much better because the
5026 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5027 of structures for which the fields passed exclusively in registers
5028 are not at the beginning of the structure. */
5029 if (parms.stack)
5030 XVECEXP (parms.ret, 0, 0)
5031 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5032
5033 /* Fill in the entries. */
5034 parms.nregs = 0;
5035 parms.intoffset = 0;
5036 function_arg_record_value_2 (type, 0, &parms, false);
5037 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5038
5039 gcc_assert (parms.nregs == nregs);
5040
5041 return parms.ret;
5042 }
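
/* Sketch of the result for a hypothetical first argument of type
   "struct { double d; long l; }": a two-element PARALLEL,
   (expr_list (reg:DF %f0) (const_int 0)) for the FP field and
   (expr_list (reg:DI %o1) (const_int 8)) for the integer field.  */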
5043
5044 /* Used by function_arg and function_value to implement the conventions
5045 of the 64-bit ABI for passing and returning unions.
5046 Return an expression valid as a return value for the two macros
5047 FUNCTION_ARG and FUNCTION_VALUE.
5048
5049 SIZE is the size in bytes of the union.
5050 MODE is the argument's machine mode.
SLOTNO is the index number of the argument's slot in the parameter array.
5051 REGNO is the hard register the union will be passed in. */
5052
5053 static rtx
5054 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5055 int regno)
5056 {
5057 int nwords = ROUND_ADVANCE (size), i;
5058 rtx regs;
5059
5060 /* See comment in previous function for empty structures. */
5061 if (nwords == 0)
5062 return gen_rtx_REG (mode, regno);
5063
5064 if (slotno == SPARC_INT_ARG_MAX - 1)
5065 nwords = 1;
5066
5067 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5068
5069 for (i = 0; i < nwords; i++)
5070 {
5071 /* Unions are passed left-justified. */
5072 XVECEXP (regs, 0, i)
5073 = gen_rtx_EXPR_LIST (VOIDmode,
5074 gen_rtx_REG (word_mode, regno),
5075 GEN_INT (UNITS_PER_WORD * i));
5076 regno++;
5077 }
5078
5079 return regs;
5080 }
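
/* For a hypothetical 16-byte union passed in slot 0, this builds
   (parallel [(reg:DI %o0) at byte 0, (reg:DI %o1) at byte 8]); an
   8-byte union occupies %o0 alone (illustrative).  */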
5081
5082 /* Used by function_arg and function_value to implement the conventions
5083 for passing and returning large (BLKmode) vectors.
5084 Return an expression valid as a return value for the two macros
5085 FUNCTION_ARG and FUNCTION_VALUE.
5086
5087 SIZE is the size in bytes of the vector.
5088 BASE_MODE is the argument's base machine mode.
5089 REGNO is the FP hard register the vector will be passed in. */
5090
5091 static rtx
5092 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5093 {
5094 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5095 int nregs = size / base_mode_size, i;
5096 rtx regs;
5097
5098 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5099
5100 for (i = 0; i < nregs; i++)
5101 {
5102 XVECEXP (regs, 0, i)
5103 = gen_rtx_EXPR_LIST (VOIDmode,
5104 gen_rtx_REG (base_mode, regno),
5105 GEN_INT (base_mode_size * i));
5106 regno += base_mode_size / 4;
5107 }
5108
5109 return regs;
5110 }
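
/* E.g. an 8-byte vector with SFmode inner mode starting at %f0 yields
   (parallel [(reg:SF %f0) at byte 0, (reg:SF %f1) at byte 4])
   (illustrative).  */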
5111
5112 /* Handle the FUNCTION_ARG macro.
5113 Determine where to put an argument to a function.
5114 Value is zero to push the argument on the stack,
5115 or a hard register in which to store the argument.
5116
5117 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5118 the preceding args and about the function being called.
5119 MODE is the argument's machine mode.
5120 TYPE is the data type of the argument (as a tree).
5121 This is null for libcalls where that information may
5122 not be available.
5123 NAMED is nonzero if this argument is a named parameter
5124 (otherwise it is an extra parameter matching an ellipsis).
5125 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5126
5127 rtx
5128 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5129 tree type, int named, int incoming_p)
5130 {
5131 int regbase = (incoming_p
5132 ? SPARC_INCOMING_INT_ARG_FIRST
5133 : SPARC_OUTGOING_INT_ARG_FIRST);
5134 int slotno, regno, padding;
5135 enum mode_class mclass = GET_MODE_CLASS (mode);
5136
5137 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5138 &regno, &padding);
5139 if (slotno == -1)
5140 return 0;
5141
5142 /* Vector types deserve special treatment because they are polymorphic wrt
5143 their mode, depending upon whether VIS instructions are enabled. */
5144 if (type && TREE_CODE (type) == VECTOR_TYPE)
5145 {
5146 HOST_WIDE_INT size = int_size_in_bytes (type);
5147 gcc_assert ((TARGET_ARCH32 && size <= 8)
5148 || (TARGET_ARCH64 && size <= 16));
5149
5150 if (mode == BLKmode)
5151 return function_arg_vector_value (size,
5152 TYPE_MODE (TREE_TYPE (type)),
5153 SPARC_FP_ARG_FIRST + 2*slotno);
5154 else
5155 mclass = MODE_FLOAT;
5156 }
5157
5158 if (TARGET_ARCH32)
5159 return gen_rtx_REG (mode, regno);
5160
5161 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5162 and are promoted to registers if possible. */
5163 if (type && TREE_CODE (type) == RECORD_TYPE)
5164 {
5165 HOST_WIDE_INT size = int_size_in_bytes (type);
5166 gcc_assert (size <= 16);
5167
5168 return function_arg_record_value (type, mode, slotno, named, regbase);
5169 }
5170
5171 /* Unions up to 16 bytes in size are passed in integer registers. */
5172 else if (type && TREE_CODE (type) == UNION_TYPE)
5173 {
5174 HOST_WIDE_INT size = int_size_in_bytes (type);
5175 gcc_assert (size <= 16);
5176
5177 return function_arg_union_value (size, mode, slotno, regno);
5178 }
5179
5180 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5181 but also have the slot allocated for them.
5182 If no prototype is in scope fp values in register slots get passed
5183 in two places, either fp regs and int regs or fp regs and memory. */
5184 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5185 && SPARC_FP_REG_P (regno))
5186 {
5187 rtx reg = gen_rtx_REG (mode, regno);
5188 if (cum->prototype_p || cum->libcall_p)
5189 {
5190 /* "* 2" because fp reg numbers are recorded in 4 byte
5191 quantities. */
5192 #if 0
5193 /* ??? This will cause the value to be passed in the fp reg and
5194 in the stack. When a prototype exists we want to pass the
5195 value in the reg but reserve space on the stack. That's an
5196 optimization, and is deferred [for a bit]. */
5197 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5198 return gen_rtx_PARALLEL (mode,
5199 gen_rtvec (2,
5200 gen_rtx_EXPR_LIST (VOIDmode,
5201 NULL_RTX, const0_rtx),
5202 gen_rtx_EXPR_LIST (VOIDmode,
5203 reg, const0_rtx)));
5204 else
5205 #else
5206 /* ??? It seems that passing back a register even when past
5207 the area declared by REG_PARM_STACK_SPACE will allocate
5208 space appropriately, and will not copy the data onto the
5209 stack, exactly as we desire.
5210
5211 This is due to locate_and_pad_parm being called in
5212 expand_call whenever reg_parm_stack_space > 0, which
5213 while beneficial to our example here, would seem to be
5214 in error from what had been intended. Ho hum... -- r~ */
5215 #endif
5216 return reg;
5217 }
5218 else
5219 {
5220 rtx v0, v1;
5221
5222 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5223 {
5224 int intreg;
5225
5226 /* On incoming, we don't need to know that the value
5227 is passed in %f0 and %i0, and it confuses other parts,
5228 causing needless spillage even in the simplest cases. */
5229 if (incoming_p)
5230 return reg;
5231
5232 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5233 + (regno - SPARC_FP_ARG_FIRST) / 2);
5234
5235 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5236 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5237 const0_rtx);
5238 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5239 }
5240 else
5241 {
5242 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5243 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5244 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5245 }
5246 }
5247 }
5248
5249 /* All other aggregate types are passed in an integer register in a mode
5250 corresponding to the size of the type. */
5251 else if (type && AGGREGATE_TYPE_P (type))
5252 {
5253 HOST_WIDE_INT size = int_size_in_bytes (type);
5254 gcc_assert (size <= 16);
5255
5256 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5257 }
5258
5259 return gen_rtx_REG (mode, regno);
5260 }
5261
5262 /* For an arg passed partly in registers and partly in memory,
5263 this is the number of bytes of registers used.
5264 For args passed entirely in registers or entirely in memory, zero.
5265
5266 Any arg that starts in the first 6 regs but won't entirely fit in them
5267 needs partial registers on v8. On v9, structures with integer
5268 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5269 values that begin in the last fp reg [where "last fp reg" varies with the
5270 mode] will be split between that reg and memory. */
5271
5272 static int
5273 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5274 tree type, bool named)
5275 {
5276 int slotno, regno, padding;
5277
5278 /* We pass 0 for incoming_p here; it doesn't matter. */
5279 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5280
5281 if (slotno == -1)
5282 return 0;
5283
5284 if (TARGET_ARCH32)
5285 {
5286 if ((slotno + (mode == BLKmode
5287 ? ROUND_ADVANCE (int_size_in_bytes (type))
5288 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5289 > SPARC_INT_ARG_MAX)
5290 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5291 }
5292 else
5293 {
5294 /* We are guaranteed by pass_by_reference that the size of the
5295 argument is not greater than 16 bytes, so we only need to return
5296 one word if the argument is partially passed in registers. */
5297
5298 if (type && AGGREGATE_TYPE_P (type))
5299 {
5300 int size = int_size_in_bytes (type);
5301
5302 if (size > UNITS_PER_WORD
5303 && slotno == SPARC_INT_ARG_MAX - 1)
5304 return UNITS_PER_WORD;
5305 }
5306 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5307 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5308 && ! (TARGET_FPU && named)))
5309 {
5310 /* The complex types are passed as packed types. */
5311 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5312 && slotno == SPARC_INT_ARG_MAX - 1)
5313 return UNITS_PER_WORD;
5314 }
5315 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5316 {
5317 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5318 > SPARC_FP_ARG_MAX)
5319 return UNITS_PER_WORD;
5320 }
5321 }
5322
5323 return 0;
5324 }
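
/* Example for the 32-bit branch above: a DImode argument assigned to
   slot 5 needs 2 words but only slot 5 is left, so the function
   returns (6 - 5) * UNITS_PER_WORD = 4 bytes passed in %o5, the rest
   going to the stack (illustrative).  */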
5325
5326 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5327 Specify whether to pass the argument by reference. */
5328
5329 static bool
5330 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5331 enum machine_mode mode, tree type,
5332 bool named ATTRIBUTE_UNUSED)
5333 {
5334 if (TARGET_ARCH32)
5335 /* Original SPARC 32-bit ABI says that structures and unions,
5336 and quad-precision floats are passed by reference. For Pascal,
5337 also pass arrays by reference. All other base types are passed
5338 in registers.
5339
5340 Extended ABI (as implemented by the Sun compiler) says that all
5341 complex floats are passed by reference. Pass complex integers
5342 in registers up to 8 bytes. More generally, enforce the 2-word
5343 cap for passing arguments in registers.
5344
5345 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5346 integers are passed like floats of the same size, that is in
5347 registers up to 8 bytes. Pass all vector floats by reference
5348 like structure and unions. */
5349 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5350 || mode == SCmode
5351 /* Catch CDImode, TFmode, DCmode and TCmode. */
5352 || GET_MODE_SIZE (mode) > 8
5353 || (type
5354 && TREE_CODE (type) == VECTOR_TYPE
5355 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5356 else
5357 /* Original SPARC 64-bit ABI says that structures and unions
5358 smaller than 16 bytes are passed in registers, as well as
5359 all other base types.
5360
5361 Extended ABI (as implemented by the Sun compiler) says that
5362 complex floats are passed in registers up to 16 bytes. Pass
5363 all complex integers in registers up to 16 bytes. More generally,
5364 enforce the 2-word cap for passing arguments in registers.
5365
5366 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5367 integers are passed like floats of the same size, that is in
5368 registers (up to 16 bytes). Pass all vector floats like structure
5369 and unions. */
5370 return ((type
5371 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5372 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5373 /* Catch CTImode and TCmode. */
5374 || GET_MODE_SIZE (mode) > 16);
5375 }
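
/* Illustrative instances of the rules above: on 32-bit, TFmode
   (long double), any aggregate and _Complex double go by reference,
   while DImode and an 8-byte _Complex int stay in registers; on
   64-bit, a 16-byte struct still travels in registers but a 32-byte
   one is passed by reference.  */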
5376
5377 /* Handle the FUNCTION_ARG_ADVANCE macro.
5378 Update the data in CUM to advance over an argument
5379 of mode MODE and data type TYPE.
5380 TYPE is null for libcalls where that information may not be available. */
5381
5382 void
5383 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5384 tree type, int named)
5385 {
5386 int slotno, regno, padding;
5387
5388 /* We pass 0 for incoming_p here; it doesn't matter. */
5389 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5390
5391 /* If the register required leading padding, add it. */
5392 if (slotno != -1)
5393 cum->words += padding;
5394
5395 if (TARGET_ARCH32)
5396 {
5397 cum->words += (mode != BLKmode
5398 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5399 : ROUND_ADVANCE (int_size_in_bytes (type)));
5400 }
5401 else
5402 {
5403 if (type && AGGREGATE_TYPE_P (type))
5404 {
5405 int size = int_size_in_bytes (type);
5406
5407 if (size <= 8)
5408 ++cum->words;
5409 else if (size <= 16)
5410 cum->words += 2;
5411 else /* passed by reference */
5412 ++cum->words;
5413 }
5414 else
5415 {
5416 cum->words += (mode != BLKmode
5417 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5418 : ROUND_ADVANCE (int_size_in_bytes (type)));
5419 }
5420 }
5421 }
5422
5423 /* Handle the FUNCTION_ARG_PADDING macro.
5424 For the 64-bit ABI, structs are always stored left-shifted in their
5425 argument slot. */
5426
5427 enum direction
5428 function_arg_padding (enum machine_mode mode, tree type)
5429 {
5430 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5431 return upward;
5432
5433 /* Fall back to the default. */
5434 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5435 }
5436
5437 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5438 Specify whether to return the return value in memory. */
5439
5440 static bool
5441 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5442 {
5443 if (TARGET_ARCH32)
5444 /* Original SPARC 32-bit ABI says that structures and unions,
5445 and quad-precision floats are returned in memory. All other
5446 base types are returned in registers.
5447
5448 Extended ABI (as implemented by the Sun compiler) says that
5449 all complex floats are returned in registers (8 FP registers
5450 at most for '_Complex long double'). Return all complex integers
5451 in registers (4 at most for '_Complex long long').
5452
5453 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5454 integers are returned like floats of the same size, that is in
5455 registers up to 8 bytes and in memory otherwise. Return all
5456 vector floats in memory like structure and unions; note that
5457 they always have BLKmode like the latter. */
5458 return (TYPE_MODE (type) == BLKmode
5459 || TYPE_MODE (type) == TFmode
5460 || (TREE_CODE (type) == VECTOR_TYPE
5461 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5462 else
5463 /* Original SPARC 64-bit ABI says that structures and unions
5464 smaller than 32 bytes are returned in registers, as well as
5465 all other base types.
5466
5467 Extended ABI (as implemented by the Sun compiler) says that all
5468 complex floats are returned in registers (8 FP registers at most
5469 for '_Complex long double'). Return all complex integers in
5470 registers (4 at most for '_Complex TItype').
5471
5472 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5473 integers are returned like floats of the same size, that is in
5474 registers. Return all vector floats like structure and unions;
5475 note that they always have BLKmode like the latter. */
5476 return ((TYPE_MODE (type) == BLKmode
5477 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5478 }
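
/* Illustrative cases for the rules above (a sketch, not exhaustive):
     TARGET_ARCH32: long double (TFmode)        -> memory
                    _Complex double             -> registers
                    8-byte VIS vector of ints   -> registers
                    any BLKmode struct or union -> memory
     TARGET_ARCH64: 32-byte BLKmode struct      -> registers
                    33-byte BLKmode struct      -> memory  */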
5479
5480 /* Handle the TARGET_STRUCT_VALUE target hook.
5481 Return where to find the structure return value address. */
5482
5483 static rtx
5484 sparc_struct_value_rtx (tree fndecl, int incoming)
5485 {
5486 if (TARGET_ARCH64)
5487 return 0;
5488 else
5489 {
5490 rtx mem;
5491
5492 if (incoming)
5493 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5494 STRUCT_VALUE_OFFSET));
5495 else
5496 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5497 STRUCT_VALUE_OFFSET));
5498
5499 /* Only follow the SPARC ABI for fixed-size structure returns.
5500 Variable-sized structure returns are handled per the normal
5501 procedures in GCC. This is enabled by -mstd-struct-return. */
5502 if (incoming == 2
5503 && sparc_std_struct_return
5504 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5505 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5506 {
5507 /* We must check and adjust the return address, since it is
5508 optional whether the caller really provides the return
5509 object. */
5510 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5511 rtx scratch = gen_reg_rtx (SImode);
5512 rtx endlab = gen_label_rtx ();
5513
5514 /* Calculate the return object size. */
5515 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5516 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5517 /* Construct a temporary return value. */
5518 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5519
5520 /* Implement the SPARC 32-bit psABI callee struct return checking
5521 requirements:
5522
5523 Fetch the instruction where we will return to and see if
5524 it's an unimp instruction (the most significant 10 bits
5525 will be zero). */
5526 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5527 plus_constant (ret_rtx, 8)));
5528 /* Assume the size is valid and pre-adjust. */
5529 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5530 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5531 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5532 /* Assign stack temp:
5533 Write the address of the memory pointed to by temp_val into
5534 the memory pointed to by mem. */
5535 emit_move_insn (mem, XEXP (temp_val, 0));
5536 emit_label (endlab);
5537 }
5538
5539 set_mem_alias_set (mem, struct_value_alias_set);
5540 return mem;
5541 }
5542 }
5543
5544 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5545 For v9, function return values are subject to the same rules as arguments,
5546 except that up to 32 bytes may be returned in registers. */
5547
5548 rtx
5549 function_value (tree type, enum machine_mode mode, int incoming_p)
5550 {
5551 /* Beware that the two values are swapped here wrt function_arg. */
5552 int regbase = (incoming_p
5553 ? SPARC_OUTGOING_INT_ARG_FIRST
5554 : SPARC_INCOMING_INT_ARG_FIRST);
5555 enum mode_class mclass = GET_MODE_CLASS (mode);
5556 int regno;
5557
5558 /* Vector types deserve special treatment because they are polymorphic wrt
5559 their mode, depending upon whether VIS instructions are enabled. */
5560 if (type && TREE_CODE (type) == VECTOR_TYPE)
5561 {
5562 HOST_WIDE_INT size = int_size_in_bytes (type);
5563 gcc_assert ((TARGET_ARCH32 && size <= 8)
5564 || (TARGET_ARCH64 && size <= 32));
5565
5566 if (mode == BLKmode)
5567 return function_arg_vector_value (size,
5568 TYPE_MODE (TREE_TYPE (type)),
5569 SPARC_FP_ARG_FIRST);
5570 else
5571 mclass = MODE_FLOAT;
5572 }
5573
5574 if (TARGET_ARCH64 && type)
5575 {
5576 /* Structures up to 32 bytes in size are returned in registers. */
5577 if (TREE_CODE (type) == RECORD_TYPE)
5578 {
5579 HOST_WIDE_INT size = int_size_in_bytes (type);
5580 gcc_assert (size <= 32);
5581
5582 return function_arg_record_value (type, mode, 0, 1, regbase);
5583 }
5584
5585 /* Unions up to 32 bytes in size are returned in integer registers. */
5586 else if (TREE_CODE (type) == UNION_TYPE)
5587 {
5588 HOST_WIDE_INT size = int_size_in_bytes (type);
5589 gcc_assert (size <= 32);
5590
5591 return function_arg_union_value (size, mode, 0, regbase);
5592 }
5593
5594 /* Objects that require it are returned in FP registers. */
5595 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5596 ;
5597
5598 /* All other aggregate types are returned in an integer register in a
5599 mode corresponding to the size of the type. */
5600 else if (AGGREGATE_TYPE_P (type))
5601 {
5604 HOST_WIDE_INT size = int_size_in_bytes (type);
5605 gcc_assert (size <= 32);
5606
5607 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5608
5609 /* ??? We probably should have made the same ABI change in
5610 3.4.0 as the one we made for unions. The latter was
5611 required by the SCD though, while the former is not
5612 specified, so we favored compatibility and efficiency.
5613
5614 Now we're stuck for aggregates larger than 16 bytes,
5615 because OImode vanished in the meantime. Let's not
5616 try to be unduly clever, and simply follow the ABI
5617 for unions in that case. */
5618 if (mode == BLKmode)
5619 return function_arg_union_value (size, mode, 0, regbase);
5620 else
5621 mclass = MODE_INT;
5622 }
5623
5624 /* This must match PROMOTE_FUNCTION_MODE. */
5625 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5626 mode = word_mode;
5627 }
5628
5629 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5630 regno = SPARC_FP_ARG_FIRST;
5631 else
5632 regno = regbase;
5633
5634 return gen_rtx_REG (mode, regno);
5635 }
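
/* For example (illustrative only): under TARGET_ARCH64 a 24-byte
   RECORD_TYPE goes through function_arg_record_value and a 24-byte
   UNION_TYPE through function_arg_union_value, while a plain 'int' is
   widened to word_mode per PROMOTE_FUNCTION_MODE; the integer register
   base is %o0 as seen by the caller and %i0 inside the callee. */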
5636
5637 /* Do what is necessary for `va_start'. We look at the current function
5638 to determine if stdarg or varargs is used and return the address of
5639 the first unnamed parameter. */
5640
5641 static rtx
5642 sparc_builtin_saveregs (void)
5643 {
5644 int first_reg = current_function_args_info.words;
5645 rtx address;
5646 int regno;
5647
5648 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5649 emit_move_insn (gen_rtx_MEM (word_mode,
5650 gen_rtx_PLUS (Pmode,
5651 frame_pointer_rtx,
5652 GEN_INT (FIRST_PARM_OFFSET (0)
5653 + (UNITS_PER_WORD
5654 * regno)))),
5655 gen_rtx_REG (word_mode,
5656 SPARC_INCOMING_INT_ARG_FIRST + regno));
5657
5658 address = gen_rtx_PLUS (Pmode,
5659 frame_pointer_rtx,
5660 GEN_INT (FIRST_PARM_OFFSET (0)
5661 + UNITS_PER_WORD * first_reg));
5662
5663 return address;
5664 }
5665
5666 /* Implement `va_start' for stdarg. */
5667
5668 void
5669 sparc_va_start (tree valist, rtx nextarg)
5670 {
5671 nextarg = expand_builtin_saveregs ();
5672 std_expand_builtin_va_start (valist, nextarg);
5673 }
5674
5675 /* Implement `va_arg' for stdarg. */
5676
5677 static tree
5678 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5679 {
5680 HOST_WIDE_INT size, rsize, align;
5681 tree addr, incr;
5682 bool indirect;
5683 tree ptrtype = build_pointer_type (type);
5684
5685 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5686 {
5687 indirect = true;
5688 size = rsize = UNITS_PER_WORD;
5689 align = 0;
5690 }
5691 else
5692 {
5693 indirect = false;
5694 size = int_size_in_bytes (type);
5695 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5696 align = 0;
5697
5698 if (TARGET_ARCH64)
5699 {
5700 /* For SPARC64, objects requiring 16-byte alignment get it. */
5701 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5702 align = 2 * UNITS_PER_WORD;
5703
5704 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5705 are left-justified in their slots. */
5706 if (AGGREGATE_TYPE_P (type))
5707 {
5708 if (size == 0)
5709 size = rsize = UNITS_PER_WORD;
5710 else
5711 size = rsize;
5712 }
5713 }
5714 }
5715
5716 incr = valist;
5717 if (align)
5718 {
5719 incr = fold_build2 (PLUS_EXPR, ptr_type_node, incr,
5720 ssize_int (align - 1));
5721 incr = fold_build2 (BIT_AND_EXPR, ptr_type_node, incr,
5722 ssize_int (-align));
5723 }
5724
5725 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5726 addr = incr;
5727
5728 if (BYTES_BIG_ENDIAN && size < rsize)
5729 addr = fold_build2 (PLUS_EXPR, ptr_type_node, incr,
5730 ssize_int (rsize - size));
5731
5732 if (indirect)
5733 {
5734 addr = fold_convert (build_pointer_type (ptrtype), addr);
5735 addr = build_va_arg_indirect_ref (addr);
5736 }
5737 /* If the address isn't aligned properly for the type,
5738 we may need to copy to a temporary.
5739 FIXME: This is inefficient. Usually we can do this
5740 in registers. */
5741 else if (align == 0
5742 && TYPE_ALIGN (type) > BITS_PER_WORD)
5743 {
5744 tree tmp = create_tmp_var (type, "va_arg_tmp");
5745 tree dest_addr = build_fold_addr_expr (tmp);
5746
5747 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY], 3,
5748 dest_addr,
5749 addr,
5750 size_int (rsize));
5751
5752 gimplify_and_add (copy, pre_p);
5753 addr = dest_addr;
5754 }
5755 else
5756 addr = fold_convert (ptrtype, addr);
5757
5758 incr = fold_build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize));
5759 incr = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, incr);
5760 gimplify_and_add (incr, post_p);
5761
5762 return build_va_arg_indirect_ref (addr);
5763 }
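
/* Rough effect of the above for a 4-byte 'int' on TARGET_ARCH64 (a
   sketch): rsize is rounded up to 8, and since SPARC is big-endian the
   address is bumped by rsize - size == 4 so the value is read from the
   low-order half of its 8-byte slot; valist then advances by the full
   8 bytes. */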
5764 \f
5765 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5766 Specify whether the vector mode is supported by the hardware. */
5767
5768 static bool
5769 sparc_vector_mode_supported_p (enum machine_mode mode)
5770 {
5771 return TARGET_VIS && VECTOR_MODE_P (mode);
5772 }
5773 \f
5774 /* Return the string to output an unconditional branch to LABEL, which is
5775 the operand number of the label.
5776
5777 DEST is the destination insn (i.e. the label), INSN is the source. */
5778
5779 const char *
5780 output_ubranch (rtx dest, int label, rtx insn)
5781 {
5782 static char string[64];
5783 bool v9_form = false;
5784 char *p;
5785
5786 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5787 {
5788 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5789 - INSN_ADDRESSES (INSN_UID (insn)));
5790 /* Leave some instructions for "slop". */
5791 if (delta >= -260000 && delta < 260000)
5792 v9_form = true;
5793 }
5794
5795 if (v9_form)
5796 strcpy (string, "ba%*,pt\t%%xcc, ");
5797 else
5798 strcpy (string, "b%*\t");
5799
5800 p = strchr (string, '\0');
5801 *p++ = '%';
5802 *p++ = 'l';
5803 *p++ = '0' + label;
5804 *p++ = '%';
5805 *p++ = '(';
5806 *p = '\0';
5807
5808 return string;
5809 }
5810
5811 /* Return the string to output a conditional branch to LABEL, which is
5812 the operand number of the label. OP is the conditional expression.
5813 XEXP (OP, 0) is assumed to be a condition code register (integer or
5814 floating point) and its mode specifies what kind of comparison we made.
5815
5816 DEST is the destination insn (i.e. the label), INSN is the source.
5817
5818 REVERSED is nonzero if we should reverse the sense of the comparison.
5819
5820 ANNUL is nonzero if we should generate an annulling branch. */
5821
5822 const char *
5823 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5824 rtx insn)
5825 {
5826 static char string[64];
5827 enum rtx_code code = GET_CODE (op);
5828 rtx cc_reg = XEXP (op, 0);
5829 enum machine_mode mode = GET_MODE (cc_reg);
5830 const char *labelno, *branch;
5831 int spaces = 8, far;
5832 char *p;
5833
5834 /* v9 branches are limited to +-1MB. If it is too far away,
5835 change
5836
5837 bne,pt %xcc, .LC30
5838
5839 to
5840
5841 be,pn %xcc, .+12
5842 nop
5843 ba .LC30
5844
5845 and
5846
5847 fbne,a,pn %fcc2, .LC29
5848
5849 to
5850
5851 fbe,pt %fcc2, .+16
5852 nop
5853 ba .LC29 */
5854
5855 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5856 if (reversed ^ far)
5857 {
5858 /* Reversal of FP compares takes care -- an ordered compare
5859 becomes an unordered compare and vice versa. */
5860 if (mode == CCFPmode || mode == CCFPEmode)
5861 code = reverse_condition_maybe_unordered (code);
5862 else
5863 code = reverse_condition (code);
5864 }
5865
5866 /* Start by writing the branch condition. */
5867 if (mode == CCFPmode || mode == CCFPEmode)
5868 {
5869 switch (code)
5870 {
5871 case NE:
5872 branch = "fbne";
5873 break;
5874 case EQ:
5875 branch = "fbe";
5876 break;
5877 case GE:
5878 branch = "fbge";
5879 break;
5880 case GT:
5881 branch = "fbg";
5882 break;
5883 case LE:
5884 branch = "fble";
5885 break;
5886 case LT:
5887 branch = "fbl";
5888 break;
5889 case UNORDERED:
5890 branch = "fbu";
5891 break;
5892 case ORDERED:
5893 branch = "fbo";
5894 break;
5895 case UNGT:
5896 branch = "fbug";
5897 break;
5898 case UNLT:
5899 branch = "fbul";
5900 break;
5901 case UNEQ:
5902 branch = "fbue";
5903 break;
5904 case UNGE:
5905 branch = "fbuge";
5906 break;
5907 case UNLE:
5908 branch = "fbule";
5909 break;
5910 case LTGT:
5911 branch = "fblg";
5912 break;
5913
5914 default:
5915 gcc_unreachable ();
5916 }
5917
5918 /* ??? !v9: FP branches cannot be preceded by another floating point
5919 insn. Because there is currently no concept of pre-delay slots,
5920 we can fix this only by always emitting a nop before a floating
5921 point branch. */
5922
5923 string[0] = '\0';
5924 if (! TARGET_V9)
5925 strcpy (string, "nop\n\t");
5926 strcat (string, branch);
5927 }
5928 else
5929 {
5930 switch (code)
5931 {
5932 case NE:
5933 branch = "bne";
5934 break;
5935 case EQ:
5936 branch = "be";
5937 break;
5938 case GE:
5939 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5940 branch = "bpos";
5941 else
5942 branch = "bge";
5943 break;
5944 case GT:
5945 branch = "bg";
5946 break;
5947 case LE:
5948 branch = "ble";
5949 break;
5950 case LT:
5951 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5952 branch = "bneg";
5953 else
5954 branch = "bl";
5955 break;
5956 case GEU:
5957 branch = "bgeu";
5958 break;
5959 case GTU:
5960 branch = "bgu";
5961 break;
5962 case LEU:
5963 branch = "bleu";
5964 break;
5965 case LTU:
5966 branch = "blu";
5967 break;
5968
5969 default:
5970 gcc_unreachable ();
5971 }
5972 strcpy (string, branch);
5973 }
5974 spaces -= strlen (branch);
5975 p = strchr (string, '\0');
5976
5977 /* Now add the annulling, the label, and a possible noop. */
5978 if (annul && ! far)
5979 {
5980 strcpy (p, ",a");
5981 p += 2;
5982 spaces -= 2;
5983 }
5984
5985 if (TARGET_V9)
5986 {
5987 rtx note;
5988 int v8 = 0;
5989
5990 if (! far && insn && INSN_ADDRESSES_SET_P ())
5991 {
5992 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5993 - INSN_ADDRESSES (INSN_UID (insn)));
5994 /* Leave some instructions for "slop". */
5995 if (delta < -260000 || delta >= 260000)
5996 v8 = 1;
5997 }
5998
5999 if (mode == CCFPmode || mode == CCFPEmode)
6000 {
6001 static char v9_fcc_labelno[] = "%%fccX, ";
6002 /* Set the char indicating the number of the fcc reg to use. */
6003 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6004 labelno = v9_fcc_labelno;
6005 if (v8)
6006 {
6007 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6008 labelno = "";
6009 }
6010 }
6011 else if (mode == CCXmode || mode == CCX_NOOVmode)
6012 {
6013 labelno = "%%xcc, ";
6014 gcc_assert (! v8);
6015 }
6016 else
6017 {
6018 labelno = "%%icc, ";
6019 if (v8)
6020 labelno = "";
6021 }
6022
6023 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6024 {
6025 strcpy (p,
6026 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6027 ? ",pt" : ",pn");
6028 p += 3;
6029 spaces -= 3;
6030 }
6031 }
6032 else
6033 labelno = "";
6034
6035 if (spaces > 0)
6036 *p++ = '\t';
6037 else
6038 *p++ = ' ';
6039 strcpy (p, labelno);
6040 p = strchr (p, '\0');
6041 if (far)
6042 {
6043 strcpy (p, ".+12\n\t nop\n\tb\t");
6044 /* Skip the next insn if requested or
6045 if we know that it will be a nop. */
6046 if (annul || ! final_sequence)
6047 p[3] = '6';
6048 p += 14;
6049 }
6050 *p++ = '%';
6051 *p++ = 'l';
6052 *p++ = label + '0';
6053 *p++ = '%';
6054 *p++ = '#';
6055 *p = '\0';
6056
6057 return string;
6058 }
6059
6060 /* Emit a library call comparison between floating point X and Y.
6061 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6062 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6063 values as arguments instead of the TFmode registers themselves,
6064 which is why we cannot call emit_float_lib_cmp. */
6065 void
6066 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6067 {
6068 const char *qpfunc;
6069 rtx slot0, slot1, result, tem, tem2;
6070 enum machine_mode mode;
6071
6072 switch (comparison)
6073 {
6074 case EQ:
6075 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6076 break;
6077
6078 case NE:
6079 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6080 break;
6081
6082 case GT:
6083 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6084 break;
6085
6086 case GE:
6087 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6088 break;
6089
6090 case LT:
6091 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6092 break;
6093
6094 case LE:
6095 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6096 break;
6097
6098 case ORDERED:
6099 case UNORDERED:
6100 case UNGT:
6101 case UNLT:
6102 case UNEQ:
6103 case UNGE:
6104 case UNLE:
6105 case LTGT:
6106 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6107 break;
6108
6109 default:
6110 gcc_unreachable ();
6111 }
6112
6113 if (TARGET_ARCH64)
6114 {
6115 if (GET_CODE (x) != MEM)
6116 {
6117 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6118 emit_move_insn (slot0, x);
6119 }
6120 else
6121 slot0 = x;
6122
6123 if (GET_CODE (y) != MEM)
6124 {
6125 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6126 emit_move_insn (slot1, y);
6127 }
6128 else
6129 slot1 = y;
6130
6131 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6132 DImode, 2,
6133 XEXP (slot0, 0), Pmode,
6134 XEXP (slot1, 0), Pmode);
6135
6136 mode = DImode;
6137 }
6138 else
6139 {
6140 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6141 SImode, 2,
6142 x, TFmode, y, TFmode);
6143
6144 mode = SImode;
6145 }
6146
6148 /* Immediately move the result of the libcall into a pseudo
6149 register so reload doesn't clobber the value if it needs
6150 the return register for a spill reg. */
6151 result = gen_reg_rtx (mode);
6152 emit_move_insn (result, hard_libcall_value (mode));
6153
6154 switch (comparison)
6155 {
6156 default:
6157 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6158 break;
6159 case ORDERED:
6160 case UNORDERED:
6161 emit_cmp_insn (result, GEN_INT (3), comparison == UNORDERED ? EQ : NE,
6162 NULL_RTX, mode, 0);
6163 break;
6164 case UNGT:
6165 case UNGE:
6166 emit_cmp_insn (result, const1_rtx,
6167 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6168 break;
6169 case UNLE:
6170 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6171 break;
6172 case UNLT:
6173 tem = gen_reg_rtx (mode);
6174 if (TARGET_ARCH32)
6175 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6176 else
6177 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6178 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6179 break;
6180 case UNEQ:
6181 case LTGT:
6182 tem = gen_reg_rtx (mode);
6183 if (TARGET_ARCH32)
6184 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6185 else
6186 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6187 tem2 = gen_reg_rtx (mode);
6188 if (TARGET_ARCH32)
6189 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6190 else
6191 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6192 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6193 NULL_RTX, mode, 0);
6194 break;
6195 }
6196 }
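
/* The decoding above assumes the usual Sun libc convention for the
   _Q_cmp/_Qp_cmp result (stated here as an assumption, not quoted from
   the SCD): 0 = equal, 1 = less, 2 = greater, 3 = unordered.  Hence
   UNLT tests the low bit (result in {1,3}), UNEQ tests
   ((result + 1) & 2) == 0 (result in {0,3}), and LTGT tests the
   complementary set {1,2}. */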
6197
6198 /* Generate an unsigned DImode to FP conversion. This is the same code
6199 optabs would emit if we didn't have TFmode patterns. */
6200
6201 void
6202 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6203 {
6204 rtx neglab, donelab, i0, i1, f0, in, out;
6205
6206 out = operands[0];
6207 in = force_reg (DImode, operands[1]);
6208 neglab = gen_label_rtx ();
6209 donelab = gen_label_rtx ();
6210 i0 = gen_reg_rtx (DImode);
6211 i1 = gen_reg_rtx (DImode);
6212 f0 = gen_reg_rtx (mode);
6213
6214 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6215
6216 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6217 emit_jump_insn (gen_jump (donelab));
6218 emit_barrier ();
6219
6220 emit_label (neglab);
6221
6222 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6223 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6224 emit_insn (gen_iordi3 (i0, i0, i1));
6225 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6226 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6227
6228 emit_label (donelab);
6229 }
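
/* For inputs with the sign bit set, the code above halves the value
   while preserving the low bit (i0 = (in >> 1) | (in & 1)) so that the
   final rounding is unaffected, converts i0, and doubles the result
   with f0 + f0.  E.g. an input of 2^63 is converted as 2^62 and then
   doubled back to 2^63 exactly. */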
6230
6231 /* Generate an FP to unsigned DImode conversion. This is the same code
6232 optabs would emit if we didn't have TFmode patterns. */
6233
6234 void
6235 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6236 {
6237 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6238
6239 out = operands[0];
6240 in = force_reg (mode, operands[1]);
6241 neglab = gen_label_rtx ();
6242 donelab = gen_label_rtx ();
6243 i0 = gen_reg_rtx (DImode);
6244 i1 = gen_reg_rtx (DImode);
6245 limit = gen_reg_rtx (mode);
6246 f0 = gen_reg_rtx (mode);
6247
6248 emit_move_insn (limit,
6249 CONST_DOUBLE_FROM_REAL_VALUE (
6250 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6251 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6252
6253 emit_insn (gen_rtx_SET (VOIDmode,
6254 out,
6255 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6256 emit_jump_insn (gen_jump (donelab));
6257 emit_barrier ();
6258
6259 emit_label (neglab);
6260
6261 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6262 emit_insn (gen_rtx_SET (VOIDmode,
6263 i0,
6264 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6265 emit_insn (gen_movdi (i1, const1_rtx));
6266 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6267 emit_insn (gen_xordi3 (out, i0, i1));
6268
6269 emit_label (donelab);
6270 }
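
/* Values >= 2^63 cannot be converted with a signed fix, so the code
   above subtracts 2^63 (the 'limit' constant) in the FP domain,
   converts the difference, and adds 2^63 back by XOR-ing the sign bit
   (1 << 63) into the integer result. */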
6271
6272 /* Return the string to output a conditional branch to LABEL, testing
6273 register REG. LABEL is the operand number of the label; REG is the
6274 operand number of the reg. OP is the conditional expression. The mode
6275 of REG says what kind of comparison we made.
6276
6277 DEST is the destination insn (i.e. the label), INSN is the source.
6278
6279 REVERSED is nonzero if we should reverse the sense of the comparison.
6280
6281 ANNUL is nonzero if we should generate an annulling branch. */
6282
6283 const char *
6284 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6285 int annul, rtx insn)
6286 {
6287 static char string[64];
6288 enum rtx_code code = GET_CODE (op);
6289 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6290 rtx note;
6291 int far;
6292 char *p;
6293
6294 /* Branches on a register are limited to +-128KB. If the target is too far away,
6295 change
6296
6297 brnz,pt %g1, .LC30
6298
6299 to
6300
6301 brz,pn %g1, .+12
6302 nop
6303 ba,pt %xcc, .LC30
6304
6305 and
6306
6307 brgez,a,pn %o1, .LC29
6308
6309 to
6310
6311 brlz,pt %o1, .+16
6312 nop
6313 ba,pt %xcc, .LC29 */
6314
6315 far = get_attr_length (insn) >= 3;
6316
6317 /* The comparison is done on an integer register, so we can always just reverse the code. */
6318 if (reversed ^ far)
6319 code = reverse_condition (code);
6320
6321 /* Only 64-bit versions of these instructions exist. */
6322 gcc_assert (mode == DImode);
6323
6324 /* Start by writing the branch condition. */
6325
6326 switch (code)
6327 {
6328 case NE:
6329 strcpy (string, "brnz");
6330 break;
6331
6332 case EQ:
6333 strcpy (string, "brz");
6334 break;
6335
6336 case GE:
6337 strcpy (string, "brgez");
6338 break;
6339
6340 case LT:
6341 strcpy (string, "brlz");
6342 break;
6343
6344 case LE:
6345 strcpy (string, "brlez");
6346 break;
6347
6348 case GT:
6349 strcpy (string, "brgz");
6350 break;
6351
6352 default:
6353 gcc_unreachable ();
6354 }
6355
6356 p = strchr (string, '\0');
6357
6358 /* Now add the annulling, reg, label, and nop. */
6359 if (annul && ! far)
6360 {
6361 strcpy (p, ",a");
6362 p += 2;
6363 }
6364
6365 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6366 {
6367 strcpy (p,
6368 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6369 ? ",pt" : ",pn");
6370 p += 3;
6371 }
6372
6373 *p = p < string + 8 ? '\t' : ' ';
6374 p++;
6375 *p++ = '%';
6376 *p++ = '0' + reg;
6377 *p++ = ',';
6378 *p++ = ' ';
6379 if (far)
6380 {
6381 int veryfar = 1, delta;
6382
6383 if (INSN_ADDRESSES_SET_P ())
6384 {
6385 delta = (INSN_ADDRESSES (INSN_UID (dest))
6386 - INSN_ADDRESSES (INSN_UID (insn)));
6387 /* Leave some instructions for "slop". */
6388 if (delta >= -260000 && delta < 260000)
6389 veryfar = 0;
6390 }
6391
6392 strcpy (p, ".+12\n\t nop\n\t");
6393 /* Skip the next insn if requested or
6394 if we know that it will be a nop. */
6395 if (annul || ! final_sequence)
6396 p[3] = '6';
6397 p += 12;
6398 if (veryfar)
6399 {
6400 strcpy (p, "b\t");
6401 p += 2;
6402 }
6403 else
6404 {
6405 strcpy (p, "ba,pt\t%%xcc, ");
6406 p += 13;
6407 }
6408 }
6409 *p++ = '%';
6410 *p++ = 'l';
6411 *p++ = '0' + label;
6412 *p++ = '%';
6413 *p++ = '#';
6414 *p = '\0';
6415
6416 return string;
6417 }
6418
6419 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6420 Such instructions cannot be used in the delay slot of a return insn on v9.
6421 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
6422 counterparts. */
6423
6424 static int
6425 epilogue_renumber (register rtx *where, int test)
6426 {
6427 register const char *fmt;
6428 register int i;
6429 register enum rtx_code code;
6430
6431 if (*where == 0)
6432 return 0;
6433
6434 code = GET_CODE (*where);
6435
6436 switch (code)
6437 {
6438 case REG:
6439 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6440 return 1;
6441 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6442 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO (*where)));
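/* Fall through. */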
6443 case SCRATCH:
6444 case CC0:
6445 case PC:
6446 case CONST_INT:
6447 case CONST_DOUBLE:
6448 return 0;
6449
6450 /* Do not replace the frame pointer with the stack pointer because
6451 it can cause the delayed instruction to load below the stack.
6452 This occurs when instructions like:
6453
6454 (set (reg/i:SI 24 %i0)
6455 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6456 (const_int -20 [0xffffffec])) 0))
6457
6458 are in the return delayed slot. */
6459 case PLUS:
6460 if (GET_CODE (XEXP (*where, 0)) == REG
6461 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6462 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6463 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6464 return 1;
6465 break;
6466
6467 case MEM:
6468 if (SPARC_STACK_BIAS
6469 && GET_CODE (XEXP (*where, 0)) == REG
6470 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6471 return 1;
6472 break;
6473
6474 default:
6475 break;
6476 }
6477
6478 fmt = GET_RTX_FORMAT (code);
6479
6480 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6481 {
6482 if (fmt[i] == 'E')
6483 {
6484 register int j;
6485 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6486 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6487 return 1;
6488 }
6489 else if (fmt[i] == 'e'
6490 && epilogue_renumber (&(XEXP (*where, i)), test))
6491 return 1;
6492 }
6493 return 0;
6494 }
6495 \f
6496 /* Leaf functions and non-leaf functions have different needs. */
6497
6498 static const int
6499 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6500
6501 static const int
6502 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6503
6504 static const int *const reg_alloc_orders[] = {
6505 reg_leaf_alloc_order,
6506 reg_nonleaf_alloc_order};
6507
6508 void
6509 order_regs_for_local_alloc (void)
6510 {
6511 static int last_order_nonleaf = 1;
6512
6513 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6514 {
6515 last_order_nonleaf = !last_order_nonleaf;
6516 memcpy ((char *) reg_alloc_order,
6517 (const char *) reg_alloc_orders[last_order_nonleaf],
6518 FIRST_PSEUDO_REGISTER * sizeof (int));
6519 }
6520 }
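
/* Hard register 15 is %o7, which receives the return address of a
   call; it has been live iff the current function makes calls, i.e. is
   not a leaf, so the test above selects the matching allocation
   order. */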
6521 \f
6522 /* Return 1 if REG and MEM are legitimate enough to allow the various
6523 mem<-->reg splits to be run. */
6524
6525 int
6526 sparc_splitdi_legitimate (rtx reg, rtx mem)
6527 {
6528 /* Punt if we are here by mistake. */
6529 gcc_assert (reload_completed);
6530
6531 /* We must have an offsettable memory reference. */
6532 if (! offsettable_memref_p (mem))
6533 return 0;
6534
6535 /* If we have legitimate args for ldd/std, we do not want
6536 the split to happen. */
6537 if ((REGNO (reg) % 2) == 0
6538 && mem_min_alignment (mem, 8))
6539 return 0;
6540
6541 /* Success. */
6542 return 1;
6543 }
6544
6545 /* Return 1 if x and y are some kind of REG and they refer to
6546 different hard registers. This test is guaranteed to be
6547 run after reload. */
6548
6549 int
6550 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6551 {
6552 if (GET_CODE (x) != REG)
6553 return 0;
6554 if (GET_CODE (y) != REG)
6555 return 0;
6556 if (REGNO (x) == REGNO (y))
6557 return 0;
6558 return 1;
6559 }
6560
6561 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6562 This makes them candidates for using ldd and std insns.
6563
6564 Note reg1 and reg2 *must* be hard registers. */
6565
6566 int
6567 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6568 {
6569 /* We might have been passed a SUBREG. */
6570 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6571 return 0;
6572
6573 if (REGNO (reg1) % 2 != 0)
6574 return 0;
6575
6576 /* Integer ldd is deprecated in SPARC V9. */
6577 if (TARGET_V9 && REGNO (reg1) < 32)
6578 return 0;
6579
6580 return (REGNO (reg1) == REGNO (reg2) - 1);
6581 }
6582
6583 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6584 an ldd or std insn.
6585
6586 This can only happen when addr1 and addr2, the addresses in mem1
6587 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6588 addr1 must also be aligned on a 64-bit boundary.
6589
6590 Also, if dependent_reg_rtx is not null, it should not be used to
6591 compute the address for mem1, i.e. we cannot optimize a sequence
6592 like:
6593 ld [%o0], %o0
6594 ld [%o0 + 4], %o1
6595 to
6596 ldd [%o0], %o0
6597 nor:
6598 ld [%g3 + 4], %g3
6599 ld [%g3], %g2
6600 to
6601 ldd [%g3], %g2
6602
6603 But, note that the transformation from:
6604 ld [%g2 + 4], %g3
6605 ld [%g2], %g2
6606 to
6607 ldd [%g2], %g2
6608 is perfectly fine. Thus, the peephole2 patterns always pass us
6609 the destination register of the first load, never the second one.
6610
6611 For stores we don't have a similar problem, so dependent_reg_rtx is
6612 NULL_RTX. */
6613
6614 int
6615 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6616 {
6617 rtx addr1, addr2;
6618 unsigned int reg1;
6619 HOST_WIDE_INT offset1;
6620
6621 /* The mems cannot be volatile. */
6622 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6623 return 0;
6624
6625 /* MEM1 should be aligned on a 64-bit boundary. */
6626 if (MEM_ALIGN (mem1) < 64)
6627 return 0;
6628
6629 addr1 = XEXP (mem1, 0);
6630 addr2 = XEXP (mem2, 0);
6631
6632 /* Extract a register number and offset (if used) from the first addr. */
6633 if (GET_CODE (addr1) == PLUS)
6634 {
6635 /* If not a REG, return zero. */
6636 if (GET_CODE (XEXP (addr1, 0)) != REG)
6637 return 0;
6638 else
6639 {
6640 reg1 = REGNO (XEXP (addr1, 0));
6641 /* The offset must be constant! */
6642 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6643 return 0;
6644 offset1 = INTVAL (XEXP (addr1, 1));
6645 }
6646 }
6647 else if (GET_CODE (addr1) != REG)
6648 return 0;
6649 else
6650 {
6651 reg1 = REGNO (addr1);
6652 /* This was a simple (mem (reg)) expression. Offset is 0. */
6653 offset1 = 0;
6654 }
6655
6656 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6657 if (GET_CODE (addr2) != PLUS)
6658 return 0;
6659
6660 if (GET_CODE (XEXP (addr2, 0)) != REG
6661 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6662 return 0;
6663
6664 if (reg1 != REGNO (XEXP (addr2, 0)))
6665 return 0;
6666
6667 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6668 return 0;
6669
6670 /* The first offset must be evenly divisible by 8 to ensure the
6671 address is 64-bit aligned. */
6672 if (offset1 % 8 != 0)
6673 return 0;
6674
6675 /* The offset for the second addr must be 4 more than the first addr. */
6676 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6677 return 0;
6678
6679 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6680 instructions. */
6681 return 1;
6682 }
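
/* Illustrative accept/reject cases for the checks above (a sketch):
     mem1 = [%o2 + 8], mem2 = [%o2 + 12]  -> OK: same base register,
                                             offset1 % 8 == 0, delta 4
     mem1 = [%o2 + 4], mem2 = [%o2 + 8]   -> rejected: offset1 % 8 != 0
     mem1 = [%o2],     mem2 = [%o3 + 4]   -> rejected: base registers
                                             differ  */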
6683
6684 /* Return 1 if reg is a pseudo, or is the first register in
6685 a hard register pair. This makes it a candidate for use in
6686 ldd and std insns. */
6687
6688 int
6689 register_ok_for_ldd (rtx reg)
6690 {
6691 /* We might have been passed a SUBREG. */
6692 if (GET_CODE (reg) != REG)
6693 return 0;
6694
6695 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6696 return (REGNO (reg) % 2 == 0);
6697 else
6698 return 1;
6699 }
6700 \f
6701 /* Print operand X (an rtx) in assembler syntax to file FILE.
6702 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6703 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6704
6705 void
6706 print_operand (FILE *file, rtx x, int code)
6707 {
6708 switch (code)
6709 {
6710 case '#':
6711 /* Output an insn in a delay slot. */
6712 if (final_sequence)
6713 sparc_indent_opcode = 1;
6714 else
6715 fputs ("\n\t nop", file);
6716 return;
6717 case '*':
6718 /* Output an annul flag if there's nothing for the delay slot and we
6719 are optimizing. This is always used with '(' below.
6720 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6721 this is a dbx bug. So, we only do this when optimizing.
6722 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6723 Always emit a nop in case the next instruction is a branch. */
6724 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6725 fputs (",a", file);
6726 return;
6727 case '(':
6728 /* Output a 'nop' if there's nothing for the delay slot and we are
6729 not optimizing. This is always used with '*' above. */
6730 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6731 fputs ("\n\t nop", file);
6732 else if (final_sequence)
6733 sparc_indent_opcode = 1;
6734 return;
6735 case ')':
6736 /* Output the right displacement from the saved PC on function return.
6737 The caller may have placed an "unimp" insn immediately after the call
6738 so we have to account for it. This insn is used in the 32-bit ABI
6739 when calling a function that returns a non zero-sized structure. The
6740 64-bit ABI doesn't have it. Be careful to have this test be the same
6741 as that used on the call. The exception here is that when
6742 sparc_std_struct_return is enabled, the psABI is followed exactly
6743 and the adjustment is made by the code in sparc_struct_value_rtx.
6744 The call emitted is the same when sparc_std_struct_return is
6745 present. */
6746 if (! TARGET_ARCH64
6747 && current_function_returns_struct
6748 && ! sparc_std_struct_return
6749 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6750 == INTEGER_CST)
6751 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6752 fputs ("12", file);
6753 else
6754 fputc ('8', file);
6755 return;
6756 case '_':
6757 /* Output the Embedded Medium/Anywhere code model base register. */
6758 fputs (EMBMEDANY_BASE_REG, file);
6759 return;
6760 case '&':
6761 /* Print some local dynamic TLS name. */
6762 assemble_name (file, get_some_local_dynamic_name ());
6763 return;
6764
6765 case 'Y':
6766 /* Adjust the operand to take into account a RESTORE operation. */
6767 if (GET_CODE (x) == CONST_INT)
6768 break;
6769 else if (GET_CODE (x) != REG)
6770 output_operand_lossage ("invalid %%Y operand");
6771 else if (REGNO (x) < 8)
6772 fputs (reg_names[REGNO (x)], file);
6773 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6774 fputs (reg_names[REGNO (x)-16], file);
6775 else
6776 output_operand_lossage ("invalid %%Y operand");
6777 return;
6778 case 'L':
6779 /* Print out the low order register name of a register pair. */
6780 if (WORDS_BIG_ENDIAN)
6781 fputs (reg_names[REGNO (x)+1], file);
6782 else
6783 fputs (reg_names[REGNO (x)], file);
6784 return;
6785 case 'H':
6786 /* Print out the high order register name of a register pair. */
6787 if (WORDS_BIG_ENDIAN)
6788 fputs (reg_names[REGNO (x)], file);
6789 else
6790 fputs (reg_names[REGNO (x)+1], file);
6791 return;
6792 case 'R':
6793 /* Print out the second register name of a register pair or quad.
6794 I.e., R (%o0) => %o1. */
6795 fputs (reg_names[REGNO (x)+1], file);
6796 return;
6797 case 'S':
6798 /* Print out the third register name of a register quad.
6799 I.e., S (%o0) => %o2. */
6800 fputs (reg_names[REGNO (x)+2], file);
6801 return;
6802 case 'T':
6803 /* Print out the fourth register name of a register quad.
6804 I.e., T (%o0) => %o3. */
6805 fputs (reg_names[REGNO (x)+3], file);
6806 return;
6807 case 'x':
6808 /* Print a condition code register. */
6809 if (REGNO (x) == SPARC_ICC_REG)
6810 {
6811 /* We don't handle CC[X]_NOOVmode because they're not supposed
6812 to occur here. */
6813 if (GET_MODE (x) == CCmode)
6814 fputs ("%icc", file);
6815 else if (GET_MODE (x) == CCXmode)
6816 fputs ("%xcc", file);
6817 else
6818 gcc_unreachable ();
6819 }
6820 else
6821 /* %fccN register */
6822 fputs (reg_names[REGNO (x)], file);
6823 return;
6824 case 'm':
6825 /* Print the operand's address only. */
6826 output_address (XEXP (x, 0));
6827 return;
6828 case 'r':
6829 /* In this case we need a register. Use %g0 if the
6830 operand is const0_rtx. */
6831 if (x == const0_rtx
6832 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6833 {
6834 fputs ("%g0", file);
6835 return;
6836 }
6837 else
6838 break;
6839
6840 case 'A':
6841 switch (GET_CODE (x))
6842 {
6843 case IOR: fputs ("or", file); break;
6844 case AND: fputs ("and", file); break;
6845 case XOR: fputs ("xor", file); break;
6846 default: output_operand_lossage ("invalid %%A operand");
6847 }
6848 return;
6849
6850 case 'B':
6851 switch (GET_CODE (x))
6852 {
6853 case IOR: fputs ("orn", file); break;
6854 case AND: fputs ("andn", file); break;
6855 case XOR: fputs ("xnor", file); break;
6856 default: output_operand_lossage ("invalid %%B operand");
6857 }
6858 return;
6859
6860 /* These are used by the conditional move instructions. */
6861 case 'c' :
6862 case 'C':
6863 {
6864 enum rtx_code rc = GET_CODE (x);
6865
6866 if (code == 'c')
6867 {
6868 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6869 if (mode == CCFPmode || mode == CCFPEmode)
6870 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6871 else
6872 rc = reverse_condition (GET_CODE (x));
6873 }
6874 switch (rc)
6875 {
6876 case NE: fputs ("ne", file); break;
6877 case EQ: fputs ("e", file); break;
6878 case GE: fputs ("ge", file); break;
6879 case GT: fputs ("g", file); break;
6880 case LE: fputs ("le", file); break;
6881 case LT: fputs ("l", file); break;
6882 case GEU: fputs ("geu", file); break;
6883 case GTU: fputs ("gu", file); break;
6884 case LEU: fputs ("leu", file); break;
6885 case LTU: fputs ("lu", file); break;
6886 case LTGT: fputs ("lg", file); break;
6887 case UNORDERED: fputs ("u", file); break;
6888 case ORDERED: fputs ("o", file); break;
6889 case UNLT: fputs ("ul", file); break;
6890 case UNLE: fputs ("ule", file); break;
6891 case UNGT: fputs ("ug", file); break;
6892 case UNGE: fputs ("uge", file); break;
6893 case UNEQ: fputs ("ue", file); break;
6894 default: output_operand_lossage (code == 'c'
6895 ? "invalid %%c operand"
6896 : "invalid %%C operand");
6897 }
6898 return;
6899 }
6900
6901 /* These are used by the movr instruction pattern. */
6902 case 'd':
6903 case 'D':
6904 {
6905 enum rtx_code rc = (code == 'd'
6906 ? reverse_condition (GET_CODE (x))
6907 : GET_CODE (x));
6908 switch (rc)
6909 {
6910 case NE: fputs ("ne", file); break;
6911 case EQ: fputs ("e", file); break;
6912 case GE: fputs ("gez", file); break;
6913 case LT: fputs ("lz", file); break;
6914 case LE: fputs ("lez", file); break;
6915 case GT: fputs ("gz", file); break;
6916 default: output_operand_lossage (code == 'd'
6917 ? "invalid %%d operand"
6918 : "invalid %%D operand");
6919 }
6920 return;
6921 }
6922
6923 case 'b':
6924 {
6925 /* Print a sign-extended character. */
6926 int i = trunc_int_for_mode (INTVAL (x), QImode);
6927 fprintf (file, "%d", i);
6928 return;
6929 }
6930
6931 case 'f':
6932 /* Operand must be a MEM; write its address. */
6933 if (GET_CODE (x) != MEM)
6934 output_operand_lossage ("invalid %%f operand");
6935 output_address (XEXP (x, 0));
6936 return;
6937
6938 case 's':
6939 {
6940 /* Print a sign-extended 32-bit value. */
6941 HOST_WIDE_INT i;
6942 if (GET_CODE (x) == CONST_INT)
6943 i = INTVAL (x);
6944 else if (GET_CODE (x) == CONST_DOUBLE)
6945 i = CONST_DOUBLE_LOW (x);
6946 else
6947 {
6948 output_operand_lossage ("invalid %%s operand");
6949 return;
6950 }
6951 i = trunc_int_for_mode (i, SImode);
6952 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6953 return;
6954 }
6955
6956 case 0:
6957 /* Do nothing special. */
6958 break;
6959
6960 default:
6961 /* Undocumented flag. */
6962 output_operand_lossage ("invalid operand output code");
6963 }
6964
6965 if (GET_CODE (x) == REG)
6966 fputs (reg_names[REGNO (x)], file);
6967 else if (GET_CODE (x) == MEM)
6968 {
6969 fputc ('[', file);
6970 /* Poor Sun assembler doesn't understand absolute addressing. */
6971 if (CONSTANT_P (XEXP (x, 0)))
6972 fputs ("%g0+", file);
6973 output_address (XEXP (x, 0));
6974 fputc (']', file);
6975 }
6976 else if (GET_CODE (x) == HIGH)
6977 {
6978 fputs ("%hi(", file);
6979 output_addr_const (file, XEXP (x, 0));
6980 fputc (')', file);
6981 }
6982 else if (GET_CODE (x) == LO_SUM)
6983 {
6984 print_operand (file, XEXP (x, 0), 0);
6985 if (TARGET_CM_MEDMID)
6986 fputs ("+%l44(", file);
6987 else
6988 fputs ("+%lo(", file);
6989 output_addr_const (file, XEXP (x, 1));
6990 fputc (')', file);
6991 }
6992 else if (GET_CODE (x) == CONST_DOUBLE
6993 && (GET_MODE (x) == VOIDmode
6994 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6995 {
6996 if (CONST_DOUBLE_HIGH (x) == 0)
6997 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6998 else if (CONST_DOUBLE_HIGH (x) == -1
6999 && CONST_DOUBLE_LOW (x) < 0)
7000 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7001 else
7002 output_operand_lossage ("long long constant not a valid immediate operand");
7003 }
7004 else if (GET_CODE (x) == CONST_DOUBLE)
7005 output_operand_lossage ("floating point constant not a valid immediate operand");
7006 else { output_addr_const (file, x); }
7007 }
7008 \f
7009 /* Target hook for assembling integer objects. The sparc version has
7010 special handling for aligned DI-mode objects. */
7011
7012 static bool
7013 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7014 {
7015 /* ??? We only output .xword's for symbols and only then in environments
7016 where the assembler can handle them. */
7017 if (aligned_p && size == 8
7018 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7019 {
7020 if (TARGET_V9)
7021 {
7022 assemble_integer_with_op ("\t.xword\t", x);
7023 return true;
7024 }
7025 else
7026 {
7027 assemble_aligned_integer (4, const0_rtx);
7028 assemble_aligned_integer (4, x);
7029 return true;
7030 }
7031 }
7032 return default_assemble_integer (x, size, aligned_p);
7033 }
7034 \f
7035 /* Return the value of a code used in the .proc pseudo-op that says
7036 what kind of result this function returns. For non-C types, we pick
7037 the closest C type. */
7038
7039 #ifndef SHORT_TYPE_SIZE
7040 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7041 #endif
7042
7043 #ifndef INT_TYPE_SIZE
7044 #define INT_TYPE_SIZE BITS_PER_WORD
7045 #endif
7046
7047 #ifndef LONG_TYPE_SIZE
7048 #define LONG_TYPE_SIZE BITS_PER_WORD
7049 #endif
7050
7051 #ifndef LONG_LONG_TYPE_SIZE
7052 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7053 #endif
7054
7055 #ifndef FLOAT_TYPE_SIZE
7056 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7057 #endif
7058
7059 #ifndef DOUBLE_TYPE_SIZE
7060 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7061 #endif
7062
7063 #ifndef LONG_DOUBLE_TYPE_SIZE
7064 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7065 #endif
7066
7067 unsigned long
7068 sparc_type_code (register tree type)
7069 {
7070 register unsigned long qualifiers = 0;
7071 register unsigned shift;
7072
7073 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7074 setting more, since some assemblers will give an error for this. Also,
7075 we must be careful to avoid shifts of 32 bits or more to avoid getting
7076 unpredictable results. */
7077
7078 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7079 {
7080 switch (TREE_CODE (type))
7081 {
7082 case ERROR_MARK:
7083 return qualifiers;
7084
7085 case ARRAY_TYPE:
7086 qualifiers |= (3 << shift);
7087 break;
7088
7089 case FUNCTION_TYPE:
7090 case METHOD_TYPE:
7091 qualifiers |= (2 << shift);
7092 break;
7093
7094 case POINTER_TYPE:
7095 case REFERENCE_TYPE:
7096 case OFFSET_TYPE:
7097 qualifiers |= (1 << shift);
7098 break;
7099
7100 case RECORD_TYPE:
7101 return (qualifiers | 8);
7102
7103 case UNION_TYPE:
7104 case QUAL_UNION_TYPE:
7105 return (qualifiers | 9);
7106
7107 case ENUMERAL_TYPE:
7108 return (qualifiers | 10);
7109
7110 case VOID_TYPE:
7111 return (qualifiers | 16);
7112
7113 case INTEGER_TYPE:
7114 /* If this is a range type, consider it to be the underlying
7115 type. */
7116 if (TREE_TYPE (type) != 0)
7117 break;
7118
7119 /* Carefully distinguish all the standard types of C,
7120 without messing up if the language is not C. We do this by
7121 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7122 look at both the names and the above fields, but that's redundant.
7123 Any type whose size is between two C types will be considered
7124 to be the wider of the two types. Also, we do not have a
7125 special code to use for "long long", so anything wider than
7126 long is treated the same. Note that we can't distinguish
7127 between "int" and "long" in this code if they are the same
7128 size, but that's fine, since neither can the assembler. */
7129
7130 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7131 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7132
7133 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7134 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7135
7136 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7137 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7138
7139 else
7140 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7141
7142 case REAL_TYPE:
7143 /* If this is a range type, consider it to be the underlying
7144 type. */
7145 if (TREE_TYPE (type) != 0)
7146 break;
7147
7148 /* Carefully distinguish all the standard types of C,
7149 without messing up if the language is not C. */
7150
7151 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7152 return (qualifiers | 6);
7153
7154 else
7155 return (qualifiers | 7);
7156
7157 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7158 /* ??? We need to distinguish between double and float complex types,
7159 but I don't know how yet because I can't reach this code from
7160 existing front-ends. */
7161 return (qualifiers | 7); /* Who knows? */
7162
7163 case VECTOR_TYPE:
7164 case BOOLEAN_TYPE: /* Boolean truth value type. */
7165 case LANG_TYPE: /* ? */
7166 return qualifiers;
7167
7168 default:
7169 gcc_unreachable (); /* Not a type! */
7170 }
7171 }
7172
7173 return qualifiers;
7174 }
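
/* Worked example (illustrative only): for 'unsigned int *' the loop
   above first sees the POINTER_TYPE and ORs in 1 << 6, then reaches
   the INTEGER_TYPE and returns 0x40 | 14 == 0x4e, i.e. base code 14
   (unsigned int) with one level of pointer qualification. */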
7175 \f
7176 /* Nested function support. */
7177
7178 /* Emit RTL insns to initialize the variable parts of a trampoline.
7179 FNADDR is an RTX for the address of the function's pure code.
7180 CXT is an RTX for the static chain value for the function.
7181
7182 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7183 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7184 (to store insns). This is a bit excessive. Perhaps a different
7185 mechanism would be better here.
7186
7187 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7188
7189 void
7190 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7191 {
7192 /* SPARC 32-bit trampoline:
7193
7194 sethi %hi(fn), %g1
7195 sethi %hi(static), %g2
7196 jmp %g1+%lo(fn)
7197 or %g2, %lo(static), %g2
7198
7199 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7200 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7201 */
7202
7203 emit_move_insn
7204 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7205 expand_binop (SImode, ior_optab,
7206 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7207 size_int (10), 0, 1),
7208 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7209 NULL_RTX, 1, OPTAB_DIRECT));
7210
7211 emit_move_insn
7212 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7213 expand_binop (SImode, ior_optab,
7214 expand_shift (RSHIFT_EXPR, SImode, cxt,
7215 size_int (10), 0, 1),
7216 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7217 NULL_RTX, 1, OPTAB_DIRECT));
7218
7219 emit_move_insn
7220 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7221 expand_binop (SImode, ior_optab,
7222 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7223 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7224 NULL_RTX, 1, OPTAB_DIRECT));
7225
7226 emit_move_insn
7227 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7228 expand_binop (SImode, ior_optab,
7229 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7230 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7231 NULL_RTX, 1, OPTAB_DIRECT));
7232
7233 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7234 aligned on a 16 byte boundary so one flush clears it all. */
7235 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7236 if (sparc_cpu != PROCESSOR_ULTRASPARC
7237 && sparc_cpu != PROCESSOR_ULTRASPARC3
7238 && sparc_cpu != PROCESSOR_NIAGARA)
7239 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7240 plus_constant (tramp, 8)))));
7241
7242 /* Call __enable_execute_stack after writing onto the stack to make sure
7243 the stack address is accessible. */
7244 #ifdef ENABLE_EXECUTE_STACK
7245 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7246 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7247 #endif
7248
7249 }
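
/* For reference, the opcode constants used above decode as follows
   (derived from the SETHI/JMPL encodings in the comment; the %hi and
   %lo fields are OR-ed in at run time):
     0x03000000   sethi %hi(0), %g1
     0x05000000   sethi %hi(0), %g2
     0x81c06000   jmpl  %g1 + %lo(0), %g0
     0x8410a000   or    %g2, %lo(0), %g2  */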
7250
7251 /* The 64-bit version is simpler because it makes more sense to load the
7252 values as "immediate" data out of the trampoline. It's also easier since
7253 we can read the PC without clobbering a register. */
7254
7255 void
7256 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7257 {
7258 /* SPARC 64-bit trampoline:
7259
7260 rd %pc, %g1
7261 ldx [%g1+24], %g5
7262 jmp %g5
7263 ldx [%g1+16], %g5
7264 +16 bytes data
7265 */
7266
7267 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7268 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7269 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7270 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7271 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7272 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7273 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7274 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7275 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7276 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7277 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7278
7279 if (sparc_cpu != PROCESSOR_ULTRASPARC
7280 && sparc_cpu != PROCESSOR_ULTRASPARC3
7281 && sparc_cpu != PROCESSOR_NIAGARA)
7282 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7283
7284 /* Call __enable_execute_stack after writing onto the stack to make sure
7285 the stack address is accessible. */
7286 #ifdef ENABLE_EXECUTE_STACK
7287 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7288 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7289 #endif
7290 }
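
/* Likewise, the four instruction words stored above decode as:
     0x83414000   rd  %pc, %g1
     0xca586018   ldx [%g1 + 24], %g5
     0x81c14000   jmp %g5
     0xca586010   ldx [%g1 + 16], %g5
   which matches the assembly sketch in the comment. */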
7291 \f
7292 /* Adjust the cost of a scheduling dependency. Return the new cost of
7293 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
7294
7295 static int
7296 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7297 {
7298 enum attr_type insn_type;
7299
7300 if (! recog_memoized (insn))
7301 return 0;
7302
7303 insn_type = get_attr_type (insn);
7304
7305 if (REG_NOTE_KIND (link) == 0)
7306 {
7307 /* Data dependency; DEP_INSN writes a register that INSN reads some
7308 cycles later. */
7309
7310 /* If a load, then the dependence must be on the memory address;
7311 add an extra "cycle". Note that the cost could be two cycles
7312 if the reg was written late in an instruction group; we cannot tell
7313 here. */
7314 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7315 return cost + 3;
7316
7317 /* Get the delay only if the address of the store is the dependence. */
7318 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7319 {
7320 rtx pat = PATTERN (insn);
7321 rtx dep_pat = PATTERN (dep_insn);
7322
7323 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7324 return cost; /* This should not happen! */
7325
7326 /* The dependency between the two instructions was on the data that
7327 is being stored. Assume that this implies that the address of the
7328 store is not dependent. */
7329 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7330 return cost;
7331
7332 return cost + 3; /* An approximation. */
7333 }
7334
7335 /* A shift instruction cannot receive its data from an instruction
7336 in the same cycle; add a one cycle penalty. */
7337 if (insn_type == TYPE_SHIFT)
7338 return cost + 3; /* Split before cascade into shift. */
7339 }
7340 else
7341 {
7342 /* Anti- or output-dependency; DEP_INSN reads/writes a register that
7343 INSN writes some cycles later. */
7344
7345 /* These are only significant for the fpu unit; writing a fp reg before
7346 the fpu has finished with it stalls the processor. */
7347
7348 /* Reusing an integer register causes no problems. */
7349 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7350 return 0;
7351 }
7352
7353 return cost;
7354 }
7355
7356 static int
7357 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7358 {
7359 enum attr_type insn_type, dep_type;
7360 rtx pat = PATTERN (insn);
7361 rtx dep_pat = PATTERN (dep_insn);
7362
7363 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7364 return cost;
7365
7366 insn_type = get_attr_type (insn);
7367 dep_type = get_attr_type (dep_insn);
7368
7369 switch (REG_NOTE_KIND (link))
7370 {
7371 case 0:
7372 /* Data dependency; DEP_INSN writes a register that INSN reads some
7373 cycles later. */
7374
7375 switch (insn_type)
7376 {
7377 case TYPE_STORE:
7378 case TYPE_FPSTORE:
7379 /* Get the delay only if the address of the store is the dependence. */
7380 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7381 return cost;
7382
7383 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7384 return cost;
7385 return cost + 3;
7386
7387 case TYPE_LOAD:
7388 case TYPE_SLOAD:
7389 case TYPE_FPLOAD:
7390 /* If a load, then the dependence must be on the memory address. If
7391 the addresses aren't equal, then it might be a false dependency. */
7392 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7393 {
7394 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7395 || GET_CODE (SET_DEST (dep_pat)) != MEM
7396 || GET_CODE (SET_SRC (pat)) != MEM
7397 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7398 XEXP (SET_SRC (pat), 0)))
7399 return cost + 2;
7400
7401 return cost + 8;
7402 }
7403 break;
7404
7405 case TYPE_BRANCH:
7406 /* Compare to branch latency is 0. There is no benefit from
7407 separating compare and branch. */
7408 if (dep_type == TYPE_COMPARE)
7409 return 0;
7410 /* Floating point compare to branch latency is less than
7411 compare to conditional move. */
7412 if (dep_type == TYPE_FPCMP)
7413 return cost - 1;
7414 break;
7415 default:
7416 break;
7417 }
7418 break;
7419
7420 case REG_DEP_ANTI:
7421 /* Anti-dependencies only penalize the fpu unit. */
7422 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7423 return 0;
7424 break;
7425
7426 default:
7427 break;
7428 }
7429
7430 return cost;
7431 }
7432
7433 static int
7434 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7435 {
7436 switch (sparc_cpu)
7437 {
7438 case PROCESSOR_SUPERSPARC:
7439 cost = supersparc_adjust_cost (insn, link, dep, cost);
7440 break;
7441 case PROCESSOR_HYPERSPARC:
7442 case PROCESSOR_SPARCLITE86X:
7443 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7444 break;
7445 default:
7446 break;
7447 }
7448 return cost;
7449 }
7450
7451 static void
7452 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7453 int sched_verbose ATTRIBUTE_UNUSED,
7454 int max_ready ATTRIBUTE_UNUSED)
7455 {
7456 }
7457
7458 static int
7459 sparc_use_sched_lookahead (void)
7460 {
7461 if (sparc_cpu == PROCESSOR_NIAGARA)
7462 return 0;
7463 if (sparc_cpu == PROCESSOR_ULTRASPARC
7464 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7465 return 4;
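  /* Test for several older CPUs at once with a bitmask over sparc_cpu.  */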
7466 if ((1 << sparc_cpu) &
7467 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7468 (1 << PROCESSOR_SPARCLITE86X)))
7469 return 3;
7470 return 0;
7471 }
7472
7473 static int
7474 sparc_issue_rate (void)
7475 {
7476 switch (sparc_cpu)
7477 {
7478 case PROCESSOR_NIAGARA:
7479 default:
7480 return 1;
7481 case PROCESSOR_V9:
7482 /* Assume V9 processors are capable of at least dual-issue. */
7483 return 2;
7484 case PROCESSOR_SUPERSPARC:
7485 return 3;
7486 case PROCESSOR_HYPERSPARC:
7487 case PROCESSOR_SPARCLITE86X:
7488 return 2;
7489 case PROCESSOR_ULTRASPARC:
7490 case PROCESSOR_ULTRASPARC3:
7491 return 4;
7492 }
7493 }
7494
7495 static int
7496 set_extends (rtx insn)
7497 {
7498 register rtx pat = PATTERN (insn);
7499
7500 switch (GET_CODE (SET_SRC (pat)))
7501 {
      /* Load and some shift instructions zero extend.  */
    case MEM:
    case ZERO_EXTEND:
      /* sethi clears the high bits.  */
    case HIGH:
      /* LO_SUM is used with sethi; sethi cleared the high bits and the
         values used with lo_sum are positive.  */
    case LO_SUM:
      /* A store-flag instruction stores 0 or 1.  */
7511 case LT: case LTU:
7512 case GT: case GTU:
7513 case LE: case LEU:
7514 case GE: case GEU:
7515 case EQ:
7516 case NE:
7517 return 1;
7518 case AND:
7519 {
7520 rtx op0 = XEXP (SET_SRC (pat), 0);
7521 rtx op1 = XEXP (SET_SRC (pat), 1);
7522 if (GET_CODE (op1) == CONST_INT)
7523 return INTVAL (op1) >= 0;
7524 if (GET_CODE (op0) != REG)
7525 return 0;
7526 if (sparc_check_64 (op0, insn) == 1)
7527 return 1;
7528 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7529 }
7530 case IOR:
7531 case XOR:
7532 {
7533 rtx op0 = XEXP (SET_SRC (pat), 0);
7534 rtx op1 = XEXP (SET_SRC (pat), 1);
7535 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7536 return 0;
7537 if (GET_CODE (op1) == CONST_INT)
7538 return INTVAL (op1) >= 0;
7539 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7540 }
7541 case LSHIFTRT:
7542 return GET_MODE (SET_SRC (pat)) == SImode;
7543 /* Positive integers leave the high bits zero. */
7544 case CONST_DOUBLE:
7545 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7546 case CONST_INT:
7547 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7548 case ASHIFTRT:
7549 case SIGN_EXTEND:
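      /* -1 when the source mode is SImode (the value is sign-extended),
         0 otherwise.  */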
7550 return - (GET_MODE (SET_SRC (pat)) == SImode);
7551 case REG:
7552 return sparc_check_64 (SET_SRC (pat), insn);
7553 default:
7554 return 0;
7555 }
7556 }
7557
7558 /* We _ought_ to have only one kind per function, but... */
7559 static GTY(()) rtx sparc_addr_diff_list;
7560 static GTY(()) rtx sparc_addr_list;
7561
7562 void
7563 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7564 {
7565 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7566 if (diff)
7567 sparc_addr_diff_list
7568 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7569 else
7570 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7571 }
7572
7573 static void
7574 sparc_output_addr_vec (rtx vec)
7575 {
7576 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7577 int idx, vlen = XVECLEN (body, 0);
7578
7579 #ifdef ASM_OUTPUT_ADDR_VEC_START
7580 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7581 #endif
7582
7583 #ifdef ASM_OUTPUT_CASE_LABEL
7584 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7585 NEXT_INSN (lab));
7586 #else
7587 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7588 #endif
7589
7590 for (idx = 0; idx < vlen; idx++)
7591 {
7592 ASM_OUTPUT_ADDR_VEC_ELT
7593 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7594 }
7595
7596 #ifdef ASM_OUTPUT_ADDR_VEC_END
7597 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7598 #endif
7599 }
7600
7601 static void
7602 sparc_output_addr_diff_vec (rtx vec)
7603 {
7604 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7605 rtx base = XEXP (XEXP (body, 0), 0);
7606 int idx, vlen = XVECLEN (body, 1);
7607
7608 #ifdef ASM_OUTPUT_ADDR_VEC_START
7609 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7610 #endif
7611
7612 #ifdef ASM_OUTPUT_CASE_LABEL
7613 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7614 NEXT_INSN (lab));
7615 #else
7616 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7617 #endif
7618
7619 for (idx = 0; idx < vlen; idx++)
7620 {
7621 ASM_OUTPUT_ADDR_DIFF_ELT
7622 (asm_out_file,
7623 body,
7624 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7625 CODE_LABEL_NUMBER (base));
7626 }
7627
7628 #ifdef ASM_OUTPUT_ADDR_VEC_END
7629 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7630 #endif
7631 }
7632
7633 static void
7634 sparc_output_deferred_case_vectors (void)
7635 {
7636 rtx t;
7637 int align;
7638
7639 if (sparc_addr_list == NULL_RTX
7640 && sparc_addr_diff_list == NULL_RTX)
7641 return;
7642
7643 /* Align to cache line in the function's code section. */
7644 switch_to_section (current_function_section ());
7645
7646 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7647 if (align > 0)
7648 ASM_OUTPUT_ALIGN (asm_out_file, align);
7649
7650 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7651 sparc_output_addr_vec (XEXP (t, 0));
7652 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7653 sparc_output_addr_diff_vec (XEXP (t, 0));
7654
7655 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7656 }
7657
7658 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7659 unknown. Return 1 if the high bits are zero, -1 if the register is
7660 sign extended. */
7661 int
7662 sparc_check_64 (rtx x, rtx insn)
7663 {
7664 /* If a register is set only once it is safe to ignore insns this
7665 code does not know how to handle. The loop will either recognize
7666 the single set and return the correct value or fail to recognize
7667 it and return 0. */
7668 int set_once = 0;
7669 rtx y = x;
7670
7671 gcc_assert (GET_CODE (x) == REG);
7672
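  /* For DImode, examine the low 32-bit word: on a big-endian target it
     lives in the second register of the pair.  */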
7673 if (GET_MODE (x) == DImode)
7674 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7675
7676 if (flag_expensive_optimizations
7677 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7678 set_once = 1;
7679
7680 if (insn == 0)
7681 {
7682 if (set_once)
7683 insn = get_last_insn_anywhere ();
7684 else
7685 return 0;
7686 }
7687
7688 while ((insn = PREV_INSN (insn)))
7689 {
7690 switch (GET_CODE (insn))
7691 {
7692 case JUMP_INSN:
7693 case NOTE:
7694 break;
7695 case CODE_LABEL:
7696 case CALL_INSN:
7697 default:
7698 if (! set_once)
7699 return 0;
7700 break;
7701 case INSN:
7702 {
7703 rtx pat = PATTERN (insn);
7704 if (GET_CODE (pat) != SET)
7705 return 0;
7706 if (rtx_equal_p (x, SET_DEST (pat)))
7707 return set_extends (insn);
7708 if (y && rtx_equal_p (y, SET_DEST (pat)))
7709 return set_extends (insn);
7710 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7711 return 0;
7712 }
7713 }
7714 }
7715 return 0;
7716 }
7717
7718 /* Returns assembly code to perform a DImode shift using
7719 a 64-bit global or out register on SPARC-V8+. */
7720 const char *
7721 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7722 {
7723 static char asm_code[60];
7724
7725 /* The scratch register is only required when the destination
7726 register is not a 64-bit global or out register. */
7727 if (which_alternative != 2)
7728 operands[3] = operands[0];
7729
  /* We can only shift by constants <= 63; take the count modulo 64.  */
7731 if (GET_CODE (operands[2]) == CONST_INT)
7732 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7733
7734 if (GET_CODE (operands[1]) == CONST_INT)
7735 {
7736 output_asm_insn ("mov\t%1, %3", operands);
7737 }
7738 else
7739 {
7740 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7741 if (sparc_check_64 (operands[1], insn) <= 0)
7742 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7743 output_asm_insn ("or\t%L1, %3, %3", operands);
7744 }
7745
  strcpy (asm_code, opcode);
7747
7748 if (which_alternative != 2)
7749 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7750 else
7751 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7752 }
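
/* For instance, with OPCODE "sllx" and a destination that needs the full
   sequence, the code above emits something of the shape

	sllx	%H1, 32, %3	! build the 64-bit source in %3
	srl	%L1, 0, %L1	! zero-extend the low word if required
	or	%L1, %3, %3
	sllx	%0, %2, %L0	! the shift itself (%3 == %0 here)
	srlx	%L0, 32, %H0	! split the result back into word halves

   (a sketch only; the %-operands are shown unsubstituted).  */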
7753 \f
7754 /* Output rtl to increment the profiler label LABELNO
7755 for profiling a function entry. */
7756
7757 void
7758 sparc_profile_hook (int labelno)
7759 {
7760 char buf[32];
7761 rtx lab, fun;
7762
7763 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7764 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7765 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7766
7767 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7768 }
7769 \f
7770 #ifdef OBJECT_FORMAT_ELF
7771 static void
7772 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7773 tree decl)
7774 {
7775 if (flags & SECTION_MERGE)
7776 {
      /* The entry size (entsize) cannot be expressed in this
         section-attribute encoding style.  */
7779 default_elf_asm_named_section (name, flags, decl);
7780 return;
7781 }
7782
7783 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7784
7785 if (!(flags & SECTION_DEBUG))
7786 fputs (",#alloc", asm_out_file);
7787 if (flags & SECTION_WRITE)
7788 fputs (",#write", asm_out_file);
7789 if (flags & SECTION_TLS)
7790 fputs (",#tls", asm_out_file);
7791 if (flags & SECTION_CODE)
7792 fputs (",#execinstr", asm_out_file);
7793
7794 /* ??? Handle SECTION_BSS. */
7795
7796 fputc ('\n', asm_out_file);
7797 }
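
/* For example, a writable section is emitted as

	.section "my_sec",#alloc,#write

   using the Solaris-style #-attribute syntax; "my_sec" is illustrative.  */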
7798 #endif /* OBJECT_FORMAT_ELF */
7799
7800 /* We do not allow indirect calls to be optimized into sibling calls.
7801
7802 We cannot use sibling calls when delayed branches are disabled
7803 because they will likely require the call delay slot to be filled.
7804
7805 Also, on SPARC 32-bit we cannot emit a sibling call when the
7806 current function returns a structure. This is because the "unimp
7807 after call" convention would cause the callee to return to the
7808 wrong place. The generic code already disallows cases where the
7809 function being called returns a structure.
7810
   It may seem strange that this last case could occur.  Usually there
   is code after the call that jumps to the epilogue code, which dumps
   the return value into the struct return area.  That ought to
   invalidate the sibling call, right?  Well, in the C++ case we can
   end up passing the pointer to the struct return area to a
   constructor (which returns void), after which nothing else happens.
   Such a sibling call would look valid without the added check here.
7818
7819 VxWorks PIC PLT entries require the global pointer to be initialized
7820 on entry. We therefore can't emit sibling calls to them. */
7821 static bool
7822 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7823 {
7824 return (decl
7825 && flag_delayed_branch
7826 && (TARGET_ARCH64 || ! current_function_returns_struct)
7827 && !(TARGET_VXWORKS_RTP
7828 && flag_pic
7829 && !targetm.binds_local_p (decl)));
7830 }
7831 \f
7832 /* libfunc renaming. */
7833 #include "config/gofast.h"
7834
7835 static void
7836 sparc_init_libfuncs (void)
7837 {
7838 if (TARGET_ARCH32)
7839 {
7840 /* Use the subroutines that Sun's library provides for integer
7841 multiply and divide. The `*' prevents an underscore from
7842 being prepended by the compiler. .umul is a little faster
7843 than .mul. */
7844 set_optab_libfunc (smul_optab, SImode, "*.umul");
7845 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7846 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7847 set_optab_libfunc (smod_optab, SImode, "*.rem");
7848 set_optab_libfunc (umod_optab, SImode, "*.urem");
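      /* The resulting libcalls are therefore to ".umul", ".div", etc.,
         with no "_" prefix.  */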
7849
      /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
7851 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7852 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7853 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7854 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7855 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7856
      /* We can define the TFmode sqrt optab only if TARGET_FPU.  With
         soft-float, the SFmode and DFmode sqrt instructions are absent,
         so the compiler would notice and wrongly try to use the TFmode
         sqrt instruction for calls to the builtin function sqrt, which
         would fail.  */
7862 if (TARGET_FPU)
7863 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7864
7865 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7866 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7867 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7868 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7869 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7870 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7871
7872 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7873 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7874 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7875 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7876
7877 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7878 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7879 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7880 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7881
7882 if (DITF_CONVERSION_LIBFUNCS)
7883 {
7884 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7885 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7886 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7887 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7888 }
7889
7890 if (SUN_CONVERSION_LIBFUNCS)
7891 {
7892 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7893 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7894 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7895 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7896 }
7897 }
7898 if (TARGET_ARCH64)
7899 {
      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7901 do not exist in the library. Make sure the compiler does not
7902 emit calls to them by accident. (It should always use the
7903 hardware instructions.) */
7904 set_optab_libfunc (smul_optab, SImode, 0);
7905 set_optab_libfunc (sdiv_optab, SImode, 0);
7906 set_optab_libfunc (udiv_optab, SImode, 0);
7907 set_optab_libfunc (smod_optab, SImode, 0);
7908 set_optab_libfunc (umod_optab, SImode, 0);
7909
7910 if (SUN_INTEGER_MULTIPLY_64)
7911 {
7912 set_optab_libfunc (smul_optab, DImode, "__mul64");
7913 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7914 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7915 set_optab_libfunc (smod_optab, DImode, "__rem64");
7916 set_optab_libfunc (umod_optab, DImode, "__urem64");
7917 }
7918
7919 if (SUN_CONVERSION_LIBFUNCS)
7920 {
7921 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7922 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7923 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7924 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7925 }
7926 }
7927
7928 gofast_maybe_init_libfuncs ();
7929 }
7930 \f
7931 #define def_builtin(NAME, CODE, TYPE) \
  add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7933 NULL_TREE)
7934
7935 /* Implement the TARGET_INIT_BUILTINS target hook.
7936 Create builtin functions for special SPARC instructions. */
7937
7938 static void
7939 sparc_init_builtins (void)
7940 {
7941 if (TARGET_VIS)
7942 sparc_vis_init_builtins ();
7943 }
7944
7945 /* Create builtin functions for VIS 1.0 instructions. */
7946
7947 static void
7948 sparc_vis_init_builtins (void)
7949 {
7950 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7951 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7952 tree v4hi = build_vector_type (intHI_type_node, 4);
7953 tree v2hi = build_vector_type (intHI_type_node, 2);
7954 tree v2si = build_vector_type (intSI_type_node, 2);
7955
7956 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7957 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7958 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7959 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7960 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7961 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7962 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7963 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7964 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7965 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7966 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7967 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7968 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7969 v8qi, v8qi,
7970 intDI_type_node, 0);
7971 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7972 intDI_type_node,
7973 intDI_type_node, 0);
7974 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7975 ptr_type_node,
7976 intSI_type_node, 0);
7977 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7978 ptr_type_node,
7979 intDI_type_node, 0);
7980
7981 /* Packing and expanding vectors. */
7982 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7983 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7984 v8qi_ftype_v2si_v8qi);
7985 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7986 v2hi_ftype_v2si);
7987 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7988 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7989 v8qi_ftype_v4qi_v4qi);
7990
7991 /* Multiplications. */
7992 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7993 v4hi_ftype_v4qi_v4hi);
7994 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7995 v4hi_ftype_v4qi_v2hi);
7996 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7997 v4hi_ftype_v4qi_v2hi);
7998 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
7999 v4hi_ftype_v8qi_v4hi);
8000 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8001 v4hi_ftype_v8qi_v4hi);
8002 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8003 v2si_ftype_v4qi_v2hi);
8004 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8005 v2si_ftype_v4qi_v2hi);
8006
8007 /* Data aligning. */
8008 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8009 v4hi_ftype_v4hi_v4hi);
8010 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8011 v8qi_ftype_v8qi_v8qi);
8012 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8013 v2si_ftype_v2si_v2si);
8014 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8015 di_ftype_di_di);
8016 if (TARGET_ARCH64)
8017 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8018 ptr_ftype_ptr_di);
8019 else
8020 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8021 ptr_ftype_ptr_si);
8022
8023 /* Pixel distance. */
8024 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8025 di_ftype_v8qi_v8qi_di);
8026 }
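
/* Usage sketch (hypothetical user code compiled with -mvis); the vector
   typedefs are assumptions matching the types built above:

     typedef short vec16 __attribute__ ((vector_size (8)));
     typedef unsigned char vec8 __attribute__ ((vector_size (4)));

     vec8 pack (vec16 x)
     {
       return __builtin_vis_fpack16 (x);
     }
   */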
8027
8028 /* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */
8030
8031 static rtx
8032 sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8033 enum machine_mode tmode, int ignore ATTRIBUTE_UNUSED)
8034 {
8035 tree arg;
8036 call_expr_arg_iterator iter;
8037 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8038 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8039 rtx pat, op[4];
8040 enum machine_mode mode[4];
8041 int arg_count = 0;
8042
8043 mode[arg_count] = tmode;
8044
8045 if (target == 0
8046 || GET_MODE (target) != tmode
8047 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8048 op[arg_count] = gen_reg_rtx (tmode);
8049 else
8050 op[arg_count] = target;
8051
8052 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8053 {
8054 arg_count++;
8055 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8056 op[arg_count] = expand_normal (arg);
8057
8058 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8059 mode[arg_count]))
8060 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8061 }
8062
8063 switch (arg_count)
8064 {
8065 case 1:
8066 pat = GEN_FCN (icode) (op[0], op[1]);
8067 break;
8068 case 2:
8069 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8070 break;
8071 case 3:
8072 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8073 break;
8074 default:
8075 gcc_unreachable ();
8076 }
8077
8078 if (!pat)
8079 return NULL_RTX;
8080
8081 emit_insn (pat);
8082
8083 return op[0];
8084 }
8085
8086 static int
8087 sparc_vis_mul8x16 (int e8, int e16)
8088 {
8089 return (e8 * e16 + 128) / 256;
8090 }
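
/* For example, sparc_vis_mul8x16 (16, 32) = (16*32 + 128) / 256 = 2;
   adding 128 before the divide rounds the scaled product (for
   non-negative values) to the nearest integer.  */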
8091
/* Multiply the vector elements in ELTS0 by the elements in ELTS1, as
   specified by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists
   must be integer constants.  A tree list with the results of the
   multiplications is returned, and each element in the list is of
   INNER_TYPE.  */
8096
8097 static tree
8098 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8099 {
8100 tree n_elts = NULL_TREE;
8101 int scale;
8102
8103 switch (fncode)
8104 {
8105 case CODE_FOR_fmul8x16_vis:
8106 for (; elts0 && elts1;
8107 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8108 {
8109 int val
8110 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8111 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8112 n_elts = tree_cons (NULL_TREE,
8113 build_int_cst (inner_type, val),
8114 n_elts);
8115 }
8116 break;
8117
8118 case CODE_FOR_fmul8x16au_vis:
8119 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8120
8121 for (; elts0; elts0 = TREE_CHAIN (elts0))
8122 {
8123 int val
8124 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8125 scale);
8126 n_elts = tree_cons (NULL_TREE,
8127 build_int_cst (inner_type, val),
8128 n_elts);
8129 }
8130 break;
8131
8132 case CODE_FOR_fmul8x16al_vis:
8133 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8134
8135 for (; elts0; elts0 = TREE_CHAIN (elts0))
8136 {
8137 int val
8138 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8139 scale);
8140 n_elts = tree_cons (NULL_TREE,
8141 build_int_cst (inner_type, val),
8142 n_elts);
8143 }
8144 break;
8145
8146 default:
8147 gcc_unreachable ();
8148 }
8149
  return nreverse (n_elts);
}

/* Handle TARGET_FOLD_BUILTIN target hook.
8154 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8155 result of the function call is ignored. NULL_TREE is returned if the
8156 function could not be folded. */
8157
8158 static tree
8159 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8160 {
8161 tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));

  if (ignore && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8166 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8167 return build_int_cst (rtype, 0);
8168
8169 switch (DECL_FUNCTION_CODE (fndecl))
8170 {
8171 case CODE_FOR_fexpand_vis:
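      /* fexpand widens each 8-bit element to 16 bits, shifted left by 4;
         e.g. a constant element 0x12 folds to 0x120.  */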
8172 arg0 = TREE_VALUE (arglist);
8173 STRIP_NOPS (arg0);
8174
8175 if (TREE_CODE (arg0) == VECTOR_CST)
8176 {
8177 tree inner_type = TREE_TYPE (rtype);
8178 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8179 tree n_elts = NULL_TREE;
8180
8181 for (; elts; elts = TREE_CHAIN (elts))
8182 {
8183 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8184 n_elts = tree_cons (NULL_TREE,
8185 build_int_cst (inner_type, val),
8186 n_elts);
8187 }
8188 return build_vector (rtype, nreverse (n_elts));
8189 }
8190 break;
8191
8192 case CODE_FOR_fmul8x16_vis:
8193 case CODE_FOR_fmul8x16au_vis:
8194 case CODE_FOR_fmul8x16al_vis:
8195 arg0 = TREE_VALUE (arglist);
8196 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8197 STRIP_NOPS (arg0);
8198 STRIP_NOPS (arg1);
8199
8200 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8201 {
8202 tree inner_type = TREE_TYPE (rtype);
8203 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8204 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8205 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8206 inner_type, elts0, elts1);
8207
8208 return build_vector (rtype, n_elts);
8209 }
8210 break;
8211
8212 case CODE_FOR_fpmerge_vis:
8213 arg0 = TREE_VALUE (arglist);
8214 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8215 STRIP_NOPS (arg0);
8216 STRIP_NOPS (arg1);
8217
8218 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8219 {
8220 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8221 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8222 tree n_elts = NULL_TREE;
8223
8224 for (; elts0 && elts1;
8225 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8226 {
8227 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8228 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8229 }
8230
8231 return build_vector (rtype, nreverse (n_elts));
8232 }
8233 break;
8234
8235 case CODE_FOR_pdist_vis:
8236 arg0 = TREE_VALUE (arglist);
8237 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8238 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8239 STRIP_NOPS (arg0);
8240 STRIP_NOPS (arg1);
8241 STRIP_NOPS (arg2);
8242
8243 if (TREE_CODE (arg0) == VECTOR_CST
8244 && TREE_CODE (arg1) == VECTOR_CST
8245 && TREE_CODE (arg2) == INTEGER_CST)
8246 {
8247 int overflow = 0;
8248 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8249 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8250 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8251 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8252
8253 for (; elts0 && elts1;
8254 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8255 {
8256 unsigned HOST_WIDE_INT
8257 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8258 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8259 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8260 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8261
8262 unsigned HOST_WIDE_INT l;
8263 HOST_WIDE_INT h;
8264
8265 overflow |= neg_double (low1, high1, &l, &h);
8266 overflow |= add_double (low0, high0, l, h, &l, &h);
8267 if (h < 0)
8268 overflow |= neg_double (l, h, &l, &h);
8269
8270 overflow |= add_double (low, high, l, h, &low, &high);
8271 }
8272
8273 gcc_assert (overflow == 0);
8274
8275 return build_int_cst_wide (rtype, low, high);
8276 }
      break;

    default:
8279 break;
8280 }
8281 return NULL_TREE;
8282 }
8283 \f
8284 int
8285 sparc_extra_constraint_check (rtx op, int c, int strict)
8286 {
8287 int reload_ok_mem;
8288
8289 if (TARGET_ARCH64
8290 && (c == 'T' || c == 'U'))
8291 return 0;
8292
8293 switch (c)
8294 {
8295 case 'Q':
8296 return fp_sethi_p (op);
8297
8298 case 'R':
8299 return fp_mov_p (op);
8300
8301 case 'S':
8302 return fp_high_losum_p (op);
8303
8304 case 'U':
8305 if (! strict
8306 || (GET_CODE (op) == REG
8307 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8308 || reg_renumber[REGNO (op)] >= 0)))
8309 return register_ok_for_ldd (op);
8310
8311 return 0;
8312
8313 case 'W':
8314 case 'T':
8315 break;
8316
8317 case 'Y':
8318 return const_zero_operand (op, GET_MODE (op));
8319
8320 default:
8321 return 0;
8322 }
8323
8324 /* Our memory extra constraints have to emulate the
8325 behavior of 'm' and 'o' in order for reload to work
8326 correctly. */
8327 if (GET_CODE (op) == MEM)
8328 {
8329 reload_ok_mem = 0;
8330 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8331 && (! strict
8332 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8333 reload_ok_mem = 1;
8334 }
8335 else
8336 {
8337 reload_ok_mem = (reload_in_progress
8338 && GET_CODE (op) == REG
8339 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8340 && reg_renumber [REGNO (op)] < 0);
8341 }
8342
8343 return reload_ok_mem;
8344 }
8345
8346 /* ??? This duplicates information provided to the compiler by the
8347 ??? scheduler description. Some day, teach genautomata to output
8348 ??? the latencies and then CSE will just use that. */
8349
8350 static bool
8351 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8352 {
8353 enum machine_mode mode = GET_MODE (x);
8354 bool float_mode_p = FLOAT_MODE_P (mode);
8355
8356 switch (code)
8357 {
8358 case CONST_INT:
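      /* Constants that fit in the 13-bit signed immediate field (simm13)
         are free.  */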
8359 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8360 {
8361 *total = 0;
8362 return true;
8363 }
8364 /* FALLTHRU */
8365
8366 case HIGH:
8367 *total = 2;
8368 return true;
8369
8370 case CONST:
8371 case LABEL_REF:
8372 case SYMBOL_REF:
8373 *total = 4;
8374 return true;
8375
8376 case CONST_DOUBLE:
8377 if (GET_MODE (x) == VOIDmode
8378 && ((CONST_DOUBLE_HIGH (x) == 0
8379 && CONST_DOUBLE_LOW (x) < 0x1000)
8380 || (CONST_DOUBLE_HIGH (x) == -1
8381 && CONST_DOUBLE_LOW (x) < 0
8382 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8383 *total = 0;
8384 else
8385 *total = 8;
8386 return true;
8387
8388 case MEM:
8389 /* If outer-code was a sign or zero extension, a cost
8390 of COSTS_N_INSNS (1) was already added in. This is
8391 why we are subtracting it back out. */
8392 if (outer_code == ZERO_EXTEND)
8393 {
8394 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8395 }
8396 else if (outer_code == SIGN_EXTEND)
8397 {
8398 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8399 }
8400 else if (float_mode_p)
8401 {
8402 *total = sparc_costs->float_load;
8403 }
8404 else
8405 {
8406 *total = sparc_costs->int_load;
8407 }
8408
8409 return true;
8410
8411 case PLUS:
8412 case MINUS:
8413 if (float_mode_p)
8414 *total = sparc_costs->float_plusminus;
8415 else
8416 *total = COSTS_N_INSNS (1);
8417 return false;
8418
8419 case MULT:
8420 if (float_mode_p)
8421 *total = sparc_costs->float_mul;
8422 else if (! TARGET_HARD_MUL)
8423 *total = COSTS_N_INSNS (25);
8424 else
8425 {
8426 int bit_cost;
8427
8428 bit_cost = 0;
8429 if (sparc_costs->int_mul_bit_factor)
8430 {
8431 int nbits;
8432
8433 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8434 {
8435 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
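		  /* Count the set bits; V &= V - 1 clears the least
		     significant set bit on each iteration.  */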
8436 for (nbits = 0; value != 0; value &= value - 1)
8437 nbits++;
8438 }
8439 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8440 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8441 {
8442 rtx x1 = XEXP (x, 1);
8443 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8444 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8445
8446 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8447 nbits++;
8448 for (; value2 != 0; value2 &= value2 - 1)
8449 nbits++;
8450 }
8451 else
8452 nbits = 7;
8453
8454 if (nbits < 3)
8455 nbits = 3;
8456 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8457 bit_cost = COSTS_N_INSNS (bit_cost);
8458 }
8459
8460 if (mode == DImode)
8461 *total = sparc_costs->int_mulX + bit_cost;
8462 else
8463 *total = sparc_costs->int_mul + bit_cost;
8464 }
8465 return false;
8466
8467 case ASHIFT:
8468 case ASHIFTRT:
8469 case LSHIFTRT:
8470 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8471 return false;
8472
8473 case DIV:
8474 case UDIV:
8475 case MOD:
8476 case UMOD:
8477 if (float_mode_p)
8478 {
8479 if (mode == DFmode)
8480 *total = sparc_costs->float_div_df;
8481 else
8482 *total = sparc_costs->float_div_sf;
8483 }
8484 else
8485 {
8486 if (mode == DImode)
8487 *total = sparc_costs->int_divX;
8488 else
8489 *total = sparc_costs->int_div;
8490 }
8491 return false;
8492
8493 case NEG:
8494 if (! float_mode_p)
8495 {
8496 *total = COSTS_N_INSNS (1);
8497 return false;
8498 }
8499 /* FALLTHRU */
8500
8501 case ABS:
8502 case FLOAT:
8503 case UNSIGNED_FLOAT:
8504 case FIX:
8505 case UNSIGNED_FIX:
8506 case FLOAT_EXTEND:
8507 case FLOAT_TRUNCATE:
8508 *total = sparc_costs->float_move;
8509 return false;
8510
8511 case SQRT:
8512 if (mode == DFmode)
8513 *total = sparc_costs->float_sqrt_df;
8514 else
8515 *total = sparc_costs->float_sqrt_sf;
8516 return false;
8517
8518 case COMPARE:
8519 if (float_mode_p)
8520 *total = sparc_costs->float_cmp;
8521 else
8522 *total = COSTS_N_INSNS (1);
8523 return false;
8524
8525 case IF_THEN_ELSE:
8526 if (float_mode_p)
8527 *total = sparc_costs->float_cmove;
8528 else
8529 *total = sparc_costs->int_cmove;
8530 return false;
8531
8532 case IOR:
8533 /* Handle the NAND vector patterns. */
8534 if (sparc_vector_mode_supported_p (GET_MODE (x))
8535 && GET_CODE (XEXP (x, 0)) == NOT
8536 && GET_CODE (XEXP (x, 1)) == NOT)
8537 {
8538 *total = COSTS_N_INSNS (1);
8539 return true;
8540 }
8541 else
8542 return false;
8543
8544 default:
8545 return false;
8546 }
8547 }
8548
8549 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8550 This is achieved by means of a manual dynamic stack space allocation in
8551 the current frame. We make the assumption that SEQ doesn't contain any
8552 function calls, with the possible exception of calls to the PIC helper. */
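
/* Frame sketch while SEQ executes (the stack grows downward;
   SPARC_STACK_BIAS is omitted for clarity):

     %sp + offset + UNITS_PER_WORD:  saved REG2 (when present)
     %sp + offset:                   saved REG
     %sp ... %sp + offset - 1:       the 16-word register save area,
                                     which must stay untouched

   with offset == 16*UNITS_PER_WORD as in the code below.  */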
8553
8554 static void
8555 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8556 {
8557 /* We must preserve the lowest 16 words for the register save area. */
8558 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8559 /* We really need only 2 words of fresh stack space. */
8560 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8561
8562 rtx slot
8563 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8564 SPARC_STACK_BIAS + offset));
8565
8566 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8567 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8568 if (reg2)
8569 emit_insn (gen_rtx_SET (VOIDmode,
8570 adjust_address (slot, word_mode, UNITS_PER_WORD),
8571 reg2));
8572 emit_insn (seq);
8573 if (reg2)
8574 emit_insn (gen_rtx_SET (VOIDmode,
8575 reg2,
8576 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8577 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8578 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8579 }
8580
8581 /* Output the assembler code for a thunk function. THUNK_DECL is the
8582 declaration for the thunk function itself, FUNCTION is the decl for
8583 the target function. DELTA is an immediate constant offset to be
8584 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8585 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
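
/* In effect the emitted thunk computes, in C-like pseudo-code,

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(long *) (*(char **) this + VCALL_OFFSET);
     goto FUNCTION;

   as a tail call; this is a sketch of the rtl below, not literal source.  */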
8586
8587 static void
8588 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8589 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8590 tree function)
8591 {
8592 rtx this, insn, funexp;
8593 unsigned int int_arg_first;
8594
8595 reload_completed = 1;
8596 epilogue_completed = 1;
8597 no_new_pseudos = 1;
8598
8599 emit_note (NOTE_INSN_PROLOGUE_END);
8600
8601 if (flag_delayed_branch)
8602 {
8603 /* We will emit a regular sibcall below, so we need to instruct
8604 output_sibcall that we are in a leaf function. */
8605 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8606
8607 /* This will cause final.c to invoke leaf_renumber_regs so we
8608 must behave as if we were in a not-yet-leafified function. */
8609 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8610 }
8611 else
8612 {
8613 /* We will emit the sibcall manually below, so we will need to
8614 manually spill non-leaf registers. */
8615 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8616
8617 /* We really are in a leaf function. */
8618 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8619 }
8620
8621 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8622 returns a structure, the structure return pointer is there instead. */
8623 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8624 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8625 else
8626 this = gen_rtx_REG (Pmode, int_arg_first);
8627
8628 /* Add DELTA. When possible use a plain add, otherwise load it into
8629 a register first. */
8630 if (delta)
8631 {
8632 rtx delta_rtx = GEN_INT (delta);
8633
8634 if (! SPARC_SIMM13_P (delta))
8635 {
8636 rtx scratch = gen_rtx_REG (Pmode, 1);
8637 emit_move_insn (scratch, delta_rtx);
8638 delta_rtx = scratch;
8639 }
8640
8641 /* THIS += DELTA. */
8642 emit_insn (gen_add2_insn (this, delta_rtx));
8643 }
8644
8645 /* Add the word at address (*THIS + VCALL_OFFSET). */
8646 if (vcall_offset)
8647 {
8648 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8649 rtx scratch = gen_rtx_REG (Pmode, 1);
8650
8651 gcc_assert (vcall_offset < 0);
8652
8653 /* SCRATCH = *THIS. */
8654 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8655
8656 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8657 may not have any available scratch register at this point. */
8658 if (SPARC_SIMM13_P (vcall_offset))
8659 ;
8660 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8661 else if (! fixed_regs[5]
8662 /* The below sequence is made up of at least 2 insns,
8663 while the default method may need only one. */
8664 && vcall_offset < -8192)
8665 {
8666 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8667 emit_move_insn (scratch2, vcall_offset_rtx);
8668 vcall_offset_rtx = scratch2;
8669 }
8670 else
8671 {
8672 rtx increment = GEN_INT (-4096);
8673
8674 /* VCALL_OFFSET is a negative number whose typical range can be
8675 estimated as -32768..0 in 32-bit mode. In almost all cases
8676 it is therefore cheaper to emit multiple add insns than
8677 spilling and loading the constant into a register (at least
8678 6 insns). */
8679 while (! SPARC_SIMM13_P (vcall_offset))
8680 {
8681 emit_insn (gen_add2_insn (scratch, increment));
8682 vcall_offset += 4096;
8683 }
8684 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8685 }
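      /* For example, VCALL_OFFSET == -20000 emits four
         "add %g1, -4096, %g1" insns, leaving -3616, which fits in the
         simm13 field of the memory access below.  */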
8686
8687 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8688 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8689 gen_rtx_PLUS (Pmode,
8690 scratch,
8691 vcall_offset_rtx)));
8692
8693 /* THIS += *(*THIS + VCALL_OFFSET). */
8694 emit_insn (gen_add2_insn (this, scratch));
8695 }
8696
8697 /* Generate a tail call to the target function. */
8698 if (! TREE_USED (function))
8699 {
8700 assemble_external (function);
8701 TREE_USED (function) = 1;
8702 }
8703 funexp = XEXP (DECL_RTL (function), 0);
8704
8705 if (flag_delayed_branch)
8706 {
8707 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8708 insn = emit_call_insn (gen_sibcall (funexp));
8709 SIBLING_CALL_P (insn) = 1;
8710 }
8711 else
8712 {
8713 /* The hoops we have to jump through in order to generate a sibcall
8714 without using delay slots... */
8715 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8716
8717 if (flag_pic)
8718 {
8719 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8720 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8721 start_sequence ();
8722 /* Delay emitting the PIC helper function because it needs to
8723 change the section and we are emitting assembly code. */
8724 load_pic_register (true); /* clobbers %o7 */
8725 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8726 seq = get_insns ();
8727 end_sequence ();
8728 emit_and_preserve (seq, spill_reg, spill_reg2);
8729 }
8730 else if (TARGET_ARCH32)
8731 {
8732 emit_insn (gen_rtx_SET (VOIDmode,
8733 scratch,
8734 gen_rtx_HIGH (SImode, funexp)));
8735 emit_insn (gen_rtx_SET (VOIDmode,
8736 scratch,
8737 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8738 }
8739 else /* TARGET_ARCH64 */
8740 {
8741 switch (sparc_cmodel)
8742 {
8743 case CM_MEDLOW:
8744 case CM_MEDMID:
8745 /* The destination can serve as a temporary. */
8746 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8747 break;
8748
8749 case CM_MEDANY:
8750 case CM_EMBMEDANY:
8751 /* The destination cannot serve as a temporary. */
8752 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8753 start_sequence ();
8754 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8755 seq = get_insns ();
8756 end_sequence ();
8757 emit_and_preserve (seq, spill_reg, 0);
8758 break;
8759
8760 default:
8761 gcc_unreachable ();
8762 }
8763 }
8764
8765 emit_jump_insn (gen_indirect_jump (scratch));
8766 }
8767
8768 emit_barrier ();
8769
8770 /* Run just enough of rest_of_compilation to get the insns emitted.
8771 There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
8773 assemble_start_function and assemble_end_function. */
8774 insn = get_insns ();
8775 insn_locators_alloc ();
8776 shorten_branches (insn);
8777 final_start_function (insn, file, 1);
8778 final (insn, file, 1);
8779 final_end_function ();
8780
8781 reload_completed = 0;
8782 epilogue_completed = 0;
8783 no_new_pseudos = 0;
8784 }
8785
8786 /* Return true if sparc_output_mi_thunk would be able to output the
8787 assembler code for the thunk function specified by the arguments
8788 it is passed, and false otherwise. */
8789 static bool
8790 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8791 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8792 HOST_WIDE_INT vcall_offset,
8793 tree function ATTRIBUTE_UNUSED)
8794 {
8795 /* Bound the loop used in the default method above. */
8796 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8797 }
8798
8799 /* How to allocate a 'struct machine_function'. */
8800
8801 static struct machine_function *
8802 sparc_init_machine_status (void)
8803 {
8804 return ggc_alloc_cleared (sizeof (struct machine_function));
8805 }
8806
8807 /* Locate some local-dynamic symbol still in use by this function
8808 so that we can print its name in local-dynamic base patterns. */
8809
8810 static const char *
8811 get_some_local_dynamic_name (void)
8812 {
8813 rtx insn;
8814
8815 if (cfun->machine->some_ld_name)
8816 return cfun->machine->some_ld_name;
8817
8818 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8819 if (INSN_P (insn)
8820 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8821 return cfun->machine->some_ld_name;
8822
8823 gcc_unreachable ();
8824 }
8825
8826 static int
8827 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8828 {
8829 rtx x = *px;
8830
8831 if (x
8832 && GET_CODE (x) == SYMBOL_REF
8833 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8834 {
8835 cfun->machine->some_ld_name = XSTR (x, 0);
8836 return 1;
8837 }
8838
8839 return 0;
8840 }
8841
8842 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8843 This is called from dwarf2out.c to emit call frame instructions
8844 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8845 static void
8846 sparc_dwarf_handle_frame_unspec (const char *label,
8847 rtx pattern ATTRIBUTE_UNUSED,
8848 int index ATTRIBUTE_UNUSED)
8849 {
8850 gcc_assert (index == UNSPECV_SAVEW);
8851 dwarf2out_window_save (label);
8852 }
8853
8854 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8855 We need to emit DTP-relative relocations. */
8856
8857 static void
8858 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8859 {
8860 switch (size)
8861 {
8862 case 4:
8863 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8864 break;
8865 case 8:
8866 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8867 break;
8868 default:
8869 gcc_unreachable ();
8870 }
8871 output_addr_const (file, x);
8872 fputs (")", file);
8873 }
8874
8875 /* Do whatever processing is required at the end of a file. */
8876
8877 static void
8878 sparc_file_end (void)
8879 {
8880 /* If we haven't emitted the special PIC helper function, do so now. */
8881 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8882 emit_pic_helper ();
8883
8884 if (NEED_INDICATE_EXEC_STACK)
8885 file_end_indicate_exec_stack ();
8886 }
8887
8888 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8889 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
8890
8891 static const char *
8892 sparc_mangle_fundamental_type (tree type)
8893 {
8894 if (!TARGET_64BIT
8895 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8896 && TARGET_LONG_DOUBLE_128)
8897 return "g";
8898
8899 /* For all other types, use normal C++ mangling. */
8900 return NULL;
8901 }
8902 #endif
8903
/* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing
   a 32-bit compare-and-swap on the word containing the byte or half-word.  */
8906
8907 void
8908 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8909 {
8910 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8911 rtx addr = gen_reg_rtx (Pmode);
8912 rtx off = gen_reg_rtx (SImode);
8913 rtx oldv = gen_reg_rtx (SImode);
8914 rtx newv = gen_reg_rtx (SImode);
8915 rtx oldvalue = gen_reg_rtx (SImode);
8916 rtx newvalue = gen_reg_rtx (SImode);
8917 rtx res = gen_reg_rtx (SImode);
8918 rtx resv = gen_reg_rtx (SImode);
8919 rtx memsi, val, mask, end_label, loop_label, cc;
8920
8921 emit_insn (gen_rtx_SET (VOIDmode, addr,
8922 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8923
8924 if (Pmode != SImode)
8925 addr1 = gen_lowpart (SImode, addr1);
8926 emit_insn (gen_rtx_SET (VOIDmode, off,
8927 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8928
8929 memsi = gen_rtx_MEM (SImode, addr);
8930 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8931 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8932
8933 val = force_reg (SImode, memsi);
8934
8935 emit_insn (gen_rtx_SET (VOIDmode, off,
8936 gen_rtx_XOR (SImode, off,
8937 GEN_INT (GET_MODE (mem) == QImode
8938 ? 3 : 2))));
8939
8940 emit_insn (gen_rtx_SET (VOIDmode, off,
8941 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8942
8943 if (GET_MODE (mem) == QImode)
8944 mask = force_reg (SImode, GEN_INT (0xff));
8945 else
8946 mask = force_reg (SImode, GEN_INT (0xffff));
8947
8948 emit_insn (gen_rtx_SET (VOIDmode, mask,
8949 gen_rtx_ASHIFT (SImode, mask, off)));
8950
8951 emit_insn (gen_rtx_SET (VOIDmode, val,
8952 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8953 val)));
8954
8955 oldval = gen_lowpart (SImode, oldval);
8956 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8957 gen_rtx_ASHIFT (SImode, oldval, off)));
8958
8959 newval = gen_lowpart_common (SImode, newval);
8960 emit_insn (gen_rtx_SET (VOIDmode, newv,
8961 gen_rtx_ASHIFT (SImode, newval, off)));
8962
8963 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8964 gen_rtx_AND (SImode, oldv, mask)));
8965
8966 emit_insn (gen_rtx_SET (VOIDmode, newv,
8967 gen_rtx_AND (SImode, newv, mask)));
8968
8969 end_label = gen_label_rtx ();
8970 loop_label = gen_label_rtx ();
8971 emit_label (loop_label);
8972
8973 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
8974 gen_rtx_IOR (SImode, oldv, val)));
8975
8976 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
8977 gen_rtx_IOR (SImode, newv, val)));
8978
8979 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
8980
8981 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
8982
8983 emit_insn (gen_rtx_SET (VOIDmode, resv,
8984 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8985 res)));
8986
8987 sparc_compare_op0 = resv;
8988 sparc_compare_op1 = val;
8989 cc = gen_compare_reg (NE);
8990
8991 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
8992
8993 sparc_compare_emitted = cc;
8994 emit_jump_insn (gen_bne (loop_label));
8995
8996 emit_label (end_label);
8997
8998 emit_insn (gen_rtx_SET (VOIDmode, res,
8999 gen_rtx_AND (SImode, res, mask)));
9000
9001 emit_insn (gen_rtx_SET (VOIDmode, res,
9002 gen_rtx_LSHIFTRT (SImode, res, off)));
9003
9004 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9005 }
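
/* For illustration only: a C model of the QImode loop generated above,
   written with GCC's __sync_val_compare_and_swap on the containing word.
   The function name and the big-endian lane arithmetic are assumptions
   mirroring the expander, not part of the port proper.  */

static unsigned char ATTRIBUTE_UNUSED
sparc_cas8_model (unsigned char *p, unsigned char oldval, unsigned char newval)
{
  unsigned int *wp = (unsigned int *) ((unsigned long) p & ~3UL);
  unsigned int shift = (((unsigned long) p & 3) ^ 3) * 8;  /* big-endian */
  unsigned int mask = 0xffU << shift;
  unsigned int bg = *wp & ~mask;  /* the bytes we must not disturb */
  unsigned int res;

  for (;;)
    {
      unsigned int oldw = ((unsigned int) oldval << shift) | bg;
      unsigned int neww = ((unsigned int) newval << shift) | bg;

      res = __sync_val_compare_and_swap (wp, oldw, neww);
      if (res == oldw)
	break;			/* The word-sized CAS succeeded.  */
      if ((res & ~mask) == bg)
	break;			/* Only our byte differed: a real failure.  */
      bg = res & ~mask;		/* A neighboring byte changed; retry.  */
    }

  return (unsigned char) ((res & mask) >> shift);
}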
9006
9007 #include "gt-sparc.h"