/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

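/* Cost vector for the processor we are tuning for; reassigned by
   sparc_override_options according to the -mcpu/-mtune selection.  */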
const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

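/* The -mcpu=/-mtune= selections.  Entry 0 is seeded with the name matching
   TARGET_CPU_DEFAULT; sparc_handle_option records the -mcpu= and -mtune=
   arguments in entries 1 and 2.  */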
struct sparc_cpu_select sparc_select[] =
{
  /* switch     name,           tune    arch */
  { (char *)0,  "default",      1,      1 },
  { (char *)0,  "-mcpu=",       1,      1 },
  { (char *)0,  "-mtune=",      1,      0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
      /* Although insns using %y are deprecated, it is a clear win on current
         ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T2 */
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 2
                      : (sparc_cpu == PROCESSOR_ULTRASPARC3
                         ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_ULTRASPARC3
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 64 : 32));
}
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */
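/* E.g. the image of 1.5f is 0x3fc00000; its low 10 bits are clear, so a
   lone sethi materializes it.  */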

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
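
/* Taken together, fp_mov_p, fp_sethi_p and fp_high_losum_p classify an
   SFmode constant by the cheapest integer-register sequence that loads
   its bit image: a single mov, a single sethi, or a sethi/or pair.  */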

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
        {
          addend = XEXP (XEXP (sym, 0), 1);
          sym = XEXP (XEXP (sym, 0), 0);
        }

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
        {
          sym = gen_rtx_PLUS (mode, sym, addend);
          sym = force_operand (sym, operands[0]);
        }
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
         gap can be different from the object-file gap.  We therefore can't
         assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
         are absolutely sure that X is in the same segment as the GOT.
         Unfortunately, the flexibility of linker scripts means that we
         can't be sure of that in general, so assume that _G_O_T_-relative
         accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
        {
          if (mode == SImode)
            {
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              return true;
            }

          if (mode == DImode)
            {
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
              return true;
            }
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1] = legitimize_pic_address (operands[1],
                                                mode,
                                                (reload_in_progress ?
                                                 operands[0] :
                                                 NULL_RTX));
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get here;
   the move expander guarantees this.  */
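/* E.g. for OP1 == 0x12345678 the CONST_INT path below emits the pair
     sethi %hi(0x12345678), %temp
     or %temp, 0x278, %op0  */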

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY, we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi   %hi(symbol), %temp1
                        add     %temp1, EMBMEDANY_BASE_REG, %temp2
                        or      %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi   %uhi(symbol), %temp1
                        sethi   %hi(symbol), %temp2
                        or      %temp1, %ulo(symbol), %temp3
                        sllx    %temp3, 32, %temp4
                        or      %temp4, %temp2, %temp5
                        or      %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly which bits are
   set for a HIGH; they are unspecified.  Unfortunately this leads to
   many missed optimizations during CSE.  We mask out the non-HIGH bits
   and match a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

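/* Load into OP0 a 64-bit constant whose upper 32 bits are all zeros
   (IS_NEG == 0) or all ones (IS_NEG != 0), using TEMP as scratch:
   sethi/or for the positive case, sethi of the complement followed by
   a sign-extending xor for the negative one.  */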
static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}

static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, int);

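/* Load into OP0 the 64-bit constant (HIGH_BITS << SHIFT_COUNT)
   | LOW_IMMEDIATE: materialize HIGH_BITS in TEMP, shift it into place,
   then OR in the low immediate part if it is nonzero.  */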
static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
                               int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
}

static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                sub_temp,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
        }
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
          sub_temp = op0;
          to_shift = 12;
        }
      else
        {
          to_shift += 12;
        }
      if (low2 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
          sub_temp = op0;
          to_shift = 8;
        }
      else
        {
          to_shift += 8;
        }
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

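  /* Scan from both ends at once: bit I of LOW_BITS locates the lowest
     set bit, while bit (63 - I), i.e. bit (31 - I) of HIGH_BITS, locates
     the highest; fall back to the other word if one scan comes up
     empty.  */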
  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
        lowest_bit_set = i;
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
        {
          if ((lowest_bit_set == -1)
              && ((high_bits >> i) & 1))
            lowest_bit_set = i + 32;
          if ((highest_bit_set == -1)
              && ((low_bits >> (32 - i - 1)) & 1))
            highest_bit_set = 32 - i - 1;
        }
      while (++i < 32
             && ((highest_bit_set == -1)
                 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
        {
          if ((low_bits & (1 << i)) != 0)
            continue;
        }
      else
        {
          if ((high_bits & (1 << (i - 32))) != 0)
            continue;
        }
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}

static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

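/* Return nonzero if HIGH_BITS:LOW_BITS can be loaded with two
   instructions: a value with a sign-extended 32-bit image, a solid run
   of ones reaching bit 0 or bit 63, or a bit group narrow enough for a
   mov/sethi followed by one shift.  */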
static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
                   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}

static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
                                                        unsigned HOST_WIDE_INT,
                                                        int, int);

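/* Extract the contiguous bit group starting at LOWEST_BIT_SET from
   HIGH_BITS:LOW_BITS and return it left-shifted by SHIFT, i.e. the
   "focus bits" that a mov or sethi can build before they are shifted
   back into position.  */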
static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
                          unsigned HOST_WIDE_INT low_bits,
                          int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}

/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
              && (GET_CODE (op0) == SUBREG
                  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits   bits 0  --> 31
     high_bits  bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov     -1, %reg
   *    sllx    %reg, shift, %reg
   * 2) mov     -1, %reg
   *    srlx    %reg, shift, %reg
   * 3) mov     some_small_const, %reg
   *    sllx    %reg, shift, %reg
   */
  if (((highest_bit_set == 63
        || lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
           && lowest_bit_set != 0)
          || all_bits_between_are_set == 0)
        {
          the_const =
            create_simple_focus_bits (high_bits, low_bits,
                                      lowest_bit_set, 0);
        }
      else if (lowest_bit_set == 0)
        shift = -(63 - highest_bit_set);
1782
1783 gcc_assert (SPARC_SIMM13_P (the_const));
1784 gcc_assert (shift != 0);
1785
1786 emit_insn (gen_safe_SET64 (temp, the_const));
1787 if (shift > 0)
1788 emit_insn (gen_rtx_SET (VOIDmode,
1789 op0,
1790 gen_rtx_ASHIFT (DImode,
1791 temp,
1792 GEN_INT (shift))));
1793 else if (shift < 0)
1794 emit_insn (gen_rtx_SET (VOIDmode,
1795 op0,
1796 gen_rtx_LSHIFTRT (DImode,
1797 temp,
1798 GEN_INT (-shift))));
1799 return;
1800 }
1801
1802 /* Now a range of 22 or fewer bits set somewhere.
1803 * 1) sethi %hi(focus_bits), %reg
1804 * sllx %reg, shift, %reg
1805 * 2) sethi %hi(focus_bits), %reg
1806 * srlx %reg, shift, %reg
1807 */
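/* For example (an illustrative value), 0x00001ffffc000000 has its set
   bits in positions 26..44, so focus_bits comes out as 0x1ffffc00 and
   we emit: sethi %hi(0x1ffffc00), %reg; sllx %reg, 16, %reg.  */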
1808 if ((highest_bit_set - lowest_bit_set) < 21)
1809 {
1810 unsigned HOST_WIDE_INT focus_bits =
1811 create_simple_focus_bits (high_bits, low_bits,
1812 lowest_bit_set, 10);
1813
1814 gcc_assert (SPARC_SETHI_P (focus_bits));
1815 gcc_assert (lowest_bit_set != 10);
1816
1817 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1818
1819 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1820 if (lowest_bit_set < 10)
1821 emit_insn (gen_rtx_SET (VOIDmode,
1822 op0,
1823 gen_rtx_LSHIFTRT (DImode, temp,
1824 GEN_INT (10 - lowest_bit_set))));
1825 else if (lowest_bit_set > 10)
1826 emit_insn (gen_rtx_SET (VOIDmode,
1827 op0,
1828 gen_rtx_ASHIFT (DImode, temp,
1829 GEN_INT (lowest_bit_set - 10))));
1830 return;
1831 }
1832
1833 /* 1) sethi %hi(low_bits), %reg
1834 * or %reg, %lo(low_bits), %reg
1835 * 2) sethi %hi(~low_bits), %reg
1836 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1837 */
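/* For instance, 0x0000000012345678 becomes
   sethi %hi(0x12345678), %reg; or %reg, %lo(0x12345678), %reg,
   while an upper half of all ones goes through the complement/xor form.  */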
1838 if (high_bits == 0
1839 || high_bits == 0xffffffff)
1840 {
1841 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1842 (high_bits == 0xffffffff));
1843 return;
1844 }
1845
1846 /* Now, try 3-insn sequences. */
1847
1848 /* 1) sethi %hi(high_bits), %reg
1849 * or %reg, %lo(high_bits), %reg
1850 * sllx %reg, 32, %reg
1851 */
1852 if (low_bits == 0)
1853 {
1854 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1855 return;
1856 }
1857
1858 /* We may be able to do something quick
1859 when the constant is negated, so try that. */
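/* A worked instance (an arbitrary illustrative value): for
   0xffffffe003ffffab, the complement with the low 10 bits cleared is
   0x0000001ffc000000, which is itself a 2-insn constant
   (mov 0x7ff, %t; sllx %t, 26, %t), so a final
   xor %t, -0x55, %dst recovers the original value in 3 insns.  */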
1860 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1861 (~low_bits) & 0xfffffc00))
1862 {
1863 /* NOTE: The trailing bits get XOR'd so we need the
1864 non-negated bits, not the negated ones. */
1865 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1866
1867 if ((((~high_bits) & 0xffffffff) == 0
1868 && ((~low_bits) & 0x80000000) == 0)
1869 || (((~high_bits) & 0xffffffff) == 0xffffffff
1870 && ((~low_bits) & 0x80000000) != 0))
1871 {
1872 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1873
1874 if ((SPARC_SETHI_P (fast_int)
1875 && (~high_bits & 0xffffffff) == 0)
1876 || SPARC_SIMM13_P (fast_int))
1877 emit_insn (gen_safe_SET64 (temp, fast_int));
1878 else
1879 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1880 }
1881 else
1882 {
1883 rtx negated_const;
1884 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1885 (((HOST_WIDE_INT) ((~high_bits) & 0xffffffff)) << 32));
1886 sparc_emit_set_const64 (temp, negated_const);
1887 }
1888
1889 /* If we are XOR'ing with -1, then we should emit a one's complement
1890 instead. This way the combiner will notice logical operations
1891 such as ANDN later on and substitute. */
1892 if (trailing_bits == 0x3ff)
1893 {
1894 emit_insn (gen_rtx_SET (VOIDmode, op0,
1895 gen_rtx_NOT (DImode, temp)));
1896 }
1897 else
1898 {
1899 emit_insn (gen_rtx_SET (VOIDmode,
1900 op0,
1901 gen_safe_XOR64 (temp,
1902 (-0x400 | trailing_bits))));
1903 }
1904 return;
1905 }
1906
1907 /* 1) sethi %hi(xxx), %reg
1908 * or %reg, %lo(xxx), %reg
1909 * sllx %reg, yyy, %reg
1910 *
1911 * ??? This is just a generalized version of the low_bits==0
1912 * thing above, FIXME...
1913 */
1914 if ((highest_bit_set - lowest_bit_set) < 32)
1915 {
1916 unsigned HOST_WIDE_INT focus_bits =
1917 create_simple_focus_bits (high_bits, low_bits,
1918 lowest_bit_set, 0);
1919
1920 /* We can't get here in this state. */
1921 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1922
1923 /* So what we know is that the set bits straddle the
1924 middle of the 64-bit word. */
1925 sparc_emit_set_const64_quick2 (op0, temp,
1926 focus_bits, 0,
1927 lowest_bit_set);
1928 return;
1929 }
1930
1931 /* 1) sethi %hi(high_bits), %reg
1932 * or %reg, %lo(high_bits), %reg
1933 * sllx %reg, 32, %reg
1934 * or %reg, low_bits, %reg
1935 */
1936 if (SPARC_SIMM13_P (low_bits)
1937 && ((int) low_bits > 0))
1938 {
1939 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1940 return;
1941 }
1942
1943 /* The easiest way, when all else fails, is full decomposition. */
1944 #if 0
1945 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1946 high_bits, low_bits, ~high_bits, ~low_bits);
1947 #endif
1948 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1949 }
1950 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1951
1952 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1953 return the mode to be used for the comparison. For floating-point,
1954 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1955 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1956 processing is needed. */
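/* For example, (compare (plus:SI a b) (const_int 0)) yields CC_NOOVmode:
   the flags are those of the addition itself, so only tests that do not
   rely on the overflow bit are meaningful in that mode.  */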
1957
1958 enum machine_mode
1959 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1960 {
1961 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1962 {
1963 switch (op)
1964 {
1965 case EQ:
1966 case NE:
1967 case UNORDERED:
1968 case ORDERED:
1969 case UNLT:
1970 case UNLE:
1971 case UNGT:
1972 case UNGE:
1973 case UNEQ:
1974 case LTGT:
1975 return CCFPmode;
1976
1977 case LT:
1978 case LE:
1979 case GT:
1980 case GE:
1981 return CCFPEmode;
1982
1983 default:
1984 gcc_unreachable ();
1985 }
1986 }
1987 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1988 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1989 {
1990 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1991 return CCX_NOOVmode;
1992 else
1993 return CC_NOOVmode;
1994 }
1995 else
1996 {
1997 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1998 return CCXmode;
1999 else
2000 return CCmode;
2001 }
2002 }
2003
2004 /* Emit the compare insn and return the CC reg for a CODE comparison. */
2005
2006 rtx
2007 gen_compare_reg (enum rtx_code code)
2008 {
2009 enum machine_mode mode;
2010 rtx x, y, cc_reg;
2011
2012 if (GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_CC)
2013 return sparc_compare_op0;
2014
2015 x = sparc_compare_op0;
2016 y = sparc_compare_op1;
2017 mode = SELECT_CC_MODE (code, x, y);
2018
2019 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2020 fcc regs (cse can't tell they're really call clobbered regs and will
2021 remove a duplicate comparison even if there is an intervening function
2022 call - it will then try to reload the cc reg via an int reg which is why
2023 we need the movcc patterns). It is possible to provide the movcc
2024 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2025 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2026 to tell cse that CCFPE mode registers (even pseudos) are call
2027 clobbered. */
2028
2029 /* ??? This is an experiment. Rather than making changes to cse which may
2030 or may not be easy/clean, we do our own cse. This is possible because
2031 we will generate hard registers. Cse knows they're call clobbered (it
2032 doesn't know the same thing about pseudos). If we guess wrong, no big
2033 deal, but if we win, great! */
2034
2035 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2036 #if 1 /* experiment */
2037 {
2038 int reg;
2039 /* We cycle through the registers to ensure they're all exercised. */
2040 static int next_fcc_reg = 0;
2041 /* Previous x,y for each fcc reg. */
2042 static rtx prev_args[4][2];
2043
2044 /* Scan prev_args for x,y. */
2045 for (reg = 0; reg < 4; reg++)
2046 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2047 break;
2048 if (reg == 4)
2049 {
2050 reg = next_fcc_reg;
2051 prev_args[reg][0] = x;
2052 prev_args[reg][1] = y;
2053 next_fcc_reg = (next_fcc_reg + 1) & 3;
2054 }
2055 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2056 }
2057 #else
2058 cc_reg = gen_reg_rtx (mode);
2059 #endif /* ! experiment */
2060 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2061 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2062 else
2063 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2064
2065 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2066 will only result in an unrecognizable insn so no point in asserting. */
2067 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2068
2069 return cc_reg;
2070 }
2071
2072 /* Same as above but return the whole compare operator. */
2073
2074 rtx
2075 gen_compare_operator (enum rtx_code code)
2076 {
2077 rtx cc_reg;
2078
2079 if (GET_MODE (sparc_compare_op0) == TFmode && !TARGET_HARD_QUAD)
2080 code
2081 = sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, code);
2082
2083 cc_reg = gen_compare_reg (code);
2084 return gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2085 }
2086
2087 /* This function is used for v9 only.
2088 CODE is the code for an Scc's comparison.
2089 OPERANDS[0] is the target of the Scc insn.
2090 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2091 been generated yet).
2092
2093 This function is needed to turn
2094
2095 (set (reg:SI 110)
2096 (gt (reg:CCX 100 %icc)
2097 (const_int 0)))
2098 into
2099 (set (reg:SI 110)
2100 (gt:DI (reg:CCX 100 %icc)
2101 (const_int 0)))
2102
2103 I.e., the instruction recognizer needs to see the mode of the comparison to
2104 find the right instruction. We could use "gt:DI" right in the
2105 define_expand, but leaving it out allows us to handle DI, SI, etc.
2106
2107 We refer to the global sparc compare operands sparc_compare_op0 and
2108 sparc_compare_op1. */
2109
2110 int
2111 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2112 {
2113 if (! TARGET_ARCH64
2114 && (GET_MODE (sparc_compare_op0) == DImode
2115 || GET_MODE (operands[0]) == DImode))
2116 return 0;
2117
2118 /* Try to use the movrCC insns. */
2119 if (TARGET_ARCH64
2120 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2121 && sparc_compare_op1 == const0_rtx
2122 && v9_regcmp_p (compare_code))
2123 {
2124 rtx op0 = sparc_compare_op0;
2125 rtx temp;
2126
2127 /* Special case for op0 != 0. This can be done with one instruction if
2128 operands[0] == sparc_compare_op0. */
2129
2130 if (compare_code == NE
2131 && GET_MODE (operands[0]) == DImode
2132 && rtx_equal_p (op0, operands[0]))
2133 {
2134 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2135 gen_rtx_IF_THEN_ELSE (DImode,
2136 gen_rtx_fmt_ee (compare_code, DImode,
2137 op0, const0_rtx),
2138 const1_rtx,
2139 operands[0])));
2140 return 1;
2141 }
2142
2143 if (reg_overlap_mentioned_p (operands[0], op0))
2144 {
2145 /* Handle the case where operands[0] == sparc_compare_op0.
2146 We "early clobber" the result. */
2147 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2148 emit_move_insn (op0, sparc_compare_op0);
2149 }
2150
2151 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2152 if (GET_MODE (op0) != DImode)
2153 {
2154 temp = gen_reg_rtx (DImode);
2155 convert_move (temp, op0, 0);
2156 }
2157 else
2158 temp = op0;
2159 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2160 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2161 gen_rtx_fmt_ee (compare_code, DImode,
2162 temp, const0_rtx),
2163 const1_rtx,
2164 operands[0])));
2165 return 1;
2166 }
2167 else
2168 {
2169 operands[1] = gen_compare_reg (compare_code);
2170
2171 switch (GET_MODE (operands[1]))
2172 {
2173 case CCmode:
2174 case CCXmode:
2175 case CCFPEmode:
2176 case CCFPmode:
2177 break;
2178 default:
2179 gcc_unreachable ();
2180 }
2181 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2182 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2183 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2184 gen_rtx_fmt_ee (compare_code,
2185 GET_MODE (operands[1]),
2186 operands[1], const0_rtx),
2187 const1_rtx, operands[0])));
2188 return 1;
2189 }
2190 }
2191
2192 /* Emit a conditional jump insn for the v9 architecture using comparison code
2193 CODE and jump target LABEL.
2194 This function exists to take advantage of the v9 brxx insns. */
2195
2196 void
2197 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2198 {
2199 gcc_assert (GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) != MODE_CC);
2200 emit_jump_insn (gen_rtx_SET (VOIDmode,
2201 pc_rtx,
2202 gen_rtx_IF_THEN_ELSE (VOIDmode,
2203 gen_rtx_fmt_ee (code, GET_MODE (op0),
2204 op0, const0_rtx),
2205 gen_rtx_LABEL_REF (VOIDmode, label),
2206 pc_rtx)));
2207 }
2208
2209 /* Generate a DFmode part of a hard TFmode register.
2210 REG is the TFmode hard register, LOW is 1 for the
2211 low 64 bits of the register and 0 otherwise. */
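/* For example, for a TFmode value living in %f0 (thus occupying
   %f0..%f3 on this big-endian target), gen_df_reg (reg, 0) returns the
   high half %f0 and gen_df_reg (reg, 1) the low half %f2.  */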
2213 rtx
2214 gen_df_reg (rtx reg, int low)
2215 {
2216 int regno = REGNO (reg);
2217
2218 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2219 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2220 return gen_rtx_REG (DFmode, regno);
2221 }
2222 \f
2223 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2224 Unlike normal calls, TFmode operands are passed by reference. It is
2225 assumed that no more than 3 operands are required. */
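/* For instance, a TFmode addition ends up as a call roughly equivalent
   to the C fragment

       _Qp_add (&result, &x, &y);

   with the address of the result slot passed as the first argument, as
   the SPARC 64-bit ABI specifies for the _Qp_* routines.  */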
2226
2227 static void
2228 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2229 {
2230 rtx ret_slot = NULL, arg[3], func_sym;
2231 int i;
2232
2233 /* We only expect to be called for conversions, unary, and binary ops. */
2234 gcc_assert (nargs == 2 || nargs == 3);
2235
2236 for (i = 0; i < nargs; ++i)
2237 {
2238 rtx this_arg = operands[i];
2239 rtx this_slot;
2240
2241 /* TFmode arguments and return values are passed by reference. */
2242 if (GET_MODE (this_arg) == TFmode)
2243 {
2244 int force_stack_temp;
2245
2246 force_stack_temp = 0;
2247 if (TARGET_BUGGY_QP_LIB && i == 0)
2248 force_stack_temp = 1;
2249
2250 if (GET_CODE (this_arg) == MEM
2251 && ! force_stack_temp)
2252 this_arg = XEXP (this_arg, 0);
2253 else if (CONSTANT_P (this_arg)
2254 && ! force_stack_temp)
2255 {
2256 this_slot = force_const_mem (TFmode, this_arg);
2257 this_arg = XEXP (this_slot, 0);
2258 }
2259 else
2260 {
2261 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2262
2263 /* Operand 0 is the return value. We'll copy it out later. */
2264 if (i > 0)
2265 emit_move_insn (this_slot, this_arg);
2266 else
2267 ret_slot = this_slot;
2268
2269 this_arg = XEXP (this_slot, 0);
2270 }
2271 }
2272
2273 arg[i] = this_arg;
2274 }
2275
2276 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2277
2278 if (GET_MODE (operands[0]) == TFmode)
2279 {
2280 if (nargs == 2)
2281 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2282 arg[0], GET_MODE (arg[0]),
2283 arg[1], GET_MODE (arg[1]));
2284 else
2285 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2286 arg[0], GET_MODE (arg[0]),
2287 arg[1], GET_MODE (arg[1]),
2288 arg[2], GET_MODE (arg[2]));
2289
2290 if (ret_slot)
2291 emit_move_insn (operands[0], ret_slot);
2292 }
2293 else
2294 {
2295 rtx ret;
2296
2297 gcc_assert (nargs == 2);
2298
2299 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2300 GET_MODE (operands[0]), 1,
2301 arg[1], GET_MODE (arg[1]));
2302
2303 if (ret != operands[0])
2304 emit_move_insn (operands[0], ret);
2305 }
2306 }
2307
2308 /* Expand soft-float TFmode operations into calls to the SPARC ABI routines. */
2309
2310 static void
2311 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2312 {
2313 const char *func;
2314
2315 switch (code)
2316 {
2317 case PLUS:
2318 func = "_Qp_add";
2319 break;
2320 case MINUS:
2321 func = "_Qp_sub";
2322 break;
2323 case MULT:
2324 func = "_Qp_mul";
2325 break;
2326 case DIV:
2327 func = "_Qp_div";
2328 break;
2329 default:
2330 gcc_unreachable ();
2331 }
2332
2333 emit_soft_tfmode_libcall (func, 3, operands);
2334 }
2335
2336 static void
2337 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2338 {
2339 const char *func;
2340
2341 gcc_assert (code == SQRT);
2342 func = "_Qp_sqrt";
2343
2344 emit_soft_tfmode_libcall (func, 2, operands);
2345 }
2346
2347 static void
2348 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2349 {
2350 const char *func;
2351
2352 switch (code)
2353 {
2354 case FLOAT_EXTEND:
2355 switch (GET_MODE (operands[1]))
2356 {
2357 case SFmode:
2358 func = "_Qp_stoq";
2359 break;
2360 case DFmode:
2361 func = "_Qp_dtoq";
2362 break;
2363 default:
2364 gcc_unreachable ();
2365 }
2366 break;
2367
2368 case FLOAT_TRUNCATE:
2369 switch (GET_MODE (operands[0]))
2370 {
2371 case SFmode:
2372 func = "_Qp_qtos";
2373 break;
2374 case DFmode:
2375 func = "_Qp_qtod";
2376 break;
2377 default:
2378 gcc_unreachable ();
2379 }
2380 break;
2381
2382 case FLOAT:
2383 switch (GET_MODE (operands[1]))
2384 {
2385 case SImode:
2386 func = "_Qp_itoq";
2387 if (TARGET_ARCH64)
2388 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2389 break;
2390 case DImode:
2391 func = "_Qp_xtoq";
2392 break;
2393 default:
2394 gcc_unreachable ();
2395 }
2396 break;
2397
2398 case UNSIGNED_FLOAT:
2399 switch (GET_MODE (operands[1]))
2400 {
2401 case SImode:
2402 func = "_Qp_uitoq";
2403 if (TARGET_ARCH64)
2404 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2405 break;
2406 case DImode:
2407 func = "_Qp_uxtoq";
2408 break;
2409 default:
2410 gcc_unreachable ();
2411 }
2412 break;
2413
2414 case FIX:
2415 switch (GET_MODE (operands[0]))
2416 {
2417 case SImode:
2418 func = "_Qp_qtoi";
2419 break;
2420 case DImode:
2421 func = "_Qp_qtox";
2422 break;
2423 default:
2424 gcc_unreachable ();
2425 }
2426 break;
2427
2428 case UNSIGNED_FIX:
2429 switch (GET_MODE (operands[0]))
2430 {
2431 case SImode:
2432 func = "_Qp_qtoui";
2433 break;
2434 case DImode:
2435 func = "_Qp_qtoux";
2436 break;
2437 default:
2438 gcc_unreachable ();
2439 }
2440 break;
2441
2442 default:
2443 gcc_unreachable ();
2444 }
2445
2446 emit_soft_tfmode_libcall (func, 2, operands);
2447 }
2448
2449 /* Expand a hard-float TFmode operation. All arguments must be in
2450 registers. */
2451
2452 static void
2453 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2454 {
2455 rtx op, dest;
2456
2457 if (GET_RTX_CLASS (code) == RTX_UNARY)
2458 {
2459 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2460 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2461 }
2462 else
2463 {
2464 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2465 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2466 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2467 operands[1], operands[2]);
2468 }
2469
2470 if (register_operand (operands[0], VOIDmode))
2471 dest = operands[0];
2472 else
2473 dest = gen_reg_rtx (GET_MODE (operands[0]));
2474
2475 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2476
2477 if (dest != operands[0])
2478 emit_move_insn (operands[0], dest);
2479 }
2480
2481 void
2482 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2483 {
2484 if (TARGET_HARD_QUAD)
2485 emit_hard_tfmode_operation (code, operands);
2486 else
2487 emit_soft_tfmode_binop (code, operands);
2488 }
2489
2490 void
2491 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2492 {
2493 if (TARGET_HARD_QUAD)
2494 emit_hard_tfmode_operation (code, operands);
2495 else
2496 emit_soft_tfmode_unop (code, operands);
2497 }
2498
2499 void
2500 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2501 {
2502 if (TARGET_HARD_QUAD)
2503 emit_hard_tfmode_operation (code, operands);
2504 else
2505 emit_soft_tfmode_cvt (code, operands);
2506 }
2507 \f
2508 /* Return nonzero if a branch/jump/call instruction will be emitting
2509 a nop into its delay slot. */
2510
2511 int
2512 empty_delay_slot (rtx insn)
2513 {
2514 rtx seq;
2515
2516 /* If there is no previous instruction (this should not happen), return true. */
2517 if (PREV_INSN (insn) == NULL)
2518 return 1;
2519
2520 seq = NEXT_INSN (PREV_INSN (insn));
2521 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2522 return 0;
2523
2524 return 1;
2525 }
2526
2527 /* Return nonzero if TRIAL can go into the call delay slot. */
2528
2529 int
2530 tls_call_delay (rtx trial)
2531 {
2532 rtx pat;
2533
2534 /* Binutils allows
2535 call __tls_get_addr, %tgd_call (foo)
2536 add %l7, %o0, %o0, %tgd_add (foo)
2537 while Sun as/ld does not. */
2538 if (TARGET_GNU_TLS || !TARGET_TLS)
2539 return 1;
2540
2541 pat = PATTERN (trial);
2542
2543 /* We must reject tgd_add{32|64}, i.e.
2544 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2545 and tldm_add{32|64}, i.e.
2546 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2547 for Sun as/ld. */
2548 if (GET_CODE (pat) == SET
2549 && GET_CODE (SET_SRC (pat)) == PLUS)
2550 {
2551 rtx unspec = XEXP (SET_SRC (pat), 1);
2552
2553 if (GET_CODE (unspec) == UNSPEC
2554 && (XINT (unspec, 1) == UNSPEC_TLSGD
2555 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2556 return 0;
2557 }
2558
2559 return 1;
2560 }
2561
2562 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2563 instruction. RETURN_P is true if the v9 variant 'return' is to be
2564 considered in the test too.
2565
2566 TRIAL must be a SET whose destination is a REG appropriate for the
2567 'restore' instruction or, if RETURN_P is true, for the 'return'
2568 instruction. */
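/* For example, a function whose body is "return a + 1" can end with

       ret
        restore %i0, 1, %o0

   the addition being carried out by the restore itself.  */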
2569
2570 static int
2571 eligible_for_restore_insn (rtx trial, bool return_p)
2572 {
2573 rtx pat = PATTERN (trial);
2574 rtx src = SET_SRC (pat);
2575
2576 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2577 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2578 && arith_operand (src, GET_MODE (src)))
2579 {
2580 if (TARGET_ARCH64)
2581 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2582 else
2583 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2584 }
2585
2586 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2587 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2588 && arith_double_operand (src, GET_MODE (src)))
2589 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2590
2591 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2592 else if (! TARGET_FPU && register_operand (src, SFmode))
2593 return 1;
2594
2595 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2596 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2597 return 1;
2598
2599 /* If we have the 'return' instruction, anything that does not use
2600 local or output registers and can go into a delay slot wins. */
2601 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2602 && (get_attr_in_uncond_branch_delay (trial)
2603 == IN_UNCOND_BRANCH_DELAY_TRUE))
2604 return 1;
2605
2606 /* The 'restore src1,src2,dest' pattern for SImode. */
2607 else if (GET_CODE (src) == PLUS
2608 && register_operand (XEXP (src, 0), SImode)
2609 && arith_operand (XEXP (src, 1), SImode))
2610 return 1;
2611
2612 /* The 'restore src1,src2,dest' pattern for DImode. */
2613 else if (GET_CODE (src) == PLUS
2614 && register_operand (XEXP (src, 0), DImode)
2615 && arith_double_operand (XEXP (src, 1), DImode))
2616 return 1;
2617
2618 /* The 'restore src1,%lo(src2),dest' pattern. */
2619 else if (GET_CODE (src) == LO_SUM
2620 && ! TARGET_CM_MEDMID
2621 && ((register_operand (XEXP (src, 0), SImode)
2622 && immediate_operand (XEXP (src, 1), SImode))
2623 || (TARGET_ARCH64
2624 && register_operand (XEXP (src, 0), DImode)
2625 && immediate_operand (XEXP (src, 1), DImode))))
2626 return 1;
2627
2628 /* The 'restore src,src,dest' pattern. */
2629 else if (GET_CODE (src) == ASHIFT
2630 && (register_operand (XEXP (src, 0), SImode)
2631 || register_operand (XEXP (src, 0), DImode))
2632 && XEXP (src, 1) == const1_rtx)
2633 return 1;
2634
2635 return 0;
2636 }
2637
2638 /* Return nonzero if TRIAL can go into the function return's
2639 delay slot. */
2640
2641 int
2642 eligible_for_return_delay (rtx trial)
2643 {
2644 rtx pat;
2645
2646 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2647 return 0;
2648
2649 if (get_attr_length (trial) != 1)
2650 return 0;
2651
2652 /* If there are any call-saved registers, we would need to verify that TRIAL
2653 does not reference any of them. For now, just punt and reject the slot. */
2654 if (num_gfregs)
2655 return 0;
2656
2657 /* If the function uses __builtin_eh_return, the eh_return machinery
2658 occupies the delay slot. */
2659 if (crtl->calls_eh_return)
2660 return 0;
2661
2662 /* In the case of a true leaf function, anything can go into the slot. */
2663 if (sparc_leaf_function_p)
2664 return get_attr_in_uncond_branch_delay (trial)
2665 == IN_UNCOND_BRANCH_DELAY_TRUE;
2666
2667 pat = PATTERN (trial);
2668
2669 /* Otherwise, only operations which can be done in tandem with
2670 a `restore' or `return' insn can go into the delay slot. */
2671 if (GET_CODE (SET_DEST (pat)) != REG
2672 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2673 return 0;
2674
2675 /* If this instruction sets up a floating-point register and we have a return
2676 instruction, it can probably go in. But restore will not work
2677 with FP_REGS. */
2678 if (REGNO (SET_DEST (pat)) >= 32)
2679 return (TARGET_V9
2680 && ! epilogue_renumber (&pat, 1)
2681 && (get_attr_in_uncond_branch_delay (trial)
2682 == IN_UNCOND_BRANCH_DELAY_TRUE));
2683
2684 return eligible_for_restore_insn (trial, true);
2685 }
2686
2687 /* Return nonzero if TRIAL can go into the sibling call's
2688 delay slot. */
2689
2690 int
2691 eligible_for_sibcall_delay (rtx trial)
2692 {
2693 rtx pat;
2694
2695 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2696 return 0;
2697
2698 if (get_attr_length (trial) != 1)
2699 return 0;
2700
2701 pat = PATTERN (trial);
2702
2703 if (sparc_leaf_function_p)
2704 {
2705 /* If the tail call is done using the call instruction,
2706 we have to restore %o7 in the delay slot. */
2707 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2708 return 0;
2709
2710 /* %g1 is used to build the function address */
2711 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2712 return 0;
2713
2714 return 1;
2715 }
2716
2717 /* Otherwise, only operations which can be done in tandem with
2718 a `restore' insn can go into the delay slot. */
2719 if (GET_CODE (SET_DEST (pat)) != REG
2720 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2721 || REGNO (SET_DEST (pat)) >= 32)
2722 return 0;
2723
2724 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2725 in most cases. */
2726 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2727 return 0;
2728
2729 return eligible_for_restore_insn (trial, false);
2730 }
2731
2732 int
2733 short_branch (int uid1, int uid2)
2734 {
2735 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2736
2737 /* Leave a few words of "slop". */
2738 if (delta >= -1023 && delta <= 1022)
2739 return 1;
2740
2741 return 0;
2742 }
2743
2744 /* Return nonzero if REG is not used after INSN.
2745 We assume REG is a reload reg, and therefore does
2746 not live past labels or calls or jumps. */
2747 int
2748 reg_unused_after (rtx reg, rtx insn)
2749 {
2750 enum rtx_code code, prev_code = UNKNOWN;
2751
2752 while ((insn = NEXT_INSN (insn)))
2753 {
2754 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2755 return 1;
2756
2757 code = GET_CODE (insn);
2758 if (code == CODE_LABEL)
2759 return 1;
2760
2761 if (INSN_P (insn))
2762 {
2763 rtx set = single_set (insn);
2764 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2765 if (set && in_src)
2766 return 0;
2767 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2768 return 1;
2769 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2770 return 0;
2771 }
2772 prev_code = code;
2773 }
2774 return 1;
2775 }
2776 \f
2777 /* Determine if it's legal to put X into the constant pool. This
2778 is not possible if X contains the address of a symbol that is
2779 not constant (TLS) or not known at final link time (PIC). */
2780
2781 static bool
2782 sparc_cannot_force_const_mem (rtx x)
2783 {
2784 switch (GET_CODE (x))
2785 {
2786 case CONST_INT:
2787 case CONST_DOUBLE:
2788 case CONST_VECTOR:
2789 /* Accept all non-symbolic constants. */
2790 return false;
2791
2792 case LABEL_REF:
2793 /* Labels are OK iff we are non-PIC. */
2794 return flag_pic != 0;
2795
2796 case SYMBOL_REF:
2797 /* 'Naked' TLS symbol references are never OK,
2798 non-TLS symbols are OK iff we are non-PIC. */
2799 if (SYMBOL_REF_TLS_MODEL (x))
2800 return true;
2801 else
2802 return flag_pic != 0;
2803
2804 case CONST:
2805 return sparc_cannot_force_const_mem (XEXP (x, 0));
2806 case PLUS:
2807 case MINUS:
2808 return sparc_cannot_force_const_mem (XEXP (x, 0))
2809 || sparc_cannot_force_const_mem (XEXP (x, 1));
2810 case UNSPEC:
2811 return true;
2812 default:
2813 gcc_unreachable ();
2814 }
2815 }
2816 \f
2817 /* PIC support. */
2818 static GTY(()) char pic_helper_symbol_name[256];
2819 static GTY(()) rtx pic_helper_symbol;
2820 static GTY(()) bool pic_helper_emitted_p = false;
2821 static GTY(()) rtx global_offset_table;
2822
2823 /* Ensure that we are not using patterns that are not OK with PIC. */
2824
2825 int
2826 check_pic (int i)
2827 {
2828 switch (flag_pic)
2829 {
2830 case 1:
2831 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2832 && (GET_CODE (recog_data.operand[i]) != CONST
2833 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2834 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2835 == global_offset_table)
2836 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2837 == CONST))));
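/* Fall through.  */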
2838 case 2:
2839 default:
2840 return 1;
2841 }
2842 }
2843
2844 /* Return true if X is an address which needs a temporary register when
2845 reloaded while generating PIC code. */
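/* For example, "sym + 0x12345" needs one: the 0x12345 offset does not
   fit in a 13-bit immediate, so it cannot be folded into the PIC
   address computation directly.  */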
2846
2847 int
2848 pic_address_needs_scratch (rtx x)
2849 {
2850 /* An address which is a symbolic operand plus a non-SMALL_INT offset needs a temp reg. */
2851 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2852 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2853 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2854 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2855 return 1;
2856
2857 return 0;
2858 }
2859
2860 /* Determine if a given RTX is a valid constant. We already know this
2861 satisfies CONSTANT_P. */
2862
2863 bool
2864 legitimate_constant_p (rtx x)
2865 {
2866 rtx inner;
2867
2868 switch (GET_CODE (x))
2869 {
2870 case SYMBOL_REF:
2871 /* TLS symbols are not constant. */
2872 if (SYMBOL_REF_TLS_MODEL (x))
2873 return false;
2874 break;
2875
2876 case CONST:
2877 inner = XEXP (x, 0);
2878
2879 /* Offsets of TLS symbols are never valid.
2880 Discourage CSE from creating them. */
2881 if (GET_CODE (inner) == PLUS
2882 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2883 return false;
2884 break;
2885
2886 case CONST_DOUBLE:
2887 if (GET_MODE (x) == VOIDmode)
2888 return true;
2889
2890 /* Floating point constants are generally not ok.
2891 The only exception is 0.0 in VIS. */
2892 if (TARGET_VIS
2893 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2894 && const_zero_operand (x, GET_MODE (x)))
2895 return true;
2896
2897 return false;
2898
2899 case CONST_VECTOR:
2900 /* Vector constants are generally not ok.
2901 The only exception is 0 in VIS. */
2902 if (TARGET_VIS
2903 && const_zero_operand (x, GET_MODE (x)))
2904 return true;
2905
2906 return false;
2907
2908 default:
2909 break;
2910 }
2911
2912 return true;
2913 }
2914
2915 /* Determine if a given RTX is a valid constant address. */
2916
2917 bool
2918 constant_address_p (rtx x)
2919 {
2920 switch (GET_CODE (x))
2921 {
2922 case LABEL_REF:
2923 case CONST_INT:
2924 case HIGH:
2925 return true;
2926
2927 case CONST:
2928 if (flag_pic && pic_address_needs_scratch (x))
2929 return false;
2930 return legitimate_constant_p (x);
2931
2932 case SYMBOL_REF:
2933 return !flag_pic && legitimate_constant_p (x);
2934
2935 default:
2936 return false;
2937 }
2938 }
2939
2940 /* Nonzero if the constant value X is a legitimate general operand
2941 when generating PIC code. It is given that flag_pic is on and
2942 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2943
2944 bool
2945 legitimate_pic_operand_p (rtx x)
2946 {
2947 if (pic_address_needs_scratch (x))
2948 return false;
2949 if (SPARC_SYMBOL_REF_TLS_P (x)
2950 || (GET_CODE (x) == CONST
2951 && GET_CODE (XEXP (x, 0)) == PLUS
2952 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2953 return false;
2954 return true;
2955 }
2956
2957 /* Return nonzero if ADDR is a valid memory address.
2958 STRICT specifies whether strict register checking applies. */
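/* The accepted shapes are essentially REG, REG + REG, REG + SIMM13 and
   LO_SUM (REG, imm), e.g. "ld [%i0 + 8], %g1" or
   "ld [%g1 + %lo(sym)], %g2", subject to the restrictions checked below.  */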
2959
2960 int
2961 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2962 {
2963 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2964
2965 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2966 rs1 = addr;
2967 else if (GET_CODE (addr) == PLUS)
2968 {
2969 rs1 = XEXP (addr, 0);
2970 rs2 = XEXP (addr, 1);
2971
2972 /* Canonicalize. REG comes first, if there are no regs,
2973 LO_SUM comes first. */
2974 if (!REG_P (rs1)
2975 && GET_CODE (rs1) != SUBREG
2976 && (REG_P (rs2)
2977 || GET_CODE (rs2) == SUBREG
2978 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2979 {
2980 rs1 = XEXP (addr, 1);
2981 rs2 = XEXP (addr, 0);
2982 }
2983
2984 if ((flag_pic == 1
2985 && rs1 == pic_offset_table_rtx
2986 && !REG_P (rs2)
2987 && GET_CODE (rs2) != SUBREG
2988 && GET_CODE (rs2) != LO_SUM
2989 && GET_CODE (rs2) != MEM
2990 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2991 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2992 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2993 || ((REG_P (rs1)
2994 || GET_CODE (rs1) == SUBREG)
2995 && RTX_OK_FOR_OFFSET_P (rs2)))
2996 {
2997 imm1 = rs2;
2998 rs2 = NULL;
2999 }
3000 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3001 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3002 {
3003 /* We prohibit REG + REG for TFmode when there are no quad move insns
3004 and we consequently need to split. We do this because REG+REG
3005 is not an offsettable address. If we get the situation in reload
3006 where source and destination of a movtf pattern are both MEMs with
3007 REG+REG address, then only one of them gets converted to an
3008 offsettable address. */
3009 if (mode == TFmode
3010 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3011 return 0;
3012
3013 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
3014 optimizing, because then mem_min_alignment is likely to be zero
3015 after reload and the forced split would lack a matching splitter
3016 pattern. */
3017 if (TARGET_ARCH32 && !optimize
3018 && (mode == DFmode || mode == DImode))
3019 return 0;
3020 }
3021 else if (USE_AS_OFFSETABLE_LO10
3022 && GET_CODE (rs1) == LO_SUM
3023 && TARGET_ARCH64
3024 && ! TARGET_CM_MEDMID
3025 && RTX_OK_FOR_OLO10_P (rs2))
3026 {
3027 rs2 = NULL;
3028 imm1 = XEXP (rs1, 1);
3029 rs1 = XEXP (rs1, 0);
3030 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3031 return 0;
3032 }
3033 }
3034 else if (GET_CODE (addr) == LO_SUM)
3035 {
3036 rs1 = XEXP (addr, 0);
3037 imm1 = XEXP (addr, 1);
3038
3039 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3040 return 0;
3041
3042 /* We can't allow TFmode in 32-bit mode, because an offset greater
3043 than the alignment (8) may cause the LO_SUM to overflow. */
3044 if (mode == TFmode && TARGET_ARCH32)
3045 return 0;
3046 }
3047 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3048 return 1;
3049 else
3050 return 0;
3051
3052 if (GET_CODE (rs1) == SUBREG)
3053 rs1 = SUBREG_REG (rs1);
3054 if (!REG_P (rs1))
3055 return 0;
3056
3057 if (rs2)
3058 {
3059 if (GET_CODE (rs2) == SUBREG)
3060 rs2 = SUBREG_REG (rs2);
3061 if (!REG_P (rs2))
3062 return 0;
3063 }
3064
3065 if (strict)
3066 {
3067 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3068 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3069 return 0;
3070 }
3071 else
3072 {
3073 if ((REGNO (rs1) >= 32
3074 && REGNO (rs1) != FRAME_POINTER_REGNUM
3075 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3076 || (rs2
3077 && (REGNO (rs2) >= 32
3078 && REGNO (rs2) != FRAME_POINTER_REGNUM
3079 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3080 return 0;
3081 }
3082 return 1;
3083 }
3084
3085 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3086
3087 static GTY(()) rtx sparc_tls_symbol;
3088
3089 static rtx
3090 sparc_tls_get_addr (void)
3091 {
3092 if (!sparc_tls_symbol)
3093 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3094
3095 return sparc_tls_symbol;
3096 }
3097
3098 static rtx
3099 sparc_tls_got (void)
3100 {
3101 rtx temp;
3102 if (flag_pic)
3103 {
3104 crtl->uses_pic_offset_table = 1;
3105 return pic_offset_table_rtx;
3106 }
3107
3108 if (!global_offset_table)
3109 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3110 temp = gen_reg_rtx (Pmode);
3111 emit_move_insn (temp, global_offset_table);
3112 return temp;
3113 }
3114
3115 /* Return 1 if *X is a thread-local symbol. */
3116
3117 static int
3118 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3119 {
3120 return SPARC_SYMBOL_REF_TLS_P (*x);
3121 }
3122
3123 /* Return 1 if X contains a thread-local symbol. */
3124
3125 bool
3126 sparc_tls_referenced_p (rtx x)
3127 {
3128 if (!TARGET_HAVE_TLS)
3129 return false;
3130
3131 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3132 }
3133
3134 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3135 this (thread-local) address. */
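/* For the global-dynamic model, for instance, the emitted code
   corresponds to the standard SPARC TLS sequence, roughly:

       sethi   %tgd_hi22(sym), %reg1
       add     %reg1, %tgd_lo10(sym), %reg2
       add     %l7, %reg2, %o0, %tgd_add(sym)
       call    __tls_get_addr, %tgd_call(sym)
        nop
*/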
3136
3137 rtx
3138 legitimize_tls_address (rtx addr)
3139 {
3140 rtx temp1, temp2, temp3, ret, o0, got, insn;
3141
3142 gcc_assert (can_create_pseudo_p ());
3143
3144 if (GET_CODE (addr) == SYMBOL_REF)
3145 switch (SYMBOL_REF_TLS_MODEL (addr))
3146 {
3147 case TLS_MODEL_GLOBAL_DYNAMIC:
3148 start_sequence ();
3149 temp1 = gen_reg_rtx (SImode);
3150 temp2 = gen_reg_rtx (SImode);
3151 ret = gen_reg_rtx (Pmode);
3152 o0 = gen_rtx_REG (Pmode, 8);
3153 got = sparc_tls_got ();
3154 emit_insn (gen_tgd_hi22 (temp1, addr));
3155 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3156 if (TARGET_ARCH32)
3157 {
3158 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3159 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3160 addr, const1_rtx));
3161 }
3162 else
3163 {
3164 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3165 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3166 addr, const1_rtx));
3167 }
3168 CALL_INSN_FUNCTION_USAGE (insn)
3169 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3170 CALL_INSN_FUNCTION_USAGE (insn));
3171 insn = get_insns ();
3172 end_sequence ();
3173 emit_libcall_block (insn, ret, o0, addr);
3174 break;
3175
3176 case TLS_MODEL_LOCAL_DYNAMIC:
3177 start_sequence ();
3178 temp1 = gen_reg_rtx (SImode);
3179 temp2 = gen_reg_rtx (SImode);
3180 temp3 = gen_reg_rtx (Pmode);
3181 ret = gen_reg_rtx (Pmode);
3182 o0 = gen_rtx_REG (Pmode, 8);
3183 got = sparc_tls_got ();
3184 emit_insn (gen_tldm_hi22 (temp1));
3185 emit_insn (gen_tldm_lo10 (temp2, temp1));
3186 if (TARGET_ARCH32)
3187 {
3188 emit_insn (gen_tldm_add32 (o0, got, temp2));
3189 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3190 const1_rtx));
3191 }
3192 else
3193 {
3194 emit_insn (gen_tldm_add64 (o0, got, temp2));
3195 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3196 const1_rtx));
3197 }
3198 CALL_INSN_FUNCTION_USAGE (insn)
3199 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3200 CALL_INSN_FUNCTION_USAGE (insn));
3201 insn = get_insns ();
3202 end_sequence ();
3203 emit_libcall_block (insn, temp3, o0,
3204 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3205 UNSPEC_TLSLD_BASE));
3206 temp1 = gen_reg_rtx (SImode);
3207 temp2 = gen_reg_rtx (SImode);
3208 emit_insn (gen_tldo_hix22 (temp1, addr));
3209 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3210 if (TARGET_ARCH32)
3211 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3212 else
3213 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3214 break;
3215
3216 case TLS_MODEL_INITIAL_EXEC:
3217 temp1 = gen_reg_rtx (SImode);
3218 temp2 = gen_reg_rtx (SImode);
3219 temp3 = gen_reg_rtx (Pmode);
3220 got = sparc_tls_got ();
3221 emit_insn (gen_tie_hi22 (temp1, addr));
3222 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3223 if (TARGET_ARCH32)
3224 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3225 else
3226 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3227 if (TARGET_SUN_TLS)
3228 {
3229 ret = gen_reg_rtx (Pmode);
3230 if (TARGET_ARCH32)
3231 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3232 temp3, addr));
3233 else
3234 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3235 temp3, addr));
3236 }
3237 else
3238 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3239 break;
3240
3241 case TLS_MODEL_LOCAL_EXEC:
3242 temp1 = gen_reg_rtx (Pmode);
3243 temp2 = gen_reg_rtx (Pmode);
3244 if (TARGET_ARCH32)
3245 {
3246 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3247 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3248 }
3249 else
3250 {
3251 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3252 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3253 }
3254 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3255 break;
3256
3257 default:
3258 gcc_unreachable ();
3259 }
3260
3261 else
3262 gcc_unreachable (); /* for now ... */
3263
3264 return ret;
3265 }
3266
3267
3268 /* Legitimize PIC addresses. If the address is already position-independent,
3269 we return ORIG. Newly generated position-independent addresses go into a
3270 reg. This is REG if nonzero, otherwise we allocate register(s) as
3271 necessary. */
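/* With flag_pic == 2, a global symbol is reached through its GOT slot,
   conceptually:

       sethi   %hi(sym), %tmp         ! high part of the GOT offset
       or      %tmp, %lo(sym), %tmp   ! low part
       ld      [%l7 + %tmp], %reg     ! load the GOT slot (ldx on 64-bit)

   where the %hi/%lo parts resolve against the GOT rather than the
   symbol's absolute address.  */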
3272
3273 rtx
3274 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3275 rtx reg)
3276 {
3277 if (GET_CODE (orig) == SYMBOL_REF
3278 /* See the comment in sparc_expand_move. */
3279 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3280 {
3281 rtx pic_ref, address;
3282 rtx insn;
3283
3284 if (reg == 0)
3285 {
3286 gcc_assert (! reload_in_progress && ! reload_completed);
3287 reg = gen_reg_rtx (Pmode);
3288 }
3289
3290 if (flag_pic == 2)
3291 {
3292 /* If not during reload, allocate another temp reg here for loading
3293 in the address, so that these instructions can be optimized
3294 properly. */
3295 rtx temp_reg = ((reload_in_progress || reload_completed)
3296 ? reg : gen_reg_rtx (Pmode));
3297
3298 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3299 won't get confused into thinking that these two instructions
3300 are loading in the true address of the symbol. If in the
3301 future a PIC rtx exists, that should be used instead. */
3302 if (TARGET_ARCH64)
3303 {
3304 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3305 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3306 }
3307 else
3308 {
3309 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3310 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3311 }
3312 address = temp_reg;
3313 }
3314 else
3315 address = orig;
3316
3317 pic_ref = gen_const_mem (Pmode,
3318 gen_rtx_PLUS (Pmode,
3319 pic_offset_table_rtx, address));
3320 crtl->uses_pic_offset_table = 1;
3321 insn = emit_move_insn (reg, pic_ref);
3322 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3323 by the loop optimizer. */
3324 set_unique_reg_note (insn, REG_EQUAL, orig);
3325 return reg;
3326 }
3327 else if (GET_CODE (orig) == CONST)
3328 {
3329 rtx base, offset;
3330
3331 if (GET_CODE (XEXP (orig, 0)) == PLUS
3332 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3333 return orig;
3334
3335 if (reg == 0)
3336 {
3337 gcc_assert (! reload_in_progress && ! reload_completed);
3338 reg = gen_reg_rtx (Pmode);
3339 }
3340
3341 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3342 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3343 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3344 base == reg ? 0 : reg);
3345
3346 if (GET_CODE (offset) == CONST_INT)
3347 {
3348 if (SMALL_INT (offset))
3349 return plus_constant (base, INTVAL (offset));
3350 else if (! reload_in_progress && ! reload_completed)
3351 offset = force_reg (Pmode, offset);
3352 else
3353 /* If we reach here, then something is seriously wrong. */
3354 gcc_unreachable ();
3355 }
3356 return gen_rtx_PLUS (Pmode, base, offset);
3357 }
3358 else if (GET_CODE (orig) == LABEL_REF)
3359 /* ??? Why do we do this? */
3360 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3361 the register is live instead, in case it is eliminated. */
3362 crtl->uses_pic_offset_table = 1;
3363
3364 return orig;
3365 }
3366
3367 /* Try machine-dependent ways of modifying an illegitimate address X
3368 to be legitimate. If we find one, return the new, valid address.
3369
3370 OLDX is the address as it was before break_out_memory_refs was called.
3371 In some cases it is useful to look at this to decide what needs to be done.
3372
3373 MODE is the mode of the operand pointed to by X. */
3374
3375 rtx
3376 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3377 {
3378 rtx orig_x = x;
3379
3380 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3381 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3382 force_operand (XEXP (x, 0), NULL_RTX));
3383 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3384 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3385 force_operand (XEXP (x, 1), NULL_RTX));
3386 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3387 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3388 XEXP (x, 1));
3389 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3390 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3391 force_operand (XEXP (x, 1), NULL_RTX));
3392
3393 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3394 return x;
3395
3396 if (SPARC_SYMBOL_REF_TLS_P (x))
3397 x = legitimize_tls_address (x);
3398 else if (flag_pic)
3399 x = legitimize_pic_address (x, mode, 0);
3400 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3401 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3402 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3403 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3404 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3405 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3406 else if (GET_CODE (x) == SYMBOL_REF
3407 || GET_CODE (x) == CONST
3408 || GET_CODE (x) == LABEL_REF)
3409 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3410 return x;
3411 }
3412
3413 /* Emit the special PIC helper function. */
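/* The helper is a tiny thunk that folds the return address in %o7 into
   the PIC register, something like:

       .LLADDPC0:
               jmp     %o7+8
                add    %o7, %l7, %l7

   so that the caller, which materializes _GLOBAL_OFFSET_TABLE_ relative
   to the call site, ends up with the GOT address in %l7.  */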
3414
3415 static void
3416 emit_pic_helper (void)
3417 {
3418 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3419 int align;
3420
3421 switch_to_section (text_section);
3422
3423 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3424 if (align > 0)
3425 ASM_OUTPUT_ALIGN (asm_out_file, align);
3426 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3427 if (flag_delayed_branch)
3428 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3429 pic_name, pic_name);
3430 else
3431 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3432 pic_name, pic_name);
3433
3434 pic_helper_emitted_p = true;
3435 }
3436
3437 /* Emit code to load the PIC register. */
3438
3439 static void
3440 load_pic_register (bool delay_pic_helper)
3441 {
3442 int orig_flag_pic = flag_pic;
3443
3444 if (TARGET_VXWORKS_RTP)
3445 {
3446 emit_insn (gen_vxworks_load_got ());
3447 emit_use (pic_offset_table_rtx);
3448 return;
3449 }
3450
3451 /* If we haven't initialized the special PIC symbols, do so now. */
3452 if (!pic_helper_symbol_name[0])
3453 {
3454 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3455 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3456 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3457 }
3458
3459 /* If we haven't emitted the special PIC helper function, do so now unless
3460 we are requested to delay it. */
3461 if (!delay_pic_helper && !pic_helper_emitted_p)
3462 emit_pic_helper ();
3463
3464 flag_pic = 0;
3465 if (TARGET_ARCH64)
3466 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3467 pic_helper_symbol));
3468 else
3469 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3470 pic_helper_symbol));
3471 flag_pic = orig_flag_pic;
3472
3473 /* Need to emit this whether or not we obey regdecls,
3474 since setjmp/longjmp can cause life info to screw up.
3475 ??? In the case where we don't obey regdecls, this is not sufficient
3476 since we may not fall out the bottom. */
3477 emit_use (pic_offset_table_rtx);
3478 }
3479
3480 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3481 address of the call target. */
3482
3483 void
3484 sparc_emit_call_insn (rtx pat, rtx addr)
3485 {
3486 rtx insn;
3487
3488 insn = emit_call_insn (pat);
3489
3490 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3491 if (TARGET_VXWORKS_RTP
3492 && flag_pic
3493 && GET_CODE (addr) == SYMBOL_REF
3494 && (SYMBOL_REF_DECL (addr)
3495 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3496 : !SYMBOL_REF_LOCAL_P (addr)))
3497 {
3498 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3499 crtl->uses_pic_offset_table = 1;
3500 }
3501 }
3502 \f
3503 /* Return 1 if RTX is a MEM which is known to be aligned to at
3504 least a DESIRED byte boundary. */
3505
3506 int
3507 mem_min_alignment (rtx mem, int desired)
3508 {
3509 rtx addr, base, offset;
3510
3511 /* If it's not a MEM we can't accept it. */
3512 if (GET_CODE (mem) != MEM)
3513 return 0;
3514
3515 /* Obviously... */
3516 if (!TARGET_UNALIGNED_DOUBLES
3517 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3518 return 1;
3519
3520 /* ??? The rest of the function predates MEM_ALIGN so
3521 there is probably a bit of redundancy. */
3522 addr = XEXP (mem, 0);
3523 base = offset = NULL_RTX;
3524 if (GET_CODE (addr) == PLUS)
3525 {
3526 if (GET_CODE (XEXP (addr, 0)) == REG)
3527 {
3528 base = XEXP (addr, 0);
3529
3530 /* What we are saying here is that if the base
3531 REG is aligned properly, the compiler will make
3532 sure any REG based index upon it will be so
3533 as well. */
3534 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3535 offset = XEXP (addr, 1);
3536 else
3537 offset = const0_rtx;
3538 }
3539 }
3540 else if (GET_CODE (addr) == REG)
3541 {
3542 base = addr;
3543 offset = const0_rtx;
3544 }
3545
3546 if (base != NULL_RTX)
3547 {
3548 int regno = REGNO (base);
3549
3550 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3551 {
3552 /* Check if the compiler has recorded some information
3553 about the alignment of the base REG. If reload has
3554 completed, we already matched with proper alignments.
3555 If not running global_alloc, reload might give us
3556 an unaligned pointer to the local stack, though. */
3557 if (((cfun != 0
3558 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3559 || (optimize && reload_completed))
3560 && (INTVAL (offset) & (desired - 1)) == 0)
3561 return 1;
3562 }
3563 else
3564 {
3565 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3566 return 1;
3567 }
3568 }
3569 else if (! TARGET_UNALIGNED_DOUBLES
3570 || CONSTANT_P (addr)
3571 || GET_CODE (addr) == LO_SUM)
3572 {
3573 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3574 is true, in which case we can only assume that an access is aligned if
3575 it is to a constant address, or the address involves a LO_SUM. */
3576 return 1;
3577 }
3578
3579 /* An obviously unaligned address. */
3580 return 0;
3581 }
3582
3583 \f
3584 /* Vectors to keep interesting information about registers where it can easily
3585 be looked up. We used to use the actual mode value as the bit number, but there
3586 are more than 32 modes now. Instead we use two tables: one indexed by
3587 hard register number, and one indexed by mode. */
3588
3589 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3590 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3591 mapped into one sparc_mode_class mode. */
3592
3593 enum sparc_mode_class {
3594 S_MODE, D_MODE, T_MODE, O_MODE,
3595 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3596 CC_MODE, CCFP_MODE
3597 };
3598
3599 /* Modes for single-word and smaller quantities. */
3600 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3601
3602 /* Modes for double-word and smaller quantities. */
3603 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
3604
3605 /* Modes for quad-word and smaller quantities. */
3606 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3607
3608 /* Modes for 8-word and smaller quantities. */
3609 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3610
3611 /* Modes for single-float quantities. We must allow any single word or
3612 smaller quantity. This is because the fix/float conversion instructions
3613 take integer inputs/outputs from the float registers. */
3614 #define SF_MODES (S_MODES)
3615
3616 /* Modes for double-float and smaller quantities. */
3617 #define DF_MODES (S_MODES | D_MODES)
3618
3619 /* Modes for double-float only quantities. */
3620 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3621
3622 /* Modes for quad-float only quantities. */
3623 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3624
3625 /* Modes for quad-float and smaller quantities. */
3626 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3627
3628 /* Modes for quad-float and double-float quantities. */
3629 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3630
3631 /* Modes for quad-float pair only quantities. */
3632 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3633
3634 /* Modes for quad-float pairs and smaller quantities. */
3635 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3636
3637 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3638
3639 /* Modes for condition codes. */
3640 #define CC_MODES (1 << (int) CC_MODE)
3641 #define CCFP_MODES (1 << (int) CCFP_MODE)
3642
3643 /* Value is 1 if register/mode pair is acceptable on sparc.
3644 The funny mixture of D and T modes is because integer operations
3645 do not specially operate on tetra quantities, so non-quad-aligned
3646 registers can hold quadword quantities (except %o4 and %i4 because
3647 they cross fixed registers). */
3648
3649 /* This points to either the 32 bit or the 64 bit version. */
3650 const int *hard_regno_mode_classes;
3651
3652 static const int hard_32bit_mode_classes[] = {
3653 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3654 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3655 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3656 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3657
3658 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3659 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3660 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3661 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3662
3663 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3664 and none can hold SFmode/SImode values. */
3665 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3666 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3667 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3668 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3669
3670 /* %fcc[0123] */
3671 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3672
3673 /* %icc */
3674 CC_MODES
3675 };
3676
3677 static const int hard_64bit_mode_classes[] = {
3678 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3679 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3680 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3681 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3682
3683 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3684 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3685 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3686 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3687
3688 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3689 and none can hold SFmode/SImode values. */
3690 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3691 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3692 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3693 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3694
3695 /* %fcc[0123] */
3696 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3697
3698 /* %icc */
3699 CC_MODES
3700 };
3701
3702 int sparc_mode_class [NUM_MACHINE_MODES];
3703
3704 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3705
3706 static void
3707 sparc_init_modes (void)
3708 {
3709 int i;
3710
3711 for (i = 0; i < NUM_MACHINE_MODES; i++)
3712 {
3713 switch (GET_MODE_CLASS (i))
3714 {
3715 case MODE_INT:
3716 case MODE_PARTIAL_INT:
3717 case MODE_COMPLEX_INT:
3718 if (GET_MODE_SIZE (i) <= 4)
3719 sparc_mode_class[i] = 1 << (int) S_MODE;
3720 else if (GET_MODE_SIZE (i) == 8)
3721 sparc_mode_class[i] = 1 << (int) D_MODE;
3722 else if (GET_MODE_SIZE (i) == 16)
3723 sparc_mode_class[i] = 1 << (int) T_MODE;
3724 else if (GET_MODE_SIZE (i) == 32)
3725 sparc_mode_class[i] = 1 << (int) O_MODE;
3726 else
3727 sparc_mode_class[i] = 0;
3728 break;
3729 case MODE_VECTOR_INT:
3730 if (GET_MODE_SIZE (i) <= 4)
3731 sparc_mode_class[i] = 1 << (int)SF_MODE;
3732 else if (GET_MODE_SIZE (i) == 8)
3733 sparc_mode_class[i] = 1 << (int)DF_MODE;
3734 break;
3735 case MODE_FLOAT:
3736 case MODE_COMPLEX_FLOAT:
3737 if (GET_MODE_SIZE (i) <= 4)
3738 sparc_mode_class[i] = 1 << (int) SF_MODE;
3739 else if (GET_MODE_SIZE (i) == 8)
3740 sparc_mode_class[i] = 1 << (int) DF_MODE;
3741 else if (GET_MODE_SIZE (i) == 16)
3742 sparc_mode_class[i] = 1 << (int) TF_MODE;
3743 else if (GET_MODE_SIZE (i) == 32)
3744 sparc_mode_class[i] = 1 << (int) OF_MODE;
3745 else
3746 sparc_mode_class[i] = 0;
3747 break;
3748 case MODE_CC:
3749 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3750 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3751 else
3752 sparc_mode_class[i] = 1 << (int) CC_MODE;
3753 break;
3754 default:
3755 sparc_mode_class[i] = 0;
3756 break;
3757 }
3758 }
3759
3760 if (TARGET_ARCH64)
3761 hard_regno_mode_classes = hard_64bit_mode_classes;
3762 else
3763 hard_regno_mode_classes = hard_32bit_mode_classes;
3764
3765 /* Initialize the array used by REGNO_REG_CLASS. */
3766 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3767 {
3768 if (i < 16 && TARGET_V8PLUS)
3769 sparc_regno_reg_class[i] = I64_REGS;
3770 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3771 sparc_regno_reg_class[i] = GENERAL_REGS;
3772 else if (i < 64)
3773 sparc_regno_reg_class[i] = FP_REGS;
3774 else if (i < 96)
3775 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3776 else if (i < 100)
3777 sparc_regno_reg_class[i] = FPCC_REGS;
3778 else
3779 sparc_regno_reg_class[i] = NO_REGS;
3780 }
3781 }
3782 \f
3783 /* Compute the frame size required by the function. This function is called
3784 during the reload pass and also by sparc_expand_prologue. */
3785
3786 HOST_WIDE_INT
3787 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3788 {
3789 int outgoing_args_size = (crtl->outgoing_args_size
3790 + REG_PARM_STACK_SPACE (current_function_decl));
3791 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3792 int i;
3793
3794 if (TARGET_ARCH64)
3795 {
3796 for (i = 0; i < 8; i++)
3797 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3798 n_regs += 2;
3799 }
3800 else
3801 {
3802 for (i = 0; i < 8; i += 2)
3803 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3804 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3805 n_regs += 2;
3806 }
3807
3808 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3809 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3810 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3811 n_regs += 2;
3812
3813 /* Set up values for use in prologue and epilogue. */
3814 num_gfregs = n_regs;
3815
3816 if (leaf_function_p
3817 && n_regs == 0
3818 && size == 0
3819 && crtl->outgoing_args_size == 0)
3820 actual_fsize = apparent_fsize = 0;
3821 else
3822 {
3823 /* We subtract STARTING_FRAME_OFFSET; remember that it's negative.  */
3824 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3825 apparent_fsize += n_regs * 4;
3826 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3827 }
3828
3829 /* Make sure nothing can clobber our register windows.
3830 If a SAVE must be done, or there is a stack-local variable,
3831 the register window area must be allocated. */
3832 if (! leaf_function_p || size > 0)
3833 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3834
3835 return SPARC_STACK_ALIGN (actual_fsize);
3836 }
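/* Illustrative worked example (added commentary; all values assumed, not
   taken from a real compilation): for a 32-bit non-leaf function with
   SIZE == 40 bytes of locals, no call-saved global/FP registers
   (n_regs == 0), 8 bytes of outgoing arguments, and taking
   STARTING_FRAME_OFFSET as 0 for simplicity, the computation above gives

       apparent_fsize = ((40 + 7) & -8) + 0 * 4 = 40
       actual_fsize   = 40 + ((8 + 7) & -8)     = 48

   after which the non-leaf path adds FIRST_PARM_OFFSET for the register
   window save area and SPARC_STACK_ALIGN rounds the final total.  */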
3837
3838 /* Output any necessary .register pseudo-ops. */
3839
3840 void
3841 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3842 {
3843 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3844 int i;
3845
3846 if (TARGET_ARCH32)
3847 return;
3848
3849 /* Check if %g[2367] were used without
3850 .register being printed for them already. */
3851 for (i = 2; i < 8; i++)
3852 {
3853 if (df_regs_ever_live_p (i)
3854 && ! sparc_hard_reg_printed [i])
3855 {
3856 sparc_hard_reg_printed [i] = 1;
3857 /* %g7 is used as the TLS base register; use #ignore
3858 for it instead of #scratch.  */
3859 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3860 i == 7 ? "ignore" : "scratch");
3861 }
3862 if (i == 3) i = 5;
3863 }
3864 #endif
3865 }
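/* Illustrative sketch (added commentary, not from the original sources):
   in a 64-bit function where %g2 and %g3 are both live, the loop above
   would emit

       .register	%g2, #scratch
       .register	%g3, #scratch

   whereas a live %g7 would get "#ignore" instead, per the TLS note.  */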
3866
3867 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3868 as needed. LOW should be double-word aligned for 32-bit registers.
3869 Return the new OFFSET. */
3870
3871 #define SORR_SAVE 0
3872 #define SORR_RESTORE 1
3873
3874 static int
3875 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3876 {
3877 rtx mem, insn;
3878 int i;
3879
3880 if (TARGET_ARCH64 && high <= 32)
3881 {
3882 for (i = low; i < high; i++)
3883 {
3884 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3885 {
3886 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3887 set_mem_alias_set (mem, sparc_sr_alias_set);
3888 if (action == SORR_SAVE)
3889 {
3890 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3891 RTX_FRAME_RELATED_P (insn) = 1;
3892 }
3893 else /* action == SORR_RESTORE */
3894 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3895 offset += 8;
3896 }
3897 }
3898 }
3899 else
3900 {
3901 for (i = low; i < high; i += 2)
3902 {
3903 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
3904 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
3905 enum machine_mode mode;
3906 int regno;
3907
3908 if (reg0 && reg1)
3909 {
3910 mode = i < 32 ? DImode : DFmode;
3911 regno = i;
3912 }
3913 else if (reg0)
3914 {
3915 mode = i < 32 ? SImode : SFmode;
3916 regno = i;
3917 }
3918 else if (reg1)
3919 {
3920 mode = i < 32 ? SImode : SFmode;
3921 regno = i + 1;
3922 offset += 4;
3923 }
3924 else
3925 continue;
3926
3927 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3928 set_mem_alias_set (mem, sparc_sr_alias_set);
3929 if (action == SORR_SAVE)
3930 {
3931 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3932 RTX_FRAME_RELATED_P (insn) = 1;
3933 }
3934 else /* action == SORR_RESTORE */
3935 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3936
3937 /* Always preserve double-word alignment. */
3938 offset = (offset + 7) & -8;
3939 }
3940 }
3941
3942 return offset;
3943 }
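/* Worked example of the pair loop above (assumed register liveness, for
   illustration only): if only the odd register of a pair is live (the
   reg1-only case), OFFSET is first bumped by 4, the single SImode/SFmode
   slot is accessed at that misaligned offset, and the closing
   (offset + 7) & -8 then rounds OFFSET up to the next double word,
   e.g. 0 -> 4 -> 8.  */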
3944
3945 /* Emit code to save or restore call-saved registers, depending on ACTION.  */
3946
3947 static void
3948 emit_save_or_restore_regs (int action)
3949 {
3950 HOST_WIDE_INT offset;
3951 rtx base;
3952
3953 offset = frame_base_offset - apparent_fsize;
3954
3955 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3956 {
3957 /* ??? This might be optimized a little as %g1 might already have a
3958 value close enough that a single add insn will do. */
3959 /* ??? Although, all of this is probably only a temporary fix
3960 because if %g1 can hold a function result, then
3961 sparc_expand_epilogue will lose (the result will be
3962 clobbered). */
3963 base = gen_rtx_REG (Pmode, 1);
3964 emit_move_insn (base, GEN_INT (offset));
3965 emit_insn (gen_rtx_SET (VOIDmode,
3966 base,
3967 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3968 offset = 0;
3969 }
3970 else
3971 base = frame_base_reg;
3972
3973 offset = save_or_restore_regs (0, 8, base, offset, action);
3974 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3975 }
3976
3977 /* Generate a save_register_window insn. */
3978
3979 static rtx
3980 gen_save_register_window (rtx increment)
3981 {
3982 if (TARGET_ARCH64)
3983 return gen_save_register_windowdi (increment);
3984 else
3985 return gen_save_register_windowsi (increment);
3986 }
3987
3988 /* Generate an increment for the stack pointer. */
3989
3990 static rtx
3991 gen_stack_pointer_inc (rtx increment)
3992 {
3993 return gen_rtx_SET (VOIDmode,
3994 stack_pointer_rtx,
3995 gen_rtx_PLUS (Pmode,
3996 stack_pointer_rtx,
3997 increment));
3998 }
3999
4000 /* Generate a decrement for the stack pointer. */
4001
4002 static rtx
4003 gen_stack_pointer_dec (rtx decrement)
4004 {
4005 return gen_rtx_SET (VOIDmode,
4006 stack_pointer_rtx,
4007 gen_rtx_MINUS (Pmode,
4008 stack_pointer_rtx,
4009 decrement));
4010 }
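/* For reference (added commentary): the two helpers above simply build
   RTL of the form

       (set (reg %sp) (plus  (reg %sp) increment))   ;; inc
       (set (reg %sp) (minus (reg %sp) decrement))   ;; dec

   which the backend matches with ordinary add/sub patterns.  */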
4011
4012 /* Expand the function prologue. The prologue is responsible for reserving
4013 storage for the frame, saving the call-saved registers and loading the
4014 PIC register if needed. */
4015
4016 void
4017 sparc_expand_prologue (void)
4018 {
4019 rtx insn;
4020 int i;
4021
4022 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4023 on the final value of the flag means deferring the prologue/epilogue
4024 expansion until just before the second scheduling pass, which is too
4025 late to emit multiple epilogues or return insns.
4026
4027 Of course we are making the assumption that the value of the flag
4028 will not change between now and its final value. Of the three parts
4029 of the formula, only the last one can reasonably vary. Let's take a
4030 closer look, after assuming that the first two are set to true
4031 (otherwise the last value is effectively silenced).
4032
4033 If only_leaf_regs_used returns false, the global predicate will also
4034 be false so the actual frame size calculated below will be positive.
4035 As a consequence, the save_register_window insn will be emitted in
4036 the instruction stream; now this insn explicitly references %fp
4037 which is not a leaf register so only_leaf_regs_used will always
4038 return false subsequently.
4039
4040 If only_leaf_regs_used returns true, we hope that the subsequent
4041 optimization passes won't cause non-leaf registers to pop up. For
4042 example, the regrename pass has special provisions to not rename to
4043 non-leaf registers in a leaf function. */
4044 sparc_leaf_function_p
4045 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4046
4047 /* Need to use actual_fsize, since we are also allocating
4048 space for our callee (and our own register save area). */
4049 actual_fsize
4050 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4051
4052 /* Advertise that the data calculated just above are now valid. */
4053 sparc_prologue_data_valid_p = true;
4054
4055 if (sparc_leaf_function_p)
4056 {
4057 frame_base_reg = stack_pointer_rtx;
4058 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4059 }
4060 else
4061 {
4062 frame_base_reg = hard_frame_pointer_rtx;
4063 frame_base_offset = SPARC_STACK_BIAS;
4064 }
4065
4066 if (actual_fsize == 0)
4067 /* do nothing. */ ;
4068 else if (sparc_leaf_function_p)
4069 {
4070 if (actual_fsize <= 4096)
4071 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4072 else if (actual_fsize <= 8192)
4073 {
4074 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4075 /* %sp is still the CFA register. */
4076 RTX_FRAME_RELATED_P (insn) = 1;
4077 insn
4078 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4079 }
4080 else
4081 {
4082 rtx reg = gen_rtx_REG (Pmode, 1);
4083 emit_move_insn (reg, GEN_INT (-actual_fsize));
4084 insn = emit_insn (gen_stack_pointer_inc (reg));
4085 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4086 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4087 }
4088
4089 RTX_FRAME_RELATED_P (insn) = 1;
4090 }
4091 else
4092 {
4093 if (actual_fsize <= 4096)
4094 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4095 else if (actual_fsize <= 8192)
4096 {
4097 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4098 /* %sp is not the CFA register anymore. */
4099 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4100 }
4101 else
4102 {
4103 rtx reg = gen_rtx_REG (Pmode, 1);
4104 emit_move_insn (reg, GEN_INT (-actual_fsize));
4105 insn = emit_insn (gen_save_register_window (reg));
4106 }
4107
4108 RTX_FRAME_RELATED_P (insn) = 1;
4109 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4110 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4111 }
4112
4113 if (num_gfregs)
4114 emit_save_or_restore_regs (SORR_SAVE);
4115
4116 /* Load the PIC register if needed. */
4117 if (flag_pic && crtl->uses_pic_offset_table)
4118 load_pic_register (false);
4119 }
4120
4121 /* This function generates the assembly code for function entry, which boils
4122 down to emitting the necessary .register directives. */
4123
4124 static void
4125 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4126 {
4127 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4128 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4129
4130 sparc_output_scratch_registers (file);
4131 }
4132
4133 /* Expand the function epilogue, either normal or part of a sibcall.
4134 We emit all the instructions except the return or the call. */
4135
4136 void
4137 sparc_expand_epilogue (void)
4138 {
4139 if (num_gfregs)
4140 emit_save_or_restore_regs (SORR_RESTORE);
4141
4142 if (actual_fsize == 0)
4143 /* do nothing. */ ;
4144 else if (sparc_leaf_function_p)
4145 {
4146 if (actual_fsize <= 4096)
4147 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4148 else if (actual_fsize <= 8192)
4149 {
4150 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4151 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4152 }
4153 else
4154 {
4155 rtx reg = gen_rtx_REG (Pmode, 1);
4156 emit_move_insn (reg, GEN_INT (-actual_fsize));
4157 emit_insn (gen_stack_pointer_dec (reg));
4158 }
4159 }
4160 }
4161
4162 /* Return true if it is appropriate to emit `return' instructions in the
4163 body of a function. */
4164
4165 bool
4166 sparc_can_use_return_insn_p (void)
4167 {
4168 return sparc_prologue_data_valid_p
4169 && (actual_fsize == 0 || !sparc_leaf_function_p);
4170 }
4171
4172 /* This function generates the assembly code for function exit. */
4173
4174 static void
4175 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4176 {
4177 /* If code does not drop into the epilogue, we still have to output
4178 a dummy nop for the sake of sane backtraces. Otherwise, if the
4179 last two instructions of a function were "call foo; dslot;" this
4180 can make the return PC of foo (i.e. address of call instruction
4181 plus 8) point to the first instruction in the next function. */
4182
4183 rtx insn, last_real_insn;
4184
4185 insn = get_last_insn ();
4186
4187 last_real_insn = prev_real_insn (insn);
4188 if (last_real_insn
4189 && GET_CODE (last_real_insn) == INSN
4190 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4191 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4192
4193 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4194 fputs("\tnop\n", file);
4195
4196 sparc_output_deferred_case_vectors ();
4197 }
4198
4199 /* Output a 'restore' instruction. */
4200
4201 static void
4202 output_restore (rtx pat)
4203 {
4204 rtx operands[3];
4205
4206 if (! pat)
4207 {
4208 fputs ("\t restore\n", asm_out_file);
4209 return;
4210 }
4211
4212 gcc_assert (GET_CODE (pat) == SET);
4213
4214 operands[0] = SET_DEST (pat);
4215 pat = SET_SRC (pat);
4216
4217 switch (GET_CODE (pat))
4218 {
4219 case PLUS:
4220 operands[1] = XEXP (pat, 0);
4221 operands[2] = XEXP (pat, 1);
4222 output_asm_insn (" restore %r1, %2, %Y0", operands);
4223 break;
4224 case LO_SUM:
4225 operands[1] = XEXP (pat, 0);
4226 operands[2] = XEXP (pat, 1);
4227 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4228 break;
4229 case ASHIFT:
4230 operands[1] = XEXP (pat, 0);
4231 gcc_assert (XEXP (pat, 1) == const1_rtx);
4232 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4233 break;
4234 default:
4235 operands[1] = pat;
4236 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4237 break;
4238 }
4239 }
4240
4241 /* Output a return. */
4242
4243 const char *
4244 output_return (rtx insn)
4245 {
4246 if (sparc_leaf_function_p)
4247 {
4248 /* This is a leaf function so we don't have to bother restoring the
4249 register window, which frees us from dealing with the convoluted
4250 semantics of restore/return. We simply output the jump to the
4251 return address and the insn in the delay slot (if any). */
4252
4253 gcc_assert (! crtl->calls_eh_return);
4254
4255 return "jmp\t%%o7+%)%#";
4256 }
4257 else
4258 {
4259 /* This is a regular function so we have to restore the register window.
4260 We may have a pending insn for the delay slot, which will be either
4261 combined with the 'restore' instruction or put in the delay slot of
4262 the 'return' instruction. */
4263
4264 if (crtl->calls_eh_return)
4265 {
4266 /* If the function uses __builtin_eh_return, the eh_return
4267 machinery occupies the delay slot. */
4268 gcc_assert (! final_sequence);
4269
4270 if (! flag_delayed_branch)
4271 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4272
4273 if (TARGET_V9)
4274 fputs ("\treturn\t%i7+8\n", asm_out_file);
4275 else
4276 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4277
4278 if (flag_delayed_branch)
4279 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4280 else
4281 fputs ("\t nop\n", asm_out_file);
4282 }
4283 else if (final_sequence)
4284 {
4285 rtx delay, pat;
4286
4287 delay = NEXT_INSN (insn);
4288 gcc_assert (delay);
4289
4290 pat = PATTERN (delay);
4291
4292 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4293 {
4294 epilogue_renumber (&pat, 0);
4295 return "return\t%%i7+%)%#";
4296 }
4297 else
4298 {
4299 output_asm_insn ("jmp\t%%i7+%)", NULL);
4300 output_restore (pat);
4301 PATTERN (delay) = gen_blockage ();
4302 INSN_CODE (delay) = -1;
4303 }
4304 }
4305 else
4306 {
4307 /* The delay slot is empty. */
4308 if (TARGET_V9)
4309 return "return\t%%i7+%)\n\t nop";
4310 else if (flag_delayed_branch)
4311 return "jmp\t%%i7+%)\n\t restore";
4312 else
4313 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4314 }
4315 }
4316
4317 return "";
4318 }
4319
4320 /* Output a sibling call. */
4321
4322 const char *
4323 output_sibcall (rtx insn, rtx call_operand)
4324 {
4325 rtx operands[1];
4326
4327 gcc_assert (flag_delayed_branch);
4328
4329 operands[0] = call_operand;
4330
4331 if (sparc_leaf_function_p)
4332 {
4333 /* This is a leaf function so we don't have to bother restoring the
4334 register window. We simply output the jump to the function and
4335 the insn in the delay slot (if any). */
4336
4337 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4338
4339 if (final_sequence)
4340 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4341 operands);
4342 else
4343 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4344 it into a branch if possible.  */
4345 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4346 operands);
4347 }
4348 else
4349 {
4350 /* This is a regular function so we have to restore the register window.
4351 We may have a pending insn for the delay slot, which will be combined
4352 with the 'restore' instruction. */
4353
4354 output_asm_insn ("call\t%a0, 0", operands);
4355
4356 if (final_sequence)
4357 {
4358 rtx delay = NEXT_INSN (insn);
4359 gcc_assert (delay);
4360
4361 output_restore (PATTERN (delay));
4362
4363 PATTERN (delay) = gen_blockage ();
4364 INSN_CODE (delay) = -1;
4365 }
4366 else
4367 output_restore (NULL_RTX);
4368 }
4369
4370 return "";
4371 }
4372 \f
4373 /* Functions for handling argument passing.
4374
4375 For 32-bit, the first 6 args are normally in registers and the rest are
4376 pushed. Any arg that starts within the first 6 words is at least
4377 partially passed in a register unless its data type forbids it.
4378
4379 For 64-bit, the argument registers are laid out as an array of 16 elements
4380 and arguments are added sequentially. The first 6 int args and up to the
4381 first 16 fp args (depending on size) are passed in regs.
4382
4383 Slot Stack Integral Float Float in structure Double Long Double
4384 ---- ----- -------- ----- ------------------ ------ -----------
4385 15 [SP+248] %f31 %f30,%f31 %d30
4386 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4387 13 [SP+232] %f27 %f26,%f27 %d26
4388 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4389 11 [SP+216] %f23 %f22,%f23 %d22
4390 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4391 9 [SP+200] %f19 %f18,%f19 %d18
4392 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4393 7 [SP+184] %f15 %f14,%f15 %d14
4394 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4395 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4396 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4397 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4398 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4399 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4400 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4401
4402 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4403
4404 Integral arguments are always passed as 64-bit quantities appropriately
4405 extended.
4406
4407 Passing of floating point values is handled as follows.
4408 If a prototype is in scope:
4409 If the value is in a named argument (i.e. the function is not a stdarg
4410 function, or the value is not part of the `...'), then the value is
4411 passed in the appropriate fp reg.
4412 If the value is part of the `...' and is passed in one of the first 6
4413 slots then the value is passed in the appropriate int reg.
4414 If the value is part of the `...' and is not passed in one of the first 6
4415 slots then the value is passed in memory.
4416 If a prototype is not in scope:
4417 If the value is one of the first 6 arguments the value is passed in the
4418 appropriate integer reg and the appropriate fp reg.
4419 If the value is not one of the first 6 arguments the value is passed in
4420 the appropriate fp reg and in memory.
4421
4422
4423 Summary of the calling conventions implemented by GCC on SPARC:
4424
4425 32-bit ABI:
4426 size argument return value
4427
4428 small integer <4 int. reg. int. reg.
4429 word 4 int. reg. int. reg.
4430 double word 8 int. reg. int. reg.
4431
4432 _Complex small integer <8 int. reg. int. reg.
4433 _Complex word 8 int. reg. int. reg.
4434 _Complex double word 16 memory int. reg.
4435
4436 vector integer <=8 int. reg. FP reg.
4437 vector integer >8 memory memory
4438
4439 float 4 int. reg. FP reg.
4440 double 8 int. reg. FP reg.
4441 long double 16 memory memory
4442
4443 _Complex float 8 memory FP reg.
4444 _Complex double 16 memory FP reg.
4445 _Complex long double 32 memory FP reg.
4446
4447 vector float any memory memory
4448
4449 aggregate any memory memory
4450
4451
4452
4453 64-bit ABI:
4454 size argument return value
4455
4456 small integer <8 int. reg. int. reg.
4457 word 8 int. reg. int. reg.
4458 double word 16 int. reg. int. reg.
4459
4460 _Complex small integer <16 int. reg. int. reg.
4461 _Complex word 16 int. reg. int. reg.
4462 _Complex double word 32 memory int. reg.
4463
4464 vector integer <=16 FP reg. FP reg.
4465 vector integer 16<s<=32 memory FP reg.
4466 vector integer >32 memory memory
4467
4468 float 4 FP reg. FP reg.
4469 double 8 FP reg. FP reg.
4470 long double 16 FP reg. FP reg.
4471
4472 _Complex float 8 FP reg. FP reg.
4473 _Complex double 16 FP reg. FP reg.
4474 _Complex long double 32 memory FP reg.
4475
4476 vector float <=16 FP reg. FP reg.
4477 vector float 16<s<=32 memory FP reg.
4478 vector float >32 memory memory
4479
4480 aggregate <=16 reg. reg.
4481 aggregate 16<s<=32 memory reg.
4482 aggregate >32 memory memory
4483
4484
4485
4486 Note #1: complex floating-point types follow the extended SPARC ABIs as
4487 implemented by the Sun compiler.
4488
4489 Note #2: integral vector types follow the scalar floating-point types
4490 conventions to match what is implemented by the Sun VIS SDK.
4491
4492 Note #3: floating-point vector types follow the aggregate types
4493 conventions. */
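/* Illustrative examples of the summary above (hypothetical prototypes,
   added commentary):

       double f (int a, double b);    64-bit ABI: a in %o0 (slot 0),
                                      b in %d2 (slot 1), result in %f0.
       double g (double x);           32-bit ABI: x in %o0/%o1 (int
                                      regs), result in %f0/%f1.

   A long double argument would instead go to memory on 32-bit and to
   a quad FP register on 64-bit, as tabulated above.  */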
4494
4495
4496 /* Maximum number of int regs for args. */
4497 #define SPARC_INT_ARG_MAX 6
4498 /* Maximum number of fp regs for args. */
4499 #define SPARC_FP_ARG_MAX 16
4500
4501 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
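/* E.g. (added commentary) with UNITS_PER_WORD == 8 (64-bit):
   ROUND_ADVANCE (1) == 1, ROUND_ADVANCE (8) == 1, ROUND_ADVANCE (9) == 2;
   with UNITS_PER_WORD == 4 (32-bit): ROUND_ADVANCE (8) == 2.  */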
4502
4503 /* Handle the INIT_CUMULATIVE_ARGS macro.
4504 Initialize a variable CUM of type CUMULATIVE_ARGS
4505 for a call to a function whose data type is FNTYPE.
4506 For a library call, FNTYPE is 0. */
4507
4508 void
4509 init_cumulative_args (struct sparc_args *cum, tree fntype,
4510 rtx libname ATTRIBUTE_UNUSED,
4511 tree fndecl ATTRIBUTE_UNUSED)
4512 {
4513 cum->words = 0;
4514 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4515 cum->libcall_p = fntype == 0;
4516 }
4517
4518 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4519 When a prototype says `char' or `short', really pass an `int'. */
4520
4521 static bool
4522 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4523 {
4524 return TARGET_ARCH32 ? true : false;
4525 }
4526
4527 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4528
4529 static bool
4530 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4531 {
4532 return TARGET_ARCH64 ? true : false;
4533 }
4534
4535 /* Scan the record type TYPE and return the following predicates:
4536 - INTREGS_P: the record contains at least one field or sub-field
4537 that is eligible for promotion in integer registers.
4538 - FP_REGS_P: the record contains at least one field or sub-field
4539 that is eligible for promotion in floating-point registers.
4540 - PACKED_P: the record contains at least one field that is packed.
4541
4542 Sub-fields are not taken into account for the PACKED_P predicate. */
4543
4544 static void
4545 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4546 {
4547 tree field;
4548
4549 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4550 {
4551 if (TREE_CODE (field) == FIELD_DECL)
4552 {
4553 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4554 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4555 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4556 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4557 && TARGET_FPU)
4558 *fpregs_p = 1;
4559 else
4560 *intregs_p = 1;
4561
4562 if (packed_p && DECL_PACKED (field))
4563 *packed_p = 1;
4564 }
4565 }
4566 }
4567
4568 /* Compute the slot number to pass an argument in.
4569 Return the slot number or -1 if passing on the stack.
4570
4571 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4572 the preceding args and about the function being called.
4573 MODE is the argument's machine mode.
4574 TYPE is the data type of the argument (as a tree).
4575 This is null for libcalls where that information may
4576 not be available.
4577 NAMED is nonzero if this argument is a named parameter
4578 (otherwise it is an extra parameter matching an ellipsis).
4579 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4580 *PREGNO records the register number to use if scalar type.
4581 *PPADDING records the amount of padding needed in words. */
4582
4583 static int
4584 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4585 tree type, int named, int incoming_p,
4586 int *pregno, int *ppadding)
4587 {
4588 int regbase = (incoming_p
4589 ? SPARC_INCOMING_INT_ARG_FIRST
4590 : SPARC_OUTGOING_INT_ARG_FIRST);
4591 int slotno = cum->words;
4592 enum mode_class mclass;
4593 int regno;
4594
4595 *ppadding = 0;
4596
4597 if (type && TREE_ADDRESSABLE (type))
4598 return -1;
4599
4600 if (TARGET_ARCH32
4601 && mode == BLKmode
4602 && type
4603 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4604 return -1;
4605
4606 /* For SPARC64, objects requiring 16-byte alignment get it. */
4607 if (TARGET_ARCH64
4608 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4609 && (slotno & 1) != 0)
4610 slotno++, *ppadding = 1;
4611
4612 mclass = GET_MODE_CLASS (mode);
4613 if (type && TREE_CODE (type) == VECTOR_TYPE)
4614 {
4615 /* Vector types deserve special treatment because they are
4616 polymorphic wrt their mode, depending upon whether VIS
4617 instructions are enabled. */
4618 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4619 {
4620 /* The SPARC port defines no floating-point vector modes. */
4621 gcc_assert (mode == BLKmode);
4622 }
4623 else
4624 {
4625 /* Integral vector types should either have a vector
4626 mode or an integral mode, because we are guaranteed
4627 by pass_by_reference that their size is not greater
4628 than 16 bytes and TImode is 16-byte wide. */
4629 gcc_assert (mode != BLKmode);
4630
4631 /* Vector integers are handled like floats according to
4632 the Sun VIS SDK. */
4633 mclass = MODE_FLOAT;
4634 }
4635 }
4636
4637 switch (mclass)
4638 {
4639 case MODE_FLOAT:
4640 case MODE_COMPLEX_FLOAT:
4641 case MODE_VECTOR_INT:
4642 if (TARGET_ARCH64 && TARGET_FPU && named)
4643 {
4644 if (slotno >= SPARC_FP_ARG_MAX)
4645 return -1;
4646 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4647 /* Arguments filling only one single FP register are
4648 right-justified in the outer double FP register. */
4649 if (GET_MODE_SIZE (mode) <= 4)
4650 regno++;
4651 break;
4652 }
4653 /* fallthrough */
4654
4655 case MODE_INT:
4656 case MODE_COMPLEX_INT:
4657 if (slotno >= SPARC_INT_ARG_MAX)
4658 return -1;
4659 regno = regbase + slotno;
4660 break;
4661
4662 case MODE_RANDOM:
4663 if (mode == VOIDmode)
4664 /* MODE is VOIDmode when generating the actual call. */
4665 return -1;
4666
4667 gcc_assert (mode == BLKmode);
4668
4669 if (TARGET_ARCH32
4670 || !type
4671 || (TREE_CODE (type) != VECTOR_TYPE
4672 && TREE_CODE (type) != RECORD_TYPE))
4673 {
4674 if (slotno >= SPARC_INT_ARG_MAX)
4675 return -1;
4676 regno = regbase + slotno;
4677 }
4678 else /* TARGET_ARCH64 && type */
4679 {
4680 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4681
4682 /* First see what kinds of registers we would need. */
4683 if (TREE_CODE (type) == VECTOR_TYPE)
4684 fpregs_p = 1;
4685 else
4686 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4687
4688 /* The ABI obviously doesn't specify how packed structures
4689 are passed. These are defined to be passed in int regs
4690 if possible, otherwise memory. */
4691 if (packed_p || !named)
4692 fpregs_p = 0, intregs_p = 1;
4693
4694 /* If all arg slots are filled, then must pass on stack. */
4695 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4696 return -1;
4697
4698 /* If there are only int args and all int arg slots are filled,
4699 then must pass on stack. */
4700 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4701 return -1;
4702
4703 /* Note that even if all int arg slots are filled, fp members may
4704 still be passed in regs if such regs are available.
4705 *PREGNO isn't set because there may be more than one; it's up
4706 to the caller to compute them. */
4707 return slotno;
4708 }
4709 break;
4710
4711 default :
4712 gcc_unreachable ();
4713 }
4714
4715 *pregno = regno;
4716 return slotno;
4717 }
4718
4719 /* Handle recursive register counting for structure field layout. */
4720
4721 struct function_arg_record_value_parms
4722 {
4723 rtx ret; /* return expression being built. */
4724 int slotno; /* slot number of the argument. */
4725 int named; /* whether the argument is named. */
4726 int regbase; /* regno of the base register. */
4727 int stack; /* 1 if part of the argument is on the stack. */
4728 int intoffset; /* offset of the first pending integer field. */
4729 unsigned int nregs; /* number of words passed in registers. */
4730 };
4731
4732 static void function_arg_record_value_3
4733 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4734 static void function_arg_record_value_2
4735 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4736 static void function_arg_record_value_1
4737 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4738 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4739 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4740
4741 /* A subroutine of function_arg_record_value. Traverse the structure
4742 recursively and determine how many registers will be required. */
4743
4744 static void
4745 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4746 struct function_arg_record_value_parms *parms,
4747 bool packed_p)
4748 {
4749 tree field;
4750
4751 /* We need to compute how many registers are needed so we can
4752 allocate the PARALLEL but before we can do that we need to know
4753 whether there are any packed fields. The ABI obviously doesn't
4754 specify how structures are passed in this case, so they are
4755 defined to be passed in int regs if possible, otherwise memory,
4756 regardless of whether there are fp values present. */
4757
4758 if (! packed_p)
4759 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4760 {
4761 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4762 {
4763 packed_p = true;
4764 break;
4765 }
4766 }
4767
4768 /* Compute how many registers we need. */
4769 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4770 {
4771 if (TREE_CODE (field) == FIELD_DECL)
4772 {
4773 HOST_WIDE_INT bitpos = startbitpos;
4774
4775 if (DECL_SIZE (field) != 0)
4776 {
4777 if (integer_zerop (DECL_SIZE (field)))
4778 continue;
4779
4780 if (host_integerp (bit_position (field), 1))
4781 bitpos += int_bit_position (field);
4782 }
4783
4784 /* ??? FIXME: else assume zero offset. */
4785
4786 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4787 function_arg_record_value_1 (TREE_TYPE (field),
4788 bitpos,
4789 parms,
4790 packed_p);
4791 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4792 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4793 && TARGET_FPU
4794 && parms->named
4795 && ! packed_p)
4796 {
4797 if (parms->intoffset != -1)
4798 {
4799 unsigned int startbit, endbit;
4800 int intslots, this_slotno;
4801
4802 startbit = parms->intoffset & -BITS_PER_WORD;
4803 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4804
4805 intslots = (endbit - startbit) / BITS_PER_WORD;
4806 this_slotno = parms->slotno + parms->intoffset
4807 / BITS_PER_WORD;
4808
4809 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4810 {
4811 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4812 /* We need to pass this field on the stack. */
4813 parms->stack = 1;
4814 }
4815
4816 parms->nregs += intslots;
4817 parms->intoffset = -1;
4818 }
4819
4820 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4821 If it wasn't true we wouldn't be here. */
4822 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4823 && DECL_MODE (field) == BLKmode)
4824 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4825 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4826 parms->nregs += 2;
4827 else
4828 parms->nregs += 1;
4829 }
4830 else
4831 {
4832 if (parms->intoffset == -1)
4833 parms->intoffset = bitpos;
4834 }
4835 }
4836 }
4837 }
4838
4839 /* A subroutine of function_arg_record_value. Assign the bits of the
4840 structure between parms->intoffset and bitpos to integer registers. */
4841
4842 static void
4843 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4844 struct function_arg_record_value_parms *parms)
4845 {
4846 enum machine_mode mode;
4847 unsigned int regno;
4848 unsigned int startbit, endbit;
4849 int this_slotno, intslots, intoffset;
4850 rtx reg;
4851
4852 if (parms->intoffset == -1)
4853 return;
4854
4855 intoffset = parms->intoffset;
4856 parms->intoffset = -1;
4857
4858 startbit = intoffset & -BITS_PER_WORD;
4859 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4860 intslots = (endbit - startbit) / BITS_PER_WORD;
4861 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4862
4863 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4864 if (intslots <= 0)
4865 return;
4866
4867 /* If this is the trailing part of a word, only load that much into
4868 the register. Otherwise load the whole register. Note that in
4869 the latter case we may pick up unwanted bits. It's not a problem
4870 at the moment but we may wish to revisit this.  */
4871
4872 if (intoffset % BITS_PER_WORD != 0)
4873 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4874 MODE_INT);
4875 else
4876 mode = word_mode;
4877
4878 intoffset /= BITS_PER_UNIT;
4879 do
4880 {
4881 regno = parms->regbase + this_slotno;
4882 reg = gen_rtx_REG (mode, regno);
4883 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4884 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4885
4886 this_slotno += 1;
4887 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4888 mode = word_mode;
4889 parms->nregs += 1;
4890 intslots -= 1;
4891 }
4892 while (intslots > 0);
4893 }
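/* Worked example (assumed layout, added commentary): with BITS_PER_WORD
   == 64, intoffset == 32 and BITPOS == 128, the arithmetic above gives
   startbit == 0 and endbit == 128, hence intslots == 2.  Since 32 is not
   a multiple of the word size, the first register uses a 32-bit MODE_INT
   mode and is attached at byte offset 4; intoffset is then rounded up to
   8 and the second register uses word_mode at byte offset 8.  */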
4894
4895 /* A subroutine of function_arg_record_value. Traverse the structure
4896 recursively and assign bits to floating point registers. Track which
4897 bits in between need integer registers; invoke function_arg_record_value_3
4898 to make that happen. */
4899
4900 static void
4901 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
4902 struct function_arg_record_value_parms *parms,
4903 bool packed_p)
4904 {
4905 tree field;
4906
4907 if (! packed_p)
4908 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4909 {
4910 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4911 {
4912 packed_p = true;
4913 break;
4914 }
4915 }
4916
4917 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4918 {
4919 if (TREE_CODE (field) == FIELD_DECL)
4920 {
4921 HOST_WIDE_INT bitpos = startbitpos;
4922
4923 if (DECL_SIZE (field) != 0)
4924 {
4925 if (integer_zerop (DECL_SIZE (field)))
4926 continue;
4927
4928 if (host_integerp (bit_position (field), 1))
4929 bitpos += int_bit_position (field);
4930 }
4931
4932 /* ??? FIXME: else assume zero offset. */
4933
4934 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4935 function_arg_record_value_2 (TREE_TYPE (field),
4936 bitpos,
4937 parms,
4938 packed_p);
4939 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4940 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4941 && TARGET_FPU
4942 && parms->named
4943 && ! packed_p)
4944 {
4945 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4946 int regno, nregs, pos;
4947 enum machine_mode mode = DECL_MODE (field);
4948 rtx reg;
4949
4950 function_arg_record_value_3 (bitpos, parms);
4951
4952 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4953 && mode == BLKmode)
4954 {
4955 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4956 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4957 }
4958 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4959 {
4960 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4961 nregs = 2;
4962 }
4963 else
4964 nregs = 1;
4965
4966 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4967 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4968 regno++;
4969 reg = gen_rtx_REG (mode, regno);
4970 pos = bitpos / BITS_PER_UNIT;
4971 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4972 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4973 parms->nregs += 1;
4974 while (--nregs > 0)
4975 {
4976 regno += GET_MODE_SIZE (mode) / 4;
4977 reg = gen_rtx_REG (mode, regno);
4978 pos += GET_MODE_SIZE (mode);
4979 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4980 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4981 parms->nregs += 1;
4982 }
4983 }
4984 else
4985 {
4986 if (parms->intoffset == -1)
4987 parms->intoffset = bitpos;
4988 }
4989 }
4990 }
4991 }
4992
4993 /* Used by function_arg and function_value to implement the complex
4994 conventions of the 64-bit ABI for passing and returning structures.
4995 Return an expression valid as a return value for the two macros
4996 FUNCTION_ARG and FUNCTION_VALUE.
4997
4998 TYPE is the data type of the argument (as a tree).
4999 This is null for libcalls where that information may
5000 not be available.
5001 MODE is the argument's machine mode.
5002 SLOTNO is the index number of the argument's slot in the parameter array.
5003 NAMED is nonzero if this argument is a named parameter
5004 (otherwise it is an extra parameter matching an ellipsis).
5005 REGBASE is the regno of the base register for the parameter array. */
5006
5007 static rtx
5008 function_arg_record_value (const_tree type, enum machine_mode mode,
5009 int slotno, int named, int regbase)
5010 {
5011 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5012 struct function_arg_record_value_parms parms;
5013 unsigned int nregs;
5014
5015 parms.ret = NULL_RTX;
5016 parms.slotno = slotno;
5017 parms.named = named;
5018 parms.regbase = regbase;
5019 parms.stack = 0;
5020
5021 /* Compute how many registers we need. */
5022 parms.nregs = 0;
5023 parms.intoffset = 0;
5024 function_arg_record_value_1 (type, 0, &parms, false);
5025
5026 /* Take into account pending integer fields. */
5027 if (parms.intoffset != -1)
5028 {
5029 unsigned int startbit, endbit;
5030 int intslots, this_slotno;
5031
5032 startbit = parms.intoffset & -BITS_PER_WORD;
5033 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5034 intslots = (endbit - startbit) / BITS_PER_WORD;
5035 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5036
5037 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5038 {
5039 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5040 /* We need to pass this field on the stack. */
5041 parms.stack = 1;
5042 }
5043
5044 parms.nregs += intslots;
5045 }
5046 nregs = parms.nregs;
5047
5048 /* Allocate the vector and handle some annoying special cases. */
5049 if (nregs == 0)
5050 {
5051 /* ??? Empty structure has no value? Duh? */
5052 if (typesize <= 0)
5053 {
5054 /* Though there's nothing really to store, return a word register
5055 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5056 leads to breakage due to the fact that there are zero bytes to
5057 load. */
5058 return gen_rtx_REG (mode, regbase);
5059 }
5060 else
5061 {
5062 /* ??? C++ has structures with no fields, and yet a size. Give up
5063 for now and pass everything back in integer registers. */
5064 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5065 }
5066 if (nregs + slotno > SPARC_INT_ARG_MAX)
5067 nregs = SPARC_INT_ARG_MAX - slotno;
5068 }
5069 gcc_assert (nregs != 0);
5070
5071 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5072
5073 /* If at least one field must be passed on the stack, generate
5074 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5075 also be passed on the stack. We can't do much better because the
5076 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5077 of structures for which the fields passed exclusively in registers
5078 are not at the beginning of the structure. */
5079 if (parms.stack)
5080 XVECEXP (parms.ret, 0, 0)
5081 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5082
5083 /* Fill in the entries. */
5084 parms.nregs = 0;
5085 parms.intoffset = 0;
5086 function_arg_record_value_2 (type, 0, &parms, false);
5087 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5088
5089 gcc_assert (parms.nregs == nregs);
5090
5091 return parms.ret;
5092 }
5093
5094 /* Used by function_arg and function_value to implement the conventions
5095 of the 64-bit ABI for passing and returning unions.
5096 Return an expression valid as a return value for the two macros
5097 FUNCTION_ARG and FUNCTION_VALUE.
5098
5099 SIZE is the size in bytes of the union.
5100 MODE is the argument's machine mode.
5101 SLOTNO is the argument's slot index; REGNO is the hard register the union will be passed in.  */
5102
5103 static rtx
5104 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5105 int regno)
5106 {
5107 int nwords = ROUND_ADVANCE (size), i;
5108 rtx regs;
5109
5110 /* See comment in previous function for empty structures. */
5111 if (nwords == 0)
5112 return gen_rtx_REG (mode, regno);
5113
5114 if (slotno == SPARC_INT_ARG_MAX - 1)
5115 nwords = 1;
5116
5117 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5118
5119 for (i = 0; i < nwords; i++)
5120 {
5121 /* Unions are passed left-justified. */
5122 XVECEXP (regs, 0, i)
5123 = gen_rtx_EXPR_LIST (VOIDmode,
5124 gen_rtx_REG (word_mode, regno),
5125 GEN_INT (UNITS_PER_WORD * i));
5126 regno++;
5127 }
5128
5129 return regs;
5130 }
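/* Example (assumed operands, added commentary): a 12-byte union passed
   in slot 0 with REGNO == %o0 under the 64-bit ABI gives nwords == 2,
   i.e. a PARALLEL of %o0 at byte offset 0 and %o1 at byte offset 8,
   both in word_mode and left-justified as noted above.  */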
5131
5132 /* Used by function_arg and function_value to implement the conventions
5133 for passing and returning large (BLKmode) vectors.
5134 Return an expression valid as a return value for the two macros
5135 FUNCTION_ARG and FUNCTION_VALUE.
5136
5137 SIZE is the size in bytes of the vector (at least 8 bytes).
5138 REGNO is the FP hard register the vector will be passed in. */
5139
5140 static rtx
5141 function_arg_vector_value (int size, int regno)
5142 {
5143 int i, nregs = size / 8;
5144 rtx regs;
5145
5146 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5147
5148 for (i = 0; i < nregs; i++)
5149 {
5150 XVECEXP (regs, 0, i)
5151 = gen_rtx_EXPR_LIST (VOIDmode,
5152 gen_rtx_REG (DImode, regno + 2*i),
5153 GEN_INT (i*8));
5154 }
5155
5156 return regs;
5157 }
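/* Example (assumed operands, added commentary): a 16-byte vector with
   REGNO at the first FP argument register yields nregs == 2, i.e. a
   PARALLEL of two DImode FP registers, REGNO and REGNO + 2, at byte
   offsets 0 and 8 respectively.  */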
5158
5159 /* Handle the FUNCTION_ARG macro.
5160 Determine where to put an argument to a function.
5161 Value is zero to push the argument on the stack,
5162 or a hard register in which to store the argument.
5163
5164 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5165 the preceding args and about the function being called.
5166 MODE is the argument's machine mode.
5167 TYPE is the data type of the argument (as a tree).
5168 This is null for libcalls where that information may
5169 not be available.
5170 NAMED is nonzero if this argument is a named parameter
5171 (otherwise it is an extra parameter matching an ellipsis).
5172 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5173
5174 rtx
5175 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5176 tree type, int named, int incoming_p)
5177 {
5178 int regbase = (incoming_p
5179 ? SPARC_INCOMING_INT_ARG_FIRST
5180 : SPARC_OUTGOING_INT_ARG_FIRST);
5181 int slotno, regno, padding;
5182 enum mode_class mclass = GET_MODE_CLASS (mode);
5183
5184 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5185 &regno, &padding);
5186 if (slotno == -1)
5187 return 0;
5188
5189 /* Vector types deserve special treatment because they are polymorphic wrt
5190 their mode, depending upon whether VIS instructions are enabled. */
5191 if (type && TREE_CODE (type) == VECTOR_TYPE)
5192 {
5193 HOST_WIDE_INT size = int_size_in_bytes (type);
5194 gcc_assert ((TARGET_ARCH32 && size <= 8)
5195 || (TARGET_ARCH64 && size <= 16));
5196
5197 if (mode == BLKmode)
5198 return function_arg_vector_value (size,
5199 SPARC_FP_ARG_FIRST + 2*slotno);
5200 else
5201 mclass = MODE_FLOAT;
5202 }
5203
5204 if (TARGET_ARCH32)
5205 return gen_rtx_REG (mode, regno);
5206
5207 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5208 and are promoted to registers if possible. */
5209 if (type && TREE_CODE (type) == RECORD_TYPE)
5210 {
5211 HOST_WIDE_INT size = int_size_in_bytes (type);
5212 gcc_assert (size <= 16);
5213
5214 return function_arg_record_value (type, mode, slotno, named, regbase);
5215 }
5216
5217 /* Unions up to 16 bytes in size are passed in integer registers. */
5218 else if (type && TREE_CODE (type) == UNION_TYPE)
5219 {
5220 HOST_WIDE_INT size = int_size_in_bytes (type);
5221 gcc_assert (size <= 16);
5222
5223 return function_arg_union_value (size, mode, slotno, regno);
5224 }
5225
5226 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5227 but also have the slot allocated for them.
5228 If no prototype is in scope, fp values in register slots get passed
5229 in two places, either fp regs and int regs or fp regs and memory. */
5230 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5231 && SPARC_FP_REG_P (regno))
5232 {
5233 rtx reg = gen_rtx_REG (mode, regno);
5234 if (cum->prototype_p || cum->libcall_p)
5235 {
5236 /* "* 2" because fp reg numbers are recorded in 4 byte
5237 quantities. */
5238 #if 0
5239 /* ??? This will cause the value to be passed in the fp reg and
5240 in the stack. When a prototype exists we want to pass the
5241 value in the reg but reserve space on the stack. That's an
5242 optimization, and is deferred [for a bit]. */
5243 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5244 return gen_rtx_PARALLEL (mode,
5245 gen_rtvec (2,
5246 gen_rtx_EXPR_LIST (VOIDmode,
5247 NULL_RTX, const0_rtx),
5248 gen_rtx_EXPR_LIST (VOIDmode,
5249 reg, const0_rtx)));
5250 else
5251 #else
5252 /* ??? It seems that passing back a register even when past
5253 the area declared by REG_PARM_STACK_SPACE will allocate
5254 space appropriately, and will not copy the data onto the
5255 stack, exactly as we desire.
5256
5257 This is due to locate_and_pad_parm being called in
5258 expand_call whenever reg_parm_stack_space > 0, which
5259 while beneficial to our example here, would seem to be
5260 in error from what had been intended. Ho hum... -- r~ */
5261 #endif
5262 return reg;
5263 }
5264 else
5265 {
5266 rtx v0, v1;
5267
5268 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5269 {
5270 int intreg;
5271
5272 /* On incoming, we don't need to know that the value
5273 is passed in %f0 and %i0, and it confuses other parts
5274 causing needless spillage even on the simplest cases. */
5275 if (incoming_p)
5276 return reg;
5277
5278 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5279 + (regno - SPARC_FP_ARG_FIRST) / 2);
5280
5281 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5282 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5283 const0_rtx);
5284 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5285 }
5286 else
5287 {
5288 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5289 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5290 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5291 }
5292 }
5293 }
5294
5295 /* All other aggregate types are passed in an integer register in a mode
5296 corresponding to the size of the type. */
5297 else if (type && AGGREGATE_TYPE_P (type))
5298 {
5299 HOST_WIDE_INT size = int_size_in_bytes (type);
5300 gcc_assert (size <= 16);
5301
5302 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5303 }
5304
5305 return gen_rtx_REG (mode, regno);
5306 }
5307
5308 /* For an arg passed partly in registers and partly in memory,
5309 this is the number of bytes of registers used.
5310 For args passed entirely in registers or entirely in memory, zero.
5311
5312 Any arg that starts in the first 6 regs but won't entirely fit in them
5313 needs partial registers on v8. On v9, structures with integer
5314 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5315 values that begin in the last fp reg [where "last fp reg" varies with the
5316 mode] will be split between that reg and memory. */
5317
5318 static int
5319 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5320 tree type, bool named)
5321 {
5322 int slotno, regno, padding;
5323
5324 /* We pass 0 for incoming_p here; it doesn't matter.  */
5325 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5326
5327 if (slotno == -1)
5328 return 0;
5329
5330 if (TARGET_ARCH32)
5331 {
5332 if ((slotno + (mode == BLKmode
5333 ? ROUND_ADVANCE (int_size_in_bytes (type))
5334 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5335 > SPARC_INT_ARG_MAX)
5336 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5337 }
5338 else
5339 {
5340 /* We are guaranteed by pass_by_reference that the size of the
5341 argument is not greater than 16 bytes, so we only need to return
5342 one word if the argument is partially passed in registers. */
5343
5344 if (type && AGGREGATE_TYPE_P (type))
5345 {
5346 int size = int_size_in_bytes (type);
5347
5348 if (size > UNITS_PER_WORD
5349 && slotno == SPARC_INT_ARG_MAX - 1)
5350 return UNITS_PER_WORD;
5351 }
5352 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5353 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5354 && ! (TARGET_FPU && named)))
5355 {
5356 /* The complex types are passed as packed types. */
5357 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5358 && slotno == SPARC_INT_ARG_MAX - 1)
5359 return UNITS_PER_WORD;
5360 }
5361 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5362 {
5363 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5364 > SPARC_FP_ARG_MAX)
5365 return UNITS_PER_WORD;
5366 }
5367 }
5368
5369 return 0;
5370 }
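/* Example (assumed operands, added commentary): under the 32-bit ABI,
   a DImode argument landing in slot 5 occupies slots 5 and 6; since
   5 + 2 > SPARC_INT_ARG_MAX, the function returns (6 - 5) * 4 == 4,
   i.e. the first half goes in %o5 and the second half in memory.  */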
5371
5372 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5373 Specify whether to pass the argument by reference. */
5374
5375 static bool
5376 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5377 enum machine_mode mode, const_tree type,
5378 bool named ATTRIBUTE_UNUSED)
5379 {
5380 if (TARGET_ARCH32)
5381 /* Original SPARC 32-bit ABI says that structures and unions,
5382 and quad-precision floats are passed by reference. For Pascal,
5383 also pass arrays by reference. All other base types are passed
5384 in registers.
5385
5386 Extended ABI (as implemented by the Sun compiler) says that all
5387 complex floats are passed by reference. Pass complex integers
5388 in registers up to 8 bytes. More generally, enforce the 2-word
5389 cap for passing arguments in registers.
5390
5391 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5392 integers are passed like floats of the same size, that is in
5393 registers up to 8 bytes. Pass all vector floats by reference
5394 like structure and unions. */
5395 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5396 || mode == SCmode
5397 /* Catch CDImode, TFmode, DCmode and TCmode. */
5398 || GET_MODE_SIZE (mode) > 8
5399 || (type
5400 && TREE_CODE (type) == VECTOR_TYPE
5401 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5402 else
5403 /* Original SPARC 64-bit ABI says that structures and unions
5404 smaller than 16 bytes are passed in registers, as well as
5405 all other base types.
5406
5407 Extended ABI (as implemented by the Sun compiler) says that
5408 complex floats are passed in registers up to 16 bytes. Pass
5409 all complex integers in registers up to 16 bytes. More generally,
5410 enforce the 2-word cap for passing arguments in registers.
5411
5412 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5413 integers are passed like floats of the same size, that is in
5414 registers (up to 16 bytes). Pass all vector floats like structure
5415 and unions. */
5416 return ((type
5417 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5418 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5419 /* Catch CTImode and TCmode. */
5420 || GET_MODE_SIZE (mode) > 16);
5421 }
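/* Examples of the rules above (added commentary): under the 32-bit ABI,
   a struct, a long double (TFmode) and a _Complex float (SCmode) are all
   passed by reference, while an int or a double is passed by value.
   Under the 64-bit ABI a 16-byte struct is still passed by value; only
   types larger than 16 bytes, e.g. a 24-byte struct or a _Complex long
   double (TCmode), go by reference.  */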
5422
5423 /* Handle the FUNCTION_ARG_ADVANCE macro.
5424 Update the data in CUM to advance over an argument
5425 of mode MODE and data type TYPE.
5426 TYPE is null for libcalls where that information may not be available. */
5427
5428 void
5429 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5430 tree type, int named)
5431 {
5432 int slotno, regno, padding;
5433
5434 /* We pass 0 for incoming_p here; it doesn't matter.  */
5435 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5436
5437 /* If the argument required leading padding, add it.  */
5438 if (slotno != -1)
5439 cum->words += padding;
5440
5441 if (TARGET_ARCH32)
5442 {
5443 cum->words += (mode != BLKmode
5444 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5445 : ROUND_ADVANCE (int_size_in_bytes (type)));
5446 }
5447 else
5448 {
5449 if (type && AGGREGATE_TYPE_P (type))
5450 {
5451 int size = int_size_in_bytes (type);
5452
5453 if (size <= 8)
5454 ++cum->words;
5455 else if (size <= 16)
5456 cum->words += 2;
5457 else /* passed by reference */
5458 ++cum->words;
5459 }
5460 else
5461 {
5462 cum->words += (mode != BLKmode
5463 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5464 : ROUND_ADVANCE (int_size_in_bytes (type)));
5465 }
5466 }
5467 }
5468
5469 /* Handle the FUNCTION_ARG_PADDING macro.
5470 For the 64-bit ABI, structs are always stored left-shifted in their
5471 argument slot.  */
5472
5473 enum direction
5474 function_arg_padding (enum machine_mode mode, const_tree type)
5475 {
5476 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5477 return upward;
5478
5479 /* Fall back to the default. */
5480 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5481 }
5482
5483 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5484 Specify whether to return the return value in memory. */
5485
5486 static bool
5487 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5488 {
5489 if (TARGET_ARCH32)
5490 /* Original SPARC 32-bit ABI says that structures and unions,
5491 and quad-precision floats are returned in memory. All other
5492 base types are returned in registers.
5493
5494 Extended ABI (as implemented by the Sun compiler) says that
5495 all complex floats are returned in registers (8 FP registers
5496 at most for '_Complex long double'). Return all complex integers
5497 in registers (4 at most for '_Complex long long').
5498
5499 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5500 integers are returned like floats of the same size, that is in
5501 registers up to 8 bytes and in memory otherwise. Return all
5502 vector floats in memory like structures and unions; note that
5503 they always have BLKmode like the latter. */
5504 return (TYPE_MODE (type) == BLKmode
5505 || TYPE_MODE (type) == TFmode
5506 || (TREE_CODE (type) == VECTOR_TYPE
5507 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5508 else
5509 /* Original SPARC 64-bit ABI says that structures and unions
5510 no larger than 32 bytes are returned in registers, as are
5511 all other base types.
5512
5513 Extended ABI (as implemented by the Sun compiler) says that all
5514 complex floats are returned in registers (8 FP registers at most
5515 for '_Complex long double'). Return all complex integers in
5516 registers (4 at most for '_Complex TItype').
5517
5518 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5519 integers are returned like floats of the same size, that is in
5520 registers. Return all vector floats like structures and unions;
5521 note that they always have BLKmode like the latter. */
5522 return ((TYPE_MODE (type) == BLKmode
5523 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5524 }
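
/* For example (a reading of the predicate above together with the
   Vector ABI notes): in 32-bit mode a long double (TFmode), any
   BLKmode value and a 16-byte vector are returned in memory, while an
   8-byte vector of ints is returned in registers; in 64-bit mode only
   BLKmode values larger than 32 bytes go to memory.  */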
5525
5526 /* Handle the TARGET_STRUCT_VALUE target hook.
5527 Return where to find the structure return value address. */
5528
5529 static rtx
5530 sparc_struct_value_rtx (tree fndecl, int incoming)
5531 {
5532 if (TARGET_ARCH64)
5533 return 0;
5534 else
5535 {
5536 rtx mem;
5537
5538 if (incoming)
5539 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5540 STRUCT_VALUE_OFFSET));
5541 else
5542 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5543 STRUCT_VALUE_OFFSET));
5544
5545 /* Only follow the SPARC ABI for fixed-size structure returns.
5546 Variable size structure returns are handled per the normal
5547 procedures in GCC. This is enabled by -mstd-struct-return. */
5548 if (incoming == 2
5549 && sparc_std_struct_return
5550 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5551 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5552 {
5553 /* We must check and adjust the return address, as the
5554 caller may or may not have actually provided the
5555 return object. */
5556 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5557 rtx scratch = gen_reg_rtx (SImode);
5558 rtx endlab = gen_label_rtx ();
5559
5560 /* Calculate the return object size. */
5561 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5562 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5563 /* Construct a temporary return value. */
5564 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5565
5566 /* Implement the SPARC 32-bit psABI callee struct return checking
5567 requirements:
5568
5569 Fetch the instruction where we will return to and see if
5570 it's an unimp instruction (the most significant 10 bits
5571 will be zero). */
5572 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5573 plus_constant (ret_rtx, 8)));
5574 /* Assume the size is valid and pre-adjust. */
5575 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5576 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5577 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5578 /* Assign the stack temporary:
5579 write the address of the memory pointed to by temp_val into
5580 the memory pointed to by mem. */
5581 emit_move_insn (mem, XEXP (temp_val, 0));
5582 emit_label (endlab);
5583 }
5584
5585 set_mem_alias_set (mem, struct_value_alias_set);
5586 return mem;
5587 }
5588 }
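
/* Caller-side picture of the convention checked above (illustrative):

	call	foo		! sets %o7 to the call address
	 nop			! delay slot
	unimp	12		! low 12 bits = expected struct size
	...			! struct-aware return point, %o7 + 12

   In the callee, where the caller's %o7 is visible as %i7 (register
   31), the word at [%i7 + 8] is fetched; if it is an unimp, i.e. its
   most significant 10 bits are zero, its immediate is compared with
   the expected size and the return address is bumped by 4 so that the
   return lands past the unimp.  */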
5589
5590 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5591 For v9, function return values are subject to the same rules as arguments,
5592 except that up to 32 bytes may be returned in registers. */
5593
5594 rtx
5595 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5596 {
5597 /* Beware that the two values are swapped here wrt function_arg. */
5598 int regbase = (incoming_p
5599 ? SPARC_OUTGOING_INT_ARG_FIRST
5600 : SPARC_INCOMING_INT_ARG_FIRST);
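/* (The callee deposits its return value in its %i0 and the "restore"
   in the return sequence makes it appear in the caller's %o0, which
   is the reverse of how arguments travel; hence the swap.)  */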
5601 enum mode_class mclass = GET_MODE_CLASS (mode);
5602 int regno;
5603
5604 /* Vector types deserve special treatment because they are polymorphic wrt
5605 their mode, depending upon whether VIS instructions are enabled. */
5606 if (type && TREE_CODE (type) == VECTOR_TYPE)
5607 {
5608 HOST_WIDE_INT size = int_size_in_bytes (type);
5609 gcc_assert ((TARGET_ARCH32 && size <= 8)
5610 || (TARGET_ARCH64 && size <= 32));
5611
5612 if (mode == BLKmode)
5613 return function_arg_vector_value (size,
5614 SPARC_FP_ARG_FIRST);
5615 else
5616 mclass = MODE_FLOAT;
5617 }
5618
5619 if (TARGET_ARCH64 && type)
5620 {
5621 /* Structures up to 32 bytes in size are returned in registers. */
5622 if (TREE_CODE (type) == RECORD_TYPE)
5623 {
5624 HOST_WIDE_INT size = int_size_in_bytes (type);
5625 gcc_assert (size <= 32);
5626
5627 return function_arg_record_value (type, mode, 0, 1, regbase);
5628 }
5629
5630 /* Unions up to 32 bytes in size are returned in integer registers. */
5631 else if (TREE_CODE (type) == UNION_TYPE)
5632 {
5633 HOST_WIDE_INT size = int_size_in_bytes (type);
5634 gcc_assert (size <= 32);
5635
5636 return function_arg_union_value (size, mode, 0, regbase);
5637 }
5638
5639 /* Objects that require it are returned in FP registers. */
5640 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5641 ;
5642
5643 /* All other aggregate types are returned in an integer register in a
5644 mode corresponding to the size of the type. */
5645 else if (AGGREGATE_TYPE_P (type))
5646 {
5647 /* All other aggregate types are passed in an integer register
5648 in a mode corresponding to the size of the type. */
5649 HOST_WIDE_INT size = int_size_in_bytes (type);
5650 gcc_assert (size <= 32);
5651
5652 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5653
5654 /* ??? We probably should have made the same ABI change in
5655 3.4.0 as the one we made for unions. The latter was
5656 required by the SCD though, while the former is not
5657 specified, so we favored compatibility and efficiency.
5658
5659 Now we're stuck for aggregates larger than 16 bytes,
5660 because OImode vanished in the meantime. Let's not
5661 try to be unduly clever, and simply follow the ABI
5662 for unions in that case. */
5663 if (mode == BLKmode)
5664 return function_arg_union_value (size, mode, 0, regbase);
5665 else
5666 mclass = MODE_INT;
5667 }
5668
5669 /* This must match PROMOTE_FUNCTION_MODE. */
5670 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5671 mode = word_mode;
5672 }
5673
5674 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5675 regno = SPARC_FP_ARG_FIRST;
5676 else
5677 regno = regbase;
5678
5679 return gen_rtx_REG (mode, regno);
5680 }
5681
5682 /* Do what is necessary for `va_start'. We look at the current function
5683 to determine if stdarg or varargs is used and return the address of
5684 the first unnamed parameter. */
5685
5686 static rtx
5687 sparc_builtin_saveregs (void)
5688 {
5689 int first_reg = crtl->args.info.words;
5690 rtx address;
5691 int regno;
5692
5693 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5694 emit_move_insn (gen_rtx_MEM (word_mode,
5695 gen_rtx_PLUS (Pmode,
5696 frame_pointer_rtx,
5697 GEN_INT (FIRST_PARM_OFFSET (0)
5698 + (UNITS_PER_WORD
5699 * regno)))),
5700 gen_rtx_REG (word_mode,
5701 SPARC_INCOMING_INT_ARG_FIRST + regno));
5702
5703 address = gen_rtx_PLUS (Pmode,
5704 frame_pointer_rtx,
5705 GEN_INT (FIRST_PARM_OFFSET (0)
5706 + UNITS_PER_WORD * first_reg));
5707
5708 return address;
5709 }
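
/* A note on the loop above: crtl->args.info.words is the number of
   argument slots consumed by the named parameters, so only the
   remaining incoming registers, the potential variadic arguments, are
   dumped into their reserved slots in the parameter area; the address
   returned points at the first of those slots and becomes the initial
   va_list pointer.  */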
5710
5711 /* Implement `va_start' for stdarg. */
5712
5713 static void
5714 sparc_va_start (tree valist, rtx nextarg)
5715 {
5716 nextarg = expand_builtin_saveregs ();
5717 std_expand_builtin_va_start (valist, nextarg);
5718 }
5719
5720 /* Implement `va_arg' for stdarg. */
5721
5722 static tree
5723 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5724 gimple_seq *post_p)
5725 {
5726 HOST_WIDE_INT size, rsize, align;
5727 tree addr, incr;
5728 bool indirect;
5729 tree ptrtype = build_pointer_type (type);
5730
5731 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5732 {
5733 indirect = true;
5734 size = rsize = UNITS_PER_WORD;
5735 align = 0;
5736 }
5737 else
5738 {
5739 indirect = false;
5740 size = int_size_in_bytes (type);
5741 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5742 align = 0;
5743
5744 if (TARGET_ARCH64)
5745 {
5746 /* For SPARC64, objects requiring 16-byte alignment get it. */
5747 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5748 align = 2 * UNITS_PER_WORD;
5749
5750 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5751 are left-justified in their slots. */
5752 if (AGGREGATE_TYPE_P (type))
5753 {
5754 if (size == 0)
5755 size = rsize = UNITS_PER_WORD;
5756 else
5757 size = rsize;
5758 }
5759 }
5760 }
5761
5762 incr = valist;
5763 if (align)
5764 {
5765 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5766 size_int (align - 1));
5767 incr = fold_convert (sizetype, incr);
5768 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5769 size_int (-align));
5770 incr = fold_convert (ptr_type_node, incr);
5771 }
5772
5773 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5774 addr = incr;
5775
5776 if (BYTES_BIG_ENDIAN && size < rsize)
5777 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5778 size_int (rsize - size));
5779
5780 if (indirect)
5781 {
5782 addr = fold_convert (build_pointer_type (ptrtype), addr);
5783 addr = build_va_arg_indirect_ref (addr);
5784 }
5785
5786 /* If the address isn't aligned properly for the type, we need a temporary.
5787 FIXME: This is inefficient; usually we can do this in registers. */
5788 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5789 {
5790 tree tmp = create_tmp_var (type, "va_arg_tmp");
5791 tree dest_addr = build_fold_addr_expr (tmp);
5792 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5793 3, dest_addr, addr, size_int (rsize));
5794 TREE_ADDRESSABLE (tmp) = 1;
5795 gimplify_and_add (copy, pre_p);
5796 addr = dest_addr;
5797 }
5798
5799 else
5800 addr = fold_convert (ptrtype, addr);
5801
5802 incr
5803 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5804 gimplify_assign (valist, incr, post_p);
5805
5806 return build_va_arg_indirect_ref (addr);
5807 }
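
/* Worked example for the BYTES_BIG_ENDIAN adjustment above, assuming
   a 64-bit SPARC target where UNITS_PER_WORD is 8: fetching an int
   (size 4) from an 8-byte va_list slot must skip the first 4 bytes of
   the slot, because big-endian padding places the value in the
   high-addressed half; hence addr = incr + (rsize - size) = incr + 4.  */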
5808 \f
5809 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5810 Specify whether the vector mode is supported by the hardware. */
5811
5812 static bool
5813 sparc_vector_mode_supported_p (enum machine_mode mode)
5814 {
5815 return TARGET_VIS && VECTOR_MODE_P (mode);
5816 }
5817 \f
5818 /* Return the string to output an unconditional branch to LABEL, which is
5819 the operand number of the label.
5820
5821 DEST is the destination insn (i.e. the label), INSN is the source. */
5822
5823 const char *
5824 output_ubranch (rtx dest, int label, rtx insn)
5825 {
5826 static char string[64];
5827 bool v9_form = false;
5828 char *p;
5829
5830 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5831 {
5832 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5833 - INSN_ADDRESSES (INSN_UID (insn)));
5834 /* Leave some instructions for "slop". */
5835 if (delta >= -260000 && delta < 260000)
5836 v9_form = true;
5837 }
5838
5839 if (v9_form)
5840 strcpy (string, "ba%*,pt\t%%xcc, ");
5841 else
5842 strcpy (string, "b%*\t");
5843
5844 p = strchr (string, '\0');
5845 *p++ = '%';
5846 *p++ = 'l';
5847 *p++ = '0' + label;
5848 *p++ = '%';
5849 *p++ = '(';
5850 *p = '\0';
5851
5852 return string;
5853 }
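
/* The string built above is still an assembler template: "%l0" is the
   label operand, "%*" may become ",a" when there is nothing for the
   delay slot, and "%(" emits a trailing nop when the delay slot is
   empty; see the '*' and '(' cases in print_operand below.  */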
5854
5855 /* Return the string to output a conditional branch to LABEL, which is
5856 the operand number of the label. OP is the conditional expression.
5857 XEXP (OP, 0) is assumed to be a condition code register (integer or
5858 floating point) and its mode specifies what kind of comparison we made.
5859
5860 DEST is the destination insn (i.e. the label), INSN is the source.
5861
5862 REVERSED is nonzero if we should reverse the sense of the comparison.
5863
5864 ANNUL is nonzero if we should generate an annulling branch. */
5865
5866 const char *
5867 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5868 rtx insn)
5869 {
5870 static char string[64];
5871 enum rtx_code code = GET_CODE (op);
5872 rtx cc_reg = XEXP (op, 0);
5873 enum machine_mode mode = GET_MODE (cc_reg);
5874 const char *labelno, *branch;
5875 int spaces = 8, far;
5876 char *p;
5877
5878 /* v9 branches are limited to +-1MB. If it is too far away,
5879 change
5880
5881 bne,pt %xcc, .LC30
5882
5883 to
5884
5885 be,pn %xcc, .+12
5886 nop
5887 ba .LC30
5888
5889 and
5890
5891 fbne,a,pn %fcc2, .LC29
5892
5893 to
5894
5895 fbe,pt %fcc2, .+16
5896 nop
5897 ba .LC29 */
5898
5899 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5900 if (reversed ^ far)
5901 {
5902 /* Reversal of FP compares takes care -- an ordered compare
5903 becomes an unordered compare and vice versa. */
5904 if (mode == CCFPmode || mode == CCFPEmode)
5905 code = reverse_condition_maybe_unordered (code);
5906 else
5907 code = reverse_condition (code);
5908 }
5909
5910 /* Start by writing the branch condition. */
5911 if (mode == CCFPmode || mode == CCFPEmode)
5912 {
5913 switch (code)
5914 {
5915 case NE:
5916 branch = "fbne";
5917 break;
5918 case EQ:
5919 branch = "fbe";
5920 break;
5921 case GE:
5922 branch = "fbge";
5923 break;
5924 case GT:
5925 branch = "fbg";
5926 break;
5927 case LE:
5928 branch = "fble";
5929 break;
5930 case LT:
5931 branch = "fbl";
5932 break;
5933 case UNORDERED:
5934 branch = "fbu";
5935 break;
5936 case ORDERED:
5937 branch = "fbo";
5938 break;
5939 case UNGT:
5940 branch = "fbug";
5941 break;
5942 case UNLT:
5943 branch = "fbul";
5944 break;
5945 case UNEQ:
5946 branch = "fbue";
5947 break;
5948 case UNGE:
5949 branch = "fbuge";
5950 break;
5951 case UNLE:
5952 branch = "fbule";
5953 break;
5954 case LTGT:
5955 branch = "fblg";
5956 break;
5957
5958 default:
5959 gcc_unreachable ();
5960 }
5961
5962 /* ??? !v9: FP branches cannot be preceded by another floating point
5963 insn. Because there is currently no concept of pre-delay slots,
5964 we can fix this only by always emitting a nop before a floating
5965 point branch. */
5966
5967 string[0] = '\0';
5968 if (! TARGET_V9)
5969 strcpy (string, "nop\n\t");
5970 strcat (string, branch);
5971 }
5972 else
5973 {
5974 switch (code)
5975 {
5976 case NE:
5977 branch = "bne";
5978 break;
5979 case EQ:
5980 branch = "be";
5981 break;
5982 case GE:
5983 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5984 branch = "bpos";
5985 else
5986 branch = "bge";
5987 break;
5988 case GT:
5989 branch = "bg";
5990 break;
5991 case LE:
5992 branch = "ble";
5993 break;
5994 case LT:
5995 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5996 branch = "bneg";
5997 else
5998 branch = "bl";
5999 break;
6000 case GEU:
6001 branch = "bgeu";
6002 break;
6003 case GTU:
6004 branch = "bgu";
6005 break;
6006 case LEU:
6007 branch = "bleu";
6008 break;
6009 case LTU:
6010 branch = "blu";
6011 break;
6012
6013 default:
6014 gcc_unreachable ();
6015 }
6016 strcpy (string, branch);
6017 }
6018 spaces -= strlen (branch);
6019 p = strchr (string, '\0');
6020
6021 /* Now add the annulling, the label, and a possible nop. */
6022 if (annul && ! far)
6023 {
6024 strcpy (p, ",a");
6025 p += 2;
6026 spaces -= 2;
6027 }
6028
6029 if (TARGET_V9)
6030 {
6031 rtx note;
6032 int v8 = 0;
6033
6034 if (! far && insn && INSN_ADDRESSES_SET_P ())
6035 {
6036 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6037 - INSN_ADDRESSES (INSN_UID (insn)));
6038 /* Leave some instructions for "slop". */
6039 if (delta < -260000 || delta >= 260000)
6040 v8 = 1;
6041 }
6042
6043 if (mode == CCFPmode || mode == CCFPEmode)
6044 {
6045 static char v9_fcc_labelno[] = "%%fccX, ";
6046 /* Set the char indicating the number of the fcc reg to use. */
6047 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6048 labelno = v9_fcc_labelno;
6049 if (v8)
6050 {
6051 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6052 labelno = "";
6053 }
6054 }
6055 else if (mode == CCXmode || mode == CCX_NOOVmode)
6056 {
6057 labelno = "%%xcc, ";
6058 gcc_assert (! v8);
6059 }
6060 else
6061 {
6062 labelno = "%%icc, ";
6063 if (v8)
6064 labelno = "";
6065 }
6066
6067 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6068 {
6069 strcpy (p,
6070 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6071 ? ",pt" : ",pn");
6072 p += 3;
6073 spaces -= 3;
6074 }
6075 }
6076 else
6077 labelno = "";
6078
6079 if (spaces > 0)
6080 *p++ = '\t';
6081 else
6082 *p++ = ' ';
6083 strcpy (p, labelno);
6084 p = strchr (p, '\0');
6085 if (far)
6086 {
6087 strcpy (p, ".+12\n\t nop\n\tb\t");
6088 /* Skip the next insn if requested or
6089 if we know that it will be a nop. */
6090 if (annul || ! final_sequence)
6091 p[3] = '6';
6092 p += 14;
6093 }
6094 *p++ = '%';
6095 *p++ = 'l';
6096 *p++ = label + '0';
6097 *p++ = '%';
6098 *p++ = '#';
6099 *p = '\0';
6100
6101 return string;
6102 }
6103
6104 /* Emit a library call comparison between floating point X and Y.
6105 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6106 Return the new operator to be used in the comparison sequence.
6107
6108 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6109 values as arguments instead of the TFmode registers themselves,
6110 that's why we cannot call emit_float_lib_cmp. */
6111
6112 enum rtx_code
6113 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6114 {
6115 const char *qpfunc;
6116 rtx slot0, slot1, result, tem, tem2;
6117 enum machine_mode mode;
6118 enum rtx_code new_comparison;
6119
6120 switch (comparison)
6121 {
6122 case EQ:
6123 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6124 break;
6125
6126 case NE:
6127 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6128 break;
6129
6130 case GT:
6131 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6132 break;
6133
6134 case GE:
6135 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6136 break;
6137
6138 case LT:
6139 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6140 break;
6141
6142 case LE:
6143 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6144 break;
6145
6146 case ORDERED:
6147 case UNORDERED:
6148 case UNGT:
6149 case UNLT:
6150 case UNEQ:
6151 case UNGE:
6152 case UNLE:
6153 case LTGT:
6154 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6155 break;
6156
6157 default:
6158 gcc_unreachable ();
6159 }
6160
6161 if (TARGET_ARCH64)
6162 {
6163 if (MEM_P (x))
6164 slot0 = x;
6165 else
6166 {
6167 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6168 emit_move_insn (slot0, x);
6169 }
6170
6171 if (MEM_P (y))
6172 slot1 = y;
6173 else
6174 {
6175 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6176 emit_move_insn (slot1, y);
6177 }
6178
6179 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6180 DImode, 2,
6181 XEXP (slot0, 0), Pmode,
6182 XEXP (slot1, 0), Pmode);
6183 mode = DImode;
6184 }
6185 else
6186 {
6187 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6188 SImode, 2,
6189 x, TFmode, y, TFmode);
6190 mode = SImode;
6191 }
6192
6193
6194 /* Immediately move the result of the libcall into a pseudo
6195 register so reload doesn't clobber the value if it needs
6196 the return register for a spill reg. */
6197 result = gen_reg_rtx (mode);
6198 emit_move_insn (result, hard_libcall_value (mode));
6199
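/* The comparison routines follow the fcmp condition encoding and are
   assumed to return 0 for "equal", 1 for "less", 2 for "greater" and
   3 for "unordered"; the tests below recover each IEEE predicate from
   that value, e.g. UNLT is "result & 1" (less or unordered) and UNEQ
   is "((result + 1) & 2) == 0" (equal or unordered).  */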
6200 switch (comparison)
6201 {
6202 default:
6203 new_comparison = NE;
6204 emit_cmp_insn (result, const0_rtx, new_comparison, NULL_RTX, mode, 0);
6205 break;
6206 case ORDERED:
6207 case UNORDERED:
6208 new_comparison = (comparison == UNORDERED ? EQ : NE);
6209 emit_cmp_insn (result, GEN_INT (3), new_comparison, NULL_RTX, mode, 0);
6210 break;
6211 case UNGT:
6212 case UNGE:
6213 new_comparison = (comparison == UNGT ? GT : NE);
6214 emit_cmp_insn (result, const1_rtx, new_comparison, NULL_RTX, mode, 0);
6215 break;
6216 case UNLE:
6217 new_comparison = NE;
6218 emit_cmp_insn (result, const2_rtx, new_comparison, NULL_RTX, mode, 0);
6219 break;
6220 case UNLT:
6221 tem = gen_reg_rtx (mode);
6222 if (TARGET_ARCH32)
6223 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6224 else
6225 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6226 new_comparison = NE;
6227 emit_cmp_insn (tem, const0_rtx, new_comparison, NULL_RTX, mode, 0);
6228 break;
6229 case UNEQ:
6230 case LTGT:
6231 tem = gen_reg_rtx (mode);
6232 if (TARGET_ARCH32)
6233 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6234 else
6235 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6236 tem2 = gen_reg_rtx (mode);
6237 if (TARGET_ARCH32)
6238 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6239 else
6240 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6241 new_comparison = (comparison == UNEQ ? EQ : NE);
6242 emit_cmp_insn (tem2, const0_rtx, new_comparison, NULL_RTX, mode, 0);
6243 break;
6244 }
6245
6246 return new_comparison;
6247 }
6248
6249 /* Generate an unsigned DImode to FP conversion. This is the same code
6250 optabs would emit if we didn't have TFmode patterns. */
6251
6252 void
6253 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6254 {
6255 rtx neglab, donelab, i0, i1, f0, in, out;
6256
6257 out = operands[0];
6258 in = force_reg (DImode, operands[1]);
6259 neglab = gen_label_rtx ();
6260 donelab = gen_label_rtx ();
6261 i0 = gen_reg_rtx (DImode);
6262 i1 = gen_reg_rtx (DImode);
6263 f0 = gen_reg_rtx (mode);
6264
6265 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6266
6267 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6268 emit_jump_insn (gen_jump (donelab));
6269 emit_barrier ();
6270
6271 emit_label (neglab);
6272
6273 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6274 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6275 emit_insn (gen_iordi3 (i0, i0, i1));
6276 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6277 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6278
6279 emit_label (donelab);
6280 }
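
/* A sketch of the negative path above: when bit 63 of IN is set, the
   value is halved with the lost low bit ORed back in (rounding to
   odd), converted, and then doubled:

	i0 = (in >> 1) | (in & 1);
	out = (FP) i0 + (FP) i0;

   The sticky low bit keeps the final result correctly rounded even
   though i0 carries only 63 significant bits.  */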
6281
6282 /* Generate an FP to unsigned DImode conversion. This is the same code
6283 optabs would emit if we didn't have TFmode patterns. */
6284
6285 void
6286 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6287 {
6288 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6289
6290 out = operands[0];
6291 in = force_reg (mode, operands[1]);
6292 neglab = gen_label_rtx ();
6293 donelab = gen_label_rtx ();
6294 i0 = gen_reg_rtx (DImode);
6295 i1 = gen_reg_rtx (DImode);
6296 limit = gen_reg_rtx (mode);
6297 f0 = gen_reg_rtx (mode);
6298
6299 emit_move_insn (limit,
6300 CONST_DOUBLE_FROM_REAL_VALUE (
6301 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6302 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6303
6304 emit_insn (gen_rtx_SET (VOIDmode,
6305 out,
6306 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6307 emit_jump_insn (gen_jump (donelab));
6308 emit_barrier ();
6309
6310 emit_label (neglab);
6311
6312 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6313 emit_insn (gen_rtx_SET (VOIDmode,
6314 i0,
6315 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6316 emit_insn (gen_movdi (i1, const1_rtx));
6317 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6318 emit_insn (gen_xordi3 (out, i0, i1));
6319
6320 emit_label (donelab);
6321 }
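
/* Likewise for the large-value path above:

	out = (DImode) (in - 2^63) ^ (1 << 63);

   inputs of 2^63 and up are biased down into signed range before the
   signed fix, and the top bit is restored with the final xor.  */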
6322
6323 /* Return the string to output a conditional branch to LABEL, testing
6324 register REG. LABEL is the operand number of the label; REG is the
6325 operand number of the reg. OP is the conditional expression. The mode
6326 of REG says what kind of comparison we made.
6327
6328 DEST is the destination insn (i.e. the label), INSN is the source.
6329
6330 REVERSED is nonzero if we should reverse the sense of the comparison.
6331
6332 ANNUL is nonzero if we should generate an annulling branch. */
6333
6334 const char *
6335 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6336 int annul, rtx insn)
6337 {
6338 static char string[64];
6339 enum rtx_code code = GET_CODE (op);
6340 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6341 rtx note;
6342 int far;
6343 char *p;
6344
6345 /* Branches on a register are limited to +-128KB. If the target is too far away,
6346 change
6347
6348 brnz,pt %g1, .LC30
6349
6350 to
6351
6352 brz,pn %g1, .+12
6353 nop
6354 ba,pt %xcc, .LC30
6355
6356 and
6357
6358 brgez,a,pn %o1, .LC29
6359
6360 to
6361
6362 brlz,pt %o1, .+16
6363 nop
6364 ba,pt %xcc, .LC29 */
6365
6366 far = get_attr_length (insn) >= 3;
6367
6368 /* Register branches are integer comparisons, so we can just reverse the code. */
6369 if (reversed ^ far)
6370 code = reverse_condition (code);
6371
6372 /* Only 64 bit versions of these instructions exist. */
6373 gcc_assert (mode == DImode);
6374
6375 /* Start by writing the branch condition. */
6376
6377 switch (code)
6378 {
6379 case NE:
6380 strcpy (string, "brnz");
6381 break;
6382
6383 case EQ:
6384 strcpy (string, "brz");
6385 break;
6386
6387 case GE:
6388 strcpy (string, "brgez");
6389 break;
6390
6391 case LT:
6392 strcpy (string, "brlz");
6393 break;
6394
6395 case LE:
6396 strcpy (string, "brlez");
6397 break;
6398
6399 case GT:
6400 strcpy (string, "brgz");
6401 break;
6402
6403 default:
6404 gcc_unreachable ();
6405 }
6406
6407 p = strchr (string, '\0');
6408
6409 /* Now add the annulling, reg, label, and nop. */
6410 if (annul && ! far)
6411 {
6412 strcpy (p, ",a");
6413 p += 2;
6414 }
6415
6416 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6417 {
6418 strcpy (p,
6419 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6420 ? ",pt" : ",pn");
6421 p += 3;
6422 }
6423
6424 *p = p < string + 8 ? '\t' : ' ';
6425 p++;
6426 *p++ = '%';
6427 *p++ = '0' + reg;
6428 *p++ = ',';
6429 *p++ = ' ';
6430 if (far)
6431 {
6432 int veryfar = 1, delta;
6433
6434 if (INSN_ADDRESSES_SET_P ())
6435 {
6436 delta = (INSN_ADDRESSES (INSN_UID (dest))
6437 - INSN_ADDRESSES (INSN_UID (insn)));
6438 /* Leave some instructions for "slop". */
6439 if (delta >= -260000 && delta < 260000)
6440 veryfar = 0;
6441 }
6442
6443 strcpy (p, ".+12\n\t nop\n\t");
6444 /* Skip the next insn if requested or
6445 if we know that it will be a nop. */
6446 if (annul || ! final_sequence)
6447 p[3] = '6';
6448 p += 12;
6449 if (veryfar)
6450 {
6451 strcpy (p, "b\t");
6452 p += 2;
6453 }
6454 else
6455 {
6456 strcpy (p, "ba,pt\t%%xcc, ");
6457 p += 13;
6458 }
6459 }
6460 *p++ = '%';
6461 *p++ = 'l';
6462 *p++ = '0' + label;
6463 *p++ = '%';
6464 *p++ = '#';
6465 *p = '\0';
6466
6467 return string;
6468 }
6469
6470 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6471 Such instructions cannot be used in the delay slot of a return insn on V9.
6472 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
6473 counterparts. */
6474
6475 static int
6476 epilogue_renumber (register rtx *where, int test)
6477 {
6478 register const char *fmt;
6479 register int i;
6480 register enum rtx_code code;
6481
6482 if (*where == 0)
6483 return 0;
6484
6485 code = GET_CODE (*where);
6486
6487 switch (code)
6488 {
6489 case REG:
6490 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6491 return 1;
6492 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6493 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
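/* Fall through: any other register is safe to keep in the delay slot.  */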
6494 case SCRATCH:
6495 case CC0:
6496 case PC:
6497 case CONST_INT:
6498 case CONST_DOUBLE:
6499 return 0;
6500
6501 /* Do not replace the frame pointer with the stack pointer because
6502 it can cause the delayed instruction to load below the stack.
6503 This occurs when instructions like:
6504
6505 (set (reg/i:SI 24 %i0)
6506 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6507 (const_int -20 [0xffffffec])) 0))
6508
6509 are in the return delay slot. */
6510 case PLUS:
6511 if (GET_CODE (XEXP (*where, 0)) == REG
6512 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6513 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6514 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6515 return 1;
6516 break;
6517
6518 case MEM:
6519 if (SPARC_STACK_BIAS
6520 && GET_CODE (XEXP (*where, 0)) == REG
6521 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6522 return 1;
6523 break;
6524
6525 default:
6526 break;
6527 }
6528
6529 fmt = GET_RTX_FORMAT (code);
6530
6531 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6532 {
6533 if (fmt[i] == 'E')
6534 {
6535 register int j;
6536 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6537 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6538 return 1;
6539 }
6540 else if (fmt[i] == 'e'
6541 && epilogue_renumber (&(XEXP (*where, i)), test))
6542 return 1;
6543 }
6544 return 0;
6545 }
6546 \f
6547 /* Leaf functions and non-leaf functions have different needs. */
6548
6549 static const int
6550 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6551
6552 static const int
6553 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6554
6555 static const int *const reg_alloc_orders[] = {
6556 reg_leaf_alloc_order,
6557 reg_nonleaf_alloc_order};
6558
6559 void
6560 order_regs_for_local_alloc (void)
6561 {
6562 static int last_order_nonleaf = 1;
6563
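/* Hard register 15 is %o7, which every "call" clobbers with its return
   address, so it is ever-live exactly when the function is not a leaf.  */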
6564 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6565 {
6566 last_order_nonleaf = !last_order_nonleaf;
6567 memcpy ((char *) reg_alloc_order,
6568 (const char *) reg_alloc_orders[last_order_nonleaf],
6569 FIRST_PSEUDO_REGISTER * sizeof (int));
6570 }
6571 }
6572 \f
6573 /* Return 1 if REG and MEM are legitimate enough to allow the various
6574 mem<-->reg splits to be run. */
6575
6576 int
6577 sparc_splitdi_legitimate (rtx reg, rtx mem)
6578 {
6579 /* Punt if we are here by mistake. */
6580 gcc_assert (reload_completed);
6581
6582 /* We must have an offsettable memory reference. */
6583 if (! offsettable_memref_p (mem))
6584 return 0;
6585
6586 /* If we have legitimate args for ldd/std, we do not want
6587 the split to happen. */
6588 if ((REGNO (reg) % 2) == 0
6589 && mem_min_alignment (mem, 8))
6590 return 0;
6591
6592 /* Success. */
6593 return 1;
6594 }
6595
6596 /* Return 1 if x and y are some kind of REG and they refer to
6597 different hard registers. This test is guaranteed to be
6598 run after reload. */
6599
6600 int
6601 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6602 {
6603 if (GET_CODE (x) != REG)
6604 return 0;
6605 if (GET_CODE (y) != REG)
6606 return 0;
6607 if (REGNO (x) == REGNO (y))
6608 return 0;
6609 return 1;
6610 }
6611
6612 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6613 This makes them candidates for using ldd and std insns.
6614
6615 Note reg1 and reg2 *must* be hard registers. */
6616
6617 int
6618 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6619 {
6620 /* We might have been passed a SUBREG. */
6621 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6622 return 0;
6623
6624 if (REGNO (reg1) % 2 != 0)
6625 return 0;
6626
6627 /* Integer ldd is deprecated in SPARC V9. */
6628 if (TARGET_V9 && REGNO (reg1) < 32)
6629 return 0;
6630
6631 return (REGNO (reg1) == REGNO (reg2) - 1);
6632 }
6633
6634 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6635 an ldd or std insn.
6636
6637 This can only happen when addr1 and addr2, the addresses in mem1
6638 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6639 addr1 must also be aligned on a 64-bit boundary.
6640
6641 Also, if dependent_reg_rtx is not null, it must not be used to
6642 compute the address for mem1, i.e. we cannot optimize a sequence
6643 like:
6644 ld [%o0], %o0
6645 ld [%o0 + 4], %o1
6646 to
6647 ldd [%o0], %o0
6648 nor:
6649 ld [%g3 + 4], %g3
6650 ld [%g3], %g2
6651 to
6652 ldd [%g3], %g2
6653
6654 But, note that the transformation from:
6655 ld [%g2 + 4], %g3
6656 ld [%g2], %g2
6657 to
6658 ldd [%g2], %g2
6659 is perfectly fine. Thus, the peephole2 patterns always pass us
6660 the destination register of the first load, never the second one.
6661
6662 For stores we don't have a similar problem, so dependent_reg_rtx is
6663 NULL_RTX. */
6664
6665 int
6666 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6667 {
6668 rtx addr1, addr2;
6669 unsigned int reg1;
6670 HOST_WIDE_INT offset1;
6671
6672 /* The mems cannot be volatile. */
6673 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6674 return 0;
6675
6676 /* MEM1 should be aligned on a 64-bit boundary. */
6677 if (MEM_ALIGN (mem1) < 64)
6678 return 0;
6679
6680 addr1 = XEXP (mem1, 0);
6681 addr2 = XEXP (mem2, 0);
6682
6683 /* Extract a register number and offset (if used) from the first addr. */
6684 if (GET_CODE (addr1) == PLUS)
6685 {
6686 /* If not a REG, return zero. */
6687 if (GET_CODE (XEXP (addr1, 0)) != REG)
6688 return 0;
6689 else
6690 {
6691 reg1 = REGNO (XEXP (addr1, 0));
6692 /* The offset must be constant! */
6693 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6694 return 0;
6695 offset1 = INTVAL (XEXP (addr1, 1));
6696 }
6697 }
6698 else if (GET_CODE (addr1) != REG)
6699 return 0;
6700 else
6701 {
6702 reg1 = REGNO (addr1);
6703 /* This was a simple (mem (reg)) expression. Offset is 0. */
6704 offset1 = 0;
6705 }
6706
6707 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6708 if (GET_CODE (addr2) != PLUS)
6709 return 0;
6710
6711 if (GET_CODE (XEXP (addr2, 0)) != REG
6712 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6713 return 0;
6714
6715 if (reg1 != REGNO (XEXP (addr2, 0)))
6716 return 0;
6717
6718 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6719 return 0;
6720
6721 /* The first offset must be evenly divisible by 8 to ensure the
6722 address is 64 bit aligned. */
6723 if (offset1 % 8 != 0)
6724 return 0;
6725
6726 /* The offset for the second addr must be 4 more than the first addr. */
6727 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6728 return 0;
6729
6730 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6731 instructions. */
6732 return 1;
6733 }
6734
6735 /* Return 1 if reg is a pseudo, or is the first register in
6736 a hard register pair. This makes it suitable for use in
6737 ldd and std insns. */
6738
6739 int
6740 register_ok_for_ldd (rtx reg)
6741 {
6742 /* We might have been passed a SUBREG. */
6743 if (!REG_P (reg))
6744 return 0;
6745
6746 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6747 return (REGNO (reg) % 2 == 0);
6748
6749 return 1;
6750 }
6751
6752 /* Return 1 if OP is a memory whose address is known to be
6753 aligned to 8-byte boundary, or a pseudo during reload.
6754 This makes it suitable for use in ldd and std insns. */
6755
6756 int
6757 memory_ok_for_ldd (rtx op)
6758 {
6759 if (MEM_P (op))
6760 {
6761 /* In 64-bit mode, we assume that the address is word-aligned. */
6762 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6763 return 0;
6764
6765 if ((reload_in_progress || reload_completed)
6766 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6767 return 0;
6768 }
6769 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6770 {
6771 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6772 return 0;
6773 }
6774 else
6775 return 0;
6776
6777 return 1;
6778 }
6779 \f
6780 /* Print operand X (an rtx) in assembler syntax to file FILE.
6781 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6782 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6783
6784 void
6785 print_operand (FILE *file, rtx x, int code)
6786 {
6787 switch (code)
6788 {
6789 case '#':
6790 /* Output an insn in a delay slot. */
6791 if (final_sequence)
6792 sparc_indent_opcode = 1;
6793 else
6794 fputs ("\n\t nop", file);
6795 return;
6796 case '*':
6797 /* Output an annul flag if there's nothing for the delay slot and we
6798 are optimizing. This is always used with '(' below.
6799 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6800 this is a dbx bug. So, we only do this when optimizing.
6801 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6802 Always emit a nop in case the next instruction is a branch. */
6803 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6804 fputs (",a", file);
6805 return;
6806 case '(':
6807 /* Output a 'nop' if there's nothing for the delay slot and we are
6808 not optimizing. This is always used with '*' above. */
6809 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6810 fputs ("\n\t nop", file);
6811 else if (final_sequence)
6812 sparc_indent_opcode = 1;
6813 return;
6814 case ')':
6815 /* Output the right displacement from the saved PC on function return.
6816 The caller may have placed an "unimp" insn immediately after the call
6817 so we have to account for it. This insn is used in the 32-bit ABI
6818 when calling a function that returns a non zero-sized structure. The
6819 64-bit ABI doesn't have it. Be careful to have this test be the same
6820 as that used on the call. The exception here is that when
6821 sparc_std_struct_return is enabled, the psABI is followed exactly
6822 and the adjustment is made by the code in sparc_struct_value_rtx.
6823 The call emitted is the same when sparc_std_struct_return is
6824 present. */
6825 if (! TARGET_ARCH64
6826 && cfun->returns_struct
6827 && ! sparc_std_struct_return
6828 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6829 == INTEGER_CST)
6830 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6831 fputs ("12", file);
6832 else
6833 fputc ('8', file);
6834 return;
6835 case '_':
6836 /* Output the Embedded Medium/Anywhere code model base register. */
6837 fputs (EMBMEDANY_BASE_REG, file);
6838 return;
6839 case '&':
6840 /* Print some local dynamic TLS name. */
6841 assemble_name (file, get_some_local_dynamic_name ());
6842 return;
6843
6844 case 'Y':
6845 /* Adjust the operand to take into account a RESTORE operation. */
6846 if (GET_CODE (x) == CONST_INT)
6847 break;
6848 else if (GET_CODE (x) != REG)
6849 output_operand_lossage ("invalid %%Y operand");
6850 else if (REGNO (x) < 8)
6851 fputs (reg_names[REGNO (x)], file);
6852 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6853 fputs (reg_names[REGNO (x)-16], file);
6854 else
6855 output_operand_lossage ("invalid %%Y operand");
6856 return;
6857 case 'L':
6858 /* Print out the low order register name of a register pair. */
6859 if (WORDS_BIG_ENDIAN)
6860 fputs (reg_names[REGNO (x)+1], file);
6861 else
6862 fputs (reg_names[REGNO (x)], file);
6863 return;
6864 case 'H':
6865 /* Print out the high order register name of a register pair. */
6866 if (WORDS_BIG_ENDIAN)
6867 fputs (reg_names[REGNO (x)], file);
6868 else
6869 fputs (reg_names[REGNO (x)+1], file);
6870 return;
6871 case 'R':
6872 /* Print out the second register name of a register pair or quad.
6873 I.e., R (%o0) => %o1. */
6874 fputs (reg_names[REGNO (x)+1], file);
6875 return;
6876 case 'S':
6877 /* Print out the third register name of a register quad.
6878 I.e., S (%o0) => %o2. */
6879 fputs (reg_names[REGNO (x)+2], file);
6880 return;
6881 case 'T':
6882 /* Print out the fourth register name of a register quad.
6883 I.e., T (%o0) => %o3. */
6884 fputs (reg_names[REGNO (x)+3], file);
6885 return;
6886 case 'x':
6887 /* Print a condition code register. */
6888 if (REGNO (x) == SPARC_ICC_REG)
6889 {
6890 /* We don't handle CC[X]_NOOVmode because they're not supposed
6891 to occur here. */
6892 if (GET_MODE (x) == CCmode)
6893 fputs ("%icc", file);
6894 else if (GET_MODE (x) == CCXmode)
6895 fputs ("%xcc", file);
6896 else
6897 gcc_unreachable ();
6898 }
6899 else
6900 /* %fccN register */
6901 fputs (reg_names[REGNO (x)], file);
6902 return;
6903 case 'm':
6904 /* Print the operand's address only. */
6905 output_address (XEXP (x, 0));
6906 return;
6907 case 'r':
6908 /* In this case we need a register. Use %g0 if the
6909 operand is const0_rtx. */
6910 if (x == const0_rtx
6911 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6912 {
6913 fputs ("%g0", file);
6914 return;
6915 }
6916 else
6917 break;
6918
6919 case 'A':
6920 switch (GET_CODE (x))
6921 {
6922 case IOR: fputs ("or", file); break;
6923 case AND: fputs ("and", file); break;
6924 case XOR: fputs ("xor", file); break;
6925 default: output_operand_lossage ("invalid %%A operand");
6926 }
6927 return;
6928
6929 case 'B':
6930 switch (GET_CODE (x))
6931 {
6932 case IOR: fputs ("orn", file); break;
6933 case AND: fputs ("andn", file); break;
6934 case XOR: fputs ("xnor", file); break;
6935 default: output_operand_lossage ("invalid %%B operand");
6936 }
6937 return;
6938
6939 /* These are used by the conditional move instructions. */
6940 case 'c' :
6941 case 'C':
6942 {
6943 enum rtx_code rc = GET_CODE (x);
6944
6945 if (code == 'c')
6946 {
6947 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6948 if (mode == CCFPmode || mode == CCFPEmode)
6949 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6950 else
6951 rc = reverse_condition (GET_CODE (x));
6952 }
6953 switch (rc)
6954 {
6955 case NE: fputs ("ne", file); break;
6956 case EQ: fputs ("e", file); break;
6957 case GE: fputs ("ge", file); break;
6958 case GT: fputs ("g", file); break;
6959 case LE: fputs ("le", file); break;
6960 case LT: fputs ("l", file); break;
6961 case GEU: fputs ("geu", file); break;
6962 case GTU: fputs ("gu", file); break;
6963 case LEU: fputs ("leu", file); break;
6964 case LTU: fputs ("lu", file); break;
6965 case LTGT: fputs ("lg", file); break;
6966 case UNORDERED: fputs ("u", file); break;
6967 case ORDERED: fputs ("o", file); break;
6968 case UNLT: fputs ("ul", file); break;
6969 case UNLE: fputs ("ule", file); break;
6970 case UNGT: fputs ("ug", file); break;
6971 case UNGE: fputs ("uge", file); break;
6972 case UNEQ: fputs ("ue", file); break;
6973 default: output_operand_lossage (code == 'c'
6974 ? "invalid %%c operand"
6975 : "invalid %%C operand");
6976 }
6977 return;
6978 }
6979
6980 /* These are used by the movr instruction pattern. */
6981 case 'd':
6982 case 'D':
6983 {
6984 enum rtx_code rc = (code == 'd'
6985 ? reverse_condition (GET_CODE (x))
6986 : GET_CODE (x));
6987 switch (rc)
6988 {
6989 case NE: fputs ("ne", file); break;
6990 case EQ: fputs ("e", file); break;
6991 case GE: fputs ("gez", file); break;
6992 case LT: fputs ("lz", file); break;
6993 case LE: fputs ("lez", file); break;
6994 case GT: fputs ("gz", file); break;
6995 default: output_operand_lossage (code == 'd'
6996 ? "invalid %%d operand"
6997 : "invalid %%D operand");
6998 }
6999 return;
7000 }
7001
7002 case 'b':
7003 {
7004 /* Print a sign-extended character. */
7005 int i = trunc_int_for_mode (INTVAL (x), QImode);
7006 fprintf (file, "%d", i);
7007 return;
7008 }
7009
7010 case 'f':
7011 /* Operand must be a MEM; write its address. */
7012 if (GET_CODE (x) != MEM)
7013 output_operand_lossage ("invalid %%f operand");
7014 output_address (XEXP (x, 0));
7015 return;
7016
7017 case 's':
7018 {
7019 /* Print a sign-extended 32-bit value. */
7020 HOST_WIDE_INT i;
7021 if (GET_CODE (x) == CONST_INT)
7022 i = INTVAL (x);
7023 else if (GET_CODE (x) == CONST_DOUBLE)
7024 i = CONST_DOUBLE_LOW (x);
7025 else
7026 {
7027 output_operand_lossage ("invalid %%s operand");
7028 return;
7029 }
7030 i = trunc_int_for_mode (i, SImode);
7031 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7032 return;
7033 }
7034
7035 case 0:
7036 /* Do nothing special. */
7037 break;
7038
7039 default:
7040 /* Undocumented flag. */
7041 output_operand_lossage ("invalid operand output code");
7042 }
7043
7044 if (GET_CODE (x) == REG)
7045 fputs (reg_names[REGNO (x)], file);
7046 else if (GET_CODE (x) == MEM)
7047 {
7048 fputc ('[', file);
7049 /* Poor Sun assembler doesn't understand absolute addressing. */
7050 if (CONSTANT_P (XEXP (x, 0)))
7051 fputs ("%g0+", file);
7052 output_address (XEXP (x, 0));
7053 fputc (']', file);
7054 }
7055 else if (GET_CODE (x) == HIGH)
7056 {
7057 fputs ("%hi(", file);
7058 output_addr_const (file, XEXP (x, 0));
7059 fputc (')', file);
7060 }
7061 else if (GET_CODE (x) == LO_SUM)
7062 {
7063 print_operand (file, XEXP (x, 0), 0);
7064 if (TARGET_CM_MEDMID)
7065 fputs ("+%l44(", file);
7066 else
7067 fputs ("+%lo(", file);
7068 output_addr_const (file, XEXP (x, 1));
7069 fputc (')', file);
7070 }
7071 else if (GET_CODE (x) == CONST_DOUBLE
7072 && (GET_MODE (x) == VOIDmode
7073 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7074 {
7075 if (CONST_DOUBLE_HIGH (x) == 0)
7076 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7077 else if (CONST_DOUBLE_HIGH (x) == -1
7078 && CONST_DOUBLE_LOW (x) < 0)
7079 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7080 else
7081 output_operand_lossage ("long long constant not a valid immediate operand");
7082 }
7083 else if (GET_CODE (x) == CONST_DOUBLE)
7084 output_operand_lossage ("floating point constant not a valid immediate operand");
7085 else { output_addr_const (file, x); }
7086 }
7087 \f
7088 /* Target hook for assembling integer objects. The sparc version has
7089 special handling for aligned DI-mode objects. */
7090
7091 static bool
7092 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7093 {
7094 /* ??? We only output .xword's for symbols and only then in environments
7095 where the assembler can handle them. */
7096 if (aligned_p && size == 8
7097 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7098 {
7099 if (TARGET_V9)
7100 {
7101 assemble_integer_with_op ("\t.xword\t", x);
7102 return true;
7103 }
7104 else
7105 {
7106 assemble_aligned_integer (4, const0_rtx);
7107 assemble_aligned_integer (4, x);
7108 return true;
7109 }
7110 }
7111 return default_assemble_integer (x, size, aligned_p);
7112 }
7113 \f
7114 /* Return the value of a code used in the .proc pseudo-op that says
7115 what kind of result this function returns. For non-C types, we pick
7116 the closest C type. */
7117
7118 #ifndef SHORT_TYPE_SIZE
7119 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7120 #endif
7121
7122 #ifndef INT_TYPE_SIZE
7123 #define INT_TYPE_SIZE BITS_PER_WORD
7124 #endif
7125
7126 #ifndef LONG_TYPE_SIZE
7127 #define LONG_TYPE_SIZE BITS_PER_WORD
7128 #endif
7129
7130 #ifndef LONG_LONG_TYPE_SIZE
7131 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7132 #endif
7133
7134 #ifndef FLOAT_TYPE_SIZE
7135 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7136 #endif
7137
7138 #ifndef DOUBLE_TYPE_SIZE
7139 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7140 #endif
7141
7142 #ifndef LONG_DOUBLE_TYPE_SIZE
7143 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7144 #endif
7145
7146 unsigned long
7147 sparc_type_code (register tree type)
7148 {
7149 register unsigned long qualifiers = 0;
7150 register unsigned shift;
7151
7152 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7153 setting more, since some assemblers will give an error for this. Also,
7154 we must be careful to avoid shifts of 32 bits or more to avoid getting
7155 unpredictable results. */
7156
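/* A worked example (illustrative, not from the .proc documentation):
   for the C type "int **" the loop ORs in the pointer code (1) at
   shifts 6 and 8 and then reaches the base type, so the result is
   (1 << 6) | (1 << 8) | 4 = 0x144, 4 being the code for a signed
   int-sized integer.  */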
7157 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7158 {
7159 switch (TREE_CODE (type))
7160 {
7161 case ERROR_MARK:
7162 return qualifiers;
7163
7164 case ARRAY_TYPE:
7165 qualifiers |= (3 << shift);
7166 break;
7167
7168 case FUNCTION_TYPE:
7169 case METHOD_TYPE:
7170 qualifiers |= (2 << shift);
7171 break;
7172
7173 case POINTER_TYPE:
7174 case REFERENCE_TYPE:
7175 case OFFSET_TYPE:
7176 qualifiers |= (1 << shift);
7177 break;
7178
7179 case RECORD_TYPE:
7180 return (qualifiers | 8);
7181
7182 case UNION_TYPE:
7183 case QUAL_UNION_TYPE:
7184 return (qualifiers | 9);
7185
7186 case ENUMERAL_TYPE:
7187 return (qualifiers | 10);
7188
7189 case VOID_TYPE:
7190 return (qualifiers | 16);
7191
7192 case INTEGER_TYPE:
7193 /* If this is a range type, consider it to be the underlying
7194 type. */
7195 if (TREE_TYPE (type) != 0)
7196 break;
7197
7198 /* Carefully distinguish all the standard types of C,
7199 without messing up if the language is not C. We do this by
7200 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7201 look at both the names and the above fields, but that's redundant.
7202 Any type whose size is between two C types will be considered
7203 to be the wider of the two types. Also, we do not have a
7204 special code to use for "long long", so anything wider than
7205 long is treated the same. Note that we can't distinguish
7206 between "int" and "long" in this code if they are the same
7207 size, but that's fine, since neither can the assembler. */
7208
7209 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7210 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7211
7212 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7213 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7214
7215 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7216 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7217
7218 else
7219 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7220
7221 case REAL_TYPE:
7222 /* If this is a range type, consider it to be the underlying
7223 type. */
7224 if (TREE_TYPE (type) != 0)
7225 break;
7226
7227 /* Carefully distinguish all the standard types of C,
7228 without messing up if the language is not C. */
7229
7230 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7231 return (qualifiers | 6);
7232
7233 else
7234 return (qualifiers | 7);
7235
7236 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7237 /* ??? We need to distinguish between double and float complex types,
7238 but I don't know how yet because I can't reach this code from
7239 existing front-ends. */
7240 return (qualifiers | 7); /* Who knows? */
7241
7242 case VECTOR_TYPE:
7243 case BOOLEAN_TYPE: /* Boolean truth value type. */
7244 case LANG_TYPE: /* ? */
7245 return qualifiers;
7246
7247 default:
7248 gcc_unreachable (); /* Not a type! */
7249 }
7250 }
7251
7252 return qualifiers;
7253 }
7254 \f
7255 /* Nested function support. */
7256
7257 /* Emit RTL insns to initialize the variable parts of a trampoline.
7258 FNADDR is an RTX for the address of the function's pure code.
7259 CXT is an RTX for the static chain value for the function.
7260
7261 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7262 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7263 (to store insns). This is a bit excessive. Perhaps a different
7264 mechanism would be better here.
7265
7266 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7267
7268 void
7269 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7270 {
7271 /* SPARC 32-bit trampoline:
7272
7273 sethi %hi(fn), %g1
7274 sethi %hi(static), %g2
7275 jmp %g1+%lo(fn)
7276 or %g2, %lo(static), %g2
7277
7278 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7279 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7280 */
7281
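/* Each word stored below is an instruction template OR-ed with a field
   of the target address; e.g. the first store builds

	0x03000000 | (fnaddr >> 10)	==>	sethi %hi(fn), %g1

   since 0x03000000 is "sethi 0, %g1" and %hi() is the upper 22 bits.  */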
7282 emit_move_insn
7283 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7284 expand_binop (SImode, ior_optab,
7285 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7286 size_int (10), 0, 1),
7287 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7288 NULL_RTX, 1, OPTAB_DIRECT));
7289
7290 emit_move_insn
7291 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7292 expand_binop (SImode, ior_optab,
7293 expand_shift (RSHIFT_EXPR, SImode, cxt,
7294 size_int (10), 0, 1),
7295 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7296 NULL_RTX, 1, OPTAB_DIRECT));
7297
7298 emit_move_insn
7299 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7300 expand_binop (SImode, ior_optab,
7301 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7302 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7303 NULL_RTX, 1, OPTAB_DIRECT));
7304
7305 emit_move_insn
7306 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7307 expand_binop (SImode, ior_optab,
7308 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7309 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7310 NULL_RTX, 1, OPTAB_DIRECT));
7311
7312 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7313 aligned on a 16 byte boundary so one flush clears it all. */
7314 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7315 if (sparc_cpu != PROCESSOR_ULTRASPARC
7316 && sparc_cpu != PROCESSOR_ULTRASPARC3
7317 && sparc_cpu != PROCESSOR_NIAGARA
7318 && sparc_cpu != PROCESSOR_NIAGARA2)
7319 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7320 plus_constant (tramp, 8)))));
7321
7322 /* Call __enable_execute_stack after writing onto the stack to make sure
7323 the stack address is accessible. */
7324 #ifdef ENABLE_EXECUTE_STACK
7325 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7326 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7327 #endif
7328
7329 }
7330
7331 /* The 64-bit version is simpler because it makes more sense to load the
7332 values as "immediate" data out of the trampoline. It's also easier since
7333 we can read the PC without clobbering a register. */
7334
7335 void
7336 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7337 {
7338 /* SPARC 64-bit trampoline:
7339
7340 rd %pc, %g1
7341 ldx [%g1+24], %g5
7342 jmp %g5
7343 ldx [%g1+16], %g5
7344 +16 bytes data
7345 */
7346
7347 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7348 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7349 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7350 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7351 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7352 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7353 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7354 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7355 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7356 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7357 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7358
7359 if (sparc_cpu != PROCESSOR_ULTRASPARC
7360 && sparc_cpu != PROCESSOR_ULTRASPARC3
7361 && sparc_cpu != PROCESSOR_NIAGARA
7362 && sparc_cpu != PROCESSOR_NIAGARA2)
7363 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7364
7365 /* Call __enable_execute_stack after writing onto the stack to make sure
7366 the stack address is accessible. */
7367 #ifdef ENABLE_EXECUTE_STACK
7368 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7369 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7370 #endif
7371 }
7372 \f
7373 /* Adjust the cost of a scheduling dependency. Return the new cost of
7374 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7375
7376 static int
7377 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7378 {
7379 enum attr_type insn_type;
7380
7381 if (! recog_memoized (insn))
7382 return 0;
7383
7384 insn_type = get_attr_type (insn);
7385
7386 if (REG_NOTE_KIND (link) == 0)
7387 {
7388 /* Data dependency; DEP_INSN writes a register that INSN reads some
7389 cycles later. */
7390
7391 /* If a load, then the dependence must be on the memory address;
7392 add an extra "cycle". Note that the cost could be two cycles
7393 if the reg was written late in an instruction group; we cannot tell
7394 here. */
7395 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7396 return cost + 3;
7397
7398 /* Get the delay only if the address of the store is the dependence. */
7399 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7400 {
7401 rtx pat = PATTERN (insn);
7402 rtx dep_pat = PATTERN (dep_insn);
7403
7404 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7405 return cost; /* This should not happen! */
7406
7407 /* The dependency between the two instructions was on the data that
7408 is being stored. Assume that this implies that the address of the
7409 store is not dependent. */
7410 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7411 return cost;
7412
7413 return cost + 3; /* An approximation. */
7414 }
7415
7416 /* A shift instruction cannot receive its data from an instruction
7417 in the same cycle; add a one cycle penalty. */
7418 if (insn_type == TYPE_SHIFT)
7419 return cost + 3; /* Split before cascade into shift. */
7420 }
7421 else
7422 {
7423 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7424 INSN writes some cycles later. */
7425
7426 /* These are only significant for the fpu unit; writing a fp reg before
7427 the fpu has finished with it stalls the processor. */
7428
7429 /* Reusing an integer register causes no problems. */
7430 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7431 return 0;
7432 }
7433
7434 return cost;
7435 }
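/* For example, given the true dependency

     dep_insn:  (set (reg:SI %o1) (plus:SI (reg:SI %o2) (const_int 4)))
     insn:      (set (mem:SI (reg:SI %o1)) (reg:SI %o3))

   the store's address depends on DEP_INSN, so the code above adds 3 to
   the cost; had %o1 instead been the stored value (the SET_SRC), the
   cost would have been returned unchanged.  (Illustrative RTL, not
   taken from a real dump.)  */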
7436
7437 static int
7438 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7439 {
7440 enum attr_type insn_type, dep_type;
7441 rtx pat = PATTERN (insn);
7442 rtx dep_pat = PATTERN (dep_insn);
7443
7444 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7445 return cost;
7446
7447 insn_type = get_attr_type (insn);
7448 dep_type = get_attr_type (dep_insn);
7449
7450 switch (REG_NOTE_KIND (link))
7451 {
7452 case 0:
7453 /* Data dependency; DEP_INSN writes a register that INSN reads some
7454 cycles later. */
7455
7456 switch (insn_type)
7457 {
7458 case TYPE_STORE:
7459 case TYPE_FPSTORE:
7460 /* Get the delay iff the address of the store is the dependence. */
7461 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7462 return cost;
7463
7464 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7465 return cost;
7466 return cost + 3;
7467
7468 case TYPE_LOAD:
7469 case TYPE_SLOAD:
7470 case TYPE_FPLOAD:
7471 /* If a load, then the dependence must be on the memory address. If
7472 the addresses aren't equal, then it might be a false dependency. */
7473 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7474 {
7475 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7476 || GET_CODE (SET_DEST (dep_pat)) != MEM
7477 || GET_CODE (SET_SRC (pat)) != MEM
7478 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7479 XEXP (SET_SRC (pat), 0)))
7480 return cost + 2;
7481
7482 return cost + 8;
7483 }
7484 break;
7485
7486 case TYPE_BRANCH:
7487 /* Compare to branch latency is 0. There is no benefit from
7488 separating compare and branch. */
7489 if (dep_type == TYPE_COMPARE)
7490 return 0;
7491 /* Floating point compare to branch latency is less than
7492 compare to conditional move. */
7493 if (dep_type == TYPE_FPCMP)
7494 return cost - 1;
7495 break;
7496 default:
7497 break;
7498 }
7499 break;
7500
7501 case REG_DEP_ANTI:
7502 /* Anti-dependencies only penalize the fpu unit. */
7503 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7504 return 0;
7505 break;
7506
7507 default:
7508 break;
7509 }
7510
7511 return cost;
7512 }
7513
7514 static int
7515 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7516 {
7517 switch (sparc_cpu)
7518 {
7519 case PROCESSOR_SUPERSPARC:
7520 cost = supersparc_adjust_cost (insn, link, dep, cost);
7521 break;
7522 case PROCESSOR_HYPERSPARC:
7523 case PROCESSOR_SPARCLITE86X:
7524 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7525 break;
7526 default:
7527 break;
7528 }
7529 return cost;
7530 }
7531
7532 static void
7533 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7534 int sched_verbose ATTRIBUTE_UNUSED,
7535 int max_ready ATTRIBUTE_UNUSED)
7536 {
7537 }
7538
7539 static int
7540 sparc_use_sched_lookahead (void)
7541 {
7542 if (sparc_cpu == PROCESSOR_NIAGARA
7543 || sparc_cpu == PROCESSOR_NIAGARA2)
7544 return 0;
7545 if (sparc_cpu == PROCESSOR_ULTRASPARC
7546 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7547 return 4;
7548 if ((1 << sparc_cpu) &
7549 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7550 (1 << PROCESSOR_SPARCLITE86X)))
7551 return 3;
7552 return 0;
7553 }
7554
7555 static int
7556 sparc_issue_rate (void)
7557 {
7558 switch (sparc_cpu)
7559 {
7560 case PROCESSOR_NIAGARA:
7561 case PROCESSOR_NIAGARA2:
7562 default:
7563 return 1;
7564 case PROCESSOR_V9:
7565 /* Assume V9 processors are capable of at least dual-issue. */
7566 return 2;
7567 case PROCESSOR_SUPERSPARC:
7568 return 3;
7569 case PROCESSOR_HYPERSPARC:
7570 case PROCESSOR_SPARCLITE86X:
7571 return 2;
7572 case PROCESSOR_ULTRASPARC:
7573 case PROCESSOR_ULTRASPARC3:
7574 return 4;
7575 }
7576 }
7577
7578 static int
7579 set_extends (rtx insn)
7580 {
7581 register rtx pat = PATTERN (insn);
7582
7583 switch (GET_CODE (SET_SRC (pat)))
7584 {
7585 /* Load and some shift instructions zero extend. */
7586 case MEM:
7587 case ZERO_EXTEND:
7588 /* sethi clears the high bits */
7589 case HIGH:
7590 /* LO_SUM is used with sethi. sethi cleared the high
7591 bits and the values used with lo_sum are positive */
7592 case LO_SUM:
7593 /* Store flag stores 0 or 1 */
7594 case LT: case LTU:
7595 case GT: case GTU:
7596 case LE: case LEU:
7597 case GE: case GEU:
7598 case EQ:
7599 case NE:
7600 return 1;
7601 case AND:
7602 {
7603 rtx op0 = XEXP (SET_SRC (pat), 0);
7604 rtx op1 = XEXP (SET_SRC (pat), 1);
7605 if (GET_CODE (op1) == CONST_INT)
7606 return INTVAL (op1) >= 0;
7607 if (GET_CODE (op0) != REG)
7608 return 0;
7609 if (sparc_check_64 (op0, insn) == 1)
7610 return 1;
7611 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7612 }
7613 case IOR:
7614 case XOR:
7615 {
7616 rtx op0 = XEXP (SET_SRC (pat), 0);
7617 rtx op1 = XEXP (SET_SRC (pat), 1);
7618 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7619 return 0;
7620 if (GET_CODE (op1) == CONST_INT)
7621 return INTVAL (op1) >= 0;
7622 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7623 }
7624 case LSHIFTRT:
7625 return GET_MODE (SET_SRC (pat)) == SImode;
7626 /* Positive integers leave the high bits zero. */
7627 case CONST_DOUBLE:
7628 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7629 case CONST_INT:
7630 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7631 case ASHIFTRT:
7632 case SIGN_EXTEND:
7633 return - (GET_MODE (SET_SRC (pat)) == SImode);
7634 case REG:
7635 return sparc_check_64 (SET_SRC (pat), insn);
7636 default:
7637 return 0;
7638 }
7639 }
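/* For example, set_extends returns 1 for

     (set (reg:DI %o0) (and:DI (reg:DI %o1) (const_int 255)))

   since an AND with a nonnegative constant clears the high bits, and
   returns -1 for a SImode SIGN_EXTEND or ASHIFTRT source, meaning the
   value is sign- rather than zero-extended.  (Illustrative RTL.)  */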
7640
7641 /* We _ought_ to have only one kind per function, but... */
7642 static GTY(()) rtx sparc_addr_diff_list;
7643 static GTY(()) rtx sparc_addr_list;
7644
7645 void
7646 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7647 {
7648 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7649 if (diff)
7650 sparc_addr_diff_list
7651 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7652 else
7653 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7654 }
7655
7656 static void
7657 sparc_output_addr_vec (rtx vec)
7658 {
7659 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7660 int idx, vlen = XVECLEN (body, 0);
7661
7662 #ifdef ASM_OUTPUT_ADDR_VEC_START
7663 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7664 #endif
7665
7666 #ifdef ASM_OUTPUT_CASE_LABEL
7667 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7668 NEXT_INSN (lab));
7669 #else
7670 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7671 #endif
7672
7673 for (idx = 0; idx < vlen; idx++)
7674 {
7675 ASM_OUTPUT_ADDR_VEC_ELT
7676 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7677 }
7678
7679 #ifdef ASM_OUTPUT_ADDR_VEC_END
7680 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7681 #endif
7682 }
7683
7684 static void
7685 sparc_output_addr_diff_vec (rtx vec)
7686 {
7687 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7688 rtx base = XEXP (XEXP (body, 0), 0);
7689 int idx, vlen = XVECLEN (body, 1);
7690
7691 #ifdef ASM_OUTPUT_ADDR_VEC_START
7692 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7693 #endif
7694
7695 #ifdef ASM_OUTPUT_CASE_LABEL
7696 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7697 NEXT_INSN (lab));
7698 #else
7699 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7700 #endif
7701
7702 for (idx = 0; idx < vlen; idx++)
7703 {
7704 ASM_OUTPUT_ADDR_DIFF_ELT
7705 (asm_out_file,
7706 body,
7707 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7708 CODE_LABEL_NUMBER (base));
7709 }
7710
7711 #ifdef ASM_OUTPUT_ADDR_VEC_END
7712 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7713 #endif
7714 }
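/* The two routines above emit jump tables along these lines (a sketch;
   the directive is .word or .xword depending on the case vector mode,
   and the label numbers are made up):

     .L34:
         .word  .L27-.L34
         .word  .L28-.L34
         .word  .L29-.L34

   for the address-difference form, and absolute ".word .L27" entries
   for the plain form.  */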
7715
7716 static void
7717 sparc_output_deferred_case_vectors (void)
7718 {
7719 rtx t;
7720 int align;
7721
7722 if (sparc_addr_list == NULL_RTX
7723 && sparc_addr_diff_list == NULL_RTX)
7724 return;
7725
7726 /* Align to cache line in the function's code section. */
7727 switch_to_section (current_function_section ());
7728
7729 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7730 if (align > 0)
7731 ASM_OUTPUT_ALIGN (asm_out_file, align);
7732
7733 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7734 sparc_output_addr_vec (XEXP (t, 0));
7735 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7736 sparc_output_addr_diff_vec (XEXP (t, 0));
7737
7738 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7739 }
7740
7741 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7742 unknown. Return 1 if the high bits are zero, -1 if the register is
7743 sign extended. */
7744 int
7745 sparc_check_64 (rtx x, rtx insn)
7746 {
7747 /* If a register is set only once it is safe to ignore insns this
7748 code does not know how to handle. The loop will either recognize
7749 the single set and return the correct value or fail to recognize
7750 it and return 0. */
7751 int set_once = 0;
7752 rtx y = x;
7753
7754 gcc_assert (GET_CODE (x) == REG);
7755
7756 if (GET_MODE (x) == DImode)
7757 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7758
7759 if (flag_expensive_optimizations
7760 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7761 set_once = 1;
7762
7763 if (insn == 0)
7764 {
7765 if (set_once)
7766 insn = get_last_insn_anywhere ();
7767 else
7768 return 0;
7769 }
7770
7771 while ((insn = PREV_INSN (insn)))
7772 {
7773 switch (GET_CODE (insn))
7774 {
7775 case JUMP_INSN:
7776 case NOTE:
7777 break;
7778 case CODE_LABEL:
7779 case CALL_INSN:
7780 default:
7781 if (! set_once)
7782 return 0;
7783 break;
7784 case INSN:
7785 {
7786 rtx pat = PATTERN (insn);
7787 if (GET_CODE (pat) != SET)
7788 return 0;
7789 if (rtx_equal_p (x, SET_DEST (pat)))
7790 return set_extends (insn);
7791 if (y && rtx_equal_p (y, SET_DEST (pat)))
7792 return set_extends (insn);
7793 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7794 return 0;
7795 }
7796 }
7797 }
7798 return 0;
7799 }
7800
7801 /* Returns assembly code to perform a DImode shift using
7802 a 64-bit global or out register on SPARC-V8+. */
7803 const char *
7804 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7805 {
7806 static char asm_code[60];
7807
7808 /* The scratch register is only required when the destination
7809 register is not a 64-bit global or out register. */
7810 if (which_alternative != 2)
7811 operands[3] = operands[0];
7812
7813 /* We can only shift by constants <= 63. */
7814 if (GET_CODE (operands[2]) == CONST_INT)
7815 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7816
7817 if (GET_CODE (operands[1]) == CONST_INT)
7818 {
7819 output_asm_insn ("mov\t%1, %3", operands);
7820 }
7821 else
7822 {
7823 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7824 if (sparc_check_64 (operands[1], insn) <= 0)
7825 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7826 output_asm_insn ("or\t%L1, %3, %3", operands);
7827 }
7828
7829 strcpy (asm_code, opcode);
7830
7831 if (which_alternative != 2)
7832 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7833 else
7834 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7835 }
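/* For instance, with OPCODE "sllx" and a destination that is itself a
   64-bit global or out register, the routine returns a sequence along
   the lines of

     sllx  %H1, 32, %3      ! assemble the 64-bit source in %3 (== %0 here)
     srl   %L1, 0, %L1      ! zero-extend the low word when unproven
     or    %L1, %3, %3
     sllx  %0, %2, %L0
     srlx  %L0, 32, %H0     ! split the result back into the pair

   (a sketch assembled from the strings above; the first three insns
   collapse to a single mov when the source is a constant, and
   alternative 2 routes everything through the scratch register %3).  */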
7836 \f
7837 /* Output rtl to increment the profiler label LABELNO
7838 for profiling a function entry. */
7839
7840 void
7841 sparc_profile_hook (int labelno)
7842 {
7843 char buf[32];
7844 rtx lab, fun;
7845
7846 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7847 if (NO_PROFILE_COUNTERS)
7848 {
7849 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7850 }
7851 else
7852 {
7853 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7854 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7855 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7856 }
7857 }
7858 \f
7859 #ifdef OBJECT_FORMAT_ELF
7860 static void
7861 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7862 tree decl)
7863 {
7864 if (flags & SECTION_MERGE)
7865 {
7866 /* entsize cannot be expressed in this section attributes
7867 encoding style. */
7868 default_elf_asm_named_section (name, flags, decl);
7869 return;
7870 }
7871
7872 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7873
7874 if (!(flags & SECTION_DEBUG))
7875 fputs (",#alloc", asm_out_file);
7876 if (flags & SECTION_WRITE)
7877 fputs (",#write", asm_out_file);
7878 if (flags & SECTION_TLS)
7879 fputs (",#tls", asm_out_file);
7880 if (flags & SECTION_CODE)
7881 fputs (",#execinstr", asm_out_file);
7882
7883 /* ??? Handle SECTION_BSS. */
7884
7885 fputc ('\n', asm_out_file);
7886 }
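/* So, for example, a writable allocated section comes out in the Sun
   assembler syntax as

	.section	".data.foo",#alloc,#write

   (".data.foo" being a made-up name) instead of the usual "aw" flag
   string that default_elf_asm_named_section would print.  */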
7887 #endif /* OBJECT_FORMAT_ELF */
7888
7889 /* We do not allow indirect calls to be optimized into sibling calls.
7890
7891 We cannot use sibling calls when delayed branches are disabled
7892 because they will likely require the call delay slot to be filled.
7893
7894 Also, on SPARC 32-bit we cannot emit a sibling call when the
7895 current function returns a structure. This is because the "unimp
7896 after call" convention would cause the callee to return to the
7897 wrong place. The generic code already disallows cases where the
7898 function being called returns a structure.
7899
7900 It may seem strange how this last case could occur. Usually there
7901 is code after the call which jumps to epilogue code which dumps the
7902 return value into the struct return area. That ought to invalidate
7903 the sibling call, right? Well, in the C++ case we can end up passing
7904 the pointer to the struct return area to a constructor (which returns
7905 void) and then nothing else happens. Such a sibling call would look
7906 valid without the added check here.
7907
7908 VxWorks PIC PLT entries require the global pointer to be initialized
7909 on entry. We therefore can't emit sibling calls to them. */
7910 static bool
7911 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7912 {
7913 return (decl
7914 && flag_delayed_branch
7915 && (TARGET_ARCH64 || ! cfun->returns_struct)
7916 && !(TARGET_VXWORKS_RTP
7917 && flag_pic
7918 && !targetm.binds_local_p (decl)));
7919 }
7920 \f
7921 /* libfunc renaming. */
7922 #include "config/gofast.h"
7923
7924 static void
7925 sparc_init_libfuncs (void)
7926 {
7927 if (TARGET_ARCH32)
7928 {
7929 /* Use the subroutines that Sun's library provides for integer
7930 multiply and divide. The `*' prevents an underscore from
7931 being prepended by the compiler. .umul is a little faster
7932 than .mul. */
7933 set_optab_libfunc (smul_optab, SImode, "*.umul");
7934 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7935 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7936 set_optab_libfunc (smod_optab, SImode, "*.rem");
7937 set_optab_libfunc (umod_optab, SImode, "*.urem");
7938
7939 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
7940 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7941 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7942 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7943 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7944 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7945
7946 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7947 is because with soft-float, the SFmode and DFmode sqrt
7948 instructions will be absent, and the compiler will notice and
7949 try to use the TFmode sqrt instruction for calls to the
7950 builtin function sqrt, but this fails. */
7951 if (TARGET_FPU)
7952 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7953
7954 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7955 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7956 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7957 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7958 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7959 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7960
7961 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7962 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7963 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7964 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7965
7966 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7967 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7968 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7969 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7970
7971 if (DITF_CONVERSION_LIBFUNCS)
7972 {
7973 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7974 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7975 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7976 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7977 }
7978
7979 if (SUN_CONVERSION_LIBFUNCS)
7980 {
7981 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7982 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7983 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7984 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7985 }
7986 }
7987 if (TARGET_ARCH64)
7988 {
7989 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7990 do not exist in the library. Make sure the compiler does not
7991 emit calls to them by accident. (It should always use the
7992 hardware instructions.) */
7993 set_optab_libfunc (smul_optab, SImode, 0);
7994 set_optab_libfunc (sdiv_optab, SImode, 0);
7995 set_optab_libfunc (udiv_optab, SImode, 0);
7996 set_optab_libfunc (smod_optab, SImode, 0);
7997 set_optab_libfunc (umod_optab, SImode, 0);
7998
7999 if (SUN_INTEGER_MULTIPLY_64)
8000 {
8001 set_optab_libfunc (smul_optab, DImode, "__mul64");
8002 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8003 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8004 set_optab_libfunc (smod_optab, DImode, "__rem64");
8005 set_optab_libfunc (umod_optab, DImode, "__urem64");
8006 }
8007
8008 if (SUN_CONVERSION_LIBFUNCS)
8009 {
8010 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8011 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8012 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8013 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8014 }
8015 }
8016
8017 gofast_maybe_init_libfuncs ();
8018 }
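/* With the 32-bit setup above, a SImode multiply that is not expanded
   inline thus becomes a "call .umul"; the leading '*' only suppresses
   the user-label underscore prefix and is not part of the assembler
   name.  */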
8019 \f
8020 #define def_builtin(NAME, CODE, TYPE) \
8021 add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8022 NULL_TREE)
8023
8024 /* Implement the TARGET_INIT_BUILTINS target hook.
8025 Create builtin functions for special SPARC instructions. */
8026
8027 static void
8028 sparc_init_builtins (void)
8029 {
8030 if (TARGET_VIS)
8031 sparc_vis_init_builtins ();
8032 }
8033
8034 /* Create builtin functions for VIS 1.0 instructions. */
8035
8036 static void
8037 sparc_vis_init_builtins (void)
8038 {
8039 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8040 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8041 tree v4hi = build_vector_type (intHI_type_node, 4);
8042 tree v2hi = build_vector_type (intHI_type_node, 2);
8043 tree v2si = build_vector_type (intSI_type_node, 2);
8044
8045 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8046 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8047 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8048 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8049 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8050 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8051 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8052 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8053 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8054 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8055 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8056 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8057 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8058 v8qi, v8qi,
8059 intDI_type_node, 0);
8060 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8061 intDI_type_node,
8062 intDI_type_node, 0);
8063 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8064 ptr_type_node,
8065 intSI_type_node, 0);
8066 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8067 ptr_type_node,
8068 intDI_type_node, 0);
8069
8070 /* Packing and expanding vectors. */
8071 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8072 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8073 v8qi_ftype_v2si_v8qi);
8074 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8075 v2hi_ftype_v2si);
8076 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8077 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8078 v8qi_ftype_v4qi_v4qi);
8079
8080 /* Multiplications. */
8081 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8082 v4hi_ftype_v4qi_v4hi);
8083 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8084 v4hi_ftype_v4qi_v2hi);
8085 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8086 v4hi_ftype_v4qi_v2hi);
8087 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8088 v4hi_ftype_v8qi_v4hi);
8089 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8090 v4hi_ftype_v8qi_v4hi);
8091 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8092 v2si_ftype_v4qi_v2hi);
8093 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8094 v2si_ftype_v4qi_v2hi);
8095
8096 /* Data aligning. */
8097 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8098 v4hi_ftype_v4hi_v4hi);
8099 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8100 v8qi_ftype_v8qi_v8qi);
8101 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8102 v2si_ftype_v2si_v2si);
8103 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8104 di_ftype_di_di);
8105 if (TARGET_ARCH64)
8106 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8107 ptr_ftype_ptr_di);
8108 else
8109 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8110 ptr_ftype_ptr_si);
8111
8112 /* Pixel distance. */
8113 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8114 di_ftype_v8qi_v8qi_di);
8115 }
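/* For illustration, user code compiled with -mvis can then use these
   builtins directly, e.g. (a sketch; the typedefs are hypothetical):

     typedef short vec16 __attribute__ ((vector_size (8)));
     typedef unsigned char vec8 __attribute__ ((vector_size (4)));

     vec8 pack (vec16 v)
     {
       return __builtin_vis_fpack16 (v);
     }

   which maps onto the fpack16 instruction.  */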
8116
8117 /* Handle TARGET_EXPAND_BUILTIN target hook.
8118 Expand builtin functions for SPARC intrinsics. */
8119
8120 static rtx
8121 sparc_expand_builtin (tree exp, rtx target,
8122 rtx subtarget ATTRIBUTE_UNUSED,
8123 enum machine_mode tmode ATTRIBUTE_UNUSED,
8124 int ignore ATTRIBUTE_UNUSED)
8125 {
8126 tree arg;
8127 call_expr_arg_iterator iter;
8128 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8129 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8130 rtx pat, op[4];
8131 enum machine_mode mode[4];
8132 int arg_count = 0;
8133
8134 mode[0] = insn_data[icode].operand[0].mode;
8135 if (!target
8136 || GET_MODE (target) != mode[0]
8137 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8138 op[0] = gen_reg_rtx (mode[0]);
8139 else
8140 op[0] = target;
8141
8142 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8143 {
8144 arg_count++;
8145 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8146 op[arg_count] = expand_normal (arg);
8147
8148 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8149 mode[arg_count]))
8150 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8151 }
8152
8153 switch (arg_count)
8154 {
8155 case 1:
8156 pat = GEN_FCN (icode) (op[0], op[1]);
8157 break;
8158 case 2:
8159 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8160 break;
8161 case 3:
8162 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8163 break;
8164 default:
8165 gcc_unreachable ();
8166 }
8167
8168 if (!pat)
8169 return NULL_RTX;
8170
8171 emit_insn (pat);
8172
8173 return op[0];
8174 }
8175
8176 static int
8177 sparc_vis_mul8x16 (int e8, int e16)
8178 {
8179 return (e8 * e16 + 128) / 256;
8180 }
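/* The helper above rounds the scaled 8.8 fixed-point product to
   nearest, e.g. sparc_vis_mul8x16 (2, 300) = (600 + 128) / 256 = 2
   and sparc_vis_mul8x16 (200, 300) = (60000 + 128) / 256 = 234.  */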
8181
8182 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8183 by FNCODE. All of the elements in the ELTS0 and ELTS1 lists must be integer
8184 constants. A tree list with the results of the multiplications is returned,
8185 and each element in the list is of INNER_TYPE. */
8186
8187 static tree
8188 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8189 {
8190 tree n_elts = NULL_TREE;
8191 int scale;
8192
8193 switch (fncode)
8194 {
8195 case CODE_FOR_fmul8x16_vis:
8196 for (; elts0 && elts1;
8197 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8198 {
8199 int val
8200 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8201 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8202 n_elts = tree_cons (NULL_TREE,
8203 build_int_cst (inner_type, val),
8204 n_elts);
8205 }
8206 break;
8207
8208 case CODE_FOR_fmul8x16au_vis:
8209 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8210
8211 for (; elts0; elts0 = TREE_CHAIN (elts0))
8212 {
8213 int val
8214 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8215 scale);
8216 n_elts = tree_cons (NULL_TREE,
8217 build_int_cst (inner_type, val),
8218 n_elts);
8219 }
8220 break;
8221
8222 case CODE_FOR_fmul8x16al_vis:
8223 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8224
8225 for (; elts0; elts0 = TREE_CHAIN (elts0))
8226 {
8227 int val
8228 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8229 scale);
8230 n_elts = tree_cons (NULL_TREE,
8231 build_int_cst (inner_type, val),
8232 n_elts);
8233 }
8234 break;
8235
8236 default:
8237 gcc_unreachable ();
8238 }
8239
8240 return nreverse (n_elts);
8241 }
8242
8243 /* Handle TARGET_FOLD_BUILTIN target hook.
8244 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8245 result of the function call is ignored. NULL_TREE is returned if the
8246 function could not be folded. */
8247
8248 static tree
8249 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8250 {
8251 tree arg0, arg1, arg2;
8252 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8253 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8254
8255 if (ignore
8256 && icode != CODE_FOR_alignaddrsi_vis
8257 && icode != CODE_FOR_alignaddrdi_vis)
8258 return fold_convert (rtype, integer_zero_node);
8259
8260 switch (icode)
8261 {
8262 case CODE_FOR_fexpand_vis:
8263 arg0 = TREE_VALUE (arglist);
8264 STRIP_NOPS (arg0);
8265
8266 if (TREE_CODE (arg0) == VECTOR_CST)
8267 {
8268 tree inner_type = TREE_TYPE (rtype);
8269 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8270 tree n_elts = NULL_TREE;
8271
8272 for (; elts; elts = TREE_CHAIN (elts))
8273 {
8274 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8275 n_elts = tree_cons (NULL_TREE,
8276 build_int_cst (inner_type, val),
8277 n_elts);
8278 }
8279 return build_vector (rtype, nreverse (n_elts));
8280 }
8281 break;
8282
8283 case CODE_FOR_fmul8x16_vis:
8284 case CODE_FOR_fmul8x16au_vis:
8285 case CODE_FOR_fmul8x16al_vis:
8286 arg0 = TREE_VALUE (arglist);
8287 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8288 STRIP_NOPS (arg0);
8289 STRIP_NOPS (arg1);
8290
8291 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8292 {
8293 tree inner_type = TREE_TYPE (rtype);
8294 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8295 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8296 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8297 elts1);
8298
8299 return build_vector (rtype, n_elts);
8300 }
8301 break;
8302
8303 case CODE_FOR_fpmerge_vis:
8304 arg0 = TREE_VALUE (arglist);
8305 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8306 STRIP_NOPS (arg0);
8307 STRIP_NOPS (arg1);
8308
8309 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8310 {
8311 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8312 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8313 tree n_elts = NULL_TREE;
8314
8315 for (; elts0 && elts1;
8316 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8317 {
8318 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8319 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8320 }
8321
8322 return build_vector (rtype, nreverse (n_elts));
8323 }
8324 break;
8325
8326 case CODE_FOR_pdist_vis:
8327 arg0 = TREE_VALUE (arglist);
8328 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8329 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8330 STRIP_NOPS (arg0);
8331 STRIP_NOPS (arg1);
8332 STRIP_NOPS (arg2);
8333
8334 if (TREE_CODE (arg0) == VECTOR_CST
8335 && TREE_CODE (arg1) == VECTOR_CST
8336 && TREE_CODE (arg2) == INTEGER_CST)
8337 {
8338 int overflow = 0;
8339 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8340 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8341 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8342 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8343
8344 for (; elts0 && elts1;
8345 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8346 {
8347 unsigned HOST_WIDE_INT
8348 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8349 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8350 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8351 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8352
8353 unsigned HOST_WIDE_INT l;
8354 HOST_WIDE_INT h;
8355
8356 overflow |= neg_double (low1, high1, &l, &h);
8357 overflow |= add_double (low0, high0, l, h, &l, &h);
8358 if (h < 0)
8359 overflow |= neg_double (l, h, &l, &h);
8360
8361 overflow |= add_double (low, high, l, h, &low, &high);
8362 }
8363
8364 gcc_assert (overflow == 0);
8365
8366 return build_int_cst_wide (rtype, low, high);
8367 }
8368
8369 default:
8370 break;
8371 }
8372
8373 return NULL_TREE;
8374 }
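/* As an example of the folding above, a fexpand call whose operand is
   entirely constant, say on the vector { 1, 2, 3, 4 }, is replaced at
   compile time by the constant vector { 16, 32, 48, 64 }: each element
   is shifted left by 4 into the wider result type, so no fexpand
   instruction is emitted at all.  */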
8375 \f
8376 /* ??? This duplicates information provided to the compiler by the
8377 ??? scheduler description. Some day, teach genautomata to output
8378 ??? the latencies and then CSE will just use that. */
8379
8380 static bool
8381 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8382 bool speed ATTRIBUTE_UNUSED)
8383 {
8384 enum machine_mode mode = GET_MODE (x);
8385 bool float_mode_p = FLOAT_MODE_P (mode);
8386
8387 switch (code)
8388 {
8389 case CONST_INT:
8390 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8391 {
8392 *total = 0;
8393 return true;
8394 }
8395 /* FALLTHRU */
8396
8397 case HIGH:
8398 *total = 2;
8399 return true;
8400
8401 case CONST:
8402 case LABEL_REF:
8403 case SYMBOL_REF:
8404 *total = 4;
8405 return true;
8406
8407 case CONST_DOUBLE:
8408 if (GET_MODE (x) == VOIDmode
8409 && ((CONST_DOUBLE_HIGH (x) == 0
8410 && CONST_DOUBLE_LOW (x) < 0x1000)
8411 || (CONST_DOUBLE_HIGH (x) == -1
8412 && CONST_DOUBLE_LOW (x) < 0
8413 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8414 *total = 0;
8415 else
8416 *total = 8;
8417 return true;
8418
8419 case MEM:
8420 /* If outer-code was a sign or zero extension, a cost
8421 of COSTS_N_INSNS (1) was already added in. This is
8422 why we are subtracting it back out. */
8423 if (outer_code == ZERO_EXTEND)
8424 {
8425 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8426 }
8427 else if (outer_code == SIGN_EXTEND)
8428 {
8429 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8430 }
8431 else if (float_mode_p)
8432 {
8433 *total = sparc_costs->float_load;
8434 }
8435 else
8436 {
8437 *total = sparc_costs->int_load;
8438 }
8439
8440 return true;
8441
8442 case PLUS:
8443 case MINUS:
8444 if (float_mode_p)
8445 *total = sparc_costs->float_plusminus;
8446 else
8447 *total = COSTS_N_INSNS (1);
8448 return false;
8449
8450 case MULT:
8451 if (float_mode_p)
8452 *total = sparc_costs->float_mul;
8453 else if (! TARGET_HARD_MUL)
8454 *total = COSTS_N_INSNS (25);
8455 else
8456 {
8457 int bit_cost;
8458
8459 bit_cost = 0;
8460 if (sparc_costs->int_mul_bit_factor)
8461 {
8462 int nbits;
8463
8464 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8465 {
8466 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8467 for (nbits = 0; value != 0; value &= value - 1)
8468 nbits++;
8469 }
8470 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8471 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8472 {
8473 rtx x1 = XEXP (x, 1);
8474 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8475 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8476
8477 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8478 nbits++;
8479 for (; value2 != 0; value2 &= value2 - 1)
8480 nbits++;
8481 }
8482 else
8483 nbits = 7;
8484
8485 if (nbits < 3)
8486 nbits = 3;
8487 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8488 bit_cost = COSTS_N_INSNS (bit_cost);
8489 }
8490
8491 if (mode == DImode)
8492 *total = sparc_costs->int_mulX + bit_cost;
8493 else
8494 *total = sparc_costs->int_mul + bit_cost;
8495 }
8496 return false;
8497
8498 case ASHIFT:
8499 case ASHIFTRT:
8500 case LSHIFTRT:
8501 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8502 return false;
8503
8504 case DIV:
8505 case UDIV:
8506 case MOD:
8507 case UMOD:
8508 if (float_mode_p)
8509 {
8510 if (mode == DFmode)
8511 *total = sparc_costs->float_div_df;
8512 else
8513 *total = sparc_costs->float_div_sf;
8514 }
8515 else
8516 {
8517 if (mode == DImode)
8518 *total = sparc_costs->int_divX;
8519 else
8520 *total = sparc_costs->int_div;
8521 }
8522 return false;
8523
8524 case NEG:
8525 if (! float_mode_p)
8526 {
8527 *total = COSTS_N_INSNS (1);
8528 return false;
8529 }
8530 /* FALLTHRU */
8531
8532 case ABS:
8533 case FLOAT:
8534 case UNSIGNED_FLOAT:
8535 case FIX:
8536 case UNSIGNED_FIX:
8537 case FLOAT_EXTEND:
8538 case FLOAT_TRUNCATE:
8539 *total = sparc_costs->float_move;
8540 return false;
8541
8542 case SQRT:
8543 if (mode == DFmode)
8544 *total = sparc_costs->float_sqrt_df;
8545 else
8546 *total = sparc_costs->float_sqrt_sf;
8547 return false;
8548
8549 case COMPARE:
8550 if (float_mode_p)
8551 *total = sparc_costs->float_cmp;
8552 else
8553 *total = COSTS_N_INSNS (1);
8554 return false;
8555
8556 case IF_THEN_ELSE:
8557 if (float_mode_p)
8558 *total = sparc_costs->float_cmove;
8559 else
8560 *total = sparc_costs->int_cmove;
8561 return false;
8562
8563 case IOR:
8564 /* Handle the NAND vector patterns. */
8565 if (sparc_vector_mode_supported_p (GET_MODE (x))
8566 && GET_CODE (XEXP (x, 0)) == NOT
8567 && GET_CODE (XEXP (x, 1)) == NOT)
8568 {
8569 *total = COSTS_N_INSNS (1);
8570 return true;
8571 }
8572 else
8573 return false;
8574
8575 default:
8576 return false;
8577 }
8578 }
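/* The nbits loops above count set bits with the classic
   "value &= value - 1" trick, which clears the lowest set bit per
   iteration.  E.g. a multiplication by 0x90001 (three set bits) costs
   nothing extra with an int_mul_bit_factor of 2 (a value picked purely
   for illustration), since (3 - 3) / 2 = 0, while one by 0x1ffff
   (seventeen set bits) adds COSTS_N_INSNS ((17 - 3) / 2), i.e. 7
   insns' worth.  */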
8579
8580 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8581 This is achieved by means of a manual dynamic stack space allocation in
8582 the current frame. We make the assumption that SEQ doesn't contain any
8583 function calls, with the possible exception of calls to the PIC helper. */
8584
8585 static void
8586 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8587 {
8588 /* We must preserve the lowest 16 words for the register save area. */
8589 HOST_WIDE_INT offset = 16 * UNITS_PER_WORD;
8590 /* We really need only 2 words of fresh stack space. */
8591 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2 * UNITS_PER_WORD);
8592
8593 rtx slot
8594 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8595 SPARC_STACK_BIAS + offset));
8596
8597 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8598 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8599 if (reg2)
8600 emit_insn (gen_rtx_SET (VOIDmode,
8601 adjust_address (slot, word_mode, UNITS_PER_WORD),
8602 reg2));
8603 emit_insn (seq);
8604 if (reg2)
8605 emit_insn (gen_rtx_SET (VOIDmode,
8606 reg2,
8607 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8608 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8609 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8610 }
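/* Pictorially, emit_and_preserve brackets SEQ like this (a sketch;
   st/ld vs. stx/ldx and the exact offsets depend on word_mode):

     sub   %sp, size, %sp
     st    reg,  [%sp + bias + 16 words]    ! first fresh word
     st    reg2, [%sp + bias + 17 words]    ! only when reg2 is given
     <SEQ>
     ld    [%sp + bias + 17 words], reg2
     ld    [%sp + bias + 16 words], reg
     add   %sp, size, %sp

   leaving the bottom 16 words untouched for the register save area.  */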
8611
8612 /* Output the assembler code for a thunk function. THUNK_DECL is the
8613 declaration for the thunk function itself, FUNCTION is the decl for
8614 the target function. DELTA is an immediate constant offset to be
8615 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8616 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8617
8618 static void
8619 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8620 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8621 tree function)
8622 {
8623 rtx this_rtx, insn, funexp;
8624 unsigned int int_arg_first;
8625
8626 reload_completed = 1;
8627 epilogue_completed = 1;
8628
8629 emit_note (NOTE_INSN_PROLOGUE_END);
8630
8631 if (flag_delayed_branch)
8632 {
8633 /* We will emit a regular sibcall below, so we need to instruct
8634 output_sibcall that we are in a leaf function. */
8635 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8636
8637 /* This will cause final.c to invoke leaf_renumber_regs so we
8638 must behave as if we were in a not-yet-leafified function. */
8639 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8640 }
8641 else
8642 {
8643 /* We will emit the sibcall manually below, so we will need to
8644 manually spill non-leaf registers. */
8645 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8646
8647 /* We really are in a leaf function. */
8648 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8649 }
8650
8651 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8652 returns a structure, the structure return pointer is there instead. */
8653 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8654 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8655 else
8656 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8657
8658 /* Add DELTA. When possible use a plain add, otherwise load it into
8659 a register first. */
8660 if (delta)
8661 {
8662 rtx delta_rtx = GEN_INT (delta);
8663
8664 if (! SPARC_SIMM13_P (delta))
8665 {
8666 rtx scratch = gen_rtx_REG (Pmode, 1);
8667 emit_move_insn (scratch, delta_rtx);
8668 delta_rtx = scratch;
8669 }
8670
8671 /* THIS_RTX += DELTA. */
8672 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8673 }
8674
8675 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8676 if (vcall_offset)
8677 {
8678 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8679 rtx scratch = gen_rtx_REG (Pmode, 1);
8680
8681 gcc_assert (vcall_offset < 0);
8682
8683 /* SCRATCH = *THIS_RTX. */
8684 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8685
8686 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8687 may not have any available scratch register at this point. */
8688 if (SPARC_SIMM13_P (vcall_offset))
8689 ;
8690 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8691 else if (! fixed_regs[5]
8692 /* The below sequence is made up of at least 2 insns,
8693 while the default method may need only one. */
8694 && vcall_offset < -8192)
8695 {
8696 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8697 emit_move_insn (scratch2, vcall_offset_rtx);
8698 vcall_offset_rtx = scratch2;
8699 }
8700 else
8701 {
8702 rtx increment = GEN_INT (-4096);
8703
8704 /* VCALL_OFFSET is a negative number whose typical range can be
8705 estimated as -32768..0 in 32-bit mode. In almost all cases
8706 it is therefore cheaper to emit multiple add insns than
8707 spilling and loading the constant into a register (at least
8708 6 insns). */
8709 while (! SPARC_SIMM13_P (vcall_offset))
8710 {
8711 emit_insn (gen_add2_insn (scratch, increment));
8712 vcall_offset += 4096;
8713 }
8714 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8715 }
8716
8717 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8718 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8719 gen_rtx_PLUS (Pmode,
8720 scratch,
8721 vcall_offset_rtx)));
8722
8723 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8724 emit_insn (gen_add2_insn (this_rtx, scratch));
8725 }
8726
8727 /* Generate a tail call to the target function. */
8728 if (! TREE_USED (function))
8729 {
8730 assemble_external (function);
8731 TREE_USED (function) = 1;
8732 }
8733 funexp = XEXP (DECL_RTL (function), 0);
8734
8735 if (flag_delayed_branch)
8736 {
8737 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8738 insn = emit_call_insn (gen_sibcall (funexp));
8739 SIBLING_CALL_P (insn) = 1;
8740 }
8741 else
8742 {
8743 /* The hoops we have to jump through in order to generate a sibcall
8744 without using delay slots... */
8745 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8746
8747 if (flag_pic)
8748 {
8749 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8750 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8751 start_sequence ();
8752 /* Delay emitting the PIC helper function because it needs to
8753 change the section and we are emitting assembly code. */
8754 load_pic_register (true); /* clobbers %o7 */
8755 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8756 seq = get_insns ();
8757 end_sequence ();
8758 emit_and_preserve (seq, spill_reg, spill_reg2);
8759 }
8760 else if (TARGET_ARCH32)
8761 {
8762 emit_insn (gen_rtx_SET (VOIDmode,
8763 scratch,
8764 gen_rtx_HIGH (SImode, funexp)));
8765 emit_insn (gen_rtx_SET (VOIDmode,
8766 scratch,
8767 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8768 }
8769 else /* TARGET_ARCH64 */
8770 {
8771 switch (sparc_cmodel)
8772 {
8773 case CM_MEDLOW:
8774 case CM_MEDMID:
8775 /* The destination can serve as a temporary. */
8776 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8777 break;
8778
8779 case CM_MEDANY:
8780 case CM_EMBMEDANY:
8781 /* The destination cannot serve as a temporary. */
8782 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8783 start_sequence ();
8784 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8785 seq = get_insns ();
8786 end_sequence ();
8787 emit_and_preserve (seq, spill_reg, 0);
8788 break;
8789
8790 default:
8791 gcc_unreachable ();
8792 }
8793 }
8794
8795 emit_jump_insn (gen_indirect_jump (scratch));
8796 }
8797
8798 emit_barrier ();
8799
8800 /* Run just enough of rest_of_compilation to get the insns emitted.
8801 There's not really enough bulk here to make other passes such as
8802 instruction scheduling worthwhile. Note that use_thunk calls
8803 assemble_start_function and assemble_end_function. */
8804 insn = get_insns ();
8805 insn_locators_alloc ();
8806 shorten_branches (insn);
8807 final_start_function (insn, file, 1);
8808 final (insn, file, 1);
8809 final_end_function ();
8810 free_after_compilation (cfun);
8811
8812 reload_completed = 0;
8813 epilogue_completed = 0;
8814 }
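/* In C terms, the pointer adjustment the thunk performs before the
   tail call is roughly the following (a sketch kept out of the build;
   the function name is hypothetical):  */
#if 0
static void *
thunk_adjust_this (void *this_ptr, long delta, long vcall_offset)
{
  /* THIS += DELTA.  */
  char *p = (char *) this_ptr + delta;

  /* THIS += *(*THIS + VCALL_OFFSET): *THIS is the vtable pointer and
     the word at the (negative) VCALL_OFFSET within the vtable holds
     the additional adjustment.  */
  if (vcall_offset)
    p += *(long *) (*(char **) p + vcall_offset);

  /* ...then tail-call FUNCTION with the adjusted pointer.  */
  return p;
}
#endif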
8815
8816 /* Return true if sparc_output_mi_thunk would be able to output the
8817 assembler code for the thunk function specified by the arguments
8818 it is passed, and false otherwise. */
8819 static bool
8820 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8821 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8822 HOST_WIDE_INT vcall_offset,
8823 const_tree function ATTRIBUTE_UNUSED)
8824 {
8825 /* Bound the loop used in the default method above. */
8826 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8827 }
8828
8829 /* How to allocate a 'struct machine_function'. */
8830
8831 static struct machine_function *
8832 sparc_init_machine_status (void)
8833 {
8834 return GGC_CNEW (struct machine_function);
8835 }
8836
8837 /* Locate some local-dynamic symbol still in use by this function
8838 so that we can print its name in local-dynamic base patterns. */
8839
8840 static const char *
8841 get_some_local_dynamic_name (void)
8842 {
8843 rtx insn;
8844
8845 if (cfun->machine->some_ld_name)
8846 return cfun->machine->some_ld_name;
8847
8848 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8849 if (INSN_P (insn)
8850 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8851 return cfun->machine->some_ld_name;
8852
8853 gcc_unreachable ();
8854 }
8855
8856 static int
8857 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8858 {
8859 rtx x = *px;
8860
8861 if (x
8862 && GET_CODE (x) == SYMBOL_REF
8863 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8864 {
8865 cfun->machine->some_ld_name = XSTR (x, 0);
8866 return 1;
8867 }
8868
8869 return 0;
8870 }
8871
8872 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8873 This is called from dwarf2out.c to emit call frame instructions
8874 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8875 static void
8876 sparc_dwarf_handle_frame_unspec (const char *label,
8877 rtx pattern ATTRIBUTE_UNUSED,
8878 int index ATTRIBUTE_UNUSED)
8879 {
8880 gcc_assert (index == UNSPECV_SAVEW);
8881 dwarf2out_window_save (label);
8882 }
8883
8884 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8885 We need to emit DTP-relative relocations. */
8886
8887 static void
8888 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8889 {
8890 switch (size)
8891 {
8892 case 4:
8893 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8894 break;
8895 case 8:
8896 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8897 break;
8898 default:
8899 gcc_unreachable ();
8900 }
8901 output_addr_const (file, x);
8902 fputs (")", file);
8903 }
8904
8905 /* Do whatever processing is required at the end of a file. */
8906
8907 static void
8908 sparc_file_end (void)
8909 {
8910 /* If we haven't emitted the special PIC helper function, do so now. */
8911 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8912 emit_pic_helper ();
8913
8914 if (NEED_INDICATE_EXEC_STACK)
8915 file_end_indicate_exec_stack ();
8916 }
8917
8918 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8919 /* Implement TARGET_MANGLE_TYPE. */
8920
8921 static const char *
8922 sparc_mangle_type (const_tree type)
8923 {
8924 if (!TARGET_64BIT
8925 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8926 && TARGET_LONG_DOUBLE_128)
8927 return "g";
8928
8929 /* For all other types, use normal C++ mangling. */
8930 return NULL;
8931 }
8932 #endif
8933
8934 /* Expand code to perform an 8- or 16-bit compare and swap by doing a 32-bit
8935 compare and swap on the word containing the byte or half-word. */
8936
8937 void
8938 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8939 {
8940 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8941 rtx addr = gen_reg_rtx (Pmode);
8942 rtx off = gen_reg_rtx (SImode);
8943 rtx oldv = gen_reg_rtx (SImode);
8944 rtx newv = gen_reg_rtx (SImode);
8945 rtx oldvalue = gen_reg_rtx (SImode);
8946 rtx newvalue = gen_reg_rtx (SImode);
8947 rtx res = gen_reg_rtx (SImode);
8948 rtx resv = gen_reg_rtx (SImode);
8949 rtx memsi, val, mask, end_label, loop_label, cc;
8950
8951 emit_insn (gen_rtx_SET (VOIDmode, addr,
8952 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8953
8954 if (Pmode != SImode)
8955 addr1 = gen_lowpart (SImode, addr1);
8956 emit_insn (gen_rtx_SET (VOIDmode, off,
8957 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8958
8959 memsi = gen_rtx_MEM (SImode, addr);
8960 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8961 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8962
8963 val = force_reg (SImode, memsi);
8964
8965 emit_insn (gen_rtx_SET (VOIDmode, off,
8966 gen_rtx_XOR (SImode, off,
8967 GEN_INT (GET_MODE (mem) == QImode
8968 ? 3 : 2))));
8969
8970 emit_insn (gen_rtx_SET (VOIDmode, off,
8971 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8972
8973 if (GET_MODE (mem) == QImode)
8974 mask = force_reg (SImode, GEN_INT (0xff));
8975 else
8976 mask = force_reg (SImode, GEN_INT (0xffff));
8977
8978 emit_insn (gen_rtx_SET (VOIDmode, mask,
8979 gen_rtx_ASHIFT (SImode, mask, off)));
8980
8981 emit_insn (gen_rtx_SET (VOIDmode, val,
8982 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8983 val)));
8984
8985 oldval = gen_lowpart (SImode, oldval);
8986 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8987 gen_rtx_ASHIFT (SImode, oldval, off)));
8988
8989 newval = gen_lowpart_common (SImode, newval);
8990 emit_insn (gen_rtx_SET (VOIDmode, newv,
8991 gen_rtx_ASHIFT (SImode, newval, off)));
8992
8993 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8994 gen_rtx_AND (SImode, oldv, mask)));
8995
8996 emit_insn (gen_rtx_SET (VOIDmode, newv,
8997 gen_rtx_AND (SImode, newv, mask)));
8998
8999 end_label = gen_label_rtx ();
9000 loop_label = gen_label_rtx ();
9001 emit_label (loop_label);
9002
9003 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9004 gen_rtx_IOR (SImode, oldv, val)));
9005
9006 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9007 gen_rtx_IOR (SImode, newv, val)));
9008
9009 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9010
9011 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9012
9013 emit_insn (gen_rtx_SET (VOIDmode, resv,
9014 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9015 res)));
9016
9017 sparc_compare_op0 = resv;
9018 sparc_compare_op1 = val;
9019 cc = gen_compare_reg (NE);
9020
9021 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9022
9023 sparc_compare_op0 = cc;
9024 sparc_compare_op1 = const0_rtx;
9025 emit_jump_insn (gen_bne (loop_label));
9026
9027 emit_label (end_label);
9028
9029 emit_insn (gen_rtx_SET (VOIDmode, res,
9030 gen_rtx_AND (SImode, res, mask)));
9031
9032 emit_insn (gen_rtx_SET (VOIDmode, res,
9033 gen_rtx_LSHIFTRT (SImode, res, off)));
9034
9035 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9036 }
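/* In C terms, the RTL emitted above behaves roughly like the following
   (a sketch kept out of the build; names are hypothetical, and OLDV,
   NEWV, MASK and OFF are already shifted into position as above):  */
#if 0
static unsigned int
cas_12_sketch (unsigned int *wordp, unsigned int oldv, unsigned int newv,
	       unsigned int mask, unsigned int off)
{
  unsigned int val = *wordp & ~mask;	/* The surrounding bytes.  */
  unsigned int res;

  for (;;)
    {
      res = __sync_val_compare_and_swap (wordp, oldv | val, newv | val);
      if (res == (oldv | val))
	break;			/* The swap succeeded.  */
      if ((res & ~mask) == val)
	break;			/* Only the subword differed: a real failure.  */
      val = res & ~mask;	/* Unrelated bytes changed; retry.  */
    }

  return (res & mask) >> off;	/* The old subword value, shifted back.  */
}
#endif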
9037
9038 #include "gt-sparc.h"