1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com)
7 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
45 #include "diagnostic-core.h"
50 #include "target-def.h"
51 #include "common/common-target.h"
52 #include "cfglayout.h"
54 #include "langhooks.h"
58 #include "dwarf2out.h"
63 struct processor_costs
{
67 /* Integer signed load */
70 /* Integer zeroed load */
76 /* fmov, fneg, fabs */
80 const int float_plusminus
;
86 const int float_cmove
;
92 const int float_div_sf
;
95 const int float_div_df
;
98 const int float_sqrt_sf
;
101 const int float_sqrt_df
;
109 /* integer multiply cost for each bit set past the most
110 significant 3, so the formula for multiply cost becomes:
113 highest_bit = highest_clear_bit(rs1);
115 highest_bit = highest_set_bit(rs1);
118 cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
120 A value of zero indicates that the multiply costs is fixed,
122 const int int_mul_bit_factor
;
133 /* penalty for shifts, due to scheduling rules etc. */
134 const int shift_penalty
;
138 struct processor_costs cypress_costs
= {
139 COSTS_N_INSNS (2), /* int load */
140 COSTS_N_INSNS (2), /* int signed load */
141 COSTS_N_INSNS (2), /* int zeroed load */
142 COSTS_N_INSNS (2), /* float load */
143 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
144 COSTS_N_INSNS (5), /* fadd, fsub */
145 COSTS_N_INSNS (1), /* fcmp */
146 COSTS_N_INSNS (1), /* fmov, fmovr */
147 COSTS_N_INSNS (7), /* fmul */
148 COSTS_N_INSNS (37), /* fdivs */
149 COSTS_N_INSNS (37), /* fdivd */
150 COSTS_N_INSNS (63), /* fsqrts */
151 COSTS_N_INSNS (63), /* fsqrtd */
152 COSTS_N_INSNS (1), /* imul */
153 COSTS_N_INSNS (1), /* imulX */
154 0, /* imul bit factor */
155 COSTS_N_INSNS (1), /* idiv */
156 COSTS_N_INSNS (1), /* idivX */
157 COSTS_N_INSNS (1), /* movcc/movr */
158 0, /* shift penalty */
162 struct processor_costs supersparc_costs
= {
163 COSTS_N_INSNS (1), /* int load */
164 COSTS_N_INSNS (1), /* int signed load */
165 COSTS_N_INSNS (1), /* int zeroed load */
166 COSTS_N_INSNS (0), /* float load */
167 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
168 COSTS_N_INSNS (3), /* fadd, fsub */
169 COSTS_N_INSNS (3), /* fcmp */
170 COSTS_N_INSNS (1), /* fmov, fmovr */
171 COSTS_N_INSNS (3), /* fmul */
172 COSTS_N_INSNS (6), /* fdivs */
173 COSTS_N_INSNS (9), /* fdivd */
174 COSTS_N_INSNS (12), /* fsqrts */
175 COSTS_N_INSNS (12), /* fsqrtd */
176 COSTS_N_INSNS (4), /* imul */
177 COSTS_N_INSNS (4), /* imulX */
178 0, /* imul bit factor */
179 COSTS_N_INSNS (4), /* idiv */
180 COSTS_N_INSNS (4), /* idivX */
181 COSTS_N_INSNS (1), /* movcc/movr */
182 1, /* shift penalty */
186 struct processor_costs hypersparc_costs
= {
187 COSTS_N_INSNS (1), /* int load */
188 COSTS_N_INSNS (1), /* int signed load */
189 COSTS_N_INSNS (1), /* int zeroed load */
190 COSTS_N_INSNS (1), /* float load */
191 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
192 COSTS_N_INSNS (1), /* fadd, fsub */
193 COSTS_N_INSNS (1), /* fcmp */
194 COSTS_N_INSNS (1), /* fmov, fmovr */
195 COSTS_N_INSNS (1), /* fmul */
196 COSTS_N_INSNS (8), /* fdivs */
197 COSTS_N_INSNS (12), /* fdivd */
198 COSTS_N_INSNS (17), /* fsqrts */
199 COSTS_N_INSNS (17), /* fsqrtd */
200 COSTS_N_INSNS (17), /* imul */
201 COSTS_N_INSNS (17), /* imulX */
202 0, /* imul bit factor */
203 COSTS_N_INSNS (17), /* idiv */
204 COSTS_N_INSNS (17), /* idivX */
205 COSTS_N_INSNS (1), /* movcc/movr */
206 0, /* shift penalty */
210 struct processor_costs leon_costs
= {
211 COSTS_N_INSNS (1), /* int load */
212 COSTS_N_INSNS (1), /* int signed load */
213 COSTS_N_INSNS (1), /* int zeroed load */
214 COSTS_N_INSNS (1), /* float load */
215 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
216 COSTS_N_INSNS (1), /* fadd, fsub */
217 COSTS_N_INSNS (1), /* fcmp */
218 COSTS_N_INSNS (1), /* fmov, fmovr */
219 COSTS_N_INSNS (1), /* fmul */
220 COSTS_N_INSNS (15), /* fdivs */
221 COSTS_N_INSNS (15), /* fdivd */
222 COSTS_N_INSNS (23), /* fsqrts */
223 COSTS_N_INSNS (23), /* fsqrtd */
224 COSTS_N_INSNS (5), /* imul */
225 COSTS_N_INSNS (5), /* imulX */
226 0, /* imul bit factor */
227 COSTS_N_INSNS (5), /* idiv */
228 COSTS_N_INSNS (5), /* idivX */
229 COSTS_N_INSNS (1), /* movcc/movr */
230 0, /* shift penalty */
234 struct processor_costs sparclet_costs
= {
235 COSTS_N_INSNS (3), /* int load */
236 COSTS_N_INSNS (3), /* int signed load */
237 COSTS_N_INSNS (1), /* int zeroed load */
238 COSTS_N_INSNS (1), /* float load */
239 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
240 COSTS_N_INSNS (1), /* fadd, fsub */
241 COSTS_N_INSNS (1), /* fcmp */
242 COSTS_N_INSNS (1), /* fmov, fmovr */
243 COSTS_N_INSNS (1), /* fmul */
244 COSTS_N_INSNS (1), /* fdivs */
245 COSTS_N_INSNS (1), /* fdivd */
246 COSTS_N_INSNS (1), /* fsqrts */
247 COSTS_N_INSNS (1), /* fsqrtd */
248 COSTS_N_INSNS (5), /* imul */
249 COSTS_N_INSNS (5), /* imulX */
250 0, /* imul bit factor */
251 COSTS_N_INSNS (5), /* idiv */
252 COSTS_N_INSNS (5), /* idivX */
253 COSTS_N_INSNS (1), /* movcc/movr */
254 0, /* shift penalty */
258 struct processor_costs ultrasparc_costs
= {
259 COSTS_N_INSNS (2), /* int load */
260 COSTS_N_INSNS (3), /* int signed load */
261 COSTS_N_INSNS (2), /* int zeroed load */
262 COSTS_N_INSNS (2), /* float load */
263 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
264 COSTS_N_INSNS (4), /* fadd, fsub */
265 COSTS_N_INSNS (1), /* fcmp */
266 COSTS_N_INSNS (2), /* fmov, fmovr */
267 COSTS_N_INSNS (4), /* fmul */
268 COSTS_N_INSNS (13), /* fdivs */
269 COSTS_N_INSNS (23), /* fdivd */
270 COSTS_N_INSNS (13), /* fsqrts */
271 COSTS_N_INSNS (23), /* fsqrtd */
272 COSTS_N_INSNS (4), /* imul */
273 COSTS_N_INSNS (4), /* imulX */
274 2, /* imul bit factor */
275 COSTS_N_INSNS (37), /* idiv */
276 COSTS_N_INSNS (68), /* idivX */
277 COSTS_N_INSNS (2), /* movcc/movr */
278 2, /* shift penalty */
282 struct processor_costs ultrasparc3_costs
= {
283 COSTS_N_INSNS (2), /* int load */
284 COSTS_N_INSNS (3), /* int signed load */
285 COSTS_N_INSNS (3), /* int zeroed load */
286 COSTS_N_INSNS (2), /* float load */
287 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
288 COSTS_N_INSNS (4), /* fadd, fsub */
289 COSTS_N_INSNS (5), /* fcmp */
290 COSTS_N_INSNS (3), /* fmov, fmovr */
291 COSTS_N_INSNS (4), /* fmul */
292 COSTS_N_INSNS (17), /* fdivs */
293 COSTS_N_INSNS (20), /* fdivd */
294 COSTS_N_INSNS (20), /* fsqrts */
295 COSTS_N_INSNS (29), /* fsqrtd */
296 COSTS_N_INSNS (6), /* imul */
297 COSTS_N_INSNS (6), /* imulX */
298 0, /* imul bit factor */
299 COSTS_N_INSNS (40), /* idiv */
300 COSTS_N_INSNS (71), /* idivX */
301 COSTS_N_INSNS (2), /* movcc/movr */
302 0, /* shift penalty */
306 struct processor_costs niagara_costs
= {
307 COSTS_N_INSNS (3), /* int load */
308 COSTS_N_INSNS (3), /* int signed load */
309 COSTS_N_INSNS (3), /* int zeroed load */
310 COSTS_N_INSNS (9), /* float load */
311 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
312 COSTS_N_INSNS (8), /* fadd, fsub */
313 COSTS_N_INSNS (26), /* fcmp */
314 COSTS_N_INSNS (8), /* fmov, fmovr */
315 COSTS_N_INSNS (29), /* fmul */
316 COSTS_N_INSNS (54), /* fdivs */
317 COSTS_N_INSNS (83), /* fdivd */
318 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
319 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
320 COSTS_N_INSNS (11), /* imul */
321 COSTS_N_INSNS (11), /* imulX */
322 0, /* imul bit factor */
323 COSTS_N_INSNS (72), /* idiv */
324 COSTS_N_INSNS (72), /* idivX */
325 COSTS_N_INSNS (1), /* movcc/movr */
326 0, /* shift penalty */
330 struct processor_costs niagara2_costs
= {
331 COSTS_N_INSNS (3), /* int load */
332 COSTS_N_INSNS (3), /* int signed load */
333 COSTS_N_INSNS (3), /* int zeroed load */
334 COSTS_N_INSNS (3), /* float load */
335 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
336 COSTS_N_INSNS (6), /* fadd, fsub */
337 COSTS_N_INSNS (6), /* fcmp */
338 COSTS_N_INSNS (6), /* fmov, fmovr */
339 COSTS_N_INSNS (6), /* fmul */
340 COSTS_N_INSNS (19), /* fdivs */
341 COSTS_N_INSNS (33), /* fdivd */
342 COSTS_N_INSNS (19), /* fsqrts */
343 COSTS_N_INSNS (33), /* fsqrtd */
344 COSTS_N_INSNS (5), /* imul */
345 COSTS_N_INSNS (5), /* imulX */
346 0, /* imul bit factor */
347 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
348 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
349 COSTS_N_INSNS (1), /* movcc/movr */
350 0, /* shift penalty */
354 struct processor_costs niagara3_costs
= {
355 COSTS_N_INSNS (3), /* int load */
356 COSTS_N_INSNS (3), /* int signed load */
357 COSTS_N_INSNS (3), /* int zeroed load */
358 COSTS_N_INSNS (3), /* float load */
359 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
360 COSTS_N_INSNS (9), /* fadd, fsub */
361 COSTS_N_INSNS (9), /* fcmp */
362 COSTS_N_INSNS (9), /* fmov, fmovr */
363 COSTS_N_INSNS (9), /* fmul */
364 COSTS_N_INSNS (23), /* fdivs */
365 COSTS_N_INSNS (37), /* fdivd */
366 COSTS_N_INSNS (23), /* fsqrts */
367 COSTS_N_INSNS (37), /* fsqrtd */
368 COSTS_N_INSNS (9), /* imul */
369 COSTS_N_INSNS (9), /* imulX */
370 0, /* imul bit factor */
371 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
372 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
373 COSTS_N_INSNS (1), /* movcc/movr */
374 0, /* shift penalty */
377 static const struct processor_costs
*sparc_costs
= &cypress_costs
;
379 #ifdef HAVE_AS_RELAX_OPTION
380 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
381 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
382 With sethi/jmp, neither 'as' nor 'ld' has an easy way how to find out if
383 somebody does not branch between the sethi and jmp. */
384 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
386 #define LEAF_SIBCALL_SLOT_RESERVED_P \
387 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
390 /* Vector to say how input registers are mapped to output registers.
391 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
392 eliminate it. You must use -fomit-frame-pointer to get that. */
393 char leaf_reg_remap
[] =
394 { 0, 1, 2, 3, 4, 5, 6, 7,
395 -1, -1, -1, -1, -1, -1, 14, -1,
396 -1, -1, -1, -1, -1, -1, -1, -1,
397 8, 9, 10, 11, 12, 13, -1, 15,
399 32, 33, 34, 35, 36, 37, 38, 39,
400 40, 41, 42, 43, 44, 45, 46, 47,
401 48, 49, 50, 51, 52, 53, 54, 55,
402 56, 57, 58, 59, 60, 61, 62, 63,
403 64, 65, 66, 67, 68, 69, 70, 71,
404 72, 73, 74, 75, 76, 77, 78, 79,
405 80, 81, 82, 83, 84, 85, 86, 87,
406 88, 89, 90, 91, 92, 93, 94, 95,
407 96, 97, 98, 99, 100, 101, 102};
409 /* Vector, indexed by hard register number, which contains 1
410 for a register that is allowable in a candidate for leaf
411 function treatment. */
412 char sparc_leaf_regs
[] =
413 { 1, 1, 1, 1, 1, 1, 1, 1,
414 0, 0, 0, 0, 0, 0, 1, 0,
415 0, 0, 0, 0, 0, 0, 0, 0,
416 1, 1, 1, 1, 1, 1, 0, 1,
417 1, 1, 1, 1, 1, 1, 1, 1,
418 1, 1, 1, 1, 1, 1, 1, 1,
419 1, 1, 1, 1, 1, 1, 1, 1,
420 1, 1, 1, 1, 1, 1, 1, 1,
421 1, 1, 1, 1, 1, 1, 1, 1,
422 1, 1, 1, 1, 1, 1, 1, 1,
423 1, 1, 1, 1, 1, 1, 1, 1,
424 1, 1, 1, 1, 1, 1, 1, 1,
425 1, 1, 1, 1, 1, 1, 1};
427 struct GTY(()) machine_function
429 /* Size of the frame of the function. */
430 HOST_WIDE_INT frame_size
;
432 /* Size of the frame of the function minus the register window save area
433 and the outgoing argument area. */
434 HOST_WIDE_INT apparent_frame_size
;
436 /* Register we pretend the frame pointer is allocated to. Normally, this
437 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
438 record "offset" separately as it may be too big for (reg + disp). */
440 HOST_WIDE_INT frame_base_offset
;
442 /* Some local-dynamic TLS symbol name. */
443 const char *some_ld_name
;
445 /* Number of global or FP registers to be saved (as 4-byte quantities). */
446 int n_global_fp_regs
;
448 /* True if the current function is leaf and uses only leaf regs,
449 so that the SPARC leaf function optimization can be applied.
450 Private version of current_function_uses_only_leaf_regs, see
451 sparc_expand_prologue for the rationale. */
454 /* True if the prologue saves local or in registers. */
455 bool save_local_in_regs_p
;
457 /* True if the data calculated by sparc_expand_prologue are valid. */
458 bool prologue_data_valid_p
;
461 #define sparc_frame_size cfun->machine->frame_size
462 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
463 #define sparc_frame_base_reg cfun->machine->frame_base_reg
464 #define sparc_frame_base_offset cfun->machine->frame_base_offset
465 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
466 #define sparc_leaf_function_p cfun->machine->leaf_function_p
467 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
468 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
470 /* 1 if the next opcode is to be specially indented. */
471 int sparc_indent_opcode
= 0;
473 static void sparc_option_override (void);
474 static void sparc_init_modes (void);
475 static void scan_record_type (const_tree
, int *, int *, int *);
476 static int function_arg_slotno (const CUMULATIVE_ARGS
*, enum machine_mode
,
477 const_tree
, bool, bool, int *, int *);
479 static int supersparc_adjust_cost (rtx
, rtx
, rtx
, int);
480 static int hypersparc_adjust_cost (rtx
, rtx
, rtx
, int);
482 static void sparc_emit_set_const32 (rtx
, rtx
);
483 static void sparc_emit_set_const64 (rtx
, rtx
);
484 static void sparc_output_addr_vec (rtx
);
485 static void sparc_output_addr_diff_vec (rtx
);
486 static void sparc_output_deferred_case_vectors (void);
487 static bool sparc_legitimate_address_p (enum machine_mode
, rtx
, bool);
488 static bool sparc_legitimate_constant_p (enum machine_mode
, rtx
);
489 static rtx
sparc_builtin_saveregs (void);
490 static int epilogue_renumber (rtx
*, int);
491 static bool sparc_assemble_integer (rtx
, unsigned int, int);
492 static int set_extends (rtx
);
493 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT
);
494 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT
);
495 #ifdef TARGET_SOLARIS
496 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
497 tree
) ATTRIBUTE_UNUSED
;
499 static int sparc_adjust_cost (rtx
, rtx
, rtx
, int);
500 static int sparc_issue_rate (void);
501 static void sparc_sched_init (FILE *, int, int);
502 static int sparc_use_sched_lookahead (void);
504 static void emit_soft_tfmode_libcall (const char *, int, rtx
*);
505 static void emit_soft_tfmode_binop (enum rtx_code
, rtx
*);
506 static void emit_soft_tfmode_unop (enum rtx_code
, rtx
*);
507 static void emit_soft_tfmode_cvt (enum rtx_code
, rtx
*);
508 static void emit_hard_tfmode_operation (enum rtx_code
, rtx
*);
510 static bool sparc_function_ok_for_sibcall (tree
, tree
);
511 static void sparc_init_libfuncs (void);
512 static void sparc_init_builtins (void);
513 static void sparc_vis_init_builtins (void);
514 static rtx
sparc_expand_builtin (tree
, rtx
, rtx
, enum machine_mode
, int);
515 static tree
sparc_fold_builtin (tree
, int, tree
*, bool);
516 static int sparc_vis_mul8x16 (int, int);
517 static tree
sparc_handle_vis_mul8x16 (int, tree
, tree
, tree
);
518 static void sparc_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
519 HOST_WIDE_INT
, tree
);
520 static bool sparc_can_output_mi_thunk (const_tree
, HOST_WIDE_INT
,
521 HOST_WIDE_INT
, const_tree
);
522 static void sparc_reorg (void);
523 static struct machine_function
* sparc_init_machine_status (void);
524 static bool sparc_cannot_force_const_mem (enum machine_mode
, rtx
);
525 static rtx
sparc_tls_get_addr (void);
526 static rtx
sparc_tls_got (void);
527 static const char *get_some_local_dynamic_name (void);
528 static int get_some_local_dynamic_name_1 (rtx
*, void *);
529 static int sparc_register_move_cost (enum machine_mode
,
530 reg_class_t
, reg_class_t
);
531 static bool sparc_rtx_costs (rtx
, int, int, int, int *, bool);
532 static rtx
sparc_function_value (const_tree
, const_tree
, bool);
533 static rtx
sparc_libcall_value (enum machine_mode
, const_rtx
);
534 static bool sparc_function_value_regno_p (const unsigned int);
535 static rtx
sparc_struct_value_rtx (tree
, int);
536 static enum machine_mode
sparc_promote_function_mode (const_tree
, enum machine_mode
,
537 int *, const_tree
, int);
538 static bool sparc_return_in_memory (const_tree
, const_tree
);
539 static bool sparc_strict_argument_naming (cumulative_args_t
);
540 static void sparc_va_start (tree
, rtx
);
541 static tree
sparc_gimplify_va_arg (tree
, tree
, gimple_seq
*, gimple_seq
*);
542 static bool sparc_vector_mode_supported_p (enum machine_mode
);
543 static bool sparc_tls_referenced_p (rtx
);
544 static rtx
sparc_legitimize_tls_address (rtx
);
545 static rtx
sparc_legitimize_pic_address (rtx
, rtx
);
546 static rtx
sparc_legitimize_address (rtx
, rtx
, enum machine_mode
);
547 static rtx
sparc_delegitimize_address (rtx
);
548 static bool sparc_mode_dependent_address_p (const_rtx
);
549 static bool sparc_pass_by_reference (cumulative_args_t
,
550 enum machine_mode
, const_tree
, bool);
551 static void sparc_function_arg_advance (cumulative_args_t
,
552 enum machine_mode
, const_tree
, bool);
553 static rtx
sparc_function_arg_1 (cumulative_args_t
,
554 enum machine_mode
, const_tree
, bool, bool);
555 static rtx
sparc_function_arg (cumulative_args_t
,
556 enum machine_mode
, const_tree
, bool);
557 static rtx
sparc_function_incoming_arg (cumulative_args_t
,
558 enum machine_mode
, const_tree
, bool);
559 static unsigned int sparc_function_arg_boundary (enum machine_mode
,
561 static int sparc_arg_partial_bytes (cumulative_args_t
,
562 enum machine_mode
, tree
, bool);
563 static void sparc_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
564 static void sparc_file_end (void);
565 static bool sparc_frame_pointer_required (void);
566 static bool sparc_can_eliminate (const int, const int);
567 static rtx
sparc_builtin_setjmp_frame_value (void);
568 static void sparc_conditional_register_usage (void);
569 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
570 static const char *sparc_mangle_type (const_tree
);
572 static void sparc_trampoline_init (rtx
, tree
, rtx
);
573 static enum machine_mode
sparc_preferred_simd_mode (enum machine_mode
);
574 static reg_class_t
sparc_preferred_reload_class (rtx x
, reg_class_t rclass
);
575 static bool sparc_print_operand_punct_valid_p (unsigned char);
576 static void sparc_print_operand (FILE *, rtx
, int);
577 static void sparc_print_operand_address (FILE *, rtx
);
578 static reg_class_t
sparc_secondary_reload (bool, rtx
, reg_class_t
,
579 enum machine_mode
, secondary_reload_info
*);
581 #ifdef SUBTARGET_ATTRIBUTE_TABLE
582 /* Table of valid machine attributes. */
583 static const struct attribute_spec sparc_attribute_table
[] =
585 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
587 SUBTARGET_ATTRIBUTE_TABLE
,
588 { NULL
, 0, 0, false, false, false, NULL
, false }
592 /* Option handling. */
595 enum cmodel sparc_cmodel
;
597 char sparc_hard_reg_printed
[8];
599 /* Initialize the GCC target structure. */
601 /* The default is to use .half rather than .short for aligned HI objects. */
602 #undef TARGET_ASM_ALIGNED_HI_OP
603 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
605 #undef TARGET_ASM_UNALIGNED_HI_OP
606 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
607 #undef TARGET_ASM_UNALIGNED_SI_OP
608 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
609 #undef TARGET_ASM_UNALIGNED_DI_OP
610 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
612 /* The target hook has to handle DI-mode values. */
613 #undef TARGET_ASM_INTEGER
614 #define TARGET_ASM_INTEGER sparc_assemble_integer
616 #undef TARGET_ASM_FUNCTION_PROLOGUE
617 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
618 #undef TARGET_ASM_FUNCTION_EPILOGUE
619 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
621 #undef TARGET_SCHED_ADJUST_COST
622 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
623 #undef TARGET_SCHED_ISSUE_RATE
624 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
625 #undef TARGET_SCHED_INIT
626 #define TARGET_SCHED_INIT sparc_sched_init
627 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
628 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
630 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
631 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
633 #undef TARGET_INIT_LIBFUNCS
634 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
635 #undef TARGET_INIT_BUILTINS
636 #define TARGET_INIT_BUILTINS sparc_init_builtins
638 #undef TARGET_LEGITIMIZE_ADDRESS
639 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
640 #undef TARGET_DELEGITIMIZE_ADDRESS
641 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
642 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
643 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
645 #undef TARGET_EXPAND_BUILTIN
646 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
647 #undef TARGET_FOLD_BUILTIN
648 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
651 #undef TARGET_HAVE_TLS
652 #define TARGET_HAVE_TLS true
655 #undef TARGET_CANNOT_FORCE_CONST_MEM
656 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
658 #undef TARGET_ASM_OUTPUT_MI_THUNK
659 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
660 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
661 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
663 #undef TARGET_MACHINE_DEPENDENT_REORG
664 #define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg
666 #undef TARGET_RTX_COSTS
667 #define TARGET_RTX_COSTS sparc_rtx_costs
668 #undef TARGET_ADDRESS_COST
669 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
670 #undef TARGET_REGISTER_MOVE_COST
671 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
673 #undef TARGET_PROMOTE_FUNCTION_MODE
674 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
676 #undef TARGET_FUNCTION_VALUE
677 #define TARGET_FUNCTION_VALUE sparc_function_value
678 #undef TARGET_LIBCALL_VALUE
679 #define TARGET_LIBCALL_VALUE sparc_libcall_value
680 #undef TARGET_FUNCTION_VALUE_REGNO_P
681 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
683 #undef TARGET_STRUCT_VALUE_RTX
684 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
685 #undef TARGET_RETURN_IN_MEMORY
686 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
687 #undef TARGET_MUST_PASS_IN_STACK
688 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
689 #undef TARGET_PASS_BY_REFERENCE
690 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
691 #undef TARGET_ARG_PARTIAL_BYTES
692 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
693 #undef TARGET_FUNCTION_ARG_ADVANCE
694 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
695 #undef TARGET_FUNCTION_ARG
696 #define TARGET_FUNCTION_ARG sparc_function_arg
697 #undef TARGET_FUNCTION_INCOMING_ARG
698 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
699 #undef TARGET_FUNCTION_ARG_BOUNDARY
700 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
702 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
703 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
704 #undef TARGET_STRICT_ARGUMENT_NAMING
705 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
707 #undef TARGET_EXPAND_BUILTIN_VA_START
708 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
709 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
710 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
712 #undef TARGET_VECTOR_MODE_SUPPORTED_P
713 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
715 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
716 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
718 #ifdef SUBTARGET_INSERT_ATTRIBUTES
719 #undef TARGET_INSERT_ATTRIBUTES
720 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
723 #ifdef SUBTARGET_ATTRIBUTE_TABLE
724 #undef TARGET_ATTRIBUTE_TABLE
725 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
728 #undef TARGET_RELAXED_ORDERING
729 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
731 #undef TARGET_OPTION_OVERRIDE
732 #define TARGET_OPTION_OVERRIDE sparc_option_override
734 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
735 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
736 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
739 #undef TARGET_ASM_FILE_END
740 #define TARGET_ASM_FILE_END sparc_file_end
742 #undef TARGET_FRAME_POINTER_REQUIRED
743 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
745 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
746 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value
748 #undef TARGET_CAN_ELIMINATE
749 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
751 #undef TARGET_PREFERRED_RELOAD_CLASS
752 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
754 #undef TARGET_SECONDARY_RELOAD
755 #define TARGET_SECONDARY_RELOAD sparc_secondary_reload
757 #undef TARGET_CONDITIONAL_REGISTER_USAGE
758 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
760 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
761 #undef TARGET_MANGLE_TYPE
762 #define TARGET_MANGLE_TYPE sparc_mangle_type
765 #undef TARGET_LEGITIMATE_ADDRESS_P
766 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
768 #undef TARGET_LEGITIMATE_CONSTANT_P
769 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
771 #undef TARGET_TRAMPOLINE_INIT
772 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
774 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
775 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
776 #undef TARGET_PRINT_OPERAND
777 #define TARGET_PRINT_OPERAND sparc_print_operand
778 #undef TARGET_PRINT_OPERAND_ADDRESS
779 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
781 struct gcc_target targetm
= TARGET_INITIALIZER
;
784 dump_target_flag_bits (const int flags
)
786 if (flags
& MASK_64BIT
)
787 fprintf (stderr
, "64BIT ");
788 if (flags
& MASK_APP_REGS
)
789 fprintf (stderr
, "APP_REGS ");
790 if (flags
& MASK_FASTER_STRUCTS
)
791 fprintf (stderr
, "FASTER_STRUCTS ");
792 if (flags
& MASK_FLAT
)
793 fprintf (stderr
, "FLAT ");
794 if (flags
& MASK_FMAF
)
795 fprintf (stderr
, "FMAF ");
796 if (flags
& MASK_FPU
)
797 fprintf (stderr
, "FPU ");
798 if (flags
& MASK_HARD_QUAD
)
799 fprintf (stderr
, "HARD_QUAD ");
800 if (flags
& MASK_POPC
)
801 fprintf (stderr
, "POPC ");
802 if (flags
& MASK_PTR64
)
803 fprintf (stderr
, "PTR64 ");
804 if (flags
& MASK_STACK_BIAS
)
805 fprintf (stderr
, "STACK_BIAS ");
806 if (flags
& MASK_UNALIGNED_DOUBLES
)
807 fprintf (stderr
, "UNALIGNED_DOUBLES ");
808 if (flags
& MASK_V8PLUS
)
809 fprintf (stderr
, "V8PLUS ");
810 if (flags
& MASK_VIS
)
811 fprintf (stderr
, "VIS ");
812 if (flags
& MASK_VIS2
)
813 fprintf (stderr
, "VIS2 ");
814 if (flags
& MASK_VIS3
)
815 fprintf (stderr
, "VIS3 ");
816 if (flags
& MASK_DEPRECATED_V8_INSNS
)
817 fprintf (stderr
, "DEPRECATED_V8_INSNS ");
818 if (flags
& MASK_LITTLE_ENDIAN
)
819 fprintf (stderr
, "LITTLE_ENDIAN ");
820 if (flags
& MASK_SPARCLET
)
821 fprintf (stderr
, "SPARCLET ");
822 if (flags
& MASK_SPARCLITE
)
823 fprintf (stderr
, "SPARCLITE ");
825 fprintf (stderr
, "V8 ");
827 fprintf (stderr
, "V9 ");
831 dump_target_flags (const char *prefix
, const int flags
)
833 fprintf (stderr
, "%s: (%08x) [ ", prefix
, flags
);
834 dump_target_flag_bits (flags
);
835 fprintf(stderr
, "]\n");
838 /* Validate and override various options, and do some machine dependent
842 sparc_option_override (void)
844 static struct code_model
{
845 const char *const name
;
846 const enum cmodel value
;
847 } const cmodels
[] = {
849 { "medlow", CM_MEDLOW
},
850 { "medmid", CM_MEDMID
},
851 { "medany", CM_MEDANY
},
852 { "embmedany", CM_EMBMEDANY
},
853 { NULL
, (enum cmodel
) 0 }
855 const struct code_model
*cmodel
;
856 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
857 static struct cpu_default
{
859 const enum processor_type processor
;
860 } const cpu_default
[] = {
861 /* There must be one entry here for each TARGET_CPU value. */
862 { TARGET_CPU_sparc
, PROCESSOR_CYPRESS
},
863 { TARGET_CPU_v8
, PROCESSOR_V8
},
864 { TARGET_CPU_supersparc
, PROCESSOR_SUPERSPARC
},
865 { TARGET_CPU_hypersparc
, PROCESSOR_HYPERSPARC
},
866 { TARGET_CPU_leon
, PROCESSOR_LEON
},
867 { TARGET_CPU_sparclite
, PROCESSOR_F930
},
868 { TARGET_CPU_sparclite86x
, PROCESSOR_SPARCLITE86X
},
869 { TARGET_CPU_sparclet
, PROCESSOR_TSC701
},
870 { TARGET_CPU_v9
, PROCESSOR_V9
},
871 { TARGET_CPU_ultrasparc
, PROCESSOR_ULTRASPARC
},
872 { TARGET_CPU_ultrasparc3
, PROCESSOR_ULTRASPARC3
},
873 { TARGET_CPU_niagara
, PROCESSOR_NIAGARA
},
874 { TARGET_CPU_niagara2
, PROCESSOR_NIAGARA2
},
875 { TARGET_CPU_niagara3
, PROCESSOR_NIAGARA3
},
876 { TARGET_CPU_niagara4
, PROCESSOR_NIAGARA4
},
879 const struct cpu_default
*def
;
880 /* Table of values for -m{cpu,tune}=. This must match the order of
881 the PROCESSOR_* enumeration. */
882 static struct cpu_table
{
883 const char *const name
;
886 } const cpu_table
[] = {
887 { "v7", MASK_ISA
, 0 },
888 { "cypress", MASK_ISA
, 0 },
889 { "v8", MASK_ISA
, MASK_V8
},
890 /* TI TMS390Z55 supersparc */
891 { "supersparc", MASK_ISA
, MASK_V8
},
892 { "hypersparc", MASK_ISA
, MASK_V8
|MASK_FPU
},
894 { "leon", MASK_ISA
, MASK_V8
|MASK_FPU
},
895 { "sparclite", MASK_ISA
, MASK_SPARCLITE
},
896 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
897 { "f930", MASK_ISA
|MASK_FPU
, MASK_SPARCLITE
},
898 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
899 { "f934", MASK_ISA
, MASK_SPARCLITE
|MASK_FPU
},
900 { "sparclite86x", MASK_ISA
|MASK_FPU
, MASK_SPARCLITE
},
901 { "sparclet", MASK_ISA
, MASK_SPARCLET
},
903 { "tsc701", MASK_ISA
, MASK_SPARCLET
},
904 { "v9", MASK_ISA
, MASK_V9
},
905 /* UltraSPARC I, II, IIi */
906 { "ultrasparc", MASK_ISA
,
907 /* Although insns using %y are deprecated, it is a clear win. */
908 MASK_V9
|MASK_DEPRECATED_V8_INSNS
},
910 /* ??? Check if %y issue still holds true. */
911 { "ultrasparc3", MASK_ISA
,
912 MASK_V9
|MASK_DEPRECATED_V8_INSNS
|MASK_VIS2
},
914 { "niagara", MASK_ISA
,
915 MASK_V9
|MASK_DEPRECATED_V8_INSNS
},
917 { "niagara2", MASK_ISA
,
918 MASK_V9
|MASK_POPC
|MASK_VIS2
},
920 { "niagara3", MASK_ISA
,
921 MASK_V9
|MASK_POPC
|MASK_VIS2
|MASK_VIS3
|MASK_FMAF
},
923 { "niagara4", MASK_ISA
,
924 MASK_V9
|MASK_POPC
|MASK_VIS2
|MASK_VIS3
|MASK_FMAF
},
926 const struct cpu_table
*cpu
;
930 if (sparc_debug_string
!= NULL
)
935 p
= ASTRDUP (sparc_debug_string
);
936 while ((q
= strtok (p
, ",")) != NULL
)
950 if (! strcmp (q
, "all"))
951 mask
= MASK_DEBUG_ALL
;
952 else if (! strcmp (q
, "options"))
953 mask
= MASK_DEBUG_OPTIONS
;
955 error ("unknown -mdebug-%s switch", q
);
958 sparc_debug
&= ~mask
;
964 if (TARGET_DEBUG_OPTIONS
)
966 dump_target_flags("Initial target_flags", target_flags
);
967 dump_target_flags("target_flags_explicit", target_flags_explicit
);
970 #ifdef SUBTARGET_OVERRIDE_OPTIONS
971 SUBTARGET_OVERRIDE_OPTIONS
;
974 #ifndef SPARC_BI_ARCH
975 /* Check for unsupported architecture size. */
976 if (! TARGET_64BIT
!= DEFAULT_ARCH32_P
)
977 error ("%s is not supported by this configuration",
978 DEFAULT_ARCH32_P
? "-m64" : "-m32");
981 /* We force all 64bit archs to use 128 bit long double */
982 if (TARGET_64BIT
&& ! TARGET_LONG_DOUBLE_128
)
984 error ("-mlong-double-64 not allowed with -m64");
985 target_flags
|= MASK_LONG_DOUBLE_128
;
988 /* Code model selection. */
989 sparc_cmodel
= SPARC_DEFAULT_CMODEL
;
993 sparc_cmodel
= CM_32
;
996 if (sparc_cmodel_string
!= NULL
)
1000 for (cmodel
= &cmodels
[0]; cmodel
->name
; cmodel
++)
1001 if (strcmp (sparc_cmodel_string
, cmodel
->name
) == 0)
1003 if (cmodel
->name
== NULL
)
1004 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string
);
1006 sparc_cmodel
= cmodel
->value
;
1009 error ("-mcmodel= is not supported on 32 bit systems");
1012 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1013 for (i
= 8; i
< 16; i
++)
1014 if (!call_used_regs
[i
])
1016 error ("-fcall-saved-REG is not supported for out registers");
1017 call_used_regs
[i
] = 1;
1020 fpu
= target_flags
& MASK_FPU
; /* save current -mfpu status */
1022 /* Set the default CPU. */
1023 if (!global_options_set
.x_sparc_cpu_and_features
)
1025 for (def
= &cpu_default
[0]; def
->cpu
!= -1; ++def
)
1026 if (def
->cpu
== TARGET_CPU_DEFAULT
)
1028 gcc_assert (def
->cpu
!= -1);
1029 sparc_cpu_and_features
= def
->processor
;
1032 if (!global_options_set
.x_sparc_cpu
)
1033 sparc_cpu
= sparc_cpu_and_features
;
1035 cpu
= &cpu_table
[(int) sparc_cpu_and_features
];
1037 if (TARGET_DEBUG_OPTIONS
)
1039 fprintf (stderr
, "sparc_cpu_and_features: %s\n", cpu
->name
);
1040 fprintf (stderr
, "sparc_cpu: %s\n",
1041 cpu_table
[(int) sparc_cpu
].name
);
1042 dump_target_flags ("cpu->disable", cpu
->disable
);
1043 dump_target_flags ("cpu->enable", cpu
->enable
);
1046 target_flags
&= ~cpu
->disable
;
1047 target_flags
|= (cpu
->enable
1048 #ifndef HAVE_AS_FMAF_HPC_VIS3
1049 & ~(MASK_FMAF
| MASK_VIS3
)
1053 /* If -mfpu or -mno-fpu was explicitly used, don't override with
1054 the processor default. */
1055 if (target_flags_explicit
& MASK_FPU
)
1056 target_flags
= (target_flags
& ~MASK_FPU
) | fpu
;
1058 /* -mvis2 implies -mvis */
1060 target_flags
|= MASK_VIS
;
1062 /* -mvis3 implies -mvis2 and -mvis */
1064 target_flags
|= MASK_VIS2
| MASK_VIS
;
1066 /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled. */
1068 target_flags
&= ~(MASK_VIS
| MASK_VIS2
| MASK_VIS3
| MASK_FMAF
);
1070 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1072 -m64 also implies v9. */
1073 if (TARGET_VIS
|| TARGET_ARCH64
)
1075 target_flags
|= MASK_V9
;
1076 target_flags
&= ~(MASK_V8
| MASK_SPARCLET
| MASK_SPARCLITE
);
1079 /* -mvis also implies -mv8plus on 32-bit */
1080 if (TARGET_VIS
&& ! TARGET_ARCH64
)
1081 target_flags
|= MASK_V8PLUS
;
1083 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
1084 if (TARGET_V9
&& TARGET_ARCH32
)
1085 target_flags
|= MASK_DEPRECATED_V8_INSNS
;
1087 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
1088 if (! TARGET_V9
|| TARGET_ARCH64
)
1089 target_flags
&= ~MASK_V8PLUS
;
1091 /* Don't use stack biasing in 32 bit mode. */
1093 target_flags
&= ~MASK_STACK_BIAS
;
1095 /* Supply a default value for align_functions. */
1096 if (align_functions
== 0
1097 && (sparc_cpu
== PROCESSOR_ULTRASPARC
1098 || sparc_cpu
== PROCESSOR_ULTRASPARC3
1099 || sparc_cpu
== PROCESSOR_NIAGARA
1100 || sparc_cpu
== PROCESSOR_NIAGARA2
1101 || sparc_cpu
== PROCESSOR_NIAGARA3
1102 || sparc_cpu
== PROCESSOR_NIAGARA4
))
1103 align_functions
= 32;
1105 /* Validate PCC_STRUCT_RETURN. */
1106 if (flag_pcc_struct_return
== DEFAULT_PCC_STRUCT_RETURN
)
1107 flag_pcc_struct_return
= (TARGET_ARCH64
? 0 : 1);
1109 /* Only use .uaxword when compiling for a 64-bit target. */
1111 targetm
.asm_out
.unaligned_op
.di
= NULL
;
1113 /* Do various machine dependent initializations. */
1114 sparc_init_modes ();
1116 /* Set up function hooks. */
1117 init_machine_status
= sparc_init_machine_status
;
1122 case PROCESSOR_CYPRESS
:
1123 sparc_costs
= &cypress_costs
;
1126 case PROCESSOR_SPARCLITE
:
1127 case PROCESSOR_SUPERSPARC
:
1128 sparc_costs
= &supersparc_costs
;
1130 case PROCESSOR_F930
:
1131 case PROCESSOR_F934
:
1132 case PROCESSOR_HYPERSPARC
:
1133 case PROCESSOR_SPARCLITE86X
:
1134 sparc_costs
= &hypersparc_costs
;
1136 case PROCESSOR_LEON
:
1137 sparc_costs
= &leon_costs
;
1139 case PROCESSOR_SPARCLET
:
1140 case PROCESSOR_TSC701
:
1141 sparc_costs
= &sparclet_costs
;
1144 case PROCESSOR_ULTRASPARC
:
1145 sparc_costs
= &ultrasparc_costs
;
1147 case PROCESSOR_ULTRASPARC3
:
1148 sparc_costs
= &ultrasparc3_costs
;
1150 case PROCESSOR_NIAGARA
:
1151 sparc_costs
= &niagara_costs
;
1153 case PROCESSOR_NIAGARA2
:
1154 sparc_costs
= &niagara2_costs
;
1156 case PROCESSOR_NIAGARA3
:
1157 case PROCESSOR_NIAGARA4
:
1158 sparc_costs
= &niagara3_costs
;
1160 case PROCESSOR_NATIVE
:
1164 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1165 if (!(target_flags_explicit
& MASK_LONG_DOUBLE_128
))
1166 target_flags
|= MASK_LONG_DOUBLE_128
;
1169 if (TARGET_DEBUG_OPTIONS
)
1170 dump_target_flags ("Final target_flags", target_flags
);
1172 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES
,
1173 ((sparc_cpu
== PROCESSOR_ULTRASPARC
1174 || sparc_cpu
== PROCESSOR_NIAGARA
1175 || sparc_cpu
== PROCESSOR_NIAGARA2
1176 || sparc_cpu
== PROCESSOR_NIAGARA3
1177 || sparc_cpu
== PROCESSOR_NIAGARA4
)
1179 : (sparc_cpu
== PROCESSOR_ULTRASPARC3
1181 global_options
.x_param_values
,
1182 global_options_set
.x_param_values
);
1183 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE
,
1184 ((sparc_cpu
== PROCESSOR_ULTRASPARC
1185 || sparc_cpu
== PROCESSOR_ULTRASPARC3
1186 || sparc_cpu
== PROCESSOR_NIAGARA
1187 || sparc_cpu
== PROCESSOR_NIAGARA2
1188 || sparc_cpu
== PROCESSOR_NIAGARA3
1189 || sparc_cpu
== PROCESSOR_NIAGARA4
)
1191 global_options
.x_param_values
,
1192 global_options_set
.x_param_values
);
1194 /* Disable save slot sharing for call-clobbered registers by default.
1195 The IRA sharing algorithm works on single registers only and this
1196 pessimizes for double floating-point registers. */
1197 if (!global_options_set
.x_flag_ira_share_save_slots
)
1198 flag_ira_share_save_slots
= 0;
1201 /* Miscellaneous utilities. */
1203 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
1204 or branch on register contents instructions. */
1207 v9_regcmp_p (enum rtx_code code
)
1209 return (code
== EQ
|| code
== NE
|| code
== GE
|| code
== LT
1210 || code
== LE
|| code
== GT
);
1213 /* Nonzero if OP is a floating point constant which can
1214 be loaded into an integer register using a single
1215 sethi instruction. */
1220 if (GET_CODE (op
) == CONST_DOUBLE
)
1225 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
1226 REAL_VALUE_TO_TARGET_SINGLE (r
, i
);
1227 return !SPARC_SIMM13_P (i
) && SPARC_SETHI_P (i
);
1233 /* Nonzero if OP is a floating point constant which can
1234 be loaded into an integer register using a single
1240 if (GET_CODE (op
) == CONST_DOUBLE
)
1245 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
1246 REAL_VALUE_TO_TARGET_SINGLE (r
, i
);
1247 return SPARC_SIMM13_P (i
);
1253 /* Nonzero if OP is a floating point constant which can
1254 be loaded into an integer register using a high/losum
1255 instruction sequence. */
1258 fp_high_losum_p (rtx op
)
1260 /* The constraints calling this should only be in
1261 SFmode move insns, so any constant which cannot
1262 be moved using a single insn will do. */
1263 if (GET_CODE (op
) == CONST_DOUBLE
)
1268 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
1269 REAL_VALUE_TO_TARGET_SINGLE (r
, i
);
1270 return !SPARC_SIMM13_P (i
) && !SPARC_SETHI_P (i
);
1276 /* Return true if the address of LABEL can be loaded by means of the
1277 mov{si,di}_pic_label_ref patterns in PIC mode. */
1280 can_use_mov_pic_label_ref (rtx label
)
1282 /* VxWorks does not impose a fixed gap between segments; the run-time
1283 gap can be different from the object-file gap. We therefore can't
1284 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1285 are absolutely sure that X is in the same segment as the GOT.
1286 Unfortunately, the flexibility of linker scripts means that we
1287 can't be sure of that in general, so assume that GOT-relative
1288 accesses are never valid on VxWorks. */
1289 if (TARGET_VXWORKS_RTP
)
1292 /* Similarly, if the label is non-local, it might end up being placed
1293 in a different section than the current one; now mov_pic_label_ref
1294 requires the label and the code to be in the same section. */
1295 if (LABEL_REF_NONLOCAL_P (label
))
1298 /* Finally, if we are reordering basic blocks and partition into hot
1299 and cold sections, this might happen for any label. */
1300 if (flag_reorder_blocks_and_partition
)
1306 /* Expand a move instruction. Return true if all work is done. */
1309 sparc_expand_move (enum machine_mode mode
, rtx
*operands
)
1311 /* Handle sets of MEM first. */
1312 if (GET_CODE (operands
[0]) == MEM
)
1314 /* 0 is a register (or a pair of registers) on SPARC. */
1315 if (register_or_zero_operand (operands
[1], mode
))
1318 if (!reload_in_progress
)
1320 operands
[0] = validize_mem (operands
[0]);
1321 operands
[1] = force_reg (mode
, operands
[1]);
1325 /* Fixup TLS cases. */
1327 && CONSTANT_P (operands
[1])
1328 && sparc_tls_referenced_p (operands
[1]))
1330 operands
[1] = sparc_legitimize_tls_address (operands
[1]);
1334 /* Fixup PIC cases. */
1335 if (flag_pic
&& CONSTANT_P (operands
[1]))
1337 if (pic_address_needs_scratch (operands
[1]))
1338 operands
[1] = sparc_legitimize_pic_address (operands
[1], NULL_RTX
);
1340 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
1341 if (GET_CODE (operands
[1]) == LABEL_REF
1342 && can_use_mov_pic_label_ref (operands
[1]))
1346 emit_insn (gen_movsi_pic_label_ref (operands
[0], operands
[1]));
1352 gcc_assert (TARGET_ARCH64
);
1353 emit_insn (gen_movdi_pic_label_ref (operands
[0], operands
[1]));
1358 if (symbolic_operand (operands
[1], mode
))
1361 = sparc_legitimize_pic_address (operands
[1],
1363 ? operands
[0] : NULL_RTX
);
1368 /* If we are trying to toss an integer constant into FP registers,
1369 or loading a FP or vector constant, force it into memory. */
1370 if (CONSTANT_P (operands
[1])
1371 && REG_P (operands
[0])
1372 && (SPARC_FP_REG_P (REGNO (operands
[0]))
1373 || SCALAR_FLOAT_MODE_P (mode
)
1374 || VECTOR_MODE_P (mode
)))
1376 /* emit_group_store will send such bogosity to us when it is
1377 not storing directly into memory. So fix this up to avoid
1378 crashes in output_constant_pool. */
1379 if (operands
[1] == const0_rtx
)
1380 operands
[1] = CONST0_RTX (mode
);
1382 /* We can clear or set to all-ones FP registers if TARGET_VIS, and
1383 always other regs. */
1384 if ((TARGET_VIS
|| REGNO (operands
[0]) < SPARC_FIRST_FP_REG
)
1385 && (const_zero_operand (operands
[1], mode
)
1386 || const_all_ones_operand (operands
[1], mode
)))
1389 if (REGNO (operands
[0]) < SPARC_FIRST_FP_REG
1390 /* We are able to build any SF constant in integer registers
1391 with at most 2 instructions. */
1393 /* And any DF constant in integer registers. */
1395 && ! can_create_pseudo_p ())))
1398 operands
[1] = force_const_mem (mode
, operands
[1]);
1399 if (!reload_in_progress
)
1400 operands
[1] = validize_mem (operands
[1]);
1404 /* Accept non-constants and valid constants unmodified. */
1405 if (!CONSTANT_P (operands
[1])
1406 || GET_CODE (operands
[1]) == HIGH
1407 || input_operand (operands
[1], mode
))
1413 /* All QImode constants require only one insn, so proceed. */
1418 sparc_emit_set_const32 (operands
[0], operands
[1]);
1422 /* input_operand should have filtered out 32-bit mode. */
1423 sparc_emit_set_const64 (operands
[0], operands
[1]);
1433 /* Load OP1, a 32-bit constant, into OP0, a register.
1434 We know it can't be done in one insn when we get
1435 here, the move expander guarantees this. */
1438 sparc_emit_set_const32 (rtx op0
, rtx op1
)
1440 enum machine_mode mode
= GET_MODE (op0
);
1443 if (can_create_pseudo_p ())
1444 temp
= gen_reg_rtx (mode
);
1446 if (GET_CODE (op1
) == CONST_INT
)
1448 gcc_assert (!small_int_operand (op1
, mode
)
1449 && !const_high_operand (op1
, mode
));
1451 /* Emit them as real moves instead of a HIGH/LO_SUM,
1452 this way CSE can see everything and reuse intermediate
1453 values if it wants. */
1454 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
1455 GEN_INT (INTVAL (op1
)
1456 & ~(HOST_WIDE_INT
)0x3ff)));
1458 emit_insn (gen_rtx_SET (VOIDmode
,
1460 gen_rtx_IOR (mode
, temp
,
1461 GEN_INT (INTVAL (op1
) & 0x3ff))));
1465 /* A symbol, emit in the traditional way. */
1466 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
1467 gen_rtx_HIGH (mode
, op1
)));
1468 emit_insn (gen_rtx_SET (VOIDmode
,
1469 op0
, gen_rtx_LO_SUM (mode
, temp
, op1
)));
1473 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1474 If TEMP is nonzero, we are forbidden to use any other scratch
1475 registers. Otherwise, we are allowed to generate them as needed.
1477 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1478 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1481 sparc_emit_set_symbolic_const64 (rtx op0
, rtx op1
, rtx temp
)
1483 rtx temp1
, temp2
, temp3
, temp4
, temp5
;
1486 if (temp
&& GET_MODE (temp
) == TImode
)
1489 temp
= gen_rtx_REG (DImode
, REGNO (temp
));
1492 /* SPARC-V9 code-model support. */
1493 switch (sparc_cmodel
)
1496 /* The range spanned by all instructions in the object is less
1497 than 2^31 bytes (2GB) and the distance from any instruction
1498 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1499 than 2^31 bytes (2GB).
1501 The executable must be in the low 4TB of the virtual address
1504 sethi %hi(symbol), %temp1
1505 or %temp1, %lo(symbol), %reg */
1507 temp1
= temp
; /* op0 is allowed. */
1509 temp1
= gen_reg_rtx (DImode
);
1511 emit_insn (gen_rtx_SET (VOIDmode
, temp1
, gen_rtx_HIGH (DImode
, op1
)));
1512 emit_insn (gen_rtx_SET (VOIDmode
, op0
, gen_rtx_LO_SUM (DImode
, temp1
, op1
)));
1516 /* The range spanned by all instructions in the object is less
1517 than 2^31 bytes (2GB) and the distance from any instruction
1518 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1519 than 2^31 bytes (2GB).
1521 The executable must be in the low 16TB of the virtual address
1524 sethi %h44(symbol), %temp1
1525 or %temp1, %m44(symbol), %temp2
1526 sllx %temp2, 12, %temp3
1527 or %temp3, %l44(symbol), %reg */
1532 temp3
= temp
; /* op0 is allowed. */
1536 temp1
= gen_reg_rtx (DImode
);
1537 temp2
= gen_reg_rtx (DImode
);
1538 temp3
= gen_reg_rtx (DImode
);
1541 emit_insn (gen_seth44 (temp1
, op1
));
1542 emit_insn (gen_setm44 (temp2
, temp1
, op1
));
1543 emit_insn (gen_rtx_SET (VOIDmode
, temp3
,
1544 gen_rtx_ASHIFT (DImode
, temp2
, GEN_INT (12))));
1545 emit_insn (gen_setl44 (op0
, temp3
, op1
));
1549 /* The range spanned by all instructions in the object is less
1550 than 2^31 bytes (2GB) and the distance from any instruction
1551 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1552 than 2^31 bytes (2GB).
1554 The executable can be placed anywhere in the virtual address
1557 sethi %hh(symbol), %temp1
1558 sethi %lm(symbol), %temp2
1559 or %temp1, %hm(symbol), %temp3
1560 sllx %temp3, 32, %temp4
1561 or %temp4, %temp2, %temp5
1562 or %temp5, %lo(symbol), %reg */
1565 /* It is possible that one of the registers we got for operands[2]
1566 might coincide with that of operands[0] (which is why we made
1567 it TImode). Pick the other one to use as our scratch. */
1568 if (rtx_equal_p (temp
, op0
))
1570 gcc_assert (ti_temp
);
1571 temp
= gen_rtx_REG (DImode
, REGNO (temp
) + 1);
1574 temp2
= temp
; /* op0 is _not_ allowed, see above. */
1581 temp1
= gen_reg_rtx (DImode
);
1582 temp2
= gen_reg_rtx (DImode
);
1583 temp3
= gen_reg_rtx (DImode
);
1584 temp4
= gen_reg_rtx (DImode
);
1585 temp5
= gen_reg_rtx (DImode
);
1588 emit_insn (gen_sethh (temp1
, op1
));
1589 emit_insn (gen_setlm (temp2
, op1
));
1590 emit_insn (gen_sethm (temp3
, temp1
, op1
));
1591 emit_insn (gen_rtx_SET (VOIDmode
, temp4
,
1592 gen_rtx_ASHIFT (DImode
, temp3
, GEN_INT (32))));
1593 emit_insn (gen_rtx_SET (VOIDmode
, temp5
,
1594 gen_rtx_PLUS (DImode
, temp4
, temp2
)));
1595 emit_insn (gen_setlo (op0
, temp5
, op1
));
1599 /* Old old old backwards compatibility kruft here.
1600 Essentially it is MEDLOW with a fixed 64-bit
1601 virtual base added to all data segment addresses.
1602 Text-segment stuff is computed like MEDANY, we can't
1603 reuse the code above because the relocation knobs
1606 Data segment: sethi %hi(symbol), %temp1
1607 add %temp1, EMBMEDANY_BASE_REG, %temp2
1608 or %temp2, %lo(symbol), %reg */
1609 if (data_segment_operand (op1
, GET_MODE (op1
)))
1613 temp1
= temp
; /* op0 is allowed. */
1618 temp1
= gen_reg_rtx (DImode
);
1619 temp2
= gen_reg_rtx (DImode
);
1622 emit_insn (gen_embmedany_sethi (temp1
, op1
));
1623 emit_insn (gen_embmedany_brsum (temp2
, temp1
));
1624 emit_insn (gen_embmedany_losum (op0
, temp2
, op1
));
1627 /* Text segment: sethi %uhi(symbol), %temp1
1628 sethi %hi(symbol), %temp2
1629 or %temp1, %ulo(symbol), %temp3
1630 sllx %temp3, 32, %temp4
1631 or %temp4, %temp2, %temp5
1632 or %temp5, %lo(symbol), %reg */
1637 /* It is possible that one of the registers we got for operands[2]
1638 might coincide with that of operands[0] (which is why we made
1639 it TImode). Pick the other one to use as our scratch. */
1640 if (rtx_equal_p (temp
, op0
))
1642 gcc_assert (ti_temp
);
1643 temp
= gen_rtx_REG (DImode
, REGNO (temp
) + 1);
1646 temp2
= temp
; /* op0 is _not_ allowed, see above. */
1653 temp1
= gen_reg_rtx (DImode
);
1654 temp2
= gen_reg_rtx (DImode
);
1655 temp3
= gen_reg_rtx (DImode
);
1656 temp4
= gen_reg_rtx (DImode
);
1657 temp5
= gen_reg_rtx (DImode
);
1660 emit_insn (gen_embmedany_textuhi (temp1
, op1
));
1661 emit_insn (gen_embmedany_texthi (temp2
, op1
));
1662 emit_insn (gen_embmedany_textulo (temp3
, temp1
, op1
));
1663 emit_insn (gen_rtx_SET (VOIDmode
, temp4
,
1664 gen_rtx_ASHIFT (DImode
, temp3
, GEN_INT (32))));
1665 emit_insn (gen_rtx_SET (VOIDmode
, temp5
,
1666 gen_rtx_PLUS (DImode
, temp4
, temp2
)));
1667 emit_insn (gen_embmedany_textlo (op0
, temp5
, op1
));
1676 #if HOST_BITS_PER_WIDE_INT == 32
1678 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED
, rtx op1 ATTRIBUTE_UNUSED
)
1683 /* These avoid problems when cross compiling. If we do not
1684 go through all this hair then the optimizer will see
1685 invalid REG_EQUAL notes or in some cases none at all. */
1686 static rtx
gen_safe_HIGH64 (rtx
, HOST_WIDE_INT
);
1687 static rtx
gen_safe_SET64 (rtx
, HOST_WIDE_INT
);
1688 static rtx
gen_safe_OR64 (rtx
, HOST_WIDE_INT
);
1689 static rtx
gen_safe_XOR64 (rtx
, HOST_WIDE_INT
);
1691 /* The optimizer is not to assume anything about exactly
1692 which bits are set for a HIGH, they are unspecified.
1693 Unfortunately this leads to many missed optimizations
1694 during CSE. We mask out the non-HIGH bits, and matches
1695 a plain movdi, to alleviate this problem. */
1697 gen_safe_HIGH64 (rtx dest
, HOST_WIDE_INT val
)
1699 return gen_rtx_SET (VOIDmode
, dest
, GEN_INT (val
& ~(HOST_WIDE_INT
)0x3ff));
1703 gen_safe_SET64 (rtx dest
, HOST_WIDE_INT val
)
1705 return gen_rtx_SET (VOIDmode
, dest
, GEN_INT (val
));
1709 gen_safe_OR64 (rtx src
, HOST_WIDE_INT val
)
1711 return gen_rtx_IOR (DImode
, src
, GEN_INT (val
));
1715 gen_safe_XOR64 (rtx src
, HOST_WIDE_INT val
)
1717 return gen_rtx_XOR (DImode
, src
, GEN_INT (val
));
1720 /* Worker routines for 64-bit constant formation on arch64.
1721 One of the key things to be doing in these emissions is
1722 to create as many temp REGs as possible. This makes it
1723 possible for half-built constants to be used later when
1724 such values are similar to something required later on.
1725 Without doing this, the optimizer cannot see such
1728 static void sparc_emit_set_const64_quick1 (rtx
, rtx
,
1729 unsigned HOST_WIDE_INT
, int);
1732 sparc_emit_set_const64_quick1 (rtx op0
, rtx temp
,
1733 unsigned HOST_WIDE_INT low_bits
, int is_neg
)
1735 unsigned HOST_WIDE_INT high_bits
;
1738 high_bits
= (~low_bits
) & 0xffffffff;
1740 high_bits
= low_bits
;
1742 emit_insn (gen_safe_HIGH64 (temp
, high_bits
));
1745 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1746 gen_safe_OR64 (temp
, (high_bits
& 0x3ff))));
1750 /* If we are XOR'ing with -1, then we should emit a one's complement
1751 instead. This way the combiner will notice logical operations
1752 such as ANDN later on and substitute. */
1753 if ((low_bits
& 0x3ff) == 0x3ff)
1755 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1756 gen_rtx_NOT (DImode
, temp
)));
1760 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1761 gen_safe_XOR64 (temp
,
1762 (-(HOST_WIDE_INT
)0x400
1763 | (low_bits
& 0x3ff)))));
1768 static void sparc_emit_set_const64_quick2 (rtx
, rtx
, unsigned HOST_WIDE_INT
,
1769 unsigned HOST_WIDE_INT
, int);
1772 sparc_emit_set_const64_quick2 (rtx op0
, rtx temp
,
1773 unsigned HOST_WIDE_INT high_bits
,
1774 unsigned HOST_WIDE_INT low_immediate
,
1779 if ((high_bits
& 0xfffffc00) != 0)
1781 emit_insn (gen_safe_HIGH64 (temp
, high_bits
));
1782 if ((high_bits
& ~0xfffffc00) != 0)
1783 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1784 gen_safe_OR64 (temp
, (high_bits
& 0x3ff))));
1790 emit_insn (gen_safe_SET64 (temp
, high_bits
));
1794 /* Now shift it up into place. */
1795 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1796 gen_rtx_ASHIFT (DImode
, temp2
,
1797 GEN_INT (shift_count
))));
1799 /* If there is a low immediate part piece, finish up by
1800 putting that in as well. */
1801 if (low_immediate
!= 0)
1802 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1803 gen_safe_OR64 (op0
, low_immediate
)));
1806 static void sparc_emit_set_const64_longway (rtx
, rtx
, unsigned HOST_WIDE_INT
,
1807 unsigned HOST_WIDE_INT
);
1809 /* Full 64-bit constant decomposition. Even though this is the
1810 'worst' case, we still optimize a few things away. */
1812 sparc_emit_set_const64_longway (rtx op0
, rtx temp
,
1813 unsigned HOST_WIDE_INT high_bits
,
1814 unsigned HOST_WIDE_INT low_bits
)
1818 if (can_create_pseudo_p ())
1819 sub_temp
= gen_reg_rtx (DImode
);
1821 if ((high_bits
& 0xfffffc00) != 0)
1823 emit_insn (gen_safe_HIGH64 (temp
, high_bits
));
1824 if ((high_bits
& ~0xfffffc00) != 0)
1825 emit_insn (gen_rtx_SET (VOIDmode
,
1827 gen_safe_OR64 (temp
, (high_bits
& 0x3ff))));
1833 emit_insn (gen_safe_SET64 (temp
, high_bits
));
1837 if (can_create_pseudo_p ())
1839 rtx temp2
= gen_reg_rtx (DImode
);
1840 rtx temp3
= gen_reg_rtx (DImode
);
1841 rtx temp4
= gen_reg_rtx (DImode
);
1843 emit_insn (gen_rtx_SET (VOIDmode
, temp4
,
1844 gen_rtx_ASHIFT (DImode
, sub_temp
,
1847 emit_insn (gen_safe_HIGH64 (temp2
, low_bits
));
1848 if ((low_bits
& ~0xfffffc00) != 0)
1850 emit_insn (gen_rtx_SET (VOIDmode
, temp3
,
1851 gen_safe_OR64 (temp2
, (low_bits
& 0x3ff))));
1852 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1853 gen_rtx_PLUS (DImode
, temp4
, temp3
)));
1857 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1858 gen_rtx_PLUS (DImode
, temp4
, temp2
)));
1863 rtx low1
= GEN_INT ((low_bits
>> (32 - 12)) & 0xfff);
1864 rtx low2
= GEN_INT ((low_bits
>> (32 - 12 - 12)) & 0xfff);
1865 rtx low3
= GEN_INT ((low_bits
>> (32 - 12 - 12 - 8)) & 0x0ff);
1868 /* We are in the middle of reload, so this is really
1869 painful. However we do still make an attempt to
1870 avoid emitting truly stupid code. */
1871 if (low1
!= const0_rtx
)
1873 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1874 gen_rtx_ASHIFT (DImode
, sub_temp
,
1875 GEN_INT (to_shift
))));
1876 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1877 gen_rtx_IOR (DImode
, op0
, low1
)));
1885 if (low2
!= const0_rtx
)
1887 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1888 gen_rtx_ASHIFT (DImode
, sub_temp
,
1889 GEN_INT (to_shift
))));
1890 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1891 gen_rtx_IOR (DImode
, op0
, low2
)));
1899 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1900 gen_rtx_ASHIFT (DImode
, sub_temp
,
1901 GEN_INT (to_shift
))));
1902 if (low3
!= const0_rtx
)
1903 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
1904 gen_rtx_IOR (DImode
, op0
, low3
)));
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }

  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);

  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }

  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
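/* For the example above (bits 28..39 set), highest_bit_set - lowest_bit_set
   is 11 < 21, so const64_is_2insns returns nonzero: the constant can be
   synthesized with a sethi of the focused bits followed by a single shift.
   Constants whose upper 32 bits are all zeros or all ones are accepted
   immediately, since the low word alone can then be built in two insns.  */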
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							 unsigned HOST_WIDE_INT,
							 int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));

  return (hi | lo);
}
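/* Continuing the example: create_simple_focus_bits (0x000000ff, 0xf0000000,
   28, 10) returns 0x3ffc00, which satisfies SPARC_SETHI_P; a sethi of that
   value followed by a left shift of lowest_bit_set - 10 == 18 bit positions
   rebuilds 0x000000fff0000000.  */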
2028 /* Here we are sure to be arch64 and this is an integer constant
2029 being loaded into a register. Emit the most efficient
2030 insn sequence possible. Detection of all the 1-insn cases
2031 has been done already. */
2033 sparc_emit_set_const64 (rtx op0
, rtx op1
)
2035 unsigned HOST_WIDE_INT high_bits
, low_bits
;
2036 int lowest_bit_set
, highest_bit_set
;
2037 int all_bits_between_are_set
;
2040 /* Sanity check that we know what we are working with. */
2041 gcc_assert (TARGET_ARCH64
2042 && (GET_CODE (op0
) == SUBREG
2043 || (REG_P (op0
) && ! SPARC_FP_REG_P (REGNO (op0
)))));
2045 if (! can_create_pseudo_p ())
2048 if (GET_CODE (op1
) != CONST_INT
)
2050 sparc_emit_set_symbolic_const64 (op0
, op1
, temp
);
2055 temp
= gen_reg_rtx (DImode
);
2057 high_bits
= ((INTVAL (op1
) >> 32) & 0xffffffff);
2058 low_bits
= (INTVAL (op1
) & 0xffffffff);
2060 /* low_bits bits 0 --> 31
2061 high_bits bits 32 --> 63 */
2063 analyze_64bit_constant (high_bits
, low_bits
,
2064 &highest_bit_set
, &lowest_bit_set
,
2065 &all_bits_between_are_set
);
2067 /* First try for a 2-insn sequence. */
2069 /* These situations are preferred because the optimizer can
2070 * do more things with them:
2072 * sllx %reg, shift, %reg
2074 * srlx %reg, shift, %reg
2075 * 3) mov some_small_const, %reg
2076 * sllx %reg, shift, %reg
2078 if (((highest_bit_set
== 63
2079 || lowest_bit_set
== 0)
2080 && all_bits_between_are_set
!= 0)
2081 || ((highest_bit_set
- lowest_bit_set
) < 12))
2083 HOST_WIDE_INT the_const
= -1;
2084 int shift
= lowest_bit_set
;
2086 if ((highest_bit_set
!= 63
2087 && lowest_bit_set
!= 0)
2088 || all_bits_between_are_set
== 0)
2091 create_simple_focus_bits (high_bits
, low_bits
,
2094 else if (lowest_bit_set
== 0)
2095 shift
= -(63 - highest_bit_set
);
2097 gcc_assert (SPARC_SIMM13_P (the_const
));
2098 gcc_assert (shift
!= 0);
2100 emit_insn (gen_safe_SET64 (temp
, the_const
));
2102 emit_insn (gen_rtx_SET (VOIDmode
,
2104 gen_rtx_ASHIFT (DImode
,
2108 emit_insn (gen_rtx_SET (VOIDmode
,
2110 gen_rtx_LSHIFTRT (DImode
,
2112 GEN_INT (-shift
))));
2116 /* Now a range of 22 or less bits set somewhere.
2117 * 1) sethi %hi(focus_bits), %reg
2118 * sllx %reg, shift, %reg
2119 * 2) sethi %hi(focus_bits), %reg
2120 * srlx %reg, shift, %reg
2122 if ((highest_bit_set
- lowest_bit_set
) < 21)
2124 unsigned HOST_WIDE_INT focus_bits
=
2125 create_simple_focus_bits (high_bits
, low_bits
,
2126 lowest_bit_set
, 10);
2128 gcc_assert (SPARC_SETHI_P (focus_bits
));
2129 gcc_assert (lowest_bit_set
!= 10);
2131 emit_insn (gen_safe_HIGH64 (temp
, focus_bits
));
2133 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2134 if (lowest_bit_set
< 10)
2135 emit_insn (gen_rtx_SET (VOIDmode
,
2137 gen_rtx_LSHIFTRT (DImode
, temp
,
2138 GEN_INT (10 - lowest_bit_set
))));
2139 else if (lowest_bit_set
> 10)
2140 emit_insn (gen_rtx_SET (VOIDmode
,
2142 gen_rtx_ASHIFT (DImode
, temp
,
2143 GEN_INT (lowest_bit_set
- 10))));
2147 /* 1) sethi %hi(low_bits), %reg
2148 * or %reg, %lo(low_bits), %reg
2149 * 2) sethi %hi(~low_bits), %reg
2150 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2153 || high_bits
== 0xffffffff)
2155 sparc_emit_set_const64_quick1 (op0
, temp
, low_bits
,
2156 (high_bits
== 0xffffffff));
2160 /* Now, try 3-insn sequences. */
2162 /* 1) sethi %hi(high_bits), %reg
2163 * or %reg, %lo(high_bits), %reg
2164 * sllx %reg, 32, %reg
2168 sparc_emit_set_const64_quick2 (op0
, temp
, high_bits
, 0, 32);
2172 /* We may be able to do something quick
2173 when the constant is negated, so try that. */
2174 if (const64_is_2insns ((~high_bits
) & 0xffffffff,
2175 (~low_bits
) & 0xfffffc00))
2177 /* NOTE: The trailing bits get XOR'd so we need the
2178 non-negated bits, not the negated ones. */
2179 unsigned HOST_WIDE_INT trailing_bits
= low_bits
& 0x3ff;
2181 if ((((~high_bits
) & 0xffffffff) == 0
2182 && ((~low_bits
) & 0x80000000) == 0)
2183 || (((~high_bits
) & 0xffffffff) == 0xffffffff
2184 && ((~low_bits
) & 0x80000000) != 0))
2186 unsigned HOST_WIDE_INT fast_int
= (~low_bits
& 0xffffffff);
2188 if ((SPARC_SETHI_P (fast_int
)
2189 && (~high_bits
& 0xffffffff) == 0)
2190 || SPARC_SIMM13_P (fast_int
))
2191 emit_insn (gen_safe_SET64 (temp
, fast_int
));
2193 sparc_emit_set_const64 (temp
, GEN_INT (fast_int
));
2198 negated_const
= GEN_INT (((~low_bits
) & 0xfffffc00) |
2199 (((HOST_WIDE_INT
)((~high_bits
) & 0xffffffff))<<32));
2200 sparc_emit_set_const64 (temp
, negated_const
);
2203 /* If we are XOR'ing with -1, then we should emit a one's complement
2204 instead. This way the combiner will notice logical operations
2205 such as ANDN later on and substitute. */
2206 if (trailing_bits
== 0x3ff)
2208 emit_insn (gen_rtx_SET (VOIDmode
, op0
,
2209 gen_rtx_NOT (DImode
, temp
)));
2213 emit_insn (gen_rtx_SET (VOIDmode
,
2215 gen_safe_XOR64 (temp
,
2216 (-0x400 | trailing_bits
))));
2221 /* 1) sethi %hi(xxx), %reg
2222 * or %reg, %lo(xxx), %reg
2223 * sllx %reg, yyy, %reg
2225 * ??? This is just a generalized version of the low_bits==0
2226 * thing above, FIXME...
2228 if ((highest_bit_set
- lowest_bit_set
) < 32)
2230 unsigned HOST_WIDE_INT focus_bits
=
2231 create_simple_focus_bits (high_bits
, low_bits
,
2234 /* We can't get here in this state. */
2235 gcc_assert (highest_bit_set
>= 32 && lowest_bit_set
< 32);
2237 /* So what we know is that the set bits straddle the
2238 middle of the 64-bit word. */
2239 sparc_emit_set_const64_quick2 (op0
, temp
,
2245 /* 1) sethi %hi(high_bits), %reg
2246 * or %reg, %lo(high_bits), %reg
2247 * sllx %reg, 32, %reg
2248 * or %reg, low_bits, %reg
2250 if (SPARC_SIMM13_P(low_bits
)
2251 && ((int)low_bits
> 0))
2253 sparc_emit_set_const64_quick2 (op0
, temp
, high_bits
, low_bits
, 32);
2257 /* The easiest way when all else fails, is full decomposition. */
2258 sparc_emit_set_const64_longway (op0
, temp
, high_bits
, low_bits
);
2260 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
	{
	case EQ:
	case NE:
	case UNORDERED:
	case ORDERED:
	case UNLT:
	case UNLE:
	case UNGT:
	case UNGE:
	case UNEQ:
	case LTGT:
	  return CCFPmode;

	case LT:
	case LE:
	case GT:
	case GE:
	  return CCFPEmode;

	default:
	  gcc_unreachable ();
	}
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	   || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCX_NOOVmode;
      else
	return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCXmode;
      else
	return CCmode;
    }
}
2314 /* Emit the compare insn and return the CC reg for a CODE comparison
2315 with operands X and Y. */
2318 gen_compare_reg_1 (enum rtx_code code
, rtx x
, rtx y
)
2320 enum machine_mode mode
;
2323 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_CC
)
2326 mode
= SELECT_CC_MODE (code
, x
, y
);
2328 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2329 fcc regs (cse can't tell they're really call clobbered regs and will
2330 remove a duplicate comparison even if there is an intervening function
2331 call - it will then try to reload the cc reg via an int reg which is why
2332 we need the movcc patterns). It is possible to provide the movcc
2333 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2334 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2335 to tell cse that CCFPE mode registers (even pseudos) are call
2338 /* ??? This is an experiment. Rather than making changes to cse which may
2339 or may not be easy/clean, we do our own cse. This is possible because
2340 we will generate hard registers. Cse knows they're call clobbered (it
2341 doesn't know the same thing about pseudos). If we guess wrong, no big
2342 deal, but if we win, great! */
2344 if (TARGET_V9
&& GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2345 #if 1 /* experiment */
2348 /* We cycle through the registers to ensure they're all exercised. */
2349 static int next_fcc_reg
= 0;
2350 /* Previous x,y for each fcc reg. */
2351 static rtx prev_args
[4][2];
2353 /* Scan prev_args for x,y. */
2354 for (reg
= 0; reg
< 4; reg
++)
2355 if (prev_args
[reg
][0] == x
&& prev_args
[reg
][1] == y
)
2360 prev_args
[reg
][0] = x
;
2361 prev_args
[reg
][1] = y
;
2362 next_fcc_reg
= (next_fcc_reg
+ 1) & 3;
2364 cc_reg
= gen_rtx_REG (mode
, reg
+ SPARC_FIRST_V9_FCC_REG
);
2367 cc_reg
= gen_reg_rtx (mode
);
2368 #endif /* ! experiment */
2369 else if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2370 cc_reg
= gen_rtx_REG (mode
, SPARC_FCC_REG
);
2372 cc_reg
= gen_rtx_REG (mode
, SPARC_ICC_REG
);
2374 /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD. If we do, this
2375 will only result in an unrecognizable insn so no point in asserting. */
2376 emit_insn (gen_rtx_SET (VOIDmode
, cc_reg
, gen_rtx_COMPARE (mode
, x
, y
)));
/* Emit the compare insn and return the CC reg for the comparison in CMP.  */

rtx
gen_compare_reg (rtx cmp)
{
  return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
}
2390 /* This function is used for v9 only.
2391 DEST is the target of the Scc insn.
2392 CODE is the code for an Scc's comparison.
2393 X and Y are the values we compare.
2395 This function is needed to turn
2398 (gt (reg:CCX 100 %icc)
2402 (gt:DI (reg:CCX 100 %icc)
2405 IE: The instruction recognizer needs to see the mode of the comparison to
2406 find the right instruction. We could use "gt:DI" right in the
2407 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2410 gen_v9_scc (rtx dest
, enum rtx_code compare_code
, rtx x
, rtx y
)
2413 && (GET_MODE (x
) == DImode
2414 || GET_MODE (dest
) == DImode
))
2417 /* Try to use the movrCC insns. */
2419 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
2421 && v9_regcmp_p (compare_code
))
2426 /* Special case for op0 != 0. This can be done with one instruction if
2429 if (compare_code
== NE
2430 && GET_MODE (dest
) == DImode
2431 && rtx_equal_p (op0
, dest
))
2433 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
2434 gen_rtx_IF_THEN_ELSE (DImode
,
2435 gen_rtx_fmt_ee (compare_code
, DImode
,
2442 if (reg_overlap_mentioned_p (dest
, op0
))
2444 /* Handle the case where dest == x.
2445 We "early clobber" the result. */
2446 op0
= gen_reg_rtx (GET_MODE (x
));
2447 emit_move_insn (op0
, x
);
2450 emit_insn (gen_rtx_SET (VOIDmode
, dest
, const0_rtx
));
2451 if (GET_MODE (op0
) != DImode
)
2453 temp
= gen_reg_rtx (DImode
);
2454 convert_move (temp
, op0
, 0);
2458 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
2459 gen_rtx_IF_THEN_ELSE (GET_MODE (dest
),
2460 gen_rtx_fmt_ee (compare_code
, DImode
,
2468 x
= gen_compare_reg_1 (compare_code
, x
, y
);
2471 gcc_assert (GET_MODE (x
) != CC_NOOVmode
2472 && GET_MODE (x
) != CCX_NOOVmode
);
2474 emit_insn (gen_rtx_SET (VOIDmode
, dest
, const0_rtx
));
2475 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
2476 gen_rtx_IF_THEN_ELSE (GET_MODE (dest
),
2477 gen_rtx_fmt_ee (compare_code
,
2478 GET_MODE (x
), x
, y
),
2479 const1_rtx
, dest
)));
2485 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2486 without jumps using the addx/subx instructions. */
2489 emit_scc_insn (rtx operands
[])
2496 /* The quad-word fp compare library routines all return nonzero to indicate
2497 true, which is different from the equivalent libgcc routines, so we must
2498 handle them specially here. */
2499 if (GET_MODE (operands
[2]) == TFmode
&& ! TARGET_HARD_QUAD
)
2501 operands
[1] = sparc_emit_float_lib_cmp (operands
[2], operands
[3],
2502 GET_CODE (operands
[1]));
2503 operands
[2] = XEXP (operands
[1], 0);
2504 operands
[3] = XEXP (operands
[1], 1);
2507 code
= GET_CODE (operands
[1]);
2511 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2512 more applications). The exception to this is "reg != 0" which can
2513 be done in one instruction on v9 (so we do it). */
2516 if (GET_MODE (x
) == SImode
)
2518 rtx pat
= gen_seqsi_special (operands
[0], x
, y
);
2522 else if (GET_MODE (x
) == DImode
)
2524 rtx pat
= gen_seqdi_special (operands
[0], x
, y
);
2532 if (GET_MODE (x
) == SImode
)
2534 rtx pat
= gen_snesi_special (operands
[0], x
, y
);
2538 else if (GET_MODE (x
) == DImode
)
2540 rtx pat
= gen_snedi_special (operands
[0], x
, y
);
2546 /* For the rest, on v9 we can use conditional moves. */
2550 if (gen_v9_scc (operands
[0], code
, x
, y
))
2554 /* We can do LTU and GEU using the addx/subx instructions too. And
2555 for GTU/LEU, if both operands are registers swap them and fall
2556 back to the easy case. */
2557 if (code
== GTU
|| code
== LEU
)
2559 if ((GET_CODE (x
) == REG
|| GET_CODE (x
) == SUBREG
)
2560 && (GET_CODE (y
) == REG
|| GET_CODE (y
) == SUBREG
))
2565 code
= swap_condition (code
);
2569 if (code
== LTU
|| code
== GEU
)
2571 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
2572 gen_rtx_fmt_ee (code
, SImode
,
2573 gen_compare_reg_1 (code
, x
, y
),
2578 /* Nope, do branches. */
2582 /* Emit a conditional jump insn for the v9 architecture using comparison code
2583 CODE and jump target LABEL.
2584 This function exists to take advantage of the v9 brxx insns. */
2587 emit_v9_brxx_insn (enum rtx_code code
, rtx op0
, rtx label
)
2589 emit_jump_insn (gen_rtx_SET (VOIDmode
,
2591 gen_rtx_IF_THEN_ELSE (VOIDmode
,
2592 gen_rtx_fmt_ee (code
, GET_MODE (op0
),
2594 gen_rtx_LABEL_REF (VOIDmode
, label
),
2599 emit_conditional_branch_insn (rtx operands
[])
2601 /* The quad-word fp compare library routines all return nonzero to indicate
2602 true, which is different from the equivalent libgcc routines, so we must
2603 handle them specially here. */
2604 if (GET_MODE (operands
[1]) == TFmode
&& ! TARGET_HARD_QUAD
)
2606 operands
[0] = sparc_emit_float_lib_cmp (operands
[1], operands
[2],
2607 GET_CODE (operands
[0]));
2608 operands
[1] = XEXP (operands
[0], 0);
2609 operands
[2] = XEXP (operands
[0], 1);
2612 if (TARGET_ARCH64
&& operands
[2] == const0_rtx
2613 && GET_CODE (operands
[1]) == REG
2614 && GET_MODE (operands
[1]) == DImode
)
2616 emit_v9_brxx_insn (GET_CODE (operands
[0]), operands
[1], operands
[3]);
2620 operands
[1] = gen_compare_reg (operands
[0]);
2621 operands
[2] = const0_rtx
;
2622 operands
[0] = gen_rtx_fmt_ee (GET_CODE (operands
[0]), VOIDmode
,
2623 operands
[1], operands
[2]);
2624 emit_jump_insn (gen_cbranchcc4 (operands
[0], operands
[1], operands
[2],
/* Generate a DFmode part of a hard TFmode register.
   REG is the TFmode hard register, LOW is 1 for the
   low 64bit of the register and 0 otherwise.  */

rtx
gen_df_reg (rtx reg, int low)
{
  int regno = REGNO (reg);

  if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
    regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;

  return gen_rtx_REG (DFmode, regno);
}
2643 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2644 Unlike normal calls, TFmode operands are passed by reference. It is
2645 assumed that no more than 3 operands are required. */
2648 emit_soft_tfmode_libcall (const char *func_name
, int nargs
, rtx
*operands
)
2650 rtx ret_slot
= NULL
, arg
[3], func_sym
;
2653 /* We only expect to be called for conversions, unary, and binary ops. */
2654 gcc_assert (nargs
== 2 || nargs
== 3);
2656 for (i
= 0; i
< nargs
; ++i
)
2658 rtx this_arg
= operands
[i
];
2661 /* TFmode arguments and return values are passed by reference. */
2662 if (GET_MODE (this_arg
) == TFmode
)
2664 int force_stack_temp
;
2666 force_stack_temp
= 0;
2667 if (TARGET_BUGGY_QP_LIB
&& i
== 0)
2668 force_stack_temp
= 1;
2670 if (GET_CODE (this_arg
) == MEM
2671 && ! force_stack_temp
)
2672 this_arg
= XEXP (this_arg
, 0);
2673 else if (CONSTANT_P (this_arg
)
2674 && ! force_stack_temp
)
2676 this_slot
= force_const_mem (TFmode
, this_arg
);
2677 this_arg
= XEXP (this_slot
, 0);
2681 this_slot
= assign_stack_temp (TFmode
, GET_MODE_SIZE (TFmode
), 0);
2683 /* Operand 0 is the return value. We'll copy it out later. */
2685 emit_move_insn (this_slot
, this_arg
);
2687 ret_slot
= this_slot
;
2689 this_arg
= XEXP (this_slot
, 0);
2696 func_sym
= gen_rtx_SYMBOL_REF (Pmode
, func_name
);
2698 if (GET_MODE (operands
[0]) == TFmode
)
2701 emit_library_call (func_sym
, LCT_NORMAL
, VOIDmode
, 2,
2702 arg
[0], GET_MODE (arg
[0]),
2703 arg
[1], GET_MODE (arg
[1]));
2705 emit_library_call (func_sym
, LCT_NORMAL
, VOIDmode
, 3,
2706 arg
[0], GET_MODE (arg
[0]),
2707 arg
[1], GET_MODE (arg
[1]),
2708 arg
[2], GET_MODE (arg
[2]));
2711 emit_move_insn (operands
[0], ret_slot
);
2717 gcc_assert (nargs
== 2);
2719 ret
= emit_library_call_value (func_sym
, operands
[0], LCT_NORMAL
,
2720 GET_MODE (operands
[0]), 1,
2721 arg
[1], GET_MODE (arg
[1]));
2723 if (ret
!= operands
[0])
2724 emit_move_insn (operands
[0], ret
);
2728 /* Expand soft-float TFmode calls to sparc abi routines. */
2731 emit_soft_tfmode_binop (enum rtx_code code
, rtx
*operands
)
2753 emit_soft_tfmode_libcall (func
, 3, operands
);
2757 emit_soft_tfmode_unop (enum rtx_code code
, rtx
*operands
)
2761 gcc_assert (code
== SQRT
);
2764 emit_soft_tfmode_libcall (func
, 2, operands
);
2768 emit_soft_tfmode_cvt (enum rtx_code code
, rtx
*operands
)
2775 switch (GET_MODE (operands
[1]))
2788 case FLOAT_TRUNCATE
:
2789 switch (GET_MODE (operands
[0]))
2803 switch (GET_MODE (operands
[1]))
2808 operands
[1] = gen_rtx_SIGN_EXTEND (DImode
, operands
[1]);
2818 case UNSIGNED_FLOAT
:
2819 switch (GET_MODE (operands
[1]))
2824 operands
[1] = gen_rtx_ZERO_EXTEND (DImode
, operands
[1]);
2835 switch (GET_MODE (operands
[0]))
2849 switch (GET_MODE (operands
[0]))
2866 emit_soft_tfmode_libcall (func
, 2, operands
);
2869 /* Expand a hard-float tfmode operation. All arguments must be in
2873 emit_hard_tfmode_operation (enum rtx_code code
, rtx
*operands
)
2877 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
2879 operands
[1] = force_reg (GET_MODE (operands
[1]), operands
[1]);
2880 op
= gen_rtx_fmt_e (code
, GET_MODE (operands
[0]), operands
[1]);
2884 operands
[1] = force_reg (GET_MODE (operands
[1]), operands
[1]);
2885 operands
[2] = force_reg (GET_MODE (operands
[2]), operands
[2]);
2886 op
= gen_rtx_fmt_ee (code
, GET_MODE (operands
[0]),
2887 operands
[1], operands
[2]);
2890 if (register_operand (operands
[0], VOIDmode
))
2893 dest
= gen_reg_rtx (GET_MODE (operands
[0]));
2895 emit_insn (gen_rtx_SET (VOIDmode
, dest
, op
));
2897 if (dest
!= operands
[0])
2898 emit_move_insn (operands
[0], dest
);
2902 emit_tfmode_binop (enum rtx_code code
, rtx
*operands
)
2904 if (TARGET_HARD_QUAD
)
2905 emit_hard_tfmode_operation (code
, operands
);
2907 emit_soft_tfmode_binop (code
, operands
);
2911 emit_tfmode_unop (enum rtx_code code
, rtx
*operands
)
2913 if (TARGET_HARD_QUAD
)
2914 emit_hard_tfmode_operation (code
, operands
);
2916 emit_soft_tfmode_unop (code
, operands
);
2920 emit_tfmode_cvt (enum rtx_code code
, rtx
*operands
)
2922 if (TARGET_HARD_QUAD
)
2923 emit_hard_tfmode_operation (code
, operands
);
2925 emit_soft_tfmode_cvt (code
, operands
);
/* Return nonzero if a branch/jump/call instruction will be emitting
   nop into its delay slot.  */

int
empty_delay_slot (rtx insn)
{
  rtx seq;

  /* If no previous instruction (should not happen), return true.  */
  if (PREV_INSN (insn) == NULL)
    return 1;

  seq = NEXT_INSN (PREV_INSN (insn));
  if (GET_CODE (PATTERN (seq)) == SEQUENCE)
    return 0;

  return 1;
}

/* Return nonzero if TRIAL can go into the call delay slot.  */

int
tls_call_delay (rtx trial)
{
  rtx pat;

  /* Binutils allows
       call __tls_get_addr, %tgd_call (foo)
        add %l7, %o0, %o0, %tgd_add (foo)
     while Sun as/ld does not.  */
  if (TARGET_GNU_TLS || !TARGET_TLS)
    return 1;

  pat = PATTERN (trial);

  /* We must reject tgd_add{32|64}, i.e.
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
     and tldm_add{32|64}, i.e.
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))  */
  if (GET_CODE (pat) == SET
      && GET_CODE (SET_SRC (pat)) == PLUS)
    {
      rtx unspec = XEXP (SET_SRC (pat), 1);

      if (GET_CODE (unspec) == UNSPEC
	  && (XINT (unspec, 1) == UNSPEC_TLSGD
	      || XINT (unspec, 1) == UNSPEC_TLSLDM))
	return 0;
    }

  return 1;
}
2982 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2983 instruction. RETURN_P is true if the v9 variant 'return' is to be
2984 considered in the test too.
2986 TRIAL must be a SET whose destination is a REG appropriate for the
2987 'restore' instruction or, if RETURN_P is true, for the 'return'
2991 eligible_for_restore_insn (rtx trial
, bool return_p
)
2993 rtx pat
= PATTERN (trial
);
2994 rtx src
= SET_SRC (pat
);
2995 bool src_is_freg
= false;
2998 /* Since we now can do moves between float and integer registers when
2999 VIS3 is enabled, we have to catch this case. We can allow such
3000 moves when doing a 'return' however. */
3002 if (GET_CODE (src_reg
) == SUBREG
)
3003 src_reg
= SUBREG_REG (src_reg
);
3004 if (GET_CODE (src_reg
) == REG
3005 && SPARC_FP_REG_P (REGNO (src_reg
)))
3008 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3009 if (GET_MODE_CLASS (GET_MODE (src
)) != MODE_FLOAT
3010 && arith_operand (src
, GET_MODE (src
))
3014 return GET_MODE_SIZE (GET_MODE (src
)) <= GET_MODE_SIZE (DImode
);
3016 return GET_MODE_SIZE (GET_MODE (src
)) <= GET_MODE_SIZE (SImode
);
3019 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3020 else if (GET_MODE_CLASS (GET_MODE (src
)) != MODE_FLOAT
3021 && arith_double_operand (src
, GET_MODE (src
))
3023 return GET_MODE_SIZE (GET_MODE (src
)) <= GET_MODE_SIZE (DImode
);
3025 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3026 else if (! TARGET_FPU
&& register_operand (src
, SFmode
))
3029 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3030 else if (! TARGET_FPU
&& TARGET_ARCH64
&& register_operand (src
, DFmode
))
3033 /* If we have the 'return' instruction, anything that does not use
3034 local or output registers and can go into a delay slot wins. */
3037 && !epilogue_renumber (&pat
, 1)
3038 && get_attr_in_uncond_branch_delay (trial
)
3039 == IN_UNCOND_BRANCH_DELAY_TRUE
)
3042 /* The 'restore src1,src2,dest' pattern for SImode. */
3043 else if (GET_CODE (src
) == PLUS
3044 && register_operand (XEXP (src
, 0), SImode
)
3045 && arith_operand (XEXP (src
, 1), SImode
))
3048 /* The 'restore src1,src2,dest' pattern for DImode. */
3049 else if (GET_CODE (src
) == PLUS
3050 && register_operand (XEXP (src
, 0), DImode
)
3051 && arith_double_operand (XEXP (src
, 1), DImode
))
3054 /* The 'restore src1,%lo(src2),dest' pattern. */
3055 else if (GET_CODE (src
) == LO_SUM
3056 && ! TARGET_CM_MEDMID
3057 && ((register_operand (XEXP (src
, 0), SImode
)
3058 && immediate_operand (XEXP (src
, 1), SImode
))
3060 && register_operand (XEXP (src
, 0), DImode
)
3061 && immediate_operand (XEXP (src
, 1), DImode
))))
3064 /* The 'restore src,src,dest' pattern. */
3065 else if (GET_CODE (src
) == ASHIFT
3066 && (register_operand (XEXP (src
, 0), SImode
)
3067 || register_operand (XEXP (src
, 0), DImode
))
3068 && XEXP (src
, 1) == const1_rtx
)
3074 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3077 eligible_for_return_delay (rtx trial
)
3082 if (GET_CODE (trial
) != INSN
)
3085 if (get_attr_length (trial
) != 1)
3088 /* If the function uses __builtin_eh_return, the eh_return machinery
3089 occupies the delay slot. */
3090 if (crtl
->calls_eh_return
)
3093 /* In the case of a leaf or flat function, anything can go into the slot. */
3094 if (sparc_leaf_function_p
|| TARGET_FLAT
)
3096 get_attr_in_uncond_branch_delay (trial
) == IN_UNCOND_BRANCH_DELAY_TRUE
;
3098 pat
= PATTERN (trial
);
3099 if (GET_CODE (pat
) == PARALLEL
)
3105 for (i
= XVECLEN (pat
, 0) - 1; i
>= 0; i
--)
3107 rtx expr
= XVECEXP (pat
, 0, i
);
3108 if (GET_CODE (expr
) != SET
)
3110 if (GET_CODE (SET_DEST (expr
)) != REG
)
3112 regno
= REGNO (SET_DEST (expr
));
3113 if (regno
>= 8 && regno
< 24)
3116 return !epilogue_renumber (&pat
, 1)
3117 && (get_attr_in_uncond_branch_delay (trial
)
3118 == IN_UNCOND_BRANCH_DELAY_TRUE
);
3121 if (GET_CODE (pat
) != SET
)
3124 if (GET_CODE (SET_DEST (pat
)) != REG
)
3127 regno
= REGNO (SET_DEST (pat
));
3129 /* Otherwise, only operations which can be done in tandem with
3130 a `restore' or `return' insn can go into the delay slot. */
3131 if (regno
>= 8 && regno
< 24)
3134 /* If this instruction sets up floating point register and we have a return
3135 instruction, it can probably go in. But restore will not work
3137 if (! SPARC_INT_REG_P (regno
))
3139 && !epilogue_renumber (&pat
, 1)
3140 && get_attr_in_uncond_branch_delay (trial
)
3141 == IN_UNCOND_BRANCH_DELAY_TRUE
);
3143 return eligible_for_restore_insn (trial
, true);
3146 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3149 eligible_for_sibcall_delay (rtx trial
)
3153 if (GET_CODE (trial
) != INSN
|| GET_CODE (PATTERN (trial
)) != SET
)
3156 if (get_attr_length (trial
) != 1)
3159 pat
= PATTERN (trial
);
3161 if (sparc_leaf_function_p
|| TARGET_FLAT
)
3163 /* If the tail call is done using the call instruction,
3164 we have to restore %o7 in the delay slot. */
3165 if (LEAF_SIBCALL_SLOT_RESERVED_P
)
3168 /* %g1 is used to build the function address */
3169 if (reg_mentioned_p (gen_rtx_REG (Pmode
, 1), pat
))
3175 /* Otherwise, only operations which can be done in tandem with
3176 a `restore' insn can go into the delay slot. */
3177 if (GET_CODE (SET_DEST (pat
)) != REG
3178 || (REGNO (SET_DEST (pat
)) >= 8 && REGNO (SET_DEST (pat
)) < 24)
3179 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat
))))
3182 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3184 if (reg_mentioned_p (gen_rtx_REG (Pmode
, 15), pat
))
3187 return eligible_for_restore_insn (trial
, false);
/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
	 non-TLS symbols are OK iff we are non-PIC.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return true;
      else
	return flag_pic != 0;

    case CONST:
      return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
    case PLUS:
    case MINUS:
      return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
	     || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
    case UNSPEC:
      return true;
    default:
      gcc_unreachable ();
    }
}
3230 /* Global Offset Table support. */
3231 static GTY(()) rtx got_helper_rtx
= NULL_RTX
;
3232 static GTY(()) rtx global_offset_table_rtx
= NULL_RTX
;
3234 /* Return the SYMBOL_REF for the Global Offset Table. */
3236 static GTY(()) rtx sparc_got_symbol
= NULL_RTX
;
3241 if (!sparc_got_symbol
)
3242 sparc_got_symbol
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
3244 return sparc_got_symbol
;
3247 /* Ensure that we are not using patterns that are not OK with PIC. */
3257 op
= recog_data
.operand
[i
];
3258 gcc_assert (GET_CODE (op
) != SYMBOL_REF
3259 && (GET_CODE (op
) != CONST
3260 || (GET_CODE (XEXP (op
, 0)) == MINUS
3261 && XEXP (XEXP (op
, 0), 0) == sparc_got ()
3262 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST
)));
/* Return true if X is an address which needs a temporary register when
   reloaded while generating PIC code.  */

int
pic_address_needs_scratch (rtx x)
{
  /* An address which is a symbolic plus a non SMALL_INT needs a temp reg.  */
  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
    return 1;

  return 0;
}
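/* Example: (const (plus (symbol_ref "x") (const_int 8))) can be handled
   directly since 8 fits in a signed 13-bit immediate, whereas an offset
   such as 0x2000 does not satisfy SMALL_INT and therefore requires a
   scratch register when generating PIC code.  */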
3285 /* Determine if a given RTX is a valid constant. We already know this
3286 satisfies CONSTANT_P. */
3289 sparc_legitimate_constant_p (enum machine_mode mode
, rtx x
)
3291 switch (GET_CODE (x
))
3295 if (sparc_tls_referenced_p (x
))
3300 if (GET_MODE (x
) == VOIDmode
)
3303 /* Floating point constants are generally not ok.
3304 The only exception is 0.0 and all-ones in VIS. */
3306 && SCALAR_FLOAT_MODE_P (mode
)
3307 && (const_zero_operand (x
, mode
)
3308 || const_all_ones_operand (x
, mode
)))
3314 /* Vector constants are generally not ok.
3315 The only exception is 0 or -1 in VIS. */
3317 && (const_zero_operand (x
, mode
)
3318 || const_all_ones_operand (x
, mode
)))
3330 /* Determine if a given RTX is a valid constant address. */
3333 constant_address_p (rtx x
)
3335 switch (GET_CODE (x
))
3343 if (flag_pic
&& pic_address_needs_scratch (x
))
3345 return sparc_legitimate_constant_p (Pmode
, x
);
3348 return !flag_pic
&& sparc_legitimate_constant_p (Pmode
, x
);
/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  if (pic_address_needs_scratch (x))
    return false;
  if (sparc_tls_referenced_p (x))
    return false;
  return true;
}

#define RTX_OK_FOR_OFFSET_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))

#define RTX_OK_FOR_OLO10_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
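/* Both macros keep the whole access inside the signed 13-bit displacement
   range: e.g. for DImode (8 bytes), RTX_OK_FOR_OFFSET_P accepts offsets
   from -0x1000 up to 0xff7, so that offset + 8 still fits, while
   RTX_OK_FOR_OLO10_P additionally reserves room for the up-to-0x3ff value
   that a %lo() relocation may contribute.  */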
3379 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3381 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3382 ordinarily. This changes a bit when generating PIC. */
3385 sparc_legitimate_address_p (enum machine_mode mode
, rtx addr
, bool strict
)
3387 rtx rs1
= NULL
, rs2
= NULL
, imm1
= NULL
;
3389 if (REG_P (addr
) || GET_CODE (addr
) == SUBREG
)
3391 else if (GET_CODE (addr
) == PLUS
)
3393 rs1
= XEXP (addr
, 0);
3394 rs2
= XEXP (addr
, 1);
3396 /* Canonicalize. REG comes first, if there are no regs,
3397 LO_SUM comes first. */
3399 && GET_CODE (rs1
) != SUBREG
3401 || GET_CODE (rs2
) == SUBREG
3402 || (GET_CODE (rs2
) == LO_SUM
&& GET_CODE (rs1
) != LO_SUM
)))
3404 rs1
= XEXP (addr
, 1);
3405 rs2
= XEXP (addr
, 0);
3409 && rs1
== pic_offset_table_rtx
3411 && GET_CODE (rs2
) != SUBREG
3412 && GET_CODE (rs2
) != LO_SUM
3413 && GET_CODE (rs2
) != MEM
3414 && !(GET_CODE (rs2
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (rs2
))
3415 && (! symbolic_operand (rs2
, VOIDmode
) || mode
== Pmode
)
3416 && (GET_CODE (rs2
) != CONST_INT
|| SMALL_INT (rs2
)))
3418 || GET_CODE (rs1
) == SUBREG
)
3419 && RTX_OK_FOR_OFFSET_P (rs2
, mode
)))
3424 else if ((REG_P (rs1
) || GET_CODE (rs1
) == SUBREG
)
3425 && (REG_P (rs2
) || GET_CODE (rs2
) == SUBREG
))
3427 /* We prohibit REG + REG for TFmode when there are no quad move insns
3428 and we consequently need to split. We do this because REG+REG
3429 is not an offsettable address. If we get the situation in reload
3430 where source and destination of a movtf pattern are both MEMs with
3431 REG+REG address, then only one of them gets converted to an
3432 offsettable address. */
3434 && ! (TARGET_FPU
&& TARGET_ARCH64
&& TARGET_HARD_QUAD
))
3437 /* We prohibit REG + REG on ARCH32 if not optimizing for
3438 DFmode/DImode because then mem_min_alignment is likely to be zero
3439 after reload and the forced split would lack a matching splitter
3441 if (TARGET_ARCH32
&& !optimize
3442 && (mode
== DFmode
|| mode
== DImode
))
3445 else if (USE_AS_OFFSETABLE_LO10
3446 && GET_CODE (rs1
) == LO_SUM
3448 && ! TARGET_CM_MEDMID
3449 && RTX_OK_FOR_OLO10_P (rs2
, mode
))
3452 imm1
= XEXP (rs1
, 1);
3453 rs1
= XEXP (rs1
, 0);
3454 if (!CONSTANT_P (imm1
)
3455 || (GET_CODE (rs1
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (rs1
)))
3459 else if (GET_CODE (addr
) == LO_SUM
)
3461 rs1
= XEXP (addr
, 0);
3462 imm1
= XEXP (addr
, 1);
3464 if (!CONSTANT_P (imm1
)
3465 || (GET_CODE (rs1
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (rs1
)))
3468 /* We can't allow TFmode in 32-bit mode, because an offset greater
3469 than the alignment (8) may cause the LO_SUM to overflow. */
3470 if (mode
== TFmode
&& TARGET_ARCH32
)
3473 else if (GET_CODE (addr
) == CONST_INT
&& SMALL_INT (addr
))
3478 if (GET_CODE (rs1
) == SUBREG
)
3479 rs1
= SUBREG_REG (rs1
);
3485 if (GET_CODE (rs2
) == SUBREG
)
3486 rs2
= SUBREG_REG (rs2
);
3493 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1
))
3494 || (rs2
&& !REGNO_OK_FOR_BASE_P (REGNO (rs2
))))
3499 if ((! SPARC_INT_REG_P (REGNO (rs1
))
3500 && REGNO (rs1
) != FRAME_POINTER_REGNUM
3501 && REGNO (rs1
) < FIRST_PSEUDO_REGISTER
)
3503 && (! SPARC_INT_REG_P (REGNO (rs2
))
3504 && REGNO (rs2
) != FRAME_POINTER_REGNUM
3505 && REGNO (rs2
) < FIRST_PSEUDO_REGISTER
)))
/* Return the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx sparc_tls_symbol = NULL_RTX;

static rtx
sparc_tls_get_addr (void)
{
  if (!sparc_tls_symbol)
    sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");

  return sparc_tls_symbol;
}

/* Return the Global Offset Table to be used in TLS mode.  */

static rtx
sparc_tls_got (void)
{
  /* In PIC mode, this is just the PIC offset table.  */
  if (flag_pic)
    {
      crtl->uses_pic_offset_table = 1;
      return pic_offset_table_rtx;
    }

  /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
     the GOT symbol with the 32-bit ABI, so we reload the GOT register.  */
  if (TARGET_SUN_TLS && TARGET_ARCH32)
    {
      load_got_register ();
      return global_offset_table_rtx;
    }

  /* In all other cases, we load a new pseudo with the GOT symbol.  */
  return copy_to_reg (sparc_got ());
}

/* Return true if X contains a thread-local symbol.  */

static bool
sparc_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
    x = XEXP (XEXP (x, 0), 0);

  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
    return true;

  /* That's all we handle in sparc_legitimize_tls_address for now.  */
  return false;
}
3566 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3567 this (thread-local) address. */
3570 sparc_legitimize_tls_address (rtx addr
)
3572 rtx temp1
, temp2
, temp3
, ret
, o0
, got
, insn
;
3574 gcc_assert (can_create_pseudo_p ());
3576 if (GET_CODE (addr
) == SYMBOL_REF
)
3577 switch (SYMBOL_REF_TLS_MODEL (addr
))
3579 case TLS_MODEL_GLOBAL_DYNAMIC
:
3581 temp1
= gen_reg_rtx (SImode
);
3582 temp2
= gen_reg_rtx (SImode
);
3583 ret
= gen_reg_rtx (Pmode
);
3584 o0
= gen_rtx_REG (Pmode
, 8);
3585 got
= sparc_tls_got ();
3586 emit_insn (gen_tgd_hi22 (temp1
, addr
));
3587 emit_insn (gen_tgd_lo10 (temp2
, temp1
, addr
));
3590 emit_insn (gen_tgd_add32 (o0
, got
, temp2
, addr
));
3591 insn
= emit_call_insn (gen_tgd_call32 (o0
, sparc_tls_get_addr (),
3596 emit_insn (gen_tgd_add64 (o0
, got
, temp2
, addr
));
3597 insn
= emit_call_insn (gen_tgd_call64 (o0
, sparc_tls_get_addr (),
3600 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), o0
);
3601 insn
= get_insns ();
3603 emit_libcall_block (insn
, ret
, o0
, addr
);
3606 case TLS_MODEL_LOCAL_DYNAMIC
:
3608 temp1
= gen_reg_rtx (SImode
);
3609 temp2
= gen_reg_rtx (SImode
);
3610 temp3
= gen_reg_rtx (Pmode
);
3611 ret
= gen_reg_rtx (Pmode
);
3612 o0
= gen_rtx_REG (Pmode
, 8);
3613 got
= sparc_tls_got ();
3614 emit_insn (gen_tldm_hi22 (temp1
));
3615 emit_insn (gen_tldm_lo10 (temp2
, temp1
));
3618 emit_insn (gen_tldm_add32 (o0
, got
, temp2
));
3619 insn
= emit_call_insn (gen_tldm_call32 (o0
, sparc_tls_get_addr (),
3624 emit_insn (gen_tldm_add64 (o0
, got
, temp2
));
3625 insn
= emit_call_insn (gen_tldm_call64 (o0
, sparc_tls_get_addr (),
3628 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), o0
);
3629 insn
= get_insns ();
3631 emit_libcall_block (insn
, temp3
, o0
,
3632 gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
3633 UNSPEC_TLSLD_BASE
));
3634 temp1
= gen_reg_rtx (SImode
);
3635 temp2
= gen_reg_rtx (SImode
);
3636 emit_insn (gen_tldo_hix22 (temp1
, addr
));
3637 emit_insn (gen_tldo_lox10 (temp2
, temp1
, addr
));
3639 emit_insn (gen_tldo_add32 (ret
, temp3
, temp2
, addr
));
3641 emit_insn (gen_tldo_add64 (ret
, temp3
, temp2
, addr
));
3644 case TLS_MODEL_INITIAL_EXEC
:
3645 temp1
= gen_reg_rtx (SImode
);
3646 temp2
= gen_reg_rtx (SImode
);
3647 temp3
= gen_reg_rtx (Pmode
);
3648 got
= sparc_tls_got ();
3649 emit_insn (gen_tie_hi22 (temp1
, addr
));
3650 emit_insn (gen_tie_lo10 (temp2
, temp1
, addr
));
3652 emit_insn (gen_tie_ld32 (temp3
, got
, temp2
, addr
));
3654 emit_insn (gen_tie_ld64 (temp3
, got
, temp2
, addr
));
3657 ret
= gen_reg_rtx (Pmode
);
3659 emit_insn (gen_tie_add32 (ret
, gen_rtx_REG (Pmode
, 7),
3662 emit_insn (gen_tie_add64 (ret
, gen_rtx_REG (Pmode
, 7),
3666 ret
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, 7), temp3
);
3669 case TLS_MODEL_LOCAL_EXEC
:
3670 temp1
= gen_reg_rtx (Pmode
);
3671 temp2
= gen_reg_rtx (Pmode
);
3674 emit_insn (gen_tle_hix22_sp32 (temp1
, addr
));
3675 emit_insn (gen_tle_lox10_sp32 (temp2
, temp1
, addr
));
3679 emit_insn (gen_tle_hix22_sp64 (temp1
, addr
));
3680 emit_insn (gen_tle_lox10_sp64 (temp2
, temp1
, addr
));
3682 ret
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, 7), temp2
);
3689 else if (GET_CODE (addr
) == CONST
)
3693 gcc_assert (GET_CODE (XEXP (addr
, 0)) == PLUS
);
3695 base
= sparc_legitimize_tls_address (XEXP (XEXP (addr
, 0), 0));
3696 offset
= XEXP (XEXP (addr
, 0), 1);
3698 base
= force_operand (base
, NULL_RTX
);
3699 if (!(GET_CODE (offset
) == CONST_INT
&& SMALL_INT (offset
)))
3700 offset
= force_reg (Pmode
, offset
);
3701 ret
= gen_rtx_PLUS (Pmode
, base
, offset
);
3705 gcc_unreachable (); /* for now ... */
3710 /* Legitimize PIC addresses. If the address is already position-independent,
3711 we return ORIG. Newly generated position-independent addresses go into a
3712 reg. This is REG if nonzero, otherwise we allocate register(s) as
3716 sparc_legitimize_pic_address (rtx orig
, rtx reg
)
3718 bool gotdata_op
= false;
3720 if (GET_CODE (orig
) == SYMBOL_REF
3721 /* See the comment in sparc_expand_move. */
3722 || (GET_CODE (orig
) == LABEL_REF
&& !can_use_mov_pic_label_ref (orig
)))
3724 rtx pic_ref
, address
;
3729 gcc_assert (can_create_pseudo_p ());
3730 reg
= gen_reg_rtx (Pmode
);
3735 /* If not during reload, allocate another temp reg here for loading
3736 in the address, so that these instructions can be optimized
3738 rtx temp_reg
= (! can_create_pseudo_p ()
3739 ? reg
: gen_reg_rtx (Pmode
));
3741 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3742 won't get confused into thinking that these two instructions
3743 are loading in the true address of the symbol. If in the
3744 future a PIC rtx exists, that should be used instead. */
3747 emit_insn (gen_movdi_high_pic (temp_reg
, orig
));
3748 emit_insn (gen_movdi_lo_sum_pic (temp_reg
, temp_reg
, orig
));
3752 emit_insn (gen_movsi_high_pic (temp_reg
, orig
));
3753 emit_insn (gen_movsi_lo_sum_pic (temp_reg
, temp_reg
, orig
));
3761 crtl
->uses_pic_offset_table
= 1;
3765 insn
= emit_insn (gen_movdi_pic_gotdata_op (reg
,
3766 pic_offset_table_rtx
,
3769 insn
= emit_insn (gen_movsi_pic_gotdata_op (reg
,
3770 pic_offset_table_rtx
,
3776 = gen_const_mem (Pmode
,
3777 gen_rtx_PLUS (Pmode
,
3778 pic_offset_table_rtx
, address
));
3779 insn
= emit_move_insn (reg
, pic_ref
);
3782 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3784 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
3787 else if (GET_CODE (orig
) == CONST
)
3791 if (GET_CODE (XEXP (orig
, 0)) == PLUS
3792 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
3797 gcc_assert (can_create_pseudo_p ());
3798 reg
= gen_reg_rtx (Pmode
);
3801 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
3802 base
= sparc_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), reg
);
3803 offset
= sparc_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
3804 base
== reg
? NULL_RTX
: reg
);
3806 if (GET_CODE (offset
) == CONST_INT
)
3808 if (SMALL_INT (offset
))
3809 return plus_constant (base
, INTVAL (offset
));
3810 else if (can_create_pseudo_p ())
3811 offset
= force_reg (Pmode
, offset
);
3813 /* If we reach here, then something is seriously wrong. */
3816 return gen_rtx_PLUS (Pmode
, base
, offset
);
3818 else if (GET_CODE (orig
) == LABEL_REF
)
3819 /* ??? We ought to be checking that the register is live instead, in case
3820 it is eliminated. */
3821 crtl
->uses_pic_offset_table
= 1;
3826 /* Try machine-dependent ways of modifying an illegitimate address X
3827 to be legitimate. If we find one, return the new, valid address.
3829 OLDX is the address as it was before break_out_memory_refs was called.
3830 In some cases it is useful to look at this to decide what needs to be done.
3832 MODE is the mode of the operand pointed to by X.
3834 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3837 sparc_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
3838 enum machine_mode mode
)
3842 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 0)) == MULT
)
3843 x
= gen_rtx_PLUS (Pmode
, XEXP (x
, 1),
3844 force_operand (XEXP (x
, 0), NULL_RTX
));
3845 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == MULT
)
3846 x
= gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
3847 force_operand (XEXP (x
, 1), NULL_RTX
));
3848 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 0)) == PLUS
)
3849 x
= gen_rtx_PLUS (Pmode
, force_operand (XEXP (x
, 0), NULL_RTX
),
3851 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == PLUS
)
3852 x
= gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
3853 force_operand (XEXP (x
, 1), NULL_RTX
));
3855 if (x
!= orig_x
&& sparc_legitimate_address_p (mode
, x
, FALSE
))
3858 if (sparc_tls_referenced_p (x
))
3859 x
= sparc_legitimize_tls_address (x
);
3861 x
= sparc_legitimize_pic_address (x
, NULL_RTX
);
3862 else if (GET_CODE (x
) == PLUS
&& CONSTANT_ADDRESS_P (XEXP (x
, 1)))
3863 x
= gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
3864 copy_to_mode_reg (Pmode
, XEXP (x
, 1)));
3865 else if (GET_CODE (x
) == PLUS
&& CONSTANT_ADDRESS_P (XEXP (x
, 0)))
3866 x
= gen_rtx_PLUS (Pmode
, XEXP (x
, 1),
3867 copy_to_mode_reg (Pmode
, XEXP (x
, 0)));
3868 else if (GET_CODE (x
) == SYMBOL_REF
3869 || GET_CODE (x
) == CONST
3870 || GET_CODE (x
) == LABEL_REF
)
3871 x
= copy_to_suggested_reg (x
, NULL_RTX
, Pmode
);
3876 /* Delegitimize an address that was legitimized by the above function. */
3879 sparc_delegitimize_address (rtx x
)
3881 x
= delegitimize_mem_from_attrs (x
);
3883 if (GET_CODE (x
) == LO_SUM
&& GET_CODE (XEXP (x
, 1)) == UNSPEC
)
3884 switch (XINT (XEXP (x
, 1), 1))
3886 case UNSPEC_MOVE_PIC
:
3888 x
= XVECEXP (XEXP (x
, 1), 0, 0);
3889 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
3895 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3896 if (GET_CODE (x
) == MINUS
3897 && REG_P (XEXP (x
, 0))
3898 && REGNO (XEXP (x
, 0)) == PIC_OFFSET_TABLE_REGNUM
3899 && GET_CODE (XEXP (x
, 1)) == LO_SUM
3900 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == UNSPEC
3901 && XINT (XEXP (XEXP (x
, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL
)
3903 x
= XVECEXP (XEXP (XEXP (x
, 1), 1), 0, 0);
3904 gcc_assert (GET_CODE (x
) == LABEL_REF
);
3910 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3911 replace the input X, or the original X if no replacement is called for.
3912 The output parameter *WIN is 1 if the calling macro should goto WIN,
3915 For SPARC, we wish to handle addresses by splitting them into
3916 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3917 This cuts the number of extra insns by one.
3919 Do nothing when generating PIC code and the address is a symbolic
3920 operand or requires a scratch register. */
3923 sparc_legitimize_reload_address (rtx x
, enum machine_mode mode
,
3924 int opnum
, int type
,
3925 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
3927 /* Decompose SImode constants into HIGH+LO_SUM. */
3929 && (mode
!= TFmode
|| TARGET_ARCH64
)
3930 && GET_MODE (x
) == SImode
3931 && GET_CODE (x
) != LO_SUM
3932 && GET_CODE (x
) != HIGH
3933 && sparc_cmodel
<= CM_MEDLOW
3935 && (symbolic_operand (x
, Pmode
) || pic_address_needs_scratch (x
))))
3937 x
= gen_rtx_LO_SUM (GET_MODE (x
), gen_rtx_HIGH (GET_MODE (x
), x
), x
);
3938 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
3939 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
3940 opnum
, (enum reload_type
)type
);
3945 /* We have to recognize what we have already generated above. */
3946 if (GET_CODE (x
) == LO_SUM
&& GET_CODE (XEXP (x
, 0)) == HIGH
)
3948 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
3949 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
3950 opnum
, (enum reload_type
)type
);
3959 /* Return true if ADDR (a legitimate address expression)
3960 has an effect that depends on the machine mode it is used for.
3966 is not equivalent to
3968 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3970 because [%l7+a+1] is interpreted as the address of (a+1). */
3974 sparc_mode_dependent_address_p (const_rtx addr
)
3976 if (flag_pic
&& GET_CODE (addr
) == PLUS
)
3978 rtx op0
= XEXP (addr
, 0);
3979 rtx op1
= XEXP (addr
, 1);
3980 if (op0
== pic_offset_table_rtx
3981 && symbolic_operand (op1
, VOIDmode
))
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  const char *reg_name = reg_names[regno];

  /* Skip the leading '%' as that cannot be used in a
     symbol name.  */
  reg_name += 1;

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
}

/* Wrapper around the load_pcrel_sym{si,di} patterns.  */

static rtx
gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
{
  int orig_flag_pic = flag_pic;
  rtx insn;

  /* The load_pcrel_sym{si,di} patterns require absolute addressing.  */
  flag_pic = 0;
  if (TARGET_ARCH64)
    insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
  else
    insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
  flag_pic = orig_flag_pic;

  return insn;
}
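/* Example: when the GOT register is %l7, USE_HIDDEN_LINKONCE names the
   helper "__sparc_get_pc_thunk.l7" (the leading '%' of the register name
   is skipped); otherwise an internal LADDPC label is generated instead.  */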
/* Emit code to load the GOT register.  */

void
load_got_register (void)
{
  /* In PIC mode, this will retrieve pic_offset_table_rtx.  */
  if (!global_offset_table_rtx)
    global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);

  if (TARGET_VXWORKS_RTP)
    emit_insn (gen_vxworks_load_got ());
  else
    {
      /* The GOT symbol is subject to a PC-relative relocation so we need a
	 helper function to add the PC value and thus get the final value.  */
      if (!got_helper_rtx)
	{
	  char name[32];
	  get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
	  got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
	}

      emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
				     got_helper_rtx,
				     GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
    }

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.
     ??? In the case where we don't obey regdecls, this is not sufficient
     since we may not fall out the bottom.  */
  emit_use (global_offset_table_rtx);
}

/* Emit a call instruction with the pattern given by PAT.  ADDR is the
   address of the call target.  */

void
sparc_emit_call_insn (rtx pat, rtx addr)
{
  rtx insn;

  insn = emit_call_insn (pat);

  /* The PIC register is live on entry to VxWorks PIC PLT entries.  */
  if (TARGET_VXWORKS_RTP
      && flag_pic
      && GET_CODE (addr) == SYMBOL_REF
      && (SYMBOL_REF_DECL (addr)
	  ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
	  : !SYMBOL_REF_LOCAL_P (addr)))
    {
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
      crtl->uses_pic_offset_table = 1;
    }
}
4085 /* Return 1 if RTX is a MEM which is known to be aligned to at
4086 least a DESIRED byte boundary. */
4089 mem_min_alignment (rtx mem
, int desired
)
4091 rtx addr
, base
, offset
;
4093 /* If it's not a MEM we can't accept it. */
4094 if (GET_CODE (mem
) != MEM
)
4098 if (!TARGET_UNALIGNED_DOUBLES
4099 && MEM_ALIGN (mem
) / BITS_PER_UNIT
>= (unsigned)desired
)
4102 /* ??? The rest of the function predates MEM_ALIGN so
4103 there is probably a bit of redundancy. */
4104 addr
= XEXP (mem
, 0);
4105 base
= offset
= NULL_RTX
;
4106 if (GET_CODE (addr
) == PLUS
)
4108 if (GET_CODE (XEXP (addr
, 0)) == REG
)
4110 base
= XEXP (addr
, 0);
4112 /* What we are saying here is that if the base
4113 REG is aligned properly, the compiler will make
4114 sure any REG based index upon it will be so
4116 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
4117 offset
= XEXP (addr
, 1);
4119 offset
= const0_rtx
;
4122 else if (GET_CODE (addr
) == REG
)
4125 offset
= const0_rtx
;
4128 if (base
!= NULL_RTX
)
4130 int regno
= REGNO (base
);
4132 if (regno
!= HARD_FRAME_POINTER_REGNUM
&& regno
!= STACK_POINTER_REGNUM
)
4134 /* Check if the compiler has recorded some information
4135 about the alignment of the base REG. If reload has
4136 completed, we already matched with proper alignments.
4137 If not running global_alloc, reload might give us
4138 unaligned pointer to local stack though. */
4140 && REGNO_POINTER_ALIGN (regno
) >= desired
* BITS_PER_UNIT
)
4141 || (optimize
&& reload_completed
))
4142 && (INTVAL (offset
) & (desired
- 1)) == 0)
4147 if (((INTVAL (offset
) - SPARC_STACK_BIAS
) & (desired
- 1)) == 0)
4151 else if (! TARGET_UNALIGNED_DOUBLES
4152 || CONSTANT_P (addr
)
4153 || GET_CODE (addr
) == LO_SUM
)
4155 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4156 is true, in which case we can only assume that an access is aligned if
4157 it is to a constant address, or the address involves a LO_SUM. */
4161 /* An obviously unaligned address. */
4166 /* Vectors to keep interesting information about registers where it can easily
4167 be got. We used to use the actual mode value as the bit number, but there
4168 are more than 32 modes now. Instead we use two tables: one indexed by
4169 hard register number, and one indexed by mode. */
4171 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4172 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4173 mapped into one sparc_mode_class mode. */
4175 enum sparc_mode_class
{
4176 S_MODE
, D_MODE
, T_MODE
, O_MODE
,
4177 SF_MODE
, DF_MODE
, TF_MODE
, OF_MODE
,
4181 /* Modes for single-word and smaller quantities. */
4182 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4184 /* Modes for double-word and smaller quantities. */
4185 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4187 /* Modes for quad-word and smaller quantities. */
4188 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4190 /* Modes for 8-word and smaller quantities. */
4191 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4193 /* Modes for single-float quantities. We must allow any single word or
4194 smaller quantity. This is because the fix/float conversion instructions
4195 take integer inputs/outputs from the float registers. */
4196 #define SF_MODES (S_MODES)
4198 /* Modes for double-float and smaller quantities. */
4199 #define DF_MODES (D_MODES)
4201 /* Modes for quad-float and smaller quantities. */
4202 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4204 /* Modes for quad-float pairs and smaller quantities. */
4205 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4207 /* Modes for double-float only quantities. */
4208 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4210 /* Modes for quad-float and double-float only quantities. */
4211 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4213 /* Modes for quad-float pairs and double-float only quantities. */
4214 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4216 /* Modes for condition codes. */
4217 #define CC_MODES (1 << (int) CC_MODE)
4218 #define CCFP_MODES (1 << (int) CCFP_MODE)
4220 /* Value is 1 if register/mode pair is acceptable on sparc.
4221 The funny mixture of D and T modes is because integer operations
4222 do not specially operate on tetra quantities, so non-quad-aligned
4223 registers can hold quadword quantities (except %o4 and %i4 because
4224 they cross fixed registers). */
4226 /* This points to either the 32 bit or the 64 bit version. */
4227 const int *hard_regno_mode_classes
;
4229 static const int hard_32bit_mode_classes
[] = {
4230 S_MODES
, S_MODES
, T_MODES
, S_MODES
, T_MODES
, S_MODES
, D_MODES
, S_MODES
,
4231 T_MODES
, S_MODES
, T_MODES
, S_MODES
, D_MODES
, S_MODES
, D_MODES
, S_MODES
,
4232 T_MODES
, S_MODES
, T_MODES
, S_MODES
, T_MODES
, S_MODES
, D_MODES
, S_MODES
,
4233 T_MODES
, S_MODES
, T_MODES
, S_MODES
, D_MODES
, S_MODES
, D_MODES
, S_MODES
,
4235 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4236 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4237 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4238 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, TF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4240 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4241 and none can hold SFmode/SImode values. */
4242 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4243 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4244 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4245 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, TF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4248 CCFP_MODES
, CCFP_MODES
, CCFP_MODES
, CCFP_MODES
,
4250 /* %icc, %sfp, %gsr */
4251 CC_MODES
, 0, D_MODES
4254 static const int hard_64bit_mode_classes
[] = {
4255 D_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
,
4256 O_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
,
4257 T_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
,
4258 O_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
, T_MODES
, D_MODES
,
4260 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4261 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4262 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4263 OF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
, TF_MODES
, SF_MODES
, DF_MODES
, SF_MODES
,
4265 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4266 and none can hold SFmode/SImode values. */
4267 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4268 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4269 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4270 OF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0, TF_MODES_NO_S
, 0, DF_MODES_NO_S
, 0,
4273 CCFP_MODES
, CCFP_MODES
, CCFP_MODES
, CCFP_MODES
,
4275 /* %icc, %sfp, %gsr */
4276 CC_MODES
, 0, D_MODES
4279 int sparc_mode_class
[NUM_MACHINE_MODES
];
4281 enum reg_class sparc_regno_reg_class
[FIRST_PSEUDO_REGISTER
];
static void
sparc_init_modes (void)
{
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      switch (GET_MODE_CLASS (i))
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	case MODE_COMPLEX_INT:
	  if (GET_MODE_SIZE (i) <= 4)
	    sparc_mode_class[i] = 1 << (int) S_MODE;
	  else if (GET_MODE_SIZE (i) == 8)
	    sparc_mode_class[i] = 1 << (int) D_MODE;
	  else if (GET_MODE_SIZE (i) == 16)
	    sparc_mode_class[i] = 1 << (int) T_MODE;
	  else if (GET_MODE_SIZE (i) == 32)
	    sparc_mode_class[i] = 1 << (int) O_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_VECTOR_INT:
	  if (GET_MODE_SIZE (i) <= 4)
	    sparc_mode_class[i] = 1 << (int) SF_MODE;
	  else if (GET_MODE_SIZE (i) == 8)
	    sparc_mode_class[i] = 1 << (int) DF_MODE;
	  break;
	case MODE_FLOAT:
	case MODE_COMPLEX_FLOAT:
	  if (GET_MODE_SIZE (i) <= 4)
	    sparc_mode_class[i] = 1 << (int) SF_MODE;
	  else if (GET_MODE_SIZE (i) == 8)
	    sparc_mode_class[i] = 1 << (int) DF_MODE;
	  else if (GET_MODE_SIZE (i) == 16)
	    sparc_mode_class[i] = 1 << (int) TF_MODE;
	  else if (GET_MODE_SIZE (i) == 32)
	    sparc_mode_class[i] = 1 << (int) OF_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_CC:
	  if (i == (int) CCFPmode || i == (int) CCFPEmode)
	    sparc_mode_class[i] = 1 << (int) CCFP_MODE;
	  else
	    sparc_mode_class[i] = 1 << (int) CC_MODE;
	  break;
	default:
	  sparc_mode_class[i] = 0;
	  break;
	}
    }

  if (TARGET_ARCH64)
    hard_regno_mode_classes = hard_64bit_mode_classes;
  else
    hard_regno_mode_classes = hard_32bit_mode_classes;

  /* Initialize the array used by REGNO_REG_CLASS.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (i < 16 && TARGET_V8PLUS)
	sparc_regno_reg_class[i] = I64_REGS;
      else if (i < 32 || i == FRAME_POINTER_REGNUM)
	sparc_regno_reg_class[i] = GENERAL_REGS;
      else if (i < 64)
	sparc_regno_reg_class[i] = FP_REGS;
      else if (i < 96)
	sparc_regno_reg_class[i] = EXTRA_FP_REGS;
      else if (i < 100)
	sparc_regno_reg_class[i] = FPCC_REGS;
      else
	sparc_regno_reg_class[i] = NO_REGS;
    }
}
/* Return whether REGNO, a global or FP register, must be saved/restored.  */

static bool
save_global_or_fp_reg_p (unsigned int regno,
			 int leaf_function ATTRIBUTE_UNUSED)
{
  return !call_used_regs[regno] && df_regs_ever_live_p (regno);
}

/* Return whether the return address register (%i7) is needed.  */

static bool
return_addr_reg_needed_p (int leaf_function)
{
  /* If it is live, for example because of __builtin_return_address (0).  */
  if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
    return true;

  /* Otherwise, it is needed as save register if %o7 is clobbered.  */
  if (!leaf_function
      /* Loading the GOT register clobbers %o7.  */
      || crtl->uses_pic_offset_table
      || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
    return true;

  return false;
}

/* Return whether REGNO, a local or in register, must be saved/restored.  */

static bool
save_local_or_in_reg_p (unsigned int regno, int leaf_function)
{
  /* General case: call-saved registers live at some point.  */
  if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
    return true;

  /* Frame pointer register (%fp) if needed.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return true;

  /* Return address register (%i7) if needed.  */
  if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
    return true;

  /* GOT register (%l7) if needed.  */
  if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
    return true;

  /* If the function accesses prior frames, the frame pointer and the return
     address of the previous frame must be saved on the stack.  */
  if (crtl->accesses_prior_frames
      && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
    return true;

  return false;
}
/* Compute the frame size required by the function.  This function is called
   during the reload pass and also by sparc_expand_prologue.  */

HOST_WIDE_INT
sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
{
  HOST_WIDE_INT frame_size, apparent_frame_size;
  int args_size, n_global_fp_regs = 0;
  bool save_local_in_regs_p = false;
  unsigned int i;

  /* If the function allocates dynamic stack space, the dynamic offset is
     computed early and contains REG_PARM_STACK_SPACE, so we need to cope.  */
  if (leaf_function && !cfun->calls_alloca)
    args_size = 0;
  else
    args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);

  /* Calculate space needed for global registers.  */
  if (TARGET_ARCH64)
    {
      for (i = 0; i < 8; i++)
	if (save_global_or_fp_reg_p (i, 0))
	  n_global_fp_regs += 2;
    }
  else
    {
      for (i = 0; i < 8; i += 2)
	if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
	  n_global_fp_regs += 2;
    }

  /* In the flat window model, find out which local and in registers need to
     be saved.  We don't reserve space in the current frame for them as they
     will be spilled into the register window save area of the caller's frame.
     However, as soon as we use this register window save area, we must create
     that of the current frame to make it the live one.  */
  if (TARGET_FLAT)
    for (i = 16; i < 32; i++)
      if (save_local_or_in_reg_p (i, leaf_function))
	{
	  save_local_in_regs_p = true;
	  break;
	}

  /* Calculate space needed for FP registers.  */
  for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
    if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
      n_global_fp_regs += 2;

  if (size == 0
      && n_global_fp_regs == 0
      && args_size == 0
      && !save_local_in_regs_p)
    frame_size = apparent_frame_size = 0;
  else
    {
      /* We subtract STARTING_FRAME_OFFSET, remember it's negative.  */
      apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
      apparent_frame_size += n_global_fp_regs * 4;

      /* We need to add the size of the outgoing argument area.  */
      frame_size = apparent_frame_size + ((args_size + 7) & -8);

      /* And that of the register window save area.  */
      frame_size += FIRST_PARM_OFFSET (cfun->decl);

      /* Finally, bump to the appropriate alignment.  */
      frame_size = SPARC_STACK_ALIGN (frame_size);
    }

  /* Set up values for use in prologue and epilogue.  */
  sparc_frame_size = frame_size;
  sparc_apparent_frame_size = apparent_frame_size;
  sparc_n_global_fp_regs = n_global_fp_regs;
  sparc_save_local_in_regs_p = save_local_in_regs_p;

  return frame_size;
}
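/* To illustrate the computation above: the apparent frame size is the
   rounded size of the locals plus 4 bytes per saved global/FP register
   word; the full frame size then adds the rounded outgoing argument area
   and the register window save area given by FIRST_PARM_OFFSET, and is
   finally rounded up with SPARC_STACK_ALIGN.  */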
/* Output any necessary .register pseudo-ops.  */

void
sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
{
#ifdef HAVE_AS_REGISTER_PSEUDO_OP
  int i;

  if (TARGET_ARCH32)
    return;

  /* Check if %g[2367] were used without
     .register being printed for them already.  */
  for (i = 2; i < 8; i++)
    {
      if (df_regs_ever_live_p (i)
	  && ! sparc_hard_reg_printed [i])
	{
	  sparc_hard_reg_printed [i] = 1;
	  /* %g7 is used as TLS base register, use #ignore
	     for it instead of #scratch.  */
	  fprintf (file, "\t.register\t%%g%d, #%s\n", i,
		   i == 7 ? "ignore" : "scratch");
	}
      if (i == 3) i = 5;
    }
#endif
}
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 4096
#error Cannot use indexed addressing mode for stack probing
#endif
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.

   Note that we don't use the REG+REG addressing mode for the probes because
   of the stack bias in 64-bit mode.  And it doesn't really buy us anything
   so the advantages of having a single code path win here.  */
static void
sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  rtx g1 = gen_rtx_REG (Pmode, 1);

  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (size <= PROBE_INTERVAL)
    {
      emit_move_insn (g1, GEN_INT (first));
      emit_insn (gen_rtx_SET (VOIDmode, g1,
			      gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
      emit_stack_probe (plus_constant (g1, -size));
    }

  /* The run-time loop is made up of 10 insns in the generic case while the
     compile-time loop is made up of 4+2*(n-2) insns for n # of intervals.  */
  else if (size <= 5 * PROBE_INTERVAL)
    {
      HOST_WIDE_INT i;

      emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
      emit_insn (gen_rtx_SET (VOIDmode, g1,
			      gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
      emit_stack_probe (g1);

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
	 it exceeds SIZE.  If only two probes are needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, g1,
				  plus_constant (g1, -PROBE_INTERVAL)));
	  emit_stack_probe (g1);
	}

      emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx g4 = gen_rtx_REG (Pmode, 4);

      emit_move_insn (g1, GEN_INT (first));

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;
      emit_move_insn (g4, GEN_INT (rounded_size));

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (VOIDmode, g1,
			      gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));

      /* Step 3: the loop

	 while (TEST_ADDR != LAST_ADDR)
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_ARCH64)
	emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
      else
	emit_insn (gen_probe_stack_rangesi (g1, g1, g4));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (g4, rounded_size - size));
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  output_asm_insn ("cmp\t%0, %1", xops);
  if (TARGET_ARCH64)
    fputs ("\tbe,pn\t%xcc,", asm_out_file);
  else
    fputs ("\tbe\t", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn (" add\t%0, %1, %0", xops);

  /* Probe at TEST_ADDR and branch.  */
  if (TARGET_ARCH64)
    fputs ("\tba,pt\t%xcc,", asm_out_file);
  else
    fputs ("\tba\t", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);
  xops[1] = GEN_INT (SPARC_STACK_BIAS);
  output_asm_insn (" st\t%%g0, [%0+%1]", xops);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
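/* The loop emitted above therefore looks roughly like this (illustrative
   only; the branch hints and label names depend on the target and the
   label counter):

	.LPSRL0:
		cmp	%g1, %g4
		be	.LPSRE0
		 add	%g1, -PROBE_INTERVAL, %g1
		ba	.LPSRL0
		 st	%g0, [%g1+SPARC_STACK_BIAS]
	.LPSRE0:

   i.e. one store per PROBE_INTERVAL until TEST_ADDR reaches LAST_ADDR.  */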
/* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
   needed.  LOW is supposed to be double-word aligned for 32-bit registers.
   SAVE_P decides whether a register must be saved/restored.  ACTION_TRUE
   is the action to be performed if SAVE_P returns true and ACTION_FALSE
   the action to be performed if it returns false.  Return the new offset.  */

typedef bool (*sorr_pred_t) (unsigned int, int);
typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
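/* For example, the prologue saves the call-saved global/FP registers by
   passing save_global_or_fp_reg_p together with SORR_SAVE, while the flat
   epilogue walks the local/in registers with save_local_or_in_reg_p,
   SORR_RESTORE and SORR_ADVANCE so that unsaved slots are merely skipped.  */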
4687 emit_save_or_restore_regs (unsigned int low
, unsigned int high
, rtx base
,
4688 int offset
, int leaf_function
, sorr_pred_t save_p
,
4689 sorr_act_t action_true
, sorr_act_t action_false
)
4694 if (TARGET_ARCH64
&& high
<= 32)
4698 for (i
= low
; i
< high
; i
++)
4700 if (save_p (i
, leaf_function
))
4702 mem
= gen_frame_mem (DImode
, plus_constant (base
, offset
));
4703 if (action_true
== SORR_SAVE
)
4705 insn
= emit_move_insn (mem
, gen_rtx_REG (DImode
, i
));
4706 RTX_FRAME_RELATED_P (insn
) = 1;
4708 else /* action_true == SORR_RESTORE */
4710 /* The frame pointer must be restored last since its old
4711 value may be used as base address for the frame. This
4712 is problematic in 64-bit mode only because of the lack
4713 of double-word load instruction. */
4714 if (i
== HARD_FRAME_POINTER_REGNUM
)
4717 emit_move_insn (gen_rtx_REG (DImode
, i
), mem
);
4721 else if (action_false
== SORR_ADVANCE
)
4727 mem
= gen_frame_mem (DImode
, plus_constant (base
, fp_offset
));
4728 emit_move_insn (hard_frame_pointer_rtx
, mem
);
4733 for (i
= low
; i
< high
; i
+= 2)
4735 bool reg0
= save_p (i
, leaf_function
);
4736 bool reg1
= save_p (i
+ 1, leaf_function
);
4737 enum machine_mode mode
;
4742 mode
= SPARC_INT_REG_P (i
) ? DImode
: DFmode
;
4747 mode
= SPARC_INT_REG_P (i
) ? SImode
: SFmode
;
4752 mode
= SPARC_INT_REG_P (i
) ? SImode
: SFmode
;
4758 if (action_false
== SORR_ADVANCE
)
4763 mem
= gen_frame_mem (mode
, plus_constant (base
, offset
));
4764 if (action_true
== SORR_SAVE
)
4766 insn
= emit_move_insn (mem
, gen_rtx_REG (mode
, regno
));
4767 RTX_FRAME_RELATED_P (insn
) = 1;
4771 mem
= gen_frame_mem (SImode
, plus_constant (base
, offset
));
4772 set1
= gen_rtx_SET (VOIDmode
, mem
,
4773 gen_rtx_REG (SImode
, regno
));
4774 RTX_FRAME_RELATED_P (set1
) = 1;
4776 = gen_frame_mem (SImode
, plus_constant (base
, offset
+ 4));
4777 set2
= gen_rtx_SET (VOIDmode
, mem
,
4778 gen_rtx_REG (SImode
, regno
+ 1));
4779 RTX_FRAME_RELATED_P (set2
) = 1;
4780 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
4781 gen_rtx_PARALLEL (VOIDmode
,
4782 gen_rtvec (2, set1
, set2
)));
4785 else /* action_true == SORR_RESTORE */
4786 emit_move_insn (gen_rtx_REG (mode
, regno
), mem
);
4788 /* Always preserve double-word alignment. */
4789 offset
= (offset
+ 8) & -8;
/* Emit code to adjust BASE to OFFSET.  Return the new base.  */

static rtx
emit_adjust_base_to_offset (rtx base, int offset)
{
  /* ??? This might be optimized a little as %g1 might already have a
     value close enough that a single add insn will do.  */
  /* ??? Although, all of this is probably only a temporary fix because
     if %g1 can hold a function result, then sparc_expand_epilogue will
     lose (the result will be clobbered).  */
  rtx new_base = gen_rtx_REG (Pmode, 1);
  emit_move_insn (new_base, GEN_INT (offset));
  emit_insn (gen_rtx_SET (VOIDmode,
			  new_base, gen_rtx_PLUS (Pmode, base, new_base)));
  return new_base;
}
/* Emit code to save/restore call-saved global and FP registers.  */

static void
emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
{
  if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
    {
      base = emit_adjust_base_to_offset (base, offset);
      offset = 0;
    }

  offset
    = emit_save_or_restore_regs (0, 8, base, offset, 0,
				 save_global_or_fp_reg_p, action, SORR_NONE);
  emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
			     save_global_or_fp_reg_p, action, SORR_NONE);
}
/* Emit code to save/restore call-saved local and in registers.  */

static void
emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
{
  if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
    {
      base = emit_adjust_base_to_offset (base, offset);
      offset = 0;
    }

  emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
			     save_local_or_in_reg_p, action, SORR_ADVANCE);
}
/* Emit a window_save insn.  */

static rtx
emit_window_save (rtx increment)
{
  rtx insn = emit_insn (gen_window_save (increment));
  RTX_FRAME_RELATED_P (insn) = 1;

  /* The incoming return address (%o7) is saved in %i7.  */
  add_reg_note (insn, REG_CFA_REGISTER,
		gen_rtx_SET (VOIDmode,
			     gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
			     gen_rtx_REG (Pmode,
					  INCOMING_RETURN_ADDR_REGNUM)));

  /* The window save event.  */
  add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);

  /* The CFA is %fp, the hard frame pointer.  */
  add_reg_note (insn, REG_CFA_DEF_CFA,
		plus_constant (hard_frame_pointer_rtx,
			       INCOMING_FRAME_SP_OFFSET));

  return insn;
}
/* Generate an increment for the stack pointer.  */

static rtx
gen_stack_pointer_inc (rtx increment)
{
  return gen_rtx_SET (VOIDmode,
		      stack_pointer_rtx,
		      gen_rtx_PLUS (Pmode,
				    stack_pointer_rtx,
				    increment));
}

/* Generate a decrement for the stack pointer.  */

static rtx
gen_stack_pointer_dec (rtx decrement)
{
  return gen_rtx_SET (VOIDmode,
		      stack_pointer_rtx,
		      gen_rtx_MINUS (Pmode,
				     stack_pointer_rtx,
				     decrement));
}
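/* Both helpers only build the bare SET rtx, e.g.
   (set (reg %sp) (plus (reg %sp) (const_int -SIZE))); the callers wrap it
   in emit_insn and, in the prologue, mark the resulting insn as
   frame-related so that the CFA adjustment is recorded.  */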
4896 /* Expand the function prologue. The prologue is responsible for reserving
4897 storage for the frame, saving the call-saved registers and loading the
4898 GOT register if needed. */
4901 sparc_expand_prologue (void)
4906 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4907 on the final value of the flag means deferring the prologue/epilogue
4908 expansion until just before the second scheduling pass, which is too
4909 late to emit multiple epilogues or return insns.
4911 Of course we are making the assumption that the value of the flag
4912 will not change between now and its final value. Of the three parts
4913 of the formula, only the last one can reasonably vary. Let's take a
4914 closer look, after assuming that the first two ones are set to true
4915 (otherwise the last value is effectively silenced).
4917 If only_leaf_regs_used returns false, the global predicate will also
4918 be false so the actual frame size calculated below will be positive.
4919 As a consequence, the save_register_window insn will be emitted in
4920 the instruction stream; now this insn explicitly references %fp
4921 which is not a leaf register so only_leaf_regs_used will always
4922 return false subsequently.
4924 If only_leaf_regs_used returns true, we hope that the subsequent
4925 optimization passes won't cause non-leaf registers to pop up. For
4926 example, the regrename pass has special provisions to not rename to
4927 non-leaf registers in a leaf function. */
4928 sparc_leaf_function_p
4929 = optimize
> 0 && current_function_is_leaf
&& only_leaf_regs_used ();
4931 size
= sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p
);
4933 if (flag_stack_usage_info
)
4934 current_function_static_stack_size
= size
;
4936 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
&& size
)
4937 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
4941 else if (sparc_leaf_function_p
)
4943 rtx size_int_rtx
= GEN_INT (-size
);
4946 insn
= emit_insn (gen_stack_pointer_inc (size_int_rtx
));
4947 else if (size
<= 8192)
4949 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4950 /* %sp is still the CFA register. */
4951 RTX_FRAME_RELATED_P (insn
) = 1;
4952 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size
)));
4956 rtx size_rtx
= gen_rtx_REG (Pmode
, 1);
4957 emit_move_insn (size_rtx
, size_int_rtx
);
4958 insn
= emit_insn (gen_stack_pointer_inc (size_rtx
));
4959 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
4960 gen_stack_pointer_inc (size_int_rtx
));
4963 RTX_FRAME_RELATED_P (insn
) = 1;
4967 rtx size_int_rtx
= GEN_INT (-size
);
4970 emit_window_save (size_int_rtx
);
4971 else if (size
<= 8192)
4973 emit_window_save (GEN_INT (-4096));
4974 /* %sp is not the CFA register anymore. */
4975 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size
)));
4979 rtx size_rtx
= gen_rtx_REG (Pmode
, 1);
4980 emit_move_insn (size_rtx
, size_int_rtx
);
4981 emit_window_save (size_rtx
);
4985 if (sparc_leaf_function_p
)
4987 sparc_frame_base_reg
= stack_pointer_rtx
;
4988 sparc_frame_base_offset
= size
+ SPARC_STACK_BIAS
;
4992 sparc_frame_base_reg
= hard_frame_pointer_rtx
;
4993 sparc_frame_base_offset
= SPARC_STACK_BIAS
;
4996 if (sparc_n_global_fp_regs
> 0)
4997 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
4998 sparc_frame_base_offset
4999 - sparc_apparent_frame_size
,
5002 /* Load the GOT register if needed. */
5003 if (crtl
->uses_pic_offset_table
)
5004 load_got_register ();
5006 /* Advertise that the data calculated just above are now valid. */
5007 sparc_prologue_data_valid_p
= true;
5010 /* Expand the function prologue. The prologue is responsible for reserving
5011 storage for the frame, saving the call-saved registers and loading the
5012 GOT register if needed. */
5015 sparc_flat_expand_prologue (void)
5020 sparc_leaf_function_p
= optimize
> 0 && current_function_is_leaf
;
5022 size
= sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p
);
5024 if (flag_stack_usage_info
)
5025 current_function_static_stack_size
= size
;
5027 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
&& size
)
5028 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
5030 if (sparc_save_local_in_regs_p
)
5031 emit_save_or_restore_local_in_regs (stack_pointer_rtx
, SPARC_STACK_BIAS
,
5038 rtx size_int_rtx
, size_rtx
;
5040 size_rtx
= size_int_rtx
= GEN_INT (-size
);
5042 /* We establish the frame (i.e. decrement the stack pointer) first, even
5043 if we use a frame pointer, because we cannot clobber any call-saved
5044 registers, including the frame pointer, if we haven't created a new
5045 register save area, for the sake of compatibility with the ABI. */
5047 insn
= emit_insn (gen_stack_pointer_inc (size_int_rtx
));
5048 else if (size
<= 8192 && !frame_pointer_needed
)
5050 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5051 RTX_FRAME_RELATED_P (insn
) = 1;
5052 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size
)));
5056 size_rtx
= gen_rtx_REG (Pmode
, 1);
5057 emit_move_insn (size_rtx
, size_int_rtx
);
5058 insn
= emit_insn (gen_stack_pointer_inc (size_rtx
));
5059 add_reg_note (insn
, REG_CFA_ADJUST_CFA
,
5060 gen_stack_pointer_inc (size_int_rtx
));
5062 RTX_FRAME_RELATED_P (insn
) = 1;
5064 /* Ensure nothing is scheduled until after the frame is established. */
5065 emit_insn (gen_blockage ());
5067 if (frame_pointer_needed
)
5069 insn
= emit_insn (gen_rtx_SET (VOIDmode
, hard_frame_pointer_rtx
,
5070 gen_rtx_MINUS (Pmode
,
5073 RTX_FRAME_RELATED_P (insn
) = 1;
5075 add_reg_note (insn
, REG_CFA_ADJUST_CFA
,
5076 gen_rtx_SET (VOIDmode
, hard_frame_pointer_rtx
,
5077 plus_constant (stack_pointer_rtx
,
5081 if (return_addr_reg_needed_p (sparc_leaf_function_p
))
5083 rtx o7
= gen_rtx_REG (Pmode
, INCOMING_RETURN_ADDR_REGNUM
);
5084 rtx i7
= gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
);
5086 insn
= emit_move_insn (i7
, o7
);
5087 RTX_FRAME_RELATED_P (insn
) = 1;
5089 add_reg_note (insn
, REG_CFA_REGISTER
,
5090 gen_rtx_SET (VOIDmode
, i7
, o7
));
5092 /* Prevent this instruction from ever being considered dead,
5093 even if this function has no epilogue. */
5094 emit_insn (gen_rtx_USE (VOIDmode
, i7
));
5098 if (frame_pointer_needed
)
5100 sparc_frame_base_reg
= hard_frame_pointer_rtx
;
5101 sparc_frame_base_offset
= SPARC_STACK_BIAS
;
5105 sparc_frame_base_reg
= stack_pointer_rtx
;
5106 sparc_frame_base_offset
= size
+ SPARC_STACK_BIAS
;
5109 if (sparc_n_global_fp_regs
> 0)
5110 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
5111 sparc_frame_base_offset
5112 - sparc_apparent_frame_size
,
5115 /* Load the GOT register if needed. */
5116 if (crtl
->uses_pic_offset_table
)
5117 load_got_register ();
5119 /* Advertise that the data calculated just above are now valid. */
5120 sparc_prologue_data_valid_p
= true;
/* This function generates the assembly code for function entry, which boils
   down to emitting the necessary .register directives.  */

static void
sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* Check that the assumption we made in sparc_expand_prologue is valid.  */
  if (!TARGET_FLAT)
    gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);

  sparc_output_scratch_registers (file);
}
/* Expand the function epilogue, either normal or part of a sibcall.
   We emit all the instructions except the return or the call.  */

void
sparc_expand_epilogue (bool for_eh)
{
  HOST_WIDE_INT size = sparc_frame_size;

  if (sparc_n_global_fp_regs > 0)
    emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
					 sparc_frame_base_offset
					   - sparc_apparent_frame_size,
					 SORR_RESTORE);

  if (size == 0 || for_eh)
    ; /* do nothing.  */
  else if (sparc_leaf_function_p)
    {
      if (size <= 4096)
	emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
      else if (size <= 8192)
	{
	  emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
	  emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
	}
      else
	{
	  rtx reg = gen_rtx_REG (Pmode, 1);
	  emit_move_insn (reg, GEN_INT (-size));
	  emit_insn (gen_stack_pointer_dec (reg));
	}
    }
}
5170 /* Expand the function epilogue, either normal or part of a sibcall.
5171 We emit all the instructions except the return or the call. */
5174 sparc_flat_expand_epilogue (bool for_eh
)
5176 HOST_WIDE_INT size
= sparc_frame_size
;
5178 if (sparc_n_global_fp_regs
> 0)
5179 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
5180 sparc_frame_base_offset
5181 - sparc_apparent_frame_size
,
5184 /* If we have a frame pointer, we'll need both to restore it before the
5185 frame is destroyed and use its current value in destroying the frame.
5186 Since we don't have an atomic way to do that in the flat window model,
5187 we save the current value into a temporary register (%g1). */
5188 if (frame_pointer_needed
&& !for_eh
)
5189 emit_move_insn (gen_rtx_REG (Pmode
, 1), hard_frame_pointer_rtx
);
5191 if (return_addr_reg_needed_p (sparc_leaf_function_p
))
5192 emit_move_insn (gen_rtx_REG (Pmode
, INCOMING_RETURN_ADDR_REGNUM
),
5193 gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
));
5195 if (sparc_save_local_in_regs_p
)
5196 emit_save_or_restore_local_in_regs (sparc_frame_base_reg
,
5197 sparc_frame_base_offset
,
5200 if (size
== 0 || for_eh
)
5202 else if (frame_pointer_needed
)
5204 /* Make sure the frame is destroyed after everything else is done. */
5205 emit_insn (gen_blockage ());
5207 emit_move_insn (stack_pointer_rtx
, gen_rtx_REG (Pmode
, 1));
5212 emit_insn (gen_blockage ());
5215 emit_insn (gen_stack_pointer_dec (GEN_INT (-size
)));
5216 else if (size
<= 8192)
5218 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5219 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size
)));
5223 rtx reg
= gen_rtx_REG (Pmode
, 1);
5224 emit_move_insn (reg
, GEN_INT (-size
));
5225 emit_insn (gen_stack_pointer_dec (reg
));
/* Return true if it is appropriate to emit `return' instructions in the
   body of a function.  */

bool
sparc_can_use_return_insn_p (void)
{
  return sparc_prologue_data_valid_p
	 && sparc_n_global_fp_regs == 0
	 && (TARGET_FLAT
	     ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
	     : (sparc_frame_size == 0 || !sparc_leaf_function_p));
}
5243 /* This function generates the assembly code for function exit. */
5246 sparc_asm_function_epilogue (FILE *file
, HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
5248 /* If the last two instructions of a function are "call foo; dslot;"
5249 the return address might point to the first instruction in the next
5250 function and we have to output a dummy nop for the sake of sane
5251 backtraces in such cases. This is pointless for sibling calls since
5252 the return address is explicitly adjusted. */
5254 rtx insn
, last_real_insn
;
5256 insn
= get_last_insn ();
5258 last_real_insn
= prev_real_insn (insn
);
5260 && GET_CODE (last_real_insn
) == INSN
5261 && GET_CODE (PATTERN (last_real_insn
)) == SEQUENCE
)
5262 last_real_insn
= XVECEXP (PATTERN (last_real_insn
), 0, 0);
5265 && CALL_P (last_real_insn
)
5266 && !SIBLING_CALL_P (last_real_insn
))
5267 fputs("\tnop\n", file
);
5269 sparc_output_deferred_case_vectors ();
5272 /* Output a 'restore' instruction. */
5275 output_restore (rtx pat
)
5281 fputs ("\t restore\n", asm_out_file
);
5285 gcc_assert (GET_CODE (pat
) == SET
);
5287 operands
[0] = SET_DEST (pat
);
5288 pat
= SET_SRC (pat
);
5290 switch (GET_CODE (pat
))
5293 operands
[1] = XEXP (pat
, 0);
5294 operands
[2] = XEXP (pat
, 1);
5295 output_asm_insn (" restore %r1, %2, %Y0", operands
);
5298 operands
[1] = XEXP (pat
, 0);
5299 operands
[2] = XEXP (pat
, 1);
5300 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands
);
5303 operands
[1] = XEXP (pat
, 0);
5304 gcc_assert (XEXP (pat
, 1) == const1_rtx
);
5305 output_asm_insn (" restore %r1, %r1, %Y0", operands
);
5309 output_asm_insn (" restore %%g0, %1, %Y0", operands
);
5314 /* Output a return. */
5317 output_return (rtx insn
)
5319 if (crtl
->calls_eh_return
)
5321 /* If the function uses __builtin_eh_return, the eh_return
5322 machinery occupies the delay slot. */
5323 gcc_assert (!final_sequence
);
5325 if (flag_delayed_branch
)
5327 if (!TARGET_FLAT
&& TARGET_V9
)
5328 fputs ("\treturn\t%i7+8\n", asm_out_file
);
5332 fputs ("\trestore\n", asm_out_file
);
5334 fputs ("\tjmp\t%o7+8\n", asm_out_file
);
5337 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file
);
5342 fputs ("\trestore\n", asm_out_file
);
5344 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file
);
5345 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file
);
5348 else if (sparc_leaf_function_p
|| TARGET_FLAT
)
5350 /* This is a leaf or flat function so we don't have to bother restoring
5351 the register window, which frees us from dealing with the convoluted
5352 semantics of restore/return. We simply output the jump to the
5353 return address and the insn in the delay slot (if any). */
5355 return "jmp\t%%o7+%)%#";
5359 /* This is a regular function so we have to restore the register window.
5360 We may have a pending insn for the delay slot, which will be either
5361 combined with the 'restore' instruction or put in the delay slot of
5362 the 'return' instruction. */
5368 delay
= NEXT_INSN (insn
);
5371 pat
= PATTERN (delay
);
5373 if (TARGET_V9
&& ! epilogue_renumber (&pat
, 1))
5375 epilogue_renumber (&pat
, 0);
5376 return "return\t%%i7+%)%#";
5380 output_asm_insn ("jmp\t%%i7+%)", NULL
);
5381 output_restore (pat
);
5382 PATTERN (delay
) = gen_blockage ();
5383 INSN_CODE (delay
) = -1;
5388 /* The delay slot is empty. */
5390 return "return\t%%i7+%)\n\t nop";
5391 else if (flag_delayed_branch
)
5392 return "jmp\t%%i7+%)\n\t restore";
5394 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5401 /* Output a sibling call. */
5404 output_sibcall (rtx insn
, rtx call_operand
)
5408 gcc_assert (flag_delayed_branch
);
5410 operands
[0] = call_operand
;
5412 if (sparc_leaf_function_p
|| TARGET_FLAT
)
5414 /* This is a leaf or flat function so we don't have to bother restoring
5415 the register window. We simply output the jump to the function and
5416 the insn in the delay slot (if any). */
5418 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P
&& final_sequence
));
5421 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5424 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5425 it into branch if possible. */
5426 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5431 /* This is a regular function so we have to restore the register window.
5432 We may have a pending insn for the delay slot, which will be combined
5433 with the 'restore' instruction. */
5435 output_asm_insn ("call\t%a0, 0", operands
);
5439 rtx delay
= NEXT_INSN (insn
);
5442 output_restore (PATTERN (delay
));
5444 PATTERN (delay
) = gen_blockage ();
5445 INSN_CODE (delay
) = -1;
5448 output_restore (NULL_RTX
);
/* Functions for handling argument passing.

   For 32-bit, the first 6 args are normally in registers and the rest are
   pushed.  Any arg that starts within the first 6 words is at least
   partially passed in a register unless its data type forbids.

   For 64-bit, the argument registers are laid out as an array of 16 elements
   and arguments are added sequentially.  The first 6 int args and up to the
   first 16 fp args (depending on size) are passed in regs.

   Slot    Stack   Integral   Float   Float in structure   Double   Long Double
   ----    -----   --------   -----   ------------------   ------   -----------
    15   [SP+248]              %f31       %f30,%f31         %d30
    14   [SP+240]              %f29       %f28,%f29         %d28       %q28
    13   [SP+232]              %f27       %f26,%f27         %d26
    12   [SP+224]              %f25       %f24,%f25         %d24       %q24
    11   [SP+216]              %f23       %f22,%f23         %d22
    10   [SP+208]              %f21       %f20,%f21         %d20       %q20
     9   [SP+200]              %f19       %f18,%f19         %d18
     8   [SP+192]              %f17       %f16,%f17         %d16       %q16
     7   [SP+184]              %f15       %f14,%f15         %d14
     6   [SP+176]              %f13       %f12,%f13         %d12       %q12
     5   [SP+168]     %o5      %f11       %f10,%f11         %d10
     4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
     3   [SP+152]     %o3       %f7        %f6,%f7           %d6
     2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
     1   [SP+136]     %o1       %f3        %f2,%f3           %d2
     0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0

   Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.

   Integral arguments are always passed as 64-bit quantities appropriately
   extended.

   Passing of floating point values is handled as follows.
   If a prototype is in scope:
     If the value is in a named argument (i.e. not a stdarg function or a
     value not part of the `...') then the value is passed in the appropriate
     fp reg.
     If the value is part of the `...' and is passed in one of the first 6
     slots then the value is passed in the appropriate int reg.
     If the value is part of the `...' and is not passed in one of the first 6
     slots then the value is passed in memory.
   If a prototype is not in scope:
     If the value is one of the first 6 arguments the value is passed in the
     appropriate integer reg and the appropriate fp reg.
     If the value is not one of the first 6 arguments the value is passed in
     the appropriate fp reg and in memory.


   Summary of the calling conventions implemented by GCC on the SPARC:

   32-bit ABI:
                                size      argument     return value

      small integer              <4       int. reg.      int. reg.
      word                        4       int. reg.      int. reg.
      double word                 8       int. reg.      int. reg.

      _Complex small integer     <8       int. reg.      int. reg.
      _Complex word               8       int. reg.      int. reg.
      _Complex double word       16        memory        int. reg.

      vector integer            <=8       int. reg.       FP reg.
      vector integer             >8        memory         memory

      float                       4       int. reg.       FP reg.
      double                      8       int. reg.       FP reg.
      long double                16        memory          memory

      _Complex float              8        memory         FP reg.
      _Complex double            16        memory         FP reg.
      _Complex long double       32        memory         FP reg.

      vector float              any        memory          memory

      aggregate                 any        memory          memory


   64-bit ABI:
                                size      argument     return value

      small integer              <8       int. reg.      int. reg.
      word                        8       int. reg.      int. reg.
      double word                16       int. reg.      int. reg.

      _Complex small integer    <16       int. reg.      int. reg.
      _Complex word              16       int. reg.      int. reg.
      _Complex double word       32        memory        int. reg.

      vector integer           <=16        FP reg.        FP reg.
      vector integer       16<s<=32        memory         FP reg.
      vector integer            >32        memory          memory

      float                       4        FP reg.        FP reg.
      double                      8        FP reg.        FP reg.
      long double                16        FP reg.        FP reg.

      _Complex float              8        FP reg.        FP reg.
      _Complex double            16        FP reg.        FP reg.
      _Complex long double       32        memory         FP reg.

      vector float             <=16        FP reg.        FP reg.
      vector float         16<s<=32        memory         FP reg.
      vector float              >32        memory          memory

      aggregate                <=16         reg.            reg.
      aggregate            16<s<=32        memory           reg.
      aggregate                 >32        memory          memory


   Note #1: complex floating-point types follow the extended SPARC ABIs as
   implemented by the Sun compiler.

   Note #2: integral vector types follow the scalar floating-point types
   conventions to match what is implemented by the Sun VIS SDK.

   Note #3: floating-point vector types follow the aggregate types
   conventions.  */
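/* As an example of the 64-bit convention summarized above, a prototyped
   call to a function taking (int, double) uses slot 0 for the int, passed
   as a full word in %o0, and slot 1 for the double, passed in %d2, with
   [SP+128] and [SP+136] as the corresponding stack slots.  */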
/* Maximum number of int regs for args.  */
#define SPARC_INT_ARG_MAX 6
/* Maximum number of fp regs for args.  */
#define SPARC_FP_ARG_MAX 16

#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
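/* ROUND_ADVANCE counts argument slots, e.g. with 8-byte words a 12-byte
   aggregate occupies ROUND_ADVANCE (12) == 2 slots.  */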
/* Handle the INIT_CUMULATIVE_ARGS macro.
   Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (struct sparc_args *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED,
		      tree fndecl ATTRIBUTE_UNUSED)
{
  cum->words = 0;
  cum->prototype_p = fntype && prototype_p (fntype);
  cum->libcall_p = fntype == 0;
}

/* Handle promotion of pointer and integer arguments.  */

static enum machine_mode
sparc_promote_function_mode (const_tree type,
			     enum machine_mode mode,
			     int *punsignedp,
			     const_tree fntype ATTRIBUTE_UNUSED,
			     int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  /* Integral arguments are passed as full words, as per the ABI.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    return word_mode;

  return mode;
}

/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook.  */

static bool
sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH64 ? true : false;
}
5630 /* Scan the record type TYPE and return the following predicates:
5631 - INTREGS_P: the record contains at least one field or sub-field
5632 that is eligible for promotion in integer registers.
5633 - FP_REGS_P: the record contains at least one field or sub-field
5634 that is eligible for promotion in floating-point registers.
5635 - PACKED_P: the record contains at least one field that is packed.
5637 Sub-fields are not taken into account for the PACKED_P predicate. */
5640 scan_record_type (const_tree type
, int *intregs_p
, int *fpregs_p
,
5645 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
5647 if (TREE_CODE (field
) == FIELD_DECL
)
5649 if (TREE_CODE (TREE_TYPE (field
)) == RECORD_TYPE
)
5650 scan_record_type (TREE_TYPE (field
), intregs_p
, fpregs_p
, 0);
5651 else if ((FLOAT_TYPE_P (TREE_TYPE (field
))
5652 || TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
)
5658 if (packed_p
&& DECL_PACKED (field
))
5664 /* Compute the slot number to pass an argument in.
5665 Return the slot number or -1 if passing on the stack.
5667 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5668 the preceding args and about the function being called.
5669 MODE is the argument's machine mode.
5670 TYPE is the data type of the argument (as a tree).
5671 This is null for libcalls where that information may
5673 NAMED is nonzero if this argument is a named parameter
5674 (otherwise it is an extra parameter matching an ellipsis).
5675 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5676 *PREGNO records the register number to use if scalar type.
5677 *PPADDING records the amount of padding needed in words. */
5680 function_arg_slotno (const struct sparc_args
*cum
, enum machine_mode mode
,
5681 const_tree type
, bool named
, bool incoming_p
,
5682 int *pregno
, int *ppadding
)
5684 int regbase
= (incoming_p
5685 ? SPARC_INCOMING_INT_ARG_FIRST
5686 : SPARC_OUTGOING_INT_ARG_FIRST
);
5687 int slotno
= cum
->words
;
5688 enum mode_class mclass
;
5693 if (type
&& TREE_ADDRESSABLE (type
))
5699 && TYPE_ALIGN (type
) % PARM_BOUNDARY
!= 0)
5702 /* For SPARC64, objects requiring 16-byte alignment get it. */
5704 && (type
? TYPE_ALIGN (type
) : GET_MODE_ALIGNMENT (mode
)) >= 128
5705 && (slotno
& 1) != 0)
5706 slotno
++, *ppadding
= 1;
5708 mclass
= GET_MODE_CLASS (mode
);
5709 if (type
&& TREE_CODE (type
) == VECTOR_TYPE
)
5711 /* Vector types deserve special treatment because they are
5712 polymorphic wrt their mode, depending upon whether VIS
5713 instructions are enabled. */
5714 if (TREE_CODE (TREE_TYPE (type
)) == REAL_TYPE
)
5716 /* The SPARC port defines no floating-point vector modes. */
5717 gcc_assert (mode
== BLKmode
);
5721 /* Integral vector types should either have a vector
5722 mode or an integral mode, because we are guaranteed
5723 by pass_by_reference that their size is not greater
5724 than 16 bytes and TImode is 16-byte wide. */
5725 gcc_assert (mode
!= BLKmode
);
5727 /* Vector integers are handled like floats according to
5729 mclass
= MODE_FLOAT
;
5736 case MODE_COMPLEX_FLOAT
:
5737 case MODE_VECTOR_INT
:
5738 if (TARGET_ARCH64
&& TARGET_FPU
&& named
)
5740 if (slotno
>= SPARC_FP_ARG_MAX
)
5742 regno
= SPARC_FP_ARG_FIRST
+ slotno
* 2;
5743 /* Arguments filling only one single FP register are
5744 right-justified in the outer double FP register. */
5745 if (GET_MODE_SIZE (mode
) <= 4)
5752 case MODE_COMPLEX_INT
:
5753 if (slotno
>= SPARC_INT_ARG_MAX
)
5755 regno
= regbase
+ slotno
;
5759 if (mode
== VOIDmode
)
5760 /* MODE is VOIDmode when generating the actual call. */
5763 gcc_assert (mode
== BLKmode
);
5767 || (TREE_CODE (type
) != VECTOR_TYPE
5768 && TREE_CODE (type
) != RECORD_TYPE
))
5770 if (slotno
>= SPARC_INT_ARG_MAX
)
5772 regno
= regbase
+ slotno
;
5774 else /* TARGET_ARCH64 && type */
5776 int intregs_p
= 0, fpregs_p
= 0, packed_p
= 0;
5778 /* First see what kinds of registers we would need. */
5779 if (TREE_CODE (type
) == VECTOR_TYPE
)
5782 scan_record_type (type
, &intregs_p
, &fpregs_p
, &packed_p
);
5784 /* The ABI obviously doesn't specify how packed structures
5785 are passed. These are defined to be passed in int regs
5786 if possible, otherwise memory. */
5787 if (packed_p
|| !named
)
5788 fpregs_p
= 0, intregs_p
= 1;
5790 /* If all arg slots are filled, then must pass on stack. */
5791 if (fpregs_p
&& slotno
>= SPARC_FP_ARG_MAX
)
5794 /* If there are only int args and all int arg slots are filled,
5795 then must pass on stack. */
5796 if (!fpregs_p
&& intregs_p
&& slotno
>= SPARC_INT_ARG_MAX
)
5799 /* Note that even if all int arg slots are filled, fp members may
5800 still be passed in regs if such regs are available.
5801 *PREGNO isn't set because there may be more than one, it's up
5802 to the caller to compute them. */
5815 /* Handle recursive register counting for structure field layout. */
5817 struct function_arg_record_value_parms
5819 rtx ret
; /* return expression being built. */
5820 int slotno
; /* slot number of the argument. */
5821 int named
; /* whether the argument is named. */
5822 int regbase
; /* regno of the base register. */
5823 int stack
; /* 1 if part of the argument is on the stack. */
5824 int intoffset
; /* offset of the first pending integer field. */
5825 unsigned int nregs
; /* number of words passed in registers. */
5828 static void function_arg_record_value_3
5829 (HOST_WIDE_INT
, struct function_arg_record_value_parms
*);
5830 static void function_arg_record_value_2
5831 (const_tree
, HOST_WIDE_INT
, struct function_arg_record_value_parms
*, bool);
5832 static void function_arg_record_value_1
5833 (const_tree
, HOST_WIDE_INT
, struct function_arg_record_value_parms
*, bool);
5834 static rtx
function_arg_record_value (const_tree
, enum machine_mode
, int, int, int);
5835 static rtx
function_arg_union_value (int, enum machine_mode
, int, int);
5837 /* A subroutine of function_arg_record_value. Traverse the structure
5838 recursively and determine how many registers will be required. */
5841 function_arg_record_value_1 (const_tree type
, HOST_WIDE_INT startbitpos
,
5842 struct function_arg_record_value_parms
*parms
,
5847 /* We need to compute how many registers are needed so we can
5848 allocate the PARALLEL but before we can do that we need to know
5849 whether there are any packed fields. The ABI obviously doesn't
5850 specify how structures are passed in this case, so they are
5851 defined to be passed in int regs if possible, otherwise memory,
5852 regardless of whether there are fp values present. */
5855 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
5857 if (TREE_CODE (field
) == FIELD_DECL
&& DECL_PACKED (field
))
5864 /* Compute how many registers we need. */
5865 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
5867 if (TREE_CODE (field
) == FIELD_DECL
)
5869 HOST_WIDE_INT bitpos
= startbitpos
;
5871 if (DECL_SIZE (field
) != 0)
5873 if (integer_zerop (DECL_SIZE (field
)))
5876 if (host_integerp (bit_position (field
), 1))
5877 bitpos
+= int_bit_position (field
);
5880 /* ??? FIXME: else assume zero offset. */
5882 if (TREE_CODE (TREE_TYPE (field
)) == RECORD_TYPE
)
5883 function_arg_record_value_1 (TREE_TYPE (field
),
5887 else if ((FLOAT_TYPE_P (TREE_TYPE (field
))
5888 || TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
)
5893 if (parms
->intoffset
!= -1)
5895 unsigned int startbit
, endbit
;
5896 int intslots
, this_slotno
;
5898 startbit
= parms
->intoffset
& -BITS_PER_WORD
;
5899 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
5901 intslots
= (endbit
- startbit
) / BITS_PER_WORD
;
5902 this_slotno
= parms
->slotno
+ parms
->intoffset
5905 if (intslots
> 0 && intslots
> SPARC_INT_ARG_MAX
- this_slotno
)
5907 intslots
= MAX (0, SPARC_INT_ARG_MAX
- this_slotno
);
5908 /* We need to pass this field on the stack. */
5912 parms
->nregs
+= intslots
;
5913 parms
->intoffset
= -1;
5916 /* There's no need to check this_slotno < SPARC_FP_ARG MAX.
5917 If it wasn't true we wouldn't be here. */
5918 if (TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
5919 && DECL_MODE (field
) == BLKmode
)
5920 parms
->nregs
+= TYPE_VECTOR_SUBPARTS (TREE_TYPE (field
));
5921 else if (TREE_CODE (TREE_TYPE (field
)) == COMPLEX_TYPE
)
5928 if (parms
->intoffset
== -1)
5929 parms
->intoffset
= bitpos
;
5935 /* A subroutine of function_arg_record_value. Assign the bits of the
5936 structure between parms->intoffset and bitpos to integer registers. */
5939 function_arg_record_value_3 (HOST_WIDE_INT bitpos
,
5940 struct function_arg_record_value_parms
*parms
)
5942 enum machine_mode mode
;
5944 unsigned int startbit
, endbit
;
5945 int this_slotno
, intslots
, intoffset
;
5948 if (parms
->intoffset
== -1)
5951 intoffset
= parms
->intoffset
;
5952 parms
->intoffset
= -1;
5954 startbit
= intoffset
& -BITS_PER_WORD
;
5955 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
5956 intslots
= (endbit
- startbit
) / BITS_PER_WORD
;
5957 this_slotno
= parms
->slotno
+ intoffset
/ BITS_PER_WORD
;
5959 intslots
= MIN (intslots
, SPARC_INT_ARG_MAX
- this_slotno
);
5963 /* If this is the trailing part of a word, only load that much into
5964 the register. Otherwise load the whole register. Note that in
5965 the latter case we may pick up unwanted bits. It's not a problem
5966 at the moment but may wish to revisit. */
5968 if (intoffset
% BITS_PER_WORD
!= 0)
5969 mode
= smallest_mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
5974 intoffset
/= BITS_PER_UNIT
;
5977 regno
= parms
->regbase
+ this_slotno
;
5978 reg
= gen_rtx_REG (mode
, regno
);
5979 XVECEXP (parms
->ret
, 0, parms
->stack
+ parms
->nregs
)
5980 = gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
5983 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
5988 while (intslots
> 0);
5991 /* A subroutine of function_arg_record_value. Traverse the structure
5992 recursively and assign bits to floating point registers. Track which
5993 bits in between need integer registers; invoke function_arg_record_value_3
5994 to make that happen. */
5997 function_arg_record_value_2 (const_tree type
, HOST_WIDE_INT startbitpos
,
5998 struct function_arg_record_value_parms
*parms
,
6004 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
6006 if (TREE_CODE (field
) == FIELD_DECL
&& DECL_PACKED (field
))
6013 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
6015 if (TREE_CODE (field
) == FIELD_DECL
)
6017 HOST_WIDE_INT bitpos
= startbitpos
;
6019 if (DECL_SIZE (field
) != 0)
6021 if (integer_zerop (DECL_SIZE (field
)))
6024 if (host_integerp (bit_position (field
), 1))
6025 bitpos
+= int_bit_position (field
);
6028 /* ??? FIXME: else assume zero offset. */
6030 if (TREE_CODE (TREE_TYPE (field
)) == RECORD_TYPE
)
6031 function_arg_record_value_2 (TREE_TYPE (field
),
6035 else if ((FLOAT_TYPE_P (TREE_TYPE (field
))
6036 || TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
)
6041 int this_slotno
= parms
->slotno
+ bitpos
/ BITS_PER_WORD
;
6042 int regno
, nregs
, pos
;
6043 enum machine_mode mode
= DECL_MODE (field
);
6046 function_arg_record_value_3 (bitpos
, parms
);
6048 if (TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
6051 mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (field
)));
6052 nregs
= TYPE_VECTOR_SUBPARTS (TREE_TYPE (field
));
6054 else if (TREE_CODE (TREE_TYPE (field
)) == COMPLEX_TYPE
)
6056 mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (field
)));
6062 regno
= SPARC_FP_ARG_FIRST
+ this_slotno
* 2;
6063 if (GET_MODE_SIZE (mode
) <= 4 && (bitpos
& 32) != 0)
6065 reg
= gen_rtx_REG (mode
, regno
);
6066 pos
= bitpos
/ BITS_PER_UNIT
;
6067 XVECEXP (parms
->ret
, 0, parms
->stack
+ parms
->nregs
)
6068 = gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (pos
));
6072 regno
+= GET_MODE_SIZE (mode
) / 4;
6073 reg
= gen_rtx_REG (mode
, regno
);
6074 pos
+= GET_MODE_SIZE (mode
);
6075 XVECEXP (parms
->ret
, 0, parms
->stack
+ parms
->nregs
)
6076 = gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (pos
));
6082 if (parms
->intoffset
== -1)
6083 parms
->intoffset
= bitpos
;
6089 /* Used by function_arg and sparc_function_value_1 to implement the complex
6090 conventions of the 64-bit ABI for passing and returning structures.
6091 Return an expression valid as a return value for the FUNCTION_ARG
6092 and TARGET_FUNCTION_VALUE.
6094 TYPE is the data type of the argument (as a tree).
6095 This is null for libcalls where that information may
6097 MODE is the argument's machine mode.
6098 SLOTNO is the index number of the argument's slot in the parameter array.
6099 NAMED is nonzero if this argument is a named parameter
6100 (otherwise it is an extra parameter matching an ellipsis).
6101 REGBASE is the regno of the base register for the parameter array. */
6104 function_arg_record_value (const_tree type
, enum machine_mode mode
,
6105 int slotno
, int named
, int regbase
)
6107 HOST_WIDE_INT typesize
= int_size_in_bytes (type
);
6108 struct function_arg_record_value_parms parms
;
6111 parms
.ret
= NULL_RTX
;
6112 parms
.slotno
= slotno
;
6113 parms
.named
= named
;
6114 parms
.regbase
= regbase
;
6117 /* Compute how many registers we need. */
6119 parms
.intoffset
= 0;
6120 function_arg_record_value_1 (type
, 0, &parms
, false);
6122 /* Take into account pending integer fields. */
6123 if (parms
.intoffset
!= -1)
6125 unsigned int startbit
, endbit
;
6126 int intslots
, this_slotno
;
6128 startbit
= parms
.intoffset
& -BITS_PER_WORD
;
6129 endbit
= (typesize
*BITS_PER_UNIT
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
6130 intslots
= (endbit
- startbit
) / BITS_PER_WORD
;
6131 this_slotno
= slotno
+ parms
.intoffset
/ BITS_PER_WORD
;
6133 if (intslots
> 0 && intslots
> SPARC_INT_ARG_MAX
- this_slotno
)
6135 intslots
= MAX (0, SPARC_INT_ARG_MAX
- this_slotno
);
6136 /* We need to pass this field on the stack. */
6140 parms
.nregs
+= intslots
;
6142 nregs
= parms
.nregs
;
6144 /* Allocate the vector and handle some annoying special cases. */
6147 /* ??? Empty structure has no value? Duh? */
6150 /* Though there's nothing really to store, return a word register
6151 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6152 leads to breakage due to the fact that there are zero bytes to
6154 return gen_rtx_REG (mode
, regbase
);
6158 /* ??? C++ has structures with no fields, and yet a size. Give up
6159 for now and pass everything back in integer registers. */
6160 nregs
= (typesize
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
6162 if (nregs
+ slotno
> SPARC_INT_ARG_MAX
)
6163 nregs
= SPARC_INT_ARG_MAX
- slotno
;
6165 gcc_assert (nregs
!= 0);
6167 parms
.ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc (parms
.stack
+ nregs
));
6169 /* If at least one field must be passed on the stack, generate
6170 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6171 also be passed on the stack. We can't do much better because the
6172 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6173 of structures for which the fields passed exclusively in registers
6174 are not at the beginning of the structure. */
6176 XVECEXP (parms
.ret
, 0, 0)
6177 = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
6179 /* Fill in the entries. */
6181 parms
.intoffset
= 0;
6182 function_arg_record_value_2 (type
, 0, &parms
, false);
6183 function_arg_record_value_3 (typesize
* BITS_PER_UNIT
, &parms
);
6185 gcc_assert (parms
.nregs
== nregs
);
6190 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6191 of the 64-bit ABI for passing and returning unions.
6192 Return an expression valid as a return value for the FUNCTION_ARG
6193 and TARGET_FUNCTION_VALUE.
6195 SIZE is the size in bytes of the union.
6196 MODE is the argument's machine mode.
6197 REGNO is the hard register the union will be passed in. */
6200 function_arg_union_value (int size
, enum machine_mode mode
, int slotno
,
6203 int nwords
= ROUND_ADVANCE (size
), i
;
6206 /* See comment in previous function for empty structures. */
6208 return gen_rtx_REG (mode
, regno
);
6210 if (slotno
== SPARC_INT_ARG_MAX
- 1)
6213 regs
= gen_rtx_PARALLEL (mode
, rtvec_alloc (nwords
));
6215 for (i
= 0; i
< nwords
; i
++)
6217 /* Unions are passed left-justified. */
6218 XVECEXP (regs
, 0, i
)
6219 = gen_rtx_EXPR_LIST (VOIDmode
,
6220 gen_rtx_REG (word_mode
, regno
),
6221 GEN_INT (UNITS_PER_WORD
* i
));
6228 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6229 for passing and returning large (BLKmode) vectors.
6230 Return an expression valid as a return value for the FUNCTION_ARG
6231 and TARGET_FUNCTION_VALUE.
6233 SIZE is the size in bytes of the vector (at least 8 bytes).
6234 REGNO is the FP hard register the vector will be passed in. */
6237 function_arg_vector_value (int size
, int regno
)
6239 int i
, nregs
= size
/ 8;
6242 regs
= gen_rtx_PARALLEL (BLKmode
, rtvec_alloc (nregs
));
6244 for (i
= 0; i
< nregs
; i
++)
6246 XVECEXP (regs
, 0, i
)
6247 = gen_rtx_EXPR_LIST (VOIDmode
,
6248 gen_rtx_REG (DImode
, regno
+ 2*i
),
6255 /* Determine where to put an argument to a function.
6256 Value is zero to push the argument on the stack,
6257 or a hard register in which to store the argument.
6259 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6260 the preceding args and about the function being called.
6261 MODE is the argument's machine mode.
6262 TYPE is the data type of the argument (as a tree).
6263 This is null for libcalls where that information may
6265 NAMED is true if this argument is a named parameter
6266 (otherwise it is an extra parameter matching an ellipsis).
6267 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6268 TARGET_FUNCTION_INCOMING_ARG. */
static rtx
sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
                      const_tree type, bool named, bool incoming_p)
{
  const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  int regbase = (incoming_p
                 ? SPARC_INCOMING_INT_ARG_FIRST
                 : SPARC_OUTGOING_INT_ARG_FIRST);
  int slotno, regno, padding;
  enum mode_class mclass = GET_MODE_CLASS (mode);

  slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
                                &regno, &padding);
  if (slotno == -1)
    return 0;

  /* Vector types deserve special treatment because they are polymorphic wrt
     their mode, depending upon whether VIS instructions are enabled.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert ((TARGET_ARCH32 && size <= 8)
                  || (TARGET_ARCH64 && size <= 16));

      if (mode == BLKmode)
        return function_arg_vector_value (size,
                                          SPARC_FP_ARG_FIRST + 2*slotno);
      else
        mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH32)
    return gen_rtx_REG (mode, regno);

  /* Structures up to 16 bytes in size are passed in arg slots on the stack
     and are promoted to registers if possible.  */
  if (type && TREE_CODE (type) == RECORD_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      return function_arg_record_value (type, mode, slotno, named, regbase);
    }

  /* Unions up to 16 bytes in size are passed in integer registers.  */
  else if (type && TREE_CODE (type) == UNION_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      return function_arg_union_value (size, mode, slotno, regno);
    }

  /* v9 fp args in reg slots beyond the int reg slots get passed in regs
     but also have the slot allocated for them.
     If no prototype is in scope fp values in register slots get passed
     in two places, either fp regs and int regs or fp regs and memory.  */
  else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
           && SPARC_FP_REG_P (regno))
    {
      rtx reg = gen_rtx_REG (mode, regno);
      if (cum->prototype_p || cum->libcall_p)
        {
          /* "* 2" because fp reg numbers are recorded in 4 byte
             quantities.  */
#if 0
          /* ??? This will cause the value to be passed in the fp reg and
             in the stack.  When a prototype exists we want to pass the
             value in the reg but reserve space on the stack.  That's an
             optimization, and is deferred [for a bit].  */
          if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
            return gen_rtx_PARALLEL
              (mode,
               gen_rtvec (2,
                          gen_rtx_EXPR_LIST (VOIDmode,
                                             NULL_RTX, const0_rtx),
                          gen_rtx_EXPR_LIST (VOIDmode,
                                             reg, const0_rtx)));
          else
#else
          /* ??? It seems that passing back a register even when past
             the area declared by REG_PARM_STACK_SPACE will allocate
             space appropriately, and will not copy the data onto the
             stack, exactly as we desire.

             This is due to locate_and_pad_parm being called in
             expand_call whenever reg_parm_stack_space > 0, which
             while beneficial to our example here, would seem to be
             in error from what had been intended.  Ho hum...  -- r~ */
#endif
            return reg;
        }
      else
        {
          rtx v0, v1;

          if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
            {
              int intreg;

              /* On incoming, we don't need to know that the value
                 is passed in %f0 and %i0, and it confuses other parts
                 causing needless spillage even on the simplest cases.  */
              if (incoming_p)
                return reg;

              intreg = (SPARC_OUTGOING_INT_ARG_FIRST
                        + (regno - SPARC_FP_ARG_FIRST) / 2);

              v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
              v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
                                      const0_rtx);
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
            }
          else
            {
              v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
              v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
            }
        }
    }

  /* All other aggregate types are passed in an integer register in a mode
     corresponding to the size of the type.  */
  else if (type && AGGREGATE_TYPE_P (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
    }

  return gen_rtx_REG (mode, regno);
}
/* Handle the TARGET_FUNCTION_ARG target hook.  */

static rtx
sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
                    const_tree type, bool named)
{
  return sparc_function_arg_1 (cum, mode, type, named, false);
}
/* Handle the TARGET_FUNCTION_INCOMING_ARG target hook.  */

static rtx
sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
                             const_tree type, bool named)
{
  return sparc_function_arg_1 (cum, mode, type, named, true);
}
/* For sparc64, objects requiring 16 byte alignment are passed that way.  */

static unsigned int
sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  return ((TARGET_ARCH64
           && (GET_MODE_ALIGNMENT (mode) == 128
               || (type && TYPE_ALIGN (type) == 128)))
          ? 128
          : PARM_BOUNDARY);
}
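
/* Illustrative sketch (not part of the compiler): under the rule above, any
   type or mode with 128-bit alignment, e.g. "long double" or the made-up
   struct below, gets a 128-bit argument boundary on TARGET_ARCH64, while
   everything else falls back to PARM_BOUNDARY.  */
#if 0
struct aligned16 { double d[2]; } __attribute__ ((aligned (16)));
#endif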
/* For an arg passed partly in registers and partly in memory,
   this is the number of bytes of registers used.
   For args passed entirely in registers or entirely in memory, zero.

   Any arg that starts in the first 6 regs but won't entirely fit in them
   needs partial registers on v8.  On v9, structures with integer
   values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
   values that begin in the last fp reg [where "last fp reg" varies with the
   mode] will be split between that reg and memory.  */

static int
sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
                         tree type, bool named)
{
  int slotno, regno, padding;

  /* We pass false for incoming_p here; it doesn't matter.  */
  slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
                                false, &regno, &padding);

  if (slotno == -1)
    return 0;

  if (TARGET_ARCH32)
    {
      if ((slotno + (mode == BLKmode
                     ? ROUND_ADVANCE (int_size_in_bytes (type))
                     : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
          > SPARC_INT_ARG_MAX)
        return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
    }
  else
    {
      /* We are guaranteed by pass_by_reference that the size of the
         argument is not greater than 16 bytes, so we only need to return
         one word if the argument is partially passed in registers.  */

      if (type && AGGREGATE_TYPE_P (type))
        {
          int size = int_size_in_bytes (type);

          if (size > UNITS_PER_WORD
              && slotno == SPARC_INT_ARG_MAX - 1)
            return UNITS_PER_WORD;
        }
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
               || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
                   && ! (TARGET_FPU && named)))
        {
          /* The complex types are passed as packed types.  */
          if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
              && slotno == SPARC_INT_ARG_MAX - 1)
            return UNITS_PER_WORD;
        }
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
        {
          if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
              > SPARC_INT_ARG_MAX)
            return UNITS_PER_WORD;
        }
    }

  return 0;
}
/* Handle the TARGET_PASS_BY_REFERENCE target hook.
   Specify whether to pass the argument by reference.  */

static bool
sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                         enum machine_mode mode, const_tree type,
                         bool named ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
    /* Original SPARC 32-bit ABI says that structures and unions,
       and quad-precision floats are passed by reference.  For Pascal,
       also pass arrays by reference.  All other base types are passed
       in registers.

       Extended ABI (as implemented by the Sun compiler) says that all
       complex floats are passed by reference.  Pass complex integers
       in registers up to 8 bytes.  More generally, enforce the 2-word
       cap for passing arguments in registers.

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are passed like floats of the same size, that is in
       registers up to 8 bytes.  Pass all vector floats by reference
       like structures and unions.  */
    return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
            /* Catch CDImode, TFmode, DCmode and TCmode.  */
            || GET_MODE_SIZE (mode) > 8
            || (type
                && TREE_CODE (type) == VECTOR_TYPE
                && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
       smaller than 16 bytes are passed in registers, as well as
       all other base types.

       Extended ABI (as implemented by the Sun compiler) says that
       complex floats are passed in registers up to 16 bytes.  Pass
       all complex integers in registers up to 16 bytes.  More generally,
       enforce the 2-word cap for passing arguments in registers.

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are passed like floats of the same size, that is in
       registers (up to 16 bytes).  Pass all vector floats like structures
       and unions.  */
    return ((type
             && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
             && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
            /* Catch CTImode and TCmode.  */
            || GET_MODE_SIZE (mode) > 16);
}
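
/* Illustrative sketch (not part of the compiler): how the rules above apply
   to a few user-level types; the names are made up for the example.  */
#if 0
struct pair { int a, b; };       /* 8 bytes: by reference on 32-bit
                                    (aggregate), in registers on 64-bit.  */
struct big { long x[3]; };       /* 24 bytes: by reference on 64-bit too,
                                    since it exceeds 16 bytes.  */
_Complex double cd;              /* 16 bytes: by reference on 32-bit
                                    (GET_MODE_SIZE > 8), registers on 64-bit.  */
long double q;                   /* TFmode: by reference on 32-bit.  */
#endif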
/* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
   Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   TYPE is null for libcalls where that information may not be available.  */

static void
sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                            const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int regno, padding;

  /* We pass false for incoming_p here; it doesn't matter.  */
  function_arg_slotno (cum, mode, type, named, false, &regno, &padding);

  /* If argument requires leading padding, add it.  */
  cum->words += padding;

  if (TARGET_ARCH32)
    cum->words += (mode != BLKmode
                   ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
                   : ROUND_ADVANCE (int_size_in_bytes (type)));
  else
    {
      if (type && AGGREGATE_TYPE_P (type))
        {
          int size = int_size_in_bytes (type);

          if (size <= 8)
            ++cum->words;
          else if (size <= 16)
            cum->words += 2;
          else /* passed by reference */
            ++cum->words;
        }
      else
        cum->words += (mode != BLKmode
                       ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
                       : ROUND_ADVANCE (int_size_in_bytes (type)));
    }
}
/* Handle the FUNCTION_ARG_PADDING macro.
   For the 64 bit ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* Handle the TARGET_RETURN_IN_MEMORY target hook.
   Specify whether to return the return value in memory.  */

static bool
sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
    /* Original SPARC 32-bit ABI says that structures and unions,
       and quad-precision floats are returned in memory.  All other
       base types are returned in registers.

       Extended ABI (as implemented by the Sun compiler) says that
       all complex floats are returned in registers (8 FP registers
       at most for '_Complex long double').  Return all complex integers
       in registers (4 at most for '_Complex long long').

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are returned like floats of the same size, that is in
       registers up to 8 bytes and in memory otherwise.  Return all
       vector floats in memory like structures and unions; note that
       they always have BLKmode like the latter.  */
    return (TYPE_MODE (type) == BLKmode
            || TYPE_MODE (type) == TFmode
            || (TREE_CODE (type) == VECTOR_TYPE
                && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
       smaller than 32 bytes are returned in registers, as well as
       all other base types.

       Extended ABI (as implemented by the Sun compiler) says that all
       complex floats are returned in registers (8 FP registers at most
       for '_Complex long double').  Return all complex integers in
       registers (4 at most for '_Complex TItype').

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are returned like floats of the same size, that is in
       registers.  Return all vector floats like structures and unions;
       note that they always have BLKmode like the latter.  */
    return (TYPE_MODE (type) == BLKmode
            && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
}
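
/* Illustrative sketch (not part of the compiler): resulting return
   conventions for a few types under the rules above; names are made up
   for the example.  */
#if 0
struct s8  { int a, b; };        /* 32-bit: returned in memory (aggregate);
                                    64-bit: returned in registers.  */
struct s40 { long x[5]; };       /* 40 bytes: returned in memory on both.  */
long double ld;                  /* TFmode: returned in memory on 32-bit.  */
#endif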
/* Handle the TARGET_STRUCT_VALUE target hook.
   Return where to find the structure return value address.  */

static rtx
sparc_struct_value_rtx (tree fndecl, int incoming)
{
  if (TARGET_ARCH64)
    return 0;
  else
    {
      rtx mem;

      if (incoming)
        mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
                                                   STRUCT_VALUE_OFFSET));
      else
        mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
                                                   STRUCT_VALUE_OFFSET));

      /* Only follow the SPARC ABI for fixed-size structure returns.
         Variable size structure returns are handled per the normal
         procedures in GCC.  This is enabled by -mstd-struct-return.  */
      if (incoming == 2
          && sparc_std_struct_return
          && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
          && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
        {
          /* We must check and adjust the return address, as it is
             optional as to whether the return object is really
             provided or not.  */
          rtx ret_reg = gen_rtx_REG (Pmode, 31);
          rtx scratch = gen_reg_rtx (SImode);
          rtx endlab = gen_label_rtx ();

          /* Calculate the return object size.  */
          tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
          rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
          /* Construct a temporary return value.  */
          rtx temp_val
            = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);

          /* Implement SPARC 32-bit psABI callee return struct checking:

             Fetch the instruction where we will return to and see if
             it's an unimp instruction (the most significant 10 bits
             will be zero).  */
          emit_move_insn (scratch, gen_rtx_MEM (SImode,
                                                plus_constant (ret_reg, 8)));
          /* Assume the size is valid and pre-adjust.  */
          emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
          emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
                                   0, endlab);
          emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
          /* Write the address of the memory pointed to by temp_val into
             the memory pointed to by mem.  */
          emit_move_insn (mem, XEXP (temp_val, 0));
          emit_label (endlab);
        }

      return mem;
    }
}
/* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
   For v9, function return values are subject to the same rules as arguments,
   except that up to 32 bytes may be returned in registers.  */

static rtx
sparc_function_value_1 (const_tree type, enum machine_mode mode,
                        bool outgoing)
{
  /* Beware that the two values are swapped here wrt function_arg.  */
  int regbase = (outgoing
                 ? SPARC_INCOMING_INT_ARG_FIRST
                 : SPARC_OUTGOING_INT_ARG_FIRST);
  enum mode_class mclass = GET_MODE_CLASS (mode);
  int regno;

  /* Vector types deserve special treatment because they are polymorphic wrt
     their mode, depending upon whether VIS instructions are enabled.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert ((TARGET_ARCH32 && size <= 8)
                  || (TARGET_ARCH64 && size <= 32));

      if (mode == BLKmode)
        return function_arg_vector_value (size,
                                          SPARC_FP_ARG_FIRST);
      else
        mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH64 && type)
    {
      /* Structures up to 32 bytes in size are returned in registers.  */
      if (TREE_CODE (type) == RECORD_TYPE)
        {
          HOST_WIDE_INT size = int_size_in_bytes (type);
          gcc_assert (size <= 32);

          return function_arg_record_value (type, mode, 0, 1, regbase);
        }

      /* Unions up to 32 bytes in size are returned in integer registers.  */
      else if (TREE_CODE (type) == UNION_TYPE)
        {
          HOST_WIDE_INT size = int_size_in_bytes (type);
          gcc_assert (size <= 32);

          return function_arg_union_value (size, mode, 0, regbase);
        }

      /* Objects that require it are returned in FP registers.  */
      else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
        ;

      /* All other aggregate types are returned in an integer register in a
         mode corresponding to the size of the type.  */
      else if (AGGREGATE_TYPE_P (type))
        {
          /* All other aggregate types are passed in an integer register
             in a mode corresponding to the size of the type.  */
          HOST_WIDE_INT size = int_size_in_bytes (type);
          gcc_assert (size <= 32);

          mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);

          /* ??? We probably should have made the same ABI change in
             3.4.0 as the one we made for unions.  The latter was
             required by the SCD though, while the former is not
             specified, so we favored compatibility and efficiency.

             Now we're stuck for aggregates larger than 16 bytes,
             because OImode vanished in the meantime.  Let's not
             try to be unduly clever, and simply follow the ABI
             for unions in that case.  */
          if (mode == BLKmode)
            return function_arg_union_value (size, mode, 0, regbase);
          else
            mclass = MODE_INT;
        }

      /* We should only have pointer and integer types at this point.  This
         must match sparc_promote_function_mode.  */
      else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        mode = word_mode;
    }

  /* We should only have pointer and integer types at this point.  This must
     match sparc_promote_function_mode.  */
  else if (TARGET_ARCH32
           && mclass == MODE_INT
           && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    mode = word_mode;

  if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
    regno = SPARC_FP_ARG_FIRST;
  else
    regno = regbase;

  return gen_rtx_REG (mode, regno);
}
/* Handle TARGET_FUNCTION_VALUE.
   On the SPARC, the value is found in the first "output" register, but the
   called function leaves it in the first "input" register.  */

static rtx
sparc_function_value (const_tree valtype,
                      const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                      bool outgoing)
{
  return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
}
/* Handle TARGET_LIBCALL_VALUE.  */

static rtx
sparc_libcall_value (enum machine_mode mode,
                     const_rtx fun ATTRIBUTE_UNUSED)
{
  return sparc_function_value_1 (NULL_TREE, mode, false);
}
/* Handle FUNCTION_VALUE_REGNO_P.
   On the SPARC, the first "output" reg is used for integer values, and the
   first floating point register is used for floating point values.  */

static bool
sparc_function_value_regno_p (const unsigned int regno)
{
  return (regno == 8 || regno == 32);
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdarg or varargs is used and return the address of
   the first unnamed parameter.  */

static rtx
sparc_builtin_saveregs (void)
{
  int first_reg = crtl->args.info.words;
  rtx address;
  int regno;

  for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
    emit_move_insn (gen_rtx_MEM (word_mode,
                                 gen_rtx_PLUS (Pmode,
                                               frame_pointer_rtx,
                                               GEN_INT (FIRST_PARM_OFFSET (0)
                                                        + (UNITS_PER_WORD
                                                           * regno)))),
                    gen_rtx_REG (word_mode,
                                 SPARC_INCOMING_INT_ARG_FIRST + regno));

  address = gen_rtx_PLUS (Pmode,
                          frame_pointer_rtx,
                          GEN_INT (FIRST_PARM_OFFSET (0)
                                   + UNITS_PER_WORD * first_reg));

  return address;
}
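
/* A minimal sketch (not part of the compiler) of the address arithmetic
   above: the remaining incoming integer argument registers are dumped just
   above the frame pointer, and the address of the first unnamed word is
   what the hook hands back.  The helper name is made up for the example.  */
#if 0
static long
first_unnamed_arg_offset (long first_parm_offset, int first_reg,
                          int word_size)
{
  /* %fp + FIRST_PARM_OFFSET (0) + first_reg * UNITS_PER_WORD  */
  return first_parm_offset + (long) first_reg * word_size;
}
#endif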
/* Implement `va_start' for stdarg.  */

static void
sparc_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Implement `va_arg' for stdarg.  */

static tree
sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                       gimple_seq *post_p)
{
  HOST_WIDE_INT size, rsize, align;
  tree addr, incr;
  bool indirect;
  tree ptrtype = build_pointer_type (type);

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      indirect = true;
      size = rsize = UNITS_PER_WORD;
      align = 0;
    }
  else
    {
      indirect = false;
      size = int_size_in_bytes (type);
      rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
      align = 0;

      if (TARGET_ARCH64)
        {
          /* For SPARC64, objects requiring 16-byte alignment get it.  */
          if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
            align = 2 * UNITS_PER_WORD;

          /* SPARC-V9 ABI states that structures up to 16 bytes in size
             are left-justified in their slots.  */
          if (AGGREGATE_TYPE_P (type))
            {
              if (size == 0)
                size = rsize = UNITS_PER_WORD;
              else
                size = rsize;
            }
        }
    }

  incr = valist;
  if (align)
    {
      incr = fold_build_pointer_plus_hwi (incr, align - 1);
      incr = fold_convert (sizetype, incr);
      incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
                          size_int (-align));
      incr = fold_convert (ptr_type_node, incr);
    }

  gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
  addr = incr;

  if (BYTES_BIG_ENDIAN && size < rsize)
    addr = fold_build_pointer_plus_hwi (incr, rsize - size);

  if (indirect)
    {
      addr = fold_convert (build_pointer_type (ptrtype), addr);
      addr = build_va_arg_indirect_ref (addr);
    }

  /* If the address isn't aligned properly for the type, we need a temporary.
     FIXME: This is inefficient, usually we can do this in registers.  */
  else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
    {
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);
      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
                                   3, dest_addr, addr, size_int (rsize));
      TREE_ADDRESSABLE (tmp) = 1;
      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  else
    addr = fold_convert (ptrtype, addr);

  incr = fold_build_pointer_plus_hwi (incr, rsize);
  gimplify_assign (valist, incr, post_p);

  return build_va_arg_indirect_ref (addr);
}
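
/* A minimal sketch (not part of the compiler) of the pointer round-up built
   above for 16-byte-aligned types: the va_list pointer is bumped to the next
   ALIGN boundary before the argument slot is read.  The helper name is made
   up for the example.  */
#if 0
static char *
va_round_up (char *p, unsigned long align)
{
  /* Same computation as the fold_build_pointer_plus_hwi/BIT_AND_EXPR pair:
     (p + align - 1) & -align.  */
  return (char *) (((unsigned long) p + align - 1) & -align);
}
#endif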
/* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
   Specify whether the vector mode is supported by the hardware.  */

static bool
sparc_vector_mode_supported_p (enum machine_mode mode)
{
  return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
}
/* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook.  */

static enum machine_mode
sparc_preferred_simd_mode (enum machine_mode mode)
{
  if (TARGET_VIS)
    switch (mode)
      {
      case SImode:
        return V2SImode;
      case HImode:
        return V4HImode;
      case QImode:
        return V8QImode;

      default:;
      }

  return word_mode;
}
/* Return the string to output an unconditional branch to LABEL, which is
   the operand number of the label.

   DEST is the destination insn (i.e. the label), INSN is the source.  */

const char *
output_ubranch (rtx dest, int label, rtx insn)
{
  static char string[64];
  bool v9_form = false;
  char *p;

  if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
    {
      int delta = (INSN_ADDRESSES (INSN_UID (dest))
                   - INSN_ADDRESSES (INSN_UID (insn)));
      /* Leave some instructions for "slop".  */
      if (delta >= -260000 && delta < 260000)
        v9_form = true;
    }

  if (v9_form)
    strcpy (string, "ba%*,pt\t%%xcc, ");
  else
    strcpy (string, "b%*\t");

  p = strchr (string, '\0');
  *p++ = '%';
  *p++ = 'l';
  *p++ = '0' + label;
  *p++ = '%';
  *p++ = '(';
  *p = '\0';

  return string;
}
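
/* A minimal sketch (not part of the compiler) of the distance test used
   above.  Insn lengths on SPARC count 4-byte instruction words, so the
   address delta is in instructions; a v9 conditional branch reaches +-1MB,
   i.e. +-262144 instructions, and checking against +-260000 leaves a couple
   of thousand instructions of slop for later length changes.  */
#if 0
static int
fits_v9_branch_range (int delta)  /* delta in 4-byte instruction words */
{
  return delta >= -260000 && delta < 260000;
}
#endif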
7041 /* Return the string to output a conditional branch to LABEL, which is
7042 the operand number of the label. OP is the conditional expression.
7043 XEXP (OP, 0) is assumed to be a condition code register (integer or
7044 floating point) and its mode specifies what kind of comparison we made.
7046 DEST is the destination insn (i.e. the label), INSN is the source.
7048 REVERSED is nonzero if we should reverse the sense of the comparison.
7050 ANNUL is nonzero if we should generate an annulling branch. */
7053 output_cbranch (rtx op
, rtx dest
, int label
, int reversed
, int annul
,
7056 static char string
[64];
7057 enum rtx_code code
= GET_CODE (op
);
7058 rtx cc_reg
= XEXP (op
, 0);
7059 enum machine_mode mode
= GET_MODE (cc_reg
);
7060 const char *labelno
, *branch
;
7061 int spaces
= 8, far
;
7064 /* v9 branches are limited to +-1MB. If it is too far away,
7077 fbne,a,pn %fcc2, .LC29
7085 far
= TARGET_V9
&& (get_attr_length (insn
) >= 3);
7088 /* Reversal of FP compares takes care -- an ordered compare
7089 becomes an unordered compare and vice versa. */
7090 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
7091 code
= reverse_condition_maybe_unordered (code
);
7093 code
= reverse_condition (code
);
7096 /* Start by writing the branch condition. */
7097 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
7148 /* ??? !v9: FP branches cannot be preceded by another floating point
7149 insn. Because there is currently no concept of pre-delay slots,
7150 we can fix this only by always emitting a nop before a floating
7155 strcpy (string
, "nop\n\t");
7156 strcat (string
, branch
);
7169 if (mode
== CC_NOOVmode
|| mode
== CCX_NOOVmode
)
7181 if (mode
== CC_NOOVmode
|| mode
== CCX_NOOVmode
)
7202 strcpy (string
, branch
);
7204 spaces
-= strlen (branch
);
7205 p
= strchr (string
, '\0');
7207 /* Now add the annulling, the label, and a possible noop. */
7220 if (! far
&& insn
&& INSN_ADDRESSES_SET_P ())
7222 int delta
= (INSN_ADDRESSES (INSN_UID (dest
))
7223 - INSN_ADDRESSES (INSN_UID (insn
)));
7224 /* Leave some instructions for "slop". */
7225 if (delta
< -260000 || delta
>= 260000)
7229 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
7231 static char v9_fcc_labelno
[] = "%%fccX, ";
7232 /* Set the char indicating the number of the fcc reg to use. */
7233 v9_fcc_labelno
[5] = REGNO (cc_reg
) - SPARC_FIRST_V9_FCC_REG
+ '0';
7234 labelno
= v9_fcc_labelno
;
7237 gcc_assert (REGNO (cc_reg
) == SPARC_FCC_REG
);
7241 else if (mode
== CCXmode
|| mode
== CCX_NOOVmode
)
7243 labelno
= "%%xcc, ";
7248 labelno
= "%%icc, ";
7253 if (*labelno
&& insn
&& (note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
)))
7256 ((INTVAL (XEXP (note
, 0)) >= REG_BR_PROB_BASE
/ 2) ^ far
)
7269 strcpy (p
, labelno
);
7270 p
= strchr (p
, '\0');
7273 strcpy (p
, ".+12\n\t nop\n\tb\t");
7274 /* Skip the next insn if requested or
7275 if we know that it will be a nop. */
7276 if (annul
|| ! final_sequence
)
7290 /* Emit a library call comparison between floating point X and Y.
7291 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7292 Return the new operator to be used in the comparison sequence.
7294 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7295 values as arguments instead of the TFmode registers themselves,
7296 that's why we cannot call emit_float_lib_cmp. */
7299 sparc_emit_float_lib_cmp (rtx x
, rtx y
, enum rtx_code comparison
)
7302 rtx slot0
, slot1
, result
, tem
, tem2
, libfunc
;
7303 enum machine_mode mode
;
7304 enum rtx_code new_comparison
;
7309 qpfunc
= (TARGET_ARCH64
? "_Qp_feq" : "_Q_feq");
7313 qpfunc
= (TARGET_ARCH64
? "_Qp_fne" : "_Q_fne");
7317 qpfunc
= (TARGET_ARCH64
? "_Qp_fgt" : "_Q_fgt");
7321 qpfunc
= (TARGET_ARCH64
? "_Qp_fge" : "_Q_fge");
7325 qpfunc
= (TARGET_ARCH64
? "_Qp_flt" : "_Q_flt");
7329 qpfunc
= (TARGET_ARCH64
? "_Qp_fle" : "_Q_fle");
7340 qpfunc
= (TARGET_ARCH64
? "_Qp_cmp" : "_Q_cmp");
7353 slot0
= assign_stack_temp (TFmode
, GET_MODE_SIZE(TFmode
), 0);
7354 emit_move_insn (slot0
, x
);
7361 slot1
= assign_stack_temp (TFmode
, GET_MODE_SIZE(TFmode
), 0);
7362 emit_move_insn (slot1
, y
);
7365 libfunc
= gen_rtx_SYMBOL_REF (Pmode
, qpfunc
);
7366 emit_library_call (libfunc
, LCT_NORMAL
,
7368 XEXP (slot0
, 0), Pmode
,
7369 XEXP (slot1
, 0), Pmode
);
7374 libfunc
= gen_rtx_SYMBOL_REF (Pmode
, qpfunc
);
7375 emit_library_call (libfunc
, LCT_NORMAL
,
7377 x
, TFmode
, y
, TFmode
);
7382 /* Immediately move the result of the libcall into a pseudo
7383 register so reload doesn't clobber the value if it needs
7384 the return register for a spill reg. */
7385 result
= gen_reg_rtx (mode
);
7386 emit_move_insn (result
, hard_libcall_value (mode
, libfunc
));
7391 return gen_rtx_NE (VOIDmode
, result
, const0_rtx
);
7394 new_comparison
= (comparison
== UNORDERED
? EQ
: NE
);
7395 return gen_rtx_fmt_ee (new_comparison
, VOIDmode
, result
, GEN_INT(3));
7398 new_comparison
= (comparison
== UNGT
? GT
: NE
);
7399 return gen_rtx_fmt_ee (new_comparison
, VOIDmode
, result
, const1_rtx
);
7401 return gen_rtx_NE (VOIDmode
, result
, const2_rtx
);
7403 tem
= gen_reg_rtx (mode
);
7405 emit_insn (gen_andsi3 (tem
, result
, const1_rtx
));
7407 emit_insn (gen_anddi3 (tem
, result
, const1_rtx
));
7408 return gen_rtx_NE (VOIDmode
, tem
, const0_rtx
);
7411 tem
= gen_reg_rtx (mode
);
7413 emit_insn (gen_addsi3 (tem
, result
, const1_rtx
));
7415 emit_insn (gen_adddi3 (tem
, result
, const1_rtx
));
7416 tem2
= gen_reg_rtx (mode
);
7418 emit_insn (gen_andsi3 (tem2
, tem
, const2_rtx
));
7420 emit_insn (gen_anddi3 (tem2
, tem
, const2_rtx
));
7421 new_comparison
= (comparison
== UNEQ
? EQ
: NE
);
7422 return gen_rtx_fmt_ee (new_comparison
, VOIDmode
, tem2
, const0_rtx
);
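/* Illustrative note (not part of the compiler): the _Q_cmp/_Qp_cmp libcall
   used above returns an integer result code rather than setting condition
   codes; the manipulations of RESULT above assume the conventional encoding
   sketched below, which is why UNORDERED compares against 3, UNGT against 1,
   and the UNLT/UNEQ paths mask with 1 and 2.  */
#if 0
enum qcmp_result { QCMP_EQ = 0, QCMP_LT = 1, QCMP_GT = 2, QCMP_UNORD = 3 };
#endif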
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
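
/* A minimal sketch (not part of the compiler) of the conversion emitted
   above: a value with the top bit set is halved (keeping the low bit ORed
   back in so rounding is unaffected), converted as a signed number, and
   doubled.  The helper name is made up for the example.  */
#if 0
static double
u64_to_fp (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;
  return 2.0 * (double) (long long) ((x >> 1) | (x & 1));
}
#endif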
/* Generate an FP to unsigned DImode conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out, limit;

  out = operands[0];
  in = force_reg (mode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  limit = gen_reg_rtx (mode);
  f0 = gen_reg_rtx (mode);

  emit_move_insn (limit,
                  CONST_DOUBLE_FROM_REAL_VALUE (
                    REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode,
                          out,
                          gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
  emit_insn (gen_rtx_SET (VOIDmode,
                          i0,
                          gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  emit_insn (gen_movdi (i1, const1_rtx));
  emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  emit_insn (gen_xordi3 (out, i0, i1));

  emit_label (donelab);
}
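
/* A minimal sketch (not part of the compiler) of the reverse conversion
   above: values below 2^63 convert directly; larger ones have 2^63
   subtracted before the signed conversion and the top bit is patched back
   in with an XOR.  The helper name is made up for the example.  */
#if 0
static unsigned long long
fp_to_u64 (double x)
{
  const double two63 = 9223372036854775808.0;
  if (x < two63)
    return (unsigned long long) (long long) x;
  return (unsigned long long) (long long) (x - two63) ^ (1ULL << 63);
}
#endif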
7502 /* Return the string to output a conditional branch to LABEL, testing
7503 register REG. LABEL is the operand number of the label; REG is the
7504 operand number of the reg. OP is the conditional expression. The mode
7505 of REG says what kind of comparison we made.
7507 DEST is the destination insn (i.e. the label), INSN is the source.
7509 REVERSED is nonzero if we should reverse the sense of the comparison.
7511 ANNUL is nonzero if we should generate an annulling branch. */
7514 output_v9branch (rtx op
, rtx dest
, int reg
, int label
, int reversed
,
7515 int annul
, rtx insn
)
7517 static char string
[64];
7518 enum rtx_code code
= GET_CODE (op
);
7519 enum machine_mode mode
= GET_MODE (XEXP (op
, 0));
7524 /* branch on register are limited to +-128KB. If it is too far away,
7537 brgez,a,pn %o1, .LC29
7543 ba,pt %xcc, .LC29 */
7545 far
= get_attr_length (insn
) >= 3;
7547 /* If not floating-point or if EQ or NE, we can just reverse the code. */
7549 code
= reverse_condition (code
);
7551 /* Only 64 bit versions of these instructions exist. */
7552 gcc_assert (mode
== DImode
);
7554 /* Start by writing the branch condition. */
7559 strcpy (string
, "brnz");
7563 strcpy (string
, "brz");
7567 strcpy (string
, "brgez");
7571 strcpy (string
, "brlz");
7575 strcpy (string
, "brlez");
7579 strcpy (string
, "brgz");
7586 p
= strchr (string
, '\0');
7588 /* Now add the annulling, reg, label, and nop. */
7595 if (insn
&& (note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
)))
7598 ((INTVAL (XEXP (note
, 0)) >= REG_BR_PROB_BASE
/ 2) ^ far
)
7603 *p
= p
< string
+ 8 ? '\t' : ' ';
7611 int veryfar
= 1, delta
;
7613 if (INSN_ADDRESSES_SET_P ())
7615 delta
= (INSN_ADDRESSES (INSN_UID (dest
))
7616 - INSN_ADDRESSES (INSN_UID (insn
)));
7617 /* Leave some instructions for "slop". */
7618 if (delta
>= -260000 && delta
< 260000)
7622 strcpy (p
, ".+12\n\t nop\n\t");
7623 /* Skip the next insn if requested or
7624 if we know that it will be a nop. */
7625 if (annul
|| ! final_sequence
)
7635 strcpy (p
, "ba,pt\t%%xcc, ");
7649 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
7650 Such instructions cannot be used in the delay slot of return insn on v9.
7651 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7655 epilogue_renumber (register rtx
*where
, int test
)
7657 register const char *fmt
;
7659 register enum rtx_code code
;
7664 code
= GET_CODE (*where
);
7669 if (REGNO (*where
) >= 8 && REGNO (*where
) < 24) /* oX or lX */
7671 if (! test
&& REGNO (*where
) >= 24 && REGNO (*where
) < 32)
7672 *where
= gen_rtx_REG (GET_MODE (*where
), OUTGOING_REGNO (REGNO(*where
)));
7680 /* Do not replace the frame pointer with the stack pointer because
7681 it can cause the delayed instruction to load below the stack.
7682 This occurs when instructions like:
7684 (set (reg/i:SI 24 %i0)
7685 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7686 (const_int -20 [0xffffffec])) 0))
7688 are in the return delayed slot. */
7690 if (GET_CODE (XEXP (*where
, 0)) == REG
7691 && REGNO (XEXP (*where
, 0)) == HARD_FRAME_POINTER_REGNUM
7692 && (GET_CODE (XEXP (*where
, 1)) != CONST_INT
7693 || INTVAL (XEXP (*where
, 1)) < SPARC_STACK_BIAS
))
7698 if (SPARC_STACK_BIAS
7699 && GET_CODE (XEXP (*where
, 0)) == REG
7700 && REGNO (XEXP (*where
, 0)) == HARD_FRAME_POINTER_REGNUM
)
7708 fmt
= GET_RTX_FORMAT (code
);
7710 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
7715 for (j
= XVECLEN (*where
, i
) - 1; j
>= 0; j
--)
7716 if (epilogue_renumber (&(XVECEXP (*where
, i
, j
)), test
))
7719 else if (fmt
[i
] == 'e'
7720 && epilogue_renumber (&(XEXP (*where
, i
)), test
))
/* Leaf functions and non-leaf functions have different needs.  */

static const int
reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;

static const int
reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;

static const int *const reg_alloc_orders[] = {
  reg_leaf_alloc_order,
  reg_nonleaf_alloc_order};
void
order_regs_for_local_alloc (void)
{
  static int last_order_nonleaf = 1;

  if (df_regs_ever_live_p (15) != last_order_nonleaf)
    {
      last_order_nonleaf = !last_order_nonleaf;
      memcpy ((char *) reg_alloc_order,
              (const char *) reg_alloc_orders[last_order_nonleaf],
              FIRST_PSEUDO_REGISTER * sizeof (int));
    }
}
/* Return 1 if REG and MEM are legitimate enough to allow the various
   mem<-->reg splits to be run.  */

int
sparc_splitdi_legitimate (rtx reg, rtx mem)
{
  /* Punt if we are here by mistake.  */
  gcc_assert (reload_completed);

  /* We must have an offsettable memory reference.  */
  if (! offsettable_memref_p (mem))
    return 0;

  /* If we have legitimate args for ldd/std, we do not want
     the split to happen.  */
  if ((REGNO (reg) % 2) == 0
      && mem_min_alignment (mem, 8))
    return 0;

  return 1;
}
/* Like sparc_splitdi_legitimate but for REG <--> REG moves.  */

int
sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
{
  int regno1, regno2;

  if (GET_CODE (reg1) == SUBREG)
    reg1 = SUBREG_REG (reg1);
  if (GET_CODE (reg1) != REG)
    return 0;
  regno1 = REGNO (reg1);

  if (GET_CODE (reg2) == SUBREG)
    reg2 = SUBREG_REG (reg2);
  if (GET_CODE (reg2) != REG)
    return 0;
  regno2 = REGNO (reg2);

  if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
    return 1;

  if (TARGET_VIS3)
    {
      if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
          || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
        return 1;
    }

  return 0;
}
/* Return 1 if x and y are some kind of REG and they refer to
   different hard registers.  This test is guaranteed to be
   run after reload.  */

int
sparc_absnegfloat_split_legitimate (rtx x, rtx y)
{
  if (GET_CODE (x) != REG)
    return 0;
  if (GET_CODE (y) != REG)
    return 0;
  if (REGNO (x) == REGNO (y))
    return 0;
  return 1;
}
/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
   This makes them candidates for using ldd and std insns.

   Note reg1 and reg2 *must* be hard registers.  */

int
registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  if (REGNO (reg1) % 2 != 0)
    return 0;

  /* Integer ldd is deprecated in SPARC V9.  */
  if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if the addresses in mem1 and mem2 are suitable for use in
   an ldd or std insn.

   This can only happen when addr1 and addr2, the addresses in mem1
   and mem2, are consecutive memory locations (addr1 + 4 == addr2).
   addr1 must also be aligned on a 64-bit boundary.

   Also iff dependent_reg_rtx is not null it should not be used to
   compute the address for mem1, i.e. we cannot optimize a sequence
   like:
        ld [%o0], %o0
        ld [%o0 + 4], %o1
   to
        ldd [%o0], %o0
   nor:
        ld [%g3 + 4], %g3
        ld [%g3], %g2
   to
        ldd [%g3], %g2

   But, note that the transformation from:
        ld [%g2 + 4], %g3
        ld [%g2], %g2
   to
        ldd [%g2], %g2
   is perfectly fine.  Thus, the peephole2 patterns always pass us
   the destination register of the first load, never the second one.

   For stores we don't have a similar problem, so dependent_reg_rtx is
   NULL_RTX.  */

int
mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
{
  rtx addr1, addr2;
  unsigned int reg1;
  HOST_WIDE_INT offset1;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  /* MEM1 should be aligned on a 64-bit boundary.  */
  if (MEM_ALIGN (mem1) < 64)
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract a register number and offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
        return 0;

      reg1 = REGNO (XEXP (addr1, 0));
      /* The offset must be constant!  */
      if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
        return 0;
      offset1 = INTVAL (XEXP (addr1, 1));
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* Make sure the second address is a (mem (plus (reg) (const_int)).  */
  if (GET_CODE (addr2) != PLUS)
    return 0;

  if (GET_CODE (XEXP (addr2, 0)) != REG
      || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
    return 0;

  if (reg1 != REGNO (XEXP (addr2, 0)))
    return 0;

  if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
    return 0;

  /* The first offset must be evenly divisible by 8 to ensure the
     address is 64 bit aligned.  */
  if (offset1 % 8 != 0)
    return 0;

  /* The offset for the second addr must be 4 more than the first addr.  */
  if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for ldd and std
     instructions.  */
  return 1;
}
/* Return 1 if reg is a pseudo, or is the first register in
   a hard register pair.  This makes it suitable for use in
   ldd and std insns.  */

int
register_ok_for_ldd (rtx reg)
{
  /* We might have been passed a SUBREG.  */
  if (!REG_P (reg))
    return 0;

  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    return (REGNO (reg) % 2 == 0);

  return 1;
}
/* Return 1 if OP is a memory whose address is known to be
   aligned to 8-byte boundary, or a pseudo during reload.
   This makes it suitable for use in ldd and std insns.  */

int
memory_ok_for_ldd (rtx op)
{
  if (MEM_P (op))
    {
      /* In 64-bit mode, we assume that the address is word-aligned.  */
      if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
        return 0;

      if (! can_create_pseudo_p ()
          && !strict_memory_address_p (Pmode, XEXP (op, 0)))
        return 0;
    }
  else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
    {
      if (!(reload_in_progress && reg_renumber[REGNO (op)] < 0))
        return 0;
    }
  else
    return 0;

  return 1;
}
7991 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7994 sparc_print_operand_punct_valid_p (unsigned char code
)
8007 /* Implement TARGET_PRINT_OPERAND.
8008 Print operand X (an rtx) in assembler syntax to file FILE.
8009 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8010 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8013 sparc_print_operand (FILE *file
, rtx x
, int code
)
8018 /* Output an insn in a delay slot. */
8020 sparc_indent_opcode
= 1;
8022 fputs ("\n\t nop", file
);
8025 /* Output an annul flag if there's nothing for the delay slot and we
8026 are optimizing. This is always used with '(' below.
8027 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8028 this is a dbx bug. So, we only do this when optimizing.
8029 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8030 Always emit a nop in case the next instruction is a branch. */
8031 if (! final_sequence
&& (optimize
&& (int)sparc_cpu
< PROCESSOR_V9
))
8035 /* Output a 'nop' if there's nothing for the delay slot and we are
8036 not optimizing. This is always used with '*' above. */
8037 if (! final_sequence
&& ! (optimize
&& (int)sparc_cpu
< PROCESSOR_V9
))
8038 fputs ("\n\t nop", file
);
8039 else if (final_sequence
)
8040 sparc_indent_opcode
= 1;
8043 /* Output the right displacement from the saved PC on function return.
8044 The caller may have placed an "unimp" insn immediately after the call
8045 so we have to account for it. This insn is used in the 32-bit ABI
8046 when calling a function that returns a non zero-sized structure. The
8047 64-bit ABI doesn't have it. Be careful to have this test be the same
8048 as that for the call. The exception is when sparc_std_struct_return
8049 is enabled, the psABI is followed exactly and the adjustment is made
8050 by the code in sparc_struct_value_rtx. The call emitted is the same
8051 when sparc_std_struct_return is enabled. */
8053 && cfun
->returns_struct
8054 && !sparc_std_struct_return
8055 && DECL_SIZE (DECL_RESULT (current_function_decl
))
8056 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl
)))
8058 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl
))))
8064 /* Output the Embedded Medium/Anywhere code model base register. */
8065 fputs (EMBMEDANY_BASE_REG
, file
);
8068 /* Print some local dynamic TLS name. */
8069 assemble_name (file
, get_some_local_dynamic_name ());
8073 /* Adjust the operand to take into account a RESTORE operation. */
8074 if (GET_CODE (x
) == CONST_INT
)
8076 else if (GET_CODE (x
) != REG
)
8077 output_operand_lossage ("invalid %%Y operand");
8078 else if (REGNO (x
) < 8)
8079 fputs (reg_names
[REGNO (x
)], file
);
8080 else if (REGNO (x
) >= 24 && REGNO (x
) < 32)
8081 fputs (reg_names
[REGNO (x
)-16], file
);
8083 output_operand_lossage ("invalid %%Y operand");
8086 /* Print out the low order register name of a register pair. */
8087 if (WORDS_BIG_ENDIAN
)
8088 fputs (reg_names
[REGNO (x
)+1], file
);
8090 fputs (reg_names
[REGNO (x
)], file
);
8093 /* Print out the high order register name of a register pair. */
8094 if (WORDS_BIG_ENDIAN
)
8095 fputs (reg_names
[REGNO (x
)], file
);
8097 fputs (reg_names
[REGNO (x
)+1], file
);
8100 /* Print out the second register name of a register pair or quad.
8101 I.e., R (%o0) => %o1. */
8102 fputs (reg_names
[REGNO (x
)+1], file
);
8105 /* Print out the third register name of a register quad.
8106 I.e., S (%o0) => %o2. */
8107 fputs (reg_names
[REGNO (x
)+2], file
);
8110 /* Print out the fourth register name of a register quad.
8111 I.e., T (%o0) => %o3. */
8112 fputs (reg_names
[REGNO (x
)+3], file
);
8115 /* Print a condition code register. */
8116 if (REGNO (x
) == SPARC_ICC_REG
)
8118 /* We don't handle CC[X]_NOOVmode because they're not supposed
8120 if (GET_MODE (x
) == CCmode
)
8121 fputs ("%icc", file
);
8122 else if (GET_MODE (x
) == CCXmode
)
8123 fputs ("%xcc", file
);
8128 /* %fccN register */
8129 fputs (reg_names
[REGNO (x
)], file
);
8132 /* Print the operand's address only. */
8133 output_address (XEXP (x
, 0));
8136 /* In this case we need a register. Use %g0 if the
8137 operand is const0_rtx. */
8139 || (GET_MODE (x
) != VOIDmode
&& x
== CONST0_RTX (GET_MODE (x
))))
8141 fputs ("%g0", file
);
8148 switch (GET_CODE (x
))
8150 case IOR
: fputs ("or", file
); break;
8151 case AND
: fputs ("and", file
); break;
8152 case XOR
: fputs ("xor", file
); break;
8153 default: output_operand_lossage ("invalid %%A operand");
8158 switch (GET_CODE (x
))
8160 case IOR
: fputs ("orn", file
); break;
8161 case AND
: fputs ("andn", file
); break;
8162 case XOR
: fputs ("xnor", file
); break;
8163 default: output_operand_lossage ("invalid %%B operand");
8167 /* These are used by the conditional move instructions. */
8171 enum rtx_code rc
= GET_CODE (x
);
8175 enum machine_mode mode
= GET_MODE (XEXP (x
, 0));
8176 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
8177 rc
= reverse_condition_maybe_unordered (GET_CODE (x
));
8179 rc
= reverse_condition (GET_CODE (x
));
8183 case NE
: fputs ("ne", file
); break;
8184 case EQ
: fputs ("e", file
); break;
8185 case GE
: fputs ("ge", file
); break;
8186 case GT
: fputs ("g", file
); break;
8187 case LE
: fputs ("le", file
); break;
8188 case LT
: fputs ("l", file
); break;
8189 case GEU
: fputs ("geu", file
); break;
8190 case GTU
: fputs ("gu", file
); break;
8191 case LEU
: fputs ("leu", file
); break;
8192 case LTU
: fputs ("lu", file
); break;
8193 case LTGT
: fputs ("lg", file
); break;
8194 case UNORDERED
: fputs ("u", file
); break;
8195 case ORDERED
: fputs ("o", file
); break;
8196 case UNLT
: fputs ("ul", file
); break;
8197 case UNLE
: fputs ("ule", file
); break;
8198 case UNGT
: fputs ("ug", file
); break;
8199 case UNGE
: fputs ("uge", file
); break;
8200 case UNEQ
: fputs ("ue", file
); break;
8201 default: output_operand_lossage (code
== 'c'
8202 ? "invalid %%c operand"
8203 : "invalid %%C operand");
8208 /* These are used by the movr instruction pattern. */
8212 enum rtx_code rc
= (code
== 'd'
8213 ? reverse_condition (GET_CODE (x
))
8217 case NE
: fputs ("ne", file
); break;
8218 case EQ
: fputs ("e", file
); break;
8219 case GE
: fputs ("gez", file
); break;
8220 case LT
: fputs ("lz", file
); break;
8221 case LE
: fputs ("lez", file
); break;
8222 case GT
: fputs ("gz", file
); break;
8223 default: output_operand_lossage (code
== 'd'
8224 ? "invalid %%d operand"
8225 : "invalid %%D operand");
8232 /* Print a sign-extended character. */
8233 int i
= trunc_int_for_mode (INTVAL (x
), QImode
);
8234 fprintf (file
, "%d", i
);
8239 /* Operand must be a MEM; write its address. */
8240 if (GET_CODE (x
) != MEM
)
8241 output_operand_lossage ("invalid %%f operand");
8242 output_address (XEXP (x
, 0));
8247 /* Print a sign-extended 32-bit value. */
8249 if (GET_CODE(x
) == CONST_INT
)
8251 else if (GET_CODE(x
) == CONST_DOUBLE
)
8252 i
= CONST_DOUBLE_LOW (x
);
8255 output_operand_lossage ("invalid %%s operand");
8258 i
= trunc_int_for_mode (i
, SImode
);
8259 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, i
);
8264 /* Do nothing special. */
8268 /* Undocumented flag. */
8269 output_operand_lossage ("invalid operand output code");
8272 if (GET_CODE (x
) == REG
)
8273 fputs (reg_names
[REGNO (x
)], file
);
8274 else if (GET_CODE (x
) == MEM
)
8277 /* Poor Sun assembler doesn't understand absolute addressing. */
8278 if (CONSTANT_P (XEXP (x
, 0)))
8279 fputs ("%g0+", file
);
8280 output_address (XEXP (x
, 0));
8283 else if (GET_CODE (x
) == HIGH
)
8285 fputs ("%hi(", file
);
8286 output_addr_const (file
, XEXP (x
, 0));
8289 else if (GET_CODE (x
) == LO_SUM
)
8291 sparc_print_operand (file
, XEXP (x
, 0), 0);
8292 if (TARGET_CM_MEDMID
)
8293 fputs ("+%l44(", file
);
8295 fputs ("+%lo(", file
);
8296 output_addr_const (file
, XEXP (x
, 1));
8299 else if (GET_CODE (x
) == CONST_DOUBLE
8300 && (GET_MODE (x
) == VOIDmode
8301 || GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
))
8303 if (CONST_DOUBLE_HIGH (x
) == 0)
8304 fprintf (file
, "%u", (unsigned int) CONST_DOUBLE_LOW (x
));
8305 else if (CONST_DOUBLE_HIGH (x
) == -1
8306 && CONST_DOUBLE_LOW (x
) < 0)
8307 fprintf (file
, "%d", (int) CONST_DOUBLE_LOW (x
));
8309 output_operand_lossage ("long long constant not a valid immediate operand");
8311 else if (GET_CODE (x
) == CONST_DOUBLE
)
8312 output_operand_lossage ("floating point constant not a valid immediate operand");
8313 else { output_addr_const (file
, x
); }
8316 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8319 sparc_print_operand_address (FILE *file
, rtx x
)
8321 register rtx base
, index
= 0;
8323 register rtx addr
= x
;
8326 fputs (reg_names
[REGNO (addr
)], file
);
8327 else if (GET_CODE (addr
) == PLUS
)
8329 if (CONST_INT_P (XEXP (addr
, 0)))
8330 offset
= INTVAL (XEXP (addr
, 0)), base
= XEXP (addr
, 1);
8331 else if (CONST_INT_P (XEXP (addr
, 1)))
8332 offset
= INTVAL (XEXP (addr
, 1)), base
= XEXP (addr
, 0);
8334 base
= XEXP (addr
, 0), index
= XEXP (addr
, 1);
8335 if (GET_CODE (base
) == LO_SUM
)
8337 gcc_assert (USE_AS_OFFSETABLE_LO10
8339 && ! TARGET_CM_MEDMID
);
8340 output_operand (XEXP (base
, 0), 0);
8341 fputs ("+%lo(", file
);
8342 output_address (XEXP (base
, 1));
8343 fprintf (file
, ")+%d", offset
);
8347 fputs (reg_names
[REGNO (base
)], file
);
8349 fprintf (file
, "%+d", offset
);
8350 else if (REG_P (index
))
8351 fprintf (file
, "+%s", reg_names
[REGNO (index
)]);
8352 else if (GET_CODE (index
) == SYMBOL_REF
8353 || GET_CODE (index
) == LABEL_REF
8354 || GET_CODE (index
) == CONST
)
8355 fputc ('+', file
), output_addr_const (file
, index
);
8356 else gcc_unreachable ();
8359 else if (GET_CODE (addr
) == MINUS
8360 && GET_CODE (XEXP (addr
, 1)) == LABEL_REF
)
8362 output_addr_const (file
, XEXP (addr
, 0));
8364 output_addr_const (file
, XEXP (addr
, 1));
8365 fputs ("-.)", file
);
8367 else if (GET_CODE (addr
) == LO_SUM
)
8369 output_operand (XEXP (addr
, 0), 0);
8370 if (TARGET_CM_MEDMID
)
8371 fputs ("+%l44(", file
);
8373 fputs ("+%lo(", file
);
8374 output_address (XEXP (addr
, 1));
8378 && GET_CODE (addr
) == CONST
8379 && GET_CODE (XEXP (addr
, 0)) == MINUS
8380 && GET_CODE (XEXP (XEXP (addr
, 0), 1)) == CONST
8381 && GET_CODE (XEXP (XEXP (XEXP (addr
, 0), 1), 0)) == MINUS
8382 && XEXP (XEXP (XEXP (XEXP (addr
, 0), 1), 0), 1) == pc_rtx
)
8384 addr
= XEXP (addr
, 0);
8385 output_addr_const (file
, XEXP (addr
, 0));
8386 /* Group the args of the second CONST in parenthesis. */
8388 /* Skip past the second CONST--it does nothing for us. */
8389 output_addr_const (file
, XEXP (XEXP (addr
, 1), 0));
8390 /* Close the parenthesis. */
8395 output_addr_const (file
, addr
);
/* Target hook for assembling integer objects.  The sparc version has
   special handling for aligned DI-mode objects.  */

static bool
sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  /* ??? We only output .xword's for symbols and only then in environments
     where the assembler can handle them.  */
  if (aligned_p && size == 8
      && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
    {
      if (TARGET_V9)
        {
          assemble_integer_with_op ("\t.xword\t", x);
          return true;
        }
      else
        {
          assemble_aligned_integer (4, const0_rtx);
          assemble_aligned_integer (4, x);
          return true;
        }
    }

  return default_assemble_integer (x, size, aligned_p);
}
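
/* Illustrative note (not part of the compiler): for an aligned 8-byte
   symbolic value this hook emits a single

	.xword	sym

   directive when the assembler can handle it, and otherwise falls back to
   an aligned zero word followed by the value, as in the code above.  */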
/* Return the value of a code used in the .proc pseudo-op that says
   what kind of result this function returns.  For non-C types, we pick
   the closest C type.  */

#ifndef SHORT_TYPE_SIZE
#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
#endif

#ifndef INT_TYPE_SIZE
#define INT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_LONG_TYPE_SIZE
#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef FLOAT_TYPE_SIZE
#define FLOAT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef DOUBLE_TYPE_SIZE
#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif
8458 sparc_type_code (register tree type
)
8460 register unsigned long qualifiers
= 0;
8461 register unsigned shift
;
8463 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8464 setting more, since some assemblers will give an error for this. Also,
8465 we must be careful to avoid shifts of 32 bits or more to avoid getting
8466 unpredictable results. */
8468 for (shift
= 6; shift
< 30; shift
+= 2, type
= TREE_TYPE (type
))
8470 switch (TREE_CODE (type
))
8476 qualifiers
|= (3 << shift
);
8481 qualifiers
|= (2 << shift
);
8485 case REFERENCE_TYPE
:
8487 qualifiers
|= (1 << shift
);
8491 return (qualifiers
| 8);
8494 case QUAL_UNION_TYPE
:
8495 return (qualifiers
| 9);
8498 return (qualifiers
| 10);
8501 return (qualifiers
| 16);
8504 /* If this is a range type, consider it to be the underlying
8506 if (TREE_TYPE (type
) != 0)
8509 /* Carefully distinguish all the standard types of C,
8510 without messing up if the language is not C. We do this by
8511 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8512 look at both the names and the above fields, but that's redundant.
8513 Any type whose size is between two C types will be considered
8514 to be the wider of the two types. Also, we do not have a
8515 special code to use for "long long", so anything wider than
8516 long is treated the same. Note that we can't distinguish
8517 between "int" and "long" in this code if they are the same
8518 size, but that's fine, since neither can the assembler. */
8520 if (TYPE_PRECISION (type
) <= CHAR_TYPE_SIZE
)
8521 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 12 : 2));
8523 else if (TYPE_PRECISION (type
) <= SHORT_TYPE_SIZE
)
8524 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 13 : 3));
8526 else if (TYPE_PRECISION (type
) <= INT_TYPE_SIZE
)
8527 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 14 : 4));
8530 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 15 : 5));
8533 /* If this is a range type, consider it to be the underlying
8535 if (TREE_TYPE (type
) != 0)
8538 /* Carefully distinguish all the standard types of C,
8539 without messing up if the language is not C. */
8541 if (TYPE_PRECISION (type
) == FLOAT_TYPE_SIZE
)
8542 return (qualifiers
| 6);
8545 return (qualifiers
| 7);
8547 case COMPLEX_TYPE
: /* GNU Fortran COMPLEX type. */
8548 /* ??? We need to distinguish between double and float complex types,
8549 but I don't know how yet because I can't reach this code from
8550 existing front-ends. */
8551 return (qualifiers
| 7); /* Who knows? */
8554 case BOOLEAN_TYPE
: /* Boolean truth value type. */
8560 gcc_unreachable (); /* Not a type! */
/* Nested function support.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
   (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
   (to store insns).  This is a bit excessive.  Perhaps a different
   mechanism would be better here.

   Emit enough FLUSH insns to synchronize the data and instruction caches.  */

static void
sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 32-bit trampoline:

	sethi	%hi(fn), %g1
	sethi	%hi(static), %g2
	jmp	%g1+%lo(fn)
	or	%g2, %lo(static), %g2

    SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
    JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
   */

  emit_move_insn
    (adjust_address (m_tramp, SImode, 0),
     expand_binop (SImode, ior_optab,
                   expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
                   GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 4),
     expand_binop (SImode, ior_optab,
                   expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
                   GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 8),
     expand_binop (SImode, ior_optab,
                   expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
                   GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 12),
     expand_binop (SImode, ior_optab,
                   expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
                   GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  /* On UltraSPARC a flush flushes an entire cache line.  The trampoline is
     aligned on a 16 byte boundary so one flush clears it all.  */
  emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA
      && sparc_cpu != PROCESSOR_NIAGARA2
      && sparc_cpu != PROCESSOR_NIAGARA3
      && sparc_cpu != PROCESSOR_NIAGARA4)
    emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
8643 values as "immediate" data out of the trampoline. It's also easier since
8644 we can read the PC without clobbering a register. */
8647 sparc64_initialize_trampoline (rtx m_tramp
, rtx fnaddr
, rtx cxt
)
8649 /* SPARC 64-bit trampoline:
8658 emit_move_insn (adjust_address (m_tramp
, SImode
, 0),
8659 GEN_INT (trunc_int_for_mode (0x83414000, SImode
)));
8660 emit_move_insn (adjust_address (m_tramp
, SImode
, 4),
8661 GEN_INT (trunc_int_for_mode (0xca586018, SImode
)));
8662 emit_move_insn (adjust_address (m_tramp
, SImode
, 8),
8663 GEN_INT (trunc_int_for_mode (0x81c14000, SImode
)));
8664 emit_move_insn (adjust_address (m_tramp
, SImode
, 12),
8665 GEN_INT (trunc_int_for_mode (0xca586010, SImode
)));
8666 emit_move_insn (adjust_address (m_tramp
, DImode
, 16), cxt
);
8667 emit_move_insn (adjust_address (m_tramp
, DImode
, 24), fnaddr
);
8668 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp
, DImode
, 0))));
8670 if (sparc_cpu
!= PROCESSOR_ULTRASPARC
8671 && sparc_cpu
!= PROCESSOR_ULTRASPARC3
8672 && sparc_cpu
!= PROCESSOR_NIAGARA
8673 && sparc_cpu
!= PROCESSOR_NIAGARA2
8674 && sparc_cpu
!= PROCESSOR_NIAGARA3
8675 && sparc_cpu
!= PROCESSOR_NIAGARA4
)
8676 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp
, DImode
, 8))));
8678 /* Call __enable_execute_stack after writing onto the stack to make sure
8679 the stack address is accessible. */
8680 #ifdef HAVE_ENABLE_EXECUTE_STACK
8681 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__enable_execute_stack"),
8682 LCT_NORMAL
, VOIDmode
, 1, XEXP (m_tramp
, 0), Pmode
);
/* Worker for TARGET_TRAMPOLINE_INIT.  */

static void
sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
  cxt = force_reg (Pmode, cxt);
  if (TARGET_ARCH64)
    sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
  else
    sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type;

  if (! recog_memoized (insn))
    return 0;

  insn_type = get_attr_type (insn);

  if (REG_NOTE_KIND (link) == 0)
    {
      /* Data dependency; DEP_INSN writes a register that INSN reads some
         cycles later.  */

      /* if a load, then the dependence must be on the memory address;
         add an extra "cycle".  Note that the cost could be two cycles
         if the reg was written late in an instruction group; we cannot tell
         here.  */
      if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
        return cost + 3;

      /* Get the delay only if the address of the store is the dependence.  */
      if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
        {
          rtx pat = PATTERN(insn);
          rtx dep_pat = PATTERN (dep_insn);

          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            return cost;  /* This should not happen!  */

          /* The dependency between the two instructions was on the data that
             is being stored.  Assume that this implies that the address of the
             store is not dependent.  */
          if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
            return cost;

          return cost + 3;  /* An approximation.  */
        }

      /* A shift instruction cannot receive its data from an instruction
         in the same cycle; add a one cycle penalty.  */
      if (insn_type == TYPE_SHIFT)
        return cost + 3;   /* Split before cascade into shift.  */
    }
  else
    {
      /* Anti- or output- dependency; DEP_INSN reads/writes a register that
         INSN writes some cycles later.  */

      /* These are only significant for the fpu unit; writing a fp reg before
         the fpu has finished with it stalls the processor.  */

      /* Reusing an integer register causes no problems.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
        return 0;
    }

  return cost;
}
8764 hypersparc_adjust_cost (rtx insn
, rtx link
, rtx dep_insn
, int cost
)
8766 enum attr_type insn_type
, dep_type
;
8767 rtx pat
= PATTERN(insn
);
8768 rtx dep_pat
= PATTERN (dep_insn
);
8770 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
8773 insn_type
= get_attr_type (insn
);
8774 dep_type
= get_attr_type (dep_insn
);
8776 switch (REG_NOTE_KIND (link
))
8779 /* Data dependency; DEP_INSN writes a register that INSN reads some
8786 /* Get the delay iff the address of the store is the dependence. */
8787 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
8790 if (rtx_equal_p (SET_DEST (dep_pat
), SET_SRC (pat
)))
8797 /* If a load, then the dependence must be on the memory address. If
8798 the addresses aren't equal, then it might be a false dependency */
8799 if (dep_type
== TYPE_STORE
|| dep_type
== TYPE_FPSTORE
)
8801 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
8802 || GET_CODE (SET_DEST (dep_pat
)) != MEM
8803 || GET_CODE (SET_SRC (pat
)) != MEM
8804 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat
), 0),
8805 XEXP (SET_SRC (pat
), 0)))
8813 /* Compare to branch latency is 0. There is no benefit from
8814 separating compare and branch. */
8815 if (dep_type
== TYPE_COMPARE
)
8817 /* Floating point compare to branch latency is less than
8818 compare to conditional move. */
8819 if (dep_type
== TYPE_FPCMP
)
8828 /* Anti-dependencies only penalize the fpu unit. */
8829 if (insn_type
== TYPE_IALU
|| insn_type
== TYPE_SHIFT
)
static int
sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_SUPERSPARC:
      cost = supersparc_adjust_cost (insn, link, dep, cost);
      break;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      cost = hypersparc_adjust_cost (insn, link, dep, cost);
      break;
    default:
      break;
    }
  return cost;
}

static void
sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                  int sched_verbose ATTRIBUTE_UNUSED,
                  int max_ready ATTRIBUTE_UNUSED)
{
}
static int
sparc_use_sched_lookahead (void)
{
  if (sparc_cpu == PROCESSOR_NIAGARA
      || sparc_cpu == PROCESSOR_NIAGARA2
      || sparc_cpu == PROCESSOR_NIAGARA3
      || sparc_cpu == PROCESSOR_NIAGARA4)
    return 0;
  if (sparc_cpu == PROCESSOR_ULTRASPARC
      || sparc_cpu == PROCESSOR_ULTRASPARC3)
    return 4;
  if ((1 << sparc_cpu) &
      ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
       (1 << PROCESSOR_SPARCLITE86X)))
    return 3;
  return 0;
}

static int
sparc_issue_rate (void)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_NIAGARA:
    case PROCESSOR_NIAGARA2:
    case PROCESSOR_NIAGARA3:
    case PROCESSOR_NIAGARA4:
    default:
      return 1;
    case PROCESSOR_V9:
      /* Assume V9 processors are capable of at least dual-issue.  */
      return 2;
    case PROCESSOR_SUPERSPARC:
      return 3;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      return 2;
    case PROCESSOR_ULTRASPARC:
    case PROCESSOR_ULTRASPARC3:
      return 4;
    }
}
static int
set_extends (rtx insn)
{
  register rtx pat = PATTERN (insn);

  switch (GET_CODE (SET_SRC (pat)))
    {
      /* Load and some shift instructions zero extend.  */
    case MEM:
    case ZERO_EXTEND:
      /* sethi clears the high bits */
    case HIGH:
      /* LO_SUM is used with sethi.  sethi cleared the high
         bits and the values used with lo_sum are positive */
    case LO_SUM:
      /* Store flag stores 0 or 1 */
    case LT: case LTU:
    case GT: case GTU:
    case LE: case LEU:
    case GE: case GEU:
    case EQ:
    case NE:
      return 1;
    case AND:
      {
        rtx op0 = XEXP (SET_SRC (pat), 0);
        rtx op1 = XEXP (SET_SRC (pat), 1);
        if (GET_CODE (op1) == CONST_INT)
          return INTVAL (op1) >= 0;
        if (GET_CODE (op0) != REG)
          return 0;
        if (sparc_check_64 (op0, insn) == 1)
          return 1;
        return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case IOR:
    case XOR:
      {
        rtx op0 = XEXP (SET_SRC (pat), 0);
        rtx op1 = XEXP (SET_SRC (pat), 1);
        if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
          return 0;
        if (GET_CODE (op1) == CONST_INT)
          return INTVAL (op1) >= 0;
        return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case LSHIFTRT:
      return GET_MODE (SET_SRC (pat)) == SImode;
      /* Positive integers leave the high bits zero.  */
    case CONST_DOUBLE:
      return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
    case CONST_INT:
      return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
    case ASHIFTRT:
    case SIGN_EXTEND:
      return - (GET_MODE (SET_SRC (pat)) == SImode);
    case REG:
      return sparc_check_64 (SET_SRC (pat), insn);
    default:
      return 0;
    }
}
8970 /* We _ought_ to have only one kind per function, but... */
8971 static GTY(()) rtx sparc_addr_diff_list
;
8972 static GTY(()) rtx sparc_addr_list
;
8975 sparc_defer_case_vector (rtx lab
, rtx vec
, int diff
)
8977 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
8979 sparc_addr_diff_list
8980 = gen_rtx_EXPR_LIST (VOIDmode
, vec
, sparc_addr_diff_list
);
8982 sparc_addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
, sparc_addr_list
);
8986 sparc_output_addr_vec (rtx vec
)
8988 rtx lab
= XEXP (vec
, 0), body
= XEXP (vec
, 1);
8989 int idx
, vlen
= XVECLEN (body
, 0);
8991 #ifdef ASM_OUTPUT_ADDR_VEC_START
8992 ASM_OUTPUT_ADDR_VEC_START (asm_out_file
);
8995 #ifdef ASM_OUTPUT_CASE_LABEL
8996 ASM_OUTPUT_CASE_LABEL (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
),
8999 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
));
9002 for (idx
= 0; idx
< vlen
; idx
++)
9004 ASM_OUTPUT_ADDR_VEC_ELT
9005 (asm_out_file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
9008 #ifdef ASM_OUTPUT_ADDR_VEC_END
9009 ASM_OUTPUT_ADDR_VEC_END (asm_out_file
);
9014 sparc_output_addr_diff_vec (rtx vec
)
9016 rtx lab
= XEXP (vec
, 0), body
= XEXP (vec
, 1);
9017 rtx base
= XEXP (XEXP (body
, 0), 0);
9018 int idx
, vlen
= XVECLEN (body
, 1);
9020 #ifdef ASM_OUTPUT_ADDR_VEC_START
9021 ASM_OUTPUT_ADDR_VEC_START (asm_out_file
);
9024 #ifdef ASM_OUTPUT_CASE_LABEL
9025 ASM_OUTPUT_CASE_LABEL (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
),
9028 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
));
9031 for (idx
= 0; idx
< vlen
; idx
++)
9033 ASM_OUTPUT_ADDR_DIFF_ELT
9036 CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 1, idx
), 0)),
9037 CODE_LABEL_NUMBER (base
));
9040 #ifdef ASM_OUTPUT_ADDR_VEC_END
9041 ASM_OUTPUT_ADDR_VEC_END (asm_out_file
);
9046 sparc_output_deferred_case_vectors (void)
9051 if (sparc_addr_list
== NULL_RTX
9052 && sparc_addr_diff_list
== NULL_RTX
)
9055 /* Align to cache line in the function's code section. */
9056 switch_to_section (current_function_section ());
9058 align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
9060 ASM_OUTPUT_ALIGN (asm_out_file
, align
);
9062 for (t
= sparc_addr_list
; t
; t
= XEXP (t
, 1))
9063 sparc_output_addr_vec (XEXP (t
, 0));
9064 for (t
= sparc_addr_diff_list
; t
; t
= XEXP (t
, 1))
9065 sparc_output_addr_diff_vec (XEXP (t
, 0));
9067 sparc_addr_list
= sparc_addr_diff_list
= NULL_RTX
;
/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
   unknown.  Return 1 if the high bits are zero, -1 if the register is
   sign extended.  */

int
sparc_check_64 (rtx x, rtx insn)
{
  /* If a register is set only once it is safe to ignore insns this
     code does not know how to handle.  The loop will either recognize
     the single set and return the correct value or fail to recognize
     and return 0.  */
  int set_once = 0;
  rtx y = x;

  gcc_assert (GET_CODE (x) == REG);

  if (GET_MODE (x) == DImode)
    y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);

  if (flag_expensive_optimizations
      && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
    set_once = 1;

  if (insn == 0)
    {
      if (set_once)
        insn = get_last_insn_anywhere ();
      else
        return 0;
    }

  while ((insn = PREV_INSN (insn)))
    {
      switch (GET_CODE (insn))
        {
        case JUMP_INSN:
        case NOTE:
          break;
        case CODE_LABEL:
        case CALL_INSN:
        default:
          if (! set_once)
            return 0;
          break;
        case INSN:
          {
            rtx pat = PATTERN (insn);
            if (GET_CODE (pat) != SET)
              return 0;
            if (rtx_equal_p (x, SET_DEST (pat)))
              return set_extends (insn);
            if (y && rtx_equal_p (y, SET_DEST (pat)))
              return set_extends (insn);
            if (reg_overlap_mentioned_p (SET_DEST (pat), y))
              return 0;
          }
        }
    }

  return 0;
}
/* Returns assembly code to perform a DImode shift using
   a 64-bit global or out register on SPARC-V8+.  */

const char *
output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
{
  static char asm_code[60];

  /* The scratch register is only required when the destination
     register is not a 64-bit global or out register.  */
  if (which_alternative != 2)
    operands[3] = operands[0];

  /* We can only shift by constants <= 63. */
  if (GET_CODE (operands[2]) == CONST_INT)
    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);

  if (GET_CODE (operands[1]) == CONST_INT)
    {
      output_asm_insn ("mov\t%1, %3", operands);
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      if (sparc_check_64 (operands[1], insn) <= 0)
        output_asm_insn ("srl\t%L1, 0, %L1", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
    }

  strcpy (asm_code, opcode);

  if (which_alternative != 2)
    return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
  else
    return strcat (asm_code,
                   "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
}
/* Output rtl to increment the profiler label LABELNO
   for profiling a function entry.  */

void
sparc_profile_hook (int labelno)
{
  char buf[32];
  rtx lab, fun;

  fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
  if (NO_PROFILE_COUNTERS)
    {
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
    }
  else
    {
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
    }
}
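/* For context (illustrative, not part of the port): compiling with -pg
   makes every function prologue call the routine named by MCOUNT_FUNCTION
   through the hook above, with a per-function "LP" counter label unless
   NO_PROFILE_COUNTERS is set.  */
#if 0
/* gcc -pg -O2 -S profiled.c */
extern int work (int);

int
driver (int x)
{
  return work (x) + 1;   /* the entry code contains the mcount call */
}
#endif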
#ifdef TARGET_SOLARIS
/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
                                     tree decl ATTRIBUTE_UNUSED)
{
  if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
    {
      solaris_elf_asm_comdat_section (name, flags, decl);
      return;
    }

  fprintf (asm_out_file, "\t.section\t\"%s\"", name);

  if (!(flags & SECTION_DEBUG))
    fputs (",#alloc", asm_out_file);
  if (flags & SECTION_WRITE)
    fputs (",#write", asm_out_file);
  if (flags & SECTION_TLS)
    fputs (",#tls", asm_out_file);
  if (flags & SECTION_CODE)
    fputs (",#execinstr", asm_out_file);

  /* ??? Handle SECTION_BSS.  */

  fputc ('\n', asm_out_file);
}
#endif /* TARGET_SOLARIS */
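/* A user-level sketch of what lands in the hook above (illustrative only):
   named sections requested with the section attribute are emitted with the
   Solaris #alloc/#write/#tls/#execinstr markers derived from FLAGS.  */
#if 0
int counters[16] __attribute__ ((section ("my_data")));              /* #alloc,#write */
const char banner[] __attribute__ ((section ("my_rodata"))) = "hi";  /* #alloc */
#endif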
/* We do not allow indirect calls to be optimized into sibling calls.

   We cannot use sibling calls when delayed branches are disabled
   because they will likely require the call delay slot to be filled.

   Also, on SPARC 32-bit we cannot emit a sibling call when the
   current function returns a structure.  This is because the "unimp
   after call" convention would cause the callee to return to the
   wrong place.  The generic code already disallows cases where the
   function being called returns a structure.

   It may seem strange how this last case could occur.  Usually there
   is code after the call which jumps to epilogue code which dumps the
   return value into the struct return area.  That ought to invalidate
   the sibling call right?  Well, in the C++ case we can end up passing
   the pointer to the struct return area to a constructor (which returns
   void) and then nothing else happens.  Such a sibling call would look
   valid without the added check here.

   VxWorks PIC PLT entries require the global pointer to be initialized
   on entry.  We therefore can't emit sibling calls to them.  */

static bool
sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
          && flag_delayed_branch
          && (TARGET_ARCH64 || ! cfun->returns_struct)
          && !(TARGET_VXWORKS_RTP
               && flag_pic
               && !targetm.binds_local_p (decl)));
}
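/* Two concrete cases for the predicate above (a sketch, assuming delayed
   branches are enabled): the first tail call may become a sibcall, while
   on 32-bit SPARC the second stays a regular call because the caller
   itself returns a structure ("unimp after call" convention).  */
#if 0
struct big { int v[4]; };

extern int leaf (int);
extern struct big make_big (int);

int
forward (int x)
{
  return leaf (x + 1);          /* eligible for a sibling call */
}

struct big
forward_big (int x)
{
  return make_big (x);          /* kept as a normal call on TARGET_ARCH32 */
}
#endif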
/* libfunc renaming.  */

static void
sparc_init_libfuncs (void)
{
  if (TARGET_ARCH32)
    {
      /* Use the subroutines that Sun's library provides for integer
         multiply and divide.  The `*' prevents an underscore from
         being prepended by the compiler.  .umul is a little faster
         than .mul.  */
      set_optab_libfunc (smul_optab, SImode, "*.umul");
      set_optab_libfunc (sdiv_optab, SImode, "*.div");
      set_optab_libfunc (udiv_optab, SImode, "*.udiv");
      set_optab_libfunc (smod_optab, SImode, "*.rem");
      set_optab_libfunc (umod_optab, SImode, "*.urem");

      /* TFmode arithmetic.  These names are part of the SPARC 32bit ABI.  */
      set_optab_libfunc (add_optab, TFmode, "_Q_add");
      set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");

      /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
         is because with soft-float, the SFmode and DFmode sqrt
         instructions will be absent, and the compiler will notice and
         try to use the TFmode sqrt instruction for calls to the
         builtin function sqrt, but this fails.  */
      if (TARGET_FPU)
        set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
      set_optab_libfunc (le_optab, TFmode, "_Q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");

      set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");

      if (DITF_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
          set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
          set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
          set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
        }
    }
  if (TARGET_ARCH64)
    {
      /* In the SPARC 64bit ABI, SImode multiply and divide functions
         do not exist in the library.  Make sure the compiler does not
         emit calls to them by accident.  (It should always use the
         hardware instructions.)  */
      set_optab_libfunc (smul_optab, SImode, 0);
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);
      set_optab_libfunc (smod_optab, SImode, 0);
      set_optab_libfunc (umod_optab, SImode, 0);

      if (SUN_INTEGER_MULTIPLY_64)
        {
          set_optab_libfunc (smul_optab, DImode, "__mul64");
          set_optab_libfunc (sdiv_optab, DImode, "__div64");
          set_optab_libfunc (udiv_optab, DImode, "__udiv64");
          set_optab_libfunc (smod_optab, DImode, "__rem64");
          set_optab_libfunc (umod_optab, DImode, "__urem64");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
        }
    }
}
static tree
def_builtin(const char *name, int code, tree type)
{
  return add_builtin_function(name, type, code, BUILT_IN_MD, NULL,
                              NULL_TREE);
}

static tree
def_builtin_const(const char *name, int code, tree type)
{
  tree t = def_builtin(name, code, type);

  if (t)
    TREE_READONLY (t) = 1;

  return t;
}

/* Implement the TARGET_INIT_BUILTINS target hook.
   Create builtin functions for special SPARC instructions.  */

static void
sparc_init_builtins (void)
{
  if (TARGET_VIS)
    sparc_vis_init_builtins ();
}
9372 /* Create builtin functions for VIS 1.0 instructions. */
9375 sparc_vis_init_builtins (void)
9377 tree v4qi
= build_vector_type (unsigned_intQI_type_node
, 4);
9378 tree v8qi
= build_vector_type (unsigned_intQI_type_node
, 8);
9379 tree v4hi
= build_vector_type (intHI_type_node
, 4);
9380 tree v2hi
= build_vector_type (intHI_type_node
, 2);
9381 tree v2si
= build_vector_type (intSI_type_node
, 2);
9382 tree v1si
= build_vector_type (intSI_type_node
, 1);
9384 tree v4qi_ftype_v4hi
= build_function_type_list (v4qi
, v4hi
, 0);
9385 tree v8qi_ftype_v2si_v8qi
= build_function_type_list (v8qi
, v2si
, v8qi
, 0);
9386 tree v2hi_ftype_v2si
= build_function_type_list (v2hi
, v2si
, 0);
9387 tree v4hi_ftype_v4qi
= build_function_type_list (v4hi
, v4qi
, 0);
9388 tree v8qi_ftype_v4qi_v4qi
= build_function_type_list (v8qi
, v4qi
, v4qi
, 0);
9389 tree v4hi_ftype_v4qi_v4hi
= build_function_type_list (v4hi
, v4qi
, v4hi
, 0);
9390 tree v4hi_ftype_v4qi_v2hi
= build_function_type_list (v4hi
, v4qi
, v2hi
, 0);
9391 tree v2si_ftype_v4qi_v2hi
= build_function_type_list (v2si
, v4qi
, v2hi
, 0);
9392 tree v4hi_ftype_v8qi_v4hi
= build_function_type_list (v4hi
, v8qi
, v4hi
, 0);
9393 tree v4hi_ftype_v4hi_v4hi
= build_function_type_list (v4hi
, v4hi
, v4hi
, 0);
9394 tree v2si_ftype_v2si_v2si
= build_function_type_list (v2si
, v2si
, v2si
, 0);
9395 tree v8qi_ftype_v8qi_v8qi
= build_function_type_list (v8qi
, v8qi
, v8qi
, 0);
9396 tree v2hi_ftype_v2hi_v2hi
= build_function_type_list (v2hi
, v2hi
, v2hi
, 0);
9397 tree v1si_ftype_v1si_v1si
= build_function_type_list (v1si
, v1si
, v1si
, 0);
9398 tree di_ftype_v8qi_v8qi_di
= build_function_type_list (intDI_type_node
,
9400 intDI_type_node
, 0);
9401 tree di_ftype_v8qi_v8qi
= build_function_type_list (intDI_type_node
,
9403 tree si_ftype_v8qi_v8qi
= build_function_type_list (intSI_type_node
,
9405 tree di_ftype_di_di
= build_function_type_list (intDI_type_node
,
9407 intDI_type_node
, 0);
9408 tree si_ftype_si_si
= build_function_type_list (intSI_type_node
,
9410 intSI_type_node
, 0);
9411 tree ptr_ftype_ptr_si
= build_function_type_list (ptr_type_node
,
9413 intSI_type_node
, 0);
9414 tree ptr_ftype_ptr_di
= build_function_type_list (ptr_type_node
,
9416 intDI_type_node
, 0);
9417 tree si_ftype_ptr_ptr
= build_function_type_list (intSI_type_node
,
9420 tree di_ftype_ptr_ptr
= build_function_type_list (intDI_type_node
,
9423 tree si_ftype_v4hi_v4hi
= build_function_type_list (intSI_type_node
,
9425 tree si_ftype_v2si_v2si
= build_function_type_list (intSI_type_node
,
9427 tree di_ftype_v4hi_v4hi
= build_function_type_list (intDI_type_node
,
9429 tree di_ftype_v2si_v2si
= build_function_type_list (intDI_type_node
,
9431 tree void_ftype_di
= build_function_type_list (void_type_node
,
9432 intDI_type_node
, 0);
9433 tree di_ftype_void
= build_function_type_list (intDI_type_node
,
9435 tree void_ftype_si
= build_function_type_list (void_type_node
,
9436 intSI_type_node
, 0);
9437 tree sf_ftype_sf_sf
= build_function_type_list (float_type_node
,
9439 float_type_node
, 0);
9440 tree df_ftype_df_df
= build_function_type_list (double_type_node
,
9442 double_type_node
, 0);
9444 /* Packing and expanding vectors. */
9445 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis
,
9447 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis
,
9448 v8qi_ftype_v2si_v8qi
);
9449 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis
,
9451 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis
,
9453 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis
,
9454 v8qi_ftype_v4qi_v4qi
);
9456 /* Multiplications. */
9457 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis
,
9458 v4hi_ftype_v4qi_v4hi
);
9459 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis
,
9460 v4hi_ftype_v4qi_v2hi
);
9461 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis
,
9462 v4hi_ftype_v4qi_v2hi
);
9463 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis
,
9464 v4hi_ftype_v8qi_v4hi
);
9465 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis
,
9466 v4hi_ftype_v8qi_v4hi
);
9467 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis
,
9468 v2si_ftype_v4qi_v2hi
);
9469 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis
,
9470 v2si_ftype_v4qi_v2hi
);
9472 /* Data aligning. */
9473 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis
,
9474 v4hi_ftype_v4hi_v4hi
);
9475 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis
,
9476 v8qi_ftype_v8qi_v8qi
);
9477 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis
,
9478 v2si_ftype_v2si_v2si
);
9479 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis
,
9482 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis
,
9484 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis
,
9489 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis
,
9491 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis
,
9496 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis
,
9498 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis
,
9502 /* Pixel distance. */
9503 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis
,
9504 di_ftype_v8qi_v8qi_di
);
9506 /* Edge handling. */
9509 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis
,
9511 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis
,
9513 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis
,
9515 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis
,
9517 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis
,
9519 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis
,
9523 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis
,
9525 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis
,
9527 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis
,
9529 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis
,
9531 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis
,
9533 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis
,
9539 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis
,
9541 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis
,
9543 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis
,
9545 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis
,
9547 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis
,
9549 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis
,
9553 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis
,
9555 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis
,
9557 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis
,
9559 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis
,
9561 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis
,
9563 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis
,
9568 /* Pixel compare. */
9571 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis
,
9572 di_ftype_v4hi_v4hi
);
9573 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis
,
9574 di_ftype_v2si_v2si
);
9575 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis
,
9576 di_ftype_v4hi_v4hi
);
9577 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis
,
9578 di_ftype_v2si_v2si
);
9579 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis
,
9580 di_ftype_v4hi_v4hi
);
9581 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis
,
9582 di_ftype_v2si_v2si
);
9583 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis
,
9584 di_ftype_v4hi_v4hi
);
9585 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis
,
9586 di_ftype_v2si_v2si
);
9590 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis
,
9591 si_ftype_v4hi_v4hi
);
9592 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis
,
9593 si_ftype_v2si_v2si
);
9594 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis
,
9595 si_ftype_v4hi_v4hi
);
9596 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis
,
9597 si_ftype_v2si_v2si
);
9598 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis
,
9599 si_ftype_v4hi_v4hi
);
9600 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis
,
9601 si_ftype_v2si_v2si
);
9602 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis
,
9603 si_ftype_v4hi_v4hi
);
9604 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis
,
9605 si_ftype_v2si_v2si
);
9608 /* Addition and subtraction. */
9609 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3
,
9610 v4hi_ftype_v4hi_v4hi
);
9611 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3
,
9612 v2hi_ftype_v2hi_v2hi
);
9613 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3
,
9614 v2si_ftype_v2si_v2si
);
9615 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3
,
9616 v1si_ftype_v1si_v1si
);
9617 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3
,
9618 v4hi_ftype_v4hi_v4hi
);
9619 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3
,
9620 v2hi_ftype_v2hi_v2hi
);
9621 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3
,
9622 v2si_ftype_v2si_v2si
);
9623 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3
,
9624 v1si_ftype_v1si_v1si
);
9626 /* Three-dimensional array addressing. */
9629 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis
,
9631 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis
,
9633 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis
,
9638 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis
,
9640 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis
,
9642 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis
,
9648 /* Byte mask and shuffle */
9650 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis
,
9653 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis
,
9655 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis
,
9656 v4hi_ftype_v4hi_v4hi
);
9657 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis
,
9658 v8qi_ftype_v8qi_v8qi
);
9659 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis
,
9660 v2si_ftype_v2si_v2si
);
9661 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis
,
9669 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis
,
9671 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis
,
9673 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis
,
9678 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis
,
9680 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis
,
9682 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis
,
9686 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis
,
9687 v4hi_ftype_v4hi_v4hi
);
9689 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3
,
9690 v4hi_ftype_v4hi_v4hi
);
9691 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3
,
9692 v4hi_ftype_v4hi_v4hi
);
9693 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3
,
9694 v4hi_ftype_v4hi_v4hi
);
9695 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3
,
9696 v4hi_ftype_v4hi_v4hi
);
9697 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3
,
9698 v2si_ftype_v2si_v2si
);
9699 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3
,
9700 v2si_ftype_v2si_v2si
);
9701 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3
,
9702 v2si_ftype_v2si_v2si
);
9703 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3
,
9704 v2si_ftype_v2si_v2si
);
9707 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis
,
9708 di_ftype_v8qi_v8qi
);
9710 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis
,
9711 si_ftype_v8qi_v8qi
);
9713 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis
,
9714 v4hi_ftype_v4hi_v4hi
);
9715 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis
,
9717 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis
,
9720 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3
,
9721 v4hi_ftype_v4hi_v4hi
);
9722 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3
,
9723 v2hi_ftype_v2hi_v2hi
);
9724 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3
,
9725 v4hi_ftype_v4hi_v4hi
);
9726 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3
,
9727 v2hi_ftype_v2hi_v2hi
);
9728 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3
,
9729 v2si_ftype_v2si_v2si
);
9730 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3
,
9731 v1si_ftype_v1si_v1si
);
9732 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3
,
9733 v2si_ftype_v2si_v2si
);
9734 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3
,
9735 v1si_ftype_v1si_v1si
);
9739 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis
,
9740 di_ftype_v8qi_v8qi
);
9741 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis
,
9742 di_ftype_v8qi_v8qi
);
9743 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis
,
9744 di_ftype_v8qi_v8qi
);
9745 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis
,
9746 di_ftype_v8qi_v8qi
);
9750 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis
,
9751 si_ftype_v8qi_v8qi
);
9752 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis
,
9753 si_ftype_v8qi_v8qi
);
9754 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis
,
9755 si_ftype_v8qi_v8qi
);
9756 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis
,
9757 si_ftype_v8qi_v8qi
);
9760 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis
,
9762 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis
,
9764 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis
,
9766 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis
,
9768 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis
,
9770 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis
,
      def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
                         di_ftype_di_di);
      def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
                         di_ftype_di_di);
    }
}
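/* A short usage sketch for the builtins registered above (illustrative;
   requires a VIS-enabled compilation such as -mvis on a SPARC target).
   The vector typedefs correspond to the v8qi and v4hi modes used in the
   declarations.  */
#if 0
typedef unsigned char vec8  __attribute__ ((vector_size (8)));   /* v8qi */
typedef short         vec16 __attribute__ ((vector_size (8)));   /* v4hi */

vec16
add16 (vec16 a, vec16 b)
{
  return __builtin_vis_fpadd16 (a, b);          /* fpadd16 */
}

vec8
align (vec8 a, vec8 b, void *p)
{
  __builtin_vis_alignaddr (p, 0);               /* set the GSR alignment field */
  return __builtin_vis_faligndatav8qi (a, b);   /* faligndata */
}
#endif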
9782 /* Handle TARGET_EXPAND_BUILTIN target hook.
9783 Expand builtin functions for sparc intrinsics. */
9786 sparc_expand_builtin (tree exp
, rtx target
,
9787 rtx subtarget ATTRIBUTE_UNUSED
,
9788 enum machine_mode tmode ATTRIBUTE_UNUSED
,
9789 int ignore ATTRIBUTE_UNUSED
)
9792 call_expr_arg_iterator iter
;
9793 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
9794 unsigned int icode
= DECL_FUNCTION_CODE (fndecl
);
9799 nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
9803 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
9805 || GET_MODE (target
) != tmode
9806 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
9807 op
[0] = gen_reg_rtx (tmode
);
9811 FOR_EACH_CALL_EXPR_ARG (arg
, iter
, exp
)
9813 const struct insn_operand_data
*insn_op
;
9816 if (arg
== error_mark_node
)
9820 idx
= arg_count
- !nonvoid
;
9821 insn_op
= &insn_data
[icode
].operand
[idx
];
9822 op
[arg_count
] = expand_normal (arg
);
9824 if (insn_op
->mode
== V1DImode
9825 && GET_MODE (op
[arg_count
]) == DImode
)
9826 op
[arg_count
] = gen_lowpart (V1DImode
, op
[arg_count
]);
9827 else if (insn_op
->mode
== V1SImode
9828 && GET_MODE (op
[arg_count
]) == SImode
)
9829 op
[arg_count
] = gen_lowpart (V1SImode
, op
[arg_count
]);
9831 if (! (*insn_data
[icode
].operand
[idx
].predicate
) (op
[arg_count
],
9833 op
[arg_count
] = copy_to_mode_reg (insn_op
->mode
, op
[arg_count
]);
9839 pat
= GEN_FCN (icode
) (op
[0]);
9843 pat
= GEN_FCN (icode
) (op
[0], op
[1]);
9845 pat
= GEN_FCN (icode
) (op
[1]);
9848 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2]);
9851 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2], op
[3]);
static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}
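/* A standalone check of the rounding above, which models the 8x16
   partitioned multiply: the 24-bit product is rounded to its upper 16
   bits.  The sample values are illustrative only.  */
#if 0
#include <stdio.h>

static int
mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;   /* same rounding as sparc_vis_mul8x16 */
}

int
main (void)
{
  printf ("%d\n", mul8x16 (255, 256));    /* prints 255 */
  printf ("%d\n", mul8x16 (128, 1000));   /* prints 500 */
  return 0;
}
#endif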
/* Multiply the vector elements in ELTS0 to the elements in ELTS1 as specified
   by FNCODE.  All of the elements in ELTS0 and ELTS1 lists must be integer
   constants.  A tree list with the results of the multiplications is returned,
   and each element in the list is of INNER_TYPE.  */
9880 sparc_handle_vis_mul8x16 (int fncode
, tree inner_type
, tree elts0
, tree elts1
)
9882 tree n_elts
= NULL_TREE
;
9887 case CODE_FOR_fmul8x16_vis
:
9888 for (; elts0
&& elts1
;
9889 elts0
= TREE_CHAIN (elts0
), elts1
= TREE_CHAIN (elts1
))
9892 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0
)),
9893 TREE_INT_CST_LOW (TREE_VALUE (elts1
)));
9894 n_elts
= tree_cons (NULL_TREE
,
9895 build_int_cst (inner_type
, val
),
9900 case CODE_FOR_fmul8x16au_vis
:
9901 scale
= TREE_INT_CST_LOW (TREE_VALUE (elts1
));
9903 for (; elts0
; elts0
= TREE_CHAIN (elts0
))
9906 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0
)),
9908 n_elts
= tree_cons (NULL_TREE
,
9909 build_int_cst (inner_type
, val
),
9914 case CODE_FOR_fmul8x16al_vis
:
9915 scale
= TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1
)));
9917 for (; elts0
; elts0
= TREE_CHAIN (elts0
))
9920 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0
)),
9922 n_elts
= tree_cons (NULL_TREE
,
9923 build_int_cst (inner_type
, val
),
9932 return nreverse (n_elts
);
9935 /* Handle TARGET_FOLD_BUILTIN target hook.
9936 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
9937 result of the function call is ignored. NULL_TREE is returned if the
9938 function could not be folded. */
9941 sparc_fold_builtin (tree fndecl
, int n_args ATTRIBUTE_UNUSED
,
9942 tree
*args
, bool ignore
)
9944 tree arg0
, arg1
, arg2
;
9945 tree rtype
= TREE_TYPE (TREE_TYPE (fndecl
));
9946 enum insn_code icode
= (enum insn_code
) DECL_FUNCTION_CODE (fndecl
);
9950 /* Note that a switch statement instead of the sequence of tests would
9951 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
9952 and that would yield multiple alternatives with identical values. */
9953 if (icode
== CODE_FOR_alignaddrsi_vis
9954 || icode
== CODE_FOR_alignaddrdi_vis
9955 || icode
== CODE_FOR_wrgsr_vis
9956 || icode
== CODE_FOR_bmasksi_vis
9957 || icode
== CODE_FOR_bmaskdi_vis
9958 || icode
== CODE_FOR_cmask8si_vis
9959 || icode
== CODE_FOR_cmask8di_vis
9960 || icode
== CODE_FOR_cmask16si_vis
9961 || icode
== CODE_FOR_cmask16di_vis
9962 || icode
== CODE_FOR_cmask32si_vis
9963 || icode
== CODE_FOR_cmask32di_vis
)
9966 return build_zero_cst (rtype
);
9971 case CODE_FOR_fexpand_vis
:
9975 if (TREE_CODE (arg0
) == VECTOR_CST
)
9977 tree inner_type
= TREE_TYPE (rtype
);
9978 tree elts
= TREE_VECTOR_CST_ELTS (arg0
);
9979 tree n_elts
= NULL_TREE
;
9981 for (; elts
; elts
= TREE_CHAIN (elts
))
9983 unsigned int val
= TREE_INT_CST_LOW (TREE_VALUE (elts
)) << 4;
9984 n_elts
= tree_cons (NULL_TREE
,
9985 build_int_cst (inner_type
, val
),
9988 return build_vector (rtype
, nreverse (n_elts
));
9992 case CODE_FOR_fmul8x16_vis
:
9993 case CODE_FOR_fmul8x16au_vis
:
9994 case CODE_FOR_fmul8x16al_vis
:
10000 if (TREE_CODE (arg0
) == VECTOR_CST
&& TREE_CODE (arg1
) == VECTOR_CST
)
10002 tree inner_type
= TREE_TYPE (rtype
);
10003 tree elts0
= TREE_VECTOR_CST_ELTS (arg0
);
10004 tree elts1
= TREE_VECTOR_CST_ELTS (arg1
);
10005 tree n_elts
= sparc_handle_vis_mul8x16 (icode
, inner_type
, elts0
,
10008 return build_vector (rtype
, n_elts
);
10012 case CODE_FOR_fpmerge_vis
:
10018 if (TREE_CODE (arg0
) == VECTOR_CST
&& TREE_CODE (arg1
) == VECTOR_CST
)
10020 tree elts0
= TREE_VECTOR_CST_ELTS (arg0
);
10021 tree elts1
= TREE_VECTOR_CST_ELTS (arg1
);
10022 tree n_elts
= NULL_TREE
;
10024 for (; elts0
&& elts1
;
10025 elts0
= TREE_CHAIN (elts0
), elts1
= TREE_CHAIN (elts1
))
10027 n_elts
= tree_cons (NULL_TREE
, TREE_VALUE (elts0
), n_elts
);
10028 n_elts
= tree_cons (NULL_TREE
, TREE_VALUE (elts1
), n_elts
);
10031 return build_vector (rtype
, nreverse (n_elts
));
10035 case CODE_FOR_pdist_vis
:
10043 if (TREE_CODE (arg0
) == VECTOR_CST
10044 && TREE_CODE (arg1
) == VECTOR_CST
10045 && TREE_CODE (arg2
) == INTEGER_CST
)
10048 unsigned HOST_WIDE_INT low
= TREE_INT_CST_LOW (arg2
);
10049 HOST_WIDE_INT high
= TREE_INT_CST_HIGH (arg2
);
10050 tree elts0
= TREE_VECTOR_CST_ELTS (arg0
);
10051 tree elts1
= TREE_VECTOR_CST_ELTS (arg1
);
10053 for (; elts0
&& elts1
;
10054 elts0
= TREE_CHAIN (elts0
), elts1
= TREE_CHAIN (elts1
))
10056 unsigned HOST_WIDE_INT
10057 low0
= TREE_INT_CST_LOW (TREE_VALUE (elts0
)),
10058 low1
= TREE_INT_CST_LOW (TREE_VALUE (elts1
));
10059 HOST_WIDE_INT high0
= TREE_INT_CST_HIGH (TREE_VALUE (elts0
));
10060 HOST_WIDE_INT high1
= TREE_INT_CST_HIGH (TREE_VALUE (elts1
));
10062 unsigned HOST_WIDE_INT l
;
10065 overflow
|= neg_double (low1
, high1
, &l
, &h
);
10066 overflow
|= add_double (low0
, high0
, l
, h
, &l
, &h
);
10068 overflow
|= neg_double (l
, h
, &l
, &h
);
10070 overflow
|= add_double (low
, high
, l
, h
, &low
, &high
);
10073 gcc_assert (overflow
== 0);
10075 return build_int_cst_wide (rtype
, low
, high
);
10085 /* ??? This duplicates information provided to the compiler by the
10086 ??? scheduler description. Some day, teach genautomata to output
10087 ??? the latencies and then CSE will just use that. */
10090 sparc_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
10091 int *total
, bool speed ATTRIBUTE_UNUSED
)
10093 enum machine_mode mode
= GET_MODE (x
);
10094 bool float_mode_p
= FLOAT_MODE_P (mode
);
10099 if (INTVAL (x
) < 0x1000 && INTVAL (x
) >= -0x1000)
10117 if (GET_MODE (x
) == VOIDmode
10118 && ((CONST_DOUBLE_HIGH (x
) == 0
10119 && CONST_DOUBLE_LOW (x
) < 0x1000)
10120 || (CONST_DOUBLE_HIGH (x
) == -1
10121 && CONST_DOUBLE_LOW (x
) < 0
10122 && CONST_DOUBLE_LOW (x
) >= -0x1000)))
10129 /* If outer-code was a sign or zero extension, a cost
10130 of COSTS_N_INSNS (1) was already added in. This is
10131 why we are subtracting it back out. */
10132 if (outer_code
== ZERO_EXTEND
)
10134 *total
= sparc_costs
->int_zload
- COSTS_N_INSNS (1);
10136 else if (outer_code
== SIGN_EXTEND
)
10138 *total
= sparc_costs
->int_sload
- COSTS_N_INSNS (1);
10140 else if (float_mode_p
)
10142 *total
= sparc_costs
->float_load
;
10146 *total
= sparc_costs
->int_load
;
10154 *total
= sparc_costs
->float_plusminus
;
10156 *total
= COSTS_N_INSNS (1);
10163 gcc_assert (float_mode_p
);
10164 *total
= sparc_costs
->float_mul
;
10167 if (GET_CODE (sub
) == NEG
)
10168 sub
= XEXP (sub
, 0);
10169 *total
+= rtx_cost (sub
, FMA
, 0, speed
);
10172 if (GET_CODE (sub
) == NEG
)
10173 sub
= XEXP (sub
, 0);
10174 *total
+= rtx_cost (sub
, FMA
, 2, speed
);
10180 *total
= sparc_costs
->float_mul
;
10181 else if (! TARGET_HARD_MUL
)
10182 *total
= COSTS_N_INSNS (25);
10188 if (sparc_costs
->int_mul_bit_factor
)
10192 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
10194 unsigned HOST_WIDE_INT value
= INTVAL (XEXP (x
, 1));
10195 for (nbits
= 0; value
!= 0; value
&= value
- 1)
10198 else if (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
10199 && GET_MODE (XEXP (x
, 1)) == VOIDmode
)
10201 rtx x1
= XEXP (x
, 1);
10202 unsigned HOST_WIDE_INT value1
= CONST_DOUBLE_LOW (x1
);
10203 unsigned HOST_WIDE_INT value2
= CONST_DOUBLE_HIGH (x1
);
10205 for (nbits
= 0; value1
!= 0; value1
&= value1
- 1)
10207 for (; value2
!= 0; value2
&= value2
- 1)
10215 bit_cost
= (nbits
- 3) / sparc_costs
->int_mul_bit_factor
;
10216 bit_cost
= COSTS_N_INSNS (bit_cost
);
10219 if (mode
== DImode
)
10220 *total
= sparc_costs
->int_mulX
+ bit_cost
;
10222 *total
= sparc_costs
->int_mul
+ bit_cost
;
10229 *total
= COSTS_N_INSNS (1) + sparc_costs
->shift_penalty
;
10238 if (mode
== DFmode
)
10239 *total
= sparc_costs
->float_div_df
;
10241 *total
= sparc_costs
->float_div_sf
;
10245 if (mode
== DImode
)
10246 *total
= sparc_costs
->int_divX
;
10248 *total
= sparc_costs
->int_div
;
10253 if (! float_mode_p
)
10255 *total
= COSTS_N_INSNS (1);
10262 case UNSIGNED_FLOAT
:
10266 case FLOAT_TRUNCATE
:
10267 *total
= sparc_costs
->float_move
;
10271 if (mode
== DFmode
)
10272 *total
= sparc_costs
->float_sqrt_df
;
10274 *total
= sparc_costs
->float_sqrt_sf
;
10279 *total
= sparc_costs
->float_cmp
;
10281 *total
= COSTS_N_INSNS (1);
10286 *total
= sparc_costs
->float_cmove
;
10288 *total
= sparc_costs
->int_cmove
;
10292 /* Handle the NAND vector patterns. */
10293 if (sparc_vector_mode_supported_p (GET_MODE (x
))
10294 && GET_CODE (XEXP (x
, 0)) == NOT
10295 && GET_CODE (XEXP (x
, 1)) == NOT
)
10297 *total
= COSTS_N_INSNS (1);
/* Return true if CLASS is either GENERAL_REGS or I64_REGS.  */

static inline bool
general_or_i64_p (reg_class_t rclass)
{
  return (rclass == GENERAL_REGS || rclass == I64_REGS);
}

/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                          reg_class_t from, reg_class_t to)
{
  bool need_memory = false;

  if (from == FPCC_REGS || to == FPCC_REGS)
    need_memory = true;
  else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
           || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
    {
      if (TARGET_VIS3)
        {
          int size = GET_MODE_SIZE (mode);
          if (size == 8 || size == 4)
            {
              if (! TARGET_ARCH32 || size == 4)
                return 4;
              else
                return 6;
            }
        }
      need_memory = true;
    }

  if (need_memory)
    {
      if (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2
          || sparc_cpu == PROCESSOR_NIAGARA3
          || sparc_cpu == PROCESSOR_NIAGARA4)
        return 12;

      return 6;
    }

  return 2;
}
/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the GOT helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
                                             SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            adjust_address (slot, word_mode, UNITS_PER_WORD),
                            reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            reg2,
                            adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}
10391 /* Output the assembler code for a thunk function. THUNK_DECL is the
10392 declaration for the thunk function itself, FUNCTION is the decl for
10393 the target function. DELTA is an immediate constant offset to be
10394 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10395 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10398 sparc_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
10399 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
10402 rtx this_rtx
, insn
, funexp
;
10403 unsigned int int_arg_first
;
10405 reload_completed
= 1;
10406 epilogue_completed
= 1;
10408 emit_note (NOTE_INSN_PROLOGUE_END
);
10412 sparc_leaf_function_p
= 1;
10414 int_arg_first
= SPARC_OUTGOING_INT_ARG_FIRST
;
10416 else if (flag_delayed_branch
)
10418 /* We will emit a regular sibcall below, so we need to instruct
10419 output_sibcall that we are in a leaf function. */
10420 sparc_leaf_function_p
= current_function_uses_only_leaf_regs
= 1;
10422 /* This will cause final.c to invoke leaf_renumber_regs so we
10423 must behave as if we were in a not-yet-leafified function. */
10424 int_arg_first
= SPARC_INCOMING_INT_ARG_FIRST
;
10428 /* We will emit the sibcall manually below, so we will need to
10429 manually spill non-leaf registers. */
10430 sparc_leaf_function_p
= current_function_uses_only_leaf_regs
= 0;
10432 /* We really are in a leaf function. */
10433 int_arg_first
= SPARC_OUTGOING_INT_ARG_FIRST
;
10436 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10437 returns a structure, the structure return pointer is there instead. */
10439 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
10440 this_rtx
= gen_rtx_REG (Pmode
, int_arg_first
+ 1);
10442 this_rtx
= gen_rtx_REG (Pmode
, int_arg_first
);
10444 /* Add DELTA. When possible use a plain add, otherwise load it into
10445 a register first. */
10448 rtx delta_rtx
= GEN_INT (delta
);
10450 if (! SPARC_SIMM13_P (delta
))
10452 rtx scratch
= gen_rtx_REG (Pmode
, 1);
10453 emit_move_insn (scratch
, delta_rtx
);
10454 delta_rtx
= scratch
;
10457 /* THIS_RTX += DELTA. */
10458 emit_insn (gen_add2_insn (this_rtx
, delta_rtx
));
10461 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10464 rtx vcall_offset_rtx
= GEN_INT (vcall_offset
);
10465 rtx scratch
= gen_rtx_REG (Pmode
, 1);
10467 gcc_assert (vcall_offset
< 0);
10469 /* SCRATCH = *THIS_RTX. */
10470 emit_move_insn (scratch
, gen_rtx_MEM (Pmode
, this_rtx
));
10472 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10473 may not have any available scratch register at this point. */
10474 if (SPARC_SIMM13_P (vcall_offset
))
10476 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
10477 else if (! fixed_regs
[5]
10478 /* The below sequence is made up of at least 2 insns,
10479 while the default method may need only one. */
10480 && vcall_offset
< -8192)
10482 rtx scratch2
= gen_rtx_REG (Pmode
, 5);
10483 emit_move_insn (scratch2
, vcall_offset_rtx
);
10484 vcall_offset_rtx
= scratch2
;
10488 rtx increment
= GEN_INT (-4096);
10490 /* VCALL_OFFSET is a negative number whose typical range can be
10491 estimated as -32768..0 in 32-bit mode. In almost all cases
10492 it is therefore cheaper to emit multiple add insns than
10493 spilling and loading the constant into a register (at least
10495 while (! SPARC_SIMM13_P (vcall_offset
))
10497 emit_insn (gen_add2_insn (scratch
, increment
));
10498 vcall_offset
+= 4096;
10500 vcall_offset_rtx
= GEN_INT (vcall_offset
); /* cannot be 0 */
10503 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10504 emit_move_insn (scratch
, gen_rtx_MEM (Pmode
,
10505 gen_rtx_PLUS (Pmode
,
10507 vcall_offset_rtx
)));
10509 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10510 emit_insn (gen_add2_insn (this_rtx
, scratch
));
10513 /* Generate a tail call to the target function. */
10514 if (! TREE_USED (function
))
10516 assemble_external (function
);
10517 TREE_USED (function
) = 1;
10519 funexp
= XEXP (DECL_RTL (function
), 0);
10521 if (flag_delayed_branch
)
10523 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
10524 insn
= emit_call_insn (gen_sibcall (funexp
));
10525 SIBLING_CALL_P (insn
) = 1;
10529 /* The hoops we have to jump through in order to generate a sibcall
10530 without using delay slots... */
10531 rtx spill_reg
, seq
, scratch
= gen_rtx_REG (Pmode
, 1);
10535 spill_reg
= gen_rtx_REG (word_mode
, 15); /* %o7 */
10537 load_got_register (); /* clobbers %o7 */
10538 scratch
= sparc_legitimize_pic_address (funexp
, scratch
);
10539 seq
= get_insns ();
10541 emit_and_preserve (seq
, spill_reg
, pic_offset_table_rtx
);
10543 else if (TARGET_ARCH32
)
10545 emit_insn (gen_rtx_SET (VOIDmode
,
10547 gen_rtx_HIGH (SImode
, funexp
)));
10548 emit_insn (gen_rtx_SET (VOIDmode
,
10550 gen_rtx_LO_SUM (SImode
, scratch
, funexp
)));
10552 else /* TARGET_ARCH64 */
10554 switch (sparc_cmodel
)
10558 /* The destination can serve as a temporary. */
10559 sparc_emit_set_symbolic_const64 (scratch
, funexp
, scratch
);
10564 /* The destination cannot serve as a temporary. */
10565 spill_reg
= gen_rtx_REG (DImode
, 15); /* %o7 */
10567 sparc_emit_set_symbolic_const64 (scratch
, funexp
, spill_reg
);
10568 seq
= get_insns ();
10570 emit_and_preserve (seq
, spill_reg
, 0);
10574 gcc_unreachable ();
10578 emit_jump_insn (gen_indirect_jump (scratch
));
10583 /* Run just enough of rest_of_compilation to get the insns emitted.
10584 There's not really enough bulk here to make other passes such as
10585 instruction scheduling worth while. Note that use_thunk calls
10586 assemble_start_function and assemble_end_function. */
10587 insn
= get_insns ();
10588 insn_locators_alloc ();
10589 shorten_branches (insn
);
10590 final_start_function (insn
, file
, 1);
10591 final (insn
, file
, 1);
10592 final_end_function ();
10594 reload_completed
= 0;
10595 epilogue_completed
= 0;
10598 /* Return true if sparc_output_mi_thunk would be able to output the
10599 assembler code for the thunk function specified by the arguments
10600 it is passed, and false otherwise. */
10602 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED
,
10603 HOST_WIDE_INT delta ATTRIBUTE_UNUSED
,
10604 HOST_WIDE_INT vcall_offset
,
10605 const_tree function ATTRIBUTE_UNUSED
)
10607 /* Bound the loop used in the default method above. */
10608 return (vcall_offset
>= -32768 || ! fixed_regs
[5]);
10611 /* We use the machine specific reorg pass to enable workarounds for errata. */
10618 /* The only erratum we handle for now is that of the AT697F processor. */
10619 if (!sparc_fix_at697f
)
10622 /* We need to have the (essentially) final form of the insn stream in order
10623 to properly detect the various hazards. Run delay slot scheduling. */
10624 if (optimize
> 0 && flag_delayed_branch
)
10625 dbr_schedule (get_insns ());
10627 /* Now look for specific patterns in the insn stream. */
10628 for (insn
= get_insns (); insn
; insn
= next
)
10630 bool insert_nop
= false;
10633 /* Look for a single-word load into an odd-numbered FP register. */
10634 if (NONJUMP_INSN_P (insn
)
10635 && (set
= single_set (insn
)) != NULL_RTX
10636 && GET_MODE_SIZE (GET_MODE (SET_SRC (set
))) == 4
10637 && MEM_P (SET_SRC (set
))
10638 && REG_P (SET_DEST (set
))
10639 && REGNO (SET_DEST (set
)) > 31
10640 && REGNO (SET_DEST (set
)) % 2 != 0)
10642 /* The wrong dependency is on the enclosing double register. */
10643 unsigned int x
= REGNO (SET_DEST (set
)) - 1;
10644 unsigned int src1
, src2
, dest
;
10647 /* If the insn has a delay slot, then it cannot be problematic. */
10648 next
= next_active_insn (insn
);
10649 if (NONJUMP_INSN_P (next
) && GET_CODE (PATTERN (next
)) == SEQUENCE
)
10653 extract_insn (next
);
10654 code
= INSN_CODE (next
);
10659 case CODE_FOR_adddf3
:
10660 case CODE_FOR_subdf3
:
10661 case CODE_FOR_muldf3
:
10662 case CODE_FOR_divdf3
:
10663 dest
= REGNO (recog_data
.operand
[0]);
10664 src1
= REGNO (recog_data
.operand
[1]);
10665 src2
= REGNO (recog_data
.operand
[2]);
10669 ld [address], %fx+1
10670 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10671 if ((src1
== x
|| src2
== x
)
10672 && (dest
== src1
|| dest
== src2
))
10678 ld [address], %fx+1
10679 FPOPd %fx, %fx, %fx */
10682 && (code
== CODE_FOR_adddf3
|| code
== CODE_FOR_muldf3
))
10687 case CODE_FOR_sqrtdf2
:
10688 dest
= REGNO (recog_data
.operand
[0]);
10689 src1
= REGNO (recog_data
.operand
[1]);
10691 ld [address], %fx+1
10693 if (src1
== x
&& dest
== src1
)
10702 next
= NEXT_INSN (insn
);
10705 emit_insn_after (gen_nop (), insn
);
/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }

  output_addr_const (file, x);
  fputs (")", file);
}
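/* For context (illustrative): the DTP-relative relocation emitted above
   appears in the debug info of local-dynamic TLS variables, as in the
   sketch below (compiled with -fpic -g on a SPARC target).  */
#if 0
static __thread int counter __attribute__ ((tls_model ("local-dynamic")));

int
bump (void)
{
  return ++counter;
}
#endif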
10773 /* Do whatever processing is required at the end of a file. */
10776 sparc_file_end (void)
10778 /* If we need to emit the special GOT helper function, do so now. */
10779 if (got_helper_rtx
)
10781 const char *name
= XSTR (got_helper_rtx
, 0);
10782 const char *reg_name
= reg_names
[GLOBAL_OFFSET_TABLE_REGNUM
];
10783 #ifdef DWARF2_UNWIND_INFO
10787 if (USE_HIDDEN_LINKONCE
)
10789 tree decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
,
10790 get_identifier (name
),
10791 build_function_type_list (void_type_node
,
10793 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
10794 NULL_TREE
, void_type_node
);
10795 TREE_STATIC (decl
) = 1;
10796 make_decl_one_only (decl
, DECL_ASSEMBLER_NAME (decl
));
10797 DECL_VISIBILITY (decl
) = VISIBILITY_HIDDEN
;
10798 DECL_VISIBILITY_SPECIFIED (decl
) = 1;
10799 resolve_unique_section (decl
, 0, flag_function_sections
);
10800 allocate_struct_function (decl
, true);
10801 cfun
->is_thunk
= 1;
10802 current_function_decl
= decl
;
10803 init_varasm_status ();
10804 assemble_start_function (decl
, name
);
10808 const int align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
10809 switch_to_section (text_section
);
10811 ASM_OUTPUT_ALIGN (asm_out_file
, align
);
10812 ASM_OUTPUT_LABEL (asm_out_file
, name
);
10815 #ifdef DWARF2_UNWIND_INFO
10816 do_cfi
= dwarf2out_do_cfi_asm ();
10818 fprintf (asm_out_file
, "\t.cfi_startproc\n");
10820 if (flag_delayed_branch
)
10821 fprintf (asm_out_file
, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10822 reg_name
, reg_name
);
10824 fprintf (asm_out_file
, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10825 reg_name
, reg_name
);
10826 #ifdef DWARF2_UNWIND_INFO
10828 fprintf (asm_out_file
, "\t.cfi_endproc\n");
10832 if (NEED_INDICATE_EXEC_STACK
)
10833 file_end_indicate_exec_stack ();
10835 #ifdef TARGET_SOLARIS
10836 solaris_file_end ();
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
10856 /* Expand code to perform a 8 or 16-bit compare and swap by doing 32-bit
10857 compare and swap on the word containing the byte or half-word. */
void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
			  gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_XOR (SImode, off,
				       GEN_INT (GET_MODE (mem) == QImode
						? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
			  gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
			  gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
				  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
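/* Helper used when expanding VIS vector permutations: turn the element
   selector SEL of a permutation in mode VMODE into the byte-granular
   selector that the VIS2 bmask/bshuffle pair expects, then emit the bmask
   insn whose side effect of writing the GSR is what a following bshuffle
   consumes.  The per-case comments below track the selector one nibble per
   character.  */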
void
sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
{
  rtx t_1, t_2, t_3;

  sel = gen_lowpart (DImode, sel);
  switch (vmode)
    {
    case V2SImode:
      /* inp = xxxxxxxAxxxxxxxB */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ....xxxxxxxAxxx. */
      sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
				 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
      t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
				 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .......B */
      /* t_1 = ...A.... */
      sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
      /* sel = ...A...B */
      sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
      /* sel = AAAABBBB * 4 */
      t_1 = force_reg (SImode, GEN_INT (0x01230123));
      /* sel = { A*4, A*4+1, A*4+2, ... } */
      break;

    case V4HImode:
      /* inp = xxxAxxxBxxxCxxxD */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ..xxxAxxxBxxxCxx */
      /* t_2 = ....xxxAxxxBxxxC */
      /* t_3 = ......xxxAxxxBxx */
      sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
				 GEN_INT (0x07),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
				 GEN_INT (0x0700),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
				 GEN_INT (0x070000),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
				 GEN_INT (0x07000000),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .......D */
      /* t_1 = .....C.. */
      /* t_2 = ...B.... */
      /* t_3 = .A...... */
      sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
      t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
      sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
      /* sel = .A.B.C.D */
      sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
      /* sel = AABBCCDD * 2 */
      t_1 = force_reg (SImode, GEN_INT (0x01010101));
      /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
      break;

    case V8QImode:
      /* input = xAxBxCxDxExFxGxH */
      sel = expand_simple_binop (DImode, AND, sel,
				 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
					  | 0x0f0f0f0f),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .A.B.C.D.E.F.G.H */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ..A.B.C.D.E.F.G. */
      sel = expand_simple_binop (DImode, IOR, sel, t_1,
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .AABBCCDDEEFFGGH */
      sel = expand_simple_binop (DImode, AND, sel,
				 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
					  | 0xff00ff),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = ..AB..CD..EF..GH */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ....AB..CD..EF.. */
      sel = expand_simple_binop (DImode, IOR, sel, t_1,
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = ..ABABCDCDEFEFGH */
      sel = expand_simple_binop (DImode, AND, sel,
				 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = ....ABCD....EFGH */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ........ABCD.... */
      sel = gen_lowpart (SImode, sel);
      t_1 = gen_lowpart (SImode, t_1);
      break;

    default:
      gcc_unreachable ();
    }

  /* Always perform the final addition/merge within the bmask insn.  */
  emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
}
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(current_function_is_leaf && only_leaf_regs_used ());
}
/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}
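/* Background for the hook below: in 64-bit mode %sp and %fp carry a bias of
   2047 bytes (the registers point 2047 bytes below the data they reference),
   so frame addresses normally include a +2047 adjustment.  Handing builtin
   setjmp the hard frame pointer itself avoids applying that bias.  */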
/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}
/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5, then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
	   regno <= SPARC_LAST_V9_FP_REG;
	   regno++)
	fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
	   regno <= SPARC_LAST_V9_FCC_REG;
	   regno++)
	fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
	fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2, then honor it.
     Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs[SPARC_GSR_REG] = 1;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try to reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  enum machine_mode mode = GET_MODE (x);
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
	  || rclass == GENERAL_OR_FP_REGS
	  || rclass == GENERAL_OR_EXTRA_FP_REGS
	  || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
	  || (mode == TFmode && ! const_zero_operand (x, mode)))
	return NO_REGS;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	return GENERAL_REGS;

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  if (! FP_REG_CLASS_P (rclass)
	      || !(const_zero_operand (x, mode)
		   || const_all_ones_operand (x, mode)))
	    return NO_REGS;
	}
    }

  if (TARGET_VIS3
      && TARGET_ARCH32
      && (rclass == EXTRA_FP_REGS
	  || rclass == GENERAL_OR_EXTRA_FP_REGS))
    {
      int regno = true_regnum (x);

      if (SPARC_INT_REG_P (regno))
	return (rclass == EXTRA_FP_REGS
		? FP_REGS : GENERAL_OR_FP_REGS);
    }

  return rclass;
}
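/* Summary of the output routine below: in v8plus mode a 64-bit value lives
   in a register pair (%H = high word, %L = low word), so a 64-bit multiply
   must first reassemble each operand into a single 64-bit register.  NAME
   is the multiply mnemonic to use.  For the simplest shape (constant second
   operand, one of the register alternatives) the emitted sequence looks
   roughly like:

	sllx	%H1, 32, %3	! glue the two halves of operand 1
	or	%L1, %3, %3
	mulx	%3, %2, %3	! the actual 64-bit multiply
	srlx	%3, 32, %H0	! split the product back into a pair
	mov	%3, %L0

   The "mulx" shown is only an illustration of a NAME callers might pass;
   the other branches handle the remaining operand shapes.  */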
const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *name)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
	{
	  output_asm_insn ("or\t%L1, %H1, %H1", operands);
	  sprintf (mulstr, "%s\t%%H1, %%2, %%L0", name);
	  output_asm_insn (mulstr, operands);
	  return "srlx\t%L0, 32, %H0";
	}
      else
	{
	  output_asm_insn ("sllx\t%H1, 32, %3", operands);
	  output_asm_insn ("or\t%L1, %3, %3", operands);
	  sprintf (mulstr, "%s\t%%3, %%2, %%3", name);
	  output_asm_insn (mulstr, operands);
	  output_asm_insn ("srlx\t%3, 32, %H0", operands);
	  return "mov\t%3, %L0";
	}
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
	{
	  output_asm_insn ("or\t%L1, %H1, %H1", operands);
	  sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", name);
	  output_asm_insn (mulstr, operands);
	  return "srlx\t%L0, 32, %H0";
	}
      else
	{
	  output_asm_insn ("sllx\t%H1, 32, %3", operands);
	  output_asm_insn ("or\t%L1, %3, %3", operands);
	  sprintf (mulstr, "%s\t%%3, %%3, %%3", name);
	  output_asm_insn (mulstr, operands);
	  output_asm_insn ("srlx\t%3, 32, %H0", operands);
	  return "mov\t%3, %L0";
	}
    }

  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", name);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", name);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}
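/* Expand a vector initializer for TARGET from the element list VALS: when
   every element is constant the whole thing becomes a single CONST_VECTOR
   move, otherwise the elements are stored one by one into a stack temporary
   of the vector mode and the temporary is copied into TARGET.  */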
void
sparc_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  rtx mem;

  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
	n_var++;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
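/* Implement TARGET_SECONDARY_RELOAD.  Decide whether reloading X (of mode
   MODE) in or out of class RCLASS needs help: either an intermediate
   register class is returned, or SRI is filled in with a special reload
   pattern and an extra cost.  The individual cases are explained inline.  */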
static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
	  || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
	      && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
	&& symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
	   && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
	sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
	sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
	 to move 8-byte values in 4-byte pieces.  This only works via
	 FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
	 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
	 an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
	  || ((general_or_i64_p (rclass)
	       || rclass == GENERAL_OR_FP_REGS)
	      && SPARC_FP_REG_P (regno)))
	{
	  sri->extra_cost = 2;
	  return FP_REGS;
	}
    }

  return rclass;
}

#include "gt-sparc.h"