1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   Such wrappers encode a mips_symbol_type in XINT (X, 1), biased by
   UNSPEC_ADDRESS_FIRST (see UNSPEC_ADDRESS_TYPE below).  */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  X must satisfy
   UNSPEC_ADDRESS_P; the unspec number is biased by
   UNSPEC_ADDRESS_FIRST, so subtracting it recovers the
   mips_symbol_type enumerator.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP \
  (!TARGET_MIPS16 ? 0x7ff0 \
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
   : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  For a
   non-SEQUENCE insn this degenerates to a single iteration over INSN
   itself.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN) \
  for ((SUBINSN) = SEQ_BEGIN (INSN); \
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  Shift an unsigned constant so
   that testing bit 31 of a 32-bit register mask (such as the frame's
   "mask" and "fmask" fields, which include $31/$f31) does not
   left-shift into the sign bit of a signed int, which is undefined
   behavior.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1U << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type
{
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF
,
154 MIPS_V2SF_FTYPE_V2SF_V2SF
,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT
,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF
,
157 MIPS_V2SF_FTYPE_SF_SF
,
158 MIPS_INT_FTYPE_V2SF_V2SF
,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF
,
160 MIPS_INT_FTYPE_SF_SF
,
161 MIPS_INT_FTYPE_DF_DF
,
/* For the MIPS DSP ASE.  */
170 MIPS_DI_FTYPE_DI_SI_SI
,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI
,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI
,
174 MIPS_SI_FTYPE_PTR_SI
,
178 MIPS_SI_FTYPE_V2HI_V2HI
,
180 MIPS_SI_FTYPE_V4QI_V4QI
,
183 MIPS_V2HI_FTYPE_SI_SI
,
184 MIPS_V2HI_FTYPE_V2HI
,
185 MIPS_V2HI_FTYPE_V2HI_SI
,
186 MIPS_V2HI_FTYPE_V2HI_V2HI
,
187 MIPS_V2HI_FTYPE_V4QI
,
188 MIPS_V2HI_FTYPE_V4QI_V2HI
,
190 MIPS_V4QI_FTYPE_V2HI_V2HI
,
191 MIPS_V4QI_FTYPE_V4QI_SI
,
192 MIPS_V4QI_FTYPE_V4QI_V4QI
,
193 MIPS_VOID_FTYPE_SI_SI
,
194 MIPS_VOID_FTYPE_V2HI_V2HI
,
195 MIPS_VOID_FTYPE_V4QI_V4QI
,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI
,
199 MIPS_SI_FTYPE_SI_SI_SI
,
200 MIPS_DI_FTYPE_DI_USI_USI
,
202 MIPS_DI_FTYPE_USI_USI
,
203 MIPS_V2HI_FTYPE_SI_SI_SI
,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET
,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY
,
239 MIPS_BUILTIN_CMP_ALL
,
240 MIPS_BUILTIN_CMP_UPPER
,
241 MIPS_BUILTIN_CMP_LOWER
,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE
,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition
{
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND
)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions
[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY
)
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  Used to factor the
   prologue/epilogue register walks over a single callback type.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
/* Forward declarations of structures referenced by the static
   prototypes below; their definitions appear later in this file.  */
struct mips16_constant;
struct mips_arg_info;
struct mips_address_info;
struct mips_integer_op;
291 static bool mips_valid_base_register_p (rtx
, enum machine_mode
, int);
292 static bool mips_classify_address (struct mips_address_info
*, rtx
,
293 enum machine_mode
, int);
294 static bool mips_cannot_force_const_mem (rtx
);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode
, const_rtx
);
296 static int mips_symbol_insns (enum mips_symbol_type
, enum machine_mode
);
297 static bool mips16_unextended_reference_p (enum machine_mode mode
, rtx
, rtx
);
298 static rtx
mips_force_temporary (rtx
, rtx
);
299 static rtx
mips_unspec_offset_high (rtx
, rtx
, rtx
, enum mips_symbol_type
);
300 static rtx
mips_add_offset (rtx
, rtx
, HOST_WIDE_INT
);
301 static unsigned int mips_build_shift (struct mips_integer_op
*, HOST_WIDE_INT
);
302 static unsigned int mips_build_lower (struct mips_integer_op
*,
303 unsigned HOST_WIDE_INT
);
304 static unsigned int mips_build_integer (struct mips_integer_op
*,
305 unsigned HOST_WIDE_INT
);
306 static void mips_legitimize_const_move (enum machine_mode
, rtx
, rtx
);
307 static int m16_check_op (rtx
, int, int, int);
308 static bool mips_rtx_costs (rtx
, int, int, int *);
309 static int mips_address_cost (rtx
);
310 static void mips_emit_compare (enum rtx_code
*, rtx
*, rtx
*, bool);
311 static void mips_load_call_address (rtx
, rtx
, int);
312 static bool mips_function_ok_for_sibcall (tree
, tree
);
313 static void mips_block_move_straight (rtx
, rtx
, HOST_WIDE_INT
);
314 static void mips_adjust_block_mem (rtx
, HOST_WIDE_INT
, rtx
*, rtx
*);
315 static void mips_block_move_loop (rtx
, rtx
, HOST_WIDE_INT
);
316 static void mips_arg_info (const CUMULATIVE_ARGS
*, enum machine_mode
,
317 tree
, int, struct mips_arg_info
*);
318 static bool mips_get_unaligned_mem (rtx
*, unsigned int, int, rtx
*, rtx
*);
319 static void mips_set_architecture (const struct mips_cpu_info
*);
320 static void mips_set_tune (const struct mips_cpu_info
*);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function
*mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx
, enum mips_symbol_context
,
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx
*, void *);
327 static int mips_rewrite_small_data_1 (rtx
*, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode
, int, HOST_WIDE_INT
,
332 mips_save_restore_fn
);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT
, mips_save_restore_fn
);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT
);
337 static void mips_set_frame_expr (rtx
);
338 static rtx
mips_frame_set (rtx
, rtx
);
339 static void mips_save_reg (rtx
, rtx
);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT
);
341 static void mips_restore_reg (rtx
, rtx
);
342 static void mips_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
343 HOST_WIDE_INT
, tree
);
344 static section
*mips_select_rtx_section (enum machine_mode
, rtx
,
345 unsigned HOST_WIDE_INT
);
346 static section
*mips_function_rodata_section (tree
);
347 static bool mips_in_small_data_p (const_tree
);
348 static bool mips_use_anchors_for_symbol_p (const_rtx
);
349 static int mips_fpr_return_fields (const_tree
, tree
*);
350 static bool mips_return_in_msb (const_tree
);
351 static rtx
mips_return_fpr_pair (enum machine_mode mode
,
352 enum machine_mode mode1
, HOST_WIDE_INT
,
353 enum machine_mode mode2
, HOST_WIDE_INT
);
354 static rtx
mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx
dump_constants_1 (enum machine_mode
, rtx
, rtx
);
358 static void dump_constants (struct mips16_constant
*, rtx
);
359 static int mips16_insn_length (rtx
);
360 static int mips16_rewrite_pool_refs (rtx
*, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim
*);
363 static void mips_sim_init (struct mips_sim
*, state_t
);
364 static void mips_sim_next_cycle (struct mips_sim
*);
365 static void mips_sim_wait_reg (struct mips_sim
*, rtx
, rtx
);
366 static int mips_sim_wait_regs_2 (rtx
*, void *);
367 static void mips_sim_wait_regs_1 (rtx
*, void *);
368 static void mips_sim_wait_regs (struct mips_sim
*, rtx
);
369 static void mips_sim_wait_units (struct mips_sim
*, rtx
);
370 static void mips_sim_wait_insn (struct mips_sim
*, rtx
);
371 static void mips_sim_record_set (rtx
, const_rtx
, void *);
372 static void mips_sim_issue_insn (struct mips_sim
*, rtx
);
373 static void mips_sim_issue_nop (struct mips_sim
*);
374 static void mips_sim_finish_insn (struct mips_sim
*, rtx
);
375 static void vr4130_avoid_branch_rt_conflict (rtx
);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx
, rtx
, int *, rtx
*, rtx
);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info
*mips_parse_cpu (const char *);
383 static const struct mips_cpu_info
*mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree
, const_tree
);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS
*);
386 static void mips_macc_chains_record (rtx
);
387 static void mips_macc_chains_reorder (rtx
*, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx
, const_rtx
, void *);
389 static bool vr4130_true_reg_dependence_p (rtx
);
390 static bool vr4130_swap_insns_p (rtx
, rtx
);
391 static void vr4130_reorder (rtx
*, int);
392 static void mips_promote_ready (rtx
*, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx
*, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx
, int);
396 static int mips_adjust_cost (rtx
, rtx
, rtx
, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS
*, enum machine_mode
,
402 static tree
mips_build_builtin_va_list (void);
403 static tree
mips_gimplify_va_arg_expr (tree
, tree
, tree
*, tree
*);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS
*, enum machine_mode mode
,
406 static bool mips_callee_copies (CUMULATIVE_ARGS
*, enum machine_mode mode
,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS
*, enum machine_mode mode
,
410 static bool mips_valid_pointer_mode (enum machine_mode
);
411 static bool mips_scalar_mode_supported_p (enum machine_mode
);
412 static bool mips_vector_mode_supported_p (enum machine_mode
);
413 static rtx
mips_prepare_builtin_arg (enum insn_code
, unsigned int, tree
, unsigned int);
414 static rtx
mips_prepare_builtin_target (enum insn_code
, unsigned int, rtx
);
415 static rtx
mips_expand_builtin (tree
, rtx
, rtx
, enum machine_mode
, int);
416 static void mips_init_builtins (void);
417 static rtx
mips_expand_builtin_direct (enum insn_code
, rtx
, tree
, bool);
418 static rtx
mips_expand_builtin_movtf (enum mips_builtin_type
,
419 enum insn_code
, enum mips_fp_condition
,
421 static rtx
mips_expand_builtin_compare (enum mips_builtin_type
,
422 enum insn_code
, enum mips_fp_condition
,
424 static rtx
mips_expand_builtin_bposge (enum mips_builtin_type
, rtx
);
425 static void mips_encode_section_info (tree
, rtx
, int);
426 static void mips_extra_live_on_entry (bitmap
);
427 static int mips_comp_type_attributes (const_tree
, const_tree
);
428 static void mips_set_mips16_mode (int);
429 static void mips_insert_attributes (tree
, tree
*);
430 static tree
mips_merge_decl_attributes (tree
, tree
);
431 static void mips_set_current_function (tree
);
432 static int mips_mode_rep_extended (enum machine_mode
, enum machine_mode
);
433 static bool mips_offset_within_alignment_p (rtx
, HOST_WIDE_INT
);
434 static void mips_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
436 /* Structure to be filled in by compute_frame_size with register
437 save masks, and offsets for the current function. */
439 struct mips_frame_info
GTY(())
441 HOST_WIDE_INT total_size
; /* # bytes that the entire frame takes up */
442 HOST_WIDE_INT var_size
; /* # bytes that variables take up */
443 HOST_WIDE_INT args_size
; /* # bytes that outgoing arguments take up */
444 HOST_WIDE_INT cprestore_size
; /* # bytes that the .cprestore slot takes up */
445 HOST_WIDE_INT gp_reg_size
; /* # bytes needed to store gp regs */
446 HOST_WIDE_INT fp_reg_size
; /* # bytes needed to store fp regs */
447 unsigned int mask
; /* mask of saved gp registers */
448 unsigned int fmask
; /* mask of saved fp registers */
449 HOST_WIDE_INT gp_save_offset
; /* offset from vfp to store gp registers */
450 HOST_WIDE_INT fp_save_offset
; /* offset from vfp to store fp registers */
451 HOST_WIDE_INT gp_sp_offset
; /* offset from new sp to store gp registers */
452 HOST_WIDE_INT fp_sp_offset
; /* offset from new sp to store fp registers */
453 bool initialized
; /* true if frame size already calculated */
454 int num_gp
; /* number of gp registers saved */
455 int num_fp
; /* number of fp registers saved */
458 struct machine_function
GTY(()) {
459 /* Pseudo-reg holding the value of $28 in a mips16 function which
460 refers to GP relative global variables. */
461 rtx mips16_gp_pseudo_rtx
;
463 /* The number of extra stack bytes taken up by register varargs.
464 This area is allocated by the callee at the very top of the frame. */
467 /* Current frame information, calculated by compute_frame_size. */
468 struct mips_frame_info frame
;
470 /* The register to use as the global pointer within this function. */
471 unsigned int global_pointer
;
473 /* True if mips_adjust_insn_length should ignore an instruction's
475 bool ignore_hazard_length_p
;
477 /* True if the whole function is suitable for .set noreorder and
479 bool all_noreorder_p
;
481 /* True if the function is known to have an instruction that needs $gp. */
484 /* True if we have emitted an instruction to initialize
485 mips16_gp_pseudo_rtx. */
486 bool initialized_mips16_gp_pseudo_p
;
489 /* Information about a single argument. */
492 /* True if the argument is passed in a floating-point register, or
493 would have been if we hadn't run out of registers. */
496 /* The number of words passed in registers, rounded up. */
497 unsigned int reg_words
;
499 /* For EABI, the offset of the first register from GP_ARG_FIRST or
500 FP_ARG_FIRST. For other ABIs, the offset of the first register from
501 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
502 comment for details).
504 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
506 unsigned int reg_offset
;
508 /* The number of words that must be passed on the stack, rounded up. */
509 unsigned int stack_words
;
511 /* The offset from the start of the stack overflow area of the argument's
512 first stack word. Only meaningful when STACK_WORDS is nonzero. */
513 unsigned int stack_offset
;
517 /* Information about an address described by mips_address_type.
523 REG is the base register and OFFSET is the constant offset.
526 REG is the register that contains the high part of the address,
527 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
528 is the type of OFFSET's symbol.
531 SYMBOL_TYPE is the type of symbol being referenced. */
533 struct mips_address_info
535 enum mips_address_type type
;
538 enum mips_symbol_type symbol_type
;
542 /* One stage in a constant building sequence. These sequences have
546 A = A CODE[1] VALUE[1]
547 A = A CODE[2] VALUE[2]
550 where A is an accumulator, each CODE[i] is a binary rtl operation
551 and each VALUE[i] is a constant integer. */
552 struct mips_integer_op
{
554 unsigned HOST_WIDE_INT value
;
558 /* The largest number of operations needed to load an integer constant.
559 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
560 When the lowest bit is clear, we can try, but reject a sequence with
561 an extra SLL at the end. */
562 #define MIPS_MAX_INTEGER_OPS 7
564 /* Information about a MIPS16e SAVE or RESTORE instruction. */
565 struct mips16e_save_restore_info
{
566 /* The number of argument registers saved by a SAVE instruction.
567 0 for RESTORE instructions. */
570 /* Bit X is set if the instruction saves or restores GPR X. */
573 /* The total number of bytes to allocate. */
577 /* Global variables for machine-dependent things. */
579 /* Threshold for data being put into the small data/bss area, instead
580 of the normal data area. */
581 int mips_section_threshold
= -1;
583 /* Count the number of .file directives, so that .loc is up to date. */
584 int num_source_filenames
= 0;
586 /* Count the number of sdb related labels are generated (to find block
587 start and end boundaries). */
588 int sdb_label_count
= 0;
590 /* Next label # for each statement for Silicon Graphics IRIS systems. */
593 /* Name of the file containing the current function. */
594 const char *current_function_file
= "";
596 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
602 /* The next branch instruction is a branch likely, not branch normal. */
603 int mips_branch_likely
;
605 /* The operands passed to the last cmpMM expander. */
608 /* The target cpu for code generation. */
609 enum processor_type mips_arch
;
610 const struct mips_cpu_info
*mips_arch_info
;
612 /* The target cpu for optimization and scheduling. */
613 enum processor_type mips_tune
;
614 const struct mips_cpu_info
*mips_tune_info
;
616 /* Which instruction set architecture to use. */
619 /* Which ABI to use. */
620 int mips_abi
= MIPS_ABI_DEFAULT
;
622 /* Cost information to use. */
623 const struct mips_rtx_cost_data
*mips_cost
;
625 /* Remember the ambient target flags, excluding mips16. */
626 static int mips_base_target_flags
;
627 /* The mips16 command-line target flags only. */
628 static bool mips_base_mips16
;
629 /* Similar copies of option settings. */
630 static int mips_base_schedule_insns
; /* flag_schedule_insns */
631 static int mips_base_reorder_blocks_and_partition
; /* flag_reorder... */
632 static int mips_base_move_loop_invariants
; /* flag_move_loop_invariants */
633 static int mips_base_align_loops
; /* align_loops */
634 static int mips_base_align_jumps
; /* align_jumps */
635 static int mips_base_align_functions
; /* align_functions */
636 static GTY(()) int mips16_flipper
;
638 /* The -mtext-loads setting. */
639 enum mips_code_readable_setting mips_code_readable
= CODE_READABLE_YES
;
641 /* The -mllsc setting. */
642 enum mips_llsc_setting mips_llsc
= LLSC_DEFAULT
;
644 /* The architecture selected by -mipsN. */
645 static const struct mips_cpu_info
*mips_isa_info
;
647 /* If TRUE, we split addresses into their high and low parts in the RTL. */
648 int mips_split_addresses
;
650 /* Mode used for saving/restoring general purpose registers. */
651 static enum machine_mode gpr_mode
;
653 /* Array giving truth value on whether or not a given hard register
654 can support a given mode. */
655 char mips_hard_regno_mode_ok
[(int)MAX_MACHINE_MODE
][FIRST_PSEUDO_REGISTER
];
657 /* List of all MIPS punctuation characters used by print_operand. */
658 char mips_print_operand_punct
[256];
660 /* Map GCC register number to debugger register number. */
661 int mips_dbx_regno
[FIRST_PSEUDO_REGISTER
];
662 int mips_dwarf_regno
[FIRST_PSEUDO_REGISTER
];
664 /* A copy of the original flag_delayed_branch: see override_options. */
665 static int mips_flag_delayed_branch
;
667 static GTY (()) int mips_output_filename_first_time
= 1;
669 /* mips_split_p[X] is true if symbols of type X can be split by
670 mips_split_symbol(). */
671 bool mips_split_p
[NUM_SYMBOL_TYPES
];
673 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
674 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
675 if they are matched by a special .md file pattern. */
676 static const char *mips_lo_relocs
[NUM_SYMBOL_TYPES
];
678 /* Likewise for HIGHs. */
679 static const char *mips_hi_relocs
[NUM_SYMBOL_TYPES
];
681 /* Map hard register number to register class */
682 const enum reg_class mips_regno_to_class
[] =
684 LEA_REGS
, LEA_REGS
, M16_NA_REGS
, V1_REG
,
685 M16_REGS
, M16_REGS
, M16_REGS
, M16_REGS
,
686 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
687 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
688 M16_NA_REGS
, M16_NA_REGS
, LEA_REGS
, LEA_REGS
,
689 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
690 T_REG
, PIC_FN_ADDR_REG
, LEA_REGS
, LEA_REGS
,
691 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
692 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
693 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
694 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
695 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
696 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
697 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
698 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
699 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
700 MD0_REG
, MD1_REG
, NO_REGS
, ST_REGS
,
701 ST_REGS
, ST_REGS
, ST_REGS
, ST_REGS
,
702 ST_REGS
, ST_REGS
, ST_REGS
, NO_REGS
,
703 NO_REGS
, ALL_REGS
, ALL_REGS
, NO_REGS
,
704 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
705 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
706 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
707 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
708 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
709 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
710 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
711 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
712 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
713 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
714 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
715 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
716 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
717 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
718 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
719 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
720 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
721 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
722 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
723 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
724 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
725 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
726 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
727 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
728 DSP_ACC_REGS
, DSP_ACC_REGS
, DSP_ACC_REGS
, DSP_ACC_REGS
,
729 DSP_ACC_REGS
, DSP_ACC_REGS
, ALL_REGS
, ALL_REGS
,
730 ALL_REGS
, ALL_REGS
, ALL_REGS
, ALL_REGS
733 /* Table of machine dependent attributes. */
734 const struct attribute_spec mips_attribute_table
[] =
736 { "long_call", 0, 0, false, true, true, NULL
},
737 { "far", 0, 0, false, true, true, NULL
},
738 { "near", 0, 0, false, true, true, NULL
},
739 /* Switch MIPS16 ASE on and off per-function. We would really like
740 to make these type attributes, but GCC doesn't provide the hooks
741 we need to support the right conversion rules. As declaration
742 attributes, they affect code generation but don't carry other
744 { "mips16", 0, 0, true, false, false, NULL
},
745 { "nomips16", 0, 0, true, false, false, NULL
},
746 { NULL
, 0, 0, false, false, false, NULL
}
749 /* A table describing all the processors gcc knows about. Names are
750 matched in the order listed. The first mention of an ISA level is
751 taken as the canonical name for that ISA.
753 To ease comparison, please keep this table in the same order
754 as gas's mips_cpu_info_table[]. Please also make sure that
755 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
756 options correctly. */
757 const struct mips_cpu_info mips_cpu_info_table
[] = {
758 /* Entries for generic ISAs */
759 { "mips1", PROCESSOR_R3000
, 1 },
760 { "mips2", PROCESSOR_R6000
, 2 },
761 { "mips3", PROCESSOR_R4000
, 3 },
762 { "mips4", PROCESSOR_R8000
, 4 },
763 { "mips32", PROCESSOR_4KC
, 32 },
764 { "mips32r2", PROCESSOR_M4K
, 33 },
765 { "mips64", PROCESSOR_5KC
, 64 },
768 { "r3000", PROCESSOR_R3000
, 1 },
769 { "r2000", PROCESSOR_R3000
, 1 }, /* = r3000 */
770 { "r3900", PROCESSOR_R3900
, 1 },
773 { "r6000", PROCESSOR_R6000
, 2 },
776 { "r4000", PROCESSOR_R4000
, 3 },
777 { "vr4100", PROCESSOR_R4100
, 3 },
778 { "vr4111", PROCESSOR_R4111
, 3 },
779 { "vr4120", PROCESSOR_R4120
, 3 },
780 { "vr4130", PROCESSOR_R4130
, 3 },
781 { "vr4300", PROCESSOR_R4300
, 3 },
782 { "r4400", PROCESSOR_R4000
, 3 }, /* = r4000 */
783 { "r4600", PROCESSOR_R4600
, 3 },
784 { "orion", PROCESSOR_R4600
, 3 }, /* = r4600 */
785 { "r4650", PROCESSOR_R4650
, 3 },
788 { "r8000", PROCESSOR_R8000
, 4 },
789 { "vr5000", PROCESSOR_R5000
, 4 },
790 { "vr5400", PROCESSOR_R5400
, 4 },
791 { "vr5500", PROCESSOR_R5500
, 4 },
792 { "rm7000", PROCESSOR_R7000
, 4 },
793 { "rm9000", PROCESSOR_R9000
, 4 },
796 { "4kc", PROCESSOR_4KC
, 32 },
797 { "4km", PROCESSOR_4KC
, 32 }, /* = 4kc */
798 { "4kp", PROCESSOR_4KP
, 32 },
799 { "4ksc", PROCESSOR_4KC
, 32 },
801 /* MIPS32 Release 2 */
802 { "m4k", PROCESSOR_M4K
, 33 },
803 { "4kec", PROCESSOR_4KC
, 33 },
804 { "4kem", PROCESSOR_4KC
, 33 },
805 { "4kep", PROCESSOR_4KP
, 33 },
806 { "4ksd", PROCESSOR_4KC
, 33 },
808 { "24kc", PROCESSOR_24KC
, 33 },
809 { "24kf2_1", PROCESSOR_24KF2_1
, 33 },
810 { "24kf", PROCESSOR_24KF2_1
, 33 },
811 { "24kf1_1", PROCESSOR_24KF1_1
, 33 },
812 { "24kfx", PROCESSOR_24KF1_1
, 33 },
813 { "24kx", PROCESSOR_24KF1_1
, 33 },
815 { "24kec", PROCESSOR_24KC
, 33 }, /* 24K with DSP */
816 { "24kef2_1", PROCESSOR_24KF2_1
, 33 },
817 { "24kef", PROCESSOR_24KF2_1
, 33 },
818 { "24kef1_1", PROCESSOR_24KF1_1
, 33 },
819 { "24kefx", PROCESSOR_24KF1_1
, 33 },
820 { "24kex", PROCESSOR_24KF1_1
, 33 },
822 { "34kc", PROCESSOR_24KC
, 33 }, /* 34K with MT/DSP */
823 { "34kf2_1", PROCESSOR_24KF2_1
, 33 },
824 { "34kf", PROCESSOR_24KF2_1
, 33 },
825 { "34kf1_1", PROCESSOR_24KF1_1
, 33 },
826 { "34kfx", PROCESSOR_24KF1_1
, 33 },
827 { "34kx", PROCESSOR_24KF1_1
, 33 },
829 { "74kc", PROCESSOR_74KC
, 33 }, /* 74K with DSPr2 */
830 { "74kf2_1", PROCESSOR_74KF2_1
, 33 },
831 { "74kf", PROCESSOR_74KF2_1
, 33 },
832 { "74kf1_1", PROCESSOR_74KF1_1
, 33 },
833 { "74kfx", PROCESSOR_74KF1_1
, 33 },
834 { "74kx", PROCESSOR_74KF1_1
, 33 },
835 { "74kf3_2", PROCESSOR_74KF3_2
, 33 },
838 { "5kc", PROCESSOR_5KC
, 64 },
839 { "5kf", PROCESSOR_5KF
, 64 },
840 { "20kc", PROCESSOR_20KC
, 64 },
841 { "sb1", PROCESSOR_SB1
, 64 },
842 { "sb1a", PROCESSOR_SB1A
, 64 },
843 { "sr71000", PROCESSOR_SR71000
, 64 },
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  Each entry is in the field order of
   struct mips_rtx_cost_data.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                                       2, /* branch_cost */  \
                                       4  /* memory_latency */
/* Need to replace these with the costs of calling the appropriate
   library function.  Until then, use a uniformly high cost so that
   software floating point is strongly discouraged by rtx costing.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */
871 static struct mips_rtx_cost_data
const mips_rtx_cost_optimize_size
=
873 COSTS_N_INSNS (1), /* fp_add */
874 COSTS_N_INSNS (1), /* fp_mult_sf */
875 COSTS_N_INSNS (1), /* fp_mult_df */
876 COSTS_N_INSNS (1), /* fp_div_sf */
877 COSTS_N_INSNS (1), /* fp_div_df */
878 COSTS_N_INSNS (1), /* int_mult_si */
879 COSTS_N_INSNS (1), /* int_mult_di */
880 COSTS_N_INSNS (1), /* int_div_si */
881 COSTS_N_INSNS (1), /* int_div_di */
883 4 /* memory_latency */
/* Costs to use when optimizing for speed, indexed by processor.
   NOTE(review): this table suffered extraction damage -- the per-entry
   braces, the per-processor labels, and every entry that consisted only
   of DEFAULT_COSTS or SOFT_FP_COSTS plus a few overrides lost their
   framing lines.  Only the cost fields themselves survive below.
   Restore the entry structure from the upstream file before use.  */
886 static struct mips_rtx_cost_data
const mips_rtx_cost_data
[PROCESSOR_MAX
] =
889 COSTS_N_INSNS (2), /* fp_add */
890 COSTS_N_INSNS (4), /* fp_mult_sf */
891 COSTS_N_INSNS (5), /* fp_mult_df */
892 COSTS_N_INSNS (12), /* fp_div_sf */
893 COSTS_N_INSNS (19), /* fp_div_df */
894 COSTS_N_INSNS (12), /* int_mult_si */
895 COSTS_N_INSNS (12), /* int_mult_di */
896 COSTS_N_INSNS (35), /* int_div_si */
897 COSTS_N_INSNS (35), /* int_div_di */
899 4 /* memory_latency */
/* NOTE(review): next entry (integer costs only; FP fields presumably
   came from a SOFT_FP_COSTS line that was dropped -- confirm).  */
904 COSTS_N_INSNS (6), /* int_mult_si */
905 COSTS_N_INSNS (6), /* int_mult_di */
906 COSTS_N_INSNS (36), /* int_div_si */
907 COSTS_N_INSNS (36), /* int_div_di */
909 4 /* memory_latency */
913 COSTS_N_INSNS (36), /* int_mult_si */
914 COSTS_N_INSNS (36), /* int_mult_di */
915 COSTS_N_INSNS (37), /* int_div_si */
916 COSTS_N_INSNS (37), /* int_div_di */
918 4 /* memory_latency */
922 COSTS_N_INSNS (4), /* int_mult_si */
923 COSTS_N_INSNS (11), /* int_mult_di */
924 COSTS_N_INSNS (36), /* int_div_si */
925 COSTS_N_INSNS (68), /* int_div_di */
927 4 /* memory_latency */
930 COSTS_N_INSNS (4), /* fp_add */
931 COSTS_N_INSNS (4), /* fp_mult_sf */
932 COSTS_N_INSNS (5), /* fp_mult_df */
933 COSTS_N_INSNS (17), /* fp_div_sf */
934 COSTS_N_INSNS (32), /* fp_div_df */
935 COSTS_N_INSNS (4), /* int_mult_si */
936 COSTS_N_INSNS (11), /* int_mult_di */
937 COSTS_N_INSNS (36), /* int_div_si */
938 COSTS_N_INSNS (68), /* int_div_di */
940 4 /* memory_latency */
943 COSTS_N_INSNS (4), /* fp_add */
944 COSTS_N_INSNS (4), /* fp_mult_sf */
945 COSTS_N_INSNS (5), /* fp_mult_df */
946 COSTS_N_INSNS (17), /* fp_div_sf */
947 COSTS_N_INSNS (32), /* fp_div_df */
948 COSTS_N_INSNS (4), /* int_mult_si */
949 COSTS_N_INSNS (7), /* int_mult_di */
950 COSTS_N_INSNS (42), /* int_div_si */
951 COSTS_N_INSNS (72), /* int_div_di */
953 4 /* memory_latency */
957 COSTS_N_INSNS (5), /* int_mult_si */
958 COSTS_N_INSNS (5), /* int_mult_di */
959 COSTS_N_INSNS (41), /* int_div_si */
960 COSTS_N_INSNS (41), /* int_div_di */
962 4 /* memory_latency */
965 COSTS_N_INSNS (8), /* fp_add */
966 COSTS_N_INSNS (8), /* fp_mult_sf */
967 COSTS_N_INSNS (10), /* fp_mult_df */
968 COSTS_N_INSNS (34), /* fp_div_sf */
969 COSTS_N_INSNS (64), /* fp_div_df */
970 COSTS_N_INSNS (5), /* int_mult_si */
971 COSTS_N_INSNS (5), /* int_mult_di */
972 COSTS_N_INSNS (41), /* int_div_si */
973 COSTS_N_INSNS (41), /* int_div_di */
975 4 /* memory_latency */
978 COSTS_N_INSNS (4), /* fp_add */
979 COSTS_N_INSNS (4), /* fp_mult_sf */
980 COSTS_N_INSNS (5), /* fp_mult_df */
981 COSTS_N_INSNS (17), /* fp_div_sf */
982 COSTS_N_INSNS (32), /* fp_div_df */
983 COSTS_N_INSNS (5), /* int_mult_si */
984 COSTS_N_INSNS (5), /* int_mult_di */
985 COSTS_N_INSNS (41), /* int_div_si */
986 COSTS_N_INSNS (41), /* int_div_di */
988 4 /* memory_latency */
992 COSTS_N_INSNS (5), /* int_mult_si */
993 COSTS_N_INSNS (5), /* int_mult_di */
994 COSTS_N_INSNS (41), /* int_div_si */
995 COSTS_N_INSNS (41), /* int_div_di */
997 4 /* memory_latency */
1000 COSTS_N_INSNS (8), /* fp_add */
1001 COSTS_N_INSNS (8), /* fp_mult_sf */
1002 COSTS_N_INSNS (10), /* fp_mult_df */
1003 COSTS_N_INSNS (34), /* fp_div_sf */
1004 COSTS_N_INSNS (64), /* fp_div_df */
1005 COSTS_N_INSNS (5), /* int_mult_si */
1006 COSTS_N_INSNS (5), /* int_mult_di */
1007 COSTS_N_INSNS (41), /* int_div_si */
1008 COSTS_N_INSNS (41), /* int_div_di */
1009 1, /* branch_cost */
1010 4 /* memory_latency */
1013 COSTS_N_INSNS (4), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (5), /* fp_mult_df */
1016 COSTS_N_INSNS (17), /* fp_div_sf */
1017 COSTS_N_INSNS (32), /* fp_div_df */
1018 COSTS_N_INSNS (5), /* int_mult_si */
1019 COSTS_N_INSNS (5), /* int_mult_di */
1020 COSTS_N_INSNS (41), /* int_div_si */
1021 COSTS_N_INSNS (41), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1026 COSTS_N_INSNS (6), /* fp_add */
1027 COSTS_N_INSNS (6), /* fp_mult_sf */
1028 COSTS_N_INSNS (7), /* fp_mult_df */
1029 COSTS_N_INSNS (25), /* fp_div_sf */
1030 COSTS_N_INSNS (48), /* fp_div_df */
1031 COSTS_N_INSNS (5), /* int_mult_si */
1032 COSTS_N_INSNS (5), /* int_mult_di */
1033 COSTS_N_INSNS (41), /* int_div_si */
1034 COSTS_N_INSNS (41), /* int_div_di */
1035 1, /* branch_cost */
1036 4 /* memory_latency */
1042 COSTS_N_INSNS (2), /* fp_add */
1043 COSTS_N_INSNS (4), /* fp_mult_sf */
1044 COSTS_N_INSNS (5), /* fp_mult_df */
1045 COSTS_N_INSNS (12), /* fp_div_sf */
1046 COSTS_N_INSNS (19), /* fp_div_df */
1047 COSTS_N_INSNS (2), /* int_mult_si */
1048 COSTS_N_INSNS (2), /* int_mult_di */
1049 COSTS_N_INSNS (35), /* int_div_si */
1050 COSTS_N_INSNS (35), /* int_div_di */
1051 1, /* branch_cost */
1052 4 /* memory_latency */
1055 COSTS_N_INSNS (3), /* fp_add */
1056 COSTS_N_INSNS (5), /* fp_mult_sf */
1057 COSTS_N_INSNS (6), /* fp_mult_df */
1058 COSTS_N_INSNS (15), /* fp_div_sf */
1059 COSTS_N_INSNS (16), /* fp_div_df */
1060 COSTS_N_INSNS (17), /* int_mult_si */
1061 COSTS_N_INSNS (17), /* int_mult_di */
1062 COSTS_N_INSNS (38), /* int_div_si */
1063 COSTS_N_INSNS (38), /* int_div_di */
1064 2, /* branch_cost */
1065 6 /* memory_latency */
1068 COSTS_N_INSNS (6), /* fp_add */
1069 COSTS_N_INSNS (7), /* fp_mult_sf */
1070 COSTS_N_INSNS (8), /* fp_mult_df */
1071 COSTS_N_INSNS (23), /* fp_div_sf */
1072 COSTS_N_INSNS (36), /* fp_div_df */
1073 COSTS_N_INSNS (10), /* int_mult_si */
1074 COSTS_N_INSNS (10), /* int_mult_di */
1075 COSTS_N_INSNS (69), /* int_div_si */
1076 COSTS_N_INSNS (69), /* int_div_di */
1077 2, /* branch_cost */
1078 6 /* memory_latency */
/* NOTE(review): one or more whole entries between the previous and the
   next visible lines were dropped by extraction.  */
1090 /* The only costs that appear to be updated here are
1091 integer multiplication. */
1093 COSTS_N_INSNS (4), /* int_mult_si */
1094 COSTS_N_INSNS (6), /* int_mult_di */
1095 COSTS_N_INSNS (69), /* int_div_si */
1096 COSTS_N_INSNS (69), /* int_div_di */
1097 1, /* branch_cost */
1098 4 /* memory_latency */
1110 COSTS_N_INSNS (6), /* fp_add */
1111 COSTS_N_INSNS (4), /* fp_mult_sf */
1112 COSTS_N_INSNS (5), /* fp_mult_df */
1113 COSTS_N_INSNS (23), /* fp_div_sf */
1114 COSTS_N_INSNS (36), /* fp_div_df */
1115 COSTS_N_INSNS (5), /* int_mult_si */
1116 COSTS_N_INSNS (5), /* int_mult_di */
1117 COSTS_N_INSNS (36), /* int_div_si */
1118 COSTS_N_INSNS (36), /* int_div_di */
1119 1, /* branch_cost */
1120 4 /* memory_latency */
1123 COSTS_N_INSNS (6), /* fp_add */
1124 COSTS_N_INSNS (5), /* fp_mult_sf */
1125 COSTS_N_INSNS (6), /* fp_mult_df */
1126 COSTS_N_INSNS (30), /* fp_div_sf */
1127 COSTS_N_INSNS (59), /* fp_div_df */
1128 COSTS_N_INSNS (3), /* int_mult_si */
1129 COSTS_N_INSNS (4), /* int_mult_di */
1130 COSTS_N_INSNS (42), /* int_div_si */
1131 COSTS_N_INSNS (74), /* int_div_di */
1132 1, /* branch_cost */
1133 4 /* memory_latency */
1136 COSTS_N_INSNS (6), /* fp_add */
1137 COSTS_N_INSNS (5), /* fp_mult_sf */
1138 COSTS_N_INSNS (6), /* fp_mult_df */
1139 COSTS_N_INSNS (30), /* fp_div_sf */
1140 COSTS_N_INSNS (59), /* fp_div_df */
1141 COSTS_N_INSNS (5), /* int_mult_si */
1142 COSTS_N_INSNS (9), /* int_mult_di */
1143 COSTS_N_INSNS (42), /* int_div_si */
1144 COSTS_N_INSNS (74), /* int_div_di */
1145 1, /* branch_cost */
1146 4 /* memory_latency */
1149 /* The only costs that are changed here are
1150 integer multiplication. */
1151 COSTS_N_INSNS (6), /* fp_add */
1152 COSTS_N_INSNS (7), /* fp_mult_sf */
1153 COSTS_N_INSNS (8), /* fp_mult_df */
1154 COSTS_N_INSNS (23), /* fp_div_sf */
1155 COSTS_N_INSNS (36), /* fp_div_df */
1156 COSTS_N_INSNS (5), /* int_mult_si */
1157 COSTS_N_INSNS (9), /* int_mult_di */
1158 COSTS_N_INSNS (69), /* int_div_si */
1159 COSTS_N_INSNS (69), /* int_div_di */
1160 1, /* branch_cost */
1161 4 /* memory_latency */
1167 /* The only costs that are changed here are
1168 integer multiplication. */
1169 COSTS_N_INSNS (6), /* fp_add */
1170 COSTS_N_INSNS (7), /* fp_mult_sf */
1171 COSTS_N_INSNS (8), /* fp_mult_df */
1172 COSTS_N_INSNS (23), /* fp_div_sf */
1173 COSTS_N_INSNS (36), /* fp_div_df */
1174 COSTS_N_INSNS (3), /* int_mult_si */
1175 COSTS_N_INSNS (8), /* int_mult_di */
1176 COSTS_N_INSNS (69), /* int_div_si */
1177 COSTS_N_INSNS (69), /* int_div_di */
1178 1, /* branch_cost */
1179 4 /* memory_latency */
1182 /* These costs are the same as the SB-1A below. */
1183 COSTS_N_INSNS (4), /* fp_add */
1184 COSTS_N_INSNS (4), /* fp_mult_sf */
1185 COSTS_N_INSNS (4), /* fp_mult_df */
1186 COSTS_N_INSNS (24), /* fp_div_sf */
1187 COSTS_N_INSNS (32), /* fp_div_df */
1188 COSTS_N_INSNS (3), /* int_mult_si */
1189 COSTS_N_INSNS (4), /* int_mult_di */
1190 COSTS_N_INSNS (36), /* int_div_si */
1191 COSTS_N_INSNS (68), /* int_div_di */
1192 1, /* branch_cost */
1193 4 /* memory_latency */
1196 /* These costs are the same as the SB-1 above. */
1197 COSTS_N_INSNS (4), /* fp_add */
1198 COSTS_N_INSNS (4), /* fp_mult_sf */
1199 COSTS_N_INSNS (4), /* fp_mult_df */
1200 COSTS_N_INSNS (24), /* fp_div_sf */
1201 COSTS_N_INSNS (32), /* fp_div_df */
1202 COSTS_N_INSNS (3), /* int_mult_si */
1203 COSTS_N_INSNS (4), /* int_mult_di */
1204 COSTS_N_INSNS (36), /* int_div_si */
1205 COSTS_N_INSNS (68), /* int_div_di */
1206 1, /* branch_cost */
1207 4 /* memory_latency */
/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
/* Argument registers $a0-$a3, in SAVE/RESTORE prefix order: saving
   element X implies saving elements X + 1 onwards (see the comment
   above mips16e_s2_s8_regs).  NOTE(review): the initializer line was
   lost in extraction; values restored from upstream GCC -- confirm.  */
static const unsigned char mips16e_a0_a3_regs[] = {
  4, 5, 6, 7
};
/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};
1230 /* Initialize the GCC target structure. */
1231 #undef TARGET_ASM_ALIGNED_HI_OP
1232 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1233 #undef TARGET_ASM_ALIGNED_SI_OP
1234 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1235 #undef TARGET_ASM_ALIGNED_DI_OP
1236 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1238 #undef TARGET_ASM_FUNCTION_PROLOGUE
1239 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1240 #undef TARGET_ASM_FUNCTION_EPILOGUE
1241 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1242 #undef TARGET_ASM_SELECT_RTX_SECTION
1243 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1244 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1245 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1247 #undef TARGET_SCHED_INIT
1248 #define TARGET_SCHED_INIT mips_sched_init
1249 #undef TARGET_SCHED_REORDER
1250 #define TARGET_SCHED_REORDER mips_sched_reorder
1251 #undef TARGET_SCHED_REORDER2
1252 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1253 #undef TARGET_SCHED_VARIABLE_ISSUE
1254 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1255 #undef TARGET_SCHED_ADJUST_COST
1256 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1257 #undef TARGET_SCHED_ISSUE_RATE
1258 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1259 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1260 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1261 mips_multipass_dfa_lookahead
1263 #undef TARGET_DEFAULT_TARGET_FLAGS
1264 #define TARGET_DEFAULT_TARGET_FLAGS \
1266 | TARGET_CPU_DEFAULT \
1267 | TARGET_ENDIAN_DEFAULT \
1268 | TARGET_FP_EXCEPTIONS_DEFAULT \
1269 | MASK_CHECK_ZERO_DIV \
1271 #undef TARGET_HANDLE_OPTION
1272 #define TARGET_HANDLE_OPTION mips_handle_option
1274 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1275 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1277 #undef TARGET_INSERT_ATTRIBUTES
1278 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
1279 #undef TARGET_MERGE_DECL_ATTRIBUTES
1280 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
1281 #undef TARGET_SET_CURRENT_FUNCTION
1282 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1284 #undef TARGET_VALID_POINTER_MODE
1285 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1286 #undef TARGET_RTX_COSTS
1287 #define TARGET_RTX_COSTS mips_rtx_costs
1288 #undef TARGET_ADDRESS_COST
1289 #define TARGET_ADDRESS_COST mips_address_cost
1291 #undef TARGET_IN_SMALL_DATA_P
1292 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1294 #undef TARGET_MACHINE_DEPENDENT_REORG
1295 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1297 #undef TARGET_ASM_FILE_START
1298 #define TARGET_ASM_FILE_START mips_file_start
1299 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1300 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1302 #undef TARGET_INIT_LIBFUNCS
1303 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1305 #undef TARGET_BUILD_BUILTIN_VA_LIST
1306 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1307 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1308 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1310 #undef TARGET_PROMOTE_FUNCTION_ARGS
1311 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1312 #undef TARGET_PROMOTE_FUNCTION_RETURN
1313 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1314 #undef TARGET_PROMOTE_PROTOTYPES
1315 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1317 #undef TARGET_RETURN_IN_MEMORY
1318 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1319 #undef TARGET_RETURN_IN_MSB
1320 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1322 #undef TARGET_ASM_OUTPUT_MI_THUNK
1323 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1324 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1325 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1327 #undef TARGET_SETUP_INCOMING_VARARGS
1328 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1329 #undef TARGET_STRICT_ARGUMENT_NAMING
1330 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1331 #undef TARGET_MUST_PASS_IN_STACK
1332 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1333 #undef TARGET_PASS_BY_REFERENCE
1334 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1335 #undef TARGET_CALLEE_COPIES
1336 #define TARGET_CALLEE_COPIES mips_callee_copies
1337 #undef TARGET_ARG_PARTIAL_BYTES
1338 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1340 #undef TARGET_MODE_REP_EXTENDED
1341 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1343 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1344 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1346 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1347 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
1349 #undef TARGET_INIT_BUILTINS
1350 #define TARGET_INIT_BUILTINS mips_init_builtins
1351 #undef TARGET_EXPAND_BUILTIN
1352 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1354 #undef TARGET_HAVE_TLS
1355 #define TARGET_HAVE_TLS HAVE_AS_TLS
1357 #undef TARGET_CANNOT_FORCE_CONST_MEM
1358 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1360 #undef TARGET_ENCODE_SECTION_INFO
1361 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1363 #undef TARGET_ATTRIBUTE_TABLE
1364 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1365 /* All our function attributes are related to how out-of-line copies should
1366 be compiled or called. They don't in themselves prevent inlining. */
1367 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
1368 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
1370 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1371 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1373 #undef TARGET_MIN_ANCHOR_OFFSET
1374 #define TARGET_MIN_ANCHOR_OFFSET -32768
1375 #undef TARGET_MAX_ANCHOR_OFFSET
1376 #define TARGET_MAX_ANCHOR_OFFSET 32767
1377 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1378 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1379 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1380 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1382 #undef TARGET_COMP_TYPE_ATTRIBUTES
1383 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1385 #ifdef HAVE_AS_DTPRELWORD
1386 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1387 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1390 struct gcc_target targetm
= TARGET_INITIALIZER
;
1393 /* Predicates to test for presence of "near" and "far"/"long_call"
1394 attributes on the given TYPE. */
1397 mips_near_type_p (const_tree type
)
1399 return lookup_attribute ("near", TYPE_ATTRIBUTES (type
)) != NULL
;
1403 mips_far_type_p (const_tree type
)
1405 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type
)) != NULL
1406 || lookup_attribute ("far", TYPE_ATTRIBUTES (type
)) != NULL
);
1409 /* Similar predicates for "mips16"/"nomips16" attributes. */
1412 mips_mips16_decl_p (const_tree decl
)
1414 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl
)) != NULL
;
1418 mips_nomips16_decl_p (const_tree decl
)
1420 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl
)) != NULL
;
1423 /* Return 0 if the attributes for two types are incompatible, 1 if they
1424 are compatible, and 2 if they are nearly compatible (which causes a
1425 warning to be generated). */
1428 mips_comp_type_attributes (const_tree type1
, const_tree type2
)
1430 /* Check for mismatch of non-default calling convention. */
1431 if (TREE_CODE (type1
) != FUNCTION_TYPE
)
1434 /* Disallow mixed near/far attributes. */
1435 if (mips_far_type_p (type1
) && mips_near_type_p (type2
))
1437 if (mips_near_type_p (type1
) && mips_far_type_p (type2
))
1443 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1444 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1447 mips_split_plus (rtx x
, rtx
*base_ptr
, HOST_WIDE_INT
*offset_ptr
)
1449 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1451 *base_ptr
= XEXP (x
, 0);
1452 *offset_ptr
= INTVAL (XEXP (x
, 1));
1461 /* Return true if SYMBOL_REF X is associated with a global symbol
1462 (in the STB_GLOBAL sense). */
1465 mips_global_symbol_p (const_rtx x
)
1467 const_tree
const decl
= SYMBOL_REF_DECL (x
);
1470 return !SYMBOL_REF_LOCAL_P (x
);
1472 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1473 or weak symbols. Relocations in the object file will be against
1474 the target symbol, so it's that symbol's binding that matters here. */
1475 return DECL_P (decl
) && (TREE_PUBLIC (decl
) || DECL_WEAK (decl
));
1478 /* Return true if SYMBOL_REF X binds locally. */
1481 mips_symbol_binds_local_p (const_rtx x
)
1483 return (SYMBOL_REF_DECL (x
)
1484 ? targetm
.binds_local_p (SYMBOL_REF_DECL (x
))
1485 : SYMBOL_REF_LOCAL_P (x
));
1488 /* Return true if rtx constants of mode MODE should be put into a small
1492 mips_rtx_constant_in_small_data_p (enum machine_mode mode
)
1494 return (!TARGET_EMBEDDED_DATA
1495 && TARGET_LOCAL_SDATA
1496 && GET_MODE_SIZE (mode
) <= mips_section_threshold
);
1499 /* Return the method that should be used to access SYMBOL_REF or
1500 LABEL_REF X in context CONTEXT. */
1502 static enum mips_symbol_type
1503 mips_classify_symbol (const_rtx x
, enum mips_symbol_context context
)
1506 return SYMBOL_GOT_DISP
;
1508 if (GET_CODE (x
) == LABEL_REF
)
1510 /* LABEL_REFs are used for jump tables as well as text labels.
1511 Only return SYMBOL_PC_RELATIVE if we know the label is in
1512 the text section. */
1513 if (TARGET_MIPS16_SHORT_JUMP_TABLES
)
1514 return SYMBOL_PC_RELATIVE
;
1515 if (TARGET_ABICALLS
&& !TARGET_ABSOLUTE_ABICALLS
)
1516 return SYMBOL_GOT_PAGE_OFST
;
1517 return SYMBOL_ABSOLUTE
;
1520 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
1522 if (SYMBOL_REF_TLS_MODEL (x
))
1525 if (CONSTANT_POOL_ADDRESS_P (x
))
1527 if (TARGET_MIPS16_TEXT_LOADS
)
1528 return SYMBOL_PC_RELATIVE
;
1530 if (TARGET_MIPS16_PCREL_LOADS
&& context
== SYMBOL_CONTEXT_MEM
)
1531 return SYMBOL_PC_RELATIVE
;
1533 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x
)))
1534 return SYMBOL_GP_RELATIVE
;
1537 /* Do not use small-data accesses for weak symbols; they may end up
1540 && SYMBOL_REF_SMALL_P (x
)
1541 && !SYMBOL_REF_WEAK (x
))
1542 return SYMBOL_GP_RELATIVE
;
1544 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1547 && !(TARGET_ABSOLUTE_ABICALLS
&& mips_symbol_binds_local_p (x
)))
1549 /* There are three cases to consider:
1551 - o32 PIC (either with or without explicit relocs)
1552 - n32/n64 PIC without explicit relocs
1553 - n32/n64 PIC with explicit relocs
1555 In the first case, both local and global accesses will use an
1556 R_MIPS_GOT16 relocation. We must correctly predict which of
1557 the two semantics (local or global) the assembler and linker
1558 will apply. The choice depends on the symbol's binding rather
1559 than its visibility.
1561 In the second case, the assembler will not use R_MIPS_GOT16
1562 relocations, but it chooses between local and global accesses
1563 in the same way as for o32 PIC.
1565 In the third case we have more freedom since both forms of
1566 access will work for any kind of symbol. However, there seems
1567 little point in doing things differently. */
1568 if (mips_global_symbol_p (x
))
1569 return SYMBOL_GOT_DISP
;
1571 return SYMBOL_GOT_PAGE_OFST
;
1574 if (TARGET_MIPS16_PCREL_LOADS
&& context
!= SYMBOL_CONTEXT_CALL
)
1575 return SYMBOL_FORCE_TO_MEM
;
1576 return SYMBOL_ABSOLUTE
;
1579 /* Classify symbolic expression X, given that it appears in context
1582 static enum mips_symbol_type
1583 mips_classify_symbolic_expression (rtx x
, enum mips_symbol_context context
)
1587 split_const (x
, &x
, &offset
);
1588 if (UNSPEC_ADDRESS_P (x
))
1589 return UNSPEC_ADDRESS_TYPE (x
);
1591 return mips_classify_symbol (x
, context
);
1594 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1595 is the alignment (in bytes) of SYMBOL_REF X. */
1598 mips_offset_within_alignment_p (rtx x
, HOST_WIDE_INT offset
)
1600 /* If for some reason we can't get the alignment for the
1601 symbol, initializing this to one means we will only accept
1603 HOST_WIDE_INT align
= 1;
1606 /* Get the alignment of the symbol we're referring to. */
1607 t
= SYMBOL_REF_DECL (x
);
1609 align
= DECL_ALIGN_UNIT (t
);
1611 return offset
>= 0 && offset
< align
;
1614 /* Return true if X is a symbolic constant that can be used in context
1615 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1618 mips_symbolic_constant_p (rtx x
, enum mips_symbol_context context
,
1619 enum mips_symbol_type
*symbol_type
)
1623 split_const (x
, &x
, &offset
);
1624 if (UNSPEC_ADDRESS_P (x
))
1626 *symbol_type
= UNSPEC_ADDRESS_TYPE (x
);
1627 x
= UNSPEC_ADDRESS (x
);
1629 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
1631 *symbol_type
= mips_classify_symbol (x
, context
);
1632 if (*symbol_type
== SYMBOL_TLS
)
1638 if (offset
== const0_rtx
)
1641 /* Check whether a nonzero offset is valid for the underlying
1643 switch (*symbol_type
)
1645 case SYMBOL_ABSOLUTE
:
1646 case SYMBOL_FORCE_TO_MEM
:
1647 case SYMBOL_32_HIGH
:
1648 case SYMBOL_64_HIGH
:
1651 /* If the target has 64-bit pointers and the object file only
1652 supports 32-bit symbols, the values of those symbols will be
1653 sign-extended. In this case we can't allow an arbitrary offset
1654 in case the 32-bit value X + OFFSET has a different sign from X. */
1655 if (Pmode
== DImode
&& !ABI_HAS_64BIT_SYMBOLS
)
1656 return offset_within_block_p (x
, INTVAL (offset
));
1658 /* In other cases the relocations can handle any offset. */
1661 case SYMBOL_PC_RELATIVE
:
1662 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1663 In this case, we no longer have access to the underlying constant,
1664 but the original symbol-based access was known to be valid. */
1665 if (GET_CODE (x
) == LABEL_REF
)
1670 case SYMBOL_GP_RELATIVE
:
1671 /* Make sure that the offset refers to something within the
1672 same object block. This should guarantee that the final
1673 PC- or GP-relative offset is within the 16-bit limit. */
1674 return offset_within_block_p (x
, INTVAL (offset
));
1676 case SYMBOL_GOT_PAGE_OFST
:
1677 case SYMBOL_GOTOFF_PAGE
:
1678 /* If the symbol is global, the GOT entry will contain the symbol's
1679 address, and we will apply a 16-bit offset after loading it.
1680 If the symbol is local, the linker should provide enough local
1681 GOT entries for a 16-bit offset, but larger offsets may lead
1683 return SMALL_INT (offset
);
1687 /* There is no carry between the HI and LO REL relocations, so the
1688 offset is only valid if we know it won't lead to such a carry. */
1689 return mips_offset_within_alignment_p (x
, INTVAL (offset
));
1691 case SYMBOL_GOT_DISP
:
1692 case SYMBOL_GOTOFF_DISP
:
1693 case SYMBOL_GOTOFF_CALL
:
1694 case SYMBOL_GOTOFF_LOADGP
:
1697 case SYMBOL_GOTTPREL
:
1706 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1709 mips_regno_mode_ok_for_base_p (int regno
, enum machine_mode mode
, int strict
)
1711 if (!HARD_REGISTER_NUM_P (regno
))
1715 regno
= reg_renumber
[regno
];
1718 /* These fake registers will be eliminated to either the stack or
1719 hard frame pointer, both of which are usually valid base registers.
1720 Reload deals with the cases where the eliminated form isn't valid. */
1721 if (regno
== ARG_POINTER_REGNUM
|| regno
== FRAME_POINTER_REGNUM
)
1724 /* In mips16 mode, the stack pointer can only address word and doubleword
1725 values, nothing smaller. There are two problems here:
1727 (a) Instantiating virtual registers can introduce new uses of the
1728 stack pointer. If these virtual registers are valid addresses,
1729 the stack pointer should be too.
1731 (b) Most uses of the stack pointer are not made explicit until
1732 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1733 We don't know until that stage whether we'll be eliminating to the
1734 stack pointer (which needs the restriction) or the hard frame
1735 pointer (which doesn't).
1737 All in all, it seems more consistent to only enforce this restriction
1738 during and after reload. */
1739 if (TARGET_MIPS16
&& regno
== STACK_POINTER_REGNUM
)
1740 return !strict
|| GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8;
1742 return TARGET_MIPS16
? M16_REG_P (regno
) : GP_REG_P (regno
);
1746 /* Return true if X is a valid base register for the given mode.
1747 Allow only hard registers if STRICT. */
1750 mips_valid_base_register_p (rtx x
, enum machine_mode mode
, int strict
)
1752 if (!strict
&& GET_CODE (x
) == SUBREG
)
1756 && mips_regno_mode_ok_for_base_p (REGNO (x
), mode
, strict
));
1760 /* Return true if X is a valid address for machine mode MODE. If it is,
1761 fill in INFO appropriately. STRICT is true if we should only accept
1762 hard base registers. */
1765 mips_classify_address (struct mips_address_info
*info
, rtx x
,
1766 enum machine_mode mode
, int strict
)
1768 switch (GET_CODE (x
))
1772 info
->type
= ADDRESS_REG
;
1774 info
->offset
= const0_rtx
;
1775 return mips_valid_base_register_p (info
->reg
, mode
, strict
);
1778 info
->type
= ADDRESS_REG
;
1779 info
->reg
= XEXP (x
, 0);
1780 info
->offset
= XEXP (x
, 1);
1781 return (mips_valid_base_register_p (info
->reg
, mode
, strict
)
1782 && const_arith_operand (info
->offset
, VOIDmode
));
1785 info
->type
= ADDRESS_LO_SUM
;
1786 info
->reg
= XEXP (x
, 0);
1787 info
->offset
= XEXP (x
, 1);
1788 /* We have to trust the creator of the LO_SUM to do something vaguely
1789 sane. Target-independent code that creates a LO_SUM should also
1790 create and verify the matching HIGH. Target-independent code that
1791 adds an offset to a LO_SUM must prove that the offset will not
1792 induce a carry. Failure to do either of these things would be
1793 a bug, and we are not required to check for it here. The MIPS
1794 backend itself should only create LO_SUMs for valid symbolic
1795 constants, with the high part being either a HIGH or a copy
1798 = mips_classify_symbolic_expression (info
->offset
, SYMBOL_CONTEXT_MEM
);
1799 return (mips_valid_base_register_p (info
->reg
, mode
, strict
)
1800 && mips_symbol_insns (info
->symbol_type
, mode
) > 0
1801 && mips_lo_relocs
[info
->symbol_type
] != 0);
1804 /* Small-integer addresses don't occur very often, but they
1805 are legitimate if $0 is a valid base register. */
1806 info
->type
= ADDRESS_CONST_INT
;
1807 return !TARGET_MIPS16
&& SMALL_INT (x
);
1812 info
->type
= ADDRESS_SYMBOLIC
;
1813 return (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_MEM
,
1815 && mips_symbol_insns (info
->symbol_type
, mode
) > 0
1816 && !mips_split_p
[info
->symbol_type
]);
1823 /* Return true if X is a thread-local symbol. */
1826 mips_tls_operand_p (rtx x
)
1828 return GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
) != 0;
1831 /* Return true if X can not be forced into a constant pool. */
1834 mips_tls_symbol_ref_1 (rtx
*x
, void *data ATTRIBUTE_UNUSED
)
1836 return mips_tls_operand_p (*x
);
/* Implements TARGET_CANNOT_FORCE_CONST_MEM.
   NOTE(review): extraction damage -- the function header, local
   declarations, braces and return statements of this function were
   dropped; only the comments and a few condition/call lines survive.
   Restore the body from the upstream file before use.  */
1839 /* Return true if X can not be forced into a constant pool. */
1842 mips_cannot_force_const_mem (rtx x
)
1848 /* As an optimization, reject constants that mips_legitimize_move
1851 Suppose we have a multi-instruction sequence that loads constant C
1852 into register R. If R does not get allocated a hard register, and
1853 R is used in an operand that allows both registers and memory
1854 references, reload will consider forcing C into memory and using
1855 one of the instruction's memory alternatives. Returning false
1856 here will force it to use an input reload instead. */
1857 if (GET_CODE (x
) == CONST_INT
)
1860 split_const (x
, &base
, &offset
);
1861 if (symbolic_operand (base
, VOIDmode
) && SMALL_INT (offset
))
1865 if (TARGET_HAVE_TLS
&& for_each_rtx (&x
, &mips_tls_symbol_ref_1
, 0))
1871 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1872 constants when we're using a per-function constant pool. */
1875 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED
,
1876 const_rtx x ATTRIBUTE_UNUSED
)
1878 return !TARGET_MIPS16_PCREL_LOADS
;
/* Helper for mips_symbol_insns: per-symbol-type instruction counts,
   counting each extended MIPS16 instruction as one.
   NOTE(review): extraction damage -- the switch framing, several case
   labels and most return statements of this function were dropped;
   restore the body from the upstream file before use.  */
1881 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1882 single instruction. We rely on the fact that, in the worst case,
1883 all instructions involved in a MIPS16 address calculation are usually
1887 mips_symbol_insns_1 (enum mips_symbol_type type
, enum machine_mode mode
)
1891 case SYMBOL_ABSOLUTE
:
1892 /* When using 64-bit symbols, we need 5 preparatory instructions,
1895 lui $at,%highest(symbol)
1896 daddiu $at,$at,%higher(symbol)
1898 daddiu $at,$at,%hi(symbol)
1901 The final address is then $at + %lo(symbol). With 32-bit
1902 symbols we just need a preparatory lui for normal mode and
1903 a preparatory "li; sll" for MIPS16. */
1904 return ABI_HAS_64BIT_SYMBOLS
? 6 : TARGET_MIPS16
? 3 : 2;
1906 case SYMBOL_GP_RELATIVE
:
1907 /* Treat GP-relative accesses as taking a single instruction on
1908 MIPS16 too; the copy of $gp can often be shared. */
1911 case SYMBOL_PC_RELATIVE
:
1912 /* PC-relative constants can be only be used with addiupc,
1914 if (mode
== MAX_MACHINE_MODE
1915 || GET_MODE_SIZE (mode
) == 4
1916 || GET_MODE_SIZE (mode
) == 8)
1919 /* The constant must be loaded using addiupc first. */
1922 case SYMBOL_FORCE_TO_MEM
:
1923 /* LEAs will be converted into constant-pool references by
1925 if (mode
== MAX_MACHINE_MODE
)
1928 /* The constant must be loaded from the constant pool. */
1931 case SYMBOL_GOT_DISP
:
1932 /* The constant will have to be loaded from the GOT before it
1933 is used in an address. */
1934 if (mode
!= MAX_MACHINE_MODE
)
1939 case SYMBOL_GOT_PAGE_OFST
:
1940 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1941 the local/global classification is accurate. See override_options
1944 The worst cases are:
1946 (1) For local symbols when generating o32 or o64 code. The assembler
1952 ...and the final address will be $at + %lo(symbol).
1954 (2) For global symbols when -mxgot. The assembler will use:
1956 lui $at,%got_hi(symbol)
1959 ...and the final address will be $at + %got_lo(symbol). */
1962 case SYMBOL_GOTOFF_PAGE
:
1963 case SYMBOL_GOTOFF_DISP
:
1964 case SYMBOL_GOTOFF_CALL
:
1965 case SYMBOL_GOTOFF_LOADGP
:
1966 case SYMBOL_32_HIGH
:
1967 case SYMBOL_64_HIGH
:
1973 case SYMBOL_GOTTPREL
:
1976 /* A 16-bit constant formed by a single relocation, or a 32-bit
1977 constant formed from a high 16-bit relocation and a low 16-bit
1978 relocation. Use mips_split_p to determine which. */
1979 return !mips_split_p
[type
] ? 1 : TARGET_MIPS16
? 3 : 2;
1982 /* We don't treat a bare TLS symbol as a constant. */
1988 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1989 to load symbols of type TYPE into a register. Return 0 if the given
1990 type of symbol cannot be used as an immediate operand.
1992 Otherwise, return the number of instructions needed to load or store
1993 values of mode MODE to or from addresses of type TYPE. Return 0 if
1994 the given type of symbol is not valid in addresses.
1996 In both cases, treat extended MIPS16 instructions as two instructions. */
1999 mips_symbol_insns (enum mips_symbol_type type
, enum machine_mode mode
)
2001 return mips_symbol_insns_1 (type
, mode
) * (TARGET_MIPS16
? 2 : 1);
2004 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2007 mips_stack_address_p (rtx x
, enum machine_mode mode
)
2009 struct mips_address_info addr
;
2011 return (mips_classify_address (&addr
, x
, mode
, false)
2012 && addr
.type
== ADDRESS_REG
2013 && addr
.reg
== stack_pointer_rtx
);
2016 /* Return true if a value at OFFSET bytes from BASE can be accessed
2017 using an unextended mips16 instruction. MODE is the mode of the
2020 Usually the offset in an unextended instruction is a 5-bit field.
2021 The offset is unsigned and shifted left once for HIs, twice
2022 for SIs, and so on. An exception is SImode accesses off the
2023 stack pointer, which have an 8-bit immediate field. */
2026 mips16_unextended_reference_p (enum machine_mode mode
, rtx base
, rtx offset
)
2029 && GET_CODE (offset
) == CONST_INT
2030 && INTVAL (offset
) >= 0
2031 && (INTVAL (offset
) & (GET_MODE_SIZE (mode
) - 1)) == 0)
2033 if (GET_MODE_SIZE (mode
) == 4 && base
== stack_pointer_rtx
)
2034 return INTVAL (offset
) < 256 * GET_MODE_SIZE (mode
);
2035 return INTVAL (offset
) < 32 * GET_MODE_SIZE (mode
);
2041 /* Return the number of instructions needed to load or store a value
2042 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2043 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2044 otherwise assume that a single load or store is enough.
2046 For mips16 code, count extended instructions as two instructions. */
2049 mips_address_insns (rtx x
, enum machine_mode mode
, bool might_split_p
)
2051 struct mips_address_info addr
;
2054 /* BLKmode is used for single unaligned loads and stores and should
2055 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2056 meaningless, so we have to single it out as a special case one way
2058 if (mode
!= BLKmode
&& might_split_p
)
2059 factor
= (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2063 if (mips_classify_address (&addr
, x
, mode
, false))
2068 && !mips16_unextended_reference_p (mode
, addr
.reg
, addr
.offset
))
2072 case ADDRESS_LO_SUM
:
2073 return (TARGET_MIPS16
? factor
* 2 : factor
);
2075 case ADDRESS_CONST_INT
:
2078 case ADDRESS_SYMBOLIC
:
2079 return factor
* mips_symbol_insns (addr
.symbol_type
, mode
);
2085 /* Likewise for constant X. */
2088 mips_const_insns (rtx x
)
2090 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
2091 enum mips_symbol_type symbol_type
;
2094 switch (GET_CODE (x
))
2097 if (!mips_symbolic_constant_p (XEXP (x
, 0), SYMBOL_CONTEXT_LEA
,
2099 || !mips_split_p
[symbol_type
])
2102 /* This is simply an lui for normal mode. It is an extended
2103 "li" followed by an extended "sll" for MIPS16. */
2104 return TARGET_MIPS16
? 4 : 1;
2108 /* Unsigned 8-bit constants can be loaded using an unextended
2109 LI instruction. Unsigned 16-bit constants can be loaded
2110 using an extended LI. Negative constants must be loaded
2111 using LI and then negated. */
2112 return (INTVAL (x
) >= 0 && INTVAL (x
) < 256 ? 1
2113 : SMALL_OPERAND_UNSIGNED (INTVAL (x
)) ? 2
2114 : INTVAL (x
) > -256 && INTVAL (x
) < 0 ? 2
2115 : SMALL_OPERAND_UNSIGNED (-INTVAL (x
)) ? 3
2118 return mips_build_integer (codes
, INTVAL (x
));
2122 return (!TARGET_MIPS16
&& x
== CONST0_RTX (GET_MODE (x
)) ? 1 : 0);
2128 /* See if we can refer to X directly. */
2129 if (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_LEA
, &symbol_type
))
2130 return mips_symbol_insns (symbol_type
, MAX_MACHINE_MODE
);
2132 /* Otherwise try splitting the constant into a base and offset.
2133 16-bit offsets can be added using an extra addiu. Larger offsets
2134 must be calculated separately and then added to the base. */
2135 split_const (x
, &x
, &offset
);
2138 int n
= mips_const_insns (x
);
2141 if (SMALL_INT (offset
))
2144 return n
+ 1 + mips_build_integer (codes
, INTVAL (offset
));
2151 return mips_symbol_insns (mips_classify_symbol (x
, SYMBOL_CONTEXT_LEA
),
2160 /* Return the number of instructions needed to implement INSN,
2161 given that it loads from or stores to MEM. Count extended
2162 mips16 instructions as two instructions. */
2165 mips_load_store_insns (rtx mem
, rtx insn
)
2167 enum machine_mode mode
;
2171 gcc_assert (MEM_P (mem
));
2172 mode
= GET_MODE (mem
);
2174 /* Try to prove that INSN does not need to be split. */
2175 might_split_p
= true;
2176 if (GET_MODE_BITSIZE (mode
) == 64)
2178 set
= single_set (insn
);
2179 if (set
&& !mips_split_64bit_move_p (SET_DEST (set
), SET_SRC (set
)))
2180 might_split_p
= false;
2183 return mips_address_insns (XEXP (mem
, 0), mode
, might_split_p
);
2187 /* Return the number of instructions needed for an integer division. */
2190 mips_idiv_insns (void)
2195 if (TARGET_CHECK_ZERO_DIV
)
2197 if (GENERATE_DIVIDE_TRAPS
)
2203 if (TARGET_FIX_R4000
|| TARGET_FIX_R4400
)
2208 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2209 returns a nonzero value if X is a legitimate address for a memory
2210 operand of the indicated MODE. STRICT is nonzero if this function
2211 is called during reload. */
2214 mips_legitimate_address_p (enum machine_mode mode
, rtx x
, int strict
)
2216 struct mips_address_info addr
;
2218 return mips_classify_address (&addr
, x
, mode
, strict
);
2221 /* Emit a move from SRC to DEST. Assume that the move expanders can
2222 handle all moves if !can_create_pseudo_p (). The distinction is
2223 important because, unlike emit_move_insn, the move expanders know
2224 how to force Pmode objects into the constant pool even when the
2225 constant pool address is not itself legitimate. */
2228 mips_emit_move (rtx dest
, rtx src
)
2230 return (can_create_pseudo_p ()
2231 ? emit_move_insn (dest
, src
)
2232 : emit_move_insn_1 (dest
, src
));
2235 /* Copy VALUE to a register and return that register. If new pseudos
2236 are allowed, copy it into a new register, otherwise use DEST. */
2239 mips_force_temporary (rtx dest
, rtx value
)
2241 if (can_create_pseudo_p ())
2242 return force_reg (Pmode
, value
);
2245 mips_emit_move (copy_rtx (dest
), value
);
2251 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2252 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2253 constant in that context and can be split into a high part and a LO_SUM.
2254 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2255 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2257 TEMP is as for mips_force_temporary and is used to load the high
2258 part into a register. */
2261 mips_split_symbol (rtx temp
, rtx addr
, enum machine_mode mode
, rtx
*lo_sum_out
)
2263 enum mips_symbol_context context
;
2264 enum mips_symbol_type symbol_type
;
2267 context
= (mode
== MAX_MACHINE_MODE
2268 ? SYMBOL_CONTEXT_LEA
2269 : SYMBOL_CONTEXT_MEM
);
2270 if (!mips_symbolic_constant_p (addr
, context
, &symbol_type
)
2271 || mips_symbol_insns (symbol_type
, mode
) == 0
2272 || !mips_split_p
[symbol_type
])
2277 if (symbol_type
== SYMBOL_GP_RELATIVE
)
2279 if (!can_create_pseudo_p ())
2281 emit_insn (gen_load_const_gp (copy_rtx (temp
)));
2285 high
= mips16_gp_pseudo_reg ();
2289 high
= gen_rtx_HIGH (Pmode
, copy_rtx (addr
));
2290 high
= mips_force_temporary (temp
, high
);
2292 *lo_sum_out
= gen_rtx_LO_SUM (Pmode
, high
, addr
);
2298 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2299 and add CONST_INT OFFSET to the result. */
2302 mips_unspec_address_offset (rtx base
, rtx offset
,
2303 enum mips_symbol_type symbol_type
)
2305 base
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, base
),
2306 UNSPEC_ADDRESS_FIRST
+ symbol_type
);
2307 if (offset
!= const0_rtx
)
2308 base
= gen_rtx_PLUS (Pmode
, base
, offset
);
2309 return gen_rtx_CONST (Pmode
, base
);
2312 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2313 type SYMBOL_TYPE. */
2316 mips_unspec_address (rtx address
, enum mips_symbol_type symbol_type
)
2320 split_const (address
, &base
, &offset
);
2321 return mips_unspec_address_offset (base
, offset
, symbol_type
);
2325 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2326 high part to BASE and return the result. Just return BASE otherwise.
2327 TEMP is available as a temporary register if needed.
2329 The returned expression can be used as the first operand to a LO_SUM. */
2332 mips_unspec_offset_high (rtx temp
, rtx base
, rtx addr
,
2333 enum mips_symbol_type symbol_type
)
2335 if (mips_split_p
[symbol_type
])
2337 addr
= gen_rtx_HIGH (Pmode
, mips_unspec_address (addr
, symbol_type
));
2338 addr
= mips_force_temporary (temp
, addr
);
2339 return mips_force_temporary (temp
, gen_rtx_PLUS (Pmode
, addr
, base
));
2345 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2346 mips_force_temporary; it is only needed when OFFSET is not a
2350 mips_add_offset (rtx temp
, rtx reg
, HOST_WIDE_INT offset
)
2352 if (!SMALL_OPERAND (offset
))
2357 /* Load the full offset into a register so that we can use
2358 an unextended instruction for the address itself. */
2359 high
= GEN_INT (offset
);
2364 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2365 high
= GEN_INT (CONST_HIGH_PART (offset
));
2366 offset
= CONST_LOW_PART (offset
);
2368 high
= mips_force_temporary (temp
, high
);
2369 reg
= mips_force_temporary (temp
, gen_rtx_PLUS (Pmode
, high
, reg
));
2371 return plus_constant (reg
, offset
);
2374 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2375 referencing, and TYPE is the symbol type to use (either global
2376 dynamic or local dynamic). V0 is an RTX for the return value
2377 location. The entire insn sequence is returned. */
2379 static GTY(()) rtx mips_tls_symbol
;
2382 mips_call_tls_get_addr (rtx sym
, enum mips_symbol_type type
, rtx v0
)
2384 rtx insn
, loc
, tga
, a0
;
2386 a0
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
2388 if (!mips_tls_symbol
)
2389 mips_tls_symbol
= init_one_libfunc ("__tls_get_addr");
2391 loc
= mips_unspec_address (sym
, type
);
2395 emit_insn (gen_rtx_SET (Pmode
, a0
,
2396 gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, loc
)));
2397 tga
= gen_rtx_MEM (Pmode
, mips_tls_symbol
);
2398 insn
= emit_call_insn (gen_call_value (v0
, tga
, const0_rtx
, const0_rtx
));
2399 CONST_OR_PURE_CALL_P (insn
) = 1;
2400 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), v0
);
2401 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), a0
);
2402 insn
= get_insns ();
2409 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2410 return value will be a valid address and move_operand (either a REG
2414 mips_legitimize_tls_address (rtx loc
)
2416 rtx dest
, insn
, v0
, v1
, tmp1
, tmp2
, eqv
;
2417 enum tls_model model
;
2421 sorry ("MIPS16 TLS");
2422 return gen_reg_rtx (Pmode
);
2425 v0
= gen_rtx_REG (Pmode
, GP_RETURN
);
2426 v1
= gen_rtx_REG (Pmode
, GP_RETURN
+ 1);
2428 model
= SYMBOL_REF_TLS_MODEL (loc
);
2429 /* Only TARGET_ABICALLS code can have more than one module; other
2430 code must be be static and should not use a GOT. All TLS models
2431 reduce to local exec in this situation. */
2432 if (!TARGET_ABICALLS
)
2433 model
= TLS_MODEL_LOCAL_EXEC
;
2437 case TLS_MODEL_GLOBAL_DYNAMIC
:
2438 insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSGD
, v0
);
2439 dest
= gen_reg_rtx (Pmode
);
2440 emit_libcall_block (insn
, dest
, v0
, loc
);
2443 case TLS_MODEL_LOCAL_DYNAMIC
:
2444 insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSLDM
, v0
);
2445 tmp1
= gen_reg_rtx (Pmode
);
2447 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2448 share the LDM result with other LD model accesses. */
2449 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2451 emit_libcall_block (insn
, tmp1
, v0
, eqv
);
2453 tmp2
= mips_unspec_offset_high (NULL
, tmp1
, loc
, SYMBOL_DTPREL
);
2454 dest
= gen_rtx_LO_SUM (Pmode
, tmp2
,
2455 mips_unspec_address (loc
, SYMBOL_DTPREL
));
2458 case TLS_MODEL_INITIAL_EXEC
:
2459 tmp1
= gen_reg_rtx (Pmode
);
2460 tmp2
= mips_unspec_address (loc
, SYMBOL_GOTTPREL
);
2461 if (Pmode
== DImode
)
2463 emit_insn (gen_tls_get_tp_di (v1
));
2464 emit_insn (gen_load_gotdi (tmp1
, pic_offset_table_rtx
, tmp2
));
2468 emit_insn (gen_tls_get_tp_si (v1
));
2469 emit_insn (gen_load_gotsi (tmp1
, pic_offset_table_rtx
, tmp2
));
2471 dest
= gen_reg_rtx (Pmode
);
2472 emit_insn (gen_add3_insn (dest
, tmp1
, v1
));
2475 case TLS_MODEL_LOCAL_EXEC
:
2476 if (Pmode
== DImode
)
2477 emit_insn (gen_tls_get_tp_di (v1
));
2479 emit_insn (gen_tls_get_tp_si (v1
));
2481 tmp1
= mips_unspec_offset_high (NULL
, v1
, loc
, SYMBOL_TPREL
);
2482 dest
= gen_rtx_LO_SUM (Pmode
, tmp1
,
2483 mips_unspec_address (loc
, SYMBOL_TPREL
));
2493 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2494 be legitimized in a way that the generic machinery might not expect,
2495 put the new address in *XLOC and return true. MODE is the mode of
2496 the memory being accessed. */
2499 mips_legitimize_address (rtx
*xloc
, enum machine_mode mode
)
2501 if (mips_tls_operand_p (*xloc
))
2503 *xloc
= mips_legitimize_tls_address (*xloc
);
2507 /* See if the address can split into a high part and a LO_SUM. */
2508 if (mips_split_symbol (NULL
, *xloc
, mode
, xloc
))
2511 if (GET_CODE (*xloc
) == PLUS
&& GET_CODE (XEXP (*xloc
, 1)) == CONST_INT
)
2513 /* Handle REG + CONSTANT using mips_add_offset. */
2516 reg
= XEXP (*xloc
, 0);
2517 if (!mips_valid_base_register_p (reg
, mode
, 0))
2518 reg
= copy_to_mode_reg (Pmode
, reg
);
2519 *xloc
= mips_add_offset (0, reg
, INTVAL (XEXP (*xloc
, 1)));
2527 /* Subroutine of mips_build_integer (with the same interface).
2528 Assume that the final action in the sequence should be a left shift. */
2531 mips_build_shift (struct mips_integer_op
*codes
, HOST_WIDE_INT value
)
2533 unsigned int i
, shift
;
2535 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2536 since signed numbers are easier to load than unsigned ones. */
2538 while ((value
& 1) == 0)
2539 value
/= 2, shift
++;
2541 i
= mips_build_integer (codes
, value
);
2542 codes
[i
].code
= ASHIFT
;
2543 codes
[i
].value
= shift
;
2548 /* As for mips_build_shift, but assume that the final action will be
2549 an IOR or PLUS operation. */
2552 mips_build_lower (struct mips_integer_op
*codes
, unsigned HOST_WIDE_INT value
)
2554 unsigned HOST_WIDE_INT high
;
2557 high
= value
& ~(unsigned HOST_WIDE_INT
) 0xffff;
2558 if (!LUI_OPERAND (high
) && (value
& 0x18000) == 0x18000)
2560 /* The constant is too complex to load with a simple lui/ori pair
2561 so our goal is to clear as many trailing zeros as possible.
2562 In this case, we know bit 16 is set and that the low 16 bits
2563 form a negative number. If we subtract that number from VALUE,
2564 we will clear at least the lowest 17 bits, maybe more. */
2565 i
= mips_build_integer (codes
, CONST_HIGH_PART (value
));
2566 codes
[i
].code
= PLUS
;
2567 codes
[i
].value
= CONST_LOW_PART (value
);
2571 i
= mips_build_integer (codes
, high
);
2572 codes
[i
].code
= IOR
;
2573 codes
[i
].value
= value
& 0xffff;
2579 /* Fill CODES with a sequence of rtl operations to load VALUE.
2580 Return the number of operations needed. */
2583 mips_build_integer (struct mips_integer_op
*codes
,
2584 unsigned HOST_WIDE_INT value
)
2586 if (SMALL_OPERAND (value
)
2587 || SMALL_OPERAND_UNSIGNED (value
)
2588 || LUI_OPERAND (value
))
2590 /* The value can be loaded with a single instruction. */
2591 codes
[0].code
= UNKNOWN
;
2592 codes
[0].value
= value
;
2595 else if ((value
& 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value
)))
2597 /* Either the constant is a simple LUI/ORI combination or its
2598 lowest bit is set. We don't want to shift in this case. */
2599 return mips_build_lower (codes
, value
);
2601 else if ((value
& 0xffff) == 0)
2603 /* The constant will need at least three actions. The lowest
2604 16 bits are clear, so the final action will be a shift. */
2605 return mips_build_shift (codes
, value
);
2609 /* The final action could be a shift, add or inclusive OR.
2610 Rather than use a complex condition to select the best
2611 approach, try both mips_build_shift and mips_build_lower
2612 and pick the one that gives the shortest sequence.
2613 Note that this case is only used once per constant. */
2614 struct mips_integer_op alt_codes
[MIPS_MAX_INTEGER_OPS
];
2615 unsigned int cost
, alt_cost
;
2617 cost
= mips_build_shift (codes
, value
);
2618 alt_cost
= mips_build_lower (alt_codes
, value
);
2619 if (alt_cost
< cost
)
2621 memcpy (codes
, alt_codes
, alt_cost
* sizeof (codes
[0]));
2629 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2632 mips_move_integer (rtx dest
, rtx temp
, unsigned HOST_WIDE_INT value
)
2634 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
2635 enum machine_mode mode
;
2636 unsigned int i
, cost
;
2639 mode
= GET_MODE (dest
);
2640 cost
= mips_build_integer (codes
, value
);
2642 /* Apply each binary operation to X. Invariant: X is a legitimate
2643 source operand for a SET pattern. */
2644 x
= GEN_INT (codes
[0].value
);
2645 for (i
= 1; i
< cost
; i
++)
2647 if (!can_create_pseudo_p ())
2649 emit_insn (gen_rtx_SET (VOIDmode
, temp
, x
));
2653 x
= force_reg (mode
, x
);
2654 x
= gen_rtx_fmt_ee (codes
[i
].code
, mode
, x
, GEN_INT (codes
[i
].value
));
2657 emit_insn (gen_rtx_SET (VOIDmode
, dest
, x
));
2661 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2662 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2666 mips_legitimize_const_move (enum machine_mode mode
, rtx dest
, rtx src
)
2670 /* Split moves of big integers into smaller pieces. */
2671 if (splittable_const_int_operand (src
, mode
))
2673 mips_move_integer (dest
, dest
, INTVAL (src
));
2677 /* Split moves of symbolic constants into high/low pairs. */
2678 if (mips_split_symbol (dest
, src
, MAX_MACHINE_MODE
, &src
))
2680 emit_insn (gen_rtx_SET (VOIDmode
, dest
, src
));
2684 if (mips_tls_operand_p (src
))
2686 mips_emit_move (dest
, mips_legitimize_tls_address (src
));
2690 /* If we have (const (plus symbol offset)), and that expression cannot
2691 be forced into memory, load the symbol first and add in the offset.
2692 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2693 forced into memory, as it usually produces better code. */
2694 split_const (src
, &base
, &offset
);
2695 if (offset
!= const0_rtx
2696 && (targetm
.cannot_force_const_mem (src
)
2697 || (!TARGET_MIPS16
&& can_create_pseudo_p ())))
2699 base
= mips_force_temporary (dest
, base
);
2700 mips_emit_move (dest
, mips_add_offset (0, base
, INTVAL (offset
)));
2704 src
= force_const_mem (mode
, src
);
2706 /* When using explicit relocs, constant pool references are sometimes
2707 not legitimate addresses. */
2708 mips_split_symbol (dest
, XEXP (src
, 0), mode
, &XEXP (src
, 0));
2709 mips_emit_move (dest
, src
);
2713 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2714 sequence that is valid. */
2717 mips_legitimize_move (enum machine_mode mode
, rtx dest
, rtx src
)
2719 if (!register_operand (dest
, mode
) && !reg_or_0_operand (src
, mode
))
2721 mips_emit_move (dest
, force_reg (mode
, src
));
2725 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2726 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
2727 && REG_P (src
) && MD_REG_P (REGNO (src
))
2728 && REG_P (dest
) && GP_REG_P (REGNO (dest
)))
2730 int other_regno
= REGNO (src
) == HI_REGNUM
? LO_REGNUM
: HI_REGNUM
;
2731 if (GET_MODE_SIZE (mode
) <= 4)
2732 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode
, REGNO (dest
)),
2733 gen_rtx_REG (SImode
, REGNO (src
)),
2734 gen_rtx_REG (SImode
, other_regno
)));
2736 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode
, REGNO (dest
)),
2737 gen_rtx_REG (DImode
, REGNO (src
)),
2738 gen_rtx_REG (DImode
, other_regno
)));
2742 /* We need to deal with constants that would be legitimate
2743 immediate_operands but not legitimate move_operands. */
2744 if (CONSTANT_P (src
) && !move_operand (src
, mode
))
2746 mips_legitimize_const_move (mode
, dest
, src
);
2747 set_unique_reg_note (get_last_insn (), REG_EQUAL
, copy_rtx (src
));
2753 /* We need a lot of little routines to check constant values on the
2754 mips16. These are used to figure out how long the instruction will
2755 be. It would be much better to do this using constraints, but
2756 there aren't nearly enough letters available. */
2759 m16_check_op (rtx op
, int low
, int high
, int mask
)
2761 return (GET_CODE (op
) == CONST_INT
2762 && INTVAL (op
) >= low
2763 && INTVAL (op
) <= high
2764 && (INTVAL (op
) & mask
) == 0);
2768 m16_uimm3_b (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2770 return m16_check_op (op
, 0x1, 0x8, 0);
2774 m16_simm4_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2776 return m16_check_op (op
, - 0x8, 0x7, 0);
2780 m16_nsimm4_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2782 return m16_check_op (op
, - 0x7, 0x8, 0);
2786 m16_simm5_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2788 return m16_check_op (op
, - 0x10, 0xf, 0);
2792 m16_nsimm5_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2794 return m16_check_op (op
, - 0xf, 0x10, 0);
2798 m16_uimm5_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2800 return m16_check_op (op
, (- 0x10) << 2, 0xf << 2, 3);
2804 m16_nuimm5_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2806 return m16_check_op (op
, (- 0xf) << 2, 0x10 << 2, 3);
2810 m16_simm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2812 return m16_check_op (op
, - 0x80, 0x7f, 0);
2816 m16_nsimm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2818 return m16_check_op (op
, - 0x7f, 0x80, 0);
2822 m16_uimm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2824 return m16_check_op (op
, 0x0, 0xff, 0);
2828 m16_nuimm8_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2830 return m16_check_op (op
, - 0xff, 0x0, 0);
2834 m16_uimm8_m1_1 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2836 return m16_check_op (op
, - 0x1, 0xfe, 0);
2840 m16_uimm8_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2842 return m16_check_op (op
, 0x0, 0xff << 2, 3);
2846 m16_nuimm8_4 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2848 return m16_check_op (op
, (- 0xff) << 2, 0x0, 3);
2852 m16_simm8_8 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2854 return m16_check_op (op
, (- 0x80) << 3, 0x7f << 3, 7);
2858 m16_nsimm8_8 (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2860 return m16_check_op (op
, (- 0x7f) << 3, 0x80 << 3, 7);
2863 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2864 address instruction. */
2867 mips_lwxs_address_p (rtx addr
)
2870 && GET_CODE (addr
) == PLUS
2871 && REG_P (XEXP (addr
, 1)))
2873 rtx offset
= XEXP (addr
, 0);
2874 if (GET_CODE (offset
) == MULT
2875 && REG_P (XEXP (offset
, 0))
2876 && GET_CODE (XEXP (offset
, 1)) == CONST_INT
2877 && INTVAL (XEXP (offset
, 1)) == 4)
2883 /* The cost of loading values from the constant pool. It should be
2884 larger than the cost of any constant we want to synthesize inline. */
2886 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2888 /* Return the cost of X when used as an operand to the MIPS16 instruction
2889 that implements CODE. Return -1 if there is no such instruction, or if
2890 X is not a valid immediate operand for it. */
2893 mips16_constant_cost (int code
, HOST_WIDE_INT x
)
2900 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2901 other shifts are extended. The shift patterns truncate the shift
2902 count to the right size, so there are no out-of-range values. */
2903 if (IN_RANGE (x
, 1, 8))
2905 return COSTS_N_INSNS (1);
2908 if (IN_RANGE (x
, -128, 127))
2910 if (SMALL_OPERAND (x
))
2911 return COSTS_N_INSNS (1);
2915 /* Like LE, but reject the always-true case. */
2919 /* We add 1 to the immediate and use SLT. */
2922 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2925 if (IN_RANGE (x
, 0, 255))
2927 if (SMALL_OPERAND_UNSIGNED (x
))
2928 return COSTS_N_INSNS (1);
2933 /* Equality comparisons with 0 are cheap. */
2943 /* Return true if there is a non-MIPS16 instruction that implements CODE
2944 and if that instruction accepts X as an immediate operand. */
2947 mips_immediate_operand_p (int code
, HOST_WIDE_INT x
)
2954 /* All shift counts are truncated to a valid constant. */
2959 /* Likewise rotates, if the target supports rotates at all. */
2965 /* These instructions take 16-bit unsigned immediates. */
2966 return SMALL_OPERAND_UNSIGNED (x
);
2971 /* These instructions take 16-bit signed immediates. */
2972 return SMALL_OPERAND (x
);
2978 /* The "immediate" forms of these instructions are really
2979 implemented as comparisons with register 0. */
2984 /* Likewise, meaning that the only valid immediate operand is 1. */
2988 /* We add 1 to the immediate and use SLT. */
2989 return SMALL_OPERAND (x
+ 1);
2992 /* Likewise SLTU, but reject the always-true case. */
2993 return SMALL_OPERAND (x
+ 1) && x
+ 1 != 0;
2997 /* The bit position and size are immediate operands. */
2998 return ISA_HAS_EXT_INS
;
3001 /* By default assume that $0 can be used for 0. */
3006 /* Return the cost of binary operation X, given that the instruction
3007 sequence for a word-sized or smaller operation has cost SINGLE_COST
3008 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3011 mips_binary_cost (rtx x
, int single_cost
, int double_cost
)
3015 if (GET_MODE_SIZE (GET_MODE (x
)) == UNITS_PER_WORD
* 2)
3020 + rtx_cost (XEXP (x
, 0), 0)
3021 + rtx_cost (XEXP (x
, 1), GET_CODE (x
)));
3024 /* Return the cost of floating-point multiplications of mode MODE. */
3027 mips_fp_mult_cost (enum machine_mode mode
)
3029 return mode
== DFmode
? mips_cost
->fp_mult_df
: mips_cost
->fp_mult_sf
;
3032 /* Return the cost of floating-point divisions of mode MODE. */
3035 mips_fp_div_cost (enum machine_mode mode
)
3037 return mode
== DFmode
? mips_cost
->fp_div_df
: mips_cost
->fp_div_sf
;
3040 /* Return the cost of sign-extending OP to mode MODE, not including the
3041 cost of OP itself. */
3044 mips_sign_extend_cost (enum machine_mode mode
, rtx op
)
3047 /* Extended loads are as cheap as unextended ones. */
3050 if (TARGET_64BIT
&& mode
== DImode
&& GET_MODE (op
) == SImode
)
3051 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3054 if (ISA_HAS_SEB_SEH
|| GENERATE_MIPS16E
)
3055 /* We can use SEB or SEH. */
3056 return COSTS_N_INSNS (1);
3058 /* We need to use a shift left and a shift right. */
3059 return COSTS_N_INSNS (TARGET_MIPS16
? 4 : 2);
3062 /* Return the cost of zero-extending OP to mode MODE, not including the
3063 cost of OP itself. */
3066 mips_zero_extend_cost (enum machine_mode mode
, rtx op
)
3069 /* Extended loads are as cheap as unextended ones. */
3072 if (TARGET_64BIT
&& mode
== DImode
&& GET_MODE (op
) == SImode
)
3073 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3074 return COSTS_N_INSNS (TARGET_MIPS16
? 4 : 2);
3076 if (GENERATE_MIPS16E
)
3077 /* We can use ZEB or ZEH. */
3078 return COSTS_N_INSNS (1);
3081 /* We need to load 0xff or 0xffff into a register and use AND. */
3082 return COSTS_N_INSNS (GET_MODE (op
) == QImode
? 2 : 3);
3084 /* We can use ANDI. */
3085 return COSTS_N_INSNS (1);
3088 /* Implement TARGET_RTX_COSTS. */
3091 mips_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
3093 enum machine_mode mode
= GET_MODE (x
);
3094 bool float_mode_p
= FLOAT_MODE_P (mode
);
3098 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3099 appear in the instruction stream, and the cost of a comparison is
3100 really the cost of the branch or scc condition. At the time of
3101 writing, gcc only uses an explicit outer COMPARE code when optabs
3102 is testing whether a constant is expensive enough to force into a
3103 register. We want optabs to pass such constants through the MIPS
3104 expanders instead, so make all constants very cheap here. */
3105 if (outer_code
== COMPARE
)
3107 gcc_assert (CONSTANT_P (x
));
3115 /* Treat *clear_upper32-style ANDs as having zero cost in the
3116 second operand. The cost is entirely in the first operand.
3118 ??? This is needed because we would otherwise try to CSE
3119 the constant operand. Although that's the right thing for
3120 instructions that continue to be a register operation throughout
3121 compilation, it is disastrous for instructions that could
3122 later be converted into a memory operation. */
3124 && outer_code
== AND
3125 && UINTVAL (x
) == 0xffffffff)
3133 cost
= mips16_constant_cost (outer_code
, INTVAL (x
));
3142 /* When not optimizing for size, we care more about the cost
3143 of hot code, and hot code is often in a loop. If a constant
3144 operand needs to be forced into a register, we will often be
3145 able to hoist the constant load out of the loop, so the load
3146 should not contribute to the cost. */
3148 || mips_immediate_operand_p (outer_code
, INTVAL (x
)))
3160 if (force_to_mem_operand (x
, VOIDmode
))
3162 *total
= COSTS_N_INSNS (1);
3165 cost
= mips_const_insns (x
);
3168 /* If the constant is likely to be stored in a GPR, SETs of
3169 single-insn constants are as cheap as register sets; we
3170 never want to CSE them.
3172 Don't reduce the cost of storing a floating-point zero in
3173 FPRs. If we have a zero in an FPR for other reasons, we
3174 can get better cfg-cleanup and delayed-branch results by
3175 using it consistently, rather than using $0 sometimes and
3176 an FPR at other times. Also, moves between floating-point
3177 registers are sometimes cheaper than (D)MTC1 $0. */
3179 && outer_code
== SET
3180 && !(float_mode_p
&& TARGET_HARD_FLOAT
))
3182 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3183 want to CSE the constant itself. It is usually better to
3184 have N copies of the last operation in the sequence and one
3185 shared copy of the other operations. (Note that this is
3186 not true for MIPS16 code, where the final operation in the
3187 sequence is often an extended instruction.)
3189 Also, if we have a CONST_INT, we don't know whether it is
3190 for a word or doubleword operation, so we cannot rely on
3191 the result of mips_build_integer. */
3192 else if (!TARGET_MIPS16
3193 && (outer_code
== SET
|| mode
== VOIDmode
))
3195 *total
= COSTS_N_INSNS (cost
);
3198 /* The value will need to be fetched from the constant pool. */
3199 *total
= CONSTANT_POOL_COST
;
3203 /* If the address is legitimate, return the number of
3204 instructions it needs. */
3206 cost
= mips_address_insns (addr
, mode
, true);
3209 *total
= COSTS_N_INSNS (cost
+ 1);
3212 /* Check for a scaled indexed address. */
3213 if (mips_lwxs_address_p (addr
))
3215 *total
= COSTS_N_INSNS (2);
3218 /* Otherwise use the default handling. */
3222 *total
= COSTS_N_INSNS (6);
3226 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1);
3230 /* Check for a *clear_upper32 pattern and treat it like a zero
3231 extension. See the pattern's comment for details. */
3234 && CONST_INT_P (XEXP (x
, 1))
3235 && UINTVAL (XEXP (x
, 1)) == 0xffffffff)
3237 *total
= (mips_zero_extend_cost (mode
, XEXP (x
, 0))
3238 + rtx_cost (XEXP (x
, 0), 0));
3245 /* Double-word operations use two single-word operations. */
3246 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3254 if (CONSTANT_P (XEXP (x
, 1)))
3255 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3257 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3262 *total
= mips_cost
->fp_add
;
3264 *total
= COSTS_N_INSNS (4);
3268 /* Low-part immediates need an extended MIPS16 instruction. */
3269 *total
= (COSTS_N_INSNS (TARGET_MIPS16
? 2 : 1)
3270 + rtx_cost (XEXP (x
, 0), 0));
3285 /* Branch comparisons have VOIDmode, so use the first operand's
3287 mode
= GET_MODE (XEXP (x
, 0));
3288 if (FLOAT_MODE_P (mode
))
3290 *total
= mips_cost
->fp_add
;
3293 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3298 && ISA_HAS_NMADD_NMSUB
3299 && TARGET_FUSED_MADD
3300 && !HONOR_NANS (mode
)
3301 && !HONOR_SIGNED_ZEROS (mode
))
3303 /* See if we can use NMADD or NMSUB. See mips.md for the
3304 associated patterns. */
3305 rtx op0
= XEXP (x
, 0);
3306 rtx op1
= XEXP (x
, 1);
3307 if (GET_CODE (op0
) == MULT
&& GET_CODE (XEXP (op0
, 0)) == NEG
)
3309 *total
= (mips_fp_mult_cost (mode
)
3310 + rtx_cost (XEXP (XEXP (op0
, 0), 0), 0)
3311 + rtx_cost (XEXP (op0
, 1), 0)
3312 + rtx_cost (op1
, 0));
3315 if (GET_CODE (op1
) == MULT
)
3317 *total
= (mips_fp_mult_cost (mode
)
3319 + rtx_cost (XEXP (op1
, 0), 0)
3320 + rtx_cost (XEXP (op1
, 1), 0));
3330 && TARGET_FUSED_MADD
3331 && GET_CODE (XEXP (x
, 0)) == MULT
)
3334 *total
= mips_cost
->fp_add
;
3338 /* Double-word operations require three single-word operations and
3339 an SLTU. The MIPS16 version then needs to move the result of
3340 the SLTU from $24 to a MIPS16 register. */
3341 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1),
3342 COSTS_N_INSNS (TARGET_MIPS16
? 5 : 4));
3347 && ISA_HAS_NMADD_NMSUB
3348 && TARGET_FUSED_MADD
3349 && !HONOR_NANS (mode
)
3350 && HONOR_SIGNED_ZEROS (mode
))
3352 /* See if we can use NMADD or NMSUB. See mips.md for the
3353 associated patterns. */
3354 rtx op
= XEXP (x
, 0);
3355 if ((GET_CODE (op
) == PLUS
|| GET_CODE (op
) == MINUS
)
3356 && GET_CODE (XEXP (op
, 0)) == MULT
)
3358 *total
= (mips_fp_mult_cost (mode
)
3359 + rtx_cost (XEXP (XEXP (op
, 0), 0), 0)
3360 + rtx_cost (XEXP (XEXP (op
, 0), 1), 0)
3361 + rtx_cost (XEXP (op
, 1), 0));
3367 *total
= mips_cost
->fp_add
;
3369 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 4 : 1);
3374 *total
= mips_fp_mult_cost (mode
);
3375 else if (mode
== DImode
&& !TARGET_64BIT
)
3376 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3377 where the mulsidi3 always includes an MFHI and an MFLO. */
3378 *total
= (optimize_size
3379 ? COSTS_N_INSNS (ISA_HAS_MUL3
? 7 : 9)
3380 : mips_cost
->int_mult_si
* 3 + 6);
3381 else if (optimize_size
)
3382 *total
= (ISA_HAS_MUL3
? 1 : 2);
3383 else if (mode
== DImode
)
3384 *total
= mips_cost
->int_mult_di
;
3386 *total
= mips_cost
->int_mult_si
;
3390 /* Check for a reciprocal. */
3391 if (float_mode_p
&& XEXP (x
, 0) == CONST1_RTX (mode
))
3394 && flag_unsafe_math_optimizations
3395 && (outer_code
== SQRT
|| GET_CODE (XEXP (x
, 1)) == SQRT
))
3397 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3398 division as being free. */
3399 *total
= rtx_cost (XEXP (x
, 1), 0);
3404 *total
= mips_fp_div_cost (mode
) + rtx_cost (XEXP (x
, 1), 0);
3414 *total
= mips_fp_div_cost (mode
);
3423 /* It is our responsibility to make division by a power of 2
3424 as cheap as 2 register additions if we want the division
3425 expanders to be used for such operations; see the setting
3426 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3427 should always produce shorter code than using
3428 expand_sdiv2_pow2. */
3430 && CONST_INT_P (XEXP (x
, 1))
3431 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
3433 *total
= COSTS_N_INSNS (2) + rtx_cost (XEXP (x
, 0), 0);
3436 *total
= COSTS_N_INSNS (mips_idiv_insns ());
3438 else if (mode
== DImode
)
3439 *total
= mips_cost
->int_div_di
;
3441 *total
= mips_cost
->int_div_si
;
3445 *total
= mips_sign_extend_cost (mode
, XEXP (x
, 0));
3449 *total
= mips_zero_extend_cost (mode
, XEXP (x
, 0));
3453 case UNSIGNED_FLOAT
:
3456 case FLOAT_TRUNCATE
:
3457 *total
= mips_cost
->fp_add
;
3465 /* Provide the costs of an addressing mode that contains ADDR.
3466 If ADDR is not a valid address, its cost is irrelevant. */
3469 mips_address_cost (rtx addr
)
3471 return mips_address_insns (addr
, SImode
, false);
3474 /* Return one word of double-word value OP, taking into account the fixed
3475 endianness of certain registers. HIGH_P is true to select the high part,
3476 false to select the low part. */
3479 mips_subword (rtx op
, int high_p
)
3482 enum machine_mode mode
;
3484 mode
= GET_MODE (op
);
3485 if (mode
== VOIDmode
)
3488 if (TARGET_BIG_ENDIAN
? !high_p
: high_p
)
3489 byte
= UNITS_PER_WORD
;
3493 if (FP_REG_RTX_P (op
))
3494 return gen_rtx_REG (word_mode
, high_p
? REGNO (op
) + 1 : REGNO (op
));
3497 return mips_rewrite_small_data (adjust_address (op
, word_mode
, byte
));
3499 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
3503 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3506 mips_split_64bit_move_p (rtx dest
, rtx src
)
3511 /* FP->FP moves can be done in a single instruction. */
3512 if (FP_REG_RTX_P (src
) && FP_REG_RTX_P (dest
))
3515 /* Check for floating-point loads and stores. They can be done using
3516 ldc1 and sdc1 on MIPS II and above. */
3519 if (FP_REG_RTX_P (dest
) && MEM_P (src
))
3521 if (FP_REG_RTX_P (src
) && MEM_P (dest
))
3528 /* Split a 64-bit move from SRC to DEST assuming that
3529 mips_split_64bit_move_p holds.
3531 Moves into and out of FPRs cause some difficulty here. Such moves
3532 will always be DFmode, since paired FPRs are not allowed to store
3533 DImode values. The most natural representation would be two separate
3534 32-bit moves, such as:
3536 (set (reg:SI $f0) (mem:SI ...))
3537 (set (reg:SI $f1) (mem:SI ...))
3539 However, the second insn is invalid because odd-numbered FPRs are
3540 not allowed to store independent values. Use the patterns load_df_low,
3541 load_df_high and store_df_high instead. */
3544 mips_split_64bit_move (rtx dest
, rtx src
)
3546 if (FP_REG_RTX_P (dest
))
3548 /* Loading an FPR from memory or from GPRs. */
3551 dest
= gen_lowpart (DFmode
, dest
);
3552 emit_insn (gen_load_df_low (dest
, mips_subword (src
, 0)));
3553 emit_insn (gen_mthc1 (dest
, mips_subword (src
, 1),
3558 emit_insn (gen_load_df_low (copy_rtx (dest
),
3559 mips_subword (src
, 0)));
3560 emit_insn (gen_load_df_high (dest
, mips_subword (src
, 1),
3564 else if (FP_REG_RTX_P (src
))
3566 /* Storing an FPR into memory or GPRs. */
3569 src
= gen_lowpart (DFmode
, src
);
3570 mips_emit_move (mips_subword (dest
, 0), mips_subword (src
, 0));
3571 emit_insn (gen_mfhc1 (mips_subword (dest
, 1), src
));
3575 mips_emit_move (mips_subword (dest
, 0), mips_subword (src
, 0));
3576 emit_insn (gen_store_df_high (mips_subword (dest
, 1), src
));
3581 /* The operation can be split into two normal moves. Decide in
3582 which order to do them. */
3585 low_dest
= mips_subword (dest
, 0);
3586 if (REG_P (low_dest
)
3587 && reg_overlap_mentioned_p (low_dest
, src
))
3589 mips_emit_move (mips_subword (dest
, 1), mips_subword (src
, 1));
3590 mips_emit_move (low_dest
, mips_subword (src
, 0));
3594 mips_emit_move (low_dest
, mips_subword (src
, 0));
3595 mips_emit_move (mips_subword (dest
, 1), mips_subword (src
, 1));
3600 /* Return the appropriate instructions to move SRC into DEST. Assume
3601 that SRC is operand 1 and DEST is operand 0. */
3604 mips_output_move (rtx dest
, rtx src
)
3606 enum rtx_code dest_code
, src_code
;
3607 enum mips_symbol_type symbol_type
;
3610 dest_code
= GET_CODE (dest
);
3611 src_code
= GET_CODE (src
);
3612 dbl_p
= (GET_MODE_SIZE (GET_MODE (dest
)) == 8);
3614 if (dbl_p
&& mips_split_64bit_move_p (dest
, src
))
3617 if ((src_code
== REG
&& GP_REG_P (REGNO (src
)))
3618 || (!TARGET_MIPS16
&& src
== CONST0_RTX (GET_MODE (dest
))))
3620 if (dest_code
== REG
)
3622 if (GP_REG_P (REGNO (dest
)))
3623 return "move\t%0,%z1";
3625 if (MD_REG_P (REGNO (dest
)))
3628 if (DSP_ACC_REG_P (REGNO (dest
)))
3630 static char retval
[] = "mt__\t%z1,%q0";
3631 retval
[2] = reg_names
[REGNO (dest
)][4];
3632 retval
[3] = reg_names
[REGNO (dest
)][5];
3636 if (FP_REG_P (REGNO (dest
)))
3637 return (dbl_p
? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3639 if (ALL_COP_REG_P (REGNO (dest
)))
3641 static char retval
[] = "dmtc_\t%z1,%0";
3643 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
3644 return (dbl_p
? retval
: retval
+ 1);
3647 if (dest_code
== MEM
)
3648 return (dbl_p
? "sd\t%z1,%0" : "sw\t%z1,%0");
3650 if (dest_code
== REG
&& GP_REG_P (REGNO (dest
)))
3652 if (src_code
== REG
)
3654 if (DSP_ACC_REG_P (REGNO (src
)))
3656 static char retval
[] = "mf__\t%0,%q1";
3657 retval
[2] = reg_names
[REGNO (src
)][4];
3658 retval
[3] = reg_names
[REGNO (src
)][5];
3662 if (ST_REG_P (REGNO (src
)) && ISA_HAS_8CC
)
3663 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3665 if (FP_REG_P (REGNO (src
)))
3666 return (dbl_p
? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3668 if (ALL_COP_REG_P (REGNO (src
)))
3670 static char retval
[] = "dmfc_\t%0,%1";
3672 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
3673 return (dbl_p
? retval
: retval
+ 1);
3677 if (src_code
== MEM
)
3678 return (dbl_p
? "ld\t%0,%1" : "lw\t%0,%1");
3680 if (src_code
== CONST_INT
)
3682 /* Don't use the X format, because that will give out of
3683 range numbers for 64-bit hosts and 32-bit targets. */
3685 return "li\t%0,%1\t\t\t# %X1";
3687 if (INTVAL (src
) >= 0 && INTVAL (src
) <= 0xffff)
3690 if (INTVAL (src
) < 0 && INTVAL (src
) >= -0xffff)
3694 if (src_code
== HIGH
)
3695 return TARGET_MIPS16
? "#" : "lui\t%0,%h1";
3697 if (CONST_GP_P (src
))
3698 return "move\t%0,%1";
3700 if (mips_symbolic_constant_p (src
, SYMBOL_CONTEXT_LEA
, &symbol_type
)
3701 && mips_lo_relocs
[symbol_type
] != 0)
3703 /* A signed 16-bit constant formed by applying a relocation
3704 operator to a symbolic address. */
3705 gcc_assert (!mips_split_p
[symbol_type
]);
3706 return "li\t%0,%R1";
3709 if (symbolic_operand (src
, VOIDmode
))
3711 gcc_assert (TARGET_MIPS16
3712 ? TARGET_MIPS16_TEXT_LOADS
3713 : !TARGET_EXPLICIT_RELOCS
);
3714 return (dbl_p
? "dla\t%0,%1" : "la\t%0,%1");
3717 if (src_code
== REG
&& FP_REG_P (REGNO (src
)))
3719 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
3721 if (GET_MODE (dest
) == V2SFmode
)
3722 return "mov.ps\t%0,%1";
3724 return (dbl_p
? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3727 if (dest_code
== MEM
)
3728 return (dbl_p
? "sdc1\t%1,%0" : "swc1\t%1,%0");
3730 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
3732 if (src_code
== MEM
)
3733 return (dbl_p
? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3735 if (dest_code
== REG
&& ALL_COP_REG_P (REGNO (dest
)) && src_code
== MEM
)
3737 static char retval
[] = "l_c_\t%0,%1";
3739 retval
[1] = (dbl_p
? 'd' : 'w');
3740 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
3743 if (dest_code
== MEM
&& src_code
== REG
&& ALL_COP_REG_P (REGNO (src
)))
3745 static char retval
[] = "s_c_\t%1,%0";
3747 retval
[1] = (dbl_p
? 'd' : 'w');
3748 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
3754 /* Restore $gp from its save slot. Valid only when using o32 or
3758 mips_restore_gp (void)
3762 gcc_assert (TARGET_ABICALLS
&& TARGET_OLDABI
);
3764 address
= mips_add_offset (pic_offset_table_rtx
,
3765 frame_pointer_needed
3766 ? hard_frame_pointer_rtx
3767 : stack_pointer_rtx
,
3768 current_function_outgoing_args_size
);
3769 slot
= gen_rtx_MEM (Pmode
, address
);
3771 mips_emit_move (pic_offset_table_rtx
, slot
);
3772 if (!TARGET_EXPLICIT_RELOCS
)
3773 emit_insn (gen_blockage ());
3776 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3779 mips_emit_binary (enum rtx_code code
, rtx target
, rtx op0
, rtx op1
)
3781 emit_insn (gen_rtx_SET (VOIDmode
, target
,
3782 gen_rtx_fmt_ee (code
, GET_MODE (target
), op0
, op1
)));
3785 /* Return true if CMP1 is a suitable second operand for relational
3786 operator CODE. See also the *sCC patterns in mips.md. */
3789 mips_relational_operand_ok_p (enum rtx_code code
, rtx cmp1
)
3795 return reg_or_0_operand (cmp1
, VOIDmode
);
3799 return !TARGET_MIPS16
&& cmp1
== const1_rtx
;
3803 return arith_operand (cmp1
, VOIDmode
);
3806 return sle_operand (cmp1
, VOIDmode
);
3809 return sleu_operand (cmp1
, VOIDmode
);
3816 /* Canonicalize LE or LEU comparisons into LT comparisons when
3817 possible to avoid extra instructions or inverting the
3821 mips_canonicalize_comparison (enum rtx_code
*code
, rtx
*cmp1
,
3822 enum machine_mode mode
)
3824 HOST_WIDE_INT original
, plus_one
;
3826 if (GET_CODE (*cmp1
) != CONST_INT
)
3829 original
= INTVAL (*cmp1
);
3830 plus_one
= trunc_int_for_mode ((unsigned HOST_WIDE_INT
) original
+ 1, mode
);
3835 if (original
< plus_one
)
3838 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
3847 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
3860 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3861 result in TARGET. CMP0 and TARGET are register_operands that have
3862 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3863 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3866 mips_emit_int_relational (enum rtx_code code
, bool *invert_ptr
,
3867 rtx target
, rtx cmp0
, rtx cmp1
)
3869 /* First see if there is a MIPS instruction that can do this operation
3870 with CMP1 in its current form. If not, try to canonicalize the
3871 comparison to LT. If that fails, try doing the same for the
3872 inverse operation. If that also fails, force CMP1 into a register
3874 if (mips_relational_operand_ok_p (code
, cmp1
))
3875 mips_emit_binary (code
, target
, cmp0
, cmp1
);
3876 else if (mips_canonicalize_comparison (&code
, &cmp1
, GET_MODE (target
)))
3877 mips_emit_binary (code
, target
, cmp0
, cmp1
);
3880 enum rtx_code inv_code
= reverse_condition (code
);
3881 if (!mips_relational_operand_ok_p (inv_code
, cmp1
))
3883 cmp1
= force_reg (GET_MODE (cmp0
), cmp1
);
3884 mips_emit_int_relational (code
, invert_ptr
, target
, cmp0
, cmp1
);
3886 else if (invert_ptr
== 0)
3888 rtx inv_target
= gen_reg_rtx (GET_MODE (target
));
3889 mips_emit_binary (inv_code
, inv_target
, cmp0
, cmp1
);
3890 mips_emit_binary (XOR
, target
, inv_target
, const1_rtx
);
3894 *invert_ptr
= !*invert_ptr
;
3895 mips_emit_binary (inv_code
, target
, cmp0
, cmp1
);
3900 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3901 The register will have the same mode as CMP0. */
3904 mips_zero_if_equal (rtx cmp0
, rtx cmp1
)
3906 if (cmp1
== const0_rtx
)
3909 if (uns_arith_operand (cmp1
, VOIDmode
))
3910 return expand_binop (GET_MODE (cmp0
), xor_optab
,
3911 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
3913 return expand_binop (GET_MODE (cmp0
), sub_optab
,
3914 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
3917 /* Convert *CODE into a code that can be used in a floating-point
3918 scc instruction (c.<cond>.<fmt>). Return true if the values of
3919 the condition code registers will be inverted, with 0 indicating
3920 that the condition holds. */
3923 mips_reverse_fp_cond_p (enum rtx_code
*code
)
3930 *code
= reverse_condition_maybe_unordered (*code
);
3938 /* Convert a comparison into something that can be used in a branch or
3939 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3940 being compared and *CODE is the code used to compare them.
3942 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3943 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3944 otherwise any standard branch condition can be used. The standard branch
3947 - EQ/NE between two registers.
3948 - any comparison between a register and zero. */
3951 mips_emit_compare (enum rtx_code
*code
, rtx
*op0
, rtx
*op1
, bool need_eq_ne_p
)
3953 if (GET_MODE_CLASS (GET_MODE (cmp_operands
[0])) == MODE_INT
)
3955 if (!need_eq_ne_p
&& cmp_operands
[1] == const0_rtx
)
3957 *op0
= cmp_operands
[0];
3958 *op1
= cmp_operands
[1];
3960 else if (*code
== EQ
|| *code
== NE
)
3964 *op0
= mips_zero_if_equal (cmp_operands
[0], cmp_operands
[1]);
3969 *op0
= cmp_operands
[0];
3970 *op1
= force_reg (GET_MODE (*op0
), cmp_operands
[1]);
3975 /* The comparison needs a separate scc instruction. Store the
3976 result of the scc in *OP0 and compare it against zero. */
3977 bool invert
= false;
3978 *op0
= gen_reg_rtx (GET_MODE (cmp_operands
[0]));
3980 mips_emit_int_relational (*code
, &invert
, *op0
,
3981 cmp_operands
[0], cmp_operands
[1]);
3982 *code
= (invert
? EQ
: NE
);
3985 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands
[0])))
3987 *op0
= gen_rtx_REG (CCDSPmode
, CCDSP_CC_REGNUM
);
3988 mips_emit_binary (*code
, *op0
, cmp_operands
[0], cmp_operands
[1]);
3994 enum rtx_code cmp_code
;
3996 /* Floating-point tests use a separate c.cond.fmt comparison to
3997 set a condition code register. The branch or conditional move
3998 will then compare that register against zero.
4000 Set CMP_CODE to the code of the comparison instruction and
4001 *CODE to the code that the branch or move should use. */
4003 *code
= mips_reverse_fp_cond_p (&cmp_code
) ? EQ
: NE
;
4005 ? gen_reg_rtx (CCmode
)
4006 : gen_rtx_REG (CCmode
, FPSW_REGNUM
));
4008 mips_emit_binary (cmp_code
, *op0
, cmp_operands
[0], cmp_operands
[1]);
4012 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4013 Store the result in TARGET and return true if successful.
4015 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
4018 mips_emit_scc (enum rtx_code code
, rtx target
)
4020 if (GET_MODE_CLASS (GET_MODE (cmp_operands
[0])) != MODE_INT
)
4023 target
= gen_lowpart (GET_MODE (cmp_operands
[0]), target
);
4024 if (code
== EQ
|| code
== NE
)
4026 rtx zie
= mips_zero_if_equal (cmp_operands
[0], cmp_operands
[1]);
4027 mips_emit_binary (code
, target
, zie
, const0_rtx
);
4030 mips_emit_int_relational (code
, 0, target
,
4031 cmp_operands
[0], cmp_operands
[1]);
4035 /* Emit the common code for doing conditional branches.
4036 operand[0] is the label to jump to.
4037 The comparison operands are saved away by cmp{si,di,sf,df}. */
4040 gen_conditional_branch (rtx
*operands
, enum rtx_code code
)
4042 rtx op0
, op1
, condition
;
4044 mips_emit_compare (&code
, &op0
, &op1
, TARGET_MIPS16
);
4045 condition
= gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
4046 emit_jump_insn (gen_condjump (condition
, operands
[0]));
4051 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4052 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4055 mips_expand_vcondv2sf (rtx dest
, rtx true_src
, rtx false_src
,
4056 enum rtx_code cond
, rtx cmp_op0
, rtx cmp_op1
)
4061 reversed_p
= mips_reverse_fp_cond_p (&cond
);
4062 cmp_result
= gen_reg_rtx (CCV2mode
);
4063 emit_insn (gen_scc_ps (cmp_result
,
4064 gen_rtx_fmt_ee (cond
, VOIDmode
, cmp_op0
, cmp_op1
)));
4066 emit_insn (gen_mips_cond_move_tf_ps (dest
, false_src
, true_src
,
4069 emit_insn (gen_mips_cond_move_tf_ps (dest
, true_src
, false_src
,
4073 /* Emit the common code for conditional moves. OPERANDS is the array
4074 of operands passed to the conditional move define_expand. */
4077 gen_conditional_move (rtx
*operands
)
4082 code
= GET_CODE (operands
[1]);
4083 mips_emit_compare (&code
, &op0
, &op1
, true);
4084 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
4085 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
4086 gen_rtx_fmt_ee (code
,
4089 operands
[2], operands
[3])));
4092 /* Emit a conditional trap. OPERANDS is the array of operands passed to
4093 the conditional_trap expander. */
4096 mips_gen_conditional_trap (rtx
*operands
)
4099 enum rtx_code cmp_code
= GET_CODE (operands
[0]);
4100 enum machine_mode mode
= GET_MODE (cmp_operands
[0]);
4102 /* MIPS conditional trap machine instructions don't have GT or LE
4103 flavors, so we must invert the comparison and convert to LT and
4104 GE, respectively. */
4107 case GT
: cmp_code
= LT
; break;
4108 case LE
: cmp_code
= GE
; break;
4109 case GTU
: cmp_code
= LTU
; break;
4110 case LEU
: cmp_code
= GEU
; break;
4113 if (cmp_code
== GET_CODE (operands
[0]))
4115 op0
= cmp_operands
[0];
4116 op1
= cmp_operands
[1];
4120 op0
= cmp_operands
[1];
4121 op1
= cmp_operands
[0];
4123 op0
= force_reg (mode
, op0
);
4124 if (!arith_operand (op1
, mode
))
4125 op1
= force_reg (mode
, op1
);
4127 emit_insn (gen_rtx_TRAP_IF (VOIDmode
,
4128 gen_rtx_fmt_ee (cmp_code
, mode
, op0
, op1
),
4132 /* Return true if function DECL is a MIPS16 function. Return the ambient
4133 setting if DECL is null. */
4136 mips_use_mips16_mode_p (tree decl
)
4140 /* Nested functions must use the same frame pointer as their
4141 parent and must therefore use the same ISA mode. */
4142 tree parent
= decl_function_context (decl
);
4145 if (mips_mips16_decl_p (decl
))
4147 if (mips_nomips16_decl_p (decl
))
4150 return mips_base_mips16
;
4153 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
4156 mips_ok_for_lazy_binding_p (rtx x
)
4158 return (TARGET_USE_GOT
4159 && GET_CODE (x
) == SYMBOL_REF
4160 && !mips_symbol_binds_local_p (x
));
4163 /* Load function address ADDR into register DEST. SIBCALL_P is true
4164 if the address is needed for a sibling call. */
4167 mips_load_call_address (rtx dest
, rtx addr
, int sibcall_p
)
4169 /* If we're generating PIC, and this call is to a global function,
4170 try to allow its address to be resolved lazily. This isn't
4171 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
4172 to the stub would be our caller's gp, not ours. */
4173 if (TARGET_EXPLICIT_RELOCS
4174 && !(sibcall_p
&& TARGET_CALL_SAVED_GP
)
4175 && mips_ok_for_lazy_binding_p (addr
))
4177 rtx high
, lo_sum_symbol
;
4179 high
= mips_unspec_offset_high (dest
, pic_offset_table_rtx
,
4180 addr
, SYMBOL_GOTOFF_CALL
);
4181 lo_sum_symbol
= mips_unspec_address (addr
, SYMBOL_GOTOFF_CALL
);
4182 if (Pmode
== SImode
)
4183 emit_insn (gen_load_callsi (dest
, high
, lo_sum_symbol
));
4185 emit_insn (gen_load_calldi (dest
, high
, lo_sum_symbol
));
4188 mips_emit_move (dest
, addr
);
4192 /* Expand a call or call_value instruction. RESULT is where the
4193 result will go (null for calls), ADDR is the address of the
4194 function, ARGS_SIZE is the size of the arguments and AUX is
4195 the value passed to us by mips_function_arg. SIBCALL_P is true
4196 if we are expanding a sibling call, false if we're expanding
4200 mips_expand_call (rtx result
, rtx addr
, rtx args_size
, rtx aux
, int sibcall_p
)
4202 rtx orig_addr
, pattern
, insn
;
4205 if (!call_insn_operand (addr
, VOIDmode
))
4207 addr
= gen_reg_rtx (Pmode
);
4208 mips_load_call_address (addr
, orig_addr
, sibcall_p
);
4212 && TARGET_HARD_FLOAT_ABI
4213 && build_mips16_call_stub (result
, addr
, args_size
,
4214 aux
== 0 ? 0 : (int) GET_MODE (aux
)))
4218 pattern
= (sibcall_p
4219 ? gen_sibcall_internal (addr
, args_size
)
4220 : gen_call_internal (addr
, args_size
));
4221 else if (GET_CODE (result
) == PARALLEL
&& XVECLEN (result
, 0) == 2)
4225 reg1
= XEXP (XVECEXP (result
, 0, 0), 0);
4226 reg2
= XEXP (XVECEXP (result
, 0, 1), 0);
4229 ? gen_sibcall_value_multiple_internal (reg1
, addr
, args_size
, reg2
)
4230 : gen_call_value_multiple_internal (reg1
, addr
, args_size
, reg2
));
4233 pattern
= (sibcall_p
4234 ? gen_sibcall_value_internal (result
, addr
, args_size
)
4235 : gen_call_value_internal (result
, addr
, args_size
));
4237 insn
= emit_call_insn (pattern
);
4239 /* Lazy-binding stubs require $gp to be valid on entry. */
4240 if (mips_ok_for_lazy_binding_p (orig_addr
))
4241 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), pic_offset_table_rtx
);
4245 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4248 mips_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
4250 if (!TARGET_SIBCALLS
)
4253 /* We can't do a sibcall if the called function is a MIPS16 function
4254 because there is no direct "jx" instruction equivalent to "jalx" to
4255 switch the ISA mode. */
4256 if (mips_use_mips16_mode_p (decl
))
4259 /* ...and when -minterlink-mips16 is in effect, assume that external
4260 functions could be MIPS16 ones unless an attribute explicitly
4261 tells us otherwise. We only care about cases where the sibling
4262 and normal calls would both be direct. */
4263 if (TARGET_INTERLINK_MIPS16
4265 && DECL_EXTERNAL (decl
)
4266 && !mips_nomips16_decl_p (decl
)
4267 && const_call_insn_operand (XEXP (DECL_RTL (decl
), 0), VOIDmode
))
4274 /* Emit code to move general operand SRC into condition-code
4275 register DEST. SCRATCH is a scratch TFmode float register.
4282 where FP1 and FP2 are single-precision float registers
4283 taken from SCRATCH. */
4286 mips_emit_fcc_reload (rtx dest
, rtx src
, rtx scratch
)
4290 /* Change the source to SFmode. */
4292 src
= adjust_address (src
, SFmode
, 0);
4293 else if (REG_P (src
) || GET_CODE (src
) == SUBREG
)
4294 src
= gen_rtx_REG (SFmode
, true_regnum (src
));
4296 fp1
= gen_rtx_REG (SFmode
, REGNO (scratch
));
4297 fp2
= gen_rtx_REG (SFmode
, REGNO (scratch
) + MAX_FPRS_PER_FMT
);
4299 mips_emit_move (copy_rtx (fp1
), src
);
4300 mips_emit_move (copy_rtx (fp2
), CONST0_RTX (SFmode
));
4301 emit_insn (gen_slt_sf (dest
, fp2
, fp1
));
4304 /* Emit code to change the current function's return address to
4305 ADDRESS. SCRATCH is available as a scratch register, if needed.
4306 ADDRESS and SCRATCH are both word-mode GPRs. */
4309 mips_set_return_address (rtx address
, rtx scratch
)
4313 compute_frame_size (get_frame_size ());
4314 gcc_assert ((cfun
->machine
->frame
.mask
>> 31) & 1);
4315 slot_address
= mips_add_offset (scratch
, stack_pointer_rtx
,
4316 cfun
->machine
->frame
.gp_sp_offset
);
4318 mips_emit_move (gen_rtx_MEM (GET_MODE (address
), slot_address
), address
);
4321 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
4322 Assume that the areas do not overlap. */
4325 mips_block_move_straight (rtx dest
, rtx src
, HOST_WIDE_INT length
)
4327 HOST_WIDE_INT offset
, delta
;
4328 unsigned HOST_WIDE_INT bits
;
4330 enum machine_mode mode
;
4333 /* Work out how many bits to move at a time. If both operands have
4334 half-word alignment, it is usually better to move in half words.
4335 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
4336 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
4337 Otherwise move word-sized chunks. */
4338 if (MEM_ALIGN (src
) == BITS_PER_WORD
/ 2
4339 && MEM_ALIGN (dest
) == BITS_PER_WORD
/ 2)
4340 bits
= BITS_PER_WORD
/ 2;
4342 bits
= BITS_PER_WORD
;
4344 mode
= mode_for_size (bits
, MODE_INT
, 0);
4345 delta
= bits
/ BITS_PER_UNIT
;
4347 /* Allocate a buffer for the temporary registers. */
4348 regs
= alloca (sizeof (rtx
) * length
/ delta
);
4350 /* Load as many BITS-sized chunks as possible. Use a normal load if
4351 the source has enough alignment, otherwise use left/right pairs. */
4352 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
4354 regs
[i
] = gen_reg_rtx (mode
);
4355 if (MEM_ALIGN (src
) >= bits
)
4356 mips_emit_move (regs
[i
], adjust_address (src
, mode
, offset
));
4359 rtx part
= adjust_address (src
, BLKmode
, offset
);
4360 if (!mips_expand_unaligned_load (regs
[i
], part
, bits
, 0))
4365 /* Copy the chunks to the destination. */
4366 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
4367 if (MEM_ALIGN (dest
) >= bits
)
4368 mips_emit_move (adjust_address (dest
, mode
, offset
), regs
[i
]);
4371 rtx part
= adjust_address (dest
, BLKmode
, offset
);
4372 if (!mips_expand_unaligned_store (part
, regs
[i
], bits
, 0))
4376 /* Mop up any left-over bytes. */
4377 if (offset
< length
)
4379 src
= adjust_address (src
, BLKmode
, offset
);
4380 dest
= adjust_address (dest
, BLKmode
, offset
);
4381 move_by_pieces (dest
, src
, length
- offset
,
4382 MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)), 0);
4386 #define MAX_MOVE_REGS 4
4387 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4390 /* Helper function for doing a loop-based block operation on memory
4391 reference MEM. Each iteration of the loop will operate on LENGTH
4394 Create a new base register for use within the loop and point it to
4395 the start of MEM. Create a new memory reference that uses this
4396 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4399 mips_adjust_block_mem (rtx mem
, HOST_WIDE_INT length
,
4400 rtx
*loop_reg
, rtx
*loop_mem
)
4402 *loop_reg
= copy_addr_to_reg (XEXP (mem
, 0));
4404 /* Although the new mem does not refer to a known location,
4405 it does keep up to LENGTH bytes of alignment. */
4406 *loop_mem
= change_address (mem
, BLKmode
, *loop_reg
);
4407 set_mem_align (*loop_mem
, MIN (MEM_ALIGN (mem
), length
* BITS_PER_UNIT
));
4411 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4412 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4413 memory regions do not overlap. */
4416 mips_block_move_loop (rtx dest
, rtx src
, HOST_WIDE_INT length
)
4418 rtx label
, src_reg
, dest_reg
, final_src
;
4419 HOST_WIDE_INT leftover
;
4421 leftover
= length
% MAX_MOVE_BYTES
;
4424 /* Create registers and memory references for use within the loop. */
4425 mips_adjust_block_mem (src
, MAX_MOVE_BYTES
, &src_reg
, &src
);
4426 mips_adjust_block_mem (dest
, MAX_MOVE_BYTES
, &dest_reg
, &dest
);
4428 /* Calculate the value that SRC_REG should have after the last iteration
4430 final_src
= expand_simple_binop (Pmode
, PLUS
, src_reg
, GEN_INT (length
),
4433 /* Emit the start of the loop. */
4434 label
= gen_label_rtx ();
4437 /* Emit the loop body. */
4438 mips_block_move_straight (dest
, src
, MAX_MOVE_BYTES
);
4440 /* Move on to the next block. */
4441 mips_emit_move (src_reg
, plus_constant (src_reg
, MAX_MOVE_BYTES
));
4442 mips_emit_move (dest_reg
, plus_constant (dest_reg
, MAX_MOVE_BYTES
));
4444 /* Emit the loop condition. */
4445 if (Pmode
== DImode
)
4446 emit_insn (gen_cmpdi (src_reg
, final_src
));
4448 emit_insn (gen_cmpsi (src_reg
, final_src
));
4449 emit_jump_insn (gen_bne (label
));
4451 /* Mop up any left-over bytes. */
4453 mips_block_move_straight (dest
, src
, leftover
);
4457 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4460 mips_expand_synci_loop (rtx begin
, rtx end
)
4462 rtx inc
, label
, cmp
, cmp_result
;
4464 /* Load INC with the cache line size (rdhwr INC,$1). */
4465 inc
= gen_reg_rtx (SImode
);
4466 emit_insn (gen_rdhwr (inc
, const1_rtx
));
4468 /* Loop back to here. */
4469 label
= gen_label_rtx ();
4472 emit_insn (gen_synci (begin
));
4474 cmp
= gen_reg_rtx (Pmode
);
4475 mips_emit_binary (GTU
, cmp
, begin
, end
);
4477 mips_emit_binary (PLUS
, begin
, begin
, inc
);
4479 cmp_result
= gen_rtx_EQ (VOIDmode
, cmp
, const0_rtx
);
4480 emit_jump_insn (gen_condjump (cmp_result
, label
));
4483 /* Expand a movmemsi instruction. */
4486 mips_expand_block_move (rtx dest
, rtx src
, rtx length
)
4488 if (GET_CODE (length
) == CONST_INT
)
4490 if (INTVAL (length
) <= 2 * MAX_MOVE_BYTES
)
4492 mips_block_move_straight (dest
, src
, INTVAL (length
));
4497 mips_block_move_loop (dest
, src
, INTVAL (length
));
4504 /* Argument support functions. */
4506 /* Initialize CUMULATIVE_ARGS for a function. */
4509 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
4510 rtx libname ATTRIBUTE_UNUSED
)
4512 static CUMULATIVE_ARGS zero_cum
;
4513 tree param
, next_param
;
4516 cum
->prototype
= (fntype
&& TYPE_ARG_TYPES (fntype
));
4518 /* Determine if this function has variable arguments. This is
4519 indicated by the last argument being 'void_type_mode' if there
4520 are no variable arguments. The standard MIPS calling sequence
4521 passes all arguments in the general purpose registers in this case. */
4523 for (param
= fntype
? TYPE_ARG_TYPES (fntype
) : 0;
4524 param
!= 0; param
= next_param
)
4526 next_param
= TREE_CHAIN (param
);
4527 if (next_param
== 0 && TREE_VALUE (param
) != void_type_node
)
4528 cum
->gp_reg_found
= 1;
4533 /* Fill INFO with information about a single argument. CUM is the
4534 cumulative state for earlier arguments. MODE is the mode of this
4535 argument and TYPE is its type (if known). NAMED is true if this
4536 is a named (fixed) argument rather than a variable one. */
4539 mips_arg_info (const CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
4540 tree type
, int named
, struct mips_arg_info
*info
)
4542 bool doubleword_aligned_p
;
4543 unsigned int num_bytes
, num_words
, max_regs
;
4545 /* Work out the size of the argument. */
4546 num_bytes
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
4547 num_words
= (num_bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
4549 /* Decide whether it should go in a floating-point register, assuming
4550 one is free. Later code checks for availability.
4552 The checks against UNITS_PER_FPVALUE handle the soft-float and
4553 single-float cases. */
4557 /* The EABI conventions have traditionally been defined in terms
4558 of TYPE_MODE, regardless of the actual type. */
4559 info
->fpr_p
= ((GET_MODE_CLASS (mode
) == MODE_FLOAT
4560 || GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
)
4561 && GET_MODE_SIZE (mode
) <= UNITS_PER_FPVALUE
);
4566 /* Only leading floating-point scalars are passed in
4567 floating-point registers. We also handle vector floats the same
4568 say, which is OK because they are not covered by the standard ABI. */
4569 info
->fpr_p
= (!cum
->gp_reg_found
4570 && cum
->arg_number
< 2
4571 && (type
== 0 || SCALAR_FLOAT_TYPE_P (type
)
4572 || VECTOR_FLOAT_TYPE_P (type
))
4573 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
4574 || GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
)
4575 && GET_MODE_SIZE (mode
) <= UNITS_PER_FPVALUE
);
4580 /* Scalar and complex floating-point types are passed in
4581 floating-point registers. */
4582 info
->fpr_p
= (named
4583 && (type
== 0 || FLOAT_TYPE_P (type
))
4584 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
4585 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
4586 || GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
)
4587 && GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_FPVALUE
);
4589 /* ??? According to the ABI documentation, the real and imaginary
4590 parts of complex floats should be passed in individual registers.
4591 The real and imaginary parts of stack arguments are supposed
4592 to be contiguous and there should be an extra word of padding
4595 This has two problems. First, it makes it impossible to use a
4596 single "void *" va_list type, since register and stack arguments
4597 are passed differently. (At the time of writing, MIPSpro cannot
4598 handle complex float varargs correctly.) Second, it's unclear
4599 what should happen when there is only one register free.
4601 For now, we assume that named complex floats should go into FPRs
4602 if there are two FPRs free, otherwise they should be passed in the
4603 same way as a struct containing two floats. */
4605 && GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
4606 && GET_MODE_UNIT_SIZE (mode
) < UNITS_PER_FPVALUE
)
4608 if (cum
->num_gprs
>= MAX_ARGS_IN_REGISTERS
- 1)
4609 info
->fpr_p
= false;
4619 /* See whether the argument has doubleword alignment. */
4620 doubleword_aligned_p
= FUNCTION_ARG_BOUNDARY (mode
, type
) > BITS_PER_WORD
;
4622 /* Set REG_OFFSET to the register count we're interested in.
4623 The EABI allocates the floating-point registers separately,
4624 but the other ABIs allocate them like integer registers. */
4625 info
->reg_offset
= (mips_abi
== ABI_EABI
&& info
->fpr_p
4629 /* Advance to an even register if the argument is doubleword-aligned. */
4630 if (doubleword_aligned_p
)
4631 info
->reg_offset
+= info
->reg_offset
& 1;
4633 /* Work out the offset of a stack argument. */
4634 info
->stack_offset
= cum
->stack_words
;
4635 if (doubleword_aligned_p
)
4636 info
->stack_offset
+= info
->stack_offset
& 1;
4638 max_regs
= MAX_ARGS_IN_REGISTERS
- info
->reg_offset
;
4640 /* Partition the argument between registers and stack. */
4641 info
->reg_words
= MIN (num_words
, max_regs
);
4642 info
->stack_words
= num_words
- info
->reg_words
;
4646 /* INFO describes an argument that is passed in a single-register value.
4647 Return the register it uses, assuming that FPRs are available if
4651 mips_arg_regno (const struct mips_arg_info
*info
, bool hard_float_p
)
4653 if (!info
->fpr_p
|| !hard_float_p
)
4654 return GP_ARG_FIRST
+ info
->reg_offset
;
4655 else if (mips_abi
== ABI_32
&& TARGET_DOUBLE_FLOAT
&& info
->reg_offset
> 0)
4656 /* In o32, the second argument is always passed in $f14
4657 for TARGET_DOUBLE_FLOAT, regardless of whether the
4658 first argument was a word or doubleword. */
4659 return FP_ARG_FIRST
+ 2;
4661 return FP_ARG_FIRST
+ info
->reg_offset
;
4664 /* Implement FUNCTION_ARG_ADVANCE. */
4667 function_arg_advance (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
4668 tree type
, int named
)
4670 struct mips_arg_info info
;
4672 mips_arg_info (cum
, mode
, type
, named
, &info
);
4675 cum
->gp_reg_found
= true;
4677 /* See the comment above the cumulative args structure in mips.h
4678 for an explanation of what this code does. It assumes the O32
4679 ABI, which passes at most 2 arguments in float registers. */
4680 if (cum
->arg_number
< 2 && info
.fpr_p
)
4681 cum
->fp_code
+= (mode
== SFmode
? 1 : 2) << (cum
->arg_number
* 2);
4683 if (mips_abi
!= ABI_EABI
|| !info
.fpr_p
)
4684 cum
->num_gprs
= info
.reg_offset
+ info
.reg_words
;
4685 else if (info
.reg_words
> 0)
4686 cum
->num_fprs
+= MAX_FPRS_PER_FMT
;
4688 if (info
.stack_words
> 0)
4689 cum
->stack_words
= info
.stack_offset
+ info
.stack_words
;
4694 /* Implement FUNCTION_ARG. */
4697 function_arg (const CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
4698 tree type
, int named
)
4700 struct mips_arg_info info
;
4702 /* We will be called with a mode of VOIDmode after the last argument
4703 has been seen. Whatever we return will be passed to the call
4704 insn. If we need a mips16 fp_code, return a REG with the code
4705 stored as the mode. */
4706 if (mode
== VOIDmode
)
4708 if (TARGET_MIPS16
&& cum
->fp_code
!= 0)
4709 return gen_rtx_REG ((enum machine_mode
) cum
->fp_code
, 0);
4715 mips_arg_info (cum
, mode
, type
, named
, &info
);
4717 /* Return straight away if the whole argument is passed on the stack. */
4718 if (info
.reg_offset
== MAX_ARGS_IN_REGISTERS
)
4722 && TREE_CODE (type
) == RECORD_TYPE
4724 && TYPE_SIZE_UNIT (type
)
4725 && host_integerp (TYPE_SIZE_UNIT (type
), 1)
4728 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4729 structure contains a double in its entirety, then that 64-bit
4730 chunk is passed in a floating point register. */
4733 /* First check to see if there is any such field. */
4734 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
4735 if (TREE_CODE (field
) == FIELD_DECL
4736 && TREE_CODE (TREE_TYPE (field
)) == REAL_TYPE
4737 && TYPE_PRECISION (TREE_TYPE (field
)) == BITS_PER_WORD
4738 && host_integerp (bit_position (field
), 0)
4739 && int_bit_position (field
) % BITS_PER_WORD
== 0)
4744 /* Now handle the special case by returning a PARALLEL
4745 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4746 chunks are passed in registers. */
4748 HOST_WIDE_INT bitpos
;
4751 /* assign_parms checks the mode of ENTRY_PARM, so we must
4752 use the actual mode here. */
4753 ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc (info
.reg_words
));
4756 field
= TYPE_FIELDS (type
);
4757 for (i
= 0; i
< info
.reg_words
; i
++)
4761 for (; field
; field
= TREE_CHAIN (field
))
4762 if (TREE_CODE (field
) == FIELD_DECL
4763 && int_bit_position (field
) >= bitpos
)
4767 && int_bit_position (field
) == bitpos
4768 && TREE_CODE (TREE_TYPE (field
)) == REAL_TYPE
4769 && !TARGET_SOFT_FLOAT
4770 && TYPE_PRECISION (TREE_TYPE (field
)) == BITS_PER_WORD
)
4771 reg
= gen_rtx_REG (DFmode
, FP_ARG_FIRST
+ info
.reg_offset
+ i
);
4773 reg
= gen_rtx_REG (DImode
, GP_ARG_FIRST
+ info
.reg_offset
+ i
);
4776 = gen_rtx_EXPR_LIST (VOIDmode
, reg
,
4777 GEN_INT (bitpos
/ BITS_PER_UNIT
));
4779 bitpos
+= BITS_PER_WORD
;
4785 /* Handle the n32/n64 conventions for passing complex floating-point
4786 arguments in FPR pairs. The real part goes in the lower register
4787 and the imaginary part goes in the upper register. */
4790 && GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
4793 enum machine_mode inner
;
4796 inner
= GET_MODE_INNER (mode
);
4797 reg
= FP_ARG_FIRST
+ info
.reg_offset
;
4798 if (info
.reg_words
* UNITS_PER_WORD
== GET_MODE_SIZE (inner
))
4800 /* Real part in registers, imaginary part on stack. */
4801 gcc_assert (info
.stack_words
== info
.reg_words
);
4802 return gen_rtx_REG (inner
, reg
);
4806 gcc_assert (info
.stack_words
== 0);
4807 real
= gen_rtx_EXPR_LIST (VOIDmode
,
4808 gen_rtx_REG (inner
, reg
),
4810 imag
= gen_rtx_EXPR_LIST (VOIDmode
,
4812 reg
+ info
.reg_words
/ 2),
4813 GEN_INT (GET_MODE_SIZE (inner
)));
4814 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, real
, imag
));
4818 return gen_rtx_REG (mode
, mips_arg_regno (&info
, TARGET_HARD_FLOAT
));
4822 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4825 mips_arg_partial_bytes (CUMULATIVE_ARGS
*cum
,
4826 enum machine_mode mode
, tree type
, bool named
)
4828 struct mips_arg_info info
;
4830 mips_arg_info (cum
, mode
, type
, named
, &info
);
4831 return info
.stack_words
> 0 ? info
.reg_words
* UNITS_PER_WORD
: 0;
4835 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4836 PARM_BOUNDARY bits of alignment, but will be given anything up
4837 to STACK_BOUNDARY bits if the type requires it. */
4840 function_arg_boundary (enum machine_mode mode
, tree type
)
4842 unsigned int alignment
;
4844 alignment
= type
? TYPE_ALIGN (type
) : GET_MODE_ALIGNMENT (mode
);
4845 if (alignment
< PARM_BOUNDARY
)
4846 alignment
= PARM_BOUNDARY
;
4847 if (alignment
> STACK_BOUNDARY
)
4848 alignment
= STACK_BOUNDARY
;
4852 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4853 upward rather than downward. In other words, return true if the
4854 first byte of the stack slot has useful data, false if the last
4858 mips_pad_arg_upward (enum machine_mode mode
, const_tree type
)
4860 /* On little-endian targets, the first byte of every stack argument
4861 is passed in the first byte of the stack slot. */
4862 if (!BYTES_BIG_ENDIAN
)
4865 /* Otherwise, integral types are padded downward: the last byte of a
4866 stack argument is passed in the last byte of the stack slot. */
4868 ? (INTEGRAL_TYPE_P (type
)
4869 || POINTER_TYPE_P (type
)
4870 || FIXED_POINT_TYPE_P (type
))
4871 : (GET_MODE_CLASS (mode
) == MODE_INT
4872 || ALL_SCALAR_FIXED_POINT_MODE_P (mode
)))
4875 /* Big-endian o64 pads floating-point arguments downward. */
4876 if (mips_abi
== ABI_O64
)
4877 if (type
!= 0 ? FLOAT_TYPE_P (type
) : GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4880 /* Other types are padded upward for o32, o64, n32 and n64. */
4881 if (mips_abi
!= ABI_EABI
)
4884 /* Arguments smaller than a stack slot are padded downward. */
4885 if (mode
!= BLKmode
)
4886 return (GET_MODE_BITSIZE (mode
) >= PARM_BOUNDARY
);
4888 return (int_size_in_bytes (type
) >= (PARM_BOUNDARY
/ BITS_PER_UNIT
));
4892 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4893 if the least significant byte of the register has useful data. Return
4894 the opposite if the most significant byte does. */
4897 mips_pad_reg_upward (enum machine_mode mode
, tree type
)
4899 /* No shifting is required for floating-point arguments. */
4900 if (type
!= 0 ? FLOAT_TYPE_P (type
) : GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4901 return !BYTES_BIG_ENDIAN
;
4903 /* Otherwise, apply the same padding to register arguments as we do
4904 to stack arguments. */
4905 return mips_pad_arg_upward (mode
, type
);
4909 mips_setup_incoming_varargs (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
4910 tree type
, int *pretend_size ATTRIBUTE_UNUSED
,
4913 CUMULATIVE_ARGS local_cum
;
4914 int gp_saved
, fp_saved
;
4916 /* The caller has advanced CUM up to, but not beyond, the last named
4917 argument. Advance a local copy of CUM past the last "real" named
4918 argument, to find out how many registers are left over. */
4921 FUNCTION_ARG_ADVANCE (local_cum
, mode
, type
, 1);
4923 /* Found out how many registers we need to save. */
4924 gp_saved
= MAX_ARGS_IN_REGISTERS
- local_cum
.num_gprs
;
4925 fp_saved
= (EABI_FLOAT_VARARGS_P
4926 ? MAX_ARGS_IN_REGISTERS
- local_cum
.num_fprs
4935 ptr
= plus_constant (virtual_incoming_args_rtx
,
4936 REG_PARM_STACK_SPACE (cfun
->decl
)
4937 - gp_saved
* UNITS_PER_WORD
);
4938 mem
= gen_rtx_MEM (BLKmode
, ptr
);
4939 set_mem_alias_set (mem
, get_varargs_alias_set ());
4941 move_block_from_reg (local_cum
.num_gprs
+ GP_ARG_FIRST
,
4946 /* We can't use move_block_from_reg, because it will use
4948 enum machine_mode mode
;
4951 /* Set OFF to the offset from virtual_incoming_args_rtx of
4952 the first float register. The FP save area lies below
4953 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4954 off
= -gp_saved
* UNITS_PER_WORD
;
4955 off
&= ~(UNITS_PER_FPVALUE
- 1);
4956 off
-= fp_saved
* UNITS_PER_FPREG
;
4958 mode
= TARGET_SINGLE_FLOAT
? SFmode
: DFmode
;
4960 for (i
= local_cum
.num_fprs
; i
< MAX_ARGS_IN_REGISTERS
;
4961 i
+= MAX_FPRS_PER_FMT
)
4965 ptr
= plus_constant (virtual_incoming_args_rtx
, off
);
4966 mem
= gen_rtx_MEM (mode
, ptr
);
4967 set_mem_alias_set (mem
, get_varargs_alias_set ());
4968 mips_emit_move (mem
, gen_rtx_REG (mode
, FP_ARG_FIRST
+ i
));
4969 off
+= UNITS_PER_HWFPVALUE
;
4973 if (REG_PARM_STACK_SPACE (cfun
->decl
) == 0)
4974 cfun
->machine
->varargs_size
= (gp_saved
* UNITS_PER_WORD
4975 + fp_saved
* UNITS_PER_FPREG
);
4978 /* Create the va_list data type.
4979 We keep 3 pointers, and two offsets.
4980 Two pointers are to the overflow area, which starts at the CFA.
4981 One of these is constant, for addressing into the GPR save area below it.
4982 The other is advanced up the stack through the overflow region.
4983 The third pointer is to the GPR save area. Since the FPR save area
4984 is just below it, we can address FPR slots off this pointer.
4985 We also keep two one-byte offsets, which are to be subtracted from the
4986 constant pointers to yield addresses in the GPR and FPR save areas.
4987 These are downcounted as float or non-float arguments are used,
4988 and when they get to zero, the argument must be obtained from the
4990 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4991 pointer is enough. It's started at the GPR save area, and is
4993 Note that the GPR save area is not constant size, due to optimization
4994 in the prologue. Hence, we can't use a design with two pointers
4995 and two offsets, although we could have designed this with two pointers
4996 and three offsets. */
4999 mips_build_builtin_va_list (void)
5001 if (EABI_FLOAT_VARARGS_P
)
5003 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
, f_res
, record
;
5006 record
= (*lang_hooks
.types
.make_type
) (RECORD_TYPE
);
5008 f_ovfl
= build_decl (FIELD_DECL
, get_identifier ("__overflow_argptr"),
5010 f_gtop
= build_decl (FIELD_DECL
, get_identifier ("__gpr_top"),
5012 f_ftop
= build_decl (FIELD_DECL
, get_identifier ("__fpr_top"),
5014 f_goff
= build_decl (FIELD_DECL
, get_identifier ("__gpr_offset"),
5015 unsigned_char_type_node
);
5016 f_foff
= build_decl (FIELD_DECL
, get_identifier ("__fpr_offset"),
5017 unsigned_char_type_node
);
5018 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5019 warn on every user file. */
5020 index
= build_int_cst (NULL_TREE
, GET_MODE_SIZE (ptr_mode
) - 2 - 1);
5021 array
= build_array_type (unsigned_char_type_node
,
5022 build_index_type (index
));
5023 f_res
= build_decl (FIELD_DECL
, get_identifier ("__reserved"), array
);
5025 DECL_FIELD_CONTEXT (f_ovfl
) = record
;
5026 DECL_FIELD_CONTEXT (f_gtop
) = record
;
5027 DECL_FIELD_CONTEXT (f_ftop
) = record
;
5028 DECL_FIELD_CONTEXT (f_goff
) = record
;
5029 DECL_FIELD_CONTEXT (f_foff
) = record
;
5030 DECL_FIELD_CONTEXT (f_res
) = record
;
5032 TYPE_FIELDS (record
) = f_ovfl
;
5033 TREE_CHAIN (f_ovfl
) = f_gtop
;
5034 TREE_CHAIN (f_gtop
) = f_ftop
;
5035 TREE_CHAIN (f_ftop
) = f_goff
;
5036 TREE_CHAIN (f_goff
) = f_foff
;
5037 TREE_CHAIN (f_foff
) = f_res
;
5039 layout_type (record
);
5042 else if (TARGET_IRIX
&& TARGET_IRIX6
)
5043 /* On IRIX 6, this type is 'char *'. */
5044 return build_pointer_type (char_type_node
);
5046 /* Otherwise, we use 'void *'. */
5047 return ptr_type_node
;
5050 /* Implement va_start. */
5053 mips_va_start (tree valist
, rtx nextarg
)
5055 if (EABI_FLOAT_VARARGS_P
)
5057 const CUMULATIVE_ARGS
*cum
;
5058 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
;
5059 tree ovfl
, gtop
, ftop
, goff
, foff
;
5061 int gpr_save_area_size
;
5062 int fpr_save_area_size
;
5065 cum
= ¤t_function_args_info
;
5067 = (MAX_ARGS_IN_REGISTERS
- cum
->num_gprs
) * UNITS_PER_WORD
;
5069 = (MAX_ARGS_IN_REGISTERS
- cum
->num_fprs
) * UNITS_PER_FPREG
;
5071 f_ovfl
= TYPE_FIELDS (va_list_type_node
);
5072 f_gtop
= TREE_CHAIN (f_ovfl
);
5073 f_ftop
= TREE_CHAIN (f_gtop
);
5074 f_goff
= TREE_CHAIN (f_ftop
);
5075 f_foff
= TREE_CHAIN (f_goff
);
5077 ovfl
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovfl
), valist
, f_ovfl
,
5079 gtop
= build3 (COMPONENT_REF
, TREE_TYPE (f_gtop
), valist
, f_gtop
,
5081 ftop
= build3 (COMPONENT_REF
, TREE_TYPE (f_ftop
), valist
, f_ftop
,
5083 goff
= build3 (COMPONENT_REF
, TREE_TYPE (f_goff
), valist
, f_goff
,
5085 foff
= build3 (COMPONENT_REF
, TREE_TYPE (f_foff
), valist
, f_foff
,
5088 /* Emit code to initialize OVFL, which points to the next varargs
5089 stack argument. CUM->STACK_WORDS gives the number of stack
5090 words used by named arguments. */
5091 t
= make_tree (TREE_TYPE (ovfl
), virtual_incoming_args_rtx
);
5092 if (cum
->stack_words
> 0)
5093 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ovfl
), t
,
5094 size_int (cum
->stack_words
* UNITS_PER_WORD
));
5095 t
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (ovfl
), ovfl
, t
);
5096 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5098 /* Emit code to initialize GTOP, the top of the GPR save area. */
5099 t
= make_tree (TREE_TYPE (gtop
), virtual_incoming_args_rtx
);
5100 t
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (gtop
), gtop
, t
);
5101 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5103 /* Emit code to initialize FTOP, the top of the FPR save area.
5104 This address is gpr_save_area_bytes below GTOP, rounded
5105 down to the next fp-aligned boundary. */
5106 t
= make_tree (TREE_TYPE (ftop
), virtual_incoming_args_rtx
);
5107 fpr_offset
= gpr_save_area_size
+ UNITS_PER_FPVALUE
- 1;
5108 fpr_offset
&= ~(UNITS_PER_FPVALUE
- 1);
5110 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ftop
), t
,
5111 size_int (-fpr_offset
));
5112 t
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (ftop
), ftop
, t
);
5113 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5115 /* Emit code to initialize GOFF, the offset from GTOP of the
5116 next GPR argument. */
5117 t
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (goff
), goff
,
5118 build_int_cst (NULL_TREE
, gpr_save_area_size
));
5119 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5121 /* Likewise emit code to initialize FOFF, the offset from FTOP
5122 of the next FPR argument. */
5123 t
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (foff
), foff
,
5124 build_int_cst (NULL_TREE
, fpr_save_area_size
));
5125 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5129 nextarg
= plus_constant (nextarg
, -cfun
->machine
->varargs_size
);
5130 std_expand_builtin_va_start (valist
, nextarg
);
5134 /* Implement va_arg. */
5137 mips_gimplify_va_arg_expr (tree valist
, tree type
, tree
*pre_p
, tree
*post_p
)
5139 HOST_WIDE_INT size
, rsize
;
5143 indirect
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, 0);
5146 type
= build_pointer_type (type
);
5148 size
= int_size_in_bytes (type
);
5149 rsize
= (size
+ UNITS_PER_WORD
- 1) & -UNITS_PER_WORD
;
5151 if (mips_abi
!= ABI_EABI
|| !EABI_FLOAT_VARARGS_P
)
5152 addr
= std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
5155 /* Not a simple merged stack. */
5157 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
;
5158 tree ovfl
, top
, off
, align
;
5159 HOST_WIDE_INT osize
;
5162 f_ovfl
= TYPE_FIELDS (va_list_type_node
);
5163 f_gtop
= TREE_CHAIN (f_ovfl
);
5164 f_ftop
= TREE_CHAIN (f_gtop
);
5165 f_goff
= TREE_CHAIN (f_ftop
);
5166 f_foff
= TREE_CHAIN (f_goff
);
5168 /* We maintain separate pointers and offsets for floating-point
5169 and integer arguments, but we need similar code in both cases.
5172 TOP be the top of the register save area;
5173 OFF be the offset from TOP of the next register;
5174 ADDR_RTX be the address of the argument;
5175 RSIZE be the number of bytes used to store the argument
5176 when it's in the register save area;
5177 OSIZE be the number of bytes used to store it when it's
5178 in the stack overflow area; and
5179 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
5181 The code we want is:
5183 1: off &= -rsize; // round down
5186 4: addr_rtx = top - off;
5191 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
5192 10: addr_rtx = ovfl + PADDING;
5196 [1] and [9] can sometimes be optimized away. */
5198 ovfl
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovfl
), valist
, f_ovfl
,
5201 if (GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
5202 && GET_MODE_SIZE (TYPE_MODE (type
)) <= UNITS_PER_FPVALUE
)
5204 top
= build3 (COMPONENT_REF
, TREE_TYPE (f_ftop
), valist
, f_ftop
,
5206 off
= build3 (COMPONENT_REF
, TREE_TYPE (f_foff
), valist
, f_foff
,
5209 /* When floating-point registers are saved to the stack,
5210 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
5211 of the float's precision. */
5212 rsize
= UNITS_PER_HWFPVALUE
;
5214 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5215 (= PARM_BOUNDARY bits). This can be different from RSIZE
5218 (1) On 32-bit targets when TYPE is a structure such as:
5220 struct s { float f; };
5222 Such structures are passed in paired FPRs, so RSIZE
5223 will be 8 bytes. However, the structure only takes
5224 up 4 bytes of memory, so OSIZE will only be 4.
5226 (2) In combinations such as -mgp64 -msingle-float
5227 -fshort-double. Doubles passed in registers
5228 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
5229 but those passed on the stack take up
5230 UNITS_PER_WORD bytes. */
5231 osize
= MAX (GET_MODE_SIZE (TYPE_MODE (type
)), UNITS_PER_WORD
);
5235 top
= build3 (COMPONENT_REF
, TREE_TYPE (f_gtop
), valist
, f_gtop
,
5237 off
= build3 (COMPONENT_REF
, TREE_TYPE (f_goff
), valist
, f_goff
,
5239 if (rsize
> UNITS_PER_WORD
)
5241 /* [1] Emit code for: off &= -rsize. */
5242 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (off
), off
,
5243 build_int_cst (NULL_TREE
, -rsize
));
5244 t
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (off
), off
, t
);
5245 gimplify_and_add (t
, pre_p
);
5250 /* [2] Emit code to branch if off == 0. */
5251 t
= build2 (NE_EXPR
, boolean_type_node
, off
,
5252 build_int_cst (TREE_TYPE (off
), 0));
5253 addr
= build3 (COND_EXPR
, ptr_type_node
, t
, NULL_TREE
, NULL_TREE
);
5255 /* [5] Emit code for: off -= rsize. We do this as a form of
5256 post-increment not available to C. Also widen for the
5257 coming pointer arithmetic. */
5258 t
= fold_convert (TREE_TYPE (off
), build_int_cst (NULL_TREE
, rsize
));
5259 t
= build2 (POSTDECREMENT_EXPR
, TREE_TYPE (off
), off
, t
);
5260 t
= fold_convert (sizetype
, t
);
5261 t
= fold_build1 (NEGATE_EXPR
, sizetype
, t
);
5263 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5264 the argument has RSIZE - SIZE bytes of leading padding. */
5265 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (top
), top
, t
);
5266 if (BYTES_BIG_ENDIAN
&& rsize
> size
)
5268 u
= size_int (rsize
- size
);
5269 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (t
), t
, u
);
5271 COND_EXPR_THEN (addr
) = t
;
5273 if (osize
> UNITS_PER_WORD
)
5275 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5276 u
= size_int (osize
- 1);
5277 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (ovfl
), ovfl
, u
);
5278 t
= fold_convert (sizetype
, t
);
5279 u
= size_int (-osize
);
5280 t
= build2 (BIT_AND_EXPR
, sizetype
, t
, u
);
5281 t
= fold_convert (TREE_TYPE (ovfl
), t
);
5282 align
= build2 (GIMPLE_MODIFY_STMT
, TREE_TYPE (ovfl
), ovfl
, t
);
5287 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5288 post-increment ovfl by osize. On big-endian machines,
5289 the argument has OSIZE - SIZE bytes of leading padding. */
5290 u
= fold_convert (TREE_TYPE (ovfl
),
5291 build_int_cst (NULL_TREE
, osize
));
5292 t
= build2 (POSTINCREMENT_EXPR
, TREE_TYPE (ovfl
), ovfl
, u
);
5293 if (BYTES_BIG_ENDIAN
&& osize
> size
)
5295 u
= size_int (osize
- size
);
5296 t
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (t
), t
, u
);
5299 /* String [9] and [10,11] together. */
5301 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (t
), align
, t
);
5302 COND_EXPR_ELSE (addr
) = t
;
5304 addr
= fold_convert (build_pointer_type (type
), addr
);
5305 addr
= build_va_arg_indirect_ref (addr
);
5309 addr
= build_va_arg_indirect_ref (addr
);
5314 /* Return true if it is possible to use left/right accesses for a
5315 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5316 returning true, update *OP, *LEFT and *RIGHT as follows:
5318 *OP is a BLKmode reference to the whole field.
5320 *LEFT is a QImode reference to the first byte if big endian or
5321 the last byte if little endian. This address can be used in the
5322 left-side instructions (lwl, swl, ldl, sdl).
5324 *RIGHT is a QImode reference to the opposite end of the field and
5325 can be used in the patterning right-side instruction. */
5328 mips_get_unaligned_mem (rtx
*op
, unsigned int width
, int bitpos
,
5329 rtx
*left
, rtx
*right
)
5333 /* Check that the operand really is a MEM. Not all the extv and
5334 extzv predicates are checked. */
5338 /* Check that the size is valid. */
5339 if (width
!= 32 && (!TARGET_64BIT
|| width
!= 64))
5342 /* We can only access byte-aligned values. Since we are always passed
5343 a reference to the first byte of the field, it is not necessary to
5344 do anything with BITPOS after this check. */
5345 if (bitpos
% BITS_PER_UNIT
!= 0)
5348 /* Reject aligned bitfields: we want to use a normal load or store
5349 instead of a left/right pair. */
5350 if (MEM_ALIGN (*op
) >= width
)
5353 /* Adjust *OP to refer to the whole field. This also has the effect
5354 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5355 *op
= adjust_address (*op
, BLKmode
, 0);
5356 set_mem_size (*op
, GEN_INT (width
/ BITS_PER_UNIT
));
5358 /* Get references to both ends of the field. We deliberately don't
5359 use the original QImode *OP for FIRST since the new BLKmode one
5360 might have a simpler address. */
5361 first
= adjust_address (*op
, QImode
, 0);
5362 last
= adjust_address (*op
, QImode
, width
/ BITS_PER_UNIT
- 1);
5364 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5365 be the upper word and RIGHT the lower word. */
5366 if (TARGET_BIG_ENDIAN
)
5367 *left
= first
, *right
= last
;
5369 *left
= last
, *right
= first
;
5375 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5376 Return true on success. We only handle cases where zero_extract is
5377 equivalent to sign_extract. */
5380 mips_expand_unaligned_load (rtx dest
, rtx src
, unsigned int width
, int bitpos
)
5382 rtx left
, right
, temp
;
5384 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5385 paradoxical word_mode subreg. This is the only case in which
5386 we allow the destination to be larger than the source. */
5387 if (GET_CODE (dest
) == SUBREG
5388 && GET_MODE (dest
) == DImode
5389 && SUBREG_BYTE (dest
) == 0
5390 && GET_MODE (SUBREG_REG (dest
)) == SImode
)
5391 dest
= SUBREG_REG (dest
);
5393 /* After the above adjustment, the destination must be the same
5394 width as the source. */
5395 if (GET_MODE_BITSIZE (GET_MODE (dest
)) != width
)
5398 if (!mips_get_unaligned_mem (&src
, width
, bitpos
, &left
, &right
))
5401 temp
= gen_reg_rtx (GET_MODE (dest
));
5402 if (GET_MODE (dest
) == DImode
)
5404 emit_insn (gen_mov_ldl (temp
, src
, left
));
5405 emit_insn (gen_mov_ldr (dest
, copy_rtx (src
), right
, temp
));
5409 emit_insn (gen_mov_lwl (temp
, src
, left
));
5410 emit_insn (gen_mov_lwr (dest
, copy_rtx (src
), right
, temp
));
5416 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5420 mips_expand_unaligned_store (rtx dest
, rtx src
, unsigned int width
, int bitpos
)
5423 enum machine_mode mode
;
5425 if (!mips_get_unaligned_mem (&dest
, width
, bitpos
, &left
, &right
))
5428 mode
= mode_for_size (width
, MODE_INT
, 0);
5429 src
= gen_lowpart (mode
, src
);
5433 emit_insn (gen_mov_sdl (dest
, src
, left
));
5434 emit_insn (gen_mov_sdr (copy_rtx (dest
), copy_rtx (src
), right
));
5438 emit_insn (gen_mov_swl (dest
, src
, left
));
5439 emit_insn (gen_mov_swr (copy_rtx (dest
), copy_rtx (src
), right
));
5444 /* Return true if X is a MEM with the same size as MODE. */
5447 mips_mem_fits_mode_p (enum machine_mode mode
, rtx x
)
5454 size
= MEM_SIZE (x
);
5455 return size
&& INTVAL (size
) == GET_MODE_SIZE (mode
);
5458 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5459 source of an "ext" instruction or the destination of an "ins"
5460 instruction. OP must be a register operand and the following
5461 conditions must hold:
5463 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5464 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5465 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5467 Also reject lengths equal to a word as they are better handled
5468 by the move patterns. */
5471 mips_use_ins_ext_p (rtx op
, rtx size
, rtx position
)
5473 HOST_WIDE_INT len
, pos
;
5475 if (!ISA_HAS_EXT_INS
5476 || !register_operand (op
, VOIDmode
)
5477 || GET_MODE_BITSIZE (GET_MODE (op
)) > BITS_PER_WORD
)
5480 len
= INTVAL (size
);
5481 pos
= INTVAL (position
);
5483 if (len
<= 0 || len
>= GET_MODE_BITSIZE (GET_MODE (op
))
5484 || pos
< 0 || pos
+ len
> GET_MODE_BITSIZE (GET_MODE (op
)))
5490 /* Set up globals to generate code for the ISA or processor
5491 described by INFO. */
5494 mips_set_architecture (const struct mips_cpu_info
*info
)
5498 mips_arch_info
= info
;
5499 mips_arch
= info
->cpu
;
5500 mips_isa
= info
->isa
;
5505 /* Likewise for tuning. */
5508 mips_set_tune (const struct mips_cpu_info
*info
)
5512 mips_tune_info
= info
;
5513 mips_tune
= info
->cpu
;
5517 /* Initialize mips_split_addresses from the associated command-line
5520 mips_split_addresses is a half-way house between explicit
5521 relocations and the traditional assembler macros. It can
5522 split absolute 32-bit symbolic constants into a high/lo_sum
5523 pair but uses macros for other sorts of access.
5525 Like explicit relocation support for REL targets, it relies
5526 on GNU extensions in the assembler and the linker.
5528 Although this code should work for -O0, it has traditionally
5529 been treated as an optimization. */
5532 mips_init_split_addresses (void)
5534 if (!TARGET_MIPS16
&& TARGET_SPLIT_ADDRESSES
5535 && optimize
&& !flag_pic
5536 && !ABI_HAS_64BIT_SYMBOLS
)
5537 mips_split_addresses
= 1;
5539 mips_split_addresses
= 0;
5542 /* (Re-)Initialize information about relocs. */
5545 mips_init_relocs (void)
5547 memset (mips_split_p
, '\0', sizeof (mips_split_p
));
5548 memset (mips_hi_relocs
, '\0', sizeof (mips_hi_relocs
));
5549 memset (mips_lo_relocs
, '\0', sizeof (mips_lo_relocs
));
5551 if (ABI_HAS_64BIT_SYMBOLS
)
5553 if (TARGET_EXPLICIT_RELOCS
)
5555 mips_split_p
[SYMBOL_64_HIGH
] = true;
5556 mips_hi_relocs
[SYMBOL_64_HIGH
] = "%highest(";
5557 mips_lo_relocs
[SYMBOL_64_HIGH
] = "%higher(";
5559 mips_split_p
[SYMBOL_64_MID
] = true;
5560 mips_hi_relocs
[SYMBOL_64_MID
] = "%higher(";
5561 mips_lo_relocs
[SYMBOL_64_MID
] = "%hi(";
5563 mips_split_p
[SYMBOL_64_LOW
] = true;
5564 mips_hi_relocs
[SYMBOL_64_LOW
] = "%hi(";
5565 mips_lo_relocs
[SYMBOL_64_LOW
] = "%lo(";
5567 mips_split_p
[SYMBOL_ABSOLUTE
] = true;
5568 mips_lo_relocs
[SYMBOL_ABSOLUTE
] = "%lo(";
5573 if (TARGET_EXPLICIT_RELOCS
|| mips_split_addresses
|| TARGET_MIPS16
)
5575 mips_split_p
[SYMBOL_ABSOLUTE
] = true;
5576 mips_hi_relocs
[SYMBOL_ABSOLUTE
] = "%hi(";
5577 mips_lo_relocs
[SYMBOL_ABSOLUTE
] = "%lo(";
5579 mips_lo_relocs
[SYMBOL_32_HIGH
] = "%hi(";
5585 /* The high part is provided by a pseudo copy of $gp. */
5586 mips_split_p
[SYMBOL_GP_RELATIVE
] = true;
5587 mips_lo_relocs
[SYMBOL_GP_RELATIVE
] = "%gprel(";
5590 if (TARGET_EXPLICIT_RELOCS
)
5592 /* Small data constants are kept whole until after reload,
5593 then lowered by mips_rewrite_small_data. */
5594 mips_lo_relocs
[SYMBOL_GP_RELATIVE
] = "%gp_rel(";
5596 mips_split_p
[SYMBOL_GOT_PAGE_OFST
] = true;
5599 mips_lo_relocs
[SYMBOL_GOTOFF_PAGE
] = "%got_page(";
5600 mips_lo_relocs
[SYMBOL_GOT_PAGE_OFST
] = "%got_ofst(";
5604 mips_lo_relocs
[SYMBOL_GOTOFF_PAGE
] = "%got(";
5605 mips_lo_relocs
[SYMBOL_GOT_PAGE_OFST
] = "%lo(";
5610 /* The HIGH and LO_SUM are matched by special .md patterns. */
5611 mips_split_p
[SYMBOL_GOT_DISP
] = true;
5613 mips_split_p
[SYMBOL_GOTOFF_DISP
] = true;
5614 mips_hi_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_hi(";
5615 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_lo(";
5617 mips_split_p
[SYMBOL_GOTOFF_CALL
] = true;
5618 mips_hi_relocs
[SYMBOL_GOTOFF_CALL
] = "%call_hi(";
5619 mips_lo_relocs
[SYMBOL_GOTOFF_CALL
] = "%call_lo(";
5624 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_disp(";
5626 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got(";
5627 mips_lo_relocs
[SYMBOL_GOTOFF_CALL
] = "%call16(";
5633 mips_split_p
[SYMBOL_GOTOFF_LOADGP
] = true;
5634 mips_hi_relocs
[SYMBOL_GOTOFF_LOADGP
] = "%hi(%neg(%gp_rel(";
5635 mips_lo_relocs
[SYMBOL_GOTOFF_LOADGP
] = "%lo(%neg(%gp_rel(";
5638 /* Thread-local relocation operators. */
5639 mips_lo_relocs
[SYMBOL_TLSGD
] = "%tlsgd(";
5640 mips_lo_relocs
[SYMBOL_TLSLDM
] = "%tlsldm(";
5641 mips_split_p
[SYMBOL_DTPREL
] = 1;
5642 mips_hi_relocs
[SYMBOL_DTPREL
] = "%dtprel_hi(";
5643 mips_lo_relocs
[SYMBOL_DTPREL
] = "%dtprel_lo(";
5644 mips_lo_relocs
[SYMBOL_GOTTPREL
] = "%gottprel(";
5645 mips_split_p
[SYMBOL_TPREL
] = 1;
5646 mips_hi_relocs
[SYMBOL_TPREL
] = "%tprel_hi(";
5647 mips_lo_relocs
[SYMBOL_TPREL
] = "%tprel_lo(";
5649 mips_lo_relocs
[SYMBOL_HALF
] = "%half(";
5652 static GTY(()) int was_mips16_p
= -1;
5654 /* Set up the target-dependent global state so that it matches the
5655 current function's ISA mode. */
5658 mips_set_mips16_mode (int mips16_p
)
5660 if (mips16_p
== was_mips16_p
)
5663 /* Restore base settings of various flags. */
5664 target_flags
= mips_base_target_flags
;
5665 align_loops
= mips_base_align_loops
;
5666 align_jumps
= mips_base_align_jumps
;
5667 align_functions
= mips_base_align_functions
;
5668 flag_schedule_insns
= mips_base_schedule_insns
;
5669 flag_reorder_blocks_and_partition
= mips_base_reorder_blocks_and_partition
;
5670 flag_move_loop_invariants
= mips_base_move_loop_invariants
;
5671 flag_delayed_branch
= mips_flag_delayed_branch
;
5675 /* Select mips16 instruction set. */
5676 target_flags
|= MASK_MIPS16
;
5678 /* Don't run the scheduler before reload, since it tends to
5679 increase register pressure. */
5680 flag_schedule_insns
= 0;
5682 /* Don't do hot/cold partitioning. The constant layout code expects
5683 the whole function to be in a single section. */
5684 flag_reorder_blocks_and_partition
= 0;
5686 /* Don't move loop invariants, because it tends to increase
5687 register pressure. It also introduces an extra move in cases
5688 where the constant is the first operand in a two-operand binary
5689 instruction, or when it forms a register argument to a functon
5691 flag_move_loop_invariants
= 0;
5693 /* Silently disable -mexplicit-relocs since it doesn't apply
5694 to mips16 code. Even so, it would overly pedantic to warn
5695 about "-mips16 -mexplicit-relocs", especially given that
5696 we use a %gprel() operator. */
5697 target_flags
&= ~MASK_EXPLICIT_RELOCS
;
5699 /* Silently disable DSP extensions. */
5700 target_flags
&= ~MASK_DSP
;
5701 target_flags
&= ~MASK_DSPR2
;
5703 /* Experiments suggest we get the best overall results from using
5704 the range of an unextended lw or sw. Code that makes heavy use
5705 of byte or short accesses can do better with ranges of 0...31
5706 and 0...63 respectively, but most code is sensitive to the range
5707 of lw and sw instead. */
5708 targetm
.min_anchor_offset
= 0;
5709 targetm
.max_anchor_offset
= 127;
5711 if (flag_pic
|| TARGET_ABICALLS
)
5712 sorry ("MIPS16 PIC");
5716 /* Reset to select base non-mips16 ISA. */
5717 target_flags
&= ~MASK_MIPS16
;
5719 /* When using explicit relocs, we call dbr_schedule from within
5721 if (TARGET_EXPLICIT_RELOCS
)
5722 flag_delayed_branch
= 0;
5724 /* Provide default values for align_* for 64-bit targets. */
5727 if (align_loops
== 0)
5729 if (align_jumps
== 0)
5731 if (align_functions
== 0)
5732 align_functions
= 8;
5735 targetm
.min_anchor_offset
= TARGET_MIN_ANCHOR_OFFSET
;
5736 targetm
.max_anchor_offset
= TARGET_MAX_ANCHOR_OFFSET
;
5739 /* (Re)initialize mips target internals for new ISA. */
5740 mips_init_split_addresses ();
5741 mips_init_relocs ();
5743 if (was_mips16_p
>= 0)
5744 /* Reinitialize target-dependent state. */
5747 was_mips16_p
= TARGET_MIPS16
;
5750 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
5751 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
5753 struct mflip_mips16_entry
GTY (()) {
5757 static GTY ((param_is (struct mflip_mips16_entry
))) htab_t mflip_mips16_htab
;
5759 /* Hash table callbacks for mflip_mips16_htab. */
5762 mflip_mips16_htab_hash (const void *entry
)
5764 return htab_hash_string (((const struct mflip_mips16_entry
*) entry
)->name
);
5768 mflip_mips16_htab_eq (const void *entry
, const void *name
)
5770 return strcmp (((const struct mflip_mips16_entry
*) entry
)->name
,
5771 (const char *) name
) == 0;
5774 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
5775 for -mflip-mips16. Return true if it should use "mips16" and false if
5776 it should use "nomips16". */
5779 mflip_mips16_use_mips16_p (tree decl
)
5781 struct mflip_mips16_entry
*entry
;
5786 /* Use the opposite of the command-line setting for anonymous decls. */
5787 if (!DECL_NAME (decl
))
5788 return !mips_base_mips16
;
5790 if (!mflip_mips16_htab
)
5791 mflip_mips16_htab
= htab_create_ggc (37, mflip_mips16_htab_hash
,
5792 mflip_mips16_htab_eq
, NULL
);
5794 name
= IDENTIFIER_POINTER (DECL_NAME (decl
));
5795 hash
= htab_hash_string (name
);
5796 slot
= htab_find_slot_with_hash (mflip_mips16_htab
, name
, hash
, INSERT
);
5797 entry
= (struct mflip_mips16_entry
*) *slot
;
5800 mips16_flipper
= !mips16_flipper
;
5801 entry
= GGC_NEW (struct mflip_mips16_entry
);
5803 entry
->mips16_p
= mips16_flipper
? !mips_base_mips16
: mips_base_mips16
;
5806 return entry
->mips16_p
;
5809 /* Implement TARGET_INSERT_ATTRIBUTES. */
5812 mips_insert_attributes (tree decl
, tree
*attributes
)
5815 bool mips16_p
, nomips16_p
;
5817 /* Check for "mips16" and "nomips16" attributes. */
5818 mips16_p
= lookup_attribute ("mips16", *attributes
) != NULL
;
5819 nomips16_p
= lookup_attribute ("nomips16", *attributes
) != NULL
;
5820 if (TREE_CODE (decl
) != FUNCTION_DECL
)
5823 error ("%qs attribute only applies to functions", "mips16");
5825 error ("%qs attribute only applies to functions", "nomips16");
5829 mips16_p
|= mips_mips16_decl_p (decl
);
5830 nomips16_p
|= mips_nomips16_decl_p (decl
);
5831 if (mips16_p
|| nomips16_p
)
5833 /* DECL cannot be simultaneously mips16 and nomips16. */
5834 if (mips16_p
&& nomips16_p
)
5835 error ("%qs cannot have both %<mips16%> and "
5836 "%<nomips16%> attributes",
5837 IDENTIFIER_POINTER (DECL_NAME (decl
)));
5839 else if (TARGET_FLIP_MIPS16
&& !DECL_ARTIFICIAL (decl
))
5841 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
5842 "mips16" attribute, arbitrarily pick one. We must pick the same
5843 setting for duplicate declarations of a function. */
5844 name
= mflip_mips16_use_mips16_p (decl
) ? "mips16" : "nomips16";
5845 *attributes
= tree_cons (get_identifier (name
), NULL
, *attributes
);
5850 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
5853 mips_merge_decl_attributes (tree olddecl
, tree newdecl
)
5855 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
5856 if (mips_mips16_decl_p (olddecl
) != mips_mips16_decl_p (newdecl
))
5857 error ("%qs redeclared with conflicting %qs attributes",
5858 IDENTIFIER_POINTER (DECL_NAME (newdecl
)), "mips16");
5859 if (mips_nomips16_decl_p (olddecl
) != mips_nomips16_decl_p (newdecl
))
5860 error ("%qs redeclared with conflicting %qs attributes",
5861 IDENTIFIER_POINTER (DECL_NAME (newdecl
)), "nomips16");
5863 return merge_attributes (DECL_ATTRIBUTES (olddecl
),
5864 DECL_ATTRIBUTES (newdecl
));
5867 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5868 function should use the MIPS16 ISA and switch modes accordingly. */
5871 mips_set_current_function (tree fndecl
)
5873 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl
));
5876 /* Implement TARGET_HANDLE_OPTION. */
5879 mips_handle_option (size_t code
, const char *arg
, int value
)
5884 if (strcmp (arg
, "32") == 0)
5886 else if (strcmp (arg
, "o64") == 0)
5888 else if (strcmp (arg
, "n32") == 0)
5890 else if (strcmp (arg
, "64") == 0)
5892 else if (strcmp (arg
, "eabi") == 0)
5893 mips_abi
= ABI_EABI
;
5900 return mips_parse_cpu (arg
) != 0;
5903 mips_isa_info
= mips_parse_cpu (ACONCAT (("mips", arg
, NULL
)));
5904 return mips_isa_info
!= 0;
5906 case OPT_mno_flush_func
:
5907 mips_cache_flush_func
= NULL
;
5910 case OPT_mcode_readable_
:
5911 if (strcmp (arg
, "yes") == 0)
5912 mips_code_readable
= CODE_READABLE_YES
;
5913 else if (strcmp (arg
, "pcrel") == 0)
5914 mips_code_readable
= CODE_READABLE_PCREL
;
5915 else if (strcmp (arg
, "no") == 0)
5916 mips_code_readable
= CODE_READABLE_NO
;
5922 mips_llsc
= value
? LLSC_YES
: LLSC_NO
;
5930 /* Set up the threshold for data to go into the small data area, instead
5931 of the normal data area, and detect any conflicts in the switches. */
5934 override_options (void)
5936 int i
, start
, regno
;
5937 enum machine_mode mode
;
5939 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5940 SUBTARGET_OVERRIDE_OPTIONS
;
5943 mips_section_threshold
= g_switch_set
? g_switch_value
: MIPS_DEFAULT_GVALUE
;
5945 /* The following code determines the architecture and register size.
5946 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5947 The GAS and GCC code should be kept in sync as much as possible. */
5949 if (mips_arch_string
!= 0)
5950 mips_set_architecture (mips_parse_cpu (mips_arch_string
));
5952 if (mips_isa_info
!= 0)
5954 if (mips_arch_info
== 0)
5955 mips_set_architecture (mips_isa_info
);
5956 else if (mips_arch_info
->isa
!= mips_isa_info
->isa
)
5957 error ("-%s conflicts with the other architecture options, "
5958 "which specify a %s processor",
5959 mips_isa_info
->name
,
5960 mips_cpu_info_from_isa (mips_arch_info
->isa
)->name
);
5963 if (mips_arch_info
== 0)
5965 #ifdef MIPS_CPU_STRING_DEFAULT
5966 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT
));
5968 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT
));
5972 if (ABI_NEEDS_64BIT_REGS
&& !ISA_HAS_64BIT_REGS
)
5973 error ("-march=%s is not compatible with the selected ABI",
5974 mips_arch_info
->name
);
5976 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5977 if (mips_tune_string
!= 0)
5978 mips_set_tune (mips_parse_cpu (mips_tune_string
));
5980 if (mips_tune_info
== 0)
5981 mips_set_tune (mips_arch_info
);
5983 /* Set cost structure for the processor. */
5985 mips_cost
= &mips_rtx_cost_optimize_size
;
5987 mips_cost
= &mips_rtx_cost_data
[mips_tune
];
5989 /* If the user hasn't specified a branch cost, use the processor's
5991 if (mips_branch_cost
== 0)
5992 mips_branch_cost
= mips_cost
->branch_cost
;
5994 if ((target_flags_explicit
& MASK_64BIT
) != 0)
5996 /* The user specified the size of the integer registers. Make sure
5997 it agrees with the ABI and ISA. */
5998 if (TARGET_64BIT
&& !ISA_HAS_64BIT_REGS
)
5999 error ("-mgp64 used with a 32-bit processor");
6000 else if (!TARGET_64BIT
&& ABI_NEEDS_64BIT_REGS
)
6001 error ("-mgp32 used with a 64-bit ABI");
6002 else if (TARGET_64BIT
&& ABI_NEEDS_32BIT_REGS
)
6003 error ("-mgp64 used with a 32-bit ABI");
6007 /* Infer the integer register size from the ABI and processor.
6008 Restrict ourselves to 32-bit registers if that's all the
6009 processor has, or if the ABI cannot handle 64-bit registers. */
6010 if (ABI_NEEDS_32BIT_REGS
|| !ISA_HAS_64BIT_REGS
)
6011 target_flags
&= ~MASK_64BIT
;
6013 target_flags
|= MASK_64BIT
;
6016 if ((target_flags_explicit
& MASK_FLOAT64
) != 0)
6018 /* Really, -mfp32 and -mfp64 are ornamental options. There's
6019 only one right answer here. */
6020 if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
&& !TARGET_FLOAT64
)
6021 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
6022 else if (!TARGET_64BIT
&& TARGET_FLOAT64
6023 && !(ISA_HAS_MXHC1
&& mips_abi
== ABI_32
))
6024 error ("-mgp32 and -mfp64 can only be combined if the target"
6025 " supports the mfhc1 and mthc1 instructions");
6026 else if (TARGET_SINGLE_FLOAT
&& TARGET_FLOAT64
)
6027 error ("unsupported combination: %s", "-mfp64 -msingle-float");
6031 /* -msingle-float selects 32-bit float registers. Otherwise the
6032 float registers should be the same size as the integer ones. */
6033 if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
)
6034 target_flags
|= MASK_FLOAT64
;
6036 target_flags
&= ~MASK_FLOAT64
;
6039 /* End of code shared with GAS. */
6041 if ((target_flags_explicit
& MASK_LONG64
) == 0)
6043 if ((mips_abi
== ABI_EABI
&& TARGET_64BIT
) || mips_abi
== ABI_64
)
6044 target_flags
|= MASK_LONG64
;
6046 target_flags
&= ~MASK_LONG64
;
6050 flag_pcc_struct_return
= 0;
6052 if ((target_flags_explicit
& MASK_BRANCHLIKELY
) == 0)
6054 /* If neither -mbranch-likely nor -mno-branch-likely was given
6055 on the command line, set MASK_BRANCHLIKELY based on the target
6058 By default, we enable use of Branch Likely instructions on
6059 all architectures which support them with the following
6060 exceptions: when creating MIPS32 or MIPS64 code, and when
6061 tuning for architectures where their use tends to hurt
6064 The MIPS32 and MIPS64 architecture specifications say "Software
6065 is strongly encouraged to avoid use of Branch Likely
6066 instructions, as they will be removed from a future revision
6067 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
6068 issue those instructions unless instructed to do so by
6070 if (ISA_HAS_BRANCHLIKELY
6071 && !(ISA_MIPS32
|| ISA_MIPS32R2
|| ISA_MIPS64
)
6072 && !(TUNE_MIPS5500
|| TUNE_SB1
))
6073 target_flags
|= MASK_BRANCHLIKELY
;
6075 target_flags
&= ~MASK_BRANCHLIKELY
;
6077 if (TARGET_BRANCHLIKELY
&& !ISA_HAS_BRANCHLIKELY
)
6078 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
6080 /* The effect of -mabicalls isn't defined for the EABI. */
6081 if (mips_abi
== ABI_EABI
&& TARGET_ABICALLS
)
6083 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
6084 target_flags
&= ~MASK_ABICALLS
;
6087 /* MIPS16 cannot generate PIC yet. */
6088 if (TARGET_MIPS16
&& (flag_pic
|| TARGET_ABICALLS
))
6090 sorry ("MIPS16 PIC");
6091 target_flags
&= ~MASK_ABICALLS
;
6092 flag_pic
= flag_pie
= flag_shlib
= 0;
6095 if (TARGET_ABICALLS
)
6096 /* We need to set flag_pic for executables as well as DSOs
6097 because we may reference symbols that are not defined in
6098 the final executable. (MIPS does not use things like
6099 copy relocs, for example.)
6101 Also, there is a body of code that uses __PIC__ to distinguish
6102 between -mabicalls and -mno-abicalls code. */
6105 /* -mvr4130-align is a "speed over size" optimization: it usually produces
6106 faster code, but at the expense of more nops. Enable it at -O3 and
6108 if (optimize
> 2 && (target_flags_explicit
& MASK_VR4130_ALIGN
) == 0)
6109 target_flags
|= MASK_VR4130_ALIGN
;
6111 /* Prefer a call to memcpy over inline code when optimizing for size,
6112 though see MOVE_RATIO in mips.h. */
6113 if (optimize_size
&& (target_flags_explicit
& MASK_MEMCPY
) == 0)
6114 target_flags
|= MASK_MEMCPY
;
6116 /* If we have a nonzero small-data limit, check that the -mgpopt
6117 setting is consistent with the other target flags. */
6118 if (mips_section_threshold
> 0)
6122 if (!TARGET_MIPS16
&& !TARGET_EXPLICIT_RELOCS
)
6123 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
6125 TARGET_LOCAL_SDATA
= false;
6126 TARGET_EXTERN_SDATA
= false;
6130 if (TARGET_VXWORKS_RTP
)
6131 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
6133 if (TARGET_ABICALLS
)
6134 warning (0, "cannot use small-data accesses for %qs",
6139 #ifdef MIPS_TFMODE_FORMAT
6140 REAL_MODE_FORMAT (TFmode
) = &MIPS_TFMODE_FORMAT
;
6143 /* Make sure that the user didn't turn off paired single support when
6144 MIPS-3D support is requested. */
6145 if (TARGET_MIPS3D
&& (target_flags_explicit
& MASK_PAIRED_SINGLE_FLOAT
)
6146 && !TARGET_PAIRED_SINGLE_FLOAT
)
6147 error ("-mips3d requires -mpaired-single");
6149 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
6151 target_flags
|= MASK_PAIRED_SINGLE_FLOAT
;
6153 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
6154 and TARGET_HARD_FLOAT are both true. */
6155 if (TARGET_PAIRED_SINGLE_FLOAT
&& !(TARGET_FLOAT64
&& TARGET_HARD_FLOAT
))
6156 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
6158 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
6160 if (TARGET_PAIRED_SINGLE_FLOAT
&& !ISA_MIPS64
)
6161 error ("-mips3d/-mpaired-single must be used with -mips64");
6163 /* If TARGET_DSPR2, enable MASK_DSP. */
6165 target_flags
|= MASK_DSP
;
6167 mips_print_operand_punct
['?'] = 1;
6168 mips_print_operand_punct
['#'] = 1;
6169 mips_print_operand_punct
['/'] = 1;
6170 mips_print_operand_punct
['&'] = 1;
6171 mips_print_operand_punct
['!'] = 1;
6172 mips_print_operand_punct
['*'] = 1;
6173 mips_print_operand_punct
['@'] = 1;
6174 mips_print_operand_punct
['.'] = 1;
6175 mips_print_operand_punct
['('] = 1;
6176 mips_print_operand_punct
[')'] = 1;
6177 mips_print_operand_punct
['['] = 1;
6178 mips_print_operand_punct
[']'] = 1;
6179 mips_print_operand_punct
['<'] = 1;
6180 mips_print_operand_punct
['>'] = 1;
6181 mips_print_operand_punct
['{'] = 1;
6182 mips_print_operand_punct
['}'] = 1;
6183 mips_print_operand_punct
['^'] = 1;
6184 mips_print_operand_punct
['$'] = 1;
6185 mips_print_operand_punct
['+'] = 1;
6186 mips_print_operand_punct
['~'] = 1;
6187 mips_print_operand_punct
['|'] = 1;
6188 mips_print_operand_punct
['-'] = 1;
6190 /* Set up array to map GCC register number to debug register number.
6191 Ignore the special purpose register numbers. */
6193 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6195 mips_dbx_regno
[i
] = INVALID_REGNUM
;
6196 if (GP_REG_P (i
) || FP_REG_P (i
) || ALL_COP_REG_P (i
))
6197 mips_dwarf_regno
[i
] = i
;
6199 mips_dwarf_regno
[i
] = INVALID_REGNUM
;
6202 start
= GP_DBX_FIRST
- GP_REG_FIRST
;
6203 for (i
= GP_REG_FIRST
; i
<= GP_REG_LAST
; i
++)
6204 mips_dbx_regno
[i
] = i
+ start
;
6206 start
= FP_DBX_FIRST
- FP_REG_FIRST
;
6207 for (i
= FP_REG_FIRST
; i
<= FP_REG_LAST
; i
++)
6208 mips_dbx_regno
[i
] = i
+ start
;
6210 /* HI and LO debug registers use big-endian ordering. */
6211 mips_dbx_regno
[HI_REGNUM
] = MD_DBX_FIRST
+ 0;
6212 mips_dbx_regno
[LO_REGNUM
] = MD_DBX_FIRST
+ 1;
6213 mips_dwarf_regno
[HI_REGNUM
] = MD_REG_FIRST
+ 0;
6214 mips_dwarf_regno
[LO_REGNUM
] = MD_REG_FIRST
+ 1;
6215 for (i
= DSP_ACC_REG_FIRST
; i
<= DSP_ACC_REG_LAST
; i
+= 2)
6217 mips_dwarf_regno
[i
+ TARGET_LITTLE_ENDIAN
] = i
;
6218 mips_dwarf_regno
[i
+ TARGET_BIG_ENDIAN
] = i
+ 1;
6221 /* Set up array giving whether a given register can hold a given mode. */
6223 for (mode
= VOIDmode
;
6224 mode
!= MAX_MACHINE_MODE
;
6225 mode
= (enum machine_mode
) ((int)mode
+ 1))
6227 register int size
= GET_MODE_SIZE (mode
);
6228 register enum mode_class
class = GET_MODE_CLASS (mode
);
6230 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
6234 if (mode
== CCV2mode
)
6237 && (regno
- ST_REG_FIRST
) % 2 == 0);
6239 else if (mode
== CCV4mode
)
6242 && (regno
- ST_REG_FIRST
) % 4 == 0);
6244 else if (mode
== CCmode
)
6247 temp
= (regno
== FPSW_REGNUM
);
6249 temp
= (ST_REG_P (regno
) || GP_REG_P (regno
)
6250 || FP_REG_P (regno
));
6253 else if (GP_REG_P (regno
))
6254 temp
= ((regno
& 1) == 0 || size
<= UNITS_PER_WORD
);
6256 else if (FP_REG_P (regno
))
6257 temp
= ((((regno
% MAX_FPRS_PER_FMT
) == 0)
6258 || (MIN_FPRS_PER_FMT
== 1
6259 && size
<= UNITS_PER_FPREG
))
6260 && (((class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
6261 || class == MODE_VECTOR_FLOAT
)
6262 && size
<= UNITS_PER_FPVALUE
)
6263 /* Allow integer modes that fit into a single
6264 register. We need to put integers into FPRs
6265 when using instructions like cvt and trunc.
6266 We can't allow sizes smaller than a word,
6267 the FPU has no appropriate load/store
6268 instructions for those. */
6269 || (class == MODE_INT
6270 && size
>= MIN_UNITS_PER_WORD
6271 && size
<= UNITS_PER_FPREG
)
6272 /* Allow TFmode for CCmode reloads. */
6273 || (ISA_HAS_8CC
&& mode
== TFmode
)));
6275 else if (ACC_REG_P (regno
))
6276 temp
= ((INTEGRAL_MODE_P (mode
) || ALL_FIXED_POINT_MODE_P (mode
))
6277 && size
<= UNITS_PER_WORD
* 2
6278 && (size
<= UNITS_PER_WORD
6279 || regno
== MD_REG_FIRST
6280 || (DSP_ACC_REG_P (regno
)
6281 && ((regno
- DSP_ACC_REG_FIRST
) & 1) == 0)));
6283 else if (ALL_COP_REG_P (regno
))
6284 temp
= (class == MODE_INT
&& size
<= UNITS_PER_WORD
);
6288 mips_hard_regno_mode_ok
[(int)mode
][regno
] = temp
;
6292 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
6293 initialized yet, so we can't use that here. */
6294 gpr_mode
= TARGET_64BIT
? DImode
: SImode
;
6296 /* Function to allocate machine-dependent function status. */
6297 init_machine_status
= &mips_init_machine_status
;
6299 /* Default to working around R4000 errata only if the processor
6300 was selected explicitly. */
6301 if ((target_flags_explicit
& MASK_FIX_R4000
) == 0
6302 && mips_matching_cpu_name_p (mips_arch_info
->name
, "r4000"))
6303 target_flags
|= MASK_FIX_R4000
;
6305 /* Default to working around R4400 errata only if the processor
6306 was selected explicitly. */
6307 if ((target_flags_explicit
& MASK_FIX_R4400
) == 0
6308 && mips_matching_cpu_name_p (mips_arch_info
->name
, "r4400"))
6309 target_flags
|= MASK_FIX_R4400
;
6311 /* Save base state of options. */
6312 mips_base_mips16
= TARGET_MIPS16
;
6313 mips_base_target_flags
= target_flags
;
6314 mips_base_schedule_insns
= flag_schedule_insns
;
6315 mips_base_reorder_blocks_and_partition
= flag_reorder_blocks_and_partition
;
6316 mips_base_move_loop_invariants
= flag_move_loop_invariants
;
6317 mips_base_align_loops
= align_loops
;
6318 mips_base_align_jumps
= align_jumps
;
6319 mips_base_align_functions
= align_functions
;
6320 mips_flag_delayed_branch
= flag_delayed_branch
;
6322 /* Now select the mips16 or 32-bit instruction set, as requested. */
6323 mips_set_mips16_mode (mips_base_mips16
);
6326 /* Swap the register information for registers I and I + 1, which
6327 currently have the wrong endianness. Note that the registers'
6328 fixedness and call-clobberedness might have been set on the
6332 mips_swap_registers (unsigned int i
)
6337 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
6338 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
6340 SWAP_INT (fixed_regs
[i
], fixed_regs
[i
+ 1]);
6341 SWAP_INT (call_used_regs
[i
], call_used_regs
[i
+ 1]);
6342 SWAP_INT (call_really_used_regs
[i
], call_really_used_regs
[i
+ 1]);
6343 SWAP_STRING (reg_names
[i
], reg_names
[i
+ 1]);
6349 /* Implement CONDITIONAL_REGISTER_USAGE. */
6352 mips_conditional_register_usage (void)
6358 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
++)
6359 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
6361 if (!TARGET_HARD_FLOAT
)
6365 for (regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
++)
6366 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
6367 for (regno
= ST_REG_FIRST
; regno
<= ST_REG_LAST
; regno
++)
6368 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
6370 else if (! ISA_HAS_8CC
)
6374 /* We only have a single condition code register. We
6375 implement this by hiding all the condition code registers,
6376 and generating RTL that refers directly to ST_REG_FIRST. */
6377 for (regno
= ST_REG_FIRST
; regno
<= ST_REG_LAST
; regno
++)
6378 fixed_regs
[regno
] = call_used_regs
[regno
] = 1;
6380 /* In mips16 mode, we permit the $t temporary registers to be used
6381 for reload. We prohibit the unused $s registers, since they
6382 are caller saved, and saving them via a mips16 register would
6383 probably waste more time than just reloading the value. */
6386 fixed_regs
[18] = call_used_regs
[18] = 1;
6387 fixed_regs
[19] = call_used_regs
[19] = 1;
6388 fixed_regs
[20] = call_used_regs
[20] = 1;
6389 fixed_regs
[21] = call_used_regs
[21] = 1;
6390 fixed_regs
[22] = call_used_regs
[22] = 1;
6391 fixed_regs
[23] = call_used_regs
[23] = 1;
6392 fixed_regs
[26] = call_used_regs
[26] = 1;
6393 fixed_regs
[27] = call_used_regs
[27] = 1;
6394 fixed_regs
[30] = call_used_regs
[30] = 1;
6396 /* fp20-23 are now caller saved. */
6397 if (mips_abi
== ABI_64
)
6400 for (regno
= FP_REG_FIRST
+ 20; regno
< FP_REG_FIRST
+ 24; regno
++)
6401 call_really_used_regs
[regno
] = call_used_regs
[regno
] = 1;
6403 /* Odd registers from fp21 to fp31 are now caller saved. */
6404 if (mips_abi
== ABI_N32
)
6407 for (regno
= FP_REG_FIRST
+ 21; regno
<= FP_REG_FIRST
+ 31; regno
+=2)
6408 call_really_used_regs
[regno
] = call_used_regs
[regno
] = 1;
6410 /* Make sure that double-register accumulator values are correctly
6411 ordered for the current endianness. */
6412 if (TARGET_LITTLE_ENDIAN
)
6415 mips_swap_registers (MD_REG_FIRST
);
6416 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
+= 2)
6417 mips_swap_registers (regno
);
6421 /* Allocate a chunk of memory for per-function machine-dependent data. */
6422 static struct machine_function
*
6423 mips_init_machine_status (void)
6425 return ((struct machine_function
*)
6426 ggc_alloc_cleared (sizeof (struct machine_function
)));
6429 /* On the mips16, we want to allocate $24 (T_REG) before other
6430 registers for instructions for which it is possible. This helps
6431 avoid shuffling registers around in order to set up for an xor,
6432 encouraging the compiler to use a cmp instead. */
6435 mips_order_regs_for_local_alloc (void)
6439 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6440 reg_alloc_order
[i
] = i
;
6444 /* It really doesn't matter where we put register 0, since it is
6445 a fixed register anyhow. */
6446 reg_alloc_order
[0] = 24;
6447 reg_alloc_order
[24] = 0;
6452 /* The MIPS debug format wants all automatic variables and arguments
6453 to be in terms of the virtual frame pointer (stack pointer before
6454 any adjustment in the function), while the MIPS 3.0 linker wants
6455 the frame pointer to be the stack pointer after the initial
6456 adjustment. So, we do the adjustment here. The arg pointer (which
6457 is eliminated) points to the virtual frame pointer, while the frame
6458 pointer (which may be eliminated) points to the stack pointer after
6459 the initial adjustments. */
6462 mips_debugger_offset (rtx addr
, HOST_WIDE_INT offset
)
6464 rtx offset2
= const0_rtx
;
6465 rtx reg
= eliminate_constant_term (addr
, &offset2
);
6468 offset
= INTVAL (offset2
);
6470 if (reg
== stack_pointer_rtx
|| reg
== frame_pointer_rtx
6471 || reg
== hard_frame_pointer_rtx
)
6473 HOST_WIDE_INT frame_size
= (!cfun
->machine
->frame
.initialized
)
6474 ? compute_frame_size (get_frame_size ())
6475 : cfun
->machine
->frame
.total_size
;
6477 /* MIPS16 frame is smaller */
6478 if (frame_pointer_needed
&& TARGET_MIPS16
)
6479 frame_size
-= cfun
->machine
->frame
.args_size
;
6481 offset
= offset
- frame_size
;
6484 /* sdbout_parms does not want this to crash for unrecognized cases. */
6486 else if (reg
!= arg_pointer_rtx
)
6487 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6494 /* If OP is an UNSPEC address, return the address to which it refers,
6495 otherwise return OP itself. */
6498 mips_strip_unspec_address (rtx op
)
6502 split_const (op
, &base
, &offset
);
6503 if (UNSPEC_ADDRESS_P (base
))
6504 op
= plus_constant (UNSPEC_ADDRESS (base
), INTVAL (offset
));
6508 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6510 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6511 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6512 'h' OP is HIGH, prints %hi(X),
6513 'd' output integer constant in decimal,
6514 'z' if the operand is 0, use $0 instead of normal operand.
6515 'D' print second part of double-word register or memory operand.
6516 'L' print low-order register of double-word register operand.
6517 'M' print high-order register of double-word register operand.
6518 'C' print part of opcode for a branch condition.
6519 'F' print part of opcode for a floating-point branch condition.
6520 'N' print part of opcode for a branch condition, inverted.
6521 'W' print part of opcode for a floating-point branch condition, inverted.
6522 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6523 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6524 't' like 'T', but with the EQ/NE cases reversed
6525 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6526 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6527 'R' print the reloc associated with LO_SUM
6528 'q' print DSP accumulator registers
6530 The punctuation characters are:
6532 '(' Turn on .set noreorder
6533 ')' Turn on .set reorder
6534 '[' Turn on .set noat
6536 '<' Turn on .set nomacro
6537 '>' Turn on .set macro
6538 '{' Turn on .set volatile (not GAS)
6539 '}' Turn on .set novolatile (not GAS)
6540 '&' Turn on .set noreorder if filling delay slots
6541 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6542 '!' Turn on .set nomacro if filling delay slots
6543 '#' Print nop if in a .set noreorder section.
6544 '/' Like '#', but does nothing within a delayed branch sequence
6545 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6546 '@' Print the name of the assembler temporary register (at or $1).
6547 '.' Print the name of the register with a hard-wired zero (zero or $0).
6548 '^' Print the name of the pic call-through register (t9 or $25).
6549 '$' Print the name of the stack pointer register (sp or $29).
6550 '+' Print the name of the gp register (usually gp or $28).
6551 '~' Output a branch alignment to LABEL_ALIGN(NULL).
6552 '|' Print .set push; .set mips2 if mips_llsc == LLSC_YES
6554 '-' Print .set pop under the same conditions for '|'. */
6557 print_operand (FILE *file
, rtx op
, int letter
)
6559 register enum rtx_code code
;
6561 if (PRINT_OPERAND_PUNCT_VALID_P (letter
))
6566 if (mips_branch_likely
)
6571 fputs (reg_names
[GP_REG_FIRST
+ 1], file
);
6575 fputs (reg_names
[PIC_FUNCTION_ADDR_REGNUM
], file
);
6579 fputs (reg_names
[GP_REG_FIRST
+ 0], file
);
6583 fputs (reg_names
[STACK_POINTER_REGNUM
], file
);
6587 fputs (reg_names
[PIC_OFFSET_TABLE_REGNUM
], file
);
6591 if (final_sequence
!= 0 && set_noreorder
++ == 0)
6592 fputs (".set\tnoreorder\n\t", file
);
6596 if (final_sequence
!= 0)
6598 if (set_noreorder
++ == 0)
6599 fputs (".set\tnoreorder\n\t", file
);
6601 if (set_nomacro
++ == 0)
6602 fputs (".set\tnomacro\n\t", file
);
6607 if (final_sequence
!= 0 && set_nomacro
++ == 0)
6608 fputs ("\n\t.set\tnomacro", file
);
6612 if (set_noreorder
!= 0)
6613 fputs ("\n\tnop", file
);
6617 /* Print an extra newline so that the delayed insn is separated
6618 from the following ones. This looks neater and is consistent
6619 with non-nop delayed sequences. */
6620 if (set_noreorder
!= 0 && final_sequence
== 0)
6621 fputs ("\n\tnop\n", file
);
6625 if (set_noreorder
++ == 0)
6626 fputs (".set\tnoreorder\n\t", file
);
6630 if (set_noreorder
== 0)
6631 error ("internal error: %%) found without a %%( in assembler pattern");
6633 else if (--set_noreorder
== 0)
6634 fputs ("\n\t.set\treorder", file
);
6639 if (set_noat
++ == 0)
6640 fputs (".set\tnoat\n\t", file
);
6645 error ("internal error: %%] found without a %%[ in assembler pattern");
6646 else if (--set_noat
== 0)
6647 fputs ("\n\t.set\tat", file
);
6652 if (set_nomacro
++ == 0)
6653 fputs (".set\tnomacro\n\t", file
);
6657 if (set_nomacro
== 0)
6658 error ("internal error: %%> found without a %%< in assembler pattern");
6659 else if (--set_nomacro
== 0)
6660 fputs ("\n\t.set\tmacro", file
);
6665 if (set_volatile
++ == 0)
6666 fputs ("#.set\tvolatile\n\t", file
);
6670 if (set_volatile
== 0)
6671 error ("internal error: %%} found without a %%{ in assembler pattern");
6672 else if (--set_volatile
== 0)
6673 fputs ("\n\t#.set\tnovolatile", file
);
6679 if (align_labels_log
> 0)
6680 ASM_OUTPUT_ALIGN (file
, align_labels_log
);
6686 fputs (".set\tpush\n\t.set\tmips2\n\t", file
);
6691 fputs ("\n\t.set\tpop", file
);
6695 error ("PRINT_OPERAND: unknown punctuation '%c'", letter
);
6704 error ("PRINT_OPERAND null pointer");
6708 code
= GET_CODE (op
);
6713 case EQ
: fputs ("eq", file
); break;
6714 case NE
: fputs ("ne", file
); break;
6715 case GT
: fputs ("gt", file
); break;
6716 case GE
: fputs ("ge", file
); break;
6717 case LT
: fputs ("lt", file
); break;
6718 case LE
: fputs ("le", file
); break;
6719 case GTU
: fputs ("gtu", file
); break;
6720 case GEU
: fputs ("geu", file
); break;
6721 case LTU
: fputs ("ltu", file
); break;
6722 case LEU
: fputs ("leu", file
); break;
6724 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op
);
6727 else if (letter
== 'N')
6730 case EQ
: fputs ("ne", file
); break;
6731 case NE
: fputs ("eq", file
); break;
6732 case GT
: fputs ("le", file
); break;
6733 case GE
: fputs ("lt", file
); break;
6734 case LT
: fputs ("ge", file
); break;
6735 case LE
: fputs ("gt", file
); break;
6736 case GTU
: fputs ("leu", file
); break;
6737 case GEU
: fputs ("ltu", file
); break;
6738 case LTU
: fputs ("geu", file
); break;
6739 case LEU
: fputs ("gtu", file
); break;
6741 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op
);
6744 else if (letter
== 'F')
6747 case EQ
: fputs ("c1f", file
); break;
6748 case NE
: fputs ("c1t", file
); break;
6750 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op
);
6753 else if (letter
== 'W')
6756 case EQ
: fputs ("c1t", file
); break;
6757 case NE
: fputs ("c1f", file
); break;
6759 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op
);
6762 else if (letter
== 'h')
6764 if (GET_CODE (op
) == HIGH
)
6767 print_operand_reloc (file
, op
, SYMBOL_CONTEXT_LEA
, mips_hi_relocs
);
6770 else if (letter
== 'R')
6771 print_operand_reloc (file
, op
, SYMBOL_CONTEXT_LEA
, mips_lo_relocs
);
6773 else if (letter
== 'Y')
6775 if (GET_CODE (op
) == CONST_INT
6776 && ((unsigned HOST_WIDE_INT
) INTVAL (op
)
6777 < ARRAY_SIZE (mips_fp_conditions
)))
6778 fputs (mips_fp_conditions
[INTVAL (op
)], file
);
6780 output_operand_lossage ("invalid %%Y value");
6783 else if (letter
== 'Z')
6787 print_operand (file
, op
, 0);
6792 else if (letter
== 'q')
6797 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op
);
6799 regnum
= REGNO (op
);
6800 if (MD_REG_P (regnum
))
6801 fprintf (file
, "$ac0");
6802 else if (DSP_ACC_REG_P (regnum
))
6803 fprintf (file
, "$ac%c", reg_names
[regnum
][3]);
6805 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op
);
6808 else if (code
== REG
|| code
== SUBREG
)
6810 register int regnum
;
6813 regnum
= REGNO (op
);
6815 regnum
= true_regnum (op
);
6817 if ((letter
== 'M' && ! WORDS_BIG_ENDIAN
)
6818 || (letter
== 'L' && WORDS_BIG_ENDIAN
)
6822 fprintf (file
, "%s", reg_names
[regnum
]);
6825 else if (code
== MEM
)
6828 output_address (plus_constant (XEXP (op
, 0), 4));
6830 output_address (XEXP (op
, 0));
6833 else if (letter
== 'x' && GET_CODE (op
) == CONST_INT
)
6834 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, 0xffff & INTVAL(op
));
6836 else if (letter
== 'X' && GET_CODE(op
) == CONST_INT
)
6837 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (op
));
6839 else if (letter
== 'd' && GET_CODE(op
) == CONST_INT
)
6840 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (INTVAL(op
)));
6842 else if (letter
== 'z' && op
== CONST0_RTX (GET_MODE (op
)))
6843 fputs (reg_names
[GP_REG_FIRST
], file
);
6845 else if (letter
== 'd' || letter
== 'x' || letter
== 'X')
6846 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6848 else if (letter
== 'T' || letter
== 't')
6850 int truth
= (code
== NE
) == (letter
== 'T');
6851 fputc ("zfnt"[truth
* 2 + (GET_MODE (op
) == CCmode
)], file
);
6854 else if (CONST_GP_P (op
))
6855 fputs (reg_names
[GLOBAL_POINTER_REGNUM
], file
);
6858 output_addr_const (file
, mips_strip_unspec_address (op
));
6862 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6863 in context CONTEXT. RELOCS is the array of relocations to use. */
6866 print_operand_reloc (FILE *file
, rtx op
, enum mips_symbol_context context
,
6867 const char **relocs
)
6869 enum mips_symbol_type symbol_type
;
6872 symbol_type
= mips_classify_symbolic_expression (op
, context
);
6873 if (relocs
[symbol_type
] == 0)
6874 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op
);
6876 fputs (relocs
[symbol_type
], file
);
6877 output_addr_const (file
, mips_strip_unspec_address (op
));
6878 for (p
= relocs
[symbol_type
]; *p
!= 0; p
++)
6883 /* Output address operand X to FILE. */
6886 print_operand_address (FILE *file
, rtx x
)
6888 struct mips_address_info addr
;
6890 if (mips_classify_address (&addr
, x
, word_mode
, true))
6894 print_operand (file
, addr
.offset
, 0);
6895 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
6898 case ADDRESS_LO_SUM
:
6899 print_operand_reloc (file
, addr
.offset
, SYMBOL_CONTEXT_MEM
,
6901 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
6904 case ADDRESS_CONST_INT
:
6905 output_addr_const (file
, x
);
6906 fprintf (file
, "(%s)", reg_names
[0]);
6909 case ADDRESS_SYMBOLIC
:
6910 output_addr_const (file
, mips_strip_unspec_address (x
));
6916 /* When using assembler macros, keep track of all of small-data externs
6917 so that mips_file_end can emit the appropriate declarations for them.
6919 In most cases it would be safe (though pointless) to emit .externs
6920 for other symbols too. One exception is when an object is within
6921 the -G limit but declared by the user to be in a section other
6922 than .sbss or .sdata. */
6925 mips_output_external (FILE *file
, tree decl
, const char *name
)
6927 default_elf_asm_output_external (file
, decl
, name
);
6929 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6930 set in order to avoid putting out names that are never really
6932 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl
)))
6934 if (!TARGET_EXPLICIT_RELOCS
&& mips_in_small_data_p (decl
))
6936 fputs ("\t.extern\t", file
);
6937 assemble_name (file
, name
);
6938 fprintf (file
, ", " HOST_WIDE_INT_PRINT_DEC
"\n",
6939 int_size_in_bytes (TREE_TYPE (decl
)));
6941 else if (TARGET_IRIX
6942 && mips_abi
== ABI_32
6943 && TREE_CODE (decl
) == FUNCTION_DECL
)
6945 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6946 `.global name .text' directive for every used but
6947 undefined function. If we don't, the linker may perform
6948 an optimization (skipping over the insns that set $gp)
6949 when it is unsafe. */
6950 fputs ("\t.globl ", file
);
6951 assemble_name (file
, name
);
6952 fputs (" .text\n", file
);
6957 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6958 put out a MIPS ECOFF file and a stab. */
6961 mips_output_filename (FILE *stream
, const char *name
)
6964 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6966 if (write_symbols
== DWARF2_DEBUG
)
6968 else if (mips_output_filename_first_time
)
6970 mips_output_filename_first_time
= 0;
6971 num_source_filenames
+= 1;
6972 current_function_file
= name
;
6973 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6974 output_quoted_string (stream
, name
);
6975 putc ('\n', stream
);
6978 /* If we are emitting stabs, let dbxout.c handle this (except for
6979 the mips_output_filename_first_time case). */
6980 else if (write_symbols
== DBX_DEBUG
)
6983 else if (name
!= current_function_file
6984 && strcmp (name
, current_function_file
) != 0)
6986 num_source_filenames
+= 1;
6987 current_function_file
= name
;
6988 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6989 output_quoted_string (stream
, name
);
6990 putc ('\n', stream
);
6994 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6995 that should be written before the opening quote, such as "\t.ascii\t"
6996 for real string data or "\t# " for a comment. */
6999 mips_output_ascii (FILE *stream
, const char *string_param
, size_t len
,
7004 register const unsigned char *string
=
7005 (const unsigned char *)string_param
;
7007 fprintf (stream
, "%s\"", prefix
);
7008 for (i
= 0; i
< len
; i
++)
7010 register int c
= string
[i
];
7014 if (c
== '\\' || c
== '\"')
7016 putc ('\\', stream
);
7024 fprintf (stream
, "\\%03o", c
);
7028 if (cur_pos
> 72 && i
+1 < len
)
7031 fprintf (stream
, "\"\n%s\"", prefix
);
7034 fprintf (stream
, "\"\n");
7037 /* Implement TARGET_ASM_FILE_START. */
7040 mips_file_start (void)
7042 default_file_start ();
7046 /* Generate a special section to describe the ABI switches used to
7047 produce the resultant binary. This used to be done by the assembler
7048 setting bits in the ELF header's flags field, but we have run out of
7049 bits. GDB needs this information in order to be able to correctly
7050 debug these binaries. See the function mips_gdbarch_init() in
7051 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7052 causes unnecessary IRIX 6 ld warnings. */
7053 const char * abi_string
= NULL
;
7057 case ABI_32
: abi_string
= "abi32"; break;
7058 case ABI_N32
: abi_string
= "abiN32"; break;
7059 case ABI_64
: abi_string
= "abi64"; break;
7060 case ABI_O64
: abi_string
= "abiO64"; break;
7061 case ABI_EABI
: abi_string
= TARGET_64BIT
? "eabi64" : "eabi32"; break;
7065 /* Note - we use fprintf directly rather than calling switch_to_section
7066 because in this way we can avoid creating an allocated section. We
7067 do not want this section to take up any space in the running
7069 fprintf (asm_out_file
, "\t.section .mdebug.%s\n\t.previous\n",
7072 /* There is no ELF header flag to distinguish long32 forms of the
7073 EABI from long64 forms. Emit a special section to help tools
7074 such as GDB. Do the same for o64, which is sometimes used with
7076 if (mips_abi
== ABI_EABI
|| mips_abi
== ABI_O64
)
7077 fprintf (asm_out_file
, "\t.section .gcc_compiled_long%d\n"
7078 "\t.previous\n", TARGET_LONG64
? 64 : 32);
7080 #ifdef HAVE_AS_GNU_ATTRIBUTE
7081 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n",
7082 TARGET_HARD_FLOAT_ABI
? (TARGET_DOUBLE_FLOAT
? 1 : 2) : 3);
7086 /* Generate the pseudo ops that System V.4 wants. */
7087 if (TARGET_ABICALLS
)
7088 fprintf (asm_out_file
, "\t.abicalls\n");
7090 if (flag_verbose_asm
)
7091 fprintf (asm_out_file
, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7093 mips_section_threshold
, mips_arch_info
->name
, mips_isa
);
7096 #ifdef BSS_SECTION_ASM_OP
7097 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
7098 in the use of sbss. */
7101 mips_output_aligned_bss (FILE *stream
, tree decl
, const char *name
,
7102 unsigned HOST_WIDE_INT size
, int align
)
7104 extern tree last_assemble_variable_decl
;
7106 if (mips_in_small_data_p (decl
))
7107 switch_to_section (get_named_section (NULL
, ".sbss", 0));
7109 switch_to_section (bss_section
);
7110 ASM_OUTPUT_ALIGN (stream
, floor_log2 (align
/ BITS_PER_UNIT
));
7111 last_assemble_variable_decl
= decl
;
7112 ASM_DECLARE_OBJECT_NAME (stream
, name
, decl
);
7113 ASM_OUTPUT_SKIP (stream
, size
!= 0 ? size
: 1);
7117 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7118 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7121 mips_output_aligned_decl_common (FILE *stream
, tree decl
, const char *name
,
7122 unsigned HOST_WIDE_INT size
,
7125 /* If the target wants uninitialized const declarations in
7126 .rdata then don't put them in .comm. */
7127 if (TARGET_EMBEDDED_DATA
&& TARGET_UNINIT_CONST_IN_RODATA
7128 && TREE_CODE (decl
) == VAR_DECL
&& TREE_READONLY (decl
)
7129 && (DECL_INITIAL (decl
) == 0 || DECL_INITIAL (decl
) == error_mark_node
))
7131 if (TREE_PUBLIC (decl
) && DECL_NAME (decl
))
7132 targetm
.asm_out
.globalize_label (stream
, name
);
7134 switch_to_section (readonly_data_section
);
7135 ASM_OUTPUT_ALIGN (stream
, floor_log2 (align
/ BITS_PER_UNIT
));
7136 mips_declare_object (stream
, name
, "",
7137 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED
"\n",
7141 mips_declare_common_object (stream
, name
, "\n\t.comm\t",
7145 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7146 NAME is the name of the object and ALIGN is the required alignment
7147 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7148 alignment argument. */
7151 mips_declare_common_object (FILE *stream
, const char *name
,
7152 const char *init_string
,
7153 unsigned HOST_WIDE_INT size
,
7154 unsigned int align
, bool takes_alignment_p
)
7156 if (!takes_alignment_p
)
7158 size
+= (align
/ BITS_PER_UNIT
) - 1;
7159 size
-= size
% (align
/ BITS_PER_UNIT
);
7160 mips_declare_object (stream
, name
, init_string
,
7161 "," HOST_WIDE_INT_PRINT_UNSIGNED
"\n", size
);
7164 mips_declare_object (stream
, name
, init_string
,
7165 "," HOST_WIDE_INT_PRINT_UNSIGNED
",%u\n",
7166 size
, align
/ BITS_PER_UNIT
);
7169 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7170 macros, mark the symbol as written so that mips_file_end won't emit an
7171 .extern for it. STREAM is the output file, NAME is the name of the
7172 symbol, INIT_STRING is the string that should be written before the
7173 symbol and FINAL_STRING is the string that should be written after it.
7174 FINAL_STRING is a printf() format that consumes the remaining arguments. */
7177 mips_declare_object (FILE *stream
, const char *name
, const char *init_string
,
7178 const char *final_string
, ...)
7182 fputs (init_string
, stream
);
7183 assemble_name (stream
, name
);
7184 va_start (ap
, final_string
);
7185 vfprintf (stream
, final_string
, ap
);
7188 if (!TARGET_EXPLICIT_RELOCS
)
7190 tree name_tree
= get_identifier (name
);
7191 TREE_ASM_WRITTEN (name_tree
) = 1;
7195 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7196 extern int size_directive_output
;
7198 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7199 definitions except that it uses mips_declare_object() to emit the label. */
7202 mips_declare_object_name (FILE *stream
, const char *name
,
7203 tree decl ATTRIBUTE_UNUSED
)
7205 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7206 ASM_OUTPUT_TYPE_DIRECTIVE (stream
, name
, "object");
7209 size_directive_output
= 0;
7210 if (!flag_inhibit_size_directive
&& DECL_SIZE (decl
))
7214 size_directive_output
= 1;
7215 size
= int_size_in_bytes (TREE_TYPE (decl
));
7216 ASM_OUTPUT_SIZE_DIRECTIVE (stream
, name
, size
);
7219 mips_declare_object (stream
, name
, "", ":\n");
7222 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7225 mips_finish_declare_object (FILE *stream
, tree decl
, int top_level
, int at_end
)
7229 name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
7230 if (!flag_inhibit_size_directive
7231 && DECL_SIZE (decl
) != 0
7232 && !at_end
&& top_level
7233 && DECL_INITIAL (decl
) == error_mark_node
7234 && !size_directive_output
)
7238 size_directive_output
= 1;
7239 size
= int_size_in_bytes (TREE_TYPE (decl
));
7240 ASM_OUTPUT_SIZE_DIRECTIVE (stream
, name
, size
);
7245 /* Return true if X in context CONTEXT is a small data address that can
7246 be rewritten as a LO_SUM. */
7249 mips_rewrite_small_data_p (rtx x
, enum mips_symbol_context context
)
7251 enum mips_symbol_type symbol_type
;
7253 return (TARGET_EXPLICIT_RELOCS
7254 && mips_symbolic_constant_p (x
, context
, &symbol_type
)
7255 && symbol_type
== SYMBOL_GP_RELATIVE
);
7259 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
7260 containing MEM, or null if none. */
7263 mips_small_data_pattern_1 (rtx
*loc
, void *data
)
7265 enum mips_symbol_context context
;
7267 if (GET_CODE (*loc
) == LO_SUM
)
7272 if (for_each_rtx (&XEXP (*loc
, 0), mips_small_data_pattern_1
, *loc
))
7277 context
= data
? SYMBOL_CONTEXT_MEM
: SYMBOL_CONTEXT_LEA
;
7278 return mips_rewrite_small_data_p (*loc
, context
);
7281 /* Return true if OP refers to small data symbols directly, not through
7285 mips_small_data_pattern_p (rtx op
)
7287 return for_each_rtx (&op
, mips_small_data_pattern_1
, 0);
7290 /* A for_each_rtx callback, used by mips_rewrite_small_data.
7291 DATA is the containing MEM, or null if none. */
7294 mips_rewrite_small_data_1 (rtx
*loc
, void *data
)
7296 enum mips_symbol_context context
;
7300 for_each_rtx (&XEXP (*loc
, 0), mips_rewrite_small_data_1
, *loc
);
7304 context
= data
? SYMBOL_CONTEXT_MEM
: SYMBOL_CONTEXT_LEA
;
7305 if (mips_rewrite_small_data_p (*loc
, context
))
7306 *loc
= gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, *loc
);
7308 if (GET_CODE (*loc
) == LO_SUM
)
7314 /* If possible, rewrite OP so that it refers to small data using
7315 explicit relocations. */
7318 mips_rewrite_small_data (rtx op
)
7320 op
= copy_insn (op
);
7321 for_each_rtx (&op
, mips_rewrite_small_data_1
, 0);
7325 /* Return true if the current function has an insn that implicitly
7329 mips_function_has_gp_insn (void)
7331 /* Don't bother rechecking if we found one last time. */
7332 if (!cfun
->machine
->has_gp_insn_p
)
7336 push_topmost_sequence ();
7337 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
7339 && GET_CODE (PATTERN (insn
)) != USE
7340 && GET_CODE (PATTERN (insn
)) != CLOBBER
7341 && (get_attr_got (insn
) != GOT_UNSET
7342 || small_data_pattern (PATTERN (insn
), VOIDmode
)))
7344 pop_topmost_sequence ();
7346 cfun
->machine
->has_gp_insn_p
= (insn
!= 0);
7348 return cfun
->machine
->has_gp_insn_p
;
7352 /* Return the register that should be used as the global pointer
7353 within this function. Return 0 if the function doesn't need
7354 a global pointer. */
7357 mips_global_pointer (void)
7361 /* $gp is always available unless we're using a GOT. */
7362 if (!TARGET_USE_GOT
)
7363 return GLOBAL_POINTER_REGNUM
;
7365 /* We must always provide $gp when it is used implicitly. */
7366 if (!TARGET_EXPLICIT_RELOCS
)
7367 return GLOBAL_POINTER_REGNUM
;
7369 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7371 if (current_function_profile
)
7372 return GLOBAL_POINTER_REGNUM
;
7374 /* If the function has a nonlocal goto, $gp must hold the correct
7375 global pointer for the target function. */
7376 if (current_function_has_nonlocal_goto
)
7377 return GLOBAL_POINTER_REGNUM
;
7379 /* If the gp is never referenced, there's no need to initialize it.
7380 Note that reload can sometimes introduce constant pool references
7381 into a function that otherwise didn't need them. For example,
7382 suppose we have an instruction like:
7384 (set (reg:DF R1) (float:DF (reg:SI R2)))
7386 If R2 turns out to be constant such as 1, the instruction may have a
7387 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7388 using this constant if R2 doesn't get allocated to a register.
7390 In cases like these, reload will have added the constant to the pool
7391 but no instruction will yet refer to it. */
7392 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM
)
7393 && !current_function_uses_const_pool
7394 && !mips_function_has_gp_insn ())
7397 /* We need a global pointer, but perhaps we can use a call-clobbered
7398 register instead of $gp. */
7399 if (TARGET_CALL_SAVED_GP
&& current_function_is_leaf
)
7400 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
7401 if (!df_regs_ever_live_p (regno
)
7402 && call_really_used_regs
[regno
]
7403 && !fixed_regs
[regno
]
7404 && regno
!= PIC_FUNCTION_ADDR_REGNUM
)
7407 return GLOBAL_POINTER_REGNUM
;
7411 /* Return true if the function return value MODE will get returned in a
7412 floating-point register. */
7415 mips_return_mode_in_fpr_p (enum machine_mode mode
)
7417 return ((GET_MODE_CLASS (mode
) == MODE_FLOAT
7418 || GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
7419 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
7420 && GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_HWFPVALUE
);
7423 /* Return a two-character string representing a function floating-point
7424 return mode, used to name MIPS16 function stubs. */
7427 mips16_call_stub_mode_suffix (enum machine_mode mode
)
7431 else if (mode
== DFmode
)
7433 else if (mode
== SCmode
)
7435 else if (mode
== DCmode
)
7437 else if (mode
== V2SFmode
)
7443 /* Return true if the current function returns its value in a floating-point
7444 register in MIPS16 mode. */
7447 mips16_cfun_returns_in_fpr_p (void)
7449 tree return_type
= DECL_RESULT (current_function_decl
);
7450 return (TARGET_MIPS16
7451 && TARGET_HARD_FLOAT_ABI
7452 && !aggregate_value_p (return_type
, current_function_decl
)
7453 && mips_return_mode_in_fpr_p (DECL_MODE (return_type
)));
7457 /* Return true if the current function must save REGNO. */
7460 mips_save_reg_p (unsigned int regno
)
7462 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7463 if we have not chosen a call-clobbered substitute. */
7464 if (regno
== GLOBAL_POINTER_REGNUM
)
7465 return TARGET_CALL_SAVED_GP
&& cfun
->machine
->global_pointer
== regno
;
7467 /* Check call-saved registers. */
7468 if ((current_function_saves_all_registers
|| df_regs_ever_live_p (regno
))
7469 && !call_really_used_regs
[regno
])
7472 /* Save both registers in an FPR pair if either one is used. This is
7473 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7474 register to be used without the even register. */
7475 if (FP_REG_P (regno
)
7476 && MAX_FPRS_PER_FMT
== 2
7477 && df_regs_ever_live_p (regno
+ 1)
7478 && !call_really_used_regs
[regno
+ 1])
7481 /* We need to save the old frame pointer before setting up a new one. */
7482 if (regno
== HARD_FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
7485 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7486 if (current_function_profile
&& MIPS_SAVE_REG_FOR_PROFILING_P (regno
))
7489 /* We need to save the incoming return address if it is ever clobbered
7490 within the function, if __builtin_eh_return is being used to set a
7491 different return address, or if a stub is being used to return a
7493 if (regno
== GP_REG_FIRST
+ 31
7494 && (df_regs_ever_live_p (regno
)
7495 || current_function_calls_eh_return
7496 || mips16_cfun_returns_in_fpr_p ()))
7502 /* Return the index of the lowest X in the range [0, SIZE) for which
7503 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7506 mips16e_find_first_register (unsigned int mask
, const unsigned char *regs
,
7511 for (i
= 0; i
< size
; i
++)
7512 if (BITSET_P (mask
, regs
[i
]))
7518 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7519 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7520 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7521 the same is true for all indexes (X, SIZE). */
7524 mips16e_mask_registers (unsigned int *mask_ptr
, const unsigned char *regs
,
7525 unsigned int size
, HOST_WIDE_INT
*gp_reg_size_ptr
)
7529 i
= mips16e_find_first_register (*mask_ptr
, regs
, size
);
7530 for (i
++; i
< size
; i
++)
7531 if (!BITSET_P (*mask_ptr
, regs
[i
]))
7533 *gp_reg_size_ptr
+= GET_MODE_SIZE (gpr_mode
);
7534 *mask_ptr
|= 1 << regs
[i
];
7538 /* Return the bytes needed to compute the frame pointer from the current
7539 stack pointer. SIZE is the size (in bytes) of the local variables.
7541 MIPS stack frames look like:
7543 Before call After call
7544 high +-----------------------+ +-----------------------+
7546 | caller's temps. | | caller's temps. |
7548 +-----------------------+ +-----------------------+
7550 | arguments on stack. | | arguments on stack. |
7552 +-----------------------+ +-----------------------+
7553 | 4 words to save | | 4 words to save |
7554 | arguments passed | | arguments passed |
7555 | in registers, even | | in registers, even |
7556 | if not passed. | | if not passed. |
7557 SP->+-----------------------+ VFP->+-----------------------+
7558 (VFP = SP+fp_sp_offset) | |\
7559 | fp register save | | fp_reg_size
7561 SP+gp_sp_offset->+-----------------------+
7563 | | gp register save | | gp_reg_size
7564 gp_reg_rounded | | |/
7565 | +-----------------------+
7566 \| alignment padding |
7567 +-----------------------+
7569 | local variables | | var_size
7571 +-----------------------+
7573 | alloca allocations |
7575 +-----------------------+
7577 cprestore_size | | GP save for V.4 abi |
7579 +-----------------------+
7581 | arguments on stack | |
7583 +-----------------------+ |
7584 | 4 words to save | | args_size
7585 | arguments passed | |
7586 | in registers, even | |
7587 | if not passed. | |
7588 low | (TARGET_OLDABI only) |/
7589 memory SP->+-----------------------+
7594 compute_frame_size (HOST_WIDE_INT size
)
7597 HOST_WIDE_INT total_size
; /* # bytes that the entire frame takes up */
7598 HOST_WIDE_INT var_size
; /* # bytes that variables take up */
7599 HOST_WIDE_INT args_size
; /* # bytes that outgoing arguments take up */
7600 HOST_WIDE_INT cprestore_size
; /* # bytes that the cprestore slot takes up */
7601 HOST_WIDE_INT gp_reg_rounded
; /* # bytes needed to store gp after rounding */
7602 HOST_WIDE_INT gp_reg_size
; /* # bytes needed to store gp regs */
7603 HOST_WIDE_INT fp_reg_size
; /* # bytes needed to store fp regs */
7604 unsigned int mask
; /* mask of saved gp registers */
7605 unsigned int fmask
; /* mask of saved fp registers */
7607 cfun
->machine
->global_pointer
= mips_global_pointer ();
7613 var_size
= MIPS_STACK_ALIGN (size
);
7614 args_size
= current_function_outgoing_args_size
;
7615 cprestore_size
= MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET
) - args_size
;
7617 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7618 functions. If the function has local variables, we're committed
7619 to allocating it anyway. Otherwise reclaim it here. */
7620 if (var_size
== 0 && current_function_is_leaf
)
7621 cprestore_size
= args_size
= 0;
7623 /* The MIPS 3.0 linker does not like functions that dynamically
7624 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7625 looks like we are trying to create a second frame pointer to the
7626 function, so allocate some stack space to make it happy. */
7628 if (args_size
== 0 && current_function_calls_alloca
)
7629 args_size
= 4 * UNITS_PER_WORD
;
7631 total_size
= var_size
+ args_size
+ cprestore_size
;
7633 /* Calculate space needed for gp registers. */
7634 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
7635 if (mips_save_reg_p (regno
))
7637 gp_reg_size
+= GET_MODE_SIZE (gpr_mode
);
7638 mask
|= 1 << (regno
- GP_REG_FIRST
);
7641 /* We need to restore these for the handler. */
7642 if (current_function_calls_eh_return
)
7647 regno
= EH_RETURN_DATA_REGNO (i
);
7648 if (regno
== INVALID_REGNUM
)
7650 gp_reg_size
+= GET_MODE_SIZE (gpr_mode
);
7651 mask
|= 1 << (regno
- GP_REG_FIRST
);
7655 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7656 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7657 save all later registers too. */
7658 if (GENERATE_MIPS16E_SAVE_RESTORE
)
7660 mips16e_mask_registers (&mask
, mips16e_s2_s8_regs
,
7661 ARRAY_SIZE (mips16e_s2_s8_regs
), &gp_reg_size
);
7662 mips16e_mask_registers (&mask
, mips16e_a0_a3_regs
,
7663 ARRAY_SIZE (mips16e_a0_a3_regs
), &gp_reg_size
);
7666 /* This loop must iterate over the same space as its companion in
7667 mips_for_each_saved_reg. */
7668 if (TARGET_HARD_FLOAT
)
7669 for (regno
= (FP_REG_LAST
- MAX_FPRS_PER_FMT
+ 1);
7670 regno
>= FP_REG_FIRST
;
7671 regno
-= MAX_FPRS_PER_FMT
)
7672 if (mips_save_reg_p (regno
))
7674 fp_reg_size
+= MAX_FPRS_PER_FMT
* UNITS_PER_FPREG
;
7675 fmask
|= ((1 << MAX_FPRS_PER_FMT
) - 1) << (regno
- FP_REG_FIRST
);
7678 gp_reg_rounded
= MIPS_STACK_ALIGN (gp_reg_size
);
7679 total_size
+= gp_reg_rounded
+ MIPS_STACK_ALIGN (fp_reg_size
);
7681 /* Add in the space required for saving incoming register arguments. */
7682 total_size
+= current_function_pretend_args_size
;
7683 total_size
+= MIPS_STACK_ALIGN (cfun
->machine
->varargs_size
);
7685 /* Save other computed information. */
7686 cfun
->machine
->frame
.total_size
= total_size
;
7687 cfun
->machine
->frame
.var_size
= var_size
;
7688 cfun
->machine
->frame
.args_size
= args_size
;
7689 cfun
->machine
->frame
.cprestore_size
= cprestore_size
;
7690 cfun
->machine
->frame
.gp_reg_size
= gp_reg_size
;
7691 cfun
->machine
->frame
.fp_reg_size
= fp_reg_size
;
7692 cfun
->machine
->frame
.mask
= mask
;
7693 cfun
->machine
->frame
.fmask
= fmask
;
7694 cfun
->machine
->frame
.initialized
= reload_completed
;
7695 cfun
->machine
->frame
.num_gp
= gp_reg_size
/ UNITS_PER_WORD
;
7696 cfun
->machine
->frame
.num_fp
= (fp_reg_size
7697 / (MAX_FPRS_PER_FMT
* UNITS_PER_FPREG
));
7701 HOST_WIDE_INT offset
;
7703 if (GENERATE_MIPS16E_SAVE_RESTORE
)
7704 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7705 to be aligned at the high end with any padding at the low end.
7706 It is only safe to use this calculation for o32, where we never
7707 have pretend arguments, and where any varargs will be saved in
7708 the caller-allocated area rather than at the top of the frame. */
7709 offset
= (total_size
- GET_MODE_SIZE (gpr_mode
));
7711 offset
= (args_size
+ cprestore_size
+ var_size
7712 + gp_reg_size
- GET_MODE_SIZE (gpr_mode
));
7713 cfun
->machine
->frame
.gp_sp_offset
= offset
;
7714 cfun
->machine
->frame
.gp_save_offset
= offset
- total_size
;
7718 cfun
->machine
->frame
.gp_sp_offset
= 0;
7719 cfun
->machine
->frame
.gp_save_offset
= 0;
7724 HOST_WIDE_INT offset
;
7726 offset
= (args_size
+ cprestore_size
+ var_size
7727 + gp_reg_rounded
+ fp_reg_size
7728 - MAX_FPRS_PER_FMT
* UNITS_PER_FPREG
);
7729 cfun
->machine
->frame
.fp_sp_offset
= offset
;
7730 cfun
->machine
->frame
.fp_save_offset
= offset
- total_size
;
7734 cfun
->machine
->frame
.fp_sp_offset
= 0;
7735 cfun
->machine
->frame
.fp_save_offset
= 0;
7738 /* Ok, we're done. */
7742 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7743 pointer or argument pointer. TO is either the stack pointer or
7744 hard frame pointer. */
7747 mips_initial_elimination_offset (int from
, int to
)
7749 HOST_WIDE_INT offset
;
7751 compute_frame_size (get_frame_size ());
7753 /* Set OFFSET to the offset from the stack pointer. */
7756 case FRAME_POINTER_REGNUM
:
7760 case ARG_POINTER_REGNUM
:
7761 offset
= (cfun
->machine
->frame
.total_size
7762 - current_function_pretend_args_size
);
7769 if (TARGET_MIPS16
&& to
== HARD_FRAME_POINTER_REGNUM
)
7770 offset
-= cfun
->machine
->frame
.args_size
;
7775 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7776 back to a previous frame. */
7778 mips_return_addr (int count
, rtx frame ATTRIBUTE_UNUSED
)
7783 return get_hard_reg_initial_val (Pmode
, GP_REG_FIRST
+ 31);
7786 /* Use FN to save or restore register REGNO. MODE is the register's
7787 mode and OFFSET is the offset of its save slot from the current
7791 mips_save_restore_reg (enum machine_mode mode
, int regno
,
7792 HOST_WIDE_INT offset
, mips_save_restore_fn fn
)
7796 mem
= gen_frame_mem (mode
, plus_constant (stack_pointer_rtx
, offset
));
7798 fn (gen_rtx_REG (mode
, regno
), mem
);
7802 /* Call FN for each register that is saved by the current function.
7803 SP_OFFSET is the offset of the current stack pointer from the start
7807 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset
, mips_save_restore_fn fn
)
7809 enum machine_mode fpr_mode
;
7810 HOST_WIDE_INT offset
;
7813 /* Save registers starting from high to low. The debuggers prefer at least
7814 the return register be stored at func+4, and also it allows us not to
7815 need a nop in the epilogue if at least one register is reloaded in
7816 addition to return address. */
7817 offset
= cfun
->machine
->frame
.gp_sp_offset
- sp_offset
;
7818 for (regno
= GP_REG_LAST
; regno
>= GP_REG_FIRST
; regno
--)
7819 if (BITSET_P (cfun
->machine
->frame
.mask
, regno
- GP_REG_FIRST
))
7821 mips_save_restore_reg (gpr_mode
, regno
, offset
, fn
);
7822 offset
-= GET_MODE_SIZE (gpr_mode
);
7825 /* This loop must iterate over the same space as its companion in
7826 compute_frame_size. */
7827 offset
= cfun
->machine
->frame
.fp_sp_offset
- sp_offset
;
7828 fpr_mode
= (TARGET_SINGLE_FLOAT
? SFmode
: DFmode
);
7829 for (regno
= (FP_REG_LAST
- MAX_FPRS_PER_FMT
+ 1);
7830 regno
>= FP_REG_FIRST
;
7831 regno
-= MAX_FPRS_PER_FMT
)
7832 if (BITSET_P (cfun
->machine
->frame
.fmask
, regno
- FP_REG_FIRST
))
7834 mips_save_restore_reg (fpr_mode
, regno
, offset
, fn
);
7835 offset
-= GET_MODE_SIZE (fpr_mode
);
7839 /* If we're generating n32 or n64 abicalls, and the current function
7840 does not use $28 as its global pointer, emit a cplocal directive.
7841 Use pic_offset_table_rtx as the argument to the directive. */
7844 mips_output_cplocal (void)
7846 if (!TARGET_EXPLICIT_RELOCS
7847 && cfun
->machine
->global_pointer
> 0
7848 && cfun
->machine
->global_pointer
!= GLOBAL_POINTER_REGNUM
)
7849 output_asm_insn (".cplocal %+", 0);
7852 /* Return the style of GP load sequence that is being used for the
7853 current function. */
7855 enum mips_loadgp_style
7856 mips_current_loadgp_style (void)
7858 if (!TARGET_USE_GOT
|| cfun
->machine
->global_pointer
== 0)
7864 if (TARGET_ABSOLUTE_ABICALLS
)
7865 return LOADGP_ABSOLUTE
;
7867 return TARGET_NEWABI
? LOADGP_NEWABI
: LOADGP_OLDABI
;
7870 /* The __gnu_local_gp symbol. */
7872 static GTY(()) rtx mips_gnu_local_gp
;
7874 /* If we're generating n32 or n64 abicalls, emit instructions
7875 to set up the global pointer. */
7878 mips_emit_loadgp (void)
7880 rtx addr
, offset
, incoming_address
, base
, index
;
7882 switch (mips_current_loadgp_style ())
7884 case LOADGP_ABSOLUTE
:
7885 if (mips_gnu_local_gp
== NULL
)
7887 mips_gnu_local_gp
= gen_rtx_SYMBOL_REF (Pmode
, "__gnu_local_gp");
7888 SYMBOL_REF_FLAGS (mips_gnu_local_gp
) |= SYMBOL_FLAG_LOCAL
;
7890 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp
));
7894 addr
= XEXP (DECL_RTL (current_function_decl
), 0);
7895 offset
= mips_unspec_address (addr
, SYMBOL_GOTOFF_LOADGP
);
7896 incoming_address
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
7897 emit_insn (gen_loadgp_newabi (offset
, incoming_address
));
7898 if (!TARGET_EXPLICIT_RELOCS
)
7899 emit_insn (gen_loadgp_blockage ());
7903 base
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (VXWORKS_GOTT_BASE
));
7904 index
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (VXWORKS_GOTT_INDEX
));
7905 emit_insn (gen_loadgp_rtp (base
, index
));
7906 if (!TARGET_EXPLICIT_RELOCS
)
7907 emit_insn (gen_loadgp_blockage ());
7915 /* Set up the stack and frame (if desired) for the function. */
7918 mips_output_function_prologue (FILE *file
, HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
7921 HOST_WIDE_INT tsize
= cfun
->machine
->frame
.total_size
;
7923 #ifdef SDB_DEBUGGING_INFO
7924 if (debug_info_level
!= DINFO_LEVEL_TERSE
&& write_symbols
== SDB_DEBUG
)
7925 SDB_OUTPUT_SOURCE_LINE (file
, DECL_SOURCE_LINE (current_function_decl
));
7928 /* In mips16 mode, we may need to generate a 32 bit to handle
7929 floating point arguments. The linker will arrange for any 32-bit
7930 functions to call this stub, which will then jump to the 16-bit
7933 && TARGET_HARD_FLOAT_ABI
7934 && current_function_args_info
.fp_code
!= 0)
7935 build_mips16_function_stub (file
);
7937 /* Select the mips16 mode for this function. */
7939 fprintf (file
, "\t.set\tmips16\n");
7941 fprintf (file
, "\t.set\tnomips16\n");
7943 if (!FUNCTION_NAME_ALREADY_DECLARED
)
7945 /* Get the function name the same way that toplev.c does before calling
7946 assemble_start_function. This is needed so that the name used here
7947 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7948 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
7950 if (!flag_inhibit_size_directive
)
7952 fputs ("\t.ent\t", file
);
7953 assemble_name (file
, fnname
);
7957 assemble_name (file
, fnname
);
7958 fputs (":\n", file
);
7961 /* Stop mips_file_end from treating this function as external. */
7962 if (TARGET_IRIX
&& mips_abi
== ABI_32
)
7963 TREE_ASM_WRITTEN (DECL_NAME (cfun
->decl
)) = 1;
7965 if (!flag_inhibit_size_directive
)
7967 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7969 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC
",%s\t\t"
7970 "# vars= " HOST_WIDE_INT_PRINT_DEC
", regs= %d/%d"
7971 ", args= " HOST_WIDE_INT_PRINT_DEC
7972 ", gp= " HOST_WIDE_INT_PRINT_DEC
"\n",
7973 (reg_names
[(frame_pointer_needed
)
7974 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
]),
7975 ((frame_pointer_needed
&& TARGET_MIPS16
)
7976 ? tsize
- cfun
->machine
->frame
.args_size
7978 reg_names
[GP_REG_FIRST
+ 31],
7979 cfun
->machine
->frame
.var_size
,
7980 cfun
->machine
->frame
.num_gp
,
7981 cfun
->machine
->frame
.num_fp
,
7982 cfun
->machine
->frame
.args_size
,
7983 cfun
->machine
->frame
.cprestore_size
);
7985 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7986 fprintf (file
, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC
"\n",
7987 cfun
->machine
->frame
.mask
,
7988 cfun
->machine
->frame
.gp_save_offset
);
7989 fprintf (file
, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC
"\n",
7990 cfun
->machine
->frame
.fmask
,
7991 cfun
->machine
->frame
.fp_save_offset
);
7994 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7995 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7998 if (mips_current_loadgp_style () == LOADGP_OLDABI
)
8000 /* Handle the initialization of $gp for SVR4 PIC. */
8001 if (!cfun
->machine
->all_noreorder_p
)
8002 output_asm_insn ("%(.cpload\t%^%)", 0);
8004 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8006 else if (cfun
->machine
->all_noreorder_p
)
8007 output_asm_insn ("%(%<", 0);
8009 /* Tell the assembler which register we're using as the global
8010 pointer. This is needed for thunks, since they can use either
8011 explicit relocs or assembler macros. */
8012 mips_output_cplocal ();
8015 /* Make the last instruction frame related and note that it performs
8016 the operation described by FRAME_PATTERN. */
8019 mips_set_frame_expr (rtx frame_pattern
)
8023 insn
= get_last_insn ();
8024 RTX_FRAME_RELATED_P (insn
) = 1;
8025 REG_NOTES (insn
) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
8031 /* Return a frame-related rtx that stores REG at MEM.
8032 REG must be a single register. */
8035 mips_frame_set (rtx mem
, rtx reg
)
8039 /* If we're saving the return address register and the dwarf return
8040 address column differs from the hard register number, adjust the
8041 note reg to refer to the former. */
8042 if (REGNO (reg
) == GP_REG_FIRST
+ 31
8043 && DWARF_FRAME_RETURN_COLUMN
!= GP_REG_FIRST
+ 31)
8044 reg
= gen_rtx_REG (GET_MODE (reg
), DWARF_FRAME_RETURN_COLUMN
);
8046 set
= gen_rtx_SET (VOIDmode
, mem
, reg
);
8047 RTX_FRAME_RELATED_P (set
) = 1;
8053 /* Save register REG to MEM. Make the instruction frame-related. */
8056 mips_save_reg (rtx reg
, rtx mem
)
8058 if (GET_MODE (reg
) == DFmode
&& !TARGET_FLOAT64
)
8062 if (mips_split_64bit_move_p (mem
, reg
))
8063 mips_split_64bit_move (mem
, reg
);
8065 mips_emit_move (mem
, reg
);
8067 x1
= mips_frame_set (mips_subword (mem
, 0), mips_subword (reg
, 0));
8068 x2
= mips_frame_set (mips_subword (mem
, 1), mips_subword (reg
, 1));
8069 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, x1
, x2
)));
8074 && REGNO (reg
) != GP_REG_FIRST
+ 31
8075 && !M16_REG_P (REGNO (reg
)))
8077 /* Save a non-mips16 register by moving it through a temporary.
8078 We don't need to do this for $31 since there's a special
8079 instruction for it. */
8080 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg
)), reg
);
8081 mips_emit_move (mem
, MIPS_PROLOGUE_TEMP (GET_MODE (reg
)));
8084 mips_emit_move (mem
, reg
);
8086 mips_set_frame_expr (mips_frame_set (mem
, reg
));
8090 /* Return a move between register REGNO and memory location SP + OFFSET.
8091 Make the move a load if RESTORE_P, otherwise make it a frame-related
8095 mips16e_save_restore_reg (bool restore_p
, HOST_WIDE_INT offset
,
8100 mem
= gen_frame_mem (SImode
, plus_constant (stack_pointer_rtx
, offset
));
8101 reg
= gen_rtx_REG (SImode
, regno
);
8103 ? gen_rtx_SET (VOIDmode
, reg
, mem
)
8104 : mips_frame_set (mem
, reg
));
8107 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8108 The instruction must:
8110 - Allocate or deallocate SIZE bytes in total; SIZE is known
8113 - Save or restore as many registers in *MASK_PTR as possible.
8114 The instruction saves the first registers at the top of the
8115 allocated area, with the other registers below it.
8117 - Save NARGS argument registers above the allocated area.
8119 (NARGS is always zero if RESTORE_P.)
8121 The SAVE and RESTORE instructions cannot save and restore all general
8122 registers, so there may be some registers left over for the caller to
8123 handle. Destructively modify *MASK_PTR so that it contains the registers
8124 that still need to be saved or restored. The caller can save these
8125 registers in the memory immediately below *OFFSET_PTR, which is a
8126 byte offset from the bottom of the allocated stack area. */
8129 mips16e_build_save_restore (bool restore_p
, unsigned int *mask_ptr
,
8130 HOST_WIDE_INT
*offset_ptr
, unsigned int nargs
,
8134 HOST_WIDE_INT offset
, top_offset
;
8135 unsigned int i
, regno
;
8138 gcc_assert (cfun
->machine
->frame
.fp_reg_size
== 0);
8140 /* Calculate the number of elements in the PARALLEL. We need one element
8141 for the stack adjustment, one for each argument register save, and one
8142 for each additional register move. */
8144 for (i
= 0; i
< ARRAY_SIZE (mips16e_save_restore_regs
); i
++)
8145 if (BITSET_P (*mask_ptr
, mips16e_save_restore_regs
[i
]))
8148 /* Create the final PARALLEL. */
8149 pattern
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (n
));
8152 /* Add the stack pointer adjustment. */
8153 set
= gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
8154 plus_constant (stack_pointer_rtx
,
8155 restore_p
? size
: -size
));
8156 RTX_FRAME_RELATED_P (set
) = 1;
8157 XVECEXP (pattern
, 0, n
++) = set
;
8159 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8160 top_offset
= restore_p
? size
: 0;
8162 /* Save the arguments. */
8163 for (i
= 0; i
< nargs
; i
++)
8165 offset
= top_offset
+ i
* GET_MODE_SIZE (gpr_mode
);
8166 set
= mips16e_save_restore_reg (restore_p
, offset
, GP_ARG_FIRST
+ i
);
8167 XVECEXP (pattern
, 0, n
++) = set
;
8170 /* Then fill in the other register moves. */
8171 offset
= top_offset
;
8172 for (i
= 0; i
< ARRAY_SIZE (mips16e_save_restore_regs
); i
++)
8174 regno
= mips16e_save_restore_regs
[i
];
8175 if (BITSET_P (*mask_ptr
, regno
))
8177 offset
-= UNITS_PER_WORD
;
8178 set
= mips16e_save_restore_reg (restore_p
, offset
, regno
);
8179 XVECEXP (pattern
, 0, n
++) = set
;
8180 *mask_ptr
&= ~(1 << regno
);
8184 /* Tell the caller what offset it should use for the remaining registers. */
8185 *offset_ptr
= size
+ (offset
- top_offset
) + size
;
8187 gcc_assert (n
== XVECLEN (pattern
, 0));
8192 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8193 pointer. Return true if PATTERN matches the kind of instruction
8194 generated by mips16e_build_save_restore. If INFO is nonnull,
8195 initialize it when returning true. */
8198 mips16e_save_restore_pattern_p (rtx pattern
, HOST_WIDE_INT adjust
,
8199 struct mips16e_save_restore_info
*info
)
8201 unsigned int i
, nargs
, mask
;
8202 HOST_WIDE_INT top_offset
, save_offset
, offset
, extra
;
8203 rtx set
, reg
, mem
, base
;
8206 if (!GENERATE_MIPS16E_SAVE_RESTORE
)
8209 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8210 top_offset
= adjust
> 0 ? adjust
: 0;
8212 /* Interpret all other members of the PARALLEL. */
8213 save_offset
= top_offset
- GET_MODE_SIZE (gpr_mode
);
8217 for (n
= 1; n
< XVECLEN (pattern
, 0); n
++)
8219 /* Check that we have a SET. */
8220 set
= XVECEXP (pattern
, 0, n
);
8221 if (GET_CODE (set
) != SET
)
8224 /* Check that the SET is a load (if restoring) or a store
8226 mem
= adjust
> 0 ? SET_SRC (set
) : SET_DEST (set
);
8230 /* Check that the address is the sum of the stack pointer and a
8231 possibly-zero constant offset. */
8232 mips_split_plus (XEXP (mem
, 0), &base
, &offset
);
8233 if (base
!= stack_pointer_rtx
)
8236 /* Check that SET's other operand is a register. */
8237 reg
= adjust
> 0 ? SET_DEST (set
) : SET_SRC (set
);
8241 /* Check for argument saves. */
8242 if (offset
== top_offset
+ nargs
* GET_MODE_SIZE (gpr_mode
)
8243 && REGNO (reg
) == GP_ARG_FIRST
+ nargs
)
8245 else if (offset
== save_offset
)
8247 while (mips16e_save_restore_regs
[i
++] != REGNO (reg
))
8248 if (i
== ARRAY_SIZE (mips16e_save_restore_regs
))
8251 mask
|= 1 << REGNO (reg
);
8252 save_offset
-= GET_MODE_SIZE (gpr_mode
);
8258 /* Check that the restrictions on register ranges are met. */
8260 mips16e_mask_registers (&mask
, mips16e_s2_s8_regs
,
8261 ARRAY_SIZE (mips16e_s2_s8_regs
), &extra
);
8262 mips16e_mask_registers (&mask
, mips16e_a0_a3_regs
,
8263 ARRAY_SIZE (mips16e_a0_a3_regs
), &extra
);
8267 /* Make sure that the topmost argument register is not saved twice.
8268 The checks above ensure that the same is then true for the other
8269 argument registers. */
8270 if (nargs
> 0 && BITSET_P (mask
, GP_ARG_FIRST
+ nargs
- 1))
8273 /* Pass back information, if requested. */
8276 info
->nargs
= nargs
;
8278 info
->size
= (adjust
> 0 ? adjust
: -adjust
);
8284 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8285 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8286 the null terminator. */
8289 mips16e_add_register_range (char *s
, unsigned int min_reg
,
8290 unsigned int max_reg
)
8292 if (min_reg
!= max_reg
)
8293 s
+= sprintf (s
, ",%s-%s", reg_names
[min_reg
], reg_names
[max_reg
]);
8295 s
+= sprintf (s
, ",%s", reg_names
[min_reg
]);
8299 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8300 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8303 mips16e_output_save_restore (rtx pattern
, HOST_WIDE_INT adjust
)
8305 static char buffer
[300];
8307 struct mips16e_save_restore_info info
;
8308 unsigned int i
, end
;
8311 /* Parse the pattern. */
8312 if (!mips16e_save_restore_pattern_p (pattern
, adjust
, &info
))
8315 /* Add the mnemonic. */
8316 s
= strcpy (buffer
, adjust
> 0 ? "restore\t" : "save\t");
8319 /* Save the arguments. */
8321 s
+= sprintf (s
, "%s-%s,", reg_names
[GP_ARG_FIRST
],
8322 reg_names
[GP_ARG_FIRST
+ info
.nargs
- 1]);
8323 else if (info
.nargs
== 1)
8324 s
+= sprintf (s
, "%s,", reg_names
[GP_ARG_FIRST
]);
8326 /* Emit the amount of stack space to allocate or deallocate. */
8327 s
+= sprintf (s
, "%d", (int) info
.size
);
8329 /* Save or restore $16. */
8330 if (BITSET_P (info
.mask
, 16))
8331 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 16]);
8333 /* Save or restore $17. */
8334 if (BITSET_P (info
.mask
, 17))
8335 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 17]);
8337 /* Save or restore registers in the range $s2...$s8, which
8338 mips16e_s2_s8_regs lists in decreasing order. Note that this
8339 is a software register range; the hardware registers are not
8340 numbered consecutively. */
8341 end
= ARRAY_SIZE (mips16e_s2_s8_regs
);
8342 i
= mips16e_find_first_register (info
.mask
, mips16e_s2_s8_regs
, end
);
8344 s
= mips16e_add_register_range (s
, mips16e_s2_s8_regs
[end
- 1],
8345 mips16e_s2_s8_regs
[i
]);
8347 /* Save or restore registers in the range $a0...$a3. */
8348 end
= ARRAY_SIZE (mips16e_a0_a3_regs
);
8349 i
= mips16e_find_first_register (info
.mask
, mips16e_a0_a3_regs
, end
);
8351 s
= mips16e_add_register_range (s
, mips16e_a0_a3_regs
[i
],
8352 mips16e_a0_a3_regs
[end
- 1]);
8354 /* Save or restore $31. */
8355 if (BITSET_P (info
.mask
, 31))
8356 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 31]);
8361 /* Return a simplified form of X using the register values in REG_VALUES.
8362 REG_VALUES[R] is the last value assigned to hard register R, or null
8363 if R has not been modified.
8365 This function is rather limited, but is good enough for our purposes. */
8368 mips16e_collect_propagate_value (rtx x
, rtx
*reg_values
)
8372 x
= avoid_constant_pool_reference (x
);
8376 x0
= mips16e_collect_propagate_value (XEXP (x
, 0), reg_values
);
8377 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
),
8378 x0
, GET_MODE (XEXP (x
, 0)));
8381 if (ARITHMETIC_P (x
))
8383 x0
= mips16e_collect_propagate_value (XEXP (x
, 0), reg_values
);
8384 x1
= mips16e_collect_propagate_value (XEXP (x
, 1), reg_values
);
8385 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
), x0
, x1
);
8389 && reg_values
[REGNO (x
)]
8390 && !rtx_unstable_p (reg_values
[REGNO (x
)]))
8391 return reg_values
[REGNO (x
)];
8396 /* Return true if (set DEST SRC) stores an argument register into its
8397 caller-allocated save slot, storing the number of that argument
8398 register in *REGNO_PTR if so. REG_VALUES is as for
8399 mips16e_collect_propagate_value. */
8402 mips16e_collect_argument_save_p (rtx dest
, rtx src
, rtx
*reg_values
,
8403 unsigned int *regno_ptr
)
8405 unsigned int argno
, regno
;
8406 HOST_WIDE_INT offset
, required_offset
;
8409 /* Check that this is a word-mode store. */
8410 if (!MEM_P (dest
) || !REG_P (src
) || GET_MODE (dest
) != word_mode
)
8413 /* Check that the register being saved is an unmodified argument
8415 regno
= REGNO (src
);
8416 if (regno
< GP_ARG_FIRST
|| regno
> GP_ARG_LAST
|| reg_values
[regno
])
8418 argno
= regno
- GP_ARG_FIRST
;
8420 /* Check whether the address is an appropriate stack pointer or
8421 frame pointer access. The frame pointer is offset from the
8422 stack pointer by the size of the outgoing arguments. */
8423 addr
= mips16e_collect_propagate_value (XEXP (dest
, 0), reg_values
);
8424 mips_split_plus (addr
, &base
, &offset
);
8425 required_offset
= cfun
->machine
->frame
.total_size
+ argno
* UNITS_PER_WORD
;
8426 if (base
== hard_frame_pointer_rtx
)
8427 required_offset
-= cfun
->machine
->frame
.args_size
;
8428 else if (base
!= stack_pointer_rtx
)
8430 if (offset
!= required_offset
)
8437 /* A subroutine of mips_expand_prologue, called only when generating
8438 MIPS16e SAVE instructions. Search the start of the function for any
8439 instructions that save argument registers into their caller-allocated
8440 save slots. Delete such instructions and return a value N such that
8441 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8442 instructions redundant. */
8445 mips16e_collect_argument_saves (void)
8447 rtx reg_values
[FIRST_PSEUDO_REGISTER
];
8448 rtx insn
, next
, set
, dest
, src
;
8449 unsigned int nargs
, regno
;
8451 push_topmost_sequence ();
8453 memset (reg_values
, 0, sizeof (reg_values
));
8454 for (insn
= get_insns (); insn
; insn
= next
)
8456 next
= NEXT_INSN (insn
);
8463 set
= PATTERN (insn
);
8464 if (GET_CODE (set
) != SET
)
8467 dest
= SET_DEST (set
);
8468 src
= SET_SRC (set
);
8469 if (mips16e_collect_argument_save_p (dest
, src
, reg_values
, ®no
))
8471 if (!BITSET_P (cfun
->machine
->frame
.mask
, regno
))
8474 nargs
= MAX (nargs
, (regno
- GP_ARG_FIRST
) + 1);
8477 else if (REG_P (dest
) && GET_MODE (dest
) == word_mode
)
8478 reg_values
[REGNO (dest
)]
8479 = mips16e_collect_propagate_value (src
, reg_values
);
8483 pop_topmost_sequence ();
8488 /* Expand the prologue into a bunch of separate insns. */
8491 mips_expand_prologue (void)
8497 if (cfun
->machine
->global_pointer
> 0)
8498 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
8500 size
= compute_frame_size (get_frame_size ());
8502 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8503 bytes beforehand; this is enough to cover the register save area
8504 without going out of range. */
8505 if ((cfun
->machine
->frame
.mask
| cfun
->machine
->frame
.fmask
) != 0)
8507 HOST_WIDE_INT step1
;
8509 step1
= MIN (size
, MIPS_MAX_FIRST_STACK_STEP
);
8511 if (GENERATE_MIPS16E_SAVE_RESTORE
)
8513 HOST_WIDE_INT offset
;
8514 unsigned int mask
, regno
;
8516 /* Try to merge argument stores into the save instruction. */
8517 nargs
= mips16e_collect_argument_saves ();
8519 /* Build the save instruction. */
8520 mask
= cfun
->machine
->frame
.mask
;
8521 insn
= mips16e_build_save_restore (false, &mask
, &offset
,
8523 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
8526 /* Check if we need to save other registers. */
8527 for (regno
= GP_REG_FIRST
; regno
< GP_REG_LAST
; regno
++)
8528 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
8530 offset
-= GET_MODE_SIZE (gpr_mode
);
8531 mips_save_restore_reg (gpr_mode
, regno
, offset
, mips_save_reg
);
8536 insn
= gen_add3_insn (stack_pointer_rtx
,
8539 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
8541 mips_for_each_saved_reg (size
, mips_save_reg
);
8545 /* Allocate the rest of the frame. */
8548 if (SMALL_OPERAND (-size
))
8549 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx
,
8551 GEN_INT (-size
)))) = 1;
8554 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode
), GEN_INT (size
));
8557 /* There are no instructions to add or subtract registers
8558 from the stack pointer, so use the frame pointer as a
8559 temporary. We should always be using a frame pointer
8560 in this case anyway. */
8561 gcc_assert (frame_pointer_needed
);
8562 mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
8563 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx
,
8564 hard_frame_pointer_rtx
,
8565 MIPS_PROLOGUE_TEMP (Pmode
)));
8566 mips_emit_move (stack_pointer_rtx
, hard_frame_pointer_rtx
);
8569 emit_insn (gen_sub3_insn (stack_pointer_rtx
,
8571 MIPS_PROLOGUE_TEMP (Pmode
)));
8573 /* Describe the combined effect of the previous instructions. */
8575 (gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
8576 plus_constant (stack_pointer_rtx
, -size
)));
8580 /* Set up the frame pointer, if we're using one. In mips16 code,
8581 we point the frame pointer ahead of the outgoing argument area.
8582 This should allow more variables & incoming arguments to be
8583 accessed with unextended instructions. */
8584 if (frame_pointer_needed
)
8586 if (TARGET_MIPS16
&& cfun
->machine
->frame
.args_size
!= 0)
8588 rtx offset
= GEN_INT (cfun
->machine
->frame
.args_size
);
8589 if (SMALL_OPERAND (cfun
->machine
->frame
.args_size
))
8591 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx
,
8596 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode
), offset
);
8597 mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
8598 emit_insn (gen_add3_insn (hard_frame_pointer_rtx
,
8599 hard_frame_pointer_rtx
,
8600 MIPS_PROLOGUE_TEMP (Pmode
)));
8602 (gen_rtx_SET (VOIDmode
, hard_frame_pointer_rtx
,
8603 plus_constant (stack_pointer_rtx
,
8604 cfun
->machine
->frame
.args_size
)));
8608 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx
,
8609 stack_pointer_rtx
)) = 1;
8612 mips_emit_loadgp ();
8614 /* If generating o32/o64 abicalls, save $gp on the stack. */
8615 if (TARGET_ABICALLS
&& TARGET_OLDABI
&& !current_function_is_leaf
)
8616 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size
)));
8618 /* If we are profiling, make sure no instructions are scheduled before
8619 the call to mcount. */
8621 if (current_function_profile
)
8622 emit_insn (gen_blockage ());
8625 /* Do any necessary cleanup after a function to restore stack, frame,
8628 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8631 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED
,
8632 HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
8634 /* Reinstate the normal $gp. */
8635 SET_REGNO (pic_offset_table_rtx
, GLOBAL_POINTER_REGNUM
);
8636 mips_output_cplocal ();
8638 if (cfun
->machine
->all_noreorder_p
)
8640 /* Avoid using %>%) since it adds excess whitespace. */
8641 output_asm_insn (".set\tmacro", 0);
8642 output_asm_insn (".set\treorder", 0);
8643 set_noreorder
= set_nomacro
= 0;
8646 if (!FUNCTION_NAME_ALREADY_DECLARED
&& !flag_inhibit_size_directive
)
8650 /* Get the function name the same way that toplev.c does before calling
8651 assemble_start_function. This is needed so that the name used here
8652 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8653 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
8654 fputs ("\t.end\t", file
);
8655 assemble_name (file
, fnname
);
8660 /* Emit instructions to restore register REG from slot MEM. */
8663 mips_restore_reg (rtx reg
, rtx mem
)
8665 /* There's no mips16 instruction to load $31 directly. Load into
8666 $7 instead and adjust the return insn appropriately. */
8667 if (TARGET_MIPS16
&& REGNO (reg
) == GP_REG_FIRST
+ 31)
8668 reg
= gen_rtx_REG (GET_MODE (reg
), 7);
8670 if (TARGET_MIPS16
&& !M16_REG_P (REGNO (reg
)))
8672 /* Can't restore directly; move through a temporary. */
8673 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg
)), mem
);
8674 mips_emit_move (reg
, MIPS_EPILOGUE_TEMP (GET_MODE (reg
)));
8677 mips_emit_move (reg
, mem
);
8681 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8682 if this epilogue precedes a sibling call, false if it is for a normal
8683 "epilogue" pattern. */
8686 mips_expand_epilogue (int sibcall_p
)
8688 HOST_WIDE_INT step1
, step2
;
8691 if (!sibcall_p
&& mips_can_use_return_insn ())
8693 emit_jump_insn (gen_return ());
8697 /* In mips16 mode, if the return value should go into a floating-point
8698 register, we need to call a helper routine to copy it over. */
8699 if (mips16_cfun_returns_in_fpr_p ())
8708 enum machine_mode return_mode
;
8710 return_type
= DECL_RESULT (current_function_decl
);
8711 return_mode
= DECL_MODE (return_type
);
8713 name
= ACONCAT (("__mips16_ret_",
8714 mips16_call_stub_mode_suffix (return_mode
),
8716 id
= get_identifier (name
);
8717 func
= gen_rtx_SYMBOL_REF (Pmode
, IDENTIFIER_POINTER (id
));
8718 retval
= gen_rtx_REG (return_mode
, GP_RETURN
);
8719 call
= gen_call_value_internal (retval
, func
, const0_rtx
);
8720 insn
= emit_call_insn (call
);
8721 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), retval
);
8724 /* Split the frame into two. STEP1 is the amount of stack we should
8725 deallocate before restoring the registers. STEP2 is the amount we
8726 should deallocate afterwards.
8728 Start off by assuming that no registers need to be restored. */
8729 step1
= cfun
->machine
->frame
.total_size
;
8732 /* Work out which register holds the frame address. Account for the
8733 frame pointer offset used by mips16 code. */
8734 if (!frame_pointer_needed
)
8735 base
= stack_pointer_rtx
;
8738 base
= hard_frame_pointer_rtx
;
8740 step1
-= cfun
->machine
->frame
.args_size
;
8743 /* If we need to restore registers, deallocate as much stack as
8744 possible in the second step without going out of range. */
8745 if ((cfun
->machine
->frame
.mask
| cfun
->machine
->frame
.fmask
) != 0)
8747 step2
= MIN (step1
, MIPS_MAX_FIRST_STACK_STEP
);
8751 /* Set TARGET to BASE + STEP1. */
8757 /* Get an rtx for STEP1 that we can add to BASE. */
8758 adjust
= GEN_INT (step1
);
8759 if (!SMALL_OPERAND (step1
))
8761 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), adjust
);
8762 adjust
= MIPS_EPILOGUE_TEMP (Pmode
);
8765 /* Normal mode code can copy the result straight into $sp. */
8767 target
= stack_pointer_rtx
;
8769 emit_insn (gen_add3_insn (target
, base
, adjust
));
8772 /* Copy TARGET into the stack pointer. */
8773 if (target
!= stack_pointer_rtx
)
8774 mips_emit_move (stack_pointer_rtx
, target
);
8776 /* If we're using addressing macros, $gp is implicitly used by all
8777 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8779 if (TARGET_CALL_SAVED_GP
&& !TARGET_EXPLICIT_RELOCS
)
8780 emit_insn (gen_blockage ());
8782 if (GENERATE_MIPS16E_SAVE_RESTORE
&& cfun
->machine
->frame
.mask
!= 0)
8784 unsigned int regno
, mask
;
8785 HOST_WIDE_INT offset
;
8788 /* Generate the restore instruction. */
8789 mask
= cfun
->machine
->frame
.mask
;
8790 restore
= mips16e_build_save_restore (true, &mask
, &offset
, 0, step2
);
8792 /* Restore any other registers manually. */
8793 for (regno
= GP_REG_FIRST
; regno
< GP_REG_LAST
; regno
++)
8794 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
8796 offset
-= GET_MODE_SIZE (gpr_mode
);
8797 mips_save_restore_reg (gpr_mode
, regno
, offset
, mips_restore_reg
);
8800 /* Restore the remaining registers and deallocate the final bit
8802 emit_insn (restore
);
8806 /* Restore the registers. */
8807 mips_for_each_saved_reg (cfun
->machine
->frame
.total_size
- step2
,
8810 /* Deallocate the final bit of the frame. */
8812 emit_insn (gen_add3_insn (stack_pointer_rtx
,
8817 /* Add in the __builtin_eh_return stack adjustment. We need to
8818 use a temporary in mips16 code. */
8819 if (current_function_calls_eh_return
)
8823 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), stack_pointer_rtx
);
8824 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode
),
8825 MIPS_EPILOGUE_TEMP (Pmode
),
8826 EH_RETURN_STACKADJ_RTX
));
8827 mips_emit_move (stack_pointer_rtx
, MIPS_EPILOGUE_TEMP (Pmode
));
8830 emit_insn (gen_add3_insn (stack_pointer_rtx
,
8832 EH_RETURN_STACKADJ_RTX
));
8837 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8838 path will restore the return address into $7 rather than $31. */
8840 && !GENERATE_MIPS16E_SAVE_RESTORE
8841 && (cfun
->machine
->frame
.mask
& RA_MASK
) != 0)
8842 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode
,
8843 GP_REG_FIRST
+ 7)));
8845 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode
,
8846 GP_REG_FIRST
+ 31)));
8850 /* Return nonzero if this function is known to have a null epilogue.
8851 This allows the optimizer to omit jumps to jumps if no stack
8855 mips_can_use_return_insn (void)
8857 if (! reload_completed
)
8860 if (df_regs_ever_live_p (31) || current_function_profile
)
8863 /* In mips16 mode, a function that returns a floating point value
8864 needs to arrange to copy the return value into the floating point
8866 if (mips16_cfun_returns_in_fpr_p ())
8869 if (cfun
->machine
->frame
.initialized
)
8870 return cfun
->machine
->frame
.total_size
== 0;
8872 return compute_frame_size (get_frame_size ()) == 0;
8875 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8876 in order to avoid duplicating too much logic from elsewhere. */
8879 mips_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
8880 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
8883 rtx
this, temp1
, temp2
, insn
, fnaddr
;
8886 /* Pretend to be a post-reload pass while generating rtl. */
8887 reload_completed
= 1;
8889 /* Mark the end of the (empty) prologue. */
8890 emit_note (NOTE_INSN_PROLOGUE_END
);
8892 /* Determine if we can use a sibcall to call FUNCTION directly. */
8893 fnaddr
= XEXP (DECL_RTL (function
), 0);
8894 use_sibcall_p
= (mips_function_ok_for_sibcall (function
, NULL
)
8895 && const_call_insn_operand (fnaddr
, Pmode
));
8897 /* Determine if we need to load FNADDR from the GOT. */
8899 switch (mips_classify_symbol (fnaddr
, SYMBOL_CONTEXT_LEA
))
8901 case SYMBOL_GOT_PAGE_OFST
:
8902 case SYMBOL_GOT_DISP
:
8903 /* Pick a global pointer. Use a call-clobbered register if
8904 TARGET_CALL_SAVED_GP. */
8905 cfun
->machine
->global_pointer
=
8906 TARGET_CALL_SAVED_GP
? 15 : GLOBAL_POINTER_REGNUM
;
8907 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
8909 /* Set up the global pointer for n32 or n64 abicalls. */
8910 mips_emit_loadgp ();
8917 /* We need two temporary registers in some cases. */
8918 temp1
= gen_rtx_REG (Pmode
, 2);
8919 temp2
= gen_rtx_REG (Pmode
, 3);
8921 /* Find out which register contains the "this" pointer. */
8922 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
8923 this = gen_rtx_REG (Pmode
, GP_ARG_FIRST
+ 1);
8925 this = gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
8927 /* Add DELTA to THIS. */
8930 rtx offset
= GEN_INT (delta
);
8931 if (!SMALL_OPERAND (delta
))
8933 mips_emit_move (temp1
, offset
);
8936 emit_insn (gen_add3_insn (this, this, offset
));
8939 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8940 if (vcall_offset
!= 0)
8944 /* Set TEMP1 to *THIS. */
8945 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, this));
8947 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8948 addr
= mips_add_offset (temp2
, temp1
, vcall_offset
);
8950 /* Load the offset and add it to THIS. */
8951 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, addr
));
8952 emit_insn (gen_add3_insn (this, this, temp1
));
8955 /* Jump to the target function. Use a sibcall if direct jumps are
8956 allowed, otherwise load the address into a register first. */
8959 insn
= emit_call_insn (gen_sibcall_internal (fnaddr
, const0_rtx
));
8960 SIBLING_CALL_P (insn
) = 1;
8964 /* This is messy. gas treats "la $25,foo" as part of a call
8965 sequence and may allow a global "foo" to be lazily bound.
8966 The general move patterns therefore reject this combination.
8968 In this context, lazy binding would actually be OK
8969 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8970 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8971 We must therefore load the address via a temporary
8972 register if mips_dangerous_for_la25_p.
8974 If we jump to the temporary register rather than $25, the assembler
8975 can use the move insn to fill the jump's delay slot. */
8976 if (TARGET_USE_PIC_FN_ADDR_REG
8977 && !mips_dangerous_for_la25_p (fnaddr
))
8978 temp1
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
8979 mips_load_call_address (temp1
, fnaddr
, true);
8981 if (TARGET_USE_PIC_FN_ADDR_REG
8982 && REGNO (temp1
) != PIC_FUNCTION_ADDR_REGNUM
)
8983 mips_emit_move (gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
), temp1
);
8984 emit_jump_insn (gen_indirect_jump (temp1
));
8987 /* Run just enough of rest_of_compilation. This sequence was
8988 "borrowed" from alpha.c. */
8989 insn
= get_insns ();
8990 insn_locators_alloc ();
8991 split_all_insns_noflow ();
8992 mips16_lay_out_constants ();
8993 shorten_branches (insn
);
8994 final_start_function (insn
, file
, 1);
8995 final (insn
, file
, 1);
8996 final_end_function ();
8998 /* Clean up the vars set above. Note that final_end_function resets
8999 the global pointer for us. */
9000 reload_completed
= 0;
9003 /* Implement TARGET_SELECT_RTX_SECTION. */
9006 mips_select_rtx_section (enum machine_mode mode
, rtx x
,
9007 unsigned HOST_WIDE_INT align
)
9009 /* ??? Consider using mergeable small data sections. */
9010 if (mips_rtx_constant_in_small_data_p (mode
))
9011 return get_named_section (NULL
, ".sdata", 0);
9013 return default_elf_select_rtx_section (mode
, x
, align
);
9016 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
9018 The complication here is that, with the combination TARGET_ABICALLS
9019 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
9020 therefore not be included in the read-only part of a DSO. Handle such
9021 cases by selecting a normal data section instead of a read-only one.
9022 The logic apes that in default_function_rodata_section. */
9025 mips_function_rodata_section (tree decl
)
9027 if (!TARGET_ABICALLS
|| TARGET_GPWORD
)
9028 return default_function_rodata_section (decl
);
9030 if (decl
&& DECL_SECTION_NAME (decl
))
9032 const char *name
= TREE_STRING_POINTER (DECL_SECTION_NAME (decl
));
9033 if (DECL_ONE_ONLY (decl
) && strncmp (name
, ".gnu.linkonce.t.", 16) == 0)
9035 char *rname
= ASTRDUP (name
);
9037 return get_section (rname
, SECTION_LINKONCE
| SECTION_WRITE
, decl
);
9039 else if (flag_function_sections
&& flag_data_sections
9040 && strncmp (name
, ".text.", 6) == 0)
9042 char *rname
= ASTRDUP (name
);
9043 memcpy (rname
+ 1, "data", 4);
9044 return get_section (rname
, SECTION_WRITE
, decl
);
9047 return data_section
;
9050 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
9051 locally-defined objects go in a small data section. It also controls
9052 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
9053 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
9056 mips_in_small_data_p (const_tree decl
)
9060 if (TREE_CODE (decl
) == STRING_CST
|| TREE_CODE (decl
) == FUNCTION_DECL
)
9063 /* We don't yet generate small-data references for -mabicalls or
9064 VxWorks RTP code. See the related -G handling in override_options. */
9065 if (TARGET_ABICALLS
|| TARGET_VXWORKS_RTP
)
9068 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_SECTION_NAME (decl
) != 0)
9072 /* Reject anything that isn't in a known small-data section. */
9073 name
= TREE_STRING_POINTER (DECL_SECTION_NAME (decl
));
9074 if (strcmp (name
, ".sdata") != 0 && strcmp (name
, ".sbss") != 0)
9077 /* If a symbol is defined externally, the assembler will use the
9078 usual -G rules when deciding how to implement macros. */
9079 if (mips_lo_relocs
[SYMBOL_GP_RELATIVE
] || !DECL_EXTERNAL (decl
))
9082 else if (TARGET_EMBEDDED_DATA
)
9084 /* Don't put constants into the small data section: we want them
9085 to be in ROM rather than RAM. */
9086 if (TREE_CODE (decl
) != VAR_DECL
)
9089 if (TREE_READONLY (decl
)
9090 && !TREE_SIDE_EFFECTS (decl
)
9091 && (!DECL_INITIAL (decl
) || TREE_CONSTANT (DECL_INITIAL (decl
))))
9095 /* Enforce -mlocal-sdata. */
9096 if (!TARGET_LOCAL_SDATA
&& !TREE_PUBLIC (decl
))
9099 /* Enforce -mextern-sdata. */
9100 if (!TARGET_EXTERN_SDATA
&& DECL_P (decl
))
9102 if (DECL_EXTERNAL (decl
))
9104 if (DECL_COMMON (decl
) && DECL_INITIAL (decl
) == NULL
)
9108 size
= int_size_in_bytes (TREE_TYPE (decl
));
9109 return (size
> 0 && size
<= mips_section_threshold
);
9112 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
9113 anchors for small data: the GP register acts as an anchor in that
9114 case. We also don't want to use them for PC-relative accesses,
9115 where the PC acts as an anchor. */
9118 mips_use_anchors_for_symbol_p (const_rtx symbol
)
9120 switch (mips_classify_symbol (symbol
, SYMBOL_CONTEXT_MEM
))
9122 case SYMBOL_PC_RELATIVE
:
9123 case SYMBOL_GP_RELATIVE
:
9127 return default_use_anchors_for_symbol_p (symbol
);
9131 /* See whether VALTYPE is a record whose fields should be returned in
9132 floating-point registers. If so, return the number of fields and
9133 list them in FIELDS (which should have two elements). Return 0
9136 For n32 & n64, a structure with one or two fields is returned in
9137 floating-point registers as long as every field has a floating-point
9141 mips_fpr_return_fields (const_tree valtype
, tree
*fields
)
9149 if (TREE_CODE (valtype
) != RECORD_TYPE
)
9153 for (field
= TYPE_FIELDS (valtype
); field
!= 0; field
= TREE_CHAIN (field
))
9155 if (TREE_CODE (field
) != FIELD_DECL
)
9158 if (TREE_CODE (TREE_TYPE (field
)) != REAL_TYPE
)
9164 fields
[i
++] = field
;
9170 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
9171 a value in the most significant part of $2/$3 if:
9173 - the target is big-endian;
9175 - the value has a structure or union type (we generalize this to
9176 cover aggregates from other languages too); and
9178 - the structure is not returned in floating-point registers. */
9181 mips_return_in_msb (const_tree valtype
)
9185 return (TARGET_NEWABI
9186 && TARGET_BIG_ENDIAN
9187 && AGGREGATE_TYPE_P (valtype
)
9188 && mips_fpr_return_fields (valtype
, fields
) == 0);
9192 /* Return a composite value in a pair of floating-point registers.
9193 MODE1 and OFFSET1 are the mode and byte offset for the first value,
9194 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
9197 For n32 & n64, $f0 always holds the first value and $f2 the second.
9198 Otherwise the values are packed together as closely as possible. */
9201 mips_return_fpr_pair (enum machine_mode mode
,
9202 enum machine_mode mode1
, HOST_WIDE_INT offset1
,
9203 enum machine_mode mode2
, HOST_WIDE_INT offset2
)
9207 inc
= (TARGET_NEWABI
? 2 : MAX_FPRS_PER_FMT
);
9208 return gen_rtx_PARALLEL
9211 gen_rtx_EXPR_LIST (VOIDmode
,
9212 gen_rtx_REG (mode1
, FP_RETURN
),
9214 gen_rtx_EXPR_LIST (VOIDmode
,
9215 gen_rtx_REG (mode2
, FP_RETURN
+ inc
),
9216 GEN_INT (offset2
))));
9221 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
9222 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
9223 VALTYPE is null and MODE is the mode of the return value. */
9226 mips_function_value (const_tree valtype
, const_tree func ATTRIBUTE_UNUSED
,
9227 enum machine_mode mode
)
9234 mode
= TYPE_MODE (valtype
);
9235 unsignedp
= TYPE_UNSIGNED (valtype
);
9237 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
9238 true, we must promote the mode just as PROMOTE_MODE does. */
9239 mode
= promote_mode (valtype
, mode
, &unsignedp
, 1);
9241 /* Handle structures whose fields are returned in $f0/$f2. */
9242 switch (mips_fpr_return_fields (valtype
, fields
))
9245 return gen_rtx_REG (mode
, FP_RETURN
);
9248 return mips_return_fpr_pair (mode
,
9249 TYPE_MODE (TREE_TYPE (fields
[0])),
9250 int_byte_position (fields
[0]),
9251 TYPE_MODE (TREE_TYPE (fields
[1])),
9252 int_byte_position (fields
[1]));
9255 /* If a value is passed in the most significant part of a register, see
9256 whether we have to round the mode up to a whole number of words. */
9257 if (mips_return_in_msb (valtype
))
9259 HOST_WIDE_INT size
= int_size_in_bytes (valtype
);
9260 if (size
% UNITS_PER_WORD
!= 0)
9262 size
+= UNITS_PER_WORD
- size
% UNITS_PER_WORD
;
9263 mode
= mode_for_size (size
* BITS_PER_UNIT
, MODE_INT
, 0);
9267 /* For EABI, the class of return register depends entirely on MODE.
9268 For example, "struct { some_type x; }" and "union { some_type x; }"
9269 are returned in the same way as a bare "some_type" would be.
9270 Other ABIs only use FPRs for scalar, complex or vector types. */
9271 if (mips_abi
!= ABI_EABI
&& !FLOAT_TYPE_P (valtype
))
9272 return gen_rtx_REG (mode
, GP_RETURN
);
9277 /* Handle long doubles for n32 & n64. */
9279 return mips_return_fpr_pair (mode
,
9281 DImode
, GET_MODE_SIZE (mode
) / 2);
9283 if (mips_return_mode_in_fpr_p (mode
))
9285 if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
9286 return mips_return_fpr_pair (mode
,
9287 GET_MODE_INNER (mode
), 0,
9288 GET_MODE_INNER (mode
),
9289 GET_MODE_SIZE (mode
) / 2);
9291 return gen_rtx_REG (mode
, FP_RETURN
);
9295 return gen_rtx_REG (mode
, GP_RETURN
);
9298 /* Return nonzero when an argument must be passed by reference. */
9301 mips_pass_by_reference (CUMULATIVE_ARGS
*cum ATTRIBUTE_UNUSED
,
9302 enum machine_mode mode
, const_tree type
,
9303 bool named ATTRIBUTE_UNUSED
)
9305 if (mips_abi
== ABI_EABI
)
9309 /* ??? How should SCmode be handled? */
9310 if (mode
== DImode
|| mode
== DFmode
9311 || mode
== DQmode
|| mode
== UDQmode
9312 || mode
== DAmode
|| mode
== UDAmode
)
9315 size
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
9316 return size
== -1 || size
> UNITS_PER_WORD
;
9320 /* If we have a variable-sized parameter, we have no choice. */
9321 return targetm
.calls
.must_pass_in_stack (mode
, type
);
9326 mips_callee_copies (CUMULATIVE_ARGS
*cum ATTRIBUTE_UNUSED
,
9327 enum machine_mode mode ATTRIBUTE_UNUSED
,
9328 const_tree type ATTRIBUTE_UNUSED
, bool named
)
9330 return mips_abi
== ABI_EABI
&& named
;
9333 /* Return true if registers of class CLASS cannot change from mode FROM
9337 mips_cannot_change_mode_class (enum machine_mode from
,
9338 enum machine_mode to
, enum reg_class
class)
9340 if (MIN (GET_MODE_SIZE (from
), GET_MODE_SIZE (to
)) <= UNITS_PER_WORD
9341 && MAX (GET_MODE_SIZE (from
), GET_MODE_SIZE (to
)) > UNITS_PER_WORD
)
9343 if (TARGET_BIG_ENDIAN
)
9345 /* When a multi-word value is stored in paired floating-point
9346 registers, the first register always holds the low word.
9347 We therefore can't allow FPRs to change between single-word
9348 and multi-word modes. */
9349 if (MAX_FPRS_PER_FMT
> 1 && reg_classes_intersect_p (FP_REGS
, class))
9354 /* gcc assumes that each word of a multiword register can be accessed
9355 individually using SUBREGs. This is not true for floating-point
9356 registers if they are bigger than a word. */
9357 if (UNITS_PER_FPREG
> UNITS_PER_WORD
9358 && GET_MODE_SIZE (from
) > UNITS_PER_WORD
9359 && GET_MODE_SIZE (to
) < UNITS_PER_FPREG
9360 && reg_classes_intersect_p (FP_REGS
, class))
9363 /* Loading a 32-bit value into a 64-bit floating-point register
9364 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9365 We can't allow 64-bit float registers to change from SImode to
9370 && GET_MODE_SIZE (to
) >= UNITS_PER_WORD
9371 && reg_classes_intersect_p (FP_REGS
, class))
9377 /* Return true if X should not be moved directly into register $25.
9378 We need this because many versions of GAS will treat "la $25,foo" as
9379 part of a call sequence and so allow a global "foo" to be lazily bound. */
9382 mips_dangerous_for_la25_p (rtx x
)
9384 return (!TARGET_EXPLICIT_RELOCS
9386 && GET_CODE (x
) == SYMBOL_REF
9387 && mips_global_symbol_p (x
));
9390 /* Implement PREFERRED_RELOAD_CLASS. */
9393 mips_preferred_reload_class (rtx x
, enum reg_class
class)
9395 if (mips_dangerous_for_la25_p (x
) && reg_class_subset_p (LEA_REGS
, class))
9398 if (TARGET_HARD_FLOAT
9399 && FLOAT_MODE_P (GET_MODE (x
))
9400 && reg_class_subset_p (FP_REGS
, class))
9403 if (reg_class_subset_p (GR_REGS
, class))
9406 if (TARGET_MIPS16
&& reg_class_subset_p (M16_REGS
, class))
9412 /* This function returns the register class required for a secondary
9413 register when copying between one of the registers in CLASS, and X,
9414 using MODE. If IN_P is nonzero, the copy is going from X to the
9415 register, otherwise the register is the source. A return value of
9416 NO_REGS means that no secondary register is required. */
9419 mips_secondary_reload_class (enum reg_class
class,
9420 enum machine_mode mode
, rtx x
, int in_p
)
9422 enum reg_class gr_regs
= TARGET_MIPS16
? M16_REGS
: GR_REGS
;
9426 if (REG_P (x
)|| GET_CODE (x
) == SUBREG
)
9427 regno
= true_regnum (x
);
9429 gp_reg_p
= TARGET_MIPS16
? M16_REG_P (regno
) : GP_REG_P (regno
);
9431 if (mips_dangerous_for_la25_p (x
))
9434 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) class], 25))
9438 /* Copying from HI or LO to anywhere other than a general register
9439 requires a general register.
9440 This rule applies to both the original HI/LO pair and the new
9441 DSP accumulators. */
9442 if (reg_class_subset_p (class, ACC_REGS
))
9444 if (TARGET_MIPS16
&& in_p
)
9446 /* We can't really copy to HI or LO at all in mips16 mode. */
9449 return gp_reg_p
? NO_REGS
: gr_regs
;
9451 if (ACC_REG_P (regno
))
9453 if (TARGET_MIPS16
&& ! in_p
)
9455 /* We can't really copy to HI or LO at all in mips16 mode. */
9458 return class == gr_regs
? NO_REGS
: gr_regs
;
9461 /* We can only copy a value to a condition code register from a
9462 floating point register, and even then we require a scratch
9463 floating point register. We can only copy a value out of a
9464 condition code register into a general register. */
9465 if (class == ST_REGS
)
9469 return gp_reg_p
? NO_REGS
: gr_regs
;
9471 if (ST_REG_P (regno
))
9475 return class == gr_regs
? NO_REGS
: gr_regs
;
9478 if (class == FP_REGS
)
9482 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
9485 else if (CONSTANT_P (x
) && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
9487 /* We can use the l.s and l.d macros to load floating-point
9488 constants. ??? For l.s, we could probably get better
9489 code by returning GR_REGS here. */
9492 else if (gp_reg_p
|| x
== CONST0_RTX (mode
))
9494 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9497 else if (FP_REG_P (regno
))
9499 /* In this case we can use mov.s or mov.d. */
9504 /* Otherwise, we need to reload through an integer register. */
9509 /* In mips16 mode, going between memory and anything but M16_REGS
9510 requires an M16_REG. */
9513 if (class != M16_REGS
&& class != M16_NA_REGS
)
9521 if (class == M16_REGS
|| class == M16_NA_REGS
)
9530 /* Implement CLASS_MAX_NREGS.
9532 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9534 - ST_REGS are always hold CCmode values, and CCmode values are
9535 considered to be 4 bytes wide.
9537 All other register classes are covered by UNITS_PER_WORD. Note that
9538 this is true even for unions of integer and float registers when the
9539 latter are smaller than the former. The only supported combination
9540 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
9541 words but 32-bit float registers. A word-based calculation is correct
9542 in that case since -msingle-float disallows multi-FPR values. */
9545 mips_class_max_nregs (enum reg_class
class ATTRIBUTE_UNUSED
,
9546 enum machine_mode mode
)
9548 if (class == ST_REGS
)
9549 return (GET_MODE_SIZE (mode
) + 3) / 4;
9550 else if (class == FP_REGS
)
9551 return (GET_MODE_SIZE (mode
) + UNITS_PER_FPREG
- 1) / UNITS_PER_FPREG
;
9553 return (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
9557 mips_valid_pointer_mode (enum machine_mode mode
)
9559 return (mode
== SImode
|| (TARGET_64BIT
&& mode
== DImode
));
9562 /* Target hook for vector_mode_supported_p. */
9565 mips_vector_mode_supported_p (enum machine_mode mode
)
9570 return TARGET_PAIRED_SINGLE_FLOAT
;
9587 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9590 mips_scalar_mode_supported_p (enum machine_mode mode
)
9592 if (ALL_FIXED_POINT_MODE_P (mode
)
9593 && GET_MODE_PRECISION (mode
) <= 2 * BITS_PER_WORD
)
9596 return default_scalar_mode_supported_p (mode
);
9599 /* If we can access small data directly (using gp-relative relocation
9600 operators) return the small data pointer, otherwise return null.
9602 For each mips16 function which refers to GP relative symbols, we
9603 use a pseudo register, initialized at the start of the function, to
9604 hold the $gp value. */
9607 mips16_gp_pseudo_reg (void)
9609 if (cfun
->machine
->mips16_gp_pseudo_rtx
== NULL_RTX
)
9610 cfun
->machine
->mips16_gp_pseudo_rtx
= gen_reg_rtx (Pmode
);
9612 /* Don't initialize the pseudo register if we are being called from
9613 the tree optimizers' cost-calculation routines. */
9614 if (!cfun
->machine
->initialized_mips16_gp_pseudo_p
9615 && (current_ir_type () != IR_GIMPLE
|| currently_expanding_to_rtl
))
9619 /* We want to initialize this to a value which gcc will believe
9621 insn
= gen_load_const_gp (cfun
->machine
->mips16_gp_pseudo_rtx
);
9623 push_topmost_sequence ();
9624 /* We need to emit the initialization after the FUNCTION_BEG
9625 note, so that it will be integrated. */
9626 for (scan
= get_insns (); scan
!= NULL_RTX
; scan
= NEXT_INSN (scan
))
9628 && NOTE_KIND (scan
) == NOTE_INSN_FUNCTION_BEG
)
9630 if (scan
== NULL_RTX
)
9631 scan
= get_insns ();
9632 insn
= emit_insn_after (insn
, scan
);
9633 pop_topmost_sequence ();
9635 cfun
->machine
->initialized_mips16_gp_pseudo_p
= true;
9638 return cfun
->machine
->mips16_gp_pseudo_rtx
;
9641 /* Write out code to move floating point arguments in or out of
9642 general registers. Output the instructions to FILE. FP_CODE is
9643 the code describing which arguments are present (see the comment at
9644 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9645 we are copying from the floating point registers. */
9648 mips16_fp_args (FILE *file
, int fp_code
, int from_fp_p
)
9653 CUMULATIVE_ARGS cum
;
9655 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9656 gcc_assert (TARGET_OLDABI
);
9663 init_cumulative_args (&cum
, NULL
, NULL
);
9665 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
9667 enum machine_mode mode
;
9668 struct mips_arg_info info
;
9672 else if ((f
& 3) == 2)
9677 mips_arg_info (&cum
, mode
, NULL
, true, &info
);
9678 gparg
= mips_arg_regno (&info
, false);
9679 fparg
= mips_arg_regno (&info
, true);
9682 fprintf (file
, "\t%s\t%s,%s\n", s
,
9683 reg_names
[gparg
], reg_names
[fparg
]);
9684 else if (TARGET_64BIT
)
9685 fprintf (file
, "\td%s\t%s,%s\n", s
,
9686 reg_names
[gparg
], reg_names
[fparg
]);
9687 else if (ISA_HAS_MXHC1
)
9688 /* -mips32r2 -mfp64 */
9689 fprintf (file
, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9691 reg_names
[gparg
+ (WORDS_BIG_ENDIAN
? 1 : 0)],
9693 from_fp_p
? "mfhc1" : "mthc1",
9694 reg_names
[gparg
+ (WORDS_BIG_ENDIAN
? 0 : 1)],
9696 else if (TARGET_BIG_ENDIAN
)
9697 fprintf (file
, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s
,
9698 reg_names
[gparg
], reg_names
[fparg
+ 1], s
,
9699 reg_names
[gparg
+ 1], reg_names
[fparg
]);
9701 fprintf (file
, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s
,
9702 reg_names
[gparg
], reg_names
[fparg
], s
,
9703 reg_names
[gparg
+ 1], reg_names
[fparg
+ 1]);
9705 function_arg_advance (&cum
, mode
, NULL
, true);
9709 /* Build a mips16 function stub. This is used for functions which
9710 take arguments in the floating point registers. It is 32-bit code
9711 that moves the floating point args into the general registers, and
9712 then jumps to the 16-bit code. */
9715 build_mips16_function_stub (FILE *file
)
9718 char *secname
, *stubname
;
9719 tree stubid
, stubdecl
;
9723 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
9724 fnname
= targetm
.strip_name_encoding (fnname
);
9725 secname
= (char *) alloca (strlen (fnname
) + 20);
9726 sprintf (secname
, ".mips16.fn.%s", fnname
);
9727 stubname
= (char *) alloca (strlen (fnname
) + 20);
9728 sprintf (stubname
, "__fn_stub_%s", fnname
);
9729 stubid
= get_identifier (stubname
);
9730 stubdecl
= build_decl (FUNCTION_DECL
, stubid
,
9731 build_function_type (void_type_node
, NULL_TREE
));
9732 DECL_SECTION_NAME (stubdecl
) = build_string (strlen (secname
), secname
);
9733 DECL_RESULT (stubdecl
) = build_decl (RESULT_DECL
, NULL_TREE
, void_type_node
);
9735 fprintf (file
, "\t# Stub function for %s (", current_function_name ());
9737 for (f
= (unsigned int) current_function_args_info
.fp_code
; f
!= 0; f
>>= 2)
9739 fprintf (file
, "%s%s",
9740 need_comma
? ", " : "",
9741 (f
& 3) == 1 ? "float" : "double");
9744 fprintf (file
, ")\n");
9746 fprintf (file
, "\t.set\tnomips16\n");
9747 switch_to_section (function_section (stubdecl
));
9748 ASM_OUTPUT_ALIGN (file
, floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
));
9750 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9751 within a .ent, and we cannot emit another .ent. */
9752 if (!FUNCTION_NAME_ALREADY_DECLARED
)
9754 fputs ("\t.ent\t", file
);
9755 assemble_name (file
, stubname
);
9759 assemble_name (file
, stubname
);
9760 fputs (":\n", file
);
9762 /* We don't want the assembler to insert any nops here. */
9763 fprintf (file
, "\t.set\tnoreorder\n");
9765 mips16_fp_args (file
, current_function_args_info
.fp_code
, 1);
9767 fprintf (asm_out_file
, "\t.set\tnoat\n");
9768 fprintf (asm_out_file
, "\tla\t%s,", reg_names
[GP_REG_FIRST
+ 1]);
9769 assemble_name (file
, fnname
);
9770 fprintf (file
, "\n");
9771 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 1]);
9772 fprintf (asm_out_file
, "\t.set\tat\n");
9774 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9775 with one of the mfc1 instructions, because the result is not
9776 available for one instruction, so if the very first instruction
9777 in the function refers to the register, it will see the wrong
9779 fprintf (file
, "\tnop\n");
9781 fprintf (file
, "\t.set\treorder\n");
9783 if (!FUNCTION_NAME_ALREADY_DECLARED
)
9785 fputs ("\t.end\t", file
);
9786 assemble_name (file
, stubname
);
9790 switch_to_section (function_section (current_function_decl
));
/* We keep a list of functions for which we have already built stubs
   in build_mips16_call_stub.  (NOTE(review): the member lines between
   `next` and the list head were dropped in extraction; `name` and
   `fpret` are reconstructed from their uses in build_mips16_call_stub —
   confirm against the original mips.c.)  */

struct mips16_stub
{
  struct mips16_stub *next;
  /* strip_name_encoding'd name of the stubbed function (xstrdup'd).  */
  char *name;
  /* Nonzero if the stub handles a floating-point return value.  */
  int fpret;
};

/* Head of the singly-linked list of stubs built so far.  */
static struct mips16_stub *mips16_stubs;
9805 /* Emit code to return a double value from a mips16 stub. GPREG is the
9806 first GP reg to use, FPREG is the first FP reg to use. */
9809 mips16_fpret_double (int gpreg
, int fpreg
)
9812 fprintf (asm_out_file
, "\tdmfc1\t%s,%s\n",
9813 reg_names
[gpreg
], reg_names
[fpreg
]);
9814 else if (TARGET_FLOAT64
)
9816 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9817 reg_names
[gpreg
+ WORDS_BIG_ENDIAN
],
9819 fprintf (asm_out_file
, "\tmfhc1\t%s,%s\n",
9820 reg_names
[gpreg
+ !WORDS_BIG_ENDIAN
],
9825 if (TARGET_BIG_ENDIAN
)
9827 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9828 reg_names
[gpreg
+ 0],
9829 reg_names
[fpreg
+ 1]);
9830 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9831 reg_names
[gpreg
+ 1],
9832 reg_names
[fpreg
+ 0]);
9836 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9837 reg_names
[gpreg
+ 0],
9838 reg_names
[fpreg
+ 0]);
9839 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
9840 reg_names
[gpreg
+ 1],
9841 reg_names
[fpreg
+ 1]);
9846 /* Build a call stub for a mips16 call. A stub is needed if we are
9847 passing any floating point values which should go into the floating
9848 point registers. If we are, and the call turns out to be to a
9849 32-bit function, the stub will be used to move the values into the
9850 floating point registers before calling the 32-bit function. The
9851 linker will magically adjust the function call to either the 16-bit
9852 function or the 32-bit stub, depending upon where the function call
9853 is actually defined.
9855 Similarly, we need a stub if the return value might come back in a
9856 floating point register.
9858 RETVAL is the location of the return value, or null if this is
9859 a call rather than a call_value. FN is the address of the
9860 function and ARG_SIZE is the size of the arguments. FP_CODE
9861 is the code built by function_arg. This function returns a nonzero
9862 value if it builds the call instruction itself. */
9865 build_mips16_call_stub (rtx retval
, rtx fn
, rtx arg_size
, int fp_code
)
9869 char *secname
, *stubname
;
9870 struct mips16_stub
*l
;
9871 tree stubid
, stubdecl
;
9876 /* We don't need to do anything if we aren't in mips16 mode, or if
9877 we were invoked with the -msoft-float option. */
9878 if (!TARGET_MIPS16
|| TARGET_SOFT_FLOAT_ABI
)
9881 /* Figure out whether the value might come back in a floating point
9884 fpret
= mips_return_mode_in_fpr_p (GET_MODE (retval
));
9886 /* We don't need to do anything if there were no floating point
9887 arguments and the value will not be returned in a floating point
9889 if (fp_code
== 0 && ! fpret
)
9892 /* We don't need to do anything if this is a call to a special
9893 mips16 support function. */
9894 if (GET_CODE (fn
) == SYMBOL_REF
9895 && strncmp (XSTR (fn
, 0), "__mips16_", 9) == 0)
9898 /* This code will only work for o32 and o64 abis. The other ABI's
9899 require more sophisticated support. */
9900 gcc_assert (TARGET_OLDABI
);
9902 /* If we're calling via a function pointer, then we must always call
9903 via a stub. There are magic stubs provided in libgcc.a for each
9904 of the required cases. Each of them expects the function address
9905 to arrive in register $2. */
9907 if (GET_CODE (fn
) != SYMBOL_REF
)
9913 /* ??? If this code is modified to support other ABI's, we need
9914 to handle PARALLEL return values here. */
9917 sprintf (buf
, "__mips16_call_stub_%s_%d",
9918 mips16_call_stub_mode_suffix (GET_MODE (retval
)),
9921 sprintf (buf
, "__mips16_call_stub_%d",
9924 id
= get_identifier (buf
);
9925 stub_fn
= gen_rtx_SYMBOL_REF (Pmode
, IDENTIFIER_POINTER (id
));
9927 mips_emit_move (gen_rtx_REG (Pmode
, 2), fn
);
9929 if (retval
== NULL_RTX
)
9930 insn
= gen_call_internal (stub_fn
, arg_size
);
9932 insn
= gen_call_value_internal (retval
, stub_fn
, arg_size
);
9933 insn
= emit_call_insn (insn
);
9935 /* Put the register usage information on the CALL. */
9936 CALL_INSN_FUNCTION_USAGE (insn
) =
9937 gen_rtx_EXPR_LIST (VOIDmode
,
9938 gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, 2)),
9939 CALL_INSN_FUNCTION_USAGE (insn
));
9941 /* If we are handling a floating point return value, we need to
9942 save $18 in the function prologue. Putting a note on the
9943 call will mean that df_regs_ever_live_p ($18) will be true if the
9944 call is not eliminated, and we can check that in the prologue
9947 CALL_INSN_FUNCTION_USAGE (insn
) =
9948 gen_rtx_EXPR_LIST (VOIDmode
,
9949 gen_rtx_USE (VOIDmode
,
9950 gen_rtx_REG (word_mode
, 18)),
9951 CALL_INSN_FUNCTION_USAGE (insn
));
9953 /* Return 1 to tell the caller that we've generated the call
9958 /* We know the function we are going to call. If we have already
9959 built a stub, we don't need to do anything further. */
9961 fnname
= targetm
.strip_name_encoding (XSTR (fn
, 0));
9962 for (l
= mips16_stubs
; l
!= NULL
; l
= l
->next
)
9963 if (strcmp (l
->name
, fnname
) == 0)
9968 /* Build a special purpose stub. When the linker sees a
9969 function call in mips16 code, it will check where the target
9970 is defined. If the target is a 32-bit call, the linker will
9971 search for the section defined here. It can tell which
9972 symbol this section is associated with by looking at the
9973 relocation information (the name is unreliable, since this
9974 might be a static function). If such a section is found, the
9975 linker will redirect the call to the start of the magic
9978 If the function does not return a floating point value, the
9979 special stub section is named
9982 If the function does return a floating point value, the stub
9984 .mips16.call.fp.FNNAME
9987 secname
= (char *) alloca (strlen (fnname
) + 40);
9988 sprintf (secname
, ".mips16.call.%s%s",
9991 stubname
= (char *) alloca (strlen (fnname
) + 20);
9992 sprintf (stubname
, "__call_stub_%s%s",
9995 stubid
= get_identifier (stubname
);
9996 stubdecl
= build_decl (FUNCTION_DECL
, stubid
,
9997 build_function_type (void_type_node
, NULL_TREE
));
9998 DECL_SECTION_NAME (stubdecl
) = build_string (strlen (secname
), secname
);
9999 DECL_RESULT (stubdecl
) = build_decl (RESULT_DECL
, NULL_TREE
, void_type_node
);
10001 fprintf (asm_out_file
, "\t# Stub function to call %s%s (",
10003 ? (GET_MODE (retval
) == SFmode
? "float " : "double ")
10007 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
10009 fprintf (asm_out_file
, "%s%s",
10010 need_comma
? ", " : "",
10011 (f
& 3) == 1 ? "float" : "double");
10014 fprintf (asm_out_file
, ")\n");
10016 fprintf (asm_out_file
, "\t.set\tnomips16\n");
10017 assemble_start_function (stubdecl
, stubname
);
10019 if (!FUNCTION_NAME_ALREADY_DECLARED
)
10021 fputs ("\t.ent\t", asm_out_file
);
10022 assemble_name (asm_out_file
, stubname
);
10023 fputs ("\n", asm_out_file
);
10025 assemble_name (asm_out_file
, stubname
);
10026 fputs (":\n", asm_out_file
);
10029 /* We build the stub code by hand. That's the only way we can
10030 do it, since we can't generate 32-bit code during a 16-bit
10033 /* We don't want the assembler to insert any nops here. */
10034 fprintf (asm_out_file
, "\t.set\tnoreorder\n");
10036 mips16_fp_args (asm_out_file
, fp_code
, 0);
10040 fprintf (asm_out_file
, "\t.set\tnoat\n");
10041 fprintf (asm_out_file
, "\tla\t%s,%s\n", reg_names
[GP_REG_FIRST
+ 1],
10043 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 1]);
10044 fprintf (asm_out_file
, "\t.set\tat\n");
10045 /* Unfortunately, we can't fill the jump delay slot. We
10046 can't fill with one of the mtc1 instructions, because the
10047 result is not available for one instruction, so if the
10048 very first instruction in the function refers to the
10049 register, it will see the wrong value. */
10050 fprintf (asm_out_file
, "\tnop\n");
10054 fprintf (asm_out_file
, "\tmove\t%s,%s\n",
10055 reg_names
[GP_REG_FIRST
+ 18], reg_names
[GP_REG_FIRST
+ 31]);
10056 fprintf (asm_out_file
, "\tjal\t%s\n", fnname
);
10057 /* As above, we can't fill the delay slot. */
10058 fprintf (asm_out_file
, "\tnop\n");
10059 switch (GET_MODE (retval
))
10062 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
10063 reg_names
[GP_REG_FIRST
+ 3],
10064 reg_names
[FP_REG_FIRST
+ MAX_FPRS_PER_FMT
]);
10067 fprintf (asm_out_file
, "\tmfc1\t%s,%s\n",
10068 reg_names
[GP_REG_FIRST
+ 2],
10069 reg_names
[FP_REG_FIRST
+ 0]);
10073 mips16_fpret_double (GP_REG_FIRST
+ 2 + (8 / UNITS_PER_WORD
),
10074 FP_REG_FIRST
+ MAX_FPRS_PER_FMT
);
10078 mips16_fpret_double (GP_REG_FIRST
+ 2, FP_REG_FIRST
+ 0);
10082 gcc_unreachable ();
10084 fprintf (asm_out_file
, "\tj\t%s\n", reg_names
[GP_REG_FIRST
+ 18]);
10085 /* As above, we can't fill the delay slot. */
10086 fprintf (asm_out_file
, "\tnop\n");
10089 fprintf (asm_out_file
, "\t.set\treorder\n");
10091 #ifdef ASM_DECLARE_FUNCTION_SIZE
10092 ASM_DECLARE_FUNCTION_SIZE (asm_out_file
, stubname
, stubdecl
);
10095 if (!FUNCTION_NAME_ALREADY_DECLARED
)
10097 fputs ("\t.end\t", asm_out_file
);
10098 assemble_name (asm_out_file
, stubname
);
10099 fputs ("\n", asm_out_file
);
10102 /* Record this stub. */
10103 l
= (struct mips16_stub
*) xmalloc (sizeof *l
);
10104 l
->name
= xstrdup (fnname
);
10106 l
->next
= mips16_stubs
;
10110 /* If we expect a floating point return value, but we've built a
10111 stub which does not expect one, then we're in trouble. We can't
10112 use the existing stub, because it won't handle the floating point
10113 value. We can't build a new stub, because the linker won't know
10114 which stub to use for the various calls in this object file.
10115 Fortunately, this case is illegal, since it means that a function
10116 was declared in two different ways in a single compilation. */
10117 if (fpret
&& ! l
->fpret
)
10118 error ("cannot handle inconsistent calls to %qs", fnname
);
10120 if (retval
== NULL_RTX
)
10121 insn
= gen_call_internal_direct (fn
, arg_size
);
10123 insn
= gen_call_value_internal_direct (retval
, fn
, arg_size
);
10124 insn
= emit_call_insn (insn
);
10126 /* If we are calling a stub which handles a floating point return
10127 value, we need to arrange to save $18 in the prologue. We do
10128 this by marking the function call as using the register. The
10129 prologue will later see that it is used, and emit code to save
10132 CALL_INSN_FUNCTION_USAGE (insn
) =
10133 gen_rtx_EXPR_LIST (VOIDmode
,
10134 gen_rtx_USE (VOIDmode
, gen_rtx_REG (word_mode
, 18)),
10135 CALL_INSN_FUNCTION_USAGE (insn
));
10137 /* Return 1 to tell the caller that we've generated the call
10142 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10143 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10145 struct mips16_constant
{
/* Next entry in the pool's singly-linked list; add_constant keeps the
   list sorted by increasing mode size.  */
10146 struct mips16_constant
*next
;
/* Machine mode of the pooled constant.  NOTE(review): the VALUE and
   LABEL fields described above are not visible in this fragment --
   confirm against the full source.  */
10149 enum machine_mode mode
;
10152 /* Information about an incomplete mips16 constant pool. FIRST is the
10153 first constant, HIGHEST_ADDRESS is the highest address that the first
10154 byte of the pool can have, and INSN_ADDRESS is the current instruction
10157 struct mips16_constant_pool
{
/* Head of the sorted list of pending pool constants.  */
10158 struct mips16_constant
*first
;
/* Highest address (in bytes) the first byte of the pool may occupy
   before some pc-relative reference goes out of range.
   NOTE(review): the INSN_ADDRESS field mentioned above is not visible
   in this fragment.  */
10159 int highest_address
;
10163 /* Add constant VALUE to POOL and return its label. MODE is the
10164 value's mode (used for CONST_INTs, etc.). */
10167 add_constant (struct mips16_constant_pool
*pool
,
10168 rtx value
, enum machine_mode mode
)
/* P walks the list to find the insertion point; C is the new entry.  */
10170 struct mips16_constant
**p
, *c
;
/* True if no existing entry has the same mode size as MODE, in which
   case worst-case alignment padding must be accounted for below.  */
10171 bool first_of_size_p
;
10173 /* See whether the constant is already in the pool. If so, return the
10174 existing label, otherwise leave P pointing to the place where the
10175 constant should be added.
10177 Keep the pool sorted in increasing order of mode size so that we can
10178 reduce the number of alignments needed. */
10179 first_of_size_p
= true;
10180 for (p
= &pool
->first
; *p
!= 0; p
= &(*p
)->next
)
/* Reuse an identical entry rather than duplicating it.  */
10182 if (mode
== (*p
)->mode
&& rtx_equal_p (value
, (*p
)->value
))
10183 return (*p
)->label
;
/* Stop at the first entry with a larger mode size: insert before it.  */
10184 if (GET_MODE_SIZE (mode
) < GET_MODE_SIZE ((*p
)->mode
))
10186 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE ((*p
)->mode
))
10187 first_of_size_p
= false;
10190 /* In the worst case, the constant needed by the earliest instruction
10191 will end up at the end of the pool. The entire pool must then be
10192 accessible from that instruction.
10194 When adding the first constant, set the pool's highest address to
10195 the address of the first out-of-range byte. Adjust this address
10196 downwards each time a new constant is added. */
10197 if (pool
->first
== 0)
10198 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10199 is the address of the instruction with the lowest two bits clear.
10200 The base PC value for ld has the lowest three bits clear. Assume
10201 the worst case here. */
10202 pool
->highest_address
= pool
->insn_address
- (UNITS_PER_WORD
- 2) + 0x8000;
/* Each added constant shrinks the usable range by its own size...  */
10203 pool
->highest_address
-= GET_MODE_SIZE (mode
);
10204 if (first_of_size_p
)
10205 /* Take into account the worst possible padding due to alignment. */
10206 pool
->highest_address
-= GET_MODE_SIZE (mode
) - 1;
10208 /* Create a new entry. */
10209 c
= (struct mips16_constant
*) xmalloc (sizeof *c
);
/* NOTE(review): the lines that set C's value/mode, link C at *P and
   return C->label are elided from this fragment -- confirm against the
   full source.  */
10212 c
->label
= gen_label_rtx ();
10219 /* Output constant VALUE after instruction INSN and return the last
10220 instruction emitted. MODE is the mode of the constant. */
10223 dump_constants_1 (enum machine_mode mode
, rtx value
, rtx insn
)
/* Dispatch on the mode class: integers, floats and vectors are emitted
   via different consttable patterns.  */
10225 switch (GET_MODE_CLASS (mode
))
/* Integer-class constant: emit a consttable_int of the mode's size.  */
10229 rtx size
= GEN_INT (GET_MODE_SIZE (mode
));
10230 return emit_insn_after (gen_consttable_int (value
, size
), insn
);
/* Float-class constant.  */
10234 return emit_insn_after (gen_consttable_float (value
), insn
);
10236 case MODE_VECTOR_FLOAT
:
10237 case MODE_VECTOR_INT
:
/* Vectors are emitted element by element, recursing with the inner
   mode; INSN tracks the last instruction emitted so far.  */
10240 for (i
= 0; i
< CONST_VECTOR_NUNITS (value
); i
++)
10241 insn
= dump_constants_1 (GET_MODE_INNER (mode
),
10242 CONST_VECTOR_ELT (value
, i
), insn
);
/* Any other mode class is a bug in the caller.  */
10247 gcc_unreachable ();
10252 /* Dump out the constants in CONSTANTS after INSN. */
10255 dump_constants (struct mips16_constant
*constants
, rtx insn
)
10257 struct mips16_constant
*c
, *next
;
/* Walk the (mode-size-sorted) list, emitting alignment, label and
   value for each entry in turn after INSN.  */
10261 for (c
= constants
; c
!= NULL
; c
= next
)
10263 /* If necessary, increase the alignment of PC. */
10264 if (align
< GET_MODE_SIZE (c
->mode
))
10266 int align_log
= floor_log2 (GET_MODE_SIZE (c
->mode
));
10267 insn
= emit_insn_after (gen_align (GEN_INT (align_log
)), insn
);
10269 align
= GET_MODE_SIZE (c
->mode
);
/* Emit the constant's CODE_LABEL, then the constant itself.  */
10271 insn
= emit_label_after (c
->label
, insn
);
10272 insn
= dump_constants_1 (c
->mode
, c
->value
, insn
);
/* Terminate the pool so that the flow-control passes treat it as
   unreachable data.  */
10278 emit_barrier_after (insn
);
10281 /* Return the length of instruction INSN. */
10284 mips16_insn_length (rtx insn
)
10288 rtx body
= PATTERN (insn
);
/* Jump tables: total size is entry size times the number of entries.
   ADDR_VEC stores entries in operand vector 0, ADDR_DIFF_VEC in
   operand vector 1.  */
10289 if (GET_CODE (body
) == ADDR_VEC
)
10290 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 0);
10291 if (GET_CODE (body
) == ADDR_DIFF_VEC
)
10292 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 1);
/* Ordinary instructions: use the length attribute from the .md file.  */
10294 return get_attr_length (insn
);
10297 /* If *X is a symbolic constant that refers to the constant pool, add
10298 the constant to POOL and rewrite *X to use the constant's label. */
10301 mips16_rewrite_pool_constant (struct mips16_constant_pool
*pool
, rtx
*x
)
10303 rtx base
, offset
, label
;
/* Split *X into a base symbol/label and an integer offset.  */
10305 split_const (*x
, &base
, &offset
);
/* Only rewrite references into GCC's own constant pool.  */
10306 if (GET_CODE (base
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (base
))
/* Copy the pooled value into the mips16 pool and get its label.  */
10308 label
= add_constant (pool
, get_pool_constant (base
),
10309 get_pool_mode (base
));
/* Replace the symbol with a pc-relative reference to that label.  */
10310 base
= gen_rtx_LABEL_REF (Pmode
, label
);
10311 *x
= mips_unspec_address_offset (base
, offset
, SYMBOL_PC_RELATIVE
);
10315 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10316 INSN is the instruction we're rewriting and POOL points to the current
10318 struct mips16_rewrite_pool_refs_info
{
/* The constant pool being built.  NOTE(review): the INSN field mentioned
   above is not visible in this fragment.  */
10320 struct mips16_constant_pool
*pool
;
10323 /* Rewrite *X so that constant pool references refer to the constant's
10324 label instead. DATA points to a mips16_rewrite_pool_refs_info
10328 mips16_rewrite_pool_refs (rtx
*x
, void *data
)
/* for_each_rtx callback: DATA is really a mips16_rewrite_pool_refs_info.  */
10330 struct mips16_rewrite_pool_refs_info
*info
= data
;
/* Constants that must live in memory are first forced into the pool...  */
10332 if (force_to_mem_operand (*x
, Pmode
))
10334 rtx mem
= force_const_mem (GET_MODE (*x
), *x
);
10335 validate_change (info
->insn
, x
, mem
, false);
/* ...then the MEM's address is redirected at the mips16 pool label.  */
10340 mips16_rewrite_pool_constant (info
->pool
, &XEXP (*x
, 0));
/* With -mcode-readable, bare pool references can also appear directly.  */
10344 if (TARGET_MIPS16_TEXT_LOADS
)
10345 mips16_rewrite_pool_constant (info
->pool
, x
);
/* Returning -1 stops for_each_rtx descending into a CONST we have
   already handled; 0 continues the walk.  */
10347 return GET_CODE (*x
) == CONST
? -1 : 0;
10350 /* Build MIPS16 constant pools. */
10353 mips16_lay_out_constants (void)
10355 struct mips16_constant_pool pool
;
10356 struct mips16_rewrite_pool_refs_info info
;
/* Nothing to do unless mips16 pc-relative loads are in use.  */
10359 if (!TARGET_MIPS16_PCREL_LOADS
)
/* Start with an empty pool and scan the whole insn chain.  */
10363 memset (&pool
, 0, sizeof (pool
));
10364 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
10366 /* Rewrite constant pool references in INSN. */
10371 for_each_rtx (&PATTERN (insn
), mips16_rewrite_pool_refs
, &info
);
/* Track the running byte address of the instruction stream.  */
10374 pool
.insn_address
+= mips16_insn_length (insn
);
10376 if (pool
.first
!= NULL
)
10378 /* If there are no natural barriers between the first user of
10379 the pool and the highest acceptable address, we'll need to
10380 create a new instruction to jump around the constant pool.
10381 In the worst case, this instruction will be 4 bytes long.
10383 If it's too late to do this transformation after INSN,
10384 do it immediately before INSN. */
10385 if (barrier
== 0 && pool
.insn_address
+ 4 > pool
.highest_address
)
/* Synthesize "jump over the pool": label, jump, barrier.  */
10389 label
= gen_label_rtx ();
10391 jump
= emit_jump_insn_before (gen_jump (label
), insn
);
10392 JUMP_LABEL (jump
) = label
;
10393 LABEL_NUSES (label
) = 1;
10394 barrier
= emit_barrier_after (jump
);
10396 emit_label_after (label
, barrier
);
/* Account for the 4-byte jump we just inserted.  */
10397 pool
.insn_address
+= 4;
10400 /* See whether the constant pool is now out of range of the first
10401 user. If so, output the constants after the previous barrier.
10402 Note that any instructions between BARRIER and INSN (inclusive)
10403 will use negative offsets to refer to the pool. */
10404 if (pool
.insn_address
> pool
.highest_address
)
10406 dump_constants (pool
.first
, barrier
);
/* An existing barrier is also a natural place to dump the pool.  */
10410 else if (BARRIER_P (insn
))
/* Flush any remaining constants at the end of the function.  */
10414 dump_constants (pool
.first
, get_last_insn ());
10417 /* A temporary variable used by for_each_rtx callbacks, etc. */
/* Holds the insn currently being simulated, so that callbacks with a
   fixed (rtx *, void *) signature can reach it.  */
10418 static rtx mips_sim_insn
;
10420 /* A structure representing the state of the processor pipeline.
10421 Used by the mips_sim_* family of functions. */
10423 /* The maximum number of instructions that can be issued in a cycle.
10424 (Caches mips_issue_rate.) */
10425 unsigned int issue_rate
;
10427 /* The current simulation time. */
10429 /* NOTE(review): the TIME field declaration itself is not visible in
   this fragment, though mips_sim_wait_reg reads state->time.  */
10430 /* How many more instructions can be issued in the current cycle. */
10431 unsigned int insns_left
;
10433 /* LAST_SET[X].INSN is the last instruction to set register X.
10434 LAST_SET[X].TIME is the time at which that instruction was issued.
10435 INSN is null if no instruction has yet set register X. */
10439 } last_set
[FIRST_PSEUDO_REGISTER
];
10441 /* The pipeline's current DFA state. */
10445 /* Reset STATE to the initial simulation state. */
10448 mips_sim_reset (struct mips_sim
*state
)
/* A full issue window is available again...  */
10451 state
->insns_left
= state
->issue_rate
;
/* ...no register has a recorded last setter...  */
10452 memset (&state
->last_set
, 0, sizeof (state
->last_set
));
/* ...and the DFA automaton returns to its start state.  */
10453 state_reset (state
->dfa_state
);
10456 /* Initialize STATE before its first use. DFA_STATE points to an
10457 allocated but uninitialized DFA state. */
10460 mips_sim_init (struct mips_sim
*state
, state_t dfa_state
)
/* Cache the issue rate once; mips_sim_reset reads it every cycle.  */
10462 state
->issue_rate
= mips_issue_rate ();
10463 state
->dfa_state
= dfa_state
;
10464 mips_sim_reset (state
);
10467 /* Advance STATE by one clock cycle. */
10470 mips_sim_next_cycle (struct mips_sim
*state
)
/* Refill the issue window for the new cycle.  */
10473 state
->insns_left
= state
->issue_rate
;
/* A null insn advances the DFA by one cycle.  */
10474 state_transition (state
->dfa_state
, 0);
10477 /* Advance simulation state STATE until instruction INSN can read
10481 mips_sim_wait_reg (struct mips_sim
*state
, rtx insn
, rtx reg
)
/* Check every hard register that REG occupies in its mode.  */
10485 for (i
= 0; i
< HARD_REGNO_NREGS (REGNO (reg
), GET_MODE (reg
)); i
++)
10486 if (state
->last_set
[REGNO (reg
) + i
].insn
!= 0)
/* T = issue time of the last setter plus the DFA latency from the
   setter to INSN; the value is not readable before then.  */
10490 t
= state
->last_set
[REGNO (reg
) + i
].time
;
10491 t
+= insn_latency (state
->last_set
[REGNO (reg
) + i
].insn
, insn
);
/* Burn cycles until the value becomes available.  */
10492 while (state
->time
< t
)
10493 mips_sim_next_cycle (state
);
10497 /* A for_each_rtx callback. If *X is a register, advance simulation state
10498 DATA until mips_sim_insn can read the register's value. */
10501 mips_sim_wait_regs_2 (rtx
*x
, void *data
)
/* DATA is the struct mips_sim *; the insn comes from the file-scope
   mips_sim_insn variable set by mips_sim_wait_regs.  */
10504 mips_sim_wait_reg (data
, mips_sim_insn
, *x
);
10508 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10511 mips_sim_wait_regs_1 (rtx
*x
, void *data
)
/* Simply fan out over every sub-rtx of *X.  */
10513 for_each_rtx (x
, mips_sim_wait_regs_2
, data
);
10516 /* Advance simulation state STATE until all of INSN's register
10517 dependencies are satisfied. */
10520 mips_sim_wait_regs (struct mips_sim
*state
, rtx insn
)
/* Stash INSN where the fixed-signature callbacks can see it.  */
10522 mips_sim_insn
= insn
;
/* note_uses visits only the inputs of INSN's pattern.  */
10523 note_uses (&PATTERN (insn
), mips_sim_wait_regs_1
, state
);
10526 /* Advance simulation state STATE until the units required by
10527 instruction INSN are available. */
10530 mips_sim_wait_units (struct mips_sim
*state
, rtx insn
)
/* Probe issuing INSN on a scratch copy of the DFA state so the real
   state is not modified by the test.  */
10534 tmp_state
= alloca (state_size ());
/* Loop while the issue window is empty, or while the trial transition
   reports a stall (state_transition >= 0 means INSN cannot issue).  */
10535 while (state
->insns_left
== 0
10536 || (memcpy (tmp_state
, state
->dfa_state
, state_size ()),
10537 state_transition (tmp_state
, insn
) >= 0))
10538 mips_sim_next_cycle (state
);
10541 /* Advance simulation state STATE until INSN is ready to issue. */
10544 mips_sim_wait_insn (struct mips_sim
*state
, rtx insn
)
/* First satisfy register dependencies, then functional-unit hazards.  */
10546 mips_sim_wait_regs (state
, insn
);
10547 mips_sim_wait_units (state
, insn
);
10550 /* mips_sim_insn has just set X. Update the LAST_SET array
10551 in simulation state DATA. */
10554 mips_sim_record_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
/* note_stores callback: DATA is really a struct mips_sim *.  */
10556 struct mips_sim
*state
;
/* Record the setter and its issue time for every hard register that
   X occupies.  */
10561 for (i
= 0; i
< HARD_REGNO_NREGS (REGNO (x
), GET_MODE (x
)); i
++)
10563 state
->last_set
[REGNO (x
) + i
].insn
= mips_sim_insn
;
10564 state
->last_set
[REGNO (x
) + i
].time
= state
->time
;
10568 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10569 can issue immediately (i.e., that mips_sim_wait_insn has already
10573 mips_sim_issue_insn (struct mips_sim
*state
, rtx insn
)
/* Commit INSN to the DFA and consume one issue slot.  */
10575 state_transition (state
->dfa_state
, insn
);
10576 state
->insns_left
--;
/* Let mips_sim_record_set know which insn is doing the stores.  */
10578 mips_sim_insn
= insn
;
10579 note_stores (PATTERN (insn
), mips_sim_record_set
, state
);
10582 /* Simulate issuing a NOP in state STATE. */
10585 mips_sim_issue_nop (struct mips_sim
*state
)
/* If the current cycle's issue window is full, start a new cycle;
   either way the nop consumes one issue slot.  */
10587 if (state
->insns_left
== 0)
10588 mips_sim_next_cycle (state
);
10589 state
->insns_left
--;
10592 /* Update simulation state STATE so that it's ready to accept the instruction
10593 after INSN. INSN should be part of the main rtl chain, not a member of a
10597 mips_sim_finish_insn (struct mips_sim
*state
, rtx insn
)
10599 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10601 mips_sim_issue_nop (state
);
/* Dispatch on the code of the first insn in INSN (or of INSN itself
   when it is not a SEQUENCE).  */
10603 switch (GET_CODE (SEQ_BEGIN (insn
)))
10607 /* We can't predict the processor state after a call or label. */
10608 mips_sim_reset (state
);
10612 /* The delay slots of branch likely instructions are only executed
10613 when the branch is taken. Therefore, if the caller has simulated
10614 the delay slot instruction, STATE does not really reflect the state
10615 of the pipeline for the instruction after the delay slot. Also,
10616 branch likely instructions tend to incur a penalty when not taken,
10617 so there will probably be an extra delay between the branch and
10618 the instruction after the delay slot. */
10619 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn
)))
10620 mips_sim_reset (state
);
10628 /* The VR4130 pipeline issues aligned pairs of instructions together,
10629 but it stalls the second instruction if it depends on the first.
10630 In order to cut down the amount of logic required, this dependence
10631 check is not based on a full instruction decode. Instead, any non-SPECIAL
10632 instruction is assumed to modify the register specified by bits 20-16
10633 (which is usually the "rt" field).
10635 In beq, beql, bne and bnel instructions, the rt field is actually an
10636 input, so we can end up with a false dependence between the branch
10637 and its delay slot. If this situation occurs in instruction INSN,
10638 try to avoid it by swapping rs and rt. */
10641 vr4130_avoid_branch_rt_conflict (rtx insn
)
/* FIRST is the branch itself, SECOND its delay-slot insn (when INSN is
   a SEQUENCE).  */
10645 first
= SEQ_BEGIN (insn
);
10646 second
= SEQ_END (insn
);
/* Only a conditional branch of the form (set pc (if_then_else ...))
   with a real insn in the delay slot is of interest.  */
10648 && NONJUMP_INSN_P (second
)
10649 && GET_CODE (PATTERN (first
)) == SET
10650 && GET_CODE (SET_DEST (PATTERN (first
))) == PC
10651 && GET_CODE (SET_SRC (PATTERN (first
))) == IF_THEN_ELSE
)
10653 /* Check for the right kind of condition. */
10654 rtx cond
= XEXP (SET_SRC (PATTERN (first
)), 0);
/* EQ/NE of two registers (beq/bne family); swapping operands is then
   semantically neutral.  */
10655 if ((GET_CODE (cond
) == EQ
|| GET_CODE (cond
) == NE
)
10656 && REG_P (XEXP (cond
, 0))
10657 && REG_P (XEXP (cond
, 1))
10658 && reg_referenced_p (XEXP (cond
, 1), PATTERN (second
))
10659 && !reg_referenced_p (XEXP (cond
, 0), PATTERN (second
)))
10661 /* SECOND mentions the rt register but not the rs register. */
/* Swap rs and rt so the false dependence disappears.  */
10662 rtx tmp
= XEXP (cond
, 0);
10663 XEXP (cond
, 0) = XEXP (cond
, 1);
10664 XEXP (cond
, 1) = tmp
;
10669 /* Implement -mvr4130-align. Go through each basic block and simulate the
10670 processor pipeline. If we find that a pair of instructions could execute
10671 in parallel, and the first of those instruction is not 8-byte aligned,
10672 insert a nop to make it aligned. */
10675 vr4130_align_insns (void)
10677 struct mips_sim state
;
10678 rtx insn
, subinsn
, last
, last2
, next
;
10683 /* LAST is the last instruction before INSN to have a nonzero length.
10684 LAST2 is the last such instruction before LAST. */
10688 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
/* Allocate a fresh DFA state on the stack for this simulation run.  */
10691 mips_sim_init (&state
, alloca (state_size ()));
10692 for (insn
= get_insns (); insn
!= 0; insn
= next
)
10694 unsigned int length
;
10696 next
= NEXT_INSN (insn
);
10698 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10699 This isn't really related to the alignment pass, but we do it on
10700 the fly to avoid a separate instruction walk. */
10701 vr4130_avoid_branch_rt_conflict (insn
);
10703 if (USEFUL_INSN_P (insn
))
10704 FOR_EACH_SUBINSN (subinsn
, insn
)
/* Advance the simulator to the point where SUBINSN could issue.  */
10706 mips_sim_wait_insn (&state
, subinsn
);
10708 /* If we want this instruction to issue in parallel with the
10709 previous one, make sure that the previous instruction is
10710 aligned. There are several reasons why this isn't worthwhile
10711 when the second instruction is a call:
10713 - Calls are less likely to be performance critical,
10714 - There's a good chance that the delay slot can execute
10715 in parallel with the call.
10716 - The return address would then be unaligned.
10718 In general, if we're going to insert a nop between instructions
10719 X and Y, it's better to insert it immediately after X. That
10720 way, if the nop makes Y aligned, it will also align any labels
10721 between X and Y. */
/* insns_left != issue_rate means SUBINSN would dual-issue with the
   previous instruction.  */
10722 if (state
.insns_left
!= state
.issue_rate
10723 && !CALL_P (subinsn
))
10725 if (subinsn
== SEQ_BEGIN (insn
) && aligned_p
)
10727 /* SUBINSN is the first instruction in INSN and INSN is
10728 aligned. We want to align the previous instruction
10729 instead, so insert a nop between LAST2 and LAST.
10731 Note that LAST could be either a single instruction
10732 or a branch with a delay slot. In the latter case,
10733 LAST, like INSN, is already aligned, but the delay
10734 slot must have some extra delay that stops it from
10735 issuing at the same time as the branch. We therefore
10736 insert a nop before the branch in order to align its
10738 emit_insn_after (gen_nop (), last2
);
10741 else if (subinsn
!= SEQ_BEGIN (insn
) && !aligned_p
)
10743 /* SUBINSN is the delay slot of INSN, but INSN is
10744 currently unaligned. Insert a nop between
10745 LAST and INSN to align it. */
10746 emit_insn_after (gen_nop (), last
);
/* Commit SUBINSN to the simulated pipeline.  */
10750 mips_sim_issue_insn (&state
, subinsn
);
10752 mips_sim_finish_insn (&state
, insn
);
10754 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10755 length
= get_attr_length (insn
);
10758 /* If the instruction is an asm statement or multi-instruction
10759 mips.md patern, the length is only an estimate. Insert an
10760 8 byte alignment after it so that the following instructions
10761 can be handled correctly. */
10762 if (NONJUMP_INSN_P (SEQ_BEGIN (insn
))
10763 && (recog_memoized (insn
) < 0 || length
>= 8))
/* gen_align (3) forces 2^3 = 8-byte alignment; re-sync the DFA.  */
10765 next
= emit_insn_after (gen_align (GEN_INT (3)), insn
);
10766 next
= NEXT_INSN (next
);
10767 mips_sim_next_cycle (&state
);
/* A 4-byte-odd length flips the 8-byte alignment parity.  */
10770 else if (length
& 4)
10771 aligned_p
= !aligned_p
;
10776 /* See whether INSN is an aligned label. */
10777 if (LABEL_P (insn
) && label_to_alignment (insn
) >= 3)
10783 /* Subroutine of mips_reorg. If there is a hazard between INSN
10784 and a previous instruction, avoid it by inserting nops after
10787 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10788 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10789 before using the value of that register. *HILO_DELAY counts the
10790 number of instructions since the last hilo hazard (that is,
10791 the number of instructions since the last mflo or mfhi).
10793 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10794 for the next instruction.
10796 LO_REG is an rtx for the LO register, used in dependence checking. */
10799 mips_avoid_hazard (rtx after
, rtx insn
, int *hilo_delay
,
10800 rtx
*delayed_reg
, rtx lo_reg
)
/* Notes, labels etc. cannot create hazards.  */
10805 if (!INSN_P (insn
))
10808 pattern
= PATTERN (insn
);
10810 /* Do not put the whole function in .set noreorder if it contains
10811 an asm statement. We don't know whether there will be hazards
10812 between the asm statement and the gcc-generated code. */
10813 if (GET_CODE (pattern
) == ASM_INPUT
|| asm_noperands (pattern
) >= 0)
10814 cfun
->machine
->all_noreorder_p
= false;
10816 /* Ignore zero-length instructions (barriers and the like). */
/* Length is in bytes; each MIPS insn is 4 bytes here.  */
10817 ninsns
= get_attr_length (insn
) / 4;
10821 /* Work out how many nops are needed. Note that we only care about
10822 registers that are explicitly mentioned in the instruction's pattern.
10823 It doesn't matter that calls use the argument registers or that they
10824 clobber hi and lo. */
10825 if (*hilo_delay
< 2 && reg_set_p (lo_reg
, pattern
))
10826 nops
= 2 - *hilo_delay
;
10827 else if (*delayed_reg
!= 0 && reg_referenced_p (*delayed_reg
, pattern
))
10832 /* Insert the nops between this instruction and the previous one.
10833 Each new nop takes us further from the last hilo hazard. */
10834 *hilo_delay
+= nops
;
10836 emit_insn_after (gen_hazard_nop (), after
);
10838 /* Set up the state for the next instruction. */
10839 *hilo_delay
+= ninsns
;
/* Record any new hazard that INSN itself introduces, as given by the
   .md "hazard" attribute.  */
10841 if (INSN_CODE (insn
) >= 0)
10842 switch (get_attr_hazard (insn
))
/* HAZARD_DELAY case: the destination register of INSN must not be
   used by the immediately following instruction.  */
10852 set
= single_set (insn
);
10853 gcc_assert (set
!= 0);
10854 *delayed_reg
= SET_DEST (set
);
10860 /* Go through the instruction stream and insert nops where necessary.
10861 See if the whole function can then be put into .set noreorder &
10865 mips_avoid_hazards (void)
10867 rtx insn
, last_insn
, lo_reg
, delayed_reg
;
10870 /* Force all instructions to be split into their final form. */
10871 split_all_insns_noflow ();
10873 /* Recalculate instruction lengths without taking nops into account. */
10874 cfun
->machine
->ignore_hazard_length_p
= true;
10875 shorten_branches (get_insns ());
/* Assume the whole function can be .set noreorder until proven
   otherwise below (and in mips_avoid_hazard).  */
10877 cfun
->machine
->all_noreorder_p
= true;
10879 /* Profiled functions can't be all noreorder because the profiler
10880 support uses assembler macros. */
10881 if (current_function_profile
)
10882 cfun
->machine
->all_noreorder_p
= false;
10884 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10885 we rely on the assembler to work around some errata. */
10886 if (TARGET_FIX_VR4120
)
10887 cfun
->machine
->all_noreorder_p
= false;
10889 /* The same is true for -mfix-vr4130 if we might generate mflo or
10890 mfhi instructions. Note that we avoid using mflo and mfhi if
10891 the VR4130 macc and dmacc instructions are available instead;
10892 see the *mfhilo_{si,di}_macc patterns. */
10893 if (TARGET_FIX_VR4130
&& !ISA_HAS_MACCHI
)
10894 cfun
->machine
->all_noreorder_p
= false;
/* LO_REG is used by mips_avoid_hazard to detect hi/lo hazards.  */
10899 lo_reg
= gen_rtx_REG (SImode
, LO_REGNUM
);
/* Scan every insn; SEQUENCEs (filled delay slots) are walked
   element by element so each member is hazard-checked.  */
10901 for (insn
= get_insns (); insn
!= 0; insn
= NEXT_INSN (insn
))
10904 if (GET_CODE (PATTERN (insn
)) == SEQUENCE
)
10905 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
10906 mips_avoid_hazard (last_insn
, XVECEXP (PATTERN (insn
), 0, i
),
10907 &hilo_delay
, &delayed_reg
, lo_reg
);
10909 mips_avoid_hazard (last_insn
, insn
, &hilo_delay
,
10910 &delayed_reg
, lo_reg
);
10917 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
/* NOTE(review): the function header itself is elided from this
   fragment; the calls below form the pass body.  */
10922 mips16_lay_out_constants ();
10923 if (TARGET_EXPLICIT_RELOCS
)
/* Delayed-branch scheduling must run before hazard avoidance so that
   nop insertion sees the final insn order.  */
10925 if (mips_flag_delayed_branch
)
10926 dbr_schedule (get_insns ());
10927 mips_avoid_hazards ();
10928 if (TUNE_MIPS4130
&& TARGET_VR4130_ALIGN
)
10929 vr4130_align_insns ();
10933 /* This function does three things:
10935 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10936 - Register the mips16 hardware floating point stubs.
10937 - Register the gofast functions if selected using --enable-gofast. */
10939 #include "config/gofast.h"
10942 mips_init_libfuncs (void)
/* VR4120 errata: route SImode division through fixed library
   routines instead of the hardware div/mod instructions.  */
10944 if (TARGET_FIX_VR4120
)
10946 set_optab_libfunc (sdiv_optab
, SImode
, "__vr4120_divsi3");
10947 set_optab_libfunc (smod_optab
, SImode
, "__vr4120_modsi3");
/* mips16 code cannot touch the FPU directly, so all float operations
   go through out-of-line stubs when using a hard-float ABI.  */
10950 if (TARGET_MIPS16
&& TARGET_HARD_FLOAT_ABI
)
/* Single-precision arithmetic.  */
10952 set_optab_libfunc (add_optab
, SFmode
, "__mips16_addsf3");
10953 set_optab_libfunc (sub_optab
, SFmode
, "__mips16_subsf3");
10954 set_optab_libfunc (smul_optab
, SFmode
, "__mips16_mulsf3");
10955 set_optab_libfunc (sdiv_optab
, SFmode
, "__mips16_divsf3");
/* Single-precision comparisons.  */
10957 set_optab_libfunc (eq_optab
, SFmode
, "__mips16_eqsf2");
10958 set_optab_libfunc (ne_optab
, SFmode
, "__mips16_nesf2");
10959 set_optab_libfunc (gt_optab
, SFmode
, "__mips16_gtsf2");
10960 set_optab_libfunc (ge_optab
, SFmode
, "__mips16_gesf2");
10961 set_optab_libfunc (lt_optab
, SFmode
, "__mips16_ltsf2");
10962 set_optab_libfunc (le_optab
, SFmode
, "__mips16_lesf2");
10963 set_optab_libfunc (unord_optab
, SFmode
, "__mips16_unordsf2");
/* Single-precision <-> integer conversions.  */
10965 set_conv_libfunc (sfix_optab
, SImode
, SFmode
, "__mips16_fix_truncsfsi");
10966 set_conv_libfunc (sfloat_optab
, SFmode
, SImode
, "__mips16_floatsisf");
10967 set_conv_libfunc (ufloat_optab
, SFmode
, SImode
, "__mips16_floatunsisf");
/* Double-precision stubs are only needed when the ABI has doubles.  */
10969 if (TARGET_DOUBLE_FLOAT
)
10971 set_optab_libfunc (add_optab
, DFmode
, "__mips16_adddf3");
10972 set_optab_libfunc (sub_optab
, DFmode
, "__mips16_subdf3");
10973 set_optab_libfunc (smul_optab
, DFmode
, "__mips16_muldf3");
10974 set_optab_libfunc (sdiv_optab
, DFmode
, "__mips16_divdf3");
10976 set_optab_libfunc (eq_optab
, DFmode
, "__mips16_eqdf2");
10977 set_optab_libfunc (ne_optab
, DFmode
, "__mips16_nedf2");
10978 set_optab_libfunc (gt_optab
, DFmode
, "__mips16_gtdf2");
10979 set_optab_libfunc (ge_optab
, DFmode
, "__mips16_gedf2");
10980 set_optab_libfunc (lt_optab
, DFmode
, "__mips16_ltdf2");
10981 set_optab_libfunc (le_optab
, DFmode
, "__mips16_ledf2");
10982 set_optab_libfunc (unord_optab
, DFmode
, "__mips16_unorddf2");
/* Double <-> single and double <-> integer conversions.  */
10984 set_conv_libfunc (sext_optab
, DFmode
, SFmode
, "__mips16_extendsfdf2");
10985 set_conv_libfunc (trunc_optab
, SFmode
, DFmode
, "__mips16_truncdfsf2");
10987 set_conv_libfunc (sfix_optab
, SImode
, DFmode
, "__mips16_fix_truncdfsi");
10988 set_conv_libfunc (sfloat_optab
, DFmode
, SImode
, "__mips16_floatsidf");
10989 set_conv_libfunc (ufloat_optab
, DFmode
, SImode
, "__mips16_floatunsidf");
/* Optionally overlay the gofast software-FP entry points.  */
10993 gofast_maybe_init_libfuncs ();
10996 /* Return a number assessing the cost of moving a register in class
10997 FROM to class TO. The classes are expressed using the enumeration
10998 values such as `GENERAL_REGS'. A value of 2 is the default; other
10999 values are interpreted relative to that.
11001 It is not required that the cost always equal 2 when FROM is the
11002 same as TO; on some machines it is expensive to move between
11003 registers if they are not general registers.
11005 If reload sees an insn consisting of a single `set' between two
11006 hard registers, and if `REGISTER_MOVE_COST' applied to their
11007 classes returns a value of 2, reload does not check to ensure that
11008 the constraints of the insn are met. Setting a cost of other than
11009 2 will allow reload to verify that the constraints are met. You
11010 should do this if the `movM' pattern's constraints do not allow
11013 ??? We make the cost of moving from HI/LO into general
11014 registers the same as for one of moving general registers to
11015 HI/LO for TARGET_MIPS16 in order to prevent allocating a
11016 pseudo to HI/LO. This might hurt optimizations though, it
11017 isn't clear if it is wise. And it might not work in all cases. We
11018 could solve the DImode LO reg problem by using a multiply, just
11019 like reload_{in,out}si. We could solve the SImode/HImode HI reg
11020 problem by using divide instructions. divu puts the remainder in
11021 the HI reg, so doing a divide by -1 will move the value in the HI
11022 reg for all values except -1. We could handle that case by using a
11023 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
11024 a compare/branch to test the input value to see which instruction
11025 we need to use. This gets pretty messy, but it is feasible. */
11028 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED
,
11029 enum reg_class to
, enum reg_class from
)
/* mips16-addressable GPR subclasses first.
   NOTE(review): the literal cost constants returned in each branch are
   elided from this fragment -- only the classification chain remains.  */
11031 if (from
== M16_REGS
&& reg_class_subset_p (to
, GENERAL_REGS
))
11033 else if (from
== M16_NA_REGS
&& reg_class_subset_p (to
, GENERAL_REGS
))
/* Moves out of the general registers...  */
11035 else if (reg_class_subset_p (from
, GENERAL_REGS
))
11037 if (to
== M16_REGS
)
11039 else if (to
== M16_NA_REGS
)
11041 else if (reg_class_subset_p (to
, GENERAL_REGS
))
11048 else if (to
== FP_REGS
)
11050 else if (reg_class_subset_p (to
, ACC_REGS
))
11057 else if (reg_class_subset_p (to
, ALL_COP_REGS
))
/* Moves out of the floating-point registers.  */
11062 else if (from
== FP_REGS
)
11064 if (reg_class_subset_p (to
, GENERAL_REGS
))
11066 else if (to
== FP_REGS
)
11068 else if (to
== ST_REGS
)
/* Moves out of the hi/lo accumulators.  */
11071 else if (reg_class_subset_p (from
, ACC_REGS
))
11073 if (reg_class_subset_p (to
, GENERAL_REGS
))
/* Condition-code and coprocessor registers.  */
11081 else if (from
== ST_REGS
&& reg_class_subset_p (to
, GENERAL_REGS
))
11083 else if (reg_class_subset_p (from
, ALL_COP_REGS
))
11089 ??? What cases are these? Shouldn't we return 2 here? */
11094 /* Return the length of INSN. LENGTH is the initial length computed by
11095 attributes in the machine-description file. */
11098 mips_adjust_insn_length (rtx insn
, int length
)
11100 /* A unconditional jump has an unfilled delay slot if it is not part
11101 of a sequence. A conditional jump normally has a delay slot, but
11102 does not on MIPS16. */
11103 if (CALL_P (insn
) || (TARGET_MIPS16
? simplejump_p (insn
) : JUMP_P (insn
)))
11106 /* See how many nops might be needed to avoid hardware hazards. */
/* Skipped when ignore_hazard_length_p is set by mips_avoid_hazards,
   which wants lengths without hazard nops.  */
11107 if (!cfun
->machine
->ignore_hazard_length_p
&& INSN_CODE (insn
) >= 0)
11108 switch (get_attr_hazard (insn
))
11122 /* All MIPS16 instructions are a measly two bytes. */
11130 /* Return an asm sequence to start a noat block and load the address
11131 of a label into $1. */
11134 mips_output_load_label (void)
/* With explicit relocs, build the address from %got_page/%got_ofst
   pieces; the 64-bit variant uses ld/daddiu.  */
11136 if (TARGET_EXPLICIT_RELOCS
)
11140 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
11143 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* Classic GOT: lw then addiu, with a padding nop ("%#") when the ISA
   has a load delay slot.  */
11146 if (ISA_HAS_LOAD_DELAY
)
11147 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
11148 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-GOT code: let the assembler expand la/dla.  */
11152 if (Pmode
== DImode
)
11153 return "%[dla\t%@,%0";
11155 return "%[la\t%@,%0";
11159 /* Return the assembly code for INSN, which has the operands given by
11160 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
11161 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
11162 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
11163 version of BRANCH_IF_TRUE. */
11166 mips_output_conditional_branch (rtx insn
, rtx
*operands
,
11167 const char *branch_if_true
,
11168 const char *branch_if_false
)
11170 unsigned int length
;
11171 rtx taken
, not_taken
;
11173 length
= get_attr_length (insn
);
/* Short branch: the target is in range, emit the template directly.  */
11176 /* Just a simple conditional branch. */
11177 mips_branch_likely
= (final_sequence
&& INSN_ANNULLED_BRANCH_P (insn
));
11178 return branch_if_true
;
11181 /* Generate a reversed branch around a direct jump. This fallback does
11182 not use branch-likely instructions. */
11183 mips_branch_likely
= false;
11184 not_taken
= gen_label_rtx ();
11185 taken
= operands
[1];
11187 /* Generate the reversed branch to NOT_TAKEN. */
11188 operands
[1] = not_taken
;
11189 output_asm_insn (branch_if_false
, operands
);
11191 /* If INSN has a delay slot, we must provide delay slots for both the
11192 branch to NOT_TAKEN and the conditional jump. We must also ensure
11193 that INSN's delay slot is executed in the appropriate cases. */
11194 if (final_sequence
)
11196 /* This first delay slot will always be executed, so use INSN's
11197 delay slot if is not annulled. */
11198 if (!INSN_ANNULLED_BRANCH_P (insn
))
11200 final_scan_insn (XVECEXP (final_sequence
, 0, 1),
11201 asm_out_file
, optimize
, 1, NULL
);
/* Mark the delay-slot insn as already output so final does not
   emit it a second time.  */
11202 INSN_DELETED_P (XVECEXP (final_sequence
, 0, 1)) = 1;
/* Annulled branch: this slot must not execute on fall-through, so
   fill it with a nop instead.  */
11205 output_asm_insn ("nop", 0);
11206 fprintf (asm_out_file
, "\n");
11209 /* Output the unconditional branch to TAKEN. */
/* Direct jump when TAKEN is reachable...  */
11211 output_asm_insn ("j\t%0%/", &taken
);
/* ...otherwise load the target address into $1 and jump through it.  */
11214 output_asm_insn (mips_output_load_label (), &taken
);
11215 output_asm_insn ("jr\t%@%]%/", 0);
11218 /* Now deal with its delay slot; see above. */
11219 if (final_sequence
)
11221 /* This delay slot will only be executed if the branch is taken.
11222 Use INSN's delay slot if is annulled. */
11223 if (INSN_ANNULLED_BRANCH_P (insn
))
11225 final_scan_insn (XVECEXP (final_sequence
, 0, 1),
11226 asm_out_file
, optimize
, 1, NULL
);
11227 INSN_DELETED_P (XVECEXP (final_sequence
, 0, 1)) = 1;
11230 output_asm_insn ("nop", 0);
11231 fprintf (asm_out_file
, "\n");
11234 /* Output NOT_TAKEN. */
11235 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
11236 CODE_LABEL_NUMBER (not_taken
));
11240 /* Return the assembly code for INSN, which branches to OPERANDS[1]
11241 if some ordered condition is true. The condition is given by
11242 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
11243 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
11244 its second is always zero. */
11247 mips_output_order_conditional_branch (rtx insn
, rtx
*operands
, bool inverted_p
)
11249 const char *branch
[2];
11251 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
11252 Make BRANCH[0] branch on the inverse condition. */
11253 switch (GET_CODE (operands
[0]))
11255 /* These cases are equivalent to comparisons against zero. */
11257 inverted_p
= !inverted_p
;
11258 /* Fall through. */
11260 branch
[!inverted_p
] = MIPS_BRANCH ("bne", "%2,%.,%1");
11261 branch
[inverted_p
] = MIPS_BRANCH ("beq", "%2,%.,%1");
11264 /* These cases are always true or always false. */
11266 inverted_p
= !inverted_p
;
11267 /* Fall through. */
11269 branch
[!inverted_p
] = MIPS_BRANCH ("beq", "%.,%.,%1");
11270 branch
[inverted_p
] = MIPS_BRANCH ("bne", "%.,%.,%1");
11274 branch
[!inverted_p
] = MIPS_BRANCH ("b%C0z", "%2,%1");
11275 branch
[inverted_p
] = MIPS_BRANCH ("b%N0z", "%2,%1");
11278 return mips_output_conditional_branch (insn
, operands
, branch
[1], branch
[0]);
11281 /* Used to output div or ddiv instruction DIVISION, which has the operands
11282 given by OPERANDS. Add in a divide-by-zero check if needed.
11284 When working around R4000 and R4400 errata, we need to make sure that
11285 the division is not immediately followed by a shift[1][2]. We also
11286 need to stop the division from being put into a branch delay slot[3].
11287 The easiest way to avoid both problems is to add a nop after the
11288 division. When a divide-by-zero check is needed, this nop can be
11289 used to fill the branch delay slot.
11291 [1] If a double-word or a variable shift executes immediately
11292 after starting an integer division, the shift may give an
11293 incorrect result. See quotations of errata #16 and #28 from
11294 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11295 in mips.md for details.
11297 [2] A similar bug to [1] exists for all revisions of the
11298 R4000 and the R4400 when run in an MC configuration.
11299 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11301 "19. In this following sequence:
11303 ddiv (or ddivu or div or divu)
11304 dsll32 (or dsrl32, dsra32)
11306 if an MPT stall occurs, while the divide is slipping the cpu
11307 pipeline, then the following double shift would end up with an
11310 Workaround: The compiler needs to avoid generating any
11311 sequence with divide followed by extended double shift."
11313 This erratum is also present in "MIPS R4400MC Errata, Processor
11314 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11315 & 3.0" as errata #10 and #4, respectively.
11317 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11318 (also valid for MIPS R4000MC processors):
11320 "52. R4000SC: This bug does not apply for the R4000PC.
11322 There are two flavors of this bug:
11324 1) If the instruction just after divide takes an RF exception
11325 (tlb-refill, tlb-invalid) and gets an instruction cache
11326 miss (both primary and secondary) and the line which is
11327 currently in secondary cache at this index had the first
11328 data word, where the bits 5..2 are set, then R4000 would
11329 get a wrong result for the div.
11334 ------------------- # end-of page. -tlb-refill
11339 ------------------- # end-of page. -tlb-invalid
11342 2) If the divide is in the taken branch delay slot, where the
11343 target takes RF exception and gets an I-cache miss for the
11344 exception vector or where I-cache miss occurs for the
11345 target address, under the above mentioned scenarios, the
11346 div would get wrong results.
11349 j r2 # to next page mapped or unmapped
11350 div r8,r9 # this bug would be there as long
11351 # as there is an ICache miss and
11352 nop # the "data pattern" is present
11355 beq r0, r0, NextPage # to Next page
11359 This bug is present for div, divu, ddiv, and ddivu
11362 Workaround: For item 1), OS could make sure that the next page
11363 after the divide instruction is also mapped. For item 2), the
11364 compiler could make sure that the divide instruction is not in
11365 the branch delay slot."
11367 These processors have PRId values of 0x00004220 and 0x00004300 for
11368 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
11371 mips_output_division (const char *division
, rtx
*operands
)
11376 if (TARGET_FIX_R4000
|| TARGET_FIX_R4400
)
11378 output_asm_insn (s
, operands
);
11381 if (TARGET_CHECK_ZERO_DIV
)
11385 output_asm_insn (s
, operands
);
11386 s
= "bnez\t%2,1f\n\tbreak\t7\n1:";
11388 else if (GENERATE_DIVIDE_TRAPS
)
11390 output_asm_insn (s
, operands
);
11391 s
= "teq\t%2,%.,7";
11395 output_asm_insn ("%(bne\t%2,%.,1f", operands
);
11396 output_asm_insn (s
, operands
);
11397 s
= "break\t7%)\n1:";
11403 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11404 with a final "000" replaced by "k". Ignore case.
11406 Note: this function is shared between GCC and GAS. */
11409 mips_strict_matching_cpu_name_p (const char *canonical
, const char *given
)
11411 while (*given
!= 0 && TOLOWER (*given
) == TOLOWER (*canonical
))
11412 given
++, canonical
++;
11414 return ((*given
== 0 && *canonical
== 0)
11415 || (strcmp (canonical
, "000") == 0 && strcasecmp (given
, "k") == 0));
11419 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11420 CPU name. We've traditionally allowed a lot of variation here.
11422 Note: this function is shared between GCC and GAS. */
11425 mips_matching_cpu_name_p (const char *canonical
, const char *given
)
11427 /* First see if the name matches exactly, or with a final "000"
11428 turned into "k". */
11429 if (mips_strict_matching_cpu_name_p (canonical
, given
))
11432 /* If not, try comparing based on numerical designation alone.
11433 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11434 if (TOLOWER (*given
) == 'r')
11436 if (!ISDIGIT (*given
))
11439 /* Skip over some well-known prefixes in the canonical name,
11440 hoping to find a number there too. */
11441 if (TOLOWER (canonical
[0]) == 'v' && TOLOWER (canonical
[1]) == 'r')
11443 else if (TOLOWER (canonical
[0]) == 'r' && TOLOWER (canonical
[1]) == 'm')
11445 else if (TOLOWER (canonical
[0]) == 'r')
11448 return mips_strict_matching_cpu_name_p (canonical
, given
);
11452 /* Return the mips_cpu_info entry for the processor or ISA given
11453 by CPU_STRING. Return null if the string isn't recognized.
11455 A similar function exists in GAS. */
11457 static const struct mips_cpu_info
*
11458 mips_parse_cpu (const char *cpu_string
)
11460 const struct mips_cpu_info
*p
;
11463 /* In the past, we allowed upper-case CPU names, but it doesn't
11464 work well with the multilib machinery. */
11465 for (s
= cpu_string
; *s
!= 0; s
++)
11468 warning (0, "the cpu name must be lower case");
11472 /* 'from-abi' selects the most compatible architecture for the given
11473 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11474 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11475 version. Look first at the -mgp options, if given, otherwise base
11476 the choice on MASK_64BIT in TARGET_DEFAULT. */
11477 if (strcasecmp (cpu_string
, "from-abi") == 0)
11478 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS
? 1
11479 : ABI_NEEDS_64BIT_REGS
? 3
11480 : (TARGET_64BIT
? 3 : 1));
11482 /* 'default' has traditionally been a no-op. Probably not very useful. */
11483 if (strcasecmp (cpu_string
, "default") == 0)
11486 for (p
= mips_cpu_info_table
; p
->name
!= 0; p
++)
11487 if (mips_matching_cpu_name_p (p
->name
, cpu_string
))
11494 /* Return the processor associated with the given ISA level, or null
11495 if the ISA isn't valid. */
11497 static const struct mips_cpu_info
*
11498 mips_cpu_info_from_isa (int isa
)
11500 const struct mips_cpu_info
*p
;
11502 for (p
= mips_cpu_info_table
; p
->name
!= 0; p
++)
11509 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
11510 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
11511 they only hold condition code modes, and CCmode is always considered to
11512 be 4 bytes wide. All other registers are word sized. */
11515 mips_hard_regno_nregs (int regno
, enum machine_mode mode
)
11517 if (ST_REG_P (regno
))
11518 return ((GET_MODE_SIZE (mode
) + 3) / 4);
11519 else if (! FP_REG_P (regno
))
11520 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
11522 return ((GET_MODE_SIZE (mode
) + UNITS_PER_FPREG
- 1) / UNITS_PER_FPREG
);
11525 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
11526 all BLKmode objects are returned in memory. Under the new (N32 and
11527 64-bit MIPS ABIs) small structures are returned in a register.
11528 Objects with varying size must still be returned in memory, of
11532 mips_return_in_memory (const_tree type
, const_tree fndecl ATTRIBUTE_UNUSED
)
11535 return (TYPE_MODE (type
) == BLKmode
);
11537 return ((int_size_in_bytes (type
) > (2 * UNITS_PER_WORD
))
11538 || (int_size_in_bytes (type
) == -1));
11542 mips_strict_argument_naming (CUMULATIVE_ARGS
*ca ATTRIBUTE_UNUSED
)
11544 return !TARGET_OLDABI
;
11547 /* Return true if INSN is a multiply-add or multiply-subtract
11548 instruction and PREV assigns to the accumulator operand. */
11551 mips_linked_madd_p (rtx prev
, rtx insn
)
11555 x
= single_set (insn
);
11561 if (GET_CODE (x
) == PLUS
11562 && GET_CODE (XEXP (x
, 0)) == MULT
11563 && reg_set_p (XEXP (x
, 1), prev
))
11566 if (GET_CODE (x
) == MINUS
11567 && GET_CODE (XEXP (x
, 1)) == MULT
11568 && reg_set_p (XEXP (x
, 0), prev
))
11574 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11575 that may clobber hi or lo. */
11577 static rtx mips_macc_chains_last_hilo
;
11579 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11580 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11583 mips_macc_chains_record (rtx insn
)
11585 if (get_attr_may_clobber_hilo (insn
))
11586 mips_macc_chains_last_hilo
= insn
;
11589 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11590 has NREADY elements, looking for a multiply-add or multiply-subtract
11591 instruction that is cumulative with mips_macc_chains_last_hilo.
11592 If there is one, promote it ahead of anything else that might
11593 clobber hi or lo. */
11596 mips_macc_chains_reorder (rtx
*ready
, int nready
)
11600 if (mips_macc_chains_last_hilo
!= 0)
11601 for (i
= nready
- 1; i
>= 0; i
--)
11602 if (mips_linked_madd_p (mips_macc_chains_last_hilo
, ready
[i
]))
11604 for (j
= nready
- 1; j
> i
; j
--)
11605 if (recog_memoized (ready
[j
]) >= 0
11606 && get_attr_may_clobber_hilo (ready
[j
]))
11608 mips_promote_ready (ready
, i
, j
);
11615 /* The last instruction to be scheduled. */
11617 static rtx vr4130_last_insn
;
11619 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11620 points to an rtx that is initially an instruction. Nullify the rtx
11621 if the instruction uses the value of register X. */
11624 vr4130_true_reg_dependence_p_1 (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
11626 rtx
*insn_ptr
= data
;
11629 && reg_referenced_p (x
, PATTERN (*insn_ptr
)))
11633 /* Return true if there is true register dependence between vr4130_last_insn
11637 vr4130_true_reg_dependence_p (rtx insn
)
11639 note_stores (PATTERN (vr4130_last_insn
),
11640 vr4130_true_reg_dependence_p_1
, &insn
);
11644 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11645 the ready queue and that INSN2 is the instruction after it, return
11646 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11647 in which INSN1 and INSN2 can probably issue in parallel, but for
11648 which (INSN2, INSN1) should be less sensitive to instruction
11649 alignment than (INSN1, INSN2). See 4130.md for more details. */
11652 vr4130_swap_insns_p (rtx insn1
, rtx insn2
)
11654 sd_iterator_def sd_it
;
11657 /* Check for the following case:
11659 1) there is some other instruction X with an anti dependence on INSN1;
11660 2) X has a higher priority than INSN2; and
11661 3) X is an arithmetic instruction (and thus has no unit restrictions).
11663 If INSN1 is the last instruction blocking X, it would better to
11664 choose (INSN1, X) over (INSN2, INSN1). */
11665 FOR_EACH_DEP (insn1
, SD_LIST_FORW
, sd_it
, dep
)
11666 if (DEP_TYPE (dep
) == REG_DEP_ANTI
11667 && INSN_PRIORITY (DEP_CON (dep
)) > INSN_PRIORITY (insn2
)
11668 && recog_memoized (DEP_CON (dep
)) >= 0
11669 && get_attr_vr4130_class (DEP_CON (dep
)) == VR4130_CLASS_ALU
)
11672 if (vr4130_last_insn
!= 0
11673 && recog_memoized (insn1
) >= 0
11674 && recog_memoized (insn2
) >= 0)
11676 /* See whether INSN1 and INSN2 use different execution units,
11677 or if they are both ALU-type instructions. If so, they can
11678 probably execute in parallel. */
11679 enum attr_vr4130_class class1
= get_attr_vr4130_class (insn1
);
11680 enum attr_vr4130_class class2
= get_attr_vr4130_class (insn2
);
11681 if (class1
!= class2
|| class1
== VR4130_CLASS_ALU
)
11683 /* If only one of the instructions has a dependence on
11684 vr4130_last_insn, prefer to schedule the other one first. */
11685 bool dep1
= vr4130_true_reg_dependence_p (insn1
);
11686 bool dep2
= vr4130_true_reg_dependence_p (insn2
);
11690 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11691 is not an ALU-type instruction and if INSN1 uses the same
11692 execution unit. (Note that if this condition holds, we already
11693 know that INSN2 uses a different execution unit.) */
11694 if (class1
!= VR4130_CLASS_ALU
11695 && recog_memoized (vr4130_last_insn
) >= 0
11696 && class1
== get_attr_vr4130_class (vr4130_last_insn
))
11703 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11704 queue with at least two instructions. Swap the first two if
11705 vr4130_swap_insns_p says that it could be worthwhile. */
11708 vr4130_reorder (rtx
*ready
, int nready
)
11710 if (vr4130_swap_insns_p (ready
[nready
- 1], ready
[nready
- 2]))
11711 mips_promote_ready (ready
, nready
- 2, nready
- 1);
11714 /* Remove the instruction at index LOWER from ready queue READY and
11715 reinsert it in front of the instruction at index HIGHER. LOWER must
11719 mips_promote_ready (rtx
*ready
, int lower
, int higher
)
11724 new_head
= ready
[lower
];
11725 for (i
= lower
; i
< higher
; i
++)
11726 ready
[i
] = ready
[i
+ 1];
11727 ready
[i
] = new_head
;
11730 /* If the priority of the instruction at POS2 in the ready queue READY
11731 is within LIMIT units of that of the instruction at POS1, swap the
11732 instructions if POS2 is not already less than POS1. */
11735 mips_maybe_swap_ready (rtx
*ready
, int pos1
, int pos2
, int limit
)
11738 && INSN_PRIORITY (ready
[pos1
]) + limit
>= INSN_PRIORITY (ready
[pos2
]))
11741 temp
= ready
[pos1
];
11742 ready
[pos1
] = ready
[pos2
];
11743 ready
[pos2
] = temp
;
11747 /* Record whether last 74k AGEN instruction was a load or store. */
11749 static enum attr_type mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
11751 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11752 resets to TYPE_UNKNOWN state. */
11755 mips_74k_agen_init (rtx insn
)
11757 if (!insn
|| !NONJUMP_INSN_P (insn
))
11758 mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
11759 else if (USEFUL_INSN_P (insn
))
11761 enum attr_type type
= get_attr_type (insn
);
11762 if (type
== TYPE_LOAD
|| type
== TYPE_STORE
)
11763 mips_last_74k_agen_insn
= type
;
11767 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11768 loads to be grouped together, and multiple stores to be grouped
11769 together. Swap things around in the ready queue to make this happen. */
11772 mips_74k_agen_reorder (rtx
*ready
, int nready
)
11775 int store_pos
, load_pos
;
11780 for (i
= nready
- 1; i
>= 0; i
--)
11782 rtx insn
= ready
[i
];
11783 if (USEFUL_INSN_P (insn
))
11784 switch (get_attr_type (insn
))
11787 if (store_pos
== -1)
11792 if (load_pos
== -1)
11801 if (load_pos
== -1 || store_pos
== -1)
11804 switch (mips_last_74k_agen_insn
)
11807 /* Prefer to schedule loads since they have a higher latency. */
11809 /* Swap loads to the front of the queue. */
11810 mips_maybe_swap_ready (ready
, load_pos
, store_pos
, 4);
11813 /* Swap stores to the front of the queue. */
11814 mips_maybe_swap_ready (ready
, store_pos
, load_pos
, 4);
11821 /* Implement TARGET_SCHED_INIT. */
11824 mips_sched_init (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
11825 int max_ready ATTRIBUTE_UNUSED
)
11827 mips_macc_chains_last_hilo
= 0;
11828 vr4130_last_insn
= 0;
11829 mips_74k_agen_init (NULL_RTX
);
11832 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
11835 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
11836 rtx
*ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
11838 if (!reload_completed
11839 && TUNE_MACC_CHAINS
11841 mips_macc_chains_reorder (ready
, *nreadyp
);
11842 if (reload_completed
11844 && !TARGET_VR4130_ALIGN
11846 vr4130_reorder (ready
, *nreadyp
);
11848 mips_74k_agen_reorder (ready
, *nreadyp
);
11849 return mips_issue_rate ();
11852 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11855 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
11856 rtx insn
, int more
)
11859 mips_74k_agen_init (insn
);
11860 switch (GET_CODE (PATTERN (insn
)))
11864 /* Don't count USEs and CLOBBERs against the issue rate. */
11869 if (!reload_completed
&& TUNE_MACC_CHAINS
)
11870 mips_macc_chains_record (insn
);
11871 vr4130_last_insn
= insn
;
11877 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11878 dependencies have no cost, except on the 20Kc where output-dependence
11879 is treated like input-dependence. */
11882 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED
, rtx link
,
11883 rtx dep ATTRIBUTE_UNUSED
, int cost
)
11885 if (REG_NOTE_KIND (link
) == REG_DEP_OUTPUT
11888 if (REG_NOTE_KIND (link
) != 0)
11893 /* Return the number of instructions that can be issued per cycle. */
11896 mips_issue_rate (void)
11900 case PROCESSOR_74KC
:
11901 case PROCESSOR_74KF2_1
:
11902 case PROCESSOR_74KF1_1
:
11903 case PROCESSOR_74KF3_2
:
11904 /* The 74k is not strictly quad-issue cpu, but can be seen as one
11905 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11906 but in reality only a maximum of 3 insns can be issued as the
11907 floating point load/stores also require a slot in the AGEN pipe. */
11910 case PROCESSOR_20KC
:
11911 case PROCESSOR_R4130
:
11912 case PROCESSOR_R5400
:
11913 case PROCESSOR_R5500
:
11914 case PROCESSOR_R7000
:
11915 case PROCESSOR_R9000
:
11918 case PROCESSOR_SB1
:
11919 case PROCESSOR_SB1A
:
11920 /* This is actually 4, but we get better performance if we claim 3.
11921 This is partly because of unwanted speculative code motion with the
11922 larger number, and partly because in most common cases we can't
11923 reach the theoretical max of 4. */
/* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
   be as wide as the scheduling freedom in the DFA.  */

static int
mips_multipass_dfa_lookahead (void)
{
  /* Can schedule up to 4 of the 6 function units in any one cycle.  */
  return 4;
}
11944 /* Implements a store data bypass check. We need this because the cprestore
11945 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11946 default routine to abort. We just return false for that case. */
11947 /* ??? Should try to give a better result here than assuming false. */
11950 mips_store_data_bypass_p (rtx out_insn
, rtx in_insn
)
11952 if (GET_CODE (PATTERN (in_insn
)) == UNSPEC_VOLATILE
)
11955 return ! store_data_bypass_p (out_insn
, in_insn
);
11958 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11959 return the first operand of the associated "pref" or "prefx" insn. */
11962 mips_prefetch_cookie (rtx write
, rtx locality
)
11964 /* store_streamed / load_streamed. */
11965 if (INTVAL (locality
) <= 0)
11966 return GEN_INT (INTVAL (write
) + 4);
11968 /* store / load. */
11969 if (INTVAL (locality
) <= 2)
11972 /* store_retained / load_retained. */
11973 return GEN_INT (INTVAL (write
) + 6);
11976 /* MIPS builtin function support. */
11978 struct builtin_description
11980 /* The code of the main .md file instruction. See mips_builtin_type
11981 for more information. */
11982 enum insn_code icode
;
11984 /* The floating-point comparison code to use with ICODE, if any. */
11985 enum mips_fp_condition cond
;
11987 /* The name of the builtin function. */
11990 /* Specifies how the function should be expanded. */
11991 enum mips_builtin_type builtin_type
;
11993 /* The function's prototype. */
11994 enum mips_function_type function_type
;
11996 /* The target flags required for this function. */
/* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
   FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)		\
  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
    MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }

/* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
   TARGET_FLAGS.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS)			\
  { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_" #INSN "_" #COND "_s",				\
    MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS },	\
  { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_" #INSN "_" #COND "_d",				\
    MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms require TARGET_FLAGS while the any and all
   forms require MASK_MIPS3D.  */
#define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS)			\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_any_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_all_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_lower_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS },	\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_upper_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }

/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   require MASK_MIPS3D.  */
#define CMP_4S_BUILTINS(INSN, COND)					\
  { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_any_" #INSN "_" #COND "_4s",			\
    MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    MASK_MIPS3D },							\
  { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_all_" #INSN "_" #COND "_4s",			\
    MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    MASK_MIPS3D }

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires TARGET_FLAGS.  */
#define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS)			\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_movt_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    TARGET_FLAGS },							\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_movf_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    TARGET_FLAGS }

/* Define all the builtins related to c.cond.fmt condition COND.  */
#define CMP_BUILTINS(COND)						\
  MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
  MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D),				\
  CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D),			\
  CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
  CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D),				\
  CMP_4S_BUILTINS (c, COND),						\
  CMP_4S_BUILTINS (cabs, COND)
12067 static const struct builtin_description mips_bdesc
[] =
12069 DIRECT_BUILTIN (pll_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12070 DIRECT_BUILTIN (pul_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12071 DIRECT_BUILTIN (plu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12072 DIRECT_BUILTIN (puu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12073 DIRECT_BUILTIN (cvt_ps_s
, MIPS_V2SF_FTYPE_SF_SF
, MASK_PAIRED_SINGLE_FLOAT
),
12074 DIRECT_BUILTIN (cvt_s_pl
, MIPS_SF_FTYPE_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12075 DIRECT_BUILTIN (cvt_s_pu
, MIPS_SF_FTYPE_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12076 DIRECT_BUILTIN (abs_ps
, MIPS_V2SF_FTYPE_V2SF
, MASK_PAIRED_SINGLE_FLOAT
),
12078 DIRECT_BUILTIN (alnv_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF_INT
,
12079 MASK_PAIRED_SINGLE_FLOAT
),
12080 DIRECT_BUILTIN (addr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_MIPS3D
),
12081 DIRECT_BUILTIN (mulr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_MIPS3D
),
12082 DIRECT_BUILTIN (cvt_pw_ps
, MIPS_V2SF_FTYPE_V2SF
, MASK_MIPS3D
),
12083 DIRECT_BUILTIN (cvt_ps_pw
, MIPS_V2SF_FTYPE_V2SF
, MASK_MIPS3D
),
12085 DIRECT_BUILTIN (recip1_s
, MIPS_SF_FTYPE_SF
, MASK_MIPS3D
),
12086 DIRECT_BUILTIN (recip1_d
, MIPS_DF_FTYPE_DF
, MASK_MIPS3D
),
12087 DIRECT_BUILTIN (recip1_ps
, MIPS_V2SF_FTYPE_V2SF
, MASK_MIPS3D
),
12088 DIRECT_BUILTIN (recip2_s
, MIPS_SF_FTYPE_SF_SF
, MASK_MIPS3D
),
12089 DIRECT_BUILTIN (recip2_d
, MIPS_DF_FTYPE_DF_DF
, MASK_MIPS3D
),
12090 DIRECT_BUILTIN (recip2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_MIPS3D
),
12092 DIRECT_BUILTIN (rsqrt1_s
, MIPS_SF_FTYPE_SF
, MASK_MIPS3D
),
12093 DIRECT_BUILTIN (rsqrt1_d
, MIPS_DF_FTYPE_DF
, MASK_MIPS3D
),
12094 DIRECT_BUILTIN (rsqrt1_ps
, MIPS_V2SF_FTYPE_V2SF
, MASK_MIPS3D
),
12095 DIRECT_BUILTIN (rsqrt2_s
, MIPS_SF_FTYPE_SF_SF
, MASK_MIPS3D
),
12096 DIRECT_BUILTIN (rsqrt2_d
, MIPS_DF_FTYPE_DF_DF
, MASK_MIPS3D
),
12097 DIRECT_BUILTIN (rsqrt2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, MASK_MIPS3D
),
12099 MIPS_FP_CONDITIONS (CMP_BUILTINS
)
12102 /* Builtin functions for the SB-1 processor. */
12104 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
12106 static const struct builtin_description sb1_bdesc
[] =
12108 DIRECT_BUILTIN (sqrt_ps
, MIPS_V2SF_FTYPE_V2SF
, MASK_PAIRED_SINGLE_FLOAT
)
/* Builtin functions for DSP ASE.  */

/* Map DSP builtin names onto the generic vector-arithmetic patterns.  */
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3

/* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
   CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and TARGET_FLAGS are
   builtin_description fields.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)	\
  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
    MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  TARGET_FLAGS is a builtin_description field.  */
#define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS)				\
  { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE,		\
    MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
12132 static const struct builtin_description dsp_bdesc
[] =
12134 DIRECT_BUILTIN (addq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12135 DIRECT_BUILTIN (addq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12136 DIRECT_BUILTIN (addq_s_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12137 DIRECT_BUILTIN (addu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12138 DIRECT_BUILTIN (addu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12139 DIRECT_BUILTIN (subq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12140 DIRECT_BUILTIN (subq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12141 DIRECT_BUILTIN (subq_s_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12142 DIRECT_BUILTIN (subu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12143 DIRECT_BUILTIN (subu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12144 DIRECT_BUILTIN (addsc
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12145 DIRECT_BUILTIN (addwc
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12146 DIRECT_BUILTIN (modsub
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12147 DIRECT_BUILTIN (raddu_w_qb
, MIPS_SI_FTYPE_V4QI
, MASK_DSP
),
12148 DIRECT_BUILTIN (absq_s_ph
, MIPS_V2HI_FTYPE_V2HI
, MASK_DSP
),
12149 DIRECT_BUILTIN (absq_s_w
, MIPS_SI_FTYPE_SI
, MASK_DSP
),
12150 DIRECT_BUILTIN (precrq_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12151 DIRECT_BUILTIN (precrq_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, MASK_DSP
),
12152 DIRECT_BUILTIN (precrq_rs_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, MASK_DSP
),
12153 DIRECT_BUILTIN (precrqu_s_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12154 DIRECT_BUILTIN (preceq_w_phl
, MIPS_SI_FTYPE_V2HI
, MASK_DSP
),
12155 DIRECT_BUILTIN (preceq_w_phr
, MIPS_SI_FTYPE_V2HI
, MASK_DSP
),
12156 DIRECT_BUILTIN (precequ_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12157 DIRECT_BUILTIN (precequ_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12158 DIRECT_BUILTIN (precequ_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12159 DIRECT_BUILTIN (precequ_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12160 DIRECT_BUILTIN (preceu_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12161 DIRECT_BUILTIN (preceu_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12162 DIRECT_BUILTIN (preceu_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12163 DIRECT_BUILTIN (preceu_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, MASK_DSP
),
12164 DIRECT_BUILTIN (shll_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, MASK_DSP
),
12165 DIRECT_BUILTIN (shll_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, MASK_DSP
),
12166 DIRECT_BUILTIN (shll_s_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, MASK_DSP
),
12167 DIRECT_BUILTIN (shll_s_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12168 DIRECT_BUILTIN (shrl_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, MASK_DSP
),
12169 DIRECT_BUILTIN (shra_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, MASK_DSP
),
12170 DIRECT_BUILTIN (shra_r_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, MASK_DSP
),
12171 DIRECT_BUILTIN (shra_r_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12172 DIRECT_BUILTIN (muleu_s_ph_qbl
, MIPS_V2HI_FTYPE_V4QI_V2HI
, MASK_DSP
),
12173 DIRECT_BUILTIN (muleu_s_ph_qbr
, MIPS_V2HI_FTYPE_V4QI_V2HI
, MASK_DSP
),
12174 DIRECT_BUILTIN (mulq_rs_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12175 DIRECT_BUILTIN (muleq_s_w_phl
, MIPS_SI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12176 DIRECT_BUILTIN (muleq_s_w_phr
, MIPS_SI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12177 DIRECT_BUILTIN (bitrev
, MIPS_SI_FTYPE_SI
, MASK_DSP
),
12178 DIRECT_BUILTIN (insv
, MIPS_SI_FTYPE_SI_SI
, MASK_DSP
),
12179 DIRECT_BUILTIN (repl_qb
, MIPS_V4QI_FTYPE_SI
, MASK_DSP
),
12180 DIRECT_BUILTIN (repl_ph
, MIPS_V2HI_FTYPE_SI
, MASK_DSP
),
12181 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, MASK_DSP
),
12182 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, MASK_DSP
),
12183 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, MASK_DSP
),
12184 DIRECT_BUILTIN (cmpgu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12185 DIRECT_BUILTIN (cmpgu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12186 DIRECT_BUILTIN (cmpgu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12187 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, MASK_DSP
),
12188 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, MASK_DSP
),
12189 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, MASK_DSP
),
12190 DIRECT_BUILTIN (pick_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSP
),
12191 DIRECT_BUILTIN (pick_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12192 DIRECT_BUILTIN (packrl_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSP
),
12193 DIRECT_NO_TARGET_BUILTIN (wrdsp
, MIPS_VOID_FTYPE_SI_SI
, MASK_DSP
),
12194 DIRECT_BUILTIN (rddsp
, MIPS_SI_FTYPE_SI
, MASK_DSP
),
12195 DIRECT_BUILTIN (lbux
, MIPS_SI_FTYPE_PTR_SI
, MASK_DSP
),
12196 DIRECT_BUILTIN (lhx
, MIPS_SI_FTYPE_PTR_SI
, MASK_DSP
),
12197 DIRECT_BUILTIN (lwx
, MIPS_SI_FTYPE_PTR_SI
, MASK_DSP
),
12198 BPOSGE_BUILTIN (32, MASK_DSP
),
12200 /* The following are for the MIPS DSP ASE REV 2. */
12201 DIRECT_BUILTIN (absq_s_qb
, MIPS_V4QI_FTYPE_V4QI
, MASK_DSPR2
),
12202 DIRECT_BUILTIN (addu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12203 DIRECT_BUILTIN (addu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12204 DIRECT_BUILTIN (adduh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12205 DIRECT_BUILTIN (adduh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12206 DIRECT_BUILTIN (append
, MIPS_SI_FTYPE_SI_SI_SI
, MASK_DSPR2
),
12207 DIRECT_BUILTIN (balign
, MIPS_SI_FTYPE_SI_SI_SI
, MASK_DSPR2
),
12208 DIRECT_BUILTIN (cmpgdu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12209 DIRECT_BUILTIN (cmpgdu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12210 DIRECT_BUILTIN (cmpgdu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12211 DIRECT_BUILTIN (mul_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12212 DIRECT_BUILTIN (mul_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12213 DIRECT_BUILTIN (mulq_rs_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSPR2
),
12214 DIRECT_BUILTIN (mulq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12215 DIRECT_BUILTIN (mulq_s_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSPR2
),
12216 DIRECT_BUILTIN (precr_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12217 DIRECT_BUILTIN (precr_sra_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, MASK_DSPR2
),
12218 DIRECT_BUILTIN (precr_sra_r_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, MASK_DSPR2
),
12219 DIRECT_BUILTIN (prepend
, MIPS_SI_FTYPE_SI_SI_SI
, MASK_DSPR2
),
12220 DIRECT_BUILTIN (shra_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, MASK_DSPR2
),
12221 DIRECT_BUILTIN (shra_r_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, MASK_DSPR2
),
12222 DIRECT_BUILTIN (shrl_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, MASK_DSPR2
),
12223 DIRECT_BUILTIN (subu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12224 DIRECT_BUILTIN (subu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12225 DIRECT_BUILTIN (subuh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12226 DIRECT_BUILTIN (subuh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, MASK_DSPR2
),
12227 DIRECT_BUILTIN (addqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12228 DIRECT_BUILTIN (addqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12229 DIRECT_BUILTIN (addqh_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSPR2
),
12230 DIRECT_BUILTIN (addqh_r_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSPR2
),
12231 DIRECT_BUILTIN (subqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12232 DIRECT_BUILTIN (subqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, MASK_DSPR2
),
12233 DIRECT_BUILTIN (subqh_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSPR2
),
12234 DIRECT_BUILTIN (subqh_r_w
, MIPS_SI_FTYPE_SI_SI
, MASK_DSPR2
)
12237 static const struct builtin_description dsp_32only_bdesc
[] =
12239 DIRECT_BUILTIN (dpau_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, MASK_DSP
),
12240 DIRECT_BUILTIN (dpau_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, MASK_DSP
),
12241 DIRECT_BUILTIN (dpsu_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, MASK_DSP
),
12242 DIRECT_BUILTIN (dpsu_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, MASK_DSP
),
12243 DIRECT_BUILTIN (dpaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12244 DIRECT_BUILTIN (dpsq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12245 DIRECT_BUILTIN (mulsaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12246 DIRECT_BUILTIN (dpaq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, MASK_DSP
),
12247 DIRECT_BUILTIN (dpsq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, MASK_DSP
),
12248 DIRECT_BUILTIN (maq_s_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12249 DIRECT_BUILTIN (maq_s_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12250 DIRECT_BUILTIN (maq_sa_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12251 DIRECT_BUILTIN (maq_sa_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSP
),
12252 DIRECT_BUILTIN (extr_w
, MIPS_SI_FTYPE_DI_SI
, MASK_DSP
),
12253 DIRECT_BUILTIN (extr_r_w
, MIPS_SI_FTYPE_DI_SI
, MASK_DSP
),
12254 DIRECT_BUILTIN (extr_rs_w
, MIPS_SI_FTYPE_DI_SI
, MASK_DSP
),
12255 DIRECT_BUILTIN (extr_s_h
, MIPS_SI_FTYPE_DI_SI
, MASK_DSP
),
12256 DIRECT_BUILTIN (extp
, MIPS_SI_FTYPE_DI_SI
, MASK_DSP
),
12257 DIRECT_BUILTIN (extpdp
, MIPS_SI_FTYPE_DI_SI
, MASK_DSP
),
12258 DIRECT_BUILTIN (shilo
, MIPS_DI_FTYPE_DI_SI
, MASK_DSP
),
12259 DIRECT_BUILTIN (mthlip
, MIPS_DI_FTYPE_DI_SI
, MASK_DSP
),
12261 /* The following are for the MIPS DSP ASE REV 2. */
12262 DIRECT_BUILTIN (dpa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12263 DIRECT_BUILTIN (dps_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12264 DIRECT_BUILTIN (madd
, MIPS_DI_FTYPE_DI_SI_SI
, MASK_DSPR2
),
12265 DIRECT_BUILTIN (maddu
, MIPS_DI_FTYPE_DI_USI_USI
, MASK_DSPR2
),
12266 DIRECT_BUILTIN (msub
, MIPS_DI_FTYPE_DI_SI_SI
, MASK_DSPR2
),
12267 DIRECT_BUILTIN (msubu
, MIPS_DI_FTYPE_DI_USI_USI
, MASK_DSPR2
),
12268 DIRECT_BUILTIN (mulsa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12269 DIRECT_BUILTIN (mult
, MIPS_DI_FTYPE_SI_SI
, MASK_DSPR2
),
12270 DIRECT_BUILTIN (multu
, MIPS_DI_FTYPE_USI_USI
, MASK_DSPR2
),
12271 DIRECT_BUILTIN (dpax_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12272 DIRECT_BUILTIN (dpsx_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12273 DIRECT_BUILTIN (dpaqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12274 DIRECT_BUILTIN (dpaqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12275 DIRECT_BUILTIN (dpsqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
),
12276 DIRECT_BUILTIN (dpsqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, MASK_DSPR2
)
12279 /* This helps provide a mapping from builtin function codes to bdesc
12284 /* The builtin function table that this entry describes. */
12285 const struct builtin_description
*bdesc
;
12287 /* The number of entries in the builtin function table. */
12290 /* The target processor that supports these builtin functions.
12291 PROCESSOR_MAX means we enable them for all processors. */
12292 enum processor_type proc
;
12294 /* If the target has these flags, this builtin function table
12295 will not be supported. */
12296 int unsupported_target_flags
;
12299 static const struct bdesc_map bdesc_arrays
[] =
12301 { mips_bdesc
, ARRAY_SIZE (mips_bdesc
), PROCESSOR_MAX
, 0 },
12302 { sb1_bdesc
, ARRAY_SIZE (sb1_bdesc
), PROCESSOR_SB1
, 0 },
12303 { dsp_bdesc
, ARRAY_SIZE (dsp_bdesc
), PROCESSOR_MAX
, 0 },
12304 { dsp_32only_bdesc
, ARRAY_SIZE (dsp_32only_bdesc
), PROCESSOR_MAX
,
12308 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
12309 suitable for input operand OP of instruction ICODE. Return the value. */
12312 mips_prepare_builtin_arg (enum insn_code icode
,
12313 unsigned int op
, tree exp
, unsigned int argnum
)
12316 enum machine_mode mode
;
12318 value
= expand_normal (CALL_EXPR_ARG (exp
, argnum
));
12319 mode
= insn_data
[icode
].operand
[op
].mode
;
12320 if (!insn_data
[icode
].operand
[op
].predicate (value
, mode
))
12322 value
= copy_to_mode_reg (mode
, value
);
12323 /* Check the predicate again. */
12324 if (!insn_data
[icode
].operand
[op
].predicate (value
, mode
))
12326 error ("invalid argument to builtin function");
12334 /* Return an rtx suitable for output operand OP of instruction ICODE.
12335 If TARGET is non-null, try to use it where possible. */
12338 mips_prepare_builtin_target (enum insn_code icode
, unsigned int op
, rtx target
)
12340 enum machine_mode mode
;
12342 mode
= insn_data
[icode
].operand
[op
].mode
;
12343 if (target
== 0 || !insn_data
[icode
].operand
[op
].predicate (target
, mode
))
12344 target
= gen_reg_rtx (mode
);
12349 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
12352 mips_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
12353 enum machine_mode mode ATTRIBUTE_UNUSED
,
12354 int ignore ATTRIBUTE_UNUSED
)
12356 enum insn_code icode
;
12357 enum mips_builtin_type type
;
12359 unsigned int fcode
;
12360 const struct builtin_description
*bdesc
;
12361 const struct bdesc_map
*m
;
12363 fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12364 fcode
= DECL_FUNCTION_CODE (fndecl
);
12368 error ("built-in function %qs not supported for MIPS16",
12369 IDENTIFIER_POINTER (DECL_NAME (fndecl
)));
12374 for (m
= bdesc_arrays
; m
< &bdesc_arrays
[ARRAY_SIZE (bdesc_arrays
)]; m
++)
12376 if (fcode
< m
->size
)
12379 icode
= bdesc
[fcode
].icode
;
12380 type
= bdesc
[fcode
].builtin_type
;
12390 case MIPS_BUILTIN_DIRECT
:
12391 return mips_expand_builtin_direct (icode
, target
, exp
, true);
12393 case MIPS_BUILTIN_DIRECT_NO_TARGET
:
12394 return mips_expand_builtin_direct (icode
, target
, exp
, false);
12396 case MIPS_BUILTIN_MOVT
:
12397 case MIPS_BUILTIN_MOVF
:
12398 return mips_expand_builtin_movtf (type
, icode
, bdesc
[fcode
].cond
,
12401 case MIPS_BUILTIN_CMP_ANY
:
12402 case MIPS_BUILTIN_CMP_ALL
:
12403 case MIPS_BUILTIN_CMP_UPPER
:
12404 case MIPS_BUILTIN_CMP_LOWER
:
12405 case MIPS_BUILTIN_CMP_SINGLE
:
12406 return mips_expand_builtin_compare (type
, icode
, bdesc
[fcode
].cond
,
12409 case MIPS_BUILTIN_BPOSGE32
:
12410 return mips_expand_builtin_bposge (type
, target
);
12417 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
12420 mips_init_builtins (void)
12422 const struct builtin_description
*d
;
12423 const struct bdesc_map
*m
;
12424 tree types
[(int) MIPS_MAX_FTYPE_MAX
];
12425 tree V2SF_type_node
;
12426 tree V2HI_type_node
;
12427 tree V4QI_type_node
;
12428 unsigned int offset
;
12430 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
12431 if (!TARGET_PAIRED_SINGLE_FLOAT
&& !TARGET_DSP
)
12434 if (TARGET_PAIRED_SINGLE_FLOAT
)
12436 V2SF_type_node
= build_vector_type_for_mode (float_type_node
, V2SFmode
);
12438 types
[MIPS_V2SF_FTYPE_V2SF
]
12439 = build_function_type_list (V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
12441 types
[MIPS_V2SF_FTYPE_V2SF_V2SF
]
12442 = build_function_type_list (V2SF_type_node
,
12443 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
12445 types
[MIPS_V2SF_FTYPE_V2SF_V2SF_INT
]
12446 = build_function_type_list (V2SF_type_node
,
12447 V2SF_type_node
, V2SF_type_node
,
12448 integer_type_node
, NULL_TREE
);
12450 types
[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF
]
12451 = build_function_type_list (V2SF_type_node
,
12452 V2SF_type_node
, V2SF_type_node
,
12453 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
12455 types
[MIPS_V2SF_FTYPE_SF_SF
]
12456 = build_function_type_list (V2SF_type_node
,
12457 float_type_node
, float_type_node
, NULL_TREE
);
12459 types
[MIPS_INT_FTYPE_V2SF_V2SF
]
12460 = build_function_type_list (integer_type_node
,
12461 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
12463 types
[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF
]
12464 = build_function_type_list (integer_type_node
,
12465 V2SF_type_node
, V2SF_type_node
,
12466 V2SF_type_node
, V2SF_type_node
, NULL_TREE
);
12468 types
[MIPS_INT_FTYPE_SF_SF
]
12469 = build_function_type_list (integer_type_node
,
12470 float_type_node
, float_type_node
, NULL_TREE
);
12472 types
[MIPS_INT_FTYPE_DF_DF
]
12473 = build_function_type_list (integer_type_node
,
12474 double_type_node
, double_type_node
, NULL_TREE
);
12476 types
[MIPS_SF_FTYPE_V2SF
]
12477 = build_function_type_list (float_type_node
, V2SF_type_node
, NULL_TREE
);
12479 types
[MIPS_SF_FTYPE_SF
]
12480 = build_function_type_list (float_type_node
,
12481 float_type_node
, NULL_TREE
);
12483 types
[MIPS_SF_FTYPE_SF_SF
]
12484 = build_function_type_list (float_type_node
,
12485 float_type_node
, float_type_node
, NULL_TREE
);
12487 types
[MIPS_DF_FTYPE_DF
]
12488 = build_function_type_list (double_type_node
,
12489 double_type_node
, NULL_TREE
);
12491 types
[MIPS_DF_FTYPE_DF_DF
]
12492 = build_function_type_list (double_type_node
,
12493 double_type_node
, double_type_node
, NULL_TREE
);
12498 V2HI_type_node
= build_vector_type_for_mode (intHI_type_node
, V2HImode
);
12499 V4QI_type_node
= build_vector_type_for_mode (intQI_type_node
, V4QImode
);
12501 types
[MIPS_V2HI_FTYPE_V2HI_V2HI
]
12502 = build_function_type_list (V2HI_type_node
,
12503 V2HI_type_node
, V2HI_type_node
,
12506 types
[MIPS_SI_FTYPE_SI_SI
]
12507 = build_function_type_list (intSI_type_node
,
12508 intSI_type_node
, intSI_type_node
,
12511 types
[MIPS_V4QI_FTYPE_V4QI_V4QI
]
12512 = build_function_type_list (V4QI_type_node
,
12513 V4QI_type_node
, V4QI_type_node
,
12516 types
[MIPS_SI_FTYPE_V4QI
]
12517 = build_function_type_list (intSI_type_node
,
12521 types
[MIPS_V2HI_FTYPE_V2HI
]
12522 = build_function_type_list (V2HI_type_node
,
12526 types
[MIPS_SI_FTYPE_SI
]
12527 = build_function_type_list (intSI_type_node
,
12531 types
[MIPS_V4QI_FTYPE_V2HI_V2HI
]
12532 = build_function_type_list (V4QI_type_node
,
12533 V2HI_type_node
, V2HI_type_node
,
12536 types
[MIPS_V2HI_FTYPE_SI_SI
]
12537 = build_function_type_list (V2HI_type_node
,
12538 intSI_type_node
, intSI_type_node
,
12541 types
[MIPS_SI_FTYPE_V2HI
]
12542 = build_function_type_list (intSI_type_node
,
12546 types
[MIPS_V2HI_FTYPE_V4QI
]
12547 = build_function_type_list (V2HI_type_node
,
12551 types
[MIPS_V4QI_FTYPE_V4QI_SI
]
12552 = build_function_type_list (V4QI_type_node
,
12553 V4QI_type_node
, intSI_type_node
,
12556 types
[MIPS_V2HI_FTYPE_V2HI_SI
]
12557 = build_function_type_list (V2HI_type_node
,
12558 V2HI_type_node
, intSI_type_node
,
12561 types
[MIPS_V2HI_FTYPE_V4QI_V2HI
]
12562 = build_function_type_list (V2HI_type_node
,
12563 V4QI_type_node
, V2HI_type_node
,
12566 types
[MIPS_SI_FTYPE_V2HI_V2HI
]
12567 = build_function_type_list (intSI_type_node
,
12568 V2HI_type_node
, V2HI_type_node
,
12571 types
[MIPS_DI_FTYPE_DI_V4QI_V4QI
]
12572 = build_function_type_list (intDI_type_node
,
12573 intDI_type_node
, V4QI_type_node
, V4QI_type_node
,
12576 types
[MIPS_DI_FTYPE_DI_V2HI_V2HI
]
12577 = build_function_type_list (intDI_type_node
,
12578 intDI_type_node
, V2HI_type_node
, V2HI_type_node
,
12581 types
[MIPS_DI_FTYPE_DI_SI_SI
]
12582 = build_function_type_list (intDI_type_node
,
12583 intDI_type_node
, intSI_type_node
, intSI_type_node
,
12586 types
[MIPS_V4QI_FTYPE_SI
]
12587 = build_function_type_list (V4QI_type_node
,
12591 types
[MIPS_V2HI_FTYPE_SI
]
12592 = build_function_type_list (V2HI_type_node
,
12596 types
[MIPS_VOID_FTYPE_V4QI_V4QI
]
12597 = build_function_type_list (void_type_node
,
12598 V4QI_type_node
, V4QI_type_node
,
12601 types
[MIPS_SI_FTYPE_V4QI_V4QI
]
12602 = build_function_type_list (intSI_type_node
,
12603 V4QI_type_node
, V4QI_type_node
,
12606 types
[MIPS_VOID_FTYPE_V2HI_V2HI
]
12607 = build_function_type_list (void_type_node
,
12608 V2HI_type_node
, V2HI_type_node
,
12611 types
[MIPS_SI_FTYPE_DI_SI
]
12612 = build_function_type_list (intSI_type_node
,
12613 intDI_type_node
, intSI_type_node
,
12616 types
[MIPS_DI_FTYPE_DI_SI
]
12617 = build_function_type_list (intDI_type_node
,
12618 intDI_type_node
, intSI_type_node
,
12621 types
[MIPS_VOID_FTYPE_SI_SI
]
12622 = build_function_type_list (void_type_node
,
12623 intSI_type_node
, intSI_type_node
,
12626 types
[MIPS_SI_FTYPE_PTR_SI
]
12627 = build_function_type_list (intSI_type_node
,
12628 ptr_type_node
, intSI_type_node
,
12631 types
[MIPS_SI_FTYPE_VOID
]
12632 = build_function_type (intSI_type_node
, void_list_node
);
12636 types
[MIPS_V4QI_FTYPE_V4QI
]
12637 = build_function_type_list (V4QI_type_node
,
12641 types
[MIPS_SI_FTYPE_SI_SI_SI
]
12642 = build_function_type_list (intSI_type_node
,
12643 intSI_type_node
, intSI_type_node
,
12644 intSI_type_node
, NULL_TREE
);
12646 types
[MIPS_DI_FTYPE_DI_USI_USI
]
12647 = build_function_type_list (intDI_type_node
,
12649 unsigned_intSI_type_node
,
12650 unsigned_intSI_type_node
, NULL_TREE
);
12652 types
[MIPS_DI_FTYPE_SI_SI
]
12653 = build_function_type_list (intDI_type_node
,
12654 intSI_type_node
, intSI_type_node
,
12657 types
[MIPS_DI_FTYPE_USI_USI
]
12658 = build_function_type_list (intDI_type_node
,
12659 unsigned_intSI_type_node
,
12660 unsigned_intSI_type_node
, NULL_TREE
);
12662 types
[MIPS_V2HI_FTYPE_SI_SI_SI
]
12663 = build_function_type_list (V2HI_type_node
,
12664 intSI_type_node
, intSI_type_node
,
12665 intSI_type_node
, NULL_TREE
);
12670 /* Iterate through all of the bdesc arrays, initializing all of the
12671 builtin functions. */
12674 for (m
= bdesc_arrays
; m
< &bdesc_arrays
[ARRAY_SIZE (bdesc_arrays
)]; m
++)
12676 if ((m
->proc
== PROCESSOR_MAX
|| (m
->proc
== mips_arch
))
12677 && (m
->unsupported_target_flags
& target_flags
) == 0)
12678 for (d
= m
->bdesc
; d
< &m
->bdesc
[m
->size
]; d
++)
12679 if ((d
->target_flags
& target_flags
) == d
->target_flags
)
12680 add_builtin_function (d
->name
, types
[d
->function_type
],
12681 d
- m
->bdesc
+ offset
,
12682 BUILT_IN_MD
, NULL
, NULL
);
12687 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12688 .md pattern and CALL is the function expr with arguments. TARGET,
12689 if nonnull, suggests a good place to put the result.
12690 HAS_TARGET indicates the function must return something. */
12693 mips_expand_builtin_direct (enum insn_code icode
, rtx target
, tree exp
,
12696 rtx ops
[MAX_RECOG_OPERANDS
];
12702 /* We save target to ops[0]. */
12703 ops
[0] = mips_prepare_builtin_target (icode
, 0, target
);
12707 /* We need to test if the arglist is not zero. Some instructions have extra
12708 clobber registers. */
12709 for (; i
< insn_data
[icode
].n_operands
&& i
<= call_expr_nargs (exp
); i
++, j
++)
12710 ops
[i
] = mips_prepare_builtin_arg (icode
, i
, exp
, j
);
12715 emit_insn (GEN_FCN (icode
) (ops
[0], ops
[1]));
12719 emit_insn (GEN_FCN (icode
) (ops
[0], ops
[1], ops
[2]));
12723 emit_insn (GEN_FCN (icode
) (ops
[0], ops
[1], ops
[2], ops
[3]));
12727 gcc_unreachable ();
12732 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12733 function (TYPE says which). EXP is the tree for the function
12734 function, ICODE is the instruction that should be used to compare
12735 the first two arguments, and COND is the condition it should test.
12736 TARGET, if nonnull, suggests a good place to put the result. */
12739 mips_expand_builtin_movtf (enum mips_builtin_type type
,
12740 enum insn_code icode
, enum mips_fp_condition cond
,
12741 rtx target
, tree exp
)
12743 rtx cmp_result
, op0
, op1
;
12745 cmp_result
= mips_prepare_builtin_target (icode
, 0, 0);
12746 op0
= mips_prepare_builtin_arg (icode
, 1, exp
, 0);
12747 op1
= mips_prepare_builtin_arg (icode
, 2, exp
, 1);
12748 emit_insn (GEN_FCN (icode
) (cmp_result
, op0
, op1
, GEN_INT (cond
)));
12750 icode
= CODE_FOR_mips_cond_move_tf_ps
;
12751 target
= mips_prepare_builtin_target (icode
, 0, target
);
12752 if (type
== MIPS_BUILTIN_MOVT
)
12754 op1
= mips_prepare_builtin_arg (icode
, 2, exp
, 2);
12755 op0
= mips_prepare_builtin_arg (icode
, 1, exp
, 3);
12759 op0
= mips_prepare_builtin_arg (icode
, 1, exp
, 2);
12760 op1
= mips_prepare_builtin_arg (icode
, 2, exp
, 3);
12762 emit_insn (gen_mips_cond_move_tf_ps (target
, op0
, op1
, cmp_result
));
12766 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12767 into TARGET otherwise. Return TARGET. */
12770 mips_builtin_branch_and_move (rtx condition
, rtx target
,
12771 rtx value_if_true
, rtx value_if_false
)
12773 rtx true_label
, done_label
;
12775 true_label
= gen_label_rtx ();
12776 done_label
= gen_label_rtx ();
12778 /* First assume that CONDITION is false. */
12779 mips_emit_move (target
, value_if_false
);
12781 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12782 emit_jump_insn (gen_condjump (condition
, true_label
));
12783 emit_jump_insn (gen_jump (done_label
));
12786 /* Fix TARGET if CONDITION is true. */
12787 emit_label (true_label
);
12788 mips_emit_move (target
, value_if_true
);
12790 emit_label (done_label
);
12794 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12795 of the comparison instruction and COND is the condition it should test.
12796 EXP is the function call and arguments and TARGET, if nonnull,
12797 suggests a good place to put the boolean result. */
12800 mips_expand_builtin_compare (enum mips_builtin_type builtin_type
,
12801 enum insn_code icode
, enum mips_fp_condition cond
,
12802 rtx target
, tree exp
)
12804 rtx offset
, condition
, cmp_result
, ops
[MAX_RECOG_OPERANDS
];
12808 if (target
== 0 || GET_MODE (target
) != SImode
)
12809 target
= gen_reg_rtx (SImode
);
12811 /* Prepare the operands to the comparison. */
12812 cmp_result
= mips_prepare_builtin_target (icode
, 0, 0);
12813 for (i
= 1; i
< insn_data
[icode
].n_operands
- 1; i
++, j
++)
12814 ops
[i
] = mips_prepare_builtin_arg (icode
, i
, exp
, j
);
12816 switch (insn_data
[icode
].n_operands
)
12819 emit_insn (GEN_FCN (icode
) (cmp_result
, ops
[1], ops
[2], GEN_INT (cond
)));
12823 emit_insn (GEN_FCN (icode
) (cmp_result
, ops
[1], ops
[2],
12824 ops
[3], ops
[4], GEN_INT (cond
)));
12828 gcc_unreachable ();
12831 /* If the comparison sets more than one register, we define the result
12832 to be 0 if all registers are false and -1 if all registers are true.
12833 The value of the complete result is indeterminate otherwise. */
12834 switch (builtin_type
)
12836 case MIPS_BUILTIN_CMP_ALL
:
12837 condition
= gen_rtx_NE (VOIDmode
, cmp_result
, constm1_rtx
);
12838 return mips_builtin_branch_and_move (condition
, target
,
12839 const0_rtx
, const1_rtx
);
12841 case MIPS_BUILTIN_CMP_UPPER
:
12842 case MIPS_BUILTIN_CMP_LOWER
:
12843 offset
= GEN_INT (builtin_type
== MIPS_BUILTIN_CMP_UPPER
);
12844 condition
= gen_single_cc (cmp_result
, offset
);
12845 return mips_builtin_branch_and_move (condition
, target
,
12846 const1_rtx
, const0_rtx
);
12849 condition
= gen_rtx_NE (VOIDmode
, cmp_result
, const0_rtx
);
12850 return mips_builtin_branch_and_move (condition
, target
,
12851 const1_rtx
, const0_rtx
);
12855 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12856 suggests a good place to put the boolean result. */
12859 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type
, rtx target
)
12861 rtx condition
, cmp_result
;
12864 if (target
== 0 || GET_MODE (target
) != SImode
)
12865 target
= gen_reg_rtx (SImode
);
12867 cmp_result
= gen_rtx_REG (CCDSPmode
, CCDSP_PO_REGNUM
);
12869 if (builtin_type
== MIPS_BUILTIN_BPOSGE32
)
12874 condition
= gen_rtx_GE (VOIDmode
, cmp_result
, GEN_INT (cmp_value
));
12875 return mips_builtin_branch_and_move (condition
, target
,
12876 const1_rtx
, const0_rtx
);
12879 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12880 FIRST is true if this is the first time handling this decl. */
12883 mips_encode_section_info (tree decl
, rtx rtl
, int first
)
12885 default_encode_section_info (decl
, rtl
, first
);
12887 if (TREE_CODE (decl
) == FUNCTION_DECL
)
12889 rtx symbol
= XEXP (rtl
, 0);
12890 tree type
= TREE_TYPE (decl
);
12892 if ((TARGET_LONG_CALLS
&& !mips_near_type_p (type
))
12893 || mips_far_type_p (type
))
12894 SYMBOL_REF_FLAGS (symbol
) |= SYMBOL_FLAG_LONG_CALL
;
12898 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12899 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12902 mips_extra_live_on_entry (bitmap regs
)
12904 if (TARGET_USE_GOT
&& !TARGET_ABSOLUTE_ABICALLS
)
12905 bitmap_set_bit (regs
, PIC_FUNCTION_ADDR_REGNUM
);
12908 /* SImode values are represented as sign-extended to DImode. */
12911 mips_mode_rep_extended (enum machine_mode mode
, enum machine_mode mode_rep
)
12913 if (TARGET_64BIT
&& mode
== SImode
&& mode_rep
== DImode
)
12914 return SIGN_EXTEND
;
12919 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12922 mips_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
12927 fputs ("\t.dtprelword\t", file
);
12931 fputs ("\t.dtpreldword\t", file
);
12935 gcc_unreachable ();
12937 output_addr_const (file
, x
);
12938 fputs ("+0x8000", file
);
12941 #include "gt-mips.h"