1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
9
10 This file is part of GCC.
11
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
16
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60
61 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
62 #define UNSPEC_ADDRESS_P(X) \
63 (GET_CODE (X) == UNSPEC \
64 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
65 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
69 XVECEXP (X, 0, 0)
70
71 /* Extract the symbol type from UNSPEC wrapper X. */
72 #define UNSPEC_ADDRESS_TYPE(X) \
73 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
74
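/* Illustrative sketch (not generated code): a GP-relative reference to
   "foo" would be wrapped as roughly

     (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE)

   so UNSPEC_ADDRESS_P holds for it, UNSPEC_ADDRESS extracts the
   symbol_ref, and UNSPEC_ADDRESS_TYPE yields SYMBOL_GP_RELATIVE.  */
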
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save and restore registers.
77
78 The value for normal-mode code must be a SMALL_OPERAND and must
79 preserve the maximum stack alignment. We therefore use a value
80 of 0x7ff0 in this case.
81
82 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
83 up to 0x7f8 bytes and can usually save or restore all the registers
84 that we need to save or restore. (Note that we can only use these
85 instructions for o32, for which the stack alignment is 8 bytes.)
86
87 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
88 RESTORE are not available. We can then use unextended instructions
89 to save and restore registers, and to allocate and deallocate the top
90 part of the frame. */
91 #define MIPS_MAX_FIRST_STACK_STEP \
92 (!TARGET_MIPS16 ? 0x7ff0 \
93 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
94 : TARGET_64BIT ? 0x100 : 0x400)
95
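/* Worked check of the normal-mode value above (illustrative): 0x7ff0 is
   the largest multiple of 16 that still satisfies SMALL_OPERAND (a signed
   16-bit immediate, at most 0x7fff), so a single ADDIU/DADDIU can adjust
   $sp by the full step while preserving 16-byte stack alignment.  */
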
96 /* True if INSN is a mips.md pattern or asm statement. */
97 #define USEFUL_INSN_P(INSN) \
98 (INSN_P (INSN) \
99 && GET_CODE (PATTERN (INSN)) != USE \
100 && GET_CODE (PATTERN (INSN)) != CLOBBER \
101 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
102 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
103
104 /* If INSN is a delayed branch sequence, return the first instruction
105 in the sequence, otherwise return INSN itself. */
106 #define SEQ_BEGIN(INSN) \
107 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
108 ? XVECEXP (PATTERN (INSN), 0, 0) \
109 : (INSN))
110
111 /* Likewise for the last instruction in a delayed branch sequence. */
112 #define SEQ_END(INSN) \
113 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
114 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
115 : (INSN))
116
117 /* Execute the following loop body with SUBINSN set to each instruction
118 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
119 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
120 for ((SUBINSN) = SEQ_BEGIN (INSN); \
121 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
122 (SUBINSN) = NEXT_INSN (SUBINSN))
123
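/* Usage sketch for the three macros above (assumed, not taken from this
   file); handle () is a hypothetical callback:

     rtx insn, subinsn;
     for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
       FOR_EACH_SUBINSN (subinsn, insn)
         if (USEFUL_INSN_P (subinsn))
           handle (subinsn);

   A delayed-branch SEQUENCE is walked one element at a time, while an
   ordinary insn is visited exactly once.  */
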
124 /* True if bit BIT is set in VALUE. */
125 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
126
127 /* Classifies an address.
128
129 ADDRESS_REG
130 A natural register + offset address. The register satisfies
131 mips_valid_base_register_p and the offset is a const_arith_operand.
132
133 ADDRESS_LO_SUM
134 A LO_SUM rtx. The first operand is a valid base register and
135 the second operand is a symbolic address.
136
137 ADDRESS_CONST_INT
138 A signed 16-bit constant address.
139
 140 ADDRESS_SYMBOLIC
141 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
142 enum mips_address_type {
143 ADDRESS_REG,
144 ADDRESS_LO_SUM,
145 ADDRESS_CONST_INT,
146 ADDRESS_SYMBOLIC
147 };
148
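/* Rough examples of each class, in RTL notation (illustrative only):

     ADDRESS_REG        (plus (reg $sp) (const_int 16))
     ADDRESS_LO_SUM     (lo_sum (reg $at) (symbol_ref "foo"))
     ADDRESS_CONST_INT  (const_int 400)
     ADDRESS_SYMBOLIC   (symbol_ref "foo")  */
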
149 /* Classifies the prototype of a builtin function. */
150 enum mips_function_type
151 {
152 MIPS_V2SF_FTYPE_V2SF,
153 MIPS_V2SF_FTYPE_V2SF_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
156 MIPS_V2SF_FTYPE_SF_SF,
157 MIPS_INT_FTYPE_V2SF_V2SF,
158 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
159 MIPS_INT_FTYPE_SF_SF,
160 MIPS_INT_FTYPE_DF_DF,
161 MIPS_SF_FTYPE_V2SF,
162 MIPS_SF_FTYPE_SF,
163 MIPS_SF_FTYPE_SF_SF,
164 MIPS_DF_FTYPE_DF,
165 MIPS_DF_FTYPE_DF_DF,
166
167 /* For MIPS DSP ASE */
168 MIPS_DI_FTYPE_DI_SI,
169 MIPS_DI_FTYPE_DI_SI_SI,
170 MIPS_DI_FTYPE_DI_V2HI_V2HI,
171 MIPS_DI_FTYPE_DI_V4QI_V4QI,
172 MIPS_SI_FTYPE_DI_SI,
173 MIPS_SI_FTYPE_PTR_SI,
174 MIPS_SI_FTYPE_SI,
175 MIPS_SI_FTYPE_SI_SI,
176 MIPS_SI_FTYPE_V2HI,
177 MIPS_SI_FTYPE_V2HI_V2HI,
178 MIPS_SI_FTYPE_V4QI,
179 MIPS_SI_FTYPE_V4QI_V4QI,
180 MIPS_SI_FTYPE_VOID,
181 MIPS_V2HI_FTYPE_SI,
182 MIPS_V2HI_FTYPE_SI_SI,
183 MIPS_V2HI_FTYPE_V2HI,
184 MIPS_V2HI_FTYPE_V2HI_SI,
185 MIPS_V2HI_FTYPE_V2HI_V2HI,
186 MIPS_V2HI_FTYPE_V4QI,
187 MIPS_V2HI_FTYPE_V4QI_V2HI,
188 MIPS_V4QI_FTYPE_SI,
189 MIPS_V4QI_FTYPE_V2HI_V2HI,
190 MIPS_V4QI_FTYPE_V4QI_SI,
191 MIPS_V4QI_FTYPE_V4QI_V4QI,
192 MIPS_VOID_FTYPE_SI_SI,
193 MIPS_VOID_FTYPE_V2HI_V2HI,
194 MIPS_VOID_FTYPE_V4QI_V4QI,
195
196 /* For MIPS DSP REV 2 ASE. */
197 MIPS_V4QI_FTYPE_V4QI,
198 MIPS_SI_FTYPE_SI_SI_SI,
199 MIPS_DI_FTYPE_DI_USI_USI,
200 MIPS_DI_FTYPE_SI_SI,
201 MIPS_DI_FTYPE_USI_USI,
202 MIPS_V2HI_FTYPE_SI_SI_SI,
203
204 /* The last type. */
205 MIPS_MAX_FTYPE_MAX
206 };
207
208 /* Specifies how a builtin function should be converted into rtl. */
209 enum mips_builtin_type
210 {
211 /* The builtin corresponds directly to an .md pattern. The return
212 value is mapped to operand 0 and the arguments are mapped to
213 operands 1 and above. */
214 MIPS_BUILTIN_DIRECT,
215
216 /* The builtin corresponds directly to an .md pattern. There is no return
217 value and the arguments are mapped to operands 0 and above. */
218 MIPS_BUILTIN_DIRECT_NO_TARGET,
219
220 /* The builtin corresponds to a comparison instruction followed by
221 a mips_cond_move_tf_ps pattern. The first two arguments are the
222 values to compare and the second two arguments are the vector
223 operands for the movt.ps or movf.ps instruction (in assembly order). */
224 MIPS_BUILTIN_MOVF,
225 MIPS_BUILTIN_MOVT,
226
227 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
228 of this instruction is the result of the comparison, which has mode
229 CCV2 or CCV4. The function arguments are mapped to operands 1 and
230 above. The function's return value is an SImode boolean that is
231 true under the following conditions:
232
233 MIPS_BUILTIN_CMP_ANY: one of the registers is true
234 MIPS_BUILTIN_CMP_ALL: all of the registers are true
235 MIPS_BUILTIN_CMP_LOWER: the first register is true
236 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
237 MIPS_BUILTIN_CMP_ANY,
238 MIPS_BUILTIN_CMP_ALL,
239 MIPS_BUILTIN_CMP_UPPER,
240 MIPS_BUILTIN_CMP_LOWER,
241
242 /* As above, but the instruction only sets a single $fcc register. */
243 MIPS_BUILTIN_CMP_SINGLE,
244
245 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
246 MIPS_BUILTIN_BPOSGE32
247 };
248
249 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
250 #define MIPS_FP_CONDITIONS(MACRO) \
251 MACRO (f), \
252 MACRO (un), \
253 MACRO (eq), \
254 MACRO (ueq), \
255 MACRO (olt), \
256 MACRO (ult), \
257 MACRO (ole), \
258 MACRO (ule), \
259 MACRO (sf), \
260 MACRO (ngle), \
261 MACRO (seq), \
262 MACRO (ngl), \
263 MACRO (lt), \
264 MACRO (nge), \
265 MACRO (le), \
266 MACRO (ngt)
267
268 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
269 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
270 enum mips_fp_condition {
271 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
272 };
273
274 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
275 #define STRINGIFY(X) #X
276 static const char *const mips_fp_conditions[] = {
277 MIPS_FP_CONDITIONS (STRINGIFY)
278 };
279
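/* A sketch of how the two expansions above line up: MIPS_FP_CONDITIONS
   (DECLARE_MIPS_COND) yields the enumerators MIPS_FP_COND_f,
   MIPS_FP_COND_un, MIPS_FP_COND_eq, ..., while MIPS_FP_CONDITIONS
   (STRINGIFY) yields the parallel strings "f", "un", "eq", ..., so that
   mips_fp_conditions[MIPS_FP_COND_eq] is "eq".  */
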
 280 /* A function to save or restore a register. The first argument is the
281 register and the second is the stack slot. */
282 typedef void (*mips_save_restore_fn) (rtx, rtx);
283
284 struct mips16_constant;
285 struct mips_arg_info;
286 struct mips_address_info;
287 struct mips_integer_op;
288 struct mips_sim;
289
290 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
291 static bool mips_classify_address (struct mips_address_info *, rtx,
292 enum machine_mode, int);
293 static bool mips_cannot_force_const_mem (rtx);
294 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
295 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
296 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
297 static rtx mips_force_temporary (rtx, rtx);
298 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
299 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
300 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
301 static unsigned int mips_build_lower (struct mips_integer_op *,
302 unsigned HOST_WIDE_INT);
303 static unsigned int mips_build_integer (struct mips_integer_op *,
304 unsigned HOST_WIDE_INT);
305 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
306 static int m16_check_op (rtx, int, int, int);
307 static bool mips_rtx_costs (rtx, int, int, int *);
308 static int mips_address_cost (rtx);
309 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
310 static void mips_load_call_address (rtx, rtx, int);
311 static bool mips_function_ok_for_sibcall (tree, tree);
312 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
313 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
314 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
315 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
316 tree, int, struct mips_arg_info *);
317 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
318 static void mips_set_architecture (const struct mips_cpu_info *);
319 static void mips_set_tune (const struct mips_cpu_info *);
320 static bool mips_handle_option (size_t, const char *, int);
321 static struct machine_function *mips_init_machine_status (void);
322 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
323 const char **);
324 static void mips_file_start (void);
325 static int mips_small_data_pattern_1 (rtx *, void *);
326 static int mips_rewrite_small_data_1 (rtx *, void *);
327 static bool mips_function_has_gp_insn (void);
328 static unsigned int mips_global_pointer (void);
329 static bool mips_save_reg_p (unsigned int);
330 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
331 mips_save_restore_fn);
332 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
333 static void mips_output_cplocal (void);
334 static void mips_emit_loadgp (void);
335 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
336 static void mips_set_frame_expr (rtx);
337 static rtx mips_frame_set (rtx, rtx);
338 static void mips_save_reg (rtx, rtx);
339 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
340 static void mips_restore_reg (rtx, rtx);
341 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
342 HOST_WIDE_INT, tree);
343 static int symbolic_expression_p (rtx);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (tree);
348 static bool mips_use_anchors_for_symbol_p (rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
401 tree, int *, int);
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
405 const_tree, bool);
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
407 const_tree, bool);
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
409 tree, bool);
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_vector_mode_supported_p (enum machine_mode);
412 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
413 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
414 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
415 static void mips_init_builtins (void);
416 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
417 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
418 enum insn_code, enum mips_fp_condition,
419 rtx, tree);
420 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
421 enum insn_code, enum mips_fp_condition,
422 rtx, tree);
423 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
424 static void mips_encode_section_info (tree, rtx, int);
425 static void mips_extra_live_on_entry (bitmap);
426 static int mips_comp_type_attributes (tree, tree);
427 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
428 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
429 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
430
431 /* Structure to be filled in by compute_frame_size with register
432 save masks, and offsets for the current function. */
433
434 struct mips_frame_info GTY(())
435 {
436 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
437 HOST_WIDE_INT var_size; /* # bytes that variables take up */
438 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
439 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
440 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
441 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
442 unsigned int mask; /* mask of saved gp registers */
443 unsigned int fmask; /* mask of saved fp registers */
444 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
445 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
446 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
447 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
448 bool initialized; /* true if frame size already calculated */
449 int num_gp; /* number of gp registers saved */
450 int num_fp; /* number of fp registers saved */
451 };
452
453 struct machine_function GTY(()) {
454 /* Pseudo-reg holding the value of $28 in a mips16 function which
455 refers to GP relative global variables. */
456 rtx mips16_gp_pseudo_rtx;
457
458 /* The number of extra stack bytes taken up by register varargs.
459 This area is allocated by the callee at the very top of the frame. */
460 int varargs_size;
461
462 /* Current frame information, calculated by compute_frame_size. */
463 struct mips_frame_info frame;
464
465 /* The register to use as the global pointer within this function. */
466 unsigned int global_pointer;
467
468 /* True if mips_adjust_insn_length should ignore an instruction's
469 hazard attribute. */
470 bool ignore_hazard_length_p;
471
472 /* True if the whole function is suitable for .set noreorder and
473 .set nomacro. */
474 bool all_noreorder_p;
475
476 /* True if the function is known to have an instruction that needs $gp. */
477 bool has_gp_insn_p;
478
479 /* True if we have emitted an instruction to initialize
480 mips16_gp_pseudo_rtx. */
481 bool initialized_mips16_gp_pseudo_p;
482 };
483
484 /* Information about a single argument. */
485 struct mips_arg_info
486 {
487 /* True if the argument is passed in a floating-point register, or
488 would have been if we hadn't run out of registers. */
489 bool fpr_p;
490
491 /* The number of words passed in registers, rounded up. */
492 unsigned int reg_words;
493
494 /* For EABI, the offset of the first register from GP_ARG_FIRST or
495 FP_ARG_FIRST. For other ABIs, the offset of the first register from
496 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
497 comment for details).
498
499 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
500 on the stack. */
501 unsigned int reg_offset;
502
503 /* The number of words that must be passed on the stack, rounded up. */
504 unsigned int stack_words;
505
506 /* The offset from the start of the stack overflow area of the argument's
507 first stack word. Only meaningful when STACK_WORDS is nonzero. */
508 unsigned int stack_offset;
509 };
510
511
512 /* Information about an address described by mips_address_type.
513
514 ADDRESS_CONST_INT
515 No fields are used.
516
517 ADDRESS_REG
518 REG is the base register and OFFSET is the constant offset.
519
520 ADDRESS_LO_SUM
521 REG is the register that contains the high part of the address,
522 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
523 is the type of OFFSET's symbol.
524
525 ADDRESS_SYMBOLIC
526 SYMBOL_TYPE is the type of symbol being referenced. */
527
528 struct mips_address_info
529 {
530 enum mips_address_type type;
531 rtx reg;
532 rtx offset;
533 enum mips_symbol_type symbol_type;
534 };
535
536
537 /* One stage in a constant building sequence. These sequences have
538 the form:
539
540 A = VALUE[0]
541 A = A CODE[1] VALUE[1]
542 A = A CODE[2] VALUE[2]
543 ...
544
545 where A is an accumulator, each CODE[i] is a binary rtl operation
546 and each VALUE[i] is a constant integer. */
547 struct mips_integer_op {
548 enum rtx_code code;
549 unsigned HOST_WIDE_INT value;
550 };
551
552
553 /* The largest number of operations needed to load an integer constant.
554 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
555 When the lowest bit is clear, we can try, but reject a sequence with
556 an extra SLL at the end. */
557 #define MIPS_MAX_INTEGER_OPS 7
558
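/* Worked example (illustrative; not necessarily the exact sequence
   mips_build_integer chooses): 0x1234567800000000 can be built as

     A = 0x12340000          (lui)
     A = A IOR 0x5678        (ori)
     A = A ASHIFT 32         (dsll32)

   i.e. three mips_integer_op entries, well within MIPS_MAX_INTEGER_OPS.  */
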
559 /* Information about a MIPS16e SAVE or RESTORE instruction. */
560 struct mips16e_save_restore_info {
561 /* The number of argument registers saved by a SAVE instruction.
562 0 for RESTORE instructions. */
563 unsigned int nargs;
564
565 /* Bit X is set if the instruction saves or restores GPR X. */
566 unsigned int mask;
567
568 /* The total number of bytes to allocate. */
569 HOST_WIDE_INT size;
570 };
571
572 /* Global variables for machine-dependent things. */
573
574 /* Threshold for data being put into the small data/bss area, instead
575 of the normal data area. */
576 int mips_section_threshold = -1;
577
578 /* Count the number of .file directives, so that .loc is up to date. */
579 int num_source_filenames = 0;
580
 581 /* Count the number of sdb-related labels generated (to find block
 582    start and end boundaries). */
583 int sdb_label_count = 0;
584
585 /* Next label # for each statement for Silicon Graphics IRIS systems. */
586 int sym_lineno = 0;
587
588 /* Name of the file containing the current function. */
589 const char *current_function_file = "";
590
591 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
592 int set_noreorder;
593 int set_noat;
594 int set_nomacro;
595 int set_volatile;
596
597 /* The next branch instruction is a branch likely, not branch normal. */
598 int mips_branch_likely;
599
600 /* The operands passed to the last cmpMM expander. */
601 rtx cmp_operands[2];
602
603 /* The target cpu for code generation. */
604 enum processor_type mips_arch;
605 const struct mips_cpu_info *mips_arch_info;
606
607 /* The target cpu for optimization and scheduling. */
608 enum processor_type mips_tune;
609 const struct mips_cpu_info *mips_tune_info;
610
611 /* Which instruction set architecture to use. */
612 int mips_isa;
613
614 /* Which ABI to use. */
615 int mips_abi = MIPS_ABI_DEFAULT;
616
617 /* Cost information to use. */
618 const struct mips_rtx_cost_data *mips_cost;
619
620 /* The -mtext-loads setting. */
621 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
622
623 /* The architecture selected by -mipsN. */
624 static const struct mips_cpu_info *mips_isa_info;
625
626 /* If TRUE, we split addresses into their high and low parts in the RTL. */
627 int mips_split_addresses;
628
629 /* Mode used for saving/restoring general purpose registers. */
630 static enum machine_mode gpr_mode;
631
 632 /* Array indicating whether or not a given hard register can support
 633    a given mode. */
634 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
635
636 /* List of all MIPS punctuation characters used by print_operand. */
637 char mips_print_operand_punct[256];
638
639 /* Map GCC register number to debugger register number. */
640 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
641 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
642
643 /* A copy of the original flag_delayed_branch: see override_options. */
644 static int mips_flag_delayed_branch;
645
646 static GTY (()) int mips_output_filename_first_time = 1;
647
648 /* mips_split_p[X] is true if symbols of type X can be split by
649 mips_split_symbol(). */
650 bool mips_split_p[NUM_SYMBOL_TYPES];
651
652 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
653 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
654 if they are matched by a special .md file pattern. */
655 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
656
657 /* Likewise for HIGHs. */
658 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
659
 660 /* Map hard register number to register class. */
661 const enum reg_class mips_regno_to_class[] =
662 {
663 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
664 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
665 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
666 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
667 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
668 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
669 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
670 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
671 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
672 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
673 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
674 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
675 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
676 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
677 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
679 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
680 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
681 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
682 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
683 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
684 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
685 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
686 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
687 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
688 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
689 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
690 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
691 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
692 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
693 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
694 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
695 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
696 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
697 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
698 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
699 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
700 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
701 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
702 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
703 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
704 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
705 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
706 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
707 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
708 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
709 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
710 };
711
712 /* Table of machine dependent attributes. */
713 const struct attribute_spec mips_attribute_table[] =
714 {
715 { "long_call", 0, 0, false, true, true, NULL },
716 { "far", 0, 0, false, true, true, NULL },
717 { "near", 0, 0, false, true, true, NULL },
718 { NULL, 0, 0, false, false, false, NULL }
719 };
720 \f
721 /* A table describing all the processors gcc knows about. Names are
722 matched in the order listed. The first mention of an ISA level is
723 taken as the canonical name for that ISA.
724
725 To ease comparison, please keep this table in the same order as
726 gas's mips_cpu_info_table[]. Please also make sure that
727 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
728 const struct mips_cpu_info mips_cpu_info_table[] = {
729 /* Entries for generic ISAs */
730 { "mips1", PROCESSOR_R3000, 1 },
731 { "mips2", PROCESSOR_R6000, 2 },
732 { "mips3", PROCESSOR_R4000, 3 },
733 { "mips4", PROCESSOR_R8000, 4 },
734 { "mips32", PROCESSOR_4KC, 32 },
735 { "mips32r2", PROCESSOR_M4K, 33 },
736 { "mips64", PROCESSOR_5KC, 64 },
737
738 /* MIPS I */
739 { "r3000", PROCESSOR_R3000, 1 },
740 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
741 { "r3900", PROCESSOR_R3900, 1 },
742
743 /* MIPS II */
744 { "r6000", PROCESSOR_R6000, 2 },
745
746 /* MIPS III */
747 { "r4000", PROCESSOR_R4000, 3 },
748 { "vr4100", PROCESSOR_R4100, 3 },
749 { "vr4111", PROCESSOR_R4111, 3 },
750 { "vr4120", PROCESSOR_R4120, 3 },
751 { "vr4130", PROCESSOR_R4130, 3 },
752 { "vr4300", PROCESSOR_R4300, 3 },
753 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
754 { "r4600", PROCESSOR_R4600, 3 },
755 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
756 { "r4650", PROCESSOR_R4650, 3 },
757
758 /* MIPS IV */
759 { "r8000", PROCESSOR_R8000, 4 },
760 { "vr5000", PROCESSOR_R5000, 4 },
761 { "vr5400", PROCESSOR_R5400, 4 },
762 { "vr5500", PROCESSOR_R5500, 4 },
763 { "rm7000", PROCESSOR_R7000, 4 },
764 { "rm9000", PROCESSOR_R9000, 4 },
765
766 /* MIPS32 */
767 { "4kc", PROCESSOR_4KC, 32 },
768 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
769 { "4kp", PROCESSOR_4KP, 32 },
770 { "4ksc", PROCESSOR_4KC, 32 },
771
772 /* MIPS32 Release 2 */
773 { "m4k", PROCESSOR_M4K, 33 },
774 { "4kec", PROCESSOR_4KC, 33 },
775 { "4kem", PROCESSOR_4KC, 33 },
776 { "4kep", PROCESSOR_4KP, 33 },
777 { "4ksd", PROCESSOR_4KC, 33 },
778
779 { "24kc", PROCESSOR_24KC, 33 },
780 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
781 { "24kf", PROCESSOR_24KF2_1, 33 },
782 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
783 { "24kfx", PROCESSOR_24KF1_1, 33 },
784 { "24kx", PROCESSOR_24KF1_1, 33 },
785
786 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
787 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
788 { "24kef", PROCESSOR_24KF2_1, 33 },
789 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
790 { "24kefx", PROCESSOR_24KF1_1, 33 },
791 { "24kex", PROCESSOR_24KF1_1, 33 },
792
793 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
794 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
795 { "34kf", PROCESSOR_24KF2_1, 33 },
796 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
797 { "34kfx", PROCESSOR_24KF1_1, 33 },
798 { "34kx", PROCESSOR_24KF1_1, 33 },
799
800 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
801 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
802 { "74kf", PROCESSOR_74KF2_1, 33 },
803 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
804 { "74kfx", PROCESSOR_74KF1_1, 33 },
805 { "74kx", PROCESSOR_74KF1_1, 33 },
806 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
807
808 /* MIPS64 */
809 { "5kc", PROCESSOR_5KC, 64 },
810 { "5kf", PROCESSOR_5KF, 64 },
811 { "20kc", PROCESSOR_20KC, 64 },
812 { "sb1", PROCESSOR_SB1, 64 },
813 { "sb1a", PROCESSOR_SB1A, 64 },
814 { "sr71000", PROCESSOR_SR71000, 64 },
815
816 /* End marker */
817 { 0, 0, 0 }
818 };
819
820 /* Default costs. If these are used for a processor we should look
821 up the actual costs. */
822 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
823 COSTS_N_INSNS (7), /* fp_mult_sf */ \
824 COSTS_N_INSNS (8), /* fp_mult_df */ \
825 COSTS_N_INSNS (23), /* fp_div_sf */ \
826 COSTS_N_INSNS (36), /* fp_div_df */ \
827 COSTS_N_INSNS (10), /* int_mult_si */ \
828 COSTS_N_INSNS (10), /* int_mult_di */ \
829 COSTS_N_INSNS (69), /* int_div_si */ \
830 COSTS_N_INSNS (69), /* int_div_di */ \
831 2, /* branch_cost */ \
832 4 /* memory_latency */
833
834 /* Need to replace these with the costs of calling the appropriate
835 libgcc routine. */
836 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
837 COSTS_N_INSNS (256), /* fp_mult_sf */ \
838 COSTS_N_INSNS (256), /* fp_mult_df */ \
839 COSTS_N_INSNS (256), /* fp_div_sf */ \
840 COSTS_N_INSNS (256) /* fp_div_df */
841
842 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
843 {
844 COSTS_N_INSNS (1), /* fp_add */
845 COSTS_N_INSNS (1), /* fp_mult_sf */
846 COSTS_N_INSNS (1), /* fp_mult_df */
847 COSTS_N_INSNS (1), /* fp_div_sf */
848 COSTS_N_INSNS (1), /* fp_div_df */
849 COSTS_N_INSNS (1), /* int_mult_si */
850 COSTS_N_INSNS (1), /* int_mult_di */
851 COSTS_N_INSNS (1), /* int_div_si */
852 COSTS_N_INSNS (1), /* int_div_di */
853 2, /* branch_cost */
854 4 /* memory_latency */
855 };
856
857 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
858 {
859 { /* R3000 */
860 COSTS_N_INSNS (2), /* fp_add */
861 COSTS_N_INSNS (4), /* fp_mult_sf */
862 COSTS_N_INSNS (5), /* fp_mult_df */
863 COSTS_N_INSNS (12), /* fp_div_sf */
864 COSTS_N_INSNS (19), /* fp_div_df */
865 COSTS_N_INSNS (12), /* int_mult_si */
866 COSTS_N_INSNS (12), /* int_mult_di */
867 COSTS_N_INSNS (35), /* int_div_si */
868 COSTS_N_INSNS (35), /* int_div_di */
869 1, /* branch_cost */
870 4 /* memory_latency */
871
872 },
873 { /* 4KC */
874 SOFT_FP_COSTS,
875 COSTS_N_INSNS (6), /* int_mult_si */
876 COSTS_N_INSNS (6), /* int_mult_di */
877 COSTS_N_INSNS (36), /* int_div_si */
878 COSTS_N_INSNS (36), /* int_div_di */
879 1, /* branch_cost */
880 4 /* memory_latency */
881 },
882 { /* 4KP */
883 SOFT_FP_COSTS,
884 COSTS_N_INSNS (36), /* int_mult_si */
885 COSTS_N_INSNS (36), /* int_mult_di */
886 COSTS_N_INSNS (37), /* int_div_si */
887 COSTS_N_INSNS (37), /* int_div_di */
888 1, /* branch_cost */
889 4 /* memory_latency */
890 },
891 { /* 5KC */
892 SOFT_FP_COSTS,
893 COSTS_N_INSNS (4), /* int_mult_si */
894 COSTS_N_INSNS (11), /* int_mult_di */
895 COSTS_N_INSNS (36), /* int_div_si */
896 COSTS_N_INSNS (68), /* int_div_di */
897 1, /* branch_cost */
898 4 /* memory_latency */
899 },
900 { /* 5KF */
901 COSTS_N_INSNS (4), /* fp_add */
902 COSTS_N_INSNS (4), /* fp_mult_sf */
903 COSTS_N_INSNS (5), /* fp_mult_df */
904 COSTS_N_INSNS (17), /* fp_div_sf */
905 COSTS_N_INSNS (32), /* fp_div_df */
906 COSTS_N_INSNS (4), /* int_mult_si */
907 COSTS_N_INSNS (11), /* int_mult_di */
908 COSTS_N_INSNS (36), /* int_div_si */
909 COSTS_N_INSNS (68), /* int_div_di */
910 1, /* branch_cost */
911 4 /* memory_latency */
912 },
913 { /* 20KC */
914 COSTS_N_INSNS (4), /* fp_add */
915 COSTS_N_INSNS (4), /* fp_mult_sf */
916 COSTS_N_INSNS (5), /* fp_mult_df */
917 COSTS_N_INSNS (17), /* fp_div_sf */
918 COSTS_N_INSNS (32), /* fp_div_df */
919 COSTS_N_INSNS (4), /* int_mult_si */
920 COSTS_N_INSNS (7), /* int_mult_di */
921 COSTS_N_INSNS (42), /* int_div_si */
922 COSTS_N_INSNS (72), /* int_div_di */
923 1, /* branch_cost */
924 4 /* memory_latency */
925 },
926 { /* 24KC */
927 SOFT_FP_COSTS,
928 COSTS_N_INSNS (5), /* int_mult_si */
929 COSTS_N_INSNS (5), /* int_mult_di */
930 COSTS_N_INSNS (41), /* int_div_si */
931 COSTS_N_INSNS (41), /* int_div_di */
932 1, /* branch_cost */
933 4 /* memory_latency */
934 },
935 { /* 24KF2_1 */
936 COSTS_N_INSNS (8), /* fp_add */
937 COSTS_N_INSNS (8), /* fp_mult_sf */
938 COSTS_N_INSNS (10), /* fp_mult_df */
939 COSTS_N_INSNS (34), /* fp_div_sf */
940 COSTS_N_INSNS (64), /* fp_div_df */
941 COSTS_N_INSNS (5), /* int_mult_si */
942 COSTS_N_INSNS (5), /* int_mult_di */
943 COSTS_N_INSNS (41), /* int_div_si */
944 COSTS_N_INSNS (41), /* int_div_di */
945 1, /* branch_cost */
946 4 /* memory_latency */
947 },
948 { /* 24KF1_1 */
949 COSTS_N_INSNS (4), /* fp_add */
950 COSTS_N_INSNS (4), /* fp_mult_sf */
951 COSTS_N_INSNS (5), /* fp_mult_df */
952 COSTS_N_INSNS (17), /* fp_div_sf */
953 COSTS_N_INSNS (32), /* fp_div_df */
954 COSTS_N_INSNS (5), /* int_mult_si */
955 COSTS_N_INSNS (5), /* int_mult_di */
956 COSTS_N_INSNS (41), /* int_div_si */
957 COSTS_N_INSNS (41), /* int_div_di */
958 1, /* branch_cost */
959 4 /* memory_latency */
960 },
961 { /* 74KC */
962 SOFT_FP_COSTS,
963 COSTS_N_INSNS (5), /* int_mult_si */
964 COSTS_N_INSNS (5), /* int_mult_di */
965 COSTS_N_INSNS (41), /* int_div_si */
966 COSTS_N_INSNS (41), /* int_div_di */
967 1, /* branch_cost */
968 4 /* memory_latency */
969 },
970 { /* 74KF2_1 */
971 COSTS_N_INSNS (8), /* fp_add */
972 COSTS_N_INSNS (8), /* fp_mult_sf */
973 COSTS_N_INSNS (10), /* fp_mult_df */
974 COSTS_N_INSNS (34), /* fp_div_sf */
975 COSTS_N_INSNS (64), /* fp_div_df */
976 COSTS_N_INSNS (5), /* int_mult_si */
977 COSTS_N_INSNS (5), /* int_mult_di */
978 COSTS_N_INSNS (41), /* int_div_si */
979 COSTS_N_INSNS (41), /* int_div_di */
980 1, /* branch_cost */
981 4 /* memory_latency */
982 },
983 { /* 74KF1_1 */
984 COSTS_N_INSNS (4), /* fp_add */
985 COSTS_N_INSNS (4), /* fp_mult_sf */
986 COSTS_N_INSNS (5), /* fp_mult_df */
987 COSTS_N_INSNS (17), /* fp_div_sf */
988 COSTS_N_INSNS (32), /* fp_div_df */
989 COSTS_N_INSNS (5), /* int_mult_si */
990 COSTS_N_INSNS (5), /* int_mult_di */
991 COSTS_N_INSNS (41), /* int_div_si */
992 COSTS_N_INSNS (41), /* int_div_di */
993 1, /* branch_cost */
994 4 /* memory_latency */
995 },
996 { /* 74KF3_2 */
997 COSTS_N_INSNS (6), /* fp_add */
998 COSTS_N_INSNS (6), /* fp_mult_sf */
999 COSTS_N_INSNS (7), /* fp_mult_df */
1000 COSTS_N_INSNS (25), /* fp_div_sf */
1001 COSTS_N_INSNS (48), /* fp_div_df */
1002 COSTS_N_INSNS (5), /* int_mult_si */
1003 COSTS_N_INSNS (5), /* int_mult_di */
1004 COSTS_N_INSNS (41), /* int_div_si */
1005 COSTS_N_INSNS (41), /* int_div_di */
1006 1, /* branch_cost */
1007 4 /* memory_latency */
1008 },
1009 { /* M4k */
1010 DEFAULT_COSTS
1011 },
1012 { /* R3900 */
1013 COSTS_N_INSNS (2), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (5), /* fp_mult_df */
1016 COSTS_N_INSNS (12), /* fp_div_sf */
1017 COSTS_N_INSNS (19), /* fp_div_df */
1018 COSTS_N_INSNS (2), /* int_mult_si */
1019 COSTS_N_INSNS (2), /* int_mult_di */
1020 COSTS_N_INSNS (35), /* int_div_si */
1021 COSTS_N_INSNS (35), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1024 },
1025 { /* R6000 */
1026 COSTS_N_INSNS (3), /* fp_add */
1027 COSTS_N_INSNS (5), /* fp_mult_sf */
1028 COSTS_N_INSNS (6), /* fp_mult_df */
1029 COSTS_N_INSNS (15), /* fp_div_sf */
1030 COSTS_N_INSNS (16), /* fp_div_df */
1031 COSTS_N_INSNS (17), /* int_mult_si */
1032 COSTS_N_INSNS (17), /* int_mult_di */
1033 COSTS_N_INSNS (38), /* int_div_si */
1034 COSTS_N_INSNS (38), /* int_div_di */
1035 2, /* branch_cost */
1036 6 /* memory_latency */
1037 },
1038 { /* R4000 */
1039 COSTS_N_INSNS (6), /* fp_add */
1040 COSTS_N_INSNS (7), /* fp_mult_sf */
1041 COSTS_N_INSNS (8), /* fp_mult_df */
1042 COSTS_N_INSNS (23), /* fp_div_sf */
1043 COSTS_N_INSNS (36), /* fp_div_df */
1044 COSTS_N_INSNS (10), /* int_mult_si */
1045 COSTS_N_INSNS (10), /* int_mult_di */
1046 COSTS_N_INSNS (69), /* int_div_si */
1047 COSTS_N_INSNS (69), /* int_div_di */
1048 2, /* branch_cost */
1049 6 /* memory_latency */
1050 },
1051 { /* R4100 */
1052 DEFAULT_COSTS
1053 },
1054 { /* R4111 */
1055 DEFAULT_COSTS
1056 },
1057 { /* R4120 */
1058 DEFAULT_COSTS
1059 },
1060 { /* R4130 */
1061     /* The only costs that appear to be updated here are those for
1062        integer multiplication. */
1063 SOFT_FP_COSTS,
1064 COSTS_N_INSNS (4), /* int_mult_si */
1065 COSTS_N_INSNS (6), /* int_mult_di */
1066 COSTS_N_INSNS (69), /* int_div_si */
1067 COSTS_N_INSNS (69), /* int_div_di */
1068 1, /* branch_cost */
1069 4 /* memory_latency */
1070 },
1071 { /* R4300 */
1072 DEFAULT_COSTS
1073 },
1074 { /* R4600 */
1075 DEFAULT_COSTS
1076 },
1077 { /* R4650 */
1078 DEFAULT_COSTS
1079 },
1080 { /* R5000 */
1081 COSTS_N_INSNS (6), /* fp_add */
1082 COSTS_N_INSNS (4), /* fp_mult_sf */
1083 COSTS_N_INSNS (5), /* fp_mult_df */
1084 COSTS_N_INSNS (23), /* fp_div_sf */
1085 COSTS_N_INSNS (36), /* fp_div_df */
1086 COSTS_N_INSNS (5), /* int_mult_si */
1087 COSTS_N_INSNS (5), /* int_mult_di */
1088 COSTS_N_INSNS (36), /* int_div_si */
1089 COSTS_N_INSNS (36), /* int_div_di */
1090 1, /* branch_cost */
1091 4 /* memory_latency */
1092 },
1093 { /* R5400 */
1094 COSTS_N_INSNS (6), /* fp_add */
1095 COSTS_N_INSNS (5), /* fp_mult_sf */
1096 COSTS_N_INSNS (6), /* fp_mult_df */
1097 COSTS_N_INSNS (30), /* fp_div_sf */
1098 COSTS_N_INSNS (59), /* fp_div_df */
1099 COSTS_N_INSNS (3), /* int_mult_si */
1100 COSTS_N_INSNS (4), /* int_mult_di */
1101 COSTS_N_INSNS (42), /* int_div_si */
1102 COSTS_N_INSNS (74), /* int_div_di */
1103 1, /* branch_cost */
1104 4 /* memory_latency */
1105 },
1106 { /* R5500 */
1107 COSTS_N_INSNS (6), /* fp_add */
1108 COSTS_N_INSNS (5), /* fp_mult_sf */
1109 COSTS_N_INSNS (6), /* fp_mult_df */
1110 COSTS_N_INSNS (30), /* fp_div_sf */
1111 COSTS_N_INSNS (59), /* fp_div_df */
1112 COSTS_N_INSNS (5), /* int_mult_si */
1113 COSTS_N_INSNS (9), /* int_mult_di */
1114 COSTS_N_INSNS (42), /* int_div_si */
1115 COSTS_N_INSNS (74), /* int_div_di */
1116 1, /* branch_cost */
1117 4 /* memory_latency */
1118 },
1119 { /* R7000 */
1120     /* The only costs that are changed here are those for
1121        integer multiplication. */
1122 COSTS_N_INSNS (6), /* fp_add */
1123 COSTS_N_INSNS (7), /* fp_mult_sf */
1124 COSTS_N_INSNS (8), /* fp_mult_df */
1125 COSTS_N_INSNS (23), /* fp_div_sf */
1126 COSTS_N_INSNS (36), /* fp_div_df */
1127 COSTS_N_INSNS (5), /* int_mult_si */
1128 COSTS_N_INSNS (9), /* int_mult_di */
1129 COSTS_N_INSNS (69), /* int_div_si */
1130 COSTS_N_INSNS (69), /* int_div_di */
1131 1, /* branch_cost */
1132 4 /* memory_latency */
1133 },
1134 { /* R8000 */
1135 DEFAULT_COSTS
1136 },
1137 { /* R9000 */
1138     /* The only costs that are changed here are those for
1139        integer multiplication. */
1140 COSTS_N_INSNS (6), /* fp_add */
1141 COSTS_N_INSNS (7), /* fp_mult_sf */
1142 COSTS_N_INSNS (8), /* fp_mult_df */
1143 COSTS_N_INSNS (23), /* fp_div_sf */
1144 COSTS_N_INSNS (36), /* fp_div_df */
1145 COSTS_N_INSNS (3), /* int_mult_si */
1146 COSTS_N_INSNS (8), /* int_mult_di */
1147 COSTS_N_INSNS (69), /* int_div_si */
1148 COSTS_N_INSNS (69), /* int_div_di */
1149 1, /* branch_cost */
1150 4 /* memory_latency */
1151 },
1152 { /* SB1 */
1153 /* These costs are the same as the SB-1A below. */
1154 COSTS_N_INSNS (4), /* fp_add */
1155 COSTS_N_INSNS (4), /* fp_mult_sf */
1156 COSTS_N_INSNS (4), /* fp_mult_df */
1157 COSTS_N_INSNS (24), /* fp_div_sf */
1158 COSTS_N_INSNS (32), /* fp_div_df */
1159 COSTS_N_INSNS (3), /* int_mult_si */
1160 COSTS_N_INSNS (4), /* int_mult_di */
1161 COSTS_N_INSNS (36), /* int_div_si */
1162 COSTS_N_INSNS (68), /* int_div_di */
1163 1, /* branch_cost */
1164 4 /* memory_latency */
1165 },
1166 { /* SB1-A */
1167 /* These costs are the same as the SB-1 above. */
1168 COSTS_N_INSNS (4), /* fp_add */
1169 COSTS_N_INSNS (4), /* fp_mult_sf */
1170 COSTS_N_INSNS (4), /* fp_mult_df */
1171 COSTS_N_INSNS (24), /* fp_div_sf */
1172 COSTS_N_INSNS (32), /* fp_div_df */
1173 COSTS_N_INSNS (3), /* int_mult_si */
1174 COSTS_N_INSNS (4), /* int_mult_di */
1175 COSTS_N_INSNS (36), /* int_div_si */
1176 COSTS_N_INSNS (68), /* int_div_di */
1177 1, /* branch_cost */
1178 4 /* memory_latency */
1179 },
1180 { /* SR71000 */
1181 DEFAULT_COSTS
1182 },
1183 };
1184
1185 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1186 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1187 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1188 static const unsigned char mips16e_s2_s8_regs[] = {
1189 30, 23, 22, 21, 20, 19, 18
1190 };
1191 static const unsigned char mips16e_a0_a3_regs[] = {
1192 4, 5, 6, 7
1193 };
1194
1195 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1196 ordered from the uppermost in memory to the lowest in memory. */
1197 static const unsigned char mips16e_save_restore_regs[] = {
1198 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1199 };
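
/* Worked example of the rules above (a sketch): saving $21
   (mips16e_s2_s8_regs[3]) also requires saving $20, $19 and $18
   (indexes 4 onwards), and mips16e_save_restore_regs places $31 at the
   highest address of the save area and $4 at the lowest.  */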
1200 \f
1201 /* Nonzero if -march should decide the default value of
1202 MASK_SOFT_FLOAT_ABI. */
1203 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1204 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1205 #endif
1206 \f
1207 /* Initialize the GCC target structure. */
1208 #undef TARGET_ASM_ALIGNED_HI_OP
1209 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1210 #undef TARGET_ASM_ALIGNED_SI_OP
1211 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1212 #undef TARGET_ASM_ALIGNED_DI_OP
1213 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1214
1215 #undef TARGET_ASM_FUNCTION_PROLOGUE
1216 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1217 #undef TARGET_ASM_FUNCTION_EPILOGUE
1218 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1219 #undef TARGET_ASM_SELECT_RTX_SECTION
1220 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1221 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1222 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1223
1224 #undef TARGET_SCHED_INIT
1225 #define TARGET_SCHED_INIT mips_sched_init
1226 #undef TARGET_SCHED_REORDER
1227 #define TARGET_SCHED_REORDER mips_sched_reorder
1228 #undef TARGET_SCHED_REORDER2
1229 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1230 #undef TARGET_SCHED_VARIABLE_ISSUE
1231 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1232 #undef TARGET_SCHED_ADJUST_COST
1233 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1234 #undef TARGET_SCHED_ISSUE_RATE
1235 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1236 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1237 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1238 mips_multipass_dfa_lookahead
1239
1240 #undef TARGET_DEFAULT_TARGET_FLAGS
1241 #define TARGET_DEFAULT_TARGET_FLAGS \
1242 (TARGET_DEFAULT \
1243 | TARGET_CPU_DEFAULT \
1244 | TARGET_ENDIAN_DEFAULT \
1245 | TARGET_FP_EXCEPTIONS_DEFAULT \
1246 | MASK_CHECK_ZERO_DIV \
1247 | MASK_FUSED_MADD)
1248 #undef TARGET_HANDLE_OPTION
1249 #define TARGET_HANDLE_OPTION mips_handle_option
1250
1251 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1252 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1253
1254 #undef TARGET_VALID_POINTER_MODE
1255 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1256 #undef TARGET_RTX_COSTS
1257 #define TARGET_RTX_COSTS mips_rtx_costs
1258 #undef TARGET_ADDRESS_COST
1259 #define TARGET_ADDRESS_COST mips_address_cost
1260
1261 #undef TARGET_IN_SMALL_DATA_P
1262 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1263
1264 #undef TARGET_MACHINE_DEPENDENT_REORG
1265 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1266
1267 #undef TARGET_ASM_FILE_START
1268 #define TARGET_ASM_FILE_START mips_file_start
1269 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1270 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1271
1272 #undef TARGET_INIT_LIBFUNCS
1273 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1274
1275 #undef TARGET_BUILD_BUILTIN_VA_LIST
1276 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1277 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1278 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1279
1280 #undef TARGET_PROMOTE_FUNCTION_ARGS
1281 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1282 #undef TARGET_PROMOTE_FUNCTION_RETURN
1283 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1284 #undef TARGET_PROMOTE_PROTOTYPES
1285 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1286
1287 #undef TARGET_RETURN_IN_MEMORY
1288 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1289 #undef TARGET_RETURN_IN_MSB
1290 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1291
1292 #undef TARGET_ASM_OUTPUT_MI_THUNK
1293 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1294 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1295 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1296
1297 #undef TARGET_SETUP_INCOMING_VARARGS
1298 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1299 #undef TARGET_STRICT_ARGUMENT_NAMING
1300 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1301 #undef TARGET_MUST_PASS_IN_STACK
1302 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1303 #undef TARGET_PASS_BY_REFERENCE
1304 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1305 #undef TARGET_CALLEE_COPIES
1306 #define TARGET_CALLEE_COPIES mips_callee_copies
1307 #undef TARGET_ARG_PARTIAL_BYTES
1308 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1309
1310 #undef TARGET_MODE_REP_EXTENDED
1311 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1312
1313 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1314 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1315
1316 #undef TARGET_INIT_BUILTINS
1317 #define TARGET_INIT_BUILTINS mips_init_builtins
1318 #undef TARGET_EXPAND_BUILTIN
1319 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1320
1321 #undef TARGET_HAVE_TLS
1322 #define TARGET_HAVE_TLS HAVE_AS_TLS
1323
1324 #undef TARGET_CANNOT_FORCE_CONST_MEM
1325 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1326
1327 #undef TARGET_ENCODE_SECTION_INFO
1328 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1329
1330 #undef TARGET_ATTRIBUTE_TABLE
1331 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1332
1333 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1334 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1335
1336 #undef TARGET_MIN_ANCHOR_OFFSET
1337 #define TARGET_MIN_ANCHOR_OFFSET -32768
1338 #undef TARGET_MAX_ANCHOR_OFFSET
1339 #define TARGET_MAX_ANCHOR_OFFSET 32767
1340 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1341 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1342 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1343 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1344
1345 #undef TARGET_COMP_TYPE_ATTRIBUTES
1346 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1347
1348 #ifdef HAVE_AS_DTPRELWORD
1349 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1350 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1351 #endif
1352
1353 struct gcc_target targetm = TARGET_INITIALIZER;
1354
1355
1356 /* Predicates to test for presence of "near" and "far"/"long_call"
1357 attributes on the given TYPE. */
1358
1359 static bool
1360 mips_near_type_p (tree type)
1361 {
1362 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1363 }
1364
1365 static bool
1366 mips_far_type_p (tree type)
1367 {
1368 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1369 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1370 }
1371
1372
1373 /* Return 0 if the attributes for two types are incompatible, 1 if they
1374 are compatible, and 2 if they are nearly compatible (which causes a
1375 warning to be generated). */
1376
1377 static int
1378 mips_comp_type_attributes (tree type1, tree type2)
1379 {
1380 /* Check for mismatch of non-default calling convention. */
1381 if (TREE_CODE (type1) != FUNCTION_TYPE)
1382 return 1;
1383
1384 /* Disallow mixed near/far attributes. */
1385 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1386 return 0;
1387 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1388 return 0;
1389
1390 return 1;
1391 }
1392 \f
1393 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1394 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1395
1396 static void
1397 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1398 {
1399 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1400 {
1401 *base_ptr = XEXP (x, 0);
1402 *offset_ptr = INTVAL (XEXP (x, 1));
1403 }
1404 else
1405 {
1406 *base_ptr = x;
1407 *offset_ptr = 0;
1408 }
1409 }
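
/* Usage sketch (hypothetical caller): splitting (plus (reg) (const_int 16))
   yields the register and 16, while any other form, including nested
   PLUSes (which this function does not look through), yields X itself
   and an offset of 0.  */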
1410 \f
1411 /* Return true if SYMBOL_REF X is associated with a global symbol
1412 (in the STB_GLOBAL sense). */
1413
1414 static bool
1415 mips_global_symbol_p (rtx x)
1416 {
1417 tree decl;
1418
1419 decl = SYMBOL_REF_DECL (x);
1420 if (!decl)
1421 return !SYMBOL_REF_LOCAL_P (x);
1422
1423 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1424 or weak symbols. Relocations in the object file will be against
1425 the target symbol, so it's that symbol's binding that matters here. */
1426 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1427 }
1428
1429 /* Return true if SYMBOL_REF X binds locally. */
1430
1431 static bool
1432 mips_symbol_binds_local_p (rtx x)
1433 {
1434 return (SYMBOL_REF_DECL (x)
1435 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1436 : SYMBOL_REF_LOCAL_P (x));
1437 }
1438
1439 /* Return the method that should be used to access SYMBOL_REF or
1440 LABEL_REF X in context CONTEXT. */
1441
1442 static enum mips_symbol_type
1443 mips_classify_symbol (rtx x, enum mips_symbol_context context)
1444 {
1445 if (TARGET_RTP_PIC)
1446 return SYMBOL_GOT_DISP;
1447
1448 if (GET_CODE (x) == LABEL_REF)
1449 {
1450 /* LABEL_REFs are used for jump tables as well as text labels.
1451 Only return SYMBOL_PC_RELATIVE if we know the label is in
1452 the text section. */
1453 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1454 return SYMBOL_PC_RELATIVE;
1455 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1456 return SYMBOL_GOT_PAGE_OFST;
1457 return SYMBOL_ABSOLUTE;
1458 }
1459
1460 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1461
1462 if (SYMBOL_REF_TLS_MODEL (x))
1463 return SYMBOL_TLS;
1464
1465 if (CONSTANT_POOL_ADDRESS_P (x))
1466 {
1467 if (TARGET_MIPS16_TEXT_LOADS)
1468 return SYMBOL_PC_RELATIVE;
1469
1470 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1471 return SYMBOL_PC_RELATIVE;
1472
1473 if (!TARGET_EMBEDDED_DATA
1474 && GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1475 return SYMBOL_GP_RELATIVE;
1476 }
1477
1478 /* Do not use small-data accesses for weak symbols; they may end up
1479 being zero. */
1480 if (SYMBOL_REF_SMALL_P (x)
1481 && !SYMBOL_REF_WEAK (x))
1482 return SYMBOL_GP_RELATIVE;
1483
1484 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1485 is in effect. */
1486 if (TARGET_ABICALLS
1487 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1488 {
1489 /* There are three cases to consider:
1490
1491 - o32 PIC (either with or without explicit relocs)
1492 - n32/n64 PIC without explicit relocs
1493 - n32/n64 PIC with explicit relocs
1494
1495 In the first case, both local and global accesses will use an
1496 R_MIPS_GOT16 relocation. We must correctly predict which of
1497 the two semantics (local or global) the assembler and linker
1498 will apply. The choice depends on the symbol's binding rather
1499 than its visibility.
1500
1501 In the second case, the assembler will not use R_MIPS_GOT16
1502 relocations, but it chooses between local and global accesses
1503 in the same way as for o32 PIC.
1504
1505 In the third case we have more freedom since both forms of
1506 access will work for any kind of symbol. However, there seems
1507 little point in doing things differently. */
1508 if (mips_global_symbol_p (x))
1509 return SYMBOL_GOT_DISP;
1510
1511 return SYMBOL_GOT_PAGE_OFST;
1512 }
1513
1514 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1515 return SYMBOL_FORCE_TO_MEM;
1516 return SYMBOL_ABSOLUTE;
1517 }
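
/* Typical outcomes of the classification above (a sketch, assuming
   -mno-abicalls and no MIPS16 PC-relative loads): a TLS symbol is
   SYMBOL_TLS, a non-weak symbol that fits in the small-data section is
   SYMBOL_GP_RELATIVE, and an ordinary symbol is SYMBOL_ABSOLUTE.  */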
1518
1519 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1520 is the alignment (in bytes) of SYMBOL_REF X. */
1521
1522 static bool
1523 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1524 {
1525 /* If for some reason we can't get the alignment for the
1526 symbol, initializing this to one means we will only accept
1527 a zero offset. */
1528 HOST_WIDE_INT align = 1;
1529 tree t;
1530
1531 /* Get the alignment of the symbol we're referring to. */
1532 t = SYMBOL_REF_DECL (x);
1533 if (t)
1534 align = DECL_ALIGN_UNIT (t);
1535
1536 return offset >= 0 && offset < align;
1537 }
1538
1539 /* Return true if X is a symbolic constant that can be used in context
1540 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1541
1542 bool
1543 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1544 enum mips_symbol_type *symbol_type)
1545 {
1546 rtx offset;
1547
1548 split_const (x, &x, &offset);
1549 if (UNSPEC_ADDRESS_P (x))
1550 {
1551 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1552 x = UNSPEC_ADDRESS (x);
1553 }
1554 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1555 {
1556 *symbol_type = mips_classify_symbol (x, context);
1557 if (*symbol_type == SYMBOL_TLS)
1558 return false;
1559 }
1560 else
1561 return false;
1562
1563 if (offset == const0_rtx)
1564 return true;
1565
1566 /* Check whether a nonzero offset is valid for the underlying
1567 relocations. */
1568 switch (*symbol_type)
1569 {
1570 case SYMBOL_ABSOLUTE:
1571 case SYMBOL_FORCE_TO_MEM:
1572 case SYMBOL_32_HIGH:
1573 case SYMBOL_64_HIGH:
1574 case SYMBOL_64_MID:
1575 case SYMBOL_64_LOW:
1576 /* If the target has 64-bit pointers and the object file only
1577 supports 32-bit symbols, the values of those symbols will be
1578 sign-extended. In this case we can't allow an arbitrary offset
1579 in case the 32-bit value X + OFFSET has a different sign from X. */
1580 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1581 return offset_within_block_p (x, INTVAL (offset));
1582
1583 /* In other cases the relocations can handle any offset. */
1584 return true;
1585
1586 case SYMBOL_PC_RELATIVE:
1587 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1588 In this case, we no longer have access to the underlying constant,
1589 but the original symbol-based access was known to be valid. */
1590 if (GET_CODE (x) == LABEL_REF)
1591 return true;
1592
1593 /* Fall through. */
1594
1595 case SYMBOL_GP_RELATIVE:
1596 /* Make sure that the offset refers to something within the
1597 same object block. This should guarantee that the final
1598 PC- or GP-relative offset is within the 16-bit limit. */
1599 return offset_within_block_p (x, INTVAL (offset));
1600
1601 case SYMBOL_GOT_PAGE_OFST:
1602 case SYMBOL_GOTOFF_PAGE:
1603 /* If the symbol is global, the GOT entry will contain the symbol's
1604 address, and we will apply a 16-bit offset after loading it.
1605 If the symbol is local, the linker should provide enough local
1606 GOT entries for a 16-bit offset, but larger offsets may lead
1607 to GOT overflow. */
1608 return SMALL_INT (offset);
1609
1610 case SYMBOL_TPREL:
1611 case SYMBOL_DTPREL:
1612 /* There is no carry between the HI and LO REL relocations, so the
1613 offset is only valid if we know it won't lead to such a carry. */
1614 return mips_offset_within_alignment_p (x, INTVAL (offset));
1615
1616 case SYMBOL_GOT_DISP:
1617 case SYMBOL_GOTOFF_DISP:
1618 case SYMBOL_GOTOFF_CALL:
1619 case SYMBOL_GOTOFF_LOADGP:
1620 case SYMBOL_TLSGD:
1621 case SYMBOL_TLSLDM:
1622 case SYMBOL_GOTTPREL:
1623 case SYMBOL_TLS:
1624 case SYMBOL_HALF:
1625 return false;
1626 }
1627 gcc_unreachable ();
1628 }
1629
1630
1631 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1632
1633 int
1634 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1635 {
1636 if (!HARD_REGISTER_NUM_P (regno))
1637 {
1638 if (!strict)
1639 return true;
1640 regno = reg_renumber[regno];
1641 }
1642
1643 /* These fake registers will be eliminated to either the stack or
1644 hard frame pointer, both of which are usually valid base registers.
1645 Reload deals with the cases where the eliminated form isn't valid. */
1646 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1647 return true;
1648
1649 /* In mips16 mode, the stack pointer can only address word and doubleword
1650 values, nothing smaller. There are two problems here:
1651
1652 (a) Instantiating virtual registers can introduce new uses of the
1653 stack pointer. If these virtual registers are valid addresses,
1654 the stack pointer should be too.
1655
1656 (b) Most uses of the stack pointer are not made explicit until
1657 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1658 We don't know until that stage whether we'll be eliminating to the
1659 stack pointer (which needs the restriction) or the hard frame
1660 pointer (which doesn't).
1661
1662 All in all, it seems more consistent to only enforce this restriction
1663 during and after reload. */
1664 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1665 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1666
1667 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1668 }
1669
1670
1671 /* Return true if X is a valid base register for the given mode.
1672 Allow only hard registers if STRICT. */
1673
1674 static bool
1675 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1676 {
1677 if (!strict && GET_CODE (x) == SUBREG)
1678 x = SUBREG_REG (x);
1679
1680 return (REG_P (x)
1681 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1682 }
1683
1684
1685 /* Return true if X is a valid address for machine mode MODE. If it is,
1686 fill in INFO appropriately. STRICT is true if we should only accept
1687 hard base registers. */
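/* As an illustration (the register numbers and symbol names below are
   arbitrary), the forms accepted by mips_classify_address include:

     (reg $16)                              -> ADDRESS_REG, offset 0
     (plus (reg $16) (const_int 64))        -> ADDRESS_REG, offset 64
     (lo_sum (reg $16) (symbol_ref "x"))    -> ADDRESS_LO_SUM
     (const_int 100)                        -> ADDRESS_CONST_INT
     (symbol_ref "x")                       -> ADDRESS_SYMBOLIC

   each subject to the additional checks made in the corresponding
   case below. */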
1688
1689 static bool
1690 mips_classify_address (struct mips_address_info *info, rtx x,
1691 enum machine_mode mode, int strict)
1692 {
1693 switch (GET_CODE (x))
1694 {
1695 case REG:
1696 case SUBREG:
1697 info->type = ADDRESS_REG;
1698 info->reg = x;
1699 info->offset = const0_rtx;
1700 return mips_valid_base_register_p (info->reg, mode, strict);
1701
1702 case PLUS:
1703 info->type = ADDRESS_REG;
1704 info->reg = XEXP (x, 0);
1705 info->offset = XEXP (x, 1);
1706 return (mips_valid_base_register_p (info->reg, mode, strict)
1707 && const_arith_operand (info->offset, VOIDmode));
1708
1709 case LO_SUM:
1710 info->type = ADDRESS_LO_SUM;
1711 info->reg = XEXP (x, 0);
1712 info->offset = XEXP (x, 1);
1713 return (mips_valid_base_register_p (info->reg, mode, strict)
1714 && mips_symbolic_constant_p (info->offset, SYMBOL_CONTEXT_MEM,
1715 &info->symbol_type)
1716 && mips_symbol_insns (info->symbol_type, mode) > 0
1717 && mips_lo_relocs[info->symbol_type] != 0);
1718
1719 case CONST_INT:
1720 /* Small-integer addresses don't occur very often, but they
1721 are legitimate if $0 is a valid base register. */
1722 info->type = ADDRESS_CONST_INT;
1723 return !TARGET_MIPS16 && SMALL_INT (x);
1724
1725 case CONST:
1726 case LABEL_REF:
1727 case SYMBOL_REF:
1728 info->type = ADDRESS_SYMBOLIC;
1729 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1730 &info->symbol_type)
1731 && mips_symbol_insns (info->symbol_type, mode) > 0
1732 && !mips_split_p[info->symbol_type]);
1733
1734 default:
1735 return false;
1736 }
1737 }
1738
1739 /* Return true if X is a thread-local symbol. */
1740
1741 static bool
1742 mips_tls_operand_p (rtx x)
1743 {
1744 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1745 }
1746
1747 /* A for_each_rtx callback: return true if *X is a thread-local symbol. */
1748
1749 static int
1750 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1751 {
1752 return mips_tls_operand_p (*x);
1753 }
1754
1755 /* Return true if X cannot be forced into a constant pool. */
1756
1757 static bool
1758 mips_cannot_force_const_mem (rtx x)
1759 {
1760 rtx base, offset;
1761
1762 if (!TARGET_MIPS16)
1763 {
1764 /* As an optimization, reject constants that mips_legitimize_move
1765 can expand inline.
1766
1767 Suppose we have a multi-instruction sequence that loads constant C
1768 into register R. If R does not get allocated a hard register, and
1769 R is used in an operand that allows both registers and memory
1770 references, reload will consider forcing C into memory and using
1771 one of the instruction's memory alternatives. Returning false
1772 here will force it to use an input reload instead. */
1773 if (GET_CODE (x) == CONST_INT)
1774 return true;
1775
1776 split_const (x, &base, &offset);
1777 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1778 return true;
1779 }
1780
1781 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1782 return true;
1783
1784 return false;
1785 }
1786
1787 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1788 constants when we're using a per-function constant pool. */
1789
1790 static bool
1791 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1792 rtx x ATTRIBUTE_UNUSED)
1793 {
1794 return !TARGET_MIPS16_PCREL_LOADS;
1795 }
1796 \f
1797 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1798 single instruction. We rely on the fact that, in the worst case,
1799 all instructions involved in a MIPS16 address calculation are
1800 extended ones. */
1801
1802 static int
1803 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1804 {
1805 switch (type)
1806 {
1807 case SYMBOL_ABSOLUTE:
1808 /* When using 64-bit symbols, we need 5 preparatory instructions,
1809 such as:
1810
1811 lui $at,%highest(symbol)
1812 daddiu $at,$at,%higher(symbol)
1813 dsll $at,$at,16
1814 daddiu $at,$at,%hi(symbol)
1815 dsll $at,$at,16
1816
1817 The final address is then $at + %lo(symbol). With 32-bit
1818 symbols we just need a preparatory lui for normal mode and
1819 a preparatory "li; sll" for MIPS16. */
1820 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1821
1822 case SYMBOL_GP_RELATIVE:
1823 /* Treat GP-relative accesses as taking a single instruction on
1824 MIPS16 too; the copy of $gp can often be shared. */
1825 return 1;
1826
1827 case SYMBOL_PC_RELATIVE:
1828 /* PC-relative constants can only be used with addiupc,
1829 lwpc and ldpc. */
1830 if (mode == MAX_MACHINE_MODE
1831 || GET_MODE_SIZE (mode) == 4
1832 || GET_MODE_SIZE (mode) == 8)
1833 return 1;
1834
1835 /* The constant must be loaded using addiupc first. */
1836 return 0;
1837
1838 case SYMBOL_FORCE_TO_MEM:
1839 /* The constant must be loaded from the constant pool. */
1840 return 0;
1841
1842 case SYMBOL_GOT_DISP:
1843 /* The constant will have to be loaded from the GOT before it
1844 is used in an address. */
1845 if (mode != MAX_MACHINE_MODE)
1846 return 0;
1847
1848 /* Fall through. */
1849
1850 case SYMBOL_GOT_PAGE_OFST:
1851 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1852 the local/global classification is accurate. See override_options
1853 for details.
1854
1855 The worst cases are:
1856
1857 (1) For local symbols when generating o32 or o64 code. The assembler
1858 will use:
1859
1860 lw $at,%got(symbol)
1861 nop
1862
1863 ...and the final address will be $at + %lo(symbol).
1864
1865 (2) For global symbols when -mxgot. The assembler will use:
1866
1867 lui $at,%got_hi(symbol)
1868 (d)addu $at,$at,$gp
1869
1870 ...and the final address will be $at + %got_lo(symbol). */
1871 return 3;
1872
1873 case SYMBOL_GOTOFF_PAGE:
1874 case SYMBOL_GOTOFF_DISP:
1875 case SYMBOL_GOTOFF_CALL:
1876 case SYMBOL_GOTOFF_LOADGP:
1877 case SYMBOL_32_HIGH:
1878 case SYMBOL_64_HIGH:
1879 case SYMBOL_64_MID:
1880 case SYMBOL_64_LOW:
1881 case SYMBOL_TLSGD:
1882 case SYMBOL_TLSLDM:
1883 case SYMBOL_DTPREL:
1884 case SYMBOL_GOTTPREL:
1885 case SYMBOL_TPREL:
1886 case SYMBOL_HALF:
1887 /* A 16-bit constant formed by a single relocation, or a 32-bit
1888 constant formed from a high 16-bit relocation and a low 16-bit
1889 relocation. Use mips_split_p to determine which. */
1890 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1891
1892 case SYMBOL_TLS:
1893 /* We don't treat a bare TLS symbol as a constant. */
1894 return 0;
1895 }
1896 gcc_unreachable ();
1897 }
1898
1899 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1900 to load symbols of type TYPE into a register. Return 0 if the given
1901 type of symbol cannot be used as an immediate operand.
1902
1903 Otherwise, return the number of instructions needed to load or store
1904 values of mode MODE to or from addresses of type TYPE. Return 0 if
1905 the given type of symbol is not valid in addresses.
1906
1907 In both cases, treat extended MIPS16 instructions as two instructions. */
1908
1909 static int
1910 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1911 {
1912 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1913 }
1914
1915 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1916
1917 bool
1918 mips_stack_address_p (rtx x, enum machine_mode mode)
1919 {
1920 struct mips_address_info addr;
1921
1922 return (mips_classify_address (&addr, x, mode, false)
1923 && addr.type == ADDRESS_REG
1924 && addr.reg == stack_pointer_rtx);
1925 }
1926
1927 /* Return true if a value at OFFSET bytes from BASE can be accessed
1928 using an unextended mips16 instruction. MODE is the mode of the
1929 value.
1930
1931 Usually the offset in an unextended instruction is a 5-bit field.
1932 The offset is unsigned and shifted left once for HIs, twice
1933 for SIs, and so on. An exception is SImode accesses off the
1934 stack pointer, which have an 8-bit immediate field. */
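/* For example (assuming TARGET_MIPS16 and a base other than $sp), an
   unextended byte load can reach offsets 0..31, an unextended halfword
   load offsets 0, 2, ..., 62 and an unextended word load offsets
   0, 4, ..., 124. An unextended SImode access off the stack pointer
   can reach offsets 0, 4, ..., 1020. */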
1935
1936 static bool
1937 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1938 {
1939 if (TARGET_MIPS16
1940 && GET_CODE (offset) == CONST_INT
1941 && INTVAL (offset) >= 0
1942 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1943 {
1944 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1945 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1946 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1947 }
1948 return false;
1949 }
1950
1951
1952 /* Return the number of instructions needed to load or store a value
1953 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
1954 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
1955 otherwise assume that a single load or store is enough.
1956
1957 For mips16 code, count extended instructions as two instructions. */
1958
1959 int
1960 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
1961 {
1962 struct mips_address_info addr;
1963 int factor;
1964
1965 /* BLKmode is used for single unaligned loads and stores and should
1966 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
1967 meaningless, so we have to single it out as a special case one way
1968 or the other.) */
1969 if (mode != BLKmode && might_split_p)
1970 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1971 else
1972 factor = 1;
1973
1974 if (mips_classify_address (&addr, x, mode, false))
1975 switch (addr.type)
1976 {
1977 case ADDRESS_REG:
1978 if (TARGET_MIPS16
1979 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1980 return factor * 2;
1981 return factor;
1982
1983 case ADDRESS_LO_SUM:
1984 return (TARGET_MIPS16 ? factor * 2 : factor);
1985
1986 case ADDRESS_CONST_INT:
1987 return factor;
1988
1989 case ADDRESS_SYMBOLIC:
1990 return factor * mips_symbol_insns (addr.symbol_type, mode);
1991 }
1992 return 0;
1993 }
1994
1995
1996 /* Likewise for constant X. */
1997
1998 int
1999 mips_const_insns (rtx x)
2000 {
2001 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2002 enum mips_symbol_type symbol_type;
2003 rtx offset;
2004
2005 switch (GET_CODE (x))
2006 {
2007 case HIGH:
2008 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2009 &symbol_type)
2010 || !mips_split_p[symbol_type])
2011 return 0;
2012
2013 /* This is simply an lui for normal mode. It is an extended
2014 "li" followed by an extended "sll" for MIPS16. */
2015 return TARGET_MIPS16 ? 4 : 1;
2016
2017 case CONST_INT:
2018 if (TARGET_MIPS16)
2019 /* Unsigned 8-bit constants can be loaded using an unextended
2020 LI instruction. Unsigned 16-bit constants can be loaded
2021 using an extended LI. Negative constants must be loaded
2022 using LI and then negated. */
2023 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2024 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2025 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2026 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2027 : 0);
2028
2029 return mips_build_integer (codes, INTVAL (x));
2030
2031 case CONST_DOUBLE:
2032 case CONST_VECTOR:
2033 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2034
2035 case CONST:
2036 if (CONST_GP_P (x))
2037 return 1;
2038
2039 /* See if we can refer to X directly. */
2040 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2041 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2042
2043 /* Otherwise try splitting the constant into a base and offset.
2044 16-bit offsets can be added using an extra addiu. Larger offsets
2045 must be calculated separately and then added to the base. */
2046 split_const (x, &x, &offset);
2047 if (offset != 0)
2048 {
2049 int n = mips_const_insns (x);
2050 if (n != 0)
2051 {
2052 if (SMALL_INT (offset))
2053 return n + 1;
2054 else
2055 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2056 }
2057 }
2058 return 0;
2059
2060 case SYMBOL_REF:
2061 case LABEL_REF:
2062 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2063 MAX_MACHINE_MODE);
2064
2065 default:
2066 return 0;
2067 }
2068 }
2069
2070
2071 /* Return the number of instructions needed to implement INSN,
2072 given that it loads from or stores to MEM. Count extended
2073 mips16 instructions as two instructions. */
2074
2075 int
2076 mips_load_store_insns (rtx mem, rtx insn)
2077 {
2078 enum machine_mode mode;
2079 bool might_split_p;
2080 rtx set;
2081
2082 gcc_assert (MEM_P (mem));
2083 mode = GET_MODE (mem);
2084
2085 /* Try to prove that INSN does not need to be split. */
2086 might_split_p = true;
2087 if (GET_MODE_BITSIZE (mode) == 64)
2088 {
2089 set = single_set (insn);
2090 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2091 might_split_p = false;
2092 }
2093
2094 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2095 }
2096
2097
2098 /* Return the number of instructions needed for an integer division. */
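/* For example, when TARGET_CHECK_ZERO_DIV and GENERATE_DIVIDE_TRAPS
   both hold, a division counts as 2 instructions: the divide itself
   plus a conditional trap on a zero divisor. Without divide traps it
   counts as 3: the divide plus a branch around a break. */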
2099
2100 int
2101 mips_idiv_insns (void)
2102 {
2103 int count;
2104
2105 count = 1;
2106 if (TARGET_CHECK_ZERO_DIV)
2107 {
2108 if (GENERATE_DIVIDE_TRAPS)
2109 count++;
2110 else
2111 count += 2;
2112 }
2113
2114 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2115 count++;
2116 return count;
2117 }
2118 \f
2119 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2120 returns a nonzero value if X is a legitimate address for a memory
2121 operand of the indicated MODE. STRICT is nonzero if this function
2122 is called during reload. */
2123
2124 bool
2125 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2126 {
2127 struct mips_address_info addr;
2128
2129 return mips_classify_address (&addr, x, mode, strict);
2130 }
2131
2132 /* Emit a move from SRC to DEST. Assume that the move expanders can
2133 handle all moves if !can_create_pseudo_p (). The distinction is
2134 important because, unlike emit_move_insn, the move expanders know
2135 how to force Pmode objects into the constant pool even when the
2136 constant pool address is not itself legitimate. */
2137
2138 rtx
2139 mips_emit_move (rtx dest, rtx src)
2140 {
2141 return (can_create_pseudo_p ()
2142 ? emit_move_insn (dest, src)
2143 : emit_move_insn_1 (dest, src));
2144 }
2145
2146 /* Copy VALUE to a register and return that register. If new pseudos
2147 are allowed, copy it into a new register, otherwise use DEST. */
2148
2149 static rtx
2150 mips_force_temporary (rtx dest, rtx value)
2151 {
2152 if (can_create_pseudo_p ())
2153 return force_reg (Pmode, value);
2154 else
2155 {
2156 mips_emit_move (copy_rtx (dest), value);
2157 return dest;
2158 }
2159 }
2160
2161
2162 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2163 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2164 constant in that context and can be split into a high part and a LO_SUM.
2165 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2166 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2167
2168 TEMP is as for mips_force_temporary and is used to load the high
2169 part into a register. */
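/* As an illustration (the symbol name is arbitrary): for a splittable
   absolute address (symbol_ref "x"), the high part is loaded into TEMP
   with an instruction of the form "lui <temp>,%hi(x)" and *LO_SUM_OUT
   is set to (lo_sum <temp> (symbol_ref "x")), which is later printed
   using a %lo(x) relocation. */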
2170
2171 bool
2172 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2173 {
2174 enum mips_symbol_context context;
2175 enum mips_symbol_type symbol_type;
2176 rtx high;
2177
2178 context = (mode == MAX_MACHINE_MODE
2179 ? SYMBOL_CONTEXT_LEA
2180 : SYMBOL_CONTEXT_MEM);
2181 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2182 || mips_symbol_insns (symbol_type, mode) == 0
2183 || !mips_split_p[symbol_type])
2184 return false;
2185
2186 if (lo_sum_out)
2187 {
2188 if (symbol_type == SYMBOL_GP_RELATIVE)
2189 {
2190 if (!can_create_pseudo_p ())
2191 {
2192 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2193 high = temp;
2194 }
2195 else
2196 high = mips16_gp_pseudo_reg ();
2197 }
2198 else
2199 {
2200 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2201 high = mips_force_temporary (temp, high);
2202 }
2203 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2204 }
2205 return true;
2206 }
2207
2208
2209 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2210 and add CONST_INT OFFSET to the result. */
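/* For example (the symbol name is arbitrary), wrapping (symbol_ref "x")
   with OFFSET (const_int 4) and type SYMBOL_GP_RELATIVE produces:

     (const (plus (unspec [(symbol_ref "x")] <n>) (const_int 4)))

   where <n> is UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE. */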
2211
2212 static rtx
2213 mips_unspec_address_offset (rtx base, rtx offset,
2214 enum mips_symbol_type symbol_type)
2215 {
2216 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2217 UNSPEC_ADDRESS_FIRST + symbol_type);
2218 if (offset != const0_rtx)
2219 base = gen_rtx_PLUS (Pmode, base, offset);
2220 return gen_rtx_CONST (Pmode, base);
2221 }
2222
2223 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2224 type SYMBOL_TYPE. */
2225
2226 rtx
2227 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2228 {
2229 rtx base, offset;
2230
2231 split_const (address, &base, &offset);
2232 return mips_unspec_address_offset (base, offset, symbol_type);
2233 }
2234
2235
2236 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2237 high part to BASE and return the result. Just return BASE otherwise.
2238 TEMP is available as a temporary register if needed.
2239
2240 The returned expression can be used as the first operand to a LO_SUM. */
2241
2242 static rtx
2243 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2244 enum mips_symbol_type symbol_type)
2245 {
2246 if (mips_split_p[symbol_type])
2247 {
2248 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2249 addr = mips_force_temporary (temp, addr);
2250 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2251 }
2252 return base;
2253 }
2254
2255
2256 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2257 mips_force_temporary; it is only needed when OFFSET is not a
2258 SMALL_OPERAND. */
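/* For example, in non-MIPS16 code an illustrative offset of 0x12340 is
   too big for a single 16-bit immediate, so the excess 0x10000 is
   loaded into a temporary and added to REG, and the returned address
   carries the remaining 16-bit offset 0x2340. */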
2259
2260 static rtx
2261 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2262 {
2263 if (!SMALL_OPERAND (offset))
2264 {
2265 rtx high;
2266 if (TARGET_MIPS16)
2267 {
2268 /* Load the full offset into a register so that we can use
2269 an unextended instruction for the address itself. */
2270 high = GEN_INT (offset);
2271 offset = 0;
2272 }
2273 else
2274 {
2275 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2276 high = GEN_INT (CONST_HIGH_PART (offset));
2277 offset = CONST_LOW_PART (offset);
2278 }
2279 high = mips_force_temporary (temp, high);
2280 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2281 }
2282 return plus_constant (reg, offset);
2283 }
2284
2285 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2286 referencing, and TYPE is the symbol type to use (either global
2287 dynamic or local dynamic). V0 is an RTX for the return value
2288 location. The entire insn sequence is returned. */
2289
2290 static GTY(()) rtx mips_tls_symbol;
2291
2292 static rtx
2293 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2294 {
2295 rtx insn, loc, tga, a0;
2296
2297 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2298
2299 if (!mips_tls_symbol)
2300 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2301
2302 loc = mips_unspec_address (sym, type);
2303
2304 start_sequence ();
2305
2306 emit_insn (gen_rtx_SET (Pmode, a0,
2307 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2308 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2309 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2310 CONST_OR_PURE_CALL_P (insn) = 1;
2311 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2312 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2313 insn = get_insns ();
2314
2315 end_sequence ();
2316
2317 return insn;
2318 }
2319
2320 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2321 return value will be a valid address and move_operand (either a REG
2322 or a LO_SUM). */
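/* As a rough illustration of the local-exec case handled below: the
   thread pointer is read into $v1 (typically with an rdhwr instruction,
   depending on the tls_get_tp pattern), the %tprel_hi part of LOC is
   added to it, and the returned LO_SUM supplies the remaining
   %tprel_lo part. */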
2323
2324 static rtx
2325 mips_legitimize_tls_address (rtx loc)
2326 {
2327 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2328 enum tls_model model;
2329
2330 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2331 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2332
2333 model = SYMBOL_REF_TLS_MODEL (loc);
2334 /* Only TARGET_ABICALLS code can have more than one module; other
2335 code must be static and should not use a GOT. All TLS models
2336 reduce to local exec in this situation. */
2337 if (!TARGET_ABICALLS)
2338 model = TLS_MODEL_LOCAL_EXEC;
2339
2340 switch (model)
2341 {
2342 case TLS_MODEL_GLOBAL_DYNAMIC:
2343 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2344 dest = gen_reg_rtx (Pmode);
2345 emit_libcall_block (insn, dest, v0, loc);
2346 break;
2347
2348 case TLS_MODEL_LOCAL_DYNAMIC:
2349 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2350 tmp1 = gen_reg_rtx (Pmode);
2351
2352 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2353 share the LDM result with other LD model accesses. */
2354 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2355 UNSPEC_TLS_LDM);
2356 emit_libcall_block (insn, tmp1, v0, eqv);
2357
2358 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2359 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2360 mips_unspec_address (loc, SYMBOL_DTPREL));
2361 break;
2362
2363 case TLS_MODEL_INITIAL_EXEC:
2364 tmp1 = gen_reg_rtx (Pmode);
2365 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2366 if (Pmode == DImode)
2367 {
2368 emit_insn (gen_tls_get_tp_di (v1));
2369 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2370 }
2371 else
2372 {
2373 emit_insn (gen_tls_get_tp_si (v1));
2374 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2375 }
2376 dest = gen_reg_rtx (Pmode);
2377 emit_insn (gen_add3_insn (dest, tmp1, v1));
2378 break;
2379
2380 case TLS_MODEL_LOCAL_EXEC:
2381 if (Pmode == DImode)
2382 emit_insn (gen_tls_get_tp_di (v1));
2383 else
2384 emit_insn (gen_tls_get_tp_si (v1));
2385
2386 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2387 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2388 mips_unspec_address (loc, SYMBOL_TPREL));
2389 break;
2390
2391 default:
2392 gcc_unreachable ();
2393 }
2394
2395 return dest;
2396 }
2397
2398 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2399 be legitimized in a way that the generic machinery might not expect,
2400 put the new address in *XLOC and return true. MODE is the mode of
2401 the memory being accessed. */
2402
2403 bool
2404 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2405 {
2406 if (mips_tls_operand_p (*xloc))
2407 {
2408 *xloc = mips_legitimize_tls_address (*xloc);
2409 return true;
2410 }
2411
2412 /* See if the address can split into a high part and a LO_SUM. */
2413 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2414 return true;
2415
2416 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2417 {
2418 /* Handle REG + CONSTANT using mips_add_offset. */
2419 rtx reg;
2420
2421 reg = XEXP (*xloc, 0);
2422 if (!mips_valid_base_register_p (reg, mode, 0))
2423 reg = copy_to_mode_reg (Pmode, reg);
2424 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2425 return true;
2426 }
2427
2428 return false;
2429 }
2430
2431
2432 /* Subroutine of mips_build_integer (with the same interface).
2433 Assume that the final action in the sequence should be a left shift. */
2434
2435 static unsigned int
2436 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2437 {
2438 unsigned int i, shift;
2439
2440 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2441 since signed numbers are easier to load than unsigned ones. */
2442 shift = 0;
2443 while ((value & 1) == 0)
2444 value /= 2, shift++;
2445
2446 i = mips_build_integer (codes, value);
2447 codes[i].code = ASHIFT;
2448 codes[i].value = shift;
2449 return i + 1;
2450 }
2451
2452
2453 /* As for mips_build_shift, but assume that the final action will be
2454 an IOR or PLUS operation. */
2455
2456 static unsigned int
2457 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2458 {
2459 unsigned HOST_WIDE_INT high;
2460 unsigned int i;
2461
2462 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2463 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2464 {
2465 /* The constant is too complex to load with a simple lui/ori pair
2466 so our goal is to clear as many trailing zeros as possible.
2467 In this case, we know bit 16 is set and that the low 16 bits
2468 form a negative number. If we subtract that number from VALUE,
2469 we will clear at least the lowest 17 bits, maybe more. */
2470 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2471 codes[i].code = PLUS;
2472 codes[i].value = CONST_LOW_PART (value);
2473 }
2474 else
2475 {
2476 i = mips_build_integer (codes, high);
2477 codes[i].code = IOR;
2478 codes[i].value = value & 0xffff;
2479 }
2480 return i + 1;
2481 }
2482
2483
2484 /* Fill CODES with a sequence of rtl operations to load VALUE.
2485 Return the number of operations needed. */
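/* For example, on a 32-bit target the illustrative constant 0x12345678
   is built in two operations:

     codes[0] = { UNKNOWN, 0x12340000 }   (loaded with an lui)
     codes[1] = { IOR, 0x5678 }           (merged with an ori)

   and the function returns 2. */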
2486
2487 static unsigned int
2488 mips_build_integer (struct mips_integer_op *codes,
2489 unsigned HOST_WIDE_INT value)
2490 {
2491 if (SMALL_OPERAND (value)
2492 || SMALL_OPERAND_UNSIGNED (value)
2493 || LUI_OPERAND (value))
2494 {
2495 /* The value can be loaded with a single instruction. */
2496 codes[0].code = UNKNOWN;
2497 codes[0].value = value;
2498 return 1;
2499 }
2500 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2501 {
2502 /* Either the constant is a simple LUI/ORI combination or its
2503 lowest bit is set. We don't want to shift in this case. */
2504 return mips_build_lower (codes, value);
2505 }
2506 else if ((value & 0xffff) == 0)
2507 {
2508 /* The constant will need at least three actions. The lowest
2509 16 bits are clear, so the final action will be a shift. */
2510 return mips_build_shift (codes, value);
2511 }
2512 else
2513 {
2514 /* The final action could be a shift, add or inclusive OR.
2515 Rather than use a complex condition to select the best
2516 approach, try both mips_build_shift and mips_build_lower
2517 and pick the one that gives the shortest sequence.
2518 Note that this case is only used once per constant. */
2519 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2520 unsigned int cost, alt_cost;
2521
2522 cost = mips_build_shift (codes, value);
2523 alt_cost = mips_build_lower (alt_codes, value);
2524 if (alt_cost < cost)
2525 {
2526 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2527 cost = alt_cost;
2528 }
2529 return cost;
2530 }
2531 }
2532
2533
2534 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2535
2536 void
2537 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2538 {
2539 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2540 enum machine_mode mode;
2541 unsigned int i, cost;
2542 rtx x;
2543
2544 mode = GET_MODE (dest);
2545 cost = mips_build_integer (codes, value);
2546
2547 /* Apply each binary operation to X. Invariant: X is a legitimate
2548 source operand for a SET pattern. */
2549 x = GEN_INT (codes[0].value);
2550 for (i = 1; i < cost; i++)
2551 {
2552 if (!can_create_pseudo_p ())
2553 {
2554 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2555 x = temp;
2556 }
2557 else
2558 x = force_reg (mode, x);
2559 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2560 }
2561
2562 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2563 }
2564
2565
2566 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2567 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2568 move_operand. */
2569
2570 static void
2571 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2572 {
2573 rtx base, offset;
2574
2575 /* Split moves of big integers into smaller pieces. */
2576 if (splittable_const_int_operand (src, mode))
2577 {
2578 mips_move_integer (dest, dest, INTVAL (src));
2579 return;
2580 }
2581
2582 /* Split moves of symbolic constants into high/low pairs. */
2583 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2584 {
2585 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2586 return;
2587 }
2588
2589 if (mips_tls_operand_p (src))
2590 {
2591 mips_emit_move (dest, mips_legitimize_tls_address (src));
2592 return;
2593 }
2594
2595 /* If we have (const (plus symbol offset)), load the symbol first
2596 and then add in the offset. This is usually better than forcing
2597 the constant into memory, at least in non-mips16 code. */
2598 split_const (src, &base, &offset);
2599 if (!TARGET_MIPS16
2600 && offset != const0_rtx
2601 && (can_create_pseudo_p () || SMALL_INT (offset)))
2602 {
2603 base = mips_force_temporary (dest, base);
2604 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2605 return;
2606 }
2607
2608 src = force_const_mem (mode, src);
2609
2610 /* When using explicit relocs, constant pool references are sometimes
2611 not legitimate addresses. */
2612 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2613 mips_emit_move (dest, src);
2614 }
2615
2616
2617 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2618 sequence that is valid. */
2619
2620 bool
2621 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2622 {
2623 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2624 {
2625 mips_emit_move (dest, force_reg (mode, src));
2626 return true;
2627 }
2628
2629 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2630 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2631 && REG_P (src) && MD_REG_P (REGNO (src))
2632 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2633 {
2634 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2635 if (GET_MODE_SIZE (mode) <= 4)
2636 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2637 gen_rtx_REG (SImode, REGNO (src)),
2638 gen_rtx_REG (SImode, other_regno)));
2639 else
2640 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2641 gen_rtx_REG (DImode, REGNO (src)),
2642 gen_rtx_REG (DImode, other_regno)));
2643 return true;
2644 }
2645
2646 /* We need to deal with constants that would be legitimate
2647 immediate_operands but not legitimate move_operands. */
2648 if (CONSTANT_P (src) && !move_operand (src, mode))
2649 {
2650 mips_legitimize_const_move (mode, dest, src);
2651 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2652 return true;
2653 }
2654 return false;
2655 }
2656 \f
2657 /* We need a lot of little routines to check constant values on the
2658 mips16. These are used to figure out how long the instruction will
2659 be. It would be much better to do this using constraints, but
2660 there aren't nearly enough letters available. */
2661
2662 static int
2663 m16_check_op (rtx op, int low, int high, int mask)
2664 {
2665 return (GET_CODE (op) == CONST_INT
2666 && INTVAL (op) >= low
2667 && INTVAL (op) <= high
2668 && (INTVAL (op) & mask) == 0);
2669 }
2670
2671 int
2672 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2673 {
2674 return m16_check_op (op, 0x1, 0x8, 0);
2675 }
2676
2677 int
2678 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2679 {
2680 return m16_check_op (op, - 0x8, 0x7, 0);
2681 }
2682
2683 int
2684 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2685 {
2686 return m16_check_op (op, - 0x7, 0x8, 0);
2687 }
2688
2689 int
2690 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2691 {
2692 return m16_check_op (op, - 0x10, 0xf, 0);
2693 }
2694
2695 int
2696 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2697 {
2698 return m16_check_op (op, - 0xf, 0x10, 0);
2699 }
2700
2701 int
2702 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2703 {
2704 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2705 }
2706
2707 int
2708 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2709 {
2710 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2711 }
2712
2713 int
2714 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2715 {
2716 return m16_check_op (op, - 0x80, 0x7f, 0);
2717 }
2718
2719 int
2720 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2721 {
2722 return m16_check_op (op, - 0x7f, 0x80, 0);
2723 }
2724
2725 int
2726 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2727 {
2728 return m16_check_op (op, 0x0, 0xff, 0);
2729 }
2730
2731 int
2732 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2733 {
2734 return m16_check_op (op, - 0xff, 0x0, 0);
2735 }
2736
2737 int
2738 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2739 {
2740 return m16_check_op (op, - 0x1, 0xfe, 0);
2741 }
2742
2743 int
2744 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2745 {
2746 return m16_check_op (op, 0x0, 0xff << 2, 3);
2747 }
2748
2749 int
2750 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2751 {
2752 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2753 }
2754
2755 int
2756 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2757 {
2758 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2759 }
2760
2761 int
2762 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2763 {
2764 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2765 }
2766 \f
2767 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2768 address instruction. */
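/* An address matching this pattern looks like (register numbers are
   illustrative):

     (plus (mult (reg $5) (const_int 4)) (reg $4))

   i.e. a word-scaled index added to a base register. */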
2769
2770 static bool
2771 mips_lwxs_address_p (rtx addr)
2772 {
2773 if (ISA_HAS_LWXS
2774 && GET_CODE (addr) == PLUS
2775 && REG_P (XEXP (addr, 1)))
2776 {
2777 rtx offset = XEXP (addr, 0);
2778 if (GET_CODE (offset) == MULT
2779 && REG_P (XEXP (offset, 0))
2780 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2781 && INTVAL (XEXP (offset, 1)) == 4)
2782 return true;
2783 }
2784 return false;
2785 }
2786
2787 static bool
2788 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2789 {
2790 enum machine_mode mode = GET_MODE (x);
2791 bool float_mode_p = FLOAT_MODE_P (mode);
2792
2793 switch (code)
2794 {
2795 case CONST_INT:
2796 if (TARGET_MIPS16)
2797 {
2798 /* A number between 1 and 8 inclusive is efficient for a shift.
2799 Otherwise, we will need an extended instruction. */
2800 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2801 || (outer_code) == LSHIFTRT)
2802 {
2803 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2804 *total = 0;
2805 else
2806 *total = COSTS_N_INSNS (1);
2807 return true;
2808 }
2809
2810 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2811 if ((outer_code) == XOR
2812 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2813 {
2814 *total = 0;
2815 return true;
2816 }
2817
2818 /* We may be able to use slt or sltu for a comparison with a
2819 signed 16-bit value. (The boundary conditions aren't quite
2820 right, but this is just a heuristic anyhow.) */
2821 if (((outer_code) == LT || (outer_code) == LE
2822 || (outer_code) == GE || (outer_code) == GT
2823 || (outer_code) == LTU || (outer_code) == LEU
2824 || (outer_code) == GEU || (outer_code) == GTU)
2825 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2826 {
2827 *total = 0;
2828 return true;
2829 }
2830
2831 /* Equality comparisons with 0 are cheap. */
2832 if (((outer_code) == EQ || (outer_code) == NE)
2833 && INTVAL (x) == 0)
2834 {
2835 *total = 0;
2836 return true;
2837 }
2838
2839 /* Constants in the range 0...255 can be loaded with an unextended
2840 instruction. They are therefore as cheap as a register move.
2841
2842 Given the choice between "li R1,0...255" and "move R1,R2"
2843 (where R2 is a known constant), it is usually better to use "li",
2844 since we do not want to unnecessarily extend the lifetime
2845 of R2. */
2846 if (outer_code == SET
2847 && INTVAL (x) >= 0
2848 && INTVAL (x) < 256)
2849 {
2850 *total = 0;
2851 return true;
2852 }
2853 }
2854 else
2855 {
2856 /* These can be used anywhere. */
2857 *total = 0;
2858 return true;
2859 }
2860
2861 /* Otherwise fall through to the handling below because
2862 we'll need to construct the constant. */
2863
2864 case CONST:
2865 case SYMBOL_REF:
2866 case LABEL_REF:
2867 case CONST_DOUBLE:
2868 if (LEGITIMATE_CONSTANT_P (x))
2869 {
2870 *total = COSTS_N_INSNS (1);
2871 return true;
2872 }
2873 else
2874 {
2875 /* The value will need to be fetched from the constant pool. */
2876 *total = CONSTANT_POOL_COST;
2877 return true;
2878 }
2879
2880 case MEM:
2881 {
2882 /* If the address is legitimate, return the number of
2883 instructions it needs. */
2884 rtx addr = XEXP (x, 0);
2885 int n = mips_address_insns (addr, GET_MODE (x), true);
2886 if (n > 0)
2887 {
2888 *total = COSTS_N_INSNS (n + 1);
2889 return true;
2890 }
2891 /* Check for scaled indexed address. */
2892 if (mips_lwxs_address_p (addr))
2893 {
2894 *total = COSTS_N_INSNS (2);
2895 return true;
2896 }
2897 /* Otherwise use the default handling. */
2898 return false;
2899 }
2900
2901 case FFS:
2902 *total = COSTS_N_INSNS (6);
2903 return true;
2904
2905 case NOT:
2906 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2907 return true;
2908
2909 case AND:
2910 case IOR:
2911 case XOR:
2912 if (mode == DImode && !TARGET_64BIT)
2913 {
2914 *total = COSTS_N_INSNS (2);
2915 return true;
2916 }
2917 return false;
2918
2919 case ASHIFT:
2920 case ASHIFTRT:
2921 case LSHIFTRT:
2922 if (mode == DImode && !TARGET_64BIT)
2923 {
2924 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2925 ? 4 : 12);
2926 return true;
2927 }
2928 return false;
2929
2930 case ABS:
2931 if (float_mode_p)
2932 *total = COSTS_N_INSNS (1);
2933 else
2934 *total = COSTS_N_INSNS (4);
2935 return true;
2936
2937 case LO_SUM:
2938 *total = COSTS_N_INSNS (1);
2939 return true;
2940
2941 case PLUS:
2942 case MINUS:
2943 if (float_mode_p)
2944 {
2945 *total = mips_cost->fp_add;
2946 return true;
2947 }
2948
2949 else if (mode == DImode && !TARGET_64BIT)
2950 {
2951 *total = COSTS_N_INSNS (4);
2952 return true;
2953 }
2954 return false;
2955
2956 case NEG:
2957 if (mode == DImode && !TARGET_64BIT)
2958 {
2959 *total = COSTS_N_INSNS (4);
2960 return true;
2961 }
2962 return false;
2963
2964 case MULT:
2965 if (mode == SFmode)
2966 *total = mips_cost->fp_mult_sf;
2967
2968 else if (mode == DFmode)
2969 *total = mips_cost->fp_mult_df;
2970
2971 else if (mode == SImode)
2972 *total = mips_cost->int_mult_si;
2973
2974 else
2975 *total = mips_cost->int_mult_di;
2976
2977 return true;
2978
2979 case DIV:
2980 case MOD:
2981 if (float_mode_p)
2982 {
2983 if (mode == SFmode)
2984 *total = mips_cost->fp_div_sf;
2985 else
2986 *total = mips_cost->fp_div_df;
2987
2988 return true;
2989 }
2990 /* Fall through. */
2991
2992 case UDIV:
2993 case UMOD:
2994 if (mode == DImode)
2995 *total = mips_cost->int_div_di;
2996 else
2997 *total = mips_cost->int_div_si;
2998
2999 return true;
3000
3001 case SIGN_EXTEND:
3002 /* A sign extend from SImode to DImode in 64-bit mode is often
3003 zero instructions, because the result can often be used
3004 directly by another instruction; we'll call it one. */
3005 if (TARGET_64BIT && mode == DImode
3006 && GET_MODE (XEXP (x, 0)) == SImode)
3007 *total = COSTS_N_INSNS (1);
3008 else
3009 *total = COSTS_N_INSNS (2);
3010 return true;
3011
3012 case ZERO_EXTEND:
3013 if (TARGET_64BIT && mode == DImode
3014 && GET_MODE (XEXP (x, 0)) == SImode)
3015 *total = COSTS_N_INSNS (2);
3016 else
3017 *total = COSTS_N_INSNS (1);
3018 return true;
3019
3020 case FLOAT:
3021 case UNSIGNED_FLOAT:
3022 case FIX:
3023 case FLOAT_EXTEND:
3024 case FLOAT_TRUNCATE:
3025 case SQRT:
3026 *total = mips_cost->fp_add;
3027 return true;
3028
3029 default:
3030 return false;
3031 }
3032 }
3033
3034 /* Provide the costs of an addressing mode that contains ADDR.
3035 If ADDR is not a valid address, its cost is irrelevant. */
3036
3037 static int
3038 mips_address_cost (rtx addr)
3039 {
3040 return mips_address_insns (addr, SImode, false);
3041 }
3042 \f
3043 /* Return one word of double-word value OP, taking into account the fixed
3044 endianness of certain registers. HIGH_P is true to select the high part,
3045 false to select the low part. */
3046
3047 rtx
3048 mips_subword (rtx op, int high_p)
3049 {
3050 unsigned int byte;
3051 enum machine_mode mode;
3052
3053 mode = GET_MODE (op);
3054 if (mode == VOIDmode)
3055 mode = DImode;
3056
3057 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3058 byte = UNITS_PER_WORD;
3059 else
3060 byte = 0;
3061
3062 if (FP_REG_RTX_P (op))
3063 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3064
3065 if (MEM_P (op))
3066 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3067
3068 return simplify_gen_subreg (word_mode, op, mode, byte);
3069 }
3070
3071
3072 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3073
3074 bool
3075 mips_split_64bit_move_p (rtx dest, rtx src)
3076 {
3077 if (TARGET_64BIT)
3078 return false;
3079
3080 /* FP->FP moves can be done in a single instruction. */
3081 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3082 return false;
3083
3084 /* Check for floating-point loads and stores. They can be done using
3085 ldc1 and sdc1 on MIPS II and above. */
3086 if (mips_isa > 1)
3087 {
3088 if (FP_REG_RTX_P (dest) && MEM_P (src))
3089 return false;
3090 if (FP_REG_RTX_P (src) && MEM_P (dest))
3091 return false;
3092 }
3093 return true;
3094 }
3095
3096
3097 /* Split a 64-bit move from SRC to DEST assuming that
3098 mips_split_64bit_move_p holds.
3099
3100 Moves into and out of FPRs cause some difficulty here. Such moves
3101 will always be DFmode, since paired FPRs are not allowed to store
3102 DImode values. The most natural representation would be two separate
3103 32-bit moves, such as:
3104
3105 (set (reg:SI $f0) (mem:SI ...))
3106 (set (reg:SI $f1) (mem:SI ...))
3107
3108 However, the second insn is invalid because odd-numbered FPRs are
3109 not allowed to store independent values. Use the patterns load_df_low,
3110 load_df_high and store_df_high instead. */
3111
3112 void
3113 mips_split_64bit_move (rtx dest, rtx src)
3114 {
3115 if (FP_REG_RTX_P (dest))
3116 {
3117 /* Loading an FPR from memory or from GPRs. */
3118 if (ISA_HAS_MXHC1)
3119 {
3120 dest = gen_lowpart (DFmode, dest);
3121 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3122 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3123 copy_rtx (dest)));
3124 }
3125 else
3126 {
3127 emit_insn (gen_load_df_low (copy_rtx (dest),
3128 mips_subword (src, 0)));
3129 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3130 copy_rtx (dest)));
3131 }
3132 }
3133 else if (FP_REG_RTX_P (src))
3134 {
3135 /* Storing an FPR into memory or GPRs. */
3136 if (ISA_HAS_MXHC1)
3137 {
3138 src = gen_lowpart (DFmode, src);
3139 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3140 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3141 }
3142 else
3143 {
3144 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3145 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3146 }
3147 }
3148 else
3149 {
3150 /* The operation can be split into two normal moves. Decide in
3151 which order to do them. */
3152 rtx low_dest;
3153
3154 low_dest = mips_subword (dest, 0);
3155 if (REG_P (low_dest)
3156 && reg_overlap_mentioned_p (low_dest, src))
3157 {
3158 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3159 mips_emit_move (low_dest, mips_subword (src, 0));
3160 }
3161 else
3162 {
3163 mips_emit_move (low_dest, mips_subword (src, 0));
3164 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3165 }
3166 }
3167 }
3168 \f
3169 /* Return the appropriate instructions to move SRC into DEST. Assume
3170 that SRC is operand 1 and DEST is operand 0. */
3171
3172 const char *
3173 mips_output_move (rtx dest, rtx src)
3174 {
3175 enum rtx_code dest_code, src_code;
3176 enum mips_symbol_type symbol_type;
3177 bool dbl_p;
3178
3179 dest_code = GET_CODE (dest);
3180 src_code = GET_CODE (src);
3181 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3182
3183 if (dbl_p && mips_split_64bit_move_p (dest, src))
3184 return "#";
3185
3186 if ((src_code == REG && GP_REG_P (REGNO (src)))
3187 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3188 {
3189 if (dest_code == REG)
3190 {
3191 if (GP_REG_P (REGNO (dest)))
3192 return "move\t%0,%z1";
3193
3194 if (MD_REG_P (REGNO (dest)))
3195 return "mt%0\t%z1";
3196
3197 if (DSP_ACC_REG_P (REGNO (dest)))
3198 {
3199 static char retval[] = "mt__\t%z1,%q0";
3200 retval[2] = reg_names[REGNO (dest)][4];
3201 retval[3] = reg_names[REGNO (dest)][5];
3202 return retval;
3203 }
3204
3205 if (FP_REG_P (REGNO (dest)))
3206 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3207
3208 if (ALL_COP_REG_P (REGNO (dest)))
3209 {
3210 static char retval[] = "dmtc_\t%z1,%0";
3211
3212 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3213 return (dbl_p ? retval : retval + 1);
3214 }
3215 }
3216 if (dest_code == MEM)
3217 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3218 }
3219 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3220 {
3221 if (src_code == REG)
3222 {
3223 if (DSP_ACC_REG_P (REGNO (src)))
3224 {
3225 static char retval[] = "mf__\t%0,%q1";
3226 retval[2] = reg_names[REGNO (src)][4];
3227 retval[3] = reg_names[REGNO (src)][5];
3228 return retval;
3229 }
3230
3231 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3232 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3233
3234 if (FP_REG_P (REGNO (src)))
3235 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3236
3237 if (ALL_COP_REG_P (REGNO (src)))
3238 {
3239 static char retval[] = "dmfc_\t%0,%1";
3240
3241 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3242 return (dbl_p ? retval : retval + 1);
3243 }
3244 }
3245
3246 if (src_code == MEM)
3247 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3248
3249 if (src_code == CONST_INT)
3250 {
3251 /* Don't use the X format, because that will give out of
3252 range numbers for 64-bit hosts and 32-bit targets. */
3253 if (!TARGET_MIPS16)
3254 return "li\t%0,%1\t\t\t# %X1";
3255
3256 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3257 return "li\t%0,%1";
3258
3259 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3260 return "#";
3261 }
3262
3263 if (src_code == HIGH)
3264 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3265
3266 if (CONST_GP_P (src))
3267 return "move\t%0,%1";
3268
3269 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3270 && mips_lo_relocs[symbol_type] != 0)
3271 {
3272 /* A signed 16-bit constant formed by applying a relocation
3273 operator to a symbolic address. */
3274 gcc_assert (!mips_split_p[symbol_type]);
3275 return "li\t%0,%R1";
3276 }
3277
3278 if (symbolic_operand (src, VOIDmode))
3279 {
3280 gcc_assert (TARGET_MIPS16
3281 ? TARGET_MIPS16_TEXT_LOADS
3282 : !TARGET_EXPLICIT_RELOCS);
3283 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3284 }
3285 }
3286 if (src_code == REG && FP_REG_P (REGNO (src)))
3287 {
3288 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3289 {
3290 if (GET_MODE (dest) == V2SFmode)
3291 return "mov.ps\t%0,%1";
3292 else
3293 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3294 }
3295
3296 if (dest_code == MEM)
3297 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3298 }
3299 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3300 {
3301 if (src_code == MEM)
3302 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3303 }
3304 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3305 {
3306 static char retval[] = "l_c_\t%0,%1";
3307
3308 retval[1] = (dbl_p ? 'd' : 'w');
3309 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3310 return retval;
3311 }
3312 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3313 {
3314 static char retval[] = "s_c_\t%1,%0";
3315
3316 retval[1] = (dbl_p ? 'd' : 'w');
3317 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3318 return retval;
3319 }
3320 gcc_unreachable ();
3321 }
3322 \f
3323 /* Restore $gp from its save slot. Valid only when using o32 or
3324 o64 abicalls. */
3325
3326 void
3327 mips_restore_gp (void)
3328 {
3329 rtx address, slot;
3330
3331 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3332
3333 address = mips_add_offset (pic_offset_table_rtx,
3334 frame_pointer_needed
3335 ? hard_frame_pointer_rtx
3336 : stack_pointer_rtx,
3337 current_function_outgoing_args_size);
3338 slot = gen_rtx_MEM (Pmode, address);
3339
3340 mips_emit_move (pic_offset_table_rtx, slot);
3341 if (!TARGET_EXPLICIT_RELOCS)
3342 emit_insn (gen_blockage ());
3343 }
3344 \f
3345 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3346
3347 static void
3348 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3349 {
3350 emit_insn (gen_rtx_SET (VOIDmode, target,
3351 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3352 }
3353
3354 /* Return true if CMP1 is a suitable second operand for relational
3355 operator CODE. See also the *sCC patterns in mips.md. */
3356
3357 static bool
3358 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3359 {
3360 switch (code)
3361 {
3362 case GT:
3363 case GTU:
3364 return reg_or_0_operand (cmp1, VOIDmode);
3365
3366 case GE:
3367 case GEU:
3368 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3369
3370 case LT:
3371 case LTU:
3372 return arith_operand (cmp1, VOIDmode);
3373
3374 case LE:
3375 return sle_operand (cmp1, VOIDmode);
3376
3377 case LEU:
3378 return sleu_operand (cmp1, VOIDmode);
3379
3380 default:
3381 gcc_unreachable ();
3382 }
3383 }
3384
3385 /* Canonicalize LE or LEU comparisons into LT comparisons when
3386 possible to avoid extra instructions or inverting the
3387 comparison. */
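/* For example, (le reg (const_int 4)) is rewritten as (lt reg 5), with
   the constant 5 forced into a register. */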
3388
3389 static bool
3390 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3391 enum machine_mode mode)
3392 {
3393 HOST_WIDE_INT original, plus_one;
3394
3395 if (GET_CODE (*cmp1) != CONST_INT)
3396 return false;
3397
3398 original = INTVAL (*cmp1);
3399 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3400
3401 switch (*code)
3402 {
3403 case LE:
3404 if (original < plus_one)
3405 {
3406 *code = LT;
3407 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3408 return true;
3409 }
3410 break;
3411
3412 case LEU:
3413 if (plus_one != 0)
3414 {
3415 *code = LTU;
3416 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3417 return true;
3418 }
3419 break;
3420
3421 default:
3422 return false;
3423 }
3424
3425 return false;
3426
3427 }
3428
3429 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3430 result in TARGET. CMP0 and TARGET are register_operands that have
3431 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3432 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3433
3434 static void
3435 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3436 rtx target, rtx cmp0, rtx cmp1)
3437 {
3438 /* First see if there is a MIPS instruction that can do this operation
3439 with CMP1 in its current form. If not, try to canonicalize the
3440 comparison to LT. If that fails, try doing the same for the
3441 inverse operation. If that also fails, force CMP1 into a register
3442 and try again. */
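  /* For example, if CODE is LE and CMP1 is a register, neither the
     original nor the canonicalized form is available directly, so we
     compute the inverse GT (an slt with the operands swapped) and
     either flip *INVERT_PTR or xor the result with 1. */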
3443 if (mips_relational_operand_ok_p (code, cmp1))
3444 mips_emit_binary (code, target, cmp0, cmp1);
3445 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3446 mips_emit_binary (code, target, cmp0, cmp1);
3447 else
3448 {
3449 enum rtx_code inv_code = reverse_condition (code);
3450 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3451 {
3452 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3453 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3454 }
3455 else if (invert_ptr == 0)
3456 {
3457 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3458 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3459 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3460 }
3461 else
3462 {
3463 *invert_ptr = !*invert_ptr;
3464 mips_emit_binary (inv_code, target, cmp0, cmp1);
3465 }
3466 }
3467 }
3468
3469 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3470 The register will have the same mode as CMP0. */
3471
3472 static rtx
3473 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3474 {
3475 if (cmp1 == const0_rtx)
3476 return cmp0;
3477
3478 if (uns_arith_operand (cmp1, VOIDmode))
3479 return expand_binop (GET_MODE (cmp0), xor_optab,
3480 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3481
3482 return expand_binop (GET_MODE (cmp0), sub_optab,
3483 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3484 }
3485
3486 /* Convert *CODE into a code that can be used in a floating-point
3487 scc instruction (c.<cond>.<fmt>). Return true if the values of
3488 the condition code registers will be inverted, with 0 indicating
3489 that the condition holds. */
3490
3491 static bool
3492 mips_reverse_fp_cond_p (enum rtx_code *code)
3493 {
3494 switch (*code)
3495 {
3496 case NE:
3497 case LTGT:
3498 case ORDERED:
3499 *code = reverse_condition_maybe_unordered (*code);
3500 return true;
3501
3502 default:
3503 return false;
3504 }
3505 }
3506
3507 /* Convert a comparison into something that can be used in a branch or
3508 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3509 being compared and *CODE is the code used to compare them.
3510
3511 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3512 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3513 otherwise any standard branch condition can be used. The standard branch
3514 conditions are:
3515
3516 - EQ/NE between two registers.
3517 - any comparison between a register and zero. */
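/* Illustration (not exhaustive): for MIPS16, where NEED_EQ_NE_P holds,
   a signed (lt x y) test is handled by emitting an scc (slt) into a
   fresh register via mips_emit_int_relational and then branching on
   that register being nonzero (NE against zero).  */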
3518
3519 static void
3520 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3521 {
3522 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3523 {
3524 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3525 {
3526 *op0 = cmp_operands[0];
3527 *op1 = cmp_operands[1];
3528 }
3529 else if (*code == EQ || *code == NE)
3530 {
3531 if (need_eq_ne_p)
3532 {
3533 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3534 *op1 = const0_rtx;
3535 }
3536 else
3537 {
3538 *op0 = cmp_operands[0];
3539 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3540 }
3541 }
3542 else
3543 {
3544 /* The comparison needs a separate scc instruction. Store the
3545 result of the scc in *OP0 and compare it against zero. */
3546 bool invert = false;
3547 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3548 *op1 = const0_rtx;
3549 mips_emit_int_relational (*code, &invert, *op0,
3550 cmp_operands[0], cmp_operands[1]);
3551 *code = (invert ? EQ : NE);
3552 }
3553 }
3554 else
3555 {
3556 enum rtx_code cmp_code;
3557
3558 /* Floating-point tests use a separate c.cond.fmt comparison to
3559 set a condition code register. The branch or conditional move
3560 will then compare that register against zero.
3561
3562 Set CMP_CODE to the code of the comparison instruction and
3563 *CODE to the code that the branch or move should use. */
3564 cmp_code = *code;
3565 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3566 *op0 = (ISA_HAS_8CC
3567 ? gen_reg_rtx (CCmode)
3568 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3569 *op1 = const0_rtx;
3570 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3571 }
3572 }
3573 \f
3574 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3575 Store the result in TARGET and return true if successful.
3576
3577 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3578
3579 bool
3580 mips_emit_scc (enum rtx_code code, rtx target)
3581 {
3582 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3583 return false;
3584
3585 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3586 if (code == EQ || code == NE)
3587 {
3588 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3589 mips_emit_binary (code, target, zie, const0_rtx);
3590 }
3591 else
3592 mips_emit_int_relational (code, 0, target,
3593 cmp_operands[0], cmp_operands[1]);
3594 return true;
3595 }
3596
3597 /* Emit the common code for doing conditional branches.
3598 operands[0] is the label to jump to.
3599 The comparison operands are saved away by cmp{si,di,sf,df}. */
3600
3601 void
3602 gen_conditional_branch (rtx *operands, enum rtx_code code)
3603 {
3604 rtx op0, op1, condition;
3605
3606 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3607 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3608 emit_jump_insn (gen_condjump (condition, operands[0]));
3609 }
3610
3611 /* Implement:
3612
3613 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3614 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3615
3616 void
3617 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3618 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3619 {
3620 rtx cmp_result;
3621 bool reversed_p;
3622
3623 reversed_p = mips_reverse_fp_cond_p (&cond);
3624 cmp_result = gen_reg_rtx (CCV2mode);
3625 emit_insn (gen_scc_ps (cmp_result,
3626 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3627 if (reversed_p)
3628 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3629 cmp_result));
3630 else
3631 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3632 cmp_result));
3633 }
3634
3635 /* Emit the common code for conditional moves. OPERANDS is the array
3636 of operands passed to the conditional move define_expand. */
3637
3638 void
3639 gen_conditional_move (rtx *operands)
3640 {
3641 enum rtx_code code;
3642 rtx op0, op1;
3643
3644 code = GET_CODE (operands[1]);
3645 mips_emit_compare (&code, &op0, &op1, true);
3646 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3647 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3648 gen_rtx_fmt_ee (code,
3649 GET_MODE (op0),
3650 op0, op1),
3651 operands[2], operands[3])));
3652 }
3653
3654 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3655 the conditional_trap expander. */
3656
3657 void
3658 mips_gen_conditional_trap (rtx *operands)
3659 {
3660 rtx op0, op1;
3661 enum rtx_code cmp_code = GET_CODE (operands[0]);
3662 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3663
3664 /* MIPS conditional trap machine instructions don't have GT or LE
3665 flavors, so we must swap the operands and convert GT and LE into
3666 LT and GE, respectively. */
3667 switch (cmp_code)
3668 {
3669 case GT: cmp_code = LT; break;
3670 case LE: cmp_code = GE; break;
3671 case GTU: cmp_code = LTU; break;
3672 case LEU: cmp_code = GEU; break;
3673 default: break;
3674 }
3675 if (cmp_code == GET_CODE (operands[0]))
3676 {
3677 op0 = cmp_operands[0];
3678 op1 = cmp_operands[1];
3679 }
3680 else
3681 {
3682 op0 = cmp_operands[1];
3683 op1 = cmp_operands[0];
3684 }
3685 op0 = force_reg (mode, op0);
3686 if (!arith_operand (op1, mode))
3687 op1 = force_reg (mode, op1);
3688
3689 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3690 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3691 operands[1]));
3692 }
3693 \f
3694 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3695
3696 static bool
3697 mips_ok_for_lazy_binding_p (rtx x)
3698 {
3699 return (TARGET_USE_GOT
3700 && GET_CODE (x) == SYMBOL_REF
3701 && !mips_symbol_binds_local_p (x));
3702 }
3703
3704 /* Load function address ADDR into register DEST. SIBCALL_P is true
3705 if the address is needed for a sibling call. */
3706
3707 static void
3708 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3709 {
3710 /* If we're generating PIC, and this call is to a global function,
3711 try to allow its address to be resolved lazily. This isn't
3712 possible for sibcalls when TARGET_CALL_SAVED_GP, since the value of $gp on entry
3713 to the stub would be our caller's gp, not ours. */
3714 if (TARGET_EXPLICIT_RELOCS
3715 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3716 && mips_ok_for_lazy_binding_p (addr))
3717 {
3718 rtx high, lo_sum_symbol;
3719
3720 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3721 addr, SYMBOL_GOTOFF_CALL);
3722 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3723 if (Pmode == SImode)
3724 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3725 else
3726 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3727 }
3728 else
3729 mips_emit_move (dest, addr);
3730 }
3731
3732
3733 /* Expand a call or call_value instruction. RESULT is where the
3734 result will go (null for calls), ADDR is the address of the
3735 function, ARGS_SIZE is the size of the arguments and AUX is
3736 the value passed to us by mips_function_arg. SIBCALL_P is true
3737 if we are expanding a sibling call, false if we're expanding
3738 a normal call. */
3739
3740 void
3741 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3742 {
3743 rtx orig_addr, pattern, insn;
3744
3745 orig_addr = addr;
3746 if (!call_insn_operand (addr, VOIDmode))
3747 {
3748 addr = gen_reg_rtx (Pmode);
3749 mips_load_call_address (addr, orig_addr, sibcall_p);
3750 }
3751
3752 if (TARGET_MIPS16
3753 && TARGET_HARD_FLOAT_ABI
3754 && build_mips16_call_stub (result, addr, args_size,
3755 aux == 0 ? 0 : (int) GET_MODE (aux)))
3756 return;
3757
3758 if (result == 0)
3759 pattern = (sibcall_p
3760 ? gen_sibcall_internal (addr, args_size)
3761 : gen_call_internal (addr, args_size));
3762 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3763 {
3764 rtx reg1, reg2;
3765
3766 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3767 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3768 pattern =
3769 (sibcall_p
3770 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3771 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3772 }
3773 else
3774 pattern = (sibcall_p
3775 ? gen_sibcall_value_internal (result, addr, args_size)
3776 : gen_call_value_internal (result, addr, args_size));
3777
3778 insn = emit_call_insn (pattern);
3779
3780 /* Lazy-binding stubs require $gp to be valid on entry. */
3781 if (mips_ok_for_lazy_binding_p (orig_addr))
3782 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3783 }
3784
3785
3786 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3787
3788 static bool
3789 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3790 tree exp ATTRIBUTE_UNUSED)
3791 {
3792 return TARGET_SIBCALLS;
3793 }
3794 \f
3795 /* Emit code to move general operand SRC into condition-code
3796 register DEST. SCRATCH is a scratch TFmode float register.
3797 The sequence is:
3798
3799 FP1 = SRC
3800 FP2 = 0.0f
3801 DEST = FP2 < FP1
3802
3803 where FP1 and FP2 are single-precision float registers
3804 taken from SCRATCH. */
3805
3806 void
3807 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3808 {
3809 rtx fp1, fp2;
3810
3811 /* Change the source to SFmode. */
3812 if (MEM_P (src))
3813 src = adjust_address (src, SFmode, 0);
3814 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3815 src = gen_rtx_REG (SFmode, true_regnum (src));
3816
3817 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3818 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
3819
3820 mips_emit_move (copy_rtx (fp1), src);
3821 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
3822 emit_insn (gen_slt_sf (dest, fp2, fp1));
3823 }
3824 \f
3825 /* Emit code to change the current function's return address to
3826 ADDRESS. SCRATCH is available as a scratch register, if needed.
3827 ADDRESS and SCRATCH are both word-mode GPRs. */
3828
3829 void
3830 mips_set_return_address (rtx address, rtx scratch)
3831 {
3832 rtx slot_address;
3833
3834 compute_frame_size (get_frame_size ());
3835 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3836 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3837 cfun->machine->frame.gp_sp_offset);
3838
3839 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3840 }
3841 \f
3842 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3843 Assume that the areas do not overlap. */
3844
3845 static void
3846 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3847 {
3848 HOST_WIDE_INT offset, delta;
3849 unsigned HOST_WIDE_INT bits;
3850 int i;
3851 enum machine_mode mode;
3852 rtx *regs;
3853
3854 /* Work out how many bits to move at a time. If both operands have
3855 half-word alignment, it is usually better to move in half words.
3856 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3857 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3858 Otherwise move word-sized chunks. */
3859 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3860 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3861 bits = BITS_PER_WORD / 2;
3862 else
3863 bits = BITS_PER_WORD;
3864
3865 mode = mode_for_size (bits, MODE_INT, 0);
3866 delta = bits / BITS_PER_UNIT;
3867
3868 /* Allocate a buffer for the temporary registers. */
3869 regs = alloca (sizeof (rtx) * length / delta);
3870
3871 /* Load as many BITS-sized chunks as possible. Use a normal load if
3872 the source has enough alignment, otherwise use left/right pairs. */
3873 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3874 {
3875 regs[i] = gen_reg_rtx (mode);
3876 if (MEM_ALIGN (src) >= bits)
3877 mips_emit_move (regs[i], adjust_address (src, mode, offset));
3878 else
3879 {
3880 rtx part = adjust_address (src, BLKmode, offset);
3881 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3882 gcc_unreachable ();
3883 }
3884 }
3885
3886 /* Copy the chunks to the destination. */
3887 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3888 if (MEM_ALIGN (dest) >= bits)
3889 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
3890 else
3891 {
3892 rtx part = adjust_address (dest, BLKmode, offset);
3893 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3894 gcc_unreachable ();
3895 }
3896
3897 /* Mop up any left-over bytes. */
3898 if (offset < length)
3899 {
3900 src = adjust_address (src, BLKmode, offset);
3901 dest = adjust_address (dest, BLKmode, offset);
3902 move_by_pieces (dest, src, length - offset,
3903 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3904 }
3905 }
3906 \f
3907 #define MAX_MOVE_REGS 4
3908 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
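/* For example, MAX_MOVE_BYTES is 16 when UNITS_PER_WORD is 4 (-mgp32)
   and 32 when UNITS_PER_WORD is 8 (-mgp64).  */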
3909
3910
3911 /* Helper function for doing a loop-based block operation on memory
3912 reference MEM. Each iteration of the loop will operate on LENGTH
3913 bytes of MEM.
3914
3915 Create a new base register for use within the loop and point it to
3916 the start of MEM. Create a new memory reference that uses this
3917 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3918
3919 static void
3920 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3921 rtx *loop_reg, rtx *loop_mem)
3922 {
3923 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3924
3925 /* Although the new mem does not refer to a known location,
3926 it does keep up to LENGTH bytes of alignment. */
3927 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3928 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3929 }
3930
3931
3932 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3933 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3934 memory regions do not overlap. */
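/* Worked example (assuming 4-byte words, so MAX_MOVE_BYTES == 16):
   for a 100-byte copy, LEFTOVER is 4, the loop copies 96 bytes in six
   16-byte iterations, and the remaining 4 bytes are handled by the
   trailing mips_block_move_straight call.  */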
3935
3936 static void
3937 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3938 {
3939 rtx label, src_reg, dest_reg, final_src;
3940 HOST_WIDE_INT leftover;
3941
3942 leftover = length % MAX_MOVE_BYTES;
3943 length -= leftover;
3944
3945 /* Create registers and memory references for use within the loop. */
3946 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3947 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3948
3949 /* Calculate the value that SRC_REG should have after the last iteration
3950 of the loop. */
3951 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3952 0, 0, OPTAB_WIDEN);
3953
3954 /* Emit the start of the loop. */
3955 label = gen_label_rtx ();
3956 emit_label (label);
3957
3958 /* Emit the loop body. */
3959 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3960
3961 /* Move on to the next block. */
3962 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3963 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3964
3965 /* Emit the loop condition. */
3966 if (Pmode == DImode)
3967 emit_insn (gen_cmpdi (src_reg, final_src));
3968 else
3969 emit_insn (gen_cmpsi (src_reg, final_src));
3970 emit_jump_insn (gen_bne (label));
3971
3972 /* Mop up any left-over bytes. */
3973 if (leftover)
3974 mips_block_move_straight (dest, src, leftover);
3975 }
3976 \f
3977
3978 /* Expand a loop of synci insns for the address range [BEGIN, END). */
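/* A sketch of the generated loop (illustrative pseudo-assembly only;
   register names are placeholders):

     rdhwr  $inc, $1         # load the SYNCI step (cache line size)
   1: synci  0($begin)
     sltu   $cmp, $end, $begin
     addu   $begin, $begin, $inc
     beqz   $cmp, 1b  */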
3979
3980 void
3981 mips_expand_synci_loop (rtx begin, rtx end)
3982 {
3983 rtx inc, label, cmp, cmp_result;
3984
3985 /* Load INC with the cache line size (rdhwr INC,$1). */
3986 inc = gen_reg_rtx (SImode);
3987 emit_insn (gen_rdhwr (inc, const1_rtx));
3988
3989 /* Loop back to here. */
3990 label = gen_label_rtx ();
3991 emit_label (label);
3992
3993 emit_insn (gen_synci (begin));
3994
3995 cmp = gen_reg_rtx (Pmode);
3996 mips_emit_binary (GTU, cmp, begin, end);
3997
3998 mips_emit_binary (PLUS, begin, begin, inc);
3999
4000 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4001 emit_jump_insn (gen_condjump (cmp_result, label));
4002 }
4003 \f
4004 /* Expand a movmemsi instruction. */
4005
4006 bool
4007 mips_expand_block_move (rtx dest, rtx src, rtx length)
4008 {
4009 if (GET_CODE (length) == CONST_INT)
4010 {
4011 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4012 {
4013 mips_block_move_straight (dest, src, INTVAL (length));
4014 return true;
4015 }
4016 else if (optimize)
4017 {
4018 mips_block_move_loop (dest, src, INTVAL (length));
4019 return true;
4020 }
4021 }
4022 return false;
4023 }
4024 \f
4025 /* Argument support functions. */
4026
4027 /* Initialize CUMULATIVE_ARGS for a function. */
4028
4029 void
4030 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4031 rtx libname ATTRIBUTE_UNUSED)
4032 {
4033 static CUMULATIVE_ARGS zero_cum;
4034 tree param, next_param;
4035
4036 *cum = zero_cum;
4037 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4038
4039 /* Determine whether this function has variable arguments: a fixed-argument
4040 function has 'void_type_node' as the last element of TYPE_ARG_TYPES,
4041 while a variable-argument function does not. For variable-argument
4042 functions, the standard MIPS calling sequence passes all arguments in the general purpose registers. */
4043
4044 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4045 param != 0; param = next_param)
4046 {
4047 next_param = TREE_CHAIN (param);
4048 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4049 cum->gp_reg_found = 1;
4050 }
4051 }
4052
4053
4054 /* Fill INFO with information about a single argument. CUM is the
4055 cumulative state for earlier arguments. MODE is the mode of this
4056 argument and TYPE is its type (if known). NAMED is true if this
4057 is a named (fixed) argument rather than a variable one. */
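/* For instance, under o32 with hard float a leading "double" argument
   ends up with fpr_p set and reg_offset 0, so it is passed in $f12;
   a "double" that follows an "int" argument has gp_reg_found set and
   is therefore passed in $6/$7, with $5 skipped for doubleword
   alignment.  */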
4058
4059 static void
4060 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4061 tree type, int named, struct mips_arg_info *info)
4062 {
4063 bool doubleword_aligned_p;
4064 unsigned int num_bytes, num_words, max_regs;
4065
4066 /* Work out the size of the argument. */
4067 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4068 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4069
4070 /* Decide whether it should go in a floating-point register, assuming
4071 one is free. Later code checks for availability.
4072
4073 The checks against UNITS_PER_FPVALUE handle the soft-float and
4074 single-float cases. */
4075 switch (mips_abi)
4076 {
4077 case ABI_EABI:
4078 /* The EABI conventions have traditionally been defined in terms
4079 of TYPE_MODE, regardless of the actual type. */
4080 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4081 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4082 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4083 break;
4084
4085 case ABI_32:
4086 case ABI_O64:
4087 /* Only leading floating-point scalars are passed in
4088 floating-point registers. We also handle vector floats the same
4089 way, which is OK because they are not covered by the standard ABI. */
4090 info->fpr_p = (!cum->gp_reg_found
4091 && cum->arg_number < 2
4092 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4093 || VECTOR_FLOAT_TYPE_P (type))
4094 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4095 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4096 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4097 break;
4098
4099 case ABI_N32:
4100 case ABI_64:
4101 /* Scalar and complex floating-point types are passed in
4102 floating-point registers. */
4103 info->fpr_p = (named
4104 && (type == 0 || FLOAT_TYPE_P (type))
4105 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4106 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4107 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4108 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4109
4110 /* ??? According to the ABI documentation, the real and imaginary
4111 parts of complex floats should be passed in individual registers.
4112 The real and imaginary parts of stack arguments are supposed
4113 to be contiguous and there should be an extra word of padding
4114 at the end.
4115
4116 This has two problems. First, it makes it impossible to use a
4117 single "void *" va_list type, since register and stack arguments
4118 are passed differently. (At the time of writing, MIPSpro cannot
4119 handle complex float varargs correctly.) Second, it's unclear
4120 what should happen when there is only one register free.
4121
4122 For now, we assume that named complex floats should go into FPRs
4123 if there are two FPRs free, otherwise they should be passed in the
4124 same way as a struct containing two floats. */
4125 if (info->fpr_p
4126 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4127 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4128 {
4129 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4130 info->fpr_p = false;
4131 else
4132 num_words = 2;
4133 }
4134 break;
4135
4136 default:
4137 gcc_unreachable ();
4138 }
4139
4140 /* See whether the argument has doubleword alignment. */
4141 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4142
4143 /* Set REG_OFFSET to the register count we're interested in.
4144 The EABI allocates the floating-point registers separately,
4145 but the other ABIs allocate them like integer registers. */
4146 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4147 ? cum->num_fprs
4148 : cum->num_gprs);
4149
4150 /* Advance to an even register if the argument is doubleword-aligned. */
4151 if (doubleword_aligned_p)
4152 info->reg_offset += info->reg_offset & 1;
4153
4154 /* Work out the offset of a stack argument. */
4155 info->stack_offset = cum->stack_words;
4156 if (doubleword_aligned_p)
4157 info->stack_offset += info->stack_offset & 1;
4158
4159 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4160
4161 /* Partition the argument between registers and stack. */
4162 info->reg_words = MIN (num_words, max_regs);
4163 info->stack_words = num_words - info->reg_words;
4164 }
4165
4166
4167 /* INFO describes an argument that is passed in a single-register value.
4168 Return the register it uses, assuming that FPRs are available if
4169 HARD_FLOAT_P. */
4170
4171 static unsigned int
4172 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4173 {
4174 if (!info->fpr_p || !hard_float_p)
4175 return GP_ARG_FIRST + info->reg_offset;
4176 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4177 /* In o32, the second argument is always passed in $f14
4178 for TARGET_DOUBLE_FLOAT, regardless of whether the
4179 first argument was a word or doubleword. */
4180 return FP_ARG_FIRST + 2;
4181 else
4182 return FP_ARG_FIRST + info->reg_offset;
4183 }
4184
4185 /* Implement FUNCTION_ARG_ADVANCE. */
4186
4187 void
4188 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4189 tree type, int named)
4190 {
4191 struct mips_arg_info info;
4192
4193 mips_arg_info (cum, mode, type, named, &info);
4194
4195 if (!info.fpr_p)
4196 cum->gp_reg_found = true;
4197
4198 /* See the comment above the cumulative args structure in mips.h
4199 for an explanation of what this code does. It assumes the O32
4200 ABI, which passes at most 2 arguments in float registers. */
4201 if (cum->arg_number < 2 && info.fpr_p)
4202 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4203
4204 if (mips_abi != ABI_EABI || !info.fpr_p)
4205 cum->num_gprs = info.reg_offset + info.reg_words;
4206 else if (info.reg_words > 0)
4207 cum->num_fprs += MAX_FPRS_PER_FMT;
4208
4209 if (info.stack_words > 0)
4210 cum->stack_words = info.stack_offset + info.stack_words;
4211
4212 cum->arg_number++;
4213 }
4214
4215 /* Implement FUNCTION_ARG. */
4216
4217 struct rtx_def *
4218 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4219 tree type, int named)
4220 {
4221 struct mips_arg_info info;
4222
4223 /* We will be called with a mode of VOIDmode after the last argument
4224 has been seen. Whatever we return will be passed to the call
4225 insn. If we need a mips16 fp_code, return a REG with the code
4226 stored as the mode. */
4227 if (mode == VOIDmode)
4228 {
4229 if (TARGET_MIPS16 && cum->fp_code != 0)
4230 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4232 else
4233 return 0;
4234 }
4235
4236 mips_arg_info (cum, mode, type, named, &info);
4237
4238 /* Return straight away if the whole argument is passed on the stack. */
4239 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4240 return 0;
4241
4242 if (type != 0
4243 && TREE_CODE (type) == RECORD_TYPE
4244 && TARGET_NEWABI
4245 && TYPE_SIZE_UNIT (type)
4246 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4247 && named)
4248 {
4249 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4250 structure contains a double in its entirety, then that 64-bit
4251 chunk is passed in a floating point register. */
4252 tree field;
4253
4254 /* First check to see if there is any such field. */
4255 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4256 if (TREE_CODE (field) == FIELD_DECL
4257 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4258 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4259 && host_integerp (bit_position (field), 0)
4260 && int_bit_position (field) % BITS_PER_WORD == 0)
4261 break;
4262
4263 if (field != 0)
4264 {
4265 /* Now handle the special case by returning a PARALLEL
4266 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4267 chunks are passed in registers. */
4268 unsigned int i;
4269 HOST_WIDE_INT bitpos;
4270 rtx ret;
4271
4272 /* assign_parms checks the mode of ENTRY_PARM, so we must
4273 use the actual mode here. */
4274 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4275
4276 bitpos = 0;
4277 field = TYPE_FIELDS (type);
4278 for (i = 0; i < info.reg_words; i++)
4279 {
4280 rtx reg;
4281
4282 for (; field; field = TREE_CHAIN (field))
4283 if (TREE_CODE (field) == FIELD_DECL
4284 && int_bit_position (field) >= bitpos)
4285 break;
4286
4287 if (field
4288 && int_bit_position (field) == bitpos
4289 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4290 && !TARGET_SOFT_FLOAT
4291 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4292 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4293 else
4294 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4295
4296 XVECEXP (ret, 0, i)
4297 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4298 GEN_INT (bitpos / BITS_PER_UNIT));
4299
4300 bitpos += BITS_PER_WORD;
4301 }
4302 return ret;
4303 }
4304 }
4305
4306 /* Handle the n32/n64 conventions for passing complex floating-point
4307 arguments in FPR pairs. The real part goes in the lower register
4308 and the imaginary part goes in the upper register. */
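/* (Under n64, for example, a leading _Complex double yields a PARALLEL
   that puts the real part in $f12 at offset 0 and the imaginary part
   in $f13 at offset 8.)  */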
4309 if (TARGET_NEWABI
4310 && info.fpr_p
4311 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4312 {
4313 rtx real, imag;
4314 enum machine_mode inner;
4315 int reg;
4316
4317 inner = GET_MODE_INNER (mode);
4318 reg = FP_ARG_FIRST + info.reg_offset;
4319 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4320 {
4321 /* Real part in registers, imaginary part on stack. */
4322 gcc_assert (info.stack_words == info.reg_words);
4323 return gen_rtx_REG (inner, reg);
4324 }
4325 else
4326 {
4327 gcc_assert (info.stack_words == 0);
4328 real = gen_rtx_EXPR_LIST (VOIDmode,
4329 gen_rtx_REG (inner, reg),
4330 const0_rtx);
4331 imag = gen_rtx_EXPR_LIST (VOIDmode,
4332 gen_rtx_REG (inner,
4333 reg + info.reg_words / 2),
4334 GEN_INT (GET_MODE_SIZE (inner)));
4335 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4336 }
4337 }
4338
4339 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4340 }
4341
4342
4343 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4344
4345 static int
4346 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4347 enum machine_mode mode, tree type, bool named)
4348 {
4349 struct mips_arg_info info;
4350
4351 mips_arg_info (cum, mode, type, named, &info);
4352 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4353 }
4354
4355
4356 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4357 PARM_BOUNDARY bits of alignment, but will be given anything up
4358 to STACK_BOUNDARY bits if the type requires it. */
4359
4360 int
4361 function_arg_boundary (enum machine_mode mode, tree type)
4362 {
4363 unsigned int alignment;
4364
4365 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4366 if (alignment < PARM_BOUNDARY)
4367 alignment = PARM_BOUNDARY;
4368 if (alignment > STACK_BOUNDARY)
4369 alignment = STACK_BOUNDARY;
4370 return alignment;
4371 }
4372
4373 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4374 upward rather than downward. In other words, return true if the
4375 first byte of the stack slot has useful data, false if the last
4376 byte does. */
4377
4378 bool
4379 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4380 {
4381 /* On little-endian targets, the first byte of every stack argument
4382 is passed in the first byte of the stack slot. */
4383 if (!BYTES_BIG_ENDIAN)
4384 return true;
4385
4386 /* Otherwise, integral types are padded downward: the last byte of a
4387 stack argument is passed in the last byte of the stack slot. */
4388 if (type != 0
4389 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4390 : GET_MODE_CLASS (mode) == MODE_INT)
4391 return false;
4392
4393 /* Big-endian o64 pads floating-point arguments downward. */
4394 if (mips_abi == ABI_O64)
4395 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4396 return false;
4397
4398 /* Other types are padded upward for o32, o64, n32 and n64. */
4399 if (mips_abi != ABI_EABI)
4400 return true;
4401
4402 /* Arguments smaller than a stack slot are padded downward. */
4403 if (mode != BLKmode)
4404 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4405 else
4406 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4407 }
4408
4409
4410 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4411 if the least significant byte of the register has useful data. Return
4412 the opposite if the most significant byte does. */
4413
4414 bool
4415 mips_pad_reg_upward (enum machine_mode mode, tree type)
4416 {
4417 /* No shifting is required for floating-point arguments. */
4418 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4419 return !BYTES_BIG_ENDIAN;
4420
4421 /* Otherwise, apply the same padding to register arguments as we do
4422 to stack arguments. */
4423 return mips_pad_arg_upward (mode, type);
4424 }
4425 \f
4426 static void
4427 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4428 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4429 int no_rtl)
4430 {
4431 CUMULATIVE_ARGS local_cum;
4432 int gp_saved, fp_saved;
4433
4434 /* The caller has advanced CUM up to, but not beyond, the last named
4435 argument. Advance a local copy of CUM past the last "real" named
4436 argument, to find out how many registers are left over. */
4437
4438 local_cum = *cum;
4439 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4440
4441 /* Find out how many registers we need to save. */
4442 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4443 fp_saved = (EABI_FLOAT_VARARGS_P
4444 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4445 : 0);
4446
4447 if (!no_rtl)
4448 {
4449 if (gp_saved > 0)
4450 {
4451 rtx ptr, mem;
4452
4453 ptr = plus_constant (virtual_incoming_args_rtx,
4454 REG_PARM_STACK_SPACE (cfun->decl)
4455 - gp_saved * UNITS_PER_WORD);
4456 mem = gen_rtx_MEM (BLKmode, ptr);
4457 set_mem_alias_set (mem, get_varargs_alias_set ());
4458
4459 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4460 mem, gp_saved);
4461 }
4462 if (fp_saved > 0)
4463 {
4464 /* We can't use move_block_from_reg, because it will use
4465 the wrong mode. */
4466 enum machine_mode mode;
4467 int off, i;
4468
4469 /* Set OFF to the offset from virtual_incoming_args_rtx of
4470 the first float register. The FP save area lies below
4471 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4472 off = -gp_saved * UNITS_PER_WORD;
4473 off &= ~(UNITS_PER_FPVALUE - 1);
4474 off -= fp_saved * UNITS_PER_FPREG;
4475
4476 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4477
4478 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4479 i += MAX_FPRS_PER_FMT)
4480 {
4481 rtx ptr, mem;
4482
4483 ptr = plus_constant (virtual_incoming_args_rtx, off);
4484 mem = gen_rtx_MEM (mode, ptr);
4485 set_mem_alias_set (mem, get_varargs_alias_set ());
4486 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4487 off += UNITS_PER_HWFPVALUE;
4488 }
4489 }
4490 }
4491 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4492 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4493 + fp_saved * UNITS_PER_FPREG);
4494 }
4495
4496 /* Create the va_list data type.
4497 We keep 3 pointers, and two offsets.
4498 Two pointers are to the overflow area, which starts at the CFA.
4499 One of these is constant, for addressing into the GPR save area below it.
4500 The other is advanced up the stack through the overflow region.
4501 The third pointer is to the GPR save area. Since the FPR save area
4502 is just below it, we can address FPR slots off this pointer.
4503 We also keep two one-byte offsets, which are to be subtracted from the
4504 constant pointers to yield addresses in the GPR and FPR save areas.
4505 These are downcounted as float or non-float arguments are used,
4506 and when they get to zero, the argument must be obtained from the
4507 overflow region.
4508 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4509 pointer is enough. It's started at the GPR save area, and is
4510 advanced, period.
4511 Note that the GPR save area is not constant size, due to optimization
4512 in the prologue. Hence, we can't use a design with two pointers
4513 and two offsets, although we could have designed this with two pointers
4514 and three offsets. */
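/* Roughly, the EABI_FLOAT_VARARGS_P record built below corresponds to
   the following layout (illustrative only; the reserved padding depends
   on the pointer size):

	struct {
	  void *__overflow_argptr;
	  void *__gpr_top;
	  void *__fpr_top;
	  unsigned char __gpr_offset;
	  unsigned char __fpr_offset;
	  unsigned char __reserved[sizeof (void *) - 2];
	};  */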
4515
4516 static tree
4517 mips_build_builtin_va_list (void)
4518 {
4519 if (EABI_FLOAT_VARARGS_P)
4520 {
4521 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4522 tree array, index;
4523
4524 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4525
4526 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4527 ptr_type_node);
4528 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4529 ptr_type_node);
4530 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4531 ptr_type_node);
4532 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4533 unsigned_char_type_node);
4534 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4535 unsigned_char_type_node);
4536 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4537 warn on every user file. */
4538 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4539 array = build_array_type (unsigned_char_type_node,
4540 build_index_type (index));
4541 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4542
4543 DECL_FIELD_CONTEXT (f_ovfl) = record;
4544 DECL_FIELD_CONTEXT (f_gtop) = record;
4545 DECL_FIELD_CONTEXT (f_ftop) = record;
4546 DECL_FIELD_CONTEXT (f_goff) = record;
4547 DECL_FIELD_CONTEXT (f_foff) = record;
4548 DECL_FIELD_CONTEXT (f_res) = record;
4549
4550 TYPE_FIELDS (record) = f_ovfl;
4551 TREE_CHAIN (f_ovfl) = f_gtop;
4552 TREE_CHAIN (f_gtop) = f_ftop;
4553 TREE_CHAIN (f_ftop) = f_goff;
4554 TREE_CHAIN (f_goff) = f_foff;
4555 TREE_CHAIN (f_foff) = f_res;
4556
4557 layout_type (record);
4558 return record;
4559 }
4560 else if (TARGET_IRIX && TARGET_IRIX6)
4561 /* On IRIX 6, this type is 'char *'. */
4562 return build_pointer_type (char_type_node);
4563 else
4564 /* Otherwise, we use 'void *'. */
4565 return ptr_type_node;
4566 }
4567
4568 /* Implement va_start. */
4569
4570 void
4571 mips_va_start (tree valist, rtx nextarg)
4572 {
4573 if (EABI_FLOAT_VARARGS_P)
4574 {
4575 const CUMULATIVE_ARGS *cum;
4576 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4577 tree ovfl, gtop, ftop, goff, foff;
4578 tree t;
4579 int gpr_save_area_size;
4580 int fpr_save_area_size;
4581 int fpr_offset;
4582
4583 cum = &current_function_args_info;
4584 gpr_save_area_size
4585 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4586 fpr_save_area_size
4587 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4588
4589 f_ovfl = TYPE_FIELDS (va_list_type_node);
4590 f_gtop = TREE_CHAIN (f_ovfl);
4591 f_ftop = TREE_CHAIN (f_gtop);
4592 f_goff = TREE_CHAIN (f_ftop);
4593 f_foff = TREE_CHAIN (f_goff);
4594
4595 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4596 NULL_TREE);
4597 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4598 NULL_TREE);
4599 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4600 NULL_TREE);
4601 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4602 NULL_TREE);
4603 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4604 NULL_TREE);
4605
4606 /* Emit code to initialize OVFL, which points to the next varargs
4607 stack argument. CUM->STACK_WORDS gives the number of stack
4608 words used by named arguments. */
4609 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4610 if (cum->stack_words > 0)
4611 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4612 size_int (cum->stack_words * UNITS_PER_WORD));
4613 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4614 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4615
4616 /* Emit code to initialize GTOP, the top of the GPR save area. */
4617 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4618 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4619 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4620
4621 /* Emit code to initialize FTOP, the top of the FPR save area.
4622 This address is gpr_save_area_size bytes below GTOP, rounded
4623 down to the next fp-aligned boundary. */
4624 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4625 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4626 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4627 if (fpr_offset)
4628 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4629 size_int (-fpr_offset));
4630 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4631 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4632
4633 /* Emit code to initialize GOFF, the offset from GTOP of the
4634 next GPR argument. */
4635 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4636 build_int_cst (NULL_TREE, gpr_save_area_size));
4637 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4638
4639 /* Likewise emit code to initialize FOFF, the offset from FTOP
4640 of the next FPR argument. */
4641 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4642 build_int_cst (NULL_TREE, fpr_save_area_size));
4643 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4644 }
4645 else
4646 {
4647 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4648 std_expand_builtin_va_start (valist, nextarg);
4649 }
4650 }
4651 \f
4652 /* Implement va_arg. */
4653
4654 static tree
4655 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4656 {
4657 HOST_WIDE_INT size, rsize;
4658 tree addr;
4659 bool indirect;
4660
4661 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4662
4663 if (indirect)
4664 type = build_pointer_type (type);
4665
4666 size = int_size_in_bytes (type);
4667 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4668
4669 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4670 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4671 else
4672 {
4673 /* Not a simple merged stack. */
4674
4675 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4676 tree ovfl, top, off, align;
4677 HOST_WIDE_INT osize;
4678 tree t, u;
4679
4680 f_ovfl = TYPE_FIELDS (va_list_type_node);
4681 f_gtop = TREE_CHAIN (f_ovfl);
4682 f_ftop = TREE_CHAIN (f_gtop);
4683 f_goff = TREE_CHAIN (f_ftop);
4684 f_foff = TREE_CHAIN (f_goff);
4685
4686 /* We maintain separate pointers and offsets for floating-point
4687 and integer arguments, but we need similar code in both cases.
4688 Let:
4689
4690 TOP be the top of the register save area;
4691 OFF be the offset from TOP of the next register;
4692 ADDR_RTX be the address of the argument;
4693 RSIZE be the number of bytes used to store the argument
4694 when it's in the register save area;
4695 OSIZE be the number of bytes used to store it when it's
4696 in the stack overflow area; and
4697 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4698
4699 The code we want is:
4700
4701 1: off &= -rsize; // round down
4702 2: if (off != 0)
4703 3: {
4704 4: addr_rtx = top - off;
4705 5: off -= rsize;
4706 6: }
4707 7: else
4708 8: {
4709 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4710 10: addr_rtx = ovfl + PADDING;
4711 11: ovfl += osize;
4712 12: }
4713
4714 [1] and [9] can sometimes be optimized away. */
4715
4716 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4717 NULL_TREE);
4718
4719 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4720 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4721 {
4722 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4723 NULL_TREE);
4724 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4725 NULL_TREE);
4726
4727 /* When floating-point registers are saved to the stack,
4728 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4729 of the float's precision. */
4730 rsize = UNITS_PER_HWFPVALUE;
4731
4732 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4733 (= PARM_BOUNDARY bits). This can be different from RSIZE
4734 in two cases:
4735
4736 (1) On 32-bit targets when TYPE is a structure such as:
4737
4738 struct s { float f; };
4739
4740 Such structures are passed in paired FPRs, so RSIZE
4741 will be 8 bytes. However, the structure only takes
4742 up 4 bytes of memory, so OSIZE will only be 4.
4743
4744 (2) In combinations such as -mgp64 -msingle-float
4745 -fshort-double. Doubles passed in registers
4746 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4747 but those passed on the stack take up
4748 UNITS_PER_WORD bytes. */
4749 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4750 }
4751 else
4752 {
4753 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4754 NULL_TREE);
4755 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4756 NULL_TREE);
4757 if (rsize > UNITS_PER_WORD)
4758 {
4759 /* [1] Emit code for: off &= -rsize. */
4760 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4761 build_int_cst (NULL_TREE, -rsize));
4762 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4763 gimplify_and_add (t, pre_p);
4764 }
4765 osize = rsize;
4766 }
4767
4768 /* [2] Emit code to branch if off == 0. */
4769 t = build2 (NE_EXPR, boolean_type_node, off,
4770 build_int_cst (TREE_TYPE (off), 0));
4771 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4772
4773 /* [5] Emit code for: off -= rsize. We do this as a form of
4774 post-decrement not available to C. Also widen for the
4775 coming pointer arithmetic. */
4776 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4777 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4778 t = fold_convert (sizetype, t);
4779 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4780
4781 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4782 the argument has RSIZE - SIZE bytes of leading padding. */
4783 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4784 if (BYTES_BIG_ENDIAN && rsize > size)
4785 {
4786 u = size_int (rsize - size);
4787 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4788 }
4789 COND_EXPR_THEN (addr) = t;
4790
4791 if (osize > UNITS_PER_WORD)
4792 {
4793 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4794 u = size_int (osize - 1);
4795 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4796 t = fold_convert (sizetype, t);
4797 u = size_int (-osize);
4798 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4799 t = fold_convert (TREE_TYPE (ovfl), t);
4800 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4801 }
4802 else
4803 align = NULL;
4804
4805 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4806 post-increment ovfl by osize. On big-endian machines,
4807 the argument has OSIZE - SIZE bytes of leading padding. */
4808 u = fold_convert (TREE_TYPE (ovfl),
4809 build_int_cst (NULL_TREE, osize));
4810 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4811 if (BYTES_BIG_ENDIAN && osize > size)
4812 {
4813 u = size_int (osize - size);
4814 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4815 }
4816
4817 /* String [9] and [10,11] together. */
4818 if (align)
4819 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4820 COND_EXPR_ELSE (addr) = t;
4821
4822 addr = fold_convert (build_pointer_type (type), addr);
4823 addr = build_va_arg_indirect_ref (addr);
4824 }
4825
4826 if (indirect)
4827 addr = build_va_arg_indirect_ref (addr);
4828
4829 return addr;
4830 }
4831 \f
4832 /* Return true if it is possible to use left/right accesses for a
4833 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4834 returning true, update *OP, *LEFT and *RIGHT as follows:
4835
4836 *OP is a BLKmode reference to the whole field.
4837
4838 *LEFT is a QImode reference to the first byte if big endian or
4839 the last byte if little endian. This address can be used in the
4840 left-side instructions (lwl, swl, ldl, sdl).
4841
4842 *RIGHT is a QImode reference to the opposite end of the field and
4843 can be used in the partnering right-side instructions (lwr, swr, ldr, sdr). */
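/* For a naturally unaligned 32-bit field at byte address A, big-endian
   targets get *LEFT == A (for lwl/swl) and *RIGHT == A + 3 (for
   lwr/swr); little-endian targets get the two the other way around.  */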
4844
4845 static bool
4846 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4847 rtx *left, rtx *right)
4848 {
4849 rtx first, last;
4850
4851 /* Check that the operand really is a MEM. Not all the extv and
4852 extzv predicates are checked. */
4853 if (!MEM_P (*op))
4854 return false;
4855
4856 /* Check that the size is valid. */
4857 if (width != 32 && (!TARGET_64BIT || width != 64))
4858 return false;
4859
4860 /* We can only access byte-aligned values. Since we are always passed
4861 a reference to the first byte of the field, it is not necessary to
4862 do anything with BITPOS after this check. */
4863 if (bitpos % BITS_PER_UNIT != 0)
4864 return false;
4865
4866 /* Reject aligned bitfields: we want to use a normal load or store
4867 instead of a left/right pair. */
4868 if (MEM_ALIGN (*op) >= width)
4869 return false;
4870
4871 /* Adjust *OP to refer to the whole field. This also has the effect
4872 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4873 *op = adjust_address (*op, BLKmode, 0);
4874 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4875
4876 /* Get references to both ends of the field. We deliberately don't
4877 use the original QImode *OP for FIRST since the new BLKmode one
4878 might have a simpler address. */
4879 first = adjust_address (*op, QImode, 0);
4880 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4881
4882 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4883 be the upper word and RIGHT the lower word. */
4884 if (TARGET_BIG_ENDIAN)
4885 *left = first, *right = last;
4886 else
4887 *left = last, *right = first;
4888
4889 return true;
4890 }
4891
4892
4893 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4894 Return true on success. We only handle cases where zero_extract is
4895 equivalent to sign_extract. */
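/* For a 32-bit load the emitted sequence is essentially
   "lwl TEMP, LEFT; lwr DEST, RIGHT", with the lwr insn also using TEMP
   so that the partially-loaded value is merged into DEST.  */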
4896
4897 bool
4898 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4899 {
4900 rtx left, right, temp;
4901
4902 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4903 paradoxical word_mode subreg. This is the only case in which
4904 we allow the destination to be larger than the source. */
4905 if (GET_CODE (dest) == SUBREG
4906 && GET_MODE (dest) == DImode
4907 && SUBREG_BYTE (dest) == 0
4908 && GET_MODE (SUBREG_REG (dest)) == SImode)
4909 dest = SUBREG_REG (dest);
4910
4911 /* After the above adjustment, the destination must be the same
4912 width as the source. */
4913 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4914 return false;
4915
4916 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4917 return false;
4918
4919 temp = gen_reg_rtx (GET_MODE (dest));
4920 if (GET_MODE (dest) == DImode)
4921 {
4922 emit_insn (gen_mov_ldl (temp, src, left));
4923 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4924 }
4925 else
4926 {
4927 emit_insn (gen_mov_lwl (temp, src, left));
4928 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4929 }
4930 return true;
4931 }
4932
4933
4934 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4935 true on success. */
4936
4937 bool
4938 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4939 {
4940 rtx left, right;
4941 enum machine_mode mode;
4942
4943 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4944 return false;
4945
4946 mode = mode_for_size (width, MODE_INT, 0);
4947 src = gen_lowpart (mode, src);
4948
4949 if (mode == DImode)
4950 {
4951 emit_insn (gen_mov_sdl (dest, src, left));
4952 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4953 }
4954 else
4955 {
4956 emit_insn (gen_mov_swl (dest, src, left));
4957 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4958 }
4959 return true;
4960 }
4961
4962 /* Return true if X is a MEM with the same size as MODE. */
4963
4964 bool
4965 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4966 {
4967 rtx size;
4968
4969 if (!MEM_P (x))
4970 return false;
4971
4972 size = MEM_SIZE (x);
4973 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4974 }
4975
4976 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4977 source of an "ext" instruction or the destination of an "ins"
4978 instruction. OP must be a register operand and the following
4979 conditions must hold:
4980
4981 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4982 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4983 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4984
4985 Also reject lengths equal to a word as they are better handled
4986 by the move patterns. */
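/* For example, with SImode OP, SIZE 8 and POSITION 4 this allows
   "ext $d,$s,4,8" (when OP is the source) or "ins $d,$s,4,8" (when OP
   is the destination); a SIZE of 32 is rejected so that an ordinary
   move pattern is used instead.  */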
4987
4988 bool
4989 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4990 {
4991 HOST_WIDE_INT len, pos;
4992
4993 if (!ISA_HAS_EXT_INS
4994 || !register_operand (op, VOIDmode)
4995 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4996 return false;
4997
4998 len = INTVAL (size);
4999 pos = INTVAL (position);
5000
5001 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5002 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5003 return false;
5004
5005 return true;
5006 }
5007
5008 /* Set up globals to generate code for the ISA or processor
5009 described by INFO. */
5010
5011 static void
5012 mips_set_architecture (const struct mips_cpu_info *info)
5013 {
5014 if (info != 0)
5015 {
5016 mips_arch_info = info;
5017 mips_arch = info->cpu;
5018 mips_isa = info->isa;
5019 }
5020 }
5021
5022
5023 /* Likewise for tuning. */
5024
5025 static void
5026 mips_set_tune (const struct mips_cpu_info *info)
5027 {
5028 if (info != 0)
5029 {
5030 mips_tune_info = info;
5031 mips_tune = info->cpu;
5032 }
5033 }
5034
5035 /* Implement TARGET_HANDLE_OPTION. */
5036
5037 static bool
5038 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
5039 {
5040 switch (code)
5041 {
5042 case OPT_mabi_:
5043 if (strcmp (arg, "32") == 0)
5044 mips_abi = ABI_32;
5045 else if (strcmp (arg, "o64") == 0)
5046 mips_abi = ABI_O64;
5047 else if (strcmp (arg, "n32") == 0)
5048 mips_abi = ABI_N32;
5049 else if (strcmp (arg, "64") == 0)
5050 mips_abi = ABI_64;
5051 else if (strcmp (arg, "eabi") == 0)
5052 mips_abi = ABI_EABI;
5053 else
5054 return false;
5055 return true;
5056
5057 case OPT_march_:
5058 case OPT_mtune_:
5059 return mips_parse_cpu (arg) != 0;
5060
5061 case OPT_mips:
5062 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5063 return mips_isa_info != 0;
5064
5065 case OPT_mno_flush_func:
5066 mips_cache_flush_func = NULL;
5067 return true;
5068
5069 case OPT_mcode_readable_:
5070 if (strcmp (arg, "yes") == 0)
5071 mips_code_readable = CODE_READABLE_YES;
5072 else if (strcmp (arg, "pcrel") == 0)
5073 mips_code_readable = CODE_READABLE_PCREL;
5074 else if (strcmp (arg, "no") == 0)
5075 mips_code_readable = CODE_READABLE_NO;
5076 else
5077 return false;
5078 return true;
5079
5080 default:
5081 return true;
5082 }
5083 }
5084
5085 /* Set up the threshold for data to go into the small data area, instead
5086 of the normal data area, and detect any conflicts in the switches. */
5087
5088 void
5089 override_options (void)
5090 {
5091 int i, start, regno;
5092 enum machine_mode mode;
5093
5094 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5095 SUBTARGET_OVERRIDE_OPTIONS;
5096 #endif
5097
5098 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5099
5100 /* The following code determines the architecture and register size.
5101 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5102 The GAS and GCC code should be kept in sync as much as possible. */
5103
5104 if (mips_arch_string != 0)
5105 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5106
5107 if (mips_isa_info != 0)
5108 {
5109 if (mips_arch_info == 0)
5110 mips_set_architecture (mips_isa_info);
5111 else if (mips_arch_info->isa != mips_isa_info->isa)
5112 error ("-%s conflicts with the other architecture options, "
5113 "which specify a %s processor",
5114 mips_isa_info->name,
5115 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5116 }
5117
5118 if (mips_arch_info == 0)
5119 {
5120 #ifdef MIPS_CPU_STRING_DEFAULT
5121 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5122 #else
5123 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5124 #endif
5125 }
5126
5127 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5128 error ("-march=%s is not compatible with the selected ABI",
5129 mips_arch_info->name);
5130
5131 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5132 if (mips_tune_string != 0)
5133 mips_set_tune (mips_parse_cpu (mips_tune_string));
5134
5135 if (mips_tune_info == 0)
5136 mips_set_tune (mips_arch_info);
5137
5138 /* Set cost structure for the processor. */
5139 if (optimize_size)
5140 mips_cost = &mips_rtx_cost_optimize_size;
5141 else
5142 mips_cost = &mips_rtx_cost_data[mips_tune];
5143
5144 /* If the user hasn't specified a branch cost, use the processor's
5145 default. */
5146 if (mips_branch_cost == 0)
5147 mips_branch_cost = mips_cost->branch_cost;
5148
5149 if ((target_flags_explicit & MASK_64BIT) != 0)
5150 {
5151 /* The user specified the size of the integer registers. Make sure
5152 it agrees with the ABI and ISA. */
5153 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5154 error ("-mgp64 used with a 32-bit processor");
5155 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5156 error ("-mgp32 used with a 64-bit ABI");
5157 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5158 error ("-mgp64 used with a 32-bit ABI");
5159 }
5160 else
5161 {
5162 /* Infer the integer register size from the ABI and processor.
5163 Restrict ourselves to 32-bit registers if that's all the
5164 processor has, or if the ABI cannot handle 64-bit registers. */
5165 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5166 target_flags &= ~MASK_64BIT;
5167 else
5168 target_flags |= MASK_64BIT;
5169 }
5170
5171 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5172 {
5173 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5174 only one right answer here. */
5175 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5176 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5177 else if (!TARGET_64BIT && TARGET_FLOAT64
5178 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5179 error ("-mgp32 and -mfp64 can only be combined if the target"
5180 " supports the mfhc1 and mthc1 instructions");
5181 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5182 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5183 }
5184 else
5185 {
5186 /* -msingle-float selects 32-bit float registers. Otherwise the
5187 float registers should be the same size as the integer ones. */
5188 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5189 target_flags |= MASK_FLOAT64;
5190 else
5191 target_flags &= ~MASK_FLOAT64;
5192 }
5193
5194 /* End of code shared with GAS. */
5195
5196 if ((target_flags_explicit & MASK_LONG64) == 0)
5197 {
5198 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5199 target_flags |= MASK_LONG64;
5200 else
5201 target_flags &= ~MASK_LONG64;
5202 }
5203
5204 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5205 && (target_flags_explicit & MASK_SOFT_FLOAT_ABI) == 0)
5206 {
5207 /* For some configurations, it is useful to have -march control
5208 the default setting of MASK_SOFT_FLOAT_ABI. */
5209 switch ((int) mips_arch)
5210 {
5211 case PROCESSOR_R4100:
5212 case PROCESSOR_R4111:
5213 case PROCESSOR_R4120:
5214 case PROCESSOR_R4130:
5215 target_flags |= MASK_SOFT_FLOAT_ABI;
5216 break;
5217
5218 default:
5219 target_flags &= ~MASK_SOFT_FLOAT_ABI;
5220 break;
5221 }
5222 }
5223
5224 if (!TARGET_OLDABI)
5225 flag_pcc_struct_return = 0;
5226
5227 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5228 {
5229 /* If neither -mbranch-likely nor -mno-branch-likely was given
5230 on the command line, set MASK_BRANCHLIKELY based on the target
5231 architecture.
5232
5233 By default, we enable use of Branch Likely instructions on
5234 all architectures which support them with the following
5235 exceptions: when creating MIPS32 or MIPS64 code, and when
5236 tuning for architectures where their use tends to hurt
5237 performance.
5238
5239 The MIPS32 and MIPS64 architecture specifications say "Software
5240 is strongly encouraged to avoid use of Branch Likely
5241 instructions, as they will be removed from a future revision
5242 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5243 issue those instructions unless instructed to do so by
5244 -mbranch-likely. */
5245 if (ISA_HAS_BRANCHLIKELY
5246 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5247 && !(TUNE_MIPS5500 || TUNE_SB1))
5248 target_flags |= MASK_BRANCHLIKELY;
5249 else
5250 target_flags &= ~MASK_BRANCHLIKELY;
5251 }
5252 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5253 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5254
5255 /* The effect of -mabicalls isn't defined for the EABI. */
5256 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5257 {
5258 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5259 target_flags &= ~MASK_ABICALLS;
5260 }
5261
5262 if (TARGET_ABICALLS)
5263 {
5264 /* We need to set flag_pic for executables as well as DSOs
5265 because we may reference symbols that are not defined in
5266 the final executable. (MIPS does not use things like
5267 copy relocs, for example.)
5268
5269 Also, there is a body of code that uses __PIC__ to distinguish
5270 between -mabicalls and -mno-abicalls code. */
5271 flag_pic = 1;
5272 if (mips_section_threshold > 0)
5273 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5274 }
5275
5276 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5277 warning (0, "-G and -mrtp are incompatible");
5278
5279 /* mips_split_addresses is a half-way house between explicit
5280 relocations and the traditional assembler macros. It can
5281 split absolute 32-bit symbolic constants into a high/lo_sum
5282 pair but uses macros for other sorts of access.
5283
5284 Like explicit relocation support for REL targets, it relies
5285 on GNU extensions in the assembler and the linker.
5286
5287 Although this code should work for -O0, it has traditionally
5288 been treated as an optimization. */
5289 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5290 && optimize && !flag_pic
5291 && !ABI_HAS_64BIT_SYMBOLS)
5292 mips_split_addresses = 1;
5293 else
5294 mips_split_addresses = 0;
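
/* Editor's illustrative sketch, not part of GCC: the high/lo_sum split
   described above in isolation.  %lo(addr) is the low 16 bits interpreted
   as a signed offset, so %hi(addr) must round the address up by 0x8000
   before shifting; lui/addiu then recombine to the original address.
   The helper names are hypothetical and exist only for this example.  */

#include <assert.h>
#include <stdint.h>

static uint32_t example_hi_part (uint32_t addr)
{
  return (addr + 0x8000u) >> 16;                /* what %hi() yields */
}

static int32_t example_lo_part (uint32_t addr)
{
  return (int32_t) (int16_t) (addr & 0xffff);   /* what %lo() yields */
}

static void example_check_split (uint32_t addr)
{
  /* lui loads the high part; addiu adds the sign-extended low part.  */
  uint32_t recombined = (example_hi_part (addr) << 16)
                        + (uint32_t) example_lo_part (addr);
  assert (recombined == addr);
}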
5295
5296 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5297 faster code, but at the expense of more nops. Enable it at -O3 and
5298 above. */
5299 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5300 target_flags |= MASK_VR4130_ALIGN;
5301
5302 if (TARGET_MIPS16)
5303 {
5304 /* Don't run the scheduler before reload, since it tends to
5305 increase register pressure. */
5306 flag_schedule_insns = 0;
5307
5308 /* Don't do hot/cold partitioning. The constant layout code expects
5309 the whole function to be in a single section. */
5310 flag_reorder_blocks_and_partition = 0;
5311
5312 /* Silently disable -mexplicit-relocs since it doesn't apply
5313 to mips16 code. Even so, it would be overly pedantic to warn
5314 about "-mips16 -mexplicit-relocs", especially given that
5315 we use a %gprel() operator. */
5316 target_flags &= ~MASK_EXPLICIT_RELOCS;
5317 }
5318
5319 /* When using explicit relocs, we call dbr_schedule from within
5320 mips_reorg. */
5321 if (TARGET_EXPLICIT_RELOCS)
5322 {
5323 mips_flag_delayed_branch = flag_delayed_branch;
5324 flag_delayed_branch = 0;
5325 }
5326
5327 #ifdef MIPS_TFMODE_FORMAT
5328 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5329 #endif
5330
5331 /* Make sure that the user didn't turn off paired single support when
5332 MIPS-3D support is requested. */
5333 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5334 && !TARGET_PAIRED_SINGLE_FLOAT)
5335 error ("-mips3d requires -mpaired-single");
5336
5337 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5338 if (TARGET_MIPS3D)
5339 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5340
5341 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5342 and TARGET_HARD_FLOAT are both true. */
5343 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5344 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5345
5346 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5347 enabled. */
5348 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5349 error ("-mips3d/-mpaired-single must be used with -mips64");
5350
5351 /* If TARGET_DSPR2, enable MASK_DSP. */
5352 if (TARGET_DSPR2)
5353 target_flags |= MASK_DSP;
5354
5355 if (TARGET_MIPS16 && TARGET_DSP)
5356 error ("-mips16 and -mdsp cannot be used together");
5357
5358 mips_print_operand_punct['?'] = 1;
5359 mips_print_operand_punct['#'] = 1;
5360 mips_print_operand_punct['/'] = 1;
5361 mips_print_operand_punct['&'] = 1;
5362 mips_print_operand_punct['!'] = 1;
5363 mips_print_operand_punct['*'] = 1;
5364 mips_print_operand_punct['@'] = 1;
5365 mips_print_operand_punct['.'] = 1;
5366 mips_print_operand_punct['('] = 1;
5367 mips_print_operand_punct[')'] = 1;
5368 mips_print_operand_punct['['] = 1;
5369 mips_print_operand_punct[']'] = 1;
5370 mips_print_operand_punct['<'] = 1;
5371 mips_print_operand_punct['>'] = 1;
5372 mips_print_operand_punct['{'] = 1;
5373 mips_print_operand_punct['}'] = 1;
5374 mips_print_operand_punct['^'] = 1;
5375 mips_print_operand_punct['$'] = 1;
5376 mips_print_operand_punct['+'] = 1;
5377 mips_print_operand_punct['~'] = 1;
5378
5379 /* Set up array to map GCC register number to debug register number.
5380 Ignore the special purpose register numbers. */
5381
5382 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5383 {
5384 mips_dbx_regno[i] = INVALID_REGNUM;
5385 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5386 mips_dwarf_regno[i] = i;
5387 else
5388 mips_dwarf_regno[i] = INVALID_REGNUM;
5389 }
5390
5391 start = GP_DBX_FIRST - GP_REG_FIRST;
5392 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5393 mips_dbx_regno[i] = i + start;
5394
5395 start = FP_DBX_FIRST - FP_REG_FIRST;
5396 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5397 mips_dbx_regno[i] = i + start;
5398
5399 /* HI and LO debug registers use big-endian ordering. */
5400 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5401 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5402 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5403 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
5404 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5405 {
5406 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5407 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5408 }
5409
5410 /* Set up array giving whether a given register can hold a given mode. */
5411
5412 for (mode = VOIDmode;
5413 mode != MAX_MACHINE_MODE;
5414 mode = (enum machine_mode) ((int)mode + 1))
5415 {
5416 register int size = GET_MODE_SIZE (mode);
5417 register enum mode_class class = GET_MODE_CLASS (mode);
5418
5419 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5420 {
5421 register int temp;
5422
5423 if (mode == CCV2mode)
5424 temp = (ISA_HAS_8CC
5425 && ST_REG_P (regno)
5426 && (regno - ST_REG_FIRST) % 2 == 0);
5427
5428 else if (mode == CCV4mode)
5429 temp = (ISA_HAS_8CC
5430 && ST_REG_P (regno)
5431 && (regno - ST_REG_FIRST) % 4 == 0);
5432
5433 else if (mode == CCmode)
5434 {
5435 if (! ISA_HAS_8CC)
5436 temp = (regno == FPSW_REGNUM);
5437 else
5438 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5439 || FP_REG_P (regno));
5440 }
5441
5442 else if (GP_REG_P (regno))
5443 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5444
5445 else if (FP_REG_P (regno))
5446 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5447 || (MIN_FPRS_PER_FMT == 1
5448 && size <= UNITS_PER_FPREG))
5449 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5450 || class == MODE_VECTOR_FLOAT)
5451 && size <= UNITS_PER_FPVALUE)
5452 /* Allow integer modes that fit into a single
5453 register. We need to put integers into FPRs
5454 when using instructions like cvt and trunc.
5455 We can't allow sizes smaller than a word, because
5456 the FPU has no appropriate load/store
5457 instructions for those. */
5458 || (class == MODE_INT
5459 && size >= MIN_UNITS_PER_WORD
5460 && size <= UNITS_PER_FPREG)
5461 /* Allow TFmode for CCmode reloads. */
5462 || (ISA_HAS_8CC && mode == TFmode)));
5463
5464 else if (ACC_REG_P (regno))
5465 temp = (INTEGRAL_MODE_P (mode)
5466 && size <= UNITS_PER_WORD * 2
5467 && (size <= UNITS_PER_WORD
5468 || regno == MD_REG_FIRST
5469 || (DSP_ACC_REG_P (regno)
5470 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5471
5472 else if (ALL_COP_REG_P (regno))
5473 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5474 else
5475 temp = 0;
5476
5477 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5478 }
5479 }
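
/* Editor's illustrative sketch, not part of GCC: the GP_REG_P case above
   in isolation.  A value wider than one word must occupy an even/odd
   register pair, so only even-numbered GPRs may hold it; anything that
   fits in a single word may go in any GPR.  The 4-byte word size and the
   helper name are assumptions made for this example only.  */

static int example_gpr_can_hold (unsigned int regno, unsigned int size_in_bytes)
{
  return (regno & 1) == 0 || size_in_bytes <= 4;
}

/* example_gpr_can_hold (5, 8) == 0: an 8-byte value cannot start in the
   odd register $5; example_gpr_can_hold (4, 8) == 1.  */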
5480
5481 /* Save GPRs in word_mode-sized hunks. word_mode hasn't been
5482 initialized yet, so we can't use that here. */
5483 gpr_mode = TARGET_64BIT ? DImode : SImode;
5484
5485 /* Provide default values for align_* for 64-bit targets. */
5486 if (TARGET_64BIT && !TARGET_MIPS16)
5487 {
5488 if (align_loops == 0)
5489 align_loops = 8;
5490 if (align_jumps == 0)
5491 align_jumps = 8;
5492 if (align_functions == 0)
5493 align_functions = 8;
5494 }
5495
5496 /* Function to allocate machine-dependent function status. */
5497 init_machine_status = &mips_init_machine_status;
5498
5499 if (ABI_HAS_64BIT_SYMBOLS)
5500 {
5501 if (TARGET_EXPLICIT_RELOCS)
5502 {
5503 mips_split_p[SYMBOL_64_HIGH] = true;
5504 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5505 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5506
5507 mips_split_p[SYMBOL_64_MID] = true;
5508 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5509 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5510
5511 mips_split_p[SYMBOL_64_LOW] = true;
5512 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5513 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5514
5515 mips_split_p[SYMBOL_ABSOLUTE] = true;
5516 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5517 }
5518 }
5519 else
5520 {
5521 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5522 {
5523 mips_split_p[SYMBOL_ABSOLUTE] = true;
5524 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5525 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5526
5527 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5528 }
5529 }
5530
5531 if (TARGET_MIPS16)
5532 {
5533 /* The high part is provided by a pseudo copy of $gp. */
5534 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5535 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5536 }
5537
5538 if (TARGET_EXPLICIT_RELOCS)
5539 {
5540 /* Small data constants are kept whole until after reload,
5541 then lowered by mips_rewrite_small_data. */
5542 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5543
5544 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5545 if (TARGET_NEWABI)
5546 {
5547 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5548 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5549 }
5550 else
5551 {
5552 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5553 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5554 }
5555
5556 if (TARGET_XGOT)
5557 {
5558 /* The HIGH and LO_SUM are matched by special .md patterns. */
5559 mips_split_p[SYMBOL_GOT_DISP] = true;
5560
5561 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5562 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5563 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5564
5565 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5566 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5567 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5568 }
5569 else
5570 {
5571 if (TARGET_NEWABI)
5572 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5573 else
5574 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5575 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5576 }
5577 }
5578
5579 if (TARGET_NEWABI)
5580 {
5581 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5582 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5583 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5584 }
5585
5586 /* Thread-local relocation operators. */
5587 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5588 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5589 mips_split_p[SYMBOL_DTPREL] = 1;
5590 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5591 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5592 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5593 mips_split_p[SYMBOL_TPREL] = 1;
5594 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5595 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5596
5597 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5598
5599 /* We don't have a thread pointer access instruction on MIPS16, or
5600 appropriate TLS relocations. */
5601 if (TARGET_MIPS16)
5602 targetm.have_tls = false;
5603
5604 /* Default to working around R4000 errata only if the processor
5605 was selected explicitly. */
5606 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5607 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5608 target_flags |= MASK_FIX_R4000;
5609
5610 /* Default to working around R4400 errata only if the processor
5611 was selected explicitly. */
5612 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5613 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5614 target_flags |= MASK_FIX_R4400;
5615 }
5616
5617 /* Swap the register information for registers I and I + 1, which
5618 currently have the wrong endianness. Note that the registers'
5619 fixedness and call-clobberedness might have been set on the
5620 command line. */
5621
5622 static void
5623 mips_swap_registers (unsigned int i)
5624 {
5625 int tmpi;
5626 const char *tmps;
5627
5628 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5629 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
5630
5631 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5632 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5633 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5634 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5635
5636 #undef SWAP_STRING
5637 #undef SWAP_INT
5638 }
5639
5640 /* Implement CONDITIONAL_REGISTER_USAGE. */
5641
5642 void
5643 mips_conditional_register_usage (void)
5644 {
5645 if (!TARGET_DSP)
5646 {
5647 int regno;
5648
5649 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5650 fixed_regs[regno] = call_used_regs[regno] = 1;
5651 }
5652 if (!TARGET_HARD_FLOAT)
5653 {
5654 int regno;
5655
5656 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5657 fixed_regs[regno] = call_used_regs[regno] = 1;
5658 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5659 fixed_regs[regno] = call_used_regs[regno] = 1;
5660 }
5661 else if (! ISA_HAS_8CC)
5662 {
5663 int regno;
5664
5665 /* We only have a single condition code register. We
5666 implement this by hiding all the condition code registers,
5667 and generating RTL that refers directly to ST_REG_FIRST. */
5668 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5669 fixed_regs[regno] = call_used_regs[regno] = 1;
5670 }
5671 /* In mips16 mode, we permit the $t temporary registers to be used
5672 for reload. We prohibit the unused $s registers, since they
5673 are call-saved, and saving them via a mips16 register would
5674 probably waste more time than just reloading the value. */
5675 if (TARGET_MIPS16)
5676 {
5677 fixed_regs[18] = call_used_regs[18] = 1;
5678 fixed_regs[19] = call_used_regs[19] = 1;
5679 fixed_regs[20] = call_used_regs[20] = 1;
5680 fixed_regs[21] = call_used_regs[21] = 1;
5681 fixed_regs[22] = call_used_regs[22] = 1;
5682 fixed_regs[23] = call_used_regs[23] = 1;
5683 fixed_regs[26] = call_used_regs[26] = 1;
5684 fixed_regs[27] = call_used_regs[27] = 1;
5685 fixed_regs[30] = call_used_regs[30] = 1;
5686 }
5687 /* fp20-23 are now caller saved. */
5688 if (mips_abi == ABI_64)
5689 {
5690 int regno;
5691 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5692 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5693 }
5694 /* Odd registers from fp21 to fp31 are now caller saved. */
5695 if (mips_abi == ABI_N32)
5696 {
5697 int regno;
5698 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5699 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5700 }
5701 /* Make sure that double-register accumulator values are correctly
5702 ordered for the current endianness. */
5703 if (TARGET_LITTLE_ENDIAN)
5704 {
5705 int regno;
5706 mips_swap_registers (MD_REG_FIRST);
5707 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5708 mips_swap_registers (regno);
5709 }
5710 }
5711
5712 /* Allocate a chunk of memory for per-function machine-dependent data. */
5713 static struct machine_function *
5714 mips_init_machine_status (void)
5715 {
5716 return ((struct machine_function *)
5717 ggc_alloc_cleared (sizeof (struct machine_function)));
5718 }
5719
5720 /* On the mips16, we want to allocate $24 (T_REG) before other
5721 registers for instructions for which it is possible. This helps
5722 avoid shuffling registers around in order to set up for an xor,
5723 encouraging the compiler to use a cmp instead. */
5724
5725 void
5726 mips_order_regs_for_local_alloc (void)
5727 {
5728 register int i;
5729
5730 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5731 reg_alloc_order[i] = i;
5732
5733 if (TARGET_MIPS16)
5734 {
5735 /* It really doesn't matter where we put register 0, since it is
5736 a fixed register anyhow. */
5737 reg_alloc_order[0] = 24;
5738 reg_alloc_order[24] = 0;
5739 }
5740 }
5741
5742 \f
5743 /* The MIPS debug format wants all automatic variables and arguments
5744 to be in terms of the virtual frame pointer (stack pointer before
5745 any adjustment in the function), while the MIPS 3.0 linker wants
5746 the frame pointer to be the stack pointer after the initial
5747 adjustment. So, we do the adjustment here. The arg pointer (which
5748 is eliminated) points to the virtual frame pointer, while the frame
5749 pointer (which may be eliminated) points to the stack pointer after
5750 the initial adjustments. */
5751
5752 HOST_WIDE_INT
5753 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5754 {
5755 rtx offset2 = const0_rtx;
5756 rtx reg = eliminate_constant_term (addr, &offset2);
5757
5758 if (offset == 0)
5759 offset = INTVAL (offset2);
5760
5761 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5762 || reg == hard_frame_pointer_rtx)
5763 {
5764 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5765 ? compute_frame_size (get_frame_size ())
5766 : cfun->machine->frame.total_size;
5767
5768 /* The MIPS16 frame is smaller. */
5769 if (frame_pointer_needed && TARGET_MIPS16)
5770 frame_size -= cfun->machine->frame.args_size;
5771
5772 offset = offset - frame_size;
5773 }
5774
5775 /* sdbout_parms does not want this to crash for unrecognized cases. */
5776 #if 0
5777 else if (reg != arg_pointer_rtx)
5778 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5779 addr);
5780 #endif
5781
5782 return offset;
5783 }
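
/* Editor's illustrative sketch, not part of GCC: the adjustment above in
   concrete numbers.  Suppose the prologue drops $sp by a 64-byte frame and
   a local lives at $sp + 24 afterwards.  The debug format wants that slot
   described relative to the virtual frame pointer (the incoming $sp), i.e.
   at 24 - 64 = -40.  The values and the helper name are made up.  */

static long example_debugger_offset (long offset_from_new_sp, long frame_size)
{
  return offset_from_new_sp - frame_size;
}

/* example_debugger_offset (24, 64) == -40 */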
5784 \f
5785 /* If OP is an UNSPEC address, return the address to which it refers,
5786 otherwise return OP itself. */
5787
5788 static rtx
5789 mips_strip_unspec_address (rtx op)
5790 {
5791 rtx base, offset;
5792
5793 split_const (op, &base, &offset);
5794 if (UNSPEC_ADDRESS_P (base))
5795 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
5796 return op;
5797 }
5798
5799 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5800
5801 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5802 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5803 'h' OP is HIGH, prints %hi(X),
5804 'd' output integer constant in decimal,
5805 'z' if the operand is 0, use $0 instead of normal operand.
5806 'D' print second part of double-word register or memory operand.
5807 'L' print low-order register of double-word register operand.
5808 'M' print high-order register of double-word register operand.
5809 'C' print part of opcode for a branch condition.
5810 'F' print part of opcode for a floating-point branch condition.
5811 'N' print part of opcode for a branch condition, inverted.
5812 'W' print part of opcode for a floating-point branch condition, inverted.
5813 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5814 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5815 't' like 'T', but with the EQ/NE cases reversed
5816 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5817 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5818 'R' print the reloc associated with LO_SUM
5819 'q' print DSP accumulator registers
5820
5821 The punctuation characters are:
5822
5823 '(' Turn on .set noreorder
5824 ')' Turn on .set reorder
5825 '[' Turn on .set noat
5826 ']' Turn on .set at
5827 '<' Turn on .set nomacro
5828 '>' Turn on .set macro
5829 '{' Turn on .set volatile (not GAS)
5830 '}' Turn on .set novolatile (not GAS)
5831 '&' Turn on .set noreorder if filling delay slots
5832 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5833 '!' Turn on .set nomacro if filling delay slots
5834 '#' Print nop if in a .set noreorder section.
5835 '/' Like '#', but does nothing within a delayed branch sequence
5836 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5837 '@' Print the name of the assembler temporary register (at or $1).
5838 '.' Print the name of the register with a hard-wired zero (zero or $0).
5839 '^' Print the name of the pic call-through register (t9 or $25).
5840 '$' Print the name of the stack pointer register (sp or $29).
5841 '+' Print the name of the gp register (usually gp or $28).
5842 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
5843
5844 void
5845 print_operand (FILE *file, rtx op, int letter)
5846 {
5847 register enum rtx_code code;
5848
5849 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5850 {
5851 switch (letter)
5852 {
5853 case '?':
5854 if (mips_branch_likely)
5855 putc ('l', file);
5856 break;
5857
5858 case '@':
5859 fputs (reg_names [GP_REG_FIRST + 1], file);
5860 break;
5861
5862 case '^':
5863 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5864 break;
5865
5866 case '.':
5867 fputs (reg_names [GP_REG_FIRST + 0], file);
5868 break;
5869
5870 case '$':
5871 fputs (reg_names[STACK_POINTER_REGNUM], file);
5872 break;
5873
5874 case '+':
5875 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5876 break;
5877
5878 case '&':
5879 if (final_sequence != 0 && set_noreorder++ == 0)
5880 fputs (".set\tnoreorder\n\t", file);
5881 break;
5882
5883 case '*':
5884 if (final_sequence != 0)
5885 {
5886 if (set_noreorder++ == 0)
5887 fputs (".set\tnoreorder\n\t", file);
5888
5889 if (set_nomacro++ == 0)
5890 fputs (".set\tnomacro\n\t", file);
5891 }
5892 break;
5893
5894 case '!':
5895 if (final_sequence != 0 && set_nomacro++ == 0)
5896 fputs ("\n\t.set\tnomacro", file);
5897 break;
5898
5899 case '#':
5900 if (set_noreorder != 0)
5901 fputs ("\n\tnop", file);
5902 break;
5903
5904 case '/':
5905 /* Print an extra newline so that the delayed insn is separated
5906 from the following ones. This looks neater and is consistent
5907 with non-nop delayed sequences. */
5908 if (set_noreorder != 0 && final_sequence == 0)
5909 fputs ("\n\tnop\n", file);
5910 break;
5911
5912 case '(':
5913 if (set_noreorder++ == 0)
5914 fputs (".set\tnoreorder\n\t", file);
5915 break;
5916
5917 case ')':
5918 if (set_noreorder == 0)
5919 error ("internal error: %%) found without a %%( in assembler pattern");
5920
5921 else if (--set_noreorder == 0)
5922 fputs ("\n\t.set\treorder", file);
5923
5924 break;
5925
5926 case '[':
5927 if (set_noat++ == 0)
5928 fputs (".set\tnoat\n\t", file);
5929 break;
5930
5931 case ']':
5932 if (set_noat == 0)
5933 error ("internal error: %%] found without a %%[ in assembler pattern");
5934 else if (--set_noat == 0)
5935 fputs ("\n\t.set\tat", file);
5936
5937 break;
5938
5939 case '<':
5940 if (set_nomacro++ == 0)
5941 fputs (".set\tnomacro\n\t", file);
5942 break;
5943
5944 case '>':
5945 if (set_nomacro == 0)
5946 error ("internal error: %%> found without a %%< in assembler pattern");
5947 else if (--set_nomacro == 0)
5948 fputs ("\n\t.set\tmacro", file);
5949
5950 break;
5951
5952 case '{':
5953 if (set_volatile++ == 0)
5954 fputs ("#.set\tvolatile\n\t", file);
5955 break;
5956
5957 case '}':
5958 if (set_volatile == 0)
5959 error ("internal error: %%} found without a %%{ in assembler pattern");
5960 else if (--set_volatile == 0)
5961 fputs ("\n\t#.set\tnovolatile", file);
5962
5963 break;
5964
5965 case '~':
5966 {
5967 if (align_labels_log > 0)
5968 ASM_OUTPUT_ALIGN (file, align_labels_log);
5969 }
5970 break;
5971
5972 default:
5973 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5974 break;
5975 }
5976
5977 return;
5978 }
5979
5980 if (! op)
5981 {
5982 error ("PRINT_OPERAND null pointer");
5983 return;
5984 }
5985
5986 code = GET_CODE (op);
5987
5988 if (letter == 'C')
5989 switch (code)
5990 {
5991 case EQ: fputs ("eq", file); break;
5992 case NE: fputs ("ne", file); break;
5993 case GT: fputs ("gt", file); break;
5994 case GE: fputs ("ge", file); break;
5995 case LT: fputs ("lt", file); break;
5996 case LE: fputs ("le", file); break;
5997 case GTU: fputs ("gtu", file); break;
5998 case GEU: fputs ("geu", file); break;
5999 case LTU: fputs ("ltu", file); break;
6000 case LEU: fputs ("leu", file); break;
6001 default:
6002 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6003 }
6004
6005 else if (letter == 'N')
6006 switch (code)
6007 {
6008 case EQ: fputs ("ne", file); break;
6009 case NE: fputs ("eq", file); break;
6010 case GT: fputs ("le", file); break;
6011 case GE: fputs ("lt", file); break;
6012 case LT: fputs ("ge", file); break;
6013 case LE: fputs ("gt", file); break;
6014 case GTU: fputs ("leu", file); break;
6015 case GEU: fputs ("ltu", file); break;
6016 case LTU: fputs ("geu", file); break;
6017 case LEU: fputs ("gtu", file); break;
6018 default:
6019 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6020 }
6021
6022 else if (letter == 'F')
6023 switch (code)
6024 {
6025 case EQ: fputs ("c1f", file); break;
6026 case NE: fputs ("c1t", file); break;
6027 default:
6028 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6029 }
6030
6031 else if (letter == 'W')
6032 switch (code)
6033 {
6034 case EQ: fputs ("c1t", file); break;
6035 case NE: fputs ("c1f", file); break;
6036 default:
6037 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6038 }
6039
6040 else if (letter == 'h')
6041 {
6042 if (GET_CODE (op) == HIGH)
6043 op = XEXP (op, 0);
6044
6045 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6046 }
6047
6048 else if (letter == 'R')
6049 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6050
6051 else if (letter == 'Y')
6052 {
6053 if (GET_CODE (op) == CONST_INT
6054 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6055 < ARRAY_SIZE (mips_fp_conditions)))
6056 fputs (mips_fp_conditions[INTVAL (op)], file);
6057 else
6058 output_operand_lossage ("invalid %%Y value");
6059 }
6060
6061 else if (letter == 'Z')
6062 {
6063 if (ISA_HAS_8CC)
6064 {
6065 print_operand (file, op, 0);
6066 fputc (',', file);
6067 }
6068 }
6069
6070 else if (letter == 'q')
6071 {
6072 int regnum;
6073
6074 if (code != REG)
6075 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6076
6077 regnum = REGNO (op);
6078 if (MD_REG_P (regnum))
6079 fprintf (file, "$ac0");
6080 else if (DSP_ACC_REG_P (regnum))
6081 fprintf (file, "$ac%c", reg_names[regnum][3]);
6082 else
6083 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6084 }
6085
6086 else if (code == REG || code == SUBREG)
6087 {
6088 register int regnum;
6089
6090 if (code == REG)
6091 regnum = REGNO (op);
6092 else
6093 regnum = true_regnum (op);
6094
6095 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6096 || (letter == 'L' && WORDS_BIG_ENDIAN)
6097 || letter == 'D')
6098 regnum++;
6099
6100 fprintf (file, "%s", reg_names[regnum]);
6101 }
6102
6103 else if (code == MEM)
6104 {
6105 if (letter == 'D')
6106 output_address (plus_constant (XEXP (op, 0), 4));
6107 else
6108 output_address (XEXP (op, 0));
6109 }
6110
6111 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6112 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6113
6114 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6115 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6116
6117 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6118 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6119
6120 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6121 fputs (reg_names[GP_REG_FIRST], file);
6122
6123 else if (letter == 'd' || letter == 'x' || letter == 'X')
6124 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6125
6126 else if (letter == 'T' || letter == 't')
6127 {
6128 int truth = (code == NE) == (letter == 'T');
6129 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6130 }
6131
6132 else if (CONST_GP_P (op))
6133 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6134
6135 else
6136 output_addr_const (file, mips_strip_unspec_address (op));
6137 }
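
/* Editor's illustrative sketch, not part of GCC: the "zfnt" lookup used for
   the 'T'/'t' codes above.  The index packs two bits: bit 1 says whether the
   comparison is "true" in the sense selected by the letter, bit 0 says
   whether the operand is a CC-mode (FP condition) comparison.  The helper
   name is hypothetical.  */

static char example_tt_suffix (int truth, int is_cc_mode)
{
  return "zfnt"[truth * 2 + is_cc_mode];
}

/* example_tt_suffix (0, 1) == 'f', example_tt_suffix (1, 1) == 't',
   example_tt_suffix (0, 0) == 'z', example_tt_suffix (1, 0) == 'n'.  */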
6138
6139
6140 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6141 in context CONTEXT. RELOCS is the array of relocations to use. */
6142
6143 static void
6144 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6145 const char **relocs)
6146 {
6147 enum mips_symbol_type symbol_type;
6148 const char *p;
6149
6150 if (!mips_symbolic_constant_p (op, context, &symbol_type)
6151 || relocs[symbol_type] == 0)
6152 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6153
6154 fputs (relocs[symbol_type], file);
6155 output_addr_const (file, mips_strip_unspec_address (op));
6156 for (p = relocs[symbol_type]; *p != 0; p++)
6157 if (*p == '(')
6158 fputc (')', file);
6159 }
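
/* Editor's illustrative sketch, not part of GCC: why the loop above counts
   '(' characters.  Nested relocation operators such as "%hi(%neg(%gp_rel("
   open several parentheses, so a matching number of ')' characters has to
   follow the symbol.  The helper name is hypothetical.  */

#include <stdio.h>

static void example_emit_reloc (FILE *f, const char *reloc, const char *sym)
{
  const char *p;

  fputs (reloc, f);
  fputs (sym, f);
  for (p = reloc; *p != 0; p++)
    if (*p == '(')
      fputc (')', f);
}

/* example_emit_reloc (stdout, "%hi(%neg(%gp_rel(", "foo") prints
   "%hi(%neg(%gp_rel(foo)))".  */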
6160 \f
6161 /* Output address operand X to FILE. */
6162
6163 void
6164 print_operand_address (FILE *file, rtx x)
6165 {
6166 struct mips_address_info addr;
6167
6168 if (mips_classify_address (&addr, x, word_mode, true))
6169 switch (addr.type)
6170 {
6171 case ADDRESS_REG:
6172 print_operand (file, addr.offset, 0);
6173 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6174 return;
6175
6176 case ADDRESS_LO_SUM:
6177 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6178 mips_lo_relocs);
6179 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6180 return;
6181
6182 case ADDRESS_CONST_INT:
6183 output_addr_const (file, x);
6184 fprintf (file, "(%s)", reg_names[0]);
6185 return;
6186
6187 case ADDRESS_SYMBOLIC:
6188 output_addr_const (file, mips_strip_unspec_address (x));
6189 return;
6190 }
6191 gcc_unreachable ();
6192 }
6193 \f
6194 /* When using assembler macros, keep track of all of the small-data externs
6195 so that mips_file_end can emit the appropriate declarations for them.
6196
6197 In most cases it would be safe (though pointless) to emit .externs
6198 for other symbols too. One exception is when an object is within
6199 the -G limit but declared by the user to be in a section other
6200 than .sbss or .sdata. */
6201
6202 void
6203 mips_output_external (FILE *file, tree decl, const char *name)
6204 {
6205 default_elf_asm_output_external (file, decl, name);
6206
6207 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6208 set in order to avoid putting out names that are never really
6209 used. */
6210 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6211 {
6212 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6213 {
6214 fputs ("\t.extern\t", file);
6215 assemble_name (file, name);
6216 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6217 int_size_in_bytes (TREE_TYPE (decl)));
6218 }
6219 else if (TARGET_IRIX
6220 && mips_abi == ABI_32
6221 && TREE_CODE (decl) == FUNCTION_DECL)
6222 {
6223 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6224 `.global name .text' directive for every used but
6225 undefined function. If we don't, the linker may perform
6226 an optimization (skipping over the insns that set $gp)
6227 when it is unsafe. */
6228 fputs ("\t.globl ", file);
6229 assemble_name (file, name);
6230 fputs (" .text\n", file);
6231 }
6232 }
6233 }
6234 \f
6235 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6236 put out a MIPS ECOFF file and a stab. */
6237
6238 void
6239 mips_output_filename (FILE *stream, const char *name)
6240 {
6241
6242 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6243 directives. */
6244 if (write_symbols == DWARF2_DEBUG)
6245 return;
6246 else if (mips_output_filename_first_time)
6247 {
6248 mips_output_filename_first_time = 0;
6249 num_source_filenames += 1;
6250 current_function_file = name;
6251 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6252 output_quoted_string (stream, name);
6253 putc ('\n', stream);
6254 }
6255
6256 /* If we are emitting stabs, let dbxout.c handle this (except for
6257 the mips_output_filename_first_time case). */
6258 else if (write_symbols == DBX_DEBUG)
6259 return;
6260
6261 else if (name != current_function_file
6262 && strcmp (name, current_function_file) != 0)
6263 {
6264 num_source_filenames += 1;
6265 current_function_file = name;
6266 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6267 output_quoted_string (stream, name);
6268 putc ('\n', stream);
6269 }
6270 }
6271 \f
6272 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6273 that should be written before the opening quote, such as "\t.ascii\t"
6274 for real string data or "\t# " for a comment. */
6275
6276 void
6277 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6278 const char *prefix)
6279 {
6280 size_t i;
6281 int cur_pos = 17;
6282 register const unsigned char *string =
6283 (const unsigned char *)string_param;
6284
6285 fprintf (stream, "%s\"", prefix);
6286 for (i = 0; i < len; i++)
6287 {
6288 register int c = string[i];
6289
6290 if (ISPRINT (c))
6291 {
6292 if (c == '\\' || c == '\"')
6293 {
6294 putc ('\\', stream);
6295 cur_pos++;
6296 }
6297 putc (c, stream);
6298 cur_pos++;
6299 }
6300 else
6301 {
6302 fprintf (stream, "\\%03o", c);
6303 cur_pos += 4;
6304 }
6305
6306 if (cur_pos > 72 && i+1 < len)
6307 {
6308 cur_pos = 17;
6309 fprintf (stream, "\"\n%s\"", prefix);
6310 }
6311 }
6312 fprintf (stream, "\"\n");
6313 }
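
/* Editor's illustrative sketch, not part of GCC: the per-byte escaping rule
   used above.  Printable characters are emitted directly, with '\' and '"'
   backslash-escaped; everything else becomes a three-digit octal escape.
   <ctype.h>'s isprint stands in for GCC's ISPRINT here, and the helper name
   is hypothetical.  */

#include <ctype.h>
#include <stdio.h>

static void example_emit_ascii_byte (FILE *stream, unsigned char c)
{
  if (isprint (c))
    {
      if (c == '\\' || c == '"')
        putc ('\\', stream);
      putc (c, stream);
    }
  else
    fprintf (stream, "\\%03o", c);     /* e.g. a newline becomes \012 */
}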
6314 \f
6315 /* Implement TARGET_ASM_FILE_START. */
6316
6317 static void
6318 mips_file_start (void)
6319 {
6320 default_file_start ();
6321
6322 if (!TARGET_IRIX)
6323 {
6324 /* Generate a special section to describe the ABI switches used to
6325 produce the resultant binary. This used to be done by the assembler
6326 setting bits in the ELF header's flags field, but we have run out of
6327 bits. GDB needs this information in order to be able to correctly
6328 debug these binaries. See the function mips_gdbarch_init() in
6329 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6330 causes unnecessary IRIX 6 ld warnings. */
6331 const char * abi_string = NULL;
6332
6333 switch (mips_abi)
6334 {
6335 case ABI_32: abi_string = "abi32"; break;
6336 case ABI_N32: abi_string = "abiN32"; break;
6337 case ABI_64: abi_string = "abi64"; break;
6338 case ABI_O64: abi_string = "abiO64"; break;
6339 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6340 default:
6341 gcc_unreachable ();
6342 }
6343 /* Note - we use fprintf directly rather than calling switch_to_section
6344 because in this way we can avoid creating an allocated section. We
6345 do not want this section to take up any space in the running
6346 executable. */
6347 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6348
6349 /* There is no ELF header flag to distinguish long32 forms of the
6350 EABI from long64 forms. Emit a special section to help tools
6351 such as GDB. Do the same for o64, which is sometimes used with
6352 -mlong64. */
6353 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6354 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6355 TARGET_LONG64 ? 64 : 32);
6356
6357 /* Restore the default section. */
6358 fprintf (asm_out_file, "\t.previous\n");
6359
6360 #ifdef HAVE_AS_GNU_ATTRIBUTE
6361 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6362 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6363 #endif
6364 }
6365
6366 /* Generate the pseudo ops that System V.4 wants. */
6367 if (TARGET_ABICALLS)
6368 fprintf (asm_out_file, "\t.abicalls\n");
6369
6370 if (TARGET_MIPS16)
6371 fprintf (asm_out_file, "\t.set\tmips16\n");
6372
6373 if (flag_verbose_asm)
6374 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6375 ASM_COMMENT_START,
6376 mips_section_threshold, mips_arch_info->name, mips_isa);
6377 }
6378
6379 #ifdef BSS_SECTION_ASM_OP
6380 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6381 in the use of sbss. */
6382
6383 void
6384 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6385 unsigned HOST_WIDE_INT size, int align)
6386 {
6387 extern tree last_assemble_variable_decl;
6388
6389 if (mips_in_small_data_p (decl))
6390 switch_to_section (get_named_section (NULL, ".sbss", 0));
6391 else
6392 switch_to_section (bss_section);
6393 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6394 last_assemble_variable_decl = decl;
6395 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
6396 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6397 }
6398 #endif
6399 \f
6400 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6401 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6402
6403 void
6404 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6405 unsigned HOST_WIDE_INT size,
6406 unsigned int align)
6407 {
6408 /* If the target wants uninitialized const declarations in
6409 .rdata then don't put them in .comm. */
6410 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6411 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6412 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6413 {
6414 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6415 targetm.asm_out.globalize_label (stream, name);
6416
6417 switch_to_section (readonly_data_section);
6418 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6419 mips_declare_object (stream, name, "",
6420 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6421 size);
6422 }
6423 else
6424 mips_declare_common_object (stream, name, "\n\t.comm\t",
6425 size, align, true);
6426 }
6427
6428 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6429 NAME is the name of the object and ALIGN is the required alignment
6430 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6431 alignment argument. */
6432
6433 void
6434 mips_declare_common_object (FILE *stream, const char *name,
6435 const char *init_string,
6436 unsigned HOST_WIDE_INT size,
6437 unsigned int align, bool takes_alignment_p)
6438 {
6439 if (!takes_alignment_p)
6440 {
6441 size += (align / BITS_PER_UNIT) - 1;
6442 size -= size % (align / BITS_PER_UNIT);
6443 mips_declare_object (stream, name, init_string,
6444 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6445 }
6446 else
6447 mips_declare_object (stream, name, init_string,
6448 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6449 size, align / BITS_PER_UNIT);
6450 }
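
/* Editor's illustrative sketch, not part of GCC: the size rounding done
   above when the directive takes no alignment argument.  ALIGN is given in
   bits, so it is first converted to bytes and SIZE is then rounded up to
   the next multiple of that byte count.  The constant 8 stands in for
   BITS_PER_UNIT and the helper name is hypothetical.  */

static unsigned long example_round_common_size (unsigned long size,
                                                unsigned int align_in_bits)
{
  unsigned long align_in_bytes = align_in_bits / 8;

  size += align_in_bytes - 1;
  return size - size % align_in_bytes;
}

/* example_round_common_size (10, 64) == 16: with a 64-bit alignment the
   object must occupy a whole number of 8-byte units.  */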
6451
6452 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6453 macros, mark the symbol as written so that mips_file_end won't emit an
6454 .extern for it. STREAM is the output file, NAME is the name of the
6455 symbol, INIT_STRING is the string that should be written before the
6456 symbol and FINAL_STRING is the string that should be written after it.
6457 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6458
6459 void
6460 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6461 const char *final_string, ...)
6462 {
6463 va_list ap;
6464
6465 fputs (init_string, stream);
6466 assemble_name (stream, name);
6467 va_start (ap, final_string);
6468 vfprintf (stream, final_string, ap);
6469 va_end (ap);
6470
6471 if (!TARGET_EXPLICIT_RELOCS)
6472 {
6473 tree name_tree = get_identifier (name);
6474 TREE_ASM_WRITTEN (name_tree) = 1;
6475 }
6476 }
6477
6478 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6479 extern int size_directive_output;
6480
6481 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6482 definitions except that it uses mips_declare_object() to emit the label. */
6483
6484 void
6485 mips_declare_object_name (FILE *stream, const char *name,
6486 tree decl ATTRIBUTE_UNUSED)
6487 {
6488 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6489 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6490 #endif
6491
6492 size_directive_output = 0;
6493 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6494 {
6495 HOST_WIDE_INT size;
6496
6497 size_directive_output = 1;
6498 size = int_size_in_bytes (TREE_TYPE (decl));
6499 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6500 }
6501
6502 mips_declare_object (stream, name, "", ":\n");
6503 }
6504
6505 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6506
6507 void
6508 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6509 {
6510 const char *name;
6511
6512 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6513 if (!flag_inhibit_size_directive
6514 && DECL_SIZE (decl) != 0
6515 && !at_end && top_level
6516 && DECL_INITIAL (decl) == error_mark_node
6517 && !size_directive_output)
6518 {
6519 HOST_WIDE_INT size;
6520
6521 size_directive_output = 1;
6522 size = int_size_in_bytes (TREE_TYPE (decl));
6523 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6524 }
6525 }
6526 #endif
6527 \f
6528 /* Return true if X in context CONTEXT is a small data address that can
6529 be rewritten as a LO_SUM. */
6530
6531 static bool
6532 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
6533 {
6534 enum mips_symbol_type symbol_type;
6535
6536 return (TARGET_EXPLICIT_RELOCS
6537 && mips_symbolic_constant_p (x, context, &symbol_type)
6538 && symbol_type == SYMBOL_GP_RELATIVE);
6539 }
6540
6541
6542 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
6543 containing MEM, or null if none. */
6544
6545 static int
6546 mips_small_data_pattern_1 (rtx *loc, void *data)
6547 {
6548 enum mips_symbol_context context;
6549
6550 if (GET_CODE (*loc) == LO_SUM)
6551 return -1;
6552
6553 if (MEM_P (*loc))
6554 {
6555 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
6556 return 1;
6557 return -1;
6558 }
6559
6560 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6561 return mips_rewrite_small_data_p (*loc, context);
6562 }
6563
6564 /* Return true if OP refers to small data symbols directly, not through
6565 a LO_SUM. */
6566
6567 bool
6568 mips_small_data_pattern_p (rtx op)
6569 {
6570 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6571 }
6572 \f
6573 /* A for_each_rtx callback, used by mips_rewrite_small_data.
6574 DATA is the containing MEM, or null if none. */
6575
6576 static int
6577 mips_rewrite_small_data_1 (rtx *loc, void *data)
6578 {
6579 enum mips_symbol_context context;
6580
6581 if (MEM_P (*loc))
6582 {
6583 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
6584 return -1;
6585 }
6586
6587 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6588 if (mips_rewrite_small_data_p (*loc, context))
6589 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6590
6591 if (GET_CODE (*loc) == LO_SUM)
6592 return -1;
6593
6594 return 0;
6595 }
6596
6597 /* If possible, rewrite OP so that it refers to small data using
6598 explicit relocations. */
6599
6600 rtx
6601 mips_rewrite_small_data (rtx op)
6602 {
6603 op = copy_insn (op);
6604 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6605 return op;
6606 }
6607 \f
6608 /* Return true if the current function has an insn that implicitly
6609 refers to $gp. */
6610
6611 static bool
6612 mips_function_has_gp_insn (void)
6613 {
6614 /* Don't bother rechecking if we found one last time. */
6615 if (!cfun->machine->has_gp_insn_p)
6616 {
6617 rtx insn;
6618
6619 push_topmost_sequence ();
6620 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6621 if (INSN_P (insn)
6622 && GET_CODE (PATTERN (insn)) != USE
6623 && GET_CODE (PATTERN (insn)) != CLOBBER
6624 && (get_attr_got (insn) != GOT_UNSET
6625 || small_data_pattern (PATTERN (insn), VOIDmode)))
6626 break;
6627 pop_topmost_sequence ();
6628
6629 cfun->machine->has_gp_insn_p = (insn != 0);
6630 }
6631 return cfun->machine->has_gp_insn_p;
6632 }
6633
6634
6635 /* Return the register that should be used as the global pointer
6636 within this function. Return 0 if the function doesn't need
6637 a global pointer. */
6638
6639 static unsigned int
6640 mips_global_pointer (void)
6641 {
6642 unsigned int regno;
6643
6644 /* $gp is always available unless we're using a GOT. */
6645 if (!TARGET_USE_GOT)
6646 return GLOBAL_POINTER_REGNUM;
6647
6648 /* We must always provide $gp when it is used implicitly. */
6649 if (!TARGET_EXPLICIT_RELOCS)
6650 return GLOBAL_POINTER_REGNUM;
6651
6652 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6653 a valid gp. */
6654 if (current_function_profile)
6655 return GLOBAL_POINTER_REGNUM;
6656
6657 /* If the function has a nonlocal goto, $gp must hold the correct
6658 global pointer for the target function. */
6659 if (current_function_has_nonlocal_goto)
6660 return GLOBAL_POINTER_REGNUM;
6661
6662 /* If the gp is never referenced, there's no need to initialize it.
6663 Note that reload can sometimes introduce constant pool references
6664 into a function that otherwise didn't need them. For example,
6665 suppose we have an instruction like:
6666
6667 (set (reg:DF R1) (float:DF (reg:SI R2)))
6668
6669 If R2 turns out to be a constant such as 1, the instruction may have a
6670 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6671 using this constant if R2 doesn't get allocated to a register.
6672
6673 In cases like these, reload will have added the constant to the pool
6674 but no instruction will yet refer to it. */
6675 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6676 && !current_function_uses_const_pool
6677 && !mips_function_has_gp_insn ())
6678 return 0;
6679
6680 /* We need a global pointer, but perhaps we can use a call-clobbered
6681 register instead of $gp. */
6682 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6683 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6684 if (!df_regs_ever_live_p (regno)
6685 && call_used_regs[regno]
6686 && !fixed_regs[regno]
6687 && regno != PIC_FUNCTION_ADDR_REGNUM)
6688 return regno;
6689
6690 return GLOBAL_POINTER_REGNUM;
6691 }
6692
6693
6694 /* Return true if a function return value of mode MODE will be returned in a
6695 floating-point register. */
6696
6697 static bool
6698 mips_return_mode_in_fpr_p (enum machine_mode mode)
6699 {
6700 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6701 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6702 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6703 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6704 }
6705
6706 /* Return a two-character string representing a function floating-point
6707 return mode, used to name MIPS16 function stubs. */
6708
6709 static const char *
6710 mips16_call_stub_mode_suffix (enum machine_mode mode)
6711 {
6712 if (mode == SFmode)
6713 return "sf";
6714 else if (mode == DFmode)
6715 return "df";
6716 else if (mode == SCmode)
6717 return "sc";
6718 else if (mode == DCmode)
6719 return "dc";
6720 else if (mode == V2SFmode)
6721 return "df";
6722 else
6723 gcc_unreachable ();
6724 }
6725
6726 /* Return true if the current function returns its value in a floating-point
6727 register in MIPS16 mode. */
6728
6729 static bool
6730 mips16_cfun_returns_in_fpr_p (void)
6731 {
6732 tree return_type = DECL_RESULT (current_function_decl);
6733 return (TARGET_MIPS16
6734 && TARGET_HARD_FLOAT_ABI
6735 && !aggregate_value_p (return_type, current_function_decl)
6736 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6737 }
6738
6739
6740 /* Return true if the current function must save REGNO. */
6741
6742 static bool
6743 mips_save_reg_p (unsigned int regno)
6744 {
6745 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6746 if we have not chosen a call-clobbered substitute. */
6747 if (regno == GLOBAL_POINTER_REGNUM)
6748 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6749
6750 /* Check call-saved registers. */
6751 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6752 return true;
6753
6754 /* Save both registers in an FPR pair if either one is used. This is
6755 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6756 register to be used without the even register. */
6757 if (FP_REG_P (regno)
6758 && MAX_FPRS_PER_FMT == 2
6759 && df_regs_ever_live_p (regno + 1)
6760 && !call_used_regs[regno + 1])
6761 return true;
6762
6763 /* We need to save the old frame pointer before setting up a new one. */
6764 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6765 return true;
6766
6767 /* We need to save the incoming return address if it is ever clobbered
6768 within the function. */
6769 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
6770 return true;
6771
6772 if (TARGET_MIPS16)
6773 {
6774 /* $18 is a special case in mips16 code. It may be used to call
6775 a function which returns a floating point value, but it is
6776 marked in call_used_regs. */
6777 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6778 return true;
6779
6780 /* $31 is also a special case. It will be used to copy a return
6781 value into the floating point registers if the return value is
6782 floating point. */
6783 if (regno == GP_REG_FIRST + 31
6784 && mips16_cfun_returns_in_fpr_p ())
6785 return true;
6786 }
6787
6788 return false;
6789 }
6790
6791 /* Return the index of the lowest X in the range [0, SIZE) for which
6792 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
6793
6794 static unsigned int
6795 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
6796 unsigned int size)
6797 {
6798 unsigned int i;
6799
6800 for (i = 0; i < size; i++)
6801 if (BITSET_P (mask, regs[i]))
6802 break;
6803
6804 return i;
6805 }
6806
6807 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
6808 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
6809 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
6810 the same is true for all indexes in (X, SIZE). */
6811
6812 static void
6813 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
6814 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
6815 {
6816 unsigned int i;
6817
6818 i = mips16e_find_first_register (*mask_ptr, regs, size);
6819 for (i++; i < size; i++)
6820 if (!BITSET_P (*mask_ptr, regs[i]))
6821 {
6822 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
6823 *mask_ptr |= 1 << regs[i];
6824 }
6825 }
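
/* Editor's illustrative sketch, not part of GCC: a concrete run of the rule
   above.  Take a hypothetical SAVE/RESTORE range covering registers 18-21,
   listed here from last-saved to first-saved, with only register 20 live on
   entry.  Because 20 must be saved, the later entries 19 and 18 are forced
   into the mask too and the save area grows by one 4-byte word for each.
   All register numbers and the word size are assumptions for the example.  */

#include <assert.h>

static void example_mips16e_mask_rule (void)
{
  static const unsigned char range[] = { 21, 20, 19, 18 };
  unsigned int mask = 1u << 20;
  long gp_reg_size = 4;                 /* one word for register 20 */
  unsigned int i, first;

  for (first = 0; first < 4; first++)
    if (mask & (1u << range[first]))
      break;

  for (i = first + 1; i < 4; i++)
    if (!(mask & (1u << range[i])))
      {
        mask |= 1u << range[i];
        gp_reg_size += 4;
      }

  assert (mask == ((1u << 20) | (1u << 19) | (1u << 18)));
  assert (gp_reg_size == 12);
}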
6826
6827 /* Return the bytes needed to compute the frame pointer from the current
6828 stack pointer. SIZE is the size (in bytes) of the local variables.
6829
6830 MIPS stack frames look like:
6831
6832 Before call After call
6833 high +-----------------------+ +-----------------------+
6834 mem. | | | |
6835 | caller's temps. | | caller's temps. |
6836 | | | |
6837 +-----------------------+ +-----------------------+
6838 | | | |
6839 | arguments on stack. | | arguments on stack. |
6840 | | | |
6841 +-----------------------+ +-----------------------+
6842 | 4 words to save | | 4 words to save |
6843 | arguments passed | | arguments passed |
6844 | in registers, even | | in registers, even |
6845 | if not passed. | | if not passed. |
6846 SP->+-----------------------+ VFP->+-----------------------+
6847 (VFP = SP+fp_sp_offset) | |\
6848 | fp register save | | fp_reg_size
6849 | |/
6850 SP+gp_sp_offset->+-----------------------+
6851 /| |\
6852 | | gp register save | | gp_reg_size
6853 gp_reg_rounded | | |/
6854 | +-----------------------+
6855 \| alignment padding |
6856 +-----------------------+
6857 | |\
6858 | local variables | | var_size
6859 | |/
6860 +-----------------------+
6861 | |
6862 | alloca allocations |
6863 | |
6864 +-----------------------+
6865 /| |
6866 cprestore_size | | GP save for V.4 abi |
6867 \| |
6868 +-----------------------+
6869 | |\
6870 | arguments on stack | |
6871 | | |
6872 +-----------------------+ |
6873 | 4 words to save | | args_size
6874 | arguments passed | |
6875 | in registers, even | |
6876 | if not passed. | |
6877 low | (TARGET_OLDABI only) |/
6878 memory SP->+-----------------------+
6879
6880 */
6881
6882 HOST_WIDE_INT
6883 compute_frame_size (HOST_WIDE_INT size)
6884 {
6885 unsigned int regno;
6886 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6887 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6888 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6889 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6890 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6891 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6892 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6893 unsigned int mask; /* mask of saved gp registers */
6894 unsigned int fmask; /* mask of saved fp registers */
6895
6896 cfun->machine->global_pointer = mips_global_pointer ();
6897
6898 gp_reg_size = 0;
6899 fp_reg_size = 0;
6900 mask = 0;
6901 fmask = 0;
6902 var_size = MIPS_STACK_ALIGN (size);
6903 args_size = current_function_outgoing_args_size;
6904 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6905
6906 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6907 functions. If the function has local variables, we're committed
6908 to allocating it anyway. Otherwise reclaim it here. */
6909 if (var_size == 0 && current_function_is_leaf)
6910 cprestore_size = args_size = 0;
6911
6912 /* The MIPS 3.0 linker does not like functions that dynamically
6913 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6914 looks like we are trying to create a second frame pointer to the
6915 function, so allocate some stack space to make it happy. */
6916
6917 if (args_size == 0 && current_function_calls_alloca)
6918 args_size = 4 * UNITS_PER_WORD;
6919
6920 total_size = var_size + args_size + cprestore_size;
6921
6922 /* Calculate space needed for gp registers. */
6923 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6924 if (mips_save_reg_p (regno))
6925 {
6926 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6927 mask |= 1 << (regno - GP_REG_FIRST);
6928 }
6929
6930 /* We need to restore these for the handler. */
6931 if (current_function_calls_eh_return)
6932 {
6933 unsigned int i;
6934 for (i = 0; ; ++i)
6935 {
6936 regno = EH_RETURN_DATA_REGNO (i);
6937 if (regno == INVALID_REGNUM)
6938 break;
6939 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6940 mask |= 1 << (regno - GP_REG_FIRST);
6941 }
6942 }
6943
6944 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
6945 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
6946 save all later registers too. */
6947 if (GENERATE_MIPS16E_SAVE_RESTORE)
6948 {
6949 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
6950 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
6951 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
6952 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
6953 }
6954
6955 /* This loop must iterate over the same space as its companion in
6956 mips_for_each_saved_reg. */
6957 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6958 regno >= FP_REG_FIRST;
6959 regno -= MAX_FPRS_PER_FMT)
6960 {
6961 if (mips_save_reg_p (regno))
6962 {
6963 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
6964 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
6965 }
6966 }
6967
6968 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6969 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6970
6971 /* Add in the space required for saving incoming register arguments. */
6972 total_size += current_function_pretend_args_size;
6973 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6974
6975 /* Save other computed information. */
6976 cfun->machine->frame.total_size = total_size;
6977 cfun->machine->frame.var_size = var_size;
6978 cfun->machine->frame.args_size = args_size;
6979 cfun->machine->frame.cprestore_size = cprestore_size;
6980 cfun->machine->frame.gp_reg_size = gp_reg_size;
6981 cfun->machine->frame.fp_reg_size = fp_reg_size;
6982 cfun->machine->frame.mask = mask;
6983 cfun->machine->frame.fmask = fmask;
6984 cfun->machine->frame.initialized = reload_completed;
6985 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6986 cfun->machine->frame.num_fp = (fp_reg_size
6987 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
6988
6989 if (mask)
6990 {
6991 HOST_WIDE_INT offset;
6992
6993 if (GENERATE_MIPS16E_SAVE_RESTORE)
6994 /* MIPS16e SAVE and RESTORE instructions require the GP save area
6995 to be aligned at the high end with any padding at the low end.
6996 It is only safe to use this calculation for o32, where we never
6997 have pretend arguments, and where any varargs will be saved in
6998 the caller-allocated area rather than at the top of the frame. */
6999 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7000 else
7001 offset = (args_size + cprestore_size + var_size
7002 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7003 cfun->machine->frame.gp_sp_offset = offset;
7004 cfun->machine->frame.gp_save_offset = offset - total_size;
7005 }
7006 else
7007 {
7008 cfun->machine->frame.gp_sp_offset = 0;
7009 cfun->machine->frame.gp_save_offset = 0;
7010 }
7011
7012 if (fmask)
7013 {
7014 HOST_WIDE_INT offset;
7015
7016 offset = (args_size + cprestore_size + var_size
7017 + gp_reg_rounded + fp_reg_size
7018 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7019 cfun->machine->frame.fp_sp_offset = offset;
7020 cfun->machine->frame.fp_save_offset = offset - total_size;
7021 }
7022 else
7023 {
7024 cfun->machine->frame.fp_sp_offset = 0;
7025 cfun->machine->frame.fp_save_offset = 0;
7026 }
7027
7028 /* Ok, we're done. */
7029 return total_size;
7030 }
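/* Purely as a reading aid (not compiler output), assuming a 32-bit
   target where gpr_mode is SImode and MIPS_STACK_ALIGN rounds to 8:
   a non-MIPS16 function with 32 bytes of locals, 16 bytes of outgoing
   arguments, no cprestore slot and two saved GPRs gives

       var_size       = 32
       args_size      = 16
       gp_reg_size    = 8
       total_size     = 32 + 16 + 8 = 56
       gp_sp_offset   = 16 + 0 + 32 + 8 - 4 = 52
       gp_save_offset = 52 - 56 = -4

   so the highest-numbered saved GPR sits at sp + 52 once the frame is
   allocated, i.e. 4 bytes below the incoming stack pointer.  */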
7031 \f
7032 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7033 pointer or argument pointer. TO is either the stack pointer or
7034 hard frame pointer. */
7035
7036 HOST_WIDE_INT
7037 mips_initial_elimination_offset (int from, int to)
7038 {
7039 HOST_WIDE_INT offset;
7040
7041 compute_frame_size (get_frame_size ());
7042
7043 /* Set OFFSET to the offset from the stack pointer. */
7044 switch (from)
7045 {
7046 case FRAME_POINTER_REGNUM:
7047 offset = 0;
7048 break;
7049
7050 case ARG_POINTER_REGNUM:
7051 offset = (cfun->machine->frame.total_size
7052 - current_function_pretend_args_size);
7053 break;
7054
7055 default:
7056 gcc_unreachable ();
7057 }
7058
7059 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7060 offset -= cfun->machine->frame.args_size;
7061
7062 return offset;
7063 }
7064 \f
7065 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7066 back to a previous frame. */
7067 rtx
7068 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7069 {
7070 if (count != 0)
7071 return const0_rtx;
7072
7073 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7074 }
7075 \f
7076 /* Use FN to save or restore register REGNO. MODE is the register's
7077 mode and OFFSET is the offset of its save slot from the current
7078 stack pointer. */
7079
7080 static void
7081 mips_save_restore_reg (enum machine_mode mode, int regno,
7082 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7083 {
7084 rtx mem;
7085
7086 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7087
7088 fn (gen_rtx_REG (mode, regno), mem);
7089 }
7090
7091
7092 /* Call FN for each register that is saved by the current function.
7093 SP_OFFSET is the offset of the current stack pointer from the start
7094 of the frame. */
7095
7096 static void
7097 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7098 {
7099 enum machine_mode fpr_mode;
7100 HOST_WIDE_INT offset;
7101 int regno;
7102
7103 /* Save registers starting from high to low. The debuggers prefer that
7104 at least the return register be stored at func+4; it also means we do
7105 not need a nop in the epilogue if at least one register is reloaded in
7106 addition to the return address. */
7107 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7108 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7109 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7110 {
7111 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7112 offset -= GET_MODE_SIZE (gpr_mode);
7113 }
7114
7115 /* This loop must iterate over the same space as its companion in
7116 compute_frame_size. */
7117 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7118 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7119 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7120 regno >= FP_REG_FIRST;
7121 regno -= MAX_FPRS_PER_FMT)
7122 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7123 {
7124 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7125 offset -= GET_MODE_SIZE (fpr_mode);
7126 }
7127 }
7128 \f
7129 /* If we're generating n32 or n64 abicalls, and the current function
7130 does not use $28 as its global pointer, emit a cplocal directive.
7131 Use pic_offset_table_rtx as the argument to the directive. */
7132
7133 static void
7134 mips_output_cplocal (void)
7135 {
7136 if (!TARGET_EXPLICIT_RELOCS
7137 && cfun->machine->global_pointer > 0
7138 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7139 output_asm_insn (".cplocal %+", 0);
7140 }
7141
7142 /* Return the style of GP load sequence that is being used for the
7143 current function. */
7144
7145 enum mips_loadgp_style
7146 mips_current_loadgp_style (void)
7147 {
7148 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7149 return LOADGP_NONE;
7150
7151 if (TARGET_RTP_PIC)
7152 return LOADGP_RTP;
7153
7154 if (TARGET_ABSOLUTE_ABICALLS)
7155 return LOADGP_ABSOLUTE;
7156
7157 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7158 }
7159
7160 /* The __gnu_local_gp symbol. */
7161
7162 static GTY(()) rtx mips_gnu_local_gp;
7163
7164 /* If we're generating n32 or n64 abicalls, emit instructions
7165 to set up the global pointer. */
7166
7167 static void
7168 mips_emit_loadgp (void)
7169 {
7170 rtx addr, offset, incoming_address, base, index;
7171
7172 switch (mips_current_loadgp_style ())
7173 {
7174 case LOADGP_ABSOLUTE:
7175 if (mips_gnu_local_gp == NULL)
7176 {
7177 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7178 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7179 }
7180 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7181 break;
7182
7183 case LOADGP_NEWABI:
7184 addr = XEXP (DECL_RTL (current_function_decl), 0);
7185 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7186 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7187 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7188 if (!TARGET_EXPLICIT_RELOCS)
7189 emit_insn (gen_loadgp_blockage ());
7190 break;
7191
7192 case LOADGP_RTP:
7193 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7194 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7195 emit_insn (gen_loadgp_rtp (base, index));
7196 if (!TARGET_EXPLICIT_RELOCS)
7197 emit_insn (gen_loadgp_blockage ());
7198 break;
7199
7200 default:
7201 break;
7202 }
7203 }
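/* As a rough sketch only: the LOADGP_NEWABI case above is normally
   assembled to something like

	lui	$gp,%hi(%neg(%gp_rel(foo)))
	addiu	$gp,$gp,%lo(%neg(%gp_rel(foo)))
	daddu	$gp,$gp,$25

   where $25 holds the function's own address on entry.  The exact
   relocation operators and whether ADDU or DADDU is used depend on
   the ABI and on TARGET_EXPLICIT_RELOCS, so this sequence is only
   indicative.  */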
7204
7205 /* Set up the stack and frame (if desired) for the function. */
7206
7207 static void
7208 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7209 {
7210 const char *fnname;
7211 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7212
7213 #ifdef SDB_DEBUGGING_INFO
7214 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7215 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7216 #endif
7217
7218 /* In mips16 mode, we may need to generate a 32-bit stub to handle
7219 floating-point arguments. The linker will arrange for any 32-bit
7220 functions to call this stub, which will then jump to the 16-bit
7221 function proper. */
7222 if (TARGET_MIPS16
7223 && TARGET_HARD_FLOAT_ABI
7224 && current_function_args_info.fp_code != 0)
7225 build_mips16_function_stub (file);
7226
7227 if (!FUNCTION_NAME_ALREADY_DECLARED)
7228 {
7229 /* Get the function name the same way that toplev.c does before calling
7230 assemble_start_function. This is needed so that the name used here
7231 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7232 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7233
7234 if (!flag_inhibit_size_directive)
7235 {
7236 fputs ("\t.ent\t", file);
7237 assemble_name (file, fnname);
7238 fputs ("\n", file);
7239 }
7240
7241 assemble_name (file, fnname);
7242 fputs (":\n", file);
7243 }
7244
7245 /* Stop mips_file_end from treating this function as external. */
7246 if (TARGET_IRIX && mips_abi == ABI_32)
7247 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7248
7249 if (!flag_inhibit_size_directive)
7250 {
7251 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7252 fprintf (file,
7253 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7254 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7255 ", args= " HOST_WIDE_INT_PRINT_DEC
7256 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7257 (reg_names[(frame_pointer_needed)
7258 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7259 ((frame_pointer_needed && TARGET_MIPS16)
7260 ? tsize - cfun->machine->frame.args_size
7261 : tsize),
7262 reg_names[GP_REG_FIRST + 31],
7263 cfun->machine->frame.var_size,
7264 cfun->machine->frame.num_gp,
7265 cfun->machine->frame.num_fp,
7266 cfun->machine->frame.args_size,
7267 cfun->machine->frame.cprestore_size);
7268
7269 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7270 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7271 cfun->machine->frame.mask,
7272 cfun->machine->frame.gp_save_offset);
7273 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7274 cfun->machine->frame.fmask,
7275 cfun->machine->frame.fp_save_offset);
7276
7277 /* Require:
7278 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7279 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7280 }
7281
7282 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7283 {
7284 /* Handle the initialization of $gp for SVR4 PIC. */
7285 if (!cfun->machine->all_noreorder_p)
7286 output_asm_insn ("%(.cpload\t%^%)", 0);
7287 else
7288 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7289 }
7290 else if (cfun->machine->all_noreorder_p)
7291 output_asm_insn ("%(%<", 0);
7292
7293 /* Tell the assembler which register we're using as the global
7294 pointer. This is needed for thunks, since they can use either
7295 explicit relocs or assembler macros. */
7296 mips_output_cplocal ();
7297 }
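/* Illustration of the directives emitted above (hand-written, not
   compiler output): an o32 function with 8 bytes of locals, 16 bytes
   of outgoing arguments and saved $16/$31 might start with

	.ent	foo
   foo:
	.frame	$sp,32,$31		# vars= 8, regs= 2/0, args= 16, gp= 0
	.mask	0x80010000,-4
	.fmask	0x00000000,0

   where 0x80010000 has bits 31 and 16 set and -4 is gp_save_offset.  */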
7298 \f
7299 /* Make the last instruction frame related and note that it performs
7300 the operation described by FRAME_PATTERN. */
7301
7302 static void
7303 mips_set_frame_expr (rtx frame_pattern)
7304 {
7305 rtx insn;
7306
7307 insn = get_last_insn ();
7308 RTX_FRAME_RELATED_P (insn) = 1;
7309 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7310 frame_pattern,
7311 REG_NOTES (insn));
7312 }
7313
7314
7315 /* Return a frame-related rtx that stores REG at MEM.
7316 REG must be a single register. */
7317
7318 static rtx
7319 mips_frame_set (rtx mem, rtx reg)
7320 {
7321 rtx set;
7322
7323 /* If we're saving the return address register and the dwarf return
7324 address column differs from the hard register number, adjust the
7325 note reg to refer to the former. */
7326 if (REGNO (reg) == GP_REG_FIRST + 31
7327 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7328 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7329
7330 set = gen_rtx_SET (VOIDmode, mem, reg);
7331 RTX_FRAME_RELATED_P (set) = 1;
7332
7333 return set;
7334 }
7335
7336
7337 /* Save register REG to MEM. Make the instruction frame-related. */
7338
7339 static void
7340 mips_save_reg (rtx reg, rtx mem)
7341 {
7342 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7343 {
7344 rtx x1, x2;
7345
7346 if (mips_split_64bit_move_p (mem, reg))
7347 mips_split_64bit_move (mem, reg);
7348 else
7349 mips_emit_move (mem, reg);
7350
7351 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7352 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7353 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7354 }
7355 else
7356 {
7357 if (TARGET_MIPS16
7358 && REGNO (reg) != GP_REG_FIRST + 31
7359 && !M16_REG_P (REGNO (reg)))
7360 {
7361 /* Save a non-mips16 register by moving it through a temporary.
7362 We don't need to do this for $31 since there's a special
7363 instruction for it. */
7364 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7365 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7366 }
7367 else
7368 mips_emit_move (mem, reg);
7369
7370 mips_set_frame_expr (mips_frame_set (mem, reg));
7371 }
7372 }
7373
7374 /* Return a move between register REGNO and memory location SP + OFFSET.
7375 Make the move a load if RESTORE_P, otherwise make it a frame-related
7376 store. */
7377
7378 static rtx
7379 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7380 unsigned int regno)
7381 {
7382 rtx reg, mem;
7383
7384 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7385 reg = gen_rtx_REG (SImode, regno);
7386 return (restore_p
7387 ? gen_rtx_SET (VOIDmode, reg, mem)
7388 : mips_frame_set (mem, reg));
7389 }
7390
7391 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7392 The instruction must:
7393
7394 - Allocate or deallocate SIZE bytes in total; SIZE is known
7395 to be nonzero.
7396
7397 - Save or restore as many registers in *MASK_PTR as possible.
7398 The instruction saves the first registers at the top of the
7399 allocated area, with the other registers below it.
7400
7401 - Save NARGS argument registers above the allocated area.
7402
7403 (NARGS is always zero if RESTORE_P.)
7404
7405 The SAVE and RESTORE instructions cannot save and restore all general
7406 registers, so there may be some registers left over for the caller to
7407 handle. Destructively modify *MASK_PTR so that it contains the registers
7408 that still need to be saved or restored. The caller can save these
7409 registers in the memory immediately below *OFFSET_PTR, which is a
7410 byte offset from the bottom of the allocated stack area. */
7411
7412 static rtx
7413 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7414 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7415 HOST_WIDE_INT size)
7416 {
7417 rtx pattern, set;
7418 HOST_WIDE_INT offset, top_offset;
7419 unsigned int i, regno;
7420 int n;
7421
7422 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7423
7424 /* Calculate the number of elements in the PARALLEL. We need one element
7425 for the stack adjustment, one for each argument register save, and one
7426 for each additional register move. */
7427 n = 1 + nargs;
7428 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7429 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7430 n++;
7431
7432 /* Create the final PARALLEL. */
7433 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7434 n = 0;
7435
7436 /* Add the stack pointer adjustment. */
7437 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7438 plus_constant (stack_pointer_rtx,
7439 restore_p ? size : -size));
7440 RTX_FRAME_RELATED_P (set) = 1;
7441 XVECEXP (pattern, 0, n++) = set;
7442
7443 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7444 top_offset = restore_p ? size : 0;
7445
7446 /* Save the arguments. */
7447 for (i = 0; i < nargs; i++)
7448 {
7449 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7450 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7451 XVECEXP (pattern, 0, n++) = set;
7452 }
7453
7454 /* Then fill in the other register moves. */
7455 offset = top_offset;
7456 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7457 {
7458 regno = mips16e_save_restore_regs[i];
7459 if (BITSET_P (*mask_ptr, regno))
7460 {
7461 offset -= UNITS_PER_WORD;
7462 set = mips16e_save_restore_reg (restore_p, offset, regno);
7463 XVECEXP (pattern, 0, n++) = set;
7464 *mask_ptr &= ~(1 << regno);
7465 }
7466 }
7467
7468 /* Tell the caller what offset it should use for the remaining registers. */
7469 *offset_ptr = size + (offset - top_offset);
7470
7471 gcc_assert (n == XVECLEN (pattern, 0));
7472
7473 return pattern;
7474 }
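/* Sanity-check sketch for the offset arithmetic above, assuming a
   32-bit target: a prologue SAVE (restore_p false, so TOP_OFFSET is 0)
   with SIZE == 32 that handles three registers itself finishes with
   OFFSET == -12, so *OFFSET_PTR becomes 32 - 12 = 20.  Any registers
   still set in *MASK_PTR are then stored by the caller starting one
   word below sp + 20 in the new frame.  */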
7475
7476 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7477 pointer. Return true if PATTERN matches the kind of instruction
7478 generated by mips16e_build_save_restore. If INFO is nonnull,
7479 initialize it when returning true. */
7480
7481 bool
7482 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7483 struct mips16e_save_restore_info *info)
7484 {
7485 unsigned int i, nargs, mask;
7486 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7487 rtx set, reg, mem, base;
7488 int n;
7489
7490 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7491 return false;
7492
7493 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7494 top_offset = adjust > 0 ? adjust : 0;
7495
7496 /* Interpret all other members of the PARALLEL. */
7497 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7498 mask = 0;
7499 nargs = 0;
7500 i = 0;
7501 for (n = 1; n < XVECLEN (pattern, 0); n++)
7502 {
7503 /* Check that we have a SET. */
7504 set = XVECEXP (pattern, 0, n);
7505 if (GET_CODE (set) != SET)
7506 return false;
7507
7508 /* Check that the SET is a load (if restoring) or a store
7509 (if saving). */
7510 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7511 if (!MEM_P (mem))
7512 return false;
7513
7514 /* Check that the address is the sum of the stack pointer and a
7515 possibly-zero constant offset. */
7516 mips_split_plus (XEXP (mem, 0), &base, &offset);
7517 if (base != stack_pointer_rtx)
7518 return false;
7519
7520 /* Check that SET's other operand is a register. */
7521 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7522 if (!REG_P (reg))
7523 return false;
7524
7525 /* Check for argument saves. */
7526 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7527 && REGNO (reg) == GP_ARG_FIRST + nargs)
7528 nargs++;
7529 else if (offset == save_offset)
7530 {
7531 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7532 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7533 return false;
7534
7535 mask |= 1 << REGNO (reg);
7536 save_offset -= GET_MODE_SIZE (gpr_mode);
7537 }
7538 else
7539 return false;
7540 }
7541
7542 /* Check that the restrictions on register ranges are met. */
7543 extra = 0;
7544 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7545 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7546 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7547 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7548 if (extra != 0)
7549 return false;
7550
7551 /* Make sure that the topmost argument register is not saved twice.
7552 The checks above ensure that the same is then true for the other
7553 argument registers. */
7554 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7555 return false;
7556
7557 /* Pass back information, if requested. */
7558 if (info)
7559 {
7560 info->nargs = nargs;
7561 info->mask = mask;
7562 info->size = (adjust > 0 ? adjust : -adjust);
7563 }
7564
7565 return true;
7566 }
7567
7568 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7569 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7570 the null terminator. */
7571
7572 static char *
7573 mips16e_add_register_range (char *s, unsigned int min_reg,
7574 unsigned int max_reg)
7575 {
7576 if (min_reg != max_reg)
7577 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7578 else
7579 s += sprintf (s, ",%s", reg_names[min_reg]);
7580 return s;
7581 }
7582
7583 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7584 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7585
7586 const char *
7587 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7588 {
7589 static char buffer[300];
7590
7591 struct mips16e_save_restore_info info;
7592 unsigned int i, end;
7593 char *s;
7594
7595 /* Parse the pattern. */
7596 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7597 gcc_unreachable ();
7598
7599 /* Add the mnemonic. */
7600 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7601 s += strlen (s);
7602
7603 /* Save the arguments. */
7604 if (info.nargs > 1)
7605 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7606 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7607 else if (info.nargs == 1)
7608 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7609
7610 /* Emit the amount of stack space to allocate or deallocate. */
7611 s += sprintf (s, "%d", (int) info.size);
7612
7613 /* Save or restore $16. */
7614 if (BITSET_P (info.mask, 16))
7615 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7616
7617 /* Save or restore $17. */
7618 if (BITSET_P (info.mask, 17))
7619 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7620
7621 /* Save or restore registers in the range $s2...$s8, which
7622 mips16e_s2_s8_regs lists in decreasing order. Note that this
7623 is a software register range; the hardware registers are not
7624 numbered consecutively. */
7625 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7626 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7627 if (i < end)
7628 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7629 mips16e_s2_s8_regs[i]);
7630
7631 /* Save or restore registers in the range $a0...$a3. */
7632 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7633 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7634 if (i < end)
7635 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7636 mips16e_a0_a3_regs[end - 1]);
7637
7638 /* Save or restore $31. */
7639 if (BITSET_P (info.mask, 31))
7640 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7641
7642 return buffer;
7643 }
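/* Example only: with info.nargs == 1, info.size == 32 and a mask
   covering $16, $17 and $31, the buffer built above reads

	save	$4,32,$16,$17,$31

   and the epilogue pattern produces the matching "restore" form.
   When the mask also contains a run of $s or $a registers, a range
   such as ",$18-$20" is appended by mips16e_add_register_range.  */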
7644
7645 /* Return a simplified form of X using the register values in REG_VALUES.
7646 REG_VALUES[R] is the last value assigned to hard register R, or null
7647 if R has not been modified.
7648
7649 This function is rather limited, but is good enough for our purposes. */
7650
7651 static rtx
7652 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7653 {
7654 rtx x0, x1;
7655
7656 x = avoid_constant_pool_reference (x);
7657
7658 if (UNARY_P (x))
7659 {
7660 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7661 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7662 x0, GET_MODE (XEXP (x, 0)));
7663 }
7664
7665 if (ARITHMETIC_P (x))
7666 {
7667 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7668 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7669 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7670 }
7671
7672 if (REG_P (x)
7673 && reg_values[REGNO (x)]
7674 && !rtx_unstable_p (reg_values[REGNO (x)]))
7675 return reg_values[REGNO (x)];
7676
7677 return x;
7678 }
7679
7680 /* Return true if (set DEST SRC) stores an argument register into its
7681 caller-allocated save slot, storing the number of that argument
7682 register in *REGNO_PTR if so. REG_VALUES is as for
7683 mips16e_collect_propagate_value. */
7684
7685 static bool
7686 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7687 unsigned int *regno_ptr)
7688 {
7689 unsigned int argno, regno;
7690 HOST_WIDE_INT offset, required_offset;
7691 rtx addr, base;
7692
7693 /* Check that this is a word-mode store. */
7694 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7695 return false;
7696
7697 /* Check that the register being saved is an unmodified argument
7698 register. */
7699 regno = REGNO (src);
7700 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7701 return false;
7702 argno = regno - GP_ARG_FIRST;
7703
7704 /* Check whether the address is an appropriate stack pointer or
7705 frame pointer access. The frame pointer is offset from the
7706 stack pointer by the size of the outgoing arguments. */
7707 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7708 mips_split_plus (addr, &base, &offset);
7709 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7710 if (base == hard_frame_pointer_rtx)
7711 required_offset -= cfun->machine->frame.args_size;
7712 else if (base != stack_pointer_rtx)
7713 return false;
7714 if (offset != required_offset)
7715 return false;
7716
7717 *regno_ptr = regno;
7718 return true;
7719 }
7720
7721 /* A subroutine of mips_expand_prologue, called only when generating
7722 MIPS16e SAVE instructions. Search the start of the function for any
7723 instructions that save argument registers into their caller-allocated
7724 save slots. Delete such instructions and return a value N such that
7725 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7726 instructions redundant. */
7727
7728 static unsigned int
7729 mips16e_collect_argument_saves (void)
7730 {
7731 rtx reg_values[FIRST_PSEUDO_REGISTER];
7732 rtx insn, next, set, dest, src;
7733 unsigned int nargs, regno;
7734
7735 push_topmost_sequence ();
7736 nargs = 0;
7737 memset (reg_values, 0, sizeof (reg_values));
7738 for (insn = get_insns (); insn; insn = next)
7739 {
7740 next = NEXT_INSN (insn);
7741 if (NOTE_P (insn))
7742 continue;
7743
7744 if (!INSN_P (insn))
7745 break;
7746
7747 set = PATTERN (insn);
7748 if (GET_CODE (set) != SET)
7749 break;
7750
7751 dest = SET_DEST (set);
7752 src = SET_SRC (set);
7753 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7754 {
7755 if (!BITSET_P (cfun->machine->frame.mask, regno))
7756 {
7757 delete_insn (insn);
7758 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7759 }
7760 }
7761 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7762 reg_values[REGNO (dest)]
7763 = mips16e_collect_propagate_value (src, reg_values);
7764 else
7765 break;
7766 }
7767 pop_topmost_sequence ();
7768
7769 return nargs;
7770 }
7771
7772 /* Expand the prologue into a bunch of separate insns. */
7773
7774 void
7775 mips_expand_prologue (void)
7776 {
7777 HOST_WIDE_INT size;
7778 unsigned int nargs;
7779 rtx insn;
7780
7781 if (cfun->machine->global_pointer > 0)
7782 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7783
7784 size = compute_frame_size (get_frame_size ());
7785
7786 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7787 bytes beforehand; this is enough to cover the register save area
7788 without going out of range. */
7789 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7790 {
7791 HOST_WIDE_INT step1;
7792
7793 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7794
7795 if (GENERATE_MIPS16E_SAVE_RESTORE)
7796 {
7797 HOST_WIDE_INT offset;
7798 unsigned int mask, regno;
7799
7800 /* Try to merge argument stores into the save instruction. */
7801 nargs = mips16e_collect_argument_saves ();
7802
7803 /* Build the save instruction. */
7804 mask = cfun->machine->frame.mask;
7805 insn = mips16e_build_save_restore (false, &mask, &offset,
7806 nargs, step1);
7807 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7808 size -= step1;
7809
7810 /* Check if we need to save other registers. */
7811 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7812 if (BITSET_P (mask, regno - GP_REG_FIRST))
7813 {
7814 offset -= GET_MODE_SIZE (gpr_mode);
7815 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
7816 }
7817 }
7818 else
7819 {
7820 insn = gen_add3_insn (stack_pointer_rtx,
7821 stack_pointer_rtx,
7822 GEN_INT (-step1));
7823 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7824 size -= step1;
7825 mips_for_each_saved_reg (size, mips_save_reg);
7826 }
7827 }
7828
7829 /* Allocate the rest of the frame. */
7830 if (size > 0)
7831 {
7832 if (SMALL_OPERAND (-size))
7833 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
7834 stack_pointer_rtx,
7835 GEN_INT (-size)))) = 1;
7836 else
7837 {
7838 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
7839 if (TARGET_MIPS16)
7840 {
7841 /* There are no instructions to add or subtract registers
7842 from the stack pointer, so use the frame pointer as a
7843 temporary. We should always be using a frame pointer
7844 in this case anyway. */
7845 gcc_assert (frame_pointer_needed);
7846 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
7847 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
7848 hard_frame_pointer_rtx,
7849 MIPS_PROLOGUE_TEMP (Pmode)));
7850 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
7851 }
7852 else
7853 emit_insn (gen_sub3_insn (stack_pointer_rtx,
7854 stack_pointer_rtx,
7855 MIPS_PROLOGUE_TEMP (Pmode)));
7856
7857 /* Describe the combined effect of the previous instructions. */
7858 mips_set_frame_expr
7859 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7860 plus_constant (stack_pointer_rtx, -size)));
7861 }
7862 }
7863
7864 /* Set up the frame pointer, if we're using one. In mips16 code,
7865 we point the frame pointer ahead of the outgoing argument area.
7866 This should allow more variables & incoming arguments to be
7867 accessed with unextended instructions. */
7868 if (frame_pointer_needed)
7869 {
7870 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
7871 {
7872 rtx offset = GEN_INT (cfun->machine->frame.args_size);
7873 if (SMALL_OPERAND (cfun->machine->frame.args_size))
7874 RTX_FRAME_RELATED_P
7875 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7876 stack_pointer_rtx,
7877 offset))) = 1;
7878 else
7879 {
7880 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
7881 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
7882 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7883 hard_frame_pointer_rtx,
7884 MIPS_PROLOGUE_TEMP (Pmode)));
7885 mips_set_frame_expr
7886 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
7887 plus_constant (stack_pointer_rtx,
7888 cfun->machine->frame.args_size)));
7889 }
7890 }
7891 else
7892 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
7893 stack_pointer_rtx)) = 1;
7894 }
7895
7896 mips_emit_loadgp ();
7897
7898 /* If generating o32/o64 abicalls, save $gp on the stack. */
7899 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
7900 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
7901
7902 /* If we are profiling, make sure no instructions are scheduled before
7903 the call to mcount. */
7904
7905 if (current_function_profile)
7906 emit_insn (gen_blockage ());
7907 }
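/* A sketch of what the above typically expands to (not literal
   compiler output) for a small o32 frame that saves $31 and $16 at
   offsets 28 and 24:

	addiu	$sp,$sp,-32
	sw	$31,28($sp)
	sw	$16,24($sp)

   plus the $gp setup from mips_emit_loadgp and a cprestore store when
   generating o32/o64 abicalls code, and a frame-pointer copy when
   frame_pointer_needed.  */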
7908 \f
7909 /* Do any necessary cleanup after a function to restore stack, frame,
7910 and regs. */
7911
7912 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
7913
7914 static void
7915 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7916 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7917 {
7918 /* Reinstate the normal $gp. */
7919 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
7920 mips_output_cplocal ();
7921
7922 if (cfun->machine->all_noreorder_p)
7923 {
7924 /* Avoid using %>%) since it adds excess whitespace. */
7925 output_asm_insn (".set\tmacro", 0);
7926 output_asm_insn (".set\treorder", 0);
7927 set_noreorder = set_nomacro = 0;
7928 }
7929
7930 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
7931 {
7932 const char *fnname;
7933
7934 /* Get the function name the same way that toplev.c does before calling
7935 assemble_start_function. This is needed so that the name used here
7936 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7937 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7938 fputs ("\t.end\t", file);
7939 assemble_name (file, fnname);
7940 fputs ("\n", file);
7941 }
7942 }
7943 \f
7944 /* Emit instructions to restore register REG from slot MEM. */
7945
7946 static void
7947 mips_restore_reg (rtx reg, rtx mem)
7948 {
7949 /* There's no mips16 instruction to load $31 directly. Load into
7950 $7 instead and adjust the return insn appropriately. */
7951 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7952 reg = gen_rtx_REG (GET_MODE (reg), 7);
7953
7954 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7955 {
7956 /* Can't restore directly; move through a temporary. */
7957 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7958 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7959 }
7960 else
7961 mips_emit_move (reg, mem);
7962 }
7963
7964
7965 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
7966 if this epilogue precedes a sibling call, false if it is for a normal
7967 "epilogue" pattern. */
7968
7969 void
7970 mips_expand_epilogue (int sibcall_p)
7971 {
7972 HOST_WIDE_INT step1, step2;
7973 rtx base, target;
7974
7975 if (!sibcall_p && mips_can_use_return_insn ())
7976 {
7977 emit_jump_insn (gen_return ());
7978 return;
7979 }
7980
7981 /* In mips16 mode, if the return value should go into a floating-point
7982 register, we need to call a helper routine to copy it over. */
7983 if (mips16_cfun_returns_in_fpr_p ())
7984 {
7985 char *name;
7986 rtx func;
7987 rtx insn;
7988 rtx retval;
7989 rtx call;
7990 tree id;
7991 tree return_type;
7992 enum machine_mode return_mode;
7993
7994 return_type = DECL_RESULT (current_function_decl);
7995 return_mode = DECL_MODE (return_type);
7996
7997 name = ACONCAT (("__mips16_ret_",
7998 mips16_call_stub_mode_suffix (return_mode),
7999 NULL));
8000 id = get_identifier (name);
8001 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8002 retval = gen_rtx_REG (return_mode, GP_RETURN);
8003 call = gen_call_value_internal (retval, func, const0_rtx);
8004 insn = emit_call_insn (call);
8005 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8006 }
8007
8008 /* Split the frame into two. STEP1 is the amount of stack we should
8009 deallocate before restoring the registers. STEP2 is the amount we
8010 should deallocate afterwards.
8011
8012 Start off by assuming that no registers need to be restored. */
8013 step1 = cfun->machine->frame.total_size;
8014 step2 = 0;
8015
8016 /* Work out which register holds the frame address. Account for the
8017 frame pointer offset used by mips16 code. */
8018 if (!frame_pointer_needed)
8019 base = stack_pointer_rtx;
8020 else
8021 {
8022 base = hard_frame_pointer_rtx;
8023 if (TARGET_MIPS16)
8024 step1 -= cfun->machine->frame.args_size;
8025 }
8026
8027 /* If we need to restore registers, deallocate as much stack as
8028 possible in the second step without going out of range. */
8029 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8030 {
8031 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8032 step1 -= step2;
8033 }
8034
8035 /* Set TARGET to BASE + STEP1. */
8036 target = base;
8037 if (step1 > 0)
8038 {
8039 rtx adjust;
8040
8041 /* Get an rtx for STEP1 that we can add to BASE. */
8042 adjust = GEN_INT (step1);
8043 if (!SMALL_OPERAND (step1))
8044 {
8045 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8046 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8047 }
8048
8049 /* Normal mode code can copy the result straight into $sp. */
8050 if (!TARGET_MIPS16)
8051 target = stack_pointer_rtx;
8052
8053 emit_insn (gen_add3_insn (target, base, adjust));
8054 }
8055
8056 /* Copy TARGET into the stack pointer. */
8057 if (target != stack_pointer_rtx)
8058 mips_emit_move (stack_pointer_rtx, target);
8059
8060 /* If we're using addressing macros, $gp is implicitly used by all
8061 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8062 from the stack. */
8063 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8064 emit_insn (gen_blockage ());
8065
8066 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8067 {
8068 unsigned int regno, mask;
8069 HOST_WIDE_INT offset;
8070 rtx restore;
8071
8072 /* Generate the restore instruction. */
8073 mask = cfun->machine->frame.mask;
8074 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8075
8076 /* Restore any other registers manually. */
8077 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8078 if (BITSET_P (mask, regno - GP_REG_FIRST))
8079 {
8080 offset -= GET_MODE_SIZE (gpr_mode);
8081 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8082 }
8083
8084 /* Restore the remaining registers and deallocate the final bit
8085 of the frame. */
8086 emit_insn (restore);
8087 }
8088 else
8089 {
8090 /* Restore the registers. */
8091 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8092 mips_restore_reg);
8093
8094 /* Deallocate the final bit of the frame. */
8095 if (step2 > 0)
8096 emit_insn (gen_add3_insn (stack_pointer_rtx,
8097 stack_pointer_rtx,
8098 GEN_INT (step2)));
8099 }
8100
8101 /* Add in the __builtin_eh_return stack adjustment. We need to
8102 use a temporary in mips16 code. */
8103 if (current_function_calls_eh_return)
8104 {
8105 if (TARGET_MIPS16)
8106 {
8107 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8108 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8109 MIPS_EPILOGUE_TEMP (Pmode),
8110 EH_RETURN_STACKADJ_RTX));
8111 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8112 }
8113 else
8114 emit_insn (gen_add3_insn (stack_pointer_rtx,
8115 stack_pointer_rtx,
8116 EH_RETURN_STACKADJ_RTX));
8117 }
8118
8119 if (!sibcall_p)
8120 {
8121 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8122 path will restore the return address into $7 rather than $31. */
8123 if (TARGET_MIPS16
8124 && !GENERATE_MIPS16E_SAVE_RESTORE
8125 && (cfun->machine->frame.mask & RA_MASK) != 0)
8126 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8127 GP_REG_FIRST + 7)));
8128 else
8129 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8130 GP_REG_FIRST + 31)));
8131 }
8132 }
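/* The matching epilogue for the prologue sketch above would be
   roughly

	lw	$31,28($sp)
	lw	$16,24($sp)
	addiu	$sp,$sp,32
	jr	$31

   with STEP1 == 0 and STEP2 == 32, since the whole frame fits within
   MIPS_MAX_FIRST_STACK_STEP.  Again, this is illustrative only.  */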
8133 \f
8134 /* Return nonzero if this function is known to have a null epilogue.
8135 This allows the optimizer to omit jumps to jumps if no stack
8136 was created. */
8137
8138 int
8139 mips_can_use_return_insn (void)
8140 {
8141 if (! reload_completed)
8142 return 0;
8143
8144 if (df_regs_ever_live_p (31) || current_function_profile)
8145 return 0;
8146
8147 /* In mips16 mode, a function that returns a floating point value
8148 needs to arrange to copy the return value into the floating point
8149 registers. */
8150 if (mips16_cfun_returns_in_fpr_p ())
8151 return 0;
8152
8153 if (cfun->machine->frame.initialized)
8154 return cfun->machine->frame.total_size == 0;
8155
8156 return compute_frame_size (get_frame_size ()) == 0;
8157 }
8158 \f
8159 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8160 in order to avoid duplicating too much logic from elsewhere. */
8161
8162 static void
8163 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8164 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8165 tree function)
8166 {
8167 rtx this, temp1, temp2, insn, fnaddr;
8168
8169 /* Pretend to be a post-reload pass while generating rtl. */
8170 reload_completed = 1;
8171
8172 /* Mark the end of the (empty) prologue. */
8173 emit_note (NOTE_INSN_PROLOGUE_END);
8174
8175 /* Pick a global pointer. Use a call-clobbered register if
8176 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8177 if (TARGET_USE_GOT)
8178 {
8179 cfun->machine->global_pointer =
8180 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8181
8182 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8183
8184 }
8185
8186 /* Set up the global pointer for n32 or n64 abicalls. If
8187 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8188 no need to load it. */
8189 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8190 || !targetm.binds_local_p (function))
8191 mips_emit_loadgp ();
8192
8193 /* We need two temporary registers in some cases. */
8194 temp1 = gen_rtx_REG (Pmode, 2);
8195 temp2 = gen_rtx_REG (Pmode, 3);
8196
8197 /* Find out which register contains the "this" pointer. */
8198 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8199 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8200 else
8201 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8202
8203 /* Add DELTA to THIS. */
8204 if (delta != 0)
8205 {
8206 rtx offset = GEN_INT (delta);
8207 if (!SMALL_OPERAND (delta))
8208 {
8209 mips_emit_move (temp1, offset);
8210 offset = temp1;
8211 }
8212 emit_insn (gen_add3_insn (this, this, offset));
8213 }
8214
8215 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8216 if (vcall_offset != 0)
8217 {
8218 rtx addr;
8219
8220 /* Set TEMP1 to *THIS. */
8221 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8222
8223 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8224 addr = mips_add_offset (temp2, temp1, vcall_offset);
8225
8226 /* Load the offset and add it to THIS. */
8227 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8228 emit_insn (gen_add3_insn (this, this, temp1));
8229 }
8230
8231 /* Jump to the target function. Use a sibcall if direct jumps are
8232 allowed, otherwise load the address into a register first. */
8233 fnaddr = XEXP (DECL_RTL (function), 0);
8234 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr))
8235 {
8236 /* This is messy. gas treats "la $25,foo" as part of a call
8237 sequence and may allow a global "foo" to be lazily bound.
8238 The general move patterns therefore reject this combination.
8239
8240 In this context, lazy binding would actually be OK
8241 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8242 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8243 We must therefore load the address via a temporary
8244 register if mips_dangerous_for_la25_p.
8245
8246 If we jump to the temporary register rather than $25, the assembler
8247 can use the move insn to fill the jump's delay slot. */
8248 if (TARGET_USE_PIC_FN_ADDR_REG
8249 && !mips_dangerous_for_la25_p (fnaddr))
8250 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8251 mips_load_call_address (temp1, fnaddr, true);
8252
8253 if (TARGET_USE_PIC_FN_ADDR_REG
8254 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8255 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8256 emit_jump_insn (gen_indirect_jump (temp1));
8257 }
8258 else
8259 {
8260 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8261 SIBLING_CALL_P (insn) = 1;
8262 }
8263
8264 /* Run just enough of rest_of_compilation. This sequence was
8265 "borrowed" from alpha.c. */
8266 insn = get_insns ();
8267 insn_locators_alloc ();
8268 split_all_insns_noflow ();
8269 mips16_lay_out_constants ();
8270 shorten_branches (insn);
8271 final_start_function (insn, file, 1);
8272 final (insn, file, 1);
8273 final_end_function ();
8274
8275 /* Clean up the vars set above. Note that final_end_function resets
8276 the global pointer for us. */
8277 reload_completed = 0;
8278 }
8279 \f
8280 /* Returns nonzero if X contains a SYMBOL_REF. */
8281
8282 static int
8283 symbolic_expression_p (rtx x)
8284 {
8285 if (GET_CODE (x) == SYMBOL_REF)
8286 return 1;
8287
8288 if (GET_CODE (x) == CONST)
8289 return symbolic_expression_p (XEXP (x, 0));
8290
8291 if (UNARY_P (x))
8292 return symbolic_expression_p (XEXP (x, 0));
8293
8294 if (ARITHMETIC_P (x))
8295 return (symbolic_expression_p (XEXP (x, 0))
8296 || symbolic_expression_p (XEXP (x, 1)));
8297
8298 return 0;
8299 }
8300
8301 /* Choose the section to use for the constant rtx expression X that has
8302 mode MODE. */
8303
8304 static section *
8305 mips_select_rtx_section (enum machine_mode mode, rtx x,
8306 unsigned HOST_WIDE_INT align)
8307 {
8308 if (TARGET_EMBEDDED_DATA)
8309 {
8310 /* For embedded applications, always put constants in read-only data,
8311 in order to reduce RAM usage. */
8312 return mergeable_constant_section (mode, align, 0);
8313 }
8314 else
8315 {
8316 /* For hosted applications, always put constants in small data if
8317 possible, as this gives the best performance. */
8318 /* ??? Consider using mergeable small data sections. */
8319
8320 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
8321 && mips_section_threshold > 0)
8322 return get_named_section (NULL, ".sdata", 0);
8323 else if (flag_pic && symbolic_expression_p (x))
8324 return get_named_section (NULL, ".data.rel.ro", 3);
8325 else
8326 return mergeable_constant_section (mode, align, 0);
8327 }
8328 }
8329
8330 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8331
8332 The complication here is that, with the combination TARGET_ABICALLS
8333 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8334 therefore not be included in the read-only part of a DSO. Handle such
8335 cases by selecting a normal data section instead of a read-only one.
8336 The logic apes that in default_function_rodata_section. */
8337
8338 static section *
8339 mips_function_rodata_section (tree decl)
8340 {
8341 if (!TARGET_ABICALLS || TARGET_GPWORD)
8342 return default_function_rodata_section (decl);
8343
8344 if (decl && DECL_SECTION_NAME (decl))
8345 {
8346 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8347 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8348 {
8349 char *rname = ASTRDUP (name);
8350 rname[14] = 'd';
8351 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8352 }
8353 else if (flag_function_sections && flag_data_sections
8354 && strncmp (name, ".text.", 6) == 0)
8355 {
8356 char *rname = ASTRDUP (name);
8357 memcpy (rname + 1, "data", 4);
8358 return get_section (rname, SECTION_WRITE, decl);
8359 }
8360 }
8361 return data_section;
8362 }
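/* Concrete examples of the renaming above: a one-only function whose
   text lives in .gnu.linkonce.t.foo gets its jump tables in
   .gnu.linkonce.d.foo, and with -ffunction-sections -fdata-sections a
   function in .text.foo gets them in .data.foo.  Anything else falls
   back to plain .data.  */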
8363
8364 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8365 locally-defined objects go in a small data section. It also controls
8366 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8367 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8368
8369 static bool
8370 mips_in_small_data_p (tree decl)
8371 {
8372 HOST_WIDE_INT size;
8373
8374 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8375 return false;
8376
8377 /* We don't yet generate small-data references for -mabicalls or
8378 VxWorks RTP code. See the related -G handling in override_options. */
8379 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8380 return false;
8381
8382 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8383 {
8384 const char *name;
8385
8386 /* Reject anything that isn't in a known small-data section. */
8387 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8388 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8389 return false;
8390
8391 /* If a symbol is defined externally, the assembler will use the
8392 usual -G rules when deciding how to implement macros. */
8393 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
8394 return true;
8395 }
8396 else if (TARGET_EMBEDDED_DATA)
8397 {
8398 /* Don't put constants into the small data section: we want them
8399 to be in ROM rather than RAM. */
8400 if (TREE_CODE (decl) != VAR_DECL)
8401 return false;
8402
8403 if (TREE_READONLY (decl)
8404 && !TREE_SIDE_EFFECTS (decl)
8405 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8406 return false;
8407 }
8408
8409 size = int_size_in_bytes (TREE_TYPE (decl));
8410 return (size > 0 && size <= mips_section_threshold);
8411 }
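/* For example (assuming the usual default of -G 8, so that
   mips_section_threshold is 8, and not compiling -mabicalls): a
   4-byte global "int counter" is treated as small data and can be
   accessed as %gp_rel(counter)($gp), whereas a 12-byte object, or an
   object explicitly placed in a section other than .sdata/.sbss, is
   not.  */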
8412
8413 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8414 anchors for small data: the GP register acts as an anchor in that
8415 case. We also don't want to use them for PC-relative accesses,
8416 where the PC acts as an anchor. */
8417
8418 static bool
8419 mips_use_anchors_for_symbol_p (rtx symbol)
8420 {
8421 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8422 {
8423 case SYMBOL_PC_RELATIVE:
8424 case SYMBOL_GP_RELATIVE:
8425 return false;
8426
8427 default:
8428 return true;
8429 }
8430 }
8431 \f
8432 /* See whether VALTYPE is a record whose fields should be returned in
8433 floating-point registers. If so, return the number of fields and
8434 list them in FIELDS (which should have two elements). Return 0
8435 otherwise.
8436
8437 For n32 & n64, a structure with one or two fields is returned in
8438 floating-point registers as long as every field has a floating-point
8439 type. */
8440
8441 static int
8442 mips_fpr_return_fields (const_tree valtype, tree *fields)
8443 {
8444 tree field;
8445 int i;
8446
8447 if (!TARGET_NEWABI)
8448 return 0;
8449
8450 if (TREE_CODE (valtype) != RECORD_TYPE)
8451 return 0;
8452
8453 i = 0;
8454 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8455 {
8456 if (TREE_CODE (field) != FIELD_DECL)
8457 continue;
8458
8459 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8460 return 0;
8461
8462 if (i == 2)
8463 return 0;
8464
8465 fields[i++] = field;
8466 }
8467 return i;
8468 }
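/* For instance, under n32 or n64:

     struct s1 { float f; double d; };  -> 2 fields, returned in $f0/$f2
     struct s2 { double d; };           -> 1 field,  returned in $f0
     struct s3 { float f; int i; };     -> 0, returned in $2/$3

   the last because the int field is not REAL_TYPE.  This merely
   restates the loop above; see mips_function_value for how the 1- and
   2-field cases are turned into registers.  */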
8469
8470
8471 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8472 a value in the most significant part of $2/$3 if:
8473
8474 - the target is big-endian;
8475
8476 - the value has a structure or union type (we generalize this to
8477 cover aggregates from other languages too); and
8478
8479 - the structure is not returned in floating-point registers. */
8480
8481 static bool
8482 mips_return_in_msb (const_tree valtype)
8483 {
8484 tree fields[2];
8485
8486 return (TARGET_NEWABI
8487 && TARGET_BIG_ENDIAN
8488 && AGGREGATE_TYPE_P (valtype)
8489 && mips_fpr_return_fields (valtype, fields) == 0);
8490 }
8491
8492
8493 /* Return a composite value in a pair of floating-point registers.
8494 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8495 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8496 complete value.
8497
8498 For n32 & n64, $f0 always holds the first value and $f2 the second.
8499 Otherwise the values are packed together as closely as possible. */
8500
8501 static rtx
8502 mips_return_fpr_pair (enum machine_mode mode,
8503 enum machine_mode mode1, HOST_WIDE_INT offset1,
8504 enum machine_mode mode2, HOST_WIDE_INT offset2)
8505 {
8506 int inc;
8507
8508 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8509 return gen_rtx_PARALLEL
8510 (mode,
8511 gen_rtvec (2,
8512 gen_rtx_EXPR_LIST (VOIDmode,
8513 gen_rtx_REG (mode1, FP_RETURN),
8514 GEN_INT (offset1)),
8515 gen_rtx_EXPR_LIST (VOIDmode,
8516 gen_rtx_REG (mode2, FP_RETURN + inc),
8517 GEN_INT (offset2))));
8518
8519 }
8520
8521
8522 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8523 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8524 VALTYPE is null and MODE is the mode of the return value. */
8525
8526 rtx
8527 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
8528 enum machine_mode mode)
8529 {
8530 if (valtype)
8531 {
8532 tree fields[2];
8533 int unsignedp;
8534
8535 mode = TYPE_MODE (valtype);
8536 unsignedp = TYPE_UNSIGNED (valtype);
8537
8538 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8539 true, we must promote the mode just as PROMOTE_MODE does. */
8540 mode = promote_mode (valtype, mode, &unsignedp, 1);
8541
8542 /* Handle structures whose fields are returned in $f0/$f2. */
8543 switch (mips_fpr_return_fields (valtype, fields))
8544 {
8545 case 1:
8546 return gen_rtx_REG (mode, FP_RETURN);
8547
8548 case 2:
8549 return mips_return_fpr_pair (mode,
8550 TYPE_MODE (TREE_TYPE (fields[0])),
8551 int_byte_position (fields[0]),
8552 TYPE_MODE (TREE_TYPE (fields[1])),
8553 int_byte_position (fields[1]));
8554 }
8555
8556 /* If a value is passed in the most significant part of a register, see
8557 whether we have to round the mode up to a whole number of words. */
8558 if (mips_return_in_msb (valtype))
8559 {
8560 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8561 if (size % UNITS_PER_WORD != 0)
8562 {
8563 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8564 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8565 }
8566 }
8567
8568 /* For EABI, the class of return register depends entirely on MODE.
8569 For example, "struct { some_type x; }" and "union { some_type x; }"
8570 are returned in the same way as a bare "some_type" would be.
8571 Other ABIs only use FPRs for scalar, complex or vector types. */
8572 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8573 return gen_rtx_REG (mode, GP_RETURN);
8574 }
8575
8576 if (!TARGET_MIPS16)
8577 {
8578 /* Handle long doubles for n32 & n64. */
8579 if (mode == TFmode)
8580 return mips_return_fpr_pair (mode,
8581 DImode, 0,
8582 DImode, GET_MODE_SIZE (mode) / 2);
8583
8584 if (mips_return_mode_in_fpr_p (mode))
8585 {
8586 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8587 return mips_return_fpr_pair (mode,
8588 GET_MODE_INNER (mode), 0,
8589 GET_MODE_INNER (mode),
8590 GET_MODE_SIZE (mode) / 2);
8591 else
8592 return gen_rtx_REG (mode, FP_RETURN);
8593 }
8594 }
8595
8596 return gen_rtx_REG (mode, GP_RETURN);
8597 }
8598
8599 /* Return nonzero when an argument must be passed by reference. */
8600
8601 static bool
8602 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8603 enum machine_mode mode, const_tree type,
8604 bool named ATTRIBUTE_UNUSED)
8605 {
8606 if (mips_abi == ABI_EABI)
8607 {
8608 int size;
8609
8610 /* ??? How should SCmode be handled? */
8611 if (mode == DImode || mode == DFmode)
8612 return 0;
8613
8614 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8615 return size == -1 || size > UNITS_PER_WORD;
8616 }
8617 else
8618 {
8619 /* If we have a variable-sized parameter, we have no choice. */
8620 return targetm.calls.must_pass_in_stack (mode, type);
8621 }
8622 }
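/* Reading straight off the code above, for a 32-bit EABI target where
   UNITS_PER_WORD is 4: a 16-byte struct or a variable-sized argument
   is passed by reference, while DImode and DFmode arguments are
   explicitly exempted and passed by value even though they are wider
   than a word.  The other ABIs defer to the generic
   must_pass_in_stack test.  */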
8623
8624 static bool
8625 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8626 enum machine_mode mode ATTRIBUTE_UNUSED,
8627 const_tree type ATTRIBUTE_UNUSED, bool named)
8628 {
8629 return mips_abi == ABI_EABI && named;
8630 }
8631
8632 /* Return true if registers of class CLASS cannot change from mode FROM
8633 to mode TO. */
8634
8635 bool
8636 mips_cannot_change_mode_class (enum machine_mode from,
8637 enum machine_mode to, enum reg_class class)
8638 {
8639 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8640 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8641 {
8642 if (TARGET_BIG_ENDIAN)
8643 {
8644 /* When a multi-word value is stored in paired floating-point
8645 registers, the first register always holds the low word.
8646 We therefore can't allow FPRs to change between single-word
8647 and multi-word modes. */
8648 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8649 return true;
8650 }
8651 }
8652
8653 /* gcc assumes that each word of a multiword register can be accessed
8654 individually using SUBREGs. This is not true for floating-point
8655 registers if they are bigger than a word. */
8656 if (UNITS_PER_FPREG > UNITS_PER_WORD
8657 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8658 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8659 && reg_classes_intersect_p (FP_REGS, class))
8660 return true;
8661
8662 /* Loading a 32-bit value into a 64-bit floating-point register
8663 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8664 We can't allow 64-bit float registers to change from SImode
8665 to a wider mode. */
8666 if (TARGET_64BIT
8667 && TARGET_FLOAT64
8668 && from == SImode
8669 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8670 && reg_classes_intersect_p (FP_REGS, class))
8671 return true;
8672
8673 return false;
8674 }
8675
8676 /* Return true if X should not be moved directly into register $25.
8677 We need this because many versions of GAS will treat "la $25,foo" as
8678 part of a call sequence and so allow a global "foo" to be lazily bound. */
8679
8680 bool
8681 mips_dangerous_for_la25_p (rtx x)
8682 {
8683 return (!TARGET_EXPLICIT_RELOCS
8684 && TARGET_USE_GOT
8685 && GET_CODE (x) == SYMBOL_REF
8686 && mips_global_symbol_p (x));
8687 }
8688
8689 /* Implement PREFERRED_RELOAD_CLASS. */
8690
8691 enum reg_class
8692 mips_preferred_reload_class (rtx x, enum reg_class class)
8693 {
8694 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8695 return LEA_REGS;
8696
8697 if (TARGET_HARD_FLOAT
8698 && FLOAT_MODE_P (GET_MODE (x))
8699 && reg_class_subset_p (FP_REGS, class))
8700 return FP_REGS;
8701
8702 if (reg_class_subset_p (GR_REGS, class))
8703 class = GR_REGS;
8704
8705 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8706 class = M16_REGS;
8707
8708 return class;
8709 }
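/* For example (illustrative): if mips_preferred_reload_class is given
   class ALL_REGS, a floating-point-mode X is narrowed to FP_REGS on
   hard-float targets, and an X that mips_dangerous_for_la25_p accepts
   is narrowed to LEA_REGS so that reload never chooses $25 for it.  */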
8710
8711 /* This function returns the register class required for a secondary
8712 register when copying between one of the registers in CLASS, and X,
8713 using MODE. If IN_P is nonzero, the copy is going from X to the
8714 register, otherwise the register is the source. A return value of
8715 NO_REGS means that no secondary register is required. */
8716
8717 enum reg_class
8718 mips_secondary_reload_class (enum reg_class class,
8719 enum machine_mode mode, rtx x, int in_p)
8720 {
8721 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
8722 int regno = -1;
8723 int gp_reg_p;
8724
8725 if (REG_P (x) || GET_CODE (x) == SUBREG)
8726 regno = true_regnum (x);
8727
8728 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
8729
8730 if (mips_dangerous_for_la25_p (x))
8731 {
8732 gr_regs = LEA_REGS;
8733 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8734 return gr_regs;
8735 }
8736
8737 /* Copying from HI or LO to anywhere other than a general register
8738 requires a general register.
8739 This rule applies to both the original HI/LO pair and the new
8740 DSP accumulators. */
8741 if (reg_class_subset_p (class, ACC_REGS))
8742 {
8743 if (TARGET_MIPS16 && in_p)
8744 {
8745 /* We can't really copy to HI or LO at all in mips16 mode. */
8746 return M16_REGS;
8747 }
8748 return gp_reg_p ? NO_REGS : gr_regs;
8749 }
8750 if (ACC_REG_P (regno))
8751 {
8752 if (TARGET_MIPS16 && ! in_p)
8753 {
8754 /* In mips16 mode, HI and LO can only be copied into an M16 register. */
8755 return M16_REGS;
8756 }
8757 return class == gr_regs ? NO_REGS : gr_regs;
8758 }
8759
8760 /* We can only copy a value to a condition code register from a
8761 floating point register, and even then we require a scratch
8762 floating point register. We can only copy a value out of a
8763 condition code register into a general register. */
8764 if (class == ST_REGS)
8765 {
8766 if (in_p)
8767 return FP_REGS;
8768 return gp_reg_p ? NO_REGS : gr_regs;
8769 }
8770 if (ST_REG_P (regno))
8771 {
8772 if (! in_p)
8773 return FP_REGS;
8774 return class == gr_regs ? NO_REGS : gr_regs;
8775 }
8776
8777 if (class == FP_REGS)
8778 {
8779 if (MEM_P (x))
8780 {
8781 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8782 return NO_REGS;
8783 }
8784 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8785 {
8786 /* We can use the l.s and l.d macros to load floating-point
8787 constants. ??? For l.s, we could probably get better
8788 code by returning GR_REGS here. */
8789 return NO_REGS;
8790 }
8791 else if (gp_reg_p || x == CONST0_RTX (mode))
8792 {
8793 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8794 return NO_REGS;
8795 }
8796 else if (FP_REG_P (regno))
8797 {
8798 /* In this case we can use mov.s or mov.d. */
8799 return NO_REGS;
8800 }
8801 else
8802 {
8803 /* Otherwise, we need to reload through an integer register. */
8804 return gr_regs;
8805 }
8806 }
8807
8808 /* In mips16 mode, going between memory and anything but M16_REGS
8809 requires an M16_REG. */
8810 if (TARGET_MIPS16)
8811 {
8812 if (class != M16_REGS && class != M16_NA_REGS)
8813 {
8814 if (gp_reg_p)
8815 return NO_REGS;
8816 return M16_REGS;
8817 }
8818 if (! gp_reg_p)
8819 {
8820 if (class == M16_REGS || class == M16_NA_REGS)
8821 return NO_REGS;
8822 return M16_REGS;
8823 }
8824 }
8825
8826 return NO_REGS;
8827 }
8828
8829 /* Implement CLASS_MAX_NREGS.
8830
8831 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8832
8833 - ST_REGS always hold CCmode values, and CCmode values are
8834 considered to be 4 bytes wide.
8835
8836 All other register classes are covered by UNITS_PER_WORD. Note that
8837 this is true even for unions of integer and float registers when the
8838 latter are smaller than the former. The only supported combination
8839 in which this occurs is -mgp64 -msingle-float, which has 64-bit
8840 words but 32-bit float registers. A word-based calculation is correct
8841 in that case since -msingle-float disallows multi-FPR values. */
8842
8843 int
8844 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8845 enum machine_mode mode)
8846 {
8847 if (class == ST_REGS)
8848 return (GET_MODE_SIZE (mode) + 3) / 4;
8849 else if (class == FP_REGS)
8850 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8851 else
8852 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8853 }
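/* Worked examples for mips_class_max_nregs (illustrative): a CCmode
   value needs (4 + 3) / 4 = 1 register in ST_REGS; a DFmode value needs
   (8 + 8 - 1) / 8 = 1 FP register when UNITS_PER_FPREG == 8 but
   (8 + 4 - 1) / 4 = 2 when UNITS_PER_FPREG == 4, and likewise 1 general
   register with 64-bit words or 2 with 32-bit words.  */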
8854
8855 static bool
8856 mips_valid_pointer_mode (enum machine_mode mode)
8857 {
8858 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8859 }
8860
8861 /* Target hook for vector_mode_supported_p. */
8862
8863 static bool
8864 mips_vector_mode_supported_p (enum machine_mode mode)
8865 {
8866 switch (mode)
8867 {
8868 case V2SFmode:
8869 return TARGET_PAIRED_SINGLE_FLOAT;
8870
8871 case V2HImode:
8872 case V4QImode:
8873 return TARGET_DSP;
8874
8875 default:
8876 return false;
8877 }
8878 }
8879 \f
8880 /* If we can access small data directly (using gp-relative relocation
8881 operators), return the small data pointer; otherwise return null.
8882
8883 For each mips16 function which refers to GP relative symbols, we
8884 use a pseudo register, initialized at the start of the function, to
8885 hold the $gp value. */
8886
8887 static rtx
8888 mips16_gp_pseudo_reg (void)
8889 {
8890 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
8891 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
8892
8893 /* Don't initialize the pseudo register if we are being called from
8894 the tree optimizers' cost-calculation routines. */
8895 if (!cfun->machine->initialized_mips16_gp_pseudo_p
8896 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
8897 {
8898 rtx insn, scan;
8899
8900 /* We want to initialize this to a value which gcc will believe
8901 is constant. */
8902 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
8903
8904 push_topmost_sequence ();
8905 /* We need to emit the initialization after the FUNCTION_BEG
8906 note, so that it will be integrated. */
8907 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
8908 if (NOTE_P (scan)
8909 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
8910 break;
8911 if (scan == NULL_RTX)
8912 scan = get_insns ();
8913 insn = emit_insn_after (insn, scan);
8914 pop_topmost_sequence ();
8915
8916 cfun->machine->initialized_mips16_gp_pseudo_p = true;
8917 }
8918
8919 return cfun->machine->mips16_gp_pseudo_rtx;
8920 }
8921
8922 /* Write out code to move floating point arguments in or out of
8923 general registers. Output the instructions to FILE. FP_CODE is
8924 the code describing which arguments are present (see the comment at
8925 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8926 we are copying from the floating point registers. */
8927
8928 static void
8929 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
8930 {
8931 const char *s;
8932 int gparg, fparg;
8933 unsigned int f;
8934 CUMULATIVE_ARGS cum;
8935
8936 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8937 gcc_assert (TARGET_OLDABI);
8938
8939 if (from_fp_p)
8940 s = "mfc1";
8941 else
8942 s = "mtc1";
8943
8944 init_cumulative_args (&cum, NULL, NULL);
8945
8946 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8947 {
8948 enum machine_mode mode;
8949 struct mips_arg_info info;
8950
8951 if ((f & 3) == 1)
8952 mode = SFmode;
8953 else if ((f & 3) == 2)
8954 mode = DFmode;
8955 else
8956 gcc_unreachable ();
8957
8958 mips_arg_info (&cum, mode, NULL, true, &info);
8959 gparg = mips_arg_regno (&info, false);
8960 fparg = mips_arg_regno (&info, true);
8961
8962 if (mode == SFmode)
8963 fprintf (file, "\t%s\t%s,%s\n", s,
8964 reg_names[gparg], reg_names[fparg]);
8965 else if (TARGET_64BIT)
8966 fprintf (file, "\td%s\t%s,%s\n", s,
8967 reg_names[gparg], reg_names[fparg]);
8968 else if (ISA_HAS_MXHC1)
8969 /* -mips32r2 -mfp64 */
8970 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8971 s,
8972 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
8973 reg_names[fparg],
8974 from_fp_p ? "mfhc1" : "mthc1",
8975 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
8976 reg_names[fparg]);
8977 else if (TARGET_BIG_ENDIAN)
8978 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8979 reg_names[gparg], reg_names[fparg + 1], s,
8980 reg_names[gparg + 1], reg_names[fparg]);
8981 else
8982 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8983 reg_names[gparg], reg_names[fparg], s,
8984 reg_names[gparg + 1], reg_names[fparg + 1]);
8985
8986 function_arg_advance (&cum, mode, NULL, true);
8987 }
8988 }
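/* Example of the FP_CODE encoding consumed by mips16_fp_args
   (illustrative): each argument takes two bits, least-significant pair
   first, with 1 meaning "float" and 2 meaning "double".  So fp_code == 9
   (binary 1001) describes an argument list of (float, double): the loop
   emits a single mfc1/mtc1 for the float and, for the double, either one
   dmfc1/dmtc1 on 64-bit targets or a pair of word moves otherwise.  */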
8989
8990 /* Build a mips16 function stub. This is used for functions which
8991 take arguments in the floating point registers. It is 32-bit code
8992 that moves the floating point args into the general registers, and
8993 then jumps to the 16-bit code. */
8994
8995 static void
8996 build_mips16_function_stub (FILE *file)
8997 {
8998 const char *fnname;
8999 char *secname, *stubname;
9000 tree stubid, stubdecl;
9001 int need_comma;
9002 unsigned int f;
9003
9004 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9005 secname = (char *) alloca (strlen (fnname) + 20);
9006 sprintf (secname, ".mips16.fn.%s", fnname);
9007 stubname = (char *) alloca (strlen (fnname) + 20);
9008 sprintf (stubname, "__fn_stub_%s", fnname);
9009 stubid = get_identifier (stubname);
9010 stubdecl = build_decl (FUNCTION_DECL, stubid,
9011 build_function_type (void_type_node, NULL_TREE));
9012 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9013 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9014
9015 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9016 need_comma = 0;
9017 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9018 {
9019 fprintf (file, "%s%s",
9020 need_comma ? ", " : "",
9021 (f & 3) == 1 ? "float" : "double");
9022 need_comma = 1;
9023 }
9024 fprintf (file, ")\n");
9025
9026 fprintf (file, "\t.set\tnomips16\n");
9027 switch_to_section (function_section (stubdecl));
9028 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9029
9030 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9031 within a .ent, and we cannot emit another .ent. */
9032 if (!FUNCTION_NAME_ALREADY_DECLARED)
9033 {
9034 fputs ("\t.ent\t", file);
9035 assemble_name (file, stubname);
9036 fputs ("\n", file);
9037 }
9038
9039 assemble_name (file, stubname);
9040 fputs (":\n", file);
9041
9042 /* We don't want the assembler to insert any nops here. */
9043 fprintf (file, "\t.set\tnoreorder\n");
9044
9045 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9046
9047 fprintf (asm_out_file, "\t.set\tnoat\n");
9048 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9049 assemble_name (file, fnname);
9050 fprintf (file, "\n");
9051 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9052 fprintf (asm_out_file, "\t.set\tat\n");
9053
9054 /* Unfortunately, we can't fill the jump delay slot. We can't fill it
9055 with one of the mfc1 instructions, because the result is not
9056 available for one instruction, so if the very first instruction
9057 in the function refers to the register, it will see the wrong
9058 value. */
9059 fprintf (file, "\tnop\n");
9060
9061 fprintf (file, "\t.set\treorder\n");
9062
9063 if (!FUNCTION_NAME_ALREADY_DECLARED)
9064 {
9065 fputs ("\t.end\t", file);
9066 assemble_name (file, stubname);
9067 fputs ("\n", file);
9068 }
9069
9070 fprintf (file, "\t.set\tmips16\n");
9071
9072 switch_to_section (function_section (current_function_decl));
9073 }
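/* Sketch of the output of build_mips16_function_stub for a mips16
   function "foo" that takes a single float under o32 (hypothetical
   listing; the exact registers come from mips_arg_info):

	# Stub function for foo (float)
	.set	nomips16
   __fn_stub_foo:
	.set	noreorder
	mfc1	$4,$f12		# move the float argument into $4
	.set	noat
	la	$1,foo
	jr	$1
	.set	at
	nop			# unfilled jump delay slot, as noted above
	.set	reorder
	.set	mips16
 */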
9074
9075 /* We keep a list of functions for which we have already built stubs
9076 in build_mips16_call_stub. */
9077
9078 struct mips16_stub
9079 {
9080 struct mips16_stub *next;
9081 char *name;
9082 int fpret;
9083 };
9084
9085 static struct mips16_stub *mips16_stubs;
9086
9087 /* Emit code to return a double value from a mips16 stub. GPREG is the
9088 first GP reg to use, FPREG is the first FP reg to use. */
9089
9090 static void
9091 mips16_fpret_double (int gpreg, int fpreg)
9092 {
9093 if (TARGET_64BIT)
9094 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9095 reg_names[gpreg], reg_names[fpreg]);
9096 else if (TARGET_FLOAT64)
9097 {
9098 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9099 reg_names[gpreg + WORDS_BIG_ENDIAN],
9100 reg_names[fpreg]);
9101 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9102 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9103 reg_names[fpreg]);
9104 }
9105 else
9106 {
9107 if (TARGET_BIG_ENDIAN)
9108 {
9109 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9110 reg_names[gpreg + 0],
9111 reg_names[fpreg + 1]);
9112 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9113 reg_names[gpreg + 1],
9114 reg_names[fpreg + 0]);
9115 }
9116 else
9117 {
9118 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9119 reg_names[gpreg + 0],
9120 reg_names[fpreg + 0]);
9121 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9122 reg_names[gpreg + 1],
9123 reg_names[fpreg + 1]);
9124 }
9125 }
9126 }
9127
9128 /* Build a call stub for a mips16 call. A stub is needed if we are
9129 passing any floating point values which should go into the floating
9130 point registers. If we are, and the call turns out to be to a
9131 32-bit function, the stub will be used to move the values into the
9132 floating point registers before calling the 32-bit function. The
9133 linker will magically adjust the function call to either the 16-bit
9134 function or the 32-bit stub, depending upon where the called function
9135 is actually defined.
9136
9137 Similarly, we need a stub if the return value might come back in a
9138 floating point register.
9139
9140 RETVAL is the location of the return value, or null if this is
9141 a call rather than a call_value. FN is the address of the
9142 function and ARG_SIZE is the size of the arguments. FP_CODE
9143 is the code built by function_arg. This function returns a nonzero
9144 value if it builds the call instruction itself. */
9145
9146 int
9147 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9148 {
9149 int fpret = 0;
9150 const char *fnname;
9151 char *secname, *stubname;
9152 struct mips16_stub *l;
9153 tree stubid, stubdecl;
9154 int need_comma;
9155 unsigned int f;
9156
9157 /* We don't need to do anything if we aren't in mips16 mode, or if
9158 we were invoked with the -msoft-float option. */
9159 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9160 return 0;
9161
9162 /* Figure out whether the value might come back in a floating point
9163 register. */
9164 if (retval)
9165 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9166
9167 /* We don't need to do anything if there were no floating point
9168 arguments and the value will not be returned in a floating point
9169 register. */
9170 if (fp_code == 0 && ! fpret)
9171 return 0;
9172
9173 /* We don't need to do anything if this is a call to a special
9174 mips16 support function. */
9175 if (GET_CODE (fn) == SYMBOL_REF
9176 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9177 return 0;
9178
9179 /* This code will only work for the o32 and o64 ABIs. The other ABIs
9180 require more sophisticated support. */
9181 gcc_assert (TARGET_OLDABI);
9182
9183 /* If we're calling via a function pointer, then we must always call
9184 via a stub. There are magic stubs provided in libgcc.a for each
9185 of the required cases. Each of them expects the function address
9186 to arrive in register $2. */
9187
9188 if (GET_CODE (fn) != SYMBOL_REF)
9189 {
9190 char buf[30];
9191 tree id;
9192 rtx stub_fn, insn;
9193
9194 /* ??? If this code is modified to support other ABIs, we need
9195 to handle PARALLEL return values here. */
9196
9197 if (fpret)
9198 sprintf (buf, "__mips16_call_stub_%s_%d",
9199 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9200 fp_code);
9201 else
9202 sprintf (buf, "__mips16_call_stub_%d",
9203 fp_code);
9204
9205 id = get_identifier (buf);
9206 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9207
9208 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9209
9210 if (retval == NULL_RTX)
9211 insn = gen_call_internal (stub_fn, arg_size);
9212 else
9213 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9214 insn = emit_call_insn (insn);
9215
9216 /* Put the register usage information on the CALL. */
9217 CALL_INSN_FUNCTION_USAGE (insn) =
9218 gen_rtx_EXPR_LIST (VOIDmode,
9219 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9220 CALL_INSN_FUNCTION_USAGE (insn));
9221
9222 /* If we are handling a floating point return value, we need to
9223 save $18 in the function prologue. Putting a note on the
9224 call will mean that df_regs_ever_live_p ($18) will be true if the
9225 call is not eliminated, and we can check that in the prologue
9226 code. */
9227 if (fpret)
9228 CALL_INSN_FUNCTION_USAGE (insn) =
9229 gen_rtx_EXPR_LIST (VOIDmode,
9230 gen_rtx_USE (VOIDmode,
9231 gen_rtx_REG (word_mode, 18)),
9232 CALL_INSN_FUNCTION_USAGE (insn));
9233
9234 /* Return 1 to tell the caller that we've generated the call
9235 insn. */
9236 return 1;
9237 }
9238
9239 /* We know the function we are going to call. If we have already
9240 built a stub, we don't need to do anything further. */
9241
9242 fnname = XSTR (fn, 0);
9243 for (l = mips16_stubs; l != NULL; l = l->next)
9244 if (strcmp (l->name, fnname) == 0)
9245 break;
9246
9247 if (l == NULL)
9248 {
9249 /* Build a special purpose stub. When the linker sees a
9250 function call in mips16 code, it will check where the target
9251 is defined. If the target is 32-bit code, the linker will
9252 search for the section defined here. It can tell which
9253 symbol this section is associated with by looking at the
9254 relocation information (the name is unreliable, since this
9255 might be a static function). If such a section is found, the
9256 linker will redirect the call to the start of the magic
9257 section.
9258
9259 If the function does not return a floating point value, the
9260 special stub section is named
9261 .mips16.call.FNNAME
9262
9263 If the function does return a floating point value, the stub
9264 section is named
9265 .mips16.call.fp.FNNAME
9266 */
9267
9268 secname = (char *) alloca (strlen (fnname) + 40);
9269 sprintf (secname, ".mips16.call.%s%s",
9270 fpret ? "fp." : "",
9271 fnname);
9272 stubname = (char *) alloca (strlen (fnname) + 20);
9273 sprintf (stubname, "__call_stub_%s%s",
9274 fpret ? "fp_" : "",
9275 fnname);
9276 stubid = get_identifier (stubname);
9277 stubdecl = build_decl (FUNCTION_DECL, stubid,
9278 build_function_type (void_type_node, NULL_TREE));
9279 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9280 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9281
9282 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9283 (fpret
9284 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9285 : ""),
9286 fnname);
9287 need_comma = 0;
9288 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9289 {
9290 fprintf (asm_out_file, "%s%s",
9291 need_comma ? ", " : "",
9292 (f & 3) == 1 ? "float" : "double");
9293 need_comma = 1;
9294 }
9295 fprintf (asm_out_file, ")\n");
9296
9297 fprintf (asm_out_file, "\t.set\tnomips16\n");
9298 assemble_start_function (stubdecl, stubname);
9299
9300 if (!FUNCTION_NAME_ALREADY_DECLARED)
9301 {
9302 fputs ("\t.ent\t", asm_out_file);
9303 assemble_name (asm_out_file, stubname);
9304 fputs ("\n", asm_out_file);
9305
9306 assemble_name (asm_out_file, stubname);
9307 fputs (":\n", asm_out_file);
9308 }
9309
9310 /* We build the stub code by hand. That's the only way we can
9311 do it, since we can't generate 32-bit code during a 16-bit
9312 compilation. */
9313
9314 /* We don't want the assembler to insert any nops here. */
9315 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9316
9317 mips16_fp_args (asm_out_file, fp_code, 0);
9318
9319 if (! fpret)
9320 {
9321 fprintf (asm_out_file, "\t.set\tnoat\n");
9322 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9323 fnname);
9324 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9325 fprintf (asm_out_file, "\t.set\tat\n");
9326 /* Unfortunately, we can't fill the jump delay slot. We
9327 can't fill it with one of the mtc1 instructions, because the
9328 result is not available for one instruction, so if the
9329 very first instruction in the function refers to the
9330 register, it will see the wrong value. */
9331 fprintf (asm_out_file, "\tnop\n");
9332 }
9333 else
9334 {
9335 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9336 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9337 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9338 /* As above, we can't fill the delay slot. */
9339 fprintf (asm_out_file, "\tnop\n");
9340 if (GET_MODE (retval) == SFmode)
9341 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9342 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9343 else if (GET_MODE (retval) == SCmode)
9344 {
9345 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9346 reg_names[GP_REG_FIRST + 2],
9347 reg_names[FP_REG_FIRST + 0]);
9348 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9349 reg_names[GP_REG_FIRST + 3],
9350 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9351 }
9352 else if (GET_MODE (retval) == DFmode
9353 || GET_MODE (retval) == V2SFmode)
9354 {
9355 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9356 }
9357 else if (GET_MODE (retval) == DCmode)
9358 {
9359 mips16_fpret_double (GP_REG_FIRST + 2,
9360 FP_REG_FIRST + 0);
9361 mips16_fpret_double (GP_REG_FIRST + 4,
9362 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9363 }
9364 else
9365 {
9366 if (TARGET_BIG_ENDIAN)
9367 {
9368 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9369 reg_names[GP_REG_FIRST + 2],
9370 reg_names[FP_REG_FIRST + 1]);
9371 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9372 reg_names[GP_REG_FIRST + 3],
9373 reg_names[FP_REG_FIRST + 0]);
9374 }
9375 else
9376 {
9377 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9378 reg_names[GP_REG_FIRST + 2],
9379 reg_names[FP_REG_FIRST + 0]);
9380 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9381 reg_names[GP_REG_FIRST + 3],
9382 reg_names[FP_REG_FIRST + 1]);
9383 }
9384 }
9385 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9386 /* As above, we can't fill the delay slot. */
9387 fprintf (asm_out_file, "\tnop\n");
9388 }
9389
9390 fprintf (asm_out_file, "\t.set\treorder\n");
9391
9392 #ifdef ASM_DECLARE_FUNCTION_SIZE
9393 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9394 #endif
9395
9396 if (!FUNCTION_NAME_ALREADY_DECLARED)
9397 {
9398 fputs ("\t.end\t", asm_out_file);
9399 assemble_name (asm_out_file, stubname);
9400 fputs ("\n", asm_out_file);
9401 }
9402
9403 fprintf (asm_out_file, "\t.set\tmips16\n");
9404
9405 /* Record this stub. */
9406 l = (struct mips16_stub *) xmalloc (sizeof *l);
9407 l->name = xstrdup (fnname);
9408 l->fpret = fpret;
9409 l->next = mips16_stubs;
9410 mips16_stubs = l;
9411 }
9412
9413 /* If we expect a floating point return value, but we've built a
9414 stub which does not expect one, then we're in trouble. We can't
9415 use the existing stub, because it won't handle the floating point
9416 value. We can't build a new stub, because the linker won't know
9417 which stub to use for the various calls in this object file.
9418 Fortunately, this case is illegal, since it means that a function
9419 was declared in two different ways in a single compilation. */
9420 if (fpret && ! l->fpret)
9421 error ("cannot handle inconsistent calls to %qs", fnname);
9422
9423 /* If we are calling a stub which handles a floating point return
9424 value, we need to arrange to save $18 in the prologue. We do
9425 this by marking the function call as using the register. The
9426 prologue will later see that it is used, and emit code to save
9427 it. */
9428
9429 if (l->fpret)
9430 {
9431 rtx insn;
9432
9433 if (retval == NULL_RTX)
9434 insn = gen_call_internal (fn, arg_size);
9435 else
9436 insn = gen_call_value_internal (retval, fn, arg_size);
9437 insn = emit_call_insn (insn);
9438
9439 CALL_INSN_FUNCTION_USAGE (insn) =
9440 gen_rtx_EXPR_LIST (VOIDmode,
9441 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9442 CALL_INSN_FUNCTION_USAGE (insn));
9443
9444 /* Return 1 to tell the caller that we've generated the call
9445 insn. */
9446 return 1;
9447 }
9448
9449 /* Return 0 to let the caller generate the call insn. */
9450 return 0;
9451 }
9452
9453 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9454 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9455
9456 struct mips16_constant {
9457 struct mips16_constant *next;
9458 rtx value;
9459 rtx label;
9460 enum machine_mode mode;
9461 };
9462
9463 /* Information about an incomplete mips16 constant pool. FIRST is the
9464 first constant, HIGHEST_ADDRESS is the highest address that the first
9465 byte of the pool can have, and INSN_ADDRESS is the current instruction
9466 address. */
9467
9468 struct mips16_constant_pool {
9469 struct mips16_constant *first;
9470 int highest_address;
9471 int insn_address;
9472 };
9473
9474 /* Add constant VALUE to POOL and return its label. MODE is the
9475 value's mode (used for CONST_INTs, etc.). */
9476
9477 static rtx
9478 add_constant (struct mips16_constant_pool *pool,
9479 rtx value, enum machine_mode mode)
9480 {
9481 struct mips16_constant **p, *c;
9482 bool first_of_size_p;
9483
9484 /* See whether the constant is already in the pool. If so, return the
9485 existing label, otherwise leave P pointing to the place where the
9486 constant should be added.
9487
9488 Keep the pool sorted in increasing order of mode size so that we can
9489 reduce the number of alignments needed. */
9490 first_of_size_p = true;
9491 for (p = &pool->first; *p != 0; p = &(*p)->next)
9492 {
9493 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9494 return (*p)->label;
9495 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9496 break;
9497 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9498 first_of_size_p = false;
9499 }
9500
9501 /* In the worst case, the constant needed by the earliest instruction
9502 will end up at the end of the pool. The entire pool must then be
9503 accessible from that instruction.
9504
9505 When adding the first constant, set the pool's highest address to
9506 the address of the first out-of-range byte. Adjust this address
9507 downwards each time a new constant is added. */
9508 if (pool->first == 0)
9509 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9510 is the address of the instruction with the lowest two bits clear.
9511 The base PC value for ld has the lowest three bits clear. Assume
9512 the worst case here. */
9513 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9514 pool->highest_address -= GET_MODE_SIZE (mode);
9515 if (first_of_size_p)
9516 /* Take into account the worst possible padding due to alignment. */
9517 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9518
9519 /* Create a new entry. */
9520 c = (struct mips16_constant *) xmalloc (sizeof *c);
9521 c->value = value;
9522 c->mode = mode;
9523 c->label = gen_label_rtx ();
9524 c->next = *p;
9525 *p = c;
9526
9527 return c->label;
9528 }
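/* Worked example of the bookkeeping in add_constant (illustrative,
   assuming UNITS_PER_WORD == 4): with an empty pool and
   pool->insn_address == 0x100, the first constant sets
   highest_address = 0x100 - 2 + 0x8000 = 0x80fe; adding an SImode
   constant then subtracts 4 for the data and a further 3 for worst-case
   alignment padding, leaving 0x80f7.  */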
9529
9530 /* Output constant VALUE after instruction INSN and return the last
9531 instruction emitted. MODE is the mode of the constant. */
9532
9533 static rtx
9534 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9535 {
9536 switch (GET_MODE_CLASS (mode))
9537 {
9538 case MODE_INT:
9539 {
9540 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9541 return emit_insn_after (gen_consttable_int (value, size), insn);
9542 }
9543
9544 case MODE_FLOAT:
9545 return emit_insn_after (gen_consttable_float (value), insn);
9546
9547 case MODE_VECTOR_FLOAT:
9548 case MODE_VECTOR_INT:
9549 {
9550 int i;
9551 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9552 insn = dump_constants_1 (GET_MODE_INNER (mode),
9553 CONST_VECTOR_ELT (value, i), insn);
9554 return insn;
9555 }
9556
9557 default:
9558 gcc_unreachable ();
9559 }
9560 }
9561
9562
9563 /* Dump out the constants in CONSTANTS after INSN. */
9564
9565 static void
9566 dump_constants (struct mips16_constant *constants, rtx insn)
9567 {
9568 struct mips16_constant *c, *next;
9569 int align;
9570
9571 align = 0;
9572 for (c = constants; c != NULL; c = next)
9573 {
9574 /* If necessary, increase the alignment of PC. */
9575 if (align < GET_MODE_SIZE (c->mode))
9576 {
9577 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9578 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9579 }
9580 align = GET_MODE_SIZE (c->mode);
9581
9582 insn = emit_label_after (c->label, insn);
9583 insn = dump_constants_1 (c->mode, c->value, insn);
9584
9585 next = c->next;
9586 free (c);
9587 }
9588
9589 emit_barrier_after (insn);
9590 }
9591
9592 /* Return the length of instruction INSN. */
9593
9594 static int
9595 mips16_insn_length (rtx insn)
9596 {
9597 if (JUMP_P (insn))
9598 {
9599 rtx body = PATTERN (insn);
9600 if (GET_CODE (body) == ADDR_VEC)
9601 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9602 if (GET_CODE (body) == ADDR_DIFF_VEC)
9603 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
9604 }
9605 return get_attr_length (insn);
9606 }
9607
9608 /* Rewrite *X so that constant pool references refer to the constant's
9609 label instead. DATA points to the constant pool structure. */
9610
9611 static int
9612 mips16_rewrite_pool_refs (rtx *x, void *data)
9613 {
9614 struct mips16_constant_pool *pool = data;
9615 rtx base, offset, label;
9616
9617 if (MEM_P (*x))
9618 x = &XEXP (*x, 0);
9619 else if (!TARGET_MIPS16_TEXT_LOADS)
9620 return 0;
9621
9622 split_const (*x, &base, &offset);
9623 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
9624 {
9625 label = add_constant (pool, get_pool_constant (base),
9626 get_pool_mode (base));
9627 base = gen_rtx_LABEL_REF (Pmode, label);
9628 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
9629 return -1;
9630 }
9631 return GET_CODE (*x) == CONST ? -1 : 0;
9632 }
9633
9634 /* Build MIPS16 constant pools. */
9635
9636 static void
9637 mips16_lay_out_constants (void)
9638 {
9639 struct mips16_constant_pool pool;
9640 rtx insn, barrier;
9641
9642 if (!TARGET_MIPS16_PCREL_LOADS)
9643 return;
9644
9645 barrier = 0;
9646 memset (&pool, 0, sizeof (pool));
9647 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9648 {
9649 /* Rewrite constant pool references in INSN. */
9650 if (INSN_P (insn))
9651 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
9652
9653 pool.insn_address += mips16_insn_length (insn);
9654
9655 if (pool.first != NULL)
9656 {
9657 /* If there are no natural barriers between the first user of
9658 the pool and the highest acceptable address, we'll need to
9659 create a new instruction to jump around the constant pool.
9660 In the worst case, this instruction will be 4 bytes long.
9661
9662 If it's too late to do this transformation after INSN,
9663 do it immediately before INSN. */
9664 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9665 {
9666 rtx label, jump;
9667
9668 label = gen_label_rtx ();
9669
9670 jump = emit_jump_insn_before (gen_jump (label), insn);
9671 JUMP_LABEL (jump) = label;
9672 LABEL_NUSES (label) = 1;
9673 barrier = emit_barrier_after (jump);
9674
9675 emit_label_after (label, barrier);
9676 pool.insn_address += 4;
9677 }
9678
9679 /* See whether the constant pool is now out of range of the first
9680 user. If so, output the constants after the previous barrier.
9681 Note that any instructions between BARRIER and INSN (inclusive)
9682 will use negative offsets to refer to the pool. */
9683 if (pool.insn_address > pool.highest_address)
9684 {
9685 dump_constants (pool.first, barrier);
9686 pool.first = NULL;
9687 barrier = 0;
9688 }
9689 else if (BARRIER_P (insn))
9690 barrier = insn;
9691 }
9692 }
9693 dump_constants (pool.first, get_last_insn ());
9694 }
9695 \f
9696 /* A temporary variable used by for_each_rtx callbacks, etc. */
9697 static rtx mips_sim_insn;
9698
9699 /* A structure representing the state of the processor pipeline.
9700 Used by the mips_sim_* family of functions. */
9701 struct mips_sim {
9702 /* The maximum number of instructions that can be issued in a cycle.
9703 (Caches mips_issue_rate.) */
9704 unsigned int issue_rate;
9705
9706 /* The current simulation time. */
9707 unsigned int time;
9708
9709 /* How many more instructions can be issued in the current cycle. */
9710 unsigned int insns_left;
9711
9712 /* LAST_SET[X].INSN is the last instruction to set register X.
9713 LAST_SET[X].TIME is the time at which that instruction was issued.
9714 INSN is null if no instruction has yet set register X. */
9715 struct {
9716 rtx insn;
9717 unsigned int time;
9718 } last_set[FIRST_PSEUDO_REGISTER];
9719
9720 /* The pipeline's current DFA state. */
9721 state_t dfa_state;
9722 };
9723
9724 /* Reset STATE to the initial simulation state. */
9725
9726 static void
9727 mips_sim_reset (struct mips_sim *state)
9728 {
9729 state->time = 0;
9730 state->insns_left = state->issue_rate;
9731 memset (&state->last_set, 0, sizeof (state->last_set));
9732 state_reset (state->dfa_state);
9733 }
9734
9735 /* Initialize STATE before its first use. DFA_STATE points to an
9736 allocated but uninitialized DFA state. */
9737
9738 static void
9739 mips_sim_init (struct mips_sim *state, state_t dfa_state)
9740 {
9741 state->issue_rate = mips_issue_rate ();
9742 state->dfa_state = dfa_state;
9743 mips_sim_reset (state);
9744 }
9745
9746 /* Advance STATE by one clock cycle. */
9747
9748 static void
9749 mips_sim_next_cycle (struct mips_sim *state)
9750 {
9751 state->time++;
9752 state->insns_left = state->issue_rate;
9753 state_transition (state->dfa_state, 0);
9754 }
9755
9756 /* Advance simulation state STATE until instruction INSN can read
9757 register REG. */
9758
9759 static void
9760 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
9761 {
9762 unsigned int i;
9763
9764 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9765 if (state->last_set[REGNO (reg) + i].insn != 0)
9766 {
9767 unsigned int t;
9768
9769 t = state->last_set[REGNO (reg) + i].time;
9770 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9771 while (state->time < t)
9772 mips_sim_next_cycle (state);
9773 }
9774 }
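/* For example (illustrative): if $4 was last set at time 10 by an
   instruction whose latency relative to INSN is 3 cycles,
   mips_sim_wait_reg calls mips_sim_next_cycle until state->time reaches
   13, at which point INSN can read $4 without stalling.  */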
9775
9776 /* A for_each_rtx callback. If *X is a register, advance simulation state
9777 DATA until mips_sim_insn can read the register's value. */
9778
9779 static int
9780 mips_sim_wait_regs_2 (rtx *x, void *data)
9781 {
9782 if (REG_P (*x))
9783 mips_sim_wait_reg (data, mips_sim_insn, *x);
9784 return 0;
9785 }
9786
9787 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
9788
9789 static void
9790 mips_sim_wait_regs_1 (rtx *x, void *data)
9791 {
9792 for_each_rtx (x, mips_sim_wait_regs_2, data);
9793 }
9794
9795 /* Advance simulation state STATE until all of INSN's register
9796 dependencies are satisfied. */
9797
9798 static void
9799 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
9800 {
9801 mips_sim_insn = insn;
9802 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9803 }
9804
9805 /* Advance simulation state STATE until the units required by
9806 instruction INSN are available. */
9807
9808 static void
9809 mips_sim_wait_units (struct mips_sim *state, rtx insn)
9810 {
9811 state_t tmp_state;
9812
9813 tmp_state = alloca (state_size ());
9814 while (state->insns_left == 0
9815 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9816 state_transition (tmp_state, insn) >= 0))
9817 mips_sim_next_cycle (state);
9818 }
9819
9820 /* Advance simulation state STATE until INSN is ready to issue. */
9821
9822 static void
9823 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
9824 {
9825 mips_sim_wait_regs (state, insn);
9826 mips_sim_wait_units (state, insn);
9827 }
9828
9829 /* mips_sim_insn has just set X. Update the LAST_SET array
9830 in simulation state DATA. */
9831
9832 static void
9833 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9834 {
9835 struct mips_sim *state;
9836 unsigned int i;
9837
9838 state = data;
9839 if (REG_P (x))
9840 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
9841 {
9842 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
9843 state->last_set[REGNO (x) + i].time = state->time;
9844 }
9845 }
9846
9847 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
9848 can issue immediately (i.e., that mips_sim_wait_insn has already
9849 been called). */
9850
9851 static void
9852 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
9853 {
9854 state_transition (state->dfa_state, insn);
9855 state->insns_left--;
9856
9857 mips_sim_insn = insn;
9858 note_stores (PATTERN (insn), mips_sim_record_set, state);
9859 }
9860
9861 /* Simulate issuing a NOP in state STATE. */
9862
9863 static void
9864 mips_sim_issue_nop (struct mips_sim *state)
9865 {
9866 if (state->insns_left == 0)
9867 mips_sim_next_cycle (state);
9868 state->insns_left--;
9869 }
9870
9871 /* Update simulation state STATE so that it's ready to accept the instruction
9872 after INSN. INSN should be part of the main rtl chain, not a member of a
9873 SEQUENCE. */
9874
9875 static void
9876 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
9877 {
9878 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
9879 if (JUMP_P (insn))
9880 mips_sim_issue_nop (state);
9881
9882 switch (GET_CODE (SEQ_BEGIN (insn)))
9883 {
9884 case CODE_LABEL:
9885 case CALL_INSN:
9886 /* We can't predict the processor state after a call or label. */
9887 mips_sim_reset (state);
9888 break;
9889
9890 case JUMP_INSN:
9891 /* The delay slots of branch likely instructions are only executed
9892 when the branch is taken. Therefore, if the caller has simulated
9893 the delay slot instruction, STATE does not really reflect the state
9894 of the pipeline for the instruction after the delay slot. Also,
9895 branch likely instructions tend to incur a penalty when not taken,
9896 so there will probably be an extra delay between the branch and
9897 the instruction after the delay slot. */
9898 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
9899 mips_sim_reset (state);
9900 break;
9901
9902 default:
9903 break;
9904 }
9905 }
9906 \f
9907 /* The VR4130 pipeline issues aligned pairs of instructions together,
9908 but it stalls the second instruction if it depends on the first.
9909 In order to cut down the amount of logic required, this dependence
9910 check is not based on a full instruction decode. Instead, any non-SPECIAL
9911 instruction is assumed to modify the register specified by bits 20-16
9912 (which is usually the "rt" field).
9913
9914 In beq, beql, bne and bnel instructions, the rt field is actually an
9915 input, so we can end up with a false dependence between the branch
9916 and its delay slot. If this situation occurs in instruction INSN,
9917 try to avoid it by swapping rs and rt. */
9918
9919 static void
9920 vr4130_avoid_branch_rt_conflict (rtx insn)
9921 {
9922 rtx first, second;
9923
9924 first = SEQ_BEGIN (insn);
9925 second = SEQ_END (insn);
9926 if (JUMP_P (first)
9927 && NONJUMP_INSN_P (second)
9928 && GET_CODE (PATTERN (first)) == SET
9929 && GET_CODE (SET_DEST (PATTERN (first))) == PC
9930 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
9931 {
9932 /* Check for the right kind of condition. */
9933 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
9934 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
9935 && REG_P (XEXP (cond, 0))
9936 && REG_P (XEXP (cond, 1))
9937 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
9938 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
9939 {
9940 /* SECOND mentions the rt register but not the rs register. */
9941 rtx tmp = XEXP (cond, 0);
9942 XEXP (cond, 0) = XEXP (cond, 1);
9943 XEXP (cond, 1) = tmp;
9944 }
9945 }
9946 }
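/* Illustrative example for vr4130_avoid_branch_rt_conflict: given

	beq	$4,$5,target
	 addu	$6,$5,$7	# delay slot reads rt ($5) but not rs ($4)

   the condition is rewritten to the equivalent "beq $5,$4,target", so
   the VR4130 no longer sees a false dependence between the branch's rt
   field and its delay slot.  (Hypothetical registers; EQ and NE are
   commutative, so the swap is safe.)  */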
9947
9948 /* Implement -mvr4130-align. Go through each basic block and simulate the
9949 processor pipeline. If we find that a pair of instructions could execute
9950 in parallel, and the first of those instruction is not 8-byte aligned,
9951 insert a nop to make it aligned. */
9952
9953 static void
9954 vr4130_align_insns (void)
9955 {
9956 struct mips_sim state;
9957 rtx insn, subinsn, last, last2, next;
9958 bool aligned_p;
9959
9960 dfa_start ();
9961
9962 /* LAST is the last instruction before INSN to have a nonzero length.
9963 LAST2 is the last such instruction before LAST. */
9964 last = 0;
9965 last2 = 0;
9966
9967 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
9968 aligned_p = true;
9969
9970 mips_sim_init (&state, alloca (state_size ()));
9971 for (insn = get_insns (); insn != 0; insn = next)
9972 {
9973 unsigned int length;
9974
9975 next = NEXT_INSN (insn);
9976
9977 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9978 This isn't really related to the alignment pass, but we do it on
9979 the fly to avoid a separate instruction walk. */
9980 vr4130_avoid_branch_rt_conflict (insn);
9981
9982 if (USEFUL_INSN_P (insn))
9983 FOR_EACH_SUBINSN (subinsn, insn)
9984 {
9985 mips_sim_wait_insn (&state, subinsn);
9986
9987 /* If we want this instruction to issue in parallel with the
9988 previous one, make sure that the previous instruction is
9989 aligned. There are several reasons why this isn't worthwhile
9990 when the second instruction is a call:
9991
9992 - Calls are less likely to be performance critical.
9993 - There's a good chance that the delay slot can execute
9994 in parallel with the call.
9995 - The return address would then be unaligned.
9996
9997 In general, if we're going to insert a nop between instructions
9998 X and Y, it's better to insert it immediately after X. That
9999 way, if the nop makes Y aligned, it will also align any labels
10000 between X and Y. */
10001 if (state.insns_left != state.issue_rate
10002 && !CALL_P (subinsn))
10003 {
10004 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10005 {
10006 /* SUBINSN is the first instruction in INSN and INSN is
10007 aligned. We want to align the previous instruction
10008 instead, so insert a nop between LAST2 and LAST.
10009
10010 Note that LAST could be either a single instruction
10011 or a branch with a delay slot. In the latter case,
10012 LAST, like INSN, is already aligned, but the delay
10013 slot must have some extra delay that stops it from
10014 issuing at the same time as the branch. We therefore
10015 insert a nop before the branch in order to align its
10016 delay slot. */
10017 emit_insn_after (gen_nop (), last2);
10018 aligned_p = false;
10019 }
10020 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10021 {
10022 /* SUBINSN is the delay slot of INSN, but INSN is
10023 currently unaligned. Insert a nop between
10024 LAST and INSN to align it. */
10025 emit_insn_after (gen_nop (), last);
10026 aligned_p = true;
10027 }
10028 }
10029 mips_sim_issue_insn (&state, subinsn);
10030 }
10031 mips_sim_finish_insn (&state, insn);
10032
10033 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10034 length = get_attr_length (insn);
10035 if (length > 0)
10036 {
10037 /* If the instruction is an asm statement or multi-instruction
10038 mips.md pattern, the length is only an estimate. Insert an
10039 8-byte alignment after it so that the following instructions
10040 can be handled correctly. */
10041 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10042 && (recog_memoized (insn) < 0 || length >= 8))
10043 {
10044 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10045 next = NEXT_INSN (next);
10046 mips_sim_next_cycle (&state);
10047 aligned_p = true;
10048 }
10049 else if (length & 4)
10050 aligned_p = !aligned_p;
10051 last2 = last;
10052 last = insn;
10053 }
10054
10055 /* See whether INSN is an aligned label. */
10056 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10057 aligned_p = true;
10058 }
10059 dfa_finish ();
10060 }
10061 \f
10062 /* Subroutine of mips_reorg. If there is a hazard between INSN
10063 and a previous instruction, avoid it by inserting nops after
10064 instruction AFTER.
10065
10066 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10067 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10068 before using the value of that register. *HILO_DELAY counts the
10069 number of instructions since the last hilo hazard (that is,
10070 the number of instructions since the last mflo or mfhi).
10071
10072 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10073 for the next instruction.
10074
10075 LO_REG is an rtx for the LO register, used in dependence checking. */
10076
10077 static void
10078 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10079 rtx *delayed_reg, rtx lo_reg)
10080 {
10081 rtx pattern, set;
10082 int nops, ninsns;
10083
10084 if (!INSN_P (insn))
10085 return;
10086
10087 pattern = PATTERN (insn);
10088
10089 /* Do not put the whole function in .set noreorder if it contains
10090 an asm statement. We don't know whether there will be hazards
10091 between the asm statement and the gcc-generated code. */
10092 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10093 cfun->machine->all_noreorder_p = false;
10094
10095 /* Ignore zero-length instructions (barriers and the like). */
10096 ninsns = get_attr_length (insn) / 4;
10097 if (ninsns == 0)
10098 return;
10099
10100 /* Work out how many nops are needed. Note that we only care about
10101 registers that are explicitly mentioned in the instruction's pattern.
10102 It doesn't matter that calls use the argument registers or that they
10103 clobber hi and lo. */
10104 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10105 nops = 2 - *hilo_delay;
10106 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10107 nops = 1;
10108 else
10109 nops = 0;
10110
10111 /* Insert the nops between this instruction and the previous one.
10112 Each new nop takes us further from the last hilo hazard. */
10113 *hilo_delay += nops;
10114 while (nops-- > 0)
10115 emit_insn_after (gen_hazard_nop (), after);
10116
10117 /* Set up the state for the next instruction. */
10118 *hilo_delay += ninsns;
10119 *delayed_reg = 0;
10120 if (INSN_CODE (insn) >= 0)
10121 switch (get_attr_hazard (insn))
10122 {
10123 case HAZARD_NONE:
10124 break;
10125
10126 case HAZARD_HILO:
10127 *hilo_delay = 0;
10128 break;
10129
10130 case HAZARD_DELAY:
10131 set = single_set (insn);
10132 gcc_assert (set != 0);
10133 *delayed_reg = SET_DEST (set);
10134 break;
10135 }
10136 }
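/* Illustrative example of the HILO case in mips_avoid_hazard: an mfhi
   or mflo (a HAZARD_HILO instruction, which resets *HILO_DELAY to 0)
   followed immediately by a multiply or divide that writes LO gets
   2 - 0 = 2 hazard nops inserted between the two; if one unrelated
   instruction already separates them, only a single nop is needed.  */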
10137
10138
10139 /* Go through the instruction stream and insert nops where necessary.
10140 See if the whole function can then be put into .set noreorder &
10141 .set nomacro. */
10142
10143 static void
10144 mips_avoid_hazards (void)
10145 {
10146 rtx insn, last_insn, lo_reg, delayed_reg;
10147 int hilo_delay, i;
10148
10149 /* Force all instructions to be split into their final form. */
10150 split_all_insns_noflow ();
10151
10152 /* Recalculate instruction lengths without taking nops into account. */
10153 cfun->machine->ignore_hazard_length_p = true;
10154 shorten_branches (get_insns ());
10155
10156 cfun->machine->all_noreorder_p = true;
10157
10158 /* Profiled functions can't be all noreorder because the profiler
10159 support uses assembler macros. */
10160 if (current_function_profile)
10161 cfun->machine->all_noreorder_p = false;
10162
10163 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10164 we rely on the assembler to work around some errata. */
10165 if (TARGET_FIX_VR4120)
10166 cfun->machine->all_noreorder_p = false;
10167
10168 /* The same is true for -mfix-vr4130 if we might generate mflo or
10169 mfhi instructions. Note that we avoid using mflo and mfhi if
10170 the VR4130 macc and dmacc instructions are available instead;
10171 see the *mfhilo_{si,di}_macc patterns. */
10172 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10173 cfun->machine->all_noreorder_p = false;
10174
10175 last_insn = 0;
10176 hilo_delay = 2;
10177 delayed_reg = 0;
10178 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10179
10180 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10181 if (INSN_P (insn))
10182 {
10183 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10184 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10185 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10186 &hilo_delay, &delayed_reg, lo_reg);
10187 else
10188 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10189 &delayed_reg, lo_reg);
10190
10191 last_insn = insn;
10192 }
10193 }
10194
10195
10196 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10197
10198 static void
10199 mips_reorg (void)
10200 {
10201 mips16_lay_out_constants ();
10202 if (TARGET_EXPLICIT_RELOCS)
10203 {
10204 if (mips_flag_delayed_branch)
10205 dbr_schedule (get_insns ());
10206 mips_avoid_hazards ();
10207 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10208 vr4130_align_insns ();
10209 }
10210 }
10211
10212 /* This function does three things:
10213
10214 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10215 - Register the mips16 hardware floating point stubs.
10216 - Register the gofast functions if selected using --enable-gofast. */
10217
10218 #include "config/gofast.h"
10219
10220 static void
10221 mips_init_libfuncs (void)
10222 {
10223 if (TARGET_FIX_VR4120)
10224 {
10225 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10226 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10227 }
10228
10229 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10230 {
10231 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10232 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10233 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10234 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10235
10236 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10237 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10238 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10239 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10240 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10241 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10242 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10243
10244 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10245 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10246 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10247
10248 if (TARGET_DOUBLE_FLOAT)
10249 {
10250 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10251 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10252 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10253 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10254
10255 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10256 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10257 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10258 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10259 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10260 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10261 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10262
10263 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10264 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10265
10266 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10267 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10268 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10269 }
10270 }
10271 else
10272 gofast_maybe_init_libfuncs ();
10273 }
10274
10275 /* Return a number assessing the cost of moving a register in class
10276 FROM to class TO. The classes are expressed using the enumeration
10277 values such as `GENERAL_REGS'. A value of 2 is the default; other
10278 values are interpreted relative to that.
10279
10280 It is not required that the cost always equal 2 when FROM is the
10281 same as TO; on some machines it is expensive to move between
10282 registers if they are not general registers.
10283
10284 If reload sees an insn consisting of a single `set' between two
10285 hard registers, and if `REGISTER_MOVE_COST' applied to their
10286 classes returns a value of 2, reload does not check to ensure that
10287 the constraints of the insn are met. Setting a cost of other than
10288 2 will allow reload to verify that the constraints are met. You
10289 should do this if the `movM' pattern's constraints do not allow
10290 such copying.
10291
10292 ??? We make the cost of moving from HI/LO into general
10293 registers the same as the cost of moving general registers to
10294 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10295 pseudo to HI/LO. This might hurt optimizations, though; it
10296 isn't clear whether it is wise. And it might not work in all cases. We
10297 could solve the DImode LO reg problem by using a multiply, just
10298 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10299 problem by using divide instructions. divu puts the remainder in
10300 the HI reg, so doing a divide by -1 will move the value in the HI
10301 reg for all values except -1. We could handle that case by using a
10302 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10303 a compare/branch to test the input value to see which instruction
10304 we need to use. This gets pretty messy, but it is feasible. */
10305
10306 int
10307 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10308 enum reg_class to, enum reg_class from)
10309 {
10310 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10311 return 2;
10312 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10313 return 2;
10314 else if (reg_class_subset_p (from, GENERAL_REGS))
10315 {
10316 if (to == M16_REGS)
10317 return 2;
10318 else if (to == M16_NA_REGS)
10319 return 2;
10320 else if (reg_class_subset_p (to, GENERAL_REGS))
10321 {
10322 if (TARGET_MIPS16)
10323 return 4;
10324 else
10325 return 2;
10326 }
10327 else if (to == FP_REGS)
10328 return 4;
10329 else if (reg_class_subset_p (to, ACC_REGS))
10330 {
10331 if (TARGET_MIPS16)
10332 return 12;
10333 else
10334 return 6;
10335 }
10336 else if (reg_class_subset_p (to, ALL_COP_REGS))
10337 {
10338 return 5;
10339 }
10340 }
10341 else if (from == FP_REGS)
10342 {
10343 if (reg_class_subset_p (to, GENERAL_REGS))
10344 return 4;
10345 else if (to == FP_REGS)
10346 return 2;
10347 else if (to == ST_REGS)
10348 return 8;
10349 }
10350 else if (reg_class_subset_p (from, ACC_REGS))
10351 {
10352 if (reg_class_subset_p (to, GENERAL_REGS))
10353 {
10354 if (TARGET_MIPS16)
10355 return 12;
10356 else
10357 return 6;
10358 }
10359 }
10360 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10361 return 4;
10362 else if (reg_class_subset_p (from, ALL_COP_REGS))
10363 {
10364 return 5;
10365 }
10366
10367 /* Fall through.
10368 ??? What cases are these? Shouldn't we return 2 here? */
10369
10370 return 12;
10371 }
10372
10373 /* Return the length of INSN. LENGTH is the initial length computed by
10374 attributes in the machine-description file. */
10375
10376 int
10377 mips_adjust_insn_length (rtx insn, int length)
10378 {
10379 /* An unconditional jump has an unfilled delay slot if it is not part
10380 of a sequence. A conditional jump normally has a delay slot, but
10381 does not on MIPS16. */
10382 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10383 length += 4;
10384
10385 /* See how many nops might be needed to avoid hardware hazards. */
10386 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10387 switch (get_attr_hazard (insn))
10388 {
10389 case HAZARD_NONE:
10390 break;
10391
10392 case HAZARD_DELAY:
10393 length += 4;
10394 break;
10395
10396 case HAZARD_HILO:
10397 length += 8;
10398 break;
10399 }
10400
10401 /* All MIPS16 instructions are a measly two bytes. */
10402 if (TARGET_MIPS16)
10403 length /= 2;
10404
10405 return length;
10406 }
10407
10408
10409 /* Return an asm sequence to start a noat block and load the address
10410 of a label into $1. */
10411
10412 const char *
10413 mips_output_load_label (void)
10414 {
10415 if (TARGET_EXPLICIT_RELOCS)
10416 switch (mips_abi)
10417 {
10418 case ABI_N32:
10419 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10420
10421 case ABI_64:
10422 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10423
10424 default:
10425 if (ISA_HAS_LOAD_DELAY)
10426 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10427 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10428 }
10429 else
10430 {
10431 if (Pmode == DImode)
10432 return "%[dla\t%@,%0";
10433 else
10434 return "%[la\t%@,%0";
10435 }
10436 }
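
/* A rough sketch of the expansion, assuming (as the operand-printing code
   suggests) that %[ opens a ".set noat" block, %@ prints $1 and %+ prints
   the global pointer register: for ABI_N32 and a hypothetical label $L23
   the template above becomes

       .set    noat
       lw      $1,%got_page($L23)($gp)
       addiu   $1,$1,%got_ofst($L23)

   and the caller's matching %] closes the noat block again.  */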
10437
10438 /* Return the assembly code for INSN, which has the operands given by
10439 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10440 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10441 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10442 version of BRANCH_IF_TRUE. */
10443
10444 const char *
10445 mips_output_conditional_branch (rtx insn, rtx *operands,
10446 const char *branch_if_true,
10447 const char *branch_if_false)
10448 {
10449 unsigned int length;
10450 rtx taken, not_taken;
10451
10452 length = get_attr_length (insn);
10453 if (length <= 8)
10454 {
10455 /* Just a simple conditional branch. */
10456 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10457 return branch_if_true;
10458 }
10459
10460 /* Generate a reversed branch around a direct jump. This fallback does
10461 not use branch-likely instructions. */
10462 mips_branch_likely = false;
10463 not_taken = gen_label_rtx ();
10464 taken = operands[1];
10465
10466 /* Generate the reversed branch to NOT_TAKEN. */
10467 operands[1] = not_taken;
10468 output_asm_insn (branch_if_false, operands);
10469
10470 /* If INSN has a delay slot, we must provide delay slots for both the
10471 branch to NOT_TAKEN and the conditional jump. We must also ensure
10472 that INSN's delay slot is executed in the appropriate cases. */
10473 if (final_sequence)
10474 {
10475 /* This first delay slot will always be executed, so use INSN's
10476 delay slot if it is not annulled. */
10477 if (!INSN_ANNULLED_BRANCH_P (insn))
10478 {
10479 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10480 asm_out_file, optimize, 1, NULL);
10481 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10482 }
10483 else
10484 output_asm_insn ("nop", 0);
10485 fprintf (asm_out_file, "\n");
10486 }
10487
10488 /* Output the unconditional branch to TAKEN. */
10489 if (length <= 16)
10490 output_asm_insn ("j\t%0%/", &taken);
10491 else
10492 {
10493 output_asm_insn (mips_output_load_label (), &taken);
10494 output_asm_insn ("jr\t%@%]%/", 0);
10495 }
10496
10497 /* Now deal with its delay slot; see above. */
10498 if (final_sequence)
10499 {
10500 /* This delay slot will only be executed if the branch is taken.
10501 Use INSN's delay slot if it is annulled. */
10502 if (INSN_ANNULLED_BRANCH_P (insn))
10503 {
10504 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10505 asm_out_file, optimize, 1, NULL);
10506 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10507 }
10508 else
10509 output_asm_insn ("nop", 0);
10510 fprintf (asm_out_file, "\n");
10511 }
10512
10513 /* Output NOT_TAKEN. */
10514 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10515 CODE_LABEL_NUMBER (not_taken));
10516 return "";
10517 }
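
/* For illustration, when the branch target is out of range the code above
   emits roughly the following (hypothetical operands; $L_far is the real
   target and $L_skip stands for the internally generated NOT_TAKEN label):

       bne     $4,$5,$L_skip     # reversed branch around the jump
       nop                       #   (or INSN's delay-slot instruction)
       j       $L_far            # or a load-label / "jr $1" sequence
       nop                       #   when even the jump cannot reach
   $L_skip:

   with INSN's original delay-slot instruction placed in whichever of the
   two slots matches its annulment condition.  */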
10518
10519 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10520 if some ordered condition is true. The condition is given by
10521 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10522 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10523 its second is always zero. */
10524
10525 const char *
10526 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10527 {
10528 const char *branch[2];
10529
10530 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10531 Make BRANCH[0] branch on the inverse condition. */
10532 switch (GET_CODE (operands[0]))
10533 {
10534 /* These cases are equivalent to comparisons against zero. */
10535 case LEU:
10536 inverted_p = !inverted_p;
10537 /* Fall through. */
10538 case GTU:
10539 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10540 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10541 break;
10542
10543 /* These cases are always true or always false. */
10544 case LTU:
10545 inverted_p = !inverted_p;
10546 /* Fall through. */
10547 case GEU:
10548 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10549 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10550 break;
10551
10552 default:
10553 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10554 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10555 break;
10556 }
10557 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10558 }
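
/* For example, an unsigned (gtu x 0) comparison is emitted as
   "bne <x>,$0,<label>" and (leu x 0) as "beq <x>,$0,<label>", while signed
   comparisons against zero fall through to the b<cond>z forms such as
   bgtz and blez.  */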
10559 \f
10560 /* Used to output div or ddiv instruction DIVISION, which has the operands
10561 given by OPERANDS. Add in a divide-by-zero check if needed.
10562
10563 When working around R4000 and R4400 errata, we need to make sure that
10564 the division is not immediately followed by a shift[1][2]. We also
10565 need to stop the division from being put into a branch delay slot[3].
10566 The easiest way to avoid both problems is to add a nop after the
10567 division. When a divide-by-zero check is needed, this nop can be
10568 used to fill the branch delay slot.
10569
10570 [1] If a double-word or a variable shift executes immediately
10571 after starting an integer division, the shift may give an
10572 incorrect result. See quotations of errata #16 and #28 from
10573 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10574 in mips.md for details.
10575
10576 [2] A similar bug to [1] exists for all revisions of the
10577 R4000 and the R4400 when run in an MC configuration.
10578 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10579
10580 "19. In this following sequence:
10581
10582 ddiv (or ddivu or div or divu)
10583 dsll32 (or dsrl32, dsra32)
10584
10585 if an MPT stall occurs, while the divide is slipping the cpu
10586 pipeline, then the following double shift would end up with an
10587 incorrect result.
10588
10589 Workaround: The compiler needs to avoid generating any
10590 sequence with divide followed by extended double shift."
10591
10592 This erratum is also present in "MIPS R4400MC Errata, Processor
10593 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10594 & 3.0" as errata #10 and #4, respectively.
10595
10596 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10597 (also valid for MIPS R4000MC processors):
10598
10599 "52. R4000SC: This bug does not apply for the R4000PC.
10600
10601 There are two flavors of this bug:
10602
10603 1) If the instruction just after divide takes an RF exception
10604 (tlb-refill, tlb-invalid) and gets an instruction cache
10605 miss (both primary and secondary) and the line which is
10606 currently in secondary cache at this index had the first
10607 data word, where the bits 5..2 are set, then R4000 would
10608 get a wrong result for the div.
10609
10610 ##1
10611 nop
10612 div r8, r9
10613 ------------------- # end-of page. -tlb-refill
10614 nop
10615 ##2
10616 nop
10617 div r8, r9
10618 ------------------- # end-of page. -tlb-invalid
10619 nop
10620
10621 2) If the divide is in the taken branch delay slot, where the
10622 target takes RF exception and gets an I-cache miss for the
10623 exception vector or where I-cache miss occurs for the
10624 target address, under the above mentioned scenarios, the
10625 div would get wrong results.
10626
10627 ##1
10628 j r2 # to next page mapped or unmapped
10629 div r8,r9 # this bug would be there as long
10630 # as there is an ICache miss and
10631 nop # the "data pattern" is present
10632
10633 ##2
10634 beq r0, r0, NextPage # to Next page
10635 div r8,r9
10636 nop
10637
10638 This bug is present for div, divu, ddiv, and ddivu
10639 instructions.
10640
10641 Workaround: For item 1), OS could make sure that the next page
10642 after the divide instruction is also mapped. For item 2), the
10643 compiler could make sure that the divide instruction is not in
10644 the branch delay slot."
10645
10646 These processors have PRId values of 0x00004220 and 0x00004300 for
10647 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10648
10649 const char *
10650 mips_output_division (const char *division, rtx *operands)
10651 {
10652 const char *s;
10653
10654 s = division;
10655 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10656 {
10657 output_asm_insn (s, operands);
10658 s = "nop";
10659 }
10660 if (TARGET_CHECK_ZERO_DIV)
10661 {
10662 if (TARGET_MIPS16)
10663 {
10664 output_asm_insn (s, operands);
10665 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10666 }
10667 else if (GENERATE_DIVIDE_TRAPS)
10668 {
10669 output_asm_insn (s, operands);
10670 s = "teq\t%2,%.,7";
10671 }
10672 else
10673 {
10674 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10675 output_asm_insn (s, operands);
10676 s = "break\t7%)\n1:";
10677 }
10678 }
10679 return s;
10680 }
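
/* For illustration only, a sketch of the sequences the function above can
   emit, using hypothetical operands $4 (dividend) and $5 (divisor):

     - with GENERATE_DIVIDE_TRAPS, the division is followed by
       "teq $5,$0,7", so a zero divisor traps with code 7;

     - otherwise the division is protected by an explicit branch:

           bne     $5,$0,1f
           div     $0,$4,$5        # division in the branch delay slot
           break   7
       1:

     - the R4000/R4400 workarounds emit a nop after the division, and that
       nop (rather than the division) then fills the delay slot above.  */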
10681 \f
10682 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10683 with a final "000" replaced by "k". Ignore case.
10684
10685 Note: this function is shared between GCC and GAS. */
10686
10687 static bool
10688 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10689 {
10690 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10691 given++, canonical++;
10692
10693 return ((*given == 0 && *canonical == 0)
10694 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10695 }
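
/* For example, "r4000" and "R4000" both match the canonical "r4000" (case
   is ignored), and "r4k" matches too, because the trailing "000" of the
   canonical name may be given as "k".  "r40" does not match: GIVEN runs
   out while "00" of CANONICAL is still unconsumed.  */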
10696
10697
10698 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10699 CPU name. We've traditionally allowed a lot of variation here.
10700
10701 Note: this function is shared between GCC and GAS. */
10702
10703 static bool
10704 mips_matching_cpu_name_p (const char *canonical, const char *given)
10705 {
10706 /* First see if the name matches exactly, or with a final "000"
10707 turned into "k". */
10708 if (mips_strict_matching_cpu_name_p (canonical, given))
10709 return true;
10710
10711 /* If not, try comparing based on numerical designation alone.
10712 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10713 if (TOLOWER (*given) == 'r')
10714 given++;
10715 if (!ISDIGIT (*given))
10716 return false;
10717
10718 /* Skip over some well-known prefixes in the canonical name,
10719 hoping to find a number there too. */
10720 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10721 canonical += 2;
10722 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10723 canonical += 2;
10724 else if (TOLOWER (canonical[0]) == 'r')
10725 canonical += 1;
10726
10727 return mips_strict_matching_cpu_name_p (canonical, given);
10728 }
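
/* For example, the canonical name "vr4100" is matched by "vr4100", "r4100"
   and plain "4100": once GIVEN has been reduced to a bare number, the
   "vr", "rm" or "r" prefix of CANONICAL is skipped before the strict
   comparison.  A name such as "sb1" is only matched in the strict way,
   since the reduced GIVEN must start with a digit.  */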
10729
10730
10731 /* Return the mips_cpu_info entry for the processor or ISA given
10732 by CPU_STRING. Return null if the string isn't recognized.
10733
10734 A similar function exists in GAS. */
10735
10736 static const struct mips_cpu_info *
10737 mips_parse_cpu (const char *cpu_string)
10738 {
10739 const struct mips_cpu_info *p;
10740 const char *s;
10741
10742 /* In the past, we allowed upper-case CPU names, but it doesn't
10743 work well with the multilib machinery. */
10744 for (s = cpu_string; *s != 0; s++)
10745 if (ISUPPER (*s))
10746 {
10747 warning (0, "the cpu name must be lower case");
10748 break;
10749 }
10750
10751 /* 'from-abi' selects the most compatible architecture for the given
10752 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10753 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10754 version. Look first at the -mgp options, if given, otherwise base
10755 the choice on MASK_64BIT in TARGET_DEFAULT. */
10756 if (strcasecmp (cpu_string, "from-abi") == 0)
10757 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10758 : ABI_NEEDS_64BIT_REGS ? 3
10759 : (TARGET_64BIT ? 3 : 1));
10760
10761 /* 'default' has traditionally been a no-op. Probably not very useful. */
10762 if (strcasecmp (cpu_string, "default") == 0)
10763 return 0;
10764
10765 for (p = mips_cpu_info_table; p->name != 0; p++)
10766 if (mips_matching_cpu_name_p (p->name, cpu_string))
10767 return p;
10768
10769 return 0;
10770 }
10771
10772
10773 /* Return the processor associated with the given ISA level, or null
10774 if the ISA isn't valid. */
10775
10776 static const struct mips_cpu_info *
10777 mips_cpu_info_from_isa (int isa)
10778 {
10779 const struct mips_cpu_info *p;
10780
10781 for (p = mips_cpu_info_table; p->name != 0; p++)
10782 if (p->isa == isa)
10783 return p;
10784
10785 return 0;
10786 }
10787 \f
10788 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10789 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10790 they only hold condition code modes, and CCmode is always considered to
10791 be 4 bytes wide. All other registers are word sized. */
10792
10793 unsigned int
10794 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10795 {
10796 if (ST_REG_P (regno))
10797 return ((GET_MODE_SIZE (mode) + 3) / 4);
10798 else if (! FP_REG_P (regno))
10799 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10800 else
10801 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10802 }
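
/* Worked examples of the calculation above (illustrative): CCmode in a
   status register takes (4 + 3) / 4 = 1 register; DImode in 32-bit
   general registers takes (8 + 3) / 4 = 2; DFmode takes 2 FPRs when
   UNITS_PER_FPREG is 4 but only 1 when the FPRs are 64 bits wide.  */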
10803
10804 /* Implement TARGET_RETURN_IN_MEMORY. Under the old ABIs (o32 and o64),
10805 all BLKmode objects are returned in memory. Under the new ABIs (n32 and
10806 n64), small structures are returned in a register.
10807 Objects with varying size must still be returned in memory, of
10808 course. */
10809
10810 static bool
10811 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
10812 {
10813 if (TARGET_OLDABI)
10814 return (TYPE_MODE (type) == BLKmode);
10815 else
10816 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10817 || (int_size_in_bytes (type) == -1));
10818 }
10819
10820 static bool
10821 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10822 {
10823 return !TARGET_OLDABI;
10824 }
10825 \f
10826 /* Return true if INSN is a multiply-add or multiply-subtract
10827 instruction and PREV assigns to the accumulator operand. */
10828
10829 bool
10830 mips_linked_madd_p (rtx prev, rtx insn)
10831 {
10832 rtx x;
10833
10834 x = single_set (insn);
10835 if (x == 0)
10836 return false;
10837
10838 x = SET_SRC (x);
10839
10840 if (GET_CODE (x) == PLUS
10841 && GET_CODE (XEXP (x, 0)) == MULT
10842 && reg_set_p (XEXP (x, 1), prev))
10843 return true;
10844
10845 if (GET_CODE (x) == MINUS
10846 && GET_CODE (XEXP (x, 1)) == MULT
10847 && reg_set_p (XEXP (x, 0), prev))
10848 return true;
10849
10850 return false;
10851 }
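
/* For illustration, the SET_SRC shapes accepted above are
   (plus (mult A B) ACC) and (minus ACC (mult A B)), the forms behind
   multiply-accumulate instructions, and PREV must be the instruction
   that most recently set ACC.  */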
10852 \f
10853 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10854 that may clobber hi or lo. */
10855
10856 static rtx mips_macc_chains_last_hilo;
10857
10858 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10859 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10860
10861 static void
10862 mips_macc_chains_record (rtx insn)
10863 {
10864 if (get_attr_may_clobber_hilo (insn))
10865 mips_macc_chains_last_hilo = insn;
10866 }
10867
10868 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10869 has NREADY elements, looking for a multiply-add or multiply-subtract
10870 instruction that is cumulative with mips_macc_chains_last_hilo.
10871 If there is one, promote it ahead of anything else that might
10872 clobber hi or lo. */
10873
10874 static void
10875 mips_macc_chains_reorder (rtx *ready, int nready)
10876 {
10877 int i, j;
10878
10879 if (mips_macc_chains_last_hilo != 0)
10880 for (i = nready - 1; i >= 0; i--)
10881 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10882 {
10883 for (j = nready - 1; j > i; j--)
10884 if (recog_memoized (ready[j]) >= 0
10885 && get_attr_may_clobber_hilo (ready[j]))
10886 {
10887 mips_promote_ready (ready, i, j);
10888 break;
10889 }
10890 break;
10891 }
10892 }
10893 \f
10894 /* The last instruction to be scheduled. */
10895
10896 static rtx vr4130_last_insn;
10897
10898 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10899 points to an rtx that is initially an instruction. Nullify the rtx
10900 if the instruction uses the value of register X. */
10901
10902 static void
10903 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10904 {
10905 rtx *insn_ptr = data;
10906 if (REG_P (x)
10907 && *insn_ptr != 0
10908 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10909 *insn_ptr = 0;
10910 }
10911
10912 /* Return true if there is true register dependence between vr4130_last_insn
10913 and INSN. */
10914
10915 static bool
10916 vr4130_true_reg_dependence_p (rtx insn)
10917 {
10918 note_stores (PATTERN (vr4130_last_insn),
10919 vr4130_true_reg_dependence_p_1, &insn);
10920 return insn == 0;
10921 }
10922
10923 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10924 the ready queue and that INSN2 is the instruction after it, return
10925 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10926 in which INSN1 and INSN2 can probably issue in parallel, but for
10927 which (INSN2, INSN1) should be less sensitive to instruction
10928 alignment than (INSN1, INSN2). See 4130.md for more details. */
10929
10930 static bool
10931 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10932 {
10933 sd_iterator_def sd_it;
10934 dep_t dep;
10935
10936 /* Check for the following case:
10937
10938 1) there is some other instruction X with an anti dependence on INSN1;
10939 2) X has a higher priority than INSN2; and
10940 3) X is an arithmetic instruction (and thus has no unit restrictions).
10941
10942 If INSN1 is the last instruction blocking X, it would be better to
10943 choose (INSN1, X) over (INSN2, INSN1). */
10944 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
10945 if (DEP_TYPE (dep) == REG_DEP_ANTI
10946 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
10947 && recog_memoized (DEP_CON (dep)) >= 0
10948 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
10949 return false;
10950
10951 if (vr4130_last_insn != 0
10952 && recog_memoized (insn1) >= 0
10953 && recog_memoized (insn2) >= 0)
10954 {
10955 /* See whether INSN1 and INSN2 use different execution units,
10956 or if they are both ALU-type instructions. If so, they can
10957 probably execute in parallel. */
10958 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10959 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10960 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10961 {
10962 /* If only one of the instructions has a dependence on
10963 vr4130_last_insn, prefer to schedule the other one first. */
10964 bool dep1 = vr4130_true_reg_dependence_p (insn1);
10965 bool dep2 = vr4130_true_reg_dependence_p (insn2);
10966 if (dep1 != dep2)
10967 return dep1;
10968
10969 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10970 is not an ALU-type instruction and if INSN1 uses the same
10971 execution unit. (Note that if this condition holds, we already
10972 know that INSN2 uses a different execution unit.) */
10973 if (class1 != VR4130_CLASS_ALU
10974 && recog_memoized (vr4130_last_insn) >= 0
10975 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10976 return true;
10977 }
10978 }
10979 return false;
10980 }
10981
10982 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10983 queue with at least two instructions. Swap the first two if
10984 vr4130_swap_insns_p says that it could be worthwhile. */
10985
10986 static void
10987 vr4130_reorder (rtx *ready, int nready)
10988 {
10989 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10990 mips_promote_ready (ready, nready - 2, nready - 1);
10991 }
10992 \f
10993 /* Remove the instruction at index LOWER from ready queue READY and
10994 reinsert it in front of the instruction at index HIGHER. LOWER must
10995 be <= HIGHER. */
10996
10997 static void
10998 mips_promote_ready (rtx *ready, int lower, int higher)
10999 {
11000 rtx new_head;
11001 int i;
11002
11003 new_head = ready[lower];
11004 for (i = lower; i < higher; i++)
11005 ready[i] = ready[i + 1];
11006 ready[i] = new_head;
11007 }
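
/* For example, mips_promote_ready (ready, 1, 3) applied to the queue
   {a, b, c, d} gives {a, c, d, b}: entry b is pulled out and the entries
   between the two positions slide down by one.  Remember that the head of
   the ready queue is the highest-numbered element.  */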
11008
11009 /* If the priority of the instruction at POS2 in the ready queue READY
11010 is within LIMIT units of that of the instruction at POS1, swap the
11011 instructions if POS2 is not already less than POS1. */
11012
11013 static void
11014 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11015 {
11016 if (pos1 < pos2
11017 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11018 {
11019 rtx temp;
11020 temp = ready[pos1];
11021 ready[pos1] = ready[pos2];
11022 ready[pos2] = temp;
11023 }
11024 }
11025
11026 /* Record whether the last 74k AGEN instruction was a load or a store. */
11027
11028 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11029
11030 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11031 resets it to the TYPE_UNKNOWN state. */
11032
11033 static void
11034 mips_74k_agen_init (rtx insn)
11035 {
11036 if (!insn || !NONJUMP_INSN_P (insn))
11037 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11038 else if (USEFUL_INSN_P (insn))
11039 {
11040 enum attr_type type = get_attr_type (insn);
11041 if (type == TYPE_LOAD || type == TYPE_STORE)
11042 mips_last_74k_agen_insn = type;
11043 }
11044 }
11045
11046 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11047 loads to be grouped together, and multiple stores to be grouped
11048 together. Swap things around in the ready queue to make this happen. */
11049
11050 static void
11051 mips_74k_agen_reorder (rtx *ready, int nready)
11052 {
11053 int i;
11054 int store_pos, load_pos;
11055
11056 store_pos = -1;
11057 load_pos = -1;
11058
11059 for (i = nready - 1; i >= 0; i--)
11060 {
11061 rtx insn = ready[i];
11062 if (USEFUL_INSN_P (insn))
11063 switch (get_attr_type (insn))
11064 {
11065 case TYPE_STORE:
11066 if (store_pos == -1)
11067 store_pos = i;
11068 break;
11069
11070 case TYPE_LOAD:
11071 if (load_pos == -1)
11072 load_pos = i;
11073 break;
11074
11075 default:
11076 break;
11077 }
11078 }
11079
11080 if (load_pos == -1 || store_pos == -1)
11081 return;
11082
11083 switch (mips_last_74k_agen_insn)
11084 {
11085 case TYPE_UNKNOWN:
11086 /* Prefer to schedule loads since they have a higher latency. */
11087 case TYPE_LOAD:
11088 /* Swap loads to the front of the queue. */
11089 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11090 break;
11091 case TYPE_STORE:
11092 /* Swap stores to the front of the queue. */
11093 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11094 break;
11095 default:
11096 break;
11097 }
11098 }
11099
11100 /* Implement TARGET_SCHED_INIT. */
11101
11102 static void
11103 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11104 int max_ready ATTRIBUTE_UNUSED)
11105 {
11106 mips_macc_chains_last_hilo = 0;
11107 vr4130_last_insn = 0;
11108 mips_74k_agen_init (NULL_RTX);
11109 }
11110
11111 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
11112
11113 static int
11114 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11115 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11116 {
11117 if (!reload_completed
11118 && TUNE_MACC_CHAINS
11119 && *nreadyp > 0)
11120 mips_macc_chains_reorder (ready, *nreadyp);
11121 if (reload_completed
11122 && TUNE_MIPS4130
11123 && !TARGET_VR4130_ALIGN
11124 && *nreadyp > 1)
11125 vr4130_reorder (ready, *nreadyp);
11126 if (TUNE_74K)
11127 mips_74k_agen_reorder (ready, *nreadyp);
11128 return mips_issue_rate ();
11129 }
11130
11131 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11132
11133 static int
11134 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11135 rtx insn, int more)
11136 {
11137 if (TUNE_74K)
11138 mips_74k_agen_init (insn);
11139 switch (GET_CODE (PATTERN (insn)))
11140 {
11141 case USE:
11142 case CLOBBER:
11143 /* Don't count USEs and CLOBBERs against the issue rate. */
11144 break;
11145
11146 default:
11147 more--;
11148 if (!reload_completed && TUNE_MACC_CHAINS)
11149 mips_macc_chains_record (insn);
11150 vr4130_last_insn = insn;
11151 break;
11152 }
11153 return more;
11154 }
11155 \f
11156 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11157 dependencies have no cost, except on the 20Kc where output-dependence
11158 is treated like input-dependence. */
11159
11160 static int
11161 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11162 rtx dep ATTRIBUTE_UNUSED, int cost)
11163 {
11164 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11165 && TUNE_20KC)
11166 return cost;
11167 if (REG_NOTE_KIND (link) != 0)
11168 return 0;
11169 return cost;
11170 }
11171
11172 /* Return the number of instructions that can be issued per cycle. */
11173
11174 static int
11175 mips_issue_rate (void)
11176 {
11177 switch (mips_tune)
11178 {
11179 case PROCESSOR_74KC:
11180 case PROCESSOR_74KF2_1:
11181 case PROCESSOR_74KF1_1:
11182 case PROCESSOR_74KF3_2:
11183 /* The 74K is not strictly a quad-issue CPU, but it can be seen as one
11184 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11185 but in reality only a maximum of 3 insns can be issued, as the
11186 floating-point loads and stores also require a slot in the AGEN pipe. */
11187 return 4;
11188
11189 case PROCESSOR_20KC:
11190 case PROCESSOR_R4130:
11191 case PROCESSOR_R5400:
11192 case PROCESSOR_R5500:
11193 case PROCESSOR_R7000:
11194 case PROCESSOR_R9000:
11195 return 2;
11196
11197 case PROCESSOR_SB1:
11198 case PROCESSOR_SB1A:
11199 /* This is actually 4, but we get better performance if we claim 3.
11200 This is partly because of unwanted speculative code motion with the
11201 larger number, and partly because in most common cases we can't
11202 reach the theoretical max of 4. */
11203 return 3;
11204
11205 default:
11206 return 1;
11207 }
11208 }
11209
11210 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11211 be as wide as the scheduling freedom in the DFA. */
11212
11213 static int
11214 mips_multipass_dfa_lookahead (void)
11215 {
11216 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11217 if (TUNE_SB1)
11218 return 4;
11219
11220 return 0;
11221 }
11222
11223 /* Implements a store data bypass check. We need this because the cprestore
11224 pattern has type "store" but is defined using an UNSPEC. This UNSPEC causes the
11225 default routine to abort. We just return false for that case. */
11226 /* ??? Should try to give a better result here than assuming false. */
11227
11228 int
11229 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11230 {
11231 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11232 return false;
11233
11234 return ! store_data_bypass_p (out_insn, in_insn);
11235 }
11236 \f
11237 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11238 return the first operand of the associated "pref" or "prefx" insn. */
11239
11240 rtx
11241 mips_prefetch_cookie (rtx write, rtx locality)
11242 {
11243 /* store_streamed / load_streamed. */
11244 if (INTVAL (locality) <= 0)
11245 return GEN_INT (INTVAL (write) + 4);
11246
11247 /* store / load. */
11248 if (INTVAL (locality) <= 2)
11249 return write;
11250
11251 /* store_retained / load_retained. */
11252 return GEN_INT (INTVAL (write) + 6);
11253 }
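
/* For illustration, and assuming WRITE is 1 for stores and 0 for loads as
   in the standard prefetch rtx, the cookies above line up with the MIPS
   "pref" hint values: locality 0 gives 4/5 (load/store streamed),
   locality 1 or 2 gives 0/1 (plain load/store), and locality 3 gives 6/7
   (load/store retained).  */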
11254 \f
11255 /* MIPS builtin function support. */
11256
11257 struct builtin_description
11258 {
11259 /* The code of the main .md file instruction. See mips_builtin_type
11260 for more information. */
11261 enum insn_code icode;
11262
11263 /* The floating-point comparison code to use with ICODE, if any. */
11264 enum mips_fp_condition cond;
11265
11266 /* The name of the builtin function. */
11267 const char *name;
11268
11269 /* Specifies how the function should be expanded. */
11270 enum mips_builtin_type builtin_type;
11271
11272 /* The function's prototype. */
11273 enum mips_function_type function_type;
11274
11275 /* The target flags required for this function. */
11276 int target_flags;
11277 };
11278
11279 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11280 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11281 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11282 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11283 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
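
/* For example, DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   MASK_PAIRED_SINGLE_FLOAT) expands to the initializer

     { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       MASK_PAIRED_SINGLE_FLOAT }

   for the __builtin_mips_pll_ps function.  */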
11284
11285 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11286 TARGET_FLAGS. */
11287 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11288 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11289 "__builtin_mips_" #INSN "_" #COND "_s", \
11290 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11291 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11292 "__builtin_mips_" #INSN "_" #COND "_d", \
11293 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11294
11295 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11296 The lower and upper forms require TARGET_FLAGS while the any and all
11297 forms require MASK_MIPS3D. */
11298 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11299 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11300 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11301 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11302 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11303 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11304 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11305 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11306 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11307 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11308 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11309 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11310 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11311
11312 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11313 require MASK_MIPS3D. */
11314 #define CMP_4S_BUILTINS(INSN, COND) \
11315 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11316 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11317 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11318 MASK_MIPS3D }, \
11319 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11320 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11321 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11322 MASK_MIPS3D }
11323
11324 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11325 instruction requires TARGET_FLAGS. */
11326 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11327 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11328 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11329 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11330 TARGET_FLAGS }, \
11331 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11332 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11333 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11334 TARGET_FLAGS }
11335
11336 /* Define all the builtins related to c.cond.fmt condition COND. */
11337 #define CMP_BUILTINS(COND) \
11338 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11339 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11340 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11341 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11342 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11343 CMP_4S_BUILTINS (c, COND), \
11344 CMP_4S_BUILTINS (cabs, COND)
11345
11346 static const struct builtin_description mips_bdesc[] =
11347 {
11348 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11349 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11350 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11351 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11352 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11353 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11354 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11355 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11356
11357 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11358 MASK_PAIRED_SINGLE_FLOAT),
11359 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11360 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11361 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11362 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11363
11364 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11365 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11366 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11367 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11368 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11369 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11370
11371 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11372 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11373 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11374 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11375 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11376 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11377
11378 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11379 };
11380
11381 /* Builtin functions for the SB-1 processor. */
11382
11383 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11384
11385 static const struct builtin_description sb1_bdesc[] =
11386 {
11387 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11388 };
11389
11390 /* Builtin functions for DSP ASE. */
11391
11392 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11393 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11394 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11395 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11396 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11397
11398 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11399 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11400 builtin_description fields. */
11401 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11402 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11403 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11404
11405 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11406 branch instruction. TARGET_FLAGS is a builtin_description field. */
11407 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11408 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11409 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11410
11411 static const struct builtin_description dsp_bdesc[] =
11412 {
11413 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11414 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11415 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11416 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11417 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11418 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11419 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11420 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11421 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11422 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11423 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11424 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11425 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11426 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11427 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11428 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11429 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11430 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11431 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11432 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11433 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11434 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11435 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11436 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11437 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11438 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11439 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11440 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11441 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11442 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11443 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11444 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11445 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11446 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11447 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11448 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11449 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11450 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11451 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11452 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11453 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11454 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11455 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11456 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11457 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11458 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11459 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11460 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11461 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11462 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11463 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11464 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11465 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11466 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11467 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11468 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11469 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11470 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11471 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11472 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11473 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11474 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11475 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11476 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11477 BPOSGE_BUILTIN (32, MASK_DSP),
11478
11479 /* The following are for the MIPS DSP ASE REV 2. */
11480 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11481 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11482 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11483 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11484 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11485 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11486 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11487 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11488 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11489 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11490 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11491 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11492 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11493 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11494 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11495 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11496 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11497 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11498 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11499 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11500 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11501 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11502 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11503 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11504 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11505 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11506 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11507 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11508 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11509 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11510 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11511 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11512 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11513 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
11514 };
11515
11516 static const struct builtin_description dsp_32only_bdesc[] =
11517 {
11518 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11519 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11520 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11521 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11522 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11523 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11524 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11525 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11526 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11527 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11528 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11529 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11530 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11531 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11532 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11533 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11534 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11535 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11536 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11537 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11538 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11539
11540 /* The following are for the MIPS DSP ASE REV 2. */
11541 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11542 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11543 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11544 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11545 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11546 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11547 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11548 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11549 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11550 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11551 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11552 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11553 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11554 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11555 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11556 };
11557
11558 /* This helps provide a mapping from builtin function codes to bdesc
11559 arrays. */
11560
11561 struct bdesc_map
11562 {
11563 /* The builtin function table that this entry describes. */
11564 const struct builtin_description *bdesc;
11565
11566 /* The number of entries in the builtin function table. */
11567 unsigned int size;
11568
11569 /* The target processor that supports these builtin functions.
11570 PROCESSOR_MAX means we enable them for all processors. */
11571 enum processor_type proc;
11572
11573 /* If the target has these flags, this builtin function table
11574 will not be supported. */
11575 int unsupported_target_flags;
11576 };
11577
11578 static const struct bdesc_map bdesc_arrays[] =
11579 {
11580 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11581 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11582 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11583 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11584 MASK_64BIT }
11585 };
11586
11587 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11588 suitable for input operand OP of instruction ICODE. Return the value. */
11589
11590 static rtx
11591 mips_prepare_builtin_arg (enum insn_code icode,
11592 unsigned int op, tree exp, unsigned int argnum)
11593 {
11594 rtx value;
11595 enum machine_mode mode;
11596
11597 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11598 mode = insn_data[icode].operand[op].mode;
11599 if (!insn_data[icode].operand[op].predicate (value, mode))
11600 {
11601 value = copy_to_mode_reg (mode, value);
11602 /* Check the predicate again. */
11603 if (!insn_data[icode].operand[op].predicate (value, mode))
11604 {
11605 error ("invalid argument to builtin function");
11606 return const0_rtx;
11607 }
11608 }
11609
11610 return value;
11611 }
11612
11613 /* Return an rtx suitable for output operand OP of instruction ICODE.
11614 If TARGET is non-null, try to use it where possible. */
11615
11616 static rtx
11617 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11618 {
11619 enum machine_mode mode;
11620
11621 mode = insn_data[icode].operand[op].mode;
11622 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11623 target = gen_reg_rtx (mode);
11624
11625 return target;
11626 }
11627
11628 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11629
11630 rtx
11631 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11632 enum machine_mode mode ATTRIBUTE_UNUSED,
11633 int ignore ATTRIBUTE_UNUSED)
11634 {
11635 enum insn_code icode;
11636 enum mips_builtin_type type;
11637 tree fndecl;
11638 unsigned int fcode;
11639 const struct builtin_description *bdesc;
11640 const struct bdesc_map *m;
11641
11642 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11643 fcode = DECL_FUNCTION_CODE (fndecl);
11644
11645 bdesc = NULL;
11646 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11647 {
11648 if (fcode < m->size)
11649 {
11650 bdesc = m->bdesc;
11651 icode = bdesc[fcode].icode;
11652 type = bdesc[fcode].builtin_type;
11653 break;
11654 }
11655 fcode -= m->size;
11656 }
11657 if (bdesc == NULL)
11658 return 0;
11659
11660 switch (type)
11661 {
11662 case MIPS_BUILTIN_DIRECT:
11663 return mips_expand_builtin_direct (icode, target, exp, true);
11664
11665 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11666 return mips_expand_builtin_direct (icode, target, exp, false);
11667
11668 case MIPS_BUILTIN_MOVT:
11669 case MIPS_BUILTIN_MOVF:
11670 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11671 target, exp);
11672
11673 case MIPS_BUILTIN_CMP_ANY:
11674 case MIPS_BUILTIN_CMP_ALL:
11675 case MIPS_BUILTIN_CMP_UPPER:
11676 case MIPS_BUILTIN_CMP_LOWER:
11677 case MIPS_BUILTIN_CMP_SINGLE:
11678 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11679 target, exp);
11680
11681 case MIPS_BUILTIN_BPOSGE32:
11682 return mips_expand_builtin_bposge (type, target);
11683
11684 default:
11685 return 0;
11686 }
11687 }
11688
11689 /* Initialize the builtin functions. This is called from TARGET_INIT_BUILTINS. */
11690
11691 void
11692 mips_init_builtins (void)
11693 {
11694 const struct builtin_description *d;
11695 const struct bdesc_map *m;
11696 tree types[(int) MIPS_MAX_FTYPE_MAX];
11697 tree V2SF_type_node;
11698 tree V2HI_type_node;
11699 tree V4QI_type_node;
11700 unsigned int offset;
11701
11702 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11703 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
11704 return;
11705
11706 if (TARGET_PAIRED_SINGLE_FLOAT)
11707 {
11708 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11709
11710 types[MIPS_V2SF_FTYPE_V2SF]
11711 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11712
11713 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11714 = build_function_type_list (V2SF_type_node,
11715 V2SF_type_node, V2SF_type_node, NULL_TREE);
11716
11717 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11718 = build_function_type_list (V2SF_type_node,
11719 V2SF_type_node, V2SF_type_node,
11720 integer_type_node, NULL_TREE);
11721
11722 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11723 = build_function_type_list (V2SF_type_node,
11724 V2SF_type_node, V2SF_type_node,
11725 V2SF_type_node, V2SF_type_node, NULL_TREE);
11726
11727 types[MIPS_V2SF_FTYPE_SF_SF]
11728 = build_function_type_list (V2SF_type_node,
11729 float_type_node, float_type_node, NULL_TREE);
11730
11731 types[MIPS_INT_FTYPE_V2SF_V2SF]
11732 = build_function_type_list (integer_type_node,
11733 V2SF_type_node, V2SF_type_node, NULL_TREE);
11734
11735 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11736 = build_function_type_list (integer_type_node,
11737 V2SF_type_node, V2SF_type_node,
11738 V2SF_type_node, V2SF_type_node, NULL_TREE);
11739
11740 types[MIPS_INT_FTYPE_SF_SF]
11741 = build_function_type_list (integer_type_node,
11742 float_type_node, float_type_node, NULL_TREE);
11743
11744 types[MIPS_INT_FTYPE_DF_DF]
11745 = build_function_type_list (integer_type_node,
11746 double_type_node, double_type_node, NULL_TREE);
11747
11748 types[MIPS_SF_FTYPE_V2SF]
11749 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11750
11751 types[MIPS_SF_FTYPE_SF]
11752 = build_function_type_list (float_type_node,
11753 float_type_node, NULL_TREE);
11754
11755 types[MIPS_SF_FTYPE_SF_SF]
11756 = build_function_type_list (float_type_node,
11757 float_type_node, float_type_node, NULL_TREE);
11758
11759 types[MIPS_DF_FTYPE_DF]
11760 = build_function_type_list (double_type_node,
11761 double_type_node, NULL_TREE);
11762
11763 types[MIPS_DF_FTYPE_DF_DF]
11764 = build_function_type_list (double_type_node,
11765 double_type_node, double_type_node, NULL_TREE);
11766 }
11767
11768 if (TARGET_DSP)
11769 {
11770 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11771 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11772
11773 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11774 = build_function_type_list (V2HI_type_node,
11775 V2HI_type_node, V2HI_type_node,
11776 NULL_TREE);
11777
11778 types[MIPS_SI_FTYPE_SI_SI]
11779 = build_function_type_list (intSI_type_node,
11780 intSI_type_node, intSI_type_node,
11781 NULL_TREE);
11782
11783 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11784 = build_function_type_list (V4QI_type_node,
11785 V4QI_type_node, V4QI_type_node,
11786 NULL_TREE);
11787
11788 types[MIPS_SI_FTYPE_V4QI]
11789 = build_function_type_list (intSI_type_node,
11790 V4QI_type_node,
11791 NULL_TREE);
11792
11793 types[MIPS_V2HI_FTYPE_V2HI]
11794 = build_function_type_list (V2HI_type_node,
11795 V2HI_type_node,
11796 NULL_TREE);
11797
11798 types[MIPS_SI_FTYPE_SI]
11799 = build_function_type_list (intSI_type_node,
11800 intSI_type_node,
11801 NULL_TREE);
11802
11803 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11804 = build_function_type_list (V4QI_type_node,
11805 V2HI_type_node, V2HI_type_node,
11806 NULL_TREE);
11807
11808 types[MIPS_V2HI_FTYPE_SI_SI]
11809 = build_function_type_list (V2HI_type_node,
11810 intSI_type_node, intSI_type_node,
11811 NULL_TREE);
11812
11813 types[MIPS_SI_FTYPE_V2HI]
11814 = build_function_type_list (intSI_type_node,
11815 V2HI_type_node,
11816 NULL_TREE);
11817
11818 types[MIPS_V2HI_FTYPE_V4QI]
11819 = build_function_type_list (V2HI_type_node,
11820 V4QI_type_node,
11821 NULL_TREE);
11822
11823 types[MIPS_V4QI_FTYPE_V4QI_SI]
11824 = build_function_type_list (V4QI_type_node,
11825 V4QI_type_node, intSI_type_node,
11826 NULL_TREE);
11827
11828 types[MIPS_V2HI_FTYPE_V2HI_SI]
11829 = build_function_type_list (V2HI_type_node,
11830 V2HI_type_node, intSI_type_node,
11831 NULL_TREE);
11832
11833 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
11834 = build_function_type_list (V2HI_type_node,
11835 V4QI_type_node, V2HI_type_node,
11836 NULL_TREE);
11837
11838 types[MIPS_SI_FTYPE_V2HI_V2HI]
11839 = build_function_type_list (intSI_type_node,
11840 V2HI_type_node, V2HI_type_node,
11841 NULL_TREE);
11842
11843 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
11844 = build_function_type_list (intDI_type_node,
11845 intDI_type_node, V4QI_type_node, V4QI_type_node,
11846 NULL_TREE);
11847
11848 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
11849 = build_function_type_list (intDI_type_node,
11850 intDI_type_node, V2HI_type_node, V2HI_type_node,
11851 NULL_TREE);
11852
11853 types[MIPS_DI_FTYPE_DI_SI_SI]
11854 = build_function_type_list (intDI_type_node,
11855 intDI_type_node, intSI_type_node, intSI_type_node,
11856 NULL_TREE);
11857
11858 types[MIPS_V4QI_FTYPE_SI]
11859 = build_function_type_list (V4QI_type_node,
11860 intSI_type_node,
11861 NULL_TREE);
11862
11863 types[MIPS_V2HI_FTYPE_SI]
11864 = build_function_type_list (V2HI_type_node,
11865 intSI_type_node,
11866 NULL_TREE);
11867
11868 types[MIPS_VOID_FTYPE_V4QI_V4QI]
11869 = build_function_type_list (void_type_node,
11870 V4QI_type_node, V4QI_type_node,
11871 NULL_TREE);
11872
11873 types[MIPS_SI_FTYPE_V4QI_V4QI]
11874 = build_function_type_list (intSI_type_node,
11875 V4QI_type_node, V4QI_type_node,
11876 NULL_TREE);
11877
11878 types[MIPS_VOID_FTYPE_V2HI_V2HI]
11879 = build_function_type_list (void_type_node,
11880 V2HI_type_node, V2HI_type_node,
11881 NULL_TREE);
11882
11883 types[MIPS_SI_FTYPE_DI_SI]
11884 = build_function_type_list (intSI_type_node,
11885 intDI_type_node, intSI_type_node,
11886 NULL_TREE);
11887
11888 types[MIPS_DI_FTYPE_DI_SI]
11889 = build_function_type_list (intDI_type_node,
11890 intDI_type_node, intSI_type_node,
11891 NULL_TREE);
11892
11893 types[MIPS_VOID_FTYPE_SI_SI]
11894 = build_function_type_list (void_type_node,
11895 intSI_type_node, intSI_type_node,
11896 NULL_TREE);
11897
11898 types[MIPS_SI_FTYPE_PTR_SI]
11899 = build_function_type_list (intSI_type_node,
11900 ptr_type_node, intSI_type_node,
11901 NULL_TREE);
11902
11903 types[MIPS_SI_FTYPE_VOID]
11904 = build_function_type (intSI_type_node, void_list_node);
11905
11906 if (TARGET_DSPR2)
11907 {
11908 types[MIPS_V4QI_FTYPE_V4QI]
11909 = build_function_type_list (V4QI_type_node,
11910 V4QI_type_node,
11911 NULL_TREE);
11912
11913 types[MIPS_SI_FTYPE_SI_SI_SI]
11914 = build_function_type_list (intSI_type_node,
11915 intSI_type_node, intSI_type_node,
11916 intSI_type_node, NULL_TREE);
11917
11918 types[MIPS_DI_FTYPE_DI_USI_USI]
11919 = build_function_type_list (intDI_type_node,
11920 intDI_type_node,
11921 unsigned_intSI_type_node,
11922 unsigned_intSI_type_node, NULL_TREE);
11923
11924 types[MIPS_DI_FTYPE_SI_SI]
11925 = build_function_type_list (intDI_type_node,
11926 intSI_type_node, intSI_type_node,
11927 NULL_TREE);
11928
11929 types[MIPS_DI_FTYPE_USI_USI]
11930 = build_function_type_list (intDI_type_node,
11931 unsigned_intSI_type_node,
11932 unsigned_intSI_type_node, NULL_TREE);
11933
11934 types[MIPS_V2HI_FTYPE_SI_SI_SI]
11935 = build_function_type_list (V2HI_type_node,
11936 intSI_type_node, intSI_type_node,
11937 intSI_type_node, NULL_TREE);
11938
11939 }
11940 }
11941
11942 /* Iterate through all of the bdesc arrays, initializing all of the
11943 builtin functions. */
11944
11945 offset = 0;
11946 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11947 {
11948 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
11949 && (m->unsupported_target_flags & target_flags) == 0)
11950 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
11951 if ((d->target_flags & target_flags) == d->target_flags)
11952 add_builtin_function (d->name, types[d->function_type],
11953 d - m->bdesc + offset,
11954 BUILT_IN_MD, NULL, NULL);
11955 offset += m->size;
11956 }
11957 }
11958
11959 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
11960    .md pattern, EXP is the CALL_EXPR for the function call and TARGET,
11961    if nonnull, suggests a good place to put the result.
11962    HAS_TARGET is true if the function returns a value.  */
11963
11964 static rtx
11965 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11966 bool has_target)
11967 {
11968 rtx ops[MAX_RECOG_OPERANDS];
11969 int i = 0;
11970 int j = 0;
11971
11972 if (has_target)
11973 {
11974       /* Save the target to ops[0].  */
11975 ops[0] = mips_prepare_builtin_target (icode, 0, target);
11976 i = 1;
11977 }
11978
11979   /* Expand at most one operand per call argument; some instructions have
11980      extra clobber operands with no corresponding argument.  */
11981 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
11982 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11983
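  /* I now counts the optional target plus the expanded arguments, i.e. the
     number of operands to pass to the generator function.  */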
11984 switch (i)
11985 {
11986 case 2:
11987 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11988 break;
11989
11990 case 3:
11991 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11992 break;
11993
11994 case 4:
11995 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11996 break;
11997
11998 default:
11999 gcc_unreachable ();
12000 }
12001 return target;
12002 }
12003
12004 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12005    function (TYPE says which).  EXP is the CALL_EXPR for the function
12006    call, ICODE is the instruction that should be used to compare
12007 the first two arguments, and COND is the condition it should test.
12008 TARGET, if nonnull, suggests a good place to put the result. */
12009
12010 static rtx
12011 mips_expand_builtin_movtf (enum mips_builtin_type type,
12012 enum insn_code icode, enum mips_fp_condition cond,
12013 rtx target, tree exp)
12014 {
12015 rtx cmp_result, op0, op1;
12016
12017 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12018 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12019 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12020 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12021
12022 icode = CODE_FOR_mips_cond_move_tf_ps;
12023 target = mips_prepare_builtin_target (icode, 0, target);
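  /* One conditional-move pattern implements both MOVT and MOVF; the only
     difference is the order in which the two value arguments are passed.  */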
12024 if (type == MIPS_BUILTIN_MOVT)
12025 {
12026 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12027 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12028 }
12029 else
12030 {
12031 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12032 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12033 }
12034 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12035 return target;
12036 }
12037
12038 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12039 into TARGET otherwise. Return TARGET. */
12040
12041 static rtx
12042 mips_builtin_branch_and_move (rtx condition, rtx target,
12043 rtx value_if_true, rtx value_if_false)
12044 {
12045 rtx true_label, done_label;
12046
12047 true_label = gen_label_rtx ();
12048 done_label = gen_label_rtx ();
12049
12050 /* First assume that CONDITION is false. */
12051 mips_emit_move (target, value_if_false);
12052
12053 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12054 emit_jump_insn (gen_condjump (condition, true_label));
12055 emit_jump_insn (gen_jump (done_label));
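  /* The barrier records that control can never fall through the
     unconditional jump above.  */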
12056 emit_barrier ();
12057
12058 /* Fix TARGET if CONDITION is true. */
12059 emit_label (true_label);
12060 mips_emit_move (target, value_if_true);
12061
12062 emit_label (done_label);
12063 return target;
12064 }
12065
12066 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12067 of the comparison instruction and COND is the condition it should test.
12068    EXP is the CALL_EXPR for the function call and TARGET, if nonnull,
12069 suggests a good place to put the boolean result. */
12070
12071 static rtx
12072 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12073 enum insn_code icode, enum mips_fp_condition cond,
12074 rtx target, tree exp)
12075 {
12076 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12077 int i;
12078 int j = 0;
12079
12080 if (target == 0 || GET_MODE (target) != SImode)
12081 target = gen_reg_rtx (SImode);
12082
12083 /* Prepare the operands to the comparison. */
12084 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12085 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12086 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12087
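  /* In both cases the first operand is the comparison result and the last
     is the condition to test; four-operand patterns compare one pair of
     values, six-operand patterns compare two pairs.  */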
12088 switch (insn_data[icode].n_operands)
12089 {
12090 case 4:
12091 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12092 break;
12093
12094 case 6:
12095 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12096 ops[3], ops[4], GEN_INT (cond)));
12097 break;
12098
12099 default:
12100 gcc_unreachable ();
12101 }
12102
12103 /* If the comparison sets more than one register, we define the result
12104 to be 0 if all registers are false and -1 if all registers are true.
12105 The value of the complete result is indeterminate otherwise. */
12106 switch (builtin_type)
12107 {
12108 case MIPS_BUILTIN_CMP_ALL:
12109 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12110 return mips_builtin_branch_and_move (condition, target,
12111 const0_rtx, const1_rtx);
12112
12113 case MIPS_BUILTIN_CMP_UPPER:
12114 case MIPS_BUILTIN_CMP_LOWER:
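      /* A paired-single comparison sets two condition codes: the first holds
	 the result for the lower halves and the second the result for the
	 upper halves.  OFFSET selects which of the two to test.  */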
12115 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12116 condition = gen_single_cc (cmp_result, offset);
12117 return mips_builtin_branch_and_move (condition, target,
12118 const1_rtx, const0_rtx);
12119
12120 default:
12121 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12122 return mips_builtin_branch_and_move (condition, target,
12123 const1_rtx, const0_rtx);
12124 }
12125 }
12126
12127 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12128 suggests a good place to put the boolean result. */
12129
12130 static rtx
12131 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12132 {
12133 rtx condition, cmp_result;
12134 int cmp_value;
12135
12136 if (target == 0 || GET_MODE (target) != SImode)
12137 target = gen_reg_rtx (SImode);
12138
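  /* The "pos" field of the DSP control register is modelled as the
     condition-code register CCDSP_PO_REGNUM; BPOSGE32 tests whether that
     field is at least 32.  */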
12139 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12140
12141 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12142 cmp_value = 32;
12143 else
12144     gcc_unreachable ();
12145
12146 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12147 return mips_builtin_branch_and_move (condition, target,
12148 const1_rtx, const0_rtx);
12149 }
12150 \f
12151 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12152 FIRST is true if this is the first time handling this decl. */
12153
12154 static void
12155 mips_encode_section_info (tree decl, rtx rtl, int first)
12156 {
12157 default_encode_section_info (decl, rtl, first);
12158
12159 if (TREE_CODE (decl) == FUNCTION_DECL)
12160 {
12161 rtx symbol = XEXP (rtl, 0);
12162
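      /* Force a long-call sequence for functions whose type is marked
	 "far", and, under -mlong-calls, for every function whose type is
	 not marked "near".  */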
12163 if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
12164 || mips_far_type_p (TREE_TYPE (decl)))
12165 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12166 }
12167 }
12168
12169 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12170 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12171
12172 static void
12173 mips_extra_live_on_entry (bitmap regs)
12174 {
12175 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12176 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12177 }
12178
12179 /* Implement TARGET_MODE_REP_EXTENDED.  SImode values are represented as sign-extended to DImode on 64-bit targets.  */
12180
12181 int
12182 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12183 {
12184 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12185 return SIGN_EXTEND;
12186
12187 return UNKNOWN;
12188 }
12189 \f
12190 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12191
12192 static void
12193 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12194 {
12195 switch (size)
12196 {
12197 case 4:
12198 fputs ("\t.dtprelword\t", file);
12199 break;
12200
12201 case 8:
12202 fputs ("\t.dtpreldword\t", file);
12203 break;
12204
12205 default:
12206 gcc_unreachable ();
12207 }
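  /* The relocations behind .dtprelword and .dtpreldword subtract the 0x8000
     bias that MIPS applies to DTP-relative offsets, so add the bias back to
     give the debugger the unbiased offset.  */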
12208 output_addr_const (file, x);
12209 fputs ("+0x8000", file);
12210 }
12211 \f
12212 #include "gt-mips.h"