1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
9
10 This file is part of GCC.
11
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
16
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60 #include "diagnostic.h"
61
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
70 XVECEXP (X, 0, 0)
71
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
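/* Illustrative sketch, not part of the original file: given a wrapper
   built roughly as

       gen_rtx_UNSPEC (Pmode,
                       gen_rtvec (1, gen_rtx_SYMBOL_REF (Pmode, "x")),
                       UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE)

   UNSPEC_ADDRESS_P is true, UNSPEC_ADDRESS recovers the SYMBOL_REF for
   "x", and UNSPEC_ADDRESS_TYPE yields SYMBOL_GP_RELATIVE.  The symbol
   name and symbol type here are arbitrary examples.  */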
75
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
78
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
82
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
87
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
91 part of the frame. */
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
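/* Worked example (an illustrative sketch, not part of the original
   file): for a hypothetical 0x9000-byte normal-mode frame, the
   prologue can first drop $sp by 0x7ff0, save registers at offsets
   that still fit in a SMALL_OPERAND, and then allocate the remaining
   0x9000 - 0x7ff0 = 0x1010 bytes with a second adjustment.  */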
96
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
99 (INSN_P (INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
104
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
110 : (INSN))
111
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
116 : (INSN))
117
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
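/* Illustrative only (not part of the original file, compiled out):
   a typical use of the iterator above, counting the real instructions
   in INSN while looking through any delayed-branch SEQUENCE.  */
#if 0
static int
mips_count_subinsns_example (rtx insn)
{
  rtx subinsn;
  int count = 0;

  FOR_EACH_SUBINSN (subinsn, insn)
    if (USEFUL_INSN_P (subinsn))
      count++;
  return count;
}
#endif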
124
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
127
128 /* Classifies an address.
129
130 ADDRESS_REG
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
133
134 ADDRESS_LO_SUM
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
137
138 ADDRESS_CONST_INT
139 A signed 16-bit constant address.
140
141 ADDRESS_SYMBOLIC:
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
144 ADDRESS_REG,
145 ADDRESS_LO_SUM,
146 ADDRESS_CONST_INT,
147 ADDRESS_SYMBOLIC
148 };
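/* Illustrative examples of the classification above (a sketch, not
   part of the original file):

       (plus (reg $sp) (const_int 16))        -> ADDRESS_REG
       (lo_sum (reg $2) (symbol_ref "x"))     -> ADDRESS_LO_SUM
       (const_int 400)                        -> ADDRESS_CONST_INT
       (symbol_ref "x")                       -> ADDRESS_SYMBOLIC

   assuming the registers and symbol involved satisfy the conditions
   listed for each class.  */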
149
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
152 {
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
162 MIPS_SF_FTYPE_V2SF,
163 MIPS_SF_FTYPE_SF,
164 MIPS_SF_FTYPE_SF_SF,
165 MIPS_DF_FTYPE_DF,
166 MIPS_DF_FTYPE_DF_DF,
167
168 /* For MIPS DSP ASE. */
169 MIPS_DI_FTYPE_DI_SI,
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
173 MIPS_SI_FTYPE_DI_SI,
174 MIPS_SI_FTYPE_PTR_SI,
175 MIPS_SI_FTYPE_SI,
176 MIPS_SI_FTYPE_SI_SI,
177 MIPS_SI_FTYPE_V2HI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
179 MIPS_SI_FTYPE_V4QI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
181 MIPS_SI_FTYPE_VOID,
182 MIPS_V2HI_FTYPE_SI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
189 MIPS_V4QI_FTYPE_SI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
196
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
201 MIPS_DI_FTYPE_SI_SI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
204
205 /* The last type. */
206 MIPS_MAX_FTYPE_MAX
207 };
208
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
211 {
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
215 MIPS_BUILTIN_DIRECT,
216
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
220
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
225 MIPS_BUILTIN_MOVF,
226 MIPS_BUILTIN_MOVT,
227
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
233
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
242
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
245
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
248 };
249
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
252 MACRO (f), \
253 MACRO (un), \
254 MACRO (eq), \
255 MACRO (ueq), \
256 MACRO (olt), \
257 MACRO (ult), \
258 MACRO (ole), \
259 MACRO (ule), \
260 MACRO (sf), \
261 MACRO (ngle), \
262 MACRO (seq), \
263 MACRO (ngl), \
264 MACRO (lt), \
265 MACRO (nge), \
266 MACRO (le), \
267 MACRO (ngt)
268
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
273 };
274
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
279 };
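/* For instance, the two definitions above expand (illustrative sketch,
   not part of the original file) to:

       enum mips_fp_condition {
         MIPS_FP_COND_f, MIPS_FP_COND_un, MIPS_FP_COND_eq, ...
       };
       static const char *const mips_fp_conditions[] = {
         "f", "un", "eq", ...
       };

   so that mips_fp_conditions[MIPS_FP_COND_eq] is "eq".  */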
280
281 /* A function to save or restore a register. The first argument is the
282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
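/* Illustrative cross-reference (not part of the original comment):
   mips_save_reg and mips_restore_reg, declared below as

       static void mips_save_reg (rtx, rtx);
       static void mips_restore_reg (rtx, rtx);

   match this signature and are the kind of callback passed to
   mips_for_each_saved_reg.  */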
284
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
289 struct mips_sim;
290
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
324 const char **);
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
401 tree, int *, int);
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
405 const_tree, bool);
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
407 const_tree, bool);
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
409 tree, bool);
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_scalar_mode_supported_p (enum machine_mode);
412 static bool mips_vector_mode_supported_p (enum machine_mode);
413 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
414 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
415 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static void mips_init_builtins (void);
417 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
418 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
419 enum insn_code, enum mips_fp_condition,
420 rtx, tree);
421 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
422 enum insn_code, enum mips_fp_condition,
423 rtx, tree);
424 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
425 static void mips_encode_section_info (tree, rtx, int);
426 static void mips_extra_live_on_entry (bitmap);
427 static int mips_comp_type_attributes (const_tree, const_tree);
428 static void mips_set_mips16_mode (int);
429 static void mips_insert_attributes (tree, tree *);
430 static tree mips_merge_decl_attributes (tree, tree);
431 static void mips_set_current_function (tree);
432 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
433 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
434 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
435
436 /* Structure to be filled in by compute_frame_size with register
437 save masks, and offsets for the current function. */
438
439 struct mips_frame_info GTY(())
440 {
441 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
442 HOST_WIDE_INT var_size; /* # bytes that variables take up */
443 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
444 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
445 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
446 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
447 unsigned int mask; /* mask of saved gp registers */
448 unsigned int fmask; /* mask of saved fp registers */
449 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
450 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
451 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
452 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
453 bool initialized; /* true if frame size already calculated */
454 int num_gp; /* number of gp registers saved */
455 int num_fp; /* number of fp registers saved */
456 };
457
458 struct machine_function GTY(()) {
459 /* Pseudo-reg holding the value of $28 in a mips16 function which
460 refers to GP relative global variables. */
461 rtx mips16_gp_pseudo_rtx;
462
463 /* The number of extra stack bytes taken up by register varargs.
464 This area is allocated by the callee at the very top of the frame. */
465 int varargs_size;
466
467 /* Current frame information, calculated by compute_frame_size. */
468 struct mips_frame_info frame;
469
470 /* The register to use as the global pointer within this function. */
471 unsigned int global_pointer;
472
473 /* True if mips_adjust_insn_length should ignore an instruction's
474 hazard attribute. */
475 bool ignore_hazard_length_p;
476
477 /* True if the whole function is suitable for .set noreorder and
478 .set nomacro. */
479 bool all_noreorder_p;
480
481 /* True if the function is known to have an instruction that needs $gp. */
482 bool has_gp_insn_p;
483
484 /* True if we have emitted an instruction to initialize
485 mips16_gp_pseudo_rtx. */
486 bool initialized_mips16_gp_pseudo_p;
487 };
488
489 /* Information about a single argument. */
490 struct mips_arg_info
491 {
492 /* True if the argument is passed in a floating-point register, or
493 would have been if we hadn't run out of registers. */
494 bool fpr_p;
495
496 /* The number of words passed in registers, rounded up. */
497 unsigned int reg_words;
498
499 /* For EABI, the offset of the first register from GP_ARG_FIRST or
500 FP_ARG_FIRST. For other ABIs, the offset of the first register from
501 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
502 comment for details).
503
504 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
505 on the stack. */
506 unsigned int reg_offset;
507
508 /* The number of words that must be passed on the stack, rounded up. */
509 unsigned int stack_words;
510
511 /* The offset from the start of the stack overflow area of the argument's
512 first stack word. Only meaningful when STACK_WORDS is nonzero. */
513 unsigned int stack_offset;
514 };
515
516
517 /* Information about an address described by mips_address_type.
518
519 ADDRESS_CONST_INT
520 No fields are used.
521
522 ADDRESS_REG
523 REG is the base register and OFFSET is the constant offset.
524
525 ADDRESS_LO_SUM
526 REG is the register that contains the high part of the address,
527 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
528 is the type of OFFSET's symbol.
529
530 ADDRESS_SYMBOLIC
531 SYMBOL_TYPE is the type of symbol being referenced. */
532
533 struct mips_address_info
534 {
535 enum mips_address_type type;
536 rtx reg;
537 rtx offset;
538 enum mips_symbol_type symbol_type;
539 };
540
541
542 /* One stage in a constant building sequence. These sequences have
543 the form:
544
545 A = VALUE[0]
546 A = A CODE[1] VALUE[1]
547 A = A CODE[2] VALUE[2]
548 ...
549
550 where A is an accumulator, each CODE[i] is a binary rtl operation
551 and each VALUE[i] is a constant integer. */
552 struct mips_integer_op {
553 enum rtx_code code;
554 unsigned HOST_WIDE_INT value;
555 };
556
557
558 /* The largest number of operations needed to load an integer constant.
559 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
560 When the lowest bit is clear, we can try, but reject a sequence with
561 an extra SLL at the end. */
562 #define MIPS_MAX_INTEGER_OPS 7
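/* A worked example of the worst case above (illustrative only, not
   part of the original file): loading 0x1234567812345678 on a 64-bit
   target can use the six-operation sequence

       lui   r, 0x1234        # r = 0x0000000012340000
       ori   r, r, 0x5678     # r = 0x0000000012345678
       dsll  r, r, 16         # r = 0x0000123456780000  (64-bit shift)
       ori   r, r, 0x1234     # r = 0x0000123456781234
       dsll  r, r, 16         # r = 0x1234567812340000
       ori   r, r, 0x5678     # r = 0x1234567812345678

   The seventh slot leaves room for the rejected trailing SLL mentioned
   above.  */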
563
564 /* Information about a MIPS16e SAVE or RESTORE instruction. */
565 struct mips16e_save_restore_info {
566 /* The number of argument registers saved by a SAVE instruction.
567 0 for RESTORE instructions. */
568 unsigned int nargs;
569
570 /* Bit X is set if the instruction saves or restores GPR X. */
571 unsigned int mask;
572
573 /* The total number of bytes to allocate. */
574 HOST_WIDE_INT size;
575 };
576
577 /* Global variables for machine-dependent things. */
578
579 /* Threshold for data being put into the small data/bss area, instead
580 of the normal data area. */
581 int mips_section_threshold = -1;
582
583 /* Count the number of .file directives, so that .loc is up to date. */
584 int num_source_filenames = 0;
585
586 /* Count the number of sdb-related labels generated (to find block
587 start and end boundaries). */
588 int sdb_label_count = 0;
589
590 /* Next label # for each statement for Silicon Graphics IRIS systems. */
591 int sym_lineno = 0;
592
593 /* Name of the file containing the current function. */
594 const char *current_function_file = "";
595
596 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
597 int set_noreorder;
598 int set_noat;
599 int set_nomacro;
600 int set_volatile;
601
602 /* The next branch instruction is a branch likely, not branch normal. */
603 int mips_branch_likely;
604
605 /* The operands passed to the last cmpMM expander. */
606 rtx cmp_operands[2];
607
608 /* The target cpu for code generation. */
609 enum processor_type mips_arch;
610 const struct mips_cpu_info *mips_arch_info;
611
612 /* The target cpu for optimization and scheduling. */
613 enum processor_type mips_tune;
614 const struct mips_cpu_info *mips_tune_info;
615
616 /* Which instruction set architecture to use. */
617 int mips_isa;
618
619 /* Which ABI to use. */
620 int mips_abi = MIPS_ABI_DEFAULT;
621
622 /* Cost information to use. */
623 const struct mips_rtx_cost_data *mips_cost;
624
625 /* Remember the ambient target flags, excluding mips16. */
626 static int mips_base_target_flags;
627 /* The mips16 command-line target flags only. */
628 static bool mips_base_mips16;
629 /* Similar copies of option settings. */
630 static int mips_base_schedule_insns; /* flag_schedule_insns */
631 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
632 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
633 static int mips_base_align_loops; /* align_loops */
634 static int mips_base_align_jumps; /* align_jumps */
635 static int mips_base_align_functions; /* align_functions */
636 static GTY(()) int mips16_flipper;
637
638 /* The -mtext-loads setting. */
639 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
640
641 /* The -mllsc setting. */
642 enum mips_llsc_setting mips_llsc = LLSC_DEFAULT;
643
644 /* The architecture selected by -mipsN. */
645 static const struct mips_cpu_info *mips_isa_info;
646
647 /* If TRUE, we split addresses into their high and low parts in the RTL. */
648 int mips_split_addresses;
649
650 /* Mode used for saving/restoring general purpose registers. */
651 static enum machine_mode gpr_mode;
652
653 /* Array recording whether a given hard register can support a given
654 mode. */
655 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
656
657 /* List of all MIPS punctuation characters used by print_operand. */
658 char mips_print_operand_punct[256];
659
660 /* Map GCC register number to debugger register number. */
661 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
662 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
663
664 /* A copy of the original flag_delayed_branch: see override_options. */
665 static int mips_flag_delayed_branch;
666
667 static GTY (()) int mips_output_filename_first_time = 1;
668
669 /* mips_split_p[X] is true if symbols of type X can be split by
670 mips_split_symbol(). */
671 bool mips_split_p[NUM_SYMBOL_TYPES];
672
673 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
674 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
675 if they are matched by a special .md file pattern. */
676 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
677
678 /* Likewise for HIGHs. */
679 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
680
681 /* Map hard register number to register class. */
682 const enum reg_class mips_regno_to_class[] =
683 {
684 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
685 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
686 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
687 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
688 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
689 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
690 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
691 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
694 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
695 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
696 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
697 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
698 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
699 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
700 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
701 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
702 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
703 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
706 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
707 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
708 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
709 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
710 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
711 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
714 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
715 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
716 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
717 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
718 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
719 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
722 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
723 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
724 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
725 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
726 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
727 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
728 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
729 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
730 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
731 };
732
733 /* Table of machine dependent attributes. */
734 const struct attribute_spec mips_attribute_table[] =
735 {
736 { "long_call", 0, 0, false, true, true, NULL },
737 { "far", 0, 0, false, true, true, NULL },
738 { "near", 0, 0, false, true, true, NULL },
739 /* Switch MIPS16 ASE on and off per-function. We would really like
740 to make these type attributes, but GCC doesn't provide the hooks
741 we need to support the right conversion rules. As declaration
742 attributes, they affect code generation but don't carry other
743 semantics. */
744 { "mips16", 0, 0, true, false, false, NULL },
745 { "nomips16", 0, 0, true, false, false, NULL },
746 { NULL, 0, 0, false, false, false, NULL }
747 };
748 \f
749 /* A table describing all the processors gcc knows about. Names are
750 matched in the order listed. The first mention of an ISA level is
751 taken as the canonical name for that ISA.
752
753 To ease comparison, please keep this table in the same order
754 as gas's mips_cpu_info_table[]. Please also make sure that
755 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
756 options correctly. */
757 const struct mips_cpu_info mips_cpu_info_table[] = {
758 /* Entries for generic ISAs */
759 { "mips1", PROCESSOR_R3000, 1 },
760 { "mips2", PROCESSOR_R6000, 2 },
761 { "mips3", PROCESSOR_R4000, 3 },
762 { "mips4", PROCESSOR_R8000, 4 },
763 { "mips32", PROCESSOR_4KC, 32 },
764 { "mips32r2", PROCESSOR_M4K, 33 },
765 { "mips64", PROCESSOR_5KC, 64 },
766
767 /* MIPS I */
768 { "r3000", PROCESSOR_R3000, 1 },
769 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
770 { "r3900", PROCESSOR_R3900, 1 },
771
772 /* MIPS II */
773 { "r6000", PROCESSOR_R6000, 2 },
774
775 /* MIPS III */
776 { "r4000", PROCESSOR_R4000, 3 },
777 { "vr4100", PROCESSOR_R4100, 3 },
778 { "vr4111", PROCESSOR_R4111, 3 },
779 { "vr4120", PROCESSOR_R4120, 3 },
780 { "vr4130", PROCESSOR_R4130, 3 },
781 { "vr4300", PROCESSOR_R4300, 3 },
782 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
783 { "r4600", PROCESSOR_R4600, 3 },
784 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
785 { "r4650", PROCESSOR_R4650, 3 },
786
787 /* MIPS IV */
788 { "r8000", PROCESSOR_R8000, 4 },
789 { "vr5000", PROCESSOR_R5000, 4 },
790 { "vr5400", PROCESSOR_R5400, 4 },
791 { "vr5500", PROCESSOR_R5500, 4 },
792 { "rm7000", PROCESSOR_R7000, 4 },
793 { "rm9000", PROCESSOR_R9000, 4 },
794
795 /* MIPS32 */
796 { "4kc", PROCESSOR_4KC, 32 },
797 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
798 { "4kp", PROCESSOR_4KP, 32 },
799 { "4ksc", PROCESSOR_4KC, 32 },
800
801 /* MIPS32 Release 2 */
802 { "m4k", PROCESSOR_M4K, 33 },
803 { "4kec", PROCESSOR_4KC, 33 },
804 { "4kem", PROCESSOR_4KC, 33 },
805 { "4kep", PROCESSOR_4KP, 33 },
806 { "4ksd", PROCESSOR_4KC, 33 },
807
808 { "24kc", PROCESSOR_24KC, 33 },
809 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
810 { "24kf", PROCESSOR_24KF2_1, 33 },
811 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
812 { "24kfx", PROCESSOR_24KF1_1, 33 },
813 { "24kx", PROCESSOR_24KF1_1, 33 },
814
815 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
816 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
817 { "24kef", PROCESSOR_24KF2_1, 33 },
818 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
819 { "24kefx", PROCESSOR_24KF1_1, 33 },
820 { "24kex", PROCESSOR_24KF1_1, 33 },
821
822 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
823 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
824 { "34kf", PROCESSOR_24KF2_1, 33 },
825 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
826 { "34kfx", PROCESSOR_24KF1_1, 33 },
827 { "34kx", PROCESSOR_24KF1_1, 33 },
828
829 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
830 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
831 { "74kf", PROCESSOR_74KF2_1, 33 },
832 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
833 { "74kfx", PROCESSOR_74KF1_1, 33 },
834 { "74kx", PROCESSOR_74KF1_1, 33 },
835 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
836
837 /* MIPS64 */
838 { "5kc", PROCESSOR_5KC, 64 },
839 { "5kf", PROCESSOR_5KF, 64 },
840 { "20kc", PROCESSOR_20KC, 64 },
841 { "sb1", PROCESSOR_SB1, 64 },
842 { "sb1a", PROCESSOR_SB1A, 64 },
843 { "sr71000", PROCESSOR_SR71000, 64 },
844
845 /* End marker */
846 { 0, 0, 0 }
847 };
848
849 /* Default costs. If these are used for a processor we should look
850 up the actual costs. */
851 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
852 COSTS_N_INSNS (7), /* fp_mult_sf */ \
853 COSTS_N_INSNS (8), /* fp_mult_df */ \
854 COSTS_N_INSNS (23), /* fp_div_sf */ \
855 COSTS_N_INSNS (36), /* fp_div_df */ \
856 COSTS_N_INSNS (10), /* int_mult_si */ \
857 COSTS_N_INSNS (10), /* int_mult_di */ \
858 COSTS_N_INSNS (69), /* int_div_si */ \
859 COSTS_N_INSNS (69), /* int_div_di */ \
860 2, /* branch_cost */ \
861 4 /* memory_latency */
862
863 /* Need to replace these with the costs of calling the appropriate
864 libgcc routine. */
865 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
866 COSTS_N_INSNS (256), /* fp_mult_sf */ \
867 COSTS_N_INSNS (256), /* fp_mult_df */ \
868 COSTS_N_INSNS (256), /* fp_div_sf */ \
869 COSTS_N_INSNS (256) /* fp_div_df */
870
871 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
872 {
873 COSTS_N_INSNS (1), /* fp_add */
874 COSTS_N_INSNS (1), /* fp_mult_sf */
875 COSTS_N_INSNS (1), /* fp_mult_df */
876 COSTS_N_INSNS (1), /* fp_div_sf */
877 COSTS_N_INSNS (1), /* fp_div_df */
878 COSTS_N_INSNS (1), /* int_mult_si */
879 COSTS_N_INSNS (1), /* int_mult_di */
880 COSTS_N_INSNS (1), /* int_div_si */
881 COSTS_N_INSNS (1), /* int_div_di */
882 2, /* branch_cost */
883 4 /* memory_latency */
884 };
885
886 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
887 {
888 { /* R3000 */
889 COSTS_N_INSNS (2), /* fp_add */
890 COSTS_N_INSNS (4), /* fp_mult_sf */
891 COSTS_N_INSNS (5), /* fp_mult_df */
892 COSTS_N_INSNS (12), /* fp_div_sf */
893 COSTS_N_INSNS (19), /* fp_div_df */
894 COSTS_N_INSNS (12), /* int_mult_si */
895 COSTS_N_INSNS (12), /* int_mult_di */
896 COSTS_N_INSNS (35), /* int_div_si */
897 COSTS_N_INSNS (35), /* int_div_di */
898 1, /* branch_cost */
899 4 /* memory_latency */
900
901 },
902 { /* 4KC */
903 SOFT_FP_COSTS,
904 COSTS_N_INSNS (6), /* int_mult_si */
905 COSTS_N_INSNS (6), /* int_mult_di */
906 COSTS_N_INSNS (36), /* int_div_si */
907 COSTS_N_INSNS (36), /* int_div_di */
908 1, /* branch_cost */
909 4 /* memory_latency */
910 },
911 { /* 4KP */
912 SOFT_FP_COSTS,
913 COSTS_N_INSNS (36), /* int_mult_si */
914 COSTS_N_INSNS (36), /* int_mult_di */
915 COSTS_N_INSNS (37), /* int_div_si */
916 COSTS_N_INSNS (37), /* int_div_di */
917 1, /* branch_cost */
918 4 /* memory_latency */
919 },
920 { /* 5KC */
921 SOFT_FP_COSTS,
922 COSTS_N_INSNS (4), /* int_mult_si */
923 COSTS_N_INSNS (11), /* int_mult_di */
924 COSTS_N_INSNS (36), /* int_div_si */
925 COSTS_N_INSNS (68), /* int_div_di */
926 1, /* branch_cost */
927 4 /* memory_latency */
928 },
929 { /* 5KF */
930 COSTS_N_INSNS (4), /* fp_add */
931 COSTS_N_INSNS (4), /* fp_mult_sf */
932 COSTS_N_INSNS (5), /* fp_mult_df */
933 COSTS_N_INSNS (17), /* fp_div_sf */
934 COSTS_N_INSNS (32), /* fp_div_df */
935 COSTS_N_INSNS (4), /* int_mult_si */
936 COSTS_N_INSNS (11), /* int_mult_di */
937 COSTS_N_INSNS (36), /* int_div_si */
938 COSTS_N_INSNS (68), /* int_div_di */
939 1, /* branch_cost */
940 4 /* memory_latency */
941 },
942 { /* 20KC */
943 COSTS_N_INSNS (4), /* fp_add */
944 COSTS_N_INSNS (4), /* fp_mult_sf */
945 COSTS_N_INSNS (5), /* fp_mult_df */
946 COSTS_N_INSNS (17), /* fp_div_sf */
947 COSTS_N_INSNS (32), /* fp_div_df */
948 COSTS_N_INSNS (4), /* int_mult_si */
949 COSTS_N_INSNS (7), /* int_mult_di */
950 COSTS_N_INSNS (42), /* int_div_si */
951 COSTS_N_INSNS (72), /* int_div_di */
952 1, /* branch_cost */
953 4 /* memory_latency */
954 },
955 { /* 24KC */
956 SOFT_FP_COSTS,
957 COSTS_N_INSNS (5), /* int_mult_si */
958 COSTS_N_INSNS (5), /* int_mult_di */
959 COSTS_N_INSNS (41), /* int_div_si */
960 COSTS_N_INSNS (41), /* int_div_di */
961 1, /* branch_cost */
962 4 /* memory_latency */
963 },
964 { /* 24KF2_1 */
965 COSTS_N_INSNS (8), /* fp_add */
966 COSTS_N_INSNS (8), /* fp_mult_sf */
967 COSTS_N_INSNS (10), /* fp_mult_df */
968 COSTS_N_INSNS (34), /* fp_div_sf */
969 COSTS_N_INSNS (64), /* fp_div_df */
970 COSTS_N_INSNS (5), /* int_mult_si */
971 COSTS_N_INSNS (5), /* int_mult_di */
972 COSTS_N_INSNS (41), /* int_div_si */
973 COSTS_N_INSNS (41), /* int_div_di */
974 1, /* branch_cost */
975 4 /* memory_latency */
976 },
977 { /* 24KF1_1 */
978 COSTS_N_INSNS (4), /* fp_add */
979 COSTS_N_INSNS (4), /* fp_mult_sf */
980 COSTS_N_INSNS (5), /* fp_mult_df */
981 COSTS_N_INSNS (17), /* fp_div_sf */
982 COSTS_N_INSNS (32), /* fp_div_df */
983 COSTS_N_INSNS (5), /* int_mult_si */
984 COSTS_N_INSNS (5), /* int_mult_di */
985 COSTS_N_INSNS (41), /* int_div_si */
986 COSTS_N_INSNS (41), /* int_div_di */
987 1, /* branch_cost */
988 4 /* memory_latency */
989 },
990 { /* 74KC */
991 SOFT_FP_COSTS,
992 COSTS_N_INSNS (5), /* int_mult_si */
993 COSTS_N_INSNS (5), /* int_mult_di */
994 COSTS_N_INSNS (41), /* int_div_si */
995 COSTS_N_INSNS (41), /* int_div_di */
996 1, /* branch_cost */
997 4 /* memory_latency */
998 },
999 { /* 74KF2_1 */
1000 COSTS_N_INSNS (8), /* fp_add */
1001 COSTS_N_INSNS (8), /* fp_mult_sf */
1002 COSTS_N_INSNS (10), /* fp_mult_df */
1003 COSTS_N_INSNS (34), /* fp_div_sf */
1004 COSTS_N_INSNS (64), /* fp_div_df */
1005 COSTS_N_INSNS (5), /* int_mult_si */
1006 COSTS_N_INSNS (5), /* int_mult_di */
1007 COSTS_N_INSNS (41), /* int_div_si */
1008 COSTS_N_INSNS (41), /* int_div_di */
1009 1, /* branch_cost */
1010 4 /* memory_latency */
1011 },
1012 { /* 74KF1_1 */
1013 COSTS_N_INSNS (4), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (5), /* fp_mult_df */
1016 COSTS_N_INSNS (17), /* fp_div_sf */
1017 COSTS_N_INSNS (32), /* fp_div_df */
1018 COSTS_N_INSNS (5), /* int_mult_si */
1019 COSTS_N_INSNS (5), /* int_mult_di */
1020 COSTS_N_INSNS (41), /* int_div_si */
1021 COSTS_N_INSNS (41), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1024 },
1025 { /* 74KF3_2 */
1026 COSTS_N_INSNS (6), /* fp_add */
1027 COSTS_N_INSNS (6), /* fp_mult_sf */
1028 COSTS_N_INSNS (7), /* fp_mult_df */
1029 COSTS_N_INSNS (25), /* fp_div_sf */
1030 COSTS_N_INSNS (48), /* fp_div_df */
1031 COSTS_N_INSNS (5), /* int_mult_si */
1032 COSTS_N_INSNS (5), /* int_mult_di */
1033 COSTS_N_INSNS (41), /* int_div_si */
1034 COSTS_N_INSNS (41), /* int_div_di */
1035 1, /* branch_cost */
1036 4 /* memory_latency */
1037 },
1038 { /* M4k */
1039 DEFAULT_COSTS
1040 },
1041 { /* R3900 */
1042 COSTS_N_INSNS (2), /* fp_add */
1043 COSTS_N_INSNS (4), /* fp_mult_sf */
1044 COSTS_N_INSNS (5), /* fp_mult_df */
1045 COSTS_N_INSNS (12), /* fp_div_sf */
1046 COSTS_N_INSNS (19), /* fp_div_df */
1047 COSTS_N_INSNS (2), /* int_mult_si */
1048 COSTS_N_INSNS (2), /* int_mult_di */
1049 COSTS_N_INSNS (35), /* int_div_si */
1050 COSTS_N_INSNS (35), /* int_div_di */
1051 1, /* branch_cost */
1052 4 /* memory_latency */
1053 },
1054 { /* R6000 */
1055 COSTS_N_INSNS (3), /* fp_add */
1056 COSTS_N_INSNS (5), /* fp_mult_sf */
1057 COSTS_N_INSNS (6), /* fp_mult_df */
1058 COSTS_N_INSNS (15), /* fp_div_sf */
1059 COSTS_N_INSNS (16), /* fp_div_df */
1060 COSTS_N_INSNS (17), /* int_mult_si */
1061 COSTS_N_INSNS (17), /* int_mult_di */
1062 COSTS_N_INSNS (38), /* int_div_si */
1063 COSTS_N_INSNS (38), /* int_div_di */
1064 2, /* branch_cost */
1065 6 /* memory_latency */
1066 },
1067 { /* R4000 */
1068 COSTS_N_INSNS (6), /* fp_add */
1069 COSTS_N_INSNS (7), /* fp_mult_sf */
1070 COSTS_N_INSNS (8), /* fp_mult_df */
1071 COSTS_N_INSNS (23), /* fp_div_sf */
1072 COSTS_N_INSNS (36), /* fp_div_df */
1073 COSTS_N_INSNS (10), /* int_mult_si */
1074 COSTS_N_INSNS (10), /* int_mult_di */
1075 COSTS_N_INSNS (69), /* int_div_si */
1076 COSTS_N_INSNS (69), /* int_div_di */
1077 2, /* branch_cost */
1078 6 /* memory_latency */
1079 },
1080 { /* R4100 */
1081 DEFAULT_COSTS
1082 },
1083 { /* R4111 */
1084 DEFAULT_COSTS
1085 },
1086 { /* R4120 */
1087 DEFAULT_COSTS
1088 },
1089 { /* R4130 */
1090 /* The only costs that appear to be updated here are those
1091 for integer multiplication. */
1092 SOFT_FP_COSTS,
1093 COSTS_N_INSNS (4), /* int_mult_si */
1094 COSTS_N_INSNS (6), /* int_mult_di */
1095 COSTS_N_INSNS (69), /* int_div_si */
1096 COSTS_N_INSNS (69), /* int_div_di */
1097 1, /* branch_cost */
1098 4 /* memory_latency */
1099 },
1100 { /* R4300 */
1101 DEFAULT_COSTS
1102 },
1103 { /* R4600 */
1104 DEFAULT_COSTS
1105 },
1106 { /* R4650 */
1107 DEFAULT_COSTS
1108 },
1109 { /* R5000 */
1110 COSTS_N_INSNS (6), /* fp_add */
1111 COSTS_N_INSNS (4), /* fp_mult_sf */
1112 COSTS_N_INSNS (5), /* fp_mult_df */
1113 COSTS_N_INSNS (23), /* fp_div_sf */
1114 COSTS_N_INSNS (36), /* fp_div_df */
1115 COSTS_N_INSNS (5), /* int_mult_si */
1116 COSTS_N_INSNS (5), /* int_mult_di */
1117 COSTS_N_INSNS (36), /* int_div_si */
1118 COSTS_N_INSNS (36), /* int_div_di */
1119 1, /* branch_cost */
1120 4 /* memory_latency */
1121 },
1122 { /* R5400 */
1123 COSTS_N_INSNS (6), /* fp_add */
1124 COSTS_N_INSNS (5), /* fp_mult_sf */
1125 COSTS_N_INSNS (6), /* fp_mult_df */
1126 COSTS_N_INSNS (30), /* fp_div_sf */
1127 COSTS_N_INSNS (59), /* fp_div_df */
1128 COSTS_N_INSNS (3), /* int_mult_si */
1129 COSTS_N_INSNS (4), /* int_mult_di */
1130 COSTS_N_INSNS (42), /* int_div_si */
1131 COSTS_N_INSNS (74), /* int_div_di */
1132 1, /* branch_cost */
1133 4 /* memory_latency */
1134 },
1135 { /* R5500 */
1136 COSTS_N_INSNS (6), /* fp_add */
1137 COSTS_N_INSNS (5), /* fp_mult_sf */
1138 COSTS_N_INSNS (6), /* fp_mult_df */
1139 COSTS_N_INSNS (30), /* fp_div_sf */
1140 COSTS_N_INSNS (59), /* fp_div_df */
1141 COSTS_N_INSNS (5), /* int_mult_si */
1142 COSTS_N_INSNS (9), /* int_mult_di */
1143 COSTS_N_INSNS (42), /* int_div_si */
1144 COSTS_N_INSNS (74), /* int_div_di */
1145 1, /* branch_cost */
1146 4 /* memory_latency */
1147 },
1148 { /* R7000 */
1149 /* The only costs that are changed here are those
1150 for integer multiplication. */
1151 COSTS_N_INSNS (6), /* fp_add */
1152 COSTS_N_INSNS (7), /* fp_mult_sf */
1153 COSTS_N_INSNS (8), /* fp_mult_df */
1154 COSTS_N_INSNS (23), /* fp_div_sf */
1155 COSTS_N_INSNS (36), /* fp_div_df */
1156 COSTS_N_INSNS (5), /* int_mult_si */
1157 COSTS_N_INSNS (9), /* int_mult_di */
1158 COSTS_N_INSNS (69), /* int_div_si */
1159 COSTS_N_INSNS (69), /* int_div_di */
1160 1, /* branch_cost */
1161 4 /* memory_latency */
1162 },
1163 { /* R8000 */
1164 DEFAULT_COSTS
1165 },
1166 { /* R9000 */
1167 /* The only costs that are changed here are those
1168 for integer multiplication. */
1169 COSTS_N_INSNS (6), /* fp_add */
1170 COSTS_N_INSNS (7), /* fp_mult_sf */
1171 COSTS_N_INSNS (8), /* fp_mult_df */
1172 COSTS_N_INSNS (23), /* fp_div_sf */
1173 COSTS_N_INSNS (36), /* fp_div_df */
1174 COSTS_N_INSNS (3), /* int_mult_si */
1175 COSTS_N_INSNS (8), /* int_mult_di */
1176 COSTS_N_INSNS (69), /* int_div_si */
1177 COSTS_N_INSNS (69), /* int_div_di */
1178 1, /* branch_cost */
1179 4 /* memory_latency */
1180 },
1181 { /* SB1 */
1182 /* These costs are the same as the SB-1A below. */
1183 COSTS_N_INSNS (4), /* fp_add */
1184 COSTS_N_INSNS (4), /* fp_mult_sf */
1185 COSTS_N_INSNS (4), /* fp_mult_df */
1186 COSTS_N_INSNS (24), /* fp_div_sf */
1187 COSTS_N_INSNS (32), /* fp_div_df */
1188 COSTS_N_INSNS (3), /* int_mult_si */
1189 COSTS_N_INSNS (4), /* int_mult_di */
1190 COSTS_N_INSNS (36), /* int_div_si */
1191 COSTS_N_INSNS (68), /* int_div_di */
1192 1, /* branch_cost */
1193 4 /* memory_latency */
1194 },
1195 { /* SB1-A */
1196 /* These costs are the same as the SB-1 above. */
1197 COSTS_N_INSNS (4), /* fp_add */
1198 COSTS_N_INSNS (4), /* fp_mult_sf */
1199 COSTS_N_INSNS (4), /* fp_mult_df */
1200 COSTS_N_INSNS (24), /* fp_div_sf */
1201 COSTS_N_INSNS (32), /* fp_div_df */
1202 COSTS_N_INSNS (3), /* int_mult_si */
1203 COSTS_N_INSNS (4), /* int_mult_di */
1204 COSTS_N_INSNS (36), /* int_div_si */
1205 COSTS_N_INSNS (68), /* int_div_di */
1206 1, /* branch_cost */
1207 4 /* memory_latency */
1208 },
1209 { /* SR71000 */
1210 DEFAULT_COSTS
1211 },
1212 };
1213
1214 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1215 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1216 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1217 static const unsigned char mips16e_s2_s8_regs[] = {
1218 30, 23, 22, 21, 20, 19, 18
1219 };
1220 static const unsigned char mips16e_a0_a3_regs[] = {
1221 4, 5, 6, 7
1222 };
1223
1224 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1225 ordered from the uppermost in memory to the lowest in memory. */
1226 static const unsigned char mips16e_save_restore_regs[] = {
1227 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1228 };
1229 \f
1230 /* Initialize the GCC target structure. */
1231 #undef TARGET_ASM_ALIGNED_HI_OP
1232 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1233 #undef TARGET_ASM_ALIGNED_SI_OP
1234 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1235 #undef TARGET_ASM_ALIGNED_DI_OP
1236 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1237
1238 #undef TARGET_ASM_FUNCTION_PROLOGUE
1239 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1240 #undef TARGET_ASM_FUNCTION_EPILOGUE
1241 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1242 #undef TARGET_ASM_SELECT_RTX_SECTION
1243 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1244 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1245 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1246
1247 #undef TARGET_SCHED_INIT
1248 #define TARGET_SCHED_INIT mips_sched_init
1249 #undef TARGET_SCHED_REORDER
1250 #define TARGET_SCHED_REORDER mips_sched_reorder
1251 #undef TARGET_SCHED_REORDER2
1252 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1253 #undef TARGET_SCHED_VARIABLE_ISSUE
1254 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1255 #undef TARGET_SCHED_ADJUST_COST
1256 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1257 #undef TARGET_SCHED_ISSUE_RATE
1258 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1259 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1260 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1261 mips_multipass_dfa_lookahead
1262
1263 #undef TARGET_DEFAULT_TARGET_FLAGS
1264 #define TARGET_DEFAULT_TARGET_FLAGS \
1265 (TARGET_DEFAULT \
1266 | TARGET_CPU_DEFAULT \
1267 | TARGET_ENDIAN_DEFAULT \
1268 | TARGET_FP_EXCEPTIONS_DEFAULT \
1269 | MASK_CHECK_ZERO_DIV \
1270 | MASK_FUSED_MADD)
1271 #undef TARGET_HANDLE_OPTION
1272 #define TARGET_HANDLE_OPTION mips_handle_option
1273
1274 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1275 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1276
1277 #undef TARGET_INSERT_ATTRIBUTES
1278 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
1279 #undef TARGET_MERGE_DECL_ATTRIBUTES
1280 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
1281 #undef TARGET_SET_CURRENT_FUNCTION
1282 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1283
1284 #undef TARGET_VALID_POINTER_MODE
1285 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1286 #undef TARGET_RTX_COSTS
1287 #define TARGET_RTX_COSTS mips_rtx_costs
1288 #undef TARGET_ADDRESS_COST
1289 #define TARGET_ADDRESS_COST mips_address_cost
1290
1291 #undef TARGET_IN_SMALL_DATA_P
1292 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1293
1294 #undef TARGET_MACHINE_DEPENDENT_REORG
1295 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1296
1297 #undef TARGET_ASM_FILE_START
1298 #define TARGET_ASM_FILE_START mips_file_start
1299 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1300 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1301
1302 #undef TARGET_INIT_LIBFUNCS
1303 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1304
1305 #undef TARGET_BUILD_BUILTIN_VA_LIST
1306 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1307 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1308 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1309
1310 #undef TARGET_PROMOTE_FUNCTION_ARGS
1311 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1312 #undef TARGET_PROMOTE_FUNCTION_RETURN
1313 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1314 #undef TARGET_PROMOTE_PROTOTYPES
1315 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1316
1317 #undef TARGET_RETURN_IN_MEMORY
1318 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1319 #undef TARGET_RETURN_IN_MSB
1320 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1321
1322 #undef TARGET_ASM_OUTPUT_MI_THUNK
1323 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1324 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1325 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1326
1327 #undef TARGET_SETUP_INCOMING_VARARGS
1328 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1329 #undef TARGET_STRICT_ARGUMENT_NAMING
1330 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1331 #undef TARGET_MUST_PASS_IN_STACK
1332 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1333 #undef TARGET_PASS_BY_REFERENCE
1334 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1335 #undef TARGET_CALLEE_COPIES
1336 #define TARGET_CALLEE_COPIES mips_callee_copies
1337 #undef TARGET_ARG_PARTIAL_BYTES
1338 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1339
1340 #undef TARGET_MODE_REP_EXTENDED
1341 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1342
1343 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1344 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1345
1346 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1347 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
1348
1349 #undef TARGET_INIT_BUILTINS
1350 #define TARGET_INIT_BUILTINS mips_init_builtins
1351 #undef TARGET_EXPAND_BUILTIN
1352 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1353
1354 #undef TARGET_HAVE_TLS
1355 #define TARGET_HAVE_TLS HAVE_AS_TLS
1356
1357 #undef TARGET_CANNOT_FORCE_CONST_MEM
1358 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1359
1360 #undef TARGET_ENCODE_SECTION_INFO
1361 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1362
1363 #undef TARGET_ATTRIBUTE_TABLE
1364 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1365 /* All our function attributes are related to how out-of-line copies should
1366 be compiled or called. They don't in themselves prevent inlining. */
1367 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
1368 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
1369
1370 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1371 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1372
1373 #undef TARGET_MIN_ANCHOR_OFFSET
1374 #define TARGET_MIN_ANCHOR_OFFSET -32768
1375 #undef TARGET_MAX_ANCHOR_OFFSET
1376 #define TARGET_MAX_ANCHOR_OFFSET 32767
1377 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1378 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1379 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1380 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1381
1382 #undef TARGET_COMP_TYPE_ATTRIBUTES
1383 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1384
1385 #ifdef HAVE_AS_DTPRELWORD
1386 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1387 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1388 #endif
1389
1390 struct gcc_target targetm = TARGET_INITIALIZER;
1391
1392
1393 /* Predicates to test for presence of "near" and "far"/"long_call"
1394 attributes on the given TYPE. */
1395
1396 static bool
1397 mips_near_type_p (const_tree type)
1398 {
1399 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1400 }
1401
1402 static bool
1403 mips_far_type_p (const_tree type)
1404 {
1405 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1406 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1407 }
1408
1409 /* Similar predicates for "mips16"/"nomips16" attributes. */
1410
1411 static bool
1412 mips_mips16_decl_p (const_tree decl)
1413 {
1414 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1415 }
1416
1417 static bool
1418 mips_nomips16_decl_p (const_tree decl)
1419 {
1420 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1421 }
1422
1423 /* Return 0 if the attributes for two types are incompatible, 1 if they
1424 are compatible, and 2 if they are nearly compatible (which causes a
1425 warning to be generated). */
1426
1427 static int
1428 mips_comp_type_attributes (const_tree type1, const_tree type2)
1429 {
1430 /* Check for mismatch of non-default calling convention. */
1431 if (TREE_CODE (type1) != FUNCTION_TYPE)
1432 return 1;
1433
1434 /* Disallow mixed near/far attributes. */
1435 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1436 return 0;
1437 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1438 return 0;
1439
1440 return 1;
1441 }
1442 \f
1443 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1444 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1445
1446 static void
1447 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1448 {
1449 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1450 {
1451 *base_ptr = XEXP (x, 0);
1452 *offset_ptr = INTVAL (XEXP (x, 1));
1453 }
1454 else
1455 {
1456 *base_ptr = x;
1457 *offset_ptr = 0;
1458 }
1459 }
1460 \f
1461 /* Return true if SYMBOL_REF X is associated with a global symbol
1462 (in the STB_GLOBAL sense). */
1463
1464 static bool
1465 mips_global_symbol_p (const_rtx x)
1466 {
1467 const_tree const decl = SYMBOL_REF_DECL (x);
1468
1469 if (!decl)
1470 return !SYMBOL_REF_LOCAL_P (x);
1471
1472 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1473 or weak symbols. Relocations in the object file will be against
1474 the target symbol, so it's that symbol's binding that matters here. */
1475 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1476 }
1477
1478 /* Return true if SYMBOL_REF X binds locally. */
1479
1480 static bool
1481 mips_symbol_binds_local_p (const_rtx x)
1482 {
1483 return (SYMBOL_REF_DECL (x)
1484 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1485 : SYMBOL_REF_LOCAL_P (x));
1486 }
1487
1488 /* Return true if rtx constants of mode MODE should be put into a small
1489 data section. */
1490
1491 static bool
1492 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1493 {
1494 return (!TARGET_EMBEDDED_DATA
1495 && TARGET_LOCAL_SDATA
1496 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1497 }
1498
1499 /* Return the method that should be used to access SYMBOL_REF or
1500 LABEL_REF X in context CONTEXT. */
1501
1502 static enum mips_symbol_type
1503 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1504 {
1505 if (TARGET_RTP_PIC)
1506 return SYMBOL_GOT_DISP;
1507
1508 if (GET_CODE (x) == LABEL_REF)
1509 {
1510 /* LABEL_REFs are used for jump tables as well as text labels.
1511 Only return SYMBOL_PC_RELATIVE if we know the label is in
1512 the text section. */
1513 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1514 return SYMBOL_PC_RELATIVE;
1515 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1516 return SYMBOL_GOT_PAGE_OFST;
1517 return SYMBOL_ABSOLUTE;
1518 }
1519
1520 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1521
1522 if (SYMBOL_REF_TLS_MODEL (x))
1523 return SYMBOL_TLS;
1524
1525 if (CONSTANT_POOL_ADDRESS_P (x))
1526 {
1527 if (TARGET_MIPS16_TEXT_LOADS)
1528 return SYMBOL_PC_RELATIVE;
1529
1530 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1531 return SYMBOL_PC_RELATIVE;
1532
1533 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1534 return SYMBOL_GP_RELATIVE;
1535 }
1536
1537 /* Do not use small-data accesses for weak symbols; they may end up
1538 being zero. */
1539 if (TARGET_GPOPT
1540 && SYMBOL_REF_SMALL_P (x)
1541 && !SYMBOL_REF_WEAK (x))
1542 return SYMBOL_GP_RELATIVE;
1543
1544 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1545 is in effect. */
1546 if (TARGET_ABICALLS
1547 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1548 {
1549 /* There are three cases to consider:
1550
1551 - o32 PIC (either with or without explicit relocs)
1552 - n32/n64 PIC without explicit relocs
1553 - n32/n64 PIC with explicit relocs
1554
1555 In the first case, both local and global accesses will use an
1556 R_MIPS_GOT16 relocation. We must correctly predict which of
1557 the two semantics (local or global) the assembler and linker
1558 will apply. The choice depends on the symbol's binding rather
1559 than its visibility.
1560
1561 In the second case, the assembler will not use R_MIPS_GOT16
1562 relocations, but it chooses between local and global accesses
1563 in the same way as for o32 PIC.
1564
1565 In the third case we have more freedom since both forms of
1566 access will work for any kind of symbol. However, there seems
1567 little point in doing things differently. */
1568 if (mips_global_symbol_p (x))
1569 return SYMBOL_GOT_DISP;
1570
1571 return SYMBOL_GOT_PAGE_OFST;
1572 }
1573
1574 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1575 return SYMBOL_FORCE_TO_MEM;
1576 return SYMBOL_ABSOLUTE;
1577 }
1578
1579 /* Classify symbolic expression X, given that it appears in context
1580 CONTEXT. */
1581
1582 static enum mips_symbol_type
1583 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1584 {
1585 rtx offset;
1586
1587 split_const (x, &x, &offset);
1588 if (UNSPEC_ADDRESS_P (x))
1589 return UNSPEC_ADDRESS_TYPE (x);
1590
1591 return mips_classify_symbol (x, context);
1592 }
1593
1594 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1595 is the alignment (in bytes) of SYMBOL_REF X. */
1596
1597 static bool
1598 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1599 {
1600 /* If for some reason we can't get the alignment for the
1601 symbol, initializing this to one means we will only accept
1602 a zero offset. */
1603 HOST_WIDE_INT align = 1;
1604 tree t;
1605
1606 /* Get the alignment of the symbol we're referring to. */
1607 t = SYMBOL_REF_DECL (x);
1608 if (t)
1609 align = DECL_ALIGN_UNIT (t);
1610
1611 return offset >= 0 && offset < align;
1612 }
1613
1614 /* Return true if X is a symbolic constant that can be used in context
1615 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1616
1617 bool
1618 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1619 enum mips_symbol_type *symbol_type)
1620 {
1621 rtx offset;
1622
1623 split_const (x, &x, &offset);
1624 if (UNSPEC_ADDRESS_P (x))
1625 {
1626 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1627 x = UNSPEC_ADDRESS (x);
1628 }
1629 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1630 {
1631 *symbol_type = mips_classify_symbol (x, context);
1632 if (*symbol_type == SYMBOL_TLS)
1633 return false;
1634 }
1635 else
1636 return false;
1637
1638 if (offset == const0_rtx)
1639 return true;
1640
1641 /* Check whether a nonzero offset is valid for the underlying
1642 relocations. */
1643 switch (*symbol_type)
1644 {
1645 case SYMBOL_ABSOLUTE:
1646 case SYMBOL_FORCE_TO_MEM:
1647 case SYMBOL_32_HIGH:
1648 case SYMBOL_64_HIGH:
1649 case SYMBOL_64_MID:
1650 case SYMBOL_64_LOW:
1651 /* If the target has 64-bit pointers and the object file only
1652 supports 32-bit symbols, the values of those symbols will be
1653 sign-extended. In this case we can't allow an arbitrary offset
1654 in case the 32-bit value X + OFFSET has a different sign from X. */
1655 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1656 return offset_within_block_p (x, INTVAL (offset));
1657
1658 /* In other cases the relocations can handle any offset. */
1659 return true;
1660
1661 case SYMBOL_PC_RELATIVE:
1662 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1663 In this case, we no longer have access to the underlying constant,
1664 but the original symbol-based access was known to be valid. */
1665 if (GET_CODE (x) == LABEL_REF)
1666 return true;
1667
1668 /* Fall through. */
1669
1670 case SYMBOL_GP_RELATIVE:
1671 /* Make sure that the offset refers to something within the
1672 same object block. This should guarantee that the final
1673 PC- or GP-relative offset is within the 16-bit limit. */
1674 return offset_within_block_p (x, INTVAL (offset));
1675
1676 case SYMBOL_GOT_PAGE_OFST:
1677 case SYMBOL_GOTOFF_PAGE:
1678 /* If the symbol is global, the GOT entry will contain the symbol's
1679 address, and we will apply a 16-bit offset after loading it.
1680 If the symbol is local, the linker should provide enough local
1681 GOT entries for a 16-bit offset, but larger offsets may lead
1682 to GOT overflow. */
1683 return SMALL_INT (offset);
1684
1685 case SYMBOL_TPREL:
1686 case SYMBOL_DTPREL:
1687 /* There is no carry between the HI and LO REL relocations, so the
1688 offset is only valid if we know it won't lead to such a carry. */
1689 return mips_offset_within_alignment_p (x, INTVAL (offset));
1690
1691 case SYMBOL_GOT_DISP:
1692 case SYMBOL_GOTOFF_DISP:
1693 case SYMBOL_GOTOFF_CALL:
1694 case SYMBOL_GOTOFF_LOADGP:
1695 case SYMBOL_TLSGD:
1696 case SYMBOL_TLSLDM:
1697 case SYMBOL_GOTTPREL:
1698 case SYMBOL_TLS:
1699 case SYMBOL_HALF:
1700 return false;
1701 }
1702 gcc_unreachable ();
1703 }
1704
1705
1706 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1707
1708 int
1709 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1710 {
1711 if (!HARD_REGISTER_NUM_P (regno))
1712 {
1713 if (!strict)
1714 return true;
1715 regno = reg_renumber[regno];
1716 }
1717
1718 /* These fake registers will be eliminated to either the stack or
1719 hard frame pointer, both of which are usually valid base registers.
1720 Reload deals with the cases where the eliminated form isn't valid. */
1721 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1722 return true;
1723
1724 /* In mips16 mode, the stack pointer can only address word and doubleword
1725 values, nothing smaller. There are two problems here:
1726
1727 (a) Instantiating virtual registers can introduce new uses of the
1728 stack pointer. If these virtual registers are valid addresses,
1729 the stack pointer should be too.
1730
1731 (b) Most uses of the stack pointer are not made explicit until
1732 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1733 We don't know until that stage whether we'll be eliminating to the
1734 stack pointer (which needs the restriction) or the hard frame
1735 pointer (which doesn't).
1736
1737 All in all, it seems more consistent to only enforce this restriction
1738 during and after reload. */
1739 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1740 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1741
1742 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1743 }
1744
1745
1746 /* Return true if X is a valid base register for the given mode.
1747 Allow only hard registers if STRICT. */
1748
1749 static bool
1750 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1751 {
1752 if (!strict && GET_CODE (x) == SUBREG)
1753 x = SUBREG_REG (x);
1754
1755 return (REG_P (x)
1756 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1757 }
1758
1759
1760 /* Return true if X is a valid address for machine mode MODE. If it is,
1761 fill in INFO appropriately. STRICT is true if we should only accept
1762 hard base registers. */
1763
1764 static bool
1765 mips_classify_address (struct mips_address_info *info, rtx x,
1766 enum machine_mode mode, int strict)
1767 {
1768 switch (GET_CODE (x))
1769 {
1770 case REG:
1771 case SUBREG:
1772 info->type = ADDRESS_REG;
1773 info->reg = x;
1774 info->offset = const0_rtx;
1775 return mips_valid_base_register_p (info->reg, mode, strict);
1776
1777 case PLUS:
1778 info->type = ADDRESS_REG;
1779 info->reg = XEXP (x, 0);
1780 info->offset = XEXP (x, 1);
1781 return (mips_valid_base_register_p (info->reg, mode, strict)
1782 && const_arith_operand (info->offset, VOIDmode));
1783
1784 case LO_SUM:
1785 info->type = ADDRESS_LO_SUM;
1786 info->reg = XEXP (x, 0);
1787 info->offset = XEXP (x, 1);
1788 /* We have to trust the creator of the LO_SUM to do something vaguely
1789 sane. Target-independent code that creates a LO_SUM should also
1790 create and verify the matching HIGH. Target-independent code that
1791 adds an offset to a LO_SUM must prove that the offset will not
1792 induce a carry. Failure to do either of these things would be
1793 a bug, and we are not required to check for it here. The MIPS
1794 backend itself should only create LO_SUMs for valid symbolic
1795 constants, with the high part being either a HIGH or a copy
1796 of _gp. */
1797 info->symbol_type
1798 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1799 return (mips_valid_base_register_p (info->reg, mode, strict)
1800 && mips_symbol_insns (info->symbol_type, mode) > 0
1801 && mips_lo_relocs[info->symbol_type] != 0);
1802
1803 case CONST_INT:
1804 /* Small-integer addresses don't occur very often, but they
1805 are legitimate if $0 is a valid base register. */
1806 info->type = ADDRESS_CONST_INT;
1807 return !TARGET_MIPS16 && SMALL_INT (x);
1808
1809 case CONST:
1810 case LABEL_REF:
1811 case SYMBOL_REF:
1812 info->type = ADDRESS_SYMBOLIC;
1813 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1814 &info->symbol_type)
1815 && mips_symbol_insns (info->symbol_type, mode) > 0
1816 && !mips_split_p[info->symbol_type]);
1817
1818 default:
1819 return false;
1820 }
1821 }
1822
1823 /* Return true if X is a thread-local symbol. */
1824
1825 static bool
1826 mips_tls_operand_p (rtx x)
1827 {
1828 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1829 }
1830
1831 /* A for_each_rtx callback.  Return true if *X is a TLS symbol. */
1832
1833 static int
1834 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1835 {
1836 return mips_tls_operand_p (*x);
1837 }
1838
1839 /* Return true if X cannot be forced into a constant pool. */
1840
1841 static bool
1842 mips_cannot_force_const_mem (rtx x)
1843 {
1844 rtx base, offset;
1845
1846 if (!TARGET_MIPS16)
1847 {
1848 /* As an optimization, reject constants that mips_legitimize_move
1849 can expand inline.
1850
1851 Suppose we have a multi-instruction sequence that loads constant C
1852 into register R. If R does not get allocated a hard register, and
1853 R is used in an operand that allows both registers and memory
1854 references, reload will consider forcing C into memory and using
1855 one of the instruction's memory alternatives. Returning false
1856 here will force it to use an input reload instead. */
1857 if (GET_CODE (x) == CONST_INT)
1858 return true;
1859
1860 split_const (x, &base, &offset);
1861 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1862 return true;
1863 }
1864
1865 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1866 return true;
1867
1868 return false;
1869 }
1870
1871 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1872 constants when we're using a per-function constant pool. */
1873
1874 static bool
1875 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1876 const_rtx x ATTRIBUTE_UNUSED)
1877 {
1878 return !TARGET_MIPS16_PCREL_LOADS;
1879 }
1880 \f
1881 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1882 single instruction. We rely on the fact that, in the worst case,
1883 all instructions involved in a MIPS16 address calculation are
1884 extended ones. */
1885
1886 static int
1887 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1888 {
1889 switch (type)
1890 {
1891 case SYMBOL_ABSOLUTE:
1892 /* When using 64-bit symbols, we need 5 preparatory instructions,
1893 such as:
1894
1895 lui $at,%highest(symbol)
1896 daddiu $at,$at,%higher(symbol)
1897 dsll $at,$at,16
1898 daddiu $at,$at,%hi(symbol)
1899 dsll $at,$at,16
1900
1901 The final address is then $at + %lo(symbol). With 32-bit
1902 symbols we just need a preparatory lui for normal mode and
1903 a preparatory "li; sll" for MIPS16. */
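/* For illustration, with 32-bit symbols the normal-mode code is roughly:

   lui $at,%hi(symbol)

   ...with %lo(symbol) folded into the final load, store or addiu.  */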
1904 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1905
1906 case SYMBOL_GP_RELATIVE:
1907 /* Treat GP-relative accesses as taking a single instruction on
1908 MIPS16 too; the copy of $gp can often be shared. */
1909 return 1;
1910
1911 case SYMBOL_PC_RELATIVE:
1912 /* PC-relative constants can only be used with addiupc,
1913 lwpc and ldpc. */
1914 if (mode == MAX_MACHINE_MODE
1915 || GET_MODE_SIZE (mode) == 4
1916 || GET_MODE_SIZE (mode) == 8)
1917 return 1;
1918
1919 /* The constant must be loaded using addiupc first. */
1920 return 0;
1921
1922 case SYMBOL_FORCE_TO_MEM:
1923 /* LEAs will be converted into constant-pool references by
1924 mips_reorg. */
1925 if (mode == MAX_MACHINE_MODE)
1926 return 1;
1927
1928 /* The constant must be loaded from the constant pool. */
1929 return 0;
1930
1931 case SYMBOL_GOT_DISP:
1932 /* The constant will have to be loaded from the GOT before it
1933 is used in an address. */
1934 if (mode != MAX_MACHINE_MODE)
1935 return 0;
1936
1937 /* Fall through. */
1938
1939 case SYMBOL_GOT_PAGE_OFST:
1940 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1941 the local/global classification is accurate. See override_options
1942 for details.
1943
1944 The worst cases are:
1945
1946 (1) For local symbols when generating o32 or o64 code. The assembler
1947 will use:
1948
1949 lw $at,%got(symbol)
1950 nop
1951
1952 ...and the final address will be $at + %lo(symbol).
1953
1954 (2) For global symbols when -mxgot. The assembler will use:
1955
1956 lui $at,%got_hi(symbol)
1957 (d)addu $at,$at,$gp
1958
1959 ...and the final address will be $at + %got_lo(symbol). */
1960 return 3;
1961
1962 case SYMBOL_GOTOFF_PAGE:
1963 case SYMBOL_GOTOFF_DISP:
1964 case SYMBOL_GOTOFF_CALL:
1965 case SYMBOL_GOTOFF_LOADGP:
1966 case SYMBOL_32_HIGH:
1967 case SYMBOL_64_HIGH:
1968 case SYMBOL_64_MID:
1969 case SYMBOL_64_LOW:
1970 case SYMBOL_TLSGD:
1971 case SYMBOL_TLSLDM:
1972 case SYMBOL_DTPREL:
1973 case SYMBOL_GOTTPREL:
1974 case SYMBOL_TPREL:
1975 case SYMBOL_HALF:
1976 /* A 16-bit constant formed by a single relocation, or a 32-bit
1977 constant formed from a high 16-bit relocation and a low 16-bit
1978 relocation. Use mips_split_p to determine which. */
1979 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1980
1981 case SYMBOL_TLS:
1982 /* We don't treat a bare TLS symbol as a constant. */
1983 return 0;
1984 }
1985 gcc_unreachable ();
1986 }
1987
1988 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1989 to load symbols of type TYPE into a register. Return 0 if the given
1990 type of symbol cannot be used as an immediate operand.
1991
1992 Otherwise, return the number of instructions needed to load or store
1993 values of mode MODE to or from addresses of type TYPE. Return 0 if
1994 the given type of symbol is not valid in addresses.
1995
1996 In both cases, treat extended MIPS16 instructions as two instructions. */
1997
1998 static int
1999 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
2000 {
2001 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
2002 }
2003
2004 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2005
2006 bool
2007 mips_stack_address_p (rtx x, enum machine_mode mode)
2008 {
2009 struct mips_address_info addr;
2010
2011 return (mips_classify_address (&addr, x, mode, false)
2012 && addr.type == ADDRESS_REG
2013 && addr.reg == stack_pointer_rtx);
2014 }
2015
2016 /* Return true if a value at OFFSET bytes from BASE can be accessed
2017 using an unextended mips16 instruction. MODE is the mode of the
2018 value.
2019
2020 Usually the offset in an unextended instruction is a 5-bit field.
2021 The offset is unsigned and shifted left once for HIs, twice
2022 for SIs, and so on. An exception is SImode accesses off the
2023 stack pointer, which have an 8-bit immediate field. */
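/* For concreteness, the checks below accept, for example:

   QImode: offsets 0..31
   HImode: even offsets 0..62
   SImode: offsets 0..124 in steps of 4 (0..1020 when the base is $sp)
   DImode: offsets 0..248 in steps of 8.  */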
2024
2025 static bool
2026 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2027 {
2028 if (TARGET_MIPS16
2029 && GET_CODE (offset) == CONST_INT
2030 && INTVAL (offset) >= 0
2031 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2032 {
2033 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2034 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2035 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2036 }
2037 return false;
2038 }
2039
2040
2041 /* Return the number of instructions needed to load or store a value
2042 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2043 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2044 otherwise assume that a single load or store is enough.
2045
2046 For mips16 code, count extended instructions as two instructions. */
2047
2048 int
2049 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2050 {
2051 struct mips_address_info addr;
2052 int factor;
2053
2054 /* BLKmode is used for single unaligned loads and stores and should
2055 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2056 meaningless, so we have to single it out as a special case one way
2057 or the other.) */
2058 if (mode != BLKmode && might_split_p)
2059 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2060 else
2061 factor = 1;
2062
2063 if (mips_classify_address (&addr, x, mode, false))
2064 switch (addr.type)
2065 {
2066 case ADDRESS_REG:
2067 if (TARGET_MIPS16
2068 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2069 return factor * 2;
2070 return factor;
2071
2072 case ADDRESS_LO_SUM:
2073 return (TARGET_MIPS16 ? factor * 2 : factor);
2074
2075 case ADDRESS_CONST_INT:
2076 return factor;
2077
2078 case ADDRESS_SYMBOLIC:
2079 return factor * mips_symbol_insns (addr.symbol_type, mode);
2080 }
2081 return 0;
2082 }
2083
2084
2085 /* Likewise for constant X. */
2086
2087 int
2088 mips_const_insns (rtx x)
2089 {
2090 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2091 enum mips_symbol_type symbol_type;
2092 rtx offset;
2093
2094 switch (GET_CODE (x))
2095 {
2096 case HIGH:
2097 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2098 &symbol_type)
2099 || !mips_split_p[symbol_type])
2100 return 0;
2101
2102 /* This is simply an lui for normal mode. It is an extended
2103 "li" followed by an extended "sll" for MIPS16. */
2104 return TARGET_MIPS16 ? 4 : 1;
2105
2106 case CONST_INT:
2107 if (TARGET_MIPS16)
2108 /* Unsigned 8-bit constants can be loaded using an unextended
2109 LI instruction. Unsigned 16-bit constants can be loaded
2110 using an extended LI. Negative constants must be loaded
2111 using LI and then negated. */
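/* Worked examples (illustrative): 200 needs 1 instruction, 40000 needs
   2 (an extended LI), -5 needs 2 (LI 5 then NEG) and -40000 needs 3
   (an extended LI of 40000 then NEG).  */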
2112 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2113 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2114 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2115 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2116 : 0);
2117
2118 return mips_build_integer (codes, INTVAL (x));
2119
2120 case CONST_DOUBLE:
2121 case CONST_VECTOR:
2122 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2123
2124 case CONST:
2125 if (CONST_GP_P (x))
2126 return 1;
2127
2128 /* See if we can refer to X directly. */
2129 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2130 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2131
2132 /* Otherwise try splitting the constant into a base and offset.
2133 16-bit offsets can be added using an extra addiu. Larger offsets
2134 must be calculated separately and then added to the base. */
2135 split_const (x, &x, &offset);
2136 if (offset != 0)
2137 {
2138 int n = mips_const_insns (x);
2139 if (n != 0)
2140 {
2141 if (SMALL_INT (offset))
2142 return n + 1;
2143 else
2144 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2145 }
2146 }
2147 return 0;
2148
2149 case SYMBOL_REF:
2150 case LABEL_REF:
2151 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2152 MAX_MACHINE_MODE);
2153
2154 default:
2155 return 0;
2156 }
2157 }
2158
2159
2160 /* Return the number of instructions needed to implement INSN,
2161 given that it loads from or stores to MEM. Count extended
2162 mips16 instructions as two instructions. */
2163
2164 int
2165 mips_load_store_insns (rtx mem, rtx insn)
2166 {
2167 enum machine_mode mode;
2168 bool might_split_p;
2169 rtx set;
2170
2171 gcc_assert (MEM_P (mem));
2172 mode = GET_MODE (mem);
2173
2174 /* Try to prove that INSN does not need to be split. */
2175 might_split_p = true;
2176 if (GET_MODE_BITSIZE (mode) == 64)
2177 {
2178 set = single_set (insn);
2179 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2180 might_split_p = false;
2181 }
2182
2183 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2184 }
2185
2186
2187 /* Return the number of instructions needed for an integer division. */
2188
2189 int
2190 mips_idiv_insns (void)
2191 {
2192 int count;
2193
2194 count = 1;
2195 if (TARGET_CHECK_ZERO_DIV)
2196 {
2197 if (GENERATE_DIVIDE_TRAPS)
2198 count++;
2199 else
2200 count += 2;
2201 }
2202
2203 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2204 count++;
2205 return count;
2206 }
2207 \f
2208 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2209 returns a nonzero value if X is a legitimate address for a memory
2210 operand of the indicated MODE. STRICT is nonzero if this function
2211 is called during reload. */
2212
2213 bool
2214 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2215 {
2216 struct mips_address_info addr;
2217
2218 return mips_classify_address (&addr, x, mode, strict);
2219 }
2220
2221 /* Emit a move from SRC to DEST. Assume that the move expanders can
2222 handle all moves if !can_create_pseudo_p (). The distinction is
2223 important because, unlike emit_move_insn, the move expanders know
2224 how to force Pmode objects into the constant pool even when the
2225 constant pool address is not itself legitimate. */
2226
2227 rtx
2228 mips_emit_move (rtx dest, rtx src)
2229 {
2230 return (can_create_pseudo_p ()
2231 ? emit_move_insn (dest, src)
2232 : emit_move_insn_1 (dest, src));
2233 }
2234
2235 /* Copy VALUE to a register and return that register. If new pseudos
2236 are allowed, copy it into a new register, otherwise use DEST. */
2237
2238 static rtx
2239 mips_force_temporary (rtx dest, rtx value)
2240 {
2241 if (can_create_pseudo_p ())
2242 return force_reg (Pmode, value);
2243 else
2244 {
2245 mips_emit_move (copy_rtx (dest), value);
2246 return dest;
2247 }
2248 }
2249
2250
2251 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2252 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2253 constant in that context and can be split into a high part and a LO_SUM.
2254 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2255 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2256
2257 TEMP is as for mips_force_temporary and is used to load the high
2258 part into a register. */
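/* For instance, assuming explicit relocs, an absolute symbol "foo" splits
   into a HIGH that becomes "lui $tmp,%hi(foo)" and a LO_SUM that prints
   as "%lo(foo)($tmp)" when used as a memory address; register names here
   are illustrative.  */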
2259
2260 bool
2261 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2262 {
2263 enum mips_symbol_context context;
2264 enum mips_symbol_type symbol_type;
2265 rtx high;
2266
2267 context = (mode == MAX_MACHINE_MODE
2268 ? SYMBOL_CONTEXT_LEA
2269 : SYMBOL_CONTEXT_MEM);
2270 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2271 || mips_symbol_insns (symbol_type, mode) == 0
2272 || !mips_split_p[symbol_type])
2273 return false;
2274
2275 if (lo_sum_out)
2276 {
2277 if (symbol_type == SYMBOL_GP_RELATIVE)
2278 {
2279 if (!can_create_pseudo_p ())
2280 {
2281 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2282 high = temp;
2283 }
2284 else
2285 high = mips16_gp_pseudo_reg ();
2286 }
2287 else
2288 {
2289 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2290 high = mips_force_temporary (temp, high);
2291 }
2292 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2293 }
2294 return true;
2295 }
2296
2297
2298 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2299 and add CONST_INT OFFSET to the result. */
2300
2301 static rtx
2302 mips_unspec_address_offset (rtx base, rtx offset,
2303 enum mips_symbol_type symbol_type)
2304 {
2305 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2306 UNSPEC_ADDRESS_FIRST + symbol_type);
2307 if (offset != const0_rtx)
2308 base = gen_rtx_PLUS (Pmode, base, offset);
2309 return gen_rtx_CONST (Pmode, base);
2310 }
2311
2312 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2313 type SYMBOL_TYPE. */
2314
2315 rtx
2316 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2317 {
2318 rtx base, offset;
2319
2320 split_const (address, &base, &offset);
2321 return mips_unspec_address_offset (base, offset, symbol_type);
2322 }
2323
2324
2325 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2326 high part to BASE and return the result. Just return BASE otherwise.
2327 TEMP is available as a temporary register if needed.
2328
2329 The returned expression can be used as the first operand to a LO_SUM. */
2330
2331 static rtx
2332 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2333 enum mips_symbol_type symbol_type)
2334 {
2335 if (mips_split_p[symbol_type])
2336 {
2337 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2338 addr = mips_force_temporary (temp, addr);
2339 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2340 }
2341 return base;
2342 }
2343
2344
2345 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2346 mips_force_temporary; it is only needed when OFFSET is not a
2347 SMALL_OPERAND. */
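/* Example (illustrative): for REG + 0x12345 in normal mode, the excess is
   0x10000 and the residue is 0x2345, giving roughly:

   lui $tmp,0x1
   addu $tmp,$tmp,REG

   ...followed by addresses of the form $tmp + 0x2345.  */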
2348
2349 static rtx
2350 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2351 {
2352 if (!SMALL_OPERAND (offset))
2353 {
2354 rtx high;
2355 if (TARGET_MIPS16)
2356 {
2357 /* Load the full offset into a register so that we can use
2358 an unextended instruction for the address itself. */
2359 high = GEN_INT (offset);
2360 offset = 0;
2361 }
2362 else
2363 {
2364 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2365 high = GEN_INT (CONST_HIGH_PART (offset));
2366 offset = CONST_LOW_PART (offset);
2367 }
2368 high = mips_force_temporary (temp, high);
2369 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2370 }
2371 return plus_constant (reg, offset);
2372 }
2373
2374 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2375 referencing, and TYPE is the symbol type to use (either global
2376 dynamic or local dynamic). V0 is an RTX for the return value
2377 location. The entire insn sequence is returned. */
2378
2379 static GTY(()) rtx mips_tls_symbol;
2380
2381 static rtx
2382 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2383 {
2384 rtx insn, loc, tga, a0;
2385
2386 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2387
2388 if (!mips_tls_symbol)
2389 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2390
2391 loc = mips_unspec_address (sym, type);
2392
2393 start_sequence ();
2394
2395 emit_insn (gen_rtx_SET (Pmode, a0,
2396 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2397 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2398 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2399 CONST_OR_PURE_CALL_P (insn) = 1;
2400 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2401 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2402 insn = get_insns ();
2403
2404 end_sequence ();
2405
2406 return insn;
2407 }
2408
2409 /* Generate the code to access LOC, a thread-local SYMBOL_REF. The
2410 return value will be a valid address and move_operand (either a REG
2411 or a LO_SUM). */
2412
2413 static rtx
2414 mips_legitimize_tls_address (rtx loc)
2415 {
2416 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2417 enum tls_model model;
2418
2419 if (TARGET_MIPS16)
2420 {
2421 sorry ("MIPS16 TLS");
2422 return gen_reg_rtx (Pmode);
2423 }
2424
2425 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2426 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2427
2428 model = SYMBOL_REF_TLS_MODEL (loc);
2429 /* Only TARGET_ABICALLS code can have more than one module; other
2430 code must be static and should not use a GOT. All TLS models
2431 reduce to local exec in this situation. */
2432 if (!TARGET_ABICALLS)
2433 model = TLS_MODEL_LOCAL_EXEC;
2434
2435 switch (model)
2436 {
2437 case TLS_MODEL_GLOBAL_DYNAMIC:
2438 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2439 dest = gen_reg_rtx (Pmode);
2440 emit_libcall_block (insn, dest, v0, loc);
2441 break;
2442
2443 case TLS_MODEL_LOCAL_DYNAMIC:
2444 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2445 tmp1 = gen_reg_rtx (Pmode);
2446
2447 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2448 share the LDM result with other LD model accesses. */
2449 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2450 UNSPEC_TLS_LDM);
2451 emit_libcall_block (insn, tmp1, v0, eqv);
2452
2453 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2454 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2455 mips_unspec_address (loc, SYMBOL_DTPREL));
2456 break;
2457
2458 case TLS_MODEL_INITIAL_EXEC:
2459 tmp1 = gen_reg_rtx (Pmode);
2460 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2461 if (Pmode == DImode)
2462 {
2463 emit_insn (gen_tls_get_tp_di (v1));
2464 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2465 }
2466 else
2467 {
2468 emit_insn (gen_tls_get_tp_si (v1));
2469 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2470 }
2471 dest = gen_reg_rtx (Pmode);
2472 emit_insn (gen_add3_insn (dest, tmp1, v1));
2473 break;
2474
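/* Local exec: read the thread pointer with rdhwr and add the
   %tprel_hi/%tprel_lo offset of LOC.  A rough sketch of the generated
   code (the exact sequence depends on the configuration):

   rdhwr $3,$29
   lui $at,%tprel_hi(x)
   addu $at,$at,$3
   ... $at + %tprel_lo(x)  */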
2475 case TLS_MODEL_LOCAL_EXEC:
2476 if (Pmode == DImode)
2477 emit_insn (gen_tls_get_tp_di (v1));
2478 else
2479 emit_insn (gen_tls_get_tp_si (v1));
2480
2481 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2482 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2483 mips_unspec_address (loc, SYMBOL_TPREL));
2484 break;
2485
2486 default:
2487 gcc_unreachable ();
2488 }
2489
2490 return dest;
2491 }
2492
2493 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2494 be legitimized in a way that the generic machinery might not expect,
2495 put the new address in *XLOC and return true. MODE is the mode of
2496 the memory being accessed. */
2497
2498 bool
2499 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2500 {
2501 if (mips_tls_operand_p (*xloc))
2502 {
2503 *xloc = mips_legitimize_tls_address (*xloc);
2504 return true;
2505 }
2506
2507 /* See if the address can be split into a high part and a LO_SUM. */
2508 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2509 return true;
2510
2511 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2512 {
2513 /* Handle REG + CONSTANT using mips_add_offset. */
2514 rtx reg;
2515
2516 reg = XEXP (*xloc, 0);
2517 if (!mips_valid_base_register_p (reg, mode, 0))
2518 reg = copy_to_mode_reg (Pmode, reg);
2519 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2520 return true;
2521 }
2522
2523 return false;
2524 }
2525
2526
2527 /* Subroutine of mips_build_integer (with the same interface).
2528 Assume that the final action in the sequence should be a left shift. */
2529
2530 static unsigned int
2531 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2532 {
2533 unsigned int i, shift;
2534
2535 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2536 since signed numbers are easier to load than unsigned ones. */
2537 shift = 0;
2538 while ((value & 1) == 0)
2539 value /= 2, shift++;
2540
2541 i = mips_build_integer (codes, value);
2542 codes[i].code = ASHIFT;
2543 codes[i].value = shift;
2544 return i + 1;
2545 }
2546
2547
2548 /* As for mips_build_shift, but assume that the final action will be
2549 an IOR or PLUS operation. */
2550
2551 static unsigned int
2552 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2553 {
2554 unsigned HOST_WIDE_INT high;
2555 unsigned int i;
2556
2557 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2558 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2559 {
2560 /* The constant is too complex to load with a simple lui/ori pair
2561 so our goal is to clear as many trailing zeros as possible.
2562 In this case, we know bit 16 is set and that the low 16 bits
2563 form a negative number. If we subtract that number from VALUE,
2564 we will clear at least the lowest 17 bits, maybe more. */
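/* A worked example, assuming a 64-bit target: for VALUE 0x80018000,
   CONST_HIGH_PART is 0x80020000 and CONST_LOW_PART is -0x8000, giving
   "li 0x4001; dsll 17; daddiu -0x8000".  */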
2565 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2566 codes[i].code = PLUS;
2567 codes[i].value = CONST_LOW_PART (value);
2568 }
2569 else
2570 {
2571 i = mips_build_integer (codes, high);
2572 codes[i].code = IOR;
2573 codes[i].value = value & 0xffff;
2574 }
2575 return i + 1;
2576 }
2577
2578
2579 /* Fill CODES with a sequence of rtl operations to load VALUE.
2580 Return the number of operations needed. */
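/* For example, 0x12345678 is built in two operations: an LUI of 0x1234
   followed by an IOR with 0x5678.  (Illustrative; see mips_build_lower.)  */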
2581
2582 static unsigned int
2583 mips_build_integer (struct mips_integer_op *codes,
2584 unsigned HOST_WIDE_INT value)
2585 {
2586 if (SMALL_OPERAND (value)
2587 || SMALL_OPERAND_UNSIGNED (value)
2588 || LUI_OPERAND (value))
2589 {
2590 /* The value can be loaded with a single instruction. */
2591 codes[0].code = UNKNOWN;
2592 codes[0].value = value;
2593 return 1;
2594 }
2595 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2596 {
2597 /* Either the constant is a simple LUI/ORI combination or its
2598 lowest bit is set. We don't want to shift in this case. */
2599 return mips_build_lower (codes, value);
2600 }
2601 else if ((value & 0xffff) == 0)
2602 {
2603 /* The constant will need at least three actions. The lowest
2604 16 bits are clear, so the final action will be a shift. */
2605 return mips_build_shift (codes, value);
2606 }
2607 else
2608 {
2609 /* The final action could be a shift, add or inclusive OR.
2610 Rather than use a complex condition to select the best
2611 approach, try both mips_build_shift and mips_build_lower
2612 and pick the one that gives the shortest sequence.
2613 Note that this case is only used once per constant. */
2614 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2615 unsigned int cost, alt_cost;
2616
2617 cost = mips_build_shift (codes, value);
2618 alt_cost = mips_build_lower (alt_codes, value);
2619 if (alt_cost < cost)
2620 {
2621 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2622 cost = alt_cost;
2623 }
2624 return cost;
2625 }
2626 }
2627
2628
2629 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2630
2631 void
2632 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2633 {
2634 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2635 enum machine_mode mode;
2636 unsigned int i, cost;
2637 rtx x;
2638
2639 mode = GET_MODE (dest);
2640 cost = mips_build_integer (codes, value);
2641
2642 /* Apply each binary operation to X. Invariant: X is a legitimate
2643 source operand for a SET pattern. */
2644 x = GEN_INT (codes[0].value);
2645 for (i = 1; i < cost; i++)
2646 {
2647 if (!can_create_pseudo_p ())
2648 {
2649 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2650 x = temp;
2651 }
2652 else
2653 x = force_reg (mode, x);
2654 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2655 }
2656
2657 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2658 }
2659
2660
2661 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2662 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2663 move_operand. */
2664
2665 static void
2666 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2667 {
2668 rtx base, offset;
2669
2670 /* Split moves of big integers into smaller pieces. */
2671 if (splittable_const_int_operand (src, mode))
2672 {
2673 mips_move_integer (dest, dest, INTVAL (src));
2674 return;
2675 }
2676
2677 /* Split moves of symbolic constants into high/low pairs. */
2678 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2679 {
2680 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2681 return;
2682 }
2683
2684 if (mips_tls_operand_p (src))
2685 {
2686 mips_emit_move (dest, mips_legitimize_tls_address (src));
2687 return;
2688 }
2689
2690 /* If we have (const (plus symbol offset)), and that expression cannot
2691 be forced into memory, load the symbol first and add in the offset.
2692 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2693 forced into memory, as it usually produces better code. */
2694 split_const (src, &base, &offset);
2695 if (offset != const0_rtx
2696 && (targetm.cannot_force_const_mem (src)
2697 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2698 {
2699 base = mips_force_temporary (dest, base);
2700 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2701 return;
2702 }
2703
2704 src = force_const_mem (mode, src);
2705
2706 /* When using explicit relocs, constant pool references are sometimes
2707 not legitimate addresses. */
2708 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2709 mips_emit_move (dest, src);
2710 }
2711
2712
2713 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2714 sequence that is valid. */
2715
2716 bool
2717 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2718 {
2719 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2720 {
2721 mips_emit_move (dest, force_reg (mode, src));
2722 return true;
2723 }
2724
2725 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2726 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2727 && REG_P (src) && MD_REG_P (REGNO (src))
2728 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2729 {
2730 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2731 if (GET_MODE_SIZE (mode) <= 4)
2732 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2733 gen_rtx_REG (SImode, REGNO (src)),
2734 gen_rtx_REG (SImode, other_regno)));
2735 else
2736 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2737 gen_rtx_REG (DImode, REGNO (src)),
2738 gen_rtx_REG (DImode, other_regno)));
2739 return true;
2740 }
2741
2742 /* We need to deal with constants that would be legitimate
2743 immediate_operands but not legitimate move_operands. */
2744 if (CONSTANT_P (src) && !move_operand (src, mode))
2745 {
2746 mips_legitimize_const_move (mode, dest, src);
2747 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2748 return true;
2749 }
2750 return false;
2751 }
2752 \f
2753 /* We need a lot of little routines to check constant values on the
2754 mips16. These are used to figure out how long the instruction will
2755 be. It would be much better to do this using constraints, but
2756 there aren't nearly enough letters available. */
2757
2758 static int
2759 m16_check_op (rtx op, int low, int high, int mask)
2760 {
2761 return (GET_CODE (op) == CONST_INT
2762 && INTVAL (op) >= low
2763 && INTVAL (op) <= high
2764 && (INTVAL (op) & mask) == 0);
2765 }
2766
2767 int
2768 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2769 {
2770 return m16_check_op (op, 0x1, 0x8, 0);
2771 }
2772
2773 int
2774 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2775 {
2776 return m16_check_op (op, - 0x8, 0x7, 0);
2777 }
2778
2779 int
2780 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2781 {
2782 return m16_check_op (op, - 0x7, 0x8, 0);
2783 }
2784
2785 int
2786 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2787 {
2788 return m16_check_op (op, - 0x10, 0xf, 0);
2789 }
2790
2791 int
2792 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2793 {
2794 return m16_check_op (op, - 0xf, 0x10, 0);
2795 }
2796
2797 int
2798 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2799 {
2800 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2801 }
2802
2803 int
2804 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2805 {
2806 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2807 }
2808
2809 int
2810 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2811 {
2812 return m16_check_op (op, - 0x80, 0x7f, 0);
2813 }
2814
2815 int
2816 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2817 {
2818 return m16_check_op (op, - 0x7f, 0x80, 0);
2819 }
2820
2821 int
2822 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2823 {
2824 return m16_check_op (op, 0x0, 0xff, 0);
2825 }
2826
2827 int
2828 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2829 {
2830 return m16_check_op (op, - 0xff, 0x0, 0);
2831 }
2832
2833 int
2834 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2835 {
2836 return m16_check_op (op, - 0x1, 0xfe, 0);
2837 }
2838
2839 int
2840 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2841 {
2842 return m16_check_op (op, 0x0, 0xff << 2, 3);
2843 }
2844
2845 int
2846 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2847 {
2848 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2849 }
2850
2851 int
2852 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2853 {
2854 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2855 }
2856
2857 int
2858 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2859 {
2860 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2861 }
2862 \f
2863 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2864 address instruction. */
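/* That is, addresses of the form (plus (mult index 4) base), such as the
   address of a[i] for a word array a (illustrative example).  */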
2865
2866 static bool
2867 mips_lwxs_address_p (rtx addr)
2868 {
2869 if (ISA_HAS_LWXS
2870 && GET_CODE (addr) == PLUS
2871 && REG_P (XEXP (addr, 1)))
2872 {
2873 rtx offset = XEXP (addr, 0);
2874 if (GET_CODE (offset) == MULT
2875 && REG_P (XEXP (offset, 0))
2876 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2877 && INTVAL (XEXP (offset, 1)) == 4)
2878 return true;
2879 }
2880 return false;
2881 }
2882
2883 /* The cost of loading values from the constant pool. It should be
2884 larger than the cost of any constant we want to synthesize inline. */
2885
2886 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2887
2888 /* Return the cost of X when used as an operand to the MIPS16 instruction
2889 that implements CODE. Return -1 if there is no such instruction, or if
2890 X is not a valid immediate operand for it. */
2891
2892 static int
2893 mips16_constant_cost (int code, HOST_WIDE_INT x)
2894 {
2895 switch (code)
2896 {
2897 case ASHIFT:
2898 case ASHIFTRT:
2899 case LSHIFTRT:
2900 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2901 other shifts are extended. The shift patterns truncate the shift
2902 count to the right size, so there are no out-of-range values. */
2903 if (IN_RANGE (x, 1, 8))
2904 return 0;
2905 return COSTS_N_INSNS (1);
2906
2907 case PLUS:
2908 if (IN_RANGE (x, -128, 127))
2909 return 0;
2910 if (SMALL_OPERAND (x))
2911 return COSTS_N_INSNS (1);
2912 return -1;
2913
2914 case LEU:
2915 /* Like LE, but reject the always-true case. */
2916 if (x == -1)
2917 return -1;
/* Fall through. */
2918 case LE:
2919 /* We add 1 to the immediate and use SLT. */
2920 x += 1;
/* Fall through. */
2921 case XOR:
2922 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2923 case LT:
2924 case LTU:
2925 if (IN_RANGE (x, 0, 255))
2926 return 0;
2927 if (SMALL_OPERAND_UNSIGNED (x))
2928 return COSTS_N_INSNS (1);
2929 return -1;
2930
2931 case EQ:
2932 case NE:
2933 /* Equality comparisons with 0 are cheap. */
2934 if (x == 0)
2935 return 0;
2936 return -1;
2937
2938 default:
2939 return -1;
2940 }
2941 }
2942
2943 /* Return true if there is a non-MIPS16 instruction that implements CODE
2944 and if that instruction accepts X as an immediate operand. */
2945
2946 static int
2947 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2948 {
2949 switch (code)
2950 {
2951 case ASHIFT:
2952 case ASHIFTRT:
2953 case LSHIFTRT:
2954 /* All shift counts are truncated to a valid constant. */
2955 return true;
2956
2957 case ROTATE:
2958 case ROTATERT:
2959 /* Likewise rotates, if the target supports rotates at all. */
2960 return ISA_HAS_ROR;
2961
2962 case AND:
2963 case IOR:
2964 case XOR:
2965 /* These instructions take 16-bit unsigned immediates. */
2966 return SMALL_OPERAND_UNSIGNED (x);
2967
2968 case PLUS:
2969 case LT:
2970 case LTU:
2971 /* These instructions take 16-bit signed immediates. */
2972 return SMALL_OPERAND (x);
2973
2974 case EQ:
2975 case NE:
2976 case GT:
2977 case GTU:
2978 /* The "immediate" forms of these instructions are really
2979 implemented as comparisons with register 0. */
2980 return x == 0;
2981
2982 case GE:
2983 case GEU:
2984 /* Likewise, meaning that the only valid immediate operand is 1. */
2985 return x == 1;
2986
2987 case LE:
2988 /* We add 1 to the immediate and use SLT. */
2989 return SMALL_OPERAND (x + 1);
2990
2991 case LEU:
2992 /* Likewise SLTU, but reject the always-true case. */
2993 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2994
2995 case SIGN_EXTRACT:
2996 case ZERO_EXTRACT:
2997 /* The bit position and size are immediate operands. */
2998 return ISA_HAS_EXT_INS;
2999
3000 default:
3001 /* By default assume that $0 can be used for 0. */
3002 return x == 0;
3003 }
3004 }
3005
3006 /* Return the cost of binary operation X, given that the instruction
3007 sequence for a word-sized or smaller operation has cost SINGLE_COST
3008 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3009
3010 static int
3011 mips_binary_cost (rtx x, int single_cost, int double_cost)
3012 {
3013 int cost;
3014
3015 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3016 cost = double_cost;
3017 else
3018 cost = single_cost;
3019 return (cost
3020 + rtx_cost (XEXP (x, 0), 0)
3021 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3022 }
3023
3024 /* Return the cost of floating-point multiplications of mode MODE. */
3025
3026 static int
3027 mips_fp_mult_cost (enum machine_mode mode)
3028 {
3029 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3030 }
3031
3032 /* Return the cost of floating-point divisions of mode MODE. */
3033
3034 static int
3035 mips_fp_div_cost (enum machine_mode mode)
3036 {
3037 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3038 }
3039
3040 /* Return the cost of sign-extending OP to mode MODE, not including the
3041 cost of OP itself. */
3042
3043 static int
3044 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3045 {
3046 if (MEM_P (op))
3047 /* Extended loads are as cheap as unextended ones. */
3048 return 0;
3049
3050 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3051 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3052 return 0;
3053
3054 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3055 /* We can use SEB or SEH. */
3056 return COSTS_N_INSNS (1);
3057
3058 /* We need to use a shift left and a shift right. */
3059 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3060 }
3061
3062 /* Return the cost of zero-extending OP to mode MODE, not including the
3063 cost of OP itself. */
3064
3065 static int
3066 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3067 {
3068 if (MEM_P (op))
3069 /* Extended loads are as cheap as unextended ones. */
3070 return 0;
3071
3072 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3073 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3074 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3075
3076 if (GENERATE_MIPS16E)
3077 /* We can use ZEB or ZEH. */
3078 return COSTS_N_INSNS (1);
3079
3080 if (TARGET_MIPS16)
3081 /* We need to load 0xff or 0xffff into a register and use AND. */
3082 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3083
3084 /* We can use ANDI. */
3085 return COSTS_N_INSNS (1);
3086 }
3087
3088 /* Implement TARGET_RTX_COSTS. */
3089
3090 static bool
3091 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3092 {
3093 enum machine_mode mode = GET_MODE (x);
3094 bool float_mode_p = FLOAT_MODE_P (mode);
3095 int cost;
3096 rtx addr;
3097
3098 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3099 appear in the instruction stream, and the cost of a comparison is
3100 really the cost of the branch or scc condition. At the time of
3101 writing, gcc only uses an explicit outer COMPARE code when optabs
3102 is testing whether a constant is expensive enough to force into a
3103 register. We want optabs to pass such constants through the MIPS
3104 expanders instead, so make all constants very cheap here. */
3105 if (outer_code == COMPARE)
3106 {
3107 gcc_assert (CONSTANT_P (x));
3108 *total = 0;
3109 return true;
3110 }
3111
3112 switch (code)
3113 {
3114 case CONST_INT:
3115 /* Treat *clear_upper32-style ANDs as having zero cost in the
3116 second operand. The cost is entirely in the first operand.
3117
3118 ??? This is needed because we would otherwise try to CSE
3119 the constant operand. Although that's the right thing for
3120 instructions that continue to be a register operation throughout
3121 compilation, it is disastrous for instructions that could
3122 later be converted into a memory operation. */
3123 if (TARGET_64BIT
3124 && outer_code == AND
3125 && UINTVAL (x) == 0xffffffff)
3126 {
3127 *total = 0;
3128 return true;
3129 }
3130
3131 if (TARGET_MIPS16)
3132 {
3133 cost = mips16_constant_cost (outer_code, INTVAL (x));
3134 if (cost >= 0)
3135 {
3136 *total = cost;
3137 return true;
3138 }
3139 }
3140 else
3141 {
3142 /* When not optimizing for size, we care more about the cost
3143 of hot code, and hot code is often in a loop. If a constant
3144 operand needs to be forced into a register, we will often be
3145 able to hoist the constant load out of the loop, so the load
3146 should not contribute to the cost. */
3147 if (!optimize_size
3148 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3149 {
3150 *total = 0;
3151 return true;
3152 }
3153 }
3154 /* Fall through. */
3155
3156 case CONST:
3157 case SYMBOL_REF:
3158 case LABEL_REF:
3159 case CONST_DOUBLE:
3160 if (force_to_mem_operand (x, VOIDmode))
3161 {
3162 *total = COSTS_N_INSNS (1);
3163 return true;
3164 }
3165 cost = mips_const_insns (x);
3166 if (cost > 0)
3167 {
3168 /* If the constant is likely to be stored in a GPR, SETs of
3169 single-insn constants are as cheap as register sets; we
3170 never want to CSE them.
3171
3172 Don't reduce the cost of storing a floating-point zero in
3173 FPRs. If we have a zero in an FPR for other reasons, we
3174 can get better cfg-cleanup and delayed-branch results by
3175 using it consistently, rather than using $0 sometimes and
3176 an FPR at other times. Also, moves between floating-point
3177 registers are sometimes cheaper than (D)MTC1 $0. */
3178 if (cost == 1
3179 && outer_code == SET
3180 && !(float_mode_p && TARGET_HARD_FLOAT))
3181 cost = 0;
3182 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3183 want to CSE the constant itself. It is usually better to
3184 have N copies of the last operation in the sequence and one
3185 shared copy of the other operations. (Note that this is
3186 not true for MIPS16 code, where the final operation in the
3187 sequence is often an extended instruction.)
3188
3189 Also, if we have a CONST_INT, we don't know whether it is
3190 for a word or doubleword operation, so we cannot rely on
3191 the result of mips_build_integer. */
3192 else if (!TARGET_MIPS16
3193 && (outer_code == SET || mode == VOIDmode))
3194 cost = 1;
3195 *total = COSTS_N_INSNS (cost);
3196 return true;
3197 }
3198 /* The value will need to be fetched from the constant pool. */
3199 *total = CONSTANT_POOL_COST;
3200 return true;
3201
3202 case MEM:
3203 /* If the address is legitimate, return the number of
3204 instructions it needs. */
3205 addr = XEXP (x, 0);
3206 cost = mips_address_insns (addr, mode, true);
3207 if (cost > 0)
3208 {
3209 *total = COSTS_N_INSNS (cost + 1);
3210 return true;
3211 }
3212 /* Check for a scaled indexed address. */
3213 if (mips_lwxs_address_p (addr))
3214 {
3215 *total = COSTS_N_INSNS (2);
3216 return true;
3217 }
3218 /* Otherwise use the default handling. */
3219 return false;
3220
3221 case FFS:
3222 *total = COSTS_N_INSNS (6);
3223 return false;
3224
3225 case NOT:
3226 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3227 return false;
3228
3229 case AND:
3230 /* Check for a *clear_upper32 pattern and treat it like a zero
3231 extension. See the pattern's comment for details. */
3232 if (TARGET_64BIT
3233 && mode == DImode
3234 && CONST_INT_P (XEXP (x, 1))
3235 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3236 {
3237 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3238 + rtx_cost (XEXP (x, 0), 0));
3239 return true;
3240 }
3241 /* Fall through. */
3242
3243 case IOR:
3244 case XOR:
3245 /* Double-word operations use two single-word operations. */
3246 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3247 return true;
3248
3249 case ASHIFT:
3250 case ASHIFTRT:
3251 case LSHIFTRT:
3252 case ROTATE:
3253 case ROTATERT:
3254 if (CONSTANT_P (XEXP (x, 1)))
3255 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3256 else
3257 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3258 return true;
3259
3260 case ABS:
3261 if (float_mode_p)
3262 *total = mips_cost->fp_add;
3263 else
3264 *total = COSTS_N_INSNS (4);
3265 return false;
3266
3267 case LO_SUM:
3268 /* Low-part immediates need an extended MIPS16 instruction. */
3269 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3270 + rtx_cost (XEXP (x, 0), 0));
3271 return true;
3272
3273 case LT:
3274 case LTU:
3275 case LE:
3276 case LEU:
3277 case GT:
3278 case GTU:
3279 case GE:
3280 case GEU:
3281 case EQ:
3282 case NE:
3283 case UNORDERED:
3284 case LTGT:
3285 /* Branch comparisons have VOIDmode, so use the first operand's
3286 mode instead. */
3287 mode = GET_MODE (XEXP (x, 0));
3288 if (FLOAT_MODE_P (mode))
3289 {
3290 *total = mips_cost->fp_add;
3291 return false;
3292 }
3293 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3294 return true;
3295
3296 case MINUS:
3297 if (float_mode_p
3298 && ISA_HAS_NMADD_NMSUB
3299 && TARGET_FUSED_MADD
3300 && !HONOR_NANS (mode)
3301 && !HONOR_SIGNED_ZEROS (mode))
3302 {
3303 /* See if we can use NMADD or NMSUB. See mips.md for the
3304 associated patterns. */
3305 rtx op0 = XEXP (x, 0);
3306 rtx op1 = XEXP (x, 1);
3307 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3308 {
3309 *total = (mips_fp_mult_cost (mode)
3310 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3311 + rtx_cost (XEXP (op0, 1), 0)
3312 + rtx_cost (op1, 0));
3313 return true;
3314 }
3315 if (GET_CODE (op1) == MULT)
3316 {
3317 *total = (mips_fp_mult_cost (mode)
3318 + rtx_cost (op0, 0)
3319 + rtx_cost (XEXP (op1, 0), 0)
3320 + rtx_cost (XEXP (op1, 1), 0));
3321 return true;
3322 }
3323 }
3324 /* Fall through. */
3325
3326 case PLUS:
3327 if (float_mode_p)
3328 {
3329 if (ISA_HAS_FP4
3330 && TARGET_FUSED_MADD
3331 && GET_CODE (XEXP (x, 0)) == MULT)
3332 *total = 0;
3333 else
3334 *total = mips_cost->fp_add;
3335 return false;
3336 }
3337
3338 /* Double-word operations require three single-word operations and
3339 an SLTU. The MIPS16 version then needs to move the result of
3340 the SLTU from $24 to a MIPS16 register. */
3341 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3342 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3343 return true;
3344
3345 case NEG:
3346 if (float_mode_p
3347 && ISA_HAS_NMADD_NMSUB
3348 && TARGET_FUSED_MADD
3349 && !HONOR_NANS (mode)
3350 && !HONOR_SIGNED_ZEROS (mode))
3351 {
3352 /* See if we can use NMADD or NMSUB. See mips.md for the
3353 associated patterns. */
3354 rtx op = XEXP (x, 0);
3355 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3356 && GET_CODE (XEXP (op, 0)) == MULT)
3357 {
3358 *total = (mips_fp_mult_cost (mode)
3359 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3360 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3361 + rtx_cost (XEXP (op, 1), 0));
3362 return true;
3363 }
3364 }
3365
3366 if (float_mode_p)
3367 *total = mips_cost->fp_add;
3368 else
3369 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3370 return false;
3371
3372 case MULT:
3373 if (float_mode_p)
3374 *total = mips_fp_mult_cost (mode);
3375 else if (mode == DImode && !TARGET_64BIT)
3376 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3377 where the mulsidi3 always includes an MFHI and an MFLO. */
3378 *total = (optimize_size
3379 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3380 : mips_cost->int_mult_si * 3 + 6);
3381 else if (optimize_size)
3382 *total = (ISA_HAS_MUL3 ? 1 : 2);
3383 else if (mode == DImode)
3384 *total = mips_cost->int_mult_di;
3385 else
3386 *total = mips_cost->int_mult_si;
3387 return false;
3388
3389 case DIV:
3390 /* Check for a reciprocal. */
3391 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3392 {
3393 if (ISA_HAS_FP4
3394 && flag_unsafe_math_optimizations
3395 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3396 {
3397 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3398 division as being free. */
3399 *total = rtx_cost (XEXP (x, 1), 0);
3400 return true;
3401 }
3402 if (!ISA_MIPS1)
3403 {
3404 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3405 return true;
3406 }
3407 }
3408 /* Fall through. */
3409
3410 case SQRT:
3411 case MOD:
3412 if (float_mode_p)
3413 {
3414 *total = mips_fp_div_cost (mode);
3415 return false;
3416 }
3417 /* Fall through. */
3418
3419 case UDIV:
3420 case UMOD:
3421 if (optimize_size)
3422 {
3423 /* It is our responsibility to make division by a power of 2
3424 as cheap as 2 register additions if we want the division
3425 expanders to be used for such operations; see the setting
3426 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3427 should always produce shorter code than using
3428 expand_sdiv2_pow2. */
3429 if (TARGET_MIPS16
3430 && CONST_INT_P (XEXP (x, 1))
3431 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3432 {
3433 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3434 return true;
3435 }
3436 *total = COSTS_N_INSNS (mips_idiv_insns ());
3437 }
3438 else if (mode == DImode)
3439 *total = mips_cost->int_div_di;
3440 else
3441 *total = mips_cost->int_div_si;
3442 return false;
3443
3444 case SIGN_EXTEND:
3445 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3446 return false;
3447
3448 case ZERO_EXTEND:
3449 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3450 return false;
3451
3452 case FLOAT:
3453 case UNSIGNED_FLOAT:
3454 case FIX:
3455 case FLOAT_EXTEND:
3456 case FLOAT_TRUNCATE:
3457 *total = mips_cost->fp_add;
3458 return false;
3459
3460 default:
3461 return false;
3462 }
3463 }
3464
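/* Worked example (illustrative): for a DImode addition on a 32-bit
   target, the PLUS case above charges mips_binary_cost a double-word
   cost of COSTS_N_INSNS (4), or COSTS_N_INSNS (5) for MIPS16: three
   single-word additions, one SLTU for the carry, and (for MIPS16) a
   move of the SLTU result out of $24.  */
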
3465 /* Provide the costs of an addressing mode that contains ADDR.
3466 If ADDR is not a valid address, its cost is irrelevant. */
3467
3468 static int
3469 mips_address_cost (rtx addr)
3470 {
3471 return mips_address_insns (addr, SImode, false);
3472 }
3473 \f
3474 /* Return one word of double-word value OP, taking into account the fixed
3475 endianness of certain registers. HIGH_P is true to select the high part,
3476 false to select the low part. */
3477
3478 rtx
3479 mips_subword (rtx op, int high_p)
3480 {
3481 unsigned int byte;
3482 enum machine_mode mode;
3483
3484 mode = GET_MODE (op);
3485 if (mode == VOIDmode)
3486 mode = DImode;
3487
3488 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3489 byte = UNITS_PER_WORD;
3490 else
3491 byte = 0;
3492
3493 if (FP_REG_RTX_P (op))
3494 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3495
3496 if (MEM_P (op))
3497 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3498
3499 return simplify_gen_subreg (word_mode, op, mode, byte);
3500 }
3501
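/* For illustration: for a DImode or DFmode value on a 32-bit,
   big-endian target, mips_subword (op, true) selects byte offset 0
   (the most significant word) and mips_subword (op, false) selects
   byte offset UNITS_PER_WORD; little-endian targets use the opposite
   offsets.  Values in paired FPRs are instead selected by register
   number, since the word order of such pairs is fixed.  */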
3502
3503 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3504
3505 bool
3506 mips_split_64bit_move_p (rtx dest, rtx src)
3507 {
3508 if (TARGET_64BIT)
3509 return false;
3510
3511 /* FP->FP moves can be done in a single instruction. */
3512 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3513 return false;
3514
3515 /* Check for floating-point loads and stores. They can be done using
3516 ldc1 and sdc1 on MIPS II and above. */
3517 if (mips_isa > 1)
3518 {
3519 if (FP_REG_RTX_P (dest) && MEM_P (src))
3520 return false;
3521 if (FP_REG_RTX_P (src) && MEM_P (dest))
3522 return false;
3523 }
3524 return true;
3525 }
3526
3527
3528 /* Split a 64-bit move from SRC to DEST assuming that
3529 mips_split_64bit_move_p holds.
3530
3531 Moves into and out of FPRs cause some difficulty here. Such moves
3532 will always be DFmode, since paired FPRs are not allowed to store
3533 DImode values. The most natural representation would be two separate
3534 32-bit moves, such as:
3535
3536 (set (reg:SI $f0) (mem:SI ...))
3537 (set (reg:SI $f1) (mem:SI ...))
3538
3539 However, the second insn is invalid because odd-numbered FPRs are
3540 not allowed to store independent values. Use the patterns load_df_low,
3541 load_df_high and store_df_high instead. */
3542
3543 void
3544 mips_split_64bit_move (rtx dest, rtx src)
3545 {
3546 if (FP_REG_RTX_P (dest))
3547 {
3548 /* Loading an FPR from memory or from GPRs. */
3549 if (ISA_HAS_MXHC1)
3550 {
3551 dest = gen_lowpart (DFmode, dest);
3552 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3553 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3554 copy_rtx (dest)));
3555 }
3556 else
3557 {
3558 emit_insn (gen_load_df_low (copy_rtx (dest),
3559 mips_subword (src, 0)));
3560 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3561 copy_rtx (dest)));
3562 }
3563 }
3564 else if (FP_REG_RTX_P (src))
3565 {
3566 /* Storing an FPR into memory or GPRs. */
3567 if (ISA_HAS_MXHC1)
3568 {
3569 src = gen_lowpart (DFmode, src);
3570 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3571 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3572 }
3573 else
3574 {
3575 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3576 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3577 }
3578 }
3579 else
3580 {
3581 /* The operation can be split into two normal moves. Decide in
3582 which order to do them. */
3583 rtx low_dest;
3584
3585 low_dest = mips_subword (dest, 0);
3586 if (REG_P (low_dest)
3587 && reg_overlap_mentioned_p (low_dest, src))
3588 {
3589 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3590 mips_emit_move (low_dest, mips_subword (src, 0));
3591 }
3592 else
3593 {
3594 mips_emit_move (low_dest, mips_subword (src, 0));
3595 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3596 }
3597 }
3598 }
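
/* For illustration: when mthc1/mfhc1 are available (ISA_HAS_MXHC1),
   loading a DFmode FPR from a pair of GPRs uses load_df_low for the
   low word and an mthc1 for the high word; otherwise the high word
   goes through load_df_high.  A move that involves no FPRs simply
   becomes two word moves, ordered so that the first move never
   clobbers a source word that the second one still needs.  */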
3599 \f
3600 /* Return the appropriate instructions to move SRC into DEST. Assume
3601 that SRC is operand 1 and DEST is operand 0. */
3602
3603 const char *
3604 mips_output_move (rtx dest, rtx src)
3605 {
3606 enum rtx_code dest_code, src_code;
3607 enum mips_symbol_type symbol_type;
3608 bool dbl_p;
3609
3610 dest_code = GET_CODE (dest);
3611 src_code = GET_CODE (src);
3612 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3613
3614 if (dbl_p && mips_split_64bit_move_p (dest, src))
3615 return "#";
3616
3617 if ((src_code == REG && GP_REG_P (REGNO (src)))
3618 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3619 {
3620 if (dest_code == REG)
3621 {
3622 if (GP_REG_P (REGNO (dest)))
3623 return "move\t%0,%z1";
3624
3625 if (MD_REG_P (REGNO (dest)))
3626 return "mt%0\t%z1";
3627
3628 if (DSP_ACC_REG_P (REGNO (dest)))
3629 {
3630 static char retval[] = "mt__\t%z1,%q0";
3631 retval[2] = reg_names[REGNO (dest)][4];
3632 retval[3] = reg_names[REGNO (dest)][5];
3633 return retval;
3634 }
3635
3636 if (FP_REG_P (REGNO (dest)))
3637 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3638
3639 if (ALL_COP_REG_P (REGNO (dest)))
3640 {
3641 static char retval[] = "dmtc_\t%z1,%0";
3642
3643 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3644 return (dbl_p ? retval : retval + 1);
3645 }
3646 }
3647 if (dest_code == MEM)
3648 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3649 }
3650 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3651 {
3652 if (src_code == REG)
3653 {
3654 if (DSP_ACC_REG_P (REGNO (src)))
3655 {
3656 static char retval[] = "mf__\t%0,%q1";
3657 retval[2] = reg_names[REGNO (src)][4];
3658 retval[3] = reg_names[REGNO (src)][5];
3659 return retval;
3660 }
3661
3662 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3663 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3664
3665 if (FP_REG_P (REGNO (src)))
3666 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3667
3668 if (ALL_COP_REG_P (REGNO (src)))
3669 {
3670 static char retval[] = "dmfc_\t%0,%1";
3671
3672 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3673 return (dbl_p ? retval : retval + 1);
3674 }
3675 }
3676
3677 if (src_code == MEM)
3678 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3679
3680 if (src_code == CONST_INT)
3681 {
3682 /* Don't use the X format, because that will give out of
3683 range numbers for 64-bit hosts and 32-bit targets. */
3684 if (!TARGET_MIPS16)
3685 return "li\t%0,%1\t\t\t# %X1";
3686
3687 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3688 return "li\t%0,%1";
3689
3690 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3691 return "#";
3692 }
3693
3694 if (src_code == HIGH)
3695 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3696
3697 if (CONST_GP_P (src))
3698 return "move\t%0,%1";
3699
3700 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3701 && mips_lo_relocs[symbol_type] != 0)
3702 {
3703 /* A signed 16-bit constant formed by applying a relocation
3704 operator to a symbolic address. */
3705 gcc_assert (!mips_split_p[symbol_type]);
3706 return "li\t%0,%R1";
3707 }
3708
3709 if (symbolic_operand (src, VOIDmode))
3710 {
3711 gcc_assert (TARGET_MIPS16
3712 ? TARGET_MIPS16_TEXT_LOADS
3713 : !TARGET_EXPLICIT_RELOCS);
3714 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3715 }
3716 }
3717 if (src_code == REG && FP_REG_P (REGNO (src)))
3718 {
3719 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3720 {
3721 if (GET_MODE (dest) == V2SFmode)
3722 return "mov.ps\t%0,%1";
3723 else
3724 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3725 }
3726
3727 if (dest_code == MEM)
3728 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3729 }
3730 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3731 {
3732 if (src_code == MEM)
3733 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3734 }
3735 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3736 {
3737 static char retval[] = "l_c_\t%0,%1";
3738
3739 retval[1] = (dbl_p ? 'd' : 'w');
3740 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3741 return retval;
3742 }
3743 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3744 {
3745 static char retval[] = "s_c_\t%1,%0";
3746
3747 retval[1] = (dbl_p ? 'd' : 'w');
3748 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3749 return retval;
3750 }
3751 gcc_unreachable ();
3752 }
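
/* For illustration: the cases above print a word GPR load as
   "lw\t%0,%1" ("ld" for doubleword values), a GPR-to-FPR move as
   "mtc1"/"dmtc1", and an FPR store as "swc1"/"sdc1".  Any 64-bit move
   that mips_split_64bit_move_p accepts is printed as "#" and later
   split into word-sized moves via mips_split_64bit_move above.  */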
3753 \f
3754 /* Restore $gp from its save slot. Valid only when using o32 or
3755 o64 abicalls. */
3756
3757 void
3758 mips_restore_gp (void)
3759 {
3760 rtx address, slot;
3761
3762 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3763
3764 address = mips_add_offset (pic_offset_table_rtx,
3765 frame_pointer_needed
3766 ? hard_frame_pointer_rtx
3767 : stack_pointer_rtx,
3768 current_function_outgoing_args_size);
3769 slot = gen_rtx_MEM (Pmode, address);
3770
3771 mips_emit_move (pic_offset_table_rtx, slot);
3772 if (!TARGET_EXPLICIT_RELOCS)
3773 emit_insn (gen_blockage ());
3774 }
3775 \f
3776 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3777
3778 static void
3779 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3780 {
3781 emit_insn (gen_rtx_SET (VOIDmode, target,
3782 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3783 }
3784
3785 /* Return true if CMP1 is a suitable second operand for relational
3786 operator CODE. See also the *sCC patterns in mips.md. */
3787
3788 static bool
3789 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3790 {
3791 switch (code)
3792 {
3793 case GT:
3794 case GTU:
3795 return reg_or_0_operand (cmp1, VOIDmode);
3796
3797 case GE:
3798 case GEU:
3799 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3800
3801 case LT:
3802 case LTU:
3803 return arith_operand (cmp1, VOIDmode);
3804
3805 case LE:
3806 return sle_operand (cmp1, VOIDmode);
3807
3808 case LEU:
3809 return sleu_operand (cmp1, VOIDmode);
3810
3811 default:
3812 gcc_unreachable ();
3813 }
3814 }
3815
3816 /* Canonicalize LE or LEU comparisons into LT comparisons when
3817 possible to avoid extra instructions or inverting the
3818 comparison. */
3819
3820 static bool
3821 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3822 enum machine_mode mode)
3823 {
3824 HOST_WIDE_INT original, plus_one;
3825
3826 if (GET_CODE (*cmp1) != CONST_INT)
3827 return false;
3828
3829 original = INTVAL (*cmp1);
3830 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3831
3832 switch (*code)
3833 {
3834 case LE:
3835 if (original < plus_one)
3836 {
3837 *code = LT;
3838 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3839 return true;
3840 }
3841 break;
3842
3843 case LEU:
3844 if (plus_one != 0)
3845 {
3846 *code = LTU;
3847 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3848 return true;
3849 }
3850 break;
3851
3852 default:
3853 return false;
3854 }
3855
3856 return false;
3857
3858 }
3859
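/* For example, (le x 99) is rewritten by the function above as
   (lt x 100), with 100 forced into a register; (leu x -1) is left
   alone, because adding 1 to the all-ones constant would wrap the
   comparison value to zero.  */
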
3860 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3861 result in TARGET. CMP0 and TARGET are register_operands that have
3862 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3863 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3864
3865 static void
3866 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3867 rtx target, rtx cmp0, rtx cmp1)
3868 {
3869 /* First see if there is a MIPS instruction that can do this operation
3870 with CMP1 in its current form. If not, try to canonicalize the
3871 comparison to LT. If that fails, try doing the same for the
3872 inverse operation. If that also fails, force CMP1 into a register
3873 and try again. */
3874 if (mips_relational_operand_ok_p (code, cmp1))
3875 mips_emit_binary (code, target, cmp0, cmp1);
3876 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3877 mips_emit_binary (code, target, cmp0, cmp1);
3878 else
3879 {
3880 enum rtx_code inv_code = reverse_condition (code);
3881 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3882 {
3883 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3884 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3885 }
3886 else if (invert_ptr == 0)
3887 {
3888 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3889 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3890 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3891 }
3892 else
3893 {
3894 *invert_ptr = !*invert_ptr;
3895 mips_emit_binary (inv_code, target, cmp0, cmp1);
3896 }
3897 }
3898 }
3899
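/* For example, a GE comparison against the constant 10 has no direct
   SLT form, so the code above reverses it to LT; the LT result is
   then either XORed with 1 or, when INVERT_PTR is supplied, stored
   as-is with *INVERT_PTR flipped to record the inversion.  */
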
3900 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3901 The register will have the same mode as CMP0. */
3902
3903 static rtx
3904 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3905 {
3906 if (cmp1 == const0_rtx)
3907 return cmp0;
3908
3909 if (uns_arith_operand (cmp1, VOIDmode))
3910 return expand_binop (GET_MODE (cmp0), xor_optab,
3911 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3912
3913 return expand_binop (GET_MODE (cmp0), sub_optab,
3914 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3915 }
3916
3917 /* Convert *CODE into a code that can be used in a floating-point
3918 scc instruction (c.<cond>.<fmt>). Return true if the values of
3919 the condition code registers will be inverted, with 0 indicating
3920 that the condition holds. */
3921
3922 static bool
3923 mips_reverse_fp_cond_p (enum rtx_code *code)
3924 {
3925 switch (*code)
3926 {
3927 case NE:
3928 case LTGT:
3929 case ORDERED:
3930 *code = reverse_condition_maybe_unordered (*code);
3931 return true;
3932
3933 default:
3934 return false;
3935 }
3936 }
3937
3938 /* Convert a comparison into something that can be used in a branch or
3939 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3940 being compared and *CODE is the code used to compare them.
3941
3942 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3943 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3944 otherwise any standard branch condition can be used. The standard branch
3945 conditions are:
3946
3947 - EQ/NE between two registers.
3948 - any comparison between a register and zero. */
3949
3950 static void
3951 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3952 {
3953 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3954 {
3955 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3956 {
3957 *op0 = cmp_operands[0];
3958 *op1 = cmp_operands[1];
3959 }
3960 else if (*code == EQ || *code == NE)
3961 {
3962 if (need_eq_ne_p)
3963 {
3964 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3965 *op1 = const0_rtx;
3966 }
3967 else
3968 {
3969 *op0 = cmp_operands[0];
3970 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3971 }
3972 }
3973 else
3974 {
3975 /* The comparison needs a separate scc instruction. Store the
3976 result of the scc in *OP0 and compare it against zero. */
3977 bool invert = false;
3978 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3979 *op1 = const0_rtx;
3980 mips_emit_int_relational (*code, &invert, *op0,
3981 cmp_operands[0], cmp_operands[1]);
3982 *code = (invert ? EQ : NE);
3983 }
3984 }
3985 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3986 {
3987 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3988 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3989 *code = NE;
3990 *op1 = const0_rtx;
3991 }
3992 else
3993 {
3994 enum rtx_code cmp_code;
3995
3996 /* Floating-point tests use a separate c.cond.fmt comparison to
3997 set a condition code register. The branch or conditional move
3998 will then compare that register against zero.
3999
4000 Set CMP_CODE to the code of the comparison instruction and
4001 *CODE to the code that the branch or move should use. */
4002 cmp_code = *code;
4003 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
4004 *op0 = (ISA_HAS_8CC
4005 ? gen_reg_rtx (CCmode)
4006 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4007 *op1 = const0_rtx;
4008 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4009 }
4010 }
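
/* For illustration: an integer "x > y" branch comparison goes through
   the scc path above, computing an SLT-style result into a scratch
   register and branching on that register being nonzero (or zero if
   the scc had to be inverted).  A floating-point "x != y" is reversed
   by mips_reverse_fp_cond_p, so c.eq.fmt sets the condition-code
   register and the branch is taken when that register reads zero.  */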
4011 \f
4012 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4013 Store the result in TARGET and return true if successful.
4014
4015 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
4016
4017 bool
4018 mips_emit_scc (enum rtx_code code, rtx target)
4019 {
4020 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4021 return false;
4022
4023 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
4024 if (code == EQ || code == NE)
4025 {
4026 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4027 mips_emit_binary (code, target, zie, const0_rtx);
4028 }
4029 else
4030 mips_emit_int_relational (code, 0, target,
4031 cmp_operands[0], cmp_operands[1]);
4032 return true;
4033 }
4034
4035 /* Emit the common code for doing conditional branches.
4036 operand[0] is the label to jump to.
4037 The comparison operands are saved away by cmp{si,di,sf,df}. */
4038
4039 void
4040 gen_conditional_branch (rtx *operands, enum rtx_code code)
4041 {
4042 rtx op0, op1, condition;
4043
4044 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4045 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4046 emit_jump_insn (gen_condjump (condition, operands[0]));
4047 }
4048
4049 /* Implement:
4050
4051 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4052 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4053
4054 void
4055 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4056 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4057 {
4058 rtx cmp_result;
4059 bool reversed_p;
4060
4061 reversed_p = mips_reverse_fp_cond_p (&cond);
4062 cmp_result = gen_reg_rtx (CCV2mode);
4063 emit_insn (gen_scc_ps (cmp_result,
4064 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4065 if (reversed_p)
4066 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4067 cmp_result));
4068 else
4069 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4070 cmp_result));
4071 }
4072
4073 /* Emit the common code for conditional moves. OPERANDS is the array
4074 of operands passed to the conditional move define_expand. */
4075
4076 void
4077 gen_conditional_move (rtx *operands)
4078 {
4079 enum rtx_code code;
4080 rtx op0, op1;
4081
4082 code = GET_CODE (operands[1]);
4083 mips_emit_compare (&code, &op0, &op1, true);
4084 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4085 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4086 gen_rtx_fmt_ee (code,
4087 GET_MODE (op0),
4088 op0, op1),
4089 operands[2], operands[3])));
4090 }
4091
4092 /* Emit a conditional trap. OPERANDS is the array of operands passed to
4093 the conditional_trap expander. */
4094
4095 void
4096 mips_gen_conditional_trap (rtx *operands)
4097 {
4098 rtx op0, op1;
4099 enum rtx_code cmp_code = GET_CODE (operands[0]);
4100 enum machine_mode mode = GET_MODE (cmp_operands[0]);
4101
4102 /* MIPS conditional trap machine instructions don't have GT or LE
4103 flavors, so we must swap the operands and use LT and
4104 GE, respectively. */
4105 switch (cmp_code)
4106 {
4107 case GT: cmp_code = LT; break;
4108 case LE: cmp_code = GE; break;
4109 case GTU: cmp_code = LTU; break;
4110 case LEU: cmp_code = GEU; break;
4111 default: break;
4112 }
4113 if (cmp_code == GET_CODE (operands[0]))
4114 {
4115 op0 = cmp_operands[0];
4116 op1 = cmp_operands[1];
4117 }
4118 else
4119 {
4120 op0 = cmp_operands[1];
4121 op1 = cmp_operands[0];
4122 }
4123 op0 = force_reg (mode, op0);
4124 if (!arith_operand (op1, mode))
4125 op1 = force_reg (mode, op1);
4126
4127 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4128 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
4129 operands[1]));
4130 }
4131 \f
4132 /* Return true if function DECL is a MIPS16 function. Return the ambient
4133 setting if DECL is null. */
4134
4135 static bool
4136 mips_use_mips16_mode_p (tree decl)
4137 {
4138 if (decl)
4139 {
4140 /* Nested functions must use the same frame pointer as their
4141 parent and must therefore use the same ISA mode. */
4142 tree parent = decl_function_context (decl);
4143 if (parent)
4144 decl = parent;
4145 if (mips_mips16_decl_p (decl))
4146 return true;
4147 if (mips_nomips16_decl_p (decl))
4148 return false;
4149 }
4150 return mips_base_mips16;
4151 }
4152
4153 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
4154
4155 static bool
4156 mips_ok_for_lazy_binding_p (rtx x)
4157 {
4158 return (TARGET_USE_GOT
4159 && GET_CODE (x) == SYMBOL_REF
4160 && !mips_symbol_binds_local_p (x));
4161 }
4162
4163 /* Load function address ADDR into register DEST. SIBCALL_P is true
4164 if the address is needed for a sibling call. */
4165
4166 static void
4167 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
4168 {
4169 /* If we're generating PIC, and this call is to a global function,
4170 try to allow its address to be resolved lazily. This isn't
4171 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
4172 to the stub would be our caller's gp, not ours. */
4173 if (TARGET_EXPLICIT_RELOCS
4174 && !(sibcall_p && TARGET_CALL_SAVED_GP)
4175 && mips_ok_for_lazy_binding_p (addr))
4176 {
4177 rtx high, lo_sum_symbol;
4178
4179 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
4180 addr, SYMBOL_GOTOFF_CALL);
4181 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
4182 if (Pmode == SImode)
4183 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
4184 else
4185 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
4186 }
4187 else
4188 mips_emit_move (dest, addr);
4189 }
4190
4191
4192 /* Expand a call or call_value instruction. RESULT is where the
4193 result will go (null for calls), ADDR is the address of the
4194 function, ARGS_SIZE is the size of the arguments and AUX is
4195 the value passed to us by mips_function_arg. SIBCALL_P is true
4196 if we are expanding a sibling call, false if we're expanding
4197 a normal call. */
4198
4199 void
4200 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
4201 {
4202 rtx orig_addr, pattern, insn;
4203
4204 orig_addr = addr;
4205 if (!call_insn_operand (addr, VOIDmode))
4206 {
4207 addr = gen_reg_rtx (Pmode);
4208 mips_load_call_address (addr, orig_addr, sibcall_p);
4209 }
4210
4211 if (TARGET_MIPS16
4212 && TARGET_HARD_FLOAT_ABI
4213 && build_mips16_call_stub (result, addr, args_size,
4214 aux == 0 ? 0 : (int) GET_MODE (aux)))
4215 return;
4216
4217 if (result == 0)
4218 pattern = (sibcall_p
4219 ? gen_sibcall_internal (addr, args_size)
4220 : gen_call_internal (addr, args_size));
4221 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
4222 {
4223 rtx reg1, reg2;
4224
4225 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
4226 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
4227 pattern =
4228 (sibcall_p
4229 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
4230 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
4231 }
4232 else
4233 pattern = (sibcall_p
4234 ? gen_sibcall_value_internal (result, addr, args_size)
4235 : gen_call_value_internal (result, addr, args_size));
4236
4237 insn = emit_call_insn (pattern);
4238
4239 /* Lazy-binding stubs require $gp to be valid on entry. */
4240 if (mips_ok_for_lazy_binding_p (orig_addr))
4241 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4242 }
4243
4244
4245 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4246
4247 static bool
4248 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4249 {
4250 if (!TARGET_SIBCALLS)
4251 return false;
4252
4253 /* We can't do a sibcall if the called function is a MIPS16 function
4254 because there is no direct "jx" instruction equivalent to "jalx" to
4255 switch the ISA mode. */
4256 if (mips_use_mips16_mode_p (decl))
4257 return false;
4258
4259 /* ...and when -minterlink-mips16 is in effect, assume that external
4260 functions could be MIPS16 ones unless an attribute explicitly
4261 tells us otherwise. We only care about cases where the sibling
4262 and normal calls would both be direct. */
4263 if (TARGET_INTERLINK_MIPS16
4264 && decl
4265 && DECL_EXTERNAL (decl)
4266 && !mips_nomips16_decl_p (decl)
4267 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
4268 return false;
4269
4270 /* Otherwise OK. */
4271 return true;
4272 }
4273 \f
4274 /* Emit code to move general operand SRC into condition-code
4275 register DEST. SCRATCH is a scratch TFmode float register.
4276 The sequence is:
4277
4278 FP1 = SRC
4279 FP2 = 0.0f
4280 DEST = FP2 < FP1
4281
4282 where FP1 and FP2 are single-precision float registers
4283 taken from SCRATCH. */
4284
4285 void
4286 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
4287 {
4288 rtx fp1, fp2;
4289
4290 /* Change the source to SFmode. */
4291 if (MEM_P (src))
4292 src = adjust_address (src, SFmode, 0);
4293 else if (REG_P (src) || GET_CODE (src) == SUBREG)
4294 src = gen_rtx_REG (SFmode, true_regnum (src));
4295
4296 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
4297 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
4298
4299 mips_emit_move (copy_rtx (fp1), src);
4300 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
4301 emit_insn (gen_slt_sf (dest, fp2, fp1));
4302 }
4303 \f
4304 /* Emit code to change the current function's return address to
4305 ADDRESS. SCRATCH is available as a scratch register, if needed.
4306 ADDRESS and SCRATCH are both word-mode GPRs. */
4307
4308 void
4309 mips_set_return_address (rtx address, rtx scratch)
4310 {
4311 rtx slot_address;
4312
4313 compute_frame_size (get_frame_size ());
4314 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
4315 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
4316 cfun->machine->frame.gp_sp_offset);
4317
4318 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
4319 }
4320 \f
4321 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
4322 Assume that the areas do not overlap. */
4323
4324 static void
4325 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
4326 {
4327 HOST_WIDE_INT offset, delta;
4328 unsigned HOST_WIDE_INT bits;
4329 int i;
4330 enum machine_mode mode;
4331 rtx *regs;
4332
4333 /* Work out how many bits to move at a time. If both operands have
4334 half-word alignment, it is usually better to move in half words.
4335 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
4336 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
4337 Otherwise move word-sized chunks. */
4338 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
4339 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
4340 bits = BITS_PER_WORD / 2;
4341 else
4342 bits = BITS_PER_WORD;
4343
4344 mode = mode_for_size (bits, MODE_INT, 0);
4345 delta = bits / BITS_PER_UNIT;
4346
4347 /* Allocate a buffer for the temporary registers. */
4348 regs = alloca (sizeof (rtx) * length / delta);
4349
4350 /* Load as many BITS-sized chunks as possible. Use a normal load if
4351 the source has enough alignment, otherwise use left/right pairs. */
4352 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4353 {
4354 regs[i] = gen_reg_rtx (mode);
4355 if (MEM_ALIGN (src) >= bits)
4356 mips_emit_move (regs[i], adjust_address (src, mode, offset));
4357 else
4358 {
4359 rtx part = adjust_address (src, BLKmode, offset);
4360 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
4361 gcc_unreachable ();
4362 }
4363 }
4364
4365 /* Copy the chunks to the destination. */
4366 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4367 if (MEM_ALIGN (dest) >= bits)
4368 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
4369 else
4370 {
4371 rtx part = adjust_address (dest, BLKmode, offset);
4372 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
4373 gcc_unreachable ();
4374 }
4375
4376 /* Mop up any left-over bytes. */
4377 if (offset < length)
4378 {
4379 src = adjust_address (src, BLKmode, offset);
4380 dest = adjust_address (dest, BLKmode, offset);
4381 move_by_pieces (dest, src, length - offset,
4382 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
4383 }
4384 }
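
/* For illustration: a 9-byte copy between word-aligned buffers on a
   32-bit target loads two SImode chunks into temporaries, stores them
   back, and hands the final byte to move_by_pieces.  If both operands
   have exactly half-word alignment the chunks are HImode instead, and
   operands with less alignment than the chunk size go through the
   unaligned load/store expanders (lwl/lwr-style accesses).  */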
4385 \f
4386 #define MAX_MOVE_REGS 4
4387 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4388
4389
4390 /* Helper function for doing a loop-based block operation on memory
4391 reference MEM. Each iteration of the loop will operate on LENGTH
4392 bytes of MEM.
4393
4394 Create a new base register for use within the loop and point it to
4395 the start of MEM. Create a new memory reference that uses this
4396 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4397
4398 static void
4399 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4400 rtx *loop_reg, rtx *loop_mem)
4401 {
4402 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4403
4404 /* Although the new mem does not refer to a known location,
4405 it does keep up to LENGTH bytes of alignment. */
4406 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4407 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4408 }
4409
4410
4411 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4412 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4413 memory regions do not overlap. */
4414
4415 static void
4416 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4417 {
4418 rtx label, src_reg, dest_reg, final_src;
4419 HOST_WIDE_INT leftover;
4420
4421 leftover = length % MAX_MOVE_BYTES;
4422 length -= leftover;
4423
4424 /* Create registers and memory references for use within the loop. */
4425 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4426 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4427
4428 /* Calculate the value that SRC_REG should have after the last iteration
4429 of the loop. */
4430 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4431 0, 0, OPTAB_WIDEN);
4432
4433 /* Emit the start of the loop. */
4434 label = gen_label_rtx ();
4435 emit_label (label);
4436
4437 /* Emit the loop body. */
4438 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4439
4440 /* Move on to the next block. */
4441 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4442 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4443
4444 /* Emit the loop condition. */
4445 if (Pmode == DImode)
4446 emit_insn (gen_cmpdi (src_reg, final_src));
4447 else
4448 emit_insn (gen_cmpsi (src_reg, final_src));
4449 emit_jump_insn (gen_bne (label));
4450
4451 /* Mop up any left-over bytes. */
4452 if (leftover)
4453 mips_block_move_straight (dest, src, leftover);
4454 }
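
/* For illustration: on a 32-bit target MAX_MOVE_BYTES is 16, so a
   70-byte copy becomes a four-iteration loop moving 16 bytes per pass,
   followed by a straight-line copy of the remaining 6 bytes.  */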
4455 \f
4456
4457 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4458
4459 void
4460 mips_expand_synci_loop (rtx begin, rtx end)
4461 {
4462 rtx inc, label, cmp, cmp_result;
4463
4464 /* Load INC with the cache line size (rdhwr INC,$1). */
4465 inc = gen_reg_rtx (SImode);
4466 emit_insn (gen_rdhwr (inc, const1_rtx));
4467
4468 /* Loop back to here. */
4469 label = gen_label_rtx ();
4470 emit_label (label);
4471
4472 emit_insn (gen_synci (begin));
4473
4474 cmp = gen_reg_rtx (Pmode);
4475 mips_emit_binary (GTU, cmp, begin, end);
4476
4477 mips_emit_binary (PLUS, begin, begin, inc);
4478
4479 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4480 emit_jump_insn (gen_condjump (cmp_result, label));
4481 }
4482 \f
4483 /* Expand a movmemsi instruction. */
4484
4485 bool
4486 mips_expand_block_move (rtx dest, rtx src, rtx length)
4487 {
4488 if (GET_CODE (length) == CONST_INT)
4489 {
4490 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4491 {
4492 mips_block_move_straight (dest, src, INTVAL (length));
4493 return true;
4494 }
4495 else if (optimize)
4496 {
4497 mips_block_move_loop (dest, src, INTVAL (length));
4498 return true;
4499 }
4500 }
4501 return false;
4502 }
4503 \f
4504 /* Argument support functions. */
4505
4506 /* Initialize CUMULATIVE_ARGS for a function. */
4507
4508 void
4509 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4510 rtx libname ATTRIBUTE_UNUSED)
4511 {
4512 static CUMULATIVE_ARGS zero_cum;
4513 tree param, next_param;
4514
4515 *cum = zero_cum;
4516 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4517
4518 /* Determine if this function has variable arguments. This is
4519 indicated by the last argument being 'void_type_node' if there
4520 are no variable arguments. The standard MIPS calling sequence
4521 passes all arguments in the general purpose registers in this case. */
4522
4523 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4524 param != 0; param = next_param)
4525 {
4526 next_param = TREE_CHAIN (param);
4527 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4528 cum->gp_reg_found = 1;
4529 }
4530 }
4531
4532
4533 /* Fill INFO with information about a single argument. CUM is the
4534 cumulative state for earlier arguments. MODE is the mode of this
4535 argument and TYPE is its type (if known). NAMED is true if this
4536 is a named (fixed) argument rather than a variable one. */
4537
4538 static void
4539 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4540 tree type, int named, struct mips_arg_info *info)
4541 {
4542 bool doubleword_aligned_p;
4543 unsigned int num_bytes, num_words, max_regs;
4544
4545 /* Work out the size of the argument. */
4546 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4547 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4548
4549 /* Decide whether it should go in a floating-point register, assuming
4550 one is free. Later code checks for availability.
4551
4552 The checks against UNITS_PER_FPVALUE handle the soft-float and
4553 single-float cases. */
4554 switch (mips_abi)
4555 {
4556 case ABI_EABI:
4557 /* The EABI conventions have traditionally been defined in terms
4558 of TYPE_MODE, regardless of the actual type. */
4559 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4560 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4561 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4562 break;
4563
4564 case ABI_32:
4565 case ABI_O64:
4566 /* Only leading floating-point scalars are passed in
4567 floating-point registers. We also handle vector floats the same
4568 way, which is OK because they are not covered by the standard ABI. */
4569 info->fpr_p = (!cum->gp_reg_found
4570 && cum->arg_number < 2
4571 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4572 || VECTOR_FLOAT_TYPE_P (type))
4573 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4574 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4575 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4576 break;
4577
4578 case ABI_N32:
4579 case ABI_64:
4580 /* Scalar and complex floating-point types are passed in
4581 floating-point registers. */
4582 info->fpr_p = (named
4583 && (type == 0 || FLOAT_TYPE_P (type))
4584 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4585 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4586 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4587 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4588
4589 /* ??? According to the ABI documentation, the real and imaginary
4590 parts of complex floats should be passed in individual registers.
4591 The real and imaginary parts of stack arguments are supposed
4592 to be contiguous and there should be an extra word of padding
4593 at the end.
4594
4595 This has two problems. First, it makes it impossible to use a
4596 single "void *" va_list type, since register and stack arguments
4597 are passed differently. (At the time of writing, MIPSpro cannot
4598 handle complex float varargs correctly.) Second, it's unclear
4599 what should happen when there is only one register free.
4600
4601 For now, we assume that named complex floats should go into FPRs
4602 if there are two FPRs free, otherwise they should be passed in the
4603 same way as a struct containing two floats. */
4604 if (info->fpr_p
4605 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4606 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4607 {
4608 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4609 info->fpr_p = false;
4610 else
4611 num_words = 2;
4612 }
4613 break;
4614
4615 default:
4616 gcc_unreachable ();
4617 }
4618
4619 /* See whether the argument has doubleword alignment. */
4620 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4621
4622 /* Set REG_OFFSET to the register count we're interested in.
4623 The EABI allocates the floating-point registers separately,
4624 but the other ABIs allocate them like integer registers. */
4625 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4626 ? cum->num_fprs
4627 : cum->num_gprs);
4628
4629 /* Advance to an even register if the argument is doubleword-aligned. */
4630 if (doubleword_aligned_p)
4631 info->reg_offset += info->reg_offset & 1;
4632
4633 /* Work out the offset of a stack argument. */
4634 info->stack_offset = cum->stack_words;
4635 if (doubleword_aligned_p)
4636 info->stack_offset += info->stack_offset & 1;
4637
4638 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4639
4640 /* Partition the argument between registers and stack. */
4641 info->reg_words = MIN (num_words, max_regs);
4642 info->stack_words = num_words - info->reg_words;
4643 }
4644
4645
4646 /* INFO describes an argument that is passed in a single-register value.
4647 Return the register it uses, assuming that FPRs are available if
4648 HARD_FLOAT_P. */
4649
4650 static unsigned int
4651 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4652 {
4653 if (!info->fpr_p || !hard_float_p)
4654 return GP_ARG_FIRST + info->reg_offset;
4655 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4656 /* In o32, the second argument is always passed in $f14
4657 for TARGET_DOUBLE_FLOAT, regardless of whether the
4658 first argument was a word or doubleword. */
4659 return FP_ARG_FIRST + 2;
4660 else
4661 return FP_ARG_FIRST + info->reg_offset;
4662 }
4663
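/* For example, under o32 with TARGET_DOUBLE_FLOAT, the arguments of
   "void f (float a, float b)" both qualify as leading FP scalars, so
   A is passed in $f12 and B in $f14; the second FP argument always
   lands in $f14 even when the first one occupied two words.  */
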
4664 /* Implement FUNCTION_ARG_ADVANCE. */
4665
4666 void
4667 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4668 tree type, int named)
4669 {
4670 struct mips_arg_info info;
4671
4672 mips_arg_info (cum, mode, type, named, &info);
4673
4674 if (!info.fpr_p)
4675 cum->gp_reg_found = true;
4676
4677 /* See the comment above the cumulative args structure in mips.h
4678 for an explanation of what this code does. It assumes the O32
4679 ABI, which passes at most 2 arguments in float registers. */
4680 if (cum->arg_number < 2 && info.fpr_p)
4681 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4682
4683 if (mips_abi != ABI_EABI || !info.fpr_p)
4684 cum->num_gprs = info.reg_offset + info.reg_words;
4685 else if (info.reg_words > 0)
4686 cum->num_fprs += MAX_FPRS_PER_FMT;
4687
4688 if (info.stack_words > 0)
4689 cum->stack_words = info.stack_offset + info.stack_words;
4690
4691 cum->arg_number++;
4692 }
4693
4694 /* Implement FUNCTION_ARG. */
4695
4696 struct rtx_def *
4697 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4698 tree type, int named)
4699 {
4700 struct mips_arg_info info;
4701
4702 /* We will be called with a mode of VOIDmode after the last argument
4703 has been seen. Whatever we return will be passed to the call
4704 insn. If we need a mips16 fp_code, return a REG with the code
4705 stored as the mode. */
4706 if (mode == VOIDmode)
4707 {
4708 if (TARGET_MIPS16 && cum->fp_code != 0)
4709 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4710
4711 else
4712 return 0;
4713 }
4714
4715 mips_arg_info (cum, mode, type, named, &info);
4716
4717 /* Return straight away if the whole argument is passed on the stack. */
4718 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4719 return 0;
4720
4721 if (type != 0
4722 && TREE_CODE (type) == RECORD_TYPE
4723 && TARGET_NEWABI
4724 && TYPE_SIZE_UNIT (type)
4725 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4726 && named)
4727 {
4728 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4729 structure contains a double in its entirety, then that 64-bit
4730 chunk is passed in a floating point register. */
4731 tree field;
4732
4733 /* First check to see if there is any such field. */
4734 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4735 if (TREE_CODE (field) == FIELD_DECL
4736 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4737 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4738 && host_integerp (bit_position (field), 0)
4739 && int_bit_position (field) % BITS_PER_WORD == 0)
4740 break;
4741
4742 if (field != 0)
4743 {
4744 /* Now handle the special case by returning a PARALLEL
4745 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4746 chunks are passed in registers. */
4747 unsigned int i;
4748 HOST_WIDE_INT bitpos;
4749 rtx ret;
4750
4751 /* assign_parms checks the mode of ENTRY_PARM, so we must
4752 use the actual mode here. */
4753 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4754
4755 bitpos = 0;
4756 field = TYPE_FIELDS (type);
4757 for (i = 0; i < info.reg_words; i++)
4758 {
4759 rtx reg;
4760
4761 for (; field; field = TREE_CHAIN (field))
4762 if (TREE_CODE (field) == FIELD_DECL
4763 && int_bit_position (field) >= bitpos)
4764 break;
4765
4766 if (field
4767 && int_bit_position (field) == bitpos
4768 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4769 && !TARGET_SOFT_FLOAT
4770 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4771 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4772 else
4773 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4774
4775 XVECEXP (ret, 0, i)
4776 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4777 GEN_INT (bitpos / BITS_PER_UNIT));
4778
4779 bitpos += BITS_PER_WORD;
4780 }
4781 return ret;
4782 }
4783 }
4784
4785 /* Handle the n32/n64 conventions for passing complex floating-point
4786 arguments in FPR pairs. The real part goes in the lower register
4787 and the imaginary part goes in the upper register. */
4788 if (TARGET_NEWABI
4789 && info.fpr_p
4790 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4791 {
4792 rtx real, imag;
4793 enum machine_mode inner;
4794 int reg;
4795
4796 inner = GET_MODE_INNER (mode);
4797 reg = FP_ARG_FIRST + info.reg_offset;
4798 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4799 {
4800 /* Real part in registers, imaginary part on stack. */
4801 gcc_assert (info.stack_words == info.reg_words);
4802 return gen_rtx_REG (inner, reg);
4803 }
4804 else
4805 {
4806 gcc_assert (info.stack_words == 0);
4807 real = gen_rtx_EXPR_LIST (VOIDmode,
4808 gen_rtx_REG (inner, reg),
4809 const0_rtx);
4810 imag = gen_rtx_EXPR_LIST (VOIDmode,
4811 gen_rtx_REG (inner,
4812 reg + info.reg_words / 2),
4813 GEN_INT (GET_MODE_SIZE (inner)));
4814 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4815 }
4816 }
4817
4818 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4819 }
4820
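/* For example, under n32/n64 a named _Complex double argument whose
   two FPRs are available is returned above as a PARALLEL: the real
   part in the first FPR of the pair at offset 0 and the imaginary
   part in the following FPR at offset GET_MODE_SIZE (DFmode).  */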
4821
4822 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4823
4824 static int
4825 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4826 enum machine_mode mode, tree type, bool named)
4827 {
4828 struct mips_arg_info info;
4829
4830 mips_arg_info (cum, mode, type, named, &info);
4831 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4832 }
4833
4834
4835 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4836 PARM_BOUNDARY bits of alignment, but will be given anything up
4837 to STACK_BOUNDARY bits if the type requires it. */
4838
4839 int
4840 function_arg_boundary (enum machine_mode mode, tree type)
4841 {
4842 unsigned int alignment;
4843
4844 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4845 if (alignment < PARM_BOUNDARY)
4846 alignment = PARM_BOUNDARY;
4847 if (alignment > STACK_BOUNDARY)
4848 alignment = STACK_BOUNDARY;
4849 return alignment;
4850 }
4851
4852 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4853 upward rather than downward. In other words, return true if the
4854 first byte of the stack slot has useful data, false if the last
4855 byte does. */
4856
4857 bool
4858 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4859 {
4860 /* On little-endian targets, the first byte of every stack argument
4861 is passed in the first byte of the stack slot. */
4862 if (!BYTES_BIG_ENDIAN)
4863 return true;
4864
4865 /* Otherwise, integral types are padded downward: the last byte of a
4866 stack argument is passed in the last byte of the stack slot. */
4867 if (type != 0
4868 ? (INTEGRAL_TYPE_P (type)
4869 || POINTER_TYPE_P (type)
4870 || FIXED_POINT_TYPE_P (type))
4871 : (GET_MODE_CLASS (mode) == MODE_INT
4872 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4873 return false;
4874
4875 /* Big-endian o64 pads floating-point arguments downward. */
4876 if (mips_abi == ABI_O64)
4877 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4878 return false;
4879
4880 /* Other types are padded upward for o32, o64, n32 and n64. */
4881 if (mips_abi != ABI_EABI)
4882 return true;
4883
4884 /* Arguments smaller than a stack slot are padded downward. */
4885 if (mode != BLKmode)
4886 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4887 else
4888 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4889 }
4890
4891
4892 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4893 if the least significant byte of the register has useful data. Return
4894 the opposite if the most significant byte does. */
4895
4896 bool
4897 mips_pad_reg_upward (enum machine_mode mode, tree type)
4898 {
4899 /* No shifting is required for floating-point arguments. */
4900 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4901 return !BYTES_BIG_ENDIAN;
4902
4903 /* Otherwise, apply the same padding to register arguments as we do
4904 to stack arguments. */
4905 return mips_pad_arg_upward (mode, type);
4906 }
4907 \f
4908 static void
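/* Implement TARGET_SETUP_INCOMING_VARARGS: save any argument
   registers that might hold unnamed arguments into the varargs save
   area, so that va_arg can later read them from memory.  CUM
   describes the named arguments, MODE and TYPE describe the last
   named argument, and NO_RTL is true if only the save-area size
   should be computed, without emitting any code.  */
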
4909 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4910 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4911 int no_rtl)
4912 {
4913 CUMULATIVE_ARGS local_cum;
4914 int gp_saved, fp_saved;
4915
4916 /* The caller has advanced CUM up to, but not beyond, the last named
4917 argument. Advance a local copy of CUM past the last "real" named
4918 argument, to find out how many registers are left over. */
4919
4920 local_cum = *cum;
4921 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4922
4923 /* Find out how many registers we need to save. */
4924 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4925 fp_saved = (EABI_FLOAT_VARARGS_P
4926 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4927 : 0);
4928
4929 if (!no_rtl)
4930 {
4931 if (gp_saved > 0)
4932 {
4933 rtx ptr, mem;
4934
4935 ptr = plus_constant (virtual_incoming_args_rtx,
4936 REG_PARM_STACK_SPACE (cfun->decl)
4937 - gp_saved * UNITS_PER_WORD);
4938 mem = gen_rtx_MEM (BLKmode, ptr);
4939 set_mem_alias_set (mem, get_varargs_alias_set ());
4940
4941 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4942 mem, gp_saved);
4943 }
4944 if (fp_saved > 0)
4945 {
4946 /* We can't use move_block_from_reg, because it will use
4947 the wrong mode. */
4948 enum machine_mode mode;
4949 int off, i;
4950
4951 /* Set OFF to the offset from virtual_incoming_args_rtx of
4952 the first float register. The FP save area lies below
4953 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4954 off = -gp_saved * UNITS_PER_WORD;
4955 off &= ~(UNITS_PER_FPVALUE - 1);
4956 off -= fp_saved * UNITS_PER_FPREG;
4957
4958 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4959
4960 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4961 i += MAX_FPRS_PER_FMT)
4962 {
4963 rtx ptr, mem;
4964
4965 ptr = plus_constant (virtual_incoming_args_rtx, off);
4966 mem = gen_rtx_MEM (mode, ptr);
4967 set_mem_alias_set (mem, get_varargs_alias_set ());
4968 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4969 off += UNITS_PER_HWFPVALUE;
4970 }
4971 }
4972 }
4973 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4974 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4975 + fp_saved * UNITS_PER_FPREG);
4976 }
4977
4978 /* Create the va_list data type.
4979 We keep 3 pointers, and two offsets.
4980 Two pointers are to the overflow area, which starts at the CFA.
4981 One of these is constant, for addressing into the GPR save area below it.
4982 The other is advanced up the stack through the overflow region.
4983 The third pointer is to the GPR save area. Since the FPR save area
4984 is just below it, we can address FPR slots off this pointer.
4985 We also keep two one-byte offsets, which are to be subtracted from the
4986 constant pointers to yield addresses in the GPR and FPR save areas.
4987 These are downcounted as float or non-float arguments are used,
4988 and when they get to zero, the argument must be obtained from the
4989 overflow region.
4990 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4991 pointer is enough. It's started at the GPR save area, and is
4992 advanced, period.
4993 Note that the GPR save area is not constant size, due to optimization
4994 in the prologue. Hence, we can't use a design with two pointers
4995 and two offsets, although we could have designed this with two pointers
4996 and three offsets. */
4997
4998 static tree
4999 mips_build_builtin_va_list (void)
5000 {
5001 if (EABI_FLOAT_VARARGS_P)
5002 {
5003 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5004 tree array, index;
5005
5006 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5007
5008 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
5009 ptr_type_node);
5010 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
5011 ptr_type_node);
5012 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
5013 ptr_type_node);
5014 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
5015 unsigned_char_type_node);
5016 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
5017 unsigned_char_type_node);
5018 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5019 warn on every user file. */
5020 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5021 array = build_array_type (unsigned_char_type_node,
5022 build_index_type (index));
5023 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
5024
5025 DECL_FIELD_CONTEXT (f_ovfl) = record;
5026 DECL_FIELD_CONTEXT (f_gtop) = record;
5027 DECL_FIELD_CONTEXT (f_ftop) = record;
5028 DECL_FIELD_CONTEXT (f_goff) = record;
5029 DECL_FIELD_CONTEXT (f_foff) = record;
5030 DECL_FIELD_CONTEXT (f_res) = record;
5031
5032 TYPE_FIELDS (record) = f_ovfl;
5033 TREE_CHAIN (f_ovfl) = f_gtop;
5034 TREE_CHAIN (f_gtop) = f_ftop;
5035 TREE_CHAIN (f_ftop) = f_goff;
5036 TREE_CHAIN (f_goff) = f_foff;
5037 TREE_CHAIN (f_foff) = f_res;
5038
5039 layout_type (record);
5040 return record;
5041 }
5042 else if (TARGET_IRIX && TARGET_IRIX6)
5043 /* On IRIX 6, this type is 'char *'. */
5044 return build_pointer_type (char_type_node);
5045 else
5046 /* Otherwise, we use 'void *'. */
5047 return ptr_type_node;
5048 }
5049
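/* For reference, the EABI record built above corresponds roughly to
   the following C structure (an illustrative sketch only; the exact
   layout is whatever layout_type chooses for the fields):

     struct {
       void *__overflow_argptr;      // next overflow (stack) argument
       void *__gpr_top;              // top of the GPR save area
       void *__fpr_top;              // top of the FPR save area
       unsigned char __gpr_offset;   // offset of next GPR arg below __gpr_top
       unsigned char __fpr_offset;   // offset of next FPR arg below __fpr_top
       unsigned char __reserved[sizeof (void *) - 2];  // padding only
     };  */
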
5050 /* Implement va_start. */
5051
5052 void
5053 mips_va_start (tree valist, rtx nextarg)
5054 {
5055 if (EABI_FLOAT_VARARGS_P)
5056 {
5057 const CUMULATIVE_ARGS *cum;
5058 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5059 tree ovfl, gtop, ftop, goff, foff;
5060 tree t;
5061 int gpr_save_area_size;
5062 int fpr_save_area_size;
5063 int fpr_offset;
5064
5065 cum = &current_function_args_info;
5066 gpr_save_area_size
5067 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5068 fpr_save_area_size
5069 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5070
5071 f_ovfl = TYPE_FIELDS (va_list_type_node);
5072 f_gtop = TREE_CHAIN (f_ovfl);
5073 f_ftop = TREE_CHAIN (f_gtop);
5074 f_goff = TREE_CHAIN (f_ftop);
5075 f_foff = TREE_CHAIN (f_goff);
5076
5077 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5078 NULL_TREE);
5079 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5080 NULL_TREE);
5081 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5082 NULL_TREE);
5083 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5084 NULL_TREE);
5085 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5086 NULL_TREE);
5087
5088 /* Emit code to initialize OVFL, which points to the next varargs
5089 stack argument. CUM->STACK_WORDS gives the number of stack
5090 words used by named arguments. */
5091 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5092 if (cum->stack_words > 0)
5093 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5094 size_int (cum->stack_words * UNITS_PER_WORD));
5095 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5096 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5097
5098 /* Emit code to initialize GTOP, the top of the GPR save area. */
5099 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5100 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
5101 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5102
5103 /* Emit code to initialize FTOP, the top of the FPR save area.
5104 This address is gpr_save_area_size bytes below GTOP, rounded
5105 down to the next fp-aligned boundary. */
5106 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5107 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5108 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
5109 if (fpr_offset)
5110 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5111 size_int (-fpr_offset));
5112 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
5113 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5114
5115 /* Emit code to initialize GOFF, the offset from GTOP of the
5116 next GPR argument. */
5117 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
5118 build_int_cst (NULL_TREE, gpr_save_area_size));
5119 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5120
5121 /* Likewise emit code to initialize FOFF, the offset from FTOP
5122 of the next FPR argument. */
5123 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
5124 build_int_cst (NULL_TREE, fpr_save_area_size));
5125 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5126 }
5127 else
5128 {
5129 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5130 std_expand_builtin_va_start (valist, nextarg);
5131 }
5132 }
5133 \f
5134 /* Implement va_arg. */
5135
5136 static tree
5137 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5138 {
5139 HOST_WIDE_INT size, rsize;
5140 tree addr;
5141 bool indirect;
5142
5143 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5144
5145 if (indirect)
5146 type = build_pointer_type (type);
5147
5148 size = int_size_in_bytes (type);
5149 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5150
5151 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
5152 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5153 else
5154 {
5155 /* Not a simple merged stack. */
5156
5157 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5158 tree ovfl, top, off, align;
5159 HOST_WIDE_INT osize;
5160 tree t, u;
5161
5162 f_ovfl = TYPE_FIELDS (va_list_type_node);
5163 f_gtop = TREE_CHAIN (f_ovfl);
5164 f_ftop = TREE_CHAIN (f_gtop);
5165 f_goff = TREE_CHAIN (f_ftop);
5166 f_foff = TREE_CHAIN (f_goff);
5167
5168 /* We maintain separate pointers and offsets for floating-point
5169 and integer arguments, but we need similar code in both cases.
5170 Let:
5171
5172 TOP be the top of the register save area;
5173 OFF be the offset from TOP of the next register;
5174 ADDR_RTX be the address of the argument;
5175 RSIZE be the number of bytes used to store the argument
5176 when it's in the register save area;
5177 OSIZE be the number of bytes used to store it when it's
5178 in the stack overflow area; and
5179 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
5180
5181 The code we want is:
5182
5183 1: off &= -rsize; // round down
5184 2: if (off != 0)
5185 3: {
5186 4: addr_rtx = top - off;
5187 5: off -= rsize;
5188 6: }
5189 7: else
5190 8: {
5191 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5192 10: addr_rtx = ovfl + PADDING;
5193 11: ovfl += osize;
5194 12: }
5195
5196 [1] and [9] can sometimes be optimized away. */
5197
5198 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5199 NULL_TREE);
5200
5201 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5202 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5203 {
5204 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5205 NULL_TREE);
5206 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5207 NULL_TREE);
5208
5209 /* When floating-point registers are saved to the stack,
5210 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
5211 of the float's precision. */
5212 rsize = UNITS_PER_HWFPVALUE;
5213
5214 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5215 (= PARM_BOUNDARY bits). This can be different from RSIZE
5216 in two cases:
5217
5218 (1) On 32-bit targets when TYPE is a structure such as:
5219
5220 struct s { float f; };
5221
5222 Such structures are passed in paired FPRs, so RSIZE
5223 will be 8 bytes. However, the structure only takes
5224 up 4 bytes of memory, so OSIZE will only be 4.
5225
5226 (2) In combinations such as -mgp64 -msingle-float
5227 -fshort-double. Doubles passed in registers
5228 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
5229 but those passed on the stack take up
5230 UNITS_PER_WORD bytes. */
5231 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5232 }
5233 else
5234 {
5235 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5236 NULL_TREE);
5237 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5238 NULL_TREE);
5239 if (rsize > UNITS_PER_WORD)
5240 {
5241 /* [1] Emit code for: off &= -rsize. */
5242 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5243 build_int_cst (NULL_TREE, -rsize));
5244 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
5245 gimplify_and_add (t, pre_p);
5246 }
5247 osize = rsize;
5248 }
5249
5250 /* [2] Emit code to branch if off == 0. */
5251 t = build2 (NE_EXPR, boolean_type_node, off,
5252 build_int_cst (TREE_TYPE (off), 0));
5253 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5254
5255 /* [5] Emit code for: off -= rsize. We do this as a form of
5256 post-decrement not available to C. Also widen for the
5257 coming pointer arithmetic. */
5258 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5259 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5260 t = fold_convert (sizetype, t);
5261 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5262
5263 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5264 the argument has RSIZE - SIZE bytes of leading padding. */
5265 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5266 if (BYTES_BIG_ENDIAN && rsize > size)
5267 {
5268 u = size_int (rsize - size);
5269 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5270 }
5271 COND_EXPR_THEN (addr) = t;
5272
5273 if (osize > UNITS_PER_WORD)
5274 {
5275 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5276 u = size_int (osize - 1);
5277 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5278 t = fold_convert (sizetype, t);
5279 u = size_int (-osize);
5280 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5281 t = fold_convert (TREE_TYPE (ovfl), t);
5282 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5283 }
5284 else
5285 align = NULL;
5286
5287 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5288 post-increment ovfl by osize. On big-endian machines,
5289 the argument has OSIZE - SIZE bytes of leading padding. */
5290 u = fold_convert (TREE_TYPE (ovfl),
5291 build_int_cst (NULL_TREE, osize));
5292 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5293 if (BYTES_BIG_ENDIAN && osize > size)
5294 {
5295 u = size_int (osize - size);
5296 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5297 }
5298
5299 /* String [9] and [10,11] together. */
5300 if (align)
5301 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5302 COND_EXPR_ELSE (addr) = t;
5303
5304 addr = fold_convert (build_pointer_type (type), addr);
5305 addr = build_va_arg_indirect_ref (addr);
5306 }
5307
5308 if (indirect)
5309 addr = build_va_arg_indirect_ref (addr);
5310
5311 return addr;
5312 }
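/* Illustrative sketch (not part of GCC): the [1]-[11] pseudocode from
   the comment above, written as ordinary C over the hypothetical
   example_mips_eabi_va_list from the earlier sketch.  The function and
   parameter names are invented, and the big-endian padding handled by
   the real code is omitted.  */

#include <stdint.h>

static void *
example_va_arg_addr (struct example_mips_eabi_va_list *ap, int in_fpr,
                     int rsize, int osize)
{
  void *addr;
  char **top = in_fpr ? &ap->fpr_top : &ap->gpr_top;
  int *off = in_fpr ? &ap->fpr_offset : &ap->gpr_offset;

  *off &= -rsize;                                 /* [1] round down */
  if (*off != 0)                                  /* [2] register save area */
    {
      addr = *top - *off;                         /* [4] top - off */
      *off -= rsize;                              /* [5] post-decrement off */
    }
  else                                            /* [7] overflow area */
    {
      uintptr_t p = (uintptr_t) ap->overflow_argptr;
      p = (p + osize - 1) & -(uintptr_t) osize;   /* [9] align ovfl */
      addr = (void *) p;                          /* [10] (no padding here) */
      ap->overflow_argptr = (char *) p + osize;   /* [11] ovfl += osize */
    }
  return addr;
}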
5313 \f
5314 /* Return true if it is possible to use left/right accesses for a
5315 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5316 returning true, update *OP, *LEFT and *RIGHT as follows:
5317
5318 *OP is a BLKmode reference to the whole field.
5319
5320 *LEFT is a QImode reference to the first byte if big endian or
5321 the last byte if little endian. This address can be used in the
5322 left-side instructions (lwl, swl, ldl, sdl).
5323
5324 *RIGHT is a QImode reference to the opposite end of the field and
5325 can be used in the partnering right-side instructions (lwr, swr, ldr, sdr). */
5326
5327 static bool
5328 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5329 rtx *left, rtx *right)
5330 {
5331 rtx first, last;
5332
5333 /* Check that the operand really is a MEM. Not all the extv and
5334 extzv predicates are checked. */
5335 if (!MEM_P (*op))
5336 return false;
5337
5338 /* Check that the size is valid. */
5339 if (width != 32 && (!TARGET_64BIT || width != 64))
5340 return false;
5341
5342 /* We can only access byte-aligned values. Since we are always passed
5343 a reference to the first byte of the field, it is not necessary to
5344 do anything with BITPOS after this check. */
5345 if (bitpos % BITS_PER_UNIT != 0)
5346 return false;
5347
5348 /* Reject aligned bitfields: we want to use a normal load or store
5349 instead of a left/right pair. */
5350 if (MEM_ALIGN (*op) >= width)
5351 return false;
5352
5353 /* Adjust *OP to refer to the whole field. This also has the effect
5354 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5355 *op = adjust_address (*op, BLKmode, 0);
5356 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5357
5358 /* Get references to both ends of the field. We deliberately don't
5359 use the original QImode *OP for FIRST since the new BLKmode one
5360 might have a simpler address. */
5361 first = adjust_address (*op, QImode, 0);
5362 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5363
5364 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5365 be the upper word and RIGHT the lower word. */
5366 if (TARGET_BIG_ENDIAN)
5367 *left = first, *right = last;
5368 else
5369 *left = last, *right = first;
5370
5371 return true;
5372 }
5373
5374
5375 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5376 Return true on success. We only handle cases where zero_extract is
5377 equivalent to sign_extract. */
5378
5379 bool
5380 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5381 {
5382 rtx left, right, temp;
5383
5384 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5385 paradoxical word_mode subreg. This is the only case in which
5386 we allow the destination to be larger than the source. */
5387 if (GET_CODE (dest) == SUBREG
5388 && GET_MODE (dest) == DImode
5389 && SUBREG_BYTE (dest) == 0
5390 && GET_MODE (SUBREG_REG (dest)) == SImode)
5391 dest = SUBREG_REG (dest);
5392
5393 /* After the above adjustment, the destination must be the same
5394 width as the source. */
5395 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5396 return false;
5397
5398 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5399 return false;
5400
5401 temp = gen_reg_rtx (GET_MODE (dest));
5402 if (GET_MODE (dest) == DImode)
5403 {
5404 emit_insn (gen_mov_ldl (temp, src, left));
5405 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5406 }
5407 else
5408 {
5409 emit_insn (gen_mov_lwl (temp, src, left));
5410 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5411 }
5412 return true;
5413 }
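/* Illustrative sketch (not part of GCC): what the lwl/lwr (or ldl/ldr)
   pair emitted above achieves.  The hardware pair reads the bytes that
   span an unaligned word in two accesses which the CPU merges; in
   portable C the same effect is a byte-wise copy.  The helper name is
   hypothetical and a 32-bit unsigned int is assumed.  */

#include <string.h>

static unsigned int
example_unaligned_load32 (const unsigned char *p)
{
  unsigned int value;

  /* LEFT is the byte that lwl accesses (the first byte on a big-endian
     target, the last byte on a little-endian one) and RIGHT is the
     opposite end; together the two accesses cover bytes p[0]..p[3],
     which is all a portable byte-wise copy needs to reproduce.  */
  memcpy (&value, p, sizeof value);
  return value;
}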
5414
5415
5416 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5417 true on success. */
5418
5419 bool
5420 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5421 {
5422 rtx left, right;
5423 enum machine_mode mode;
5424
5425 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5426 return false;
5427
5428 mode = mode_for_size (width, MODE_INT, 0);
5429 src = gen_lowpart (mode, src);
5430
5431 if (mode == DImode)
5432 {
5433 emit_insn (gen_mov_sdl (dest, src, left));
5434 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5435 }
5436 else
5437 {
5438 emit_insn (gen_mov_swl (dest, src, left));
5439 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5440 }
5441 return true;
5442 }
5443
5444 /* Return true if X is a MEM with the same size as MODE. */
5445
5446 bool
5447 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
5448 {
5449 rtx size;
5450
5451 if (!MEM_P (x))
5452 return false;
5453
5454 size = MEM_SIZE (x);
5455 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5456 }
5457
5458 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5459 source of an "ext" instruction or the destination of an "ins"
5460 instruction. OP must be a register operand and the following
5461 conditions must hold:
5462
5463 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5464 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5465 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5466
5467 Also reject lengths equal to a word as they are better handled
5468 by the move patterns. */
5469
5470 bool
5471 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5472 {
5473 HOST_WIDE_INT len, pos;
5474
5475 if (!ISA_HAS_EXT_INS
5476 || !register_operand (op, VOIDmode)
5477 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5478 return false;
5479
5480 len = INTVAL (size);
5481 pos = INTVAL (position);
5482
5483 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5484 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5485 return false;
5486
5487 return true;
5488 }
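/* Illustrative sketch (not part of GCC): the range checks above in
   plain C.  example_ext_ins_ok and its word_bits parameter are
   hypothetical; the real test additionally requires ISA_HAS_EXT_INS
   and a register operand.  */

static int
example_ext_ins_ok (int pos, int len, int word_bits)
{
  /* Reject empty fields, full-width fields (better handled by the move
     patterns) and fields that run past the end of the register.  */
  return len > 0 && len < word_bits && pos >= 0 && pos + len <= word_bits;
}

/* For a 32-bit register, a field at position 3 of size 8 is accepted,
   while position 28 of size 8 is not, because 28 + 8 > 32.  */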
5489
5490 /* Set up globals to generate code for the ISA or processor
5491 described by INFO. */
5492
5493 static void
5494 mips_set_architecture (const struct mips_cpu_info *info)
5495 {
5496 if (info != 0)
5497 {
5498 mips_arch_info = info;
5499 mips_arch = info->cpu;
5500 mips_isa = info->isa;
5501 }
5502 }
5503
5504
5505 /* Likewise for tuning. */
5506
5507 static void
5508 mips_set_tune (const struct mips_cpu_info *info)
5509 {
5510 if (info != 0)
5511 {
5512 mips_tune_info = info;
5513 mips_tune = info->cpu;
5514 }
5515 }
5516
5517 /* Initialize mips_split_addresses from the associated command-line
5518 settings.
5519
5520 mips_split_addresses is a half-way house between explicit
5521 relocations and the traditional assembler macros. It can
5522 split absolute 32-bit symbolic constants into a high/lo_sum
5523 pair but uses macros for other sorts of access.
5524
5525 Like explicit relocation support for REL targets, it relies
5526 on GNU extensions in the assembler and the linker.
5527
5528 Although this code should work for -O0, it has traditionally
5529 been treated as an optimization. */
5530
5531 static void
5532 mips_init_split_addresses (void)
5533 {
5534 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5535 && optimize && !flag_pic
5536 && !ABI_HAS_64BIT_SYMBOLS)
5537 mips_split_addresses = 1;
5538 else
5539 mips_split_addresses = 0;
5540 }
5541
5542 /* (Re-)Initialize information about relocs. */
5543
5544 static void
5545 mips_init_relocs (void)
5546 {
5547 memset (mips_split_p, '\0', sizeof (mips_split_p));
5548 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5549 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5550
5551 if (ABI_HAS_64BIT_SYMBOLS)
5552 {
5553 if (TARGET_EXPLICIT_RELOCS)
5554 {
5555 mips_split_p[SYMBOL_64_HIGH] = true;
5556 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5557 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5558
5559 mips_split_p[SYMBOL_64_MID] = true;
5560 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5561 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5562
5563 mips_split_p[SYMBOL_64_LOW] = true;
5564 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5565 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5566
5567 mips_split_p[SYMBOL_ABSOLUTE] = true;
5568 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5569 }
5570 }
5571 else
5572 {
5573 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5574 {
5575 mips_split_p[SYMBOL_ABSOLUTE] = true;
5576 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5577 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5578
5579 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5580 }
5581 }
5582
5583 if (TARGET_MIPS16)
5584 {
5585 /* The high part is provided by a pseudo copy of $gp. */
5586 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5587 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5588 }
5589
5590 if (TARGET_EXPLICIT_RELOCS)
5591 {
5592 /* Small data constants are kept whole until after reload,
5593 then lowered by mips_rewrite_small_data. */
5594 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5595
5596 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5597 if (TARGET_NEWABI)
5598 {
5599 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5600 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5601 }
5602 else
5603 {
5604 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5605 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5606 }
5607
5608 if (TARGET_XGOT)
5609 {
5610 /* The HIGH and LO_SUM are matched by special .md patterns. */
5611 mips_split_p[SYMBOL_GOT_DISP] = true;
5612
5613 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5614 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5615 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5616
5617 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5618 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5619 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5620 }
5621 else
5622 {
5623 if (TARGET_NEWABI)
5624 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5625 else
5626 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5627 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5628 }
5629 }
5630
5631 if (TARGET_NEWABI)
5632 {
5633 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5634 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5635 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5636 }
5637
5638 /* Thread-local relocation operators. */
5639 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5640 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5641 mips_split_p[SYMBOL_DTPREL] = 1;
5642 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5643 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5644 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5645 mips_split_p[SYMBOL_TPREL] = 1;
5646 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5647 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5648
5649 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5650 }
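/* Illustrative sketch (not part of GCC): the arithmetic behind the
   %hi()/%lo() pairs that mips_split_p, mips_hi_relocs and
   mips_lo_relocs describe above.  Because %lo() is a signed 16-bit
   addend, %hi() must compensate for the carry when the low half is
   negative, so that (hi << 16) + sign_extend (lo) reconstructs the
   original address.  The example_* helper names are hypothetical.  */

#include <stdint.h>

static uint32_t
example_hi (uint32_t addr)
{
  return (addr + 0x8000) >> 16;                 /* value loaded by lui */
}

static int32_t
example_lo (uint32_t addr)
{
  return (int32_t) (int16_t) (addr & 0xffff);   /* signed 16-bit addend */
}

/* For any addr, (example_hi (addr) << 16) + example_lo (addr) == addr
   modulo 2^32; e.g. addr == 0x1234ffff gives hi == 0x1235, lo == -1.  */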
5651
5652 static GTY(()) int was_mips16_p = -1;
5653
5654 /* Set up the target-dependent global state so that it matches the
5655 current function's ISA mode. */
5656
5657 static void
5658 mips_set_mips16_mode (int mips16_p)
5659 {
5660 if (mips16_p == was_mips16_p)
5661 return;
5662
5663 /* Restore base settings of various flags. */
5664 target_flags = mips_base_target_flags;
5665 align_loops = mips_base_align_loops;
5666 align_jumps = mips_base_align_jumps;
5667 align_functions = mips_base_align_functions;
5668 flag_schedule_insns = mips_base_schedule_insns;
5669 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5670 flag_move_loop_invariants = mips_base_move_loop_invariants;
5671 flag_delayed_branch = mips_flag_delayed_branch;
5672
5673 if (mips16_p)
5674 {
5675 /* Select mips16 instruction set. */
5676 target_flags |= MASK_MIPS16;
5677
5678 /* Don't run the scheduler before reload, since it tends to
5679 increase register pressure. */
5680 flag_schedule_insns = 0;
5681
5682 /* Don't do hot/cold partitioning. The constant layout code expects
5683 the whole function to be in a single section. */
5684 flag_reorder_blocks_and_partition = 0;
5685
5686 /* Don't move loop invariants, because it tends to increase
5687 register pressure. It also introduces an extra move in cases
5688 where the constant is the first operand in a two-operand binary
5689 instruction, or when it forms a register argument to a function
5690 call. */
5691 flag_move_loop_invariants = 0;
5692
5693 /* Silently disable -mexplicit-relocs since it doesn't apply
5694 to mips16 code. Even so, it would be overly pedantic to warn
5695 about "-mips16 -mexplicit-relocs", especially given that
5696 we use a %gprel() operator. */
5697 target_flags &= ~MASK_EXPLICIT_RELOCS;
5698
5699 /* Silently disable DSP extensions. */
5700 target_flags &= ~MASK_DSP;
5701 target_flags &= ~MASK_DSPR2;
5702
5703 /* Experiments suggest we get the best overall results from using
5704 the range of an unextended lw or sw. Code that makes heavy use
5705 of byte or short accesses can do better with ranges of 0...31
5706 and 0...63 respectively, but most code is sensitive to the range
5707 of lw and sw instead. */
5708 targetm.min_anchor_offset = 0;
5709 targetm.max_anchor_offset = 127;
5710
5711 if (flag_pic || TARGET_ABICALLS)
5712 sorry ("MIPS16 PIC");
5713 }
5714 else
5715 {
5716 /* Reset to select base non-mips16 ISA. */
5717 target_flags &= ~MASK_MIPS16;
5718
5719 /* When using explicit relocs, we call dbr_schedule from within
5720 mips_reorg. */
5721 if (TARGET_EXPLICIT_RELOCS)
5722 flag_delayed_branch = 0;
5723
5724 /* Provide default values for align_* for 64-bit targets. */
5725 if (TARGET_64BIT)
5726 {
5727 if (align_loops == 0)
5728 align_loops = 8;
5729 if (align_jumps == 0)
5730 align_jumps = 8;
5731 if (align_functions == 0)
5732 align_functions = 8;
5733 }
5734
5735 targetm.min_anchor_offset = TARGET_MIN_ANCHOR_OFFSET;
5736 targetm.max_anchor_offset = TARGET_MAX_ANCHOR_OFFSET;
5737 }
5738
5739 /* (Re)initialize mips target internals for new ISA. */
5740 mips_init_split_addresses ();
5741 mips_init_relocs ();
5742
5743 if (was_mips16_p >= 0)
5744 /* Reinitialize target-dependent state. */
5745 target_reinit ();
5746
5747 was_mips16_p = TARGET_MIPS16;
5748 }
5749
5750 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
5751 for -mflip-mips16. It maps decl names onto a boolean mode setting. */
5752
5753 struct mflip_mips16_entry GTY (()) {
5754 const char *name;
5755 bool mips16_p;
5756 };
5757 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
5758
5759 /* Hash table callbacks for mflip_mips16_htab. */
5760
5761 static hashval_t
5762 mflip_mips16_htab_hash (const void *entry)
5763 {
5764 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
5765 }
5766
5767 static int
5768 mflip_mips16_htab_eq (const void *entry, const void *name)
5769 {
5770 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
5771 (const char *) name) == 0;
5772 }
5773
5774 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
5775 for -mflip-mips16. Return true if it should use "mips16" and false if
5776 it should use "nomips16". */
5777
5778 static bool
5779 mflip_mips16_use_mips16_p (tree decl)
5780 {
5781 struct mflip_mips16_entry *entry;
5782 const char *name;
5783 hashval_t hash;
5784 void **slot;
5785
5786 /* Use the opposite of the command-line setting for anonymous decls. */
5787 if (!DECL_NAME (decl))
5788 return !mips_base_mips16;
5789
5790 if (!mflip_mips16_htab)
5791 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
5792 mflip_mips16_htab_eq, NULL);
5793
5794 name = IDENTIFIER_POINTER (DECL_NAME (decl));
5795 hash = htab_hash_string (name);
5796 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
5797 entry = (struct mflip_mips16_entry *) *slot;
5798 if (!entry)
5799 {
5800 mips16_flipper = !mips16_flipper;
5801 entry = GGC_NEW (struct mflip_mips16_entry);
5802 entry->name = name;
5803 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
5804 *slot = entry;
5805 }
5806 return entry->mips16_p;
5807 }
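/* Illustrative sketch (not part of GCC): the -mflip-mips16 device above
   in miniature.  The first time a name is seen it gets the next value
   from an alternating flip-flop; later queries for the same name return
   the remembered value, so duplicate declarations agree.  The tiny
   fixed-size table and the example_* names are hypothetical; the real
   code uses a GC-managed hash table.  */

#include <string.h>

struct example_flip_entry { const char *name; int mips16_p; };

static struct example_flip_entry example_table[64];
static int example_entries, example_flipper, example_base_mips16;

static int
example_use_mips16_p (const char *name)
{
  int i;

  /* Return the remembered setting if this name was seen before.  */
  for (i = 0; i < example_entries; i++)
    if (strcmp (example_table[i].name, name) == 0)
      return example_table[i].mips16_p;

  /* Fall back to the command-line default if the toy table is full.  */
  if (example_entries == 64)
    return example_base_mips16;

  /* Otherwise record an alternating choice relative to that default.  */
  example_flipper = !example_flipper;
  example_table[example_entries].name = name;
  example_table[example_entries].mips16_p
    = example_flipper ? !example_base_mips16 : example_base_mips16;
  return example_table[example_entries++].mips16_p;
}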
5808
5809 /* Implement TARGET_INSERT_ATTRIBUTES. */
5810
5811 static void
5812 mips_insert_attributes (tree decl, tree *attributes)
5813 {
5814 const char *name;
5815 bool mips16_p, nomips16_p;
5816
5817 /* Check for "mips16" and "nomips16" attributes. */
5818 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
5819 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
5820 if (TREE_CODE (decl) != FUNCTION_DECL)
5821 {
5822 if (mips16_p)
5823 error ("%qs attribute only applies to functions", "mips16");
5824 if (nomips16_p)
5825 error ("%qs attribute only applies to functions", "nomips16");
5826 }
5827 else
5828 {
5829 mips16_p |= mips_mips16_decl_p (decl);
5830 nomips16_p |= mips_nomips16_decl_p (decl);
5831 if (mips16_p || nomips16_p)
5832 {
5833 /* DECL cannot be simultaneously mips16 and nomips16. */
5834 if (mips16_p && nomips16_p)
5835 error ("%qs cannot have both %<mips16%> and "
5836 "%<nomips16%> attributes",
5837 IDENTIFIER_POINTER (DECL_NAME (decl)));
5838 }
5839 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
5840 {
5841 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
5842 "mips16" attribute, arbitrarily pick one. We must pick the same
5843 setting for duplicate declarations of a function. */
5844 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
5845 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
5846 }
5847 }
5848 }
5849
5850 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
5851
5852 static tree
5853 mips_merge_decl_attributes (tree olddecl, tree newdecl)
5854 {
5855 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
5856 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
5857 error ("%qs redeclared with conflicting %qs attributes",
5858 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
5859 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
5860 error ("%qs redeclared with conflicting %qs attributes",
5861 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
5862
5863 return merge_attributes (DECL_ATTRIBUTES (olddecl),
5864 DECL_ATTRIBUTES (newdecl));
5865 }
5866
5867 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5868 function should use the MIPS16 ISA and switch modes accordingly. */
5869
5870 static void
5871 mips_set_current_function (tree fndecl)
5872 {
5873 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
5874 }
5875
5876 /* Implement TARGET_HANDLE_OPTION. */
5877
5878 static bool
5879 mips_handle_option (size_t code, const char *arg, int value)
5880 {
5881 switch (code)
5882 {
5883 case OPT_mabi_:
5884 if (strcmp (arg, "32") == 0)
5885 mips_abi = ABI_32;
5886 else if (strcmp (arg, "o64") == 0)
5887 mips_abi = ABI_O64;
5888 else if (strcmp (arg, "n32") == 0)
5889 mips_abi = ABI_N32;
5890 else if (strcmp (arg, "64") == 0)
5891 mips_abi = ABI_64;
5892 else if (strcmp (arg, "eabi") == 0)
5893 mips_abi = ABI_EABI;
5894 else
5895 return false;
5896 return true;
5897
5898 case OPT_march_:
5899 case OPT_mtune_:
5900 return mips_parse_cpu (arg) != 0;
5901
5902 case OPT_mips:
5903 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5904 return mips_isa_info != 0;
5905
5906 case OPT_mno_flush_func:
5907 mips_cache_flush_func = NULL;
5908 return true;
5909
5910 case OPT_mcode_readable_:
5911 if (strcmp (arg, "yes") == 0)
5912 mips_code_readable = CODE_READABLE_YES;
5913 else if (strcmp (arg, "pcrel") == 0)
5914 mips_code_readable = CODE_READABLE_PCREL;
5915 else if (strcmp (arg, "no") == 0)
5916 mips_code_readable = CODE_READABLE_NO;
5917 else
5918 return false;
5919 return true;
5920
5921 case OPT_mllsc:
5922 mips_llsc = value ? LLSC_YES : LLSC_NO;
5923 return true;
5924
5925 default:
5926 return true;
5927 }
5928 }
5929
5930 /* Set up the threshold for data to go into the small data area, instead
5931 of the normal data area, and detect any conflicts in the switches. */
5932
5933 void
5934 override_options (void)
5935 {
5936 int i, start, regno;
5937 enum machine_mode mode;
5938
5939 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5940 SUBTARGET_OVERRIDE_OPTIONS;
5941 #endif
5942
5943 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5944
5945 /* The following code determines the architecture and register size.
5946 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5947 The GAS and GCC code should be kept in sync as much as possible. */
5948
5949 if (mips_arch_string != 0)
5950 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5951
5952 if (mips_isa_info != 0)
5953 {
5954 if (mips_arch_info == 0)
5955 mips_set_architecture (mips_isa_info);
5956 else if (mips_arch_info->isa != mips_isa_info->isa)
5957 error ("-%s conflicts with the other architecture options, "
5958 "which specify a %s processor",
5959 mips_isa_info->name,
5960 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5961 }
5962
5963 if (mips_arch_info == 0)
5964 {
5965 #ifdef MIPS_CPU_STRING_DEFAULT
5966 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5967 #else
5968 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5969 #endif
5970 }
5971
5972 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5973 error ("-march=%s is not compatible with the selected ABI",
5974 mips_arch_info->name);
5975
5976 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5977 if (mips_tune_string != 0)
5978 mips_set_tune (mips_parse_cpu (mips_tune_string));
5979
5980 if (mips_tune_info == 0)
5981 mips_set_tune (mips_arch_info);
5982
5983 /* Set cost structure for the processor. */
5984 if (optimize_size)
5985 mips_cost = &mips_rtx_cost_optimize_size;
5986 else
5987 mips_cost = &mips_rtx_cost_data[mips_tune];
5988
5989 /* If the user hasn't specified a branch cost, use the processor's
5990 default. */
5991 if (mips_branch_cost == 0)
5992 mips_branch_cost = mips_cost->branch_cost;
5993
5994 if ((target_flags_explicit & MASK_64BIT) != 0)
5995 {
5996 /* The user specified the size of the integer registers. Make sure
5997 it agrees with the ABI and ISA. */
5998 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5999 error ("-mgp64 used with a 32-bit processor");
6000 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
6001 error ("-mgp32 used with a 64-bit ABI");
6002 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
6003 error ("-mgp64 used with a 32-bit ABI");
6004 }
6005 else
6006 {
6007 /* Infer the integer register size from the ABI and processor.
6008 Restrict ourselves to 32-bit registers if that's all the
6009 processor has, or if the ABI cannot handle 64-bit registers. */
6010 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
6011 target_flags &= ~MASK_64BIT;
6012 else
6013 target_flags |= MASK_64BIT;
6014 }
6015
6016 if ((target_flags_explicit & MASK_FLOAT64) != 0)
6017 {
6018 /* Really, -mfp32 and -mfp64 are ornamental options. There's
6019 only one right answer here. */
6020 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
6021 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
6022 else if (!TARGET_64BIT && TARGET_FLOAT64
6023 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
6024 error ("-mgp32 and -mfp64 can only be combined if the target"
6025 " supports the mfhc1 and mthc1 instructions");
6026 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
6027 error ("unsupported combination: %s", "-mfp64 -msingle-float");
6028 }
6029 else
6030 {
6031 /* -msingle-float selects 32-bit float registers. Otherwise the
6032 float registers should be the same size as the integer ones. */
6033 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
6034 target_flags |= MASK_FLOAT64;
6035 else
6036 target_flags &= ~MASK_FLOAT64;
6037 }
6038
6039 /* End of code shared with GAS. */
6040
6041 if ((target_flags_explicit & MASK_LONG64) == 0)
6042 {
6043 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
6044 target_flags |= MASK_LONG64;
6045 else
6046 target_flags &= ~MASK_LONG64;
6047 }
6048
6049 if (!TARGET_OLDABI)
6050 flag_pcc_struct_return = 0;
6051
6052 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
6053 {
6054 /* If neither -mbranch-likely nor -mno-branch-likely was given
6055 on the command line, set MASK_BRANCHLIKELY based on the target
6056 architecture.
6057
6058 By default, we enable use of Branch Likely instructions on
6059 all architectures which support them with the following
6060 exceptions: when creating MIPS32 or MIPS64 code, and when
6061 tuning for architectures where their use tends to hurt
6062 performance.
6063
6064 The MIPS32 and MIPS64 architecture specifications say "Software
6065 is strongly encouraged to avoid use of Branch Likely
6066 instructions, as they will be removed from a future revision
6067 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
6068 issue those instructions unless instructed to do so by
6069 -mbranch-likely. */
6070 if (ISA_HAS_BRANCHLIKELY
6071 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
6072 && !(TUNE_MIPS5500 || TUNE_SB1))
6073 target_flags |= MASK_BRANCHLIKELY;
6074 else
6075 target_flags &= ~MASK_BRANCHLIKELY;
6076 }
6077 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
6078 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
6079
6080 /* The effect of -mabicalls isn't defined for the EABI. */
6081 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
6082 {
6083 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
6084 target_flags &= ~MASK_ABICALLS;
6085 }
6086
6087 /* MIPS16 cannot generate PIC yet. */
6088 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
6089 {
6090 sorry ("MIPS16 PIC");
6091 target_flags &= ~MASK_ABICALLS;
6092 flag_pic = flag_pie = flag_shlib = 0;
6093 }
6094
6095 if (TARGET_ABICALLS)
6096 /* We need to set flag_pic for executables as well as DSOs
6097 because we may reference symbols that are not defined in
6098 the final executable. (MIPS does not use things like
6099 copy relocs, for example.)
6100
6101 Also, there is a body of code that uses __PIC__ to distinguish
6102 between -mabicalls and -mno-abicalls code. */
6103 flag_pic = 1;
6104
6105 /* -mvr4130-align is a "speed over size" optimization: it usually produces
6106 faster code, but at the expense of more nops. Enable it at -O3 and
6107 above. */
6108 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
6109 target_flags |= MASK_VR4130_ALIGN;
6110
6111 /* Prefer a call to memcpy over inline code when optimizing for size,
6112 though see MOVE_RATIO in mips.h. */
6113 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
6114 target_flags |= MASK_MEMCPY;
6115
6116 /* If we have a nonzero small-data limit, check that the -mgpopt
6117 setting is consistent with the other target flags. */
6118 if (mips_section_threshold > 0)
6119 {
6120 if (!TARGET_GPOPT)
6121 {
6122 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
6123 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
6124
6125 TARGET_LOCAL_SDATA = false;
6126 TARGET_EXTERN_SDATA = false;
6127 }
6128 else
6129 {
6130 if (TARGET_VXWORKS_RTP)
6131 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
6132
6133 if (TARGET_ABICALLS)
6134 warning (0, "cannot use small-data accesses for %qs",
6135 "-mabicalls");
6136 }
6137 }
6138
6139 #ifdef MIPS_TFMODE_FORMAT
6140 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
6141 #endif
6142
6143 /* Make sure that the user didn't turn off paired single support when
6144 MIPS-3D support is requested. */
6145 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
6146 && !TARGET_PAIRED_SINGLE_FLOAT)
6147 error ("-mips3d requires -mpaired-single");
6148
6149 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
6150 if (TARGET_MIPS3D)
6151 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
6152
6153 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
6154 and TARGET_HARD_FLOAT are both true. */
6155 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
6156 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
6157
6158 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
6159 enabled. */
6160 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
6161 error ("-mips3d/-mpaired-single must be used with -mips64");
6162
6163 /* If TARGET_DSPR2, enable MASK_DSP. */
6164 if (TARGET_DSPR2)
6165 target_flags |= MASK_DSP;
6166
6167 mips_print_operand_punct['?'] = 1;
6168 mips_print_operand_punct['#'] = 1;
6169 mips_print_operand_punct['/'] = 1;
6170 mips_print_operand_punct['&'] = 1;
6171 mips_print_operand_punct['!'] = 1;
6172 mips_print_operand_punct['*'] = 1;
6173 mips_print_operand_punct['@'] = 1;
6174 mips_print_operand_punct['.'] = 1;
6175 mips_print_operand_punct['('] = 1;
6176 mips_print_operand_punct[')'] = 1;
6177 mips_print_operand_punct['['] = 1;
6178 mips_print_operand_punct[']'] = 1;
6179 mips_print_operand_punct['<'] = 1;
6180 mips_print_operand_punct['>'] = 1;
6181 mips_print_operand_punct['{'] = 1;
6182 mips_print_operand_punct['}'] = 1;
6183 mips_print_operand_punct['^'] = 1;
6184 mips_print_operand_punct['$'] = 1;
6185 mips_print_operand_punct['+'] = 1;
6186 mips_print_operand_punct['~'] = 1;
6187 mips_print_operand_punct['|'] = 1;
6188 mips_print_operand_punct['-'] = 1;
6189
6190 /* Set up array to map GCC register number to debug register number.
6191 Ignore the special purpose register numbers. */
6192
6193 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6194 {
6195 mips_dbx_regno[i] = INVALID_REGNUM;
6196 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
6197 mips_dwarf_regno[i] = i;
6198 else
6199 mips_dwarf_regno[i] = INVALID_REGNUM;
6200 }
6201
6202 start = GP_DBX_FIRST - GP_REG_FIRST;
6203 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
6204 mips_dbx_regno[i] = i + start;
6205
6206 start = FP_DBX_FIRST - FP_REG_FIRST;
6207 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
6208 mips_dbx_regno[i] = i + start;
6209
6210 /* HI and LO debug registers use big-endian ordering. */
6211 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
6212 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
6213 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
6214 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
6215 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
6216 {
6217 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
6218 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
6219 }
6220
6221 /* Set up array giving whether a given register can hold a given mode. */
6222
6223 for (mode = VOIDmode;
6224 mode != MAX_MACHINE_MODE;
6225 mode = (enum machine_mode) ((int)mode + 1))
6226 {
6227 register int size = GET_MODE_SIZE (mode);
6228 register enum mode_class class = GET_MODE_CLASS (mode);
6229
6230 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6231 {
6232 register int temp;
6233
6234 if (mode == CCV2mode)
6235 temp = (ISA_HAS_8CC
6236 && ST_REG_P (regno)
6237 && (regno - ST_REG_FIRST) % 2 == 0);
6238
6239 else if (mode == CCV4mode)
6240 temp = (ISA_HAS_8CC
6241 && ST_REG_P (regno)
6242 && (regno - ST_REG_FIRST) % 4 == 0);
6243
6244 else if (mode == CCmode)
6245 {
6246 if (! ISA_HAS_8CC)
6247 temp = (regno == FPSW_REGNUM);
6248 else
6249 temp = (ST_REG_P (regno) || GP_REG_P (regno)
6250 || FP_REG_P (regno));
6251 }
6252
6253 else if (GP_REG_P (regno))
6254 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
6255
6256 else if (FP_REG_P (regno))
6257 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
6258 || (MIN_FPRS_PER_FMT == 1
6259 && size <= UNITS_PER_FPREG))
6260 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
6261 || class == MODE_VECTOR_FLOAT)
6262 && size <= UNITS_PER_FPVALUE)
6263 /* Allow integer modes that fit into a single
6264 register. We need to put integers into FPRs
6265 when using instructions like cvt and trunc.
6266 We can't allow sizes smaller than a word,
6267 the FPU has no appropriate load/store
6268 instructions for those. */
6269 || (class == MODE_INT
6270 && size >= MIN_UNITS_PER_WORD
6271 && size <= UNITS_PER_FPREG)
6272 /* Allow TFmode for CCmode reloads. */
6273 || (ISA_HAS_8CC && mode == TFmode)));
6274
6275 else if (ACC_REG_P (regno))
6276 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
6277 && size <= UNITS_PER_WORD * 2
6278 && (size <= UNITS_PER_WORD
6279 || regno == MD_REG_FIRST
6280 || (DSP_ACC_REG_P (regno)
6281 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
6282
6283 else if (ALL_COP_REG_P (regno))
6284 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
6285 else
6286 temp = 0;
6287
6288 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
6289 }
6290 }
6291
6292 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
6293 initialized yet, so we can't use that here. */
6294 gpr_mode = TARGET_64BIT ? DImode : SImode;
6295
6296 /* Function to allocate machine-dependent function status. */
6297 init_machine_status = &mips_init_machine_status;
6298
6299 /* Default to working around R4000 errata only if the processor
6300 was selected explicitly. */
6301 if ((target_flags_explicit & MASK_FIX_R4000) == 0
6302 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
6303 target_flags |= MASK_FIX_R4000;
6304
6305 /* Default to working around R4400 errata only if the processor
6306 was selected explicitly. */
6307 if ((target_flags_explicit & MASK_FIX_R4400) == 0
6308 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
6309 target_flags |= MASK_FIX_R4400;
6310
6311 /* Save base state of options. */
6312 mips_base_mips16 = TARGET_MIPS16;
6313 mips_base_target_flags = target_flags;
6314 mips_base_schedule_insns = flag_schedule_insns;
6315 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
6316 mips_base_move_loop_invariants = flag_move_loop_invariants;
6317 mips_base_align_loops = align_loops;
6318 mips_base_align_jumps = align_jumps;
6319 mips_base_align_functions = align_functions;
6320 mips_flag_delayed_branch = flag_delayed_branch;
6321
6322 /* Now select the mips16 or 32-bit instruction set, as requested. */
6323 mips_set_mips16_mode (mips_base_mips16);
6324 }
6325
6326 /* Swap the register information for registers I and I + 1, which
6327 currently have the wrong endianness. Note that the registers'
6328 fixedness and call-clobberedness might have been set on the
6329 command line. */
6330
6331 static void
6332 mips_swap_registers (unsigned int i)
6333 {
6334 int tmpi;
6335 const char *tmps;
6336
6337 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
6338 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
6339
6340 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
6341 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
6342 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
6343 SWAP_STRING (reg_names[i], reg_names[i + 1]);
6344
6345 #undef SWAP_STRING
6346 #undef SWAP_INT
6347 }
6348
6349 /* Implement CONDITIONAL_REGISTER_USAGE. */
6350
6351 void
6352 mips_conditional_register_usage (void)
6353 {
6354 if (!TARGET_DSP)
6355 {
6356 int regno;
6357
6358 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
6359 fixed_regs[regno] = call_used_regs[regno] = 1;
6360 }
6361 if (!TARGET_HARD_FLOAT)
6362 {
6363 int regno;
6364
6365 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
6366 fixed_regs[regno] = call_used_regs[regno] = 1;
6367 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6368 fixed_regs[regno] = call_used_regs[regno] = 1;
6369 }
6370 else if (! ISA_HAS_8CC)
6371 {
6372 int regno;
6373
6374 /* We only have a single condition code register. We
6375 implement this by hiding all the condition code registers,
6376 and generating RTL that refers directly to ST_REG_FIRST. */
6377 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6378 fixed_regs[regno] = call_used_regs[regno] = 1;
6379 }
6380 /* In mips16 mode, we permit the $t temporary registers to be used
6381 for reload. We prohibit the unused $s registers, since they
6382 are call-saved, and saving them via a mips16 register would
6383 probably waste more time than just reloading the value. */
6384 if (TARGET_MIPS16)
6385 {
6386 fixed_regs[18] = call_used_regs[18] = 1;
6387 fixed_regs[19] = call_used_regs[19] = 1;
6388 fixed_regs[20] = call_used_regs[20] = 1;
6389 fixed_regs[21] = call_used_regs[21] = 1;
6390 fixed_regs[22] = call_used_regs[22] = 1;
6391 fixed_regs[23] = call_used_regs[23] = 1;
6392 fixed_regs[26] = call_used_regs[26] = 1;
6393 fixed_regs[27] = call_used_regs[27] = 1;
6394 fixed_regs[30] = call_used_regs[30] = 1;
6395 }
6396 /* fp20-23 are now caller saved. */
6397 if (mips_abi == ABI_64)
6398 {
6399 int regno;
6400 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
6401 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6402 }
6403 /* Odd registers from fp21 to fp31 are now caller saved. */
6404 if (mips_abi == ABI_N32)
6405 {
6406 int regno;
6407 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
6408 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6409 }
6410 /* Make sure that double-register accumulator values are correctly
6411 ordered for the current endianness. */
6412 if (TARGET_LITTLE_ENDIAN)
6413 {
6414 int regno;
6415 mips_swap_registers (MD_REG_FIRST);
6416 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
6417 mips_swap_registers (regno);
6418 }
6419 }
6420
6421 /* Allocate a chunk of memory for per-function machine-dependent data. */
6422 static struct machine_function *
6423 mips_init_machine_status (void)
6424 {
6425 return ((struct machine_function *)
6426 ggc_alloc_cleared (sizeof (struct machine_function)));
6427 }
6428
6429 /* On the mips16, we want to allocate $24 (T_REG) before other
6430 registers for instructions for which it is possible. This helps
6431 avoid shuffling registers around in order to set up for an xor,
6432 encouraging the compiler to use a cmp instead. */
6433
6434 void
6435 mips_order_regs_for_local_alloc (void)
6436 {
6437 register int i;
6438
6439 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6440 reg_alloc_order[i] = i;
6441
6442 if (TARGET_MIPS16)
6443 {
6444 /* It really doesn't matter where we put register 0, since it is
6445 a fixed register anyhow. */
6446 reg_alloc_order[0] = 24;
6447 reg_alloc_order[24] = 0;
6448 }
6449 }
6450
6451 \f
6452 /* The MIPS debug format wants all automatic variables and arguments
6453 to be in terms of the virtual frame pointer (stack pointer before
6454 any adjustment in the function), while the MIPS 3.0 linker wants
6455 the frame pointer to be the stack pointer after the initial
6456 adjustment. So, we do the adjustment here. The arg pointer (which
6457 is eliminated) points to the virtual frame pointer, while the frame
6458 pointer (which may be eliminated) points to the stack pointer after
6459 the initial adjustments. */
6460
6461 HOST_WIDE_INT
6462 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6463 {
6464 rtx offset2 = const0_rtx;
6465 rtx reg = eliminate_constant_term (addr, &offset2);
6466
6467 if (offset == 0)
6468 offset = INTVAL (offset2);
6469
6470 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6471 || reg == hard_frame_pointer_rtx)
6472 {
6473 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
6474 ? compute_frame_size (get_frame_size ())
6475 : cfun->machine->frame.total_size;
6476
6477 /* The MIPS16 frame is smaller. */
6478 if (frame_pointer_needed && TARGET_MIPS16)
6479 frame_size -= cfun->machine->frame.args_size;
6480
6481 offset = offset - frame_size;
6482 }
6483
6484 /* sdbout_parms does not want this to crash for unrecognized cases. */
6485 #if 0
6486 else if (reg != arg_pointer_rtx)
6487 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6488 addr);
6489 #endif
6490
6491 return offset;
6492 }
6493 \f
6494 /* If OP is an UNSPEC address, return the address to which it refers,
6495 otherwise return OP itself. */
6496
6497 static rtx
6498 mips_strip_unspec_address (rtx op)
6499 {
6500 rtx base, offset;
6501
6502 split_const (op, &base, &offset);
6503 if (UNSPEC_ADDRESS_P (base))
6504 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6505 return op;
6506 }
6507
6508 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6509
6510 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6511 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6512 'h' OP is HIGH, prints %hi(X),
6513 'd' output integer constant in decimal,
6514 'z' if the operand is 0, use $0 instead of normal operand.
6515 'D' print second part of double-word register or memory operand.
6516 'L' print low-order register of double-word register operand.
6517 'M' print high-order register of double-word register operand.
6518 'C' print part of opcode for a branch condition.
6519 'F' print part of opcode for a floating-point branch condition.
6520 'N' print part of opcode for a branch condition, inverted.
6521 'W' print part of opcode for a floating-point branch condition, inverted.
6522 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6523 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6524 't' like 'T', but with the EQ/NE cases reversed
6525 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6526 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6527 'R' print the reloc associated with LO_SUM
6528 'q' print DSP accumulator registers
6529
6530 The punctuation characters are:
6531
6532 '(' Turn on .set noreorder
6533 ')' Turn on .set reorder
6534 '[' Turn on .set noat
6535 ']' Turn on .set at
6536 '<' Turn on .set nomacro
6537 '>' Turn on .set macro
6538 '{' Turn on .set volatile (not GAS)
6539 '}' Turn on .set novolatile (not GAS)
6540 '&' Turn on .set noreorder if filling delay slots
6541 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6542 '!' Turn on .set nomacro if filling delay slots
6543 '#' Print nop if in a .set noreorder section.
6544 '/' Like '#', but does nothing within a delayed branch sequence
6545 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6546 '@' Print the name of the assembler temporary register (at or $1).
6547 '.' Print the name of the register with a hard-wired zero (zero or $0).
6548 '^' Print the name of the pic call-through register (t9 or $25).
6549 '$' Print the name of the stack pointer register (sp or $29).
6550 '+' Print the name of the gp register (usually gp or $28).
6551 '~' Output a branch alignment to LABEL_ALIGN(NULL).
6552 '|' Print .set push; .set mips2 if mips_llsc == LLSC_YES
6553 && !ISA_HAS_LL_SC.
6554 '-' Print .set pop under the same conditions for '|'. */
6555
6556 void
6557 print_operand (FILE *file, rtx op, int letter)
6558 {
6559 register enum rtx_code code;
6560
6561 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6562 {
6563 switch (letter)
6564 {
6565 case '?':
6566 if (mips_branch_likely)
6567 putc ('l', file);
6568 break;
6569
6570 case '@':
6571 fputs (reg_names [GP_REG_FIRST + 1], file);
6572 break;
6573
6574 case '^':
6575 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6576 break;
6577
6578 case '.':
6579 fputs (reg_names [GP_REG_FIRST + 0], file);
6580 break;
6581
6582 case '$':
6583 fputs (reg_names[STACK_POINTER_REGNUM], file);
6584 break;
6585
6586 case '+':
6587 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6588 break;
6589
6590 case '&':
6591 if (final_sequence != 0 && set_noreorder++ == 0)
6592 fputs (".set\tnoreorder\n\t", file);
6593 break;
6594
6595 case '*':
6596 if (final_sequence != 0)
6597 {
6598 if (set_noreorder++ == 0)
6599 fputs (".set\tnoreorder\n\t", file);
6600
6601 if (set_nomacro++ == 0)
6602 fputs (".set\tnomacro\n\t", file);
6603 }
6604 break;
6605
6606 case '!':
6607 if (final_sequence != 0 && set_nomacro++ == 0)
6608 fputs ("\n\t.set\tnomacro", file);
6609 break;
6610
6611 case '#':
6612 if (set_noreorder != 0)
6613 fputs ("\n\tnop", file);
6614 break;
6615
6616 case '/':
6617 /* Print an extra newline so that the delayed insn is separated
6618 from the following ones. This looks neater and is consistent
6619 with non-nop delayed sequences. */
6620 if (set_noreorder != 0 && final_sequence == 0)
6621 fputs ("\n\tnop\n", file);
6622 break;
6623
6624 case '(':
6625 if (set_noreorder++ == 0)
6626 fputs (".set\tnoreorder\n\t", file);
6627 break;
6628
6629 case ')':
6630 if (set_noreorder == 0)
6631 error ("internal error: %%) found without a %%( in assembler pattern");
6632
6633 else if (--set_noreorder == 0)
6634 fputs ("\n\t.set\treorder", file);
6635
6636 break;
6637
6638 case '[':
6639 if (set_noat++ == 0)
6640 fputs (".set\tnoat\n\t", file);
6641 break;
6642
6643 case ']':
6644 if (set_noat == 0)
6645 error ("internal error: %%] found without a %%[ in assembler pattern");
6646 else if (--set_noat == 0)
6647 fputs ("\n\t.set\tat", file);
6648
6649 break;
6650
6651 case '<':
6652 if (set_nomacro++ == 0)
6653 fputs (".set\tnomacro\n\t", file);
6654 break;
6655
6656 case '>':
6657 if (set_nomacro == 0)
6658 error ("internal error: %%> found without a %%< in assembler pattern");
6659 else if (--set_nomacro == 0)
6660 fputs ("\n\t.set\tmacro", file);
6661
6662 break;
6663
6664 case '{':
6665 if (set_volatile++ == 0)
6666 fputs ("#.set\tvolatile\n\t", file);
6667 break;
6668
6669 case '}':
6670 if (set_volatile == 0)
6671 error ("internal error: %%} found without a %%{ in assembler pattern");
6672 else if (--set_volatile == 0)
6673 fputs ("\n\t#.set\tnovolatile", file);
6674
6675 break;
6676
6677 case '~':
6678 {
6679 if (align_labels_log > 0)
6680 ASM_OUTPUT_ALIGN (file, align_labels_log);
6681 }
6682 break;
6683
6684 case '|':
6685 if (!ISA_HAS_LL_SC)
6686 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6687 break;
6688
6689 case '-':
6690 if (!ISA_HAS_LL_SC)
6691 fputs ("\n\t.set\tpop", file);
6692 break;
6693
6694 default:
6695 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6696 break;
6697 }
6698
6699 return;
6700 }
6701
6702 if (! op)
6703 {
6704 error ("PRINT_OPERAND null pointer");
6705 return;
6706 }
6707
6708 code = GET_CODE (op);
6709
6710 if (letter == 'C')
6711 switch (code)
6712 {
6713 case EQ: fputs ("eq", file); break;
6714 case NE: fputs ("ne", file); break;
6715 case GT: fputs ("gt", file); break;
6716 case GE: fputs ("ge", file); break;
6717 case LT: fputs ("lt", file); break;
6718 case LE: fputs ("le", file); break;
6719 case GTU: fputs ("gtu", file); break;
6720 case GEU: fputs ("geu", file); break;
6721 case LTU: fputs ("ltu", file); break;
6722 case LEU: fputs ("leu", file); break;
6723 default:
6724 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6725 }
6726
6727 else if (letter == 'N')
6728 switch (code)
6729 {
6730 case EQ: fputs ("ne", file); break;
6731 case NE: fputs ("eq", file); break;
6732 case GT: fputs ("le", file); break;
6733 case GE: fputs ("lt", file); break;
6734 case LT: fputs ("ge", file); break;
6735 case LE: fputs ("gt", file); break;
6736 case GTU: fputs ("leu", file); break;
6737 case GEU: fputs ("ltu", file); break;
6738 case LTU: fputs ("geu", file); break;
6739 case LEU: fputs ("gtu", file); break;
6740 default:
6741 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6742 }
6743
6744 else if (letter == 'F')
6745 switch (code)
6746 {
6747 case EQ: fputs ("c1f", file); break;
6748 case NE: fputs ("c1t", file); break;
6749 default:
6750 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6751 }
6752
6753 else if (letter == 'W')
6754 switch (code)
6755 {
6756 case EQ: fputs ("c1t", file); break;
6757 case NE: fputs ("c1f", file); break;
6758 default:
6759 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6760 }
6761
6762 else if (letter == 'h')
6763 {
6764 if (GET_CODE (op) == HIGH)
6765 op = XEXP (op, 0);
6766
6767 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6768 }
6769
6770 else if (letter == 'R')
6771 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6772
6773 else if (letter == 'Y')
6774 {
6775 if (GET_CODE (op) == CONST_INT
6776 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6777 < ARRAY_SIZE (mips_fp_conditions)))
6778 fputs (mips_fp_conditions[INTVAL (op)], file);
6779 else
6780 output_operand_lossage ("invalid %%Y value");
6781 }
6782
6783 else if (letter == 'Z')
6784 {
6785 if (ISA_HAS_8CC)
6786 {
6787 print_operand (file, op, 0);
6788 fputc (',', file);
6789 }
6790 }
6791
6792 else if (letter == 'q')
6793 {
6794 int regnum;
6795
6796 if (code != REG)
6797 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6798
6799 regnum = REGNO (op);
6800 if (MD_REG_P (regnum))
6801 fprintf (file, "$ac0");
6802 else if (DSP_ACC_REG_P (regnum))
6803 fprintf (file, "$ac%c", reg_names[regnum][3]);
6804 else
6805 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6806 }
6807
6808 else if (code == REG || code == SUBREG)
6809 {
6810 register int regnum;
6811
6812 if (code == REG)
6813 regnum = REGNO (op);
6814 else
6815 regnum = true_regnum (op);
6816
6817 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6818 || (letter == 'L' && WORDS_BIG_ENDIAN)
6819 || letter == 'D')
6820 regnum++;
6821
6822 fprintf (file, "%s", reg_names[regnum]);
6823 }
6824
6825 else if (code == MEM)
6826 {
6827 if (letter == 'D')
6828 output_address (plus_constant (XEXP (op, 0), 4));
6829 else
6830 output_address (XEXP (op, 0));
6831 }
6832
6833 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6834 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6835
6836 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6837 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6838
6839 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6840 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6841
6842 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6843 fputs (reg_names[GP_REG_FIRST], file);
6844
6845 else if (letter == 'd' || letter == 'x' || letter == 'X')
6846 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6847
6848 else if (letter == 'T' || letter == 't')
6849 {
6850 int truth = (code == NE) == (letter == 'T');
6851 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6852 }
6853
6854 else if (CONST_GP_P (op))
6855 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6856
6857 else
6858 output_addr_const (file, mips_strip_unspec_address (op));
6859 }
6860
6861
6862 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6863 in context CONTEXT. RELOCS is the array of relocations to use. */
6864
6865 static void
6866 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6867 const char **relocs)
6868 {
6869 enum mips_symbol_type symbol_type;
6870 const char *p;
6871
6872 symbol_type = mips_classify_symbolic_expression (op, context);
6873 if (relocs[symbol_type] == 0)
6874 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6875
6876 fputs (relocs[symbol_type], file);
6877 output_addr_const (file, mips_strip_unspec_address (op));
6878 for (p = relocs[symbol_type]; *p != 0; p++)
6879 if (*p == '(')
6880 fputc (')', file);
6881 }
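/* Illustrative sketch (not part of GCC): how the loop above balances
   the parentheses of a relocation operator, including nested loadgp
   operators such as "%hi(%neg(%gp_rel(".  example_print_reloc is a
   hypothetical stand-in that writes the symbol text directly instead
   of going through output_addr_const.  */

#include <stdio.h>

static void
example_print_reloc (FILE *file, const char *reloc, const char *symbol)
{
  const char *p;

  fputs (reloc, file);
  fputs (symbol, file);
  /* Close one parenthesis for each one opened by the operator string.  */
  for (p = reloc; *p != 0; p++)
    if (*p == '(')
      fputc (')', file);
}

/* example_print_reloc (stdout, "%hi(%neg(%gp_rel(", "foo") prints
   "%hi(%neg(%gp_rel(foo)))".  */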
6882 \f
6883 /* Output address operand X to FILE. */
6884
6885 void
6886 print_operand_address (FILE *file, rtx x)
6887 {
6888 struct mips_address_info addr;
6889
6890 if (mips_classify_address (&addr, x, word_mode, true))
6891 switch (addr.type)
6892 {
6893 case ADDRESS_REG:
6894 print_operand (file, addr.offset, 0);
6895 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6896 return;
6897
6898 case ADDRESS_LO_SUM:
6899 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6900 mips_lo_relocs);
6901 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6902 return;
6903
6904 case ADDRESS_CONST_INT:
6905 output_addr_const (file, x);
6906 fprintf (file, "(%s)", reg_names[0]);
6907 return;
6908
6909 case ADDRESS_SYMBOLIC:
6910 output_addr_const (file, mips_strip_unspec_address (x));
6911 return;
6912 }
6913 gcc_unreachable ();
6914 }
6915 \f
6916 /* When using assembler macros, keep track of all of the small-data externs
6917 so that mips_file_end can emit the appropriate declarations for them.
6918
6919 In most cases it would be safe (though pointless) to emit .externs
6920 for other symbols too. One exception is when an object is within
6921 the -G limit but declared by the user to be in a section other
6922 than .sbss or .sdata. */
6923
6924 void
6925 mips_output_external (FILE *file, tree decl, const char *name)
6926 {
6927 default_elf_asm_output_external (file, decl, name);
6928
6929 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6930 set in order to avoid putting out names that are never really
6931 used. */
6932 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6933 {
6934 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6935 {
6936 fputs ("\t.extern\t", file);
6937 assemble_name (file, name);
6938 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6939 int_size_in_bytes (TREE_TYPE (decl)));
6940 }
6941 else if (TARGET_IRIX
6942 && mips_abi == ABI_32
6943 && TREE_CODE (decl) == FUNCTION_DECL)
6944 {
6945 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6946 `.global name .text' directive for every used but
6947 undefined function. If we don't, the linker may perform
6948 an optimization (skipping over the insns that set $gp)
6949 when it is unsafe. */
6950 fputs ("\t.globl ", file);
6951 assemble_name (file, name);
6952 fputs (" .text\n", file);
6953 }
6954 }
6955 }
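/* Examples of the directives this function can emit (illustrative only;
   `counter' and `bar' are hypothetical names).  For a 4-byte small-data
   object when assembler macros are in use:

	.extern	counter, 4

   and for an undefined function on IRIX o32:

	.globl bar .text

   The size on the .extern line lets the assembler decide whether
   gp-relative addressing is safe for the symbol.  */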
6956 \f
6957 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6958 put out a MIPS ECOFF file and a stab. */
6959
6960 void
6961 mips_output_filename (FILE *stream, const char *name)
6962 {
6963
6964 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6965 directives. */
6966 if (write_symbols == DWARF2_DEBUG)
6967 return;
6968 else if (mips_output_filename_first_time)
6969 {
6970 mips_output_filename_first_time = 0;
6971 num_source_filenames += 1;
6972 current_function_file = name;
6973 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6974 output_quoted_string (stream, name);
6975 putc ('\n', stream);
6976 }
6977
6978 /* If we are emitting stabs, let dbxout.c handle this (except for
6979 the mips_output_filename_first_time case). */
6980 else if (write_symbols == DBX_DEBUG)
6981 return;
6982
6983 else if (name != current_function_file
6984 && strcmp (name, current_function_file) != 0)
6985 {
6986 num_source_filenames += 1;
6987 current_function_file = name;
6988 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6989 output_quoted_string (stream, name);
6990 putc ('\n', stream);
6991 }
6992 }
6993 \f
6994 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6995 that should be written before the opening quote, such as "\t.ascii\t"
6996 for real string data or "\t# " for a comment. */
6997
6998 void
6999 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
7000 const char *prefix)
7001 {
7002 size_t i;
7003 int cur_pos = 17;
7004 register const unsigned char *string =
7005 (const unsigned char *)string_param;
7006
7007 fprintf (stream, "%s\"", prefix);
7008 for (i = 0; i < len; i++)
7009 {
7010 register int c = string[i];
7011
7012 if (ISPRINT (c))
7013 {
7014 if (c == '\\' || c == '\"')
7015 {
7016 putc ('\\', stream);
7017 cur_pos++;
7018 }
7019 putc (c, stream);
7020 cur_pos++;
7021 }
7022 else
7023 {
7024 fprintf (stream, "\\%03o", c);
7025 cur_pos += 4;
7026 }
7027
7028 if (cur_pos > 72 && i+1 < len)
7029 {
7030 cur_pos = 17;
7031 fprintf (stream, "\"\n%s\"", prefix);
7032 }
7033 }
7034 fprintf (stream, "\"\n");
7035 }
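/* Illustrative example (added comment): with PREFIX "\t.ascii\t", the
   four-character string a, '"', b, newline is emitted as

	.ascii	"a\"b\012"

   Printable characters are copied (escaping '\' and '"'), everything
   else becomes a three-digit octal escape, and long strings are split
   across several directives once a line passes column 72.  */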
7036 \f
7037 /* Implement TARGET_ASM_FILE_START. */
7038
7039 static void
7040 mips_file_start (void)
7041 {
7042 default_file_start ();
7043
7044 if (!TARGET_IRIX)
7045 {
7046 /* Generate a special section to describe the ABI switches used to
7047 produce the resultant binary. This used to be done by the assembler
7048 setting bits in the ELF header's flags field, but we have run out of
7049 bits. GDB needs this information in order to be able to correctly
7050 debug these binaries. See the function mips_gdbarch_init() in
7051 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7052 causes unnecessary IRIX 6 ld warnings. */
7053 const char * abi_string = NULL;
7054
7055 switch (mips_abi)
7056 {
7057 case ABI_32: abi_string = "abi32"; break;
7058 case ABI_N32: abi_string = "abiN32"; break;
7059 case ABI_64: abi_string = "abi64"; break;
7060 case ABI_O64: abi_string = "abiO64"; break;
7061 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
7062 default:
7063 gcc_unreachable ();
7064 }
7065 /* Note - we use fprintf directly rather than calling switch_to_section
7066 because in this way we can avoid creating an allocated section. We
7067 do not want this section to take up any space in the running
7068 executable. */
7069 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7070 abi_string);
7071
7072 /* There is no ELF header flag to distinguish long32 forms of the
7073 EABI from long64 forms. Emit a special section to help tools
7074 such as GDB. Do the same for o64, which is sometimes used with
7075 -mlong64. */
7076 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7077 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7078 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7079
7080 #ifdef HAVE_AS_GNU_ATTRIBUTE
7081 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7082 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
7083 #endif
7084 }
7085
7086 /* Generate the pseudo ops that System V.4 wants. */
7087 if (TARGET_ABICALLS)
7088 fprintf (asm_out_file, "\t.abicalls\n");
7089
7090 if (flag_verbose_asm)
7091 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7092 ASM_COMMENT_START,
7093 mips_section_threshold, mips_arch_info->name, mips_isa);
7094 }
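/* Illustrative sketch (added comment) of the file-start output for a
   non-IRIX o32, hard-float, double-float, abicalls configuration; the
   exact lines depend on the target options and assembler support:

	.section .mdebug.abi32
	.previous
	.gnu_attribute 4, 1
	.abicalls

   in addition to whatever default_file_start emits.  */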
7095
7096 #ifdef BSS_SECTION_ASM_OP
7097 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
7098 in the use of sbss. */
7099
7100 void
7101 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
7102 unsigned HOST_WIDE_INT size, int align)
7103 {
7104 extern tree last_assemble_variable_decl;
7105
7106 if (mips_in_small_data_p (decl))
7107 switch_to_section (get_named_section (NULL, ".sbss", 0));
7108 else
7109 switch_to_section (bss_section);
7110 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7111 last_assemble_variable_decl = decl;
7112 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
7113 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
7114 }
7115 #endif
7116 \f
7117 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7118 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7119
7120 void
7121 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7122 unsigned HOST_WIDE_INT size,
7123 unsigned int align)
7124 {
7125 /* If the target wants uninitialized const declarations in
7126 .rdata then don't put them in .comm. */
7127 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
7128 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
7129 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7130 {
7131 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7132 targetm.asm_out.globalize_label (stream, name);
7133
7134 switch_to_section (readonly_data_section);
7135 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7136 mips_declare_object (stream, name, "",
7137 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7138 size);
7139 }
7140 else
7141 mips_declare_common_object (stream, name, "\n\t.comm\t",
7142 size, align, true);
7143 }
7144
7145 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7146 NAME is the name of the object and ALIGN is the required alignment
7147 in bits. TAKES_ALIGNMENT_P is true if the directive takes a third
7148 alignment argument. */
7149
7150 void
7151 mips_declare_common_object (FILE *stream, const char *name,
7152 const char *init_string,
7153 unsigned HOST_WIDE_INT size,
7154 unsigned int align, bool takes_alignment_p)
7155 {
7156 if (!takes_alignment_p)
7157 {
7158 size += (align / BITS_PER_UNIT) - 1;
7159 size -= size % (align / BITS_PER_UNIT);
7160 mips_declare_object (stream, name, init_string,
7161 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7162 }
7163 else
7164 mips_declare_object (stream, name, init_string,
7165 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7166 size, align / BITS_PER_UNIT);
7167 }
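/* Worked example (added comment): for an 18-byte object with ALIGN of
   64 bits (8 bytes), the !takes_alignment_p path rounds the size up:
   18 + 7 = 25, then 25 - (25 % 8) = 24, so "name,24" follows
   INIT_STRING.  The takes_alignment_p path instead emits "name,18,8"
   and leaves any rounding to the assembler.  */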
7168
7169 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7170 macros, mark the symbol as written so that mips_file_end won't emit an
7171 .extern for it. STREAM is the output file, NAME is the name of the
7172 symbol, INIT_STRING is the string that should be written before the
7173 symbol and FINAL_STRING is the string that should be written after it.
7174 FINAL_STRING is a printf() format that consumes the remaining arguments. */
7175
7176 void
7177 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7178 const char *final_string, ...)
7179 {
7180 va_list ap;
7181
7182 fputs (init_string, stream);
7183 assemble_name (stream, name);
7184 va_start (ap, final_string);
7185 vfprintf (stream, final_string, ap);
7186 va_end (ap);
7187
7188 if (!TARGET_EXPLICIT_RELOCS)
7189 {
7190 tree name_tree = get_identifier (name);
7191 TREE_ASM_WRITTEN (name_tree) = 1;
7192 }
7193 }
7194
7195 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7196 extern int size_directive_output;
7197
7198 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7199 definitions except that it uses mips_declare_object() to emit the label. */
7200
7201 void
7202 mips_declare_object_name (FILE *stream, const char *name,
7203 tree decl ATTRIBUTE_UNUSED)
7204 {
7205 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7206 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7207 #endif
7208
7209 size_directive_output = 0;
7210 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7211 {
7212 HOST_WIDE_INT size;
7213
7214 size_directive_output = 1;
7215 size = int_size_in_bytes (TREE_TYPE (decl));
7216 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7217 }
7218
7219 mips_declare_object (stream, name, "", ":\n");
7220 }
7221
7222 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7223
7224 void
7225 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7226 {
7227 const char *name;
7228
7229 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7230 if (!flag_inhibit_size_directive
7231 && DECL_SIZE (decl) != 0
7232 && !at_end && top_level
7233 && DECL_INITIAL (decl) == error_mark_node
7234 && !size_directive_output)
7235 {
7236 HOST_WIDE_INT size;
7237
7238 size_directive_output = 1;
7239 size = int_size_in_bytes (TREE_TYPE (decl));
7240 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7241 }
7242 }
7243 #endif
7244 \f
7245 /* Return true if X in context CONTEXT is a small data address that can
7246 be rewritten as a LO_SUM. */
7247
7248 static bool
7249 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
7250 {
7251 enum mips_symbol_type symbol_type;
7252
7253 return (TARGET_EXPLICIT_RELOCS
7254 && mips_symbolic_constant_p (x, context, &symbol_type)
7255 && symbol_type == SYMBOL_GP_RELATIVE);
7256 }
7257
7258
7259 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
7260 containing MEM, or null if none. */
7261
7262 static int
7263 mips_small_data_pattern_1 (rtx *loc, void *data)
7264 {
7265 enum mips_symbol_context context;
7266
7267 if (GET_CODE (*loc) == LO_SUM)
7268 return -1;
7269
7270 if (MEM_P (*loc))
7271 {
7272 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
7273 return 1;
7274 return -1;
7275 }
7276
7277 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7278 return mips_rewrite_small_data_p (*loc, context);
7279 }
7280
7281 /* Return true if OP refers to small data symbols directly, not through
7282 a LO_SUM. */
7283
7284 bool
7285 mips_small_data_pattern_p (rtx op)
7286 {
7287 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
7288 }
7289 \f
7290 /* A for_each_rtx callback, used by mips_rewrite_small_data.
7291 DATA is the containing MEM, or null if none. */
7292
7293 static int
7294 mips_rewrite_small_data_1 (rtx *loc, void *data)
7295 {
7296 enum mips_symbol_context context;
7297
7298 if (MEM_P (*loc))
7299 {
7300 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
7301 return -1;
7302 }
7303
7304 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7305 if (mips_rewrite_small_data_p (*loc, context))
7306 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
7307
7308 if (GET_CODE (*loc) == LO_SUM)
7309 return -1;
7310
7311 return 0;
7312 }
7313
7314 /* If possible, rewrite OP so that it refers to small data using
7315 explicit relocations. */
7316
7317 rtx
7318 mips_rewrite_small_data (rtx op)
7319 {
7320 op = copy_insn (op);
7321 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
7322 return op;
7323 }
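/* Illustrative sketch (added comment): with explicit relocs and a
   4-byte variable `counter' that falls under the -G limit, a reference
   of the form

	(mem:SI (symbol_ref:SI ("counter")))

   is rewritten to

	(mem:SI (lo_sum:SI (reg:SI 28 $28) (symbol_ref:SI ("counter"))))

   which the output templates print as something like
   "lw $2,%gp_rel(counter)($28)".  */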
7324 \f
7325 /* Return true if the current function has an insn that implicitly
7326 refers to $gp. */
7327
7328 static bool
7329 mips_function_has_gp_insn (void)
7330 {
7331 /* Don't bother rechecking if we found one last time. */
7332 if (!cfun->machine->has_gp_insn_p)
7333 {
7334 rtx insn;
7335
7336 push_topmost_sequence ();
7337 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7338 if (INSN_P (insn)
7339 && GET_CODE (PATTERN (insn)) != USE
7340 && GET_CODE (PATTERN (insn)) != CLOBBER
7341 && (get_attr_got (insn) != GOT_UNSET
7342 || small_data_pattern (PATTERN (insn), VOIDmode)))
7343 break;
7344 pop_topmost_sequence ();
7345
7346 cfun->machine->has_gp_insn_p = (insn != 0);
7347 }
7348 return cfun->machine->has_gp_insn_p;
7349 }
7350
7351
7352 /* Return the register that should be used as the global pointer
7353 within this function. Return 0 if the function doesn't need
7354 a global pointer. */
7355
7356 static unsigned int
7357 mips_global_pointer (void)
7358 {
7359 unsigned int regno;
7360
7361 /* $gp is always available unless we're using a GOT. */
7362 if (!TARGET_USE_GOT)
7363 return GLOBAL_POINTER_REGNUM;
7364
7365 /* We must always provide $gp when it is used implicitly. */
7366 if (!TARGET_EXPLICIT_RELOCS)
7367 return GLOBAL_POINTER_REGNUM;
7368
7369 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7370 a valid gp. */
7371 if (current_function_profile)
7372 return GLOBAL_POINTER_REGNUM;
7373
7374 /* If the function has a nonlocal goto, $gp must hold the correct
7375 global pointer for the target function. */
7376 if (current_function_has_nonlocal_goto)
7377 return GLOBAL_POINTER_REGNUM;
7378
7379 /* If the gp is never referenced, there's no need to initialize it.
7380 Note that reload can sometimes introduce constant pool references
7381 into a function that otherwise didn't need them. For example,
7382 suppose we have an instruction like:
7383
7384 (set (reg:DF R1) (float:DF (reg:SI R2)))
7385
7386 If R2 turns out to be constant such as 1, the instruction may have a
7387 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7388 using this constant if R2 doesn't get allocated to a register.
7389
7390 In cases like these, reload will have added the constant to the pool
7391 but no instruction will yet refer to it. */
7392 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7393 && !current_function_uses_const_pool
7394 && !mips_function_has_gp_insn ())
7395 return 0;
7396
7397 /* We need a global pointer, but perhaps we can use a call-clobbered
7398 register instead of $gp. */
7399 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7400 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7401 if (!df_regs_ever_live_p (regno)
7402 && call_really_used_regs[regno]
7403 && !fixed_regs[regno]
7404 && regno != PIC_FUNCTION_ADDR_REGNUM)
7405 return regno;
7406
7407 return GLOBAL_POINTER_REGNUM;
7408 }
7409
7410
7411 /* Return true if the function return value MODE will get returned in a
7412 floating-point register. */
7413
7414 static bool
7415 mips_return_mode_in_fpr_p (enum machine_mode mode)
7416 {
7417 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
7418 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
7419 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7420 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
7421 }
7422
7423 /* Return a two-character string representing a function floating-point
7424 return mode, used to name MIPS16 function stubs. */
7425
7426 static const char *
7427 mips16_call_stub_mode_suffix (enum machine_mode mode)
7428 {
7429 if (mode == SFmode)
7430 return "sf";
7431 else if (mode == DFmode)
7432 return "df";
7433 else if (mode == SCmode)
7434 return "sc";
7435 else if (mode == DCmode)
7436 return "dc";
7437 else if (mode == V2SFmode)
7438 return "df";
7439 else
7440 gcc_unreachable ();
7441 }
7442
7443 /* Return true if the current function returns its value in a floating-point
7444 register in MIPS16 mode. */
7445
7446 static bool
7447 mips16_cfun_returns_in_fpr_p (void)
7448 {
7449 tree return_type = DECL_RESULT (current_function_decl);
7450 return (TARGET_MIPS16
7451 && TARGET_HARD_FLOAT_ABI
7452 && !aggregate_value_p (return_type, current_function_decl)
7453 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7454 }
7455
7456
7457 /* Return true if the current function must save REGNO. */
7458
7459 static bool
7460 mips_save_reg_p (unsigned int regno)
7461 {
7462 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7463 if we have not chosen a call-clobbered substitute. */
7464 if (regno == GLOBAL_POINTER_REGNUM)
7465 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7466
7467 /* Check call-saved registers. */
7468 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7469 && !call_really_used_regs[regno])
7470 return true;
7471
7472 /* Save both registers in an FPR pair if either one is used. This is
7473 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7474 register to be used without the even register. */
7475 if (FP_REG_P (regno)
7476 && MAX_FPRS_PER_FMT == 2
7477 && df_regs_ever_live_p (regno + 1)
7478 && !call_really_used_regs[regno + 1])
7479 return true;
7480
7481 /* We need to save the old frame pointer before setting up a new one. */
7482 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7483 return true;
7484
7485 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7486 if (current_function_profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7487 return true;
7488
7489 /* We need to save the incoming return address if it is ever clobbered
7490 within the function, if __builtin_eh_return is being used to set a
7491 different return address, or if a stub is being used to return a
7492 value in FPRs. */
7493 if (regno == GP_REG_FIRST + 31
7494 && (df_regs_ever_live_p (regno)
7495 || current_function_calls_eh_return
7496 || mips16_cfun_returns_in_fpr_p ()))
7497 return true;
7498
7499 return false;
7500 }
7501
7502 /* Return the index of the lowest X in the range [0, SIZE) for which
7503 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7504
7505 static unsigned int
7506 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7507 unsigned int size)
7508 {
7509 unsigned int i;
7510
7511 for (i = 0; i < size; i++)
7512 if (BITSET_P (mask, regs[i]))
7513 break;
7514
7515 return i;
7516 }
7517
7518 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7519 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7520 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7521 the same is true for all indexes in (X, SIZE). */
7522
7523 static void
7524 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7525 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7526 {
7527 unsigned int i;
7528
7529 i = mips16e_find_first_register (*mask_ptr, regs, size);
7530 for (i++; i < size; i++)
7531 if (!BITSET_P (*mask_ptr, regs[i]))
7532 {
7533 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7534 *mask_ptr |= 1 << regs[i];
7535 }
7536 }
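/* Worked example (added comment): mips16e_s2_s8_regs lists
   $30, $23, $22, $21, $20, $19, $18 in that order.  If *MASK_PTR
   initially contains only $21 ($s5), the entries after it ($20, $19
   and $18) are added too and *GP_REG_SIZE_PTR grows by three words,
   reflecting the contiguous register ranges that the MIPS16e SAVE and
   RESTORE encodings can describe.  */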
7537
7538 /* Return the bytes needed to compute the frame pointer from the current
7539 stack pointer. SIZE is the size (in bytes) of the local variables.
7540
7541 MIPS stack frames look like:
7542
7543 Before call After call
7544 high +-----------------------+ +-----------------------+
7545 mem. | | | |
7546 | caller's temps. | | caller's temps. |
7547 | | | |
7548 +-----------------------+ +-----------------------+
7549 | | | |
7550 | arguments on stack. | | arguments on stack. |
7551 | | | |
7552 +-----------------------+ +-----------------------+
7553 | 4 words to save | | 4 words to save |
7554 | arguments passed | | arguments passed |
7555 | in registers, even | | in registers, even |
7556 | if not passed. | | if not passed. |
7557 SP->+-----------------------+ VFP->+-----------------------+
7558 (VFP = SP+fp_sp_offset) | |\
7559 | fp register save | | fp_reg_size
7560 | |/
7561 SP+gp_sp_offset->+-----------------------+
7562 /| |\
7563 | | gp register save | | gp_reg_size
7564 gp_reg_rounded | | |/
7565 | +-----------------------+
7566 \| alignment padding |
7567 +-----------------------+
7568 | |\
7569 | local variables | | var_size
7570 | |/
7571 +-----------------------+
7572 | |
7573 | alloca allocations |
7574 | |
7575 +-----------------------+
7576 /| |
7577 cprestore_size | | GP save for V.4 abi |
7578 \| |
7579 +-----------------------+
7580 | |\
7581 | arguments on stack | |
7582 | | |
7583 +-----------------------+ |
7584 | 4 words to save | | args_size
7585 | arguments passed | |
7586 | in registers, even | |
7587 | if not passed. | |
7588 low | (TARGET_OLDABI only) |/
7589 memory SP->+-----------------------+
7590
7591 */
7592
7593 HOST_WIDE_INT
7594 compute_frame_size (HOST_WIDE_INT size)
7595 {
7596 unsigned int regno;
7597 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7598 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7599 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7600 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7601 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7602 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7603 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7604 unsigned int mask; /* mask of saved gp registers */
7605 unsigned int fmask; /* mask of saved fp registers */
7606
7607 cfun->machine->global_pointer = mips_global_pointer ();
7608
7609 gp_reg_size = 0;
7610 fp_reg_size = 0;
7611 mask = 0;
7612 fmask = 0;
7613 var_size = MIPS_STACK_ALIGN (size);
7614 args_size = current_function_outgoing_args_size;
7615 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7616
7617 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7618 functions. If the function has local variables, we're committed
7619 to allocating it anyway. Otherwise reclaim it here. */
7620 if (var_size == 0 && current_function_is_leaf)
7621 cprestore_size = args_size = 0;
7622
7623 /* The MIPS 3.0 linker does not like functions that dynamically
7624 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7625 looks like we are trying to create a second frame pointer to the
7626 function, so allocate some stack space to make it happy. */
7627
7628 if (args_size == 0 && current_function_calls_alloca)
7629 args_size = 4 * UNITS_PER_WORD;
7630
7631 total_size = var_size + args_size + cprestore_size;
7632
7633 /* Calculate space needed for gp registers. */
7634 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7635 if (mips_save_reg_p (regno))
7636 {
7637 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7638 mask |= 1 << (regno - GP_REG_FIRST);
7639 }
7640
7641 /* We need to restore these for the handler. */
7642 if (current_function_calls_eh_return)
7643 {
7644 unsigned int i;
7645 for (i = 0; ; ++i)
7646 {
7647 regno = EH_RETURN_DATA_REGNO (i);
7648 if (regno == INVALID_REGNUM)
7649 break;
7650 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7651 mask |= 1 << (regno - GP_REG_FIRST);
7652 }
7653 }
7654
7655 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7656 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7657 save all later registers too. */
7658 if (GENERATE_MIPS16E_SAVE_RESTORE)
7659 {
7660 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7661 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7662 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7663 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7664 }
7665
7666 /* This loop must iterate over the same space as its companion in
7667 mips_for_each_saved_reg. */
7668 if (TARGET_HARD_FLOAT)
7669 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7670 regno >= FP_REG_FIRST;
7671 regno -= MAX_FPRS_PER_FMT)
7672 if (mips_save_reg_p (regno))
7673 {
7674 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7675 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7676 }
7677
7678 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7679 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7680
7681 /* Add in the space required for saving incoming register arguments. */
7682 total_size += current_function_pretend_args_size;
7683 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7684
7685 /* Save other computed information. */
7686 cfun->machine->frame.total_size = total_size;
7687 cfun->machine->frame.var_size = var_size;
7688 cfun->machine->frame.args_size = args_size;
7689 cfun->machine->frame.cprestore_size = cprestore_size;
7690 cfun->machine->frame.gp_reg_size = gp_reg_size;
7691 cfun->machine->frame.fp_reg_size = fp_reg_size;
7692 cfun->machine->frame.mask = mask;
7693 cfun->machine->frame.fmask = fmask;
7694 cfun->machine->frame.initialized = reload_completed;
7695 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7696 cfun->machine->frame.num_fp = (fp_reg_size
7697 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
7698
7699 if (mask)
7700 {
7701 HOST_WIDE_INT offset;
7702
7703 if (GENERATE_MIPS16E_SAVE_RESTORE)
7704 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7705 to be aligned at the high end with any padding at the low end.
7706 It is only safe to use this calculation for o32, where we never
7707 have pretend arguments, and where any varargs will be saved in
7708 the caller-allocated area rather than at the top of the frame. */
7709 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7710 else
7711 offset = (args_size + cprestore_size + var_size
7712 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7713 cfun->machine->frame.gp_sp_offset = offset;
7714 cfun->machine->frame.gp_save_offset = offset - total_size;
7715 }
7716 else
7717 {
7718 cfun->machine->frame.gp_sp_offset = 0;
7719 cfun->machine->frame.gp_save_offset = 0;
7720 }
7721
7722 if (fmask)
7723 {
7724 HOST_WIDE_INT offset;
7725
7726 offset = (args_size + cprestore_size + var_size
7727 + gp_reg_rounded + fp_reg_size
7728 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7729 cfun->machine->frame.fp_sp_offset = offset;
7730 cfun->machine->frame.fp_save_offset = offset - total_size;
7731 }
7732 else
7733 {
7734 cfun->machine->frame.fp_sp_offset = 0;
7735 cfun->machine->frame.fp_save_offset = 0;
7736 }
7737
7738 /* Ok, we're done. */
7739 return total_size;
7740 }
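/* Worked example (added comment): suppose an o32 function has
   var_size = 16, args_size = 16, cprestore_size = 0 and saves two
   4-byte GPRs ($31 and $16).  Then gp_reg_size = 8, so
   total_size = 16 + 16 + 0 + MIPS_STACK_ALIGN (8) = 40,
   gp_sp_offset = 16 + 0 + 16 + 8 - 4 = 36 and
   gp_save_offset = 36 - 40 = -4.  These numbers are only meant to show
   how the pieces fit together; real values depend on the ABI,
   STARTING_FRAME_OFFSET and the register usage of the function.  */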
7741 \f
7742 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7743 pointer or argument pointer. TO is either the stack pointer or
7744 hard frame pointer. */
7745
7746 HOST_WIDE_INT
7747 mips_initial_elimination_offset (int from, int to)
7748 {
7749 HOST_WIDE_INT offset;
7750
7751 compute_frame_size (get_frame_size ());
7752
7753 /* Set OFFSET to the offset from the stack pointer. */
7754 switch (from)
7755 {
7756 case FRAME_POINTER_REGNUM:
7757 offset = 0;
7758 break;
7759
7760 case ARG_POINTER_REGNUM:
7761 offset = (cfun->machine->frame.total_size
7762 - current_function_pretend_args_size);
7763 break;
7764
7765 default:
7766 gcc_unreachable ();
7767 }
7768
7769 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7770 offset -= cfun->machine->frame.args_size;
7771
7772 return offset;
7773 }
7774 \f
7775 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7776 back to a previous frame. */
7777 rtx
7778 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7779 {
7780 if (count != 0)
7781 return const0_rtx;
7782
7783 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7784 }
7785 \f
7786 /* Use FN to save or restore register REGNO. MODE is the register's
7787 mode and OFFSET is the offset of its save slot from the current
7788 stack pointer. */
7789
7790 static void
7791 mips_save_restore_reg (enum machine_mode mode, int regno,
7792 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7793 {
7794 rtx mem;
7795
7796 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7797
7798 fn (gen_rtx_REG (mode, regno), mem);
7799 }
7800
7801
7802 /* Call FN for each register that is saved by the current function.
7803 SP_OFFSET is the offset of the current stack pointer from the start
7804 of the frame. */
7805
7806 static void
7807 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7808 {
7809 enum machine_mode fpr_mode;
7810 HOST_WIDE_INT offset;
7811 int regno;
7812
7813 /* Save registers starting from high to low. Debuggers prefer that at
7814 least the return register be stored at func+4; this also lets us avoid
7815 a nop in the epilogue when at least one register is reloaded in
7816 addition to the return address. */
7817 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7818 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7819 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7820 {
7821 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7822 offset -= GET_MODE_SIZE (gpr_mode);
7823 }
7824
7825 /* This loop must iterate over the same space as its companion in
7826 compute_frame_size. */
7827 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7828 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7829 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7830 regno >= FP_REG_FIRST;
7831 regno -= MAX_FPRS_PER_FMT)
7832 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7833 {
7834 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7835 offset -= GET_MODE_SIZE (fpr_mode);
7836 }
7837 }
7838 \f
7839 /* If we're generating n32 or n64 abicalls, and the current function
7840 does not use $28 as its global pointer, emit a cplocal directive.
7841 Use pic_offset_table_rtx as the argument to the directive. */
7842
7843 static void
7844 mips_output_cplocal (void)
7845 {
7846 if (!TARGET_EXPLICIT_RELOCS
7847 && cfun->machine->global_pointer > 0
7848 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7849 output_asm_insn (".cplocal %+", 0);
7850 }
7851
7852 /* Return the style of GP load sequence that is being used for the
7853 current function. */
7854
7855 enum mips_loadgp_style
7856 mips_current_loadgp_style (void)
7857 {
7858 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7859 return LOADGP_NONE;
7860
7861 if (TARGET_RTP_PIC)
7862 return LOADGP_RTP;
7863
7864 if (TARGET_ABSOLUTE_ABICALLS)
7865 return LOADGP_ABSOLUTE;
7866
7867 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7868 }
7869
7870 /* The __gnu_local_gp symbol. */
7871
7872 static GTY(()) rtx mips_gnu_local_gp;
7873
7874 /* If we're generating n32 or n64 abicalls, emit instructions
7875 to set up the global pointer. */
7876
7877 static void
7878 mips_emit_loadgp (void)
7879 {
7880 rtx addr, offset, incoming_address, base, index;
7881
7882 switch (mips_current_loadgp_style ())
7883 {
7884 case LOADGP_ABSOLUTE:
7885 if (mips_gnu_local_gp == NULL)
7886 {
7887 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7888 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7889 }
7890 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7891 break;
7892
7893 case LOADGP_NEWABI:
7894 addr = XEXP (DECL_RTL (current_function_decl), 0);
7895 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7896 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7897 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7898 if (!TARGET_EXPLICIT_RELOCS)
7899 emit_insn (gen_loadgp_blockage ());
7900 break;
7901
7902 case LOADGP_RTP:
7903 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7904 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7905 emit_insn (gen_loadgp_rtp (base, index));
7906 if (!TARGET_EXPLICIT_RELOCS)
7907 emit_insn (gen_loadgp_blockage ());
7908 break;
7909
7910 default:
7911 break;
7912 }
7913 }
7914
7915 /* Set up the stack and frame (if desired) for the function. */
7916
7917 static void
7918 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7919 {
7920 const char *fnname;
7921 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7922
7923 #ifdef SDB_DEBUGGING_INFO
7924 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7925 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7926 #endif
7927
7928 /* In mips16 mode, we may need to generate a 32-bit stub to handle
7929 floating point arguments. The linker will arrange for any 32-bit
7930 functions to call this stub, which will then jump to the 16-bit
7931 function proper. */
7932 if (TARGET_MIPS16
7933 && TARGET_HARD_FLOAT_ABI
7934 && current_function_args_info.fp_code != 0)
7935 build_mips16_function_stub (file);
7936
7937 /* Select the mips16 mode for this function. */
7938 if (TARGET_MIPS16)
7939 fprintf (file, "\t.set\tmips16\n");
7940 else
7941 fprintf (file, "\t.set\tnomips16\n");
7942
7943 if (!FUNCTION_NAME_ALREADY_DECLARED)
7944 {
7945 /* Get the function name the same way that toplev.c does before calling
7946 assemble_start_function. This is needed so that the name used here
7947 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7948 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7949
7950 if (!flag_inhibit_size_directive)
7951 {
7952 fputs ("\t.ent\t", file);
7953 assemble_name (file, fnname);
7954 fputs ("\n", file);
7955 }
7956
7957 assemble_name (file, fnname);
7958 fputs (":\n", file);
7959 }
7960
7961 /* Stop mips_file_end from treating this function as external. */
7962 if (TARGET_IRIX && mips_abi == ABI_32)
7963 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7964
7965 if (!flag_inhibit_size_directive)
7966 {
7967 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7968 fprintf (file,
7969 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7970 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7971 ", args= " HOST_WIDE_INT_PRINT_DEC
7972 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7973 (reg_names[(frame_pointer_needed)
7974 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7975 ((frame_pointer_needed && TARGET_MIPS16)
7976 ? tsize - cfun->machine->frame.args_size
7977 : tsize),
7978 reg_names[GP_REG_FIRST + 31],
7979 cfun->machine->frame.var_size,
7980 cfun->machine->frame.num_gp,
7981 cfun->machine->frame.num_fp,
7982 cfun->machine->frame.args_size,
7983 cfun->machine->frame.cprestore_size);
7984
7985 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7986 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7987 cfun->machine->frame.mask,
7988 cfun->machine->frame.gp_save_offset);
7989 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7990 cfun->machine->frame.fmask,
7991 cfun->machine->frame.fp_save_offset);
7992
7993 /* Require:
7994 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7995 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7996 }
7997
7998 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7999 {
8000 /* Handle the initialization of $gp for SVR4 PIC. */
8001 if (!cfun->machine->all_noreorder_p)
8002 output_asm_insn ("%(.cpload\t%^%)", 0);
8003 else
8004 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8005 }
8006 else if (cfun->machine->all_noreorder_p)
8007 output_asm_insn ("%(%<", 0);
8008
8009 /* Tell the assembler which register we're using as the global
8010 pointer. This is needed for thunks, since they can use either
8011 explicit relocs or assembler macros. */
8012 mips_output_cplocal ();
8013 }
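/* Illustrative sketch (added comment): for the 40-byte frame in the
   worked example after compute_frame_size ($31 and $16 saved, no FPRs,
   no frame pointer), the size directives come out roughly as

	.ent	foo
   foo:
	.frame	$sp,40,$31	# vars= 16, regs= 2/0, args= 16, gp= 0
	.mask	0x80010000,-4
	.fmask	0x00000000,0

   where `foo' is a hypothetical function name.  */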
8014 \f
8015 /* Make the last instruction frame related and note that it performs
8016 the operation described by FRAME_PATTERN. */
8017
8018 static void
8019 mips_set_frame_expr (rtx frame_pattern)
8020 {
8021 rtx insn;
8022
8023 insn = get_last_insn ();
8024 RTX_FRAME_RELATED_P (insn) = 1;
8025 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8026 frame_pattern,
8027 REG_NOTES (insn));
8028 }
8029
8030
8031 /* Return a frame-related rtx that stores REG at MEM.
8032 REG must be a single register. */
8033
8034 static rtx
8035 mips_frame_set (rtx mem, rtx reg)
8036 {
8037 rtx set;
8038
8039 /* If we're saving the return address register and the dwarf return
8040 address column differs from the hard register number, adjust the
8041 note reg to refer to the former. */
8042 if (REGNO (reg) == GP_REG_FIRST + 31
8043 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
8044 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
8045
8046 set = gen_rtx_SET (VOIDmode, mem, reg);
8047 RTX_FRAME_RELATED_P (set) = 1;
8048
8049 return set;
8050 }
8051
8052
8053 /* Save register REG to MEM. Make the instruction frame-related. */
8054
8055 static void
8056 mips_save_reg (rtx reg, rtx mem)
8057 {
8058 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8059 {
8060 rtx x1, x2;
8061
8062 if (mips_split_64bit_move_p (mem, reg))
8063 mips_split_64bit_move (mem, reg);
8064 else
8065 mips_emit_move (mem, reg);
8066
8067 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
8068 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
8069 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8070 }
8071 else
8072 {
8073 if (TARGET_MIPS16
8074 && REGNO (reg) != GP_REG_FIRST + 31
8075 && !M16_REG_P (REGNO (reg)))
8076 {
8077 /* Save a non-mips16 register by moving it through a temporary.
8078 We don't need to do this for $31 since there's a special
8079 instruction for it. */
8080 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8081 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8082 }
8083 else
8084 mips_emit_move (mem, reg);
8085
8086 mips_set_frame_expr (mips_frame_set (mem, reg));
8087 }
8088 }
8089
8090 /* Return a move between register REGNO and memory location SP + OFFSET.
8091 Make the move a load if RESTORE_P, otherwise make it a frame-related
8092 store. */
8093
8094 static rtx
8095 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8096 unsigned int regno)
8097 {
8098 rtx reg, mem;
8099
8100 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8101 reg = gen_rtx_REG (SImode, regno);
8102 return (restore_p
8103 ? gen_rtx_SET (VOIDmode, reg, mem)
8104 : mips_frame_set (mem, reg));
8105 }
8106
8107 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8108 The instruction must:
8109
8110 - Allocate or deallocate SIZE bytes in total; SIZE is known
8111 to be nonzero.
8112
8113 - Save or restore as many registers in *MASK_PTR as possible.
8114 The instruction saves the first registers at the top of the
8115 allocated area, with the other registers below it.
8116
8117 - Save NARGS argument registers above the allocated area.
8118
8119 (NARGS is always zero if RESTORE_P.)
8120
8121 The SAVE and RESTORE instructions cannot save and restore all general
8122 registers, so there may be some registers left over for the caller to
8123 handle. Destructively modify *MASK_PTR so that it contains the registers
8124 that still need to be saved or restored. The caller can save these
8125 registers in the memory immediately below *OFFSET_PTR, which is a
8126 byte offset from the bottom of the allocated stack area. */
8127
8128 static rtx
8129 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8130 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8131 HOST_WIDE_INT size)
8132 {
8133 rtx pattern, set;
8134 HOST_WIDE_INT offset, top_offset;
8135 unsigned int i, regno;
8136 int n;
8137
8138 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
8139
8140 /* Calculate the number of elements in the PARALLEL. We need one element
8141 for the stack adjustment, one for each argument register save, and one
8142 for each additional register move. */
8143 n = 1 + nargs;
8144 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8145 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8146 n++;
8147
8148 /* Create the final PARALLEL. */
8149 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8150 n = 0;
8151
8152 /* Add the stack pointer adjustment. */
8153 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8154 plus_constant (stack_pointer_rtx,
8155 restore_p ? size : -size));
8156 RTX_FRAME_RELATED_P (set) = 1;
8157 XVECEXP (pattern, 0, n++) = set;
8158
8159 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8160 top_offset = restore_p ? size : 0;
8161
8162 /* Save the arguments. */
8163 for (i = 0; i < nargs; i++)
8164 {
8165 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
8166 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8167 XVECEXP (pattern, 0, n++) = set;
8168 }
8169
8170 /* Then fill in the other register moves. */
8171 offset = top_offset;
8172 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8173 {
8174 regno = mips16e_save_restore_regs[i];
8175 if (BITSET_P (*mask_ptr, regno))
8176 {
8177 offset -= UNITS_PER_WORD;
8178 set = mips16e_save_restore_reg (restore_p, offset, regno);
8179 XVECEXP (pattern, 0, n++) = set;
8180 *mask_ptr &= ~(1 << regno);
8181 }
8182 }
8183
8184 /* Tell the caller what offset it should use for the remaining registers. */
8185 *offset_ptr = size + (offset - top_offset);
8186
8187 gcc_assert (n == XVECLEN (pattern, 0));
8188
8189 return pattern;
8190 }
8191
8192 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8193 pointer. Return true if PATTERN matches the kind of instruction
8194 generated by mips16e_build_save_restore. If INFO is nonnull,
8195 initialize it when returning true. */
8196
8197 bool
8198 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8199 struct mips16e_save_restore_info *info)
8200 {
8201 unsigned int i, nargs, mask;
8202 HOST_WIDE_INT top_offset, save_offset, offset, extra;
8203 rtx set, reg, mem, base;
8204 int n;
8205
8206 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8207 return false;
8208
8209 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8210 top_offset = adjust > 0 ? adjust : 0;
8211
8212 /* Interpret all other members of the PARALLEL. */
8213 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
8214 mask = 0;
8215 nargs = 0;
8216 i = 0;
8217 for (n = 1; n < XVECLEN (pattern, 0); n++)
8218 {
8219 /* Check that we have a SET. */
8220 set = XVECEXP (pattern, 0, n);
8221 if (GET_CODE (set) != SET)
8222 return false;
8223
8224 /* Check that the SET is a load (if restoring) or a store
8225 (if saving). */
8226 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8227 if (!MEM_P (mem))
8228 return false;
8229
8230 /* Check that the address is the sum of the stack pointer and a
8231 possibly-zero constant offset. */
8232 mips_split_plus (XEXP (mem, 0), &base, &offset);
8233 if (base != stack_pointer_rtx)
8234 return false;
8235
8236 /* Check that SET's other operand is a register. */
8237 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8238 if (!REG_P (reg))
8239 return false;
8240
8241 /* Check for argument saves. */
8242 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
8243 && REGNO (reg) == GP_ARG_FIRST + nargs)
8244 nargs++;
8245 else if (offset == save_offset)
8246 {
8247 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8248 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8249 return false;
8250
8251 mask |= 1 << REGNO (reg);
8252 save_offset -= GET_MODE_SIZE (gpr_mode);
8253 }
8254 else
8255 return false;
8256 }
8257
8258 /* Check that the restrictions on register ranges are met. */
8259 extra = 0;
8260 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8261 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8262 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8263 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8264 if (extra != 0)
8265 return false;
8266
8267 /* Make sure that the topmost argument register is not saved twice.
8268 The checks above ensure that the same is then true for the other
8269 argument registers. */
8270 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8271 return false;
8272
8273 /* Pass back information, if requested. */
8274 if (info)
8275 {
8276 info->nargs = nargs;
8277 info->mask = mask;
8278 info->size = (adjust > 0 ? adjust : -adjust);
8279 }
8280
8281 return true;
8282 }
8283
8284 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8285 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8286 the null terminator. */
8287
8288 static char *
8289 mips16e_add_register_range (char *s, unsigned int min_reg,
8290 unsigned int max_reg)
8291 {
8292 if (min_reg != max_reg)
8293 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8294 else
8295 s += sprintf (s, ",%s", reg_names[min_reg]);
8296 return s;
8297 }
8298
8299 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8300 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8301
8302 const char *
8303 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8304 {
8305 static char buffer[300];
8306
8307 struct mips16e_save_restore_info info;
8308 unsigned int i, end;
8309 char *s;
8310
8311 /* Parse the pattern. */
8312 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8313 gcc_unreachable ();
8314
8315 /* Add the mnemonic. */
8316 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8317 s += strlen (s);
8318
8319 /* Save the arguments. */
8320 if (info.nargs > 1)
8321 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8322 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8323 else if (info.nargs == 1)
8324 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8325
8326 /* Emit the amount of stack space to allocate or deallocate. */
8327 s += sprintf (s, "%d", (int) info.size);
8328
8329 /* Save or restore $16. */
8330 if (BITSET_P (info.mask, 16))
8331 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8332
8333 /* Save or restore $17. */
8334 if (BITSET_P (info.mask, 17))
8335 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8336
8337 /* Save or restore registers in the range $s2...$s8, which
8338 mips16e_s2_s8_regs lists in decreasing order. Note that this
8339 is a software register range; the hardware registers are not
8340 numbered consecutively. */
8341 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8342 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8343 if (i < end)
8344 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8345 mips16e_s2_s8_regs[i]);
8346
8347 /* Save or restore registers in the range $a0...$a3. */
8348 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8349 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8350 if (i < end)
8351 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8352 mips16e_a0_a3_regs[end - 1]);
8353
8354 /* Save or restore $31. */
8355 if (BITSET_P (info.mask, 31))
8356 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8357
8358 return buffer;
8359 }
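/* Illustrative example (added comment): for a SAVE pattern that
   allocates 32 bytes, stores one argument register and saves $16, $17
   and $31, this returns a string along the lines of

	save	$4,32,$16,$17,$31

   while the matching epilogue pattern produces
   "restore 32,$16,$17,$31".  */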
8360
8361 /* Return a simplified form of X using the register values in REG_VALUES.
8362 REG_VALUES[R] is the last value assigned to hard register R, or null
8363 if R has not been modified.
8364
8365 This function is rather limited, but is good enough for our purposes. */
8366
8367 static rtx
8368 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8369 {
8370 rtx x0, x1;
8371
8372 x = avoid_constant_pool_reference (x);
8373
8374 if (UNARY_P (x))
8375 {
8376 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8377 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8378 x0, GET_MODE (XEXP (x, 0)));
8379 }
8380
8381 if (ARITHMETIC_P (x))
8382 {
8383 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8384 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8385 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8386 }
8387
8388 if (REG_P (x)
8389 && reg_values[REGNO (x)]
8390 && !rtx_unstable_p (reg_values[REGNO (x)]))
8391 return reg_values[REGNO (x)];
8392
8393 return x;
8394 }
8395
8396 /* Return true if (set DEST SRC) stores an argument register into its
8397 caller-allocated save slot, storing the number of that argument
8398 register in *REGNO_PTR if so. REG_VALUES is as for
8399 mips16e_collect_propagate_value. */
8400
8401 static bool
8402 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8403 unsigned int *regno_ptr)
8404 {
8405 unsigned int argno, regno;
8406 HOST_WIDE_INT offset, required_offset;
8407 rtx addr, base;
8408
8409 /* Check that this is a word-mode store. */
8410 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8411 return false;
8412
8413 /* Check that the register being saved is an unmodified argument
8414 register. */
8415 regno = REGNO (src);
8416 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
8417 return false;
8418 argno = regno - GP_ARG_FIRST;
8419
8420 /* Check whether the address is an appropriate stack pointer or
8421 frame pointer access. The frame pointer is offset from the
8422 stack pointer by the size of the outgoing arguments. */
8423 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8424 mips_split_plus (addr, &base, &offset);
8425 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8426 if (base == hard_frame_pointer_rtx)
8427 required_offset -= cfun->machine->frame.args_size;
8428 else if (base != stack_pointer_rtx)
8429 return false;
8430 if (offset != required_offset)
8431 return false;
8432
8433 *regno_ptr = regno;
8434 return true;
8435 }
8436
8437 /* A subroutine of mips_expand_prologue, called only when generating
8438 MIPS16e SAVE instructions. Search the start of the function for any
8439 instructions that save argument registers into their caller-allocated
8440 save slots. Delete such instructions and return a value N such that
8441 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8442 instructions redundant. */
8443
8444 static unsigned int
8445 mips16e_collect_argument_saves (void)
8446 {
8447 rtx reg_values[FIRST_PSEUDO_REGISTER];
8448 rtx insn, next, set, dest, src;
8449 unsigned int nargs, regno;
8450
8451 push_topmost_sequence ();
8452 nargs = 0;
8453 memset (reg_values, 0, sizeof (reg_values));
8454 for (insn = get_insns (); insn; insn = next)
8455 {
8456 next = NEXT_INSN (insn);
8457 if (NOTE_P (insn))
8458 continue;
8459
8460 if (!INSN_P (insn))
8461 break;
8462
8463 set = PATTERN (insn);
8464 if (GET_CODE (set) != SET)
8465 break;
8466
8467 dest = SET_DEST (set);
8468 src = SET_SRC (set);
8469 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
8470 {
8471 if (!BITSET_P (cfun->machine->frame.mask, regno))
8472 {
8473 delete_insn (insn);
8474 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8475 }
8476 }
8477 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8478 reg_values[REGNO (dest)]
8479 = mips16e_collect_propagate_value (src, reg_values);
8480 else
8481 break;
8482 }
8483 pop_topmost_sequence ();
8484
8485 return nargs;
8486 }
8487
8488 /* Expand the prologue into a bunch of separate insns. */
8489
8490 void
8491 mips_expand_prologue (void)
8492 {
8493 HOST_WIDE_INT size;
8494 unsigned int nargs;
8495 rtx insn;
8496
8497 if (cfun->machine->global_pointer > 0)
8498 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8499
8500 size = compute_frame_size (get_frame_size ());
8501
8502 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8503 bytes beforehand; this is enough to cover the register save area
8504 without going out of range. */
8505 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8506 {
8507 HOST_WIDE_INT step1;
8508
8509 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8510
8511 if (GENERATE_MIPS16E_SAVE_RESTORE)
8512 {
8513 HOST_WIDE_INT offset;
8514 unsigned int mask, regno;
8515
8516 /* Try to merge argument stores into the save instruction. */
8517 nargs = mips16e_collect_argument_saves ();
8518
8519 /* Build the save instruction. */
8520 mask = cfun->machine->frame.mask;
8521 insn = mips16e_build_save_restore (false, &mask, &offset,
8522 nargs, step1);
8523 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8524 size -= step1;
8525
8526 /* Check if we need to save other registers. */
8527 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8528 if (BITSET_P (mask, regno - GP_REG_FIRST))
8529 {
8530 offset -= GET_MODE_SIZE (gpr_mode);
8531 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8532 }
8533 }
8534 else
8535 {
8536 insn = gen_add3_insn (stack_pointer_rtx,
8537 stack_pointer_rtx,
8538 GEN_INT (-step1));
8539 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8540 size -= step1;
8541 mips_for_each_saved_reg (size, mips_save_reg);
8542 }
8543 }
8544
8545 /* Allocate the rest of the frame. */
8546 if (size > 0)
8547 {
8548 if (SMALL_OPERAND (-size))
8549 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8550 stack_pointer_rtx,
8551 GEN_INT (-size)))) = 1;
8552 else
8553 {
8554 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8555 if (TARGET_MIPS16)
8556 {
8557 /* There are no instructions to add or subtract registers
8558 from the stack pointer, so use the frame pointer as a
8559 temporary. We should always be using a frame pointer
8560 in this case anyway. */
8561 gcc_assert (frame_pointer_needed);
8562 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8563 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8564 hard_frame_pointer_rtx,
8565 MIPS_PROLOGUE_TEMP (Pmode)));
8566 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8567 }
8568 else
8569 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8570 stack_pointer_rtx,
8571 MIPS_PROLOGUE_TEMP (Pmode)));
8572
8573 /* Describe the combined effect of the previous instructions. */
8574 mips_set_frame_expr
8575 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8576 plus_constant (stack_pointer_rtx, -size)));
8577 }
8578 }
8579
8580 /* Set up the frame pointer, if we're using one. In mips16 code,
8581 we point the frame pointer ahead of the outgoing argument area.
8582 This should allow more variables & incoming arguments to be
8583 accessed with unextended instructions. */
8584 if (frame_pointer_needed)
8585 {
8586 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8587 {
8588 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8589 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8590 RTX_FRAME_RELATED_P
8591 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8592 stack_pointer_rtx,
8593 offset))) = 1;
8594 else
8595 {
8596 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8597 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8598 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8599 hard_frame_pointer_rtx,
8600 MIPS_PROLOGUE_TEMP (Pmode)));
8601 mips_set_frame_expr
8602 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8603 plus_constant (stack_pointer_rtx,
8604 cfun->machine->frame.args_size)));
8605 }
8606 }
8607 else
8608 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8609 stack_pointer_rtx)) = 1;
8610 }
8611
8612 mips_emit_loadgp ();
8613
8614 /* If generating o32/o64 abicalls, save $gp on the stack. */
8615 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8616 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8617
8618 /* If we are profiling, make sure no instructions are scheduled before
8619 the call to mcount. */
8620
8621 if (current_function_profile)
8622 emit_insn (gen_blockage ());
8623 }
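/* Illustrative sketch (added comment): continuing the 40-byte o32
   example, the insns emitted above typically assemble to

	addiu	$sp,$sp,-40
	sw	$31,36($sp)
	sw	$16,32($sp)

   A second stack adjustment is added only when the frame is larger
   than MIPS_MAX_FIRST_STACK_STEP, and abicalls code additionally gets
   the $gp setup and cprestore sequences.  */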
8624 \f
8625 /* Do any necessary cleanup after a function to restore stack, frame,
8626 and regs. */
8627
8628 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8629
8630 static void
8631 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8632 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8633 {
8634 /* Reinstate the normal $gp. */
8635 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8636 mips_output_cplocal ();
8637
8638 if (cfun->machine->all_noreorder_p)
8639 {
8640 /* Avoid using %>%) since it adds excess whitespace. */
8641 output_asm_insn (".set\tmacro", 0);
8642 output_asm_insn (".set\treorder", 0);
8643 set_noreorder = set_nomacro = 0;
8644 }
8645
8646 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8647 {
8648 const char *fnname;
8649
8650 /* Get the function name the same way that toplev.c does before calling
8651 assemble_start_function. This is needed so that the name used here
8652 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8653 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8654 fputs ("\t.end\t", file);
8655 assemble_name (file, fnname);
8656 fputs ("\n", file);
8657 }
8658 }
8659 \f
8660 /* Emit instructions to restore register REG from slot MEM. */
8661
8662 static void
8663 mips_restore_reg (rtx reg, rtx mem)
8664 {
8665 /* There's no mips16 instruction to load $31 directly. Load into
8666 $7 instead and adjust the return insn appropriately. */
8667 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8668 reg = gen_rtx_REG (GET_MODE (reg), 7);
8669
8670 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8671 {
8672 /* Can't restore directly; move through a temporary. */
8673 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8674 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8675 }
8676 else
8677 mips_emit_move (reg, mem);
8678 }
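   /* Example of the MIPS16 special case above (illustrative only): the
      return-address slot is reloaded with something like

          lw    $7, <ra offset>($sp)    # rather than lw $31, ...

      and mips_expand_epilogue below then ends the function with
      "jr $7" instead of "jr $31".  */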
8679
8680
8681 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8682 if this epilogue precedes a sibling call, false if it is for a normal
8683 "epilogue" pattern. */
8684
8685 void
8686 mips_expand_epilogue (int sibcall_p)
8687 {
8688 HOST_WIDE_INT step1, step2;
8689 rtx base, target;
8690
8691 if (!sibcall_p && mips_can_use_return_insn ())
8692 {
8693 emit_jump_insn (gen_return ());
8694 return;
8695 }
8696
8697 /* In mips16 mode, if the return value should go into a floating-point
8698 register, we need to call a helper routine to copy it over. */
8699 if (mips16_cfun_returns_in_fpr_p ())
8700 {
8701 char *name;
8702 rtx func;
8703 rtx insn;
8704 rtx retval;
8705 rtx call;
8706 tree id;
8707 tree return_type;
8708 enum machine_mode return_mode;
8709
8710 return_type = DECL_RESULT (current_function_decl);
8711 return_mode = DECL_MODE (return_type);
8712
8713 name = ACONCAT (("__mips16_ret_",
8714 mips16_call_stub_mode_suffix (return_mode),
8715 NULL));
8716 id = get_identifier (name);
8717 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8718 retval = gen_rtx_REG (return_mode, GP_RETURN);
8719 call = gen_call_value_internal (retval, func, const0_rtx);
8720 insn = emit_call_insn (call);
8721 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8722 }
8723
8724 /* Split the frame into two. STEP1 is the amount of stack we should
8725 deallocate before restoring the registers. STEP2 is the amount we
8726 should deallocate afterwards.
8727
8728 Start off by assuming that no registers need to be restored. */
8729 step1 = cfun->machine->frame.total_size;
8730 step2 = 0;
8731
8732 /* Work out which register holds the frame address. Account for the
8733 frame pointer offset used by mips16 code. */
8734 if (!frame_pointer_needed)
8735 base = stack_pointer_rtx;
8736 else
8737 {
8738 base = hard_frame_pointer_rtx;
8739 if (TARGET_MIPS16)
8740 step1 -= cfun->machine->frame.args_size;
8741 }
8742
8743 /* If we need to restore registers, deallocate as much stack as
8744 possible in the second step without going out of range. */
8745 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8746 {
8747 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8748 step1 -= step2;
8749 }
8750
8751 /* Set TARGET to BASE + STEP1. */
8752 target = base;
8753 if (step1 > 0)
8754 {
8755 rtx adjust;
8756
8757 /* Get an rtx for STEP1 that we can add to BASE. */
8758 adjust = GEN_INT (step1);
8759 if (!SMALL_OPERAND (step1))
8760 {
8761 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8762 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8763 }
8764
8765 /* Normal mode code can copy the result straight into $sp. */
8766 if (!TARGET_MIPS16)
8767 target = stack_pointer_rtx;
8768
8769 emit_insn (gen_add3_insn (target, base, adjust));
8770 }
8771
8772 /* Copy TARGET into the stack pointer. */
8773 if (target != stack_pointer_rtx)
8774 mips_emit_move (stack_pointer_rtx, target);
8775
8776 /* If we're using addressing macros, $gp is implicitly used by all
8777 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8778 from the stack. */
8779 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8780 emit_insn (gen_blockage ());
8781
8782 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8783 {
8784 unsigned int regno, mask;
8785 HOST_WIDE_INT offset;
8786 rtx restore;
8787
8788 /* Generate the restore instruction. */
8789 mask = cfun->machine->frame.mask;
8790 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8791
8792 /* Restore any other registers manually. */
8793 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8794 if (BITSET_P (mask, regno - GP_REG_FIRST))
8795 {
8796 offset -= GET_MODE_SIZE (gpr_mode);
8797 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8798 }
8799
8800 /* Restore the remaining registers and deallocate the final bit
8801 of the frame. */
8802 emit_insn (restore);
8803 }
8804 else
8805 {
8806 /* Restore the registers. */
8807 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8808 mips_restore_reg);
8809
8810 /* Deallocate the final bit of the frame. */
8811 if (step2 > 0)
8812 emit_insn (gen_add3_insn (stack_pointer_rtx,
8813 stack_pointer_rtx,
8814 GEN_INT (step2)));
8815 }
8816
8817 /* Add in the __builtin_eh_return stack adjustment. We need to
8818 use a temporary in mips16 code. */
8819 if (current_function_calls_eh_return)
8820 {
8821 if (TARGET_MIPS16)
8822 {
8823 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8824 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8825 MIPS_EPILOGUE_TEMP (Pmode),
8826 EH_RETURN_STACKADJ_RTX));
8827 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8828 }
8829 else
8830 emit_insn (gen_add3_insn (stack_pointer_rtx,
8831 stack_pointer_rtx,
8832 EH_RETURN_STACKADJ_RTX));
8833 }
8834
8835 if (!sibcall_p)
8836 {
8837 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8838 path will restore the return address into $7 rather than $31. */
8839 if (TARGET_MIPS16
8840 && !GENERATE_MIPS16E_SAVE_RESTORE
8841 && (cfun->machine->frame.mask & RA_MASK) != 0)
8842 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8843 GP_REG_FIRST + 7)));
8844 else
8845 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8846 GP_REG_FIRST + 31)));
8847 }
8848 }
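/* Illustrative shape of the code emitted above for a large frame
   (pseudo-assembly; exact instructions and registers vary by target
   and frame size):

       addiu   $sp, $sp, <step1>     # deallocate the out-of-range part
       ...register restores...       # offsets now fit in 16 bits
       addiu   $sp, $sp, <step2>     # deallocate the rest
       jr      $31                   # or "jr $7" for MIPS16

   STEP2 is capped at MIPS_MAX_FIRST_STACK_STEP so that the register
   restore offsets stay in range.  */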
8849 \f
8850 /* Return nonzero if this function is known to have a null epilogue.
8851 This allows the optimizer to omit jumps to jumps if no stack
8852 was created. */
8853
8854 int
8855 mips_can_use_return_insn (void)
8856 {
8857 if (! reload_completed)
8858 return 0;
8859
8860 if (df_regs_ever_live_p (31) || current_function_profile)
8861 return 0;
8862
8863 /* In mips16 mode, a function that returns a floating point value
8864 needs to arrange to copy the return value into the floating point
8865 registers. */
8866 if (mips16_cfun_returns_in_fpr_p ())
8867 return 0;
8868
8869 if (cfun->machine->frame.initialized)
8870 return cfun->machine->frame.total_size == 0;
8871
8872 return compute_frame_size (get_frame_size ()) == 0;
8873 }
8874 \f
8875 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8876 in order to avoid duplicating too much logic from elsewhere. */
8877
8878 static void
8879 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8880 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8881 tree function)
8882 {
8883 rtx this, temp1, temp2, insn, fnaddr;
8884 bool use_sibcall_p;
8885
8886 /* Pretend to be a post-reload pass while generating rtl. */
8887 reload_completed = 1;
8888
8889 /* Mark the end of the (empty) prologue. */
8890 emit_note (NOTE_INSN_PROLOGUE_END);
8891
8892 /* Determine if we can use a sibcall to call FUNCTION directly. */
8893 fnaddr = XEXP (DECL_RTL (function), 0);
8894 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
8895 && const_call_insn_operand (fnaddr, Pmode));
8896
8897 /* Determine if we need to load FNADDR from the GOT. */
8898 if (!use_sibcall_p)
8899 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
8900 {
8901 case SYMBOL_GOT_PAGE_OFST:
8902 case SYMBOL_GOT_DISP:
8903 /* Pick a global pointer. Use a call-clobbered register if
8904 TARGET_CALL_SAVED_GP. */
8905 cfun->machine->global_pointer =
8906 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8907 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8908
8909 /* Set up the global pointer for n32 or n64 abicalls. */
8910 mips_emit_loadgp ();
8911 break;
8912
8913 default:
8914 break;
8915 }
8916
8917 /* We need two temporary registers in some cases. */
8918 temp1 = gen_rtx_REG (Pmode, 2);
8919 temp2 = gen_rtx_REG (Pmode, 3);
8920
8921 /* Find out which register contains the "this" pointer. */
8922 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8923 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8924 else
8925 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8926
8927 /* Add DELTA to THIS. */
8928 if (delta != 0)
8929 {
8930 rtx offset = GEN_INT (delta);
8931 if (!SMALL_OPERAND (delta))
8932 {
8933 mips_emit_move (temp1, offset);
8934 offset = temp1;
8935 }
8936 emit_insn (gen_add3_insn (this, this, offset));
8937 }
8938
8939 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8940 if (vcall_offset != 0)
8941 {
8942 rtx addr;
8943
8944 /* Set TEMP1 to *THIS. */
8945 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8946
8947 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8948 addr = mips_add_offset (temp2, temp1, vcall_offset);
8949
8950 /* Load the offset and add it to THIS. */
8951 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8952 emit_insn (gen_add3_insn (this, this, temp1));
8953 }
8954
8955 /* Jump to the target function. Use a sibcall if direct jumps are
8956 allowed, otherwise load the address into a register first. */
8957 if (use_sibcall_p)
8958 {
8959 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8960 SIBLING_CALL_P (insn) = 1;
8961 }
8962 else
8963 {
8964 /* This is messy. gas treats "la $25,foo" as part of a call
8965 sequence and may allow a global "foo" to be lazily bound.
8966 The general move patterns therefore reject this combination.
8967
8968 In this context, lazy binding would actually be OK
8969 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8970 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8971 We must therefore load the address via a temporary
8972 register if mips_dangerous_for_la25_p.
8973
8974 If we jump to the temporary register rather than $25, the assembler
8975 can use the move insn to fill the jump's delay slot. */
8976 if (TARGET_USE_PIC_FN_ADDR_REG
8977 && !mips_dangerous_for_la25_p (fnaddr))
8978 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8979 mips_load_call_address (temp1, fnaddr, true);
8980
8981 if (TARGET_USE_PIC_FN_ADDR_REG
8982 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8983 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8984 emit_jump_insn (gen_indirect_jump (temp1));
8985 }
8986
8987 /* Run just enough of rest_of_compilation. This sequence was
8988 "borrowed" from alpha.c. */
8989 insn = get_insns ();
8990 insn_locators_alloc ();
8991 split_all_insns_noflow ();
8992 mips16_lay_out_constants ();
8993 shorten_branches (insn);
8994 final_start_function (insn, file, 1);
8995 final (insn, file, 1);
8996 final_end_function ();
8997
8998 /* Clean up the vars set above. Note that final_end_function resets
8999 the global pointer for us. */
9000 reload_completed = 0;
9001 }
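/* Rough C-level model of the thunk generated above (a sketch, not the
   exact code; the cast chain is illustrative):

     this += DELTA;
     if (VCALL_OFFSET != 0)
       this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
     return FUNCTION (this, ...);   // via sibcall or indirect jump
*/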
9002 \f
9003 /* Implement TARGET_SELECT_RTX_SECTION. */
9004
9005 static section *
9006 mips_select_rtx_section (enum machine_mode mode, rtx x,
9007 unsigned HOST_WIDE_INT align)
9008 {
9009 /* ??? Consider using mergeable small data sections. */
9010 if (mips_rtx_constant_in_small_data_p (mode))
9011 return get_named_section (NULL, ".sdata", 0);
9012
9013 return default_elf_select_rtx_section (mode, x, align);
9014 }
9015
9016 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
9017
9018 The complication here is that, with the combination TARGET_ABICALLS
9019 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
9020 therefore not be included in the read-only part of a DSO. Handle such
9021 cases by selecting a normal data section instead of a read-only one.
9022 The logic apes that in default_function_rodata_section. */
9023
9024 static section *
9025 mips_function_rodata_section (tree decl)
9026 {
9027 if (!TARGET_ABICALLS || TARGET_GPWORD)
9028 return default_function_rodata_section (decl);
9029
9030 if (decl && DECL_SECTION_NAME (decl))
9031 {
9032 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
9033 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
9034 {
9035 char *rname = ASTRDUP (name);
9036 rname[14] = 'd';
9037 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
9038 }
9039 else if (flag_function_sections && flag_data_sections
9040 && strncmp (name, ".text.", 6) == 0)
9041 {
9042 char *rname = ASTRDUP (name);
9043 memcpy (rname + 1, "data", 4);
9044 return get_section (rname, SECTION_WRITE, decl);
9045 }
9046 }
9047 return data_section;
9048 }
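/* Worked examples of the renaming done above:

     ".gnu.linkonce.t.foo"  ->  ".gnu.linkonce.d.foo"   (rname[14] = 'd')
     ".text.foo"            ->  ".data.foo"             ("text" overwritten)

   so the absolute-address jump tables land in a writable section.  */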
9049
9050 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
9051 locally-defined objects go in a small data section. It also controls
9052 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
9053 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
9054
9055 static bool
9056 mips_in_small_data_p (const_tree decl)
9057 {
9058 HOST_WIDE_INT size;
9059
9060 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
9061 return false;
9062
9063 /* We don't yet generate small-data references for -mabicalls or
9064 VxWorks RTP code. See the related -G handling in override_options. */
9065 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
9066 return false;
9067
9068 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
9069 {
9070 const char *name;
9071
9072 /* Reject anything that isn't in a known small-data section. */
9073 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
9074 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
9075 return false;
9076
9077 /* If a symbol is defined externally, the assembler will use the
9078 usual -G rules when deciding how to implement macros. */
9079 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
9080 return true;
9081 }
9082 else if (TARGET_EMBEDDED_DATA)
9083 {
9084 /* Don't put constants into the small data section: we want them
9085 to be in ROM rather than RAM. */
9086 if (TREE_CODE (decl) != VAR_DECL)
9087 return false;
9088
9089 if (TREE_READONLY (decl)
9090 && !TREE_SIDE_EFFECTS (decl)
9091 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
9092 return false;
9093 }
9094
9095 /* Enforce -mlocal-sdata. */
9096 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
9097 return false;
9098
9099 /* Enforce -mextern-sdata. */
9100 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
9101 {
9102 if (DECL_EXTERNAL (decl))
9103 return false;
9104 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
9105 return false;
9106 }
9107
9108 size = int_size_in_bytes (TREE_TYPE (decl));
9109 return (size > 0 && size <= mips_section_threshold);
9110 }
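/* Example (an assumption: a non-abicalls configuration with the usual
   -G 8 default for mips_section_threshold): a locally-defined
   "int counter;" (4 bytes) is placed in .sbss and can be addressed as
   %gp_rel(counter)($gp), whereas "char buf[100];" exceeds the threshold
   and gets ordinary .bss treatment.  */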
9111
9112 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
9113 anchors for small data: the GP register acts as an anchor in that
9114 case. We also don't want to use them for PC-relative accesses,
9115 where the PC acts as an anchor. */
9116
9117 static bool
9118 mips_use_anchors_for_symbol_p (const_rtx symbol)
9119 {
9120 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
9121 {
9122 case SYMBOL_PC_RELATIVE:
9123 case SYMBOL_GP_RELATIVE:
9124 return false;
9125
9126 default:
9127 return default_use_anchors_for_symbol_p (symbol);
9128 }
9129 }
9130 \f
9131 /* See whether VALTYPE is a record whose fields should be returned in
9132 floating-point registers. If so, return the number of fields and
9133 list them in FIELDS (which should have two elements). Return 0
9134 otherwise.
9135
9136 For n32 & n64, a structure with one or two fields is returned in
9137 floating-point registers as long as every field has a floating-point
9138 type. */
9139
9140 static int
9141 mips_fpr_return_fields (const_tree valtype, tree *fields)
9142 {
9143 tree field;
9144 int i;
9145
9146 if (!TARGET_NEWABI)
9147 return 0;
9148
9149 if (TREE_CODE (valtype) != RECORD_TYPE)
9150 return 0;
9151
9152 i = 0;
9153 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
9154 {
9155 if (TREE_CODE (field) != FIELD_DECL)
9156 continue;
9157
9158 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
9159 return 0;
9160
9161 if (i == 2)
9162 return 0;
9163
9164 fields[i++] = field;
9165 }
9166 return i;
9167 }
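/* Example for n32/n64: "struct s { double re; double im; };" has two
   REAL_TYPE fields, so mips_fpr_return_fields returns 2 and the value
   comes back in $f0/$f2 (see mips_return_fpr_pair below).  Adding an
   integer field, or a third field of any kind, makes it return 0 and
   the structure is returned in GPRs instead.  */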
9168
9169
9170 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
9171 a value in the most significant part of $2/$3 if:
9172
9173 - the target is big-endian;
9174
9175 - the value has a structure or union type (we generalize this to
9176 cover aggregates from other languages too); and
9177
9178 - the structure is not returned in floating-point registers. */
9179
9180 static bool
9181 mips_return_in_msb (const_tree valtype)
9182 {
9183 tree fields[2];
9184
9185 return (TARGET_NEWABI
9186 && TARGET_BIG_ENDIAN
9187 && AGGREGATE_TYPE_P (valtype)
9188 && mips_fpr_return_fields (valtype, fields) == 0);
9189 }
9190
9191
9192 /* Return a composite value in a pair of floating-point registers.
9193 MODE1 and OFFSET1 are the mode and byte offset for the first value,
9194 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
9195 complete value.
9196
9197 For n32 & n64, $f0 always holds the first value and $f2 the second.
9198 Otherwise the values are packed together as closely as possible. */
9199
9200 static rtx
9201 mips_return_fpr_pair (enum machine_mode mode,
9202 enum machine_mode mode1, HOST_WIDE_INT offset1,
9203 enum machine_mode mode2, HOST_WIDE_INT offset2)
9204 {
9205 int inc;
9206
9207 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
9208 return gen_rtx_PARALLEL
9209 (mode,
9210 gen_rtvec (2,
9211 gen_rtx_EXPR_LIST (VOIDmode,
9212 gen_rtx_REG (mode1, FP_RETURN),
9213 GEN_INT (offset1)),
9214 gen_rtx_EXPR_LIST (VOIDmode,
9215 gen_rtx_REG (mode2, FP_RETURN + inc),
9216 GEN_INT (offset2))));
9217
9218 }
9219
9220
9221 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
9222 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
9223 VALTYPE is null and MODE is the mode of the return value. */
9224
9225 rtx
9226 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
9227 enum machine_mode mode)
9228 {
9229 if (valtype)
9230 {
9231 tree fields[2];
9232 int unsignedp;
9233
9234 mode = TYPE_MODE (valtype);
9235 unsignedp = TYPE_UNSIGNED (valtype);
9236
9237 /* Since TARGET_PROMOTE_FUNCTION_RETURN returns true for this target,
9238 we must promote the mode just as PROMOTE_MODE does. */
9239 mode = promote_mode (valtype, mode, &unsignedp, 1);
9240
9241 /* Handle structures whose fields are returned in $f0/$f2. */
9242 switch (mips_fpr_return_fields (valtype, fields))
9243 {
9244 case 1:
9245 return gen_rtx_REG (mode, FP_RETURN);
9246
9247 case 2:
9248 return mips_return_fpr_pair (mode,
9249 TYPE_MODE (TREE_TYPE (fields[0])),
9250 int_byte_position (fields[0]),
9251 TYPE_MODE (TREE_TYPE (fields[1])),
9252 int_byte_position (fields[1]));
9253 }
9254
9255 /* If a value is passed in the most significant part of a register, see
9256 whether we have to round the mode up to a whole number of words. */
9257 if (mips_return_in_msb (valtype))
9258 {
9259 HOST_WIDE_INT size = int_size_in_bytes (valtype);
9260 if (size % UNITS_PER_WORD != 0)
9261 {
9262 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
9263 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
9264 }
9265 }
9266
9267 /* For EABI, the class of return register depends entirely on MODE.
9268 For example, "struct { some_type x; }" and "union { some_type x; }"
9269 are returned in the same way as a bare "some_type" would be.
9270 Other ABIs only use FPRs for scalar, complex or vector types. */
9271 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
9272 return gen_rtx_REG (mode, GP_RETURN);
9273 }
9274
9275 if (!TARGET_MIPS16)
9276 {
9277 /* Handle long doubles for n32 & n64. */
9278 if (mode == TFmode)
9279 return mips_return_fpr_pair (mode,
9280 DImode, 0,
9281 DImode, GET_MODE_SIZE (mode) / 2);
9282
9283 if (mips_return_mode_in_fpr_p (mode))
9284 {
9285 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9286 return mips_return_fpr_pair (mode,
9287 GET_MODE_INNER (mode), 0,
9288 GET_MODE_INNER (mode),
9289 GET_MODE_SIZE (mode) / 2);
9290 else
9291 return gen_rtx_REG (mode, FP_RETURN);
9292 }
9293 }
9294
9295 return gen_rtx_REG (mode, GP_RETURN);
9296 }
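/* Example of the TFmode case above: on n32/n64, a "long double" return
   value (16 bytes) comes back as two 8-byte DImode halves, the first in
   $f0 and the second in $f2, matching what mips_return_fpr_pair
   builds.  */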
9297
9298 /* Return nonzero when an argument must be passed by reference. */
9299
9300 static bool
9301 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9302 enum machine_mode mode, const_tree type,
9303 bool named ATTRIBUTE_UNUSED)
9304 {
9305 if (mips_abi == ABI_EABI)
9306 {
9307 int size;
9308
9309 /* ??? How should SCmode be handled? */
9310 if (mode == DImode || mode == DFmode
9311 || mode == DQmode || mode == UDQmode
9312 || mode == DAmode || mode == UDAmode)
9313 return 0;
9314
9315 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
9316 return size == -1 || size > UNITS_PER_WORD;
9317 }
9318 else
9319 {
9320 /* If we have a variable-sized parameter, we have no choice. */
9321 return targetm.calls.must_pass_in_stack (mode, type);
9322 }
9323 }
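/* Examples of the EABI rule above: a 16-byte structure argument (larger
   than UNITS_PER_WORD) is passed by reference, while DImode and DFmode
   scalars are explicitly passed by value even on 32-bit targets where
   they are wider than a word.  For the other ABIs, only arguments that
   must be passed on the stack (such as variable-sized ones) take the
   pass-by-reference path.  */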
9324
9325 static bool
9326 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9327 enum machine_mode mode ATTRIBUTE_UNUSED,
9328 const_tree type ATTRIBUTE_UNUSED, bool named)
9329 {
9330 return mips_abi == ABI_EABI && named;
9331 }
9332
9333 /* Return true if registers of class CLASS cannot change from mode FROM
9334 to mode TO. */
9335
9336 bool
9337 mips_cannot_change_mode_class (enum machine_mode from,
9338 enum machine_mode to, enum reg_class class)
9339 {
9340 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
9341 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
9342 {
9343 if (TARGET_BIG_ENDIAN)
9344 {
9345 /* When a multi-word value is stored in paired floating-point
9346 registers, the first register always holds the low word.
9347 We therefore can't allow FPRs to change between single-word
9348 and multi-word modes. */
9349 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
9350 return true;
9351 }
9352 }
9353
9354 /* gcc assumes that each word of a multiword register can be accessed
9355 individually using SUBREGs. This is not true for floating-point
9356 registers if they are bigger than a word. */
9357 if (UNITS_PER_FPREG > UNITS_PER_WORD
9358 && GET_MODE_SIZE (from) > UNITS_PER_WORD
9359 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
9360 && reg_classes_intersect_p (FP_REGS, class))
9361 return true;
9362
9363 /* Loading a 32-bit value into a 64-bit floating-point register
9364 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9365 We can't allow 64-bit float registers to change from SImode
9366 to a wider mode.
9367 if (TARGET_64BIT
9368 && TARGET_FLOAT64
9369 && from == SImode
9370 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
9371 && reg_classes_intersect_p (FP_REGS, class))
9372 return true;
9373
9374 return false;
9375 }
9376
9377 /* Return true if X should not be moved directly into register $25.
9378 We need this because many versions of GAS will treat "la $25,foo" as
9379 part of a call sequence and so allow a global "foo" to be lazily bound. */
9380
9381 bool
9382 mips_dangerous_for_la25_p (rtx x)
9383 {
9384 return (!TARGET_EXPLICIT_RELOCS
9385 && TARGET_USE_GOT
9386 && GET_CODE (x) == SYMBOL_REF
9387 && mips_global_symbol_p (x));
9388 }
9389
9390 /* Implement PREFERRED_RELOAD_CLASS. */
9391
9392 enum reg_class
9393 mips_preferred_reload_class (rtx x, enum reg_class class)
9394 {
9395 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9396 return LEA_REGS;
9397
9398 if (TARGET_HARD_FLOAT
9399 && FLOAT_MODE_P (GET_MODE (x))
9400 && reg_class_subset_p (FP_REGS, class))
9401 return FP_REGS;
9402
9403 if (reg_class_subset_p (GR_REGS, class))
9404 class = GR_REGS;
9405
9406 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9407 class = M16_REGS;
9408
9409 return class;
9410 }
9411
9412 /* This function returns the register class required for a secondary
9413 register when copying between one of the registers in CLASS and X,
9414 using MODE. If IN_P is nonzero, the copy is going from X to the
9415 register, otherwise the register is the source. A return value of
9416 NO_REGS means that no secondary register is required. */
9417
9418 enum reg_class
9419 mips_secondary_reload_class (enum reg_class class,
9420 enum machine_mode mode, rtx x, int in_p)
9421 {
9422 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
9423 int regno = -1;
9424 int gp_reg_p;
9425
9426 if (REG_P (x) || GET_CODE (x) == SUBREG)
9427 regno = true_regnum (x);
9428
9429 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
9430
9431 if (mips_dangerous_for_la25_p (x))
9432 {
9433 gr_regs = LEA_REGS;
9434 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
9435 return gr_regs;
9436 }
9437
9438 /* Copying from HI or LO to anywhere other than a general register
9439 requires a general register.
9440 This rule applies to both the original HI/LO pair and the new
9441 DSP accumulators. */
9442 if (reg_class_subset_p (class, ACC_REGS))
9443 {
9444 if (TARGET_MIPS16 && in_p)
9445 {
9446 /* We can't really copy to HI or LO at all in mips16 mode. */
9447 return M16_REGS;
9448 }
9449 return gp_reg_p ? NO_REGS : gr_regs;
9450 }
9451 if (ACC_REG_P (regno))
9452 {
9453 if (TARGET_MIPS16 && ! in_p)
9454 {
9455 /* We can't really copy to HI or LO at all in mips16 mode. */
9456 return M16_REGS;
9457 }
9458 return class == gr_regs ? NO_REGS : gr_regs;
9459 }
9460
9461 /* We can only copy a value to a condition code register from a
9462 floating point register, and even then we require a scratch
9463 floating point register. We can only copy a value out of a
9464 condition code register into a general register. */
9465 if (class == ST_REGS)
9466 {
9467 if (in_p)
9468 return FP_REGS;
9469 return gp_reg_p ? NO_REGS : gr_regs;
9470 }
9471 if (ST_REG_P (regno))
9472 {
9473 if (! in_p)
9474 return FP_REGS;
9475 return class == gr_regs ? NO_REGS : gr_regs;
9476 }
9477
9478 if (class == FP_REGS)
9479 {
9480 if (MEM_P (x))
9481 {
9482 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
9483 return NO_REGS;
9484 }
9485 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
9486 {
9487 /* We can use the l.s and l.d macros to load floating-point
9488 constants. ??? For l.s, we could probably get better
9489 code by returning GR_REGS here. */
9490 return NO_REGS;
9491 }
9492 else if (gp_reg_p || x == CONST0_RTX (mode))
9493 {
9494 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9495 return NO_REGS;
9496 }
9497 else if (FP_REG_P (regno))
9498 {
9499 /* In this case we can use mov.s or mov.d. */
9500 return NO_REGS;
9501 }
9502 else
9503 {
9504 /* Otherwise, we need to reload through an integer register. */
9505 return gr_regs;
9506 }
9507 }
9508
9509 /* In mips16 mode, going between memory and anything but M16_REGS
9510 requires an M16_REG. */
9511 if (TARGET_MIPS16)
9512 {
9513 if (class != M16_REGS && class != M16_NA_REGS)
9514 {
9515 if (gp_reg_p)
9516 return NO_REGS;
9517 return M16_REGS;
9518 }
9519 if (! gp_reg_p)
9520 {
9521 if (class == M16_REGS || class == M16_NA_REGS)
9522 return NO_REGS;
9523 return M16_REGS;
9524 }
9525 }
9526
9527 return NO_REGS;
9528 }
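/* Example of the ACC_REGS rule above (a sketch; the scratch register is
   chosen by reload): copying $f0 into HI cannot be done directly, so a
   GPR intermediate is used, conceptually "mfc1 $t, $f0" followed by
   "mthi $t".  The opposite direction likewise goes through "mfhi" and
   "mtc1".  */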
9529
9530 /* Implement CLASS_MAX_NREGS.
9531
9532 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9533
9534 - ST_REGS always hold CCmode values, and CCmode values are
9535 considered to be 4 bytes wide.
9536
9537 All other register classes are covered by UNITS_PER_WORD. Note that
9538 this is true even for unions of integer and float registers when the
9539 latter are smaller than the former. The only supported combination
9540 in which this occurs is -mgp64 -msingle-float, which has 64-bit
9541 words but 32-bit float registers. A word-based calculation is correct
9542 in that case since -msingle-float disallows multi-FPR values. */
9543
9544 int
9545 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9546 enum machine_mode mode)
9547 {
9548 if (class == ST_REGS)
9549 return (GET_MODE_SIZE (mode) + 3) / 4;
9550 else if (class == FP_REGS)
9551 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9552 else
9553 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9554 }
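/* Worked examples of the calculation above: DFmode (8 bytes) needs
   (8 + 3) / 4 = 2 registers in FP_REGS when UNITS_PER_FPREG is 4, but
   only 1 when it is 8; a CCmode value in ST_REGS always needs
   (4 + 3) / 4 = 1 register.  */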
9555
9556 static bool
9557 mips_valid_pointer_mode (enum machine_mode mode)
9558 {
9559 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9560 }
9561
9562 /* Target hook for vector_mode_supported_p. */
9563
9564 static bool
9565 mips_vector_mode_supported_p (enum machine_mode mode)
9566 {
9567 switch (mode)
9568 {
9569 case V2SFmode:
9570 return TARGET_PAIRED_SINGLE_FLOAT;
9571
9572 case V2HImode:
9573 case V4QImode:
9574 case V2HQmode:
9575 case V2UHQmode:
9576 case V2HAmode:
9577 case V2UHAmode:
9578 case V4QQmode:
9579 case V4UQQmode:
9580 return TARGET_DSP;
9581
9582 default:
9583 return false;
9584 }
9585 }
9586
9587 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9588
9589 static bool
9590 mips_scalar_mode_supported_p (enum machine_mode mode)
9591 {
9592 if (ALL_FIXED_POINT_MODE_P (mode)
9593 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9594 return true;
9595
9596 return default_scalar_mode_supported_p (mode);
9597 }
9598 \f
9599 /* If we can access small data directly (using gp-relative relocation
9600 operators) return the small data pointer, otherwise return null.
9601
9602 For each mips16 function which refers to GP relative symbols, we
9603 use a pseudo register, initialized at the start of the function, to
9604 hold the $gp value. */
9605
9606 static rtx
9607 mips16_gp_pseudo_reg (void)
9608 {
9609 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9610 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9611
9612 /* Don't initialize the pseudo register if we are being called from
9613 the tree optimizers' cost-calculation routines. */
9614 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9615 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9616 {
9617 rtx insn, scan;
9618
9619 /* We want to initialize this to a value which gcc will believe
9620 is constant. */
9621 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9622
9623 push_topmost_sequence ();
9624 /* We need to emit the initialization after the FUNCTION_BEG
9625 note, so that it will be integrated. */
9626 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9627 if (NOTE_P (scan)
9628 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9629 break;
9630 if (scan == NULL_RTX)
9631 scan = get_insns ();
9632 insn = emit_insn_after (insn, scan);
9633 pop_topmost_sequence ();
9634
9635 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9636 }
9637
9638 return cfun->machine->mips16_gp_pseudo_rtx;
9639 }
9640
9641 /* Write out code to move floating point arguments in or out of
9642 general registers. Output the instructions to FILE. FP_CODE is
9643 the code describing which arguments are present (see the comment at
9644 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9645 we are copying from the floating point registers. */
9646
9647 static void
9648 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9649 {
9650 const char *s;
9651 int gparg, fparg;
9652 unsigned int f;
9653 CUMULATIVE_ARGS cum;
9654
9655 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9656 gcc_assert (TARGET_OLDABI);
9657
9658 if (from_fp_p)
9659 s = "mfc1";
9660 else
9661 s = "mtc1";
9662
9663 init_cumulative_args (&cum, NULL, NULL);
9664
9665 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9666 {
9667 enum machine_mode mode;
9668 struct mips_arg_info info;
9669
9670 if ((f & 3) == 1)
9671 mode = SFmode;
9672 else if ((f & 3) == 2)
9673 mode = DFmode;
9674 else
9675 gcc_unreachable ();
9676
9677 mips_arg_info (&cum, mode, NULL, true, &info);
9678 gparg = mips_arg_regno (&info, false);
9679 fparg = mips_arg_regno (&info, true);
9680
9681 if (mode == SFmode)
9682 fprintf (file, "\t%s\t%s,%s\n", s,
9683 reg_names[gparg], reg_names[fparg]);
9684 else if (TARGET_64BIT)
9685 fprintf (file, "\td%s\t%s,%s\n", s,
9686 reg_names[gparg], reg_names[fparg]);
9687 else if (ISA_HAS_MXHC1)
9688 /* -mips32r2 -mfp64 */
9689 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9690 s,
9691 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9692 reg_names[fparg],
9693 from_fp_p ? "mfhc1" : "mthc1",
9694 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9695 reg_names[fparg]);
9696 else if (TARGET_BIG_ENDIAN)
9697 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9698 reg_names[gparg], reg_names[fparg + 1], s,
9699 reg_names[gparg + 1], reg_names[fparg]);
9700 else
9701 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9702 reg_names[gparg], reg_names[fparg], s,
9703 reg_names[gparg + 1], reg_names[fparg + 1]);
9704
9705 function_arg_advance (&cum, mode, NULL, true);
9706 }
9707 }
9708
9709 /* Build a mips16 function stub. This is used for functions which
9710 take arguments in the floating point registers. It is 32-bit code
9711 that moves the floating point args into the general registers, and
9712 then jumps to the 16-bit code. */
9713
9714 static void
9715 build_mips16_function_stub (FILE *file)
9716 {
9717 const char *fnname;
9718 char *secname, *stubname;
9719 tree stubid, stubdecl;
9720 int need_comma;
9721 unsigned int f;
9722
9723 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9724 fnname = targetm.strip_name_encoding (fnname);
9725 secname = (char *) alloca (strlen (fnname) + 20);
9726 sprintf (secname, ".mips16.fn.%s", fnname);
9727 stubname = (char *) alloca (strlen (fnname) + 20);
9728 sprintf (stubname, "__fn_stub_%s", fnname);
9729 stubid = get_identifier (stubname);
9730 stubdecl = build_decl (FUNCTION_DECL, stubid,
9731 build_function_type (void_type_node, NULL_TREE));
9732 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9733 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9734
9735 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9736 need_comma = 0;
9737 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9738 {
9739 fprintf (file, "%s%s",
9740 need_comma ? ", " : "",
9741 (f & 3) == 1 ? "float" : "double");
9742 need_comma = 1;
9743 }
9744 fprintf (file, ")\n");
9745
9746 fprintf (file, "\t.set\tnomips16\n");
9747 switch_to_section (function_section (stubdecl));
9748 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9749
9750 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9751 within a .ent, and we cannot emit another .ent. */
9752 if (!FUNCTION_NAME_ALREADY_DECLARED)
9753 {
9754 fputs ("\t.ent\t", file);
9755 assemble_name (file, stubname);
9756 fputs ("\n", file);
9757 }
9758
9759 assemble_name (file, stubname);
9760 fputs (":\n", file);
9761
9762 /* We don't want the assembler to insert any nops here. */
9763 fprintf (file, "\t.set\tnoreorder\n");
9764
9765 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9766
9767 fprintf (asm_out_file, "\t.set\tnoat\n");
9768 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9769 assemble_name (file, fnname);
9770 fprintf (file, "\n");
9771 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9772 fprintf (asm_out_file, "\t.set\tat\n");
9773
9774 /* Unfortunately, we can't fill the jump delay slot. We can't fill it
9775 with one of the mfc1 instructions, because the result is not
9776 available for one instruction, so if the very first instruction
9777 in the function refers to the register, it will see the wrong
9778 value. */
9779 fprintf (file, "\tnop\n");
9780
9781 fprintf (file, "\t.set\treorder\n");
9782
9783 if (!FUNCTION_NAME_ALREADY_DECLARED)
9784 {
9785 fputs ("\t.end\t", file);
9786 assemble_name (file, stubname);
9787 fputs ("\n", file);
9788 }
9789
9790 switch_to_section (function_section (current_function_decl));
9791 }
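/* For illustration (an assumption, not verbatim output): for a function
   "foo (double)" on big-endian o32, the stub built above looks roughly
   like

         .set    nomips16
         .section .mips16.fn.foo
         .ent    __fn_stub_foo
     __fn_stub_foo:
         .set    noreorder
         mfc1    $4, $f13
         mfc1    $5, $f12
         .set    noat
         la      $1, foo
         jr      $1
         .set    at
         nop                     # delay slot cannot be filled (see above)
         .set    reorder
         .end    __fn_stub_foo

   i.e. it moves the FP-register arguments into the integer argument
   registers and jumps to the MIPS16 body.  */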
9792
9793 /* We keep a list of functions for which we have already built stubs
9794 in build_mips16_call_stub. */
9795
9796 struct mips16_stub
9797 {
9798 struct mips16_stub *next;
9799 char *name;
9800 int fpret;
9801 };
9802
9803 static struct mips16_stub *mips16_stubs;
9804
9805 /* Emit code to return a double value from a mips16 stub. GPREG is the
9806 first GP reg to use, FPREG is the first FP reg to use. */
9807
9808 static void
9809 mips16_fpret_double (int gpreg, int fpreg)
9810 {
9811 if (TARGET_64BIT)
9812 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9813 reg_names[gpreg], reg_names[fpreg]);
9814 else if (TARGET_FLOAT64)
9815 {
9816 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9817 reg_names[gpreg + WORDS_BIG_ENDIAN],
9818 reg_names[fpreg]);
9819 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9820 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9821 reg_names[fpreg]);
9822 }
9823 else
9824 {
9825 if (TARGET_BIG_ENDIAN)
9826 {
9827 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9828 reg_names[gpreg + 0],
9829 reg_names[fpreg + 1]);
9830 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9831 reg_names[gpreg + 1],
9832 reg_names[fpreg + 0]);
9833 }
9834 else
9835 {
9836 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9837 reg_names[gpreg + 0],
9838 reg_names[fpreg + 0]);
9839 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9840 reg_names[gpreg + 1],
9841 reg_names[fpreg + 1]);
9842 }
9843 }
9844 }
9845
9846 /* Build a call stub for a mips16 call. A stub is needed if we are
9847 passing any floating point values which should go into the floating
9848 point registers. If we are, and the call turns out to be to a
9849 32-bit function, the stub will be used to move the values into the
9850 floating point registers before calling the 32-bit function. The
9851 linker will magically adjust the function call to either the 16-bit
9852 function or the 32-bit stub, depending upon where the called function
9853 is actually defined.
9854
9855 Similarly, we need a stub if the return value might come back in a
9856 floating point register.
9857
9858 RETVAL is the location of the return value, or null if this is
9859 a call rather than a call_value. FN is the address of the
9860 function and ARG_SIZE is the size of the arguments. FP_CODE
9861 is the code built by function_arg. This function returns a nonzero
9862 value if it builds the call instruction itself. */
9863
9864 int
9865 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9866 {
9867 int fpret = 0;
9868 const char *fnname;
9869 char *secname, *stubname;
9870 struct mips16_stub *l;
9871 tree stubid, stubdecl;
9872 int need_comma;
9873 unsigned int f;
9874 rtx insn;
9875
9876 /* We don't need to do anything if we aren't in mips16 mode, or if
9877 we were invoked with the -msoft-float option. */
9878 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9879 return 0;
9880
9881 /* Figure out whether the value might come back in a floating point
9882 register. */
9883 if (retval)
9884 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9885
9886 /* We don't need to do anything if there were no floating point
9887 arguments and the value will not be returned in a floating point
9888 register. */
9889 if (fp_code == 0 && ! fpret)
9890 return 0;
9891
9892 /* We don't need to do anything if this is a call to a special
9893 mips16 support function. */
9894 if (GET_CODE (fn) == SYMBOL_REF
9895 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9896 return 0;
9897
9898 /* This code only works for the o32 and o64 ABIs. The other ABIs
9899 require more sophisticated support. */
9900 gcc_assert (TARGET_OLDABI);
9901
9902 /* If we're calling via a function pointer, then we must always call
9903 via a stub. There are magic stubs provided in libgcc.a for each
9904 of the required cases. Each of them expects the function address
9905 to arrive in register $2. */
9906
9907 if (GET_CODE (fn) != SYMBOL_REF)
9908 {
9909 char buf[30];
9910 tree id;
9911 rtx stub_fn, insn;
9912
9913 /* ??? If this code is modified to support other ABI's, we need
9914 to handle PARALLEL return values here. */
9915
9916 if (fpret)
9917 sprintf (buf, "__mips16_call_stub_%s_%d",
9918 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9919 fp_code);
9920 else
9921 sprintf (buf, "__mips16_call_stub_%d",
9922 fp_code);
9923
9924 id = get_identifier (buf);
9925 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9926
9927 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9928
9929 if (retval == NULL_RTX)
9930 insn = gen_call_internal (stub_fn, arg_size);
9931 else
9932 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9933 insn = emit_call_insn (insn);
9934
9935 /* Put the register usage information on the CALL. */
9936 CALL_INSN_FUNCTION_USAGE (insn) =
9937 gen_rtx_EXPR_LIST (VOIDmode,
9938 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9939 CALL_INSN_FUNCTION_USAGE (insn));
9940
9941 /* If we are handling a floating point return value, we need to
9942 save $18 in the function prologue. Putting a note on the
9943 call will mean that df_regs_ever_live_p ($18) will be true if the
9944 call is not eliminated, and we can check that in the prologue
9945 code. */
9946 if (fpret)
9947 CALL_INSN_FUNCTION_USAGE (insn) =
9948 gen_rtx_EXPR_LIST (VOIDmode,
9949 gen_rtx_USE (VOIDmode,
9950 gen_rtx_REG (word_mode, 18)),
9951 CALL_INSN_FUNCTION_USAGE (insn));
9952
9953 /* Return 1 to tell the caller that we've generated the call
9954 insn. */
9955 return 1;
9956 }
9957
9958 /* We know the function we are going to call. If we have already
9959 built a stub for it, we don't need to build another; just emit the call. */
9960
9961 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
9962 for (l = mips16_stubs; l != NULL; l = l->next)
9963 if (strcmp (l->name, fnname) == 0)
9964 break;
9965
9966 if (l == NULL)
9967 {
9968 /* Build a special purpose stub. When the linker sees a
9969 function call in mips16 code, it will check where the target
9970 is defined. If the target is a 32-bit call, the linker will
9971 search for the section defined here. It can tell which
9972 symbol this section is associated with by looking at the
9973 relocation information (the name is unreliable, since this
9974 might be a static function). If such a section is found, the
9975 linker will redirect the call to the start of the magic
9976 section.
9977
9978 If the function does not return a floating point value, the
9979 special stub section is named
9980 .mips16.call.FNNAME
9981
9982 If the function does return a floating point value, the stub
9983 section is named
9984 .mips16.call.fp.FNNAME
9985 */
9986
9987 secname = (char *) alloca (strlen (fnname) + 40);
9988 sprintf (secname, ".mips16.call.%s%s",
9989 fpret ? "fp." : "",
9990 fnname);
9991 stubname = (char *) alloca (strlen (fnname) + 20);
9992 sprintf (stubname, "__call_stub_%s%s",
9993 fpret ? "fp_" : "",
9994 fnname);
9995 stubid = get_identifier (stubname);
9996 stubdecl = build_decl (FUNCTION_DECL, stubid,
9997 build_function_type (void_type_node, NULL_TREE));
9998 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9999 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
10000
10001 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
10002 (fpret
10003 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
10004 : ""),
10005 fnname);
10006 need_comma = 0;
10007 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
10008 {
10009 fprintf (asm_out_file, "%s%s",
10010 need_comma ? ", " : "",
10011 (f & 3) == 1 ? "float" : "double");
10012 need_comma = 1;
10013 }
10014 fprintf (asm_out_file, ")\n");
10015
10016 fprintf (asm_out_file, "\t.set\tnomips16\n");
10017 assemble_start_function (stubdecl, stubname);
10018
10019 if (!FUNCTION_NAME_ALREADY_DECLARED)
10020 {
10021 fputs ("\t.ent\t", asm_out_file);
10022 assemble_name (asm_out_file, stubname);
10023 fputs ("\n", asm_out_file);
10024
10025 assemble_name (asm_out_file, stubname);
10026 fputs (":\n", asm_out_file);
10027 }
10028
10029 /* We build the stub code by hand. That's the only way we can
10030 do it, since we can't generate 32-bit code during a 16-bit
10031 compilation. */
10032
10033 /* We don't want the assembler to insert any nops here. */
10034 fprintf (asm_out_file, "\t.set\tnoreorder\n");
10035
10036 mips16_fp_args (asm_out_file, fp_code, 0);
10037
10038 if (! fpret)
10039 {
10040 fprintf (asm_out_file, "\t.set\tnoat\n");
10041 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
10042 fnname);
10043 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
10044 fprintf (asm_out_file, "\t.set\tat\n");
10045 /* Unfortunately, we can't fill the jump delay slot. We
10046 can't fill it with one of the mtc1 instructions, because the
10047 result is not available for one instruction, so if the
10048 very first instruction in the function refers to the
10049 register, it will see the wrong value. */
10050 fprintf (asm_out_file, "\tnop\n");
10051 }
10052 else
10053 {
10054 fprintf (asm_out_file, "\tmove\t%s,%s\n",
10055 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
10056 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
10057 /* As above, we can't fill the delay slot. */
10058 fprintf (asm_out_file, "\tnop\n");
10059 switch (GET_MODE (retval))
10060 {
10061 case SCmode:
10062 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10063 reg_names[GP_REG_FIRST + 3],
10064 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
10065 /* Fall through. */
10066 case SFmode:
10067 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10068 reg_names[GP_REG_FIRST + 2],
10069 reg_names[FP_REG_FIRST + 0]);
10070 break;
10071
10072 case DCmode:
10073 mips16_fpret_double (GP_REG_FIRST + 2 + (8 / UNITS_PER_WORD),
10074 FP_REG_FIRST + MAX_FPRS_PER_FMT);
10075 /* Fall through. */
10076 case DFmode:
10077 case V2SFmode:
10078 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
10079 break;
10080
10081 default:
10082 gcc_unreachable ();
10083 }
10084 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
10085 /* As above, we can't fill the delay slot. */
10086 fprintf (asm_out_file, "\tnop\n");
10087 }
10088
10089 fprintf (asm_out_file, "\t.set\treorder\n");
10090
10091 #ifdef ASM_DECLARE_FUNCTION_SIZE
10092 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
10093 #endif
10094
10095 if (!FUNCTION_NAME_ALREADY_DECLARED)
10096 {
10097 fputs ("\t.end\t", asm_out_file);
10098 assemble_name (asm_out_file, stubname);
10099 fputs ("\n", asm_out_file);
10100 }
10101
10102 /* Record this stub. */
10103 l = (struct mips16_stub *) xmalloc (sizeof *l);
10104 l->name = xstrdup (fnname);
10105 l->fpret = fpret;
10106 l->next = mips16_stubs;
10107 mips16_stubs = l;
10108 }
10109
10110 /* If we expect a floating point return value, but we've built a
10111 stub which does not expect one, then we're in trouble. We can't
10112 use the existing stub, because it won't handle the floating point
10113 value. We can't build a new stub, because the linker won't know
10114 which stub to use for the various calls in this object file.
10115 Fortunately, this case is illegal, since it means that a function
10116 was declared in two different ways in a single compilation. */
10117 if (fpret && ! l->fpret)
10118 error ("cannot handle inconsistent calls to %qs", fnname);
10119
10120 if (retval == NULL_RTX)
10121 insn = gen_call_internal_direct (fn, arg_size);
10122 else
10123 insn = gen_call_value_internal_direct (retval, fn, arg_size);
10124 insn = emit_call_insn (insn);
10125
10126 /* If we are calling a stub which handles a floating point return
10127 value, we need to arrange to save $18 in the prologue. We do
10128 this by marking the function call as using the register. The
10129 prologue will later see that it is used, and emit code to save
10130 it. */
10131 if (l->fpret)
10132 CALL_INSN_FUNCTION_USAGE (insn) =
10133 gen_rtx_EXPR_LIST (VOIDmode,
10134 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
10135 CALL_INSN_FUNCTION_USAGE (insn));
10136
10137 /* Return 1 to tell the caller that we've generated the call
10138 insn. */
10139 return 1;
10140 }
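/* For illustration (an assumption, not verbatim output): a MIPS16 call
   to "double bar (double)" gets a stub in section .mips16.call.fp.bar
   shaped roughly like

     __call_stub_fp_bar:
         mtc1    $4, $f12                # move the double argument (o32 LE)
         mtc1    $5, $f13
         move    $18, $31                # save the real return address
         jal     bar
         nop
         mfc1    $2, $f0                 # copy the FP result back to GPRs
         mfc1    $3, $f1
         j       $18
         nop

   The linker redirects the original call here only when "bar" turns out
   to be 32-bit code; $18 is call-saved, which is why the caller's
   prologue must preserve it (see the CALL_INSN_FUNCTION_USAGE note).  */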
10141
10142 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10143 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10144
10145 struct mips16_constant {
10146 struct mips16_constant *next;
10147 rtx value;
10148 rtx label;
10149 enum machine_mode mode;
10150 };
10151
10152 /* Information about an incomplete mips16 constant pool. FIRST is the
10153 first constant, HIGHEST_ADDRESS is the highest address that the first
10154 byte of the pool can have, and INSN_ADDRESS is the current instruction
10155 address. */
10156
10157 struct mips16_constant_pool {
10158 struct mips16_constant *first;
10159 int highest_address;
10160 int insn_address;
10161 };
10162
10163 /* Add constant VALUE to POOL and return its label. MODE is the
10164 value's mode (used for CONST_INTs, etc.). */
10165
10166 static rtx
10167 add_constant (struct mips16_constant_pool *pool,
10168 rtx value, enum machine_mode mode)
10169 {
10170 struct mips16_constant **p, *c;
10171 bool first_of_size_p;
10172
10173 /* See whether the constant is already in the pool. If so, return the
10174 existing label, otherwise leave P pointing to the place where the
10175 constant should be added.
10176
10177 Keep the pool sorted in increasing order of mode size so that we can
10178 reduce the number of alignments needed. */
10179 first_of_size_p = true;
10180 for (p = &pool->first; *p != 0; p = &(*p)->next)
10181 {
10182 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10183 return (*p)->label;
10184 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10185 break;
10186 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10187 first_of_size_p = false;
10188 }
10189
10190 /* In the worst case, the constant needed by the earliest instruction
10191 will end up at the end of the pool. The entire pool must then be
10192 accessible from that instruction.
10193
10194 When adding the first constant, set the pool's highest address to
10195 the address of the first out-of-range byte. Adjust this address
10196 downwards each time a new constant is added. */
10197 if (pool->first == 0)
10198 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10199 is the address of the instruction with the lowest two bits clear.
10200 The base PC value for ld has the lowest three bits clear. Assume
10201 the worst case here. */
10202 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10203 pool->highest_address -= GET_MODE_SIZE (mode);
10204 if (first_of_size_p)
10205 /* Take into account the worst possible padding due to alignment. */
10206 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10207
10208 /* Create a new entry. */
10209 c = (struct mips16_constant *) xmalloc (sizeof *c);
10210 c->value = value;
10211 c->mode = mode;
10212 c->label = gen_label_rtx ();
10213 c->next = *p;
10214 *p = c;
10215
10216 return c->label;
10217 }
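/* Example of the ordering kept above: if SImode constants are added
   before a DFmode one, the pool stays sorted as { 4-byte entries,
   8-byte entries }, so at most one alignment gap appears in front of
   the first 8-byte entry; HIGHEST_ADDRESS is decreased by each entry's
   size, plus that worst-case padding for the first entry of each new
   size.  */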
10218
10219 /* Output constant VALUE after instruction INSN and return the last
10220 instruction emitted. MODE is the mode of the constant. */
10221
10222 static rtx
10223 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10224 {
10225 switch (GET_MODE_CLASS (mode))
10226 {
10227 case MODE_INT:
10228 {
10229 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10230 return emit_insn_after (gen_consttable_int (value, size), insn);
10231 }
10232
10233 case MODE_FLOAT:
10234 return emit_insn_after (gen_consttable_float (value), insn);
10235
10236 case MODE_VECTOR_FLOAT:
10237 case MODE_VECTOR_INT:
10238 {
10239 int i;
10240 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10241 insn = dump_constants_1 (GET_MODE_INNER (mode),
10242 CONST_VECTOR_ELT (value, i), insn);
10243 return insn;
10244 }
10245
10246 default:
10247 gcc_unreachable ();
10248 }
10249 }
10250
10251
10252 /* Dump out the constants in CONSTANTS after INSN. */
10253
10254 static void
10255 dump_constants (struct mips16_constant *constants, rtx insn)
10256 {
10257 struct mips16_constant *c, *next;
10258 int align;
10259
10260 align = 0;
10261 for (c = constants; c != NULL; c = next)
10262 {
10263 /* If necessary, increase the alignment of PC. */
10264 if (align < GET_MODE_SIZE (c->mode))
10265 {
10266 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10267 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10268 }
10269 align = GET_MODE_SIZE (c->mode);
10270
10271 insn = emit_label_after (c->label, insn);
10272 insn = dump_constants_1 (c->mode, c->value, insn);
10273
10274 next = c->next;
10275 free (c);
10276 }
10277
10278 emit_barrier_after (insn);
10279 }
10280
10281 /* Return the length of instruction INSN. */
10282
10283 static int
10284 mips16_insn_length (rtx insn)
10285 {
10286 if (JUMP_P (insn))
10287 {
10288 rtx body = PATTERN (insn);
10289 if (GET_CODE (body) == ADDR_VEC)
10290 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10291 if (GET_CODE (body) == ADDR_DIFF_VEC)
10292 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10293 }
10294 return get_attr_length (insn);
10295 }
10296
10297 /* If *X is a symbolic constant that refers to the constant pool, add
10298 the constant to POOL and rewrite *X to use the constant's label. */
10299
10300 static void
10301 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10302 {
10303 rtx base, offset, label;
10304
10305 split_const (*x, &base, &offset);
10306 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10307 {
10308 label = add_constant (pool, get_pool_constant (base),
10309 get_pool_mode (base));
10310 base = gen_rtx_LABEL_REF (Pmode, label);
10311 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10312 }
10313 }
10314
10315 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10316 INSN is the instruction we're rewriting and POOL points to the current
10317 constant pool. */
10318 struct mips16_rewrite_pool_refs_info {
10319 rtx insn;
10320 struct mips16_constant_pool *pool;
10321 };
10322
10323 /* Rewrite *X so that constant pool references refer to the constant's
10324 label instead. DATA points to a mips16_rewrite_pool_refs_info
10325 structure. */
10326
10327 static int
10328 mips16_rewrite_pool_refs (rtx *x, void *data)
10329 {
10330 struct mips16_rewrite_pool_refs_info *info = data;
10331
10332 if (force_to_mem_operand (*x, Pmode))
10333 {
10334 rtx mem = force_const_mem (GET_MODE (*x), *x);
10335 validate_change (info->insn, x, mem, false);
10336 }
10337
10338 if (MEM_P (*x))
10339 {
10340 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10341 return -1;
10342 }
10343
10344 if (TARGET_MIPS16_TEXT_LOADS)
10345 mips16_rewrite_pool_constant (info->pool, x);
10346
10347 return GET_CODE (*x) == CONST ? -1 : 0;
10348 }
10349
10350 /* Build MIPS16 constant pools. */
10351
10352 static void
10353 mips16_lay_out_constants (void)
10354 {
10355 struct mips16_constant_pool pool;
10356 struct mips16_rewrite_pool_refs_info info;
10357 rtx insn, barrier;
10358
10359 if (!TARGET_MIPS16_PCREL_LOADS)
10360 return;
10361
10362 barrier = 0;
10363 memset (&pool, 0, sizeof (pool));
10364 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10365 {
10366 /* Rewrite constant pool references in INSN. */
10367 if (INSN_P (insn))
10368 {
10369 info.insn = insn;
10370 info.pool = &pool;
10371 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10372 }
10373
10374 pool.insn_address += mips16_insn_length (insn);
10375
10376 if (pool.first != NULL)
10377 {
10378 /* If there are no natural barriers between the first user of
10379 the pool and the highest acceptable address, we'll need to
10380 create a new instruction to jump around the constant pool.
10381 In the worst case, this instruction will be 4 bytes long.
10382
10383 If it's too late to do this transformation after INSN,
10384 do it immediately before INSN. */
10385 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10386 {
10387 rtx label, jump;
10388
10389 label = gen_label_rtx ();
10390
10391 jump = emit_jump_insn_before (gen_jump (label), insn);
10392 JUMP_LABEL (jump) = label;
10393 LABEL_NUSES (label) = 1;
10394 barrier = emit_barrier_after (jump);
10395
10396 emit_label_after (label, barrier);
10397 pool.insn_address += 4;
10398 }
10399
10400 /* See whether the constant pool is now out of range of the first
10401 user. If so, output the constants after the previous barrier.
10402 Note that any instructions between BARRIER and INSN (inclusive)
10403 will use negative offsets to refer to the pool. */
10404 if (pool.insn_address > pool.highest_address)
10405 {
10406 dump_constants (pool.first, barrier);
10407 pool.first = NULL;
10408 barrier = 0;
10409 }
10410 else if (BARRIER_P (insn))
10411 barrier = insn;
10412 }
10413 }
10414 dump_constants (pool.first, get_last_insn ());
10415 }
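/* Illustrative sketch only (labels are arbitrary): when no natural
   barrier is available, the jump/barrier/label emitted above makes the
   dumped pool look roughly like

        ...              # last user of the pool
        b       1f       # the 4-byte jump added by this function
        ...              # constant pool entries emitted at the barrier
   1:
        ...              # following code, with the pool now behind it

   so control flow never falls into the constants themselves.  */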
10416 \f
10417 /* A temporary variable used by for_each_rtx callbacks, etc. */
10418 static rtx mips_sim_insn;
10419
10420 /* A structure representing the state of the processor pipeline.
10421 Used by the mips_sim_* family of functions. */
10422 struct mips_sim {
10423 /* The maximum number of instructions that can be issued in a cycle.
10424 (Caches mips_issue_rate.) */
10425 unsigned int issue_rate;
10426
10427 /* The current simulation time. */
10428 unsigned int time;
10429
10430 /* How many more instructions can be issued in the current cycle. */
10431 unsigned int insns_left;
10432
10433 /* LAST_SET[X].INSN is the last instruction to set register X.
10434 LAST_SET[X].TIME is the time at which that instruction was issued.
10435 INSN is null if no instruction has yet set register X. */
10436 struct {
10437 rtx insn;
10438 unsigned int time;
10439 } last_set[FIRST_PSEUDO_REGISTER];
10440
10441 /* The pipeline's current DFA state. */
10442 state_t dfa_state;
10443 };
10444
10445 /* Reset STATE to the initial simulation state. */
10446
10447 static void
10448 mips_sim_reset (struct mips_sim *state)
10449 {
10450 state->time = 0;
10451 state->insns_left = state->issue_rate;
10452 memset (&state->last_set, 0, sizeof (state->last_set));
10453 state_reset (state->dfa_state);
10454 }
10455
10456 /* Initialize STATE before its first use. DFA_STATE points to an
10457 allocated but uninitialized DFA state. */
10458
10459 static void
10460 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10461 {
10462 state->issue_rate = mips_issue_rate ();
10463 state->dfa_state = dfa_state;
10464 mips_sim_reset (state);
10465 }
10466
10467 /* Advance STATE by one clock cycle. */
10468
10469 static void
10470 mips_sim_next_cycle (struct mips_sim *state)
10471 {
10472 state->time++;
10473 state->insns_left = state->issue_rate;
10474 state_transition (state->dfa_state, 0);
10475 }
10476
10477 /* Advance simulation state STATE until instruction INSN can read
10478 register REG. */
10479
10480 static void
10481 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10482 {
10483 unsigned int i;
10484
10485 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
10486 if (state->last_set[REGNO (reg) + i].insn != 0)
10487 {
10488 unsigned int t;
10489
10490 t = state->last_set[REGNO (reg) + i].time;
10491 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
10492 while (state->time < t)
10493 mips_sim_next_cycle (state);
10494 }
10495 }
10496
10497 /* A for_each_rtx callback. If *X is a register, advance simulation state
10498 DATA until mips_sim_insn can read the register's value. */
10499
10500 static int
10501 mips_sim_wait_regs_2 (rtx *x, void *data)
10502 {
10503 if (REG_P (*x))
10504 mips_sim_wait_reg (data, mips_sim_insn, *x);
10505 return 0;
10506 }
10507
10508 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10509
10510 static void
10511 mips_sim_wait_regs_1 (rtx *x, void *data)
10512 {
10513 for_each_rtx (x, mips_sim_wait_regs_2, data);
10514 }
10515
10516 /* Advance simulation state STATE until all of INSN's register
10517 dependencies are satisfied. */
10518
10519 static void
10520 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
10521 {
10522 mips_sim_insn = insn;
10523 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
10524 }
10525
10526 /* Advance simulation state STATE until the units required by
10527 instruction INSN are available. */
10528
10529 static void
10530 mips_sim_wait_units (struct mips_sim *state, rtx insn)
10531 {
10532 state_t tmp_state;
10533
10534 tmp_state = alloca (state_size ());
10535 while (state->insns_left == 0
10536 || (memcpy (tmp_state, state->dfa_state, state_size ()),
10537 state_transition (tmp_state, insn) >= 0))
10538 mips_sim_next_cycle (state);
10539 }
10540
10541 /* Advance simulation state STATE until INSN is ready to issue. */
10542
10543 static void
10544 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
10545 {
10546 mips_sim_wait_regs (state, insn);
10547 mips_sim_wait_units (state, insn);
10548 }
10549
10550 /* mips_sim_insn has just set X. Update the LAST_SET array
10551 in simulation state DATA. */
10552
10553 static void
10554 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10555 {
10556 struct mips_sim *state;
10557 unsigned int i;
10558
10559 state = data;
10560 if (REG_P (x))
10561 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10562 {
10563 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10564 state->last_set[REGNO (x) + i].time = state->time;
10565 }
10566 }
10567
10568 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10569 can issue immediately (i.e., that mips_sim_wait_insn has already
10570 been called). */
10571
10572 static void
10573 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10574 {
10575 state_transition (state->dfa_state, insn);
10576 state->insns_left--;
10577
10578 mips_sim_insn = insn;
10579 note_stores (PATTERN (insn), mips_sim_record_set, state);
10580 }
10581
10582 /* Simulate issuing a NOP in state STATE. */
10583
10584 static void
10585 mips_sim_issue_nop (struct mips_sim *state)
10586 {
10587 if (state->insns_left == 0)
10588 mips_sim_next_cycle (state);
10589 state->insns_left--;
10590 }
10591
10592 /* Update simulation state STATE so that it's ready to accept the instruction
10593 after INSN. INSN should be part of the main rtl chain, not a member of a
10594 SEQUENCE. */
10595
10596 static void
10597 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10598 {
10599 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10600 if (JUMP_P (insn))
10601 mips_sim_issue_nop (state);
10602
10603 switch (GET_CODE (SEQ_BEGIN (insn)))
10604 {
10605 case CODE_LABEL:
10606 case CALL_INSN:
10607 /* We can't predict the processor state after a call or label. */
10608 mips_sim_reset (state);
10609 break;
10610
10611 case JUMP_INSN:
10612 /* The delay slots of branch likely instructions are only executed
10613 when the branch is taken. Therefore, if the caller has simulated
10614 the delay slot instruction, STATE does not really reflect the state
10615 of the pipeline for the instruction after the delay slot. Also,
10616 branch likely instructions tend to incur a penalty when not taken,
10617 so there will probably be an extra delay between the branch and
10618 the instruction after the delay slot. */
10619 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10620 mips_sim_reset (state);
10621 break;
10622
10623 default:
10624 break;
10625 }
10626 }
10627 \f
10628 /* The VR4130 pipeline issues aligned pairs of instructions together,
10629 but it stalls the second instruction if it depends on the first.
10630 In order to cut down the amount of logic required, this dependence
10631 check is not based on a full instruction decode. Instead, any non-SPECIAL
10632 instruction is assumed to modify the register specified by bits 20-16
10633 (which is usually the "rt" field).
10634
10635 In beq, beql, bne and bnel instructions, the rt field is actually an
10636 input, so we can end up with a false dependence between the branch
10637 and its delay slot. If this situation occurs in instruction INSN,
10638 try to avoid it by swapping rs and rt. */
10639
10640 static void
10641 vr4130_avoid_branch_rt_conflict (rtx insn)
10642 {
10643 rtx first, second;
10644
10645 first = SEQ_BEGIN (insn);
10646 second = SEQ_END (insn);
10647 if (JUMP_P (first)
10648 && NONJUMP_INSN_P (second)
10649 && GET_CODE (PATTERN (first)) == SET
10650 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10651 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10652 {
10653 /* Check for the right kind of condition. */
10654 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10655 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10656 && REG_P (XEXP (cond, 0))
10657 && REG_P (XEXP (cond, 1))
10658 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10659 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10660 {
10661 /* SECOND mentions the rt register but not the rs register. */
10662 rtx tmp = XEXP (cond, 0);
10663 XEXP (cond, 0) = XEXP (cond, 1);
10664 XEXP (cond, 1) = tmp;
10665 }
10666 }
10667 }
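/* For example (register numbers are arbitrary), in

        beq     $4,$5,foo
        addiu   $6,$5,1         # delay slot reads $5, the rt field

   the VR4130 would see a false dependence between the branch and its
   delay slot because $5 occupies the rt field.  Swapping the operands
   to give "beq $5,$4,foo" moves $5 into the rs field; the comparison
   itself is unchanged because EQ and NE are symmetric.  */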
10668
10669 /* Implement -mvr4130-align. Go through each basic block and simulate the
10670 processor pipeline. If we find that a pair of instructions could execute
10671 in parallel, and the first of those instructions is not 8-byte aligned,
10672 insert a nop to make it aligned. */
10673
10674 static void
10675 vr4130_align_insns (void)
10676 {
10677 struct mips_sim state;
10678 rtx insn, subinsn, last, last2, next;
10679 bool aligned_p;
10680
10681 dfa_start ();
10682
10683 /* LAST is the last instruction before INSN to have a nonzero length.
10684 LAST2 is the last such instruction before LAST. */
10685 last = 0;
10686 last2 = 0;
10687
10688 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10689 aligned_p = true;
10690
10691 mips_sim_init (&state, alloca (state_size ()));
10692 for (insn = get_insns (); insn != 0; insn = next)
10693 {
10694 unsigned int length;
10695
10696 next = NEXT_INSN (insn);
10697
10698 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10699 This isn't really related to the alignment pass, but we do it on
10700 the fly to avoid a separate instruction walk. */
10701 vr4130_avoid_branch_rt_conflict (insn);
10702
10703 if (USEFUL_INSN_P (insn))
10704 FOR_EACH_SUBINSN (subinsn, insn)
10705 {
10706 mips_sim_wait_insn (&state, subinsn);
10707
10708 /* If we want this instruction to issue in parallel with the
10709 previous one, make sure that the previous instruction is
10710 aligned. There are several reasons why this isn't worthwhile
10711 when the second instruction is a call:
10712
10713 - Calls are less likely to be performance critical.
10714 - There's a good chance that the delay slot can execute
10715 in parallel with the call.
10716 - The return address would then be unaligned.
10717
10718 In general, if we're going to insert a nop between instructions
10719 X and Y, it's better to insert it immediately after X. That
10720 way, if the nop makes Y aligned, it will also align any labels
10721 between X and Y. */
10722 if (state.insns_left != state.issue_rate
10723 && !CALL_P (subinsn))
10724 {
10725 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10726 {
10727 /* SUBINSN is the first instruction in INSN and INSN is
10728 aligned. We want to align the previous instruction
10729 instead, so insert a nop between LAST2 and LAST.
10730
10731 Note that LAST could be either a single instruction
10732 or a branch with a delay slot. In the latter case,
10733 LAST, like INSN, is already aligned, but the delay
10734 slot must have some extra delay that stops it from
10735 issuing at the same time as the branch. We therefore
10736 insert a nop before the branch in order to align its
10737 delay slot. */
10738 emit_insn_after (gen_nop (), last2);
10739 aligned_p = false;
10740 }
10741 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10742 {
10743 /* SUBINSN is the delay slot of INSN, but INSN is
10744 currently unaligned. Insert a nop between
10745 LAST and INSN to align it. */
10746 emit_insn_after (gen_nop (), last);
10747 aligned_p = true;
10748 }
10749 }
10750 mips_sim_issue_insn (&state, subinsn);
10751 }
10752 mips_sim_finish_insn (&state, insn);
10753
10754 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10755 length = get_attr_length (insn);
10756 if (length > 0)
10757 {
10758 /* If the instruction is an asm statement or multi-instruction
10759 mips.md pattern, the length is only an estimate. Insert an
10760 8-byte alignment after it so that the following instructions
10761 can be handled correctly. */
10762 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10763 && (recog_memoized (insn) < 0 || length >= 8))
10764 {
10765 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10766 next = NEXT_INSN (next);
10767 mips_sim_next_cycle (&state);
10768 aligned_p = true;
10769 }
10770 else if (length & 4)
10771 aligned_p = !aligned_p;
10772 last2 = last;
10773 last = insn;
10774 }
10775
10776 /* See whether INSN is an aligned label. */
10777 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10778 aligned_p = true;
10779 }
10780 dfa_finish ();
10781 }
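/* A minimal sketch of the transformation (instruction choice is
   arbitrary): if the pair below could dual-issue but the ADDU starts
   at an address that is 4-byte but not 8-byte aligned, the pass emits

        nop                     # inserted after the previous instruction
        addu    $2,$3,$4        # now starts on an 8-byte boundary
        mult    $5,$6           # can issue in the same cycle as the addu

   rather than letting the pair straddle an issue boundary.  */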
10782 \f
10783 /* Subroutine of mips_reorg. If there is a hazard between INSN
10784 and a previous instruction, avoid it by inserting nops after
10785 instruction AFTER.
10786
10787 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10788 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10789 before using the value of that register. *HILO_DELAY counts the
10790 number of instructions since the last hilo hazard (that is,
10791 the number of instructions since the last mflo or mfhi).
10792
10793 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10794 for the next instruction.
10795
10796 LO_REG is an rtx for the LO register, used in dependence checking. */
10797
10798 static void
10799 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10800 rtx *delayed_reg, rtx lo_reg)
10801 {
10802 rtx pattern, set;
10803 int nops, ninsns;
10804
10805 if (!INSN_P (insn))
10806 return;
10807
10808 pattern = PATTERN (insn);
10809
10810 /* Do not put the whole function in .set noreorder if it contains
10811 an asm statement. We don't know whether there will be hazards
10812 between the asm statement and the gcc-generated code. */
10813 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10814 cfun->machine->all_noreorder_p = false;
10815
10816 /* Ignore zero-length instructions (barriers and the like). */
10817 ninsns = get_attr_length (insn) / 4;
10818 if (ninsns == 0)
10819 return;
10820
10821 /* Work out how many nops are needed. Note that we only care about
10822 registers that are explicitly mentioned in the instruction's pattern.
10823 It doesn't matter that calls use the argument registers or that they
10824 clobber hi and lo. */
10825 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10826 nops = 2 - *hilo_delay;
10827 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10828 nops = 1;
10829 else
10830 nops = 0;
10831
10832 /* Insert the nops between this instruction and the previous one.
10833 Each new nop takes us further from the last hilo hazard. */
10834 *hilo_delay += nops;
10835 while (nops-- > 0)
10836 emit_insn_after (gen_hazard_nop (), after);
10837
10838 /* Set up the state for the next instruction. */
10839 *hilo_delay += ninsns;
10840 *delayed_reg = 0;
10841 if (INSN_CODE (insn) >= 0)
10842 switch (get_attr_hazard (insn))
10843 {
10844 case HAZARD_NONE:
10845 break;
10846
10847 case HAZARD_HILO:
10848 *hilo_delay = 0;
10849 break;
10850
10851 case HAZARD_DELAY:
10852 set = single_set (insn);
10853 gcc_assert (set != 0);
10854 *delayed_reg = SET_DEST (set);
10855 break;
10856 }
10857 }
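/* For illustration (register choice is arbitrary): with a two-cycle
   HI/LO hazard, the sequence

        mflo    $2
        mult    $4,$5           # writes LO again too soon

   would have this function emit

        mflo    $2
        nop
        nop
        mult    $4,$5

   and an instruction with the HAZARD_DELAY attribute (e.g. a load on
   ISAs with load delay slots) followed directly by a use of its
   destination gets a single nop instead.  */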
10858
10859
10860 /* Go through the instruction stream and insert nops where necessary.
10861 See if the whole function can then be put into .set noreorder &
10862 .set nomacro. */
10863
10864 static void
10865 mips_avoid_hazards (void)
10866 {
10867 rtx insn, last_insn, lo_reg, delayed_reg;
10868 int hilo_delay, i;
10869
10870 /* Force all instructions to be split into their final form. */
10871 split_all_insns_noflow ();
10872
10873 /* Recalculate instruction lengths without taking nops into account. */
10874 cfun->machine->ignore_hazard_length_p = true;
10875 shorten_branches (get_insns ());
10876
10877 cfun->machine->all_noreorder_p = true;
10878
10879 /* Profiled functions can't be all noreorder because the profiler
10880 support uses assembler macros. */
10881 if (current_function_profile)
10882 cfun->machine->all_noreorder_p = false;
10883
10884 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10885 we rely on the assembler to work around some errata. */
10886 if (TARGET_FIX_VR4120)
10887 cfun->machine->all_noreorder_p = false;
10888
10889 /* The same is true for -mfix-vr4130 if we might generate mflo or
10890 mfhi instructions. Note that we avoid using mflo and mfhi if
10891 the VR4130 macc and dmacc instructions are available instead;
10892 see the *mfhilo_{si,di}_macc patterns. */
10893 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10894 cfun->machine->all_noreorder_p = false;
10895
10896 last_insn = 0;
10897 hilo_delay = 2;
10898 delayed_reg = 0;
10899 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10900
10901 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10902 if (INSN_P (insn))
10903 {
10904 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10905 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10906 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10907 &hilo_delay, &delayed_reg, lo_reg);
10908 else
10909 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10910 &delayed_reg, lo_reg);
10911
10912 last_insn = insn;
10913 }
10914 }
10915
10916
10917 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10918
10919 static void
10920 mips_reorg (void)
10921 {
10922 mips16_lay_out_constants ();
10923 if (TARGET_EXPLICIT_RELOCS)
10924 {
10925 if (mips_flag_delayed_branch)
10926 dbr_schedule (get_insns ());
10927 mips_avoid_hazards ();
10928 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10929 vr4130_align_insns ();
10930 }
10931 }
10932
10933 /* This function does three things:
10934
10935 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10936 - Register the mips16 hardware floating point stubs.
10937 - Register the gofast functions if selected using --enable-gofast. */
10938
10939 #include "config/gofast.h"
10940
10941 static void
10942 mips_init_libfuncs (void)
10943 {
10944 if (TARGET_FIX_VR4120)
10945 {
10946 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10947 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10948 }
10949
10950 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10951 {
10952 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10953 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10954 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10955 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10956
10957 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10958 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10959 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10960 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10961 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10962 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10963 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10964
10965 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10966 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10967 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10968
10969 if (TARGET_DOUBLE_FLOAT)
10970 {
10971 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10972 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10973 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10974 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10975
10976 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10977 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10978 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10979 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10980 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10981 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10982 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10983
10984 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10985 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10986
10987 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10988 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10989 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10990 }
10991 }
10992 else
10993 gofast_maybe_init_libfuncs ();
10994 }
10995
10996 /* Return a number assessing the cost of moving a register in class
10997 FROM to class TO. The classes are expressed using the enumeration
10998 values such as `GENERAL_REGS'. A value of 2 is the default; other
10999 values are interpreted relative to that.
11000
11001 It is not required that the cost always equal 2 when FROM is the
11002 same as TO; on some machines it is expensive to move between
11003 registers if they are not general registers.
11004
11005 If reload sees an insn consisting of a single `set' between two
11006 hard registers, and if `REGISTER_MOVE_COST' applied to their
11007 classes returns a value of 2, reload does not check to ensure that
11008 the constraints of the insn are met. Setting a cost of other than
11009 2 will allow reload to verify that the constraints are met. You
11010 should do this if the `movM' pattern's constraints do not allow
11011 such copying.
11012
11013 ??? We make the cost of moving from HI/LO into general
11014 registers the same as the cost of moving general registers to
11015 HI/LO for TARGET_MIPS16 in order to prevent allocating a
11016 pseudo to HI/LO. This might hurt optimizations, though; it
11017 isn't clear whether it is wise, and it might not work in all cases. We
11018 could solve the DImode LO reg problem by using a multiply, just
11019 like reload_{in,out}si. We could solve the SImode/HImode HI reg
11020 problem by using divide instructions. divu puts the remainder in
11021 the HI reg, so doing a divide by -1 will move the value in the HI
11022 reg for all values except -1. We could handle that case by using a
11023 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
11024 a compare/branch to test the input value to see which instruction
11025 we need to use. This gets pretty messy, but it is feasible. */
11026
11027 int
11028 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
11029 enum reg_class to, enum reg_class from)
11030 {
11031 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
11032 return 2;
11033 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
11034 return 2;
11035 else if (reg_class_subset_p (from, GENERAL_REGS))
11036 {
11037 if (to == M16_REGS)
11038 return 2;
11039 else if (to == M16_NA_REGS)
11040 return 2;
11041 else if (reg_class_subset_p (to, GENERAL_REGS))
11042 {
11043 if (TARGET_MIPS16)
11044 return 4;
11045 else
11046 return 2;
11047 }
11048 else if (to == FP_REGS)
11049 return 4;
11050 else if (reg_class_subset_p (to, ACC_REGS))
11051 {
11052 if (TARGET_MIPS16)
11053 return 12;
11054 else
11055 return 6;
11056 }
11057 else if (reg_class_subset_p (to, ALL_COP_REGS))
11058 {
11059 return 5;
11060 }
11061 }
11062 else if (from == FP_REGS)
11063 {
11064 if (reg_class_subset_p (to, GENERAL_REGS))
11065 return 4;
11066 else if (to == FP_REGS)
11067 return 2;
11068 else if (to == ST_REGS)
11069 return 8;
11070 }
11071 else if (reg_class_subset_p (from, ACC_REGS))
11072 {
11073 if (reg_class_subset_p (to, GENERAL_REGS))
11074 {
11075 if (TARGET_MIPS16)
11076 return 12;
11077 else
11078 return 6;
11079 }
11080 }
11081 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
11082 return 4;
11083 else if (reg_class_subset_p (from, ALL_COP_REGS))
11084 {
11085 return 5;
11086 }
11087
11088 /* Fall through.
11089 ??? What cases are these? Shouldn't we return 2 here? */
11090
11091 return 12;
11092 }
11093
11094 /* Return the length of INSN. LENGTH is the initial length computed by
11095 attributes in the machine-description file. */
11096
11097 int
11098 mips_adjust_insn_length (rtx insn, int length)
11099 {
11100 /* An unconditional jump has an unfilled delay slot if it is not part
11101 of a sequence. A conditional jump normally has a delay slot, but
11102 does not on MIPS16. */
11103 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
11104 length += 4;
11105
11106 /* See how many nops might be needed to avoid hardware hazards. */
11107 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
11108 switch (get_attr_hazard (insn))
11109 {
11110 case HAZARD_NONE:
11111 break;
11112
11113 case HAZARD_DELAY:
11114 length += 4;
11115 break;
11116
11117 case HAZARD_HILO:
11118 length += 8;
11119 break;
11120 }
11121
11122 /* All MIPS16 instructions are a measly two bytes. */
11123 if (TARGET_MIPS16)
11124 length /= 2;
11125
11126 return length;
11127 }
11128
11129
11130 /* Return an asm sequence to start a noat block and load the address
11131 of a label into $1. */
11132
11133 const char *
11134 mips_output_load_label (void)
11135 {
11136 if (TARGET_EXPLICIT_RELOCS)
11137 switch (mips_abi)
11138 {
11139 case ABI_N32:
11140 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
11141
11142 case ABI_64:
11143 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
11144
11145 default:
11146 if (ISA_HAS_LOAD_DELAY)
11147 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
11148 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
11149 }
11150 else
11151 {
11152 if (Pmode == DImode)
11153 return "%[dla\t%@,%0";
11154 else
11155 return "%[la\t%@,%0";
11156 }
11157 }
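/* As an illustration of the N32 template above (the label name is
   arbitrary), "%[", "%@" and "%+" expand to ".set noat", $1 and the
   global pointer respectively, so the output is roughly

        .set    noat
        lw      $1,%got_page($L23)($28)
        addiu   $1,$1,%got_ofst($L23)

   with the matching "%]" (which restores .set at) supplied by the
   caller's template.  */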
11158
11159 /* Return the assembly code for INSN, which has the operands given by
11160 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
11161 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
11162 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
11163 version of BRANCH_IF_TRUE. */
11164
11165 const char *
11166 mips_output_conditional_branch (rtx insn, rtx *operands,
11167 const char *branch_if_true,
11168 const char *branch_if_false)
11169 {
11170 unsigned int length;
11171 rtx taken, not_taken;
11172
11173 length = get_attr_length (insn);
11174 if (length <= 8)
11175 {
11176 /* Just a simple conditional branch. */
11177 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
11178 return branch_if_true;
11179 }
11180
11181 /* Generate a reversed branch around a direct jump. This fallback does
11182 not use branch-likely instructions. */
11183 mips_branch_likely = false;
11184 not_taken = gen_label_rtx ();
11185 taken = operands[1];
11186
11187 /* Generate the reversed branch to NOT_TAKEN. */
11188 operands[1] = not_taken;
11189 output_asm_insn (branch_if_false, operands);
11190
11191 /* If INSN has a delay slot, we must provide delay slots for both the
11192 branch to NOT_TAKEN and the conditional jump. We must also ensure
11193 that INSN's delay slot is executed in the appropriate cases. */
11194 if (final_sequence)
11195 {
11196 /* This first delay slot will always be executed, so use INSN's
11197 delay slot if it is not annulled. */
11198 if (!INSN_ANNULLED_BRANCH_P (insn))
11199 {
11200 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11201 asm_out_file, optimize, 1, NULL);
11202 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11203 }
11204 else
11205 output_asm_insn ("nop", 0);
11206 fprintf (asm_out_file, "\n");
11207 }
11208
11209 /* Output the unconditional branch to TAKEN. */
11210 if (length <= 16)
11211 output_asm_insn ("j\t%0%/", &taken);
11212 else
11213 {
11214 output_asm_insn (mips_output_load_label (), &taken);
11215 output_asm_insn ("jr\t%@%]%/", 0);
11216 }
11217
11218 /* Now deal with its delay slot; see above. */
11219 if (final_sequence)
11220 {
11221 /* This delay slot will only be executed if the branch is taken.
11222 Use INSN's delay slot if it is annulled. */
11223 if (INSN_ANNULLED_BRANCH_P (insn))
11224 {
11225 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11226 asm_out_file, optimize, 1, NULL);
11227 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11228 }
11229 else
11230 output_asm_insn ("nop", 0);
11231 fprintf (asm_out_file, "\n");
11232 }
11233
11234 /* Output NOT_TAKEN. */
11235 (*targetm.asm_out.internal_label) (asm_out_file, "L",
11236 CODE_LABEL_NUMBER (not_taken));
11237 return "";
11238 }
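/* For illustration (labels and registers are arbitrary): when a
   "beq $4,$5,foo" is out of direct-branch range, the code above emits
   something along the lines of

        bne     $4,$5,$L_nt     # reversed branch from BRANCH_IF_FALSE
        nop                     # or INSN's delay-slot instruction
        j       foo             # or the load-label/jr sequence when even
        nop                     # a jump cannot reach the target
   $L_nt:

   with INSN's own delay slot placed after whichever branch should
   execute it, depending on whether the branch is annulled.  */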
11239
11240 /* Return the assembly code for INSN, which branches to OPERANDS[1]
11241 if some ordered condition is true. The condition is given by
11242 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
11243 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
11244 its second is always zero. */
11245
11246 const char *
11247 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11248 {
11249 const char *branch[2];
11250
11251 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
11252 Make BRANCH[0] branch on the inverse condition. */
11253 switch (GET_CODE (operands[0]))
11254 {
11255 /* These cases are equivalent to comparisons against zero. */
11256 case LEU:
11257 inverted_p = !inverted_p;
11258 /* Fall through. */
11259 case GTU:
11260 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
11261 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
11262 break;
11263
11264 /* These cases are always true or always false. */
11265 case LTU:
11266 inverted_p = !inverted_p;
11267 /* Fall through. */
11268 case GEU:
11269 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
11270 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
11271 break;
11272
11273 default:
11274 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
11275 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
11276 break;
11277 }
11278 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11279 }
11280 \f
11281 /* Used to output div or ddiv instruction DIVISION, which has the operands
11282 given by OPERANDS. Add in a divide-by-zero check if needed.
11283
11284 When working around R4000 and R4400 errata, we need to make sure that
11285 the division is not immediately followed by a shift[1][2]. We also
11286 need to stop the division from being put into a branch delay slot[3].
11287 The easiest way to avoid both problems is to add a nop after the
11288 division. When a divide-by-zero check is needed, this nop can be
11289 used to fill the branch delay slot.
11290
11291 [1] If a double-word or a variable shift executes immediately
11292 after starting an integer division, the shift may give an
11293 incorrect result. See quotations of errata #16 and #28 from
11294 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11295 in mips.md for details.
11296
11297 [2] A similar bug to [1] exists for all revisions of the
11298 R4000 and the R4400 when run in an MC configuration.
11299 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11300
11301 "19. In this following sequence:
11302
11303 ddiv (or ddivu or div or divu)
11304 dsll32 (or dsrl32, dsra32)
11305
11306 if an MPT stall occurs, while the divide is slipping the cpu
11307 pipeline, then the following double shift would end up with an
11308 incorrect result.
11309
11310 Workaround: The compiler needs to avoid generating any
11311 sequence with divide followed by extended double shift."
11312
11313 This erratum is also present in "MIPS R4400MC Errata, Processor
11314 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11315 & 3.0" as errata #10 and #4, respectively.
11316
11317 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11318 (also valid for MIPS R4000MC processors):
11319
11320 "52. R4000SC: This bug does not apply for the R4000PC.
11321
11322 There are two flavors of this bug:
11323
11324 1) If the instruction just after divide takes an RF exception
11325 (tlb-refill, tlb-invalid) and gets an instruction cache
11326 miss (both primary and secondary) and the line which is
11327 currently in secondary cache at this index had the first
11328 data word, where the bits 5..2 are set, then R4000 would
11329 get a wrong result for the div.
11330
11331 ##1
11332 nop
11333 div r8, r9
11334 ------------------- # end-of page. -tlb-refill
11335 nop
11336 ##2
11337 nop
11338 div r8, r9
11339 ------------------- # end-of page. -tlb-invalid
11340 nop
11341
11342 2) If the divide is in the taken branch delay slot, where the
11343 target takes RF exception and gets an I-cache miss for the
11344 exception vector or where I-cache miss occurs for the
11345 target address, under the above mentioned scenarios, the
11346 div would get wrong results.
11347
11348 ##1
11349 j r2 # to next page mapped or unmapped
11350 div r8,r9 # this bug would be there as long
11351 # as there is an ICache miss and
11352 nop # the "data pattern" is present
11353
11354 ##2
11355 beq r0, r0, NextPage # to Next page
11356 div r8,r9
11357 nop
11358
11359 This bug is present for div, divu, ddiv, and ddivu
11360 instructions.
11361
11362 Workaround: For item 1), OS could make sure that the next page
11363 after the divide instruction is also mapped. For item 2), the
11364 compiler could make sure that the divide instruction is not in
11365 the branch delay slot."
11366
11367 These processors have PRId values of 0x00004220 and 0x00004300 for
11368 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
11369
11370 const char *
11371 mips_output_division (const char *division, rtx *operands)
11372 {
11373 const char *s;
11374
11375 s = division;
11376 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
11377 {
11378 output_asm_insn (s, operands);
11379 s = "nop";
11380 }
11381 if (TARGET_CHECK_ZERO_DIV)
11382 {
11383 if (TARGET_MIPS16)
11384 {
11385 output_asm_insn (s, operands);
11386 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
11387 }
11388 else if (GENERATE_DIVIDE_TRAPS)
11389 {
11390 output_asm_insn (s, operands);
11391 s = "teq\t%2,%.,7";
11392 }
11393 else
11394 {
11395 output_asm_insn ("%(bne\t%2,%.,1f", operands);
11396 output_asm_insn (s, operands);
11397 s = "break\t7%)\n1:";
11398 }
11399 }
11400 return s;
11401 }
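/* For example (operands are arbitrary): with -mcheck-zero-division on
   a target that GENERATE_DIVIDE_TRAPS, a "div\t$0,%1,%2" template is
   emitted as

        div     $0,$4,$5
        teq     $5,$0,7

   whereas without conditional traps the check becomes a branch around
   a break, with the division filling the branch's delay slot:

        bne     $5,$0,1f
        div     $0,$4,$5
        break   7
   1:

   and -mfix-r4000/-mfix-r4400 add a nop so that the division is never
   directly followed by a shift or left in a branch delay slot.  */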
11402 \f
11403 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11404 with a final "000" replaced by "k". Ignore case.
11405
11406 Note: this function is shared between GCC and GAS. */
11407
11408 static bool
11409 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11410 {
11411 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11412 given++, canonical++;
11413
11414 return ((*given == 0 && *canonical == 0)
11415 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11416 }
11417
11418
11419 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11420 CPU name. We've traditionally allowed a lot of variation here.
11421
11422 Note: this function is shared between GCC and GAS. */
11423
11424 static bool
11425 mips_matching_cpu_name_p (const char *canonical, const char *given)
11426 {
11427 /* First see if the name matches exactly, or with a final "000"
11428 turned into "k". */
11429 if (mips_strict_matching_cpu_name_p (canonical, given))
11430 return true;
11431
11432 /* If not, try comparing based on numerical designation alone.
11433 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11434 if (TOLOWER (*given) == 'r')
11435 given++;
11436 if (!ISDIGIT (*given))
11437 return false;
11438
11439 /* Skip over some well-known prefixes in the canonical name,
11440 hoping to find a number there too. */
11441 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11442 canonical += 2;
11443 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11444 canonical += 2;
11445 else if (TOLOWER (canonical[0]) == 'r')
11446 canonical += 1;
11447
11448 return mips_strict_matching_cpu_name_p (canonical, given);
11449 }
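/* For example, with the rules above "-march=r4000", "-march=4000" and
   "-march=r4k" all match the canonical entry "r4000", and "vr4100" may
   be given as "r4100" or plain "4100".  Purely alphabetic names such
   as "sb1" still have to match letter for letter.  */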
11450
11451
11452 /* Return the mips_cpu_info entry for the processor or ISA given
11453 by CPU_STRING. Return null if the string isn't recognized.
11454
11455 A similar function exists in GAS. */
11456
11457 static const struct mips_cpu_info *
11458 mips_parse_cpu (const char *cpu_string)
11459 {
11460 const struct mips_cpu_info *p;
11461 const char *s;
11462
11463 /* In the past, we allowed upper-case CPU names, but it doesn't
11464 work well with the multilib machinery. */
11465 for (s = cpu_string; *s != 0; s++)
11466 if (ISUPPER (*s))
11467 {
11468 warning (0, "the cpu name must be lower case");
11469 break;
11470 }
11471
11472 /* 'from-abi' selects the most compatible architecture for the given
11473 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11474 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11475 version. Look first at the -mgp options, if given, otherwise base
11476 the choice on MASK_64BIT in TARGET_DEFAULT. */
11477 if (strcasecmp (cpu_string, "from-abi") == 0)
11478 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11479 : ABI_NEEDS_64BIT_REGS ? 3
11480 : (TARGET_64BIT ? 3 : 1));
11481
11482 /* 'default' has traditionally been a no-op. Probably not very useful. */
11483 if (strcasecmp (cpu_string, "default") == 0)
11484 return 0;
11485
11486 for (p = mips_cpu_info_table; p->name != 0; p++)
11487 if (mips_matching_cpu_name_p (p->name, cpu_string))
11488 return p;
11489
11490 return 0;
11491 }
11492
11493
11494 /* Return the processor associated with the given ISA level, or null
11495 if the ISA isn't valid. */
11496
11497 static const struct mips_cpu_info *
11498 mips_cpu_info_from_isa (int isa)
11499 {
11500 const struct mips_cpu_info *p;
11501
11502 for (p = mips_cpu_info_table; p->name != 0; p++)
11503 if (p->isa == isa)
11504 return p;
11505
11506 return 0;
11507 }
11508 \f
11509 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
11510 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
11511 they only hold condition code modes, and CCmode is always considered to
11512 be 4 bytes wide. All other registers are word sized. */
11513
11514 unsigned int
11515 mips_hard_regno_nregs (int regno, enum machine_mode mode)
11516 {
11517 if (ST_REG_P (regno))
11518 return ((GET_MODE_SIZE (mode) + 3) / 4);
11519 else if (! FP_REG_P (regno))
11520 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
11521 else
11522 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
11523 }
11524
11525 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
11526 all BLKmode objects are returned in memory. Under the new (N32 and
11527 64-bit MIPS ABIs) small structures are returned in a register.
11528 Objects with varying size must still be returned in memory, of
11529 course. */
11530
11531 static bool
11532 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
11533 {
11534 if (TARGET_OLDABI)
11535 return (TYPE_MODE (type) == BLKmode);
11536 else
11537 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
11538 || (int_size_in_bytes (type) == -1));
11539 }
11540
11541 static bool
11542 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
11543 {
11544 return !TARGET_OLDABI;
11545 }
11546 \f
11547 /* Return true if INSN is a multiply-add or multiply-subtract
11548 instruction and PREV assigns to the accumulator operand. */
11549
11550 bool
11551 mips_linked_madd_p (rtx prev, rtx insn)
11552 {
11553 rtx x;
11554
11555 x = single_set (insn);
11556 if (x == 0)
11557 return false;
11558
11559 x = SET_SRC (x);
11560
11561 if (GET_CODE (x) == PLUS
11562 && GET_CODE (XEXP (x, 0)) == MULT
11563 && reg_set_p (XEXP (x, 1), prev))
11564 return true;
11565
11566 if (GET_CODE (x) == MINUS
11567 && GET_CODE (XEXP (x, 1)) == MULT
11568 && reg_set_p (XEXP (x, 0), prev))
11569 return true;
11570
11571 return false;
11572 }
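/* A minimal example of what this matches (pseudo-RTL, with modes and
   registers simplified): if PREV sets LO and INSN is

        (set (reg:SI LO)
             (plus:SI (mult:SI (reg:SI $4) (reg:SI $5))
                      (reg:SI LO)))

   then INSN is a multiply-accumulate whose accumulator input is
   produced by PREV, so the two can form a macc chain.  */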
11573 \f
11574 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11575 that may clobber hi or lo. */
11576
11577 static rtx mips_macc_chains_last_hilo;
11578
11579 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11580 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11581
11582 static void
11583 mips_macc_chains_record (rtx insn)
11584 {
11585 if (get_attr_may_clobber_hilo (insn))
11586 mips_macc_chains_last_hilo = insn;
11587 }
11588
11589 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11590 has NREADY elements, looking for a multiply-add or multiply-subtract
11591 instruction whose accumulator operand is set by mips_macc_chains_last_hilo.
11592 If there is one, promote it ahead of anything else that might
11593 clobber hi or lo. */
11594
11595 static void
11596 mips_macc_chains_reorder (rtx *ready, int nready)
11597 {
11598 int i, j;
11599
11600 if (mips_macc_chains_last_hilo != 0)
11601 for (i = nready - 1; i >= 0; i--)
11602 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11603 {
11604 for (j = nready - 1; j > i; j--)
11605 if (recog_memoized (ready[j]) >= 0
11606 && get_attr_may_clobber_hilo (ready[j]))
11607 {
11608 mips_promote_ready (ready, i, j);
11609 break;
11610 }
11611 break;
11612 }
11613 }
11614 \f
11615 /* The last instruction to be scheduled. */
11616
11617 static rtx vr4130_last_insn;
11618
11619 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11620 points to an rtx that is initially an instruction. Nullify the rtx
11621 if the instruction uses the value of register X. */
11622
11623 static void
11624 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11625 {
11626 rtx *insn_ptr = data;
11627 if (REG_P (x)
11628 && *insn_ptr != 0
11629 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11630 *insn_ptr = 0;
11631 }
11632
11633 /* Return true if there is true register dependence between vr4130_last_insn
11634 and INSN. */
11635
11636 static bool
11637 vr4130_true_reg_dependence_p (rtx insn)
11638 {
11639 note_stores (PATTERN (vr4130_last_insn),
11640 vr4130_true_reg_dependence_p_1, &insn);
11641 return insn == 0;
11642 }
11643
11644 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11645 the ready queue and that INSN2 is the instruction after it, return
11646 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11647 in which INSN1 and INSN2 can probably issue in parallel, but for
11648 which (INSN2, INSN1) should be less sensitive to instruction
11649 alignment than (INSN1, INSN2). See 4130.md for more details. */
11650
11651 static bool
11652 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11653 {
11654 sd_iterator_def sd_it;
11655 dep_t dep;
11656
11657 /* Check for the following case:
11658
11659 1) there is some other instruction X with an anti dependence on INSN1;
11660 2) X has a higher priority than INSN2; and
11661 3) X is an arithmetic instruction (and thus has no unit restrictions).
11662
11663 If INSN1 is the last instruction blocking X, it would be better to
11664 choose (INSN1, X) over (INSN2, INSN1). */
11665 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11666 if (DEP_TYPE (dep) == REG_DEP_ANTI
11667 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11668 && recog_memoized (DEP_CON (dep)) >= 0
11669 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11670 return false;
11671
11672 if (vr4130_last_insn != 0
11673 && recog_memoized (insn1) >= 0
11674 && recog_memoized (insn2) >= 0)
11675 {
11676 /* See whether INSN1 and INSN2 use different execution units,
11677 or if they are both ALU-type instructions. If so, they can
11678 probably execute in parallel. */
11679 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11680 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11681 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11682 {
11683 /* If only one of the instructions has a dependence on
11684 vr4130_last_insn, prefer to schedule the other one first. */
11685 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11686 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11687 if (dep1 != dep2)
11688 return dep1;
11689
11690 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11691 is not an ALU-type instruction and if INSN1 uses the same
11692 execution unit. (Note that if this condition holds, we already
11693 know that INSN2 uses a different execution unit.) */
11694 if (class1 != VR4130_CLASS_ALU
11695 && recog_memoized (vr4130_last_insn) >= 0
11696 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11697 return true;
11698 }
11699 }
11700 return false;
11701 }
11702
11703 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11704 queue with at least two instructions. Swap the first two if
11705 vr4130_swap_insns_p says that it could be worthwhile. */
11706
11707 static void
11708 vr4130_reorder (rtx *ready, int nready)
11709 {
11710 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11711 mips_promote_ready (ready, nready - 2, nready - 1);
11712 }
11713 \f
11714 /* Remove the instruction at index LOWER from ready queue READY and
11715 reinsert it in front of the instruction at index HIGHER. LOWER must
11716 be <= HIGHER. */
11717
11718 static void
11719 mips_promote_ready (rtx *ready, int lower, int higher)
11720 {
11721 rtx new_head;
11722 int i;
11723
11724 new_head = ready[lower];
11725 for (i = lower; i < higher; i++)
11726 ready[i] = ready[i + 1];
11727 ready[i] = new_head;
11728 }
11729
11730 /* If the priority of the instruction at POS2 in the ready queue READY
11731 is within LIMIT units of that of the instruction at POS1, swap the
11732 instructions if POS2 is not already less than POS1. */
11733
11734 static void
11735 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11736 {
11737 if (pos1 < pos2
11738 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11739 {
11740 rtx temp;
11741 temp = ready[pos1];
11742 ready[pos1] = ready[pos2];
11743 ready[pos2] = temp;
11744 }
11745 }
11746
11747 /* Record whether the last 74k AGEN instruction was a load or store. */
11748
11749 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11750
11751 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11752 resets it to the TYPE_UNKNOWN state. */
11753
11754 static void
11755 mips_74k_agen_init (rtx insn)
11756 {
11757 if (!insn || !NONJUMP_INSN_P (insn))
11758 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11759 else if (USEFUL_INSN_P (insn))
11760 {
11761 enum attr_type type = get_attr_type (insn);
11762 if (type == TYPE_LOAD || type == TYPE_STORE)
11763 mips_last_74k_agen_insn = type;
11764 }
11765 }
11766
11767 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11768 loads to be grouped together, and multiple stores to be grouped
11769 together. Swap things around in the ready queue to make this happen. */
11770
11771 static void
11772 mips_74k_agen_reorder (rtx *ready, int nready)
11773 {
11774 int i;
11775 int store_pos, load_pos;
11776
11777 store_pos = -1;
11778 load_pos = -1;
11779
11780 for (i = nready - 1; i >= 0; i--)
11781 {
11782 rtx insn = ready[i];
11783 if (USEFUL_INSN_P (insn))
11784 switch (get_attr_type (insn))
11785 {
11786 case TYPE_STORE:
11787 if (store_pos == -1)
11788 store_pos = i;
11789 break;
11790
11791 case TYPE_LOAD:
11792 if (load_pos == -1)
11793 load_pos = i;
11794 break;
11795
11796 default:
11797 break;
11798 }
11799 }
11800
11801 if (load_pos == -1 || store_pos == -1)
11802 return;
11803
11804 switch (mips_last_74k_agen_insn)
11805 {
11806 case TYPE_UNKNOWN:
11807 /* Prefer to schedule loads since they have a higher latency. */
11808 case TYPE_LOAD:
11809 /* Swap loads to the front of the queue. */
11810 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11811 break;
11812 case TYPE_STORE:
11813 /* Swap stores to the front of the queue. */
11814 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11815 break;
11816 default:
11817 break;
11818 }
11819 }
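/* A sketch of the effect (priorities made up): if the last AGEN
   operation was a load and the ready queue holds both a store and a
   load, with the load's priority within 4 units of the store's, the
   load is swapped ahead of the store so that consecutive loads stay
   grouped; after a store the preference is reversed.  */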
11820
11821 /* Implement TARGET_SCHED_INIT. */
11822
11823 static void
11824 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11825 int max_ready ATTRIBUTE_UNUSED)
11826 {
11827 mips_macc_chains_last_hilo = 0;
11828 vr4130_last_insn = 0;
11829 mips_74k_agen_init (NULL_RTX);
11830 }
11831
11832 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
11833
11834 static int
11835 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11836 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11837 {
11838 if (!reload_completed
11839 && TUNE_MACC_CHAINS
11840 && *nreadyp > 0)
11841 mips_macc_chains_reorder (ready, *nreadyp);
11842 if (reload_completed
11843 && TUNE_MIPS4130
11844 && !TARGET_VR4130_ALIGN
11845 && *nreadyp > 1)
11846 vr4130_reorder (ready, *nreadyp);
11847 if (TUNE_74K)
11848 mips_74k_agen_reorder (ready, *nreadyp);
11849 return mips_issue_rate ();
11850 }
11851
11852 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11853
11854 static int
11855 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11856 rtx insn, int more)
11857 {
11858 if (TUNE_74K)
11859 mips_74k_agen_init (insn);
11860 switch (GET_CODE (PATTERN (insn)))
11861 {
11862 case USE:
11863 case CLOBBER:
11864 /* Don't count USEs and CLOBBERs against the issue rate. */
11865 break;
11866
11867 default:
11868 more--;
11869 if (!reload_completed && TUNE_MACC_CHAINS)
11870 mips_macc_chains_record (insn);
11871 vr4130_last_insn = insn;
11872 break;
11873 }
11874 return more;
11875 }
11876 \f
11877 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11878 dependencies have no cost, except on the 20Kc where output-dependence
11879 is treated like input-dependence. */
11880
11881 static int
11882 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11883 rtx dep ATTRIBUTE_UNUSED, int cost)
11884 {
11885 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11886 && TUNE_20KC)
11887 return cost;
11888 if (REG_NOTE_KIND (link) != 0)
11889 return 0;
11890 return cost;
11891 }
11892
11893 /* Return the number of instructions that can be issued per cycle. */
11894
11895 static int
11896 mips_issue_rate (void)
11897 {
11898 switch (mips_tune)
11899 {
11900 case PROCESSOR_74KC:
11901 case PROCESSOR_74KF2_1:
11902 case PROCESSOR_74KF1_1:
11903 case PROCESSOR_74KF3_2:
11904 /* The 74k is not strictly a quad-issue cpu, but can be seen as one
11905 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11906 but in reality only a maximum of 3 insns can be issued as the
11907 floating point load/stores also require a slot in the AGEN pipe. */
11908 return 4;
11909
11910 case PROCESSOR_20KC:
11911 case PROCESSOR_R4130:
11912 case PROCESSOR_R5400:
11913 case PROCESSOR_R5500:
11914 case PROCESSOR_R7000:
11915 case PROCESSOR_R9000:
11916 return 2;
11917
11918 case PROCESSOR_SB1:
11919 case PROCESSOR_SB1A:
11920 /* This is actually 4, but we get better performance if we claim 3.
11921 This is partly because of unwanted speculative code motion with the
11922 larger number, and partly because in most common cases we can't
11923 reach the theoretical max of 4. */
11924 return 3;
11925
11926 default:
11927 return 1;
11928 }
11929 }
11930
11931 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11932 be as wide as the scheduling freedom in the DFA. */
11933
11934 static int
11935 mips_multipass_dfa_lookahead (void)
11936 {
11937 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11938 if (TUNE_SB1)
11939 return 4;
11940
11941 return 0;
11942 }
11943
11944 /* Implements a store data bypass check. We need this because the cprestore
11945 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11946 default routine to abort. We just return false for that case. */
11947 /* ??? Should try to give a better result here than assuming false. */
11948
11949 int
11950 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11951 {
11952 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11953 return false;
11954
11955 return ! store_data_bypass_p (out_insn, in_insn);
11956 }
11957 \f
11958 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11959 return the first operand of the associated "pref" or "prefx" insn. */
11960
11961 rtx
11962 mips_prefetch_cookie (rtx write, rtx locality)
11963 {
11964 /* store_streamed / load_streamed. */
11965 if (INTVAL (locality) <= 0)
11966 return GEN_INT (INTVAL (write) + 4);
11967
11968 /* store / load. */
11969 if (INTVAL (locality) <= 2)
11970 return write;
11971
11972 /* store_retained / load_retained. */
11973 return GEN_INT (INTVAL (write) + 6);
11974 }
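/* In other words (hint numbers as defined for the MIPS "pref"
   instruction):

        write   locality        cookie  hint name
        0       0               4       load_streamed
        1       0               5       store_streamed
        0       1 or 2          0       load
        1       1 or 2          1       store
        0       3               6       load_retained
        1       3               7       store_retained  */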
11975 \f
11976 /* MIPS builtin function support. */
11977
11978 struct builtin_description
11979 {
11980 /* The code of the main .md file instruction. See mips_builtin_type
11981 for more information. */
11982 enum insn_code icode;
11983
11984 /* The floating-point comparison code to use with ICODE, if any. */
11985 enum mips_fp_condition cond;
11986
11987 /* The name of the builtin function. */
11988 const char *name;
11989
11990 /* Specifies how the function should be expanded. */
11991 enum mips_builtin_type builtin_type;
11992
11993 /* The function's prototype. */
11994 enum mips_function_type function_type;
11995
11996 /* The target flags required for this function. */
11997 int target_flags;
11998 };
11999
12000 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
12001 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
12002 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
12003 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
12004 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
12005
12006 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
12007 TARGET_FLAGS. */
12008 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
12009 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
12010 "__builtin_mips_" #INSN "_" #COND "_s", \
12011 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
12012 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
12013 "__builtin_mips_" #INSN "_" #COND "_d", \
12014 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
12015
12016 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
12017 The lower and upper forms require TARGET_FLAGS while the any and all
12018 forms require MASK_MIPS3D. */
12019 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
12020 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12021 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
12022 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
12023 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12024 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
12025 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
12026 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12027 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
12028 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
12029 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12030 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
12031 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
12032
12033 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
12034 require MASK_MIPS3D. */
12035 #define CMP_4S_BUILTINS(INSN, COND) \
12036 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
12037 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
12038 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12039 MASK_MIPS3D }, \
12040 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
12041 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
12042 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12043 MASK_MIPS3D }
12044
12045 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
12046 instruction requires TARGET_FLAGS. */
12047 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
12048 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12049 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
12050 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12051 TARGET_FLAGS }, \
12052 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12053 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
12054 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12055 TARGET_FLAGS }
12056
12057 /* Define all the builtins related to c.cond.fmt condition COND. */
12058 #define CMP_BUILTINS(COND) \
12059 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
12060 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
12061 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
12062 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
12063 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
12064 CMP_4S_BUILTINS (c, COND), \
12065 CMP_4S_BUILTINS (cabs, COND)
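
/* For example, assuming "eq" is one of the conditions supplied by
   MIPS_FP_CONDITIONS, CMP_BUILTINS (eq) contributes entries named
   (a partial sketch):

     __builtin_mips_movt_c_eq_ps     __builtin_mips_movf_cabs_eq_ps
     __builtin_mips_cabs_eq_s        __builtin_mips_cabs_eq_d
     __builtin_mips_any_c_eq_ps      __builtin_mips_all_cabs_eq_ps
     __builtin_mips_lower_c_eq_ps    __builtin_mips_upper_c_eq_ps
     __builtin_mips_any_c_eq_4s      __builtin_mips_all_cabs_eq_4s  */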
12066
12067 static const struct builtin_description mips_bdesc[] =
12068 {
12069 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12070 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12071 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12072 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12073 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
12074 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12075 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12076 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12077
12078 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
12079 MASK_PAIRED_SINGLE_FLOAT),
12080 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12081 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12082 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12083 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12084
12085 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
12086 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
12087 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12088 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
12089 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
12090 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12091
12092 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
12093 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
12094 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12095 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
12096 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
12097 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12098
12099 MIPS_FP_CONDITIONS (CMP_BUILTINS)
12100 };
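
/* A user-level sketch, not part of this file: with -mpaired-single the
   V2SF builtins above operate on 8-byte vectors of two floats.  The type
   name v2sf and the function name are illustrative only:

     typedef float v2sf __attribute__ ((vector_size (8)));

     v2sf
     pair_lower_halves (v2sf a, v2sf b)
     {
       return __builtin_mips_pll_ps (a, b);
     }
 */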
12101
12102 /* Builtin functions for the SB-1 processor. */
12103
12104 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
12105
12106 static const struct builtin_description sb1_bdesc[] =
12107 {
12108 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
12109 };
12110
12111 /* Builtin functions for DSP ASE. */
12112
12113 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
12114 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
12115 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
12116 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
12117 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
12118
12119 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
12120 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
12121 builtin_description fields. */
12122 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
12123 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
12124 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
12125
12126 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
12127 branch instruction. TARGET_FLAGS is a builtin_description field. */
12128 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
12129 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
12130 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
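
/* For instance, the BPOSGE_BUILTIN (32, MASK_DSP) entry used below
   expands to (a sketch):

     { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge32",
       MIPS_BUILTIN_BPOSGE32, MIPS_SI_FTYPE_VOID, MASK_DSP }  */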
12131
12132 static const struct builtin_description dsp_bdesc[] =
12133 {
12134 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12135 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12136 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12137 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12138 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12139 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12140 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12141 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12142 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12143 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12144 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12145 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12146 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12147 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
12148 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
12149 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
12150 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
12151 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
12152 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
12153 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
12154 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
12155 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
12156 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12157 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12158 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12159 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12160 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12161 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12162 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12163 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12164 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
12165 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12166 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12167 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12168 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
12169 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12170 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12171 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12172 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
12173 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
12174 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12175 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
12176 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
12177 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
12178 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12179 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
12180 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
12181 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12182 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12183 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12184 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12185 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12186 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12187 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12188 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12189 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12190 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12191 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12192 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12193 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
12194 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
12195 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12196 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12197 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12198 BPOSGE_BUILTIN (32, MASK_DSP),
12199
12200 /* The following are for the MIPS DSP ASE REV 2. */
12201 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
12202 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12203 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12204 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12205 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12206 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12207 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12208 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12209 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12210 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12211 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12212 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12213 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12214 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12215 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12216 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12217 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12218 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12219 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12220 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12221 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12222 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
12223 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12224 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12225 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12226 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12227 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12228 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12229 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12230 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12231 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12232 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12233 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12234 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
12235 };
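
/* A user-level sketch, not part of this file: with -mdsp the V2HI and
   V4QI builtins above take 4-byte vector arguments.  The typedefs and
   the function name are illustrative only:

     typedef signed char v4i8 __attribute__ ((vector_size (4)));
     typedef short v2q15 __attribute__ ((vector_size (4)));

     v2q15
     add_q15_saturating (v2q15 a, v2q15 b)
     {
       return __builtin_mips_addq_s_ph (a, b);
     }
 */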
12236
12237 static const struct builtin_description dsp_32only_bdesc[] =
12238 {
12239 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12240 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12241 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12242 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12243 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12244 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12245 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12246 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12247 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12248 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12249 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12250 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12251 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12252 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12253 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12254 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12255 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12256 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12257 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12258 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12259 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12260
12261 /* The following are for the MIPS DSP ASE REV 2. */
12262 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12263 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12264 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12265 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12266 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12267 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12268 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12269 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
12270 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
12271 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12272 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12273 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12274 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12275 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12276 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
12277 };
12278
12279 /* Maps a range of builtin function codes to one of the bdesc arrays
12280 above.  */
12281
12282 struct bdesc_map
12283 {
12284 /* The builtin function table that this entry describes. */
12285 const struct builtin_description *bdesc;
12286
12287 /* The number of entries in the builtin function table. */
12288 unsigned int size;
12289
12290 /* The target processor that supports these builtin functions.
12291 PROCESSOR_MAX means we enable them for all processors. */
12292 enum processor_type proc;
12293
12294 /* If the target has any of these flags set, this builtin function
12295 table is not supported.  */
12296 int unsupported_target_flags;
12297 };
12298
12299 static const struct bdesc_map bdesc_arrays[] =
12300 {
12301 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
12302 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
12303 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
12304 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
12305 MASK_64BIT }
12306 };
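
/* Builtin function codes index the concatenation of these tables in the
   order listed above.  A sketch of the mapping implemented by
   mips_init_builtins and mips_expand_builtin:

     codes 0 .. ARRAY_SIZE (mips_bdesc) - 1     ->  mips_bdesc
     next ARRAY_SIZE (sb1_bdesc) codes          ->  sb1_bdesc
     next ARRAY_SIZE (dsp_bdesc) codes          ->  dsp_bdesc
     next ARRAY_SIZE (dsp_32only_bdesc) codes   ->  dsp_32only_bdesc

   Tables that are not supported on the current target still occupy their
   slice of the code space, since both loops step over every array.  */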
12307
12308 /* Take argument ARGNUM of the call expression EXP and convert it into
12309 a form suitable for input operand OP of instruction ICODE.  Return the value.  */
12310
12311 static rtx
12312 mips_prepare_builtin_arg (enum insn_code icode,
12313 unsigned int op, tree exp, unsigned int argnum)
12314 {
12315 rtx value;
12316 enum machine_mode mode;
12317
12318 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
12319 mode = insn_data[icode].operand[op].mode;
12320 if (!insn_data[icode].operand[op].predicate (value, mode))
12321 {
12322 value = copy_to_mode_reg (mode, value);
12323 /* Check the predicate again. */
12324 if (!insn_data[icode].operand[op].predicate (value, mode))
12325 {
12326 error ("invalid argument to builtin function");
12327 return const0_rtx;
12328 }
12329 }
12330
12331 return value;
12332 }
12333
12334 /* Return an rtx suitable for output operand OP of instruction ICODE.
12335 If TARGET is non-null, try to use it where possible. */
12336
12337 static rtx
12338 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
12339 {
12340 enum machine_mode mode;
12341
12342 mode = insn_data[icode].operand[op].mode;
12343 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
12344 target = gen_reg_rtx (mode);
12345
12346 return target;
12347 }
12348
12349 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
12350
12351 rtx
12352 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12353 enum machine_mode mode ATTRIBUTE_UNUSED,
12354 int ignore ATTRIBUTE_UNUSED)
12355 {
12356 enum insn_code icode;
12357 enum mips_builtin_type type;
12358 tree fndecl;
12359 unsigned int fcode;
12360 const struct builtin_description *bdesc;
12361 const struct bdesc_map *m;
12362
12363 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12364 fcode = DECL_FUNCTION_CODE (fndecl);
12365
12366 if (TARGET_MIPS16)
12367 {
12368 error ("built-in function %qs not supported for MIPS16",
12369 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
12370 return const0_rtx;
12371 }
12372
12373 bdesc = NULL;
12374 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12375 {
12376 if (fcode < m->size)
12377 {
12378 bdesc = m->bdesc;
12379 icode = bdesc[fcode].icode;
12380 type = bdesc[fcode].builtin_type;
12381 break;
12382 }
12383 fcode -= m->size;
12384 }
12385 if (bdesc == NULL)
12386 return 0;
12387
12388 switch (type)
12389 {
12390 case MIPS_BUILTIN_DIRECT:
12391 return mips_expand_builtin_direct (icode, target, exp, true);
12392
12393 case MIPS_BUILTIN_DIRECT_NO_TARGET:
12394 return mips_expand_builtin_direct (icode, target, exp, false);
12395
12396 case MIPS_BUILTIN_MOVT:
12397 case MIPS_BUILTIN_MOVF:
12398 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
12399 target, exp);
12400
12401 case MIPS_BUILTIN_CMP_ANY:
12402 case MIPS_BUILTIN_CMP_ALL:
12403 case MIPS_BUILTIN_CMP_UPPER:
12404 case MIPS_BUILTIN_CMP_LOWER:
12405 case MIPS_BUILTIN_CMP_SINGLE:
12406 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
12407 target, exp);
12408
12409 case MIPS_BUILTIN_BPOSGE32:
12410 return mips_expand_builtin_bposge (type, target);
12411
12412 default:
12413 return 0;
12414 }
12415 }
12416
12417 /* Initialize builtin functions.  This is called from TARGET_INIT_BUILTINS.  */
12418
12419 void
12420 mips_init_builtins (void)
12421 {
12422 const struct builtin_description *d;
12423 const struct bdesc_map *m;
12424 tree types[(int) MIPS_MAX_FTYPE_MAX];
12425 tree V2SF_type_node;
12426 tree V2HI_type_node;
12427 tree V4QI_type_node;
12428 unsigned int offset;
12429
12430 /* Builtins exist only for -mpaired-single, -mips3d and -mdsp.  */
12431 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
12432 return;
12433
12434 if (TARGET_PAIRED_SINGLE_FLOAT)
12435 {
12436 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12437
12438 types[MIPS_V2SF_FTYPE_V2SF]
12439 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12440
12441 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
12442 = build_function_type_list (V2SF_type_node,
12443 V2SF_type_node, V2SF_type_node, NULL_TREE);
12444
12445 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
12446 = build_function_type_list (V2SF_type_node,
12447 V2SF_type_node, V2SF_type_node,
12448 integer_type_node, NULL_TREE);
12449
12450 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
12451 = build_function_type_list (V2SF_type_node,
12452 V2SF_type_node, V2SF_type_node,
12453 V2SF_type_node, V2SF_type_node, NULL_TREE);
12454
12455 types[MIPS_V2SF_FTYPE_SF_SF]
12456 = build_function_type_list (V2SF_type_node,
12457 float_type_node, float_type_node, NULL_TREE);
12458
12459 types[MIPS_INT_FTYPE_V2SF_V2SF]
12460 = build_function_type_list (integer_type_node,
12461 V2SF_type_node, V2SF_type_node, NULL_TREE);
12462
12463 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
12464 = build_function_type_list (integer_type_node,
12465 V2SF_type_node, V2SF_type_node,
12466 V2SF_type_node, V2SF_type_node, NULL_TREE);
12467
12468 types[MIPS_INT_FTYPE_SF_SF]
12469 = build_function_type_list (integer_type_node,
12470 float_type_node, float_type_node, NULL_TREE);
12471
12472 types[MIPS_INT_FTYPE_DF_DF]
12473 = build_function_type_list (integer_type_node,
12474 double_type_node, double_type_node, NULL_TREE);
12475
12476 types[MIPS_SF_FTYPE_V2SF]
12477 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
12478
12479 types[MIPS_SF_FTYPE_SF]
12480 = build_function_type_list (float_type_node,
12481 float_type_node, NULL_TREE);
12482
12483 types[MIPS_SF_FTYPE_SF_SF]
12484 = build_function_type_list (float_type_node,
12485 float_type_node, float_type_node, NULL_TREE);
12486
12487 types[MIPS_DF_FTYPE_DF]
12488 = build_function_type_list (double_type_node,
12489 double_type_node, NULL_TREE);
12490
12491 types[MIPS_DF_FTYPE_DF_DF]
12492 = build_function_type_list (double_type_node,
12493 double_type_node, double_type_node, NULL_TREE);
12494 }
12495
12496 if (TARGET_DSP)
12497 {
12498 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
12499 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
12500
12501 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
12502 = build_function_type_list (V2HI_type_node,
12503 V2HI_type_node, V2HI_type_node,
12504 NULL_TREE);
12505
12506 types[MIPS_SI_FTYPE_SI_SI]
12507 = build_function_type_list (intSI_type_node,
12508 intSI_type_node, intSI_type_node,
12509 NULL_TREE);
12510
12511 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
12512 = build_function_type_list (V4QI_type_node,
12513 V4QI_type_node, V4QI_type_node,
12514 NULL_TREE);
12515
12516 types[MIPS_SI_FTYPE_V4QI]
12517 = build_function_type_list (intSI_type_node,
12518 V4QI_type_node,
12519 NULL_TREE);
12520
12521 types[MIPS_V2HI_FTYPE_V2HI]
12522 = build_function_type_list (V2HI_type_node,
12523 V2HI_type_node,
12524 NULL_TREE);
12525
12526 types[MIPS_SI_FTYPE_SI]
12527 = build_function_type_list (intSI_type_node,
12528 intSI_type_node,
12529 NULL_TREE);
12530
12531 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
12532 = build_function_type_list (V4QI_type_node,
12533 V2HI_type_node, V2HI_type_node,
12534 NULL_TREE);
12535
12536 types[MIPS_V2HI_FTYPE_SI_SI]
12537 = build_function_type_list (V2HI_type_node,
12538 intSI_type_node, intSI_type_node,
12539 NULL_TREE);
12540
12541 types[MIPS_SI_FTYPE_V2HI]
12542 = build_function_type_list (intSI_type_node,
12543 V2HI_type_node,
12544 NULL_TREE);
12545
12546 types[MIPS_V2HI_FTYPE_V4QI]
12547 = build_function_type_list (V2HI_type_node,
12548 V4QI_type_node,
12549 NULL_TREE);
12550
12551 types[MIPS_V4QI_FTYPE_V4QI_SI]
12552 = build_function_type_list (V4QI_type_node,
12553 V4QI_type_node, intSI_type_node,
12554 NULL_TREE);
12555
12556 types[MIPS_V2HI_FTYPE_V2HI_SI]
12557 = build_function_type_list (V2HI_type_node,
12558 V2HI_type_node, intSI_type_node,
12559 NULL_TREE);
12560
12561 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12562 = build_function_type_list (V2HI_type_node,
12563 V4QI_type_node, V2HI_type_node,
12564 NULL_TREE);
12565
12566 types[MIPS_SI_FTYPE_V2HI_V2HI]
12567 = build_function_type_list (intSI_type_node,
12568 V2HI_type_node, V2HI_type_node,
12569 NULL_TREE);
12570
12571 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12572 = build_function_type_list (intDI_type_node,
12573 intDI_type_node, V4QI_type_node, V4QI_type_node,
12574 NULL_TREE);
12575
12576 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12577 = build_function_type_list (intDI_type_node,
12578 intDI_type_node, V2HI_type_node, V2HI_type_node,
12579 NULL_TREE);
12580
12581 types[MIPS_DI_FTYPE_DI_SI_SI]
12582 = build_function_type_list (intDI_type_node,
12583 intDI_type_node, intSI_type_node, intSI_type_node,
12584 NULL_TREE);
12585
12586 types[MIPS_V4QI_FTYPE_SI]
12587 = build_function_type_list (V4QI_type_node,
12588 intSI_type_node,
12589 NULL_TREE);
12590
12591 types[MIPS_V2HI_FTYPE_SI]
12592 = build_function_type_list (V2HI_type_node,
12593 intSI_type_node,
12594 NULL_TREE);
12595
12596 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12597 = build_function_type_list (void_type_node,
12598 V4QI_type_node, V4QI_type_node,
12599 NULL_TREE);
12600
12601 types[MIPS_SI_FTYPE_V4QI_V4QI]
12602 = build_function_type_list (intSI_type_node,
12603 V4QI_type_node, V4QI_type_node,
12604 NULL_TREE);
12605
12606 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12607 = build_function_type_list (void_type_node,
12608 V2HI_type_node, V2HI_type_node,
12609 NULL_TREE);
12610
12611 types[MIPS_SI_FTYPE_DI_SI]
12612 = build_function_type_list (intSI_type_node,
12613 intDI_type_node, intSI_type_node,
12614 NULL_TREE);
12615
12616 types[MIPS_DI_FTYPE_DI_SI]
12617 = build_function_type_list (intDI_type_node,
12618 intDI_type_node, intSI_type_node,
12619 NULL_TREE);
12620
12621 types[MIPS_VOID_FTYPE_SI_SI]
12622 = build_function_type_list (void_type_node,
12623 intSI_type_node, intSI_type_node,
12624 NULL_TREE);
12625
12626 types[MIPS_SI_FTYPE_PTR_SI]
12627 = build_function_type_list (intSI_type_node,
12628 ptr_type_node, intSI_type_node,
12629 NULL_TREE);
12630
12631 types[MIPS_SI_FTYPE_VOID]
12632 = build_function_type (intSI_type_node, void_list_node);
12633
12634 if (TARGET_DSPR2)
12635 {
12636 types[MIPS_V4QI_FTYPE_V4QI]
12637 = build_function_type_list (V4QI_type_node,
12638 V4QI_type_node,
12639 NULL_TREE);
12640
12641 types[MIPS_SI_FTYPE_SI_SI_SI]
12642 = build_function_type_list (intSI_type_node,
12643 intSI_type_node, intSI_type_node,
12644 intSI_type_node, NULL_TREE);
12645
12646 types[MIPS_DI_FTYPE_DI_USI_USI]
12647 = build_function_type_list (intDI_type_node,
12648 intDI_type_node,
12649 unsigned_intSI_type_node,
12650 unsigned_intSI_type_node, NULL_TREE);
12651
12652 types[MIPS_DI_FTYPE_SI_SI]
12653 = build_function_type_list (intDI_type_node,
12654 intSI_type_node, intSI_type_node,
12655 NULL_TREE);
12656
12657 types[MIPS_DI_FTYPE_USI_USI]
12658 = build_function_type_list (intDI_type_node,
12659 unsigned_intSI_type_node,
12660 unsigned_intSI_type_node, NULL_TREE);
12661
12662 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12663 = build_function_type_list (V2HI_type_node,
12664 intSI_type_node, intSI_type_node,
12665 intSI_type_node, NULL_TREE);
12666
12667 }
12668 }
12669
12670 /* Iterate through all of the bdesc arrays, initializing all of the
12671 builtin functions. */
12672
12673 offset = 0;
12674 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12675 {
12676 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12677 && (m->unsupported_target_flags & target_flags) == 0)
12678 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12679 if ((d->target_flags & target_flags) == d->target_flags)
12680 add_builtin_function (d->name, types[d->function_type],
12681 d - m->bdesc + offset,
12682 BUILT_IN_MD, NULL, NULL);
12683 offset += m->size;
12684 }
12685 }
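
/* As an illustration of the filtering above (a sketch): when compiling
   with -mdsp but not -mdspr2, target_flags contains MASK_DSP but not
   MASK_DSPR2, so the MASK_DSP entries of dsp_bdesc satisfy
   (d->target_flags & target_flags) == d->target_flags and are registered,
   while the MASK_DSPR2 entries are skipped.  */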
12686
12687 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12688 .md pattern and EXP is the call expression with its arguments.
12689 TARGET, if nonnull, suggests a good place to put the result.
12690 HAS_TARGET is true if the instruction produces a value to return.  */
12691
12692 static rtx
12693 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12694 bool has_target)
12695 {
12696 rtx ops[MAX_RECOG_OPERANDS];
12697 int i = 0;
12698 int j = 0;
12699
12700 if (has_target)
12701 {
12702 /* Save the target to ops[0].  */
12703 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12704 i = 1;
12705 }
12706
12707 /* Convert the call arguments.  Bound the loop by the argument count as
12708 well as by n_operands, since some instructions have extra clobber operands.  */
12709 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12710 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12711
12712 switch (i)
12713 {
12714 case 2:
12715 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12716 break;
12717
12718 case 3:
12719 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12720 break;
12721
12722 case 4:
12723 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12724 break;
12725
12726 default:
12727 gcc_unreachable ();
12728 }
12729 return target;
12730 }
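
/* A sketch of how this plays out for a two-input direct builtin such as
   __builtin_mips_addq_ph: ops[0] holds the target, ops[1] and ops[2] are
   converted from the two call arguments, I ends up as 3, and we emit a
   single insn via GEN_FCN (CODE_FOR_mips_addq_ph) (ops[0], ops[1], ops[2]),
   which is the addv2hi3 pattern named by the #define above.  */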
12731
12732 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12733 function (TYPE says which). EXP is the tree for the function
12734 call, ICODE is the instruction that should be used to compare
12735 the first two arguments, and COND is the condition it should test.
12736 TARGET, if nonnull, suggests a good place to put the result. */
12737
12738 static rtx
12739 mips_expand_builtin_movtf (enum mips_builtin_type type,
12740 enum insn_code icode, enum mips_fp_condition cond,
12741 rtx target, tree exp)
12742 {
12743 rtx cmp_result, op0, op1;
12744
12745 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12746 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12747 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12748 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12749
12750 icode = CODE_FOR_mips_cond_move_tf_ps;
12751 target = mips_prepare_builtin_target (icode, 0, target);
12752 if (type == MIPS_BUILTIN_MOVT)
12753 {
12754 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12755 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12756 }
12757 else
12758 {
12759 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12760 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12761 }
12762 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12763 return target;
12764 }
12765
12766 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12767 into TARGET otherwise. Return TARGET. */
12768
12769 static rtx
12770 mips_builtin_branch_and_move (rtx condition, rtx target,
12771 rtx value_if_true, rtx value_if_false)
12772 {
12773 rtx true_label, done_label;
12774
12775 true_label = gen_label_rtx ();
12776 done_label = gen_label_rtx ();
12777
12778 /* First assume that CONDITION is false. */
12779 mips_emit_move (target, value_if_false);
12780
12781 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12782 emit_jump_insn (gen_condjump (condition, true_label));
12783 emit_jump_insn (gen_jump (done_label));
12784 emit_barrier ();
12785
12786 /* Fix TARGET if CONDITION is true. */
12787 emit_label (true_label);
12788 mips_emit_move (target, value_if_true);
12789
12790 emit_label (done_label);
12791 return target;
12792 }
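
/* In outline, the sequence emitted above is (a sketch, not literal RTL):

     target = VALUE_IF_FALSE;
     if (CONDITION) goto true_label;
     goto done_label;
   true_label:
     target = VALUE_IF_TRUE;
   done_label:
 */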
12793
12794 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12795 of the comparison instruction and COND is the condition it should test.
12796 EXP is the function call and arguments and TARGET, if nonnull,
12797 suggests a good place to put the boolean result. */
12798
12799 static rtx
12800 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12801 enum insn_code icode, enum mips_fp_condition cond,
12802 rtx target, tree exp)
12803 {
12804 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12805 int i;
12806 int j = 0;
12807
12808 if (target == 0 || GET_MODE (target) != SImode)
12809 target = gen_reg_rtx (SImode);
12810
12811 /* Prepare the operands to the comparison. */
12812 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12813 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12814 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12815
12816 switch (insn_data[icode].n_operands)
12817 {
12818 case 4:
12819 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12820 break;
12821
12822 case 6:
12823 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12824 ops[3], ops[4], GEN_INT (cond)));
12825 break;
12826
12827 default:
12828 gcc_unreachable ();
12829 }
12830
12831 /* If the comparison sets more than one register, we define the result
12832 to be 0 if all registers are false and -1 if all registers are true.
12833 The value of the complete result is indeterminate otherwise. */
12834 switch (builtin_type)
12835 {
12836 case MIPS_BUILTIN_CMP_ALL:
12837 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12838 return mips_builtin_branch_and_move (condition, target,
12839 const0_rtx, const1_rtx);
12840
12841 case MIPS_BUILTIN_CMP_UPPER:
12842 case MIPS_BUILTIN_CMP_LOWER:
12843 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12844 condition = gen_single_cc (cmp_result, offset);
12845 return mips_builtin_branch_and_move (condition, target,
12846 const1_rtx, const0_rtx);
12847
12848 default:
12849 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12850 return mips_builtin_branch_and_move (condition, target,
12851 const1_rtx, const0_rtx);
12852 }
12853 }
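
/* A worked example of the CMP_ALL case (a sketch): for a builtin such as
   __builtin_mips_all_c_eq_4s we branch on cmp_result != -1 and let
   mips_builtin_branch_and_move load 0 into the target when the branch is
   taken and 1 when it is not, matching the "-1 means all true" convention
   described above.  */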
12854
12855 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12856 suggests a good place to put the boolean result. */
12857
12858 static rtx
12859 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12860 {
12861 rtx condition, cmp_result;
12862 int cmp_value;
12863
12864 if (target == 0 || GET_MODE (target) != SImode)
12865 target = gen_reg_rtx (SImode);
12866
12867 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12868
12869 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12870 cmp_value = 32;
12871 else
12872 gcc_unreachable ();
12873
12874 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12875 return mips_builtin_branch_and_move (condition, target,
12876 const1_rtx, const0_rtx);
12877 }
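
/* A user-level sketch, not part of this file: with -mdsp,

     if (__builtin_mips_bposge32 ())
       drain_buffer ();

   tests whether the pos field of the DSP control register is at least 32,
   using the bposge32 branch instruction underneath; drain_buffer is just
   a placeholder name.  */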
12878 \f
12879 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12880 FIRST is true if this is the first time handling this decl. */
12881
12882 static void
12883 mips_encode_section_info (tree decl, rtx rtl, int first)
12884 {
12885 default_encode_section_info (decl, rtl, first);
12886
12887 if (TREE_CODE (decl) == FUNCTION_DECL)
12888 {
12889 rtx symbol = XEXP (rtl, 0);
12890 tree type = TREE_TYPE (decl);
12891
12892 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12893 || mips_far_type_p (type))
12894 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12895 }
12896 }
12897
12898 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12899 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12900
12901 static void
12902 mips_extra_live_on_entry (bitmap regs)
12903 {
12904 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12905 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12906 }
12907
12908 /* SImode values are represented as sign-extended to DImode. */
12909
12910 int
12911 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12912 {
12913 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12914 return SIGN_EXTEND;
12915
12916 return UNKNOWN;
12917 }
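
/* A sketch of the consequence: on a 64-bit target, an SImode value held
   in a DImode register is assumed to already be in sign-extended form,
   which lets the optimizers remove redundant SImode-to-DImode sign
   extensions.  */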
12918 \f
12919 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12920
12921 static void
12922 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12923 {
12924 switch (size)
12925 {
12926 case 4:
12927 fputs ("\t.dtprelword\t", file);
12928 break;
12929
12930 case 8:
12931 fputs ("\t.dtpreldword\t", file);
12932 break;
12933
12934 default:
12935 gcc_unreachable ();
12936 }
12937 output_addr_const (file, x);
12938 fputs ("+0x8000", file);
12939 }
12940 \f
12941 #include "gt-mips.h"