1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989-2016 Free Software Foundation, Inc.
3 Contributed by A. Lichnewsky, lich@inria.inria.fr.
4 Changes by Michael Meissner, meissner@osf.org.
5 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
6 Brendan Eich, brendan@microunity.com.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
26 #include "coretypes.h"
35 #include "stringpool.h"
41 #include "diagnostic.h"
42 #include "insn-attr.h"
45 #include "fold-const.h"
47 #include "stor-layout.h"
53 #include "common/common-target.h"
54 #include "langhooks.h"
57 #include "sched-int.h"
59 #include "target-globals.h"
60 #include "tree-pass.h"
65 /* This file should be included last. */
66 #include "target-def.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.
   Such wrappers carry a mips_symbol_type in their UNSPEC number,
   biased by UNSPEC_ADDRESS_FIRST.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
74 /* Extract the symbol or label from UNSPEC wrapper X. */
75 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  The type is stored
   as the UNSPEC number's offset from UNSPEC_ADDRESS_FIRST; X must
   satisfy UNSPEC_ADDRESS_P.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   microMIPS LWM and SWM support 12-bit offsets (from -0x800 to 0x7ff),
   so we use a maximum of 0x7f0 for TARGET_MICROMIPS.
   (NOTE(review): the code below actually shares the 0x7f8 limit with
   MIPS16e SAVE/RESTORE for TARGET_MICROMIPS — confirm against the
   comment's 0x7f0.)

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_COMPRESSION ? 0x7ff0						\
   : TARGET_MICROMIPS || GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8		\
   : TARGET_64BIT ? 0x100 : 0x400)
/* True if INSN is a mips.md pattern or asm statement, i.e. a real
   instruction rather than a debug insn or a bare USE/CLOBBER marker.  */
/* ??? This test exists through the compiler, perhaps it should be
   moved to rtl.h.  */
#define USEFUL_INSN_P(INSN)					\
  (NONDEBUG_INSN_P (INSN)					\
   && GET_CODE (PATTERN (INSN)) != USE				\
   && GET_CODE (PATTERN (INSN)) != CLOBBER)
114 /* If INSN is a delayed branch sequence, return the first instruction
115 in the sequence, otherwise return INSN itself. */
116 #define SEQ_BEGIN(INSN) \
117 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
118 ? as_a <rtx_insn *> (XVECEXP (PATTERN (INSN), 0, 0)) \
121 /* Likewise for the last instruction in a delayed branch sequence. */
122 #define SEQ_END(INSN) \
123 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
124 ? as_a <rtx_insn *> (XVECEXP (PATTERN (INSN), \
126 XVECLEN (PATTERN (INSN), 0) - 1)) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  For a
   delayed-branch SEQUENCE this visits the branch and its delay slot;
   for an ordinary insn it visits just INSN itself.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  Use an unsigned long long 1 so
   that the shift is well-defined for any bit position up to 63:
   with a plain int 1, "1 << 31" shifts into the sign bit (undefined
   behavior in C) and bits >= 32 are unusable.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)
139 /* Return the opcode for a ptr_mode load of the form:
141 l[wd] DEST, OFFSET(BASE). */
142 #define MIPS_LOAD_PTR(DEST, OFFSET, BASE) \
143 (((ptr_mode == DImode ? 0x37 : 0x23) << 26) \
148 /* Return the opcode to move register SRC into register DEST. */
149 #define MIPS_MOVE(DEST, SRC) \
150 ((TARGET_64BIT ? 0x2d : 0x21) \
/* Return the opcode for:

       lui DEST, VALUE.

   0xf is the major opcode field; DEST goes in the rt field and VALUE
   in the 16-bit immediate field.  VALUE must already fit in 16 bits.  */
#define MIPS_LUI(DEST, VALUE) \
  ((0xf << 26) | ((DEST) << 16) | (VALUE))
/* Return the opcode to jump to register DEST.  When the JR opcode is not
   available use JALR $0, DEST.  When compact branches are always
   required (TARGET_CB_ALWAYS), a compact jump encoding is used
   instead — NOTE(review): presumably JIC; confirm against the ISA
   manual.  */
#define MIPS_JR(DEST) \
  (TARGET_CB_ALWAYS ? ((0x1b << 27) | ((DEST) << 16)) \
   : (((DEST) << 21) | (ISA_HAS_JR ? 0x8 : 0x9)))
/* Return the opcode for:

       bal . + (1 + OFFSET) * 4.

   OFFSET is the branch displacement in instruction words and must fit
   in the 16-bit immediate field.  */
#define MIPS_BAL(OFFSET) \
  ((0x1 << 26) | (0x11 << 16) | (OFFSET))
172 /* Return the usual opcode for a nop. */
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
190 enum mips_address_type
{
/* Macros to create an enumeration identifier for a function prototype.
   MIPS_FTYPE_NAMEn pastes a return type A and n argument types into a
   single MIPS_<ret>_FTYPE_<arg1>_..._<argn> identifier; they are
   expanded by the DEF_MIPS_FTYPE entries in mips-ftypes.def.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
203 /* Classifies the prototype of a built-in function. */
204 enum mips_function_type
{
205 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
206 #include "config/mips/mips-ftypes.def"
207 #undef DEF_MIPS_FTYPE
211 /* Specifies how a built-in function should be converted into rtl. */
212 enum mips_builtin_type
{
213 /* The function corresponds directly to an .md pattern. The return
214 value is mapped to operand 0 and the arguments are mapped to
215 operands 1 and above. */
218 /* The function corresponds directly to an .md pattern. There is no return
219 value and the arguments are mapped to operands 0 and above. */
220 MIPS_BUILTIN_DIRECT_NO_TARGET
,
222 /* The function corresponds to a comparison instruction followed by
223 a mips_cond_move_tf_ps pattern. The first two arguments are the
224 values to compare and the second two arguments are the vector
225 operands for the movt.ps or movf.ps instruction (in assembly order). */
229 /* The function corresponds to a V2SF comparison instruction. Operand 0
230 of this instruction is the result of the comparison, which has mode
231 CCV2 or CCV4. The function arguments are mapped to operands 1 and
232 above. The function's return value is an SImode boolean that is
233 true under the following conditions:
235 MIPS_BUILTIN_CMP_ANY: one of the registers is true
236 MIPS_BUILTIN_CMP_ALL: all of the registers are true
237 MIPS_BUILTIN_CMP_LOWER: the first register is true
238 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
239 MIPS_BUILTIN_CMP_ANY
,
240 MIPS_BUILTIN_CMP_ALL
,
241 MIPS_BUILTIN_CMP_UPPER
,
242 MIPS_BUILTIN_CMP_LOWER
,
244 /* As above, but the instruction only sets a single $fcc register. */
245 MIPS_BUILTIN_CMP_SINGLE
,
247 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
248 MIPS_BUILTIN_BPOSGE32
251 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
252 #define MIPS_FP_CONDITIONS(MACRO) \
270 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
271 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
272 enum mips_fp_condition
{
273 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND
)
275 #undef DECLARE_MIPS_COND
277 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
278 #define STRINGIFY(X) #X
279 static const char *const mips_fp_conditions
[] = {
280 MIPS_FP_CONDITIONS (STRINGIFY
)
284 /* A class used to control a comdat-style stub that we output in each
285 translation unit that needs it. */
286 class mips_one_only_stub
{
288 virtual ~mips_one_only_stub () {}
290 /* Return the name of the stub. */
291 virtual const char *get_name () = 0;
293 /* Output the body of the function to asm_out_file. */
294 virtual void output_body () = 0;
297 /* Tuning information that is automatically derived from other sources
298 (such as the scheduler). */
300 /* The architecture and tuning settings that this structure describes. */
304 /* True if this structure describes MIPS16 settings. */
307 /* True if the structure has been initialized. */
310 /* True if "MULT $0, $0" is preferable to "MTLO $0; MTHI $0"
311 when optimizing for speed. */
312 bool fast_mult_zero_zero_p
;
315 /* Information about a single argument. */
316 struct mips_arg_info
{
317 /* True if the argument is passed in a floating-point register, or
318 would have been if we hadn't run out of registers. */
321 /* The number of words passed in registers, rounded up. */
322 unsigned int reg_words
;
324 /* For EABI, the offset of the first register from GP_ARG_FIRST or
325 FP_ARG_FIRST. For other ABIs, the offset of the first register from
326 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
327 comment for details).
329 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
331 unsigned int reg_offset
;
333 /* The number of words that must be passed on the stack, rounded up. */
334 unsigned int stack_words
;
336 /* The offset from the start of the stack overflow area of the argument's
337 first stack word. Only meaningful when STACK_WORDS is nonzero. */
338 unsigned int stack_offset
;
341 /* Information about an address described by mips_address_type.
347 REG is the base register and OFFSET is the constant offset.
350 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
351 is the type of symbol it references.
354 SYMBOL_TYPE is the type of symbol that the address references. */
355 struct mips_address_info
{
356 enum mips_address_type type
;
359 enum mips_symbol_type symbol_type
;
362 /* One stage in a constant building sequence. These sequences have
366 A = A CODE[1] VALUE[1]
367 A = A CODE[2] VALUE[2]
370 where A is an accumulator, each CODE[i] is a binary rtl operation
371 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
372 struct mips_integer_op
{
374 unsigned HOST_WIDE_INT value
;
377 /* The largest number of operations needed to load an integer constant.
378 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
379 When the lowest bit is clear, we can try, but reject a sequence with
380 an extra SLL at the end. */
381 #define MIPS_MAX_INTEGER_OPS 7
383 /* Information about a MIPS16e SAVE or RESTORE instruction. */
384 struct mips16e_save_restore_info
{
385 /* The number of argument registers saved by a SAVE instruction.
386 0 for RESTORE instructions. */
389 /* Bit X is set if the instruction saves or restores GPR X. */
392 /* The total number of bytes to allocate. */
396 /* Costs of various operations on the different architectures. */
398 struct mips_rtx_cost_data
400 unsigned short fp_add
;
401 unsigned short fp_mult_sf
;
402 unsigned short fp_mult_df
;
403 unsigned short fp_div_sf
;
404 unsigned short fp_div_df
;
405 unsigned short int_mult_si
;
406 unsigned short int_mult_di
;
407 unsigned short int_div_si
;
408 unsigned short int_div_di
;
409 unsigned short branch_cost
;
410 unsigned short memory_latency
;
413 /* Global variables for machine-dependent things. */
415 /* The -G setting, or the configuration's default small-data limit if
416 no -G option is given. */
417 static unsigned int mips_small_data_threshold
;
419 /* The number of file directives written by mips_output_filename. */
420 int num_source_filenames
;
422 /* The name that appeared in the last .file directive written by
423 mips_output_filename, or "" if mips_output_filename hasn't
424 written anything yet. */
425 const char *current_function_file
= "";
427 /* Arrays that map GCC register numbers to debugger register numbers. */
428 int mips_dbx_regno
[FIRST_PSEUDO_REGISTER
];
429 int mips_dwarf_regno
[FIRST_PSEUDO_REGISTER
];
431 /* Information about the current function's epilogue, used only while
434 /* A list of queued REG_CFA_RESTORE notes. */
437 /* The CFA is currently defined as CFA_REG + CFA_OFFSET. */
439 HOST_WIDE_INT cfa_offset
;
441 /* The offset of the CFA from the stack pointer while restoring
443 HOST_WIDE_INT cfa_restore_sp_offset
;
446 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
447 struct mips_asm_switch mips_noreorder
= { "reorder", 0 };
448 struct mips_asm_switch mips_nomacro
= { "macro", 0 };
449 struct mips_asm_switch mips_noat
= { "at", 0 };
451 /* True if we're writing out a branch-likely instruction rather than a
453 static bool mips_branch_likely
;
455 /* The current instruction-set architecture. */
456 enum processor mips_arch
;
457 const struct mips_cpu_info
*mips_arch_info
;
459 /* The processor that we should tune the code for. */
460 enum processor mips_tune
;
461 const struct mips_cpu_info
*mips_tune_info
;
463 /* The ISA level associated with mips_arch. */
466 /* The ISA revision level. This is 0 for MIPS I to V and N for
470 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
471 static const struct mips_cpu_info
*mips_isa_option_info
;
473 /* Which cost information to use. */
474 static const struct mips_rtx_cost_data
*mips_cost
;
476 /* The ambient target flags, excluding MASK_MIPS16. */
477 static int mips_base_target_flags
;
479 /* The default compression mode. */
480 unsigned int mips_base_compression_flags
;
482 /* The ambient values of other global variables. */
483 static int mips_base_schedule_insns
; /* flag_schedule_insns */
484 static int mips_base_reorder_blocks_and_partition
; /* flag_reorder... */
485 static int mips_base_move_loop_invariants
; /* flag_move_loop_invariants */
486 static int mips_base_align_loops
; /* align_loops */
487 static int mips_base_align_jumps
; /* align_jumps */
488 static int mips_base_align_functions
; /* align_functions */
490 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
491 bool mips_hard_regno_mode_ok
[(int) MAX_MACHINE_MODE
][FIRST_PSEUDO_REGISTER
];
493 /* Index C is true if character C is a valid PRINT_OPERAND punctation
495 static bool mips_print_operand_punct
[256];
497 static GTY (()) int mips_output_filename_first_time
= 1;
499 /* mips_split_p[X] is true if symbols of type X can be split by
500 mips_split_symbol. */
501 bool mips_split_p
[NUM_SYMBOL_TYPES
];
503 /* mips_split_hi_p[X] is true if the high parts of symbols of type X
504 can be split by mips_split_symbol. */
505 bool mips_split_hi_p
[NUM_SYMBOL_TYPES
];
507 /* mips_use_pcrel_pool_p[X] is true if symbols of type X should be
508 forced into a PC-relative constant pool. */
509 bool mips_use_pcrel_pool_p
[NUM_SYMBOL_TYPES
];
511 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
512 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
513 if they are matched by a special .md file pattern. */
514 const char *mips_lo_relocs
[NUM_SYMBOL_TYPES
];
516 /* Likewise for HIGHs. */
517 const char *mips_hi_relocs
[NUM_SYMBOL_TYPES
];
519 /* Target state for MIPS16. */
520 struct target_globals
*mips16_globals
;
522 /* Target state for MICROMIPS. */
523 struct target_globals
*micromips_globals
;
525 /* Cached value of can_issue_more. This is cached in mips_variable_issue hook
526 and returned from mips_sched_reorder2. */
527 static int cached_can_issue_more
;
529 /* The stubs for various MIPS16 support functions, if used. */
530 static mips_one_only_stub
*mips16_rdhwr_stub
;
531 static mips_one_only_stub
*mips16_get_fcsr_stub
;
532 static mips_one_only_stub
*mips16_set_fcsr_stub
;
534 /* Index R is the smallest register class that contains register R. */
535 const enum reg_class mips_regno_to_class
[FIRST_PSEUDO_REGISTER
] = {
536 LEA_REGS
, LEA_REGS
, M16_STORE_REGS
, V1_REG
,
537 M16_STORE_REGS
, M16_STORE_REGS
, M16_STORE_REGS
, M16_STORE_REGS
,
538 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
539 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
540 M16_REGS
, M16_STORE_REGS
, LEA_REGS
, LEA_REGS
,
541 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
542 T_REG
, PIC_FN_ADDR_REG
, LEA_REGS
, LEA_REGS
,
543 LEA_REGS
, M16_SP_REGS
, LEA_REGS
, LEA_REGS
,
545 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
546 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
547 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
548 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
549 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
550 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
551 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
552 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
553 MD0_REG
, MD1_REG
, NO_REGS
, ST_REGS
,
554 ST_REGS
, ST_REGS
, ST_REGS
, ST_REGS
,
555 ST_REGS
, ST_REGS
, ST_REGS
, NO_REGS
,
556 NO_REGS
, FRAME_REGS
, FRAME_REGS
, NO_REGS
,
557 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
558 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
559 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
560 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
561 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
562 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
563 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
564 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
565 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
566 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
567 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
568 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
569 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
570 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
571 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
572 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
573 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
574 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
575 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
576 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
577 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
578 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
579 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
580 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
581 DSP_ACC_REGS
, DSP_ACC_REGS
, DSP_ACC_REGS
, DSP_ACC_REGS
,
582 DSP_ACC_REGS
, DSP_ACC_REGS
, ALL_REGS
, ALL_REGS
,
583 ALL_REGS
, ALL_REGS
, ALL_REGS
, ALL_REGS
586 static tree
mips_handle_interrupt_attr (tree
*, tree
, tree
, int, bool *);
587 static tree
mips_handle_use_shadow_register_set_attr (tree
*, tree
, tree
, int,
590 /* The value of TARGET_ATTRIBUTE_TABLE. */
591 static const struct attribute_spec mips_attribute_table
[] = {
592 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
594 { "long_call", 0, 0, false, true, true, NULL
, false },
595 { "far", 0, 0, false, true, true, NULL
, false },
596 { "near", 0, 0, false, true, true, NULL
, false },
597 /* We would really like to treat "mips16" and "nomips16" as type
598 attributes, but GCC doesn't provide the hooks we need to support
599 the right conversion rules. As declaration attributes, they affect
600 code generation but don't carry other semantics. */
601 { "mips16", 0, 0, true, false, false, NULL
, false },
602 { "nomips16", 0, 0, true, false, false, NULL
, false },
603 { "micromips", 0, 0, true, false, false, NULL
, false },
604 { "nomicromips", 0, 0, true, false, false, NULL
, false },
605 { "nocompression", 0, 0, true, false, false, NULL
, false },
606 /* Allow functions to be specified as interrupt handlers */
607 { "interrupt", 0, 1, false, true, true, mips_handle_interrupt_attr
,
609 { "use_shadow_register_set", 0, 1, false, true, true,
610 mips_handle_use_shadow_register_set_attr
, false },
611 { "keep_interrupts_masked", 0, 0, false, true, true, NULL
, false },
612 { "use_debug_exception_return", 0, 0, false, true, true, NULL
, false },
613 { NULL
, 0, 0, false, false, false, NULL
, false }
616 /* A table describing all the processors GCC knows about; see
617 mips-cpus.def for details. */
618 static const struct mips_cpu_info mips_cpu_info_table
[] = {
619 #define MIPS_CPU(NAME, CPU, ISA, FLAGS) \
620 { NAME, CPU, ISA, FLAGS },
621 #include "mips-cpus.def"
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  Expands to the field initializers of a
   struct mips_rtx_cost_data, in declaration order.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                      2,                  /* branch_cost */  \
                      4                   /* memory_latency */
/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  Expands to the five
   floating-point field initializers of a struct mips_rtx_cost_data.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */
647 /* Costs to use when optimizing for size. */
648 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size
= {
649 COSTS_N_INSNS (1), /* fp_add */
650 COSTS_N_INSNS (1), /* fp_mult_sf */
651 COSTS_N_INSNS (1), /* fp_mult_df */
652 COSTS_N_INSNS (1), /* fp_div_sf */
653 COSTS_N_INSNS (1), /* fp_div_df */
654 COSTS_N_INSNS (1), /* int_mult_si */
655 COSTS_N_INSNS (1), /* int_mult_di */
656 COSTS_N_INSNS (1), /* int_div_si */
657 COSTS_N_INSNS (1), /* int_div_di */
659 4 /* memory_latency */
662 /* Costs to use when optimizing for speed, indexed by processor. */
663 static const struct mips_rtx_cost_data
664 mips_rtx_cost_data
[NUM_PROCESSOR_VALUES
] = {
666 COSTS_N_INSNS (2), /* fp_add */
667 COSTS_N_INSNS (4), /* fp_mult_sf */
668 COSTS_N_INSNS (5), /* fp_mult_df */
669 COSTS_N_INSNS (12), /* fp_div_sf */
670 COSTS_N_INSNS (19), /* fp_div_df */
671 COSTS_N_INSNS (12), /* int_mult_si */
672 COSTS_N_INSNS (12), /* int_mult_di */
673 COSTS_N_INSNS (35), /* int_div_si */
674 COSTS_N_INSNS (35), /* int_div_di */
676 4 /* memory_latency */
680 COSTS_N_INSNS (6), /* int_mult_si */
681 COSTS_N_INSNS (6), /* int_mult_di */
682 COSTS_N_INSNS (36), /* int_div_si */
683 COSTS_N_INSNS (36), /* int_div_di */
685 4 /* memory_latency */
689 COSTS_N_INSNS (36), /* int_mult_si */
690 COSTS_N_INSNS (36), /* int_mult_di */
691 COSTS_N_INSNS (37), /* int_div_si */
692 COSTS_N_INSNS (37), /* int_div_di */
694 4 /* memory_latency */
698 COSTS_N_INSNS (4), /* int_mult_si */
699 COSTS_N_INSNS (11), /* int_mult_di */
700 COSTS_N_INSNS (36), /* int_div_si */
701 COSTS_N_INSNS (68), /* int_div_di */
703 4 /* memory_latency */
706 COSTS_N_INSNS (4), /* fp_add */
707 COSTS_N_INSNS (4), /* fp_mult_sf */
708 COSTS_N_INSNS (5), /* fp_mult_df */
709 COSTS_N_INSNS (17), /* fp_div_sf */
710 COSTS_N_INSNS (32), /* fp_div_df */
711 COSTS_N_INSNS (4), /* int_mult_si */
712 COSTS_N_INSNS (11), /* int_mult_di */
713 COSTS_N_INSNS (36), /* int_div_si */
714 COSTS_N_INSNS (68), /* int_div_di */
716 4 /* memory_latency */
719 COSTS_N_INSNS (4), /* fp_add */
720 COSTS_N_INSNS (4), /* fp_mult_sf */
721 COSTS_N_INSNS (5), /* fp_mult_df */
722 COSTS_N_INSNS (17), /* fp_div_sf */
723 COSTS_N_INSNS (32), /* fp_div_df */
724 COSTS_N_INSNS (4), /* int_mult_si */
725 COSTS_N_INSNS (7), /* int_mult_di */
726 COSTS_N_INSNS (42), /* int_div_si */
727 COSTS_N_INSNS (72), /* int_div_di */
729 4 /* memory_latency */
733 COSTS_N_INSNS (5), /* int_mult_si */
734 COSTS_N_INSNS (5), /* int_mult_di */
735 COSTS_N_INSNS (41), /* int_div_si */
736 COSTS_N_INSNS (41), /* int_div_di */
738 4 /* memory_latency */
741 COSTS_N_INSNS (8), /* fp_add */
742 COSTS_N_INSNS (8), /* fp_mult_sf */
743 COSTS_N_INSNS (10), /* fp_mult_df */
744 COSTS_N_INSNS (34), /* fp_div_sf */
745 COSTS_N_INSNS (64), /* fp_div_df */
746 COSTS_N_INSNS (5), /* int_mult_si */
747 COSTS_N_INSNS (5), /* int_mult_di */
748 COSTS_N_INSNS (41), /* int_div_si */
749 COSTS_N_INSNS (41), /* int_div_di */
751 4 /* memory_latency */
754 COSTS_N_INSNS (4), /* fp_add */
755 COSTS_N_INSNS (4), /* fp_mult_sf */
756 COSTS_N_INSNS (5), /* fp_mult_df */
757 COSTS_N_INSNS (17), /* fp_div_sf */
758 COSTS_N_INSNS (32), /* fp_div_df */
759 COSTS_N_INSNS (5), /* int_mult_si */
760 COSTS_N_INSNS (5), /* int_mult_di */
761 COSTS_N_INSNS (41), /* int_div_si */
762 COSTS_N_INSNS (41), /* int_div_di */
764 4 /* memory_latency */
768 COSTS_N_INSNS (5), /* int_mult_si */
769 COSTS_N_INSNS (5), /* int_mult_di */
770 COSTS_N_INSNS (41), /* int_div_si */
771 COSTS_N_INSNS (41), /* int_div_di */
773 4 /* memory_latency */
776 COSTS_N_INSNS (8), /* fp_add */
777 COSTS_N_INSNS (8), /* fp_mult_sf */
778 COSTS_N_INSNS (10), /* fp_mult_df */
779 COSTS_N_INSNS (34), /* fp_div_sf */
780 COSTS_N_INSNS (64), /* fp_div_df */
781 COSTS_N_INSNS (5), /* int_mult_si */
782 COSTS_N_INSNS (5), /* int_mult_di */
783 COSTS_N_INSNS (41), /* int_div_si */
784 COSTS_N_INSNS (41), /* int_div_di */
786 4 /* memory_latency */
789 COSTS_N_INSNS (4), /* fp_add */
790 COSTS_N_INSNS (4), /* fp_mult_sf */
791 COSTS_N_INSNS (5), /* fp_mult_df */
792 COSTS_N_INSNS (17), /* fp_div_sf */
793 COSTS_N_INSNS (32), /* fp_div_df */
794 COSTS_N_INSNS (5), /* int_mult_si */
795 COSTS_N_INSNS (5), /* int_mult_di */
796 COSTS_N_INSNS (41), /* int_div_si */
797 COSTS_N_INSNS (41), /* int_div_di */
799 4 /* memory_latency */
802 COSTS_N_INSNS (6), /* fp_add */
803 COSTS_N_INSNS (6), /* fp_mult_sf */
804 COSTS_N_INSNS (7), /* fp_mult_df */
805 COSTS_N_INSNS (25), /* fp_div_sf */
806 COSTS_N_INSNS (48), /* fp_div_df */
807 COSTS_N_INSNS (5), /* int_mult_si */
808 COSTS_N_INSNS (5), /* int_mult_di */
809 COSTS_N_INSNS (41), /* int_div_si */
810 COSTS_N_INSNS (41), /* int_div_di */
812 4 /* memory_latency */
829 COSTS_N_INSNS (5), /* int_mult_si */
830 COSTS_N_INSNS (5), /* int_mult_di */
831 COSTS_N_INSNS (72), /* int_div_si */
832 COSTS_N_INSNS (72), /* int_div_di */
834 4 /* memory_latency */
839 COSTS_N_INSNS (6), /* int_mult_si */
840 COSTS_N_INSNS (6), /* int_mult_di */
841 COSTS_N_INSNS (18), /* int_div_si */
842 COSTS_N_INSNS (35), /* int_div_di */
844 4 /* memory_latency */
848 COSTS_N_INSNS (6), /* fp_add */
849 COSTS_N_INSNS (6), /* fp_mult_sf */
850 COSTS_N_INSNS (7), /* fp_mult_df */
851 COSTS_N_INSNS (25), /* fp_div_sf */
852 COSTS_N_INSNS (48), /* fp_div_df */
853 COSTS_N_INSNS (6), /* int_mult_si */
854 COSTS_N_INSNS (6), /* int_mult_di */
855 COSTS_N_INSNS (18), /* int_div_si */
856 COSTS_N_INSNS (35), /* int_div_di */
858 4 /* memory_latency */
861 COSTS_N_INSNS (2), /* fp_add */
862 COSTS_N_INSNS (4), /* fp_mult_sf */
863 COSTS_N_INSNS (5), /* fp_mult_df */
864 COSTS_N_INSNS (12), /* fp_div_sf */
865 COSTS_N_INSNS (19), /* fp_div_df */
866 COSTS_N_INSNS (2), /* int_mult_si */
867 COSTS_N_INSNS (2), /* int_mult_di */
868 COSTS_N_INSNS (35), /* int_div_si */
869 COSTS_N_INSNS (35), /* int_div_di */
871 4 /* memory_latency */
874 COSTS_N_INSNS (3), /* fp_add */
875 COSTS_N_INSNS (5), /* fp_mult_sf */
876 COSTS_N_INSNS (6), /* fp_mult_df */
877 COSTS_N_INSNS (15), /* fp_div_sf */
878 COSTS_N_INSNS (16), /* fp_div_df */
879 COSTS_N_INSNS (17), /* int_mult_si */
880 COSTS_N_INSNS (17), /* int_mult_di */
881 COSTS_N_INSNS (38), /* int_div_si */
882 COSTS_N_INSNS (38), /* int_div_di */
884 6 /* memory_latency */
887 COSTS_N_INSNS (6), /* fp_add */
888 COSTS_N_INSNS (7), /* fp_mult_sf */
889 COSTS_N_INSNS (8), /* fp_mult_df */
890 COSTS_N_INSNS (23), /* fp_div_sf */
891 COSTS_N_INSNS (36), /* fp_div_df */
892 COSTS_N_INSNS (10), /* int_mult_si */
893 COSTS_N_INSNS (10), /* int_mult_di */
894 COSTS_N_INSNS (69), /* int_div_si */
895 COSTS_N_INSNS (69), /* int_div_di */
897 6 /* memory_latency */
909 /* The only costs that appear to be updated here are
910 integer multiplication. */
912 COSTS_N_INSNS (4), /* int_mult_si */
913 COSTS_N_INSNS (6), /* int_mult_di */
914 COSTS_N_INSNS (69), /* int_div_si */
915 COSTS_N_INSNS (69), /* int_div_di */
917 4 /* memory_latency */
932 COSTS_N_INSNS (6), /* fp_add */
933 COSTS_N_INSNS (4), /* fp_mult_sf */
934 COSTS_N_INSNS (5), /* fp_mult_df */
935 COSTS_N_INSNS (23), /* fp_div_sf */
936 COSTS_N_INSNS (36), /* fp_div_df */
937 COSTS_N_INSNS (5), /* int_mult_si */
938 COSTS_N_INSNS (5), /* int_mult_di */
939 COSTS_N_INSNS (36), /* int_div_si */
940 COSTS_N_INSNS (36), /* int_div_di */
942 4 /* memory_latency */
945 COSTS_N_INSNS (6), /* fp_add */
946 COSTS_N_INSNS (5), /* fp_mult_sf */
947 COSTS_N_INSNS (6), /* fp_mult_df */
948 COSTS_N_INSNS (30), /* fp_div_sf */
949 COSTS_N_INSNS (59), /* fp_div_df */
950 COSTS_N_INSNS (3), /* int_mult_si */
951 COSTS_N_INSNS (4), /* int_mult_di */
952 COSTS_N_INSNS (42), /* int_div_si */
953 COSTS_N_INSNS (74), /* int_div_di */
955 4 /* memory_latency */
958 COSTS_N_INSNS (6), /* fp_add */
959 COSTS_N_INSNS (5), /* fp_mult_sf */
960 COSTS_N_INSNS (6), /* fp_mult_df */
961 COSTS_N_INSNS (30), /* fp_div_sf */
962 COSTS_N_INSNS (59), /* fp_div_df */
963 COSTS_N_INSNS (5), /* int_mult_si */
964 COSTS_N_INSNS (9), /* int_mult_di */
965 COSTS_N_INSNS (42), /* int_div_si */
966 COSTS_N_INSNS (74), /* int_div_di */
968 4 /* memory_latency */
971 COSTS_N_INSNS (4), /* fp_add */
972 COSTS_N_INSNS (4), /* fp_mult_sf */
973 COSTS_N_INSNS (256), /* fp_mult_df */
974 COSTS_N_INSNS (8), /* fp_div_sf */
975 COSTS_N_INSNS (256), /* fp_div_df */
976 COSTS_N_INSNS (4), /* int_mult_si */
977 COSTS_N_INSNS (256), /* int_mult_di */
978 COSTS_N_INSNS (37), /* int_div_si */
979 COSTS_N_INSNS (256), /* int_div_di */
981 4 /* memory_latency */
984 /* The only costs that are changed here are
985 integer multiplication. */
986 COSTS_N_INSNS (6), /* fp_add */
987 COSTS_N_INSNS (7), /* fp_mult_sf */
988 COSTS_N_INSNS (8), /* fp_mult_df */
989 COSTS_N_INSNS (23), /* fp_div_sf */
990 COSTS_N_INSNS (36), /* fp_div_df */
991 COSTS_N_INSNS (5), /* int_mult_si */
992 COSTS_N_INSNS (9), /* int_mult_di */
993 COSTS_N_INSNS (69), /* int_div_si */
994 COSTS_N_INSNS (69), /* int_div_di */
996 4 /* memory_latency */
1002 /* The only costs that are changed here are
1003 integer multiplication. */
1004 COSTS_N_INSNS (6), /* fp_add */
1005 COSTS_N_INSNS (7), /* fp_mult_sf */
1006 COSTS_N_INSNS (8), /* fp_mult_df */
1007 COSTS_N_INSNS (23), /* fp_div_sf */
1008 COSTS_N_INSNS (36), /* fp_div_df */
1009 COSTS_N_INSNS (3), /* int_mult_si */
1010 COSTS_N_INSNS (8), /* int_mult_di */
1011 COSTS_N_INSNS (69), /* int_div_si */
1012 COSTS_N_INSNS (69), /* int_div_di */
1013 1, /* branch_cost */
1014 4 /* memory_latency */
1017 COSTS_N_INSNS (2), /* fp_add */
1018 COSTS_N_INSNS (2), /* fp_mult_sf */
1019 COSTS_N_INSNS (2), /* fp_mult_df */
1020 COSTS_N_INSNS (12), /* fp_div_sf */
1021 COSTS_N_INSNS (19), /* fp_div_df */
1022 COSTS_N_INSNS (5), /* int_mult_si */
1023 COSTS_N_INSNS (9), /* int_mult_di */
1024 COSTS_N_INSNS (34), /* int_div_si */
1025 COSTS_N_INSNS (66), /* int_div_di */
1026 1, /* branch_cost */
1027 4 /* memory_latency */
1030 /* These costs are the same as the SB-1A below. */
1031 COSTS_N_INSNS (4), /* fp_add */
1032 COSTS_N_INSNS (4), /* fp_mult_sf */
1033 COSTS_N_INSNS (4), /* fp_mult_df */
1034 COSTS_N_INSNS (24), /* fp_div_sf */
1035 COSTS_N_INSNS (32), /* fp_div_df */
1036 COSTS_N_INSNS (3), /* int_mult_si */
1037 COSTS_N_INSNS (4), /* int_mult_di */
1038 COSTS_N_INSNS (36), /* int_div_si */
1039 COSTS_N_INSNS (68), /* int_div_di */
1040 1, /* branch_cost */
1041 4 /* memory_latency */
1044 /* These costs are the same as the SB-1 above. */
1045 COSTS_N_INSNS (4), /* fp_add */
1046 COSTS_N_INSNS (4), /* fp_mult_sf */
1047 COSTS_N_INSNS (4), /* fp_mult_df */
1048 COSTS_N_INSNS (24), /* fp_div_sf */
1049 COSTS_N_INSNS (32), /* fp_div_df */
1050 COSTS_N_INSNS (3), /* int_mult_si */
1051 COSTS_N_INSNS (4), /* int_mult_di */
1052 COSTS_N_INSNS (36), /* int_div_si */
1053 COSTS_N_INSNS (68), /* int_div_di */
1054 1, /* branch_cost */
1055 4 /* memory_latency */
1062 COSTS_N_INSNS (8), /* int_mult_si */
1063 COSTS_N_INSNS (8), /* int_mult_di */
1064 COSTS_N_INSNS (72), /* int_div_si */
1065 COSTS_N_INSNS (72), /* int_div_di */
1066 1, /* branch_cost */
1067 4 /* memory_latency */
1070 /* These costs are the same as 5KF above. */
1071 COSTS_N_INSNS (4), /* fp_add */
1072 COSTS_N_INSNS (4), /* fp_mult_sf */
1073 COSTS_N_INSNS (5), /* fp_mult_df */
1074 COSTS_N_INSNS (17), /* fp_div_sf */
1075 COSTS_N_INSNS (32), /* fp_div_df */
1076 COSTS_N_INSNS (4), /* int_mult_si */
1077 COSTS_N_INSNS (11), /* int_mult_di */
1078 COSTS_N_INSNS (36), /* int_div_si */
1079 COSTS_N_INSNS (68), /* int_div_di */
1080 1, /* branch_cost */
1081 4 /* memory_latency */
1084 COSTS_N_INSNS (4), /* fp_add */
1085 COSTS_N_INSNS (5), /* fp_mult_sf */
1086 COSTS_N_INSNS (5), /* fp_mult_df */
1087 COSTS_N_INSNS (17), /* fp_div_sf */
1088 COSTS_N_INSNS (17), /* fp_div_df */
1089 COSTS_N_INSNS (5), /* int_mult_si */
1090 COSTS_N_INSNS (5), /* int_mult_di */
1091 COSTS_N_INSNS (8), /* int_div_si */
1092 COSTS_N_INSNS (8), /* int_div_di */
1093 2, /* branch_cost */
1094 4 /* memory_latency */
1097 COSTS_N_INSNS (4), /* fp_add */
1098 COSTS_N_INSNS (4), /* fp_mult_sf */
1099 COSTS_N_INSNS (5), /* fp_mult_df */
1100 COSTS_N_INSNS (17), /* fp_div_sf */
1101 COSTS_N_INSNS (32), /* fp_div_df */
1102 COSTS_N_INSNS (5), /* int_mult_si */
1103 COSTS_N_INSNS (5), /* int_mult_di */
1104 COSTS_N_INSNS (34), /* int_div_si */
1105 COSTS_N_INSNS (68), /* int_div_di */
1106 1, /* branch_cost */
1107 4 /* memory_latency */
1110 COSTS_N_INSNS (4), /* fp_add */
1111 COSTS_N_INSNS (5), /* fp_mult_sf */
1112 COSTS_N_INSNS (5), /* fp_mult_df */
1113 COSTS_N_INSNS (32), /* fp_div_sf */
1114 COSTS_N_INSNS (32), /* fp_div_df */
1115 COSTS_N_INSNS (5), /* int_mult_si */
1116 COSTS_N_INSNS (5), /* int_mult_di */
1117 COSTS_N_INSNS (36), /* int_div_si */
1118 COSTS_N_INSNS (36), /* int_div_di */
1119 2, /* branch_cost */
1120 4 /* memory_latency */
1124 static rtx
mips_find_pic_call_symbol (rtx_insn
*, rtx
, bool);
1125 static int mips_register_move_cost (machine_mode
, reg_class_t
,
1127 static unsigned int mips_function_arg_boundary (machine_mode
, const_tree
);
1128 static machine_mode
mips_get_reg_raw_mode (int regno
);
1130 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1131 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1132 static GTY (()) hash_map
<nofree_string_hash
, bool> *mflip_mips16_htab
;
1134 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1135 mode, false if it should next add an attribute for the opposite mode. */
1136 static GTY(()) bool mips16_flipper
;
1138 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1139 for -mflip-mips16. Return true if it should use "mips16" and false if
1140 it should use "nomips16". */
1143 mflip_mips16_use_mips16_p (tree decl
)
1146 bool base_is_mips16
= (mips_base_compression_flags
& MASK_MIPS16
) != 0;
1148 /* Use the opposite of the command-line setting for anonymous decls. */
1149 if (!DECL_NAME (decl
))
1150 return !base_is_mips16
;
1152 if (!mflip_mips16_htab
)
1153 mflip_mips16_htab
= hash_map
<nofree_string_hash
, bool>::create_ggc (37);
1155 name
= IDENTIFIER_POINTER (DECL_NAME (decl
));
1158 bool *slot
= &mflip_mips16_htab
->get_or_insert (name
, &existed
);
1161 mips16_flipper
= !mips16_flipper
;
1162 *slot
= mips16_flipper
? !base_is_mips16
: base_is_mips16
;
1167 /* Predicates to test for presence of "near" and "far"/"long_call"
1168 attributes on the given TYPE. */
1171 mips_near_type_p (const_tree type
)
1173 return lookup_attribute ("near", TYPE_ATTRIBUTES (type
)) != NULL
;
1177 mips_far_type_p (const_tree type
)
1179 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type
)) != NULL
1180 || lookup_attribute ("far", TYPE_ATTRIBUTES (type
)) != NULL
);
1184 /* Check if the interrupt attribute is set for a function. */
1187 mips_interrupt_type_p (tree type
)
1189 return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type
)) != NULL
;
1192 /* Return the mask for the "interrupt" attribute. */
1194 static enum mips_int_mask
1195 mips_interrupt_mask (tree type
)
1197 tree attr
= lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type
));
1201 /* For missing attributes or no arguments then return 'eic' as a safe
1204 return INT_MASK_EIC
;
1206 args
= TREE_VALUE (attr
);
1209 return INT_MASK_EIC
;
1211 cst
= TREE_VALUE (args
);
1213 if (strcmp (TREE_STRING_POINTER (cst
), "eic") == 0)
1214 return INT_MASK_EIC
;
1216 /* The validation code in mips_handle_interrupt_attr guarantees that the
1217 argument is now in the form:
1218 vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5). */
1219 str
= TREE_STRING_POINTER (cst
);
1221 gcc_assert (strlen (str
) == strlen ("vector=sw0"));
1224 return (enum mips_int_mask
) (INT_MASK_SW0
+ (str
[9] - '0'));
1226 return (enum mips_int_mask
) (INT_MASK_HW0
+ (str
[9] - '0'));
1229 /* Return the mips_shadow_set if the "use_shadow_register_set" attribute is
1230 set for a function. */
1232 static enum mips_shadow_set
1233 mips_use_shadow_register_set (tree type
)
1235 tree attr
= lookup_attribute ("use_shadow_register_set",
1236 TYPE_ATTRIBUTES (type
));
1239 /* The validation code in mips_handle_use_shadow_register_set_attr guarantees
1240 that if an argument is present then it means: Assume the shadow register
1241 set has a valid stack pointer in it. */
1243 return SHADOW_SET_NO
;
1245 args
= TREE_VALUE (attr
);
1248 return SHADOW_SET_YES
;
1250 return SHADOW_SET_INTSTACK
;
1253 /* Check if the attribute to keep interrupts masked is set for a function. */
1256 mips_keep_interrupts_masked_p (tree type
)
1258 return lookup_attribute ("keep_interrupts_masked",
1259 TYPE_ATTRIBUTES (type
)) != NULL
;
1262 /* Check if the attribute to use debug exception return is set for
1266 mips_use_debug_exception_return_p (tree type
)
1268 return lookup_attribute ("use_debug_exception_return",
1269 TYPE_ATTRIBUTES (type
)) != NULL
;
1272 /* Return the set of compression modes that are explicitly required
1273 by the attributes in ATTRIBUTES. */
1276 mips_get_compress_on_flags (tree attributes
)
1278 unsigned int flags
= 0;
1280 if (lookup_attribute ("mips16", attributes
) != NULL
)
1281 flags
|= MASK_MIPS16
;
1283 if (lookup_attribute ("micromips", attributes
) != NULL
)
1284 flags
|= MASK_MICROMIPS
;
1289 /* Return the set of compression modes that are explicitly forbidden
1290 by the attributes in ATTRIBUTES. */
1293 mips_get_compress_off_flags (tree attributes
)
1295 unsigned int flags
= 0;
1297 if (lookup_attribute ("nocompression", attributes
) != NULL
)
1298 flags
|= MASK_MIPS16
| MASK_MICROMIPS
;
1300 if (lookup_attribute ("nomips16", attributes
) != NULL
)
1301 flags
|= MASK_MIPS16
;
1303 if (lookup_attribute ("nomicromips", attributes
) != NULL
)
1304 flags
|= MASK_MICROMIPS
;
1309 /* Return the compression mode that should be used for function DECL.
1310 Return the ambient setting if DECL is null. */
1313 mips_get_compress_mode (tree decl
)
1315 unsigned int flags
, force_on
;
1317 flags
= mips_base_compression_flags
;
1320 /* Nested functions must use the same frame pointer as their
1321 parent and must therefore use the same ISA mode. */
1322 tree parent
= decl_function_context (decl
);
1325 force_on
= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl
));
1328 flags
&= ~mips_get_compress_off_flags (DECL_ATTRIBUTES (decl
));
1333 /* Return the attribute name associated with MASK_MIPS16 and MASK_MICROMIPS
1337 mips_get_compress_on_name (unsigned int flags
)
1339 if (flags
== MASK_MIPS16
)
1344 /* Return the attribute name that forbids MASK_MIPS16 and MASK_MICROMIPS
1348 mips_get_compress_off_name (unsigned int flags
)
1350 if (flags
== MASK_MIPS16
)
1352 if (flags
== MASK_MICROMIPS
)
1353 return "nomicromips";
1354 return "nocompression";
1357 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1360 mips_comp_type_attributes (const_tree type1
, const_tree type2
)
1362 /* Disallow mixed near/far attributes. */
1363 if (mips_far_type_p (type1
) && mips_near_type_p (type2
))
1365 if (mips_near_type_p (type1
) && mips_far_type_p (type2
))
1370 /* Implement TARGET_INSERT_ATTRIBUTES. */
1373 mips_insert_attributes (tree decl
, tree
*attributes
)
1376 unsigned int compression_flags
, nocompression_flags
;
1378 /* Check for "mips16" and "nomips16" attributes. */
1379 compression_flags
= mips_get_compress_on_flags (*attributes
);
1380 nocompression_flags
= mips_get_compress_off_flags (*attributes
);
1382 if (TREE_CODE (decl
) != FUNCTION_DECL
)
1384 if (nocompression_flags
)
1385 error ("%qs attribute only applies to functions",
1386 mips_get_compress_off_name (nocompression_flags
));
1388 if (compression_flags
)
1389 error ("%qs attribute only applies to functions",
1390 mips_get_compress_on_name (nocompression_flags
));
1394 compression_flags
|= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl
));
1395 nocompression_flags
|=
1396 mips_get_compress_off_flags (DECL_ATTRIBUTES (decl
));
1398 if (compression_flags
&& nocompression_flags
)
1399 error ("%qE cannot have both %qs and %qs attributes",
1400 DECL_NAME (decl
), mips_get_compress_on_name (compression_flags
),
1401 mips_get_compress_off_name (nocompression_flags
));
1403 if (compression_flags
& MASK_MIPS16
1404 && compression_flags
& MASK_MICROMIPS
)
1405 error ("%qE cannot have both %qs and %qs attributes",
1406 DECL_NAME (decl
), "mips16", "micromips");
1408 if (TARGET_FLIP_MIPS16
1409 && !DECL_ARTIFICIAL (decl
)
1410 && compression_flags
== 0
1411 && nocompression_flags
== 0)
1413 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1414 "mips16" attribute, arbitrarily pick one. We must pick the same
1415 setting for duplicate declarations of a function. */
1416 name
= mflip_mips16_use_mips16_p (decl
) ? "mips16" : "nomips16";
1417 *attributes
= tree_cons (get_identifier (name
), NULL
, *attributes
);
1418 name
= "nomicromips";
1419 *attributes
= tree_cons (get_identifier (name
), NULL
, *attributes
);
1424 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1427 mips_merge_decl_attributes (tree olddecl
, tree newdecl
)
1431 diff
= (mips_get_compress_on_flags (DECL_ATTRIBUTES (olddecl
))
1432 ^ mips_get_compress_on_flags (DECL_ATTRIBUTES (newdecl
)));
1434 error ("%qE redeclared with conflicting %qs attributes",
1435 DECL_NAME (newdecl
), mips_get_compress_on_name (diff
));
1437 diff
= (mips_get_compress_off_flags (DECL_ATTRIBUTES (olddecl
))
1438 ^ mips_get_compress_off_flags (DECL_ATTRIBUTES (newdecl
)));
1440 error ("%qE redeclared with conflicting %qs attributes",
1441 DECL_NAME (newdecl
), mips_get_compress_off_name (diff
));
1443 return merge_attributes (DECL_ATTRIBUTES (olddecl
),
1444 DECL_ATTRIBUTES (newdecl
));
1447 /* Implement TARGET_CAN_INLINE_P. */
1450 mips_can_inline_p (tree caller
, tree callee
)
1452 if (mips_get_compress_mode (callee
) != mips_get_compress_mode (caller
))
1454 return default_target_can_inline_p (caller
, callee
);
1457 /* Handle an "interrupt" attribute with an optional argument. */
1460 mips_handle_interrupt_attr (tree
*node ATTRIBUTE_UNUSED
, tree name
, tree args
,
1461 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
1463 /* Check for an argument. */
1464 if (is_attribute_p ("interrupt", name
) && args
!= NULL
)
1468 cst
= TREE_VALUE (args
);
1469 if (TREE_CODE (cst
) != STRING_CST
)
1471 warning (OPT_Wattributes
,
1472 "%qE attribute requires a string argument",
1474 *no_add_attrs
= true;
1476 else if (strcmp (TREE_STRING_POINTER (cst
), "eic") != 0
1477 && strncmp (TREE_STRING_POINTER (cst
), "vector=", 7) != 0)
1479 warning (OPT_Wattributes
,
1480 "argument to %qE attribute is neither eic, nor "
1481 "vector=<line>", name
);
1482 *no_add_attrs
= true;
1484 else if (strncmp (TREE_STRING_POINTER (cst
), "vector=", 7) == 0)
1486 const char *arg
= TREE_STRING_POINTER (cst
) + 7;
1488 /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5. */
1489 if (strlen (arg
) != 3
1490 || (arg
[0] != 's' && arg
[0] != 'h')
1492 || (arg
[0] == 's' && arg
[2] != '0' && arg
[2] != '1')
1493 || (arg
[0] == 'h' && (arg
[2] < '0' || arg
[2] > '5')))
1495 warning (OPT_Wattributes
,
1496 "interrupt vector to %qE attribute is not "
1497 "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)",
1499 *no_add_attrs
= true;
1509 /* Handle a "use_shadow_register_set" attribute with an optional argument. */
1512 mips_handle_use_shadow_register_set_attr (tree
*node ATTRIBUTE_UNUSED
,
1513 tree name
, tree args
,
1514 int flags ATTRIBUTE_UNUSED
,
1517 /* Check for an argument. */
1518 if (is_attribute_p ("use_shadow_register_set", name
) && args
!= NULL
)
1522 cst
= TREE_VALUE (args
);
1523 if (TREE_CODE (cst
) != STRING_CST
)
1525 warning (OPT_Wattributes
,
1526 "%qE attribute requires a string argument",
1528 *no_add_attrs
= true;
1530 else if (strcmp (TREE_STRING_POINTER (cst
), "intstack") != 0)
1532 warning (OPT_Wattributes
,
1533 "argument to %qE attribute is not intstack", name
);
1534 *no_add_attrs
= true;
1543 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1544 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1547 mips_split_plus (rtx x
, rtx
*base_ptr
, HOST_WIDE_INT
*offset_ptr
)
1549 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1)))
1551 *base_ptr
= XEXP (x
, 0);
1552 *offset_ptr
= INTVAL (XEXP (x
, 1));
1561 static unsigned int mips_build_integer (struct mips_integer_op
*,
1562 unsigned HOST_WIDE_INT
);
1564 /* A subroutine of mips_build_integer, with the same interface.
1565 Assume that the final action in the sequence should be a left shift. */
1568 mips_build_shift (struct mips_integer_op
*codes
, HOST_WIDE_INT value
)
1570 unsigned int i
, shift
;
1572 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1573 since signed numbers are easier to load than unsigned ones. */
1575 while ((value
& 1) == 0)
1576 value
/= 2, shift
++;
1578 i
= mips_build_integer (codes
, value
);
1579 codes
[i
].code
= ASHIFT
;
1580 codes
[i
].value
= shift
;
1584 /* As for mips_build_shift, but assume that the final action will be
1585 an IOR or PLUS operation. */
1588 mips_build_lower (struct mips_integer_op
*codes
, unsigned HOST_WIDE_INT value
)
1590 unsigned HOST_WIDE_INT high
;
1593 high
= value
& ~(unsigned HOST_WIDE_INT
) 0xffff;
1594 if (!LUI_OPERAND (high
) && (value
& 0x18000) == 0x18000)
1596 /* The constant is too complex to load with a simple LUI/ORI pair,
1597 so we want to give the recursive call as many trailing zeros as
1598 possible. In this case, we know bit 16 is set and that the
1599 low 16 bits form a negative number. If we subtract that number
1600 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1601 i
= mips_build_integer (codes
, CONST_HIGH_PART (value
));
1602 codes
[i
].code
= PLUS
;
1603 codes
[i
].value
= CONST_LOW_PART (value
);
1607 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1608 bits gives a value with at least 17 trailing zeros. */
1609 i
= mips_build_integer (codes
, high
);
1610 codes
[i
].code
= IOR
;
1611 codes
[i
].value
= value
& 0xffff;
1616 /* Fill CODES with a sequence of rtl operations to load VALUE.
1617 Return the number of operations needed. */
1620 mips_build_integer (struct mips_integer_op
*codes
,
1621 unsigned HOST_WIDE_INT value
)
1623 if (SMALL_OPERAND (value
)
1624 || SMALL_OPERAND_UNSIGNED (value
)
1625 || LUI_OPERAND (value
))
1627 /* The value can be loaded with a single instruction. */
1628 codes
[0].code
= UNKNOWN
;
1629 codes
[0].value
= value
;
1632 else if ((value
& 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value
)))
1634 /* Either the constant is a simple LUI/ORI combination or its
1635 lowest bit is set. We don't want to shift in this case. */
1636 return mips_build_lower (codes
, value
);
1638 else if ((value
& 0xffff) == 0)
1640 /* The constant will need at least three actions. The lowest
1641 16 bits are clear, so the final action will be a shift. */
1642 return mips_build_shift (codes
, value
);
1646 /* The final action could be a shift, add or inclusive OR.
1647 Rather than use a complex condition to select the best
1648 approach, try both mips_build_shift and mips_build_lower
1649 and pick the one that gives the shortest sequence.
1650 Note that this case is only used once per constant. */
1651 struct mips_integer_op alt_codes
[MIPS_MAX_INTEGER_OPS
];
1652 unsigned int cost
, alt_cost
;
1654 cost
= mips_build_shift (codes
, value
);
1655 alt_cost
= mips_build_lower (alt_codes
, value
);
1656 if (alt_cost
< cost
)
1658 memcpy (codes
, alt_codes
, alt_cost
* sizeof (codes
[0]));
1665 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
1668 mips_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
1670 return mips_const_insns (x
) > 0;
1673 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
1676 mips16_stub_function (const char *name
)
1680 x
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
1681 SYMBOL_REF_FLAGS (x
) |= (SYMBOL_FLAG_EXTERNAL
| SYMBOL_FLAG_FUNCTION
);
1685 /* Return a legitimate call address for STUB, given that STUB is a MIPS16
1686 support function. */
1689 mips16_stub_call_address (mips_one_only_stub
*stub
)
1691 rtx fn
= mips16_stub_function (stub
->get_name ());
1692 SYMBOL_REF_FLAGS (fn
) |= SYMBOL_FLAG_LOCAL
;
1693 if (!call_insn_operand (fn
, VOIDmode
))
1694 fn
= force_reg (Pmode
, fn
);
1698 /* A stub for moving the thread pointer into TLS_GET_TP_REGNUM. */
1700 class mips16_rdhwr_one_only_stub
: public mips_one_only_stub
1702 virtual const char *get_name ();
1703 virtual void output_body ();
1707 mips16_rdhwr_one_only_stub::get_name ()
1709 return "__mips16_rdhwr";
1713 mips16_rdhwr_one_only_stub::output_body ()
1715 fprintf (asm_out_file
,
1717 "\t.set\tmips32r2\n"
1718 "\t.set\tnoreorder\n"
1724 /* A stub for moving the FCSR into GET_FCSR_REGNUM. */
1725 class mips16_get_fcsr_one_only_stub
: public mips_one_only_stub
1727 virtual const char *get_name ();
1728 virtual void output_body ();
1732 mips16_get_fcsr_one_only_stub::get_name ()
1734 return "__mips16_get_fcsr";
1738 mips16_get_fcsr_one_only_stub::output_body ()
1740 fprintf (asm_out_file
,
1742 "\tj\t$31\n", reg_names
[GET_FCSR_REGNUM
]);
1745 /* A stub for moving SET_FCSR_REGNUM into the FCSR. */
1746 class mips16_set_fcsr_one_only_stub
: public mips_one_only_stub
1748 virtual const char *get_name ();
1749 virtual void output_body ();
1753 mips16_set_fcsr_one_only_stub::get_name ()
1755 return "__mips16_set_fcsr";
1759 mips16_set_fcsr_one_only_stub::output_body ()
1761 fprintf (asm_out_file
,
1763 "\tj\t$31\n", reg_names
[SET_FCSR_REGNUM
]);
1766 /* Return true if symbols of type TYPE require a GOT access. */
1769 mips_got_symbol_type_p (enum mips_symbol_type type
)
1773 case SYMBOL_GOT_PAGE_OFST
:
1774 case SYMBOL_GOT_DISP
:
1782 /* Return true if X is a thread-local symbol. */
1785 mips_tls_symbol_p (rtx x
)
1787 return GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
) != 0;
1790 /* Return true if SYMBOL_REF X is associated with a global symbol
1791 (in the STB_GLOBAL sense). */
1794 mips_global_symbol_p (const_rtx x
)
1796 const_tree decl
= SYMBOL_REF_DECL (x
);
1799 return !SYMBOL_REF_LOCAL_P (x
) || SYMBOL_REF_EXTERNAL_P (x
);
1801 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1802 or weak symbols. Relocations in the object file will be against
1803 the target symbol, so it's that symbol's binding that matters here. */
1804 return DECL_P (decl
) && (TREE_PUBLIC (decl
) || DECL_WEAK (decl
));
1807 /* Return true if function X is a libgcc MIPS16 stub function. */
1810 mips16_stub_function_p (const_rtx x
)
1812 return (GET_CODE (x
) == SYMBOL_REF
1813 && strncmp (XSTR (x
, 0), "__mips16_", 9) == 0);
1816 /* Return true if function X is a locally-defined and locally-binding
1820 mips16_local_function_p (const_rtx x
)
1822 return (GET_CODE (x
) == SYMBOL_REF
1823 && SYMBOL_REF_LOCAL_P (x
)
1824 && !SYMBOL_REF_EXTERNAL_P (x
)
1825 && (mips_get_compress_mode (SYMBOL_REF_DECL (x
)) & MASK_MIPS16
));
1828 /* Return true if SYMBOL_REF X binds locally. */
1831 mips_symbol_binds_local_p (const_rtx x
)
1833 return (SYMBOL_REF_DECL (x
)
1834 ? targetm
.binds_local_p (SYMBOL_REF_DECL (x
))
1835 : SYMBOL_REF_LOCAL_P (x
));
1838 /* Return true if rtx constants of mode MODE should be put into a small
1842 mips_rtx_constant_in_small_data_p (machine_mode mode
)
1844 return (!TARGET_EMBEDDED_DATA
1845 && TARGET_LOCAL_SDATA
1846 && GET_MODE_SIZE (mode
) <= mips_small_data_threshold
);
1849 /* Return true if X should not be moved directly into register $25.
1850 We need this because many versions of GAS will treat "la $25,foo" as
1851 part of a call sequence and so allow a global "foo" to be lazily bound. */
1854 mips_dangerous_for_la25_p (rtx x
)
1856 return (!TARGET_EXPLICIT_RELOCS
1858 && GET_CODE (x
) == SYMBOL_REF
1859 && mips_global_symbol_p (x
));
1862 /* Return true if calls to X might need $25 to be valid on entry. */
1865 mips_use_pic_fn_addr_reg_p (const_rtx x
)
1867 if (!TARGET_USE_PIC_FN_ADDR_REG
)
1870 /* MIPS16 stub functions are guaranteed not to use $25. */
1871 if (mips16_stub_function_p (x
))
1874 if (GET_CODE (x
) == SYMBOL_REF
)
1876 /* If PLTs and copy relocations are available, the static linker
1877 will make sure that $25 is valid on entry to the target function. */
1878 if (TARGET_ABICALLS_PIC0
)
1881 /* Locally-defined functions use absolute accesses to set up
1882 the global pointer. */
1883 if (TARGET_ABSOLUTE_ABICALLS
1884 && mips_symbol_binds_local_p (x
)
1885 && !SYMBOL_REF_EXTERNAL_P (x
))
1892 /* Return the method that should be used to access SYMBOL_REF or
1893 LABEL_REF X in context CONTEXT. */
1895 static enum mips_symbol_type
1896 mips_classify_symbol (const_rtx x
, enum mips_symbol_context context
)
1899 return SYMBOL_GOT_DISP
;
1901 if (GET_CODE (x
) == LABEL_REF
)
1903 /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
1904 code and if we know that the label is in the current function's
1905 text section. LABEL_REFs are used for jump tables as well as
1906 text labels, so we must check whether jump tables live in the
1908 if (TARGET_MIPS16_SHORT_JUMP_TABLES
1909 && !LABEL_REF_NONLOCAL_P (x
))
1910 return SYMBOL_PC_RELATIVE
;
1912 if (TARGET_ABICALLS
&& !TARGET_ABSOLUTE_ABICALLS
)
1913 return SYMBOL_GOT_PAGE_OFST
;
1915 return SYMBOL_ABSOLUTE
;
1918 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
1920 if (SYMBOL_REF_TLS_MODEL (x
))
1923 if (CONSTANT_POOL_ADDRESS_P (x
))
1925 if (TARGET_MIPS16_TEXT_LOADS
)
1926 return SYMBOL_PC_RELATIVE
;
1928 if (TARGET_MIPS16_PCREL_LOADS
&& context
== SYMBOL_CONTEXT_MEM
)
1929 return SYMBOL_PC_RELATIVE
;
1931 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x
)))
1932 return SYMBOL_GP_RELATIVE
;
1935 /* Do not use small-data accesses for weak symbols; they may end up
1937 if (TARGET_GPOPT
&& SYMBOL_REF_SMALL_P (x
) && !SYMBOL_REF_WEAK (x
))
1938 return SYMBOL_GP_RELATIVE
;
1940 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1942 if (TARGET_ABICALLS_PIC2
1943 && !(TARGET_ABSOLUTE_ABICALLS
&& mips_symbol_binds_local_p (x
)))
1945 /* There are three cases to consider:
1947 - o32 PIC (either with or without explicit relocs)
1948 - n32/n64 PIC without explicit relocs
1949 - n32/n64 PIC with explicit relocs
1951 In the first case, both local and global accesses will use an
1952 R_MIPS_GOT16 relocation. We must correctly predict which of
1953 the two semantics (local or global) the assembler and linker
1954 will apply. The choice depends on the symbol's binding rather
1955 than its visibility.
1957 In the second case, the assembler will not use R_MIPS_GOT16
1958 relocations, but it chooses between local and global accesses
1959 in the same way as for o32 PIC.
1961 In the third case we have more freedom since both forms of
1962 access will work for any kind of symbol. However, there seems
1963 little point in doing things differently. */
1964 if (mips_global_symbol_p (x
))
1965 return SYMBOL_GOT_DISP
;
1967 return SYMBOL_GOT_PAGE_OFST
;
1970 return SYMBOL_ABSOLUTE
;
1973 /* Classify the base of symbolic expression X, given that X appears in
1976 static enum mips_symbol_type
1977 mips_classify_symbolic_expression (rtx x
, enum mips_symbol_context context
)
1981 split_const (x
, &x
, &offset
);
1982 if (UNSPEC_ADDRESS_P (x
))
1983 return UNSPEC_ADDRESS_TYPE (x
);
1985 return mips_classify_symbol (x
, context
);
1988 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1989 is the alignment in bytes of SYMBOL_REF X. */
1992 mips_offset_within_alignment_p (rtx x
, HOST_WIDE_INT offset
)
1994 HOST_WIDE_INT align
;
1996 align
= SYMBOL_REF_DECL (x
) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x
)) : 1;
1997 return IN_RANGE (offset
, 0, align
- 1);
2000 /* Return true if X is a symbolic constant that can be used in context
2001 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
2004 mips_symbolic_constant_p (rtx x
, enum mips_symbol_context context
,
2005 enum mips_symbol_type
*symbol_type
)
2009 split_const (x
, &x
, &offset
);
2010 if (UNSPEC_ADDRESS_P (x
))
2012 *symbol_type
= UNSPEC_ADDRESS_TYPE (x
);
2013 x
= UNSPEC_ADDRESS (x
);
2015 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
2017 *symbol_type
= mips_classify_symbol (x
, context
);
2018 if (*symbol_type
== SYMBOL_TLS
)
2024 if (offset
== const0_rtx
)
2027 /* Check whether a nonzero offset is valid for the underlying
2029 switch (*symbol_type
)
2031 case SYMBOL_ABSOLUTE
:
2032 case SYMBOL_64_HIGH
:
2035 /* If the target has 64-bit pointers and the object file only
2036 supports 32-bit symbols, the values of those symbols will be
2037 sign-extended. In this case we can't allow an arbitrary offset
2038 in case the 32-bit value X + OFFSET has a different sign from X. */
2039 if (Pmode
== DImode
&& !ABI_HAS_64BIT_SYMBOLS
)
2040 return offset_within_block_p (x
, INTVAL (offset
));
2042 /* In other cases the relocations can handle any offset. */
2045 case SYMBOL_PC_RELATIVE
:
2046 /* Allow constant pool references to be converted to LABEL+CONSTANT.
2047 In this case, we no longer have access to the underlying constant,
2048 but the original symbol-based access was known to be valid. */
2049 if (GET_CODE (x
) == LABEL_REF
)
2054 case SYMBOL_GP_RELATIVE
:
2055 /* Make sure that the offset refers to something within the
2056 same object block. This should guarantee that the final
2057 PC- or GP-relative offset is within the 16-bit limit. */
2058 return offset_within_block_p (x
, INTVAL (offset
));
2060 case SYMBOL_GOT_PAGE_OFST
:
2061 case SYMBOL_GOTOFF_PAGE
:
2062 /* If the symbol is global, the GOT entry will contain the symbol's
2063 address, and we will apply a 16-bit offset after loading it.
2064 If the symbol is local, the linker should provide enough local
2065 GOT entries for a 16-bit offset, but larger offsets may lead
2067 return SMALL_INT (offset
);
2071 /* There is no carry between the HI and LO REL relocations, so the
2072 offset is only valid if we know it won't lead to such a carry. */
2073 return mips_offset_within_alignment_p (x
, INTVAL (offset
));
2075 case SYMBOL_GOT_DISP
:
2076 case SYMBOL_GOTOFF_DISP
:
2077 case SYMBOL_GOTOFF_CALL
:
2078 case SYMBOL_GOTOFF_LOADGP
:
2081 case SYMBOL_GOTTPREL
:
2089 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
2090 single instruction. We rely on the fact that, in the worst case,
2091 all instructions involved in a MIPS16 address calculation are usually
2095 mips_symbol_insns_1 (enum mips_symbol_type type
, machine_mode mode
)
2097 if (mips_use_pcrel_pool_p
[(int) type
])
2099 if (mode
== MAX_MACHINE_MODE
)
2100 /* LEAs will be converted into constant-pool references by
2102 type
= SYMBOL_PC_RELATIVE
;
2104 /* The constant must be loaded and then dereferenced. */
2110 case SYMBOL_ABSOLUTE
:
2111 /* When using 64-bit symbols, we need 5 preparatory instructions,
2114 lui $at,%highest(symbol)
2115 daddiu $at,$at,%higher(symbol)
2117 daddiu $at,$at,%hi(symbol)
2120 The final address is then $at + %lo(symbol). With 32-bit
2121 symbols we just need a preparatory LUI for normal mode and
2122 a preparatory LI and SLL for MIPS16. */
2123 return ABI_HAS_64BIT_SYMBOLS
? 6 : TARGET_MIPS16
? 3 : 2;
2125 case SYMBOL_GP_RELATIVE
:
2126 /* Treat GP-relative accesses as taking a single instruction on
2127 MIPS16 too; the copy of $gp can often be shared. */
2130 case SYMBOL_PC_RELATIVE
:
2131 /* PC-relative constants can be only be used with ADDIUPC,
2132 DADDIUPC, LWPC and LDPC. */
2133 if (mode
== MAX_MACHINE_MODE
2134 || GET_MODE_SIZE (mode
) == 4
2135 || GET_MODE_SIZE (mode
) == 8)
2138 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
2141 case SYMBOL_GOT_DISP
:
2142 /* The constant will have to be loaded from the GOT before it
2143 is used in an address. */
2144 if (mode
!= MAX_MACHINE_MODE
)
2149 case SYMBOL_GOT_PAGE_OFST
:
2150 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
2151 local/global classification is accurate. The worst cases are:
2153 (1) For local symbols when generating o32 or o64 code. The assembler
2159 ...and the final address will be $at + %lo(symbol).
2161 (2) For global symbols when -mxgot. The assembler will use:
2163 lui $at,%got_hi(symbol)
2166 ...and the final address will be $at + %got_lo(symbol). */
2169 case SYMBOL_GOTOFF_PAGE
:
2170 case SYMBOL_GOTOFF_DISP
:
2171 case SYMBOL_GOTOFF_CALL
:
2172 case SYMBOL_GOTOFF_LOADGP
:
2173 case SYMBOL_64_HIGH
:
2179 case SYMBOL_GOTTPREL
:
2182 /* A 16-bit constant formed by a single relocation, or a 32-bit
2183 constant formed from a high 16-bit relocation and a low 16-bit
2184 relocation. Use mips_split_p to determine which. 32-bit
2185 constants need an "lui; addiu" sequence for normal mode and
2186 an "li; sll; addiu" sequence for MIPS16 mode. */
2187 return !mips_split_p
[type
] ? 1 : TARGET_MIPS16
? 3 : 2;
2190 /* We don't treat a bare TLS symbol as a constant. */
2196 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
2197 to load symbols of type TYPE into a register. Return 0 if the given
2198 type of symbol cannot be used as an immediate operand.
2200 Otherwise, return the number of instructions needed to load or store
2201 values of mode MODE to or from addresses of type TYPE. Return 0 if
2202 the given type of symbol is not valid in addresses.
2204 In both cases, instruction counts are based off BASE_INSN_LENGTH. */
2207 mips_symbol_insns (enum mips_symbol_type type
, machine_mode mode
)
2209 return mips_symbol_insns_1 (type
, mode
) * (TARGET_MIPS16
? 2 : 1);
2212 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2215 mips_cannot_force_const_mem (machine_mode mode
, rtx x
)
2217 enum mips_symbol_type type
;
2220 /* There is no assembler syntax for expressing an address-sized
2222 if (GET_CODE (x
) == HIGH
)
2225 /* As an optimization, reject constants that mips_legitimize_move
2228 Suppose we have a multi-instruction sequence that loads constant C
2229 into register R. If R does not get allocated a hard register, and
2230 R is used in an operand that allows both registers and memory
2231 references, reload will consider forcing C into memory and using
2232 one of the instruction's memory alternatives. Returning false
2233 here will force it to use an input reload instead. */
2234 if (CONST_INT_P (x
) && mips_legitimate_constant_p (mode
, x
))
2237 split_const (x
, &base
, &offset
);
2238 if (mips_symbolic_constant_p (base
, SYMBOL_CONTEXT_LEA
, &type
))
2240 /* See whether we explicitly want these symbols in the pool. */
2241 if (mips_use_pcrel_pool_p
[(int) type
])
2244 /* The same optimization as for CONST_INT. */
2245 if (SMALL_INT (offset
) && mips_symbol_insns (type
, MAX_MACHINE_MODE
) > 0)
2248 /* If MIPS16 constant pools live in the text section, they should
2249 not refer to anything that might need run-time relocation. */
2250 if (TARGET_MIPS16_PCREL_LOADS
&& mips_got_symbol_type_p (type
))
2254 /* TLS symbols must be computed by mips_legitimize_move. */
2255 if (tls_referenced_p (x
))
2261 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
2262 constants when we're using a per-function constant pool. */
2265 mips_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED
,
2266 const_rtx x ATTRIBUTE_UNUSED
)
2268 return !TARGET_MIPS16_PCREL_LOADS
;
2271 /* Return true if register REGNO is a valid base register for mode MODE.
2272 STRICT_P is true if REG_OK_STRICT is in effect. */
2275 mips_regno_mode_ok_for_base_p (int regno
, machine_mode mode
,
2278 if (!HARD_REGISTER_NUM_P (regno
))
2282 regno
= reg_renumber
[regno
];
2285 /* These fake registers will be eliminated to either the stack or
2286 hard frame pointer, both of which are usually valid base registers.
2287 Reload deals with the cases where the eliminated form isn't valid. */
2288 if (regno
== ARG_POINTER_REGNUM
|| regno
== FRAME_POINTER_REGNUM
)
2291 /* In MIPS16 mode, the stack pointer can only address word and doubleword
2292 values, nothing smaller. */
2293 if (TARGET_MIPS16
&& regno
== STACK_POINTER_REGNUM
)
2294 return GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8;
2296 return TARGET_MIPS16
? M16_REG_P (regno
) : GP_REG_P (regno
);
2299 /* Return true if X is a valid base register for mode MODE.
2300 STRICT_P is true if REG_OK_STRICT is in effect. */
2303 mips_valid_base_register_p (rtx x
, machine_mode mode
, bool strict_p
)
2305 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
2309 && mips_regno_mode_ok_for_base_p (REGNO (x
), mode
, strict_p
));
2312 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2313 can address a value of mode MODE. */
2316 mips_valid_offset_p (rtx x
, machine_mode mode
)
2318 /* Check that X is a signed 16-bit number. */
2319 if (!const_arith_operand (x
, Pmode
))
2322 /* We may need to split multiword moves, so make sure that every word
2324 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2325 && !SMALL_OPERAND (INTVAL (x
) + GET_MODE_SIZE (mode
) - UNITS_PER_WORD
))
2331 /* Return true if a LO_SUM can address a value of mode MODE when the
2332 LO_SUM symbol has type SYMBOL_TYPE. */
2335 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type
, machine_mode mode
)
2337 /* Check that symbols of type SYMBOL_TYPE can be used to access values
2339 if (mips_symbol_insns (symbol_type
, mode
) == 0)
2342 /* Check that there is a known low-part relocation. */
2343 if (mips_lo_relocs
[symbol_type
] == NULL
)
2346 /* We may need to split multiword moves, so make sure that each word
2347 can be accessed without inducing a carry. This is mainly needed
2348 for o64, which has historically only guaranteed 64-bit alignment
2349 for 128-bit types. */
2350 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2351 && GET_MODE_BITSIZE (mode
) > GET_MODE_ALIGNMENT (mode
))
2357 /* Return true if X is a valid address for machine mode MODE. If it is,
2358 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
2362 mips_classify_address (struct mips_address_info
*info
, rtx x
,
2363 machine_mode mode
, bool strict_p
)
2365 switch (GET_CODE (x
))
2369 info
->type
= ADDRESS_REG
;
2371 info
->offset
= const0_rtx
;
2372 return mips_valid_base_register_p (info
->reg
, mode
, strict_p
);
2375 info
->type
= ADDRESS_REG
;
2376 info
->reg
= XEXP (x
, 0);
2377 info
->offset
= XEXP (x
, 1);
2378 return (mips_valid_base_register_p (info
->reg
, mode
, strict_p
)
2379 && mips_valid_offset_p (info
->offset
, mode
));
2382 info
->type
= ADDRESS_LO_SUM
;
2383 info
->reg
= XEXP (x
, 0);
2384 info
->offset
= XEXP (x
, 1);
2385 /* We have to trust the creator of the LO_SUM to do something vaguely
2386 sane. Target-independent code that creates a LO_SUM should also
2387 create and verify the matching HIGH. Target-independent code that
2388 adds an offset to a LO_SUM must prove that the offset will not
2389 induce a carry. Failure to do either of these things would be
2390 a bug, and we are not required to check for it here. The MIPS
2391 backend itself should only create LO_SUMs for valid symbolic
2392 constants, with the high part being either a HIGH or a copy
2395 = mips_classify_symbolic_expression (info
->offset
, SYMBOL_CONTEXT_MEM
);
2396 return (mips_valid_base_register_p (info
->reg
, mode
, strict_p
)
2397 && mips_valid_lo_sum_p (info
->symbol_type
, mode
));
2400 /* Small-integer addresses don't occur very often, but they
2401 are legitimate if $0 is a valid base register. */
2402 info
->type
= ADDRESS_CONST_INT
;
2403 return !TARGET_MIPS16
&& SMALL_INT (x
);
2408 info
->type
= ADDRESS_SYMBOLIC
;
2409 return (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_MEM
,
2411 && mips_symbol_insns (info
->symbol_type
, mode
) > 0
2412 && !mips_split_p
[info
->symbol_type
]);
2419 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
2422 mips_legitimate_address_p (machine_mode mode
, rtx x
, bool strict_p
)
2424 struct mips_address_info addr
;
2426 return mips_classify_address (&addr
, x
, mode
, strict_p
);
2429 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2432 mips_stack_address_p (rtx x
, machine_mode mode
)
2434 struct mips_address_info addr
;
2436 return (mips_classify_address (&addr
, x
, mode
, false)
2437 && addr
.type
== ADDRESS_REG
2438 && addr
.reg
== stack_pointer_rtx
);
2441 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2442 address instruction. Note that such addresses are not considered
2443 legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
2444 is so restricted. */
2447 mips_lwxs_address_p (rtx addr
)
2450 && GET_CODE (addr
) == PLUS
2451 && REG_P (XEXP (addr
, 1)))
2453 rtx offset
= XEXP (addr
, 0);
2454 if (GET_CODE (offset
) == MULT
2455 && REG_P (XEXP (offset
, 0))
2456 && CONST_INT_P (XEXP (offset
, 1))
2457 && INTVAL (XEXP (offset
, 1)) == 4)
2463 /* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load
2464 indexed address instruction. Note that such addresses are
2465 not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P
2466 sense, because their use is so restricted. */
2469 mips_lx_address_p (rtx addr
, machine_mode mode
)
2471 if (GET_CODE (addr
) != PLUS
2472 || !REG_P (XEXP (addr
, 0))
2473 || !REG_P (XEXP (addr
, 1)))
2475 if (ISA_HAS_LBX
&& mode
== QImode
)
2477 if (ISA_HAS_LHX
&& mode
== HImode
)
2479 if (ISA_HAS_LWX
&& mode
== SImode
)
2481 if (ISA_HAS_LDX
&& mode
== DImode
)
2486 /* Return true if a value at OFFSET bytes from base register BASE can be
2487 accessed using an unextended MIPS16 instruction. MODE is the mode of
2490 Usually the offset in an unextended instruction is a 5-bit field.
2491 The offset is unsigned and shifted left once for LH and SH, twice
2492 for LW and SW, and so on. An exception is LWSP and SWSP, which have
2493 an 8-bit immediate field that's shifted left twice. */
2496 mips16_unextended_reference_p (machine_mode mode
, rtx base
,
2497 unsigned HOST_WIDE_INT offset
)
2499 if (mode
!= BLKmode
&& offset
% GET_MODE_SIZE (mode
) == 0)
2501 if (GET_MODE_SIZE (mode
) == 4 && base
== stack_pointer_rtx
)
2502 return offset
< 256U * GET_MODE_SIZE (mode
);
2503 return offset
< 32U * GET_MODE_SIZE (mode
);
2508 /* Return the number of instructions needed to load or store a value
2509 of mode MODE at address X, assuming that BASE_INSN_LENGTH is the
2510 length of one instruction. Return 0 if X isn't valid for MODE.
2511 Assume that multiword moves may need to be split into word moves
2512 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2516 mips_address_insns (rtx x
, machine_mode mode
, bool might_split_p
)
2518 struct mips_address_info addr
;
2521 /* BLKmode is used for single unaligned loads and stores and should
2522 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2523 meaningless, so we have to single it out as a special case one way
2525 if (mode
!= BLKmode
&& might_split_p
)
2526 factor
= (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2530 if (mips_classify_address (&addr
, x
, mode
, false))
2535 && !mips16_unextended_reference_p (mode
, addr
.reg
,
2536 UINTVAL (addr
.offset
)))
2540 case ADDRESS_LO_SUM
:
2541 return TARGET_MIPS16
? factor
* 2 : factor
;
2543 case ADDRESS_CONST_INT
:
2546 case ADDRESS_SYMBOLIC
:
2547 return factor
* mips_symbol_insns (addr
.symbol_type
, mode
);
2552 /* Return true if X fits within an unsigned field of BITS bits that is
2553 shifted left SHIFT bits before being used. */
2556 mips_unsigned_immediate_p (unsigned HOST_WIDE_INT x
, int bits
, int shift
= 0)
2558 return (x
& ((1 << shift
) - 1)) == 0 && x
< ((unsigned) 1 << (shift
+ bits
));
2561 /* Return true if X fits within a signed field of BITS bits that is
2562 shifted left SHIFT bits before being used. */
2565 mips_signed_immediate_p (unsigned HOST_WIDE_INT x
, int bits
, int shift
= 0)
2567 x
+= 1 << (bits
+ shift
- 1);
2568 return mips_unsigned_immediate_p (x
, bits
, shift
);
2571 /* Return true if X is legitimate for accessing values of mode MODE,
2572 if it is based on a MIPS16 register, and if the offset satisfies
2573 OFFSET_PREDICATE. */
2576 m16_based_address_p (rtx x
, machine_mode mode
,
2577 insn_operand_predicate_fn offset_predicate
)
2579 struct mips_address_info addr
;
2581 return (mips_classify_address (&addr
, x
, mode
, false)
2582 && addr
.type
== ADDRESS_REG
2583 && M16_REG_P (REGNO (addr
.reg
))
2584 && offset_predicate (addr
.offset
, mode
));
2587 /* Return true if X is a legitimate address that conforms to the requirements
2588 for a microMIPS LWSP or SWSP insn. */
2591 lwsp_swsp_address_p (rtx x
, machine_mode mode
)
2593 struct mips_address_info addr
;
2595 return (mips_classify_address (&addr
, x
, mode
, false)
2596 && addr
.type
== ADDRESS_REG
2597 && REGNO (addr
.reg
) == STACK_POINTER_REGNUM
2598 && uw5_operand (addr
.offset
, mode
));
2601 /* Return true if X is a legitimate address with a 12-bit offset.
2602 MODE is the mode of the value being accessed. */
2605 umips_12bit_offset_address_p (rtx x
, machine_mode mode
)
2607 struct mips_address_info addr
;
2609 return (mips_classify_address (&addr
, x
, mode
, false)
2610 && addr
.type
== ADDRESS_REG
2611 && CONST_INT_P (addr
.offset
)
2612 && UMIPS_12BIT_OFFSET_P (INTVAL (addr
.offset
)));
2615 /* Return true if X is a legitimate address with a 9-bit offset.
2616 MODE is the mode of the value being accessed. */
2619 mips_9bit_offset_address_p (rtx x
, machine_mode mode
)
2621 struct mips_address_info addr
;
2623 return (mips_classify_address (&addr
, x
, mode
, false)
2624 && addr
.type
== ADDRESS_REG
2625 && CONST_INT_P (addr
.offset
)
2626 && MIPS_9BIT_OFFSET_P (INTVAL (addr
.offset
)));
2629 /* Return the number of instructions needed to load constant X,
2630 assuming that BASE_INSN_LENGTH is the length of one instruction.
2631 Return 0 if X isn't a valid constant. */
2634 mips_const_insns (rtx x
)
2636 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
2637 enum mips_symbol_type symbol_type
;
2640 switch (GET_CODE (x
))
2643 if (!mips_symbolic_constant_p (XEXP (x
, 0), SYMBOL_CONTEXT_LEA
,
2645 || !mips_split_p
[symbol_type
])
2648 /* This is simply an LUI for normal mode. It is an extended
2649 LI followed by an extended SLL for MIPS16. */
2650 return TARGET_MIPS16
? 4 : 1;
2654 /* Unsigned 8-bit constants can be loaded using an unextended
2655 LI instruction. Unsigned 16-bit constants can be loaded
2656 using an extended LI. Negative constants must be loaded
2657 using LI and then negated. */
2658 return (IN_RANGE (INTVAL (x
), 0, 255) ? 1
2659 : SMALL_OPERAND_UNSIGNED (INTVAL (x
)) ? 2
2660 : IN_RANGE (-INTVAL (x
), 0, 255) ? 2
2661 : SMALL_OPERAND_UNSIGNED (-INTVAL (x
)) ? 3
2664 return mips_build_integer (codes
, INTVAL (x
));
2668 /* Allow zeros for normal mode, where we can use $0. */
2669 return !TARGET_MIPS16
&& x
== CONST0_RTX (GET_MODE (x
)) ? 1 : 0;
2675 /* See if we can refer to X directly. */
2676 if (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_LEA
, &symbol_type
))
2677 return mips_symbol_insns (symbol_type
, MAX_MACHINE_MODE
);
2679 /* Otherwise try splitting the constant into a base and offset.
2680 If the offset is a 16-bit value, we can load the base address
2681 into a register and then use (D)ADDIU to add in the offset.
2682 If the offset is larger, we can load the base and offset
2683 into separate registers and add them together with (D)ADDU.
2684 However, the latter is only possible before reload; during
2685 and after reload, we must have the option of forcing the
2686 constant into the pool instead. */
2687 split_const (x
, &x
, &offset
);
2690 int n
= mips_const_insns (x
);
2693 if (SMALL_INT (offset
))
2695 else if (!targetm
.cannot_force_const_mem (GET_MODE (x
), x
))
2696 return n
+ 1 + mips_build_integer (codes
, INTVAL (offset
));
2703 return mips_symbol_insns (mips_classify_symbol (x
, SYMBOL_CONTEXT_LEA
),
2711 /* X is a doubleword constant that can be handled by splitting it into
2712 two words and loading each word separately. Return the number of
2713 instructions required to do this, assuming that BASE_INSN_LENGTH
2714 is the length of one instruction. */
2717 mips_split_const_insns (rtx x
)
2719 unsigned int low
, high
;
2721 low
= mips_const_insns (mips_subword (x
, false));
2722 high
= mips_const_insns (mips_subword (x
, true));
2723 gcc_assert (low
> 0 && high
> 0);
2727 /* Return the number of instructions needed to implement INSN,
2728 given that it loads from or stores to MEM. Assume that
2729 BASE_INSN_LENGTH is the length of one instruction. */
2732 mips_load_store_insns (rtx mem
, rtx_insn
*insn
)
2738 gcc_assert (MEM_P (mem
));
2739 mode
= GET_MODE (mem
);
2741 /* Try to prove that INSN does not need to be split. */
2742 might_split_p
= GET_MODE_SIZE (mode
) > UNITS_PER_WORD
;
2745 set
= single_set (insn
);
2746 if (set
&& !mips_split_move_insn_p (SET_DEST (set
), SET_SRC (set
), insn
))
2747 might_split_p
= false;
2750 return mips_address_insns (XEXP (mem
, 0), mode
, might_split_p
);
2753 /* Return the number of instructions needed for an integer division,
2754 assuming that BASE_INSN_LENGTH is the length of one instruction. */
2757 mips_idiv_insns (void)
2762 if (TARGET_CHECK_ZERO_DIV
)
2764 if (GENERATE_DIVIDE_TRAPS
)
2770 if (TARGET_FIX_R4000
|| TARGET_FIX_R4400
)
2775 /* Emit a move from SRC to DEST. Assume that the move expanders can
2776 handle all moves if !can_create_pseudo_p (). The distinction is
2777 important because, unlike emit_move_insn, the move expanders know
2778 how to force Pmode objects into the constant pool even when the
2779 constant pool address is not itself legitimate. */
2782 mips_emit_move (rtx dest
, rtx src
)
2784 return (can_create_pseudo_p ()
2785 ? emit_move_insn (dest
, src
)
2786 : emit_move_insn_1 (dest
, src
));
2789 /* Emit a move from SRC to DEST, splitting compound moves into individual
2790 instructions. SPLIT_TYPE is the type of split to perform. */
2793 mips_emit_move_or_split (rtx dest
, rtx src
, enum mips_split_type split_type
)
2795 if (mips_split_move_p (dest
, src
, split_type
))
2796 mips_split_move (dest
, src
, split_type
);
2798 mips_emit_move (dest
, src
);
2801 /* Emit an instruction of the form (set TARGET (CODE OP0)). */
2804 mips_emit_unary (enum rtx_code code
, rtx target
, rtx op0
)
2806 emit_insn (gen_rtx_SET (target
, gen_rtx_fmt_e (code
, GET_MODE (op0
), op0
)));
2809 /* Compute (CODE OP0) and store the result in a new register of mode MODE.
2810 Return that new register. */
2813 mips_force_unary (machine_mode mode
, enum rtx_code code
, rtx op0
)
2817 reg
= gen_reg_rtx (mode
);
2818 mips_emit_unary (code
, reg
, op0
);
2822 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2825 mips_emit_binary (enum rtx_code code
, rtx target
, rtx op0
, rtx op1
)
2827 emit_insn (gen_rtx_SET (target
, gen_rtx_fmt_ee (code
, GET_MODE (target
),
2831 /* Compute (CODE OP0 OP1) and store the result in a new register
2832 of mode MODE. Return that new register. */
2835 mips_force_binary (machine_mode mode
, enum rtx_code code
, rtx op0
, rtx op1
)
2839 reg
= gen_reg_rtx (mode
);
2840 mips_emit_binary (code
, reg
, op0
, op1
);
2844 /* Copy VALUE to a register and return that register. If new pseudos
2845 are allowed, copy it into a new register, otherwise use DEST. */
2848 mips_force_temporary (rtx dest
, rtx value
)
2850 if (can_create_pseudo_p ())
2851 return force_reg (Pmode
, value
);
2854 mips_emit_move (dest
, value
);
2859 /* Emit a call sequence with call pattern PATTERN and return the call
2860 instruction itself (which is not necessarily the last instruction
2861 emitted). ORIG_ADDR is the original, unlegitimized address,
2862 ADDR is the legitimized form, and LAZY_P is true if the call
2863 address is lazily-bound. */
2866 mips_emit_call_insn (rtx pattern
, rtx orig_addr
, rtx addr
, bool lazy_p
)
2871 insn
= emit_call_insn (pattern
);
2873 if (TARGET_MIPS16
&& mips_use_pic_fn_addr_reg_p (orig_addr
))
2875 /* MIPS16 JALRs only take MIPS16 registers. If the target
2876 function requires $25 to be valid on entry, we must copy it
2877 there separately. The move instruction can be put in the
2878 call's delay slot. */
2879 reg
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
2880 emit_insn_before (gen_move_insn (reg
, addr
), insn
);
2881 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), reg
);
2885 /* Lazy-binding stubs require $gp to be valid on entry. */
2886 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), pic_offset_table_rtx
);
2890 /* See the comment above load_call<mode> for details. */
2891 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
),
2892 gen_rtx_REG (Pmode
, GOT_VERSION_REGNUM
));
2893 emit_insn (gen_update_got_version ());
2897 && TARGET_EXPLICIT_RELOCS
2898 && TARGET_CALL_CLOBBERED_GP
)
2900 rtx post_call_tmp_reg
= gen_rtx_REG (word_mode
, POST_CALL_TMP_REG
);
2901 clobber_reg (&CALL_INSN_FUNCTION_USAGE (insn
), post_call_tmp_reg
);
2907 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2908 then add CONST_INT OFFSET to the result. */
2911 mips_unspec_address_offset (rtx base
, rtx offset
,
2912 enum mips_symbol_type symbol_type
)
2914 base
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, base
),
2915 UNSPEC_ADDRESS_FIRST
+ symbol_type
);
2916 if (offset
!= const0_rtx
)
2917 base
= gen_rtx_PLUS (Pmode
, base
, offset
);
2918 return gen_rtx_CONST (Pmode
, base
);
2921 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2922 type SYMBOL_TYPE. */
2925 mips_unspec_address (rtx address
, enum mips_symbol_type symbol_type
)
2929 split_const (address
, &base
, &offset
);
2930 return mips_unspec_address_offset (base
, offset
, symbol_type
);
2933 /* If OP is an UNSPEC address, return the address to which it refers,
2934 otherwise return OP itself. */
2937 mips_strip_unspec_address (rtx op
)
2941 split_const (op
, &base
, &offset
);
2942 if (UNSPEC_ADDRESS_P (base
))
2943 op
= plus_constant (Pmode
, UNSPEC_ADDRESS (base
), INTVAL (offset
));
2947 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2948 high part to BASE and return the result. Just return BASE otherwise.
2949 TEMP is as for mips_force_temporary.
2951 The returned expression can be used as the first operand to a LO_SUM. */
2954 mips_unspec_offset_high (rtx temp
, rtx base
, rtx addr
,
2955 enum mips_symbol_type symbol_type
)
2957 if (mips_split_p
[symbol_type
])
2959 addr
= gen_rtx_HIGH (Pmode
, mips_unspec_address (addr
, symbol_type
));
2960 addr
= mips_force_temporary (temp
, addr
);
2961 base
= mips_force_temporary (temp
, gen_rtx_PLUS (Pmode
, addr
, base
));
2966 /* Return an instruction that copies $gp into register REG. We want
2967 GCC to treat the register's value as constant, so that its value
2968 can be rematerialized on demand. */
2971 gen_load_const_gp (rtx reg
)
2973 return PMODE_INSN (gen_load_const_gp
, (reg
));
2976 /* Return a pseudo register that contains the value of $gp throughout
2977 the current function. Such registers are needed by MIPS16 functions,
2978 for which $gp itself is not a valid base register or addition operand. */
2981 mips16_gp_pseudo_reg (void)
2983 if (cfun
->machine
->mips16_gp_pseudo_rtx
== NULL_RTX
)
2987 cfun
->machine
->mips16_gp_pseudo_rtx
= gen_reg_rtx (Pmode
);
2989 push_topmost_sequence ();
2991 scan
= get_insns ();
2992 while (NEXT_INSN (scan
) && !INSN_P (NEXT_INSN (scan
)))
2993 scan
= NEXT_INSN (scan
);
2995 rtx set
= gen_load_const_gp (cfun
->machine
->mips16_gp_pseudo_rtx
);
2996 rtx_insn
*insn
= emit_insn_after (set
, scan
);
2997 INSN_LOCATION (insn
) = 0;
2999 pop_topmost_sequence ();
3002 return cfun
->machine
->mips16_gp_pseudo_rtx
;
3005 /* Return a base register that holds pic_offset_table_rtx.
3006 TEMP, if nonnull, is a scratch Pmode base register. */
3009 mips_pic_base_register (rtx temp
)
3012 return pic_offset_table_rtx
;
3014 if (currently_expanding_to_rtl
)
3015 return mips16_gp_pseudo_reg ();
3017 if (can_create_pseudo_p ())
3018 temp
= gen_reg_rtx (Pmode
);
3021 /* The first post-reload split exposes all references to $gp
3022 (both uses and definitions). All references must remain
3023 explicit after that point.
3025 It is safe to introduce uses of $gp at any time, so for
3026 simplicity, we do that before the split too. */
3027 mips_emit_move (temp
, pic_offset_table_rtx
);
3029 emit_insn (gen_load_const_gp (temp
));
3033 /* Return the RHS of a load_call<mode> insn. */
3036 mips_unspec_call (rtx reg
, rtx symbol
)
3040 vec
= gen_rtvec (3, reg
, symbol
, gen_rtx_REG (SImode
, GOT_VERSION_REGNUM
));
3041 return gen_rtx_UNSPEC (Pmode
, vec
, UNSPEC_LOAD_CALL
);
3044 /* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
3045 reference. Return NULL_RTX otherwise. */
3048 mips_strip_unspec_call (rtx src
)
3050 if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == UNSPEC_LOAD_CALL
)
3051 return mips_strip_unspec_address (XVECEXP (src
, 0, 1));
3055 /* Create and return a GOT reference of type TYPE for address ADDR.
3056 TEMP, if nonnull, is a scratch Pmode base register. */
3059 mips_got_load (rtx temp
, rtx addr
, enum mips_symbol_type type
)
3061 rtx base
, high
, lo_sum_symbol
;
3063 base
= mips_pic_base_register (temp
);
3065 /* If we used the temporary register to load $gp, we can't use
3066 it for the high part as well. */
3067 if (temp
!= NULL
&& reg_overlap_mentioned_p (base
, temp
))
3070 high
= mips_unspec_offset_high (temp
, base
, addr
, type
);
3071 lo_sum_symbol
= mips_unspec_address (addr
, type
);
3073 if (type
== SYMBOL_GOTOFF_CALL
)
3074 return mips_unspec_call (high
, lo_sum_symbol
);
3076 return PMODE_INSN (gen_unspec_got
, (high
, lo_sum_symbol
));
3079 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
3080 it appears in a MEM of that mode. Return true if ADDR is a legitimate
3081 constant in that context and can be split into high and low parts.
3082 If so, and if LOW_OUT is nonnull, emit the high part and store the
3083 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
3085 TEMP is as for mips_force_temporary and is used to load the high
3086 part into a register.
3088 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
3089 a legitimize SET_SRC for an .md pattern, otherwise the low part
3090 is guaranteed to be a legitimate address for mode MODE. */
3093 mips_split_symbol (rtx temp
, rtx addr
, machine_mode mode
, rtx
*low_out
)
3095 enum mips_symbol_context context
;
3096 enum mips_symbol_type symbol_type
;
3099 context
= (mode
== MAX_MACHINE_MODE
3100 ? SYMBOL_CONTEXT_LEA
3101 : SYMBOL_CONTEXT_MEM
);
3102 if (GET_CODE (addr
) == HIGH
&& context
== SYMBOL_CONTEXT_LEA
)
3104 addr
= XEXP (addr
, 0);
3105 if (mips_symbolic_constant_p (addr
, context
, &symbol_type
)
3106 && mips_symbol_insns (symbol_type
, mode
) > 0
3107 && mips_split_hi_p
[symbol_type
])
3110 switch (symbol_type
)
3112 case SYMBOL_GOT_PAGE_OFST
:
3113 /* The high part of a page/ofst pair is loaded from the GOT. */
3114 *low_out
= mips_got_load (temp
, addr
, SYMBOL_GOTOFF_PAGE
);
3125 if (mips_symbolic_constant_p (addr
, context
, &symbol_type
)
3126 && mips_symbol_insns (symbol_type
, mode
) > 0
3127 && mips_split_p
[symbol_type
])
3130 switch (symbol_type
)
3132 case SYMBOL_GOT_DISP
:
3133 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
3134 *low_out
= mips_got_load (temp
, addr
, SYMBOL_GOTOFF_DISP
);
3137 case SYMBOL_GP_RELATIVE
:
3138 high
= mips_pic_base_register (temp
);
3139 *low_out
= gen_rtx_LO_SUM (Pmode
, high
, addr
);
3143 high
= gen_rtx_HIGH (Pmode
, copy_rtx (addr
));
3144 high
= mips_force_temporary (temp
, high
);
3145 *low_out
= gen_rtx_LO_SUM (Pmode
, high
, addr
);
3154 /* Return a legitimate address for REG + OFFSET. TEMP is as for
3155 mips_force_temporary; it is only needed when OFFSET is not a
3159 mips_add_offset (rtx temp
, rtx reg
, HOST_WIDE_INT offset
)
3161 if (!SMALL_OPERAND (offset
))
3167 /* Load the full offset into a register so that we can use
3168 an unextended instruction for the address itself. */
3169 high
= GEN_INT (offset
);
3174 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
3175 The addition inside the macro CONST_HIGH_PART may cause an
3176 overflow, so we need to force a sign-extension check. */
3177 high
= gen_int_mode (CONST_HIGH_PART (offset
), Pmode
);
3178 offset
= CONST_LOW_PART (offset
);
3180 high
= mips_force_temporary (temp
, high
);
3181 reg
= mips_force_temporary (temp
, gen_rtx_PLUS (Pmode
, high
, reg
));
3183 return plus_constant (Pmode
, reg
, offset
);
3186 /* The __tls_get_addr symbol. */
3187 static GTY(()) rtx mips_tls_symbol
;
3189 /* Return an instruction sequence that calls __tls_get_addr. SYM is
3190 the TLS symbol we are referencing and TYPE is the symbol type to use
3191 (either global dynamic or local dynamic). V0 is an RTX for the
3192 return value location. */
3195 mips_call_tls_get_addr (rtx sym
, enum mips_symbol_type type
, rtx v0
)
3199 a0
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
3201 if (!mips_tls_symbol
)
3202 mips_tls_symbol
= init_one_libfunc ("__tls_get_addr");
3204 loc
= mips_unspec_address (sym
, type
);
3208 emit_insn (gen_rtx_SET (a0
, gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
,
3210 insn
= mips_expand_call (MIPS_CALL_NORMAL
, v0
, mips_tls_symbol
,
3211 const0_rtx
, NULL_RTX
, false);
3212 RTL_CONST_CALL_P (insn
) = 1;
3213 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), a0
);
3214 insn
= get_insns ();
3221 /* Return a pseudo register that contains the current thread pointer. */
3224 mips_expand_thread_pointer (rtx tp
)
3230 if (!mips16_rdhwr_stub
)
3231 mips16_rdhwr_stub
= new mips16_rdhwr_one_only_stub ();
3232 fn
= mips16_stub_call_address (mips16_rdhwr_stub
);
3233 emit_insn (PMODE_INSN (gen_tls_get_tp_mips16
, (tp
, fn
)));
3236 emit_insn (PMODE_INSN (gen_tls_get_tp
, (tp
)));
3243 return mips_expand_thread_pointer (gen_reg_rtx (Pmode
));
3246 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
3247 its address. The return value will be both a valid address and a valid
3248 SET_SRC (either a REG or a LO_SUM). */
3251 mips_legitimize_tls_address (rtx loc
)
3253 rtx dest
, insn
, v0
, tp
, tmp1
, tmp2
, eqv
, offset
;
3254 enum tls_model model
;
3256 model
= SYMBOL_REF_TLS_MODEL (loc
);
3257 /* Only TARGET_ABICALLS code can have more than one module; other
3258 code must be static and should not use a GOT. All TLS models
3259 reduce to local exec in this situation. */
3260 if (!TARGET_ABICALLS
)
3261 model
= TLS_MODEL_LOCAL_EXEC
;
3265 case TLS_MODEL_GLOBAL_DYNAMIC
:
3266 v0
= gen_rtx_REG (Pmode
, GP_RETURN
);
3267 insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSGD
, v0
);
3268 dest
= gen_reg_rtx (Pmode
);
3269 emit_libcall_block (insn
, dest
, v0
, loc
);
3272 case TLS_MODEL_LOCAL_DYNAMIC
:
3273 v0
= gen_rtx_REG (Pmode
, GP_RETURN
);
3274 insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSLDM
, v0
);
3275 tmp1
= gen_reg_rtx (Pmode
);
3277 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3278 share the LDM result with other LD model accesses. */
3279 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
3281 emit_libcall_block (insn
, tmp1
, v0
, eqv
);
3283 offset
= mips_unspec_address (loc
, SYMBOL_DTPREL
);
3284 if (mips_split_p
[SYMBOL_DTPREL
])
3286 tmp2
= mips_unspec_offset_high (NULL
, tmp1
, loc
, SYMBOL_DTPREL
);
3287 dest
= gen_rtx_LO_SUM (Pmode
, tmp2
, offset
);
3290 dest
= expand_binop (Pmode
, add_optab
, tmp1
, offset
,
3291 0, 0, OPTAB_DIRECT
);
3294 case TLS_MODEL_INITIAL_EXEC
:
3295 tp
= mips_get_tp ();
3296 tmp1
= gen_reg_rtx (Pmode
);
3297 tmp2
= mips_unspec_address (loc
, SYMBOL_GOTTPREL
);
3298 if (Pmode
== DImode
)
3299 emit_insn (gen_load_gotdi (tmp1
, pic_offset_table_rtx
, tmp2
));
3301 emit_insn (gen_load_gotsi (tmp1
, pic_offset_table_rtx
, tmp2
));
3302 dest
= gen_reg_rtx (Pmode
);
3303 emit_insn (gen_add3_insn (dest
, tmp1
, tp
));
3306 case TLS_MODEL_LOCAL_EXEC
:
3307 tmp1
= mips_get_tp ();
3308 offset
= mips_unspec_address (loc
, SYMBOL_TPREL
);
3309 if (mips_split_p
[SYMBOL_TPREL
])
3311 tmp2
= mips_unspec_offset_high (NULL
, tmp1
, loc
, SYMBOL_TPREL
);
3312 dest
= gen_rtx_LO_SUM (Pmode
, tmp2
, offset
);
3315 dest
= expand_binop (Pmode
, add_optab
, tmp1
, offset
,
3316 0, 0, OPTAB_DIRECT
);
3325 /* Implement "TARGET = __builtin_mips_get_fcsr ()" for MIPS16,
3329 mips16_expand_get_fcsr (rtx target
)
3331 if (!mips16_get_fcsr_stub
)
3332 mips16_get_fcsr_stub
= new mips16_get_fcsr_one_only_stub ();
3333 rtx fn
= mips16_stub_call_address (mips16_get_fcsr_stub
);
3334 emit_insn (PMODE_INSN (gen_mips_get_fcsr_mips16
, (fn
)));
3335 emit_move_insn (target
, gen_rtx_REG (SImode
, GET_FCSR_REGNUM
));
3338 /* Implement __builtin_mips_set_fcsr (TARGET) for MIPS16, using a stub. */
3341 mips16_expand_set_fcsr (rtx newval
)
3343 if (!mips16_set_fcsr_stub
)
3344 mips16_set_fcsr_stub
= new mips16_set_fcsr_one_only_stub ();
3345 rtx fn
= mips16_stub_call_address (mips16_set_fcsr_stub
);
3346 emit_move_insn (gen_rtx_REG (SImode
, SET_FCSR_REGNUM
), newval
);
3347 emit_insn (PMODE_INSN (gen_mips_set_fcsr_mips16
, (fn
)));
3350 /* If X is not a valid address for mode MODE, force it into a register. */
3353 mips_force_address (rtx x
, machine_mode mode
)
3355 if (!mips_legitimate_address_p (mode
, x
, false))
3356 x
= force_reg (Pmode
, x
);
3360 /* This function is used to implement LEGITIMIZE_ADDRESS. If X can
3361 be legitimized in a way that the generic machinery might not expect,
3362 return a new address, otherwise return NULL. MODE is the mode of
3363 the memory being accessed. */
3366 mips_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
3370 HOST_WIDE_INT offset
;
3372 if (mips_tls_symbol_p (x
))
3373 return mips_legitimize_tls_address (x
);
3375 /* See if the address can split into a high part and a LO_SUM. */
3376 if (mips_split_symbol (NULL
, x
, mode
, &addr
))
3377 return mips_force_address (addr
, mode
);
3379 /* Handle BASE + OFFSET using mips_add_offset. */
3380 mips_split_plus (x
, &base
, &offset
);
3383 if (!mips_valid_base_register_p (base
, mode
, false))
3384 base
= copy_to_mode_reg (Pmode
, base
);
3385 addr
= mips_add_offset (NULL
, base
, offset
);
3386 return mips_force_address (addr
, mode
);
3392 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
3395 mips_move_integer (rtx temp
, rtx dest
, unsigned HOST_WIDE_INT value
)
3397 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
3399 unsigned int i
, num_ops
;
3402 mode
= GET_MODE (dest
);
3403 num_ops
= mips_build_integer (codes
, value
);
3405 /* Apply each binary operation to X. Invariant: X is a legitimate
3406 source operand for a SET pattern. */
3407 x
= GEN_INT (codes
[0].value
);
3408 for (i
= 1; i
< num_ops
; i
++)
3410 if (!can_create_pseudo_p ())
3412 emit_insn (gen_rtx_SET (temp
, x
));
3416 x
= force_reg (mode
, x
);
3417 x
= gen_rtx_fmt_ee (codes
[i
].code
, mode
, x
, GEN_INT (codes
[i
].value
));
3420 emit_insn (gen_rtx_SET (dest
, x
));
3423 /* Subroutine of mips_legitimize_move. Move constant SRC into register
3424 DEST given that SRC satisfies immediate_operand but doesn't satisfy
3428 mips_legitimize_const_move (machine_mode mode
, rtx dest
, rtx src
)
3432 /* Split moves of big integers into smaller pieces. */
3433 if (splittable_const_int_operand (src
, mode
))
3435 mips_move_integer (dest
, dest
, INTVAL (src
));
3439 /* Split moves of symbolic constants into high/low pairs. */
3440 if (mips_split_symbol (dest
, src
, MAX_MACHINE_MODE
, &src
))
3442 emit_insn (gen_rtx_SET (dest
, src
));
3446 /* Generate the appropriate access sequences for TLS symbols. */
3447 if (mips_tls_symbol_p (src
))
3449 mips_emit_move (dest
, mips_legitimize_tls_address (src
));
3453 /* If we have (const (plus symbol offset)), and that expression cannot
3454 be forced into memory, load the symbol first and add in the offset.
3455 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
3456 forced into memory, as it usually produces better code. */
3457 split_const (src
, &base
, &offset
);
3458 if (offset
!= const0_rtx
3459 && (targetm
.cannot_force_const_mem (mode
, src
)
3460 || (!TARGET_MIPS16
&& can_create_pseudo_p ())))
3462 base
= mips_force_temporary (dest
, base
);
3463 mips_emit_move (dest
, mips_add_offset (NULL
, base
, INTVAL (offset
)));
3467 src
= force_const_mem (mode
, src
);
3469 /* When using explicit relocs, constant pool references are sometimes
3470 not legitimate addresses. */
3471 mips_split_symbol (dest
, XEXP (src
, 0), mode
, &XEXP (src
, 0));
3472 mips_emit_move (dest
, src
);
3475 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
3476 sequence that is valid. */
3479 mips_legitimize_move (machine_mode mode
, rtx dest
, rtx src
)
3481 if (!register_operand (dest
, mode
) && !reg_or_0_operand (src
, mode
))
3483 mips_emit_move (dest
, force_reg (mode
, src
));
3487 /* We need to deal with constants that would be legitimate
3488 immediate_operands but aren't legitimate move_operands. */
3489 if (CONSTANT_P (src
) && !move_operand (src
, mode
))
3491 mips_legitimize_const_move (mode
, dest
, src
);
3492 set_unique_reg_note (get_last_insn (), REG_EQUAL
, copy_rtx (src
));
3498 /* Return true if value X in context CONTEXT is a small-data address
3499 that can be rewritten as a LO_SUM. */
3502 mips_rewrite_small_data_p (rtx x
, enum mips_symbol_context context
)
3504 enum mips_symbol_type symbol_type
;
3506 return (mips_lo_relocs
[SYMBOL_GP_RELATIVE
]
3507 && !mips_split_p
[SYMBOL_GP_RELATIVE
]
3508 && mips_symbolic_constant_p (x
, context
, &symbol_type
)
3509 && symbol_type
== SYMBOL_GP_RELATIVE
);
3512 /* Return true if OP refers to small data symbols directly, not through
3513 a LO_SUM. CONTEXT is the context in which X appears. */
3516 mips_small_data_pattern_1 (rtx x
, enum mips_symbol_context context
)
3518 subrtx_var_iterator::array_type array
;
3519 FOR_EACH_SUBRTX_VAR (iter
, array
, x
, ALL
)
3523 /* Ignore things like "g" constraints in asms. We make no particular
3524 guarantee about which symbolic constants are acceptable as asm operands
3525 versus which must be forced into a GPR. */
3526 if (GET_CODE (x
) == LO_SUM
|| GET_CODE (x
) == ASM_OPERANDS
)
3527 iter
.skip_subrtxes ();
3530 if (mips_small_data_pattern_1 (XEXP (x
, 0), SYMBOL_CONTEXT_MEM
))
3532 iter
.skip_subrtxes ();
3534 else if (mips_rewrite_small_data_p (x
, context
))
3540 /* Return true if OP refers to small data symbols directly, not through
3544 mips_small_data_pattern_p (rtx op
)
3546 return mips_small_data_pattern_1 (op
, SYMBOL_CONTEXT_LEA
);
3549 /* Rewrite *LOC so that it refers to small data using explicit
3550 relocations. CONTEXT is the context in which *LOC appears. */
3553 mips_rewrite_small_data_1 (rtx
*loc
, enum mips_symbol_context context
)
3555 subrtx_ptr_iterator::array_type array
;
3556 FOR_EACH_SUBRTX_PTR (iter
, array
, loc
, ALL
)
3561 mips_rewrite_small_data_1 (&XEXP (*loc
, 0), SYMBOL_CONTEXT_MEM
);
3562 iter
.skip_subrtxes ();
3564 else if (mips_rewrite_small_data_p (*loc
, context
))
3566 *loc
= gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, *loc
);
3567 iter
.skip_subrtxes ();
3569 else if (GET_CODE (*loc
) == LO_SUM
)
3570 iter
.skip_subrtxes ();
3574 /* Rewrite instruction pattern PATTERN so that it refers to small data
3575 using explicit relocations. */
3578 mips_rewrite_small_data (rtx pattern
)
3580 pattern
= copy_insn (pattern
);
3581 mips_rewrite_small_data_1 (&pattern
, SYMBOL_CONTEXT_LEA
);
3585 /* The cost of loading values from the constant pool. It should be
3586 larger than the cost of any constant we want to synthesize inline. */
3587 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3589 /* Return the cost of X when used as an operand to the MIPS16 instruction
3590 that implements CODE. Return -1 if there is no such instruction, or if
3591 X is not a valid immediate operand for it. */
3594 mips16_constant_cost (int code
, HOST_WIDE_INT x
)
3601 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3602 other shifts are extended. The shift patterns truncate the shift
3603 count to the right size, so there are no out-of-range values. */
3604 if (IN_RANGE (x
, 1, 8))
3606 return COSTS_N_INSNS (1);
3609 if (IN_RANGE (x
, -128, 127))
3611 if (SMALL_OPERAND (x
))
3612 return COSTS_N_INSNS (1);
3616 /* Like LE, but reject the always-true case. */
3620 /* We add 1 to the immediate and use SLT. */
3623 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3626 if (IN_RANGE (x
, 0, 255))
3628 if (SMALL_OPERAND_UNSIGNED (x
))
3629 return COSTS_N_INSNS (1);
3634 /* Equality comparisons with 0 are cheap. */
3644 /* Return true if there is a non-MIPS16 instruction that implements CODE
3645 and if that instruction accepts X as an immediate operand. */
3648 mips_immediate_operand_p (int code
, HOST_WIDE_INT x
)
3655 /* All shift counts are truncated to a valid constant. */
3660 /* Likewise rotates, if the target supports rotates at all. */
3666 /* These instructions take 16-bit unsigned immediates. */
3667 return SMALL_OPERAND_UNSIGNED (x
);
3672 /* These instructions take 16-bit signed immediates. */
3673 return SMALL_OPERAND (x
);
3679 /* The "immediate" forms of these instructions are really
3680 implemented as comparisons with register 0. */
3685 /* Likewise, meaning that the only valid immediate operand is 1. */
3689 /* We add 1 to the immediate and use SLT. */
3690 return SMALL_OPERAND (x
+ 1);
3693 /* Likewise SLTU, but reject the always-true case. */
3694 return SMALL_OPERAND (x
+ 1) && x
+ 1 != 0;
3698 /* The bit position and size are immediate operands. */
3699 return ISA_HAS_EXT_INS
;
3702 /* By default assume that $0 can be used for 0. */
3707 /* Return the cost of binary operation X, given that the instruction
3708 sequence for a word-sized or smaller operation has cost SINGLE_COST
3709 and that the sequence of a double-word operation has cost DOUBLE_COST.
3710 If SPEED is true, optimize for speed otherwise optimize for size. */
3713 mips_binary_cost (rtx x
, int single_cost
, int double_cost
, bool speed
)
3717 if (GET_MODE_SIZE (GET_MODE (x
)) == UNITS_PER_WORD
* 2)
3722 + set_src_cost (XEXP (x
, 0), GET_MODE (x
), speed
)
3723 + rtx_cost (XEXP (x
, 1), GET_MODE (x
), GET_CODE (x
), 1, speed
));
3726 /* Return the cost of floating-point multiplications of mode MODE. */
3729 mips_fp_mult_cost (machine_mode mode
)
3731 return mode
== DFmode
? mips_cost
->fp_mult_df
: mips_cost
->fp_mult_sf
;
3734 /* Return the cost of floating-point divisions of mode MODE. */
3737 mips_fp_div_cost (machine_mode mode
)
3739 return mode
== DFmode
? mips_cost
->fp_div_df
: mips_cost
->fp_div_sf
;
3742 /* Return the cost of sign-extending OP to mode MODE, not including the
3743 cost of OP itself. */
3746 mips_sign_extend_cost (machine_mode mode
, rtx op
)
3749 /* Extended loads are as cheap as unextended ones. */
3752 if (TARGET_64BIT
&& mode
== DImode
&& GET_MODE (op
) == SImode
)
3753 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3756 if (ISA_HAS_SEB_SEH
|| GENERATE_MIPS16E
)
3757 /* We can use SEB or SEH. */
3758 return COSTS_N_INSNS (1);
3760 /* We need to use a shift left and a shift right. */
3761 return COSTS_N_INSNS (TARGET_MIPS16
? 4 : 2);
3764 /* Return the cost of zero-extending OP to mode MODE, not including the
3765 cost of OP itself. */
3768 mips_zero_extend_cost (machine_mode mode
, rtx op
)
3771 /* Extended loads are as cheap as unextended ones. */
3774 if (TARGET_64BIT
&& mode
== DImode
&& GET_MODE (op
) == SImode
)
3775 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3776 return COSTS_N_INSNS (TARGET_MIPS16
? 4 : 2);
3778 if (GENERATE_MIPS16E
)
3779 /* We can use ZEB or ZEH. */
3780 return COSTS_N_INSNS (1);
3783 /* We need to load 0xff or 0xffff into a register and use AND. */
3784 return COSTS_N_INSNS (GET_MODE (op
) == QImode
? 2 : 3);
3786 /* We can use ANDI. */
3787 return COSTS_N_INSNS (1);
3790 /* Return the cost of moving between two registers of mode MODE,
3791 assuming that the move will be in pieces of at most UNITS bytes. */
3794 mips_set_reg_reg_piece_cost (machine_mode mode
, unsigned int units
)
3796 return COSTS_N_INSNS ((GET_MODE_SIZE (mode
) + units
- 1) / units
);
3799 /* Return the cost of moving between two registers of mode MODE. */
3802 mips_set_reg_reg_cost (machine_mode mode
)
3804 switch (GET_MODE_CLASS (mode
))
3807 return mips_set_reg_reg_piece_cost (mode
, GET_MODE_SIZE (CCmode
));
3810 case MODE_COMPLEX_FLOAT
:
3811 case MODE_VECTOR_FLOAT
:
3812 if (TARGET_HARD_FLOAT
)
3813 return mips_set_reg_reg_piece_cost (mode
, UNITS_PER_HWFPVALUE
);
3817 return mips_set_reg_reg_piece_cost (mode
, UNITS_PER_WORD
);
3821 /* Implement TARGET_RTX_COSTS. */
3824 mips_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
3825 int opno ATTRIBUTE_UNUSED
, int *total
, bool speed
)
3827 int code
= GET_CODE (x
);
3828 bool float_mode_p
= FLOAT_MODE_P (mode
);
3832 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3833 appear in the instruction stream, and the cost of a comparison is
3834 really the cost of the branch or scc condition. At the time of
3835 writing, GCC only uses an explicit outer COMPARE code when optabs
3836 is testing whether a constant is expensive enough to force into a
3837 register. We want optabs to pass such constants through the MIPS
3838 expanders instead, so make all constants very cheap here. */
3839 if (outer_code
== COMPARE
)
3841 gcc_assert (CONSTANT_P (x
));
3849 /* Treat *clear_upper32-style ANDs as having zero cost in the
3850 second operand. The cost is entirely in the first operand.
3852 ??? This is needed because we would otherwise try to CSE
3853 the constant operand. Although that's the right thing for
3854 instructions that continue to be a register operation throughout
3855 compilation, it is disastrous for instructions that could
3856 later be converted into a memory operation. */
3858 && outer_code
== AND
3859 && UINTVAL (x
) == 0xffffffff)
3867 cost
= mips16_constant_cost (outer_code
, INTVAL (x
));
3876 /* When not optimizing for size, we care more about the cost
3877 of hot code, and hot code is often in a loop. If a constant
3878 operand needs to be forced into a register, we will often be
3879 able to hoist the constant load out of the loop, so the load
3880 should not contribute to the cost. */
3881 if (speed
|| mips_immediate_operand_p (outer_code
, INTVAL (x
)))
3893 if (force_to_mem_operand (x
, VOIDmode
))
3895 *total
= COSTS_N_INSNS (1);
3898 cost
= mips_const_insns (x
);
3901 /* If the constant is likely to be stored in a GPR, SETs of
3902 single-insn constants are as cheap as register sets; we
3903 never want to CSE them.
3905 Don't reduce the cost of storing a floating-point zero in
3906 FPRs. If we have a zero in an FPR for other reasons, we
3907 can get better cfg-cleanup and delayed-branch results by
3908 using it consistently, rather than using $0 sometimes and
3909 an FPR at other times. Also, moves between floating-point
3910 registers are sometimes cheaper than (D)MTC1 $0. */
3912 && outer_code
== SET
3913 && !(float_mode_p
&& TARGET_HARD_FLOAT
))
3915 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3916 want to CSE the constant itself. It is usually better to
3917 have N copies of the last operation in the sequence and one
3918 shared copy of the other operations. (Note that this is
3919 not true for MIPS16 code, where the final operation in the
3920 sequence is often an extended instruction.)
3922 Also, if we have a CONST_INT, we don't know whether it is
3923 for a word or doubleword operation, so we cannot rely on
3924 the result of mips_build_integer. */
3925 else if (!TARGET_MIPS16
3926 && (outer_code
== SET
|| GET_MODE (x
) == VOIDmode
))
3928 *total
= COSTS_N_INSNS (cost
);
3931 /* The value will need to be fetched from the constant pool. */
3932 *total
= CONSTANT_POOL_COST
;
3936 /* If the address is legitimate, return the number of
3937 instructions it needs. */
3939 cost
= mips_address_insns (addr
, mode
, true);
3942 *total
= COSTS_N_INSNS (cost
+ 1);
3945 /* Check for a scaled indexed address. */
3946 if (mips_lwxs_address_p (addr
)
3947 || mips_lx_address_p (addr
, mode
))
3949 *total
= COSTS_N_INSNS (2);
3952 /* Otherwise use the default handling. */
3956 *total
= COSTS_N_INSNS (6);
3960 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1);
3964 /* Check for a *clear_upper32 pattern and treat it like a zero
3965 extension. See the pattern's comment for details. */
3968 && CONST_INT_P (XEXP (x
, 1))
3969 && UINTVAL (XEXP (x
, 1)) == 0xffffffff)
3971 *total
= (mips_zero_extend_cost (mode
, XEXP (x
, 0))
3972 + set_src_cost (XEXP (x
, 0), mode
, speed
));
3975 if (ISA_HAS_CINS
&& CONST_INT_P (XEXP (x
, 1)))
3977 rtx op
= XEXP (x
, 0);
3978 if (GET_CODE (op
) == ASHIFT
3979 && CONST_INT_P (XEXP (op
, 1))
3980 && mask_low_and_shift_p (mode
, XEXP (x
, 1), XEXP (op
, 1), 32))
3982 *total
= COSTS_N_INSNS (1);
3983 *total
+= set_src_cost (XEXP (op
, 0), mode
, speed
);
3987 /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in
3988 a single instruction. */
3990 && GET_CODE (XEXP (x
, 0)) == NOT
3991 && GET_CODE (XEXP (x
, 1)) == NOT
)
3993 cost
= GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1;
3994 *total
= (COSTS_N_INSNS (cost
)
3995 + set_src_cost (XEXP (XEXP (x
, 0), 0), mode
, speed
)
3996 + set_src_cost (XEXP (XEXP (x
, 1), 0), mode
, speed
));
4004 /* Double-word operations use two single-word operations. */
4005 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
4014 if (CONSTANT_P (XEXP (x
, 1)))
4015 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
4018 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
4024 *total
= mips_cost
->fp_add
;
4026 *total
= COSTS_N_INSNS (4);
4030 /* Low-part immediates need an extended MIPS16 instruction. */
4031 *total
= (COSTS_N_INSNS (TARGET_MIPS16
? 2 : 1)
4032 + set_src_cost (XEXP (x
, 0), mode
, speed
));
4047 /* Branch comparisons have VOIDmode, so use the first operand's
4049 mode
= GET_MODE (XEXP (x
, 0));
4050 if (FLOAT_MODE_P (mode
))
4052 *total
= mips_cost
->fp_add
;
4055 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
4060 if (float_mode_p
&& ISA_HAS_UNFUSED_MADD4
&& !HONOR_SIGNED_ZEROS (mode
))
4062 /* See if we can use NMADD or NMSUB via the *nmadd4<mode>_fastmath
4063 or *nmsub4<mode>_fastmath patterns. These patterns check for
4064 HONOR_SIGNED_ZEROS so we check here too. */
4065 rtx op0
= XEXP (x
, 0);
4066 rtx op1
= XEXP (x
, 1);
4067 if (GET_CODE (op0
) == MULT
&& GET_CODE (XEXP (op0
, 0)) == NEG
)
4069 *total
= (mips_fp_mult_cost (mode
)
4070 + set_src_cost (XEXP (XEXP (op0
, 0), 0), mode
, speed
)
4071 + set_src_cost (XEXP (op0
, 1), mode
, speed
)
4072 + set_src_cost (op1
, mode
, speed
));
4075 if (GET_CODE (op1
) == MULT
)
4077 *total
= (mips_fp_mult_cost (mode
)
4078 + set_src_cost (op0
, mode
, speed
)
4079 + set_src_cost (XEXP (op1
, 0), mode
, speed
)
4080 + set_src_cost (XEXP (op1
, 1), mode
, speed
));
4089 /* If this is part of a MADD or MSUB, treat the PLUS as
4091 if (ISA_HAS_UNFUSED_MADD4
&& GET_CODE (XEXP (x
, 0)) == MULT
)
4094 *total
= mips_cost
->fp_add
;
4098 /* If it's an add + mult (which is equivalent to shift left) and
4099 it's immediate operand satisfies const_immlsa_operand predicate. */
4100 if (((ISA_HAS_LSA
&& mode
== SImode
)
4101 || (ISA_HAS_DLSA
&& mode
== DImode
))
4102 && GET_CODE (XEXP (x
, 0)) == MULT
)
4104 rtx op2
= XEXP (XEXP (x
, 0), 1);
4105 if (const_immlsa_operand (op2
, mode
))
4107 *total
= (COSTS_N_INSNS (1)
4108 + set_src_cost (XEXP (XEXP (x
, 0), 0), mode
, speed
)
4109 + set_src_cost (XEXP (x
, 1), mode
, speed
));
4114 /* Double-word operations require three single-word operations and
4115 an SLTU. The MIPS16 version then needs to move the result of
4116 the SLTU from $24 to a MIPS16 register. */
4117 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1),
4118 COSTS_N_INSNS (TARGET_MIPS16
? 5 : 4),
4123 if (float_mode_p
&& ISA_HAS_UNFUSED_MADD4
)
4125 /* See if we can use NMADD or NMSUB via the *nmadd4<mode> or
4126 *nmsub4<mode> patterns. */
4127 rtx op
= XEXP (x
, 0);
4128 if ((GET_CODE (op
) == PLUS
|| GET_CODE (op
) == MINUS
)
4129 && GET_CODE (XEXP (op
, 0)) == MULT
)
4131 *total
= (mips_fp_mult_cost (mode
)
4132 + set_src_cost (XEXP (XEXP (op
, 0), 0), mode
, speed
)
4133 + set_src_cost (XEXP (XEXP (op
, 0), 1), mode
, speed
)
4134 + set_src_cost (XEXP (op
, 1), mode
, speed
));
4140 *total
= mips_cost
->fp_add
;
4142 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 4 : 1);
4146 *total
= mips_fp_mult_cost (mode
);
4151 *total
= mips_fp_mult_cost (mode
);
4152 else if (mode
== DImode
&& !TARGET_64BIT
)
4153 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
4154 where the mulsidi3 always includes an MFHI and an MFLO. */
4156 ? mips_cost
->int_mult_si
* 3 + 6
4157 : COSTS_N_INSNS (ISA_HAS_MUL3
? 7 : 9));
4159 *total
= COSTS_N_INSNS ((ISA_HAS_MUL3
|| ISA_HAS_R6MUL
) ? 1 : 2) + 1;
4160 else if (mode
== DImode
)
4161 *total
= mips_cost
->int_mult_di
;
4163 *total
= mips_cost
->int_mult_si
;
4167 /* Check for a reciprocal. */
4169 && ISA_HAS_FP_RECIP_RSQRT (mode
)
4170 && flag_unsafe_math_optimizations
4171 && XEXP (x
, 0) == CONST1_RTX (mode
))
4173 if (outer_code
== SQRT
|| GET_CODE (XEXP (x
, 1)) == SQRT
)
4174 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
4175 division as being free. */
4176 *total
= set_src_cost (XEXP (x
, 1), mode
, speed
);
4178 *total
= (mips_fp_div_cost (mode
)
4179 + set_src_cost (XEXP (x
, 1), mode
, speed
));
4188 *total
= mips_fp_div_cost (mode
);
4197 /* It is our responsibility to make division by a power of 2
4198 as cheap as 2 register additions if we want the division
4199 expanders to be used for such operations; see the setting
4200 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
4201 should always produce shorter code than using
4202 expand_sdiv2_pow2. */
4204 && CONST_INT_P (XEXP (x
, 1))
4205 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
4207 *total
= COSTS_N_INSNS (2);
4208 *total
+= set_src_cost (XEXP (x
, 0), mode
, speed
);
4211 *total
= COSTS_N_INSNS (mips_idiv_insns ());
4213 else if (mode
== DImode
)
4214 *total
= mips_cost
->int_div_di
;
4216 *total
= mips_cost
->int_div_si
;
4220 *total
= mips_sign_extend_cost (mode
, XEXP (x
, 0));
4224 if (outer_code
== SET
4226 && (GET_CODE (XEXP (x
, 0)) == TRUNCATE
4227 || GET_CODE (XEXP (x
, 0)) == SUBREG
)
4228 && GET_MODE (XEXP (x
, 0)) == QImode
4229 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
4231 *total
= set_src_cost (XEXP (XEXP (x
, 0), 0), VOIDmode
, speed
);
4234 *total
= mips_zero_extend_cost (mode
, XEXP (x
, 0));
4237 /* Costings for highpart multiplies. Matching patterns of the form:
4239 (lshiftrt:DI (mult:DI (sign_extend:DI (...)
4240 (sign_extend:DI (...))
4244 && (GET_CODE (XEXP (x
, 0)) == ASHIFTRT
4245 || GET_CODE (XEXP (x
, 0)) == LSHIFTRT
)
4246 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
4247 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) == 32
4248 && GET_MODE (XEXP (x
, 0)) == DImode
)
4250 && INTVAL (XEXP (XEXP (x
, 0), 1)) == 64
4251 && GET_MODE (XEXP (x
, 0)) == TImode
))
4252 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
4253 && ((GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
4254 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)) == SIGN_EXTEND
)
4255 || (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
4256 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1))
4260 *total
= COSTS_N_INSNS (1) + 1;
4261 else if (mode
== DImode
)
4262 *total
= mips_cost
->int_mult_di
;
4264 *total
= mips_cost
->int_mult_si
;
4266 /* Sign extension is free, zero extension costs for DImode when
4267 on a 64bit core / when DMUL is present. */
4268 for (int i
= 0; i
< 2; ++i
)
4270 rtx op
= XEXP (XEXP (XEXP (x
, 0), 0), i
);
4272 && GET_CODE (op
) == ZERO_EXTEND
4273 && GET_MODE (op
) == DImode
)
4274 *total
+= rtx_cost (op
, DImode
, MULT
, i
, speed
);
4276 *total
+= rtx_cost (XEXP (op
, 0), VOIDmode
, GET_CODE (op
),
4285 case UNSIGNED_FLOAT
:
4288 case FLOAT_TRUNCATE
:
4289 *total
= mips_cost
->fp_add
;
4293 if (register_operand (SET_DEST (x
), VOIDmode
)
4294 && reg_or_0_operand (SET_SRC (x
), VOIDmode
))
4296 *total
= mips_set_reg_reg_cost (GET_MODE (SET_DEST (x
)));
4306 /* Implement TARGET_ADDRESS_COST. */
4309 mips_address_cost (rtx addr
, machine_mode mode
,
4310 addr_space_t as ATTRIBUTE_UNUSED
,
4311 bool speed ATTRIBUTE_UNUSED
)
4313 return mips_address_insns (addr
, mode
, false);
4316 /* Implement TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P. */
4319 mips_no_speculation_in_delay_slots_p ()
4321 return TARGET_CB_MAYBE
;
4324 /* Information about a single instruction in a multi-instruction
4326 struct mips_multi_member
{
4327 /* True if this is a label, false if it is code. */
4330 /* The output_asm_insn format of the instruction. */
4333 /* The operands to the instruction. */
4334 rtx operands
[MAX_RECOG_OPERANDS
];
4336 typedef struct mips_multi_member mips_multi_member
;
4338 /* The instructions that make up the current multi-insn sequence. */
4339 static vec
<mips_multi_member
> mips_multi_members
;
4341 /* How many instructions (as opposed to labels) are in the current
4342 multi-insn sequence. */
4343 static unsigned int mips_multi_num_insns
;
4345 /* Start a new multi-insn sequence. */
4348 mips_multi_start (void)
4350 mips_multi_members
.truncate (0);
4351 mips_multi_num_insns
= 0;
4354 /* Add a new, uninitialized member to the current multi-insn sequence. */
4356 static struct mips_multi_member
*
4357 mips_multi_add (void)
4359 mips_multi_member empty
;
4360 return mips_multi_members
.safe_push (empty
);
4363 /* Add a normal insn with the given asm format to the current multi-insn
4364 sequence. The other arguments are a null-terminated list of operands. */
4367 mips_multi_add_insn (const char *format
, ...)
4369 struct mips_multi_member
*member
;
4374 member
= mips_multi_add ();
4375 member
->is_label_p
= false;
4376 member
->format
= format
;
4377 va_start (ap
, format
);
4379 while ((op
= va_arg (ap
, rtx
)))
4380 member
->operands
[i
++] = op
;
4382 mips_multi_num_insns
++;
4385 /* Add the given label definition to the current multi-insn sequence.
4386 The definition should include the colon. */
4389 mips_multi_add_label (const char *label
)
4391 struct mips_multi_member
*member
;
4393 member
= mips_multi_add ();
4394 member
->is_label_p
= true;
4395 member
->format
= label
;
4398 /* Return the index of the last member of the current multi-insn sequence. */
4401 mips_multi_last_index (void)
4403 return mips_multi_members
.length () - 1;
4406 /* Add a copy of an existing instruction to the current multi-insn
4407 sequence. I is the index of the instruction that should be copied. */
4410 mips_multi_copy_insn (unsigned int i
)
4412 struct mips_multi_member
*member
;
4414 member
= mips_multi_add ();
4415 memcpy (member
, &mips_multi_members
[i
], sizeof (*member
));
4416 gcc_assert (!member
->is_label_p
);
4419 /* Change the operand of an existing instruction in the current
4420 multi-insn sequence. I is the index of the instruction,
4421 OP is the index of the operand, and X is the new value. */
4424 mips_multi_set_operand (unsigned int i
, unsigned int op
, rtx x
)
4426 mips_multi_members
[i
].operands
[op
] = x
;
4429 /* Write out the asm code for the current multi-insn sequence. */
4432 mips_multi_write (void)
4434 struct mips_multi_member
*member
;
4437 FOR_EACH_VEC_ELT (mips_multi_members
, i
, member
)
4438 if (member
->is_label_p
)
4439 fprintf (asm_out_file
, "%s\n", member
->format
);
4441 output_asm_insn (member
->format
, member
->operands
);
4444 /* Return one word of double-word value OP, taking into account the fixed
4445 endianness of certain registers. HIGH_P is true to select the high part,
4446 false to select the low part. */
4449 mips_subword (rtx op
, bool high_p
)
4451 unsigned int byte
, offset
;
4454 mode
= GET_MODE (op
);
4455 if (mode
== VOIDmode
)
4456 mode
= TARGET_64BIT
? TImode
: DImode
;
4458 if (TARGET_BIG_ENDIAN
? !high_p
: high_p
)
4459 byte
= UNITS_PER_WORD
;
4463 if (FP_REG_RTX_P (op
))
4465 /* Paired FPRs are always ordered little-endian. */
4466 offset
= (UNITS_PER_WORD
< UNITS_PER_HWFPVALUE
? high_p
: byte
!= 0);
4467 return gen_rtx_REG (word_mode
, REGNO (op
) + offset
);
4471 return mips_rewrite_small_data (adjust_address (op
, word_mode
, byte
));
4473 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
4476 /* Return true if SRC should be moved into DEST using "MULT $0, $0".
4477 SPLIT_TYPE is the condition under which moves should be split. */
4480 mips_mult_move_p (rtx dest
, rtx src
, enum mips_split_type split_type
)
4482 return ((split_type
!= SPLIT_FOR_SPEED
4483 || mips_tuning_info
.fast_mult_zero_zero_p
)
4484 && src
== const0_rtx
4486 && GET_MODE_SIZE (GET_MODE (dest
)) == 2 * UNITS_PER_WORD
4487 && (ISA_HAS_DSP_MULT
4488 ? ACC_REG_P (REGNO (dest
))
4489 : MD_REG_P (REGNO (dest
))));
4492 /* Return true if a move from SRC to DEST should be split into two.
4493 SPLIT_TYPE describes the split condition. */
4496 mips_split_move_p (rtx dest
, rtx src
, enum mips_split_type split_type
)
4498 /* Check whether the move can be done using some variant of MULT $0,$0. */
4499 if (mips_mult_move_p (dest
, src
, split_type
))
4502 /* FPR-to-FPR moves can be done in a single instruction, if they're
4504 unsigned int size
= GET_MODE_SIZE (GET_MODE (dest
));
4505 if (size
== 8 && FP_REG_RTX_P (src
) && FP_REG_RTX_P (dest
))
4508 /* Check for floating-point loads and stores. */
4509 if (size
== 8 && ISA_HAS_LDC1_SDC1
)
4511 if (FP_REG_RTX_P (dest
) && MEM_P (src
))
4513 if (FP_REG_RTX_P (src
) && MEM_P (dest
))
4517 /* Otherwise split all multiword moves. */
4518 return size
> UNITS_PER_WORD
;
4521 /* Split a move from SRC to DEST, given that mips_split_move_p holds.
4522 SPLIT_TYPE describes the split condition. */
4525 mips_split_move (rtx dest
, rtx src
, enum mips_split_type split_type
)
4529 gcc_checking_assert (mips_split_move_p (dest
, src
, split_type
));
4530 if (FP_REG_RTX_P (dest
) || FP_REG_RTX_P (src
))
4532 if (!TARGET_64BIT
&& GET_MODE (dest
) == DImode
)
4533 emit_insn (gen_move_doubleword_fprdi (dest
, src
));
4534 else if (!TARGET_64BIT
&& GET_MODE (dest
) == DFmode
)
4535 emit_insn (gen_move_doubleword_fprdf (dest
, src
));
4536 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V2SFmode
)
4537 emit_insn (gen_move_doubleword_fprv2sf (dest
, src
));
4538 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V2SImode
)
4539 emit_insn (gen_move_doubleword_fprv2si (dest
, src
));
4540 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V4HImode
)
4541 emit_insn (gen_move_doubleword_fprv4hi (dest
, src
));
4542 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V8QImode
)
4543 emit_insn (gen_move_doubleword_fprv8qi (dest
, src
));
4544 else if (TARGET_64BIT
&& GET_MODE (dest
) == TFmode
)
4545 emit_insn (gen_move_doubleword_fprtf (dest
, src
));
4549 else if (REG_P (dest
) && REGNO (dest
) == MD_REG_FIRST
)
4551 low_dest
= mips_subword (dest
, false);
4552 mips_emit_move (low_dest
, mips_subword (src
, false));
4554 emit_insn (gen_mthidi_ti (dest
, mips_subword (src
, true), low_dest
));
4556 emit_insn (gen_mthisi_di (dest
, mips_subword (src
, true), low_dest
));
4558 else if (REG_P (src
) && REGNO (src
) == MD_REG_FIRST
)
4560 mips_emit_move (mips_subword (dest
, false), mips_subword (src
, false));
4562 emit_insn (gen_mfhidi_ti (mips_subword (dest
, true), src
));
4564 emit_insn (gen_mfhisi_di (mips_subword (dest
, true), src
));
4568 /* The operation can be split into two normal moves. Decide in
4569 which order to do them. */
4570 low_dest
= mips_subword (dest
, false);
4571 if (REG_P (low_dest
)
4572 && reg_overlap_mentioned_p (low_dest
, src
))
4574 mips_emit_move (mips_subword (dest
, true), mips_subword (src
, true));
4575 mips_emit_move (low_dest
, mips_subword (src
, false));
4579 mips_emit_move (low_dest
, mips_subword (src
, false));
4580 mips_emit_move (mips_subword (dest
, true), mips_subword (src
, true));
4585 /* Return the split type for instruction INSN. */
4587 static enum mips_split_type
4588 mips_insn_split_type (rtx insn
)
4590 basic_block bb
= BLOCK_FOR_INSN (insn
);
4593 if (optimize_bb_for_speed_p (bb
))
4594 return SPLIT_FOR_SPEED
;
4596 return SPLIT_FOR_SIZE
;
4598 /* Once CFG information has been removed, we should trust the optimization
4599 decisions made by previous passes and only split where necessary. */
4600 return SPLIT_IF_NECESSARY
;
4603 /* Return true if a move from SRC to DEST in INSN should be split. */
4606 mips_split_move_insn_p (rtx dest
, rtx src
, rtx insn
)
4608 return mips_split_move_p (dest
, src
, mips_insn_split_type (insn
));
4611 /* Split a move from SRC to DEST in INSN, given that mips_split_move_insn_p
4615 mips_split_move_insn (rtx dest
, rtx src
, rtx insn
)
4617 mips_split_move (dest
, src
, mips_insn_split_type (insn
));
4620 /* Return the appropriate instructions to move SRC into DEST. Assume
4621 that SRC is operand 1 and DEST is operand 0. */
4624 mips_output_move (rtx dest
, rtx src
)
4626 enum rtx_code dest_code
, src_code
;
4628 enum mips_symbol_type symbol_type
;
4631 dest_code
= GET_CODE (dest
);
4632 src_code
= GET_CODE (src
);
4633 mode
= GET_MODE (dest
);
4634 dbl_p
= (GET_MODE_SIZE (mode
) == 8);
4636 if (mips_split_move_p (dest
, src
, SPLIT_IF_NECESSARY
))
4639 if ((src_code
== REG
&& GP_REG_P (REGNO (src
)))
4640 || (!TARGET_MIPS16
&& src
== CONST0_RTX (mode
)))
4642 if (dest_code
== REG
)
4644 if (GP_REG_P (REGNO (dest
)))
4645 return "move\t%0,%z1";
4647 if (mips_mult_move_p (dest
, src
, SPLIT_IF_NECESSARY
))
4649 if (ISA_HAS_DSP_MULT
)
4650 return "mult\t%q0,%.,%.";
4652 return "mult\t%.,%.";
4655 /* Moves to HI are handled by special .md insns. */
4656 if (REGNO (dest
) == LO_REGNUM
)
4659 if (DSP_ACC_REG_P (REGNO (dest
)))
4661 static char retval
[] = "mt__\t%z1,%q0";
4663 retval
[2] = reg_names
[REGNO (dest
)][4];
4664 retval
[3] = reg_names
[REGNO (dest
)][5];
4668 if (FP_REG_P (REGNO (dest
)))
4669 return dbl_p
? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
4671 if (ALL_COP_REG_P (REGNO (dest
)))
4673 static char retval
[] = "dmtc_\t%z1,%0";
4675 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
4676 return dbl_p
? retval
: retval
+ 1;
4679 if (dest_code
== MEM
)
4680 switch (GET_MODE_SIZE (mode
))
4682 case 1: return "sb\t%z1,%0";
4683 case 2: return "sh\t%z1,%0";
4684 case 4: return "sw\t%z1,%0";
4685 case 8: return "sd\t%z1,%0";
4688 if (dest_code
== REG
&& GP_REG_P (REGNO (dest
)))
4690 if (src_code
== REG
)
4692 /* Moves from HI are handled by special .md insns. */
4693 if (REGNO (src
) == LO_REGNUM
)
4695 /* When generating VR4120 or VR4130 code, we use MACC and
4696 DMACC instead of MFLO. This avoids both the normal
4697 MIPS III HI/LO hazards and the errata related to
4700 return dbl_p
? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
4704 if (DSP_ACC_REG_P (REGNO (src
)))
4706 static char retval
[] = "mf__\t%0,%q1";
4708 retval
[2] = reg_names
[REGNO (src
)][4];
4709 retval
[3] = reg_names
[REGNO (src
)][5];
4713 if (FP_REG_P (REGNO (src
)))
4714 return dbl_p
? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
4716 if (ALL_COP_REG_P (REGNO (src
)))
4718 static char retval
[] = "dmfc_\t%0,%1";
4720 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
4721 return dbl_p
? retval
: retval
+ 1;
4725 if (src_code
== MEM
)
4726 switch (GET_MODE_SIZE (mode
))
4728 case 1: return "lbu\t%0,%1";
4729 case 2: return "lhu\t%0,%1";
4730 case 4: return "lw\t%0,%1";
4731 case 8: return "ld\t%0,%1";
4734 if (src_code
== CONST_INT
)
4736 /* Don't use the X format for the operand itself, because that
4737 will give out-of-range numbers for 64-bit hosts and 32-bit
4740 return "li\t%0,%1\t\t\t# %X1";
4742 if (SMALL_OPERAND_UNSIGNED (INTVAL (src
)))
4745 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src
)))
4749 if (src_code
== HIGH
)
4750 return TARGET_MIPS16
? "#" : "lui\t%0,%h1";
4752 if (CONST_GP_P (src
))
4753 return "move\t%0,%1";
4755 if (mips_symbolic_constant_p (src
, SYMBOL_CONTEXT_LEA
, &symbol_type
)
4756 && mips_lo_relocs
[symbol_type
] != 0)
4758 /* A signed 16-bit constant formed by applying a relocation
4759 operator to a symbolic address. */
4760 gcc_assert (!mips_split_p
[symbol_type
]);
4761 return "li\t%0,%R1";
4764 if (symbolic_operand (src
, VOIDmode
))
4766 gcc_assert (TARGET_MIPS16
4767 ? TARGET_MIPS16_TEXT_LOADS
4768 : !TARGET_EXPLICIT_RELOCS
);
4769 return dbl_p
? "dla\t%0,%1" : "la\t%0,%1";
4772 if (src_code
== REG
&& FP_REG_P (REGNO (src
)))
4774 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
4776 if (GET_MODE (dest
) == V2SFmode
)
4777 return "mov.ps\t%0,%1";
4779 return dbl_p
? "mov.d\t%0,%1" : "mov.s\t%0,%1";
4782 if (dest_code
== MEM
)
4783 return dbl_p
? "sdc1\t%1,%0" : "swc1\t%1,%0";
4785 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
4787 if (src_code
== MEM
)
4788 return dbl_p
? "ldc1\t%0,%1" : "lwc1\t%0,%1";
4790 if (dest_code
== REG
&& ALL_COP_REG_P (REGNO (dest
)) && src_code
== MEM
)
4792 static char retval
[] = "l_c_\t%0,%1";
4794 retval
[1] = (dbl_p
? 'd' : 'w');
4795 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
4798 if (dest_code
== MEM
&& src_code
== REG
&& ALL_COP_REG_P (REGNO (src
)))
4800 static char retval
[] = "s_c_\t%1,%0";
4802 retval
[1] = (dbl_p
? 'd' : 'w');
4803 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
4809 /* Return true if CMP1 is a suitable second operand for integer ordering
4810 test CODE. See also the *sCC patterns in mips.md. */
4813 mips_int_order_operand_ok_p (enum rtx_code code
, rtx cmp1
)
4819 return reg_or_0_operand (cmp1
, VOIDmode
);
4823 return !TARGET_MIPS16
&& cmp1
== const1_rtx
;
4827 return arith_operand (cmp1
, VOIDmode
);
4830 return sle_operand (cmp1
, VOIDmode
);
4833 return sleu_operand (cmp1
, VOIDmode
);
4840 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
4841 integer ordering test *CODE, or if an equivalent combination can
4842 be formed by adjusting *CODE and *CMP1. When returning true, update
4843 *CODE and *CMP1 with the chosen code and operand, otherwise leave
4847 mips_canonicalize_int_order_test (enum rtx_code
*code
, rtx
*cmp1
,
4850 HOST_WIDE_INT plus_one
;
4852 if (mips_int_order_operand_ok_p (*code
, *cmp1
))
4855 if (CONST_INT_P (*cmp1
))
4859 plus_one
= trunc_int_for_mode (UINTVAL (*cmp1
) + 1, mode
);
4860 if (INTVAL (*cmp1
) < plus_one
)
4863 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
4869 plus_one
= trunc_int_for_mode (UINTVAL (*cmp1
) + 1, mode
);
4873 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
4884 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
4885 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
4886 is nonnull, it's OK to set TARGET to the inverse of the result and
4887 flip *INVERT_PTR instead. */
4890 mips_emit_int_order_test (enum rtx_code code
, bool *invert_ptr
,
4891 rtx target
, rtx cmp0
, rtx cmp1
)
4895 /* First see if there is a MIPS instruction that can do this operation.
4896 If not, try doing the same for the inverse operation. If that also
4897 fails, force CMP1 into a register and try again. */
4898 mode
= GET_MODE (cmp0
);
4899 if (mips_canonicalize_int_order_test (&code
, &cmp1
, mode
))
4900 mips_emit_binary (code
, target
, cmp0
, cmp1
);
4903 enum rtx_code inv_code
= reverse_condition (code
);
4904 if (!mips_canonicalize_int_order_test (&inv_code
, &cmp1
, mode
))
4906 cmp1
= force_reg (mode
, cmp1
);
4907 mips_emit_int_order_test (code
, invert_ptr
, target
, cmp0
, cmp1
);
4909 else if (invert_ptr
== 0)
4913 inv_target
= mips_force_binary (GET_MODE (target
),
4914 inv_code
, cmp0
, cmp1
);
4915 mips_emit_binary (XOR
, target
, inv_target
, const1_rtx
);
4919 *invert_ptr
= !*invert_ptr
;
4920 mips_emit_binary (inv_code
, target
, cmp0
, cmp1
);
4925 /* Return a register that is zero iff CMP0 and CMP1 are equal.
4926 The register will have the same mode as CMP0. */
4929 mips_zero_if_equal (rtx cmp0
, rtx cmp1
)
4931 if (cmp1
== const0_rtx
)
4934 if (uns_arith_operand (cmp1
, VOIDmode
))
4935 return expand_binop (GET_MODE (cmp0
), xor_optab
,
4936 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
4938 return expand_binop (GET_MODE (cmp0
), sub_optab
,
4939 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
4942 /* Convert *CODE into a code that can be used in a floating-point
4943 scc instruction (C.cond.fmt). Return true if the values of
4944 the condition code registers will be inverted, with 0 indicating
4945 that the condition holds. */
4948 mips_reversed_fp_cond (enum rtx_code
*code
)
4955 *code
= reverse_condition_maybe_unordered (*code
);
4963 /* Allocate a floating-point condition-code register of mode MODE.
4965 These condition code registers are used for certain kinds
4966 of compound operation, such as compare and branches, vconds,
4967 and built-in functions. At expand time, their use is entirely
4968 controlled by MIPS-specific code and is entirely internal
4969 to these compound operations.
4971 We could (and did in the past) expose condition-code values
4972 as pseudo registers and leave the register allocator to pick
4973 appropriate registers. The problem is that it is not practically
4974 possible for the rtl optimizers to guarantee that no spills will
4975 be needed, even when AVOID_CCMODE_COPIES is defined. We would
4976 therefore need spill and reload sequences to handle the worst case.
4978 Although such sequences do exist, they are very expensive and are
4979 not something we'd want to use. This is especially true of CCV2 and
4980 CCV4, where all the shuffling would greatly outweigh whatever benefit
4981 the vectorization itself provides.
4983 The main benefit of having more than one condition-code register
4984 is to allow the pipelining of operations, especially those involving
4985 comparisons and conditional moves. We don't really expect the
4986 registers to be live for long periods, and certainly never want
4987 them to be live across calls.
4989 Also, there should be no penalty attached to using all the available
4990 registers. They are simply bits in the same underlying FPU control
4993 We therefore expose the hardware registers from the outset and use
4994 a simple round-robin allocation scheme. */
4997 mips_allocate_fcc (machine_mode mode
)
4999 unsigned int regno
, count
;
5001 gcc_assert (TARGET_HARD_FLOAT
&& ISA_HAS_8CC
);
5005 else if (mode
== CCV2mode
)
5007 else if (mode
== CCV4mode
)
5012 cfun
->machine
->next_fcc
+= -cfun
->machine
->next_fcc
& (count
- 1);
5013 if (cfun
->machine
->next_fcc
> ST_REG_LAST
- ST_REG_FIRST
)
5014 cfun
->machine
->next_fcc
= 0;
5015 regno
= ST_REG_FIRST
+ cfun
->machine
->next_fcc
;
5016 cfun
->machine
->next_fcc
+= count
;
5017 return gen_rtx_REG (mode
, regno
);
5020 /* Convert a comparison into something that can be used in a branch or
5021 conditional move. On entry, *OP0 and *OP1 are the values being
5022 compared and *CODE is the code used to compare them.
5024 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
5025 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
5026 otherwise any standard branch condition can be used. The standard branch
5029 - EQ or NE between two registers.
5030 - any comparison between a register and zero.
5031 - if compact branches are available then any condition is valid. */
/* NOTE(review): the extraction dropped many original lines of this function
   (braces, the cmp_op0/cmp_op1 declarations, and the whole compact-branch
   arm between embedded lines 5054 and 5082 -- note the gaps in the embedded
   numbering).  The text below is not compilable as-is; reconcile with the
   full upstream mips.c before editing.  */
5034 mips_emit_compare (enum rtx_code
*code
, rtx
*op0
, rtx
*op1
, bool need_eq_ne_p
)
/* Integer comparisons.  */
5039 if (GET_MODE_CLASS (GET_MODE (*op0
)) == MODE_INT
)
5041 if (!need_eq_ne_p
&& *op1
== const0_rtx
)
/* EQ/NE against a nonzero operand: fold the two operands into one
   value that is zero iff they are equal (mips_zero_if_equal) and
   compare that against zero.  */
5043 else if (*code
== EQ
|| *code
== NE
)
5047 *op0
= mips_zero_if_equal (cmp_op0
, cmp_op1
)
;
5051 *op1
= force_reg (GET_MODE (cmp_op0
), cmp_op1
);
5053 else if (!need_eq_ne_p
&& TARGET_CB_MAYBE
)
5083 *op1
= force_reg (GET_MODE (cmp_op0
), cmp_op1
);
5093 /* The comparison needs a separate scc instruction. Store the
5094 result of the scc in *OP0 and compare it against zero. */
5095 bool invert
= false;
5096 *op0
= gen_reg_rtx (GET_MODE (cmp_op0
));
5097 mips_emit_int_order_test (*code
, &invert
, *op0
, cmp_op0
, cmp_op1
);
5098 *code
= (invert
? EQ
: NE
);
/* Fixed-point (DSP) comparisons set the CCDSP condition-code register.  */
5102 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0
)))
5104 *op0
= gen_rtx_REG (CCDSPmode
, CCDSP_CC_REGNUM
);
5105 mips_emit_binary (*code
, *op0
, cmp_op0
, cmp_op1
);
/* Floating-point comparisons.  */
5111 enum rtx_code cmp_code
;
5113 /* Floating-point tests use a separate C.cond.fmt or CMP.cond.fmt
5114 comparison to set a register. The branch or conditional move will
5115 then compare that register against zero.
5117 Set CMP_CODE to the code of the comparison instruction and
5118 *CODE to the code that the branch or move should use. */
5122 /* All FP conditions can be implemented directly with CMP.cond.fmt
5123 or by reversing the operands. */
5125 *op0
= gen_reg_rtx (CCFmode
);
5129 /* Three FP conditions cannot be implemented by reversing the
5130 operands for C.cond.fmt, instead a reversed condition code is
5131 required and a test for false. */
5132 *code
= mips_reversed_fp_cond (&cmp_code
) ? EQ
: NE
;
5134 *op0
= mips_allocate_fcc (CCmode
);
5136 *op0
= gen_rtx_REG (CCmode
, FPSW_REGNUM
);
5140 mips_emit_binary (cmp_code
, *op0
, cmp_op0
, cmp_op1
);
5144 /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
5145 and OPERAND[3]. Store the result in OPERANDS[0].
5147 On 64-bit targets, the mode of the comparison and target will always be
5148 SImode, thus possibly narrower than that of the comparison's operands. */
5151 mips_expand_scc (rtx operands
[])
5153 rtx target
= operands
[0];
5154 enum rtx_code code
= GET_CODE (operands
[1]);
5155 rtx op0
= operands
[2];
5156 rtx op1
= operands
[3];
5158 gcc_assert (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_INT
);
5160 if (code
== EQ
|| code
== NE
)
5163 && reg_imm10_operand (op1
, GET_MODE (op1
)))
5164 mips_emit_binary (code
, target
, op0
, op1
);
5167 rtx zie
= mips_zero_if_equal (op0
, op1
);
5168 mips_emit_binary (code
, target
, zie
, const0_rtx
);
5172 mips_emit_int_order_test (code
, 0, target
, op0
, op1
);
5175 /* Compare OPERANDS[1] with OPERANDS[2] using comparison code
5176 CODE and jump to OPERANDS[3] if the condition holds. */
5179 mips_expand_conditional_branch (rtx
*operands
)
5181 enum rtx_code code
= GET_CODE (operands
[0]);
5182 rtx op0
= operands
[1];
5183 rtx op1
= operands
[2];
5186 mips_emit_compare (&code
, &op0
, &op1
, TARGET_MIPS16
);
5187 condition
= gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
5188 emit_jump_insn (gen_condjump (condition
, operands
[3]));
5193 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
5194 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
5197 mips_expand_vcondv2sf (rtx dest
, rtx true_src
, rtx false_src
,
5198 enum rtx_code cond
, rtx cmp_op0
, rtx cmp_op1
)
5203 reversed_p
= mips_reversed_fp_cond (&cond
);
5204 cmp_result
= mips_allocate_fcc (CCV2mode
);
5205 emit_insn (gen_scc_ps (cmp_result
,
5206 gen_rtx_fmt_ee (cond
, VOIDmode
, cmp_op0
, cmp_op1
)));
5208 emit_insn (gen_mips_cond_move_tf_ps (dest
, false_src
, true_src
,
5211 emit_insn (gen_mips_cond_move_tf_ps (dest
, true_src
, false_src
,
5215 /* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0]
5216 if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
5219 mips_expand_conditional_move (rtx
*operands
)
5222 enum rtx_code code
= GET_CODE (operands
[1]);
5223 rtx op0
= XEXP (operands
[1], 0);
5224 rtx op1
= XEXP (operands
[1], 1);
5226 mips_emit_compare (&code
, &op0
, &op1
, true);
5227 cond
= gen_rtx_fmt_ee (code
, GET_MODE (op0
), op0
, op1
);
5229 /* There is no direct support for general conditional GP move involving
5230 two registers using SEL. */
5232 && INTEGRAL_MODE_P (GET_MODE (operands
[2]))
5233 && register_operand (operands
[2], VOIDmode
)
5234 && register_operand (operands
[3], VOIDmode
))
5236 machine_mode mode
= GET_MODE (operands
[0]);
5237 rtx temp
= gen_reg_rtx (mode
);
5238 rtx temp2
= gen_reg_rtx (mode
);
5240 emit_insn (gen_rtx_SET (temp
,
5241 gen_rtx_IF_THEN_ELSE (mode
, cond
,
5242 operands
[2], const0_rtx
)));
5244 /* Flip the test for the second operand. */
5245 cond
= gen_rtx_fmt_ee ((code
== EQ
) ? NE
: EQ
, GET_MODE (op0
), op0
, op1
);
5247 emit_insn (gen_rtx_SET (temp2
,
5248 gen_rtx_IF_THEN_ELSE (mode
, cond
,
5249 operands
[3], const0_rtx
)));
5251 /* Merge the two results, at least one is guaranteed to be zero. */
5252 emit_insn (gen_rtx_SET (operands
[0], gen_rtx_IOR (mode
, temp
, temp2
)));
5256 if (FLOAT_MODE_P (GET_MODE (operands
[2])) && !ISA_HAS_SEL
)
5258 operands
[2] = force_reg (GET_MODE (operands
[0]), operands
[2]);
5259 operands
[3] = force_reg (GET_MODE (operands
[0]), operands
[3]);
5262 emit_insn (gen_rtx_SET (operands
[0],
5263 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]), cond
,
5264 operands
[2], operands
[3])));
5268 /* Perform the comparison in COMPARISON, then trap if the condition holds. */
5271 mips_expand_conditional_trap (rtx comparison
)
5277 /* MIPS conditional trap instructions don't have GT or LE flavors,
5278 so we must swap the operands and convert to LT and GE respectively. */
5279 code
= GET_CODE (comparison
);
5286 code
= swap_condition (code
);
5287 op0
= XEXP (comparison
, 1);
5288 op1
= XEXP (comparison
, 0);
5292 op0
= XEXP (comparison
, 0);
5293 op1
= XEXP (comparison
, 1);
5297 mode
= GET_MODE (XEXP (comparison
, 0));
5298 op0
= force_reg (mode
, op0
);
5299 if (!(ISA_HAS_COND_TRAPI
5300 ? arith_operand (op1
, mode
)
5301 : reg_or_0_operand (op1
, mode
)))
5302 op1
= force_reg (mode
, op1
);
5304 emit_insn (gen_rtx_TRAP_IF (VOIDmode
,
5305 gen_rtx_fmt_ee (code
, mode
, op0
, op1
),
5309 /* Initialize *CUM for a call to a function of type FNTYPE. */
5312 mips_init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
)
5314 memset (cum
, 0, sizeof (*cum
));
5315 cum
->prototype
= (fntype
&& prototype_p (fntype
));
5316 cum
->gp_reg_found
= (cum
->prototype
&& stdarg_p (fntype
));
5319 /* Fill INFO with information about a single argument. CUM is the
5320 cumulative state for earlier arguments. MODE is the mode of this
5321 argument and TYPE is its type (if known). NAMED is true if this
5322 is a named (fixed) argument rather than a variable one. */
/* NOTE(review): the extraction lost many original lines of this function,
   including the switch-on-mips_abi skeleton whose arms compute fpr_p
   (note the gaps in the embedded line numbers).  Not compilable as-is;
   reconcile with upstream mips.c before editing.  */
5325 mips_get_arg_info (struct mips_arg_info
*info
, const CUMULATIVE_ARGS
*cum
,
5326 machine_mode mode
, const_tree type
, bool named
)
5328 bool doubleword_aligned_p
;
5329 unsigned int num_bytes
, num_words
, max_regs
;
5331 /* Work out the size of the argument. */
5332 num_bytes
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
5333 num_words
= (num_bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
5335 /* Decide whether it should go in a floating-point register, assuming
5336 one is free. Later code checks for availability.
5338 The checks against UNITS_PER_FPVALUE handle the soft-float and
5339 single-float cases. */
/* Presumably the ABI_EABI arm of a lost switch on mips_abi -- verify
   against upstream before relying on this.  */
5343 /* The EABI conventions have traditionally been defined in terms
5344 of TYPE_MODE, regardless of the actual type. */
5345 info
->fpr_p
= ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5346 || mode
== V2SFmode
)
5347 && GET_MODE_SIZE (mode
) <= UNITS_PER_FPVALUE
);
/* Presumably the o32/o64 arm: only the first two named FP arguments go
   in FPRs, and only while no GPR argument has been seen.  */
5352 /* Only leading floating-point scalars are passed in
5353 floating-point registers. We also handle vector floats the same
5354 way, which is OK because they are not covered by the standard ABI. */
5355 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
|| mode
!= V2SFmode
);
5356 info
->fpr_p
= (!cum
->gp_reg_found
5357 && cum
->arg_number
< 2
5359 || SCALAR_FLOAT_TYPE_P (type
)
5360 || VECTOR_FLOAT_TYPE_P (type
))
5361 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
5362 || mode
== V2SFmode
)
5363 && GET_MODE_SIZE (mode
) <= UNITS_PER_FPVALUE
);
/* Presumably the NewABI (n32/n64) arm.  */
5368 /* Scalar, complex and vector floating-point types are passed in
5369 floating-point registers, as long as this is a named rather
5370 than a variable argument. */
5371 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
|| mode
!= V2SFmode
);
5372 info
->fpr_p
= (named
5373 && (type
== 0 || FLOAT_TYPE_P (type
))
5374 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
5375 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5376 || mode
== V2SFmode
)
5377 && GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_FPVALUE
);
5379 /* ??? According to the ABI documentation, the real and imaginary
5380 parts of complex floats should be passed in individual registers.
5381 The real and imaginary parts of stack arguments are supposed
5382 to be contiguous and there should be an extra word of padding
5385 This has two problems. First, it makes it impossible to use a
5386 single "void *" va_list type, since register and stack arguments
5387 are passed differently. (At the time of writing, MIPSpro cannot
5388 handle complex float varargs correctly.) Second, it's unclear
5389 what should happen when there is only one register free.
5391 For now, we assume that named complex floats should go into FPRs
5392 if there are two FPRs free, otherwise they should be passed in the
5393 same way as a struct containing two floats. */
5395 && GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5396 && GET_MODE_UNIT_SIZE (mode
) < UNITS_PER_FPVALUE
)
5398 if (cum
->num_gprs
>= MAX_ARGS_IN_REGISTERS
- 1)
5399 info
->fpr_p
= false;
5409 /* See whether the argument has doubleword alignment. */
5410 doubleword_aligned_p
= (mips_function_arg_boundary (mode
, type
)
5413 /* Set REG_OFFSET to the register count we're interested in.
5414 The EABI allocates the floating-point registers separately,
5415 but the other ABIs allocate them like integer registers. */
5416 info
->reg_offset
= (mips_abi
== ABI_EABI
&& info
->fpr_p
5420 /* Advance to an even register if the argument is doubleword-aligned. */
5421 if (doubleword_aligned_p
)
5422 info
->reg_offset
+= info
->reg_offset
& 1;
5424 /* Work out the offset of a stack argument. */
5425 info
->stack_offset
= cum
->stack_words
;
5426 if (doubleword_aligned_p
)
5427 info
->stack_offset
+= info
->stack_offset
& 1;
5429 max_regs
= MAX_ARGS_IN_REGISTERS
- info
->reg_offset
;
5431 /* Partition the argument between registers and stack. */
5432 info
->reg_words
= MIN (num_words
, max_regs
);
5433 info
->stack_words
= num_words
- info
->reg_words
;
5436 /* INFO describes a register argument that has the normal format for the
5437 argument's mode. Return the register it uses, assuming that FPRs are
5438 available if HARD_FLOAT_P. */
5441 mips_arg_regno (const struct mips_arg_info
*info
, bool hard_float_p
)
5443 if (!info
->fpr_p
|| !hard_float_p
)
5444 return GP_ARG_FIRST
+ info
->reg_offset
;
5445 else if (mips_abi
== ABI_32
&& TARGET_DOUBLE_FLOAT
&& info
->reg_offset
> 0)
5446 /* In o32, the second argument is always passed in $f14
5447 for TARGET_DOUBLE_FLOAT, regardless of whether the
5448 first argument was a word or doubleword. */
5449 return FP_ARG_FIRST
+ 2;
5451 return FP_ARG_FIRST
+ info
->reg_offset
;
5454 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
5457 mips_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
5459 return !TARGET_OLDABI
;
5462 /* Implement TARGET_FUNCTION_ARG. */
/* NOTE(review): the extraction dropped many original lines of this function
   (braces, several declarations such as ret/field/i/reg/inner/regno, and
   some conditions -- note the gaps in the embedded line numbers).  Not
   compilable as-is; reconcile with upstream mips.c before editing.  */
5465 mips_function_arg (cumulative_args_t cum_v
, machine_mode mode
,
5466 const_tree type
, bool named
)
5468 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
5469 struct mips_arg_info info
;
5471 /* We will be called with a mode of VOIDmode after the last argument
5472 has been seen. Whatever we return will be passed to the call expander.
5473 If we need a MIPS16 fp_code, return a REG with the code stored as
5475 if (mode
== VOIDmode
)
5477 if (TARGET_MIPS16
&& cum
->fp_code
!= 0)
5478 return gen_rtx_REG ((machine_mode
) cum
->fp_code
, 0);
5483 mips_get_arg_info (&info
, cum
, mode
, type
, named
);
5485 /* Return straight away if the whole argument is passed on the stack. */
5486 if (info
.reg_offset
== MAX_ARGS_IN_REGISTERS
)
5489 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
5490 contains a double in its entirety, then that 64-bit chunk is passed
5491 in a floating-point register. */
5493 && TARGET_HARD_FLOAT
5496 && TREE_CODE (type
) == RECORD_TYPE
5497 && TYPE_SIZE_UNIT (type
)
5498 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
5502 /* First check to see if there is any such field. */
5503 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
5504 if (TREE_CODE (field
) == FIELD_DECL
5505 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field
))
5506 && TYPE_PRECISION (TREE_TYPE (field
)) == BITS_PER_WORD
5507 && tree_fits_shwi_p (bit_position (field
))
5508 && int_bit_position (field
) % BITS_PER_WORD
== 0)
5513 /* Now handle the special case by returning a PARALLEL
5514 indicating where each 64-bit chunk goes. INFO.REG_WORDS
5515 chunks are passed in registers. */
5517 HOST_WIDE_INT bitpos
;
5520 /* assign_parms checks the mode of ENTRY_PARM, so we must
5521 use the actual mode here. */
5522 ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc (info
.reg_words
));
5525 field
= TYPE_FIELDS (type
);
5526 for (i
= 0; i
< info
.reg_words
; i
++)
/* Advance FIELD to the first field at or beyond the current chunk.  */
5530 for (; field
; field
= DECL_CHAIN (field
))
5531 if (TREE_CODE (field
) == FIELD_DECL
5532 && int_bit_position (field
) >= bitpos
)
/* A word-sized double exactly at this chunk goes in an FPR;
   any other chunk goes in a GPR.  */
5536 && int_bit_position (field
) == bitpos
5537 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field
))
5538 && TYPE_PRECISION (TREE_TYPE (field
)) == BITS_PER_WORD
)
5539 reg
= gen_rtx_REG (DFmode
, FP_ARG_FIRST
+ info
.reg_offset
+ i
);
5541 reg
= gen_rtx_REG (DImode
, GP_ARG_FIRST
+ info
.reg_offset
+ i
);
5544 = gen_rtx_EXPR_LIST (VOIDmode
, reg
,
5545 GEN_INT (bitpos
/ BITS_PER_UNIT
));
5547 bitpos
+= BITS_PER_WORD
;
5553 /* Handle the n32/n64 conventions for passing complex floating-point
5554 arguments in FPR pairs. The real part goes in the lower register
5555 and the imaginary part goes in the upper register. */
5558 && GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5564 inner
= GET_MODE_INNER (mode
);
5565 regno
= FP_ARG_FIRST
+ info
.reg_offset
;
5566 if (info
.reg_words
* UNITS_PER_WORD
== GET_MODE_SIZE (inner
))
5568 /* Real part in registers, imaginary part on stack. */
5569 gcc_assert (info
.stack_words
== info
.reg_words
);
5570 return gen_rtx_REG (inner
, regno
);
5574 gcc_assert (info
.stack_words
== 0);
5575 real
= gen_rtx_EXPR_LIST (VOIDmode
,
5576 gen_rtx_REG (inner
, regno
),
5578 imag
= gen_rtx_EXPR_LIST (VOIDmode
,
5580 regno
+ info
.reg_words
/ 2),
5581 GEN_INT (GET_MODE_SIZE (inner
)));
5582 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, real
, imag
));
/* Ordinary case: a single register chosen by mips_arg_regno.  */
5586 return gen_rtx_REG (mode
, mips_arg_regno (&info
, TARGET_HARD_FLOAT
));
5589 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
5592 mips_function_arg_advance (cumulative_args_t cum_v
, machine_mode mode
,
5593 const_tree type
, bool named
)
5595 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
5596 struct mips_arg_info info
;
5598 mips_get_arg_info (&info
, cum
, mode
, type
, named
);
5601 cum
->gp_reg_found
= true;
5603 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
5604 an explanation of what this code does. It assumes that we're using
5605 either the o32 or the o64 ABI, both of which pass at most 2 arguments
5607 if (cum
->arg_number
< 2 && info
.fpr_p
)
5608 cum
->fp_code
+= (mode
== SFmode
? 1 : 2) << (cum
->arg_number
* 2);
5610 /* Advance the register count. This has the effect of setting
5611 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
5612 argument required us to skip the final GPR and pass the whole
5613 argument on the stack. */
5614 if (mips_abi
!= ABI_EABI
|| !info
.fpr_p
)
5615 cum
->num_gprs
= info
.reg_offset
+ info
.reg_words
;
5616 else if (info
.reg_words
> 0)
5617 cum
->num_fprs
+= MAX_FPRS_PER_FMT
;
5619 /* Advance the stack word count. */
5620 if (info
.stack_words
> 0)
5621 cum
->stack_words
= info
.stack_offset
+ info
.stack_words
;
5626 /* Implement TARGET_ARG_PARTIAL_BYTES. */
5629 mips_arg_partial_bytes (cumulative_args_t cum
,
5630 machine_mode mode
, tree type
, bool named
)
5632 struct mips_arg_info info
;
5634 mips_get_arg_info (&info
, get_cumulative_args (cum
), mode
, type
, named
);
5635 return info
.stack_words
> 0 ? info
.reg_words
* UNITS_PER_WORD
: 0;
5638 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
5639 least PARM_BOUNDARY bits of alignment, but will be given anything up
5640 to STACK_BOUNDARY bits if the type requires it. */
5643 mips_function_arg_boundary (machine_mode mode
, const_tree type
)
5645 unsigned int alignment
;
5647 alignment
= type
? TYPE_ALIGN (type
) : GET_MODE_ALIGNMENT (mode
);
5648 if (alignment
< PARM_BOUNDARY
)
5649 alignment
= PARM_BOUNDARY
;
5650 if (alignment
> STACK_BOUNDARY
)
5651 alignment
= STACK_BOUNDARY
;
5655 /* Implement TARGET_GET_RAW_RESULT_MODE and TARGET_GET_RAW_ARG_MODE. */
5658 mips_get_reg_raw_mode (int regno
)
5660 if (TARGET_FLOATXX
&& FP_REG_P (regno
))
5662 return default_get_reg_raw_mode (regno
);
5665 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
5666 upward rather than downward. In other words, return true if the
5667 first byte of the stack slot has useful data, false if the last
5671 mips_pad_arg_upward (machine_mode mode
, const_tree type
)
5673 /* On little-endian targets, the first byte of every stack argument
5674 is passed in the first byte of the stack slot. */
5675 if (!BYTES_BIG_ENDIAN
)
5678 /* Otherwise, integral types are padded downward: the last byte of a
5679 stack argument is passed in the last byte of the stack slot. */
5681 ? (INTEGRAL_TYPE_P (type
)
5682 || POINTER_TYPE_P (type
)
5683 || FIXED_POINT_TYPE_P (type
))
5684 : (SCALAR_INT_MODE_P (mode
)
5685 || ALL_SCALAR_FIXED_POINT_MODE_P (mode
)))
5688 /* Big-endian o64 pads floating-point arguments downward. */
5689 if (mips_abi
== ABI_O64
)
5690 if (type
!= 0 ? FLOAT_TYPE_P (type
) : GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5693 /* Other types are padded upward for o32, o64, n32 and n64. */
5694 if (mips_abi
!= ABI_EABI
)
5697 /* Arguments smaller than a stack slot are padded downward. */
5698 if (mode
!= BLKmode
)
5699 return GET_MODE_BITSIZE (mode
) >= PARM_BOUNDARY
;
5701 return int_size_in_bytes (type
) >= (PARM_BOUNDARY
/ BITS_PER_UNIT
);
5704 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
5705 if the least significant byte of the register has useful data. Return
5706 the opposite if the most significant byte does. */
5709 mips_pad_reg_upward (machine_mode mode
, tree type
)
5711 /* No shifting is required for floating-point arguments. */
5712 if (type
!= 0 ? FLOAT_TYPE_P (type
) : GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5713 return !BYTES_BIG_ENDIAN
;
5715 /* Otherwise, apply the same padding to register arguments as we do
5716 to stack arguments. */
5717 return mips_pad_arg_upward (mode
, type
);
5720 /* Return nonzero when an argument must be passed by reference. */
5723 mips_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED
,
5724 machine_mode mode
, const_tree type
,
5725 bool named ATTRIBUTE_UNUSED
)
5727 if (mips_abi
== ABI_EABI
)
5731 /* ??? How should SCmode be handled? */
5732 if (mode
== DImode
|| mode
== DFmode
5733 || mode
== DQmode
|| mode
== UDQmode
5734 || mode
== DAmode
|| mode
== UDAmode
)
5737 size
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
5738 return size
== -1 || size
> UNITS_PER_WORD
;
5742 /* If we have a variable-sized parameter, we have no choice. */
5743 return targetm
.calls
.must_pass_in_stack (mode
, type
);
5747 /* Implement TARGET_CALLEE_COPIES. */
5750 mips_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED
,
5751 machine_mode mode ATTRIBUTE_UNUSED
,
5752 const_tree type ATTRIBUTE_UNUSED
, bool named
)
5754 return mips_abi
== ABI_EABI
&& named
;
5757 /* See whether VALTYPE is a record whose fields should be returned in
5758 floating-point registers. If so, return the number of fields and
5759 list them in FIELDS (which should have two elements). Return 0
5762 For n32 & n64, a structure with one or two fields is returned in
5763 floating-point registers as long as every field has a floating-point
5767 mips_fpr_return_fields (const_tree valtype
, tree
*fields
)
5775 if (TREE_CODE (valtype
) != RECORD_TYPE
)
5779 for (field
= TYPE_FIELDS (valtype
); field
!= 0; field
= DECL_CHAIN (field
))
5781 if (TREE_CODE (field
) != FIELD_DECL
)
5784 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field
)))
5790 fields
[i
++] = field
;
5795 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
5796 a value in the most significant part of $2/$3 if:
5798 - the target is big-endian;
5800 - the value has a structure or union type (we generalize this to
5801 cover aggregates from other languages too); and
5803 - the structure is not returned in floating-point registers. */
5806 mips_return_in_msb (const_tree valtype
)
5810 return (TARGET_NEWABI
5811 && TARGET_BIG_ENDIAN
5812 && AGGREGATE_TYPE_P (valtype
)
5813 && mips_fpr_return_fields (valtype
, fields
) == 0);
5816 /* Return true if the function return value MODE will get returned in a
5817 floating-point register. */
5820 mips_return_mode_in_fpr_p (machine_mode mode
)
5822 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
|| mode
!= V2SFmode
);
5823 return ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5825 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5826 && GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_HWFPVALUE
);
5829 /* Return the representation of an FPR return register when the
5830 value being returned in FP_RETURN has mode VALUE_MODE and the
5831 return type itself has mode TYPE_MODE. On NewABI targets,
5832 the two modes may be different for structures like:
5834 struct __attribute__((packed)) foo { float f; }
5836 where we return the SFmode value of "f" in FP_RETURN, but where
5837 the structure itself has mode BLKmode. */
5840 mips_return_fpr_single (machine_mode type_mode
,
5841 machine_mode value_mode
)
5845 x
= gen_rtx_REG (value_mode
, FP_RETURN
);
5846 if (type_mode
!= value_mode
)
5848 x
= gen_rtx_EXPR_LIST (VOIDmode
, x
, const0_rtx
);
5849 x
= gen_rtx_PARALLEL (type_mode
, gen_rtvec (1, x
));
5854 /* Return a composite value in a pair of floating-point registers.
5855 MODE1 and OFFSET1 are the mode and byte offset for the first value,
5856 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
5859 For n32 & n64, $f0 always holds the first value and $f2 the second.
5860 Otherwise the values are packed together as closely as possible. */
5863 mips_return_fpr_pair (machine_mode mode
,
5864 machine_mode mode1
, HOST_WIDE_INT offset1
,
5865 machine_mode mode2
, HOST_WIDE_INT offset2
)
5869 inc
= (TARGET_NEWABI
|| mips_abi
== ABI_32
? 2 : MAX_FPRS_PER_FMT
);
5870 return gen_rtx_PARALLEL
5873 gen_rtx_EXPR_LIST (VOIDmode
,
5874 gen_rtx_REG (mode1
, FP_RETURN
),
5876 gen_rtx_EXPR_LIST (VOIDmode
,
5877 gen_rtx_REG (mode2
, FP_RETURN
+ inc
),
5878 GEN_INT (offset2
))));
5882 /* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
5883 For normal calls, VALTYPE is the return type and MODE is VOIDmode.
5884 For libcalls, VALTYPE is null and MODE is the mode of the return value. */
/* NOTE(review): the extraction lost several original lines here (the
   mode/unsigned_p/func declarations, the "if (valtype)" guard, switch
   case labels, and some conditions -- note the gaps in the embedded line
   numbers).  Not compilable as-is; reconcile with upstream mips.c.  */
5887 mips_function_value_1 (const_tree valtype
, const_tree fn_decl_or_type
,
5896 if (fn_decl_or_type
&& DECL_P (fn_decl_or_type
))
5897 func
= fn_decl_or_type
;
/* Presumably guarded by "if (valtype)": derive MODE and signedness
   from the return type when one is available.  */
5901 mode
= TYPE_MODE (valtype
);
5902 unsigned_p
= TYPE_UNSIGNED (valtype
);
5904 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes,
5905 return values, promote the mode here too. */
5906 mode
= promote_function_mode (valtype
, mode
, &unsigned_p
, func
, 1);
5908 /* Handle structures whose fields are returned in $f0/$f2. */
5909 switch (mips_fpr_return_fields (valtype
, fields
))
/* case 1: one FP field -- single FPR.  */
5912 return mips_return_fpr_single (mode
,
5913 TYPE_MODE (TREE_TYPE (fields
[0])));
/* case 2: two FP fields -- FPR pair at the fields' byte positions.  */
5916 return mips_return_fpr_pair (mode
,
5917 TYPE_MODE (TREE_TYPE (fields
[0])),
5918 int_byte_position (fields
[0]),
5919 TYPE_MODE (TREE_TYPE (fields
[1])),
5920 int_byte_position (fields
[1]));
5923 /* If a value is passed in the most significant part of a register, see
5924 whether we have to round the mode up to a whole number of words. */
5925 if (mips_return_in_msb (valtype
))
5927 HOST_WIDE_INT size
= int_size_in_bytes (valtype
);
5928 if (size
% UNITS_PER_WORD
!= 0)
5930 size
+= UNITS_PER_WORD
- size
% UNITS_PER_WORD
;
5931 mode
= mode_for_size (size
* BITS_PER_UNIT
, MODE_INT
, 0);
5935 /* For EABI, the class of return register depends entirely on MODE.
5936 For example, "struct { some_type x; }" and "union { some_type x; }"
5937 are returned in the same way as a bare "some_type" would be.
5938 Other ABIs only use FPRs for scalar, complex or vector types. */
5939 if (mips_abi
!= ABI_EABI
&& !FLOAT_TYPE_P (valtype
))
5940 return gen_rtx_REG (mode
, GP_RETURN
);
5945 /* Handle long doubles for n32 & n64. */
5947 return mips_return_fpr_pair (mode
,
5949 DImode
, GET_MODE_SIZE (mode
) / 2);
5951 if (mips_return_mode_in_fpr_p (mode
))
5953 if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5954 return mips_return_fpr_pair (mode
,
5955 GET_MODE_INNER (mode
), 0,
5956 GET_MODE_INNER (mode
),
5957 GET_MODE_SIZE (mode
) / 2);
5959 return gen_rtx_REG (mode
, FP_RETURN
);
/* Fallback: return in the general-purpose return register.  */
5963 return gen_rtx_REG (mode
, GP_RETURN
);
5966 /* Implement TARGET_FUNCTION_VALUE. */
5969 mips_function_value (const_tree valtype
, const_tree fn_decl_or_type
,
5970 bool outgoing ATTRIBUTE_UNUSED
)
5972 return mips_function_value_1 (valtype
, fn_decl_or_type
, VOIDmode
);
5975 /* Implement TARGET_LIBCALL_VALUE. */
5978 mips_libcall_value (machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
5980 return mips_function_value_1 (NULL_TREE
, NULL_TREE
, mode
);
5983 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5985 On the MIPS, R2 R3 and F0 F2 are the only register thus used. */
5988 mips_function_value_regno_p (const unsigned int regno
)
5990 /* Most types only require one GPR or one FPR for return values but for
5991 hard-float two FPRs can be used for _Complex types (for all ABIs)
5992 and long doubles (for n64). */
5993 if (regno
== GP_RETURN
5994 || regno
== FP_RETURN
5995 || (FP_RETURN
!= GP_RETURN
5996 && regno
== FP_RETURN
+ 2))
5999 /* For o32 FP32, _Complex double will be returned in four 32-bit registers.
6000 This does not apply to o32 FPXX as floating-point function argument and
6001 return registers are described as 64-bit even though floating-point
6002 registers are primarily described as 32-bit internally.
6003 See: mips_get_reg_raw_mode. */
6004 if ((mips_abi
== ABI_32
&& TARGET_FLOAT32
)
6005 && FP_RETURN
!= GP_RETURN
6006 && (regno
== FP_RETURN
+ 1
6007 || regno
== FP_RETURN
+ 3))
6013 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
6014 all BLKmode objects are returned in memory. Under the n32, n64
6015 and embedded ABIs, small structures are returned in a register.
6016 Objects with varying size must still be returned in memory, of
6020 mips_return_in_memory (const_tree type
, const_tree fndecl ATTRIBUTE_UNUSED
)
6022 return (TARGET_OLDABI
6023 ? TYPE_MODE (type
) == BLKmode
6024 : !IN_RANGE (int_size_in_bytes (type
), 0, 2 * UNITS_PER_WORD
));
6027 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
6030 mips_setup_incoming_varargs (cumulative_args_t cum
, machine_mode mode
,
6031 tree type
, int *pretend_size ATTRIBUTE_UNUSED
,
6034 CUMULATIVE_ARGS local_cum
;
6035 int gp_saved
, fp_saved
;
6037 /* The caller has advanced CUM up to, but not beyond, the last named
6038 argument. Advance a local copy of CUM past the last "real" named
6039 argument, to find out how many registers are left over. */
6040 local_cum
= *get_cumulative_args (cum
);
6041 mips_function_arg_advance (pack_cumulative_args (&local_cum
), mode
, type
,
6044 /* Found out how many registers we need to save. */
6045 gp_saved
= MAX_ARGS_IN_REGISTERS
- local_cum
.num_gprs
;
6046 fp_saved
= (EABI_FLOAT_VARARGS_P
6047 ? MAX_ARGS_IN_REGISTERS
- local_cum
.num_fprs
6056 ptr
= plus_constant (Pmode
, virtual_incoming_args_rtx
,
6057 REG_PARM_STACK_SPACE (cfun
->decl
)
6058 - gp_saved
* UNITS_PER_WORD
);
6059 mem
= gen_frame_mem (BLKmode
, ptr
);
6060 set_mem_alias_set (mem
, get_varargs_alias_set ());
6062 move_block_from_reg (local_cum
.num_gprs
+ GP_ARG_FIRST
,
6067 /* We can't use move_block_from_reg, because it will use
6072 /* Set OFF to the offset from virtual_incoming_args_rtx of
6073 the first float register. The FP save area lies below
6074 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
6075 off
= ROUND_DOWN (-gp_saved
* UNITS_PER_WORD
, UNITS_PER_FPVALUE
);
6076 off
-= fp_saved
* UNITS_PER_FPREG
;
6078 mode
= TARGET_SINGLE_FLOAT
? SFmode
: DFmode
;
6080 for (i
= local_cum
.num_fprs
; i
< MAX_ARGS_IN_REGISTERS
;
6081 i
+= MAX_FPRS_PER_FMT
)
6085 ptr
= plus_constant (Pmode
, virtual_incoming_args_rtx
, off
);
6086 mem
= gen_frame_mem (mode
, ptr
);
6087 set_mem_alias_set (mem
, get_varargs_alias_set ());
6088 mips_emit_move (mem
, gen_rtx_REG (mode
, FP_ARG_FIRST
+ i
));
6089 off
+= UNITS_PER_HWFPVALUE
;
6093 if (REG_PARM_STACK_SPACE (cfun
->decl
) == 0)
6094 cfun
->machine
->varargs_size
= (gp_saved
* UNITS_PER_WORD
6095 + fp_saved
* UNITS_PER_FPREG
);
6098 /* Implement TARGET_BUILTIN_VA_LIST. */
6101 mips_build_builtin_va_list (void)
6103 if (EABI_FLOAT_VARARGS_P
)
6105 /* We keep 3 pointers, and two offsets.
6107 Two pointers are to the overflow area, which starts at the CFA.
6108 One of these is constant, for addressing into the GPR save area
6109 below it. The other is advanced up the stack through the
6112 The third pointer is to the bottom of the GPR save area.
6113 Since the FPR save area is just below it, we can address
6114 FPR slots off this pointer.
6116 We also keep two one-byte offsets, which are to be subtracted
6117 from the constant pointers to yield addresses in the GPR and
6118 FPR save areas. These are downcounted as float or non-float
6119 arguments are used, and when they get to zero, the argument
6120 must be obtained from the overflow region. */
6121 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
, f_res
, record
;
6124 record
= lang_hooks
.types
.make_type (RECORD_TYPE
);
6126 f_ovfl
= build_decl (BUILTINS_LOCATION
,
6127 FIELD_DECL
, get_identifier ("__overflow_argptr"),
6129 f_gtop
= build_decl (BUILTINS_LOCATION
,
6130 FIELD_DECL
, get_identifier ("__gpr_top"),
6132 f_ftop
= build_decl (BUILTINS_LOCATION
,
6133 FIELD_DECL
, get_identifier ("__fpr_top"),
6135 f_goff
= build_decl (BUILTINS_LOCATION
,
6136 FIELD_DECL
, get_identifier ("__gpr_offset"),
6137 unsigned_char_type_node
);
6138 f_foff
= build_decl (BUILTINS_LOCATION
,
6139 FIELD_DECL
, get_identifier ("__fpr_offset"),
6140 unsigned_char_type_node
);
6141 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
6142 warn on every user file. */
6143 index
= build_int_cst (NULL_TREE
, GET_MODE_SIZE (ptr_mode
) - 2 - 1);
6144 array
= build_array_type (unsigned_char_type_node
,
6145 build_index_type (index
));
6146 f_res
= build_decl (BUILTINS_LOCATION
,
6147 FIELD_DECL
, get_identifier ("__reserved"), array
);
6149 DECL_FIELD_CONTEXT (f_ovfl
) = record
;
6150 DECL_FIELD_CONTEXT (f_gtop
) = record
;
6151 DECL_FIELD_CONTEXT (f_ftop
) = record
;
6152 DECL_FIELD_CONTEXT (f_goff
) = record
;
6153 DECL_FIELD_CONTEXT (f_foff
) = record
;
6154 DECL_FIELD_CONTEXT (f_res
) = record
;
6156 TYPE_FIELDS (record
) = f_ovfl
;
6157 DECL_CHAIN (f_ovfl
) = f_gtop
;
6158 DECL_CHAIN (f_gtop
) = f_ftop
;
6159 DECL_CHAIN (f_ftop
) = f_goff
;
6160 DECL_CHAIN (f_goff
) = f_foff
;
6161 DECL_CHAIN (f_foff
) = f_res
;
6163 layout_type (record
);
6167 /* Otherwise, we use 'void *'. */
6168 return ptr_type_node
;
6171 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
6174 mips_va_start (tree valist
, rtx nextarg
)
6176 if (EABI_FLOAT_VARARGS_P
)
6178 const CUMULATIVE_ARGS
*cum
;
6179 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
;
6180 tree ovfl
, gtop
, ftop
, goff
, foff
;
6182 int gpr_save_area_size
;
6183 int fpr_save_area_size
;
6186 cum
= &crtl
->args
.info
;
6188 = (MAX_ARGS_IN_REGISTERS
- cum
->num_gprs
) * UNITS_PER_WORD
;
6190 = (MAX_ARGS_IN_REGISTERS
- cum
->num_fprs
) * UNITS_PER_FPREG
;
6192 f_ovfl
= TYPE_FIELDS (va_list_type_node
);
6193 f_gtop
= DECL_CHAIN (f_ovfl
);
6194 f_ftop
= DECL_CHAIN (f_gtop
);
6195 f_goff
= DECL_CHAIN (f_ftop
);
6196 f_foff
= DECL_CHAIN (f_goff
);
6198 ovfl
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovfl
), valist
, f_ovfl
,
6200 gtop
= build3 (COMPONENT_REF
, TREE_TYPE (f_gtop
), valist
, f_gtop
,
6202 ftop
= build3 (COMPONENT_REF
, TREE_TYPE (f_ftop
), valist
, f_ftop
,
6204 goff
= build3 (COMPONENT_REF
, TREE_TYPE (f_goff
), valist
, f_goff
,
6206 foff
= build3 (COMPONENT_REF
, TREE_TYPE (f_foff
), valist
, f_foff
,
6209 /* Emit code to initialize OVFL, which points to the next varargs
6210 stack argument. CUM->STACK_WORDS gives the number of stack
6211 words used by named arguments. */
6212 t
= make_tree (TREE_TYPE (ovfl
), virtual_incoming_args_rtx
);
6213 if (cum
->stack_words
> 0)
6214 t
= fold_build_pointer_plus_hwi (t
, cum
->stack_words
* UNITS_PER_WORD
);
6215 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ovfl
), ovfl
, t
);
6216 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6218 /* Emit code to initialize GTOP, the top of the GPR save area. */
6219 t
= make_tree (TREE_TYPE (gtop
), virtual_incoming_args_rtx
);
6220 t
= build2 (MODIFY_EXPR
, TREE_TYPE (gtop
), gtop
, t
);
6221 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6223 /* Emit code to initialize FTOP, the top of the FPR save area.
6224 This address is gpr_save_area_bytes below GTOP, rounded
6225 down to the next fp-aligned boundary. */
6226 t
= make_tree (TREE_TYPE (ftop
), virtual_incoming_args_rtx
);
6227 fpr_offset
= gpr_save_area_size
+ UNITS_PER_FPVALUE
- 1;
6228 fpr_offset
&= -UNITS_PER_FPVALUE
;
6230 t
= fold_build_pointer_plus_hwi (t
, -fpr_offset
);
6231 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ftop
), ftop
, t
);
6232 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6234 /* Emit code to initialize GOFF, the offset from GTOP of the
6235 next GPR argument. */
6236 t
= build2 (MODIFY_EXPR
, TREE_TYPE (goff
), goff
,
6237 build_int_cst (TREE_TYPE (goff
), gpr_save_area_size
));
6238 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6240 /* Likewise emit code to initialize FOFF, the offset from FTOP
6241 of the next FPR argument. */
6242 t
= build2 (MODIFY_EXPR
, TREE_TYPE (foff
), foff
,
6243 build_int_cst (TREE_TYPE (foff
), fpr_save_area_size
));
6244 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6248 nextarg
= plus_constant (Pmode
, nextarg
, -cfun
->machine
->varargs_size
);
6249 std_expand_builtin_va_start (valist
, nextarg
);
6253 /* Like std_gimplify_va_arg_expr, but apply alignment to zero-sized
6257 mips_std_gimplify_va_arg_expr (tree valist
, tree type
, gimple_seq
*pre_p
,
6260 tree addr
, t
, type_size
, rounded_size
, valist_tmp
;
6261 unsigned HOST_WIDE_INT align
, boundary
;
6264 indirect
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, false);
6266 type
= build_pointer_type (type
);
6268 align
= PARM_BOUNDARY
/ BITS_PER_UNIT
;
6269 boundary
= targetm
.calls
.function_arg_boundary (TYPE_MODE (type
), type
);
6271 /* When we align parameter on stack for caller, if the parameter
6272 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
6273 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We will match callee
6274 here with caller. */
6275 if (boundary
> MAX_SUPPORTED_STACK_ALIGNMENT
)
6276 boundary
= MAX_SUPPORTED_STACK_ALIGNMENT
;
6278 boundary
/= BITS_PER_UNIT
;
6280 /* Hoist the valist value into a temporary for the moment. */
6281 valist_tmp
= get_initialized_tmp_var (valist
, pre_p
, NULL
);
6283 /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually
6284 requires greater alignment, we must perform dynamic alignment. */
6285 if (boundary
> align
)
6287 t
= build2 (MODIFY_EXPR
, TREE_TYPE (valist
), valist_tmp
,
6288 fold_build_pointer_plus_hwi (valist_tmp
, boundary
- 1));
6289 gimplify_and_add (t
, pre_p
);
6291 t
= build2 (MODIFY_EXPR
, TREE_TYPE (valist
), valist_tmp
,
6292 fold_build2 (BIT_AND_EXPR
, TREE_TYPE (valist
),
6294 build_int_cst (TREE_TYPE (valist
), -boundary
)));
6295 gimplify_and_add (t
, pre_p
);
6300 /* If the actual alignment is less than the alignment of the type,
6301 adjust the type accordingly so that we don't assume strict alignment
6302 when dereferencing the pointer. */
6303 boundary
*= BITS_PER_UNIT
;
6304 if (boundary
< TYPE_ALIGN (type
))
6306 type
= build_variant_type_copy (type
);
6307 TYPE_ALIGN (type
) = boundary
;
6310 /* Compute the rounded size of the type. */
6311 type_size
= size_in_bytes (type
);
6312 rounded_size
= round_up (type_size
, align
);
6314 /* Reduce rounded_size so it's sharable with the postqueue. */
6315 gimplify_expr (&rounded_size
, pre_p
, post_p
, is_gimple_val
, fb_rvalue
);
6319 if (PAD_VARARGS_DOWN
&& !integer_zerop (rounded_size
))
6321 /* Small args are padded downward. */
6322 t
= fold_build2_loc (input_location
, GT_EXPR
, sizetype
,
6323 rounded_size
, size_int (align
));
6324 t
= fold_build3 (COND_EXPR
, sizetype
, t
, size_zero_node
,
6325 size_binop (MINUS_EXPR
, rounded_size
, type_size
));
6326 addr
= fold_build_pointer_plus (addr
, t
);
6329 /* Compute new value for AP. */
6330 t
= fold_build_pointer_plus (valist_tmp
, rounded_size
);
6331 t
= build2 (MODIFY_EXPR
, TREE_TYPE (valist
), valist
, t
);
6332 gimplify_and_add (t
, pre_p
);
6334 addr
= fold_convert (build_pointer_type (type
), addr
);
6337 addr
= build_va_arg_indirect_ref (addr
);
6339 return build_va_arg_indirect_ref (addr
);
6342 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
6345 mips_gimplify_va_arg_expr (tree valist
, tree type
, gimple_seq
*pre_p
,
6351 indirect_p
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, 0);
6353 type
= build_pointer_type (type
);
6355 if (!EABI_FLOAT_VARARGS_P
)
6356 addr
= mips_std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
6359 tree f_ovfl
, f_gtop
, f_ftop
, f_goff
, f_foff
;
6360 tree ovfl
, top
, off
, align
;
6361 HOST_WIDE_INT size
, rsize
, osize
;
6364 f_ovfl
= TYPE_FIELDS (va_list_type_node
);
6365 f_gtop
= DECL_CHAIN (f_ovfl
);
6366 f_ftop
= DECL_CHAIN (f_gtop
);
6367 f_goff
= DECL_CHAIN (f_ftop
);
6368 f_foff
= DECL_CHAIN (f_goff
);
6372 TOP be the top of the GPR or FPR save area;
6373 OFF be the offset from TOP of the next register;
6374 ADDR_RTX be the address of the argument;
6375 SIZE be the number of bytes in the argument type;
6376 RSIZE be the number of bytes used to store the argument
6377 when it's in the register save area; and
6378 OSIZE be the number of bytes used to store it when it's
6379 in the stack overflow area.
6381 The code we want is:
6383 1: off &= -rsize; // round down
6386 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
6391 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
6392 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
6396 [1] and [9] can sometimes be optimized away. */
6398 ovfl
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovfl
), valist
, f_ovfl
,
6400 size
= int_size_in_bytes (type
);
6402 if (GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
6403 && GET_MODE_SIZE (TYPE_MODE (type
)) <= UNITS_PER_FPVALUE
)
6405 top
= build3 (COMPONENT_REF
, TREE_TYPE (f_ftop
),
6406 unshare_expr (valist
), f_ftop
, NULL_TREE
);
6407 off
= build3 (COMPONENT_REF
, TREE_TYPE (f_foff
),
6408 unshare_expr (valist
), f_foff
, NULL_TREE
);
6410 /* When va_start saves FPR arguments to the stack, each slot
6411 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
6412 argument's precision. */
6413 rsize
= UNITS_PER_HWFPVALUE
;
6415 /* Overflow arguments are padded to UNITS_PER_WORD bytes
6416 (= PARM_BOUNDARY bits). This can be different from RSIZE
6419 (1) On 32-bit targets when TYPE is a structure such as:
6421 struct s { float f; };
6423 Such structures are passed in paired FPRs, so RSIZE
6424 will be 8 bytes. However, the structure only takes
6425 up 4 bytes of memory, so OSIZE will only be 4.
6427 (2) In combinations such as -mgp64 -msingle-float
6428 -fshort-double. Doubles passed in registers will then take
6429 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
6430 stack take up UNITS_PER_WORD bytes. */
6431 osize
= MAX (GET_MODE_SIZE (TYPE_MODE (type
)), UNITS_PER_WORD
);
6435 top
= build3 (COMPONENT_REF
, TREE_TYPE (f_gtop
),
6436 unshare_expr (valist
), f_gtop
, NULL_TREE
);
6437 off
= build3 (COMPONENT_REF
, TREE_TYPE (f_goff
),
6438 unshare_expr (valist
), f_goff
, NULL_TREE
);
6439 rsize
= ROUND_UP (size
, UNITS_PER_WORD
);
6440 if (rsize
> UNITS_PER_WORD
)
6442 /* [1] Emit code for: off &= -rsize. */
6443 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (off
), unshare_expr (off
),
6444 build_int_cst (TREE_TYPE (off
), -rsize
));
6445 gimplify_assign (unshare_expr (off
), t
, pre_p
);
6450 /* [2] Emit code to branch if off == 0. */
6451 t
= build2 (NE_EXPR
, boolean_type_node
, unshare_expr (off
),
6452 build_int_cst (TREE_TYPE (off
), 0));
6453 addr
= build3 (COND_EXPR
, ptr_type_node
, t
, NULL_TREE
, NULL_TREE
);
6455 /* [5] Emit code for: off -= rsize. We do this as a form of
6456 post-decrement not available to C. */
6457 t
= fold_convert (TREE_TYPE (off
), build_int_cst (NULL_TREE
, rsize
));
6458 t
= build2 (POSTDECREMENT_EXPR
, TREE_TYPE (off
), off
, t
);
6460 /* [4] Emit code for:
6461 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
6462 t
= fold_convert (sizetype
, t
);
6463 t
= fold_build1 (NEGATE_EXPR
, sizetype
, t
);
6464 t
= fold_build_pointer_plus (top
, t
);
6465 if (BYTES_BIG_ENDIAN
&& rsize
> size
)
6466 t
= fold_build_pointer_plus_hwi (t
, rsize
- size
);
6467 COND_EXPR_THEN (addr
) = t
;
6469 if (osize
> UNITS_PER_WORD
)
6471 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
6472 t
= fold_build_pointer_plus_hwi (unshare_expr (ovfl
), osize
- 1);
6473 u
= build_int_cst (TREE_TYPE (t
), -osize
);
6474 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (t
), t
, u
);
6475 align
= build2 (MODIFY_EXPR
, TREE_TYPE (ovfl
),
6476 unshare_expr (ovfl
), t
);
6481 /* [10, 11] Emit code for:
6482 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
6484 u
= fold_convert (TREE_TYPE (ovfl
), build_int_cst (NULL_TREE
, osize
));
6485 t
= build2 (POSTINCREMENT_EXPR
, TREE_TYPE (ovfl
), ovfl
, u
);
6486 if (BYTES_BIG_ENDIAN
&& osize
> size
)
6487 t
= fold_build_pointer_plus_hwi (t
, osize
- size
);
6489 /* String [9] and [10, 11] together. */
6491 t
= build2 (COMPOUND_EXPR
, TREE_TYPE (t
), align
, t
);
6492 COND_EXPR_ELSE (addr
) = t
;
6494 addr
= fold_convert (build_pointer_type (type
), addr
);
6495 addr
= build_va_arg_indirect_ref (addr
);
6499 addr
= build_va_arg_indirect_ref (addr
);
6504 /* Declare a unique, locally-binding function called NAME, then start
6508 mips_start_unique_function (const char *name
)
6512 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
,
6513 get_identifier (name
),
6514 build_function_type_list (void_type_node
, NULL_TREE
));
6515 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
6516 NULL_TREE
, void_type_node
);
6517 TREE_PUBLIC (decl
) = 1;
6518 TREE_STATIC (decl
) = 1;
6520 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
6522 targetm
.asm_out
.unique_section (decl
, 0);
6523 switch_to_section (get_named_section (decl
, NULL
, 0));
6525 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
6526 fputs ("\t.hidden\t", asm_out_file
);
6527 assemble_name (asm_out_file
, name
);
6528 putc ('\n', asm_out_file
);
6531 /* Start a definition of function NAME. MIPS16_P indicates whether the
6532 function contains MIPS16 code. */
6535 mips_start_function_definition (const char *name
, bool mips16_p
)
6538 fprintf (asm_out_file
, "\t.set\tmips16\n");
6540 fprintf (asm_out_file
, "\t.set\tnomips16\n");
6542 if (TARGET_MICROMIPS
)
6543 fprintf (asm_out_file
, "\t.set\tmicromips\n");
6544 #ifdef HAVE_GAS_MICROMIPS
6546 fprintf (asm_out_file
, "\t.set\tnomicromips\n");
6549 if (!flag_inhibit_size_directive
)
6551 fputs ("\t.ent\t", asm_out_file
);
6552 assemble_name (asm_out_file
, name
);
6553 fputs ("\n", asm_out_file
);
6556 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file
, name
, "function");
6558 /* Start the definition proper. */
6559 assemble_name (asm_out_file
, name
);
6560 fputs (":\n", asm_out_file
);
6563 /* End a function definition started by mips_start_function_definition. */
6566 mips_end_function_definition (const char *name
)
6568 if (!flag_inhibit_size_directive
)
6570 fputs ("\t.end\t", asm_out_file
);
6571 assemble_name (asm_out_file
, name
);
6572 fputs ("\n", asm_out_file
);
6576 /* If *STUB_PTR points to a stub, output a comdat-style definition for it,
6577 then free *STUB_PTR. */
6580 mips_finish_stub (mips_one_only_stub
**stub_ptr
)
6582 mips_one_only_stub
*stub
= *stub_ptr
;
6586 const char *name
= stub
->get_name ();
6587 mips_start_unique_function (name
);
6588 mips_start_function_definition (name
, false);
6589 stub
->output_body ();
6590 mips_end_function_definition (name
);
6595 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
6598 mips_ok_for_lazy_binding_p (rtx x
)
6600 return (TARGET_USE_GOT
6601 && GET_CODE (x
) == SYMBOL_REF
6602 && !SYMBOL_REF_BIND_NOW_P (x
)
6603 && !mips_symbol_binds_local_p (x
));
6606 /* Load function address ADDR into register DEST. TYPE is as for
6607 mips_expand_call. Return true if we used an explicit lazy-binding
6611 mips_load_call_address (enum mips_call_type type
, rtx dest
, rtx addr
)
6613 /* If we're generating PIC, and this call is to a global function,
6614 try to allow its address to be resolved lazily. This isn't
6615 possible for sibcalls when $gp is call-saved because the value
6616 of $gp on entry to the stub would be our caller's gp, not ours. */
6617 if (TARGET_EXPLICIT_RELOCS
6618 && !(type
== MIPS_CALL_SIBCALL
&& TARGET_CALL_SAVED_GP
)
6619 && mips_ok_for_lazy_binding_p (addr
))
6621 addr
= mips_got_load (dest
, addr
, SYMBOL_GOTOFF_CALL
);
6622 emit_insn (gen_rtx_SET (dest
, addr
));
6627 mips_emit_move (dest
, addr
);
6632 /* Each locally-defined hard-float MIPS16 function has a local symbol
6633 associated with it. This hash table maps the function symbol (FUNC)
6634 to the local symbol (LOCAL). */
6635 static GTY (()) hash_map
<nofree_string_hash
, rtx
> *mips16_local_aliases
;
6637 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
6638 Return a local alias for it, creating a new one if necessary. */
6641 mips16_local_alias (rtx func
)
6643 /* Create the hash table if this is the first call. */
6644 if (mips16_local_aliases
== NULL
)
6645 mips16_local_aliases
= hash_map
<nofree_string_hash
, rtx
>::create_ggc (37);
6647 /* Look up the function symbol, creating a new entry if need be. */
6649 const char *func_name
= XSTR (func
, 0);
6650 rtx
*slot
= &mips16_local_aliases
->get_or_insert (func_name
, &existed
);
6651 gcc_assert (slot
!= NULL
);
6657 /* Create a new SYMBOL_REF for the local symbol. The choice of
6658 __fn_local_* is based on the __fn_stub_* names that we've
6659 traditionally used for the non-MIPS16 stub. */
6660 func_name
= targetm
.strip_name_encoding (XSTR (func
, 0));
6661 const char *local_name
= ACONCAT (("__fn_local_", func_name
, NULL
));
6662 local
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (local_name
));
6663 SYMBOL_REF_FLAGS (local
) = SYMBOL_REF_FLAGS (func
) | SYMBOL_FLAG_LOCAL
;
6665 /* Create a new structure to represent the mapping. */
/* A chained list of functions for which mips16_build_call_stub has already
   generated a stub.  NAME is the name of the function and FP_RET_P is true
   if the function returns a value in floating-point registers.  */
struct mips16_stub {
  struct mips16_stub *next;
  /* Malloc'd name of the stubbed function.  */
  char *name;
  /* True if the function returns its value in FPRs.  */
  bool fp_ret_p;
};
static struct mips16_stub *mips16_stubs;
6681 /* Return the two-character string that identifies floating-point
6682 return mode MODE in the name of a MIPS16 function stub. */
6685 mips16_call_stub_mode_suffix (machine_mode mode
)
6689 else if (mode
== DFmode
)
6691 else if (mode
== SCmode
)
6693 else if (mode
== DCmode
)
6695 else if (mode
== V2SFmode
)
6697 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
);
6704 /* Write instructions to move a 32-bit value between general register
6705 GPREG and floating-point register FPREG. DIRECTION is 't' to move
6706 from GPREG to FPREG and 'f' to move in the opposite direction. */
6709 mips_output_32bit_xfer (char direction
, unsigned int gpreg
, unsigned int fpreg
)
6711 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
6712 reg_names
[gpreg
], reg_names
[fpreg
]);
6715 /* Likewise for 64-bit values. */
6718 mips_output_64bit_xfer (char direction
, unsigned int gpreg
, unsigned int fpreg
)
6721 fprintf (asm_out_file
, "\tdm%cc1\t%s,%s\n", direction
,
6722 reg_names
[gpreg
], reg_names
[fpreg
]);
6723 else if (ISA_HAS_MXHC1
)
6725 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
6726 reg_names
[gpreg
+ TARGET_BIG_ENDIAN
], reg_names
[fpreg
]);
6727 fprintf (asm_out_file
, "\tm%chc1\t%s,%s\n", direction
,
6728 reg_names
[gpreg
+ TARGET_LITTLE_ENDIAN
], reg_names
[fpreg
]);
6730 else if (TARGET_FLOATXX
&& direction
== 't')
6732 /* Use the argument save area to move via memory. */
6733 fprintf (asm_out_file
, "\tsw\t%s,0($sp)\n", reg_names
[gpreg
]);
6734 fprintf (asm_out_file
, "\tsw\t%s,4($sp)\n", reg_names
[gpreg
+ 1]);
6735 fprintf (asm_out_file
, "\tldc1\t%s,0($sp)\n", reg_names
[fpreg
]);
6737 else if (TARGET_FLOATXX
&& direction
== 'f')
6739 /* Use the argument save area to move via memory. */
6740 fprintf (asm_out_file
, "\tsdc1\t%s,0($sp)\n", reg_names
[fpreg
]);
6741 fprintf (asm_out_file
, "\tlw\t%s,0($sp)\n", reg_names
[gpreg
]);
6742 fprintf (asm_out_file
, "\tlw\t%s,4($sp)\n", reg_names
[gpreg
+ 1]);
6746 /* Move the least-significant word. */
6747 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
6748 reg_names
[gpreg
+ TARGET_BIG_ENDIAN
], reg_names
[fpreg
]);
6749 /* ...then the most significant word. */
6750 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
6751 reg_names
[gpreg
+ TARGET_LITTLE_ENDIAN
], reg_names
[fpreg
+ 1]);
6755 /* Write out code to move floating-point arguments into or out of
6756 general registers. FP_CODE is the code describing which arguments
6757 are present (see the comment above the definition of CUMULATIVE_ARGS
6758 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
6761 mips_output_args_xfer (int fp_code
, char direction
)
6763 unsigned int gparg
, fparg
, f
;
6764 CUMULATIVE_ARGS cum
;
6766 /* This code only works for o32 and o64. */
6767 gcc_assert (TARGET_OLDABI
);
6769 mips_init_cumulative_args (&cum
, NULL
);
6771 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
6774 struct mips_arg_info info
;
6778 else if ((f
& 3) == 2)
6783 mips_get_arg_info (&info
, &cum
, mode
, NULL
, true);
6784 gparg
= mips_arg_regno (&info
, false);
6785 fparg
= mips_arg_regno (&info
, true);
6788 mips_output_32bit_xfer (direction
, gparg
, fparg
);
6790 mips_output_64bit_xfer (direction
, gparg
, fparg
);
6792 mips_function_arg_advance (pack_cumulative_args (&cum
), mode
, NULL
, true);
6796 /* Write a MIPS16 stub for the current function. This stub is used
6797 for functions which take arguments in the floating-point registers.
6798 It is normal-mode code that moves the floating-point arguments
6799 into the general registers and then jumps to the MIPS16 code. */
6802 mips16_build_function_stub (void)
6804 const char *fnname
, *alias_name
, *separator
;
6805 char *secname
, *stubname
;
6810 /* Create the name of the stub, and its unique section. */
6811 symbol
= XEXP (DECL_RTL (current_function_decl
), 0);
6812 alias
= mips16_local_alias (symbol
);
6814 fnname
= targetm
.strip_name_encoding (XSTR (symbol
, 0));
6815 alias_name
= targetm
.strip_name_encoding (XSTR (alias
, 0));
6816 secname
= ACONCAT ((".mips16.fn.", fnname
, NULL
));
6817 stubname
= ACONCAT (("__fn_stub_", fnname
, NULL
));
6819 /* Build a decl for the stub. */
6820 stubdecl
= build_decl (BUILTINS_LOCATION
,
6821 FUNCTION_DECL
, get_identifier (stubname
),
6822 build_function_type_list (void_type_node
, NULL_TREE
));
6823 set_decl_section_name (stubdecl
, secname
);
6824 DECL_RESULT (stubdecl
) = build_decl (BUILTINS_LOCATION
,
6825 RESULT_DECL
, NULL_TREE
, void_type_node
);
6827 /* Output a comment. */
6828 fprintf (asm_out_file
, "\t# Stub function for %s (",
6829 current_function_name ());
6831 for (f
= (unsigned int) crtl
->args
.info
.fp_code
; f
!= 0; f
>>= 2)
6833 fprintf (asm_out_file
, "%s%s", separator
,
6834 (f
& 3) == 1 ? "float" : "double");
6837 fprintf (asm_out_file
, ")\n");
6839 /* Start the function definition. */
6840 assemble_start_function (stubdecl
, stubname
);
6841 mips_start_function_definition (stubname
, false);
6843 /* If generating pic2 code, either set up the global pointer or
6845 if (TARGET_ABICALLS_PIC2
)
6847 if (TARGET_ABSOLUTE_ABICALLS
)
6848 fprintf (asm_out_file
, "\t.option\tpic0\n");
6851 output_asm_insn ("%(.cpload\t%^%)", NULL
);
6852 /* Emit an R_MIPS_NONE relocation to tell the linker what the
6853 target function is. Use a local GOT access when loading the
6854 symbol, to cut down on the number of unnecessary GOT entries
6855 for stubs that aren't needed. */
6856 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol
);
6861 /* Load the address of the MIPS16 function into $25. Do this first so
6862 that targets with coprocessor interlocks can use an MFC1 to fill the
6864 output_asm_insn ("la\t%^,%0", &symbol
);
6866 /* Move the arguments from floating-point registers to general registers. */
6867 mips_output_args_xfer (crtl
->args
.info
.fp_code
, 'f');
6869 /* Jump to the MIPS16 function. */
6870 output_asm_insn ("jr\t%^", NULL
);
6872 if (TARGET_ABICALLS_PIC2
&& TARGET_ABSOLUTE_ABICALLS
)
6873 fprintf (asm_out_file
, "\t.option\tpic2\n");
6875 mips_end_function_definition (stubname
);
6877 /* If the linker needs to create a dynamic symbol for the target
6878 function, it will associate the symbol with the stub (which,
6879 unlike the target function, follows the proper calling conventions).
6880 It is therefore useful to have a local alias for the target function,
6881 so that it can still be identified as MIPS16 code. As an optimization,
6882 this symbol can also be used for indirect MIPS16 references from
6883 within this file. */
6884 ASM_OUTPUT_DEF (asm_out_file
, alias_name
, fnname
);
6886 switch_to_section (function_section (current_function_decl
));
6889 /* The current function is a MIPS16 function that returns a value in an FPR.
6890 Copy the return value from its soft-float to its hard-float location.
6891 libgcc2 has special non-MIPS16 helper functions for each case. */
6894 mips16_copy_fpr_return_value (void)
6896 rtx fn
, insn
, retval
;
6898 machine_mode return_mode
;
6901 return_type
= DECL_RESULT (current_function_decl
);
6902 return_mode
= DECL_MODE (return_type
);
6904 name
= ACONCAT (("__mips16_ret_",
6905 mips16_call_stub_mode_suffix (return_mode
),
6907 fn
= mips16_stub_function (name
);
6909 /* The function takes arguments in $2 (and possibly $3), so calls
6910 to it cannot be lazily bound. */
6911 SYMBOL_REF_FLAGS (fn
) |= SYMBOL_FLAG_BIND_NOW
;
6913 /* Model the call as something that takes the GPR return value as
6914 argument and returns an "updated" value. */
6915 retval
= gen_rtx_REG (return_mode
, GP_RETURN
);
6916 insn
= mips_expand_call (MIPS_CALL_EPILOGUE
, retval
, fn
,
6917 const0_rtx
, NULL_RTX
, false);
6918 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), retval
);
6921 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
6922 RETVAL is the location of the return value, or null if this is
6923 a "call" rather than a "call_value". ARGS_SIZE is the size of the
6924 arguments and FP_CODE is the code built by mips_function_arg;
6925 see the comment before the fp_code field in CUMULATIVE_ARGS for details.
6927 There are three alternatives:
6929 - If a stub was needed, emit the call and return the call insn itself.
6931 - If we can avoid using a stub by redirecting the call, set *FN_PTR
6932 to the new target and return null.
6934 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
6937 A stub is needed for calls to functions that, in normal mode,
6938 receive arguments in FPRs or return values in FPRs. The stub
6939 copies the arguments from their soft-float positions to their
6940 hard-float positions, calls the real function, then copies the
6941 return value from its hard-float position to its soft-float
6944 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
6945 If *FN_PTR turns out to be to a non-MIPS16 function, the linker
6946 automatically redirects the JAL to the stub, otherwise the JAL
6947 continues to call FN directly. */
6950 mips16_build_call_stub (rtx retval
, rtx
*fn_ptr
, rtx args_size
, int fp_code
)
6954 struct mips16_stub
*l
;
6958 /* We don't need to do anything if we aren't in MIPS16 mode, or if
6959 we were invoked with the -msoft-float option. */
6960 if (!TARGET_MIPS16
|| TARGET_SOFT_FLOAT_ABI
)
6963 /* Figure out whether the value might come back in a floating-point
6965 fp_ret_p
= retval
&& mips_return_mode_in_fpr_p (GET_MODE (retval
));
6967 /* We don't need to do anything if there were no floating-point
6968 arguments and the value will not be returned in a floating-point
6970 if (fp_code
== 0 && !fp_ret_p
)
6973 /* We don't need to do anything if this is a call to a special
6974 MIPS16 support function. */
6976 if (mips16_stub_function_p (fn
))
6979 /* If we're calling a locally-defined MIPS16 function, we know that
6980 it will return values in both the "soft-float" and "hard-float"
6981 registers. There is no need to use a stub to move the latter
6983 if (fp_code
== 0 && mips16_local_function_p (fn
))
6986 /* This code will only work for o32 and o64 abis. The other ABI's
6987 require more sophisticated support. */
6988 gcc_assert (TARGET_OLDABI
);
6990 /* If we're calling via a function pointer, use one of the magic
6991 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
6992 Each stub expects the function address to arrive in register $2. */
6993 if (GET_CODE (fn
) != SYMBOL_REF
6994 || !call_insn_operand (fn
, VOIDmode
))
7001 /* If this is a locally-defined and locally-binding function,
7002 avoid the stub by calling the local alias directly. */
7003 if (mips16_local_function_p (fn
))
7005 *fn_ptr
= mips16_local_alias (fn
);
7009 /* Create a SYMBOL_REF for the libgcc.a function. */
7011 sprintf (buf
, "__mips16_call_stub_%s_%d",
7012 mips16_call_stub_mode_suffix (GET_MODE (retval
)),
7015 sprintf (buf
, "__mips16_call_stub_%d", fp_code
);
7016 stub_fn
= mips16_stub_function (buf
);
7018 /* The function uses $2 as an argument, so calls to it
7019 cannot be lazily bound. */
7020 SYMBOL_REF_FLAGS (stub_fn
) |= SYMBOL_FLAG_BIND_NOW
;
7022 /* Load the target function into $2. */
7023 addr
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 2);
7024 lazy_p
= mips_load_call_address (MIPS_CALL_NORMAL
, addr
, fn
);
7026 /* Emit the call. */
7027 insn
= mips_expand_call (MIPS_CALL_NORMAL
, retval
, stub_fn
,
7028 args_size
, NULL_RTX
, lazy_p
);
7030 /* Tell GCC that this call does indeed use the value of $2. */
7031 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), addr
);
7033 /* If we are handling a floating-point return value, we need to
7034 save $18 in the function prologue. Putting a note on the
7035 call will mean that df_regs_ever_live_p ($18) will be true if the
7036 call is not eliminated, and we can check that in the prologue
7039 CALL_INSN_FUNCTION_USAGE (insn
) =
7040 gen_rtx_EXPR_LIST (VOIDmode
,
7041 gen_rtx_CLOBBER (VOIDmode
,
7042 gen_rtx_REG (word_mode
, 18)),
7043 CALL_INSN_FUNCTION_USAGE (insn
));
7048 /* We know the function we are going to call. If we have already
7049 built a stub, we don't need to do anything further. */
7050 fnname
= targetm
.strip_name_encoding (XSTR (fn
, 0));
7051 for (l
= mips16_stubs
; l
!= NULL
; l
= l
->next
)
7052 if (strcmp (l
->name
, fnname
) == 0)
7057 const char *separator
;
7058 char *secname
, *stubname
;
7059 tree stubid
, stubdecl
;
7062 /* If the function does not return in FPRs, the special stub
7066 If the function does return in FPRs, the stub section is named
7067 .mips16.call.fp.FNNAME
7069 Build a decl for the stub. */
7070 secname
= ACONCAT ((".mips16.call.", fp_ret_p
? "fp." : "",
7072 stubname
= ACONCAT (("__call_stub_", fp_ret_p
? "fp_" : "",
7074 stubid
= get_identifier (stubname
);
7075 stubdecl
= build_decl (BUILTINS_LOCATION
,
7076 FUNCTION_DECL
, stubid
,
7077 build_function_type_list (void_type_node
,
7079 set_decl_section_name (stubdecl
, secname
);
7080 DECL_RESULT (stubdecl
) = build_decl (BUILTINS_LOCATION
,
7081 RESULT_DECL
, NULL_TREE
,
7084 /* Output a comment. */
7085 fprintf (asm_out_file
, "\t# Stub function to call %s%s (",
7087 ? (GET_MODE (retval
) == SFmode
? "float " : "double ")
7091 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
7093 fprintf (asm_out_file
, "%s%s", separator
,
7094 (f
& 3) == 1 ? "float" : "double");
7097 fprintf (asm_out_file
, ")\n");
7099 /* Start the function definition. */
7100 assemble_start_function (stubdecl
, stubname
);
7101 mips_start_function_definition (stubname
, false);
7105 fprintf (asm_out_file
, "\t.cfi_startproc\n");
7107 /* Create a fake CFA 4 bytes below the stack pointer.
7108 This works around unwinders (like libgcc's) that expect
7109 the CFA for non-signal frames to be unique. */
7110 fprintf (asm_out_file
, "\t.cfi_def_cfa 29,-4\n");
7112 /* "Save" $sp in itself so we don't use the fake CFA.
7113 This is: DW_CFA_val_expression r29, { DW_OP_reg29 }. */
7114 fprintf (asm_out_file
, "\t.cfi_escape 0x16,29,1,0x6d\n");
7116 /* Save the return address in $18. The stub's caller knows
7117 that $18 might be clobbered, even though $18 is usually
7118 a call-saved register.
7120 Do it early on in case the last move to a floating-point
7121 register can be scheduled into the delay slot of the
7122 call we are about to make. */
7123 fprintf (asm_out_file
, "\tmove\t%s,%s\n",
7124 reg_names
[GP_REG_FIRST
+ 18],
7125 reg_names
[RETURN_ADDR_REGNUM
]);
7129 /* Load the address of the MIPS16 function into $25. Do this
7130 first so that targets with coprocessor interlocks can use
7131 an MFC1 to fill the delay slot. */
7132 if (TARGET_EXPLICIT_RELOCS
)
7134 output_asm_insn ("lui\t%^,%%hi(%0)", &fn
);
7135 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn
);
7138 output_asm_insn ("la\t%^,%0", &fn
);
7141 /* Move the arguments from general registers to floating-point
7143 mips_output_args_xfer (fp_code
, 't');
7147 /* Now call the non-MIPS16 function. */
7148 output_asm_insn (mips_output_jump (&fn
, 0, -1, true), &fn
);
7149 fprintf (asm_out_file
, "\t.cfi_register 31,18\n");
7151 /* Move the result from floating-point registers to
7152 general registers. */
7153 switch (GET_MODE (retval
))
7156 mips_output_32bit_xfer ('f', GP_RETURN
+ TARGET_BIG_ENDIAN
,
7160 mips_output_32bit_xfer ('f', GP_RETURN
+ TARGET_LITTLE_ENDIAN
,
7161 TARGET_LITTLE_ENDIAN
7164 if (GET_MODE (retval
) == SCmode
&& TARGET_64BIT
)
7166 /* On 64-bit targets, complex floats are returned in
7167 a single GPR, such that "sd" on a suitably-aligned
7168 target would store the value correctly. */
7169 fprintf (asm_out_file
, "\tdsll\t%s,%s,32\n",
7170 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
],
7171 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
]);
7172 fprintf (asm_out_file
, "\tdsll\t%s,%s,32\n",
7173 reg_names
[GP_RETURN
+ TARGET_LITTLE_ENDIAN
],
7174 reg_names
[GP_RETURN
+ TARGET_LITTLE_ENDIAN
]);
7175 fprintf (asm_out_file
, "\tdsrl\t%s,%s,32\n",
7176 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
],
7177 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
]);
7178 fprintf (asm_out_file
, "\tor\t%s,%s,%s\n",
7179 reg_names
[GP_RETURN
],
7180 reg_names
[GP_RETURN
],
7181 reg_names
[GP_RETURN
+ 1]);
7186 mips_output_32bit_xfer ('f', GP_RETURN
, FP_REG_FIRST
);
7190 mips_output_64bit_xfer ('f', GP_RETURN
+ (8 / UNITS_PER_WORD
),
7195 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
7196 || GET_MODE (retval
) != V2SFmode
);
7197 mips_output_64bit_xfer ('f', GP_RETURN
, FP_REG_FIRST
);
7203 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 18]);
7204 fprintf (asm_out_file
, "\t.cfi_endproc\n");
7208 /* Jump to the previously-loaded address. */
7209 output_asm_insn ("jr\t%^", NULL
);
7212 #ifdef ASM_DECLARE_FUNCTION_SIZE
7213 ASM_DECLARE_FUNCTION_SIZE (asm_out_file
, stubname
, stubdecl
);
7216 mips_end_function_definition (stubname
);
7218 /* Record this stub. */
7219 l
= XNEW (struct mips16_stub
);
7220 l
->name
= xstrdup (fnname
);
7221 l
->fp_ret_p
= fp_ret_p
;
7222 l
->next
= mips16_stubs
;
7226 /* If we expect a floating-point return value, but we've built a
7227 stub which does not expect one, then we're in trouble. We can't
7228 use the existing stub, because it won't handle the floating-point
7229 value. We can't build a new stub, because the linker won't know
7230 which stub to use for the various calls in this object file.
7231 Fortunately, this case is illegal, since it means that a function
7232 was declared in two different ways in a single compilation. */
7233 if (fp_ret_p
&& !l
->fp_ret_p
)
7234 error ("cannot handle inconsistent calls to %qs", fnname
);
7236 if (retval
== NULL_RTX
)
7237 pattern
= gen_call_internal_direct (fn
, args_size
);
7239 pattern
= gen_call_value_internal_direct (retval
, fn
, args_size
);
7240 insn
= mips_emit_call_insn (pattern
, fn
, fn
, false);
7242 /* If we are calling a stub which handles a floating-point return
7243 value, we need to arrange to save $18 in the prologue. We do this
7244 by marking the function call as using the register. The prologue
7245 will later see that it is used, and emit code to save it. */
7247 CALL_INSN_FUNCTION_USAGE (insn
) =
7248 gen_rtx_EXPR_LIST (VOIDmode
,
7249 gen_rtx_CLOBBER (VOIDmode
,
7250 gen_rtx_REG (word_mode
, 18)),
7251 CALL_INSN_FUNCTION_USAGE (insn
));
7256 /* Expand a call of type TYPE. RESULT is where the result will go (null
7257 for "call"s and "sibcall"s), ADDR is the address of the function,
7258 ARGS_SIZE is the size of the arguments and AUX is the value passed
7259 to us by mips_function_arg. LAZY_P is true if this call already
7260 involves a lazily-bound function address (such as when calling
7261 functions through a MIPS16 hard-float stub).
7263 Return the call itself. */
7266 mips_expand_call (enum mips_call_type type
, rtx result
, rtx addr
,
7267 rtx args_size
, rtx aux
, bool lazy_p
)
7269 rtx orig_addr
, pattern
;
7273 fp_code
= aux
== 0 ? 0 : (int) GET_MODE (aux
);
7274 insn
= mips16_build_call_stub (result
, &addr
, args_size
, fp_code
);
7277 gcc_assert (!lazy_p
&& type
== MIPS_CALL_NORMAL
);
7282 if (!call_insn_operand (addr
, VOIDmode
))
7284 if (type
== MIPS_CALL_EPILOGUE
)
7285 addr
= MIPS_EPILOGUE_TEMP (Pmode
);
7287 addr
= gen_reg_rtx (Pmode
);
7288 lazy_p
|= mips_load_call_address (type
, addr
, orig_addr
);
7293 rtx (*fn
) (rtx
, rtx
);
7295 if (type
== MIPS_CALL_SIBCALL
)
7296 fn
= gen_sibcall_internal
;
7298 fn
= gen_call_internal
;
7300 pattern
= fn (addr
, args_size
);
7302 else if (GET_CODE (result
) == PARALLEL
&& XVECLEN (result
, 0) == 2)
7304 /* Handle return values created by mips_return_fpr_pair. */
7305 rtx (*fn
) (rtx
, rtx
, rtx
, rtx
);
7308 if (type
== MIPS_CALL_SIBCALL
)
7309 fn
= gen_sibcall_value_multiple_internal
;
7311 fn
= gen_call_value_multiple_internal
;
7313 reg1
= XEXP (XVECEXP (result
, 0, 0), 0);
7314 reg2
= XEXP (XVECEXP (result
, 0, 1), 0);
7315 pattern
= fn (reg1
, addr
, args_size
, reg2
);
7319 rtx (*fn
) (rtx
, rtx
, rtx
);
7321 if (type
== MIPS_CALL_SIBCALL
)
7322 fn
= gen_sibcall_value_internal
;
7324 fn
= gen_call_value_internal
;
7326 /* Handle return values created by mips_return_fpr_single. */
7327 if (GET_CODE (result
) == PARALLEL
&& XVECLEN (result
, 0) == 1)
7328 result
= XEXP (XVECEXP (result
, 0, 0), 0);
7329 pattern
= fn (result
, addr
, args_size
);
7332 return mips_emit_call_insn (pattern
, orig_addr
, addr
, lazy_p
);
7335 /* Split call instruction INSN into a $gp-clobbering call and
7336 (where necessary) an instruction to restore $gp from its save slot.
7337 CALL_PATTERN is the pattern of the new call. */
7340 mips_split_call (rtx insn
, rtx call_pattern
)
7342 emit_call_insn (call_pattern
);
7343 if (!find_reg_note (insn
, REG_NORETURN
, 0))
7344 mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode
,
7345 POST_CALL_TMP_REG
));
7348 /* Return true if a call to DECL may need to use JALX. */
7351 mips_call_may_need_jalx_p (tree decl
)
7353 /* If the current translation unit would use a different mode for DECL,
7354 assume that the call needs JALX. */
7355 if (mips_get_compress_mode (decl
) != TARGET_COMPRESSION
)
7358 /* mips_get_compress_mode is always accurate for locally-binding
7359 functions in the current translation unit. */
7360 if (!DECL_EXTERNAL (decl
) && targetm
.binds_local_p (decl
))
7363 /* When -minterlink-compressed is in effect, assume that functions
7364 could use a different encoding mode unless an attribute explicitly
7365 tells us otherwise. */
7366 if (TARGET_INTERLINK_COMPRESSED
)
7368 if (!TARGET_COMPRESSION
7369 && mips_get_compress_off_flags (DECL_ATTRIBUTES (decl
)) ==0)
7371 if (TARGET_COMPRESSION
7372 && mips_get_compress_on_flags (DECL_ATTRIBUTES (decl
)) == 0)
7379 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
7382 mips_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
7384 if (!TARGET_SIBCALLS
)
7387 /* Interrupt handlers need special epilogue code and therefore can't
7389 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl
)))
7392 /* Direct Js are only possible to functions that use the same ISA encoding.
7393 There is no JX counterpoart of JALX. */
7395 && const_call_insn_operand (XEXP (DECL_RTL (decl
), 0), VOIDmode
)
7396 && mips_call_may_need_jalx_p (decl
))
7399 /* Sibling calls should not prevent lazy binding. Lazy-binding stubs
7400 require $gp to be valid on entry, so sibcalls can only use stubs
7401 if $gp is call-clobbered. */
7403 && TARGET_CALL_SAVED_GP
7404 && !TARGET_ABICALLS_PIC0
7405 && !targetm
.binds_local_p (decl
))
7412 /* Implement TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */
7415 mips_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size
,
7417 enum by_pieces_operation op
,
7420 if (op
== STORE_BY_PIECES
)
7421 return mips_store_by_pieces_p (size
, align
);
7422 if (op
== MOVE_BY_PIECES
&& HAVE_movmemsi
)
7424 /* movmemsi is meant to generate code that is at least as good as
7425 move_by_pieces. However, movmemsi effectively uses a by-pieces
7426 implementation both for moves smaller than a word and for
7427 word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
7428 bytes. We should allow the tree-level optimisers to do such
7429 moves by pieces, as it often exposes other optimization
7430 opportunities. We might as well continue to use movmemsi at
7431 the rtl level though, as it produces better code when
7432 scheduling is disabled (such as at -O). */
7433 if (currently_expanding_to_rtl
)
7435 if (align
< BITS_PER_WORD
)
7436 return size
< UNITS_PER_WORD
;
7437 return size
<= MIPS_MAX_MOVE_BYTES_STRAIGHT
;
7440 return default_use_by_pieces_infrastructure_p (size
, align
, op
, speed_p
);
7443 /* Implement a handler for STORE_BY_PIECES operations
7444 for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */
7447 mips_store_by_pieces_p (unsigned HOST_WIDE_INT size
, unsigned int align
)
7449 /* Storing by pieces involves moving constants into registers
7450 of size MIN (ALIGN, BITS_PER_WORD), then storing them.
7451 We need to decide whether it is cheaper to load the address of
7452 constant data into a register and use a block move instead. */
7454 /* If the data is only byte aligned, then:
7456 (a1) A block move of less than 4 bytes would involve three 3 LBs and
7457 3 SBs. We might as well use 3 single-instruction LIs and 3 SBs
7460 (a2) A block move of 4 bytes from aligned source data can use an
7461 LW/SWL/SWR sequence. This is often better than the 4 LIs and
7462 4 SBs that we would generate when storing by pieces. */
7463 if (align
<= BITS_PER_UNIT
)
7466 /* If the data is 2-byte aligned, then:
7468 (b1) A block move of less than 4 bytes would use a combination of LBs,
7469 LHs, SBs and SHs. We get better code by using single-instruction
7470 LIs, SBs and SHs instead.
7472 (b2) A block move of 4 bytes from aligned source data would again use
7473 an LW/SWL/SWR sequence. In most cases, loading the address of
7474 the source data would require at least one extra instruction.
7475 It is often more efficient to use 2 single-instruction LIs and
7478 (b3) A block move of up to 3 additional bytes would be like (b1).
7480 (b4) A block move of 8 bytes from aligned source data can use two
7481 LW/SWL/SWR sequences or a single LD/SDL/SDR sequence. Both
7482 sequences are better than the 4 LIs and 4 SHs that we'd generate
7483 when storing by pieces.
7485 The reasoning for higher alignments is similar:
7487 (c1) A block move of less than 4 bytes would be the same as (b1).
7489 (c2) A block move of 4 bytes would use an LW/SW sequence. Again,
7490 loading the address of the source data would typically require
7491 at least one extra instruction. It is generally better to use
7494 (c3) A block move of up to 3 additional bytes would be like (b1).
7496 (c4) A block move of 8 bytes can use two LW/SW sequences or a single
7497 LD/SD sequence, and in these cases we've traditionally preferred
7498 the memory copy over the more bulky constant moves. */
7502 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
7503 Assume that the areas do not overlap. */
7506 mips_block_move_straight (rtx dest
, rtx src
, HOST_WIDE_INT length
)
7508 HOST_WIDE_INT offset
, delta
;
7509 unsigned HOST_WIDE_INT bits
;
7514 /* Work out how many bits to move at a time. If both operands have
7515 half-word alignment, it is usually better to move in half words.
7516 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
7517 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
7518 Otherwise move word-sized chunks.
7520 For ISA_HAS_LWL_LWR we rely on the lwl/lwr & swl/swr load. Otherwise
7521 picking the minimum of alignment or BITS_PER_WORD gets us the
7522 desired size for bits. */
7524 if (!ISA_HAS_LWL_LWR
)
7525 bits
= MIN (BITS_PER_WORD
, MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)));
7528 if (MEM_ALIGN (src
) == BITS_PER_WORD
/ 2
7529 && MEM_ALIGN (dest
) == BITS_PER_WORD
/ 2)
7530 bits
= BITS_PER_WORD
/ 2;
7532 bits
= BITS_PER_WORD
;
7535 mode
= mode_for_size (bits
, MODE_INT
, 0);
7536 delta
= bits
/ BITS_PER_UNIT
;
7538 /* Allocate a buffer for the temporary registers. */
7539 regs
= XALLOCAVEC (rtx
, length
/ delta
);
7541 /* Load as many BITS-sized chunks as possible. Use a normal load if
7542 the source has enough alignment, otherwise use left/right pairs. */
7543 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
7545 regs
[i
] = gen_reg_rtx (mode
);
7546 if (MEM_ALIGN (src
) >= bits
)
7547 mips_emit_move (regs
[i
], adjust_address (src
, mode
, offset
));
7550 rtx part
= adjust_address (src
, BLKmode
, offset
);
7551 set_mem_size (part
, delta
);
7552 if (!mips_expand_ext_as_unaligned_load (regs
[i
], part
, bits
, 0, 0))
7557 /* Copy the chunks to the destination. */
7558 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
7559 if (MEM_ALIGN (dest
) >= bits
)
7560 mips_emit_move (adjust_address (dest
, mode
, offset
), regs
[i
]);
7563 rtx part
= adjust_address (dest
, BLKmode
, offset
);
7564 set_mem_size (part
, delta
);
7565 if (!mips_expand_ins_as_unaligned_store (part
, regs
[i
], bits
, 0))
7569 /* Mop up any left-over bytes. */
7570 if (offset
< length
)
7572 src
= adjust_address (src
, BLKmode
, offset
);
7573 dest
= adjust_address (dest
, BLKmode
, offset
);
7574 move_by_pieces (dest
, src
, length
- offset
,
7575 MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)), 0);
7579 /* Helper function for doing a loop-based block operation on memory
7580 reference MEM. Each iteration of the loop will operate on LENGTH
7583 Create a new base register for use within the loop and point it to
7584 the start of MEM. Create a new memory reference that uses this
7585 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
7588 mips_adjust_block_mem (rtx mem
, HOST_WIDE_INT length
,
7589 rtx
*loop_reg
, rtx
*loop_mem
)
7591 *loop_reg
= copy_addr_to_reg (XEXP (mem
, 0));
7593 /* Although the new mem does not refer to a known location,
7594 it does keep up to LENGTH bytes of alignment. */
7595 *loop_mem
= change_address (mem
, BLKmode
, *loop_reg
);
7596 set_mem_align (*loop_mem
, MIN (MEM_ALIGN (mem
), length
* BITS_PER_UNIT
));
7599 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
7600 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
7601 the memory regions do not overlap. */
7604 mips_block_move_loop (rtx dest
, rtx src
, HOST_WIDE_INT length
,
7605 HOST_WIDE_INT bytes_per_iter
)
7607 rtx_code_label
*label
;
7608 rtx src_reg
, dest_reg
, final_src
, test
;
7609 HOST_WIDE_INT leftover
;
7611 leftover
= length
% bytes_per_iter
;
7614 /* Create registers and memory references for use within the loop. */
7615 mips_adjust_block_mem (src
, bytes_per_iter
, &src_reg
, &src
);
7616 mips_adjust_block_mem (dest
, bytes_per_iter
, &dest_reg
, &dest
);
7618 /* Calculate the value that SRC_REG should have after the last iteration
7620 final_src
= expand_simple_binop (Pmode
, PLUS
, src_reg
, GEN_INT (length
),
7623 /* Emit the start of the loop. */
7624 label
= gen_label_rtx ();
7627 /* Emit the loop body. */
7628 mips_block_move_straight (dest
, src
, bytes_per_iter
);
7630 /* Move on to the next block. */
7631 mips_emit_move (src_reg
, plus_constant (Pmode
, src_reg
, bytes_per_iter
));
7632 mips_emit_move (dest_reg
, plus_constant (Pmode
, dest_reg
, bytes_per_iter
));
7634 /* Emit the loop condition. */
7635 test
= gen_rtx_NE (VOIDmode
, src_reg
, final_src
);
7636 if (Pmode
== DImode
)
7637 emit_jump_insn (gen_cbranchdi4 (test
, src_reg
, final_src
, label
));
7639 emit_jump_insn (gen_cbranchsi4 (test
, src_reg
, final_src
, label
));
7641 /* Mop up any left-over bytes. */
7643 mips_block_move_straight (dest
, src
, leftover
);
7646 /* Expand a movmemsi instruction, which copies LENGTH bytes from
7647 memory reference SRC to memory reference DEST. */
7650 mips_expand_block_move (rtx dest
, rtx src
, rtx length
)
7652 if (!ISA_HAS_LWL_LWR
7653 && (MEM_ALIGN (src
) < MIPS_MIN_MOVE_MEM_ALIGN
7654 || MEM_ALIGN (dest
) < MIPS_MIN_MOVE_MEM_ALIGN
))
7657 if (CONST_INT_P (length
))
7659 if (INTVAL (length
) <= MIPS_MAX_MOVE_BYTES_STRAIGHT
)
7661 mips_block_move_straight (dest
, src
, INTVAL (length
));
7666 mips_block_move_loop (dest
, src
, INTVAL (length
),
7667 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER
);
7674 /* Expand a loop of synci insns for the address range [BEGIN, END). */
7677 mips_expand_synci_loop (rtx begin
, rtx end
)
7679 rtx inc
, cmp_result
, mask
, length
;
7680 rtx_code_label
*label
, *end_label
;
7682 /* Create end_label. */
7683 end_label
= gen_label_rtx ();
7685 /* Check if begin equals end. */
7686 cmp_result
= gen_rtx_EQ (VOIDmode
, begin
, end
);
7687 emit_jump_insn (gen_condjump (cmp_result
, end_label
));
7689 /* Load INC with the cache line size (rdhwr INC,$1). */
7690 inc
= gen_reg_rtx (Pmode
);
7691 emit_insn (PMODE_INSN (gen_rdhwr_synci_step
, (inc
)));
7693 /* Check if inc is 0. */
7694 cmp_result
= gen_rtx_EQ (VOIDmode
, inc
, const0_rtx
);
7695 emit_jump_insn (gen_condjump (cmp_result
, end_label
));
7697 /* Calculate mask. */
7698 mask
= mips_force_unary (Pmode
, NEG
, inc
);
7700 /* Mask out begin by mask. */
7701 begin
= mips_force_binary (Pmode
, AND
, begin
, mask
);
7703 /* Calculate length. */
7704 length
= mips_force_binary (Pmode
, MINUS
, end
, begin
);
7706 /* Loop back to here. */
7707 label
= gen_label_rtx ();
7710 emit_insn (gen_synci (begin
));
7712 /* Update length. */
7713 mips_emit_binary (MINUS
, length
, length
, inc
);
7716 mips_emit_binary (PLUS
, begin
, begin
, inc
);
7718 /* Check if length is greater than 0. */
7719 cmp_result
= gen_rtx_GT (VOIDmode
, length
, const0_rtx
);
7720 emit_jump_insn (gen_condjump (cmp_result
, label
));
7722 emit_label (end_label
);
7725 /* Expand a QI or HI mode atomic memory operation.
7727 GENERATOR contains a pointer to the gen_* function that generates
7728 the SI mode underlying atomic operation using masks that we
7731 RESULT is the return register for the operation. Its value is NULL
7734 MEM is the location of the atomic access.
7736 OLDVAL is the first operand for the operation.
7738 NEWVAL is the optional second operand for the operation. Its value
7739 is NULL if unused. */
7742 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator
,
7743 rtx result
, rtx mem
, rtx oldval
, rtx newval
)
7745 rtx orig_addr
, memsi_addr
, memsi
, shift
, shiftsi
, unshifted_mask
;
7746 rtx unshifted_mask_reg
, mask
, inverted_mask
, si_op
;
7750 mode
= GET_MODE (mem
);
7752 /* Compute the address of the containing SImode value. */
7753 orig_addr
= force_reg (Pmode
, XEXP (mem
, 0));
7754 memsi_addr
= mips_force_binary (Pmode
, AND
, orig_addr
,
7755 force_reg (Pmode
, GEN_INT (-4)));
7757 /* Create a memory reference for it. */
7758 memsi
= gen_rtx_MEM (SImode
, memsi_addr
);
7759 set_mem_alias_set (memsi
, ALIAS_SET_MEMORY_BARRIER
);
7760 MEM_VOLATILE_P (memsi
) = MEM_VOLATILE_P (mem
);
7762 /* Work out the byte offset of the QImode or HImode value,
7763 counting from the least significant byte. */
7764 shift
= mips_force_binary (Pmode
, AND
, orig_addr
, GEN_INT (3));
7765 if (TARGET_BIG_ENDIAN
)
7766 mips_emit_binary (XOR
, shift
, shift
, GEN_INT (mode
== QImode
? 3 : 2));
7768 /* Multiply by eight to convert the shift value from bytes to bits. */
7769 mips_emit_binary (ASHIFT
, shift
, shift
, GEN_INT (3));
7771 /* Make the final shift an SImode value, so that it can be used in
7772 SImode operations. */
7773 shiftsi
= force_reg (SImode
, gen_lowpart (SImode
, shift
));
7775 /* Set MASK to an inclusive mask of the QImode or HImode value. */
7776 unshifted_mask
= GEN_INT (GET_MODE_MASK (mode
));
7777 unshifted_mask_reg
= force_reg (SImode
, unshifted_mask
);
7778 mask
= mips_force_binary (SImode
, ASHIFT
, unshifted_mask_reg
, shiftsi
);
7780 /* Compute the equivalent exclusive mask. */
7781 inverted_mask
= gen_reg_rtx (SImode
);
7782 emit_insn (gen_rtx_SET (inverted_mask
, gen_rtx_NOT (SImode
, mask
)));
7784 /* Shift the old value into place. */
7785 if (oldval
!= const0_rtx
)
7787 oldval
= convert_modes (SImode
, mode
, oldval
, true);
7788 oldval
= force_reg (SImode
, oldval
);
7789 oldval
= mips_force_binary (SImode
, ASHIFT
, oldval
, shiftsi
);
7792 /* Do the same for the new value. */
7793 if (newval
&& newval
!= const0_rtx
)
7795 newval
= convert_modes (SImode
, mode
, newval
, true);
7796 newval
= force_reg (SImode
, newval
);
7797 newval
= mips_force_binary (SImode
, ASHIFT
, newval
, shiftsi
);
7800 /* Do the SImode atomic access. */
7802 res
= gen_reg_rtx (SImode
);
7804 si_op
= generator
.fn_6 (res
, memsi
, mask
, inverted_mask
, oldval
, newval
);
7806 si_op
= generator
.fn_5 (res
, memsi
, mask
, inverted_mask
, oldval
);
7808 si_op
= generator
.fn_4 (memsi
, mask
, inverted_mask
, oldval
);
7814 /* Shift and convert the result. */
7815 mips_emit_binary (AND
, res
, res
, mask
);
7816 mips_emit_binary (LSHIFTRT
, res
, res
, shiftsi
);
7817 mips_emit_move (result
, gen_lowpart (GET_MODE (result
), res
));
7821 /* Return true if it is possible to use left/right accesses for a
7822 bitfield of WIDTH bits starting BITPOS bits into BLKmode memory OP.
7823 When returning true, update *LEFT and *RIGHT as follows:
7825 *LEFT is a QImode reference to the first byte if big endian or
7826 the last byte if little endian. This address can be used in the
7827 left-side instructions (LWL, SWL, LDL, SDL).
7829 *RIGHT is a QImode reference to the opposite end of the field and
7830 can be used in the patterning right-side instruction. */
7833 mips_get_unaligned_mem (rtx op
, HOST_WIDE_INT width
, HOST_WIDE_INT bitpos
,
7834 rtx
*left
, rtx
*right
)
7838 /* Check that the size is valid. */
7839 if (width
!= 32 && (!TARGET_64BIT
|| width
!= 64))
7842 /* We can only access byte-aligned values. Since we are always passed
7843 a reference to the first byte of the field, it is not necessary to
7844 do anything with BITPOS after this check. */
7845 if (bitpos
% BITS_PER_UNIT
!= 0)
7848 /* Reject aligned bitfields: we want to use a normal load or store
7849 instead of a left/right pair. */
7850 if (MEM_ALIGN (op
) >= width
)
7853 /* Get references to both ends of the field. */
7854 first
= adjust_address (op
, QImode
, 0);
7855 last
= adjust_address (op
, QImode
, width
/ BITS_PER_UNIT
- 1);
7857 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
7858 correspond to the MSB and RIGHT to the LSB. */
7859 if (TARGET_BIG_ENDIAN
)
7860 *left
= first
, *right
= last
;
7862 *left
= last
, *right
= first
;
7867 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
7868 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
7869 the operation is the equivalent of:
7871 (set DEST (*_extract SRC WIDTH BITPOS))
7873 Return true on success. */
7876 mips_expand_ext_as_unaligned_load (rtx dest
, rtx src
, HOST_WIDE_INT width
,
7877 HOST_WIDE_INT bitpos
, bool unsigned_p
)
7879 rtx left
, right
, temp
;
7880 rtx dest1
= NULL_RTX
;
7882 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
7883 be a DImode, create a new temp and emit a zero extend at the end. */
7884 if (GET_MODE (dest
) == DImode
7886 && GET_MODE_BITSIZE (SImode
) == width
)
7889 dest
= gen_reg_rtx (SImode
);
7892 if (!mips_get_unaligned_mem (src
, width
, bitpos
, &left
, &right
))
7895 temp
= gen_reg_rtx (GET_MODE (dest
));
7896 if (GET_MODE (dest
) == DImode
)
7898 emit_insn (gen_mov_ldl (temp
, src
, left
));
7899 emit_insn (gen_mov_ldr (dest
, copy_rtx (src
), right
, temp
));
7903 emit_insn (gen_mov_lwl (temp
, src
, left
));
7904 emit_insn (gen_mov_lwr (dest
, copy_rtx (src
), right
, temp
));
7907 /* If we were loading 32bits and the original register was DI then
7908 sign/zero extend into the orignal dest. */
7912 emit_insn (gen_zero_extendsidi2 (dest1
, dest
));
7914 emit_insn (gen_extendsidi2 (dest1
, dest
));
7919 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
7920 BITPOS and SRC are the operands passed to the expander; the operation
7921 is the equivalent of:
7923 (set (zero_extract DEST WIDTH BITPOS) SRC)
7925 Return true on success. */
7928 mips_expand_ins_as_unaligned_store (rtx dest
, rtx src
, HOST_WIDE_INT width
,
7929 HOST_WIDE_INT bitpos
)
7934 if (!mips_get_unaligned_mem (dest
, width
, bitpos
, &left
, &right
))
7937 mode
= mode_for_size (width
, MODE_INT
, 0);
7938 src
= gen_lowpart (mode
, src
);
7941 emit_insn (gen_mov_sdl (dest
, src
, left
));
7942 emit_insn (gen_mov_sdr (copy_rtx (dest
), copy_rtx (src
), right
));
7946 emit_insn (gen_mov_swl (dest
, src
, left
));
7947 emit_insn (gen_mov_swr (copy_rtx (dest
), copy_rtx (src
), right
));
7952 /* Return true if X is a MEM with the same size as MODE. */
7955 mips_mem_fits_mode_p (machine_mode mode
, rtx x
)
7958 && MEM_SIZE_KNOWN_P (x
)
7959 && MEM_SIZE (x
) == GET_MODE_SIZE (mode
));
7962 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
7963 source of an "ext" instruction or the destination of an "ins"
7964 instruction. OP must be a register operand and the following
7965 conditions must hold:
7967 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
7968 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
7969 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
7971 Also reject lengths equal to a word as they are better handled
7972 by the move patterns. */
7975 mips_use_ins_ext_p (rtx op
, HOST_WIDE_INT width
, HOST_WIDE_INT bitpos
)
7977 if (!ISA_HAS_EXT_INS
7978 || !register_operand (op
, VOIDmode
)
7979 || GET_MODE_BITSIZE (GET_MODE (op
)) > BITS_PER_WORD
)
7982 if (!IN_RANGE (width
, 1, GET_MODE_BITSIZE (GET_MODE (op
)) - 1))
7985 if (bitpos
< 0 || bitpos
+ width
> GET_MODE_BITSIZE (GET_MODE (op
)))
7991 /* Check if MASK and SHIFT are valid in mask-low-and-shift-left
7992 operation if MAXLEN is the maxium length of consecutive bits that
7993 can make up MASK. MODE is the mode of the operation. See
7994 mask_low_and_shift_len for the actual definition. */
7997 mask_low_and_shift_p (machine_mode mode
, rtx mask
, rtx shift
, int maxlen
)
7999 return IN_RANGE (mask_low_and_shift_len (mode
, mask
, shift
), 1, maxlen
);
8002 /* Return true iff OP1 and OP2 are valid operands together for the
8003 *and<MODE>3 and *and<MODE>3_mips16 patterns. For the cases to consider,
8004 see the table in the comment before the pattern. */
8007 and_operands_ok (machine_mode mode
, rtx op1
, rtx op2
)
8009 return (memory_operand (op1
, mode
)
8010 ? and_load_operand (op2
, mode
)
8011 : and_reg_operand (op2
, mode
));
8014 /* The canonical form of a mask-low-and-shift-left operation is
8015 (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
8016 cleared. Thus we need to shift MASK to the right before checking if it
8017 is a valid mask value. MODE is the mode of the operation. If true
8018 return the length of the mask, otherwise return -1. */
8021 mask_low_and_shift_len (machine_mode mode
, rtx mask
, rtx shift
)
8023 HOST_WIDE_INT shval
;
8025 shval
= INTVAL (shift
) & (GET_MODE_BITSIZE (mode
) - 1);
8026 return exact_log2 ((UINTVAL (mask
) >> shval
) + 1);
8029 /* Return true if -msplit-addresses is selected and should be honored.
8031 -msplit-addresses is a half-way house between explicit relocations
8032 and the traditional assembler macros. It can split absolute 32-bit
8033 symbolic constants into a high/lo_sum pair but uses macros for other
8036 Like explicit relocation support for REL targets, it relies
8037 on GNU extensions in the assembler and the linker.
8039 Although this code should work for -O0, it has traditionally
8040 been treated as an optimization. */
8043 mips_split_addresses_p (void)
8045 return (TARGET_SPLIT_ADDRESSES
8049 && !ABI_HAS_64BIT_SYMBOLS
);
8052 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
8055 mips_init_relocs (void)
8057 memset (mips_split_p
, '\0', sizeof (mips_split_p
));
8058 memset (mips_split_hi_p
, '\0', sizeof (mips_split_hi_p
));
8059 memset (mips_use_pcrel_pool_p
, '\0', sizeof (mips_use_pcrel_pool_p
));
8060 memset (mips_hi_relocs
, '\0', sizeof (mips_hi_relocs
));
8061 memset (mips_lo_relocs
, '\0', sizeof (mips_lo_relocs
));
8063 if (TARGET_MIPS16_PCREL_LOADS
)
8064 mips_use_pcrel_pool_p
[SYMBOL_ABSOLUTE
] = true;
8067 if (ABI_HAS_64BIT_SYMBOLS
)
8069 if (TARGET_EXPLICIT_RELOCS
)
8071 mips_split_p
[SYMBOL_64_HIGH
] = true;
8072 mips_hi_relocs
[SYMBOL_64_HIGH
] = "%highest(";
8073 mips_lo_relocs
[SYMBOL_64_HIGH
] = "%higher(";
8075 mips_split_p
[SYMBOL_64_MID
] = true;
8076 mips_hi_relocs
[SYMBOL_64_MID
] = "%higher(";
8077 mips_lo_relocs
[SYMBOL_64_MID
] = "%hi(";
8079 mips_split_p
[SYMBOL_64_LOW
] = true;
8080 mips_hi_relocs
[SYMBOL_64_LOW
] = "%hi(";
8081 mips_lo_relocs
[SYMBOL_64_LOW
] = "%lo(";
8083 mips_split_p
[SYMBOL_ABSOLUTE
] = true;
8084 mips_lo_relocs
[SYMBOL_ABSOLUTE
] = "%lo(";
8089 if (TARGET_EXPLICIT_RELOCS
8090 || mips_split_addresses_p ()
8093 mips_split_p
[SYMBOL_ABSOLUTE
] = true;
8094 mips_hi_relocs
[SYMBOL_ABSOLUTE
] = "%hi(";
8095 mips_lo_relocs
[SYMBOL_ABSOLUTE
] = "%lo(";
8102 /* The high part is provided by a pseudo copy of $gp. */
8103 mips_split_p
[SYMBOL_GP_RELATIVE
] = true;
8104 mips_lo_relocs
[SYMBOL_GP_RELATIVE
] = "%gprel(";
8106 else if (TARGET_EXPLICIT_RELOCS
)
8107 /* Small data constants are kept whole until after reload,
8108 then lowered by mips_rewrite_small_data. */
8109 mips_lo_relocs
[SYMBOL_GP_RELATIVE
] = "%gp_rel(";
8111 if (TARGET_EXPLICIT_RELOCS
)
8113 mips_split_p
[SYMBOL_GOT_PAGE_OFST
] = true;
8116 mips_lo_relocs
[SYMBOL_GOTOFF_PAGE
] = "%got_page(";
8117 mips_lo_relocs
[SYMBOL_GOT_PAGE_OFST
] = "%got_ofst(";
8121 mips_lo_relocs
[SYMBOL_GOTOFF_PAGE
] = "%got(";
8122 mips_lo_relocs
[SYMBOL_GOT_PAGE_OFST
] = "%lo(";
8125 /* Expose the use of $28 as soon as possible. */
8126 mips_split_hi_p
[SYMBOL_GOT_PAGE_OFST
] = true;
8130 /* The HIGH and LO_SUM are matched by special .md patterns. */
8131 mips_split_p
[SYMBOL_GOT_DISP
] = true;
8133 mips_split_p
[SYMBOL_GOTOFF_DISP
] = true;
8134 mips_hi_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_hi(";
8135 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_lo(";
8137 mips_split_p
[SYMBOL_GOTOFF_CALL
] = true;
8138 mips_hi_relocs
[SYMBOL_GOTOFF_CALL
] = "%call_hi(";
8139 mips_lo_relocs
[SYMBOL_GOTOFF_CALL
] = "%call_lo(";
8144 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_disp(";
8146 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got(";
8147 mips_lo_relocs
[SYMBOL_GOTOFF_CALL
] = "%call16(";
8149 /* Expose the use of $28 as soon as possible. */
8150 mips_split_p
[SYMBOL_GOT_DISP
] = true;
8156 mips_split_p
[SYMBOL_GOTOFF_LOADGP
] = true;
8157 mips_hi_relocs
[SYMBOL_GOTOFF_LOADGP
] = "%hi(%neg(%gp_rel(";
8158 mips_lo_relocs
[SYMBOL_GOTOFF_LOADGP
] = "%lo(%neg(%gp_rel(";
8161 mips_lo_relocs
[SYMBOL_TLSGD
] = "%tlsgd(";
8162 mips_lo_relocs
[SYMBOL_TLSLDM
] = "%tlsldm(";
8164 if (TARGET_MIPS16_PCREL_LOADS
)
8166 mips_use_pcrel_pool_p
[SYMBOL_DTPREL
] = true;
8167 mips_use_pcrel_pool_p
[SYMBOL_TPREL
] = true;
8171 mips_split_p
[SYMBOL_DTPREL
] = true;
8172 mips_hi_relocs
[SYMBOL_DTPREL
] = "%dtprel_hi(";
8173 mips_lo_relocs
[SYMBOL_DTPREL
] = "%dtprel_lo(";
8175 mips_split_p
[SYMBOL_TPREL
] = true;
8176 mips_hi_relocs
[SYMBOL_TPREL
] = "%tprel_hi(";
8177 mips_lo_relocs
[SYMBOL_TPREL
] = "%tprel_lo(";
8180 mips_lo_relocs
[SYMBOL_GOTTPREL
] = "%gottprel(";
8181 mips_lo_relocs
[SYMBOL_HALF
] = "%half(";
8184 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
8185 in context CONTEXT. RELOCS is the array of relocations to use. */
8188 mips_print_operand_reloc (FILE *file
, rtx op
, enum mips_symbol_context context
,
8189 const char **relocs
)
8191 enum mips_symbol_type symbol_type
;
8194 symbol_type
= mips_classify_symbolic_expression (op
, context
);
8195 gcc_assert (relocs
[symbol_type
]);
8197 fputs (relocs
[symbol_type
], file
);
8198 output_addr_const (file
, mips_strip_unspec_address (op
));
8199 for (p
= relocs
[symbol_type
]; *p
!= 0; p
++)
8204 /* Start a new block with the given asm switch enabled. If we need
8205 to print a directive, emit PREFIX before it and SUFFIX after it. */
8208 mips_push_asm_switch_1 (struct mips_asm_switch
*asm_switch
,
8209 const char *prefix
, const char *suffix
)
8211 if (asm_switch
->nesting_level
== 0)
8212 fprintf (asm_out_file
, "%s.set\tno%s%s", prefix
, asm_switch
->name
, suffix
);
8213 asm_switch
->nesting_level
++;
8216 /* Likewise, but end a block. */
8219 mips_pop_asm_switch_1 (struct mips_asm_switch
*asm_switch
,
8220 const char *prefix
, const char *suffix
)
8222 gcc_assert (asm_switch
->nesting_level
);
8223 asm_switch
->nesting_level
--;
8224 if (asm_switch
->nesting_level
== 0)
8225 fprintf (asm_out_file
, "%s.set\t%s%s", prefix
, asm_switch
->name
, suffix
);
/* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
   that either print a complete line or print nothing.  */

void
mips_push_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_push_asm_switch_1 (asm_switch, "\t", "\n");
}
void
mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
}
8243 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
8244 The punctuation characters are:
8246 '(' Start a nested ".set noreorder" block.
8247 ')' End a nested ".set noreorder" block.
8248 '[' Start a nested ".set noat" block.
8249 ']' End a nested ".set noat" block.
8250 '<' Start a nested ".set nomacro" block.
8251 '>' End a nested ".set nomacro" block.
8252 '*' Behave like %(%< if generating a delayed-branch sequence.
8253 '#' Print a nop if in a ".set noreorder" block.
8254 '/' Like '#', but do nothing within a delayed-branch sequence.
8255 '?' Print "l" if mips_branch_likely is true
8256 '~' Print a nop if mips_branch_likely is true
8257 '.' Print the name of the register with a hard-wired zero (zero or $0).
8258 '@' Print the name of the assembler temporary register (at or $1).
8259 '^' Print the name of the pic call-through register (t9 or $25).
8260 '+' Print the name of the gp register (usually gp or $28).
8261 '$' Print the name of the stack pointer register (sp or $29).
8262 ':' Print "c" to use the compact version if the delay slot is a nop.
8263 '!' Print "s" to use the short version if the delay slot contains a
8266 See also mips_init_print_operand_punct. */
8269 mips_print_operand_punctuation (FILE *file
, int ch
)
8274 mips_push_asm_switch_1 (&mips_noreorder
, "", "\n\t");
8278 mips_pop_asm_switch_1 (&mips_noreorder
, "\n\t", "");
8282 mips_push_asm_switch_1 (&mips_noat
, "", "\n\t");
8286 mips_pop_asm_switch_1 (&mips_noat
, "\n\t", "");
8290 mips_push_asm_switch_1 (&mips_nomacro
, "", "\n\t");
8294 mips_pop_asm_switch_1 (&mips_nomacro
, "\n\t", "");
8298 if (final_sequence
!= 0)
8300 mips_print_operand_punctuation (file
, '(');
8301 mips_print_operand_punctuation (file
, '<');
8306 if (mips_noreorder
.nesting_level
> 0)
8307 fputs ("\n\tnop", file
);
8311 /* Print an extra newline so that the delayed insn is separated
8312 from the following ones. This looks neater and is consistent
8313 with non-nop delayed sequences. */
8314 if (mips_noreorder
.nesting_level
> 0 && final_sequence
== 0)
8315 fputs ("\n\tnop\n", file
);
8319 if (mips_branch_likely
)
8324 if (mips_branch_likely
)
8325 fputs ("\n\tnop", file
);
8329 fputs (reg_names
[GP_REG_FIRST
+ 0], file
);
8333 fputs (reg_names
[AT_REGNUM
], file
);
8337 fputs (reg_names
[PIC_FUNCTION_ADDR_REGNUM
], file
);
8341 fputs (reg_names
[PIC_OFFSET_TABLE_REGNUM
], file
);
8345 fputs (reg_names
[STACK_POINTER_REGNUM
], file
);
8349 /* When final_sequence is 0, the delay slot will be a nop. We can
8350 use the compact version where available. The %: formatter will
8351 only be present if a compact form of the branch is available. */
8352 if (final_sequence
== 0)
8357 /* If the delay slot instruction is short, then use the
8359 if (TARGET_MICROMIPS
&& !TARGET_INTERLINK_COMPRESSED
&& mips_isa_rev
<= 5
8360 && (final_sequence
== 0
8361 || get_attr_length (final_sequence
->insn (1)) == 2))
8371 /* Initialize mips_print_operand_punct. */
8374 mips_init_print_operand_punct (void)
8378 for (p
= "()[]<>*#/?~.@^+$:!"; *p
; p
++)
8379 mips_print_operand_punct
[(unsigned char) *p
] = true;
8382 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
8383 associated with condition CODE. Print the condition part of the
8387 mips_print_int_branch_condition (FILE *file
, enum rtx_code code
, int letter
)
8401 /* Conveniently, the MIPS names for these conditions are the same
8402 as their RTL equivalents. */
8403 fputs (GET_RTX_NAME (code
), file
);
8407 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter
);
8412 /* Likewise floating-point branches. */
8415 mips_print_float_branch_condition (FILE *file
, enum rtx_code code
, int letter
)
8421 fputs ("c1eqz", file
);
8423 fputs ("c1f", file
);
8428 fputs ("c1nez", file
);
8430 fputs ("c1t", file
);
8434 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter
);
8439 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8442 mips_print_operand_punct_valid_p (unsigned char code
)
8444 return mips_print_operand_punct
[code
];
8447 /* Implement TARGET_PRINT_OPERAND. The MIPS-specific operand codes are:
8449 'X' Print CONST_INT OP in hexadecimal format.
8450 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
8451 'd' Print CONST_INT OP in decimal.
8452 'm' Print one less than CONST_INT OP in decimal.
8453 'h' Print the high-part relocation associated with OP, after stripping
8455 'R' Print the low-part relocation associated with OP.
8456 'C' Print the integer branch condition for comparison OP.
8457 'N' Print the inverse of the integer branch condition for comparison OP.
8458 'F' Print the FPU branch condition for comparison OP.
8459 'W' Print the inverse of the FPU branch condition for comparison OP.
8460 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
8461 'z' for (eq:?I ...), 'n' for (ne:?I ...).
8462 't' Like 'T', but with the EQ/NE cases reversed
8463 'Y' Print mips_fp_conditions[INTVAL (OP)]
8464 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
8465 'q' Print a DSP accumulator register.
8466 'D' Print the second part of a double-word register or memory operand.
8467 'L' Print the low-order register in a double-word register operand.
8468 'M' Print high-order register in a double-word register operand.
8469 'z' Print $0 if OP is zero, otherwise print OP normally.
8470 'b' Print the address of a memory operand, without offset. */
8473 mips_print_operand (FILE *file
, rtx op
, int letter
)
8477 if (mips_print_operand_punct_valid_p (letter
))
8479 mips_print_operand_punctuation (file
, letter
);
8484 code
= GET_CODE (op
);
8489 if (CONST_INT_P (op
))
8490 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (op
));
8492 output_operand_lossage ("invalid use of '%%%c'", letter
);
8496 if (CONST_INT_P (op
))
8497 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (op
) & 0xffff);
8499 output_operand_lossage ("invalid use of '%%%c'", letter
);
8503 if (CONST_INT_P (op
))
8504 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (op
));
8506 output_operand_lossage ("invalid use of '%%%c'", letter
);
8510 if (CONST_INT_P (op
))
8511 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (op
) - 1);
8513 output_operand_lossage ("invalid use of '%%%c'", letter
);
8519 mips_print_operand_reloc (file
, op
, SYMBOL_CONTEXT_LEA
, mips_hi_relocs
);
8523 mips_print_operand_reloc (file
, op
, SYMBOL_CONTEXT_LEA
, mips_lo_relocs
);
8527 mips_print_int_branch_condition (file
, code
, letter
);
8531 mips_print_int_branch_condition (file
, reverse_condition (code
), letter
);
8535 mips_print_float_branch_condition (file
, code
, letter
);
8539 mips_print_float_branch_condition (file
, reverse_condition (code
),
8546 int truth
= (code
== NE
) == (letter
== 'T');
8547 fputc ("zfnt"[truth
* 2 + ST_REG_P (REGNO (XEXP (op
, 0)))], file
);
8552 if (code
== CONST_INT
&& UINTVAL (op
) < ARRAY_SIZE (mips_fp_conditions
))
8553 fputs (mips_fp_conditions
[UINTVAL (op
)], file
);
8555 output_operand_lossage ("'%%%c' is not a valid operand prefix",
8560 if (ISA_HAS_8CC
|| ISA_HAS_CCF
)
8562 mips_print_operand (file
, op
, 0);
8568 if (code
== REG
&& MD_REG_P (REGNO (op
)))
8569 fprintf (file
, "$ac0");
8570 else if (code
== REG
&& DSP_ACC_REG_P (REGNO (op
)))
8571 fprintf (file
, "$ac%c", reg_names
[REGNO (op
)][3]);
8573 output_operand_lossage ("invalid use of '%%%c'", letter
);
8581 unsigned int regno
= REGNO (op
);
8582 if ((letter
== 'M' && TARGET_LITTLE_ENDIAN
)
8583 || (letter
== 'L' && TARGET_BIG_ENDIAN
)
8586 else if (letter
&& letter
!= 'z' && letter
!= 'M' && letter
!= 'L')
8587 output_operand_lossage ("invalid use of '%%%c'", letter
);
8588 /* We need to print $0 .. $31 for COP0 registers. */
8589 if (COP0_REG_P (regno
))
8590 fprintf (file
, "$%s", ®_names
[regno
][4]);
8592 fprintf (file
, "%s", reg_names
[regno
]);
8598 output_address (GET_MODE (op
), plus_constant (Pmode
,
8600 else if (letter
== 'b')
8602 gcc_assert (REG_P (XEXP (op
, 0)));
8603 mips_print_operand (file
, XEXP (op
, 0), 0);
8605 else if (letter
&& letter
!= 'z')
8606 output_operand_lossage ("invalid use of '%%%c'", letter
);
8608 output_address (GET_MODE (op
), XEXP (op
, 0));
8612 if (letter
== 'z' && op
== CONST0_RTX (GET_MODE (op
)))
8613 fputs (reg_names
[GP_REG_FIRST
], file
);
8614 else if (letter
&& letter
!= 'z')
8615 output_operand_lossage ("invalid use of '%%%c'", letter
);
8616 else if (CONST_GP_P (op
))
8617 fputs (reg_names
[GLOBAL_POINTER_REGNUM
], file
);
8619 output_addr_const (file
, mips_strip_unspec_address (op
));
8625 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8628 mips_print_operand_address (FILE *file
, machine_mode
/*mode*/, rtx x
)
8630 struct mips_address_info addr
;
8632 if (mips_classify_address (&addr
, x
, word_mode
, true))
8636 mips_print_operand (file
, addr
.offset
, 0);
8637 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
8640 case ADDRESS_LO_SUM
:
8641 mips_print_operand_reloc (file
, addr
.offset
, SYMBOL_CONTEXT_MEM
,
8643 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
8646 case ADDRESS_CONST_INT
:
8647 output_addr_const (file
, x
);
8648 fprintf (file
, "(%s)", reg_names
[GP_REG_FIRST
]);
8651 case ADDRESS_SYMBOLIC
:
8652 output_addr_const (file
, mips_strip_unspec_address (x
));
8658 /* Implement TARGET_ENCODE_SECTION_INFO. */
8661 mips_encode_section_info (tree decl
, rtx rtl
, int first
)
8663 default_encode_section_info (decl
, rtl
, first
);
8665 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8667 rtx symbol
= XEXP (rtl
, 0);
8668 tree type
= TREE_TYPE (decl
);
8670 /* Encode whether the symbol is short or long. */
8671 if ((TARGET_LONG_CALLS
&& !mips_near_type_p (type
))
8672 || mips_far_type_p (type
))
8673 SYMBOL_REF_FLAGS (symbol
) |= SYMBOL_FLAG_LONG_CALL
;
8677 /* Implement TARGET_SELECT_RTX_SECTION. */
8680 mips_select_rtx_section (machine_mode mode
, rtx x
,
8681 unsigned HOST_WIDE_INT align
)
8683 /* ??? Consider using mergeable small data sections. */
8684 if (mips_rtx_constant_in_small_data_p (mode
))
8685 return get_named_section (NULL
, ".sdata", 0);
8687 return default_elf_select_rtx_section (mode
, x
, align
);
8690 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8692 The complication here is that, with the combination TARGET_ABICALLS
8693 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
8694 absolute addresses, and should therefore not be included in the
8695 read-only part of a DSO. Handle such cases by selecting a normal
8696 data section instead of a read-only one. The logic apes that in
8697 default_function_rodata_section. */
8700 mips_function_rodata_section (tree decl
)
8702 if (!TARGET_ABICALLS
|| TARGET_ABSOLUTE_ABICALLS
|| TARGET_GPWORD
)
8703 return default_function_rodata_section (decl
);
8705 if (decl
&& DECL_SECTION_NAME (decl
))
8707 const char *name
= DECL_SECTION_NAME (decl
);
8708 if (DECL_COMDAT_GROUP (decl
) && strncmp (name
, ".gnu.linkonce.t.", 16) == 0)
8710 char *rname
= ASTRDUP (name
);
8712 return get_section (rname
, SECTION_LINKONCE
| SECTION_WRITE
, decl
);
8714 else if (flag_function_sections
8715 && flag_data_sections
8716 && strncmp (name
, ".text.", 6) == 0)
8718 char *rname
= ASTRDUP (name
);
8719 memcpy (rname
+ 1, "data", 4);
8720 return get_section (rname
, SECTION_WRITE
, decl
);
8723 return data_section
;
8726 /* Implement TARGET_IN_SMALL_DATA_P. */
8729 mips_in_small_data_p (const_tree decl
)
8731 unsigned HOST_WIDE_INT size
;
8733 if (TREE_CODE (decl
) == STRING_CST
|| TREE_CODE (decl
) == FUNCTION_DECL
)
8736 /* We don't yet generate small-data references for -mabicalls
8737 or VxWorks RTP code. See the related -G handling in
8738 mips_option_override. */
8739 if (TARGET_ABICALLS
|| TARGET_VXWORKS_RTP
)
8742 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_SECTION_NAME (decl
) != 0)
8746 /* Reject anything that isn't in a known small-data section. */
8747 name
= DECL_SECTION_NAME (decl
);
8748 if (strcmp (name
, ".sdata") != 0 && strcmp (name
, ".sbss") != 0)
8751 /* If a symbol is defined externally, the assembler will use the
8752 usual -G rules when deciding how to implement macros. */
8753 if (mips_lo_relocs
[SYMBOL_GP_RELATIVE
] || !DECL_EXTERNAL (decl
))
8756 else if (TARGET_EMBEDDED_DATA
)
8758 /* Don't put constants into the small data section: we want them
8759 to be in ROM rather than RAM. */
8760 if (TREE_CODE (decl
) != VAR_DECL
)
8763 if (TREE_READONLY (decl
)
8764 && !TREE_SIDE_EFFECTS (decl
)
8765 && (!DECL_INITIAL (decl
) || TREE_CONSTANT (DECL_INITIAL (decl
))))
8769 /* Enforce -mlocal-sdata. */
8770 if (!TARGET_LOCAL_SDATA
&& !TREE_PUBLIC (decl
))
8773 /* Enforce -mextern-sdata. */
8774 if (!TARGET_EXTERN_SDATA
&& DECL_P (decl
))
8776 if (DECL_EXTERNAL (decl
))
8778 if (DECL_COMMON (decl
) && DECL_INITIAL (decl
) == NULL
)
8782 /* We have traditionally not treated zero-sized objects as small data,
8783 so this is now effectively part of the ABI. */
8784 size
= int_size_in_bytes (TREE_TYPE (decl
));
8785 return size
> 0 && size
<= mips_small_data_threshold
;
8788 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8789 anchors for small data: the GP register acts as an anchor in that
8790 case. We also don't want to use them for PC-relative accesses,
8791 where the PC acts as an anchor. */
8794 mips_use_anchors_for_symbol_p (const_rtx symbol
)
8796 switch (mips_classify_symbol (symbol
, SYMBOL_CONTEXT_MEM
))
8798 case SYMBOL_PC_RELATIVE
:
8799 case SYMBOL_GP_RELATIVE
:
8803 return default_use_anchors_for_symbol_p (symbol
);
8807 /* The MIPS debug format wants all automatic variables and arguments
8808 to be in terms of the virtual frame pointer (stack pointer before
8809 any adjustment in the function), while the MIPS 3.0 linker wants
8810 the frame pointer to be the stack pointer after the initial
8811 adjustment. So, we do the adjustment here. The arg pointer (which
8812 is eliminated) points to the virtual frame pointer, while the frame
8813 pointer (which may be eliminated) points to the stack pointer after
8814 the initial adjustments. */
8817 mips_debugger_offset (rtx addr
, HOST_WIDE_INT offset
)
8819 rtx offset2
= const0_rtx
;
8820 rtx reg
= eliminate_constant_term (addr
, &offset2
);
8823 offset
= INTVAL (offset2
);
8825 if (reg
== stack_pointer_rtx
8826 || reg
== frame_pointer_rtx
8827 || reg
== hard_frame_pointer_rtx
)
8829 offset
-= cfun
->machine
->frame
.total_size
;
8830 if (reg
== hard_frame_pointer_rtx
)
8831 offset
+= cfun
->machine
->frame
.hard_frame_pointer_offset
;
8837 /* Implement ASM_OUTPUT_EXTERNAL. */
8840 mips_output_external (FILE *file
, tree decl
, const char *name
)
8842 default_elf_asm_output_external (file
, decl
, name
);
8844 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
8845 set in order to avoid putting out names that are never really
8847 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl
)))
8849 if (!TARGET_EXPLICIT_RELOCS
&& mips_in_small_data_p (decl
))
8851 /* When using assembler macros, emit .extern directives for
8852 all small-data externs so that the assembler knows how
8855 In most cases it would be safe (though pointless) to emit
8856 .externs for other symbols too. One exception is when an
8857 object is within the -G limit but declared by the user to
8858 be in a section other than .sbss or .sdata. */
8859 fputs ("\t.extern\t", file
);
8860 assemble_name (file
, name
);
8861 fprintf (file
, ", " HOST_WIDE_INT_PRINT_DEC
"\n",
8862 int_size_in_bytes (TREE_TYPE (decl
)));
8867 /* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME. */
8870 mips_output_filename (FILE *stream
, const char *name
)
8872 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
8874 if (write_symbols
== DWARF2_DEBUG
)
8876 else if (mips_output_filename_first_time
)
8878 mips_output_filename_first_time
= 0;
8879 num_source_filenames
+= 1;
8880 current_function_file
= name
;
8881 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
8882 output_quoted_string (stream
, name
);
8883 putc ('\n', stream
);
8885 /* If we are emitting stabs, let dbxout.c handle this (except for
8886 the mips_output_filename_first_time case). */
8887 else if (write_symbols
== DBX_DEBUG
)
8889 else if (name
!= current_function_file
8890 && strcmp (name
, current_function_file
) != 0)
8892 num_source_filenames
+= 1;
8893 current_function_file
= name
;
8894 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
8895 output_quoted_string (stream
, name
);
8896 putc ('\n', stream
);
8900 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
8902 static void ATTRIBUTE_UNUSED
8903 mips_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
8908 fputs ("\t.dtprelword\t", file
);
8912 fputs ("\t.dtpreldword\t", file
);
8918 output_addr_const (file
, x
);
8919 fputs ("+0x8000", file
);
8922 /* Implement TARGET_DWARF_REGISTER_SPAN. */
8925 mips_dwarf_register_span (rtx reg
)
8930 /* TARGET_FLOATXX is implemented as 32-bit floating-point registers but
8931 ensures that double-precision registers are treated as if they were
8932 64-bit physical registers. The code will run correctly with 32-bit or
8933 64-bit registers which means that dwarf information cannot be precise
8934 for all scenarios. We choose to state that the 64-bit values are stored
8935 in a single 64-bit 'piece'. This slightly unusual construct can then be
8936 interpreted as either a pair of registers if the registers are 32-bit or
8937 a single 64-bit register depending on hardware. */
8938 mode
= GET_MODE (reg
);
8939 if (FP_REG_P (REGNO (reg
))
8941 && GET_MODE_SIZE (mode
) > UNITS_PER_FPREG
)
8943 return gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, reg
));
8945 /* By default, GCC maps increasing register numbers to increasing
8946 memory locations, but paired FPRs are always little-endian,
8947 regardless of the prevailing endianness. */
8948 else if (FP_REG_P (REGNO (reg
))
8949 && TARGET_BIG_ENDIAN
8950 && MAX_FPRS_PER_FMT
> 1
8951 && GET_MODE_SIZE (mode
) > UNITS_PER_FPREG
)
8953 gcc_assert (GET_MODE_SIZE (mode
) == UNITS_PER_HWFPVALUE
);
8954 high
= mips_subword (reg
, true);
8955 low
= mips_subword (reg
, false);
8956 return gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, high
, low
));
8962 /* Implement TARGET_DWARF_FRAME_REG_MODE. */
8965 mips_dwarf_frame_reg_mode (int regno
)
8967 machine_mode mode
= default_dwarf_frame_reg_mode (regno
);
8969 if (FP_REG_P (regno
) && mips_abi
== ABI_32
&& TARGET_FLOAT64
)
8975 /* DSP ALU can bypass data with no delays for the following pairs. */
8976 enum insn_code dspalu_bypass_table
[][2] =
8978 {CODE_FOR_mips_addsc
, CODE_FOR_mips_addwc
},
8979 {CODE_FOR_mips_cmpu_eq_qb
, CODE_FOR_mips_pick_qb
},
8980 {CODE_FOR_mips_cmpu_lt_qb
, CODE_FOR_mips_pick_qb
},
8981 {CODE_FOR_mips_cmpu_le_qb
, CODE_FOR_mips_pick_qb
},
8982 {CODE_FOR_mips_cmp_eq_ph
, CODE_FOR_mips_pick_ph
},
8983 {CODE_FOR_mips_cmp_lt_ph
, CODE_FOR_mips_pick_ph
},
8984 {CODE_FOR_mips_cmp_le_ph
, CODE_FOR_mips_pick_ph
},
8985 {CODE_FOR_mips_wrdsp
, CODE_FOR_mips_insv
}
8989 mips_dspalu_bypass_p (rtx out_insn
, rtx in_insn
)
8992 int num_bypass
= ARRAY_SIZE (dspalu_bypass_table
);
8993 enum insn_code out_icode
= (enum insn_code
) INSN_CODE (out_insn
);
8994 enum insn_code in_icode
= (enum insn_code
) INSN_CODE (in_insn
);
8996 for (i
= 0; i
< num_bypass
; i
++)
8998 if (out_icode
== dspalu_bypass_table
[i
][0]
8999 && in_icode
== dspalu_bypass_table
[i
][1])
/* Implement ASM_OUTPUT_ASCII.  */

void
mips_output_ascii (FILE *stream, const char *string, size_t len)
{
  size_t i;
  int cur_pos;

  /* 17 columns are already consumed by the "\t.ascii\t\"" prefix.  */
  cur_pos = 17;
  fprintf (stream, "\t.ascii\t\"");
  for (i = 0; i < len; i++)
    {
      int c;

      c = (unsigned char) string[i];
      if (ISPRINT (c))
	{
	  if (c == '\\' || c == '\"')
	    {
	      putc ('\\', stream);
	      cur_pos++;
	    }
	  putc (c, stream);
	  cur_pos++;
	}
      else
	{
	  fprintf (stream, "\\%03o", c);
	  cur_pos += 4;
	}

      /* Break overly long lines, but never end with a lone backslash.  */
      if (cur_pos > 72 && i+1 < len)
	{
	  cur_pos = 17;
	  fprintf (stream, "\"\n\t.ascii\t\"");
	}
    }
  fprintf (stream, "\"\n");
}
9045 /* Return the pseudo-op for full SYMBOL_(D)TPREL address *ADDR.
9046 Update *ADDR with the operand that should be printed. */
9049 mips_output_tls_reloc_directive (rtx
*addr
)
9051 enum mips_symbol_type type
;
9053 type
= mips_classify_symbolic_expression (*addr
, SYMBOL_CONTEXT_LEA
);
9054 *addr
= mips_strip_unspec_address (*addr
);
9058 return Pmode
== SImode
? ".dtprelword\t%0" : ".dtpreldword\t%0";
9061 return Pmode
== SImode
? ".tprelword\t%0" : ".tpreldword\t%0";
9068 /* Emit either a label, .comm, or .lcomm directive. When using assembler
9069 macros, mark the symbol as written so that mips_asm_output_external
9070 won't emit an .extern for it. STREAM is the output file, NAME is the
9071 name of the symbol, INIT_STRING is the string that should be written
9072 before the symbol and FINAL_STRING is the string that should be
9073 written after it. FINAL_STRING is a printf format that consumes the
9074 remaining arguments. */
9077 mips_declare_object (FILE *stream
, const char *name
, const char *init_string
,
9078 const char *final_string
, ...)
9082 fputs (init_string
, stream
);
9083 assemble_name (stream
, name
);
9084 va_start (ap
, final_string
);
9085 vfprintf (stream
, final_string
, ap
);
9088 if (!TARGET_EXPLICIT_RELOCS
)
9090 tree name_tree
= get_identifier (name
);
9091 TREE_ASM_WRITTEN (name_tree
) = 1;
9095 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
9096 NAME is the name of the object and ALIGN is the required alignment
9097 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
9098 alignment argument. */
9101 mips_declare_common_object (FILE *stream
, const char *name
,
9102 const char *init_string
,
9103 unsigned HOST_WIDE_INT size
,
9104 unsigned int align
, bool takes_alignment_p
)
9106 if (!takes_alignment_p
)
9108 size
+= (align
/ BITS_PER_UNIT
) - 1;
9109 size
-= size
% (align
/ BITS_PER_UNIT
);
9110 mips_declare_object (stream
, name
, init_string
,
9111 "," HOST_WIDE_INT_PRINT_UNSIGNED
"\n", size
);
9114 mips_declare_object (stream
, name
, init_string
,
9115 "," HOST_WIDE_INT_PRINT_UNSIGNED
",%u\n",
9116 size
, align
/ BITS_PER_UNIT
);
9119 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
9120 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
9123 mips_output_aligned_decl_common (FILE *stream
, tree decl
, const char *name
,
9124 unsigned HOST_WIDE_INT size
,
9127 /* If the target wants uninitialized const declarations in
9128 .rdata then don't put them in .comm. */
9129 if (TARGET_EMBEDDED_DATA
9130 && TARGET_UNINIT_CONST_IN_RODATA
9131 && TREE_CODE (decl
) == VAR_DECL
9132 && TREE_READONLY (decl
)
9133 && (DECL_INITIAL (decl
) == 0 || DECL_INITIAL (decl
) == error_mark_node
))
9135 if (TREE_PUBLIC (decl
) && DECL_NAME (decl
))
9136 targetm
.asm_out
.globalize_label (stream
, name
);
9138 switch_to_section (readonly_data_section
);
9139 ASM_OUTPUT_ALIGN (stream
, floor_log2 (align
/ BITS_PER_UNIT
));
9140 mips_declare_object (stream
, name
, "",
9141 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED
"\n",
9145 mips_declare_common_object (stream
, name
, "\n\t.comm\t",
9149 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
9150 extern int size_directive_output
;
9152 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
9153 definitions except that it uses mips_declare_object to emit the label. */
9156 mips_declare_object_name (FILE *stream
, const char *name
,
9157 tree decl ATTRIBUTE_UNUSED
)
9159 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
9160 ASM_OUTPUT_TYPE_DIRECTIVE (stream
, name
, "object");
9163 size_directive_output
= 0;
9164 if (!flag_inhibit_size_directive
&& DECL_SIZE (decl
))
9168 size_directive_output
= 1;
9169 size
= int_size_in_bytes (TREE_TYPE (decl
));
9170 ASM_OUTPUT_SIZE_DIRECTIVE (stream
, name
, size
);
9173 mips_declare_object (stream
, name
, "", ":\n");
9176 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
9179 mips_finish_declare_object (FILE *stream
, tree decl
, int top_level
, int at_end
)
9183 name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
9184 if (!flag_inhibit_size_directive
9185 && DECL_SIZE (decl
) != 0
9188 && DECL_INITIAL (decl
) == error_mark_node
9189 && !size_directive_output
)
9193 size_directive_output
= 1;
9194 size
= int_size_in_bytes (TREE_TYPE (decl
));
9195 ASM_OUTPUT_SIZE_DIRECTIVE (stream
, name
, size
);
9200 /* Return the FOO in the name of the ".mdebug.FOO" section associated
9201 with the current ABI. */
9204 mips_mdebug_abi_name (void)
9217 return TARGET_64BIT
? "eabi64" : "eabi32";
9223 /* Implement TARGET_ASM_FILE_START. */
9226 mips_file_start (void)
9228 default_file_start ();
9230 /* Generate a special section to describe the ABI switches used to
9231 produce the resultant binary. */
9233 /* Record the ABI itself. Modern versions of binutils encode
9234 this information in the ELF header flags, but GDB needs the
9235 information in order to correctly debug binaries produced by
9236 older binutils. See the function mips_gdbarch_init in
9238 fprintf (asm_out_file
, "\t.section .mdebug.%s\n\t.previous\n",
9239 mips_mdebug_abi_name ());
9241 /* There is no ELF header flag to distinguish long32 forms of the
9242 EABI from long64 forms. Emit a special section to help tools
9243 such as GDB. Do the same for o64, which is sometimes used with
9245 if (mips_abi
== ABI_EABI
|| mips_abi
== ABI_O64
)
9246 fprintf (asm_out_file
, "\t.section .gcc_compiled_long%d\n"
9247 "\t.previous\n", TARGET_LONG64
? 64 : 32);
9249 /* Record the NaN encoding. */
9250 if (HAVE_AS_NAN
|| mips_nan
!= MIPS_IEEE_754_DEFAULT
)
9251 fprintf (asm_out_file
, "\t.nan\t%s\n",
9252 mips_nan
== MIPS_IEEE_754_2008
? "2008" : "legacy");
9254 #ifdef HAVE_AS_DOT_MODULE
9255 /* Record the FP ABI. See below for comments. */
9256 if (TARGET_NO_FLOAT
)
9257 #ifdef HAVE_AS_GNU_ATTRIBUTE
9258 fputs ("\t.gnu_attribute 4, 0\n", asm_out_file
);
9262 else if (!TARGET_HARD_FLOAT_ABI
)
9263 fputs ("\t.module\tsoftfloat\n", asm_out_file
);
9264 else if (!TARGET_DOUBLE_FLOAT
)
9265 fputs ("\t.module\tsinglefloat\n", asm_out_file
);
9266 else if (TARGET_FLOATXX
)
9267 fputs ("\t.module\tfp=xx\n", asm_out_file
);
9268 else if (TARGET_FLOAT64
)
9269 fputs ("\t.module\tfp=64\n", asm_out_file
);
9271 fputs ("\t.module\tfp=32\n", asm_out_file
);
9273 if (TARGET_ODD_SPREG
)
9274 fputs ("\t.module\toddspreg\n", asm_out_file
);
9276 fputs ("\t.module\tnooddspreg\n", asm_out_file
);
9279 #ifdef HAVE_AS_GNU_ATTRIBUTE
9283 /* No floating-point operations, -mno-float. */
9284 if (TARGET_NO_FLOAT
)
9286 /* Soft-float code, -msoft-float. */
9287 else if (!TARGET_HARD_FLOAT_ABI
)
9289 /* Single-float code, -msingle-float. */
9290 else if (!TARGET_DOUBLE_FLOAT
)
9292 /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64.
9294 This case used 12 callee-saved double-precision registers
9295 and is deprecated. */
9296 /* 64-bit or 32-bit FP registers on a 32-bit target, -mfpxx. */
9297 else if (TARGET_FLOATXX
)
9299 /* 64-bit FP registers on a 32-bit target, -mfp64 -modd-spreg. */
9300 else if (mips_abi
== ABI_32
&& TARGET_FLOAT64
&& TARGET_ODD_SPREG
)
9302 /* 64-bit FP registers on a 32-bit target, -mfp64 -mno-odd-spreg. */
9303 else if (mips_abi
== ABI_32
&& TARGET_FLOAT64
)
9305 /* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
9309 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n", attr
);
9314 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
9315 if (TARGET_ABICALLS
)
9317 fprintf (asm_out_file
, "\t.abicalls\n");
9318 if (TARGET_ABICALLS_PIC0
)
9319 fprintf (asm_out_file
, "\t.option\tpic0\n");
9322 if (flag_verbose_asm
)
9323 fprintf (asm_out_file
, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
9325 mips_small_data_threshold
, mips_arch_info
->name
, mips_isa
);
9328 /* Implement TARGET_ASM_CODE_END. */
9331 mips_code_end (void)
9333 mips_finish_stub (&mips16_rdhwr_stub
);
9334 mips_finish_stub (&mips16_get_fcsr_stub
);
9335 mips_finish_stub (&mips16_set_fcsr_stub
);
9338 /* Make the last instruction frame-related and note that it performs
9339 the operation described by FRAME_PATTERN. */
9342 mips_set_frame_expr (rtx frame_pattern
)
9346 insn
= get_last_insn ();
9347 RTX_FRAME_RELATED_P (insn
) = 1;
9348 REG_NOTES (insn
) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
9353 /* Return a frame-related rtx that stores REG at MEM.
9354 REG must be a single register. */
9357 mips_frame_set (rtx mem
, rtx reg
)
9361 set
= gen_rtx_SET (mem
, reg
);
9362 RTX_FRAME_RELATED_P (set
) = 1;
9367 /* Record that the epilogue has restored call-saved register REG. */
9370 mips_add_cfa_restore (rtx reg
)
9372 mips_epilogue
.cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
9373 mips_epilogue
.cfa_restores
);
/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
static const unsigned char mips16e_a0_a3_regs[] = {
  7, 6, 5, 4
};

/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};
/* Return the index of the lowest X in the range [0, SIZE) for which
   bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */

static unsigned int
mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
                             unsigned int size)
{
  unsigned int i;

  for (i = 0; i < size; i++)
    if (BITSET_P (mask, regs[i]))
      break;

  return i;
}
/* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
   is the number of set bits.  If *MASK_PTR contains REGS[X] for some X
   in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
   is true for all indexes (X, SIZE).  */

static void
mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
                        unsigned int size, unsigned int *num_regs_ptr)
{
  unsigned int i;

  /* Find the first register in REGS that the mask already contains, then
     force every later entry into the mask as well, counting each
     addition.  */
  i = mips16e_find_first_register (*mask_ptr, regs, size);
  for (i++; i < size; i++)
    if (!BITSET_P (*mask_ptr, regs[i]))
      {
        *num_regs_ptr += 1;
        *mask_ptr |= 1 << regs[i];
      }
}
9428 /* Return a simplified form of X using the register values in REG_VALUES.
9429 REG_VALUES[R] is the last value assigned to hard register R, or null
9430 if R has not been modified.
9432 This function is rather limited, but is good enough for our purposes. */
9435 mips16e_collect_propagate_value (rtx x
, rtx
*reg_values
)
9437 x
= avoid_constant_pool_reference (x
);
9441 rtx x0
= mips16e_collect_propagate_value (XEXP (x
, 0), reg_values
);
9442 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
),
9443 x0
, GET_MODE (XEXP (x
, 0)));
9446 if (ARITHMETIC_P (x
))
9448 rtx x0
= mips16e_collect_propagate_value (XEXP (x
, 0), reg_values
);
9449 rtx x1
= mips16e_collect_propagate_value (XEXP (x
, 1), reg_values
);
9450 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
), x0
, x1
);
9454 && reg_values
[REGNO (x
)]
9455 && !rtx_unstable_p (reg_values
[REGNO (x
)]))
9456 return reg_values
[REGNO (x
)];
9461 /* Return true if (set DEST SRC) stores an argument register into its
9462 caller-allocated save slot, storing the number of that argument
9463 register in *REGNO_PTR if so. REG_VALUES is as for
9464 mips16e_collect_propagate_value. */
9467 mips16e_collect_argument_save_p (rtx dest
, rtx src
, rtx
*reg_values
,
9468 unsigned int *regno_ptr
)
9470 unsigned int argno
, regno
;
9471 HOST_WIDE_INT offset
, required_offset
;
9474 /* Check that this is a word-mode store. */
9475 if (!MEM_P (dest
) || !REG_P (src
) || GET_MODE (dest
) != word_mode
)
9478 /* Check that the register being saved is an unmodified argument
9480 regno
= REGNO (src
);
9481 if (!IN_RANGE (regno
, GP_ARG_FIRST
, GP_ARG_LAST
) || reg_values
[regno
])
9483 argno
= regno
- GP_ARG_FIRST
;
9485 /* Check whether the address is an appropriate stack-pointer or
9486 frame-pointer access. */
9487 addr
= mips16e_collect_propagate_value (XEXP (dest
, 0), reg_values
);
9488 mips_split_plus (addr
, &base
, &offset
);
9489 required_offset
= cfun
->machine
->frame
.total_size
+ argno
* UNITS_PER_WORD
;
9490 if (base
== hard_frame_pointer_rtx
)
9491 required_offset
-= cfun
->machine
->frame
.hard_frame_pointer_offset
;
9492 else if (base
!= stack_pointer_rtx
)
9494 if (offset
!= required_offset
)
9501 /* A subroutine of mips_expand_prologue, called only when generating
9502 MIPS16e SAVE instructions. Search the start of the function for any
9503 instructions that save argument registers into their caller-allocated
9504 save slots. Delete such instructions and return a value N such that
9505 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
9506 instructions redundant. */
9509 mips16e_collect_argument_saves (void)
9511 rtx reg_values
[FIRST_PSEUDO_REGISTER
];
9512 rtx_insn
*insn
, *next
;
9514 unsigned int nargs
, regno
;
9516 push_topmost_sequence ();
9518 memset (reg_values
, 0, sizeof (reg_values
));
9519 for (insn
= get_insns (); insn
; insn
= next
)
9521 next
= NEXT_INSN (insn
);
9522 if (NOTE_P (insn
) || DEBUG_INSN_P (insn
))
9528 set
= PATTERN (insn
);
9529 if (GET_CODE (set
) != SET
)
9532 dest
= SET_DEST (set
);
9533 src
= SET_SRC (set
);
9534 if (mips16e_collect_argument_save_p (dest
, src
, reg_values
, ®no
))
9536 if (!BITSET_P (cfun
->machine
->frame
.mask
, regno
))
9539 nargs
= MAX (nargs
, (regno
- GP_ARG_FIRST
) + 1);
9542 else if (REG_P (dest
) && GET_MODE (dest
) == word_mode
)
9543 reg_values
[REGNO (dest
)]
9544 = mips16e_collect_propagate_value (src
, reg_values
);
9548 pop_topmost_sequence ();
9553 /* Return a move between register REGNO and memory location SP + OFFSET.
9554 REG_PARM_P is true if SP + OFFSET belongs to REG_PARM_STACK_SPACE.
9555 Make the move a load if RESTORE_P, otherwise make it a store. */
9558 mips16e_save_restore_reg (bool restore_p
, bool reg_parm_p
,
9559 HOST_WIDE_INT offset
, unsigned int regno
)
9563 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, stack_pointer_rtx
,
9565 reg
= gen_rtx_REG (SImode
, regno
);
9568 mips_add_cfa_restore (reg
);
9569 return gen_rtx_SET (reg
, mem
);
9572 return gen_rtx_SET (mem
, reg
);
9573 return mips_frame_set (mem
, reg
);
9576 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
9577 The instruction must:
9579 - Allocate or deallocate SIZE bytes in total; SIZE is known
9582 - Save or restore as many registers in *MASK_PTR as possible.
9583 The instruction saves the first registers at the top of the
9584 allocated area, with the other registers below it.
9586 - Save NARGS argument registers above the allocated area.
9588 (NARGS is always zero if RESTORE_P.)
9590 The SAVE and RESTORE instructions cannot save and restore all general
9591 registers, so there may be some registers left over for the caller to
9592 handle. Destructively modify *MASK_PTR so that it contains the registers
9593 that still need to be saved or restored. The caller can save these
9594 registers in the memory immediately below *OFFSET_PTR, which is a
9595 byte offset from the bottom of the allocated stack area. */
9598 mips16e_build_save_restore (bool restore_p
, unsigned int *mask_ptr
,
9599 HOST_WIDE_INT
*offset_ptr
, unsigned int nargs
,
9603 HOST_WIDE_INT offset
, top_offset
;
9604 unsigned int i
, regno
;
9607 gcc_assert (cfun
->machine
->frame
.num_fp
== 0);
9609 /* Calculate the number of elements in the PARALLEL. We need one element
9610 for the stack adjustment, one for each argument register save, and one
9611 for each additional register move. */
9613 for (i
= 0; i
< ARRAY_SIZE (mips16e_save_restore_regs
); i
++)
9614 if (BITSET_P (*mask_ptr
, mips16e_save_restore_regs
[i
]))
9617 /* Create the final PARALLEL. */
9618 pattern
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (n
));
9621 /* Add the stack pointer adjustment. */
9622 set
= gen_rtx_SET (stack_pointer_rtx
,
9623 plus_constant (Pmode
, stack_pointer_rtx
,
9624 restore_p
? size
: -size
));
9625 RTX_FRAME_RELATED_P (set
) = 1;
9626 XVECEXP (pattern
, 0, n
++) = set
;
9628 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
9629 top_offset
= restore_p
? size
: 0;
9631 /* Save the arguments. */
9632 for (i
= 0; i
< nargs
; i
++)
9634 offset
= top_offset
+ i
* UNITS_PER_WORD
;
9635 set
= mips16e_save_restore_reg (restore_p
, true, offset
,
9637 XVECEXP (pattern
, 0, n
++) = set
;
9640 /* Then fill in the other register moves. */
9641 offset
= top_offset
;
9642 for (i
= 0; i
< ARRAY_SIZE (mips16e_save_restore_regs
); i
++)
9644 regno
= mips16e_save_restore_regs
[i
];
9645 if (BITSET_P (*mask_ptr
, regno
))
9647 offset
-= UNITS_PER_WORD
;
9648 set
= mips16e_save_restore_reg (restore_p
, false, offset
, regno
);
9649 XVECEXP (pattern
, 0, n
++) = set
;
9650 *mask_ptr
&= ~(1 << regno
);
9654 /* Tell the caller what offset it should use for the remaining registers. */
9655 *offset_ptr
= size
+ (offset
- top_offset
);
9657 gcc_assert (n
== XVECLEN (pattern
, 0));
9662 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
9663 pointer. Return true if PATTERN matches the kind of instruction
9664 generated by mips16e_build_save_restore. If INFO is nonnull,
9665 initialize it when returning true. */
9668 mips16e_save_restore_pattern_p (rtx pattern
, HOST_WIDE_INT adjust
,
9669 struct mips16e_save_restore_info
*info
)
9671 unsigned int i
, nargs
, mask
, extra
;
9672 HOST_WIDE_INT top_offset
, save_offset
, offset
;
9673 rtx set
, reg
, mem
, base
;
9676 if (!GENERATE_MIPS16E_SAVE_RESTORE
)
9679 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
9680 top_offset
= adjust
> 0 ? adjust
: 0;
9682 /* Interpret all other members of the PARALLEL. */
9683 save_offset
= top_offset
- UNITS_PER_WORD
;
9687 for (n
= 1; n
< XVECLEN (pattern
, 0); n
++)
9689 /* Check that we have a SET. */
9690 set
= XVECEXP (pattern
, 0, n
);
9691 if (GET_CODE (set
) != SET
)
9694 /* Check that the SET is a load (if restoring) or a store
9696 mem
= adjust
> 0 ? SET_SRC (set
) : SET_DEST (set
);
9700 /* Check that the address is the sum of the stack pointer and a
9701 possibly-zero constant offset. */
9702 mips_split_plus (XEXP (mem
, 0), &base
, &offset
);
9703 if (base
!= stack_pointer_rtx
)
9706 /* Check that SET's other operand is a register. */
9707 reg
= adjust
> 0 ? SET_DEST (set
) : SET_SRC (set
);
9711 /* Check for argument saves. */
9712 if (offset
== top_offset
+ nargs
* UNITS_PER_WORD
9713 && REGNO (reg
) == GP_ARG_FIRST
+ nargs
)
9715 else if (offset
== save_offset
)
9717 while (mips16e_save_restore_regs
[i
++] != REGNO (reg
))
9718 if (i
== ARRAY_SIZE (mips16e_save_restore_regs
))
9721 mask
|= 1 << REGNO (reg
);
9722 save_offset
-= UNITS_PER_WORD
;
9728 /* Check that the restrictions on register ranges are met. */
9730 mips16e_mask_registers (&mask
, mips16e_s2_s8_regs
,
9731 ARRAY_SIZE (mips16e_s2_s8_regs
), &extra
);
9732 mips16e_mask_registers (&mask
, mips16e_a0_a3_regs
,
9733 ARRAY_SIZE (mips16e_a0_a3_regs
), &extra
);
9737 /* Make sure that the topmost argument register is not saved twice.
9738 The checks above ensure that the same is then true for the other
9739 argument registers. */
9740 if (nargs
> 0 && BITSET_P (mask
, GP_ARG_FIRST
+ nargs
- 1))
9743 /* Pass back information, if requested. */
9746 info
->nargs
= nargs
;
9748 info
->size
= (adjust
> 0 ? adjust
: -adjust
);
9754 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
9755 for the register range [MIN_REG, MAX_REG]. Return a pointer to
9756 the null terminator. */
9759 mips16e_add_register_range (char *s
, unsigned int min_reg
,
9760 unsigned int max_reg
)
9762 if (min_reg
!= max_reg
)
9763 s
+= sprintf (s
, ",%s-%s", reg_names
[min_reg
], reg_names
[max_reg
]);
9765 s
+= sprintf (s
, ",%s", reg_names
[min_reg
]);
9769 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
9770 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
9773 mips16e_output_save_restore (rtx pattern
, HOST_WIDE_INT adjust
)
9775 static char buffer
[300];
9777 struct mips16e_save_restore_info info
;
9778 unsigned int i
, end
;
9781 /* Parse the pattern. */
9782 if (!mips16e_save_restore_pattern_p (pattern
, adjust
, &info
))
9785 /* Add the mnemonic. */
9786 s
= strcpy (buffer
, adjust
> 0 ? "restore\t" : "save\t");
9789 /* Save the arguments. */
9791 s
+= sprintf (s
, "%s-%s,", reg_names
[GP_ARG_FIRST
],
9792 reg_names
[GP_ARG_FIRST
+ info
.nargs
- 1]);
9793 else if (info
.nargs
== 1)
9794 s
+= sprintf (s
, "%s,", reg_names
[GP_ARG_FIRST
]);
9796 /* Emit the amount of stack space to allocate or deallocate. */
9797 s
+= sprintf (s
, "%d", (int) info
.size
);
9799 /* Save or restore $16. */
9800 if (BITSET_P (info
.mask
, 16))
9801 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 16]);
9803 /* Save or restore $17. */
9804 if (BITSET_P (info
.mask
, 17))
9805 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 17]);
9807 /* Save or restore registers in the range $s2...$s8, which
9808 mips16e_s2_s8_regs lists in decreasing order. Note that this
9809 is a software register range; the hardware registers are not
9810 numbered consecutively. */
9811 end
= ARRAY_SIZE (mips16e_s2_s8_regs
);
9812 i
= mips16e_find_first_register (info
.mask
, mips16e_s2_s8_regs
, end
);
9814 s
= mips16e_add_register_range (s
, mips16e_s2_s8_regs
[end
- 1],
9815 mips16e_s2_s8_regs
[i
]);
9817 /* Save or restore registers in the range $a0...$a3. */
9818 end
= ARRAY_SIZE (mips16e_a0_a3_regs
);
9819 i
= mips16e_find_first_register (info
.mask
, mips16e_a0_a3_regs
, end
);
9821 s
= mips16e_add_register_range (s
, mips16e_a0_a3_regs
[i
],
9822 mips16e_a0_a3_regs
[end
- 1]);
9824 /* Save or restore $31. */
9825 if (BITSET_P (info
.mask
, RETURN_ADDR_REGNUM
))
9826 s
+= sprintf (s
, ",%s", reg_names
[RETURN_ADDR_REGNUM
]);
9831 /* Return true if the current function returns its value in a floating-point
9832 register in MIPS16 mode. */
9835 mips16_cfun_returns_in_fpr_p (void)
9837 tree return_type
= DECL_RESULT (current_function_decl
);
9838 return (TARGET_MIPS16
9839 && TARGET_HARD_FLOAT_ABI
9840 && !aggregate_value_p (return_type
, current_function_decl
)
9841 && mips_return_mode_in_fpr_p (DECL_MODE (return_type
)));
9844 /* Return true if predicate PRED is true for at least one instruction.
9845 Cache the result in *CACHE, and assume that the result is true
9846 if *CACHE is already true. */
9849 mips_find_gp_ref (bool *cache
, bool (*pred
) (rtx_insn
*))
9851 rtx_insn
*insn
, *subinsn
;
9855 push_topmost_sequence ();
9856 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
9857 FOR_EACH_SUBINSN (subinsn
, insn
)
9858 if (USEFUL_INSN_P (subinsn
) && pred (subinsn
))
9863 pop_topmost_sequence ();
9868 /* Return true if INSN refers to the global pointer in an "inflexible" way.
9869 See mips_cfun_has_inflexible_gp_ref_p for details. */
9872 mips_insn_has_inflexible_gp_ref_p (rtx_insn
*insn
)
9874 /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
9875 indicate that the target could be a traditional MIPS
9876 lazily-binding stub. */
9877 return find_reg_fusage (insn
, USE
, pic_offset_table_rtx
);
9880 /* Return true if the current function refers to the global pointer
9881 in a way that forces $28 to be valid. This means that we can't
9882 change the choice of global pointer, even for NewABI code.
9884 One example of this (and one which needs several checks) is that
9885 $28 must be valid when calling traditional MIPS lazy-binding stubs.
9886 (This restriction does not apply to PLTs.) */
9889 mips_cfun_has_inflexible_gp_ref_p (void)
9891 /* If the function has a nonlocal goto, $28 must hold the correct
9892 global pointer for the target function. That is, the target
9893 of the goto implicitly uses $28. */
9894 if (crtl
->has_nonlocal_goto
)
9897 if (TARGET_ABICALLS_PIC2
)
9899 /* Symbolic accesses implicitly use the global pointer unless
9900 -mexplicit-relocs is in effect. JAL macros to symbolic addresses
9901 might go to traditional MIPS lazy-binding stubs. */
9902 if (!TARGET_EXPLICIT_RELOCS
)
9905 /* FUNCTION_PROFILER includes a JAL to _mcount, which again
9906 can be lazily-bound. */
9910 /* MIPS16 functions that return in FPRs need to call an
9911 external libgcc routine. This call is only made explict
9912 during mips_expand_epilogue, and it too might be lazily bound. */
9913 if (mips16_cfun_returns_in_fpr_p ())
9917 return mips_find_gp_ref (&cfun
->machine
->has_inflexible_gp_insn_p
,
9918 mips_insn_has_inflexible_gp_ref_p
);
9921 /* Return true if INSN refers to the global pointer in a "flexible" way.
9922 See mips_cfun_has_flexible_gp_ref_p for details. */
9925 mips_insn_has_flexible_gp_ref_p (rtx_insn
*insn
)
9927 return (get_attr_got (insn
) != GOT_UNSET
9928 || mips_small_data_pattern_p (PATTERN (insn
))
9929 || reg_overlap_mentioned_p (pic_offset_table_rtx
, PATTERN (insn
)));
9932 /* Return true if the current function references the global pointer,
9933 but if those references do not inherently require the global pointer
9934 to be $28. Assume !mips_cfun_has_inflexible_gp_ref_p (). */
9937 mips_cfun_has_flexible_gp_ref_p (void)
9939 /* Reload can sometimes introduce constant pool references
9940 into a function that otherwise didn't need them. For example,
9941 suppose we have an instruction like:
9943 (set (reg:DF R1) (float:DF (reg:SI R2)))
9945 If R2 turns out to be a constant such as 1, the instruction may
9946 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
9947 the option of using this constant if R2 doesn't get allocated
9950 In cases like these, reload will have added the constant to the
9951 pool but no instruction will yet refer to it. */
9952 if (TARGET_ABICALLS_PIC2
&& !reload_completed
&& crtl
->uses_const_pool
)
9955 return mips_find_gp_ref (&cfun
->machine
->has_flexible_gp_insn_p
,
9956 mips_insn_has_flexible_gp_ref_p
);
9959 /* Return the register that should be used as the global pointer
9960 within this function. Return INVALID_REGNUM if the function
9961 doesn't need a global pointer. */
9964 mips_global_pointer (void)
9968 /* $gp is always available unless we're using a GOT. */
9969 if (!TARGET_USE_GOT
)
9970 return GLOBAL_POINTER_REGNUM
;
9972 /* If there are inflexible references to $gp, we must use the
9973 standard register. */
9974 if (mips_cfun_has_inflexible_gp_ref_p ())
9975 return GLOBAL_POINTER_REGNUM
;
9977 /* If there are no current references to $gp, then the only uses
9978 we can introduce later are those involved in long branches. */
9979 if (TARGET_ABSOLUTE_JUMPS
&& !mips_cfun_has_flexible_gp_ref_p ())
9980 return INVALID_REGNUM
;
9982 /* If the global pointer is call-saved, try to use a call-clobbered
9984 if (TARGET_CALL_SAVED_GP
&& crtl
->is_leaf
)
9985 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
9986 if (!df_regs_ever_live_p (regno
)
9987 && call_really_used_regs
[regno
]
9988 && !fixed_regs
[regno
]
9989 && regno
!= PIC_FUNCTION_ADDR_REGNUM
)
9992 return GLOBAL_POINTER_REGNUM
;
9995 /* Return true if the current function's prologue must load the global
9996 pointer value into pic_offset_table_rtx and store the same value in
9997 the function's cprestore slot (if any).
9999 One problem we have to deal with is that, when emitting GOT-based
10000 position independent code, long-branch sequences will need to load
10001 the address of the branch target from the GOT. We don't know until
10002 the very end of compilation whether (and where) the function needs
10003 long branches, so we must ensure that _any_ branch can access the
10004 global pointer in some form. However, we do not want to pessimize
10005 the usual case in which all branches are short.
10007 We handle this as follows:
10009 (1) During reload, we set cfun->machine->global_pointer to
10010 INVALID_REGNUM if we _know_ that the current function
10011 doesn't need a global pointer. This is only valid if
10012 long branches don't need the GOT.
10014 Otherwise, we assume that we might need a global pointer
10015 and pick an appropriate register.
10017 (2) If cfun->machine->global_pointer != INVALID_REGNUM,
10018 we ensure that the global pointer is available at every
10019 block boundary bar entry and exit. We do this in one of two ways:
10021 - If the function has a cprestore slot, we ensure that this
10022 slot is valid at every branch. However, as explained in
10023 point (6) below, there is no guarantee that pic_offset_table_rtx
10024 itself is valid if new uses of the global pointer are introduced
10025 after the first post-epilogue split.
10027 We guarantee that the cprestore slot is valid by loading it
10028 into a fake register, CPRESTORE_SLOT_REGNUM. We then make
10029 this register live at every block boundary bar function entry
10030 and exit. It is then invalid to move the load (and thus the
10031 preceding store) across a block boundary.
10033 - If the function has no cprestore slot, we guarantee that
10034 pic_offset_table_rtx itself is valid at every branch.
10036 See mips_eh_uses for the handling of the register liveness.
10038 (3) During prologue and epilogue generation, we emit "ghost"
10039 placeholder instructions to manipulate the global pointer.
10041 (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
10042 and cfun->machine->must_restore_gp_when_clobbered_p if we already know
10043 that the function needs a global pointer. (There is no need to set
10044 them earlier than this, and doing it as late as possible leads to
10045 fewer false positives.)
10047 (5) If cfun->machine->must_initialize_gp_p is true during a
10048 split_insns pass, we split the ghost instructions into real
10049 instructions. These split instructions can then be optimized in
10050 the usual way. Otherwise, we keep the ghost instructions intact,
10051 and optimize for the case where they aren't needed. We still
10052 have the option of splitting them later, if we need to introduce
10053 new uses of the global pointer.
10055 For example, the scheduler ignores a ghost instruction that
10056 stores $28 to the stack, but it handles the split form of
10057 the ghost instruction as an ordinary store.
10059 (6) [OldABI only.] If cfun->machine->must_restore_gp_when_clobbered_p
10060 is true during the first post-epilogue split_insns pass, we split
10061 calls and restore_gp patterns into instructions that explicitly
10062 load pic_offset_table_rtx from the cprestore slot. Otherwise,
10063 we split these patterns into instructions that _don't_ load from
10064 the cprestore slot.
10066 If cfun->machine->must_restore_gp_when_clobbered_p is true at the
10067 time of the split, then any instructions that exist at that time
10068 can make free use of pic_offset_table_rtx. However, if we want
10069 to introduce new uses of the global pointer after the split,
10070 we must explicitly load the value from the cprestore slot, since
10071 pic_offset_table_rtx itself might not be valid at a given point
10074 The idea is that we want to be able to delete redundant
10075 loads from the cprestore slot in the usual case where no
10076 long branches are needed.
10078 (7) If cfun->machine->must_initialize_gp_p is still false at the end
10079 of md_reorg, we decide whether the global pointer is needed for
10080 long branches. If so, we set cfun->machine->must_initialize_gp_p
10081 to true and split the ghost instructions into real instructions
10084 Note that the ghost instructions must have a zero length for three reasons:
10086 - Giving the length of the underlying $gp sequence might cause
10087 us to use long branches in cases where they aren't really needed.
10089 - They would perturb things like alignment calculations.
10091 - More importantly, the hazard detection in md_reorg relies on
10092 empty instructions having a zero length.
10094 If we find a long branch and split the ghost instructions at the
10095 end of md_reorg, the split could introduce more long branches.
10096 That isn't a problem though, because we still do the split before
10097 the final shorten_branches pass.
10099 This is extremely ugly, but it seems like the best compromise between
10100 correctness and efficiency. */
10103 mips_must_initialize_gp_p (void)
10105 return cfun
->machine
->must_initialize_gp_p
;
10108 /* Return true if REGNO is a register that is ordinarily call-clobbered
10109 but must nevertheless be preserved by an interrupt handler. */
10112 mips_interrupt_extra_call_saved_reg_p (unsigned int regno
)
10114 if ((ISA_HAS_HILO
|| TARGET_DSP
)
10115 && MD_REG_P (regno
))
10118 if (TARGET_DSP
&& DSP_ACC_REG_P (regno
))
10121 if (GP_REG_P (regno
)
10122 && cfun
->machine
->use_shadow_register_set
== SHADOW_SET_NO
)
10124 /* $0 is hard-wired. */
10125 if (regno
== GP_REG_FIRST
)
10128 /* The interrupt handler can treat kernel registers as
10129 scratch registers. */
10130 if (KERNEL_REG_P (regno
))
10133 /* The function will return the stack pointer to its original value
10135 if (regno
== STACK_POINTER_REGNUM
)
10138 /* Otherwise, return true for registers that aren't ordinarily
10140 return call_really_used_regs
[regno
];
10146 /* Return true if the current function should treat register REGNO
10150 mips_cfun_call_saved_reg_p (unsigned int regno
)
10152 /* If the user makes an ordinarily-call-saved register global,
10153 that register is no longer call-saved. */
10154 if (global_regs
[regno
])
10157 /* Interrupt handlers need to save extra registers. */
10158 if (cfun
->machine
->interrupt_handler_p
10159 && mips_interrupt_extra_call_saved_reg_p (regno
))
10162 /* call_insns preserve $28 unless they explicitly say otherwise,
10163 so call_really_used_regs[] treats $28 as call-saved. However,
10164 we want the ABI property rather than the default call_insn
10166 return (regno
== GLOBAL_POINTER_REGNUM
10167 ? TARGET_CALL_SAVED_GP
10168 : !call_really_used_regs
[regno
]);
10171 /* Return true if the function body might clobber register REGNO.
10172 We know that REGNO is call-saved. */
10175 mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno
)
10177 /* Some functions should be treated as clobbering all call-saved
10179 if (crtl
->saves_all_registers
)
10182 /* DF handles cases where a register is explicitly referenced in
10183 the rtl. Incoming values are passed in call-clobbered registers,
10184 so we can assume that any live call-saved register is set within
10186 if (df_regs_ever_live_p (regno
))
10189 /* Check for registers that are clobbered by FUNCTION_PROFILER.
10190 These clobbers are not explicit in the rtl. */
10191 if (crtl
->profile
&& MIPS_SAVE_REG_FOR_PROFILING_P (regno
))
10194 /* If we're using a call-saved global pointer, the function's
10195 prologue will need to set it up. */
10196 if (cfun
->machine
->global_pointer
== regno
)
10199 /* The function's prologue will need to set the frame pointer if
10200 frame_pointer_needed. */
10201 if (regno
== HARD_FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
10204 /* If a MIPS16 function returns a value in FPRs, its epilogue
10205 will need to call an external libgcc routine. This yet-to-be
10206 generated call_insn will clobber $31. */
10207 if (regno
== RETURN_ADDR_REGNUM
&& mips16_cfun_returns_in_fpr_p ())
10210 /* If REGNO is ordinarily call-clobbered, we must assume that any
10211 called function could modify it. */
10212 if (cfun
->machine
->interrupt_handler_p
10214 && mips_interrupt_extra_call_saved_reg_p (regno
))
10220 /* Return true if the current function must save register REGNO. */
10223 mips_save_reg_p (unsigned int regno
)
10225 if (mips_cfun_call_saved_reg_p (regno
))
10227 if (mips_cfun_might_clobber_call_saved_reg_p (regno
))
10230 /* Save both registers in an FPR pair if either one is used. This is
10231 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
10232 register to be used without the even register. */
10233 if (FP_REG_P (regno
)
10234 && MAX_FPRS_PER_FMT
== 2
10235 && mips_cfun_might_clobber_call_saved_reg_p (regno
+ 1))
10239 /* We need to save the incoming return address if __builtin_eh_return
10240 is being used to set a different return address. */
10241 if (regno
== RETURN_ADDR_REGNUM
&& crtl
->calls_eh_return
)
10247 /* Populate the current function's mips_frame_info structure.
10249 MIPS stack frames look like:
10251 +-------------------------------+
10253 | incoming stack arguments |
10255 +-------------------------------+
10257 | caller-allocated save area |
10258 A | for register arguments |
10260 +-------------------------------+ <-- incoming stack pointer
10262 | callee-allocated save area |
10263 B | for arguments that are |
10264 | split between registers and |
10267 +-------------------------------+ <-- arg_pointer_rtx
10269 C | callee-allocated save area |
10270 | for register varargs |
10272 +-------------------------------+ <-- frame_pointer_rtx
10273 | | + cop0_sp_offset
10274 | COP0 reg save area | + UNITS_PER_WORD
10276 +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
10277 | | + UNITS_PER_WORD
10278 | accumulator save area |
10280 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
10281 | | + UNITS_PER_HWFPVALUE
10284 +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
10285 | | + UNITS_PER_WORD
10288 +-------------------------------+ <-- frame_pointer_rtx with
10289 | | \ -fstack-protector
10290 | local variables | | var_size
10292 +-------------------------------+
10294 | $gp save area | | cprestore_size
10296 P +-------------------------------+ <-- hard_frame_pointer_rtx for
10298 | outgoing stack arguments | |
10300 +-------------------------------+ | args_size
10302 | caller-allocated save area | |
10303 | for register arguments | |
10305 +-------------------------------+ <-- stack_pointer_rtx
10306 frame_pointer_rtx without
10308 hard_frame_pointer_rtx for
10311 At least two of A, B and C will be empty.
10313 Dynamic stack allocations such as alloca insert data at point P.
10314 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
10315 hard_frame_pointer_rtx unchanged. */
10318 mips_compute_frame_info (void)
10320 struct mips_frame_info
*frame
;
10321 HOST_WIDE_INT offset
, size
;
10322 unsigned int regno
, i
;
10324 /* Set this function's interrupt properties. */
10325 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl
)))
10327 if (mips_isa_rev
< 2)
10328 error ("the %<interrupt%> attribute requires a MIPS32r2 processor or greater");
10329 else if (TARGET_MIPS16
)
10330 error ("interrupt handlers cannot be MIPS16 functions");
10333 cfun
->machine
->interrupt_handler_p
= true;
10334 cfun
->machine
->int_mask
=
10335 mips_interrupt_mask (TREE_TYPE (current_function_decl
));
10336 cfun
->machine
->use_shadow_register_set
=
10337 mips_use_shadow_register_set (TREE_TYPE (current_function_decl
));
10338 cfun
->machine
->keep_interrupts_masked_p
=
10339 mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl
));
10340 cfun
->machine
->use_debug_exception_return_p
=
10341 mips_use_debug_exception_return_p (TREE_TYPE
10342 (current_function_decl
));
10346 frame
= &cfun
->machine
->frame
;
10347 memset (frame
, 0, sizeof (*frame
));
10348 size
= get_frame_size ();
10350 cfun
->machine
->global_pointer
= mips_global_pointer ();
10352 /* The first two blocks contain the outgoing argument area and the $gp save
10353 slot. This area isn't needed in leaf functions. We can also skip it
10354 if we know that none of the called functions will use this space.
10356 But if the target-independent frame size is nonzero, we have already
10357 committed to allocating these in STARTING_FRAME_OFFSET for
10358 !FRAME_GROWS_DOWNWARD. */
10360 if ((size
== 0 || FRAME_GROWS_DOWNWARD
)
10361 && (crtl
->is_leaf
|| (cfun
->machine
->optimize_call_stack
&& !flag_pic
)))
10363 /* The MIPS 3.0 linker does not like functions that dynamically
10364 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
10365 looks like we are trying to create a second frame pointer to the
10366 function, so allocate some stack space to make it happy. */
10367 if (cfun
->calls_alloca
)
10368 frame
->args_size
= REG_PARM_STACK_SPACE (cfun
->decl
);
10370 frame
->args_size
= 0;
10371 frame
->cprestore_size
= 0;
10375 frame
->args_size
= crtl
->outgoing_args_size
;
10376 frame
->cprestore_size
= MIPS_GP_SAVE_AREA_SIZE
;
10378 offset
= frame
->args_size
+ frame
->cprestore_size
;
10380 /* Move above the local variables. */
10381 frame
->var_size
= MIPS_STACK_ALIGN (size
);
10382 offset
+= frame
->var_size
;
10384 /* Find out which GPRs we need to save. */
10385 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
10386 if (mips_save_reg_p (regno
))
10389 frame
->mask
|= 1 << (regno
- GP_REG_FIRST
);
10392 /* If this function calls eh_return, we must also save and restore the
10393 EH data registers. */
10394 if (crtl
->calls_eh_return
)
10395 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; i
++)
10398 frame
->mask
|= 1 << (EH_RETURN_DATA_REGNO (i
) - GP_REG_FIRST
);
10401 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
10402 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
10403 save all later registers too. */
10404 if (GENERATE_MIPS16E_SAVE_RESTORE
)
10406 mips16e_mask_registers (&frame
->mask
, mips16e_s2_s8_regs
,
10407 ARRAY_SIZE (mips16e_s2_s8_regs
), &frame
->num_gp
);
10408 mips16e_mask_registers (&frame
->mask
, mips16e_a0_a3_regs
,
10409 ARRAY_SIZE (mips16e_a0_a3_regs
), &frame
->num_gp
);
10412 /* Move above the GPR save area. */
10413 if (frame
->num_gp
> 0)
10415 offset
+= MIPS_STACK_ALIGN (frame
->num_gp
* UNITS_PER_WORD
);
10416 frame
->gp_sp_offset
= offset
- UNITS_PER_WORD
;
10419 /* Find out which FPRs we need to save. This loop must iterate over
10420 the same space as its companion in mips_for_each_saved_gpr_and_fpr. */
10421 if (TARGET_HARD_FLOAT
)
10422 for (regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
+= MAX_FPRS_PER_FMT
)
10423 if (mips_save_reg_p (regno
))
10425 frame
->num_fp
+= MAX_FPRS_PER_FMT
;
10426 frame
->fmask
|= ~(~0U << MAX_FPRS_PER_FMT
) << (regno
- FP_REG_FIRST
);
10429 /* Move above the FPR save area. */
10430 if (frame
->num_fp
> 0)
10432 offset
+= MIPS_STACK_ALIGN (frame
->num_fp
* UNITS_PER_FPREG
);
10433 frame
->fp_sp_offset
= offset
- UNITS_PER_HWFPVALUE
;
10436 /* Add in space for the interrupt context information. */
10437 if (cfun
->machine
->interrupt_handler_p
)
10440 if (mips_save_reg_p (LO_REGNUM
) || mips_save_reg_p (HI_REGNUM
))
10443 frame
->acc_mask
|= (1 << 0);
10446 /* Check accumulators 1, 2, 3. */
10447 for (i
= DSP_ACC_REG_FIRST
; i
<= DSP_ACC_REG_LAST
; i
+= 2)
10448 if (mips_save_reg_p (i
) || mips_save_reg_p (i
+ 1))
10451 frame
->acc_mask
|= 1 << (((i
- DSP_ACC_REG_FIRST
) / 2) + 1);
10454 /* All interrupt context functions need space to preserve STATUS. */
10455 frame
->num_cop0_regs
++;
10457 /* We need to save EPC regardless of whether interrupts remain masked
10458 as exceptions will corrupt EPC. */
10459 frame
->num_cop0_regs
++;
10462 /* Move above the accumulator save area. */
10463 if (frame
->num_acc
> 0)
10465 /* Each accumulator needs 2 words. */
10466 offset
+= frame
->num_acc
* 2 * UNITS_PER_WORD
;
10467 frame
->acc_sp_offset
= offset
- UNITS_PER_WORD
;
10470 /* Move above the COP0 register save area. */
10471 if (frame
->num_cop0_regs
> 0)
10473 offset
+= frame
->num_cop0_regs
* UNITS_PER_WORD
;
10474 frame
->cop0_sp_offset
= offset
- UNITS_PER_WORD
;
10477 /* Determine if we can save the callee-saved registers in the frame
10478 header. Restrict this to functions where there is no other reason
10479 to allocate stack space so that we can eliminate the instructions
10480 that modify the stack pointer. */
10484 && flag_frame_header_optimization
10485 && !MAIN_NAME_P (DECL_NAME (current_function_decl
))
10486 && cfun
->machine
->varargs_size
== 0
10487 && crtl
->args
.pretend_args_size
== 0
10488 && frame
->var_size
== 0
10489 && frame
->num_acc
== 0
10490 && frame
->num_cop0_regs
== 0
10491 && frame
->num_fp
== 0
10492 && frame
->num_gp
> 0
10493 && frame
->num_gp
<= MAX_ARGS_IN_REGISTERS
10494 && !GENERATE_MIPS16E_SAVE_RESTORE
10495 && !cfun
->machine
->interrupt_handler_p
10496 && cfun
->machine
->does_not_use_frame_header
10497 && cfun
->machine
->optimize_call_stack
10498 && !cfun
->machine
->callers_may_not_allocate_frame
10499 && !mips_cfun_has_cprestore_slot_p ())
10502 frame
->gp_sp_offset
= REG_PARM_STACK_SPACE(cfun
) - UNITS_PER_WORD
;
10503 cfun
->machine
->use_frame_header_for_callee_saved_regs
= true;
10506 /* Move above the callee-allocated varargs save area. */
10507 offset
+= MIPS_STACK_ALIGN (cfun
->machine
->varargs_size
);
10508 frame
->arg_pointer_offset
= offset
;
10510 /* Move above the callee-allocated area for pretend stack arguments. */
10511 offset
+= crtl
->args
.pretend_args_size
;
10512 frame
->total_size
= offset
;
10514 /* Work out the offsets of the save areas from the top of the frame. */
10515 if (frame
->gp_sp_offset
> 0)
10516 frame
->gp_save_offset
= frame
->gp_sp_offset
- offset
;
10517 if (frame
->fp_sp_offset
> 0)
10518 frame
->fp_save_offset
= frame
->fp_sp_offset
- offset
;
10519 if (frame
->acc_sp_offset
> 0)
10520 frame
->acc_save_offset
= frame
->acc_sp_offset
- offset
;
10521 if (frame
->num_cop0_regs
> 0)
10522 frame
->cop0_save_offset
= frame
->cop0_sp_offset
- offset
;
10524 /* MIPS16 code offsets the frame pointer by the size of the outgoing
10525 arguments. This tends to increase the chances of using unextended
10526 instructions for local variables and incoming arguments. */
10528 frame
->hard_frame_pointer_offset
= frame
->args_size
;
10531 /* Return the style of GP load sequence that is being used for the
10532 current function. */
10534 enum mips_loadgp_style
10535 mips_current_loadgp_style (void)
10537 if (!TARGET_USE_GOT
|| cfun
->machine
->global_pointer
== INVALID_REGNUM
)
10538 return LOADGP_NONE
;
10540 if (TARGET_RTP_PIC
)
10543 if (TARGET_ABSOLUTE_ABICALLS
)
10544 return LOADGP_ABSOLUTE
;
10546 return TARGET_NEWABI
? LOADGP_NEWABI
: LOADGP_OLDABI
;
10549 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
10552 mips_frame_pointer_required (void)
10554 /* If the function contains dynamic stack allocations, we need to
10555 use the frame pointer to access the static parts of the frame. */
10556 if (cfun
->calls_alloca
)
10559 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
10560 reload may be unable to compute the address of a local variable,
10561 since there is no way to add a large constant to the stack pointer
10562 without using a second temporary register. */
10565 mips_compute_frame_info ();
10566 if (!SMALL_OPERAND (cfun
->machine
->frame
.total_size
))
10573 /* Make sure that we're not trying to eliminate to the wrong hard frame
10577 mips_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
10579 return (to
== HARD_FRAME_POINTER_REGNUM
|| to
== STACK_POINTER_REGNUM
);
10582 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
10583 or argument pointer. TO is either the stack pointer or hard frame
10587 mips_initial_elimination_offset (int from
, int to
)
10589 HOST_WIDE_INT offset
;
10591 mips_compute_frame_info ();
10593 /* Set OFFSET to the offset from the end-of-prologue stack pointer. */
10596 case FRAME_POINTER_REGNUM
:
10597 if (FRAME_GROWS_DOWNWARD
)
10598 offset
= (cfun
->machine
->frame
.args_size
10599 + cfun
->machine
->frame
.cprestore_size
10600 + cfun
->machine
->frame
.var_size
);
10605 case ARG_POINTER_REGNUM
:
10606 offset
= cfun
->machine
->frame
.arg_pointer_offset
;
10610 gcc_unreachable ();
10613 if (to
== HARD_FRAME_POINTER_REGNUM
)
10614 offset
-= cfun
->machine
->frame
.hard_frame_pointer_offset
;
10619 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
10622 mips_extra_live_on_entry (bitmap regs
)
10624 if (TARGET_USE_GOT
)
10626 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
10627 the global pointer. */
10628 if (!TARGET_ABSOLUTE_ABICALLS
)
10629 bitmap_set_bit (regs
, PIC_FUNCTION_ADDR_REGNUM
);
10631 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
10632 the global pointer. */
10634 bitmap_set_bit (regs
, MIPS16_PIC_TEMP_REGNUM
);
10636 /* See the comment above load_call<mode> for details. */
10637 bitmap_set_bit (regs
, GOT_VERSION_REGNUM
);
10641 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
10645 mips_return_addr (int count
, rtx frame ATTRIBUTE_UNUSED
)
10650 return get_hard_reg_initial_val (Pmode
, RETURN_ADDR_REGNUM
);
10653 /* Emit code to change the current function's return address to
10654 ADDRESS. SCRATCH is available as a scratch register, if needed.
10655 ADDRESS and SCRATCH are both word-mode GPRs. */
10658 mips_set_return_address (rtx address
, rtx scratch
)
10662 gcc_assert (BITSET_P (cfun
->machine
->frame
.mask
, RETURN_ADDR_REGNUM
));
10663 slot_address
= mips_add_offset (scratch
, stack_pointer_rtx
,
10664 cfun
->machine
->frame
.gp_sp_offset
);
10665 mips_emit_move (gen_frame_mem (GET_MODE (address
), slot_address
), address
);
10668 /* Return true if the current function has a cprestore slot. */
10671 mips_cfun_has_cprestore_slot_p (void)
10673 return (cfun
->machine
->global_pointer
!= INVALID_REGNUM
10674 && cfun
->machine
->frame
.cprestore_size
> 0);
10677 /* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
10678 cprestore slot. LOAD_P is true if the caller wants to load from
10679 the cprestore slot; it is false if the caller wants to store to
10683 mips_get_cprestore_base_and_offset (rtx
*base
, HOST_WIDE_INT
*offset
,
10686 const struct mips_frame_info
*frame
;
10688 frame
= &cfun
->machine
->frame
;
10689 /* .cprestore always uses the stack pointer instead of the frame pointer.
10690 We have a free choice for direct stores for non-MIPS16 functions,
10691 and for MIPS16 functions whose cprestore slot is in range of the
10692 stack pointer. Using the stack pointer would sometimes give more
10693 (early) scheduling freedom, but using the frame pointer would
10694 sometimes give more (late) scheduling freedom. It's hard to
10695 predict which applies to a given function, so let's keep things
10698 Loads must always use the frame pointer in functions that call
10699 alloca, and there's little benefit to using the stack pointer
10701 if (frame_pointer_needed
&& !(TARGET_CPRESTORE_DIRECTIVE
&& !load_p
))
10703 *base
= hard_frame_pointer_rtx
;
10704 *offset
= frame
->args_size
- frame
->hard_frame_pointer_offset
;
10708 *base
= stack_pointer_rtx
;
10709 *offset
= frame
->args_size
;
10713 /* Return true if X is the load or store address of the cprestore slot;
10714 LOAD_P says which. */
10717 mips_cprestore_address_p (rtx x
, bool load_p
)
10719 rtx given_base
, required_base
;
10720 HOST_WIDE_INT given_offset
, required_offset
;
10722 mips_split_plus (x
, &given_base
, &given_offset
);
10723 mips_get_cprestore_base_and_offset (&required_base
, &required_offset
, load_p
);
10724 return given_base
== required_base
&& given_offset
== required_offset
;
10727 /* Return a MEM rtx for the cprestore slot. LOAD_P is true if we are
10728 going to load from it, false if we are going to store to it.
10729 Use TEMP as a temporary register if need be. */
10732 mips_cprestore_slot (rtx temp
, bool load_p
)
10735 HOST_WIDE_INT offset
;
10737 mips_get_cprestore_base_and_offset (&base
, &offset
, load_p
);
10738 return gen_frame_mem (Pmode
, mips_add_offset (temp
, base
, offset
));
10741 /* Emit instructions to save global pointer value GP into cprestore
10742 slot MEM. OFFSET is the offset that MEM applies to the base register.
10744 MEM may not be a legitimate address. If it isn't, TEMP is a
10745 temporary register that can be used, otherwise it is a SCRATCH. */
10748 mips_save_gp_to_cprestore_slot (rtx mem
, rtx offset
, rtx gp
, rtx temp
)
10750 if (TARGET_CPRESTORE_DIRECTIVE
)
10752 gcc_assert (gp
== pic_offset_table_rtx
);
10753 emit_insn (PMODE_INSN (gen_cprestore
, (mem
, offset
)));
10756 mips_emit_move (mips_cprestore_slot (temp
, false), gp
);
10759 /* Restore $gp from its save slot, using TEMP as a temporary base register
10760 if need be. This function is for o32 and o64 abicalls only.
10762 See mips_must_initialize_gp_p for details about how we manage the
10766 mips_restore_gp_from_cprestore_slot (rtx temp
)
10768 gcc_assert (TARGET_ABICALLS
&& TARGET_OLDABI
&& epilogue_completed
);
10770 if (!cfun
->machine
->must_restore_gp_when_clobbered_p
)
10772 emit_note (NOTE_INSN_DELETED
);
10778 mips_emit_move (temp
, mips_cprestore_slot (temp
, true));
10779 mips_emit_move (pic_offset_table_rtx
, temp
);
10782 mips_emit_move (pic_offset_table_rtx
, mips_cprestore_slot (temp
, true));
10783 if (!TARGET_EXPLICIT_RELOCS
)
10784 emit_insn (gen_blockage ());
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
10791 /* Use FN to save or restore register REGNO. MODE is the register's
10792 mode and OFFSET is the offset of its save slot from the current
10796 mips_save_restore_reg (machine_mode mode
, int regno
,
10797 HOST_WIDE_INT offset
, mips_save_restore_fn fn
)
10801 mem
= gen_frame_mem (mode
, plus_constant (Pmode
, stack_pointer_rtx
,
10803 fn (gen_rtx_REG (mode
, regno
), mem
);
10806 /* Call FN for each accumlator that is saved by the current function.
10807 SP_OFFSET is the offset of the current stack pointer from the start
10811 mips_for_each_saved_acc (HOST_WIDE_INT sp_offset
, mips_save_restore_fn fn
)
10813 HOST_WIDE_INT offset
;
10816 offset
= cfun
->machine
->frame
.acc_sp_offset
- sp_offset
;
10817 if (BITSET_P (cfun
->machine
->frame
.acc_mask
, 0))
10819 mips_save_restore_reg (word_mode
, LO_REGNUM
, offset
, fn
);
10820 offset
-= UNITS_PER_WORD
;
10821 mips_save_restore_reg (word_mode
, HI_REGNUM
, offset
, fn
);
10822 offset
-= UNITS_PER_WORD
;
10825 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
++)
10826 if (BITSET_P (cfun
->machine
->frame
.acc_mask
,
10827 ((regno
- DSP_ACC_REG_FIRST
) / 2) + 1))
10829 mips_save_restore_reg (word_mode
, regno
, offset
, fn
);
10830 offset
-= UNITS_PER_WORD
;
10834 /* Save register REG to MEM. Make the instruction frame-related. */
10837 mips_save_reg (rtx reg
, rtx mem
)
10839 if (GET_MODE (reg
) == DFmode
10840 && (!TARGET_FLOAT64
10841 || mips_abi
== ABI_32
))
10845 mips_emit_move_or_split (mem
, reg
, SPLIT_IF_NECESSARY
);
10847 x1
= mips_frame_set (mips_subword (mem
, false),
10848 mips_subword (reg
, false));
10849 x2
= mips_frame_set (mips_subword (mem
, true),
10850 mips_subword (reg
, true));
10851 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, x1
, x2
)));
10854 mips_emit_save_slot_move (mem
, reg
, MIPS_PROLOGUE_TEMP (GET_MODE (reg
)));
/* Capture the register combinations that are allowed in a SWM or LWM
   instruction.  The entries are ordered by number of registers set in
   the mask.  We also ignore the single register encodings because a
   normal SW/LW is preferred.  */

static const unsigned int umips_swm_mask[17] = {
  0xc0ff0000, 0x80ff0000, 0x40ff0000, 0x807f0000,
  0x00ff0000, 0x803f0000, 0x007f0000, 0x801f0000,
  0x003f0000, 0x800f0000, 0x001f0000, 0x80070000,
  0x000f0000, 0x80030000, 0x00070000, 0x80010000,
  0x00030000
};

static const unsigned int umips_swm_encoding[17] = {
  25, 24, 9, 23, 8, 22, 7, 21, 6, 20, 5, 19, 4, 18, 3, 17, 2
};
10874 /* Try to use a microMIPS LWM or SWM instruction to save or restore
10875 as many GPRs in *MASK as possible. *OFFSET is the offset from the
10876 stack pointer of the topmost save slot.
10878 Remove from *MASK all registers that were handled using LWM and SWM.
10879 Update *OFFSET so that it points to the first unused save slot. */
10882 umips_build_save_restore (mips_save_restore_fn fn
,
10883 unsigned *mask
, HOST_WIDE_INT
*offset
)
10887 rtx pattern
, set
, reg
, mem
;
10888 HOST_WIDE_INT this_offset
;
10891 /* Try matching $16 to $31 (s0 to ra). */
10892 for (i
= 0; i
< ARRAY_SIZE (umips_swm_mask
); i
++)
10893 if ((*mask
& 0xffff0000) == umips_swm_mask
[i
])
10896 if (i
== ARRAY_SIZE (umips_swm_mask
))
10899 /* Get the offset of the lowest save slot. */
10900 nregs
= (umips_swm_encoding
[i
] & 0xf) + (umips_swm_encoding
[i
] >> 4);
10901 this_offset
= *offset
- UNITS_PER_WORD
* (nregs
- 1);
10903 /* LWM/SWM can only support offsets from -2048 to 2047. */
10904 if (!UMIPS_12BIT_OFFSET_P (this_offset
))
10907 /* Create the final PARALLEL. */
10908 pattern
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (nregs
));
10909 this_base
= stack_pointer_rtx
;
10911 /* For registers $16-$23 and $30. */
10912 for (j
= 0; j
< (umips_swm_encoding
[i
] & 0xf); j
++)
10914 HOST_WIDE_INT offset
= this_offset
+ j
* UNITS_PER_WORD
;
10915 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, this_base
, offset
));
10916 unsigned int regno
= (j
!= 8) ? 16 + j
: 30;
10917 *mask
&= ~(1 << regno
);
10918 reg
= gen_rtx_REG (SImode
, regno
);
10919 if (fn
== mips_save_reg
)
10920 set
= mips_frame_set (mem
, reg
);
10923 set
= gen_rtx_SET (reg
, mem
);
10924 mips_add_cfa_restore (reg
);
10926 XVECEXP (pattern
, 0, j
) = set
;
10929 /* For register $31. */
10930 if (umips_swm_encoding
[i
] >> 4)
10932 HOST_WIDE_INT offset
= this_offset
+ j
* UNITS_PER_WORD
;
10933 *mask
&= ~(1 << 31);
10934 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, this_base
, offset
));
10935 reg
= gen_rtx_REG (SImode
, 31);
10936 if (fn
== mips_save_reg
)
10937 set
= mips_frame_set (mem
, reg
);
10940 set
= gen_rtx_SET (reg
, mem
);
10941 mips_add_cfa_restore (reg
);
10943 XVECEXP (pattern
, 0, j
) = set
;
10946 pattern
= emit_insn (pattern
);
10947 if (fn
== mips_save_reg
)
10948 RTX_FRAME_RELATED_P (pattern
) = 1;
10950 /* Adjust the last offset. */
10951 *offset
-= UNITS_PER_WORD
* nregs
;
10956 /* Call FN for each register that is saved by the current function.
10957 SP_OFFSET is the offset of the current stack pointer from the start
10961 mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset
,
10962 mips_save_restore_fn fn
)
10964 machine_mode fpr_mode
;
10966 const struct mips_frame_info
*frame
= &cfun
->machine
->frame
;
10967 HOST_WIDE_INT offset
;
10970 /* Save registers starting from high to low. The debuggers prefer at least
10971 the return register be stored at func+4, and also it allows us not to
10972 need a nop in the epilogue if at least one register is reloaded in
10973 addition to return address. */
10974 offset
= frame
->gp_sp_offset
- sp_offset
;
10975 mask
= frame
->mask
;
10977 if (TARGET_MICROMIPS
)
10978 umips_build_save_restore (fn
, &mask
, &offset
);
10980 for (regno
= GP_REG_LAST
; regno
>= GP_REG_FIRST
; regno
--)
10981 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
10983 /* Record the ra offset for use by mips_function_profiler. */
10984 if (regno
== RETURN_ADDR_REGNUM
)
10985 cfun
->machine
->frame
.ra_fp_offset
= offset
+ sp_offset
;
10986 mips_save_restore_reg (word_mode
, regno
, offset
, fn
);
10987 offset
-= UNITS_PER_WORD
;
10990 /* This loop must iterate over the same space as its companion in
10991 mips_compute_frame_info. */
10992 offset
= cfun
->machine
->frame
.fp_sp_offset
- sp_offset
;
10993 fpr_mode
= (TARGET_SINGLE_FLOAT
? SFmode
: DFmode
);
10994 for (regno
= FP_REG_LAST
- MAX_FPRS_PER_FMT
+ 1;
10995 regno
>= FP_REG_FIRST
;
10996 regno
-= MAX_FPRS_PER_FMT
)
10997 if (BITSET_P (cfun
->machine
->frame
.fmask
, regno
- FP_REG_FIRST
))
10999 if (!TARGET_FLOAT64
&& TARGET_DOUBLE_FLOAT
11000 && (fixed_regs
[regno
] || fixed_regs
[regno
+ 1]))
11002 if (fixed_regs
[regno
])
11003 mips_save_restore_reg (SFmode
, regno
+ 1, offset
, fn
);
11005 mips_save_restore_reg (SFmode
, regno
, offset
, fn
);
11008 mips_save_restore_reg (fpr_mode
, regno
, offset
, fn
);
11009 offset
-= GET_MODE_SIZE (fpr_mode
);
11013 /* Return true if a move between register REGNO and its save slot (MEM)
11014 can be done in a single move. LOAD_P is true if we are loading
11015 from the slot, false if we are storing to it. */
11018 mips_direct_save_slot_move_p (unsigned int regno
, rtx mem
, bool load_p
)
11020 /* There is a specific MIPS16 instruction for saving $31 to the stack. */
11021 if (TARGET_MIPS16
&& !load_p
&& regno
== RETURN_ADDR_REGNUM
)
11024 return mips_secondary_reload_class (REGNO_REG_CLASS (regno
),
11025 GET_MODE (mem
), mem
, load_p
) == NO_REGS
;
11028 /* Emit a move from SRC to DEST, given that one of them is a register
11029 save slot and that the other is a register. TEMP is a temporary
11030 GPR of the same mode that is available if need be. */
11033 mips_emit_save_slot_move (rtx dest
, rtx src
, rtx temp
)
11035 unsigned int regno
;
11040 regno
= REGNO (src
);
11045 regno
= REGNO (dest
);
11049 if (regno
== cfun
->machine
->global_pointer
&& !mips_must_initialize_gp_p ())
11051 /* We don't yet know whether we'll need this instruction or not.
11052 Postpone the decision by emitting a ghost move. This move
11053 is specifically not frame-related; only the split version is. */
11055 emit_insn (gen_move_gpdi (dest
, src
));
11057 emit_insn (gen_move_gpsi (dest
, src
));
11061 if (regno
== HI_REGNUM
)
11065 mips_emit_move (temp
, src
);
11067 emit_insn (gen_mthidi_ti (gen_rtx_REG (TImode
, MD_REG_FIRST
),
11068 temp
, gen_rtx_REG (DImode
, LO_REGNUM
)));
11070 emit_insn (gen_mthisi_di (gen_rtx_REG (DImode
, MD_REG_FIRST
),
11071 temp
, gen_rtx_REG (SImode
, LO_REGNUM
)));
11076 emit_insn (gen_mfhidi_ti (temp
,
11077 gen_rtx_REG (TImode
, MD_REG_FIRST
)));
11079 emit_insn (gen_mfhisi_di (temp
,
11080 gen_rtx_REG (DImode
, MD_REG_FIRST
)));
11081 mips_emit_move (dest
, temp
);
11084 else if (mips_direct_save_slot_move_p (regno
, mem
, mem
== src
))
11085 mips_emit_move (dest
, src
);
11088 gcc_assert (!reg_overlap_mentioned_p (dest
, temp
));
11089 mips_emit_move (temp
, src
);
11090 mips_emit_move (dest
, temp
);
11093 mips_set_frame_expr (mips_frame_set (dest
, src
));
11096 /* If we're generating n32 or n64 abicalls, and the current function
11097 does not use $28 as its global pointer, emit a cplocal directive.
11098 Use pic_offset_table_rtx as the argument to the directive. */
11101 mips_output_cplocal (void)
11103 if (!TARGET_EXPLICIT_RELOCS
11104 && mips_must_initialize_gp_p ()
11105 && cfun
->machine
->global_pointer
!= GLOBAL_POINTER_REGNUM
)
11106 output_asm_insn (".cplocal %+", 0);
11109 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
11112 mips_output_function_prologue (FILE *file
, HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
11114 const char *fnname
;
11116 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
11117 floating-point arguments. */
11119 && TARGET_HARD_FLOAT_ABI
11120 && crtl
->args
.info
.fp_code
!= 0)
11121 mips16_build_function_stub ();
11123 /* Get the function name the same way that toplev.c does before calling
11124 assemble_start_function. This is needed so that the name used here
11125 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
11126 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
11127 mips_start_function_definition (fnname
, TARGET_MIPS16
);
11129 /* Output MIPS-specific frame information. */
11130 if (!flag_inhibit_size_directive
)
11132 const struct mips_frame_info
*frame
;
11134 frame
= &cfun
->machine
->frame
;
11136 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
11138 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC
",%s\t\t"
11139 "# vars= " HOST_WIDE_INT_PRINT_DEC
11141 ", args= " HOST_WIDE_INT_PRINT_DEC
11142 ", gp= " HOST_WIDE_INT_PRINT_DEC
"\n",
11143 reg_names
[frame_pointer_needed
11144 ? HARD_FRAME_POINTER_REGNUM
11145 : STACK_POINTER_REGNUM
],
11146 (frame_pointer_needed
11147 ? frame
->total_size
- frame
->hard_frame_pointer_offset
11148 : frame
->total_size
),
11149 reg_names
[RETURN_ADDR_REGNUM
],
11151 frame
->num_gp
, frame
->num_fp
,
11153 frame
->cprestore_size
);
11155 /* .mask MASK, OFFSET. */
11156 fprintf (file
, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC
"\n",
11157 frame
->mask
, frame
->gp_save_offset
);
11159 /* .fmask MASK, OFFSET. */
11160 fprintf (file
, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC
"\n",
11161 frame
->fmask
, frame
->fp_save_offset
);
11164 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
11165 Also emit the ".set noreorder; .set nomacro" sequence for functions
11167 if (mips_must_initialize_gp_p ()
11168 && mips_current_loadgp_style () == LOADGP_OLDABI
)
11172 /* This is a fixed-form sequence. The position of the
11173 first two instructions is important because of the
11174 way _gp_disp is defined. */
11175 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
11176 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
11177 output_asm_insn ("sll\t$2,16", 0);
11178 output_asm_insn ("addu\t$2,$3", 0);
11182 /* .cpload must be in a .set noreorder but not a
11183 .set nomacro block. */
11184 mips_push_asm_switch (&mips_noreorder
);
11185 output_asm_insn (".cpload\t%^", 0);
11186 if (!cfun
->machine
->all_noreorder_p
)
11187 mips_pop_asm_switch (&mips_noreorder
);
11189 mips_push_asm_switch (&mips_nomacro
);
11192 else if (cfun
->machine
->all_noreorder_p
)
11194 mips_push_asm_switch (&mips_noreorder
);
11195 mips_push_asm_switch (&mips_nomacro
);
11198 /* Tell the assembler which register we're using as the global
11199 pointer. This is needed for thunks, since they can use either
11200 explicit relocs or assembler macros. */
11201 mips_output_cplocal ();
11204 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
11207 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED
,
11208 HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
11210 const char *fnname
;
11212 /* Reinstate the normal $gp. */
11213 SET_REGNO (pic_offset_table_rtx
, GLOBAL_POINTER_REGNUM
);
11214 mips_output_cplocal ();
11216 if (cfun
->machine
->all_noreorder_p
)
11218 mips_pop_asm_switch (&mips_nomacro
);
11219 mips_pop_asm_switch (&mips_noreorder
);
11222 /* Get the function name the same way that toplev.c does before calling
11223 assemble_start_function. This is needed so that the name used here
11224 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
11225 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
11226 mips_end_function_definition (fnname
);
11229 /* Emit an optimisation barrier for accesses to the current frame. */
11232 mips_frame_barrier (void)
11234 emit_clobber (gen_frame_mem (BLKmode
, stack_pointer_rtx
));
11238 /* The __gnu_local_gp symbol. */
11240 static GTY(()) rtx mips_gnu_local_gp
;
11242 /* If we're generating n32 or n64 abicalls, emit instructions
11243 to set up the global pointer. */
11246 mips_emit_loadgp (void)
11248 rtx addr
, offset
, incoming_address
, base
, index
, pic_reg
;
11250 pic_reg
= TARGET_MIPS16
? MIPS16_PIC_TEMP
: pic_offset_table_rtx
;
11251 switch (mips_current_loadgp_style ())
11253 case LOADGP_ABSOLUTE
:
11254 if (mips_gnu_local_gp
== NULL
)
11256 mips_gnu_local_gp
= gen_rtx_SYMBOL_REF (Pmode
, "__gnu_local_gp");
11257 SYMBOL_REF_FLAGS (mips_gnu_local_gp
) |= SYMBOL_FLAG_LOCAL
;
11259 emit_insn (PMODE_INSN (gen_loadgp_absolute
,
11260 (pic_reg
, mips_gnu_local_gp
)));
11263 case LOADGP_OLDABI
:
11264 /* Added by mips_output_function_prologue. */
11267 case LOADGP_NEWABI
:
11268 addr
= XEXP (DECL_RTL (current_function_decl
), 0);
11269 offset
= mips_unspec_address (addr
, SYMBOL_GOTOFF_LOADGP
);
11270 incoming_address
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
11271 emit_insn (PMODE_INSN (gen_loadgp_newabi
,
11272 (pic_reg
, offset
, incoming_address
)));
11276 base
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (VXWORKS_GOTT_BASE
));
11277 index
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (VXWORKS_GOTT_INDEX
));
11278 emit_insn (PMODE_INSN (gen_loadgp_rtp
, (pic_reg
, base
, index
)));
11286 emit_insn (PMODE_INSN (gen_copygp_mips16
,
11287 (pic_offset_table_rtx
, pic_reg
)));
11289 /* Emit a blockage if there are implicit uses of the GP register.
11290 This includes profiled functions, because FUNCTION_PROFILE uses
11292 if (!TARGET_EXPLICIT_RELOCS
|| crtl
->profile
)
11293 emit_insn (gen_loadgp_blockage ());
/* Stack-probe interval for -fstack-check; must fit an indexed offset.  */
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
11302 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
11303 inclusive. These are offsets from the current stack pointer. */
11306 mips_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
11309 sorry ("-fstack-check=specific not implemented for MIPS16");
11311 /* See if we have a constant small number of probes to generate. If so,
11312 that's the easy case. */
11313 if (first
+ size
<= 32768)
11317 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
11318 it exceeds SIZE. If only one probe is needed, this will not
11319 generate any code. Then probe at FIRST + SIZE. */
11320 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
11321 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
11324 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
11328 /* Otherwise, do the same as above, but in a loop. Note that we must be
11329 extra careful with variables wrapping around because we might be at
11330 the very top (or the very bottom) of the address space and we have
11331 to be able to handle this case properly; in particular, we use an
11332 equality test for the loop condition. */
11335 HOST_WIDE_INT rounded_size
;
11336 rtx r3
= MIPS_PROLOGUE_TEMP (Pmode
);
11337 rtx r12
= MIPS_PROLOGUE_TEMP2 (Pmode
);
11339 /* Sanity check for the addressing mode we're going to use. */
11340 gcc_assert (first
<= 32768);
11343 /* Step 1: round SIZE to the previous multiple of the interval. */
11345 rounded_size
= ROUND_DOWN (size
, PROBE_INTERVAL
);
11348 /* Step 2: compute initial and final value of the loop counter. */
11350 /* TEST_ADDR = SP + FIRST. */
11351 emit_insn (gen_rtx_SET (r3
, plus_constant (Pmode
, stack_pointer_rtx
,
11354 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
11355 if (rounded_size
> 32768)
11357 emit_move_insn (r12
, GEN_INT (rounded_size
));
11358 emit_insn (gen_rtx_SET (r12
, gen_rtx_MINUS (Pmode
, r3
, r12
)));
11361 emit_insn (gen_rtx_SET (r12
, plus_constant (Pmode
, r3
,
11365 /* Step 3: the loop
11369 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
11372 while (TEST_ADDR != LAST_ADDR)
11374 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
11375 until it is equal to ROUNDED_SIZE. */
11377 emit_insn (PMODE_INSN (gen_probe_stack_range
, (r3
, r3
, r12
)));
11380 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
11381 that SIZE is equal to ROUNDED_SIZE. */
11383 if (size
!= rounded_size
)
11384 emit_stack_probe (plus_constant (Pmode
, r12
, rounded_size
- size
));
11387 /* Make sure nothing is scheduled before we are done. */
11388 emit_insn (gen_blockage ());
11391 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
11392 absolute addresses. */
11395 mips_output_probe_stack_range (rtx reg1
, rtx reg2
)
11397 static int labelno
= 0;
11398 char loop_lab
[32], tmp
[64];
11401 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
11404 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
11406 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
11408 xops
[1] = GEN_INT (-PROBE_INTERVAL
);
11409 if (TARGET_64BIT
&& TARGET_LONG64
)
11410 output_asm_insn ("daddiu\t%0,%0,%1", xops
);
11412 output_asm_insn ("addiu\t%0,%0,%1", xops
);
11414 /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */
11416 strcpy (tmp
, "%(%<bne\t%0,%1,");
11417 output_asm_insn (strcat (tmp
, &loop_lab
[1]), xops
);
11419 output_asm_insn ("sd\t$0,0(%0)%)", xops
);
11421 output_asm_insn ("sw\t$0,0(%0)%)", xops
);
11426 /* Return true if X contains a kernel register. */
11429 mips_refers_to_kernel_reg_p (const_rtx x
)
11431 subrtx_iterator::array_type array
;
11432 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
11433 if (REG_P (*iter
) && KERNEL_REG_P (REGNO (*iter
)))
11438 /* Expand the "prologue" pattern. */
11441 mips_expand_prologue (void)
11443 const struct mips_frame_info
*frame
;
11444 HOST_WIDE_INT size
;
11445 unsigned int nargs
;
11447 if (cfun
->machine
->global_pointer
!= INVALID_REGNUM
)
11449 /* Check whether an insn uses pic_offset_table_rtx, either explicitly
11450 or implicitly. If so, we can commit to using a global pointer
11451 straight away, otherwise we need to defer the decision. */
11452 if (mips_cfun_has_inflexible_gp_ref_p ()
11453 || mips_cfun_has_flexible_gp_ref_p ())
11455 cfun
->machine
->must_initialize_gp_p
= true;
11456 cfun
->machine
->must_restore_gp_when_clobbered_p
= true;
11459 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
11462 frame
= &cfun
->machine
->frame
;
11463 size
= frame
->total_size
;
11465 if (flag_stack_usage_info
)
11466 current_function_static_stack_size
= size
;
11468 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
11470 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
11472 if (size
> PROBE_INTERVAL
&& size
> STACK_CHECK_PROTECT
)
11473 mips_emit_probe_stack_range (STACK_CHECK_PROTECT
,
11474 size
- STACK_CHECK_PROTECT
);
11477 mips_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
11480 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
11481 bytes beforehand; this is enough to cover the register save area
11482 without going out of range. */
11483 if (((frame
->mask
| frame
->fmask
| frame
->acc_mask
) != 0)
11484 || frame
->num_cop0_regs
> 0)
11486 HOST_WIDE_INT step1
;
11488 step1
= MIN (size
, MIPS_MAX_FIRST_STACK_STEP
);
11489 if (GENERATE_MIPS16E_SAVE_RESTORE
)
11491 HOST_WIDE_INT offset
;
11492 unsigned int mask
, regno
;
11494 /* Try to merge argument stores into the save instruction. */
11495 nargs
= mips16e_collect_argument_saves ();
11497 /* Build the save instruction. */
11498 mask
= frame
->mask
;
11499 rtx insn
= mips16e_build_save_restore (false, &mask
, &offset
,
11501 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
11502 mips_frame_barrier ();
11505 /* Check if we need to save other registers. */
11506 for (regno
= GP_REG_FIRST
; regno
< GP_REG_LAST
; regno
++)
11507 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
11509 offset
-= UNITS_PER_WORD
;
11510 mips_save_restore_reg (word_mode
, regno
,
11511 offset
, mips_save_reg
);
11516 if (cfun
->machine
->interrupt_handler_p
)
11518 HOST_WIDE_INT offset
;
11521 /* If this interrupt is using a shadow register set, we need to
11522 get the stack pointer from the previous register set. */
11523 if (cfun
->machine
->use_shadow_register_set
== SHADOW_SET_YES
)
11524 emit_insn (PMODE_INSN (gen_mips_rdpgpr
, (stack_pointer_rtx
,
11525 stack_pointer_rtx
)));
11527 if (!cfun
->machine
->keep_interrupts_masked_p
)
11529 if (cfun
->machine
->int_mask
== INT_MASK_EIC
)
11530 /* Move from COP0 Cause to K0. */
11531 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, K0_REG_NUM
),
11532 gen_rtx_REG (SImode
, COP0_CAUSE_REG_NUM
)));
11534 /* Move from COP0 EPC to K1. */
11535 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, K1_REG_NUM
),
11536 gen_rtx_REG (SImode
,
11537 COP0_EPC_REG_NUM
)));
11539 /* Allocate the first part of the frame. */
11540 rtx insn
= gen_add3_insn (stack_pointer_rtx
, stack_pointer_rtx
,
11542 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
11543 mips_frame_barrier ();
11546 /* Start at the uppermost location for saving. */
11547 offset
= frame
->cop0_sp_offset
- size
;
11549 /* Push EPC into its stack slot. */
11550 mem
= gen_frame_mem (word_mode
,
11551 plus_constant (Pmode
, stack_pointer_rtx
,
11553 mips_emit_move (mem
, gen_rtx_REG (word_mode
, K1_REG_NUM
));
11554 offset
-= UNITS_PER_WORD
;
11556 /* Move from COP0 Status to K1. */
11557 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, K1_REG_NUM
),
11558 gen_rtx_REG (SImode
,
11559 COP0_STATUS_REG_NUM
)));
11561 /* Right justify the RIPL in k0. */
11562 if (!cfun
->machine
->keep_interrupts_masked_p
11563 && cfun
->machine
->int_mask
== INT_MASK_EIC
)
11564 emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode
, K0_REG_NUM
),
11565 gen_rtx_REG (SImode
, K0_REG_NUM
),
11566 GEN_INT (CAUSE_IPL
)));
11568 /* Push Status into its stack slot. */
11569 mem
= gen_frame_mem (word_mode
,
11570 plus_constant (Pmode
, stack_pointer_rtx
,
11572 mips_emit_move (mem
, gen_rtx_REG (word_mode
, K1_REG_NUM
));
11573 offset
-= UNITS_PER_WORD
;
11575 /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
11576 if (!cfun
->machine
->keep_interrupts_masked_p
11577 && cfun
->machine
->int_mask
== INT_MASK_EIC
)
11578 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
11581 gen_rtx_REG (SImode
, K0_REG_NUM
)));
11583 /* Clear all interrupt mask bits up to and including the
11584 handler's interrupt line. */
11585 if (!cfun
->machine
->keep_interrupts_masked_p
11586 && cfun
->machine
->int_mask
!= INT_MASK_EIC
)
11587 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
11588 GEN_INT (cfun
->machine
->int_mask
+ 1),
11590 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
11592 if (!cfun
->machine
->keep_interrupts_masked_p
)
11593 /* Enable interrupts by clearing the KSU ERL and EXL bits.
11594 IE is already the correct value, so we don't have to do
11595 anything explicit. */
11596 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
11599 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
11601 /* Disable interrupts by clearing the KSU, ERL, EXL,
11603 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
11606 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
11608 if (TARGET_HARD_FLOAT
)
11609 /* Disable COP1 for hard-float. This will lead to an exception
11610 if floating-point code is executed in an ISR. */
11611 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
11614 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
11620 rtx insn
= gen_add3_insn (stack_pointer_rtx
,
11623 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
11624 mips_frame_barrier ();
11628 mips_for_each_saved_acc (size
, mips_save_reg
);
11629 mips_for_each_saved_gpr_and_fpr (size
, mips_save_reg
);
11633 /* Allocate the rest of the frame. */
11636 if (SMALL_OPERAND (-size
))
11637 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx
,
11639 GEN_INT (-size
)))) = 1;
11642 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode
), GEN_INT (size
));
11645 /* There are no instructions to add or subtract registers
11646 from the stack pointer, so use the frame pointer as a
11647 temporary. We should always be using a frame pointer
11648 in this case anyway. */
11649 gcc_assert (frame_pointer_needed
);
11650 mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
11651 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx
,
11652 hard_frame_pointer_rtx
,
11653 MIPS_PROLOGUE_TEMP (Pmode
)));
11654 mips_emit_move (stack_pointer_rtx
, hard_frame_pointer_rtx
);
11657 emit_insn (gen_sub3_insn (stack_pointer_rtx
,
11659 MIPS_PROLOGUE_TEMP (Pmode
)));
11661 /* Describe the combined effect of the previous instructions. */
11662 mips_set_frame_expr
11663 (gen_rtx_SET (stack_pointer_rtx
,
11664 plus_constant (Pmode
, stack_pointer_rtx
, -size
)));
11666 mips_frame_barrier ();
11669 /* Set up the frame pointer, if we're using one. */
11670 if (frame_pointer_needed
)
11672 HOST_WIDE_INT offset
;
11674 offset
= frame
->hard_frame_pointer_offset
;
11677 rtx insn
= mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
11678 RTX_FRAME_RELATED_P (insn
) = 1;
11680 else if (SMALL_OPERAND (offset
))
11682 rtx insn
= gen_add3_insn (hard_frame_pointer_rtx
,
11683 stack_pointer_rtx
, GEN_INT (offset
));
11684 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
11688 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode
), GEN_INT (offset
));
11689 mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
11690 emit_insn (gen_add3_insn (hard_frame_pointer_rtx
,
11691 hard_frame_pointer_rtx
,
11692 MIPS_PROLOGUE_TEMP (Pmode
)));
11693 mips_set_frame_expr
11694 (gen_rtx_SET (hard_frame_pointer_rtx
,
11695 plus_constant (Pmode
, stack_pointer_rtx
, offset
)));
11699 mips_emit_loadgp ();
11701 /* Initialize the $gp save slot. */
11702 if (mips_cfun_has_cprestore_slot_p ())
11704 rtx base
, mem
, gp
, temp
;
11705 HOST_WIDE_INT offset
;
11707 mips_get_cprestore_base_and_offset (&base
, &offset
, false);
11708 mem
= gen_frame_mem (Pmode
, plus_constant (Pmode
, base
, offset
));
11709 gp
= TARGET_MIPS16
? MIPS16_PIC_TEMP
: pic_offset_table_rtx
;
11710 temp
= (SMALL_OPERAND (offset
)
11711 ? gen_rtx_SCRATCH (Pmode
)
11712 : MIPS_PROLOGUE_TEMP (Pmode
));
11713 emit_insn (PMODE_INSN (gen_potential_cprestore
,
11714 (mem
, GEN_INT (offset
), gp
, temp
)));
11716 mips_get_cprestore_base_and_offset (&base
, &offset
, true);
11717 mem
= gen_frame_mem (Pmode
, plus_constant (Pmode
, base
, offset
));
11718 emit_insn (PMODE_INSN (gen_use_cprestore
, (mem
)));
11721 /* We need to search back to the last use of K0 or K1. */
11722 if (cfun
->machine
->interrupt_handler_p
)
11725 for (insn
= get_last_insn (); insn
!= NULL_RTX
; insn
= PREV_INSN (insn
))
11727 && mips_refers_to_kernel_reg_p (PATTERN (insn
)))
11729 /* Emit a move from K1 to COP0 Status after insn. */
11730 gcc_assert (insn
!= NULL_RTX
);
11731 emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode
, COP0_STATUS_REG_NUM
),
11732 gen_rtx_REG (SImode
, K1_REG_NUM
)),
11736 /* If we are profiling, make sure no instructions are scheduled before
11737 the call to mcount. */
11739 emit_insn (gen_blockage ());
11742 /* Attach all pending register saves to the previous instruction.
11743 Return that instruction. */
11746 mips_epilogue_emit_cfa_restores (void)
11750 insn
= get_last_insn ();
11751 if (mips_epilogue
.cfa_restores
)
11753 gcc_assert (insn
&& !REG_NOTES (insn
));
11754 RTX_FRAME_RELATED_P (insn
) = 1;
11755 REG_NOTES (insn
) = mips_epilogue
.cfa_restores
;
11756 mips_epilogue
.cfa_restores
= 0;
11761 /* Like mips_epilogue_emit_cfa_restores, but also record that the CFA is
11762 now at REG + OFFSET. */
11765 mips_epilogue_set_cfa (rtx reg
, HOST_WIDE_INT offset
)
11769 insn
= mips_epilogue_emit_cfa_restores ();
11770 if (reg
!= mips_epilogue
.cfa_reg
|| offset
!= mips_epilogue
.cfa_offset
)
11772 RTX_FRAME_RELATED_P (insn
) = 1;
11773 REG_NOTES (insn
) = alloc_reg_note (REG_CFA_DEF_CFA
,
11774 plus_constant (Pmode
, reg
, offset
),
11776 mips_epilogue
.cfa_reg
= reg
;
11777 mips_epilogue
.cfa_offset
= offset
;
11781 /* Emit instructions to restore register REG from slot MEM. Also update
11782 the cfa_restores list. */
11785 mips_restore_reg (rtx reg
, rtx mem
)
11787 /* There's no MIPS16 instruction to load $31 directly. Load into
11788 $7 instead and adjust the return insn appropriately. */
11789 if (TARGET_MIPS16
&& REGNO (reg
) == RETURN_ADDR_REGNUM
)
11790 reg
= gen_rtx_REG (GET_MODE (reg
), GP_REG_FIRST
+ 7);
11791 else if (GET_MODE (reg
) == DFmode
11792 && (!TARGET_FLOAT64
11793 || mips_abi
== ABI_32
))
11795 mips_add_cfa_restore (mips_subword (reg
, true));
11796 mips_add_cfa_restore (mips_subword (reg
, false));
11799 mips_add_cfa_restore (reg
);
11801 mips_emit_save_slot_move (reg
, mem
, MIPS_EPILOGUE_TEMP (GET_MODE (reg
)));
11802 if (REGNO (reg
) == REGNO (mips_epilogue
.cfa_reg
))
11803 /* The CFA is currently defined in terms of the register whose
11804 value we have just restored. Redefine the CFA in terms of
11805 the stack pointer. */
11806 mips_epilogue_set_cfa (stack_pointer_rtx
,
11807 mips_epilogue
.cfa_restore_sp_offset
);
11810 /* Emit code to set the stack pointer to BASE + OFFSET, given that
11811 BASE + OFFSET is NEW_FRAME_SIZE bytes below the top of the frame.
11812 BASE, if not the stack pointer, is available as a temporary. */
11815 mips_deallocate_stack (rtx base
, rtx offset
, HOST_WIDE_INT new_frame_size
)
11817 if (base
== stack_pointer_rtx
&& offset
== const0_rtx
)
11820 mips_frame_barrier ();
11821 if (offset
== const0_rtx
)
11823 emit_move_insn (stack_pointer_rtx
, base
);
11824 mips_epilogue_set_cfa (stack_pointer_rtx
, new_frame_size
);
11826 else if (TARGET_MIPS16
&& base
!= stack_pointer_rtx
)
11828 emit_insn (gen_add3_insn (base
, base
, offset
));
11829 mips_epilogue_set_cfa (base
, new_frame_size
);
11830 emit_move_insn (stack_pointer_rtx
, base
);
11834 emit_insn (gen_add3_insn (stack_pointer_rtx
, base
, offset
));
11835 mips_epilogue_set_cfa (stack_pointer_rtx
, new_frame_size
);
11839 /* Emit any instructions needed before a return. */
11842 mips_expand_before_return (void)
11844 /* When using a call-clobbered gp, we start out with unified call
11845 insns that include instructions to restore the gp. We then split
11846 these unified calls after reload. These split calls explicitly
11847 clobber gp, so there is no need to define
11848 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
11850 For consistency, we should also insert an explicit clobber of $28
11851 before return insns, so that the post-reload optimizers know that
11852 the register is not live on exit. */
11853 if (TARGET_CALL_CLOBBERED_GP
)
11854 emit_clobber (pic_offset_table_rtx
);
11857 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
11861 mips_expand_epilogue (bool sibcall_p
)
11863 const struct mips_frame_info
*frame
;
11864 HOST_WIDE_INT step1
, step2
;
11867 bool use_jraddiusp_p
= false;
11869 if (!sibcall_p
&& mips_can_use_return_insn ())
11871 emit_jump_insn (gen_return ());
11875 /* In MIPS16 mode, if the return value should go into a floating-point
11876 register, we need to call a helper routine to copy it over. */
11877 if (mips16_cfun_returns_in_fpr_p ())
11878 mips16_copy_fpr_return_value ();
11880 /* Split the frame into two. STEP1 is the amount of stack we should
11881 deallocate before restoring the registers. STEP2 is the amount we
11882 should deallocate afterwards.
11884 Start off by assuming that no registers need to be restored. */
11885 frame
= &cfun
->machine
->frame
;
11886 step1
= frame
->total_size
;
11889 /* Work out which register holds the frame address. */
11890 if (!frame_pointer_needed
)
11891 base
= stack_pointer_rtx
;
11894 base
= hard_frame_pointer_rtx
;
11895 step1
-= frame
->hard_frame_pointer_offset
;
11897 mips_epilogue
.cfa_reg
= base
;
11898 mips_epilogue
.cfa_offset
= step1
;
11899 mips_epilogue
.cfa_restores
= NULL_RTX
;
11901 /* If we need to restore registers, deallocate as much stack as
11902 possible in the second step without going out of range. */
11903 if ((frame
->mask
| frame
->fmask
| frame
->acc_mask
) != 0
11904 || frame
->num_cop0_regs
> 0)
11906 step2
= MIN (step1
, MIPS_MAX_FIRST_STACK_STEP
);
11910 /* Get an rtx for STEP1 that we can add to BASE. */
11911 adjust
= GEN_INT (step1
);
11912 if (!SMALL_OPERAND (step1
))
11914 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), adjust
);
11915 adjust
= MIPS_EPILOGUE_TEMP (Pmode
);
11917 mips_deallocate_stack (base
, adjust
, step2
);
11919 /* If we're using addressing macros, $gp is implicitly used by all
11920 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
11922 if (TARGET_CALL_SAVED_GP
&& !TARGET_EXPLICIT_RELOCS
)
11923 emit_insn (gen_blockage ());
11925 mips_epilogue
.cfa_restore_sp_offset
= step2
;
11926 if (GENERATE_MIPS16E_SAVE_RESTORE
&& frame
->mask
!= 0)
11928 unsigned int regno
, mask
;
11929 HOST_WIDE_INT offset
;
11932 /* Generate the restore instruction. */
11933 mask
= frame
->mask
;
11934 restore
= mips16e_build_save_restore (true, &mask
, &offset
, 0, step2
);
11936 /* Restore any other registers manually. */
11937 for (regno
= GP_REG_FIRST
; regno
< GP_REG_LAST
; regno
++)
11938 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
11940 offset
-= UNITS_PER_WORD
;
11941 mips_save_restore_reg (word_mode
, regno
, offset
, mips_restore_reg
);
11944 /* Restore the remaining registers and deallocate the final bit
11946 mips_frame_barrier ();
11947 emit_insn (restore
);
11948 mips_epilogue_set_cfa (stack_pointer_rtx
, 0);
11952 /* Restore the registers. */
11953 mips_for_each_saved_acc (frame
->total_size
- step2
, mips_restore_reg
);
11954 mips_for_each_saved_gpr_and_fpr (frame
->total_size
- step2
,
11957 if (cfun
->machine
->interrupt_handler_p
)
11959 HOST_WIDE_INT offset
;
11962 offset
= frame
->cop0_sp_offset
- (frame
->total_size
- step2
);
11964 /* Restore the original EPC. */
11965 mem
= gen_frame_mem (word_mode
,
11966 plus_constant (Pmode
, stack_pointer_rtx
,
11968 mips_emit_move (gen_rtx_REG (word_mode
, K1_REG_NUM
), mem
);
11969 offset
-= UNITS_PER_WORD
;
11971 /* Move to COP0 EPC. */
11972 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, COP0_EPC_REG_NUM
),
11973 gen_rtx_REG (SImode
, K1_REG_NUM
)));
11975 /* Restore the original Status. */
11976 mem
= gen_frame_mem (word_mode
,
11977 plus_constant (Pmode
, stack_pointer_rtx
,
11979 mips_emit_move (gen_rtx_REG (word_mode
, K1_REG_NUM
), mem
);
11980 offset
-= UNITS_PER_WORD
;
11982 /* If we don't use shadow register set, we need to update SP. */
11983 if (cfun
->machine
->use_shadow_register_set
== SHADOW_SET_NO
)
11984 mips_deallocate_stack (stack_pointer_rtx
, GEN_INT (step2
), 0);
11986 /* The choice of position is somewhat arbitrary in this case. */
11987 mips_epilogue_emit_cfa_restores ();
11989 /* Move to COP0 Status. */
11990 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, COP0_STATUS_REG_NUM
),
11991 gen_rtx_REG (SImode
, K1_REG_NUM
)));
11993 else if (TARGET_MICROMIPS
11994 && !crtl
->calls_eh_return
11997 && mips_unsigned_immediate_p (step2
, 5, 2))
11998 use_jraddiusp_p
= true;
12000 /* Deallocate the final bit of the frame. */
12001 mips_deallocate_stack (stack_pointer_rtx
, GEN_INT (step2
), 0);
12004 if (cfun
->machine
->use_frame_header_for_callee_saved_regs
)
12005 mips_epilogue_emit_cfa_restores ();
12006 else if (!use_jraddiusp_p
)
12007 gcc_assert (!mips_epilogue
.cfa_restores
);
12009 /* Add in the __builtin_eh_return stack adjustment. We need to
12010 use a temporary in MIPS16 code. */
12011 if (crtl
->calls_eh_return
)
12015 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), stack_pointer_rtx
);
12016 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode
),
12017 MIPS_EPILOGUE_TEMP (Pmode
),
12018 EH_RETURN_STACKADJ_RTX
));
12019 mips_emit_move (stack_pointer_rtx
, MIPS_EPILOGUE_TEMP (Pmode
));
12022 emit_insn (gen_add3_insn (stack_pointer_rtx
,
12024 EH_RETURN_STACKADJ_RTX
));
12029 mips_expand_before_return ();
12030 if (cfun
->machine
->interrupt_handler_p
)
12032 /* Interrupt handlers generate eret or deret. */
12033 if (cfun
->machine
->use_debug_exception_return_p
)
12034 emit_jump_insn (gen_mips_deret ());
12036 emit_jump_insn (gen_mips_eret ());
12042 /* When generating MIPS16 code, the normal
12043 mips_for_each_saved_gpr_and_fpr path will restore the return
12044 address into $7 rather than $31. */
12046 && !GENERATE_MIPS16E_SAVE_RESTORE
12047 && BITSET_P (frame
->mask
, RETURN_ADDR_REGNUM
))
12049 /* simple_returns cannot rely on values that are only available
12050 on paths through the epilogue (because return paths that do
12051 not pass through the epilogue may nevertheless reuse a
12052 simple_return that occurs at the end of the epilogue).
12053 Use a normal return here instead. */
12054 rtx reg
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 7);
12055 pat
= gen_return_internal (reg
);
12057 else if (use_jraddiusp_p
)
12058 pat
= gen_jraddiusp (GEN_INT (step2
));
12061 rtx reg
= gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
);
12062 pat
= gen_simple_return_internal (reg
);
12064 emit_jump_insn (pat
);
12065 if (use_jraddiusp_p
)
12066 mips_epilogue_set_cfa (stack_pointer_rtx
, step2
);
12070 /* Search from the beginning to the first use of K0 or K1. */
12071 if (cfun
->machine
->interrupt_handler_p
12072 && !cfun
->machine
->keep_interrupts_masked_p
)
12074 for (insn
= get_insns (); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
12076 && mips_refers_to_kernel_reg_p (PATTERN (insn
)))
12078 gcc_assert (insn
!= NULL_RTX
);
12079 /* Insert disable interrupts before the first use of K0 or K1. */
12080 emit_insn_before (gen_mips_di (), insn
);
12081 emit_insn_before (gen_mips_ehb (), insn
);
12085 /* Return nonzero if this function is known to have a null epilogue.
12086 This allows the optimizer to omit jumps to jumps if no stack
12090 mips_can_use_return_insn (void)
12092 /* Interrupt handlers need to go through the epilogue. */
12093 if (cfun
->machine
->interrupt_handler_p
)
12096 if (!reload_completed
)
12102 /* In MIPS16 mode, a function that returns a floating-point value
12103 needs to arrange to copy the return value into the floating-point
12105 if (mips16_cfun_returns_in_fpr_p ())
12108 return (cfun
->machine
->frame
.total_size
== 0
12109 && !cfun
->machine
->use_frame_header_for_callee_saved_regs
);
12112 /* Return true if register REGNO can store a value of mode MODE.
12113 The result of this function is cached in mips_hard_regno_mode_ok. */
12116 mips_hard_regno_mode_ok_p (unsigned int regno
, machine_mode mode
)
12119 enum mode_class mclass
;
12121 if (mode
== CCV2mode
)
12122 return (ISA_HAS_8CC
12123 && ST_REG_P (regno
)
12124 && (regno
- ST_REG_FIRST
) % 2 == 0);
12126 if (mode
== CCV4mode
)
12127 return (ISA_HAS_8CC
12128 && ST_REG_P (regno
)
12129 && (regno
- ST_REG_FIRST
) % 4 == 0);
12131 if (mode
== CCmode
)
12132 return ISA_HAS_8CC
? ST_REG_P (regno
) : regno
== FPSW_REGNUM
;
12134 size
= GET_MODE_SIZE (mode
);
12135 mclass
= GET_MODE_CLASS (mode
);
12137 if (GP_REG_P (regno
) && mode
!= CCFmode
)
12138 return ((regno
- GP_REG_FIRST
) & 1) == 0 || size
<= UNITS_PER_WORD
;
12140 if (FP_REG_P (regno
)
12141 && (((regno
- FP_REG_FIRST
) % MAX_FPRS_PER_FMT
) == 0
12142 || (MIN_FPRS_PER_FMT
== 1 && size
<= UNITS_PER_FPREG
)))
12144 /* Deny use of odd-numbered registers for 32-bit data for
12145 the o32 FP64A ABI. */
12146 if (TARGET_O32_FP64A_ABI
&& size
<= 4 && (regno
& 1) != 0)
12149 /* The FPXX ABI requires double-precision values to be placed in
12150 even-numbered registers. Disallow odd-numbered registers with
12151 CCFmode because CCFmode double-precision compares will write a
12152 64-bit value to a register. */
12153 if (mode
== CCFmode
)
12154 return !(TARGET_FLOATXX
&& (regno
& 1) != 0);
12156 /* Allow 64-bit vector modes for Loongson-2E/2F. */
12157 if (TARGET_LOONGSON_VECTORS
12158 && (mode
== V2SImode
12159 || mode
== V4HImode
12160 || mode
== V8QImode
12161 || mode
== DImode
))
12164 if (mclass
== MODE_FLOAT
12165 || mclass
== MODE_COMPLEX_FLOAT
12166 || mclass
== MODE_VECTOR_FLOAT
)
12167 return size
<= UNITS_PER_FPVALUE
;
12169 /* Allow integer modes that fit into a single register. We need
12170 to put integers into FPRs when using instructions like CVT
12171 and TRUNC. There's no point allowing sizes smaller than a word,
12172 because the FPU has no appropriate load/store instructions. */
12173 if (mclass
== MODE_INT
)
12174 return size
>= MIN_UNITS_PER_WORD
&& size
<= UNITS_PER_FPREG
;
12177 /* Don't allow vector modes in accumulators. */
12178 if (ACC_REG_P (regno
)
12179 && !VECTOR_MODE_P (mode
)
12180 && (INTEGRAL_MODE_P (mode
) || ALL_FIXED_POINT_MODE_P (mode
)))
12182 if (MD_REG_P (regno
))
12184 /* After a multiplication or division, clobbering HI makes
12185 the value of LO unpredictable, and vice versa. This means
12186 that, for all interesting cases, HI and LO are effectively
12189 We model this by requiring that any value that uses HI
12191 if (size
<= UNITS_PER_WORD
* 2)
12192 return regno
== (size
<= UNITS_PER_WORD
? LO_REGNUM
: MD_REG_FIRST
);
12196 /* DSP accumulators do not have the same restrictions as
12197 HI and LO, so we can treat them as normal doubleword
12199 if (size
<= UNITS_PER_WORD
)
12202 if (size
<= UNITS_PER_WORD
* 2
12203 && ((regno
- DSP_ACC_REG_FIRST
) & 1) == 0)
12208 if (ALL_COP_REG_P (regno
))
12209 return mclass
== MODE_INT
&& size
<= UNITS_PER_WORD
;
12211 if (regno
== GOT_VERSION_REGNUM
)
12212 return mode
== SImode
;
12217 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
12220 mips_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED
,
12221 unsigned int new_reg
)
12223 /* Interrupt functions can only use registers that have already been
12224 saved by the prologue, even if they would normally be call-clobbered. */
12225 if (cfun
->machine
->interrupt_handler_p
&& !df_regs_ever_live_p (new_reg
))
12231 /* Return nonzero if register REGNO can be used as a scratch register
12235 mips_hard_regno_scratch_ok (unsigned int regno
)
12237 /* See mips_hard_regno_rename_ok. */
12238 if (cfun
->machine
->interrupt_handler_p
&& !df_regs_ever_live_p (regno
))
12244 /* Implement HARD_REGNO_NREGS. */
12247 mips_hard_regno_nregs (int regno
, machine_mode mode
)
12249 if (ST_REG_P (regno
))
12250 /* The size of FP status registers is always 4, because they only hold
12251 CCmode values, and CCmode is always considered to be 4 bytes wide. */
12252 return (GET_MODE_SIZE (mode
) + 3) / 4;
12254 if (FP_REG_P (regno
))
12255 return (GET_MODE_SIZE (mode
) + UNITS_PER_FPREG
- 1) / UNITS_PER_FPREG
;
12257 /* All other registers are word-sized. */
12258 return (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
12261 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
12262 in mips_hard_regno_nregs. */
12265 mips_class_max_nregs (enum reg_class rclass
, machine_mode mode
)
12271 COPY_HARD_REG_SET (left
, reg_class_contents
[(int) rclass
]);
12272 if (hard_reg_set_intersect_p (left
, reg_class_contents
[(int) ST_REGS
]))
12274 if (HARD_REGNO_MODE_OK (ST_REG_FIRST
, mode
))
12275 size
= MIN (size
, 4);
12276 AND_COMPL_HARD_REG_SET (left
, reg_class_contents
[(int) ST_REGS
]);
12278 if (hard_reg_set_intersect_p (left
, reg_class_contents
[(int) FP_REGS
]))
12280 if (HARD_REGNO_MODE_OK (FP_REG_FIRST
, mode
))
12281 size
= MIN (size
, UNITS_PER_FPREG
);
12282 AND_COMPL_HARD_REG_SET (left
, reg_class_contents
[(int) FP_REGS
]);
12284 if (!hard_reg_set_empty_p (left
))
12285 size
= MIN (size
, UNITS_PER_WORD
);
12286 return (GET_MODE_SIZE (mode
) + size
- 1) / size
;
12289 /* Implement CANNOT_CHANGE_MODE_CLASS. */
12292 mips_cannot_change_mode_class (machine_mode from
,
12294 enum reg_class rclass
)
12296 /* Allow conversions between different Loongson integer vectors,
12297 and between those vectors and DImode. */
12298 if (GET_MODE_SIZE (from
) == 8 && GET_MODE_SIZE (to
) == 8
12299 && INTEGRAL_MODE_P (from
) && INTEGRAL_MODE_P (to
))
12302 /* Otherwise, there are several problems with changing the modes of
12303 values in floating-point registers:
12305 - When a multi-word value is stored in paired floating-point
12306 registers, the first register always holds the low word. We
12307 therefore can't allow FPRs to change between single-word and
12308 multi-word modes on big-endian targets.
12310 - GCC assumes that each word of a multiword register can be
12311 accessed individually using SUBREGs. This is not true for
12312 floating-point registers if they are bigger than a word.
12314 - Loading a 32-bit value into a 64-bit floating-point register
12315 will not sign-extend the value, despite what LOAD_EXTEND_OP
12316 says. We can't allow FPRs to change from SImode to a wider
12317 mode on 64-bit targets.
12319 - If the FPU has already interpreted a value in one format, we
12320 must not ask it to treat the value as having a different
12323 We therefore disallow all mode changes involving FPRs. */
12325 return reg_classes_intersect_p (FP_REGS
, rclass
);
12328 /* Implement target hook small_register_classes_for_mode_p. */
12331 mips_small_register_classes_for_mode_p (machine_mode mode
12334 return TARGET_MIPS16
;
12337 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
12340 mips_mode_ok_for_mov_fmt_p (machine_mode mode
)
12346 return TARGET_HARD_FLOAT
;
12349 return TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
;
12352 return TARGET_HARD_FLOAT
&& TARGET_PAIRED_SINGLE_FLOAT
;
12359 /* Implement MODES_TIEABLE_P. */
12362 mips_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
12364 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
12365 prefer to put one of them in FPRs. */
12366 return (mode1
== mode2
12367 || (!mips_mode_ok_for_mov_fmt_p (mode1
)
12368 && !mips_mode_ok_for_mov_fmt_p (mode2
)));
12371 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
12374 mips_preferred_reload_class (rtx x
, reg_class_t rclass
)
12376 if (mips_dangerous_for_la25_p (x
) && reg_class_subset_p (LEA_REGS
, rclass
))
12379 if (reg_class_subset_p (FP_REGS
, rclass
)
12380 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x
)))
12383 if (reg_class_subset_p (GR_REGS
, rclass
))
12386 if (TARGET_MIPS16
&& reg_class_subset_p (M16_REGS
, rclass
))
12392 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
12393 Return a "canonical" class to represent it in later calculations. */
12396 mips_canonicalize_move_class (reg_class_t rclass
)
12398 /* All moves involving accumulator registers have the same cost. */
12399 if (reg_class_subset_p (rclass
, ACC_REGS
))
12402 /* Likewise promote subclasses of general registers to the most
12403 interesting containing class. */
12404 if (TARGET_MIPS16
&& reg_class_subset_p (rclass
, M16_REGS
))
12406 else if (reg_class_subset_p (rclass
, GENERAL_REGS
))
12407 rclass
= GENERAL_REGS
;
12412 /* Return the cost of moving a value from a register of class FROM to a GPR.
12413 Return 0 for classes that are unions of other classes handled by this
12417 mips_move_to_gpr_cost (reg_class_t from
)
12423 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
12427 /* MFLO and MFHI. */
12437 /* This choice of value is historical. */
12445 /* Return the cost of moving a value from a GPR to a register of class TO.
12446 Return 0 for classes that are unions of other classes handled by this
12450 mips_move_from_gpr_cost (reg_class_t to
)
12456 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
12460 /* MTLO and MTHI. */
12470 /* This choice of value is historical. */
12478 /* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
12479 maximum of the move costs for subclasses; regclass will work out
12480 the maximum for us. */
12483 mips_register_move_cost (machine_mode mode
,
12484 reg_class_t from
, reg_class_t to
)
12489 from
= mips_canonicalize_move_class (from
);
12490 to
= mips_canonicalize_move_class (to
);
12492 /* Handle moves that can be done without using general-purpose registers. */
12493 if (from
== FP_REGS
)
12495 if (to
== FP_REGS
&& mips_mode_ok_for_mov_fmt_p (mode
))
12500 /* Handle cases in which only one class deviates from the ideal. */
12501 dregs
= TARGET_MIPS16
? M16_REGS
: GENERAL_REGS
;
12503 return mips_move_from_gpr_cost (to
);
12505 return mips_move_to_gpr_cost (from
);
12507 /* Handles cases that require a GPR temporary. */
12508 cost1
= mips_move_to_gpr_cost (from
);
12511 cost2
= mips_move_from_gpr_cost (to
);
12513 return cost1
+ cost2
;
12519 /* Implement TARGET_REGISTER_PRIORITY. */
12522 mips_register_priority (int hard_regno
)
12524 /* Treat MIPS16 registers with higher priority than other regs. */
12526 && TEST_HARD_REG_BIT (reg_class_contents
[M16_REGS
], hard_regno
))
12531 /* Implement TARGET_MEMORY_MOVE_COST. */
12534 mips_memory_move_cost (machine_mode mode
, reg_class_t rclass
, bool in
)
12536 return (mips_cost
->memory_latency
12537 + memory_move_secondary_cost (mode
, rclass
, in
));
12540 /* Implement SECONDARY_MEMORY_NEEDED. */
12543 mips_secondary_memory_needed (enum reg_class class1
, enum reg_class class2
,
12546 /* Ignore spilled pseudos. */
12547 if (lra_in_progress
&& (class1
== NO_REGS
|| class2
== NO_REGS
))
12550 if (((class1
== FP_REGS
) != (class2
== FP_REGS
))
12551 && ((TARGET_FLOATXX
&& !ISA_HAS_MXHC1
)
12552 || TARGET_O32_FP64A_ABI
)
12553 && GET_MODE_SIZE (mode
) >= 8)
12559 /* Return the register class required for a secondary register when
12560 copying between one of the registers in RCLASS and value X, which
12561 has mode MODE. X is the source of the move if IN_P, otherwise it
12562 is the destination. Return NO_REGS if no secondary register is
12566 mips_secondary_reload_class (enum reg_class rclass
,
12567 machine_mode mode
, rtx x
, bool)
12571 /* If X is a constant that cannot be loaded into $25, it must be loaded
12572 into some other GPR. No other register class allows a direct move. */
12573 if (mips_dangerous_for_la25_p (x
))
12574 return reg_class_subset_p (rclass
, LEA_REGS
) ? NO_REGS
: LEA_REGS
;
12576 regno
= true_regnum (x
);
12579 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
12580 if (!reg_class_subset_p (rclass
, M16_REGS
) && !M16_REG_P (regno
))
12586 /* Copying from accumulator registers to anywhere other than a general
12587 register requires a temporary general register. */
12588 if (reg_class_subset_p (rclass
, ACC_REGS
))
12589 return GP_REG_P (regno
) ? NO_REGS
: GR_REGS
;
12590 if (ACC_REG_P (regno
))
12591 return reg_class_subset_p (rclass
, GR_REGS
) ? NO_REGS
: GR_REGS
;
12593 if (reg_class_subset_p (rclass
, FP_REGS
))
12597 && (GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)))
12598 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
12599 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
12602 if (GP_REG_P (regno
) || x
== CONST0_RTX (mode
))
12603 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
12606 if (CONSTANT_P (x
) && !targetm
.cannot_force_const_mem (mode
, x
))
12607 /* We can force the constant to memory and use lwc1
12608 and ldc1. As above, we will use pairs of lwc1s if
12609 ldc1 is not supported. */
12612 if (FP_REG_P (regno
) && mips_mode_ok_for_mov_fmt_p (mode
))
12613 /* In this case we can use mov.fmt. */
12616 /* Otherwise, we need to reload through an integer register. */
12619 if (FP_REG_P (regno
))
12620 return reg_class_subset_p (rclass
, GR_REGS
) ? NO_REGS
: GR_REGS
;
12625 /* Implement TARGET_MODE_REP_EXTENDED. */
12628 mips_mode_rep_extended (machine_mode mode
, machine_mode mode_rep
)
12630 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
12631 if (TARGET_64BIT
&& mode
== SImode
&& mode_rep
== DImode
)
12632 return SIGN_EXTEND
;
12637 /* Implement TARGET_VALID_POINTER_MODE. */
12640 mips_valid_pointer_mode (machine_mode mode
)
12642 return mode
== SImode
|| (TARGET_64BIT
&& mode
== DImode
);
12645 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
12648 mips_vector_mode_supported_p (machine_mode mode
)
12653 return TARGET_PAIRED_SINGLE_FLOAT
;
12668 return TARGET_LOONGSON_VECTORS
;
12675 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
12678 mips_scalar_mode_supported_p (machine_mode mode
)
12680 if (ALL_FIXED_POINT_MODE_P (mode
)
12681 && GET_MODE_PRECISION (mode
) <= 2 * BITS_PER_WORD
)
12684 return default_scalar_mode_supported_p (mode
);
12687 /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
12689 static machine_mode
12690 mips_preferred_simd_mode (machine_mode mode ATTRIBUTE_UNUSED
)
12692 if (TARGET_PAIRED_SINGLE_FLOAT
12698 /* Implement TARGET_INIT_LIBFUNCS. */
12701 mips_init_libfuncs (void)
12703 if (TARGET_FIX_VR4120
)
12705 /* Register the special divsi3 and modsi3 functions needed to work
12706 around VR4120 division errata. */
12707 set_optab_libfunc (sdiv_optab
, SImode
, "__vr4120_divsi3");
12708 set_optab_libfunc (smod_optab
, SImode
, "__vr4120_modsi3");
12711 if (TARGET_MIPS16
&& TARGET_HARD_FLOAT_ABI
)
12713 /* Register the MIPS16 -mhard-float stubs. */
12714 set_optab_libfunc (add_optab
, SFmode
, "__mips16_addsf3");
12715 set_optab_libfunc (sub_optab
, SFmode
, "__mips16_subsf3");
12716 set_optab_libfunc (smul_optab
, SFmode
, "__mips16_mulsf3");
12717 set_optab_libfunc (sdiv_optab
, SFmode
, "__mips16_divsf3");
12719 set_optab_libfunc (eq_optab
, SFmode
, "__mips16_eqsf2");
12720 set_optab_libfunc (ne_optab
, SFmode
, "__mips16_nesf2");
12721 set_optab_libfunc (gt_optab
, SFmode
, "__mips16_gtsf2");
12722 set_optab_libfunc (ge_optab
, SFmode
, "__mips16_gesf2");
12723 set_optab_libfunc (lt_optab
, SFmode
, "__mips16_ltsf2");
12724 set_optab_libfunc (le_optab
, SFmode
, "__mips16_lesf2");
12725 set_optab_libfunc (unord_optab
, SFmode
, "__mips16_unordsf2");
12727 set_conv_libfunc (sfix_optab
, SImode
, SFmode
, "__mips16_fix_truncsfsi");
12728 set_conv_libfunc (sfloat_optab
, SFmode
, SImode
, "__mips16_floatsisf");
12729 set_conv_libfunc (ufloat_optab
, SFmode
, SImode
, "__mips16_floatunsisf");
12731 if (TARGET_DOUBLE_FLOAT
)
12733 set_optab_libfunc (add_optab
, DFmode
, "__mips16_adddf3");
12734 set_optab_libfunc (sub_optab
, DFmode
, "__mips16_subdf3");
12735 set_optab_libfunc (smul_optab
, DFmode
, "__mips16_muldf3");
12736 set_optab_libfunc (sdiv_optab
, DFmode
, "__mips16_divdf3");
12738 set_optab_libfunc (eq_optab
, DFmode
, "__mips16_eqdf2");
12739 set_optab_libfunc (ne_optab
, DFmode
, "__mips16_nedf2");
12740 set_optab_libfunc (gt_optab
, DFmode
, "__mips16_gtdf2");
12741 set_optab_libfunc (ge_optab
, DFmode
, "__mips16_gedf2");
12742 set_optab_libfunc (lt_optab
, DFmode
, "__mips16_ltdf2");
12743 set_optab_libfunc (le_optab
, DFmode
, "__mips16_ledf2");
12744 set_optab_libfunc (unord_optab
, DFmode
, "__mips16_unorddf2");
12746 set_conv_libfunc (sext_optab
, DFmode
, SFmode
,
12747 "__mips16_extendsfdf2");
12748 set_conv_libfunc (trunc_optab
, SFmode
, DFmode
,
12749 "__mips16_truncdfsf2");
12750 set_conv_libfunc (sfix_optab
, SImode
, DFmode
,
12751 "__mips16_fix_truncdfsi");
12752 set_conv_libfunc (sfloat_optab
, DFmode
, SImode
,
12753 "__mips16_floatsidf");
12754 set_conv_libfunc (ufloat_optab
, DFmode
, SImode
,
12755 "__mips16_floatunsidf");
12759 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
12760 on an external non-MIPS16 routine to implement __sync_synchronize.
12761 Similarly for the rest of the ll/sc libfuncs. */
12764 synchronize_libfunc
= init_one_libfunc ("__sync_synchronize");
12765 init_sync_libfuncs (UNITS_PER_WORD
);
12769 /* Build up a multi-insn sequence that loads label TARGET into $AT. */
12772 mips_process_load_label (rtx target
)
12774 rtx base
, gp
, intop
;
12775 HOST_WIDE_INT offset
;
12777 mips_multi_start ();
12781 mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target
, 0);
12782 mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target
, 0);
12786 mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target
, 0);
12787 mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target
, 0);
12791 gp
= pic_offset_table_rtx
;
12792 if (mips_cfun_has_cprestore_slot_p ())
12794 gp
= gen_rtx_REG (Pmode
, AT_REGNUM
);
12795 mips_get_cprestore_base_and_offset (&base
, &offset
, true);
12796 if (!SMALL_OPERAND (offset
))
12798 intop
= GEN_INT (CONST_HIGH_PART (offset
));
12799 mips_multi_add_insn ("lui\t%0,%1", gp
, intop
, 0);
12800 mips_multi_add_insn ("addu\t%0,%0,%1", gp
, base
, 0);
12803 offset
= CONST_LOW_PART (offset
);
12805 intop
= GEN_INT (offset
);
12806 if (ISA_HAS_LOAD_DELAY
)
12807 mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp
, intop
, base
, 0);
12809 mips_multi_add_insn ("lw\t%0,%1(%2)", gp
, intop
, base
, 0);
12811 if (ISA_HAS_LOAD_DELAY
)
12812 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target
, gp
, 0);
12814 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target
, gp
, 0);
12815 mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target
, 0);
12820 /* Return the number of instructions needed to load a label into $AT. */
12822 static unsigned int
12823 mips_load_label_num_insns (void)
12825 if (cfun
->machine
->load_label_num_insns
== 0)
12827 mips_process_load_label (pc_rtx
);
12828 cfun
->machine
->load_label_num_insns
= mips_multi_num_insns
;
12830 return cfun
->machine
->load_label_num_insns
;
12833 /* Emit an asm sequence to start a noat block and load the address
12834 of a label into $1. */
12837 mips_output_load_label (rtx target
)
12839 mips_push_asm_switch (&mips_noat
);
12840 if (TARGET_EXPLICIT_RELOCS
)
12842 mips_process_load_label (target
);
12843 mips_multi_write ();
12847 if (Pmode
== DImode
)
12848 output_asm_insn ("dla\t%@,%0", &target
);
12850 output_asm_insn ("la\t%@,%0", &target
);
12854 /* Return the length of INSN. LENGTH is the initial length computed by
12855 attributes in the machine-description file. */
12858 mips_adjust_insn_length (rtx_insn
*insn
, int length
)
12860 /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
12861 of a PIC long-branch sequence. Substitute the correct value. */
12862 if (length
== MAX_PIC_BRANCH_LENGTH
12864 && INSN_CODE (insn
) >= 0
12865 && get_attr_type (insn
) == TYPE_BRANCH
)
12867 /* Add the branch-over instruction and its delay slot, if this
12868 is a conditional branch. */
12869 length
= simplejump_p (insn
) ? 0 : 8;
12871 /* Add the size of a load into $AT. */
12872 length
+= BASE_INSN_LENGTH
* mips_load_label_num_insns ();
12874 /* Add the length of an indirect jump, ignoring the delay slot. */
12875 length
+= TARGET_COMPRESSION
? 2 : 4;
12878 /* A unconditional jump has an unfilled delay slot if it is not part
12879 of a sequence. A conditional jump normally has a delay slot, but
12880 does not on MIPS16. */
12881 if (CALL_P (insn
) || (TARGET_MIPS16
? simplejump_p (insn
) : JUMP_P (insn
)))
12882 length
+= TARGET_MIPS16
? 2 : 4;
12884 /* See how many nops might be needed to avoid hardware hazards. */
12885 if (!cfun
->machine
->ignore_hazard_length_p
12887 && INSN_CODE (insn
) >= 0)
12888 switch (get_attr_hazard (insn
))
12894 case HAZARD_FORBIDDEN_SLOT
:
12895 length
+= NOP_INSN_LENGTH
;
12899 length
+= NOP_INSN_LENGTH
* 2;
12906 /* Return the asm template for a call. OPERANDS are the operands, TARGET_OPNO
12907 is the operand number of the target. SIZE_OPNO is the operand number of
12908 the argument size operand that can optionally hold the call attributes. If
12909 SIZE_OPNO is not -1 and the call is indirect, use the function symbol from
12910 the call attributes to attach a R_MIPS_JALR relocation to the call. LINK_P
12911 indicates whether the jump is a call and needs to set the link register.
12913 When generating GOT code without explicit relocation operators, all calls
12914 should use assembly macros. Otherwise, all indirect calls should use "jr"
12915 or "jalr"; we will arrange to restore $gp afterwards if necessary. Finally,
12916 we can only generate direct calls for -mabicalls by temporarily switching
12919 For microMIPS jal(r), we try to generate jal(r)s when a 16-bit
12920 instruction is in the delay slot of jal(r).
12922 Where compact branches are available, we try to use them if the delay slot
12923 has a NOP (or equivalently delay slots were not enabled for the instruction
12927 mips_output_jump (rtx
*operands
, int target_opno
, int size_opno
, bool link_p
)
12929 static char buffer
[300];
12931 bool reg_p
= REG_P (operands
[target_opno
]);
12933 const char *and_link
= link_p
? "al" : "";
12934 const char *reg
= reg_p
? "r" : "";
12935 const char *compact
= "";
12936 const char *nop
= "%/";
12937 const char *short_delay
= link_p
? "%!" : "";
12938 const char *insn_name
= TARGET_CB_NEVER
|| reg_p
? "j" : "b";
12940 /* Compact branches can only be described when the ISA has support for them
12941 as both the compact formatter '%:' and the delay slot NOP formatter '%/'
12942 work as a mutually exclusive pair. I.e. a NOP is never required if a
12943 compact form is available. */
12944 if (!final_sequence
12945 && (TARGET_CB_MAYBE
12946 || (ISA_HAS_JRC
&& !link_p
&& reg_p
)))
12952 if (TARGET_USE_GOT
&& !TARGET_EXPLICIT_RELOCS
)
12953 sprintf (s
, "%%*%s%s\t%%%d%%/", insn_name
, and_link
, target_opno
);
12956 if (!reg_p
&& TARGET_ABICALLS_PIC2
)
12957 s
+= sprintf (s
, ".option\tpic0\n\t");
12959 if (reg_p
&& mips_get_pic_call_symbol (operands
, size_opno
))
12961 s
+= sprintf (s
, "%%*.reloc\t1f,R_MIPS_JALR,%%%d\n1:\t", size_opno
);
12962 /* Not sure why this shouldn't permit a short delay but it did not
12963 allow it before so we still don't allow it. */
12967 s
+= sprintf (s
, "%%*");
12969 s
+= sprintf (s
, "%s%s%s%s%s\t%%%d%s", insn_name
, and_link
, reg
, compact
, short_delay
,
12972 if (!reg_p
&& TARGET_ABICALLS_PIC2
)
12973 s
+= sprintf (s
, "\n\t.option\tpic2");
12978 /* Return the assembly code for INSN, which has the operands given by
12979 OPERANDS, and which branches to OPERANDS[0] if some condition is true.
12980 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
12981 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
12982 version of BRANCH_IF_TRUE. */
12985 mips_output_conditional_branch (rtx_insn
*insn
, rtx
*operands
,
12986 const char *branch_if_true
,
12987 const char *branch_if_false
)
12989 unsigned int length
;
12992 gcc_assert (LABEL_P (operands
[0]));
12994 length
= get_attr_length (insn
);
12997 /* Just a simple conditional branch. */
12998 mips_branch_likely
= (final_sequence
&& INSN_ANNULLED_BRANCH_P (insn
));
12999 return branch_if_true
;
13002 /* Generate a reversed branch around a direct jump. This fallback does
13003 not use branch-likely instructions. */
13004 mips_branch_likely
= false;
13005 rtx_code_label
*not_taken
= gen_label_rtx ();
13006 taken
= operands
[0];
13008 /* Generate the reversed branch to NOT_TAKEN. */
13009 operands
[0] = not_taken
;
13010 output_asm_insn (branch_if_false
, operands
);
13012 /* If INSN has a delay slot, we must provide delay slots for both the
13013 branch to NOT_TAKEN and the conditional jump. We must also ensure
13014 that INSN's delay slot is executed in the appropriate cases. */
13015 if (final_sequence
)
13017 /* This first delay slot will always be executed, so use INSN's
13018 delay slot if is not annulled. */
13019 if (!INSN_ANNULLED_BRANCH_P (insn
))
13021 final_scan_insn (final_sequence
->insn (1),
13022 asm_out_file
, optimize
, 1, NULL
);
13023 final_sequence
->insn (1)->set_deleted ();
13026 output_asm_insn ("nop", 0);
13027 fprintf (asm_out_file
, "\n");
13030 /* Output the unconditional branch to TAKEN. */
13031 if (TARGET_ABSOLUTE_JUMPS
&& TARGET_CB_MAYBE
)
13033 /* Add a hazard nop. */
13034 if (!final_sequence
)
13036 output_asm_insn ("nop\t\t# hazard nop", 0);
13037 fprintf (asm_out_file
, "\n");
13039 output_asm_insn (MIPS_ABSOLUTE_JUMP ("bc\t%0"), &taken
);
13041 else if (TARGET_ABSOLUTE_JUMPS
)
13042 output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken
);
13045 mips_output_load_label (taken
);
13046 if (TARGET_CB_MAYBE
)
13047 output_asm_insn ("jrc\t%@%]", 0);
13049 output_asm_insn ("jr\t%@%]%/", 0);
13052 /* Now deal with its delay slot; see above. */
13053 if (final_sequence
)
13055 /* This delay slot will only be executed if the branch is taken.
13056 Use INSN's delay slot if is annulled. */
13057 if (INSN_ANNULLED_BRANCH_P (insn
))
13059 final_scan_insn (final_sequence
->insn (1),
13060 asm_out_file
, optimize
, 1, NULL
);
13061 final_sequence
->insn (1)->set_deleted ();
13063 else if (TARGET_CB_NEVER
)
13064 output_asm_insn ("nop", 0);
13065 fprintf (asm_out_file
, "\n");
13068 /* Output NOT_TAKEN. */
13069 targetm
.asm_out
.internal_label (asm_out_file
, "L",
13070 CODE_LABEL_NUMBER (not_taken
));
13074 /* Return the assembly code for INSN, which branches to OPERANDS[0]
13075 if some equality condition is true. The condition is given by
13076 OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
13077 OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
13078 OPERANDS[3] is the second operand and may be zero or a register. */
13081 mips_output_equal_conditional_branch (rtx_insn
* insn
, rtx
*operands
,
13084 const char *branch
[2];
13085 /* For a simple BNEZ or BEQZ microMIPSr3 branch. */
13086 if (TARGET_MICROMIPS
13087 && mips_isa_rev
<= 5
13088 && operands
[3] == const0_rtx
13089 && get_attr_length (insn
) <= 8)
13091 if (mips_cb
== MIPS_CB_OPTIMAL
)
13093 branch
[!inverted_p
] = "%*b%C1z%:\t%2,%0";
13094 branch
[inverted_p
] = "%*b%N1z%:\t%2,%0";
13098 branch
[!inverted_p
] = "%*b%C1z\t%2,%0%/";
13099 branch
[inverted_p
] = "%*b%N1z\t%2,%0%/";
13102 else if (TARGET_CB_MAYBE
)
13104 if (operands
[3] == const0_rtx
)
13106 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1z", "%2,%0");
13107 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1z", "%2,%0");
13109 else if (REGNO (operands
[2]) != REGNO (operands
[3]))
13111 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1", "%2,%3,%0");
13112 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1", "%2,%3,%0");
13116 /* This case is degenerate. It should not happen, but does. */
13117 if (GET_CODE (operands
[1]) == NE
)
13118 inverted_p
= !inverted_p
;
13120 branch
[!inverted_p
] = MIPS_BRANCH_C ("b", "%0");
13121 branch
[inverted_p
] = "%*\t\t# branch never";
13126 branch
[!inverted_p
] = MIPS_BRANCH ("b%C1", "%2,%z3,%0");
13127 branch
[inverted_p
] = MIPS_BRANCH ("b%N1", "%2,%z3,%0");
13130 return mips_output_conditional_branch (insn
, operands
, branch
[1], branch
[0]);
13133 /* Return the assembly code for INSN, which branches to OPERANDS[0]
13134 if some ordering condition is true. The condition is given by
13135 OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
13136 OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
13137 OPERANDS[3] is the second operand and may be zero or a register. */
13140 mips_output_order_conditional_branch (rtx_insn
*insn
, rtx
*operands
,
13143 const char *branch
[2];
13145 /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
13146 Make BRANCH[0] branch on the inverse condition. */
13147 if (operands
[3] != const0_rtx
)
13149 /* Handle degenerate cases that should not, but do, occur. */
13150 if (REGNO (operands
[2]) == REGNO (operands
[3]))
13152 switch (GET_CODE (operands
[1]))
13156 inverted_p
= !inverted_p
;
13157 /* Fall through. */
13160 branch
[!inverted_p
] = MIPS_BRANCH_C ("b", "%0");
13161 branch
[inverted_p
] = "%*\t\t# branch never";
13164 gcc_unreachable ();
13169 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1", "%2,%3,%0");
13170 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1", "%2,%3,%0");
13175 switch (GET_CODE (operands
[1]))
13177 /* These cases are equivalent to comparisons against zero. */
13179 inverted_p
= !inverted_p
;
13180 /* Fall through. */
13182 if (TARGET_CB_MAYBE
)
13184 branch
[!inverted_p
] = MIPS_BRANCH_C ("bnez", "%2,%0");
13185 branch
[inverted_p
] = MIPS_BRANCH_C ("beqz", "%2,%0");
13189 branch
[!inverted_p
] = MIPS_BRANCH ("bne", "%2,%.,%0");
13190 branch
[inverted_p
] = MIPS_BRANCH ("beq", "%2,%.,%0");
13194 /* These cases are always true or always false. */
13196 inverted_p
= !inverted_p
;
13197 /* Fall through. */
13199 if (TARGET_CB_MAYBE
)
13201 branch
[!inverted_p
] = MIPS_BRANCH_C ("b", "%0");
13202 branch
[inverted_p
] = "%*\t\t# branch never";
13206 branch
[!inverted_p
] = MIPS_BRANCH ("beq", "%.,%.,%0");
13207 branch
[inverted_p
] = MIPS_BRANCH ("bne", "%.,%.,%0");
13212 if (TARGET_CB_MAYBE
)
13214 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1z", "%2,%0");
13215 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1z", "%2,%0");
13219 branch
[!inverted_p
] = MIPS_BRANCH ("b%C1z", "%2,%0");
13220 branch
[inverted_p
] = MIPS_BRANCH ("b%N1z", "%2,%0");
13225 return mips_output_conditional_branch (insn
, operands
, branch
[1], branch
[0]);
13228 /* Start a block of code that needs access to the LL, SC and SYNC
13232 mips_start_ll_sc_sync_block (void)
13234 if (!ISA_HAS_LL_SC
)
13236 output_asm_insn (".set\tpush", 0);
13238 output_asm_insn (".set\tmips3", 0);
13240 output_asm_insn (".set\tmips2", 0);
13244 /* End a block started by mips_start_ll_sc_sync_block. */
13247 mips_end_ll_sc_sync_block (void)
13249 if (!ISA_HAS_LL_SC
)
13250 output_asm_insn (".set\tpop", 0);
13253 /* Output and/or return the asm template for a sync instruction. */
13256 mips_output_sync (void)
13258 mips_start_ll_sc_sync_block ();
13259 output_asm_insn ("sync", 0);
13260 mips_end_ll_sc_sync_block ();
13264 /* Return the asm template associated with sync_insn1 value TYPE.
13265 IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation. */
13267 static const char *
13268 mips_sync_insn1_template (enum attr_sync_insn1 type
, bool is_64bit_p
)
13272 case SYNC_INSN1_MOVE
:
13273 return "move\t%0,%z2";
13274 case SYNC_INSN1_LI
:
13275 return "li\t%0,%2";
13276 case SYNC_INSN1_ADDU
:
13277 return is_64bit_p
? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
13278 case SYNC_INSN1_ADDIU
:
13279 return is_64bit_p
? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
13280 case SYNC_INSN1_SUBU
:
13281 return is_64bit_p
? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
13282 case SYNC_INSN1_AND
:
13283 return "and\t%0,%1,%z2";
13284 case SYNC_INSN1_ANDI
:
13285 return "andi\t%0,%1,%2";
13286 case SYNC_INSN1_OR
:
13287 return "or\t%0,%1,%z2";
13288 case SYNC_INSN1_ORI
:
13289 return "ori\t%0,%1,%2";
13290 case SYNC_INSN1_XOR
:
13291 return "xor\t%0,%1,%z2";
13292 case SYNC_INSN1_XORI
:
13293 return "xori\t%0,%1,%2";
13295 gcc_unreachable ();
13298 /* Return the asm template associated with sync_insn2 value TYPE. */
13300 static const char *
13301 mips_sync_insn2_template (enum attr_sync_insn2 type
)
13305 case SYNC_INSN2_NOP
:
13306 gcc_unreachable ();
13307 case SYNC_INSN2_AND
:
13308 return "and\t%0,%1,%z2";
13309 case SYNC_INSN2_XOR
:
13310 return "xor\t%0,%1,%z2";
13311 case SYNC_INSN2_NOT
:
13312 return "nor\t%0,%1,%.";
13314 gcc_unreachable ();
13317 /* OPERANDS are the operands to a sync loop instruction and INDEX is
13318 the value of the one of the sync_* attributes. Return the operand
13319 referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
13320 have the associated attribute. */
13323 mips_get_sync_operand (rtx
*operands
, int index
, rtx default_value
)
13326 default_value
= operands
[index
- 1];
13327 return default_value
;
13330 /* INSN is a sync loop with operands OPERANDS. Build up a multi-insn
13331 sequence for it. */
13334 mips_process_sync_loop (rtx_insn
*insn
, rtx
*operands
)
13336 rtx at
, mem
, oldval
, newval
, inclusive_mask
, exclusive_mask
;
13337 rtx required_oldval
, insn1_op2
, tmp1
, tmp2
, tmp3
, cmp
;
13338 unsigned int tmp3_insn
;
13339 enum attr_sync_insn1 insn1
;
13340 enum attr_sync_insn2 insn2
;
13343 enum memmodel model
;
13345 /* Read an operand from the sync_WHAT attribute and store it in
13346 variable WHAT. DEFAULT is the default value if no attribute
13348 #define READ_OPERAND(WHAT, DEFAULT) \
13349 WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
13352 /* Read the memory. */
13353 READ_OPERAND (mem
, 0);
13355 is_64bit_p
= (GET_MODE_BITSIZE (GET_MODE (mem
)) == 64);
13357 /* Read the other attributes. */
13358 at
= gen_rtx_REG (GET_MODE (mem
), AT_REGNUM
);
13359 READ_OPERAND (oldval
, at
);
13360 READ_OPERAND (cmp
, 0);
13361 READ_OPERAND (newval
, at
);
13362 READ_OPERAND (inclusive_mask
, 0);
13363 READ_OPERAND (exclusive_mask
, 0);
13364 READ_OPERAND (required_oldval
, 0);
13365 READ_OPERAND (insn1_op2
, 0);
13366 insn1
= get_attr_sync_insn1 (insn
);
13367 insn2
= get_attr_sync_insn2 (insn
);
13369 /* Don't bother setting CMP result that is never used. */
13370 if (cmp
&& find_reg_note (insn
, REG_UNUSED
, cmp
))
13373 memmodel_attr
= get_attr_sync_memmodel (insn
);
13374 switch (memmodel_attr
)
13377 model
= MEMMODEL_ACQ_REL
;
13380 model
= MEMMODEL_ACQUIRE
;
13383 model
= memmodel_from_int (INTVAL (operands
[memmodel_attr
]));
13386 mips_multi_start ();
13388 /* Output the release side of the memory barrier. */
13389 if (need_atomic_barrier_p (model
, true))
13391 if (required_oldval
== 0 && TARGET_OCTEON
)
13393 /* Octeon doesn't reorder reads, so a full barrier can be
13394 created by using SYNCW to order writes combined with the
13395 write from the following SC. When the SC successfully
13396 completes, we know that all preceding writes are also
13397 committed to the coherent memory system. It is possible
13398 for a single SYNCW to fail, but a pair of them will never
13399 fail, so we use two. */
13400 mips_multi_add_insn ("syncw", NULL
);
13401 mips_multi_add_insn ("syncw", NULL
);
13404 mips_multi_add_insn ("sync", NULL
);
13407 /* Output the branch-back label. */
13408 mips_multi_add_label ("1:");
13410 /* OLDVAL = *MEM. */
13411 mips_multi_add_insn (is_64bit_p
? "lld\t%0,%1" : "ll\t%0,%1",
13412 oldval
, mem
, NULL
);
13414 /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2. */
13415 if (required_oldval
)
13417 if (inclusive_mask
== 0)
13421 gcc_assert (oldval
!= at
);
13422 mips_multi_add_insn ("and\t%0,%1,%2",
13423 at
, oldval
, inclusive_mask
, NULL
);
13426 if (TARGET_CB_NEVER
)
13427 mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1
, required_oldval
, NULL
);
13429 /* CMP = 0 [delay slot]. */
13431 mips_multi_add_insn ("li\t%0,0", cmp
, NULL
);
13433 if (TARGET_CB_MAYBE
&& required_oldval
== const0_rtx
)
13434 mips_multi_add_insn ("bnezc\t%0,2f", tmp1
, NULL
);
13435 else if (TARGET_CB_MAYBE
)
13436 mips_multi_add_insn ("bnec\t%0,%1,2f", tmp1
, required_oldval
, NULL
);
13440 /* $TMP1 = OLDVAL & EXCLUSIVE_MASK. */
13441 if (exclusive_mask
== 0)
13445 gcc_assert (oldval
!= at
);
13446 mips_multi_add_insn ("and\t%0,%1,%z2",
13447 at
, oldval
, exclusive_mask
, NULL
);
13451 /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).
13453 We can ignore moves if $TMP4 != INSN1_OP2, since we'll still emit
13454 at least one instruction in that case. */
13455 if (insn1
== SYNC_INSN1_MOVE
13456 && (tmp1
!= const0_rtx
|| insn2
!= SYNC_INSN2_NOP
))
13460 mips_multi_add_insn (mips_sync_insn1_template (insn1
, is_64bit_p
),
13461 newval
, oldval
, insn1_op2
, NULL
);
13465 /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK). */
13466 if (insn2
== SYNC_INSN2_NOP
)
13470 mips_multi_add_insn (mips_sync_insn2_template (insn2
),
13471 newval
, tmp2
, inclusive_mask
, NULL
);
13474 tmp3_insn
= mips_multi_last_index ();
13476 /* $AT = $TMP1 | $TMP3. */
13477 if (tmp1
== const0_rtx
|| tmp3
== const0_rtx
)
13479 mips_multi_set_operand (tmp3_insn
, 0, at
);
13484 gcc_assert (tmp1
!= tmp3
);
13485 mips_multi_add_insn ("or\t%0,%1,%2", at
, tmp1
, tmp3
, NULL
);
13488 /* if (!commit (*MEM = $AT)) goto 1.
13490 This will sometimes be a delayed branch; see the write code below
13492 mips_multi_add_insn (is_64bit_p
? "scd\t%0,%1" : "sc\t%0,%1", at
, mem
, NULL
);
13494 /* When using branch likely (-mfix-r10000), the delay slot instruction
13495 will be annulled on false. The normal delay slot instructions
13496 calculate the overall result of the atomic operation and must not
13497 be annulled. To ensure this behaviour unconditionally use a NOP
13498 in the delay slot for the branch likely case. */
13500 if (TARGET_CB_MAYBE
)
13501 mips_multi_add_insn ("beqzc\t%0,1b", at
, NULL
);
13503 mips_multi_add_insn ("beq%?\t%0,%.,1b%~", at
, NULL
);
13505 /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot]. */
13506 if (insn1
!= SYNC_INSN1_MOVE
&& insn1
!= SYNC_INSN1_LI
&& tmp3
!= newval
)
13508 mips_multi_copy_insn (tmp3_insn
);
13509 mips_multi_set_operand (mips_multi_last_index (), 0, newval
);
13511 else if (!(required_oldval
&& cmp
) && !mips_branch_likely
)
13512 mips_multi_add_insn ("nop", NULL
);
13514 /* CMP = 1 -- either standalone or in a delay slot. */
13515 if (required_oldval
&& cmp
)
13516 mips_multi_add_insn ("li\t%0,1", cmp
, NULL
);
13518 /* Output the acquire side of the memory barrier. */
13519 if (TARGET_SYNC_AFTER_SC
&& need_atomic_barrier_p (model
, false))
13520 mips_multi_add_insn ("sync", NULL
);
13522 /* Output the exit label, if needed. */
13523 if (required_oldval
)
13524 mips_multi_add_label ("2:");
13526 #undef READ_OPERAND
13529 /* Output and/or return the asm template for sync loop INSN, which has
13530 the operands given by OPERANDS. */
13533 mips_output_sync_loop (rtx_insn
*insn
, rtx
*operands
)
13535 /* Use branch-likely instructions to work around the LL/SC R10000
13537 mips_branch_likely
= TARGET_FIX_R10000
;
13539 mips_process_sync_loop (insn
, operands
);
13541 mips_push_asm_switch (&mips_noreorder
);
13542 mips_push_asm_switch (&mips_nomacro
);
13543 mips_push_asm_switch (&mips_noat
);
13544 mips_start_ll_sc_sync_block ();
13546 mips_multi_write ();
13548 mips_end_ll_sc_sync_block ();
13549 mips_pop_asm_switch (&mips_noat
);
13550 mips_pop_asm_switch (&mips_nomacro
);
13551 mips_pop_asm_switch (&mips_noreorder
);
13556 /* Return the number of individual instructions in sync loop INSN,
13557 which has the operands given by OPERANDS. */
13560 mips_sync_loop_insns (rtx_insn
*insn
, rtx
*operands
)
13562 /* Use branch-likely instructions to work around the LL/SC R10000
13564 mips_branch_likely
= TARGET_FIX_R10000
;
13565 mips_process_sync_loop (insn
, operands
);
13566 return mips_multi_num_insns
;
13569 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
13570 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
13572 When working around R4000 and R4400 errata, we need to make sure that
13573 the division is not immediately followed by a shift[1][2]. We also
13574 need to stop the division from being put into a branch delay slot[3].
13575 The easiest way to avoid both problems is to add a nop after the
13576 division. When a divide-by-zero check is needed, this nop can be
13577 used to fill the branch delay slot.
13579 [1] If a double-word or a variable shift executes immediately
13580 after starting an integer division, the shift may give an
13581 incorrect result. See quotations of errata #16 and #28 from
13582 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
13583 in mips.md for details.
13585 [2] A similar bug to [1] exists for all revisions of the
13586 R4000 and the R4400 when run in an MC configuration.
13587 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
13589 "19. In this following sequence:
13591 ddiv (or ddivu or div or divu)
13592 dsll32 (or dsrl32, dsra32)
13594 if an MPT stall occurs, while the divide is slipping the cpu
13595 pipeline, then the following double shift would end up with an
13598 Workaround: The compiler needs to avoid generating any
13599 sequence with divide followed by extended double shift."
13601 This erratum is also present in "MIPS R4400MC Errata, Processor
13602 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
13603 & 3.0" as errata #10 and #4, respectively.
13605 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
13606 (also valid for MIPS R4000MC processors):
13608 "52. R4000SC: This bug does not apply for the R4000PC.
13610 There are two flavors of this bug:
13612 1) If the instruction just after divide takes an RF exception
13613 (tlb-refill, tlb-invalid) and gets an instruction cache
13614 miss (both primary and secondary) and the line which is
13615 currently in secondary cache at this index had the first
13616 data word, where the bits 5..2 are set, then R4000 would
13617 get a wrong result for the div.
13622 ------------------- # end-of page. -tlb-refill
13627 ------------------- # end-of page. -tlb-invalid
13630 2) If the divide is in the taken branch delay slot, where the
13631 target takes RF exception and gets an I-cache miss for the
13632 exception vector or where I-cache miss occurs for the
13633 target address, under the above mentioned scenarios, the
13634 div would get wrong results.
13637 j r2 # to next page mapped or unmapped
13638 div r8,r9 # this bug would be there as long
13639 # as there is an ICache miss and
13640 nop # the "data pattern" is present
13643 beq r0, r0, NextPage # to Next page
13647 This bug is present for div, divu, ddiv, and ddivu
13650 Workaround: For item 1), OS could make sure that the next page
13651 after the divide instruction is also mapped. For item 2), the
13652 compiler could make sure that the divide instruction is not in
13653 the branch delay slot."
13655 These processors have PRId values of 0x00004220 and 0x00004300 for
13656 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
13659 mips_output_division (const char *division
, rtx
*operands
)
13664 if (TARGET_FIX_R4000
|| TARGET_FIX_R4400
)
13666 output_asm_insn (s
, operands
);
13669 if (TARGET_CHECK_ZERO_DIV
)
13673 output_asm_insn (s
, operands
);
13674 s
= "bnez\t%2,1f\n\tbreak\t7\n1:";
13676 else if (GENERATE_DIVIDE_TRAPS
)
13678 /* Avoid long replay penalty on load miss by putting the trap before
13681 output_asm_insn ("teq\t%2,%.,7", operands
);
13684 output_asm_insn (s
, operands
);
13685 s
= "teq\t%2,%.,7";
13690 output_asm_insn ("%(bne\t%2,%.,1f", operands
);
13691 output_asm_insn (s
, operands
);
13692 s
= "break\t7%)\n1:";
13698 /* Return true if destination of IN_INSN is used as add source in
13699 OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example:
13700 madd.s dst, x, y, z
13701 madd.s a, dst, b, c */
13704 mips_fmadd_bypass (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
13706 int dst_reg
, src_reg
;
13708 gcc_assert (get_attr_type (in_insn
) == TYPE_FMADD
);
13709 gcc_assert (get_attr_type (out_insn
) == TYPE_FMADD
);
13711 extract_insn (in_insn
);
13712 dst_reg
= REG_P (recog_data
.operand
[0]);
13714 extract_insn (out_insn
);
13715 src_reg
= REG_P (recog_data
.operand
[1]);
13717 if (dst_reg
== src_reg
)
13723 /* Return true if IN_INSN is a multiply-add or multiply-subtract
13724 instruction and if OUT_INSN assigns to the accumulator operand. */
13727 mips_linked_madd_p (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
13729 enum attr_accum_in accum_in
;
13730 int accum_in_opnum
;
13733 if (recog_memoized (in_insn
) < 0)
13736 accum_in
= get_attr_accum_in (in_insn
);
13737 if (accum_in
== ACCUM_IN_NONE
)
13740 accum_in_opnum
= accum_in
- ACCUM_IN_0
;
13742 extract_insn (in_insn
);
13743 gcc_assert (accum_in_opnum
< recog_data
.n_operands
);
13744 accum_in_op
= recog_data
.operand
[accum_in_opnum
];
13746 return reg_set_p (accum_in_op
, out_insn
);
13749 /* True if the dependency between OUT_INSN and IN_INSN is on the store
13750 data rather than the address. We need this because the cprestore
13751 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
13752 which causes the default routine to abort. We just return false
13756 mips_store_data_bypass_p (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
13758 if (GET_CODE (PATTERN (in_insn
)) == UNSPEC_VOLATILE
)
13761 return store_data_bypass_p (out_insn
, in_insn
);
13765 /* Variables and flags used in scheduler hooks when tuning for
13769 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
13772 /* If true, then next ALU1/2 instruction will go to ALU1. */
13775 /* If true, then next FALU1/2 unstruction will go to FALU1. */
13778 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
13779 int alu1_core_unit_code
;
13780 int alu2_core_unit_code
;
13781 int falu1_core_unit_code
;
13782 int falu2_core_unit_code
;
13784 /* True if current cycle has a multi instruction.
13785 This flag is used in mips_ls2_dfa_post_advance_cycle. */
13786 bool cycle_has_multi_p
;
13788 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
13789 These are used in mips_ls2_dfa_post_advance_cycle to initialize
13791 E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
13792 instruction to go ALU1. */
13793 rtx_insn
*alu1_turn_enabled_insn
;
13794 rtx_insn
*alu2_turn_enabled_insn
;
13795 rtx_insn
*falu1_turn_enabled_insn
;
13796 rtx_insn
*falu2_turn_enabled_insn
;
13799 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
13800 dependencies have no cost, except on the 20Kc where output-dependence
13801 is treated like input-dependence. */
13804 mips_adjust_cost (rtx_insn
*insn ATTRIBUTE_UNUSED
, rtx link
,
13805 rtx_insn
*dep ATTRIBUTE_UNUSED
, int cost
)
13807 if (REG_NOTE_KIND (link
) == REG_DEP_OUTPUT
13810 if (REG_NOTE_KIND (link
) != 0)
13815 /* Return the number of instructions that can be issued per cycle. */
13818 mips_issue_rate (void)
13822 case PROCESSOR_74KC
:
13823 case PROCESSOR_74KF2_1
:
13824 case PROCESSOR_74KF1_1
:
13825 case PROCESSOR_74KF3_2
:
13826 /* The 74k is not strictly quad-issue cpu, but can be seen as one
13827 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
13828 but in reality only a maximum of 3 insns can be issued as
13829 floating-point loads and stores also require a slot in the
13831 case PROCESSOR_R10000
:
13832 /* All R10K Processors are quad-issue (being the first MIPS
13833 processors to support this feature). */
13836 case PROCESSOR_20KC
:
13837 case PROCESSOR_R4130
:
13838 case PROCESSOR_R5400
:
13839 case PROCESSOR_R5500
:
13840 case PROCESSOR_R5900
:
13841 case PROCESSOR_R7000
:
13842 case PROCESSOR_R9000
:
13843 case PROCESSOR_OCTEON
:
13844 case PROCESSOR_OCTEON2
:
13845 case PROCESSOR_OCTEON3
:
13846 case PROCESSOR_I6400
:
13849 case PROCESSOR_SB1
:
13850 case PROCESSOR_SB1A
:
13851 /* This is actually 4, but we get better performance if we claim 3.
13852 This is partly because of unwanted speculative code motion with the
13853 larger number, and partly because in most common cases we can't
13854 reach the theoretical max of 4. */
13857 case PROCESSOR_LOONGSON_2E
:
13858 case PROCESSOR_LOONGSON_2F
:
13859 case PROCESSOR_LOONGSON_3A
:
13860 case PROCESSOR_P5600
:
13863 case PROCESSOR_XLP
:
13864 return (reload_completed
? 4 : 3);
13871 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
13874 mips_ls2_init_dfa_post_cycle_insn (void)
13877 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
13878 mips_ls2
.alu1_turn_enabled_insn
= get_insns ();
13882 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
13883 mips_ls2
.alu2_turn_enabled_insn
= get_insns ();
13887 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
13888 mips_ls2
.falu1_turn_enabled_insn
= get_insns ();
13892 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
13893 mips_ls2
.falu2_turn_enabled_insn
= get_insns ();
13896 mips_ls2
.alu1_core_unit_code
= get_cpu_unit_code ("ls2_alu1_core");
13897 mips_ls2
.alu2_core_unit_code
= get_cpu_unit_code ("ls2_alu2_core");
13898 mips_ls2
.falu1_core_unit_code
= get_cpu_unit_code ("ls2_falu1_core");
13899 mips_ls2
.falu2_core_unit_code
= get_cpu_unit_code ("ls2_falu2_core");
13902 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
13903 Init data used in mips_dfa_post_advance_cycle. */
13906 mips_init_dfa_post_cycle_insn (void)
13908 if (TUNE_LOONGSON_2EF
)
13909 mips_ls2_init_dfa_post_cycle_insn ();
13912 /* Initialize STATE when scheduling for Loongson 2E/2F.
13913 Support round-robin dispatch scheme by enabling only one of
13914 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
13918 mips_ls2_dfa_post_advance_cycle (state_t state
)
13920 if (cpu_unit_reservation_p (state
, mips_ls2
.alu1_core_unit_code
))
13922 /* Though there are no non-pipelined ALU1 insns,
13923 we can get an instruction of type 'multi' before reload. */
13924 gcc_assert (mips_ls2
.cycle_has_multi_p
);
13925 mips_ls2
.alu1_turn_p
= false;
13928 mips_ls2
.cycle_has_multi_p
= false;
13930 if (cpu_unit_reservation_p (state
, mips_ls2
.alu2_core_unit_code
))
13931 /* We have a non-pipelined alu instruction in the core,
13932 adjust round-robin counter. */
13933 mips_ls2
.alu1_turn_p
= true;
13935 if (mips_ls2
.alu1_turn_p
)
13937 if (state_transition (state
, mips_ls2
.alu1_turn_enabled_insn
) >= 0)
13938 gcc_unreachable ();
13942 if (state_transition (state
, mips_ls2
.alu2_turn_enabled_insn
) >= 0)
13943 gcc_unreachable ();
13946 if (cpu_unit_reservation_p (state
, mips_ls2
.falu1_core_unit_code
))
13948 /* There are no non-pipelined FALU1 insns. */
13949 gcc_unreachable ();
13950 mips_ls2
.falu1_turn_p
= false;
13953 if (cpu_unit_reservation_p (state
, mips_ls2
.falu2_core_unit_code
))
13954 /* We have a non-pipelined falu instruction in the core,
13955 adjust round-robin counter. */
13956 mips_ls2
.falu1_turn_p
= true;
13958 if (mips_ls2
.falu1_turn_p
)
13960 if (state_transition (state
, mips_ls2
.falu1_turn_enabled_insn
) >= 0)
13961 gcc_unreachable ();
13965 if (state_transition (state
, mips_ls2
.falu2_turn_enabled_insn
) >= 0)
13966 gcc_unreachable ();
13970 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
13971 This hook is being called at the start of each cycle. */
13974 mips_dfa_post_advance_cycle (void)
13976 if (TUNE_LOONGSON_2EF
)
13977 mips_ls2_dfa_post_advance_cycle (curr_state
);
13980 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
13981 be as wide as the scheduling freedom in the DFA. */
13984 mips_multipass_dfa_lookahead (void)
13986 /* Can schedule up to 4 of the 6 function units in any one cycle. */
13990 if (TUNE_LOONGSON_2EF
|| TUNE_LOONGSON_3A
)
13996 if (TUNE_P5600
|| TUNE_I6400
)
14002 /* Remove the instruction at index LOWER from ready queue READY and
14003 reinsert it in front of the instruction at index HIGHER. LOWER must
14007 mips_promote_ready (rtx_insn
**ready
, int lower
, int higher
)
14009 rtx_insn
*new_head
;
14012 new_head
= ready
[lower
];
14013 for (i
= lower
; i
< higher
; i
++)
14014 ready
[i
] = ready
[i
+ 1];
14015 ready
[i
] = new_head
;
14018 /* If the priority of the instruction at POS2 in the ready queue READY
14019 is within LIMIT units of that of the instruction at POS1, swap the
14020 instructions if POS2 is not already less than POS1. */
14023 mips_maybe_swap_ready (rtx_insn
**ready
, int pos1
, int pos2
, int limit
)
14026 && INSN_PRIORITY (ready
[pos1
]) + limit
>= INSN_PRIORITY (ready
[pos2
]))
14030 temp
= ready
[pos1
];
14031 ready
[pos1
] = ready
[pos2
];
14032 ready
[pos2
] = temp
;
14036 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
14037 that may clobber hi or lo. */
14038 static rtx_insn
*mips_macc_chains_last_hilo
;
14040 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
14041 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
14044 mips_macc_chains_record (rtx_insn
*insn
)
14046 if (get_attr_may_clobber_hilo (insn
))
14047 mips_macc_chains_last_hilo
= insn
;
14050 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
14051 has NREADY elements, looking for a multiply-add or multiply-subtract
14052 instruction that is cumulative with mips_macc_chains_last_hilo.
14053 If there is one, promote it ahead of anything else that might
14054 clobber hi or lo. */
14057 mips_macc_chains_reorder (rtx_insn
**ready
, int nready
)
14061 if (mips_macc_chains_last_hilo
!= 0)
14062 for (i
= nready
- 1; i
>= 0; i
--)
14063 if (mips_linked_madd_p (mips_macc_chains_last_hilo
, ready
[i
]))
14065 for (j
= nready
- 1; j
> i
; j
--)
14066 if (recog_memoized (ready
[j
]) >= 0
14067 && get_attr_may_clobber_hilo (ready
[j
]))
14069 mips_promote_ready (ready
, i
, j
);
14076 /* The last instruction to be scheduled. */
14077 static rtx_insn
*vr4130_last_insn
;
14079 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
14080 points to an rtx that is initially an instruction. Nullify the rtx
14081 if the instruction uses the value of register X. */
14084 vr4130_true_reg_dependence_p_1 (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
,
14089 insn_ptr
= (rtx
*) data
;
14092 && reg_referenced_p (x
, PATTERN (*insn_ptr
)))
14096 /* Return true if there is true register dependence between vr4130_last_insn
14100 vr4130_true_reg_dependence_p (rtx insn
)
14102 note_stores (PATTERN (vr4130_last_insn
),
14103 vr4130_true_reg_dependence_p_1
, &insn
);
14107 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
14108 the ready queue and that INSN2 is the instruction after it, return
14109 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
14110 in which INSN1 and INSN2 can probably issue in parallel, but for
14111 which (INSN2, INSN1) should be less sensitive to instruction
14112 alignment than (INSN1, INSN2). See 4130.md for more details. */
14115 vr4130_swap_insns_p (rtx_insn
*insn1
, rtx_insn
*insn2
)
14117 sd_iterator_def sd_it
;
14120 /* Check for the following case:
14122 1) there is some other instruction X with an anti dependence on INSN1;
14123 2) X has a higher priority than INSN2; and
14124 3) X is an arithmetic instruction (and thus has no unit restrictions).
14126 If INSN1 is the last instruction blocking X, it would better to
14127 choose (INSN1, X) over (INSN2, INSN1). */
14128 FOR_EACH_DEP (insn1
, SD_LIST_FORW
, sd_it
, dep
)
14129 if (DEP_TYPE (dep
) == REG_DEP_ANTI
14130 && INSN_PRIORITY (DEP_CON (dep
)) > INSN_PRIORITY (insn2
)
14131 && recog_memoized (DEP_CON (dep
)) >= 0
14132 && get_attr_vr4130_class (DEP_CON (dep
)) == VR4130_CLASS_ALU
)
14135 if (vr4130_last_insn
!= 0
14136 && recog_memoized (insn1
) >= 0
14137 && recog_memoized (insn2
) >= 0)
14139 /* See whether INSN1 and INSN2 use different execution units,
14140 or if they are both ALU-type instructions. If so, they can
14141 probably execute in parallel. */
14142 enum attr_vr4130_class class1
= get_attr_vr4130_class (insn1
);
14143 enum attr_vr4130_class class2
= get_attr_vr4130_class (insn2
);
14144 if (class1
!= class2
|| class1
== VR4130_CLASS_ALU
)
14146 /* If only one of the instructions has a dependence on
14147 vr4130_last_insn, prefer to schedule the other one first. */
14148 bool dep1_p
= vr4130_true_reg_dependence_p (insn1
);
14149 bool dep2_p
= vr4130_true_reg_dependence_p (insn2
);
14150 if (dep1_p
!= dep2_p
)
14153 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
14154 is not an ALU-type instruction and if INSN1 uses the same
14155 execution unit. (Note that if this condition holds, we already
14156 know that INSN2 uses a different execution unit.) */
14157 if (class1
!= VR4130_CLASS_ALU
14158 && recog_memoized (vr4130_last_insn
) >= 0
14159 && class1
== get_attr_vr4130_class (vr4130_last_insn
))
14166 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
14167 queue with at least two instructions. Swap the first two if
14168 vr4130_swap_insns_p says that it could be worthwhile. */
14171 vr4130_reorder (rtx_insn
**ready
, int nready
)
14173 if (vr4130_swap_insns_p (ready
[nready
- 1], ready
[nready
- 2]))
14174 mips_promote_ready (ready
, nready
- 2, nready
- 1);
14177 /* Record whether last 74k AGEN instruction was a load or store. */
14178 static enum attr_type mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
14180 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
14181 resets to TYPE_UNKNOWN state. */
14184 mips_74k_agen_init (rtx_insn
*insn
)
14186 if (!insn
|| CALL_P (insn
) || JUMP_P (insn
))
14187 mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
14190 enum attr_type type
= get_attr_type (insn
);
14191 if (type
== TYPE_LOAD
|| type
== TYPE_STORE
)
14192 mips_last_74k_agen_insn
= type
;
14196 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
14197 loads to be grouped together, and multiple stores to be grouped
14198 together. Swap things around in the ready queue to make this happen. */
14201 mips_74k_agen_reorder (rtx_insn
**ready
, int nready
)
14204 int store_pos
, load_pos
;
14209 for (i
= nready
- 1; i
>= 0; i
--)
14211 rtx_insn
*insn
= ready
[i
];
14212 if (USEFUL_INSN_P (insn
))
14213 switch (get_attr_type (insn
))
14216 if (store_pos
== -1)
14221 if (load_pos
== -1)
14230 if (load_pos
== -1 || store_pos
== -1)
14233 switch (mips_last_74k_agen_insn
)
14236 /* Prefer to schedule loads since they have a higher latency. */
14238 /* Swap loads to the front of the queue. */
14239 mips_maybe_swap_ready (ready
, load_pos
, store_pos
, 4);
14242 /* Swap stores to the front of the queue. */
14243 mips_maybe_swap_ready (ready
, store_pos
, load_pos
, 4);
14250 /* Implement TARGET_SCHED_INIT. */
14253 mips_sched_init (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
14254 int max_ready ATTRIBUTE_UNUSED
)
14256 mips_macc_chains_last_hilo
= 0;
14257 vr4130_last_insn
= 0;
14258 mips_74k_agen_init (NULL
);
14260 /* When scheduling for Loongson2, branch instructions go to ALU1,
14261 therefore basic block is most likely to start with round-robin counter
14262 pointed to ALU2. */
14263 mips_ls2
.alu1_turn_p
= false;
14264 mips_ls2
.falu1_turn_p
= true;
14267 /* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
14270 mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
14271 rtx_insn
**ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
14273 if (!reload_completed
14274 && TUNE_MACC_CHAINS
14276 mips_macc_chains_reorder (ready
, *nreadyp
);
14278 if (reload_completed
14280 && !TARGET_VR4130_ALIGN
14282 vr4130_reorder (ready
, *nreadyp
);
14285 mips_74k_agen_reorder (ready
, *nreadyp
);
14288 /* Implement TARGET_SCHED_REORDER. */
14291 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
14292 rtx_insn
**ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
14294 mips_sched_reorder_1 (file
, verbose
, ready
, nreadyp
, cycle
);
14295 return mips_issue_rate ();
14298 /* Implement TARGET_SCHED_REORDER2. */
14301 mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
14302 rtx_insn
**ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
14304 mips_sched_reorder_1 (file
, verbose
, ready
, nreadyp
, cycle
);
14305 return cached_can_issue_more
;
14308 /* Update round-robin counters for ALU1/2 and FALU1/2. */
14311 mips_ls2_variable_issue (rtx_insn
*insn
)
14313 if (mips_ls2
.alu1_turn_p
)
14315 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.alu1_core_unit_code
))
14316 mips_ls2
.alu1_turn_p
= false;
14320 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.alu2_core_unit_code
))
14321 mips_ls2
.alu1_turn_p
= true;
14324 if (mips_ls2
.falu1_turn_p
)
14326 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.falu1_core_unit_code
))
14327 mips_ls2
.falu1_turn_p
= false;
14331 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.falu2_core_unit_code
))
14332 mips_ls2
.falu1_turn_p
= true;
14335 if (recog_memoized (insn
) >= 0)
14336 mips_ls2
.cycle_has_multi_p
|= (get_attr_type (insn
) == TYPE_MULTI
);
14339 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
14342 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
14343 rtx_insn
*insn
, int more
)
14345 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
14346 if (USEFUL_INSN_P (insn
))
14348 if (get_attr_type (insn
) != TYPE_GHOST
)
14350 if (!reload_completed
&& TUNE_MACC_CHAINS
)
14351 mips_macc_chains_record (insn
);
14352 vr4130_last_insn
= insn
;
14354 mips_74k_agen_init (insn
);
14355 else if (TUNE_LOONGSON_2EF
)
14356 mips_ls2_variable_issue (insn
);
14359 /* Instructions of type 'multi' should all be split before
14360 the second scheduling pass. */
14361 gcc_assert (!reload_completed
14362 || recog_memoized (insn
) < 0
14363 || get_attr_type (insn
) != TYPE_MULTI
);
14365 cached_can_issue_more
= more
;
14369 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
14370 return the first operand of the associated PREF or PREFX insn. */
14373 mips_prefetch_cookie (rtx write
, rtx locality
)
14375 /* store_streamed / load_streamed. */
14376 if (INTVAL (locality
) <= 0)
14377 return GEN_INT (INTVAL (write
) + 4);
14379 /* store / load. */
14380 if (INTVAL (locality
) <= 2)
14383 /* store_retained / load_retained. */
14384 return GEN_INT (INTVAL (write
) + 6);
14387 /* Flags that indicate when a built-in function is available.
14389 BUILTIN_AVAIL_NON_MIPS16
14390 The function is available on the current target if !TARGET_MIPS16.
14392 BUILTIN_AVAIL_MIPS16
14393 The function is available on the current target if TARGET_MIPS16. */
14394 #define BUILTIN_AVAIL_NON_MIPS16 1
14395 #define BUILTIN_AVAIL_MIPS16 2
14397 /* Declare an availability predicate for built-in functions that
14398 require non-MIPS16 mode and also require COND to be true.
14399 NAME is the main part of the predicate's name. */
14400 #define AVAIL_NON_MIPS16(NAME, COND) \
14401 static unsigned int \
14402 mips_builtin_avail_##NAME (void) \
14404 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
14407 /* Declare an availability predicate for built-in functions that
14408 support both MIPS16 and non-MIPS16 code and also require COND
14409 to be true. NAME is the main part of the predicate's name. */
14410 #define AVAIL_ALL(NAME, COND) \
14411 static unsigned int \
14412 mips_builtin_avail_##NAME (void) \
14414 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 | BUILTIN_AVAIL_MIPS16 : 0; \
14417 /* This structure describes a single built-in function. */
14418 struct mips_builtin_description
{
14419 /* The code of the main .md file instruction. See mips_builtin_type
14420 for more information. */
14421 enum insn_code icode
;
14423 /* The floating-point comparison code to use with ICODE, if any. */
14424 enum mips_fp_condition cond
;
14426 /* The name of the built-in function. */
14429 /* Specifies how the function should be expanded. */
14430 enum mips_builtin_type builtin_type
;
14432 /* The function's prototype. */
14433 enum mips_function_type function_type
;
14435 /* Whether the function is available. */
14436 unsigned int (*avail
) (void);
14439 AVAIL_ALL (hard_float
, TARGET_HARD_FLOAT_ABI
)
14440 AVAIL_NON_MIPS16 (paired_single
, TARGET_PAIRED_SINGLE_FLOAT
)
14441 AVAIL_NON_MIPS16 (sb1_paired_single
, TARGET_SB1
&& TARGET_PAIRED_SINGLE_FLOAT
)
14442 AVAIL_NON_MIPS16 (mips3d
, TARGET_MIPS3D
)
14443 AVAIL_NON_MIPS16 (dsp
, TARGET_DSP
)
14444 AVAIL_NON_MIPS16 (dspr2
, TARGET_DSPR2
)
14445 AVAIL_NON_MIPS16 (dsp_32
, !TARGET_64BIT
&& TARGET_DSP
)
14446 AVAIL_NON_MIPS16 (dsp_64
, TARGET_64BIT
&& TARGET_DSP
)
14447 AVAIL_NON_MIPS16 (dspr2_32
, !TARGET_64BIT
&& TARGET_DSPR2
)
14448 AVAIL_NON_MIPS16 (loongson
, TARGET_LOONGSON_VECTORS
)
14449 AVAIL_NON_MIPS16 (cache
, TARGET_CACHE_BUILTIN
)
14451 /* Construct a mips_builtin_description from the given arguments.
14453 INSN is the name of the associated instruction pattern, without the
14454 leading CODE_FOR_mips_.
14456 CODE is the floating-point condition code associated with the
14457 function. It can be 'f' if the field is not applicable.
14459 NAME is the name of the function itself, without the leading
14462 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
14464 AVAIL is the name of the availability predicate, without the leading
14465 mips_builtin_avail_. */
14466 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
14467 FUNCTION_TYPE, AVAIL) \
14468 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
14469 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
14470 mips_builtin_avail_ ## AVAIL }
14472 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
14473 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
14474 are as for MIPS_BUILTIN. */
14475 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
14476 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
14478 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
14479 are subject to mips_builtin_avail_<AVAIL>. */
14480 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
14481 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
14482 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
14483 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
14484 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
14486 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
14487 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
14488 while the any and all forms are subject to mips_builtin_avail_mips3d. */
14489 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
14490 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
14491 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
14493 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
14494 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
14496 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
14497 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
14499 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
14500 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
14503 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
14504 are subject to mips_builtin_avail_mips3d. */
14505 #define CMP_4S_BUILTINS(INSN, COND) \
14506 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
14507 MIPS_BUILTIN_CMP_ANY, \
14508 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
14509 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
14510 MIPS_BUILTIN_CMP_ALL, \
14511 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
14513 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
14514 instruction requires mips_builtin_avail_<AVAIL>. */
14515 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
14516 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
14517 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
14519 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
14520 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
14523 /* Define all the built-in functions related to C.cond.fmt condition COND. */
14524 #define CMP_BUILTINS(COND) \
14525 MOVTF_BUILTINS (c, COND, paired_single), \
14526 MOVTF_BUILTINS (cabs, COND, mips3d), \
14527 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
14528 CMP_PS_BUILTINS (c, COND, paired_single), \
14529 CMP_PS_BUILTINS (cabs, COND, mips3d), \
14530 CMP_4S_BUILTINS (c, COND), \
14531 CMP_4S_BUILTINS (cabs, COND)
14533 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
14534 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
14535 and AVAIL are as for MIPS_BUILTIN. */
14536 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
14537 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
14538 FUNCTION_TYPE, AVAIL)
14540 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
14541 branch instruction. AVAIL is as for MIPS_BUILTIN. */
14542 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
14543 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
14544 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
14546 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
14547 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
14548 builtin_description field. */
14549 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
14550 { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f, \
14551 "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT, \
14552 FUNCTION_TYPE, mips_builtin_avail_loongson }
14554 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
14555 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
14556 builtin_description field. */
14557 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
14558 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
14560 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
14561 We use functions of this form when the same insn can be usefully applied
14562 to more than one datatype. */
14563 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
14564 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
/* Map builtin instruction names onto the .md patterns that implement
   them, where the two differ.  */
#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
#define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
#define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit

#define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
#define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
#define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
#define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
#define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
#define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
#define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
#define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
#define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
#define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
#define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
#define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
#define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
#define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
#define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
#define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
#define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
#define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
#define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
#define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
#define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
#define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
#define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
#define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
#define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
#define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
#define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
#define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
14606 static const struct mips_builtin_description mips_builtins
[] = {
14607 #define MIPS_GET_FCSR 0
14608 DIRECT_BUILTIN (get_fcsr
, MIPS_USI_FTYPE_VOID
, hard_float
),
14609 #define MIPS_SET_FCSR 1
14610 DIRECT_NO_TARGET_BUILTIN (set_fcsr
, MIPS_VOID_FTYPE_USI
, hard_float
),
14612 DIRECT_BUILTIN (pll_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
14613 DIRECT_BUILTIN (pul_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
14614 DIRECT_BUILTIN (plu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
14615 DIRECT_BUILTIN (puu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
14616 DIRECT_BUILTIN (cvt_ps_s
, MIPS_V2SF_FTYPE_SF_SF
, paired_single
),
14617 DIRECT_BUILTIN (cvt_s_pl
, MIPS_SF_FTYPE_V2SF
, paired_single
),
14618 DIRECT_BUILTIN (cvt_s_pu
, MIPS_SF_FTYPE_V2SF
, paired_single
),
14619 DIRECT_BUILTIN (abs_ps
, MIPS_V2SF_FTYPE_V2SF
, paired_single
),
14621 DIRECT_BUILTIN (alnv_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF_INT
, paired_single
),
14622 DIRECT_BUILTIN (addr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
14623 DIRECT_BUILTIN (mulr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
14624 DIRECT_BUILTIN (cvt_pw_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
14625 DIRECT_BUILTIN (cvt_ps_pw
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
14627 DIRECT_BUILTIN (recip1_s
, MIPS_SF_FTYPE_SF
, mips3d
),
14628 DIRECT_BUILTIN (recip1_d
, MIPS_DF_FTYPE_DF
, mips3d
),
14629 DIRECT_BUILTIN (recip1_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
14630 DIRECT_BUILTIN (recip2_s
, MIPS_SF_FTYPE_SF_SF
, mips3d
),
14631 DIRECT_BUILTIN (recip2_d
, MIPS_DF_FTYPE_DF_DF
, mips3d
),
14632 DIRECT_BUILTIN (recip2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
14634 DIRECT_BUILTIN (rsqrt1_s
, MIPS_SF_FTYPE_SF
, mips3d
),
14635 DIRECT_BUILTIN (rsqrt1_d
, MIPS_DF_FTYPE_DF
, mips3d
),
14636 DIRECT_BUILTIN (rsqrt1_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
14637 DIRECT_BUILTIN (rsqrt2_s
, MIPS_SF_FTYPE_SF_SF
, mips3d
),
14638 DIRECT_BUILTIN (rsqrt2_d
, MIPS_DF_FTYPE_DF_DF
, mips3d
),
14639 DIRECT_BUILTIN (rsqrt2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
14641 MIPS_FP_CONDITIONS (CMP_BUILTINS
),
14643 /* Built-in functions for the SB-1 processor. */
14644 DIRECT_BUILTIN (sqrt_ps
, MIPS_V2SF_FTYPE_V2SF
, sb1_paired_single
),
14646 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
14647 DIRECT_BUILTIN (addq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14648 DIRECT_BUILTIN (addq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14649 DIRECT_BUILTIN (addq_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14650 DIRECT_BUILTIN (addu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
14651 DIRECT_BUILTIN (addu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
14652 DIRECT_BUILTIN (subq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14653 DIRECT_BUILTIN (subq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14654 DIRECT_BUILTIN (subq_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14655 DIRECT_BUILTIN (subu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
14656 DIRECT_BUILTIN (subu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
14657 DIRECT_BUILTIN (addsc
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14658 DIRECT_BUILTIN (addwc
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14659 DIRECT_BUILTIN (modsub
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14660 DIRECT_BUILTIN (raddu_w_qb
, MIPS_SI_FTYPE_V4QI
, dsp
),
14661 DIRECT_BUILTIN (absq_s_ph
, MIPS_V2HI_FTYPE_V2HI
, dsp
),
14662 DIRECT_BUILTIN (absq_s_w
, MIPS_SI_FTYPE_SI
, dsp
),
14663 DIRECT_BUILTIN (precrq_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dsp
),
14664 DIRECT_BUILTIN (precrq_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, dsp
),
14665 DIRECT_BUILTIN (precrq_rs_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, dsp
),
14666 DIRECT_BUILTIN (precrqu_s_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dsp
),
14667 DIRECT_BUILTIN (preceq_w_phl
, MIPS_SI_FTYPE_V2HI
, dsp
),
14668 DIRECT_BUILTIN (preceq_w_phr
, MIPS_SI_FTYPE_V2HI
, dsp
),
14669 DIRECT_BUILTIN (precequ_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14670 DIRECT_BUILTIN (precequ_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14671 DIRECT_BUILTIN (precequ_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14672 DIRECT_BUILTIN (precequ_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14673 DIRECT_BUILTIN (preceu_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14674 DIRECT_BUILTIN (preceu_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14675 DIRECT_BUILTIN (preceu_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14676 DIRECT_BUILTIN (preceu_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
14677 DIRECT_BUILTIN (shll_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dsp
),
14678 DIRECT_BUILTIN (shll_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
14679 DIRECT_BUILTIN (shll_s_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
14680 DIRECT_BUILTIN (shll_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14681 DIRECT_BUILTIN (shrl_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dsp
),
14682 DIRECT_BUILTIN (shra_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
14683 DIRECT_BUILTIN (shra_r_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
14684 DIRECT_BUILTIN (shra_r_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14685 DIRECT_BUILTIN (muleu_s_ph_qbl
, MIPS_V2HI_FTYPE_V4QI_V2HI
, dsp
),
14686 DIRECT_BUILTIN (muleu_s_ph_qbr
, MIPS_V2HI_FTYPE_V4QI_V2HI
, dsp
),
14687 DIRECT_BUILTIN (mulq_rs_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14688 DIRECT_BUILTIN (muleq_s_w_phl
, MIPS_SI_FTYPE_V2HI_V2HI
, dsp
),
14689 DIRECT_BUILTIN (muleq_s_w_phr
, MIPS_SI_FTYPE_V2HI_V2HI
, dsp
),
14690 DIRECT_BUILTIN (bitrev
, MIPS_SI_FTYPE_SI
, dsp
),
14691 DIRECT_BUILTIN (insv
, MIPS_SI_FTYPE_SI_SI
, dsp
),
14692 DIRECT_BUILTIN (repl_qb
, MIPS_V4QI_FTYPE_SI
, dsp
),
14693 DIRECT_BUILTIN (repl_ph
, MIPS_V2HI_FTYPE_SI
, dsp
),
14694 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
14695 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
14696 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
14697 DIRECT_BUILTIN (cmpgu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
14698 DIRECT_BUILTIN (cmpgu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
14699 DIRECT_BUILTIN (cmpgu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
14700 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
14701 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
14702 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
14703 DIRECT_BUILTIN (pick_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
14704 DIRECT_BUILTIN (pick_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14705 DIRECT_BUILTIN (packrl_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
14706 DIRECT_NO_TARGET_BUILTIN (wrdsp
, MIPS_VOID_FTYPE_SI_SI
, dsp
),
14707 DIRECT_BUILTIN (rddsp
, MIPS_SI_FTYPE_SI
, dsp
),
14708 DIRECT_BUILTIN (lbux
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
14709 DIRECT_BUILTIN (lhx
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
14710 DIRECT_BUILTIN (lwx
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
14711 BPOSGE_BUILTIN (32, dsp
),
14713 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
14714 DIRECT_BUILTIN (absq_s_qb
, MIPS_V4QI_FTYPE_V4QI
, dspr2
),
14715 DIRECT_BUILTIN (addu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14716 DIRECT_BUILTIN (addu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14717 DIRECT_BUILTIN (adduh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
14718 DIRECT_BUILTIN (adduh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
14719 DIRECT_BUILTIN (append
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
14720 DIRECT_BUILTIN (balign
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
14721 DIRECT_BUILTIN (cmpgdu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
14722 DIRECT_BUILTIN (cmpgdu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
14723 DIRECT_BUILTIN (cmpgdu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
14724 DIRECT_BUILTIN (mul_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14725 DIRECT_BUILTIN (mul_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14726 DIRECT_BUILTIN (mulq_rs_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
14727 DIRECT_BUILTIN (mulq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14728 DIRECT_BUILTIN (mulq_s_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
14729 DIRECT_BUILTIN (precr_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dspr2
),
14730 DIRECT_BUILTIN (precr_sra_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, dspr2
),
14731 DIRECT_BUILTIN (precr_sra_r_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, dspr2
),
14732 DIRECT_BUILTIN (prepend
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
14733 DIRECT_BUILTIN (shra_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dspr2
),
14734 DIRECT_BUILTIN (shra_r_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dspr2
),
14735 DIRECT_BUILTIN (shrl_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dspr2
),
14736 DIRECT_BUILTIN (subu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14737 DIRECT_BUILTIN (subu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14738 DIRECT_BUILTIN (subuh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
14739 DIRECT_BUILTIN (subuh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
14740 DIRECT_BUILTIN (addqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14741 DIRECT_BUILTIN (addqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14742 DIRECT_BUILTIN (addqh_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
14743 DIRECT_BUILTIN (addqh_r_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
14744 DIRECT_BUILTIN (subqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14745 DIRECT_BUILTIN (subqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
14746 DIRECT_BUILTIN (subqh_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
14747 DIRECT_BUILTIN (subqh_r_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
14749 /* Built-in functions for the DSP ASE (32-bit only). */
14750 DIRECT_BUILTIN (dpau_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
14751 DIRECT_BUILTIN (dpau_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
14752 DIRECT_BUILTIN (dpsu_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
14753 DIRECT_BUILTIN (dpsu_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
14754 DIRECT_BUILTIN (dpaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14755 DIRECT_BUILTIN (dpsq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14756 DIRECT_BUILTIN (mulsaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14757 DIRECT_BUILTIN (dpaq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
14758 DIRECT_BUILTIN (dpsq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
14759 DIRECT_BUILTIN (maq_s_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14760 DIRECT_BUILTIN (maq_s_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14761 DIRECT_BUILTIN (maq_sa_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14762 DIRECT_BUILTIN (maq_sa_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
14763 DIRECT_BUILTIN (extr_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
14764 DIRECT_BUILTIN (extr_r_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
14765 DIRECT_BUILTIN (extr_rs_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
14766 DIRECT_BUILTIN (extr_s_h
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
14767 DIRECT_BUILTIN (extp
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
14768 DIRECT_BUILTIN (extpdp
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
14769 DIRECT_BUILTIN (shilo
, MIPS_DI_FTYPE_DI_SI
, dsp_32
),
14770 DIRECT_BUILTIN (mthlip
, MIPS_DI_FTYPE_DI_SI
, dsp_32
),
14771 DIRECT_BUILTIN (madd
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
14772 DIRECT_BUILTIN (maddu
, MIPS_DI_FTYPE_DI_USI_USI
, dsp_32
),
14773 DIRECT_BUILTIN (msub
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
14774 DIRECT_BUILTIN (msubu
, MIPS_DI_FTYPE_DI_USI_USI
, dsp_32
),
14775 DIRECT_BUILTIN (mult
, MIPS_DI_FTYPE_SI_SI
, dsp_32
),
14776 DIRECT_BUILTIN (multu
, MIPS_DI_FTYPE_USI_USI
, dsp_32
),
14778 /* Built-in functions for the DSP ASE (64-bit only). */
14779 DIRECT_BUILTIN (ldx
, MIPS_DI_FTYPE_POINTER_SI
, dsp_64
),
14781 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
14782 DIRECT_BUILTIN (dpa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14783 DIRECT_BUILTIN (dps_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14784 DIRECT_BUILTIN (mulsa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14785 DIRECT_BUILTIN (dpax_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14786 DIRECT_BUILTIN (dpsx_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14787 DIRECT_BUILTIN (dpaqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14788 DIRECT_BUILTIN (dpaqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14789 DIRECT_BUILTIN (dpsqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14790 DIRECT_BUILTIN (dpsqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
14792 /* Builtin functions for ST Microelectronics Loongson-2E/2F cores. */
14793 LOONGSON_BUILTIN (packsswh
, MIPS_V4HI_FTYPE_V2SI_V2SI
),
14794 LOONGSON_BUILTIN (packsshb
, MIPS_V8QI_FTYPE_V4HI_V4HI
),
14795 LOONGSON_BUILTIN (packushb
, MIPS_UV8QI_FTYPE_UV4HI_UV4HI
),
14796 LOONGSON_BUILTIN_SUFFIX (paddw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14797 LOONGSON_BUILTIN_SUFFIX (paddh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14798 LOONGSON_BUILTIN_SUFFIX (paddb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14799 LOONGSON_BUILTIN_SUFFIX (paddw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14800 LOONGSON_BUILTIN_SUFFIX (paddh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14801 LOONGSON_BUILTIN_SUFFIX (paddb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14802 LOONGSON_BUILTIN_SUFFIX (paddd
, u
, MIPS_UDI_FTYPE_UDI_UDI
),
14803 LOONGSON_BUILTIN_SUFFIX (paddd
, s
, MIPS_DI_FTYPE_DI_DI
),
14804 LOONGSON_BUILTIN (paddsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14805 LOONGSON_BUILTIN (paddsb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14806 LOONGSON_BUILTIN (paddush
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14807 LOONGSON_BUILTIN (paddusb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14808 LOONGSON_BUILTIN_ALIAS (pandn_d
, pandn_ud
, MIPS_UDI_FTYPE_UDI_UDI
),
14809 LOONGSON_BUILTIN_ALIAS (pandn_w
, pandn_uw
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14810 LOONGSON_BUILTIN_ALIAS (pandn_h
, pandn_uh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14811 LOONGSON_BUILTIN_ALIAS (pandn_b
, pandn_ub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14812 LOONGSON_BUILTIN_ALIAS (pandn_d
, pandn_sd
, MIPS_DI_FTYPE_DI_DI
),
14813 LOONGSON_BUILTIN_ALIAS (pandn_w
, pandn_sw
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14814 LOONGSON_BUILTIN_ALIAS (pandn_h
, pandn_sh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14815 LOONGSON_BUILTIN_ALIAS (pandn_b
, pandn_sb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14816 LOONGSON_BUILTIN (pavgh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14817 LOONGSON_BUILTIN (pavgb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14818 LOONGSON_BUILTIN_SUFFIX (pcmpeqw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14819 LOONGSON_BUILTIN_SUFFIX (pcmpeqh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14820 LOONGSON_BUILTIN_SUFFIX (pcmpeqb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14821 LOONGSON_BUILTIN_SUFFIX (pcmpeqw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14822 LOONGSON_BUILTIN_SUFFIX (pcmpeqh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14823 LOONGSON_BUILTIN_SUFFIX (pcmpeqb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14824 LOONGSON_BUILTIN_SUFFIX (pcmpgtw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14825 LOONGSON_BUILTIN_SUFFIX (pcmpgth
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14826 LOONGSON_BUILTIN_SUFFIX (pcmpgtb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14827 LOONGSON_BUILTIN_SUFFIX (pcmpgtw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14828 LOONGSON_BUILTIN_SUFFIX (pcmpgth
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14829 LOONGSON_BUILTIN_SUFFIX (pcmpgtb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14830 LOONGSON_BUILTIN_SUFFIX (pextrh
, u
, MIPS_UV4HI_FTYPE_UV4HI_USI
),
14831 LOONGSON_BUILTIN_SUFFIX (pextrh
, s
, MIPS_V4HI_FTYPE_V4HI_USI
),
14832 LOONGSON_BUILTIN_SUFFIX (pinsrh_0
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14833 LOONGSON_BUILTIN_SUFFIX (pinsrh_1
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14834 LOONGSON_BUILTIN_SUFFIX (pinsrh_2
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14835 LOONGSON_BUILTIN_SUFFIX (pinsrh_3
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14836 LOONGSON_BUILTIN_SUFFIX (pinsrh_0
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14837 LOONGSON_BUILTIN_SUFFIX (pinsrh_1
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14838 LOONGSON_BUILTIN_SUFFIX (pinsrh_2
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14839 LOONGSON_BUILTIN_SUFFIX (pinsrh_3
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14840 LOONGSON_BUILTIN (pmaddhw
, MIPS_V2SI_FTYPE_V4HI_V4HI
),
14841 LOONGSON_BUILTIN (pmaxsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14842 LOONGSON_BUILTIN (pmaxub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14843 LOONGSON_BUILTIN (pminsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14844 LOONGSON_BUILTIN (pminub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14845 LOONGSON_BUILTIN_SUFFIX (pmovmskb
, u
, MIPS_UV8QI_FTYPE_UV8QI
),
14846 LOONGSON_BUILTIN_SUFFIX (pmovmskb
, s
, MIPS_V8QI_FTYPE_V8QI
),
14847 LOONGSON_BUILTIN (pmulhuh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14848 LOONGSON_BUILTIN (pmulhh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14849 LOONGSON_BUILTIN (pmullh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14850 LOONGSON_BUILTIN (pmuluw
, MIPS_UDI_FTYPE_UV2SI_UV2SI
),
14851 LOONGSON_BUILTIN (pasubub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14852 LOONGSON_BUILTIN (biadd
, MIPS_UV4HI_FTYPE_UV8QI
),
14853 LOONGSON_BUILTIN (psadbh
, MIPS_UV4HI_FTYPE_UV8QI_UV8QI
),
14854 LOONGSON_BUILTIN_SUFFIX (pshufh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
14855 LOONGSON_BUILTIN_SUFFIX (pshufh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
14856 LOONGSON_BUILTIN_SUFFIX (psllh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
14857 LOONGSON_BUILTIN_SUFFIX (psllh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
14858 LOONGSON_BUILTIN_SUFFIX (psllw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
14859 LOONGSON_BUILTIN_SUFFIX (psllw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
14860 LOONGSON_BUILTIN_SUFFIX (psrah
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
14861 LOONGSON_BUILTIN_SUFFIX (psrah
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
14862 LOONGSON_BUILTIN_SUFFIX (psraw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
14863 LOONGSON_BUILTIN_SUFFIX (psraw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
14864 LOONGSON_BUILTIN_SUFFIX (psrlh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
14865 LOONGSON_BUILTIN_SUFFIX (psrlh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
14866 LOONGSON_BUILTIN_SUFFIX (psrlw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
14867 LOONGSON_BUILTIN_SUFFIX (psrlw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
14868 LOONGSON_BUILTIN_SUFFIX (psubw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14869 LOONGSON_BUILTIN_SUFFIX (psubh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14870 LOONGSON_BUILTIN_SUFFIX (psubb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14871 LOONGSON_BUILTIN_SUFFIX (psubw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14872 LOONGSON_BUILTIN_SUFFIX (psubh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14873 LOONGSON_BUILTIN_SUFFIX (psubb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14874 LOONGSON_BUILTIN_SUFFIX (psubd
, u
, MIPS_UDI_FTYPE_UDI_UDI
),
14875 LOONGSON_BUILTIN_SUFFIX (psubd
, s
, MIPS_DI_FTYPE_DI_DI
),
14876 LOONGSON_BUILTIN (psubsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14877 LOONGSON_BUILTIN (psubsb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14878 LOONGSON_BUILTIN (psubush
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14879 LOONGSON_BUILTIN (psubusb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14880 LOONGSON_BUILTIN_SUFFIX (punpckhbh
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14881 LOONGSON_BUILTIN_SUFFIX (punpckhhw
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14882 LOONGSON_BUILTIN_SUFFIX (punpckhwd
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14883 LOONGSON_BUILTIN_SUFFIX (punpckhbh
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14884 LOONGSON_BUILTIN_SUFFIX (punpckhhw
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14885 LOONGSON_BUILTIN_SUFFIX (punpckhwd
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14886 LOONGSON_BUILTIN_SUFFIX (punpcklbh
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
14887 LOONGSON_BUILTIN_SUFFIX (punpcklhw
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
14888 LOONGSON_BUILTIN_SUFFIX (punpcklwd
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
14889 LOONGSON_BUILTIN_SUFFIX (punpcklbh
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
14890 LOONGSON_BUILTIN_SUFFIX (punpcklhw
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
14891 LOONGSON_BUILTIN_SUFFIX (punpcklwd
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
14893 /* Sundry other built-in functions. */
14894 DIRECT_NO_TARGET_BUILTIN (cache
, MIPS_VOID_FTYPE_SI_CVPOINTER
, cache
)
14897 /* Index I is the function declaration for mips_builtins[I], or null if the
14898 function isn't defined on this target. */
14899 static GTY(()) tree mips_builtin_decls
[ARRAY_SIZE (mips_builtins
)];
14901 /* MODE is a vector mode whose elements have type TYPE. Return the type
14902 of the vector itself. */
14905 mips_builtin_vector_type (tree type
, machine_mode mode
)
14907 static tree types
[2 * (int) MAX_MACHINE_MODE
];
14910 mode_index
= (int) mode
;
14912 if (TREE_CODE (type
) == INTEGER_TYPE
&& TYPE_UNSIGNED (type
))
14913 mode_index
+= MAX_MACHINE_MODE
;
14915 if (types
[mode_index
] == NULL_TREE
)
14916 types
[mode_index
] = build_vector_type_for_mode (type
, mode
);
14917 return types
[mode_index
];
14920 /* Return a type for 'const volatile void *'. */
14923 mips_build_cvpointer_type (void)
14927 if (cache
== NULL_TREE
)
14928 cache
= build_pointer_type (build_qualified_type
14930 TYPE_QUAL_CONST
| TYPE_QUAL_VOLATILE
));
/* Source-level argument types.  */
#define MIPS_ATYPE_VOID void_type_node
#define MIPS_ATYPE_INT integer_type_node
#define MIPS_ATYPE_POINTER ptr_type_node
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()

/* Standard mode-based argument types.  */
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
#define MIPS_ATYPE_UDI unsigned_intDI_type_node
#define MIPS_ATYPE_SF float_type_node
#define MIPS_ATYPE_DF double_type_node

/* Vector argument types.  */
#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
#define MIPS_ATYPE_UV2SI \
  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI \
  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
#define MIPS_ATYPE_UV8QI \
  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)

/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
   their associated MIPS_ATYPEs.  */
#define MIPS_FTYPE_ATYPES1(A, B) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B

#define MIPS_FTYPE_ATYPES2(A, B, C) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C

#define MIPS_FTYPE_ATYPES3(A, B, C, D) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D

#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E
14978 /* Return the function type associated with function prototype TYPE. */
14981 mips_build_function_type (enum mips_function_type type
)
14983 static tree types
[(int) MIPS_MAX_FTYPE_MAX
];
14985 if (types
[(int) type
] == NULL_TREE
)
14988 #define DEF_MIPS_FTYPE(NUM, ARGS) \
14989 case MIPS_FTYPE_NAME##NUM ARGS: \
14990 types[(int) type] \
14991 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
14994 #include "config/mips/mips-ftypes.def"
14995 #undef DEF_MIPS_FTYPE
14997 gcc_unreachable ();
15000 return types
[(int) type
];
15003 /* Implement TARGET_INIT_BUILTINS. */
15006 mips_init_builtins (void)
15008 const struct mips_builtin_description
*d
;
15011 /* Iterate through all of the bdesc arrays, initializing all of the
15012 builtin functions. */
15013 for (i
= 0; i
< ARRAY_SIZE (mips_builtins
); i
++)
15015 d
= &mips_builtins
[i
];
15017 mips_builtin_decls
[i
]
15018 = add_builtin_function (d
->name
,
15019 mips_build_function_type (d
->function_type
),
15020 i
, BUILT_IN_MD
, NULL
, NULL
);
15024 /* Implement TARGET_BUILTIN_DECL. */
15027 mips_builtin_decl (unsigned int code
, bool initialize_p ATTRIBUTE_UNUSED
)
15029 if (code
>= ARRAY_SIZE (mips_builtins
))
15030 return error_mark_node
;
15031 return mips_builtin_decls
[code
];
15034 /* Take argument ARGNO from EXP's argument list and convert it into
15035 an expand operand. Store the operand in *OP. */
15038 mips_prepare_builtin_arg (struct expand_operand
*op
, tree exp
,
15039 unsigned int argno
)
15044 arg
= CALL_EXPR_ARG (exp
, argno
);
15045 value
= expand_normal (arg
);
15046 create_input_operand (op
, value
, TYPE_MODE (TREE_TYPE (arg
)));
15049 /* Expand instruction ICODE as part of a built-in function sequence.
15050 Use the first NOPS elements of OPS as the instruction's operands.
15051 HAS_TARGET_P is true if operand 0 is a target; it is false if the
15052 instruction has no target.
15054 Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */
15057 mips_expand_builtin_insn (enum insn_code icode
, unsigned int nops
,
15058 struct expand_operand
*ops
, bool has_target_p
)
15060 if (!maybe_expand_insn (icode
, nops
, ops
))
15062 error ("invalid argument to built-in function");
15063 return has_target_p
? gen_reg_rtx (ops
[0].mode
) : const0_rtx
;
15065 return has_target_p
? ops
[0].value
: const0_rtx
;
15068 /* Expand a floating-point comparison for built-in function call EXP.
15069 The first NARGS arguments are the values to be compared. ICODE is
15070 the .md pattern that does the comparison and COND is the condition
15071 that is being tested. Return an rtx for the result. */
15074 mips_expand_builtin_compare_1 (enum insn_code icode
,
15075 enum mips_fp_condition cond
,
15076 tree exp
, int nargs
)
15078 struct expand_operand ops
[MAX_RECOG_OPERANDS
];
15082 /* The instruction should have a target operand, an operand for each
15083 argument, and an operand for COND. */
15084 gcc_assert (nargs
+ 2 == insn_data
[(int) icode
].n_generator_args
);
15086 output
= mips_allocate_fcc (insn_data
[(int) icode
].operand
[0].mode
);
15088 create_fixed_operand (&ops
[opno
++], output
);
15089 for (argno
= 0; argno
< nargs
; argno
++)
15090 mips_prepare_builtin_arg (&ops
[opno
++], exp
, argno
);
15091 create_integer_operand (&ops
[opno
++], (int) cond
);
15092 return mips_expand_builtin_insn (icode
, opno
, ops
, true);
15095 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
15096 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
15097 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
15098 suggests a good place to put the result. */
15101 mips_expand_builtin_direct (enum insn_code icode
, rtx target
, tree exp
,
15104 struct expand_operand ops
[MAX_RECOG_OPERANDS
];
15107 /* Map any target to operand 0. */
15110 create_output_operand (&ops
[opno
++], target
, TYPE_MODE (TREE_TYPE (exp
)));
15112 /* Map the arguments to the other operands. */
15113 gcc_assert (opno
+ call_expr_nargs (exp
)
15114 == insn_data
[icode
].n_generator_args
);
15115 for (argno
= 0; argno
< call_expr_nargs (exp
); argno
++)
15116 mips_prepare_builtin_arg (&ops
[opno
++], exp
, argno
);
15118 return mips_expand_builtin_insn (icode
, opno
, ops
, has_target_p
);
15121 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
15122 function; TYPE says which. EXP is the CALL_EXPR that calls the
15123 function, ICODE is the instruction that should be used to compare
15124 the first two arguments, and COND is the condition it should test.
15125 TARGET, if nonnull, suggests a good place to put the result. */
15128 mips_expand_builtin_movtf (enum mips_builtin_type type
,
15129 enum insn_code icode
, enum mips_fp_condition cond
,
15130 rtx target
, tree exp
)
15132 struct expand_operand ops
[4];
15135 cmp_result
= mips_expand_builtin_compare_1 (icode
, cond
, exp
, 2);
15136 create_output_operand (&ops
[0], target
, TYPE_MODE (TREE_TYPE (exp
)));
15137 if (type
== MIPS_BUILTIN_MOVT
)
15139 mips_prepare_builtin_arg (&ops
[2], exp
, 2);
15140 mips_prepare_builtin_arg (&ops
[1], exp
, 3);
15144 mips_prepare_builtin_arg (&ops
[1], exp
, 2);
15145 mips_prepare_builtin_arg (&ops
[2], exp
, 3);
15147 create_fixed_operand (&ops
[3], cmp_result
);
15148 return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps
,
15152 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
15153 into TARGET otherwise. Return TARGET. */
15156 mips_builtin_branch_and_move (rtx condition
, rtx target
,
15157 rtx value_if_true
, rtx value_if_false
)
15159 rtx_code_label
*true_label
, *done_label
;
15161 true_label
= gen_label_rtx ();
15162 done_label
= gen_label_rtx ();
15164 /* First assume that CONDITION is false. */
15165 mips_emit_move (target
, value_if_false
);
15167 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
15168 emit_jump_insn (gen_condjump (condition
, true_label
));
15169 emit_jump_insn (gen_jump (done_label
));
15172 /* Fix TARGET if CONDITION is true. */
15173 emit_label (true_label
);
15174 mips_emit_move (target
, value_if_true
);
15176 emit_label (done_label
);
15180 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
15181 the CALL_EXPR that calls the function, ICODE is the code of the
15182 comparison instruction, and COND is the condition it should test.
15183 TARGET, if nonnull, suggests a good place to put the boolean result. */
15186 mips_expand_builtin_compare (enum mips_builtin_type builtin_type
,
15187 enum insn_code icode
, enum mips_fp_condition cond
,
15188 rtx target
, tree exp
)
15190 rtx offset
, condition
, cmp_result
;
15192 if (target
== 0 || GET_MODE (target
) != SImode
)
15193 target
= gen_reg_rtx (SImode
);
15194 cmp_result
= mips_expand_builtin_compare_1 (icode
, cond
, exp
,
15195 call_expr_nargs (exp
));
15197 /* If the comparison sets more than one register, we define the result
15198 to be 0 if all registers are false and -1 if all registers are true.
15199 The value of the complete result is indeterminate otherwise. */
15200 switch (builtin_type
)
15202 case MIPS_BUILTIN_CMP_ALL
:
15203 condition
= gen_rtx_NE (VOIDmode
, cmp_result
, constm1_rtx
);
15204 return mips_builtin_branch_and_move (condition
, target
,
15205 const0_rtx
, const1_rtx
);
15207 case MIPS_BUILTIN_CMP_UPPER
:
15208 case MIPS_BUILTIN_CMP_LOWER
:
15209 offset
= GEN_INT (builtin_type
== MIPS_BUILTIN_CMP_UPPER
);
15210 condition
= gen_single_cc (cmp_result
, offset
);
15211 return mips_builtin_branch_and_move (condition
, target
,
15212 const1_rtx
, const0_rtx
);
15215 condition
= gen_rtx_NE (VOIDmode
, cmp_result
, const0_rtx
);
15216 return mips_builtin_branch_and_move (condition
, target
,
15217 const1_rtx
, const0_rtx
);
15221 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
15222 if nonnull, suggests a good place to put the boolean result. */
15225 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type
, rtx target
)
15227 rtx condition
, cmp_result
;
15230 if (target
== 0 || GET_MODE (target
) != SImode
)
15231 target
= gen_reg_rtx (SImode
);
15233 cmp_result
= gen_rtx_REG (CCDSPmode
, CCDSP_PO_REGNUM
);
15235 if (builtin_type
== MIPS_BUILTIN_BPOSGE32
)
15240 condition
= gen_rtx_GE (VOIDmode
, cmp_result
, GEN_INT (cmp_value
));
15241 return mips_builtin_branch_and_move (condition
, target
,
15242 const1_rtx
, const0_rtx
);
15245 /* Implement TARGET_EXPAND_BUILTIN. */
15248 mips_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
15249 machine_mode mode
, int ignore
)
15252 unsigned int fcode
, avail
;
15253 const struct mips_builtin_description
*d
;
15255 fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15256 fcode
= DECL_FUNCTION_CODE (fndecl
);
15257 gcc_assert (fcode
< ARRAY_SIZE (mips_builtins
));
15258 d
= &mips_builtins
[fcode
];
15259 avail
= d
->avail ();
15260 gcc_assert (avail
!= 0);
15261 if (TARGET_MIPS16
&& !(avail
& BUILTIN_AVAIL_MIPS16
))
15263 error ("built-in function %qE not supported for MIPS16",
15264 DECL_NAME (fndecl
));
15265 return ignore
? const0_rtx
: CONST0_RTX (mode
);
15267 switch (d
->builtin_type
)
15269 case MIPS_BUILTIN_DIRECT
:
15270 return mips_expand_builtin_direct (d
->icode
, target
, exp
, true);
15272 case MIPS_BUILTIN_DIRECT_NO_TARGET
:
15273 return mips_expand_builtin_direct (d
->icode
, target
, exp
, false);
15275 case MIPS_BUILTIN_MOVT
:
15276 case MIPS_BUILTIN_MOVF
:
15277 return mips_expand_builtin_movtf (d
->builtin_type
, d
->icode
,
15278 d
->cond
, target
, exp
);
15280 case MIPS_BUILTIN_CMP_ANY
:
15281 case MIPS_BUILTIN_CMP_ALL
:
15282 case MIPS_BUILTIN_CMP_UPPER
:
15283 case MIPS_BUILTIN_CMP_LOWER
:
15284 case MIPS_BUILTIN_CMP_SINGLE
:
15285 return mips_expand_builtin_compare (d
->builtin_type
, d
->icode
,
15286 d
->cond
, target
, exp
);
15288 case MIPS_BUILTIN_BPOSGE32
:
15289 return mips_expand_builtin_bposge (d
->builtin_type
, target
);
15291 gcc_unreachable ();
15294 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
15295 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
15296 struct mips16_constant
{
15297 struct mips16_constant
*next
;
15299 rtx_code_label
*label
;
/* Information about an incomplete MIPS16 constant pool.  FIRST is the
   first constant, HIGHEST_ADDRESS is the highest address that the first
   byte of the pool can have, and INSN_ADDRESS is the current instruction
   address.  */
struct mips16_constant_pool {
  struct mips16_constant *first;
  int highest_address;
  int insn_address;
};
15313 /* Add constant VALUE to POOL and return its label. MODE is the
15314 value's mode (used for CONST_INTs, etc.). */
15316 static rtx_code_label
*
15317 mips16_add_constant (struct mips16_constant_pool
*pool
,
15318 rtx value
, machine_mode mode
)
15320 struct mips16_constant
**p
, *c
;
15321 bool first_of_size_p
;
15323 /* See whether the constant is already in the pool. If so, return the
15324 existing label, otherwise leave P pointing to the place where the
15325 constant should be added.
15327 Keep the pool sorted in increasing order of mode size so that we can
15328 reduce the number of alignments needed. */
15329 first_of_size_p
= true;
15330 for (p
= &pool
->first
; *p
!= 0; p
= &(*p
)->next
)
15332 if (mode
== (*p
)->mode
&& rtx_equal_p (value
, (*p
)->value
))
15333 return (*p
)->label
;
15334 if (GET_MODE_SIZE (mode
) < GET_MODE_SIZE ((*p
)->mode
))
15336 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE ((*p
)->mode
))
15337 first_of_size_p
= false;
15340 /* In the worst case, the constant needed by the earliest instruction
15341 will end up at the end of the pool. The entire pool must then be
15342 accessible from that instruction.
15344 When adding the first constant, set the pool's highest address to
15345 the address of the first out-of-range byte. Adjust this address
15346 downwards each time a new constant is added. */
15347 if (pool
->first
== 0)
15348 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
15349 of the instruction with the lowest two bits clear. The base PC
15350 value for LDPC has the lowest three bits clear. Assume the worst
15351 case here; namely that the PC-relative instruction occupies the
15352 last 2 bytes in an aligned word. */
15353 pool
->highest_address
= pool
->insn_address
- (UNITS_PER_WORD
- 2) + 0x8000;
15354 pool
->highest_address
-= GET_MODE_SIZE (mode
);
15355 if (first_of_size_p
)
15356 /* Take into account the worst possible padding due to alignment. */
15357 pool
->highest_address
-= GET_MODE_SIZE (mode
) - 1;
15359 /* Create a new entry. */
15360 c
= XNEW (struct mips16_constant
);
15363 c
->label
= gen_label_rtx ();
15370 /* Output constant VALUE after instruction INSN and return the last
15371 instruction emitted. MODE is the mode of the constant. */
15374 mips16_emit_constants_1 (machine_mode mode
, rtx value
, rtx_insn
*insn
)
15376 if (SCALAR_INT_MODE_P (mode
) || ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
15378 rtx size
= GEN_INT (GET_MODE_SIZE (mode
));
15379 return emit_insn_after (gen_consttable_int (value
, size
), insn
);
15382 if (SCALAR_FLOAT_MODE_P (mode
))
15383 return emit_insn_after (gen_consttable_float (value
), insn
);
15385 if (VECTOR_MODE_P (mode
))
15389 for (i
= 0; i
< CONST_VECTOR_NUNITS (value
); i
++)
15390 insn
= mips16_emit_constants_1 (GET_MODE_INNER (mode
),
15391 CONST_VECTOR_ELT (value
, i
), insn
);
15395 gcc_unreachable ();
15398 /* Dump out the constants in CONSTANTS after INSN. */
15401 mips16_emit_constants (struct mips16_constant
*constants
, rtx_insn
*insn
)
15403 struct mips16_constant
*c
, *next
;
15407 for (c
= constants
; c
!= NULL
; c
= next
)
15409 /* If necessary, increase the alignment of PC. */
15410 if (align
< GET_MODE_SIZE (c
->mode
))
15412 int align_log
= floor_log2 (GET_MODE_SIZE (c
->mode
));
15413 insn
= emit_insn_after (gen_align (GEN_INT (align_log
)), insn
);
15415 align
= GET_MODE_SIZE (c
->mode
);
15417 insn
= emit_label_after (c
->label
, insn
);
15418 insn
= mips16_emit_constants_1 (c
->mode
, c
->value
, insn
);
15424 emit_barrier_after (insn
);
15427 /* Return the length of instruction INSN. */
15430 mips16_insn_length (rtx_insn
*insn
)
15432 if (JUMP_TABLE_DATA_P (insn
))
15434 rtx body
= PATTERN (insn
);
15435 if (GET_CODE (body
) == ADDR_VEC
)
15436 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 0);
15437 else if (GET_CODE (body
) == ADDR_DIFF_VEC
)
15438 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 1);
15440 gcc_unreachable ();
15442 return get_attr_length (insn
);
15445 /* If *X is a symbolic constant that refers to the constant pool, add
15446 the constant to POOL and rewrite *X to use the constant's label. */
15449 mips16_rewrite_pool_constant (struct mips16_constant_pool
*pool
, rtx
*x
)
15452 rtx_code_label
*label
;
15454 split_const (*x
, &base
, &offset
);
15455 if (GET_CODE (base
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (base
))
15457 label
= mips16_add_constant (pool
, copy_rtx (get_pool_constant (base
)),
15458 get_pool_mode (base
));
15459 base
= gen_rtx_LABEL_REF (Pmode
, label
);
15460 *x
= mips_unspec_address_offset (base
, offset
, SYMBOL_PC_RELATIVE
);
15464 /* Rewrite INSN so that constant pool references refer to the constant's
15468 mips16_rewrite_pool_refs (rtx_insn
*insn
, struct mips16_constant_pool
*pool
)
15470 subrtx_ptr_iterator::array_type array
;
15471 FOR_EACH_SUBRTX_PTR (iter
, array
, &PATTERN (insn
), ALL
)
15475 if (force_to_mem_operand (*loc
, Pmode
))
15477 rtx mem
= force_const_mem (GET_MODE (*loc
), *loc
);
15478 validate_change (insn
, loc
, mem
, false);
15483 mips16_rewrite_pool_constant (pool
, &XEXP (*loc
, 0));
15484 iter
.skip_subrtxes ();
15488 if (TARGET_MIPS16_TEXT_LOADS
)
15489 mips16_rewrite_pool_constant (pool
, loc
);
15490 if (GET_CODE (*loc
) == CONST
15491 /* Don't rewrite the __mips16_rdwr symbol. */
15492 || (GET_CODE (*loc
) == UNSPEC
15493 && XINT (*loc
, 1) == UNSPEC_TLS_GET_TP
))
15494 iter
.skip_subrtxes ();
15499 /* Return whether CFG is used in mips_reorg. */
15502 mips_cfg_in_reorg (void)
15504 return (mips_r10k_cache_barrier
!= R10K_CACHE_BARRIER_NONE
15505 || TARGET_RELAX_PIC_CALLS
);
15508 /* Build MIPS16 constant pools. Split the instructions if SPLIT_P,
15509 otherwise assume that they are already split. */
15512 mips16_lay_out_constants (bool split_p
)
15514 struct mips16_constant_pool pool
;
15515 rtx_insn
*insn
, *barrier
;
15517 if (!TARGET_MIPS16_PCREL_LOADS
)
15522 if (mips_cfg_in_reorg ())
15523 split_all_insns ();
15525 split_all_insns_noflow ();
15528 memset (&pool
, 0, sizeof (pool
));
15529 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
15531 /* Rewrite constant pool references in INSN. */
15532 if (USEFUL_INSN_P (insn
))
15533 mips16_rewrite_pool_refs (insn
, &pool
);
15535 pool
.insn_address
+= mips16_insn_length (insn
);
15537 if (pool
.first
!= NULL
)
15539 /* If there are no natural barriers between the first user of
15540 the pool and the highest acceptable address, we'll need to
15541 create a new instruction to jump around the constant pool.
15542 In the worst case, this instruction will be 4 bytes long.
15544 If it's too late to do this transformation after INSN,
15545 do it immediately before INSN. */
15546 if (barrier
== 0 && pool
.insn_address
+ 4 > pool
.highest_address
)
15548 rtx_code_label
*label
;
15551 label
= gen_label_rtx ();
15553 jump
= emit_jump_insn_before (gen_jump (label
), insn
);
15554 JUMP_LABEL (jump
) = label
;
15555 LABEL_NUSES (label
) = 1;
15556 barrier
= emit_barrier_after (jump
);
15558 emit_label_after (label
, barrier
);
15559 pool
.insn_address
+= 4;
15562 /* See whether the constant pool is now out of range of the first
15563 user. If so, output the constants after the previous barrier.
15564 Note that any instructions between BARRIER and INSN (inclusive)
15565 will use negative offsets to refer to the pool. */
15566 if (pool
.insn_address
> pool
.highest_address
)
15568 mips16_emit_constants (pool
.first
, barrier
);
15572 else if (BARRIER_P (insn
))
15576 mips16_emit_constants (pool
.first
, get_last_insn ());
15579 /* Return true if it is worth r10k_simplify_address's while replacing
15580 an address with X. We are looking for constants, and for addresses
15581 at a known offset from the incoming stack pointer. */
15584 r10k_simplified_address_p (rtx x
)
15586 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1)))
15588 return x
== virtual_incoming_args_rtx
|| CONSTANT_P (x
);
15591 /* X is an expression that appears in INSN. Try to use the UD chains
15592 to simplify it, returning the simplified form on success and the
15593 original form otherwise. Replace the incoming value of $sp with
15594 virtual_incoming_args_rtx (which should never occur in X otherwise). */
15597 r10k_simplify_address (rtx x
, rtx_insn
*insn
)
15599 rtx newx
, op0
, op1
, set
, note
;
15600 rtx_insn
*def_insn
;
15602 struct df_link
*defs
;
15607 op0
= r10k_simplify_address (XEXP (x
, 0), insn
);
15608 if (op0
!= XEXP (x
, 0))
15609 newx
= simplify_gen_unary (GET_CODE (x
), GET_MODE (x
),
15610 op0
, GET_MODE (XEXP (x
, 0)));
15612 else if (BINARY_P (x
))
15614 op0
= r10k_simplify_address (XEXP (x
, 0), insn
);
15615 op1
= r10k_simplify_address (XEXP (x
, 1), insn
);
15616 if (op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1))
15617 newx
= simplify_gen_binary (GET_CODE (x
), GET_MODE (x
), op0
, op1
);
15619 else if (GET_CODE (x
) == LO_SUM
)
15621 /* LO_SUMs can be offset from HIGHs, if we know they won't
15622 overflow. See mips_classify_address for the rationale behind
15624 op0
= r10k_simplify_address (XEXP (x
, 0), insn
);
15625 if (GET_CODE (op0
) == HIGH
)
15626 newx
= XEXP (x
, 1);
15628 else if (REG_P (x
))
15630 /* Uses are recorded by regno_reg_rtx, not X itself. */
15631 use
= df_find_use (insn
, regno_reg_rtx
[REGNO (x
)]);
15633 defs
= DF_REF_CHAIN (use
);
15635 /* Require a single definition. */
15636 if (defs
&& defs
->next
== NULL
)
15639 if (DF_REF_IS_ARTIFICIAL (def
))
15641 /* Replace the incoming value of $sp with
15642 virtual_incoming_args_rtx. */
15643 if (x
== stack_pointer_rtx
15644 && DF_REF_BB (def
) == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
15645 newx
= virtual_incoming_args_rtx
;
15647 else if (dominated_by_p (CDI_DOMINATORS
, DF_REF_BB (use
),
15650 /* Make sure that DEF_INSN is a single set of REG. */
15651 def_insn
= DF_REF_INSN (def
);
15652 if (NONJUMP_INSN_P (def_insn
))
15654 set
= single_set (def_insn
);
15655 if (set
&& rtx_equal_p (SET_DEST (set
), x
))
15657 /* Prefer to use notes, since the def-use chains
15658 are often shorter. */
15659 note
= find_reg_equal_equiv_note (def_insn
);
15661 newx
= XEXP (note
, 0);
15663 newx
= SET_SRC (set
);
15664 newx
= r10k_simplify_address (newx
, def_insn
);
15670 if (newx
&& r10k_simplified_address_p (newx
))
15675 /* Return true if ADDRESS is known to be an uncached address
15676 on R10K systems. */
15679 r10k_uncached_address_p (unsigned HOST_WIDE_INT address
)
15681 unsigned HOST_WIDE_INT upper
;
15683 /* Check for KSEG1. */
15684 if (address
+ 0x60000000 < 0x20000000)
15687 /* Check for uncached XKPHYS addresses. */
15688 if (Pmode
== DImode
)
15690 upper
= (address
>> 40) & 0xf9ffff;
15691 if (upper
== 0x900000 || upper
== 0xb80000)
15697 /* Return true if we can prove that an access to address X in instruction
15698 INSN would be safe from R10K speculation. This X is a general
15699 expression; it might not be a legitimate address. */
15702 r10k_safe_address_p (rtx x
, rtx_insn
*insn
)
15705 HOST_WIDE_INT offset_val
;
15707 x
= r10k_simplify_address (x
, insn
);
15709 /* Check for references to the stack frame. It doesn't really matter
15710 how much of the frame has been allocated at INSN; -mr10k-cache-barrier
15711 allows us to assume that accesses to any part of the eventual frame
15712 is safe from speculation at any point in the function. */
15713 mips_split_plus (x
, &base
, &offset_val
);
15714 if (base
== virtual_incoming_args_rtx
15715 && offset_val
>= -cfun
->machine
->frame
.total_size
15716 && offset_val
< cfun
->machine
->frame
.args_size
)
15719 /* Check for uncached addresses. */
15720 if (CONST_INT_P (x
))
15721 return r10k_uncached_address_p (INTVAL (x
));
15723 /* Check for accesses to a static object. */
15724 split_const (x
, &base
, &offset
);
15725 return offset_within_block_p (base
, INTVAL (offset
));
15728 /* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
15729 an in-range access to an automatic variable, or to an object with
15730 a link-time-constant address. */
15733 r10k_safe_mem_expr_p (tree expr
, unsigned HOST_WIDE_INT offset
)
15735 HOST_WIDE_INT bitoffset
, bitsize
;
15736 tree inner
, var_offset
;
15738 int unsigned_p
, reverse_p
, volatile_p
;
15740 inner
= get_inner_reference (expr
, &bitsize
, &bitoffset
, &var_offset
, &mode
,
15741 &unsigned_p
, &reverse_p
, &volatile_p
, false);
15742 if (!DECL_P (inner
) || !DECL_SIZE_UNIT (inner
) || var_offset
)
15745 offset
+= bitoffset
/ BITS_PER_UNIT
;
15746 return offset
< tree_to_uhwi (DECL_SIZE_UNIT (inner
));
15749 /* Return true if X contains a MEM that is not safe from R10K speculation.
15750 INSN is the instruction that contains X. */
15753 r10k_needs_protection_p_1 (rtx x
, rtx_insn
*insn
)
15755 subrtx_var_iterator::array_type array
;
15756 FOR_EACH_SUBRTX_VAR (iter
, array
, x
, NONCONST
)
15761 if ((MEM_EXPR (mem
)
15762 && MEM_OFFSET_KNOWN_P (mem
)
15763 && r10k_safe_mem_expr_p (MEM_EXPR (mem
), MEM_OFFSET (mem
)))
15764 || r10k_safe_address_p (XEXP (mem
, 0), insn
))
15765 iter
.skip_subrtxes ();
15773 /* A note_stores callback for which DATA points to an instruction pointer.
15774 If *DATA is nonnull, make it null if it X contains a MEM that is not
15775 safe from R10K speculation. */
15778 r10k_needs_protection_p_store (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
,
15781 rtx_insn
**insn_ptr
;
15783 insn_ptr
= (rtx_insn
**) data
;
15784 if (*insn_ptr
&& r10k_needs_protection_p_1 (x
, *insn_ptr
))
15788 /* X is the pattern of a call instruction. Return true if the call is
15789 not to a declared function. */
15792 r10k_needs_protection_p_call (const_rtx x
)
15794 subrtx_iterator::array_type array
;
15795 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
15797 const_rtx mem
= *iter
;
15800 const_rtx addr
= XEXP (mem
, 0);
15801 if (GET_CODE (addr
) == SYMBOL_REF
&& SYMBOL_REF_DECL (addr
))
15802 iter
.skip_subrtxes ();
15810 /* Return true if instruction INSN needs to be protected by an R10K
15814 r10k_needs_protection_p (rtx_insn
*insn
)
15817 return r10k_needs_protection_p_call (PATTERN (insn
));
15819 if (mips_r10k_cache_barrier
== R10K_CACHE_BARRIER_STORE
)
15821 note_stores (PATTERN (insn
), r10k_needs_protection_p_store
, &insn
);
15822 return insn
== NULL_RTX
;
15825 return r10k_needs_protection_p_1 (PATTERN (insn
), insn
);
15828 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
15829 edge is unconditional. */
15832 r10k_protected_bb_p (basic_block bb
, sbitmap protected_bbs
)
15837 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
15838 if (!single_succ_p (e
->src
)
15839 || !bitmap_bit_p (protected_bbs
, e
->src
->index
)
15840 || (e
->flags
& EDGE_COMPLEX
) != 0)
15845 /* Implement -mr10k-cache-barrier= for the current function. */
15848 r10k_insert_cache_barriers (void)
15850 int *rev_post_order
;
15853 sbitmap protected_bbs
;
15854 rtx_insn
*insn
, *end
;
15855 rtx unprotected_region
;
15859 sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
15863 /* Calculate dominators. */
15864 calculate_dominance_info (CDI_DOMINATORS
);
15866 /* Bit X of PROTECTED_BBS is set if the last operation in basic block
15867 X is protected by a cache barrier. */
15868 protected_bbs
= sbitmap_alloc (last_basic_block_for_fn (cfun
));
15869 bitmap_clear (protected_bbs
);
15871 /* Iterate over the basic blocks in reverse post-order. */
15872 rev_post_order
= XNEWVEC (int, last_basic_block_for_fn (cfun
));
15873 n
= pre_and_rev_post_order_compute (NULL
, rev_post_order
, false);
15874 for (i
= 0; i
< n
; i
++)
15876 bb
= BASIC_BLOCK_FOR_FN (cfun
, rev_post_order
[i
]);
15878 /* If this block is only reached by unconditional edges, and if the
15879 source of every edge is protected, the beginning of the block is
15881 if (r10k_protected_bb_p (bb
, protected_bbs
))
15882 unprotected_region
= NULL_RTX
;
15884 unprotected_region
= pc_rtx
;
15885 end
= NEXT_INSN (BB_END (bb
));
15887 /* UNPROTECTED_REGION is:
15889 - null if we are processing a protected region,
15890 - pc_rtx if we are processing an unprotected region but have
15891 not yet found the first instruction in it
15892 - the first instruction in an unprotected region otherwise. */
15893 for (insn
= BB_HEAD (bb
); insn
!= end
; insn
= NEXT_INSN (insn
))
15895 if (unprotected_region
&& USEFUL_INSN_P (insn
))
15897 if (recog_memoized (insn
) == CODE_FOR_mips_cache
)
15898 /* This CACHE instruction protects the following code. */
15899 unprotected_region
= NULL_RTX
;
15902 /* See if INSN is the first instruction in this
15903 unprotected region. */
15904 if (unprotected_region
== pc_rtx
)
15905 unprotected_region
= insn
;
15907 /* See if INSN needs to be protected. If so,
15908 we must insert a cache barrier somewhere between
15909 PREV_INSN (UNPROTECTED_REGION) and INSN. It isn't
15910 clear which position is better performance-wise,
15911 but as a tie-breaker, we assume that it is better
15912 to allow delay slots to be back-filled where
15913 possible, and that it is better not to insert
15914 barriers in the middle of already-scheduled code.
15915 We therefore insert the barrier at the beginning
15917 if (r10k_needs_protection_p (insn
))
15919 emit_insn_before (gen_r10k_cache_barrier (),
15920 unprotected_region
);
15921 unprotected_region
= NULL_RTX
;
15927 /* The called function is not required to protect the exit path.
15928 The code that follows a call is therefore unprotected. */
15929 unprotected_region
= pc_rtx
;
15932 /* Record whether the end of this block is protected. */
15933 if (unprotected_region
== NULL_RTX
)
15934 bitmap_set_bit (protected_bbs
, bb
->index
);
15936 XDELETEVEC (rev_post_order
);
15938 sbitmap_free (protected_bbs
);
15940 free_dominance_info (CDI_DOMINATORS
);
15943 /* If INSN is a call, return the underlying CALL expr. Return NULL_RTX
15944 otherwise. If INSN has two call rtx, then store the second one in
15948 mips_call_expr_from_insn (rtx_insn
*insn
, rtx
*second_call
)
15953 if (!CALL_P (insn
))
15956 x
= PATTERN (insn
);
15957 if (GET_CODE (x
) == PARALLEL
)
15959 /* Calls returning complex values have two CALL rtx. Look for the second
15960 one here, and return it via the SECOND_CALL arg. */
15961 x2
= XVECEXP (x
, 0, 1);
15962 if (GET_CODE (x2
) == SET
)
15964 if (GET_CODE (x2
) == CALL
)
15967 x
= XVECEXP (x
, 0, 0);
15969 if (GET_CODE (x
) == SET
)
15971 gcc_assert (GET_CODE (x
) == CALL
);
15976 /* REG is set in DEF. See if the definition is one of the ways we load a
15977 register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
15978 If it is, return the symbol reference of the function, otherwise return
15981 If RECURSE_P is true, use mips_find_pic_call_symbol to interpret
15982 the values of source registers, otherwise treat such registers as
15983 having an unknown value. */
15986 mips_pic_call_symbol_from_set (df_ref def
, rtx reg
, bool recurse_p
)
15988 rtx_insn
*def_insn
;
15991 if (DF_REF_IS_ARTIFICIAL (def
))
15994 def_insn
= DF_REF_INSN (def
);
15995 set
= single_set (def_insn
);
15996 if (set
&& rtx_equal_p (SET_DEST (set
), reg
))
15998 rtx note
, src
, symbol
;
16000 /* First see whether the source is a plain symbol. This is used
16001 when calling symbols that are not lazily bound. */
16002 src
= SET_SRC (set
);
16003 if (GET_CODE (src
) == SYMBOL_REF
)
16006 /* Handle %call16 references. */
16007 symbol
= mips_strip_unspec_call (src
);
16010 gcc_assert (GET_CODE (symbol
) == SYMBOL_REF
);
16014 /* If we have something more complicated, look for a
16015 REG_EQUAL or REG_EQUIV note. */
16016 note
= find_reg_equal_equiv_note (def_insn
);
16017 if (note
&& GET_CODE (XEXP (note
, 0)) == SYMBOL_REF
)
16018 return XEXP (note
, 0);
16020 /* Follow at most one simple register copy. Such copies are
16021 interesting in cases like:
16025 locally_binding_fn (...);
16030 locally_binding_fn (...);
16032 locally_binding_fn (...);
16034 where the load of locally_binding_fn can legitimately be
16035 hoisted or shared. However, we do not expect to see complex
16036 chains of copies, so a full worklist solution to the problem
16037 would probably be overkill. */
16038 if (recurse_p
&& REG_P (src
))
16039 return mips_find_pic_call_symbol (def_insn
, src
, false);
16045 /* Find the definition of the use of REG in INSN. See if the definition
16046 is one of the ways we load a register with a symbol address for a
16047 mips_use_pic_fn_addr_reg_p call. If it is return the symbol reference
16048 of the function, otherwise return NULL_RTX. RECURSE_P is as for
16049 mips_pic_call_symbol_from_set. */
16052 mips_find_pic_call_symbol (rtx_insn
*insn
, rtx reg
, bool recurse_p
)
16055 struct df_link
*defs
;
16058 use
= df_find_use (insn
, regno_reg_rtx
[REGNO (reg
)]);
16061 defs
= DF_REF_CHAIN (use
);
16064 symbol
= mips_pic_call_symbol_from_set (defs
->ref
, reg
, recurse_p
);
16068 /* If we have more than one definition, they need to be identical. */
16069 for (defs
= defs
->next
; defs
; defs
= defs
->next
)
16073 other
= mips_pic_call_symbol_from_set (defs
->ref
, reg
, recurse_p
);
16074 if (!rtx_equal_p (symbol
, other
))
16081 /* Replace the args_size operand of the call expression CALL with the
16082 call-attribute UNSPEC and fill in SYMBOL as the function symbol. */
16085 mips_annotate_pic_call_expr (rtx call
, rtx symbol
)
16089 args_size
= XEXP (call
, 1);
16090 XEXP (call
, 1) = gen_rtx_UNSPEC (GET_MODE (args_size
),
16091 gen_rtvec (2, args_size
, symbol
),
16095 /* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See
16096 if instead of the arg_size argument it contains the call attributes. If
16097 yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function
16098 symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is
16102 mips_get_pic_call_symbol (rtx
*operands
, int args_size_opno
)
16104 rtx args_size
, symbol
;
16106 if (!TARGET_RELAX_PIC_CALLS
|| args_size_opno
== -1)
16109 args_size
= operands
[args_size_opno
];
16110 if (GET_CODE (args_size
) != UNSPEC
)
16112 gcc_assert (XINT (args_size
, 1) == UNSPEC_CALL_ATTR
);
16114 symbol
= XVECEXP (args_size
, 0, 1);
16115 gcc_assert (GET_CODE (symbol
) == SYMBOL_REF
);
16117 operands
[args_size_opno
] = symbol
;
16121 /* Use DF to annotate PIC indirect calls with the function symbol they
16125 mips_annotate_pic_calls (void)
16130 FOR_EACH_BB_FN (bb
, cfun
)
16131 FOR_BB_INSNS (bb
, insn
)
16133 rtx call
, reg
, symbol
, second_call
;
16136 call
= mips_call_expr_from_insn (insn
, &second_call
);
16139 gcc_assert (MEM_P (XEXP (call
, 0)));
16140 reg
= XEXP (XEXP (call
, 0), 0);
16144 symbol
= mips_find_pic_call_symbol (insn
, reg
, true);
16147 mips_annotate_pic_call_expr (call
, symbol
);
16149 mips_annotate_pic_call_expr (second_call
, symbol
);
16154 /* A temporary variable used by note_uses callbacks, etc. */
16155 static rtx_insn
*mips_sim_insn
;
16157 /* A structure representing the state of the processor pipeline.
16158 Used by the mips_sim_* family of functions. */
16160 /* The maximum number of instructions that can be issued in a cycle.
16161 (Caches mips_issue_rate.) */
16162 unsigned int issue_rate
;
16164 /* The current simulation time. */
16167 /* How many more instructions can be issued in the current cycle. */
16168 unsigned int insns_left
;
16170 /* LAST_SET[X].INSN is the last instruction to set register X.
16171 LAST_SET[X].TIME is the time at which that instruction was issued.
16172 INSN is null if no instruction has yet set register X. */
16176 } last_set
[FIRST_PSEUDO_REGISTER
];
16178 /* The pipeline's current DFA state. */
16182 /* Reset STATE to the initial simulation state. */
16185 mips_sim_reset (struct mips_sim
*state
)
16187 curr_state
= state
->dfa_state
;
16190 state
->insns_left
= state
->issue_rate
;
16191 memset (&state
->last_set
, 0, sizeof (state
->last_set
));
16192 state_reset (curr_state
);
16194 targetm
.sched
.init (0, false, 0);
16195 advance_state (curr_state
);
16198 /* Initialize STATE before its first use. DFA_STATE points to an
16199 allocated but uninitialized DFA state. */
16202 mips_sim_init (struct mips_sim
*state
, state_t dfa_state
)
16204 if (targetm
.sched
.init_dfa_pre_cycle_insn
)
16205 targetm
.sched
.init_dfa_pre_cycle_insn ();
16207 if (targetm
.sched
.init_dfa_post_cycle_insn
)
16208 targetm
.sched
.init_dfa_post_cycle_insn ();
16210 state
->issue_rate
= mips_issue_rate ();
16211 state
->dfa_state
= dfa_state
;
16212 mips_sim_reset (state
);
16215 /* Advance STATE by one clock cycle. */
16218 mips_sim_next_cycle (struct mips_sim
*state
)
16220 curr_state
= state
->dfa_state
;
16223 state
->insns_left
= state
->issue_rate
;
16224 advance_state (curr_state
);
16227 /* Advance simulation state STATE until instruction INSN can read
16231 mips_sim_wait_reg (struct mips_sim
*state
, rtx_insn
*insn
, rtx reg
)
16233 unsigned int regno
, end_regno
;
16235 end_regno
= END_REGNO (reg
);
16236 for (regno
= REGNO (reg
); regno
< end_regno
; regno
++)
16237 if (state
->last_set
[regno
].insn
!= 0)
16241 t
= (state
->last_set
[regno
].time
16242 + insn_latency (state
->last_set
[regno
].insn
, insn
));
16243 while (state
->time
< t
)
16244 mips_sim_next_cycle (state
);
16248 /* A note_uses callback. For each register in *X, advance simulation
16249 state DATA until mips_sim_insn can read the register's value. */
16252 mips_sim_wait_regs_1 (rtx
*x
, void *data
)
16254 subrtx_var_iterator::array_type array
;
16255 FOR_EACH_SUBRTX_VAR (iter
, array
, *x
, NONCONST
)
16257 mips_sim_wait_reg ((struct mips_sim
*) data
, mips_sim_insn
, *iter
);
16260 /* Advance simulation state STATE until all of INSN's register
16261 dependencies are satisfied. */
16264 mips_sim_wait_regs (struct mips_sim
*state
, rtx_insn
*insn
)
16266 mips_sim_insn
= insn
;
16267 note_uses (&PATTERN (insn
), mips_sim_wait_regs_1
, state
);
16270 /* Advance simulation state STATE until the units required by
16271 instruction INSN are available. */
16274 mips_sim_wait_units (struct mips_sim
*state
, rtx_insn
*insn
)
16278 tmp_state
= alloca (state_size ());
16279 while (state
->insns_left
== 0
16280 || (memcpy (tmp_state
, state
->dfa_state
, state_size ()),
16281 state_transition (tmp_state
, insn
) >= 0))
16282 mips_sim_next_cycle (state
);
16285 /* Advance simulation state STATE until INSN is ready to issue. */
16288 mips_sim_wait_insn (struct mips_sim
*state
, rtx_insn
*insn
)
16290 mips_sim_wait_regs (state
, insn
);
16291 mips_sim_wait_units (state
, insn
);
16294 /* mips_sim_insn has just set X. Update the LAST_SET array
16295 in simulation state DATA. */
16298 mips_sim_record_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
16300 struct mips_sim
*state
;
16302 state
= (struct mips_sim
*) data
;
16305 unsigned int regno
, end_regno
;
16307 end_regno
= END_REGNO (x
);
16308 for (regno
= REGNO (x
); regno
< end_regno
; regno
++)
16310 state
->last_set
[regno
].insn
= mips_sim_insn
;
16311 state
->last_set
[regno
].time
= state
->time
;
16316 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
16317 can issue immediately (i.e., that mips_sim_wait_insn has already
16321 mips_sim_issue_insn (struct mips_sim
*state
, rtx_insn
*insn
)
16323 curr_state
= state
->dfa_state
;
16325 state_transition (curr_state
, insn
);
16326 state
->insns_left
= targetm
.sched
.variable_issue (0, false, insn
,
16327 state
->insns_left
);
16329 mips_sim_insn
= insn
;
16330 note_stores (PATTERN (insn
), mips_sim_record_set
, state
);
16333 /* Simulate issuing a NOP in state STATE. */
16336 mips_sim_issue_nop (struct mips_sim
*state
)
16338 if (state
->insns_left
== 0)
16339 mips_sim_next_cycle (state
);
16340 state
->insns_left
--;
16343 /* Update simulation state STATE so that it's ready to accept the instruction
16344 after INSN. INSN should be part of the main rtl chain, not a member of a
16348 mips_sim_finish_insn (struct mips_sim
*state
, rtx_insn
*insn
)
16350 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
16352 mips_sim_issue_nop (state
);
16354 switch (GET_CODE (SEQ_BEGIN (insn
)))
16358 /* We can't predict the processor state after a call or label. */
16359 mips_sim_reset (state
);
16363 /* The delay slots of branch likely instructions are only executed
16364 when the branch is taken. Therefore, if the caller has simulated
16365 the delay slot instruction, STATE does not really reflect the state
16366 of the pipeline for the instruction after the delay slot. Also,
16367 branch likely instructions tend to incur a penalty when not taken,
16368 so there will probably be an extra delay between the branch and
16369 the instruction after the delay slot. */
16370 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn
)))
16371 mips_sim_reset (state
);
16379 /* Use simulator state STATE to calculate the execution time of
16380 instruction sequence SEQ. */
16382 static unsigned int
16383 mips_seq_time (struct mips_sim
*state
, rtx_insn
*seq
)
16385 mips_sim_reset (state
);
16386 for (rtx_insn
*insn
= seq
; insn
; insn
= NEXT_INSN (insn
))
16388 mips_sim_wait_insn (state
, insn
);
16389 mips_sim_issue_insn (state
, insn
);
16391 return state
->time
;
16394 /* Return the execution-time cost of mips_tuning_info.fast_mult_zero_zero_p
16395 setting SETTING, using STATE to simulate instruction sequences. */
16397 static unsigned int
16398 mips_mult_zero_zero_cost (struct mips_sim
*state
, bool setting
)
16400 mips_tuning_info
.fast_mult_zero_zero_p
= setting
;
16403 machine_mode dword_mode
= TARGET_64BIT
? TImode
: DImode
;
16404 rtx hilo
= gen_rtx_REG (dword_mode
, MD_REG_FIRST
);
16405 mips_emit_move_or_split (hilo
, const0_rtx
, SPLIT_FOR_SPEED
);
16407 /* If the target provides mulsidi3_32bit then that's the most likely
16408 consumer of the result. Test for bypasses. */
16409 if (dword_mode
== DImode
&& HAVE_maddsidi4
)
16411 rtx gpr
= gen_rtx_REG (SImode
, GP_REG_FIRST
+ 4);
16412 emit_insn (gen_maddsidi4 (hilo
, gpr
, gpr
, hilo
));
16415 unsigned int time
= mips_seq_time (state
, get_insns ());
16420 /* Check the relative speeds of "MULT $0,$0" and "MTLO $0; MTHI $0"
16421 and set up mips_tuning_info.fast_mult_zero_zero_p accordingly.
16422 Prefer MULT -- which is shorter -- in the event of a tie. */
16425 mips_set_fast_mult_zero_zero_p (struct mips_sim
*state
)
16427 if (TARGET_MIPS16
|| !ISA_HAS_HILO
)
16428 /* No MTLO or MTHI available for MIPS16. Also, when there are no HI or LO
16429 registers then there is no reason to zero them, arbitrarily choose to
16430 say that "MULT $0,$0" would be faster. */
16431 mips_tuning_info
.fast_mult_zero_zero_p
= true;
16434 unsigned int true_time
= mips_mult_zero_zero_cost (state
, true);
16435 unsigned int false_time
= mips_mult_zero_zero_cost (state
, false);
16436 mips_tuning_info
.fast_mult_zero_zero_p
= (true_time
<= false_time
);
16440 /* Set up costs based on the current architecture and tuning settings. */
16443 mips_set_tuning_info (void)
/* Cache check: nothing to do if the tuning info was already computed for
   this arch/tune/MIPS16 combination.  NOTE(review): the early return for
   this condition is not visible in this extract.  */
16445 if (mips_tuning_info
.initialized_p
16446 && mips_tuning_info
.arch
== mips_arch
16447 && mips_tuning_info
.tune
== mips_tune
16448 && mips_tuning_info
.mips16_p
== TARGET_MIPS16
)
/* Record the configuration the cached results belong to.  */
16451 mips_tuning_info
.arch
= mips_arch
;
16452 mips_tuning_info
.tune
= mips_tune
;
16453 mips_tuning_info
.mips16_p
= TARGET_MIPS16
;
16454 mips_tuning_info
.initialized_p
= true;
/* Run the pipeline simulation that populates the tuning flags.  */
16458 struct mips_sim state
;
16459 mips_sim_init (&state
, alloca (state_size ()));
16461 mips_set_fast_mult_zero_zero_p (&state
);
16466 /* Implement TARGET_EXPAND_TO_RTL_HOOK. */
16469 mips_expand_to_rtl_hook (void)
16471 /* We need to call this at a point where we can safely create sequences
16472 of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also
16473 need to call it at a point where the DFA infrastructure is not
16474 already in use, so we can't just call it lazily on demand.
16476 At present, mips_tuning_info is only needed during post-expand
16477 RTL passes such as split_insns, so this hook should be early enough.
16478 We may need to move the call elsewhere if mips_tuning_info starts
16479 to be used for other things (such as rtx_costs, or expanders that
16480 could be called during gimple optimization). */
16481 mips_set_tuning_info ();
16484 /* The VR4130 pipeline issues aligned pairs of instructions together,
16485 but it stalls the second instruction if it depends on the first.
16486 In order to cut down the amount of logic required, this dependence
16487 check is not based on a full instruction decode. Instead, any non-SPECIAL
16488 instruction is assumed to modify the register specified by bits 20-16
16489 (which is usually the "rt" field).
16491 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
16492 input, so we can end up with a false dependence between the branch
16493 and its delay slot. If this situation occurs in instruction INSN,
16494 try to avoid it by swapping rs and rt. */
/* NOTE(review): the return-type line and braces of this function are not
   visible in this extract.  */
16497 vr4130_avoid_branch_rt_conflict (rtx_insn
*insn
)
16499 rtx_insn
*first
, *second
;
/* FIRST/SECOND are the branch and delay-slot halves of a SEQUENCE.  */
16501 first
= SEQ_BEGIN (insn
);
16502 second
= SEQ_END (insn
);
16504 && NONJUMP_INSN_P (second
)
16505 && GET_CODE (PATTERN (first
)) == SET
16506 && GET_CODE (SET_DEST (PATTERN (first
))) == PC
16507 && GET_CODE (SET_SRC (PATTERN (first
))) == IF_THEN_ELSE
)
16509 /* Check for the right kind of condition. */
16510 rtx cond
= XEXP (SET_SRC (PATTERN (first
)), 0);
16511 if ((GET_CODE (cond
) == EQ
|| GET_CODE (cond
) == NE
)
16512 && REG_P (XEXP (cond
, 0))
16513 && REG_P (XEXP (cond
, 1))
16514 && reg_referenced_p (XEXP (cond
, 1), PATTERN (second
))
16515 && !reg_referenced_p (XEXP (cond
, 0), PATTERN (second
)))
16517 /* SECOND mentions the rt register but not the rs register. */
/* EQ/NE are symmetric, so swapping the operands preserves the
   branch condition while moving the conflicting register to rs.  */
16518 rtx tmp
= XEXP (cond
, 0);
16519 XEXP (cond
, 0) = XEXP (cond
, 1);
16520 XEXP (cond
, 1) = tmp
;
16525 /* Implement -mvr4130-align. Go through each basic block and simulate the
16526 processor pipeline. If we find that a pair of instructions could execute
16527 in parallel, and the first of those instructions is not 8-byte aligned,
16528 insert a nop to make it aligned. */
/* NOTE(review): braces, the declarations of ALIGNED_P and the updates of
   LAST/LAST2 are not visible in this extract.  */
16531 vr4130_align_insns (void)
16533 struct mips_sim state
;
16534 rtx_insn
*insn
, *subinsn
, *last
, *last2
, *next
;
16539 /* LAST is the last instruction before INSN to have a nonzero length.
16540 LAST2 is the last such instruction before LAST. */
16544 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
16547 mips_sim_init (&state
, alloca (state_size ()));
16548 for (insn
= get_insns (); insn
!= 0; insn
= next
)
16550 unsigned int length
;
16552 next
= NEXT_INSN (insn
);
16554 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
16555 This isn't really related to the alignment pass, but we do it on
16556 the fly to avoid a separate instruction walk. */
16557 vr4130_avoid_branch_rt_conflict (insn
);
16559 length
= get_attr_length (insn
);
16560 if (length
> 0 && USEFUL_INSN_P (insn
))
16561 FOR_EACH_SUBINSN (subinsn
, insn
)
16563 mips_sim_wait_insn (&state
, subinsn
);
16565 /* If we want this instruction to issue in parallel with the
16566 previous one, make sure that the previous instruction is
16567 aligned. There are several reasons why this isn't worthwhile
16568 when the second instruction is a call:
16570 - Calls are less likely to be performance critical,
16571 - There's a good chance that the delay slot can execute
16572 in parallel with the call.
16573 - The return address would then be unaligned.
16575 In general, if we're going to insert a nop between instructions
16576 X and Y, it's better to insert it immediately after X. That
16577 way, if the nop makes Y aligned, it will also align any labels
16578 between X and Y. */
16579 if (state
.insns_left
!= state
.issue_rate
16580 && !CALL_P (subinsn
))
16582 if (subinsn
== SEQ_BEGIN (insn
) && aligned_p
)
16584 /* SUBINSN is the first instruction in INSN and INSN is
16585 aligned. We want to align the previous instruction
16586 instead, so insert a nop between LAST2 and LAST.
16588 Note that LAST could be either a single instruction
16589 or a branch with a delay slot. In the latter case,
16590 LAST, like INSN, is already aligned, but the delay
16591 slot must have some extra delay that stops it from
16592 issuing at the same time as the branch. We therefore
16593 insert a nop before the branch in order to align its
16595 gcc_assert (last2
);
16596 emit_insn_after (gen_nop (), last2
);
16599 else if (subinsn
!= SEQ_BEGIN (insn
) && !aligned_p
)
16601 /* SUBINSN is the delay slot of INSN, but INSN is
16602 currently unaligned. Insert a nop between
16603 LAST and INSN to align it. */
16605 emit_insn_after (gen_nop (), last
);
16609 mips_sim_issue_insn (&state
, subinsn
);
16611 mips_sim_finish_insn (&state
, insn
);
16613 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
16614 length
= get_attr_length (insn
);
16617 /* If the instruction is an asm statement or multi-instruction
16618 mips.md pattern, the length is only an estimate. Insert an
16619 8 byte alignment after it so that the following instructions
16620 can be handled correctly. */
16621 if (NONJUMP_INSN_P (SEQ_BEGIN (insn
))
16622 && (recog_memoized (insn
) < 0 || length
>= 8))
16624 next
= emit_insn_after (gen_align (GEN_INT (3)), insn
);
16625 next
= NEXT_INSN (next
);
16626 mips_sim_next_cycle (&state
);
/* An odd-word-length insn flips the 8-byte alignment parity.  */
16629 else if (length
& 4)
16630 aligned_p
= !aligned_p
;
16635 /* See whether INSN is an aligned label. */
16636 if (LABEL_P (insn
) && label_to_alignment (insn
) >= 3)
16642 /* This structure records that the current function has a LO_SUM
16643 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
16644 the largest offset applied to BASE by all such LO_SUMs. */
16645 struct mips_lo_sum_offset
{
/* NOTE(review): the "base" member referred to by the comment above is not
   visible in this extract.  */
/* The largest offset applied to BASE by any LO_SUM seen so far.  */
16647 HOST_WIDE_INT offset
;
16650 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
/* NOTE(review): the return-type line (hashval_t) is not visible here.  */
16653 mips_hash_base (rtx base
)
/* hash_rtx's "do not record" output is not needed; its value is
   discarded.  */
16655 int do_not_record_p
;
16657 return hash_rtx (base
, GET_MODE (base
), &do_not_record_p
, NULL
, false);
16660 /* Hashtable helpers. */
16662 struct mips_lo_sum_offset_hasher
: free_ptr_hash
<mips_lo_sum_offset
>
/* Lookups compare entries directly against an rtx (the LO_SUM base),
   not against another entry.  */
16664 typedef rtx_def
*compare_type
;
16665 static inline hashval_t
hash (const mips_lo_sum_offset
*);
16666 static inline bool equal (const mips_lo_sum_offset
*, const rtx_def
*);
16669 /* Hash-table callbacks for mips_lo_sum_offsets. */
/* Hash an entry by its BASE symbol or label.  */
16672 mips_lo_sum_offset_hasher::hash (const mips_lo_sum_offset
*entry
)
16674 return mips_hash_base (entry
->base
);
/* An entry matches when its BASE is rtx-equal to the lookup value.  */
16678 mips_lo_sum_offset_hasher::equal (const mips_lo_sum_offset
*entry
,
16679 const rtx_def
*value
)
16681 return rtx_equal_p (entry
->base
, value
);
/* Convenience alias for the table type used throughout this pass.  */
16684 typedef hash_table
<mips_lo_sum_offset_hasher
> mips_offset_table
;
16686 /* Look up symbolic constant X in HTAB, which is a hash table of
16687 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
16688 paired with a recorded LO_SUM, otherwise record X in the table. */
/* NOTE(review): the return-type line, the BASE/OFFSET declarations and the
   braces are not visible in this extract.  */
16691 mips_lo_sum_offset_lookup (mips_offset_table
*htab
, rtx x
,
16692 enum insert_option option
)
16695 mips_lo_sum_offset
**slot
;
16696 struct mips_lo_sum_offset
*entry
;
16698 /* Split X into a base and offset. */
16699 split_const (x
, &base
, &offset
);
16700 if (UNSPEC_ADDRESS_P (base
))
16701 base
= UNSPEC_ADDRESS (base
);
16703 /* Look up the base in the hash table. */
16704 slot
= htab
->find_slot_with_hash (base
, mips_hash_base (base
), option
);
16708 entry
= (struct mips_lo_sum_offset
*) *slot
;
16709 if (option
== INSERT
)
/* First LO_SUM seen for this base: allocate a fresh entry.  */
16713 entry
= XNEW (struct mips_lo_sum_offset
);
16714 entry
->base
= base
;
16715 entry
->offset
= INTVAL (offset
);
/* Existing entry: keep the largest offset seen for this base.  */
16720 if (INTVAL (offset
) > entry
->offset
)
16721 entry
->offset
= INTVAL (offset
);
/* NO_INSERT lookup: X pairs with a recorded LO_SUM iff its offset does
   not exceed the maximum offset recorded for the base.  */
16724 return INTVAL (offset
) <= entry
->offset
;
16727 /* Search X for LO_SUMs and record them in HTAB. */
/* NOTE(review): the return-type line and braces are not visible here.  */
16730 mips_record_lo_sums (const_rtx x
, mips_offset_table
*htab
)
16732 subrtx_iterator::array_type array
;
16733 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
16734 if (GET_CODE (*iter
) == LO_SUM
)
/* Operand 1 of a LO_SUM is its symbolic constant.  */
16735 mips_lo_sum_offset_lookup (htab
, XEXP (*iter
, 1), INSERT
);
16738 /* Return true if INSN is a SET of an orphaned high-part relocation.
16739 HTAB is a hash table of mips_lo_sum_offsets that describes all the
16740 LO_SUMs in the current function. */
/* NOTE(review): the declaration/assignment of X (presumably the SET_SRC
   of SET) and the final return are not visible in this extract.  */
16743 mips_orphaned_high_part_p (mips_offset_table
*htab
, rtx_insn
*insn
)
16745 enum mips_symbol_type type
;
16748 set
= single_set (insn
);
16751 /* Check for %his. */
16753 if (GET_CODE (x
) == HIGH
16754 && absolute_symbolic_operand (XEXP (x
, 0), VOIDmode
))
/* The HIGH is orphaned if no LO_SUM can pair with it.  */
16755 return !mips_lo_sum_offset_lookup (htab
, XEXP (x
, 0), NO_INSERT
);
16757 /* Check for local %gots (and %got_pages, which is redundant but OK). */
16758 if (GET_CODE (x
) == UNSPEC
16759 && XINT (x
, 1) == UNSPEC_LOAD_GOT
16760 && mips_symbolic_constant_p (XVECEXP (x
, 0, 1),
16761 SYMBOL_CONTEXT_LEA
, &type
)
16762 && type
== SYMBOL_GOTOFF_PAGE
)
16763 return !mips_lo_sum_offset_lookup (htab
, XVECEXP (x
, 0, 1), NO_INSERT
);
16768 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
16769 INSN and a previous instruction, avoid it by inserting nops after
16772 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
16773 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
16774 before using the value of that register. *HILO_DELAY counts the
16775 number of instructions since the last hilo hazard (that is,
16776 the number of instructions since the last MFLO or MFHI).
16778 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
16779 for the next instruction.
16781 LO_REG is an rtx for the LO register, used in dependence checking. */
/* NOTE(review): local declarations (PATTERN/NINSNS/NOPS/SET), braces, the
   nop-emission loop header and several switch cases are not visible in
   this extract.  */
16784 mips_avoid_hazard (rtx_insn
*after
, rtx_insn
*insn
, int *hilo_delay
,
16785 rtx
*delayed_reg
, rtx lo_reg
, bool *fs_delay
)
16790 pattern
= PATTERN (insn
);
16792 /* Do not put the whole function in .set noreorder if it contains
16793 an asm statement. We don't know whether there will be hazards
16794 between the asm statement and the gcc-generated code. */
16795 if (GET_CODE (pattern
) == ASM_INPUT
|| asm_noperands (pattern
) >= 0)
16796 cfun
->machine
->all_noreorder_p
= false;
16798 /* Ignore zero-length instructions (barriers and the like). */
16799 ninsns
= get_attr_length (insn
) / 4;
16803 /* Work out how many nops are needed. Note that we only care about
16804 registers that are explicitly mentioned in the instruction's pattern.
16805 It doesn't matter that calls use the argument registers or that they
16806 clobber hi and lo. */
16807 if (*hilo_delay
< 2 && reg_set_p (lo_reg
, pattern
))
16808 nops
= 2 - *hilo_delay
;
16809 else if (*delayed_reg
!= 0 && reg_referenced_p (*delayed_reg
, pattern
))
16811 /* If processing a forbidden slot hazard then a NOP is required if the
16812 branch instruction was not in a sequence (as the sequence would
16813 imply it is not actually a compact branch anyway) and the current
16814 insn is not an inline asm, and can't go in a delay slot. */
16815 else if (*fs_delay
&& get_attr_can_delay (insn
) == CAN_DELAY_NO
16816 && GET_CODE (PATTERN (after
)) != SEQUENCE
16817 && GET_CODE (pattern
) != ASM_INPUT
16818 && asm_noperands (pattern
) < 0)
16823 /* Insert the nops between this instruction and the previous one.
16824 Each new nop takes us further from the last hilo hazard. */
16825 *hilo_delay
+= nops
;
16827 emit_insn_after (gen_hazard_nop (), after
);
16829 /* Set up the state for the next instruction. */
16830 *hilo_delay
+= ninsns
;
16833 if (INSN_CODE (insn
) >= 0)
16834 switch (get_attr_hazard (insn
))
16839 case HAZARD_FORBIDDEN_SLOT
:
16840 if (TARGET_CB_MAYBE
)
/* Remember which register the next insn must wait for.  */
16849 set
= single_set (insn
);
16851 *delayed_reg
= SET_DEST (set
);
16856 /* A SEQUENCE is breakable iff the branch inside it has a compact form
16857 and the target has compact branches. */
/* NOTE(review): the return-type line and a middle conjunct of the return
   expression are not visible in this extract.  */
16860 mips_breakable_sequence_p (rtx_insn
*insn
)
16862 return (insn
&& GET_CODE (PATTERN (insn
)) == SEQUENCE
16864 && get_attr_compact_form (SEQ_BEGIN (insn
)) != COMPACT_FORM_NEVER
);
16867 /* Remove a SEQUENCE and replace it with the delay slot instruction
16868 followed by the branch and return the instruction in the delay slot.
16869 Return the first of the two new instructions.
16870 Subroutine of mips_reorg_process_insns. */
/* NOTE(review): the return-type line, braces and the final "return ds;"
   are not visible in this extract.  */
16873 mips_break_sequence (rtx_insn
*insn
)
16875 rtx_insn
*before
= PREV_INSN (insn
);
16876 rtx_insn
*branch
= SEQ_BEGIN (insn
);
16877 rtx_insn
*ds
= SEQ_END (insn
);
16878 remove_insn (insn
);
/* Re-insert the delay-slot insn first, then the branch after it,
   undoing the delay-slot fill.  */
16879 add_insn_after (ds
, before
, NULL
);
16880 add_insn_after (branch
, ds
, NULL
);
16884 /* Go through the instruction stream and insert nops where necessary.
16885 Also delete any high-part relocations whose partnering low parts
16886 are now all dead. See if the whole function can then be put into
16887 .set noreorder and .set nomacro. */
/* NOTE(review): many structural lines (braces, several declarations such
   as hilo_delay/fs_delay, and some conditions) are not visible in this
   extract.  */
16890 mips_reorg_process_insns (void)
16892 rtx_insn
*insn
, *last_insn
, *subinsn
, *next_insn
;
16893 rtx lo_reg
, delayed_reg
;
16897 /* Force all instructions to be split into their final form. */
16898 split_all_insns_noflow ();
16900 /* Recalculate instruction lengths without taking nops into account. */
16901 cfun
->machine
->ignore_hazard_length_p
= true;
16902 shorten_branches (get_insns ());
/* Start optimistic; the checks below veto .set noreorder as needed.  */
16904 cfun
->machine
->all_noreorder_p
= true;
16906 /* We don't track MIPS16 PC-relative offsets closely enough to make
16907 a good job of "set .noreorder" code in MIPS16 mode. */
16909 cfun
->machine
->all_noreorder_p
= false;
16911 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
16912 if (!TARGET_EXPLICIT_RELOCS
)
16913 cfun
->machine
->all_noreorder_p
= false;
16915 /* Profiled functions can't be all noreorder because the profiler
16916 support uses assembler macros. */
16918 cfun
->machine
->all_noreorder_p
= false;
16920 /* Code compiled with -mfix-vr4120, -mfix-rm7000 or -mfix-24k can't be
16921 all noreorder because we rely on the assembler to work around some
16922 errata. The R5900 too has several bugs. */
16923 if (TARGET_FIX_VR4120
16924 || TARGET_FIX_RM7000
16926 || TARGET_MIPS5900
)
16927 cfun
->machine
->all_noreorder_p
= false;
16929 /* The same is true for -mfix-vr4130 if we might generate MFLO or
16930 MFHI instructions. Note that we avoid using MFLO and MFHI if
16931 the VR4130 MACC and DMACC instructions are available instead;
16932 see the *mfhilo_{si,di}_macc patterns. */
16933 if (TARGET_FIX_VR4130
&& !ISA_HAS_MACCHI
)
16934 cfun
->machine
->all_noreorder_p
= false;
/* 37 is the initial number of hash-table buckets.  */
16936 mips_offset_table
htab (37);
16938 /* Make a first pass over the instructions, recording all the LO_SUMs. */
16939 for (insn
= get_insns (); insn
!= 0; insn
= NEXT_INSN (insn
))
16940 FOR_EACH_SUBINSN (subinsn
, insn
)
16941 if (USEFUL_INSN_P (subinsn
))
16943 rtx body
= PATTERN (insn
);
16944 int noperands
= asm_noperands (body
);
16945 if (noperands
>= 0)
/* Inline asm: only scan the operands the template actually
   references.  */
16947 rtx
*ops
= XALLOCAVEC (rtx
, noperands
);
16948 bool *used
= XALLOCAVEC (bool, noperands
);
16949 const char *string
= decode_asm_operands (body
, ops
, NULL
, NULL
,
16951 get_referenced_operands (string
, used
, noperands
);
16952 for (int i
= 0; i
< noperands
; ++i
)
16954 mips_record_lo_sums (ops
[i
], &htab
);
16957 mips_record_lo_sums (PATTERN (subinsn
), &htab
);
16963 lo_reg
= gen_rtx_REG (SImode
, LO_REGNUM
);
16966 /* Make a second pass over the instructions. Delete orphaned
16967 high-part relocations or turn them into NOPs. Avoid hazards
16968 by inserting NOPs. */
16969 for (insn
= get_insns (); insn
!= 0; insn
= next_insn
)
16971 next_insn
= NEXT_INSN (insn
);
16972 if (USEFUL_INSN_P (insn
))
16974 if (GET_CODE (PATTERN (insn
)) == SEQUENCE
)
16976 rtx_insn
*next_active
= next_active_insn (insn
);
16977 /* Undo delay slots to avoid bubbles if the next instruction can
16978 be placed in a forbidden slot or the cost of adding an
16979 explicit NOP in a forbidden slot is OK and if the SEQUENCE is
16980 safely breakable. */
16981 if (TARGET_CB_MAYBE
16982 && mips_breakable_sequence_p (insn
)
16983 && INSN_P (SEQ_BEGIN (insn
))
16984 && INSN_P (SEQ_END (insn
))
16986 && INSN_P (next_active
)
16987 && GET_CODE (PATTERN (next_active
)) != SEQUENCE
16988 && get_attr_can_delay (next_active
) == CAN_DELAY_YES
)
16989 || !optimize_size
))
16991 /* To hide a potential pipeline bubble, if we scan backwards
16992 from the current SEQUENCE and find that there is a load
16993 of a value that is used in the CTI and there are no
16994 dependencies between the CTI and instruction in the delay
16995 slot, break the sequence so the load delay is hidden. */
16997 CLEAR_HARD_REG_SET (uses
);
16998 note_uses (&PATTERN (SEQ_BEGIN (insn
)), record_hard_reg_uses
,
17000 HARD_REG_SET delay_sets
;
17001 CLEAR_HARD_REG_SET (delay_sets
);
17002 note_stores (PATTERN (SEQ_END (insn
)), record_hard_reg_sets
,
17005 rtx_insn
*prev
= prev_active_insn (insn
);
17007 && GET_CODE (PATTERN (prev
)) == SET
17008 && MEM_P (SET_SRC (PATTERN (prev
))))
17011 CLEAR_HARD_REG_SET (sets
);
17012 note_stores (PATTERN (prev
), record_hard_reg_sets
,
17015 /* Re-order if safe. */
17016 if (!hard_reg_set_intersect_p (delay_sets
, uses
)
17017 && hard_reg_set_intersect_p (uses
, sets
))
17019 next_insn
= mips_break_sequence (insn
);
17020 /* Need to process the hazards of the newly
17021 introduced instructions. */
17026 /* If we find an orphaned high-part relocation in a delay
17027 slot then we can convert to a compact branch and get
17028 the orphaned high part deleted. */
17029 if (mips_orphaned_high_part_p (&htab
, SEQ_END (insn
)))
17031 next_insn
= mips_break_sequence (insn
);
17032 /* Need to process the hazards of the newly
17033 introduced instructions. */
17038 /* If we find an orphaned high-part relocation in a delay
17039 slot, it's easier to turn that instruction into a NOP than
17040 to delete it. The delay slot will be a NOP either way. */
17041 FOR_EACH_SUBINSN (subinsn
, insn
)
17042 if (INSN_P (subinsn
))
17044 if (mips_orphaned_high_part_p (&htab
, subinsn
))
17046 PATTERN (subinsn
) = gen_nop ();
17047 INSN_CODE (subinsn
) = CODE_FOR_nop
;
17049 mips_avoid_hazard (last_insn
, subinsn
, &hilo_delay
,
17050 &delayed_reg
, lo_reg
, &fs_delay
);
17056 /* INSN is a single instruction. Delete it if it's an
17057 orphaned high-part relocation. */
17058 if (mips_orphaned_high_part_p (&htab
, insn
))
17059 delete_insn (insn
);
17060 /* Also delete cache barriers if the last instruction
17061 was an annulled branch. INSN will not be speculatively
17063 else if (recog_memoized (insn
) == CODE_FOR_r10k_cache_barrier
17065 && JUMP_P (SEQ_BEGIN (last_insn
))
17066 && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn
)))
17067 delete_insn (insn
);
17070 mips_avoid_hazard (last_insn
, insn
, &hilo_delay
,
17071 &delayed_reg
, lo_reg
, &fs_delay
);
17072 /* When a compact branch introduces a forbidden slot hazard
17073 and the next useful instruction is a SEQUENCE of a jump
17074 and a non-nop instruction in the delay slot, remove the
17075 sequence and replace it with the delay slot instruction
17076 then the jump to clear the forbidden slot hazard. */
17080 /* Search onwards from the current position looking for
17081 a SEQUENCE. We are looking for pipeline hazards here
17082 and do not need to worry about labels or barriers as
17083 the optimization only undoes delay slot filling which
17084 only affects the order of the branch and its delay
17086 rtx_insn
*next
= next_active_insn (insn
);
17088 && USEFUL_INSN_P (next
)
17089 && GET_CODE (PATTERN (next
)) == SEQUENCE
17090 && mips_breakable_sequence_p (next
))
17093 next_insn
= mips_break_sequence (next
);
17094 /* Need to process the hazards of the newly
17095 introduced instructions. */
17106 /* Return true if the function has a long branch instruction. */
/* NOTE(review): the return-type line, the NORMAL_LENGTH declaration,
   braces and the "return true/false" statements are not visible here.  */
17109 mips_has_long_branch_p (void)
17111 rtx_insn
*insn
, *subinsn
;
17114 /* We need up-to-date instruction lengths. */
17115 shorten_branches (get_insns ());
17117 /* Look for a branch that is longer than normal. The normal length for
17118 non-MIPS16 branches is 8, because the length includes the delay slot.
17119 It is 4 for MIPS16, because MIPS16 branches are extended instructions,
17120 but they have no delay slot. */
17121 normal_length
= (TARGET_MIPS16
? 4 : 8);
17122 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
17123 FOR_EACH_SUBINSN (subinsn
, insn
)
17124 if (JUMP_P (subinsn
)
17125 && get_attr_length (subinsn
) > normal_length
17126 && (any_condjump_p (subinsn
) || any_uncondjump_p (subinsn
)))
17132 /* If we are using a GOT, but have not decided to use a global pointer yet,
17133 see whether we need one to implement long branches. Convert the ghost
17134 global-pointer instructions into real ones if so. */
/* NOTE(review): the return-type line, braces and the return statements of
   the two early exits below are not visible in this extract.  */
17137 mips_expand_ghost_gp_insns (void)
17139 /* Quick exit if we already know that we will or won't need a
17141 if (!TARGET_USE_GOT
17142 || cfun
->machine
->global_pointer
== INVALID_REGNUM
17143 || mips_must_initialize_gp_p ())
17146 /* Run a full check for long branches. */
17147 if (!mips_has_long_branch_p ())
17150 /* We've now established that we need $gp. */
17151 cfun
->machine
->must_initialize_gp_p
= true;
/* Split the ghost loadgp instructions into real ones.  */
17152 split_all_insns_noflow ();
17157 /* Subroutine of mips_reorg to manage passes that require DF. */
17160 mips_df_reorg (void)
17162 /* Create def-use chains. */
17163 df_set_flags (DF_EQ_NOTES
);
17164 df_chain_add_problem (DF_UD_CHAIN
);
/* Run the DF-dependent sub-passes.  */
17167 if (TARGET_RELAX_PIC_CALLS
)
17168 mips_annotate_pic_calls ();
17170 if (mips_r10k_cache_barrier
!= R10K_CACHE_BARRIER_NONE
)
17171 r10k_insert_cache_barriers ();
/* Tear down the DF problems set up above.  */
17173 df_finish_pass (false);
17176 /* Emit code to load LABEL_REF SRC into MIPS16 register DEST. This is
17177 called very late in mips_reorg, but the caller is required to run
17178 mips16_lay_out_constants on the result. */
/* NOTE(review): the return-type line, braces and the PAGE/LOW
   declarations are not visible in this extract.  */
17181 mips16_load_branch_target (rtx dest
, rtx src
)
17183 if (TARGET_ABICALLS
&& !TARGET_ABSOLUTE_ABICALLS
)
/* PIC case: load $gp (possibly from the cprestore slot), then form
   the address from its GOT page entry plus a low offset.  */
17187 if (mips_cfun_has_cprestore_slot_p ())
17188 mips_emit_move (dest
, mips_cprestore_slot (dest
, true));
17190 mips_emit_move (dest
, pic_offset_table_rtx
);
17191 page
= mips_unspec_address (src
, SYMBOL_GOTOFF_PAGE
);
17192 low
= mips_unspec_address (src
, SYMBOL_GOT_PAGE_OFST
);
17193 emit_insn (gen_rtx_SET (dest
,
17194 PMODE_INSN (gen_unspec_got
, (dest
, page
))));
17195 emit_insn (gen_rtx_SET (dest
, gen_rtx_LO_SUM (Pmode
, dest
, low
)));
/* Non-PIC case: move the absolute address directly.  */
17199 src
= mips_unspec_address (src
, SYMBOL_ABSOLUTE
);
17200 mips_emit_move (dest
, src
);
17204 /* If we're compiling a MIPS16 function, look for and split any long branches.
17205 This must be called after all other instruction modifications in
/* NOTE(review): the return-type line, braces, the INSN/TARGET
   declarations and the start_sequence/end_sequence calls around the
   replacement sequence are not visible in this extract.  */
17209 mips16_split_long_branches (void)
17211 bool something_changed
;
17213 if (!TARGET_MIPS16
)
17216 /* Loop until the alignments for all targets are sufficient. */
17220 rtx_jump_insn
*jump_insn
;
17222 shorten_branches (get_insns ());
17223 something_changed
= false;
/* A length > 4 marks a branch that no longer reaches its target.  */
17224 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
17225 if ((jump_insn
= dyn_cast
<rtx_jump_insn
*> (insn
))
17226 && get_attr_length (jump_insn
) > 4
17227 && (any_condjump_p (jump_insn
) || any_uncondjump_p (jump_insn
)))
17229 rtx old_label
, temp
, saved_temp
;
17230 rtx_code_label
*new_label
;
17232 rtx_insn
*jump
, *jump_sequence
;
17236 /* Free up a MIPS16 register by saving it in $1. */
17237 saved_temp
= gen_rtx_REG (Pmode
, AT_REGNUM
);
17238 temp
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 2);
17239 emit_move_insn (saved_temp
, temp
);
17241 /* Load the branch target into TEMP. */
17242 old_label
= JUMP_LABEL (jump_insn
);
17243 target
= gen_rtx_LABEL_REF (Pmode
, old_label
);
17244 mips16_load_branch_target (temp
, target
);
17246 /* Jump to the target and restore the register's
17248 jump
= emit_jump_insn (PMODE_INSN (gen_indirect_jump_and_restore
,
17249 (temp
, temp
, saved_temp
)));
17250 JUMP_LABEL (jump
) = old_label
;
17251 LABEL_NUSES (old_label
)++;
17253 /* Rewrite any symbolic references that are supposed to use
17254 a PC-relative constant pool. */
17255 mips16_lay_out_constants (false);
17257 if (simplejump_p (jump_insn
))
17258 /* We're going to replace INSN with a longer form. */
17262 /* Create a branch-around label for the original
17264 new_label
= gen_label_rtx ();
17265 emit_label (new_label
);
17268 jump_sequence
= get_insns ();
17271 emit_insn_after (jump_sequence
, jump_insn
);
/* For conditional branches, invert the original branch so it
   skips the long-jump sequence.  */
17273 invert_jump (jump_insn
, new_label
, false);
17275 delete_insn (jump_insn
);
17276 something_changed
= true;
17279 while (something_changed
);
17282 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
/* NOTE(review): the function header (presumably "mips_reorg (void)") and
   the call to mips_df_reorg are not visible in this extract; only part of
   the body is.  */
17287 /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. Also during
17288 insn splitting in mips16_lay_out_constants, DF insn info is only kept up
17289 to date if the CFG is available. */
17290 if (mips_cfg_in_reorg ())
17291 compute_bb_for_insn ();
17292 mips16_lay_out_constants (true);
17293 if (mips_cfg_in_reorg ())
17296 free_bb_for_insn ();
17300 /* We use a machine specific pass to do a second machine dependent reorg
17301 pass after delay branch scheduling. */
/* NOTE(review): braces, part of the condition guarding
   vr4130_align_insns and the final "return 0" are not visible here.  */
17303 static unsigned int
17304 mips_machine_reorg2 (void)
17306 mips_reorg_process_insns ();
17308 && TARGET_EXPLICIT_RELOCS
17310 && TARGET_VR4130_ALIGN
)
17311 vr4130_align_insns ();
17312 if (mips_expand_ghost_gp_insns ())
17313 /* The expansion could invalidate some of the VR4130 alignment
17314 optimizations, but this should be an extremely rare case anyhow. */
17315 mips_reorg_process_insns ();
17316 mips16_split_long_branches ();
/* Pass descriptor for the second machine-dependent reorg pass.  */
17322 const pass_data pass_data_mips_machine_reorg2
=
17324 RTL_PASS
, /* type */
17325 "mach2", /* name */
17326 OPTGROUP_NONE
, /* optinfo_flags */
17327 TV_MACH_DEP
, /* tv_id */
17328 0, /* properties_required */
17329 0, /* properties_provided */
17330 0, /* properties_destroyed */
17331 0, /* todo_flags_start */
17332 0, /* todo_flags_finish */
17335 class pass_mips_machine_reorg2
: public rtl_opt_pass
17338 pass_mips_machine_reorg2(gcc::context
*ctxt
)
17339 : rtl_opt_pass(pass_data_mips_machine_reorg2
, ctxt
)
17342 /* opt_pass methods: */
17343 virtual unsigned int execute (function
*) { return mips_machine_reorg2 (); }
17345 }; // class pass_mips_machine_reorg2
17347 } // anon namespace
/* Factory function used by the pass manager to instantiate the pass.  */
17350 make_pass_mips_machine_reorg2 (gcc::context
*ctxt
)
17352 return new pass_mips_machine_reorg2 (ctxt
);
17356 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
17357 in order to avoid duplicating too much logic from elsewhere. */
/* NOTE(review): the return-type line, the FUNCTION parameter declaration,
   braces and several condition/else lines are not visible in this
   extract.  */
17360 mips_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
17361 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
17364 rtx this_rtx
, temp1
, temp2
, fnaddr
;
17366 bool use_sibcall_p
;
17368 /* Pretend to be a post-reload pass while generating rtl. */
17369 reload_completed
= 1;
17371 /* Mark the end of the (empty) prologue. */
17372 emit_note (NOTE_INSN_PROLOGUE_END
);
17374 /* Determine if we can use a sibcall to call FUNCTION directly. */
17375 fnaddr
= XEXP (DECL_RTL (function
), 0);
17376 use_sibcall_p
= (mips_function_ok_for_sibcall (function
, NULL
)
17377 && const_call_insn_operand (fnaddr
, Pmode
));
17379 /* Determine if we need to load FNADDR from the GOT. */
17381 && (mips_got_symbol_type_p
17382 (mips_classify_symbol (fnaddr
, SYMBOL_CONTEXT_LEA
))))
17384 /* Pick a global pointer. Use a call-clobbered register if
17385 TARGET_CALL_SAVED_GP. */
17386 cfun
->machine
->global_pointer
17387 = TARGET_CALL_SAVED_GP
? 15 : GLOBAL_POINTER_REGNUM
;
17388 cfun
->machine
->must_initialize_gp_p
= true;
17389 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
17391 /* Set up the global pointer for n32 or n64 abicalls. */
17392 mips_emit_loadgp ();
17395 /* We need two temporary registers in some cases. */
17396 temp1
= gen_rtx_REG (Pmode
, 2);
17397 temp2
= gen_rtx_REG (Pmode
, 3);
17399 /* Find out which register contains the "this" pointer. */
17400 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
17401 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
+ 1);
17403 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
17405 /* Add DELTA to THIS_RTX. */
17408 rtx offset
= GEN_INT (delta
);
/* Large deltas must go through a register first.  */
17409 if (!SMALL_OPERAND (delta
))
17411 mips_emit_move (temp1
, offset
);
17414 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, offset
));
17417 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
17418 if (vcall_offset
!= 0)
17422 /* Set TEMP1 to *THIS_RTX. */
17423 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, this_rtx
));
17425 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
17426 addr
= mips_add_offset (temp2
, temp1
, vcall_offset
);
17428 /* Load the offset and add it to THIS_RTX. */
17429 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, addr
));
17430 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, temp1
));
17433 /* Jump to the target function. Use a sibcall if direct jumps are
17434 allowed, otherwise load the address into a register first. */
17437 insn
= emit_call_insn (gen_sibcall_internal (fnaddr
, const0_rtx
));
17438 SIBLING_CALL_P (insn
) = 1;
17442 /* This is messy. GAS treats "la $25,foo" as part of a call
17443 sequence and may allow a global "foo" to be lazily bound.
17444 The general move patterns therefore reject this combination.
17446 In this context, lazy binding would actually be OK
17447 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
17448 TARGET_CALL_SAVED_GP; see mips_load_call_address.
17449 We must therefore load the address via a temporary
17450 register if mips_dangerous_for_la25_p.
17452 If we jump to the temporary register rather than $25,
17453 the assembler can use the move insn to fill the jump's
17456 We can use the same technique for MIPS16 code, where $25
17457 is not a valid JR register. */
17458 if (TARGET_USE_PIC_FN_ADDR_REG
17460 && !mips_dangerous_for_la25_p (fnaddr
))
17461 temp1
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
17462 mips_load_call_address (MIPS_CALL_SIBCALL
, temp1
, fnaddr
);
17464 if (TARGET_USE_PIC_FN_ADDR_REG
17465 && REGNO (temp1
) != PIC_FUNCTION_ADDR_REGNUM
)
17466 mips_emit_move (gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
), temp1
);
17467 emit_jump_insn (gen_indirect_jump (temp1
));
17470 /* Run just enough of rest_of_compilation. This sequence was
17471 "borrowed" from alpha.c. */
17472 insn
= get_insns ();
17473 split_all_insns_noflow ();
17474 mips16_lay_out_constants (true);
17475 shorten_branches (insn
);
17476 final_start_function (insn
, file
, 1);
17477 final (insn
, file
, 1);
17478 final_end_function ();
17480 /* Clean up the vars set above. Note that final_end_function resets
17481 the global pointer for us. */
17482 reload_completed
= 0;
17486 /* The last argument passed to mips_set_compression_mode,
17487 or negative if the function hasn't been called yet. */
/* The variable is unsigned, so -1 here is really UINT_MAX; the
   "hasn't been called" sentinel compares equal to (unsigned) -1.  */
17488 static unsigned int old_compression_mode
= -1;
17490 /* Set up the target-dependent global state for ISA mode COMPRESSION_MODE,
17491 which is either MASK_MIPS16 or MASK_MICROMIPS. */
17494 mips_set_compression_mode (unsigned int compression_mode
)
17497 if (compression_mode
== old_compression_mode
)
17500 /* Restore base settings of various flags. */
17501 target_flags
= mips_base_target_flags
;
17502 flag_schedule_insns
= mips_base_schedule_insns
;
17503 flag_reorder_blocks_and_partition
= mips_base_reorder_blocks_and_partition
;
17504 flag_move_loop_invariants
= mips_base_move_loop_invariants
;
17505 align_loops
= mips_base_align_loops
;
17506 align_jumps
= mips_base_align_jumps
;
17507 align_functions
= mips_base_align_functions
;
17508 target_flags
&= ~(MASK_MIPS16
| MASK_MICROMIPS
);
17509 target_flags
|= compression_mode
;
17511 if (compression_mode
& MASK_MIPS16
)
17513 /* Switch to MIPS16 mode. */
17514 target_flags
|= MASK_MIPS16
;
17516 /* Turn off SYNCI if it was on, MIPS16 doesn't support it. */
17517 target_flags
&= ~MASK_SYNCI
;
17519 /* Don't run the scheduler before reload, since it tends to
17520 increase register pressure. */
17521 flag_schedule_insns
= 0;
17523 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
17524 the whole function to be in a single section. */
17525 flag_reorder_blocks_and_partition
= 0;
17527 /* Don't move loop invariants, because it tends to increase
17528 register pressure. It also introduces an extra move in cases
17529 where the constant is the first operand in a two-operand binary
17530 instruction, or when it forms a register argument to a functon
17532 flag_move_loop_invariants
= 0;
17534 target_flags
|= MASK_EXPLICIT_RELOCS
;
17536 /* Experiments suggest we get the best overall section-anchor
17537 results from using the range of an unextended LW or SW. Code
17538 that makes heavy use of byte or short accesses can do better
17539 with ranges of 0...31 and 0...63 respectively, but most code is
17540 sensitive to the range of LW and SW instead. */
17541 targetm
.min_anchor_offset
= 0;
17542 targetm
.max_anchor_offset
= 127;
17544 targetm
.const_anchor
= 0;
17546 /* MIPS16 has no BAL instruction. */
17547 target_flags
&= ~MASK_RELAX_PIC_CALLS
;
17549 /* The R4000 errata don't apply to any known MIPS16 cores.
17550 It's simpler to make the R4000 fixes and MIPS16 mode
17551 mutually exclusive. */
17552 target_flags
&= ~MASK_FIX_R4000
;
17554 if (flag_pic
&& !TARGET_OLDABI
)
17555 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
17558 sorry ("MIPS16 -mxgot code");
17560 if (TARGET_HARD_FLOAT_ABI
&& !TARGET_OLDABI
)
17561 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
17565 /* Switch to microMIPS or the standard encoding. */
17567 if (TARGET_MICROMIPS
)
17568 /* Avoid branch likely. */
17569 target_flags
&= ~MASK_BRANCHLIKELY
;
17571 /* Provide default values for align_* for 64-bit targets. */
17574 if (align_loops
== 0)
17576 if (align_jumps
== 0)
17578 if (align_functions
== 0)
17579 align_functions
= 8;
17582 targetm
.min_anchor_offset
= -32768;
17583 targetm
.max_anchor_offset
= 32767;
17585 targetm
.const_anchor
= 0x8000;
17588 /* (Re)initialize MIPS target internals for new ISA. */
17589 mips_init_relocs ();
17591 if (compression_mode
& MASK_MIPS16
)
17593 if (!mips16_globals
)
17594 mips16_globals
= save_target_globals_default_opts ();
17596 restore_target_globals (mips16_globals
);
17598 else if (compression_mode
& MASK_MICROMIPS
)
17600 if (!micromips_globals
)
17601 micromips_globals
= save_target_globals_default_opts ();
17603 restore_target_globals (micromips_globals
);
17606 restore_target_globals (&default_target_globals
);
17608 old_compression_mode
= compression_mode
;
17611 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
17612 function should use the MIPS16 or microMIPS ISA and switch modes
17616 mips_set_current_function (tree fndecl
)
17618 mips_set_compression_mode (mips_get_compress_mode (fndecl
));
17621 /* Allocate a chunk of memory for per-function machine-dependent data. */
17623 static struct machine_function
*
17624 mips_init_machine_status (void)
17626 return ggc_cleared_alloc
<machine_function
> ();
17629 /* Return the processor associated with the given ISA level, or null
17630 if the ISA isn't valid. */
17632 static const struct mips_cpu_info
*
17633 mips_cpu_info_from_isa (int isa
)
17637 for (i
= 0; i
< ARRAY_SIZE (mips_cpu_info_table
); i
++)
17638 if (mips_cpu_info_table
[i
].isa
== isa
)
17639 return mips_cpu_info_table
+ i
;
17644 /* Return a mips_cpu_info entry determined by an option valued
17647 static const struct mips_cpu_info
*
17648 mips_cpu_info_from_opt (int opt
)
17652 case MIPS_ARCH_OPTION_FROM_ABI
:
17653 /* 'from-abi' selects the most compatible architecture for the
17654 given ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit
17655 ABIs. For the EABIs, we have to decide whether we're using
17656 the 32-bit or 64-bit version. */
17657 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS
? 1
17658 : ABI_NEEDS_64BIT_REGS
? 3
17659 : (TARGET_64BIT
? 3 : 1));
17661 case MIPS_ARCH_OPTION_NATIVE
:
17662 gcc_unreachable ();
17665 return &mips_cpu_info_table
[opt
];
17669 /* Return a default mips_cpu_info entry, given that no -march= option
17670 was explicitly specified. */
17672 static const struct mips_cpu_info
*
17673 mips_default_arch (void)
17675 #if defined (MIPS_CPU_STRING_DEFAULT)
17677 for (i
= 0; i
< ARRAY_SIZE (mips_cpu_info_table
); i
++)
17678 if (strcmp (mips_cpu_info_table
[i
].name
, MIPS_CPU_STRING_DEFAULT
) == 0)
17679 return mips_cpu_info_table
+ i
;
17680 gcc_unreachable ();
17681 #elif defined (MIPS_ISA_DEFAULT)
17682 return mips_cpu_info_from_isa (MIPS_ISA_DEFAULT
);
17684 /* 'from-abi' makes a good default: you get whatever the ABI
17686 return mips_cpu_info_from_opt (MIPS_ARCH_OPTION_FROM_ABI
);
17690 /* Set up globals to generate code for the ISA or processor
17691 described by INFO. */
17694 mips_set_architecture (const struct mips_cpu_info
*info
)
17698 mips_arch_info
= info
;
17699 mips_arch
= info
->cpu
;
17700 mips_isa
= info
->isa
;
17704 mips_isa_rev
= (mips_isa
& 31) + 1;
17708 /* Likewise for tuning. */
17711 mips_set_tune (const struct mips_cpu_info
*info
)
17715 mips_tune_info
= info
;
17716 mips_tune
= info
->cpu
;
17720 /* Implement TARGET_OPTION_OVERRIDE. */
17723 mips_option_override (void)
17725 int i
, start
, regno
, mode
;
17727 if (global_options_set
.x_mips_isa_option
)
17728 mips_isa_option_info
= &mips_cpu_info_table
[mips_isa_option
];
17730 #ifdef SUBTARGET_OVERRIDE_OPTIONS
17731 SUBTARGET_OVERRIDE_OPTIONS
;
17734 /* MIPS16 and microMIPS cannot coexist. */
17735 if (TARGET_MICROMIPS
&& TARGET_MIPS16
)
17736 error ("unsupported combination: %s", "-mips16 -mmicromips");
17738 /* Save the base compression state and process flags as though we
17739 were generating uncompressed code. */
17740 mips_base_compression_flags
= TARGET_COMPRESSION
;
17741 target_flags
&= ~TARGET_COMPRESSION
;
17743 /* -mno-float overrides -mhard-float and -msoft-float. */
17744 if (TARGET_NO_FLOAT
)
17746 target_flags
|= MASK_SOFT_FLOAT_ABI
;
17747 target_flags_explicit
|= MASK_SOFT_FLOAT_ABI
;
17750 if (TARGET_FLIP_MIPS16
)
17751 TARGET_INTERLINK_COMPRESSED
= 1;
17753 /* Set the small data limit. */
17754 mips_small_data_threshold
= (global_options_set
.x_g_switch_value
17756 : MIPS_DEFAULT_GVALUE
);
17758 /* The following code determines the architecture and register size.
17759 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
17760 The GAS and GCC code should be kept in sync as much as possible. */
17762 if (global_options_set
.x_mips_arch_option
)
17763 mips_set_architecture (mips_cpu_info_from_opt (mips_arch_option
));
17765 if (mips_isa_option_info
!= 0)
17767 if (mips_arch_info
== 0)
17768 mips_set_architecture (mips_isa_option_info
);
17769 else if (mips_arch_info
->isa
!= mips_isa_option_info
->isa
)
17770 error ("%<-%s%> conflicts with the other architecture options, "
17771 "which specify a %s processor",
17772 mips_isa_option_info
->name
,
17773 mips_cpu_info_from_isa (mips_arch_info
->isa
)->name
);
17776 if (mips_arch_info
== 0)
17777 mips_set_architecture (mips_default_arch ());
17779 if (ABI_NEEDS_64BIT_REGS
&& !ISA_HAS_64BIT_REGS
)
17780 error ("%<-march=%s%> is not compatible with the selected ABI",
17781 mips_arch_info
->name
);
17783 /* Optimize for mips_arch, unless -mtune selects a different processor. */
17784 if (global_options_set
.x_mips_tune_option
)
17785 mips_set_tune (mips_cpu_info_from_opt (mips_tune_option
));
17787 if (mips_tune_info
== 0)
17788 mips_set_tune (mips_arch_info
);
17790 if ((target_flags_explicit
& MASK_64BIT
) != 0)
17792 /* The user specified the size of the integer registers. Make sure
17793 it agrees with the ABI and ISA. */
17794 if (TARGET_64BIT
&& !ISA_HAS_64BIT_REGS
)
17795 error ("%<-mgp64%> used with a 32-bit processor");
17796 else if (!TARGET_64BIT
&& ABI_NEEDS_64BIT_REGS
)
17797 error ("%<-mgp32%> used with a 64-bit ABI");
17798 else if (TARGET_64BIT
&& ABI_NEEDS_32BIT_REGS
)
17799 error ("%<-mgp64%> used with a 32-bit ABI");
17803 /* Infer the integer register size from the ABI and processor.
17804 Restrict ourselves to 32-bit registers if that's all the
17805 processor has, or if the ABI cannot handle 64-bit registers. */
17806 if (ABI_NEEDS_32BIT_REGS
|| !ISA_HAS_64BIT_REGS
)
17807 target_flags
&= ~MASK_64BIT
;
17809 target_flags
|= MASK_64BIT
;
17812 if ((target_flags_explicit
& MASK_FLOAT64
) != 0)
17814 if (mips_isa_rev
>= 6 && !TARGET_FLOAT64
)
17815 error ("the %qs architecture does not support %<-mfp32%>",
17816 mips_arch_info
->name
);
17817 else if (TARGET_SINGLE_FLOAT
&& TARGET_FLOAT64
)
17818 error ("unsupported combination: %s", "-mfp64 -msingle-float");
17819 else if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
&& !TARGET_FLOAT64
)
17820 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
17821 else if (!TARGET_64BIT
&& TARGET_FLOAT64
)
17823 if (!ISA_HAS_MXHC1
)
17824 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
17825 " the target supports the mfhc1 and mthc1 instructions");
17826 else if (mips_abi
!= ABI_32
)
17827 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
17833 /* -msingle-float selects 32-bit float registers. On r6 and later,
17834 -mdouble-float selects 64-bit float registers, since the old paired
17835 register model is not supported. In other cases the float registers
17836 should be the same size as the integer ones. */
17837 if (mips_isa_rev
>= 6 && TARGET_DOUBLE_FLOAT
&& !TARGET_FLOATXX
)
17838 target_flags
|= MASK_FLOAT64
;
17839 else if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
)
17840 target_flags
|= MASK_FLOAT64
;
17842 target_flags
&= ~MASK_FLOAT64
;
17845 if (mips_abi
!= ABI_32
&& TARGET_FLOATXX
)
17846 error ("%<-mfpxx%> can only be used with the o32 ABI");
17847 else if (TARGET_FLOAT64
&& TARGET_FLOATXX
)
17848 error ("unsupported combination: %s", "-mfp64 -mfpxx");
17849 else if (ISA_MIPS1
&& !TARGET_FLOAT32
)
17850 error ("%<-march=%s%> requires %<-mfp32%>", mips_arch_info
->name
);
17851 else if (TARGET_FLOATXX
&& !mips_lra_flag
)
17852 error ("%<-mfpxx%> requires %<-mlra%>");
17854 /* End of code shared with GAS. */
17856 /* The R5900 FPU only supports single precision. */
17857 if (TARGET_MIPS5900
&& TARGET_HARD_FLOAT_ABI
&& TARGET_DOUBLE_FLOAT
)
17858 error ("unsupported combination: %s",
17859 "-march=r5900 -mhard-float -mdouble-float");
17861 /* If a -mlong* option was given, check that it matches the ABI,
17862 otherwise infer the -mlong* setting from the other options. */
17863 if ((target_flags_explicit
& MASK_LONG64
) != 0)
17867 if (mips_abi
== ABI_N32
)
17868 error ("%qs is incompatible with %qs", "-mabi=n32", "-mlong64");
17869 else if (mips_abi
== ABI_32
)
17870 error ("%qs is incompatible with %qs", "-mabi=32", "-mlong64");
17871 else if (mips_abi
== ABI_O64
&& TARGET_ABICALLS
)
17872 /* We have traditionally allowed non-abicalls code to use
17873 an LP64 form of o64. However, it would take a bit more
17874 effort to support the combination of 32-bit GOT entries
17875 and 64-bit pointers, so we treat the abicalls case as
17877 error ("the combination of %qs and %qs is incompatible with %qs",
17878 "-mabi=o64", "-mabicalls", "-mlong64");
17882 if (mips_abi
== ABI_64
)
17883 error ("%qs is incompatible with %qs", "-mabi=64", "-mlong32");
17888 if ((mips_abi
== ABI_EABI
&& TARGET_64BIT
) || mips_abi
== ABI_64
)
17889 target_flags
|= MASK_LONG64
;
17891 target_flags
&= ~MASK_LONG64
;
17894 if (!TARGET_OLDABI
)
17895 flag_pcc_struct_return
= 0;
17897 /* Decide which rtx_costs structure to use. */
17899 mips_cost
= &mips_rtx_cost_optimize_size
;
17901 mips_cost
= &mips_rtx_cost_data
[mips_tune
];
17903 /* If the user hasn't specified a branch cost, use the processor's
17905 if (mips_branch_cost
== 0)
17906 mips_branch_cost
= mips_cost
->branch_cost
;
17908 /* If neither -mbranch-likely nor -mno-branch-likely was given
17909 on the command line, set MASK_BRANCHLIKELY based on the target
17910 architecture and tuning flags. Annulled delay slots are a
17911 size win, so we only consider the processor-specific tuning
17912 for !optimize_size. */
17913 if ((target_flags_explicit
& MASK_BRANCHLIKELY
) == 0)
17915 if (ISA_HAS_BRANCHLIKELY
17917 || (mips_tune_info
->tune_flags
& PTF_AVOID_BRANCHLIKELY
) == 0))
17918 target_flags
|= MASK_BRANCHLIKELY
;
17920 target_flags
&= ~MASK_BRANCHLIKELY
;
17922 else if (TARGET_BRANCHLIKELY
&& !ISA_HAS_BRANCHLIKELY
)
17923 warning (0, "the %qs architecture does not support branch-likely"
17924 " instructions", mips_arch_info
->name
);
17926 /* If the user hasn't specified -mimadd or -mno-imadd set
17927 MASK_IMADD based on the target architecture and tuning
17929 if ((target_flags_explicit
& MASK_IMADD
) == 0)
17931 if (ISA_HAS_MADD_MSUB
&&
17932 (mips_tune_info
->tune_flags
& PTF_AVOID_IMADD
) == 0)
17933 target_flags
|= MASK_IMADD
;
17935 target_flags
&= ~MASK_IMADD
;
17937 else if (TARGET_IMADD
&& !ISA_HAS_MADD_MSUB
)
17938 warning (0, "the %qs architecture does not support madd or msub"
17939 " instructions", mips_arch_info
->name
);
17941 /* If neither -modd-spreg nor -mno-odd-spreg was given on the command
17942 line, set MASK_ODD_SPREG based on the ISA and ABI. */
17943 if ((target_flags_explicit
& MASK_ODD_SPREG
) == 0)
17945 /* Disable TARGET_ODD_SPREG when using the o32 FPXX ABI. */
17946 if (!ISA_HAS_ODD_SPREG
|| TARGET_FLOATXX
)
17947 target_flags
&= ~MASK_ODD_SPREG
;
17949 target_flags
|= MASK_ODD_SPREG
;
17951 else if (TARGET_ODD_SPREG
&& !ISA_HAS_ODD_SPREG
)
17952 warning (0, "the %qs architecture does not support odd single-precision"
17953 " registers", mips_arch_info
->name
);
17955 if (!TARGET_ODD_SPREG
&& TARGET_64BIT
)
17957 error ("unsupported combination: %s", "-mgp64 -mno-odd-spreg");
17958 /* Allow compilation to continue further even though invalid output
17959 will be produced. */
17960 target_flags
|= MASK_ODD_SPREG
;
17963 if (!ISA_HAS_COMPACT_BRANCHES
&& mips_cb
== MIPS_CB_ALWAYS
)
17965 error ("unsupported combination: %qs%s %s",
17966 mips_arch_info
->name
, TARGET_MICROMIPS
? " -mmicromips" : "",
17967 "-mcompact-branches=always");
17969 else if (!ISA_HAS_DELAY_SLOTS
&& mips_cb
== MIPS_CB_NEVER
)
17971 error ("unsupported combination: %qs%s %s",
17972 mips_arch_info
->name
, TARGET_MICROMIPS
? " -mmicromips" : "",
17973 "-mcompact-branches=never");
17976 /* Require explicit relocs for MIPS R6 onwards. This enables simplification
17977 of the compact branch and jump support through the backend. */
17978 if (!TARGET_EXPLICIT_RELOCS
&& mips_isa_rev
>= 6)
17980 error ("unsupported combination: %qs %s",
17981 mips_arch_info
->name
, "-mno-explicit-relocs");
17984 /* The effect of -mabicalls isn't defined for the EABI. */
17985 if (mips_abi
== ABI_EABI
&& TARGET_ABICALLS
)
17987 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
17988 target_flags
&= ~MASK_ABICALLS
;
17991 /* PIC requires -mabicalls. */
17994 if (mips_abi
== ABI_EABI
)
17995 error ("cannot generate position-independent code for %qs",
17997 else if (!TARGET_ABICALLS
)
17998 error ("position-independent code requires %qs", "-mabicalls");
18001 if (TARGET_ABICALLS_PIC2
)
18002 /* We need to set flag_pic for executables as well as DSOs
18003 because we may reference symbols that are not defined in
18004 the final executable. (MIPS does not use things like
18005 copy relocs, for example.)
18007 There is a body of code that uses __PIC__ to distinguish
18008 between -mabicalls and -mno-abicalls code. The non-__PIC__
18009 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
18010 long as any indirect jumps use $25. */
18013 /* -mvr4130-align is a "speed over size" optimization: it usually produces
18014 faster code, but at the expense of more nops. Enable it at -O3 and
18016 if (optimize
> 2 && (target_flags_explicit
& MASK_VR4130_ALIGN
) == 0)
18017 target_flags
|= MASK_VR4130_ALIGN
;
18019 /* Prefer a call to memcpy over inline code when optimizing for size,
18020 though see MOVE_RATIO in mips.h. */
18021 if (optimize_size
&& (target_flags_explicit
& MASK_MEMCPY
) == 0)
18022 target_flags
|= MASK_MEMCPY
;
18024 /* If we have a nonzero small-data limit, check that the -mgpopt
18025 setting is consistent with the other target flags. */
18026 if (mips_small_data_threshold
> 0)
18030 if (!TARGET_EXPLICIT_RELOCS
)
18031 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
18033 TARGET_LOCAL_SDATA
= false;
18034 TARGET_EXTERN_SDATA
= false;
18038 if (TARGET_VXWORKS_RTP
)
18039 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
18041 if (TARGET_ABICALLS
)
18042 warning (0, "cannot use small-data accesses for %qs",
18047 /* Set NaN and ABS defaults. */
18048 if (mips_nan
== MIPS_IEEE_754_DEFAULT
&& !ISA_HAS_IEEE_754_LEGACY
)
18049 mips_nan
= MIPS_IEEE_754_2008
;
18050 if (mips_abs
== MIPS_IEEE_754_DEFAULT
&& !ISA_HAS_IEEE_754_LEGACY
)
18051 mips_abs
= MIPS_IEEE_754_2008
;
18053 /* Check for IEEE 754 legacy/2008 support. */
18054 if ((mips_nan
== MIPS_IEEE_754_LEGACY
18055 || mips_abs
== MIPS_IEEE_754_LEGACY
)
18056 && !ISA_HAS_IEEE_754_LEGACY
)
18057 warning (0, "the %qs architecture does not support %<-m%s=legacy%>",
18058 mips_arch_info
->name
,
18059 mips_nan
== MIPS_IEEE_754_LEGACY
? "nan" : "abs");
18061 if ((mips_nan
== MIPS_IEEE_754_2008
18062 || mips_abs
== MIPS_IEEE_754_2008
)
18063 && !ISA_HAS_IEEE_754_2008
)
18064 warning (0, "the %qs architecture does not support %<-m%s=2008%>",
18065 mips_arch_info
->name
,
18066 mips_nan
== MIPS_IEEE_754_2008
? "nan" : "abs");
18068 /* Pre-IEEE 754-2008 MIPS hardware has a quirky almost-IEEE format
18069 for all its floating point. */
18070 if (mips_nan
!= MIPS_IEEE_754_2008
)
18072 REAL_MODE_FORMAT (SFmode
) = &mips_single_format
;
18073 REAL_MODE_FORMAT (DFmode
) = &mips_double_format
;
18074 REAL_MODE_FORMAT (TFmode
) = &mips_quad_format
;
18077 /* Make sure that the user didn't turn off paired single support when
18078 MIPS-3D support is requested. */
18080 && (target_flags_explicit
& MASK_PAIRED_SINGLE_FLOAT
)
18081 && !TARGET_PAIRED_SINGLE_FLOAT
)
18082 error ("%<-mips3d%> requires %<-mpaired-single%>");
18084 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
18086 target_flags
|= MASK_PAIRED_SINGLE_FLOAT
;
18088 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
18089 and TARGET_HARD_FLOAT_ABI are both true. */
18090 if (TARGET_PAIRED_SINGLE_FLOAT
&& !(TARGET_FLOAT64
&& TARGET_HARD_FLOAT_ABI
))
18092 error ("%qs must be used with %qs",
18093 TARGET_MIPS3D
? "-mips3d" : "-mpaired-single",
18094 TARGET_HARD_FLOAT_ABI
? "-mfp64" : "-mhard-float");
18095 target_flags
&= ~MASK_PAIRED_SINGLE_FLOAT
;
18099 /* Make sure that -mpaired-single is only used on ISAs that support it.
18100 We must disable it otherwise since it relies on other ISA properties
18101 like ISA_HAS_8CC having their normal values. */
18102 if (TARGET_PAIRED_SINGLE_FLOAT
&& !ISA_HAS_PAIRED_SINGLE
)
18104 error ("the %qs architecture does not support paired-single"
18105 " instructions", mips_arch_info
->name
);
18106 target_flags
&= ~MASK_PAIRED_SINGLE_FLOAT
;
18110 if (mips_r10k_cache_barrier
!= R10K_CACHE_BARRIER_NONE
18111 && !TARGET_CACHE_BUILTIN
)
18113 error ("%qs requires a target that provides the %qs instruction",
18114 "-mr10k-cache-barrier", "cache");
18115 mips_r10k_cache_barrier
= R10K_CACHE_BARRIER_NONE
;
18118 /* If TARGET_DSPR2, enable TARGET_DSP. */
18122 if (TARGET_DSP
&& mips_isa_rev
>= 6)
18124 error ("the %qs architecture does not support DSP instructions",
18125 mips_arch_info
->name
);
18126 TARGET_DSP
= false;
18127 TARGET_DSPR2
= false;
18130 /* .eh_frame addresses should be the same width as a C pointer.
18131 Most MIPS ABIs support only one pointer size, so the assembler
18132 will usually know exactly how big an .eh_frame address is.
18134 Unfortunately, this is not true of the 64-bit EABI. The ABI was
18135 originally defined to use 64-bit pointers (i.e. it is LP64), and
18136 this is still the default mode. However, we also support an n32-like
18137 ILP32 mode, which is selected by -mlong32. The problem is that the
18138 assembler has traditionally not had an -mlong option, so it has
18139 traditionally not known whether we're using the ILP32 or LP64 form.
18141 As it happens, gas versions up to and including 2.19 use _32-bit_
18142 addresses for EABI64 .cfi_* directives. This is wrong for the
18143 default LP64 mode, so we can't use the directives by default.
18144 Moreover, since gas's current behavior is at odds with gcc's
18145 default behavior, it seems unwise to rely on future versions
18146 of gas behaving the same way. We therefore avoid using .cfi
18147 directives for -mlong32 as well. */
18148 if (mips_abi
== ABI_EABI
&& TARGET_64BIT
)
18149 flag_dwarf2_cfi_asm
= 0;
18151 /* .cfi_* directives generate a read-only section, so fall back on
18152 manual .eh_frame creation if we need the section to be writable. */
18153 if (TARGET_WRITABLE_EH_FRAME
)
18154 flag_dwarf2_cfi_asm
= 0;
18156 mips_init_print_operand_punct ();
18158 /* Set up array to map GCC register number to debug register number.
18159 Ignore the special purpose register numbers. */
18161 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
18163 mips_dbx_regno
[i
] = IGNORED_DWARF_REGNUM
;
18164 if (GP_REG_P (i
) || FP_REG_P (i
) || ALL_COP_REG_P (i
))
18165 mips_dwarf_regno
[i
] = i
;
18167 mips_dwarf_regno
[i
] = INVALID_REGNUM
;
18170 start
= GP_DBX_FIRST
- GP_REG_FIRST
;
18171 for (i
= GP_REG_FIRST
; i
<= GP_REG_LAST
; i
++)
18172 mips_dbx_regno
[i
] = i
+ start
;
18174 start
= FP_DBX_FIRST
- FP_REG_FIRST
;
18175 for (i
= FP_REG_FIRST
; i
<= FP_REG_LAST
; i
++)
18176 mips_dbx_regno
[i
] = i
+ start
;
18178 /* Accumulator debug registers use big-endian ordering. */
18179 mips_dbx_regno
[HI_REGNUM
] = MD_DBX_FIRST
+ 0;
18180 mips_dbx_regno
[LO_REGNUM
] = MD_DBX_FIRST
+ 1;
18181 mips_dwarf_regno
[HI_REGNUM
] = MD_REG_FIRST
+ 0;
18182 mips_dwarf_regno
[LO_REGNUM
] = MD_REG_FIRST
+ 1;
18183 for (i
= DSP_ACC_REG_FIRST
; i
<= DSP_ACC_REG_LAST
; i
+= 2)
18185 mips_dwarf_regno
[i
+ TARGET_LITTLE_ENDIAN
] = i
;
18186 mips_dwarf_regno
[i
+ TARGET_BIG_ENDIAN
] = i
+ 1;
18189 /* Set up mips_hard_regno_mode_ok. */
18190 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
18191 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
18192 mips_hard_regno_mode_ok
[mode
][regno
]
18193 = mips_hard_regno_mode_ok_p (regno
, (machine_mode
) mode
);
18195 /* Function to allocate machine-dependent function status. */
18196 init_machine_status
= &mips_init_machine_status
;
18198 /* Default to working around R4000 errata only if the processor
18199 was selected explicitly. */
18200 if ((target_flags_explicit
& MASK_FIX_R4000
) == 0
18201 && strcmp (mips_arch_info
->name
, "r4000") == 0)
18202 target_flags
|= MASK_FIX_R4000
;
18204 /* Default to working around R4400 errata only if the processor
18205 was selected explicitly. */
18206 if ((target_flags_explicit
& MASK_FIX_R4400
) == 0
18207 && strcmp (mips_arch_info
->name
, "r4400") == 0)
18208 target_flags
|= MASK_FIX_R4400
;
18210 /* Default to working around R10000 errata only if the processor
18211 was selected explicitly. */
18212 if ((target_flags_explicit
& MASK_FIX_R10000
) == 0
18213 && strcmp (mips_arch_info
->name
, "r10000") == 0)
18214 target_flags
|= MASK_FIX_R10000
;
18216 /* Make sure that branch-likely instructions available when using
18217 -mfix-r10000. The instructions are not available if either:
18219 1. -mno-branch-likely was passed.
18220 2. The selected ISA does not support branch-likely and
18221 the command line does not include -mbranch-likely. */
18222 if (TARGET_FIX_R10000
18223 && ((target_flags_explicit
& MASK_BRANCHLIKELY
) == 0
18224 ? !ISA_HAS_BRANCHLIKELY
18225 : !TARGET_BRANCHLIKELY
))
18226 sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
18228 if (TARGET_SYNCI
&& !ISA_HAS_SYNCI
)
18230 warning (0, "the %qs architecture does not support the synci "
18231 "instruction", mips_arch_info
->name
);
18232 target_flags
&= ~MASK_SYNCI
;
18235 /* Only optimize PIC indirect calls if they are actually required. */
18236 if (!TARGET_USE_GOT
|| !TARGET_EXPLICIT_RELOCS
)
18237 target_flags
&= ~MASK_RELAX_PIC_CALLS
;
18239 /* Save base state of options. */
18240 mips_base_target_flags
= target_flags
;
18241 mips_base_schedule_insns
= flag_schedule_insns
;
18242 mips_base_reorder_blocks_and_partition
= flag_reorder_blocks_and_partition
;
18243 mips_base_move_loop_invariants
= flag_move_loop_invariants
;
18244 mips_base_align_loops
= align_loops
;
18245 mips_base_align_jumps
= align_jumps
;
18246 mips_base_align_functions
= align_functions
;
18248 /* Now select the ISA mode.
18250 Do all CPP-sensitive stuff in uncompressed mode; we'll switch modes
18251 later if required. */
18252 mips_set_compression_mode (0);
18254 /* We register a second machine specific reorg pass after delay slot
18255 filling. Registering the pass must be done at start up. It's
18256 convenient to do it here. */
18257 opt_pass
*new_pass
= make_pass_mips_machine_reorg2 (g
);
18258 struct register_pass_info insert_pass_mips_machine_reorg2
=
18260 new_pass
, /* pass */
18261 "dbr", /* reference_pass_name */
18262 1, /* ref_pass_instance_number */
18263 PASS_POS_INSERT_AFTER
/* po_op */
18265 register_pass (&insert_pass_mips_machine_reorg2
);
18267 if (TARGET_HARD_FLOAT_ABI
&& TARGET_MIPS5900
)
18268 REAL_MODE_FORMAT (SFmode
) = &spu_single_format
;
18270 mips_register_frame_header_opt ();
18273 /* Swap the register information for registers I and I + 1, which
18274 currently have the wrong endianness. Note that the registers'
18275 fixedness and call-clobberedness might have been set on the
18279 mips_swap_registers (unsigned int i
)
18284 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
18285 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
18287 SWAP_INT (fixed_regs
[i
], fixed_regs
[i
+ 1]);
18288 SWAP_INT (call_used_regs
[i
], call_used_regs
[i
+ 1]);
18289 SWAP_INT (call_really_used_regs
[i
], call_really_used_regs
[i
+ 1]);
18290 SWAP_STRING (reg_names
[i
], reg_names
[i
+ 1]);
18296 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
18299 mips_conditional_register_usage (void)
18304 /* These DSP control register fields are global. */
18305 global_regs
[CCDSP_PO_REGNUM
] = 1;
18306 global_regs
[CCDSP_SC_REGNUM
] = 1;
18309 AND_COMPL_HARD_REG_SET (accessible_reg_set
,
18310 reg_class_contents
[(int) DSP_ACC_REGS
]);
18313 AND_COMPL_HARD_REG_SET (accessible_reg_set
,
18314 reg_class_contents
[(int) MD_REGS
]);
18316 if (!TARGET_HARD_FLOAT
)
18318 AND_COMPL_HARD_REG_SET (accessible_reg_set
,
18319 reg_class_contents
[(int) FP_REGS
]);
18320 AND_COMPL_HARD_REG_SET (accessible_reg_set
,
18321 reg_class_contents
[(int) ST_REGS
]);
18323 else if (!ISA_HAS_8CC
)
18325 /* We only have a single condition-code register. We implement
18326 this by fixing all the condition-code registers and generating
18327 RTL that refers directly to ST_REG_FIRST. */
18328 AND_COMPL_HARD_REG_SET (accessible_reg_set
,
18329 reg_class_contents
[(int) ST_REGS
]);
18331 SET_HARD_REG_BIT (accessible_reg_set
, FPSW_REGNUM
);
18332 fixed_regs
[FPSW_REGNUM
] = call_used_regs
[FPSW_REGNUM
] = 1;
18336 /* In MIPS16 mode, we prohibit the unused $s registers, since they
18337 are call-saved, and saving them via a MIPS16 register would
18338 probably waste more time than just reloading the value.
18340 We permit the $t temporary registers when optimizing for speed
18341 but not when optimizing for space because using them results in
18342 code that is larger (but faster) then not using them. We do
18343 allow $24 (t8) because it is used in CMP and CMPI instructions
18344 and $25 (t9) because it is used as the function call address in
18347 fixed_regs
[18] = call_used_regs
[18] = 1;
18348 fixed_regs
[19] = call_used_regs
[19] = 1;
18349 fixed_regs
[20] = call_used_regs
[20] = 1;
18350 fixed_regs
[21] = call_used_regs
[21] = 1;
18351 fixed_regs
[22] = call_used_regs
[22] = 1;
18352 fixed_regs
[23] = call_used_regs
[23] = 1;
18353 fixed_regs
[26] = call_used_regs
[26] = 1;
18354 fixed_regs
[27] = call_used_regs
[27] = 1;
18355 fixed_regs
[30] = call_used_regs
[30] = 1;
18358 fixed_regs
[8] = call_used_regs
[8] = 1;
18359 fixed_regs
[9] = call_used_regs
[9] = 1;
18360 fixed_regs
[10] = call_used_regs
[10] = 1;
18361 fixed_regs
[11] = call_used_regs
[11] = 1;
18362 fixed_regs
[12] = call_used_regs
[12] = 1;
18363 fixed_regs
[13] = call_used_regs
[13] = 1;
18364 fixed_regs
[14] = call_used_regs
[14] = 1;
18365 fixed_regs
[15] = call_used_regs
[15] = 1;
18368 /* Do not allow HI and LO to be treated as register operands.
18369 There are no MTHI or MTLO instructions (or any real need
18370 for them) and one-way registers cannot easily be reloaded. */
18371 AND_COMPL_HARD_REG_SET (operand_reg_set
,
18372 reg_class_contents
[(int) MD_REGS
]);
18374 /* $f20-$f23 are call-clobbered for n64. */
18375 if (mips_abi
== ABI_64
)
18378 for (regno
= FP_REG_FIRST
+ 20; regno
< FP_REG_FIRST
+ 24; regno
++)
18379 call_really_used_regs
[regno
] = call_used_regs
[regno
] = 1;
18381 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
18382 for n32 and o32 FP64. */
18383 if (mips_abi
== ABI_N32
18384 || (mips_abi
== ABI_32
18385 && TARGET_FLOAT64
))
18388 for (regno
= FP_REG_FIRST
+ 21; regno
<= FP_REG_FIRST
+ 31; regno
+=2)
18389 call_really_used_regs
[regno
] = call_used_regs
[regno
] = 1;
18391 /* Make sure that double-register accumulator values are correctly
18392 ordered for the current endianness. */
18393 if (TARGET_LITTLE_ENDIAN
)
18395 unsigned int regno
;
18397 mips_swap_registers (MD_REG_FIRST
);
18398 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
+= 2)
18399 mips_swap_registers (regno
);
18403 /* Implement EH_USES. */
18406 mips_eh_uses (unsigned int regno
)
18408 if (reload_completed
&& !TARGET_ABSOLUTE_JUMPS
)
18410 /* We need to force certain registers to be live in order to handle
18411 PIC long branches correctly. See mips_must_initialize_gp_p for
18413 if (mips_cfun_has_cprestore_slot_p ())
18415 if (regno
== CPRESTORE_SLOT_REGNUM
)
18420 if (cfun
->machine
->global_pointer
== regno
)
18428 /* Implement EPILOGUE_USES. */
18431 mips_epilogue_uses (unsigned int regno
)
18433 /* Say that the epilogue uses the return address register. Note that
18434 in the case of sibcalls, the values "used by the epilogue" are
18435 considered live at the start of the called function. */
18436 if (regno
== RETURN_ADDR_REGNUM
)
18439 /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
18440 See the comment above load_call<mode> for details. */
18441 if (TARGET_USE_GOT
&& (regno
) == GOT_VERSION_REGNUM
)
18444 /* An interrupt handler must preserve some registers that are
18445 ordinarily call-clobbered. */
18446 if (cfun
->machine
->interrupt_handler_p
18447 && mips_interrupt_extra_call_saved_reg_p (regno
))
18453 /* Return true if INSN needs to be wrapped in ".set noat".
18454 INSN has NOPERANDS operands, stored in OPVEC. */
18457 mips_need_noat_wrapper_p (rtx_insn
*insn
, rtx
*opvec
, int noperands
)
18459 if (recog_memoized (insn
) >= 0)
18461 subrtx_iterator::array_type array
;
18462 for (int i
= 0; i
< noperands
; i
++)
18463 FOR_EACH_SUBRTX (iter
, array
, opvec
[i
], NONCONST
)
18464 if (REG_P (*iter
) && REGNO (*iter
) == AT_REGNUM
)
18470 /* Implement FINAL_PRESCAN_INSN. */
18473 mips_final_prescan_insn (rtx_insn
*insn
, rtx
*opvec
, int noperands
)
18475 if (mips_need_noat_wrapper_p (insn
, opvec
, noperands
))
18476 mips_push_asm_switch (&mips_noat
);
18479 /* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. */
18482 mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED
, rtx_insn
*insn
,
18483 rtx
*opvec
, int noperands
)
18485 if (mips_need_noat_wrapper_p (insn
, opvec
, noperands
))
18486 mips_pop_asm_switch (&mips_noat
);
18489 /* Return the function that is used to expand the <u>mulsidi3 pattern.
18490 EXT_CODE is the code of the extension used. Return NULL if widening
18491 multiplication shouldn't be used. */
18494 mips_mulsidi3_gen_fn (enum rtx_code ext_code
)
18498 signed_p
= ext_code
== SIGN_EXTEND
;
18501 /* Don't use widening multiplication with MULT when we have DMUL. Even
18502 with the extension of its input operands DMUL is faster. Note that
18503 the extension is not needed for signed multiplication. In order to
18504 ensure that we always remove the redundant sign-extension in this
18505 case we still expand mulsidi3 for DMUL. */
18506 if (ISA_HAS_R6DMUL
)
18507 return signed_p
? gen_mulsidi3_64bit_r6dmul
: NULL
;
18509 return signed_p
? gen_mulsidi3_64bit_dmul
: NULL
;
18512 ? gen_mulsidi3_64bit_mips16
18513 : gen_umulsidi3_64bit_mips16
);
18514 if (TARGET_FIX_R4000
)
18516 return signed_p
? gen_mulsidi3_64bit
: gen_umulsidi3_64bit
;
18521 return (signed_p
? gen_mulsidi3_32bit_r6
: gen_umulsidi3_32bit_r6
);
18524 ? gen_mulsidi3_32bit_mips16
18525 : gen_umulsidi3_32bit_mips16
);
18526 if (TARGET_FIX_R4000
&& !ISA_HAS_DSP
)
18527 return signed_p
? gen_mulsidi3_32bit_r4000
: gen_umulsidi3_32bit_r4000
;
18528 return signed_p
? gen_mulsidi3_32bit
: gen_umulsidi3_32bit
;
18532 /* Return true if PATTERN matches the kind of instruction generated by
18533 umips_build_save_restore. SAVE_P is true for store. */
18536 umips_save_restore_pattern_p (bool save_p
, rtx pattern
)
18540 HOST_WIDE_INT first_offset
= 0;
18541 rtx first_base
= 0;
18542 unsigned int regmask
= 0;
18544 for (n
= 0; n
< XVECLEN (pattern
, 0); n
++)
18546 rtx set
, reg
, mem
, this_base
;
18547 HOST_WIDE_INT this_offset
;
18549 /* Check that we have a SET. */
18550 set
= XVECEXP (pattern
, 0, n
);
18551 if (GET_CODE (set
) != SET
)
18554 /* Check that the SET is a load (if restoring) or a store
18556 mem
= save_p
? SET_DEST (set
) : SET_SRC (set
);
18557 if (!MEM_P (mem
) || MEM_VOLATILE_P (mem
))
18560 /* Check that the address is the sum of base and a possibly-zero
18561 constant offset. Determine if the offset is in range. */
18562 mips_split_plus (XEXP (mem
, 0), &this_base
, &this_offset
);
18563 if (!REG_P (this_base
))
18568 if (!UMIPS_12BIT_OFFSET_P (this_offset
))
18570 first_base
= this_base
;
18571 first_offset
= this_offset
;
18575 /* Check that the save slots are consecutive. */
18576 if (REGNO (this_base
) != REGNO (first_base
)
18577 || this_offset
!= first_offset
+ UNITS_PER_WORD
* n
)
18581 /* Check that SET's other operand is a register. */
18582 reg
= save_p
? SET_SRC (set
) : SET_DEST (set
);
18586 regmask
|= 1 << REGNO (reg
);
18589 for (i
= 0; i
< ARRAY_SIZE (umips_swm_mask
); i
++)
18590 if (regmask
== umips_swm_mask
[i
])
18596 /* Return the assembly instruction for microMIPS LWM or SWM.
18597 SAVE_P and PATTERN are as for umips_save_restore_pattern_p. */
18600 umips_output_save_restore (bool save_p
, rtx pattern
)
18602 static char buffer
[300];
18605 HOST_WIDE_INT offset
;
18606 rtx base
, mem
, set
, last_set
, last_reg
;
18608 /* Parse the pattern. */
18609 gcc_assert (umips_save_restore_pattern_p (save_p
, pattern
));
18611 s
= strcpy (buffer
, save_p
? "swm\t" : "lwm\t");
18613 n
= XVECLEN (pattern
, 0);
18615 set
= XVECEXP (pattern
, 0, 0);
18616 mem
= save_p
? SET_DEST (set
) : SET_SRC (set
);
18617 mips_split_plus (XEXP (mem
, 0), &base
, &offset
);
18619 last_set
= XVECEXP (pattern
, 0, n
- 1);
18620 last_reg
= save_p
? SET_SRC (last_set
) : SET_DEST (last_set
);
18622 if (REGNO (last_reg
) == 31)
18625 gcc_assert (n
<= 9);
18629 s
+= sprintf (s
, "%s,", reg_names
[16]);
18631 s
+= sprintf (s
, "%s-%s,", reg_names
[16], reg_names
[15 + n
]);
18633 s
+= sprintf (s
, "%s-%s,%s,", reg_names
[16], reg_names
[23],
18636 if (REGNO (last_reg
) == 31)
18637 s
+= sprintf (s
, "%s,", reg_names
[31]);
18639 s
+= sprintf (s
, "%d(%s)", (int)offset
, reg_names
[REGNO (base
)]);
18643 /* Return true if MEM1 and MEM2 use the same base register, and the
18644 offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the
18645 register into (from) which the contents of MEM1 will be loaded
18646 (stored), depending on the value of LOAD_P.
18647 SWAP_P is true when the 1st and 2nd instructions are swapped. */
18650 umips_load_store_pair_p_1 (bool load_p
, bool swap_p
,
18651 rtx first_reg
, rtx mem1
, rtx mem2
)
18654 HOST_WIDE_INT offset1
, offset2
;
18656 if (!MEM_P (mem1
) || !MEM_P (mem2
))
18659 mips_split_plus (XEXP (mem1
, 0), &base1
, &offset1
);
18660 mips_split_plus (XEXP (mem2
, 0), &base2
, &offset2
);
18662 if (!REG_P (base1
) || !rtx_equal_p (base1
, base2
))
18665 /* Avoid invalid load pair instructions. */
18666 if (load_p
&& REGNO (first_reg
) == REGNO (base1
))
18669 /* We must avoid this case for anti-dependence.
18672 first_reg is $2, but the base is $3. */
18675 && REGNO (first_reg
) + 1 == REGNO (base1
))
18678 if (offset2
!= offset1
+ 4)
18681 if (!UMIPS_12BIT_OFFSET_P (offset1
))
18688 mips_load_store_bonding_p (rtx
*operands
, machine_mode mode
, bool load_p
)
18690 rtx reg1
, reg2
, mem1
, mem2
, base1
, base2
;
18691 enum reg_class rc1
, rc2
;
18692 HOST_WIDE_INT offset1
, offset2
;
18696 reg1
= operands
[0];
18697 reg2
= operands
[2];
18698 mem1
= operands
[1];
18699 mem2
= operands
[3];
18703 reg1
= operands
[1];
18704 reg2
= operands
[3];
18705 mem1
= operands
[0];
18706 mem2
= operands
[2];
18709 if (mips_address_insns (XEXP (mem1
, 0), mode
, false) == 0
18710 || mips_address_insns (XEXP (mem2
, 0), mode
, false) == 0)
18713 mips_split_plus (XEXP (mem1
, 0), &base1
, &offset1
);
18714 mips_split_plus (XEXP (mem2
, 0), &base2
, &offset2
);
18716 /* Base regs do not match. */
18717 if (!REG_P (base1
) || !rtx_equal_p (base1
, base2
))
18720 /* Either of the loads is clobbering base register. It is legitimate to bond
18721 loads if second load clobbers base register. However, hardware does not
18722 support such bonding. */
18724 && (REGNO (reg1
) == REGNO (base1
)
18725 || (REGNO (reg2
) == REGNO (base1
))))
18728 /* Loading in same registers. */
18730 && REGNO (reg1
) == REGNO (reg2
))
18733 /* The loads/stores are not of same type. */
18734 rc1
= REGNO_REG_CLASS (REGNO (reg1
));
18735 rc2
= REGNO_REG_CLASS (REGNO (reg2
));
18737 && !reg_class_subset_p (rc1
, rc2
)
18738 && !reg_class_subset_p (rc2
, rc1
))
18741 if (abs (offset1
- offset2
) != GET_MODE_SIZE (mode
))
18747 /* OPERANDS describes the operands to a pair of SETs, in the order
18748 dest1, src1, dest2, src2. Return true if the operands can be used
18749 in an LWP or SWP instruction; LOAD_P says which. */
18752 umips_load_store_pair_p (bool load_p
, rtx
*operands
)
18754 rtx reg1
, reg2
, mem1
, mem2
;
18758 reg1
= operands
[0];
18759 reg2
= operands
[2];
18760 mem1
= operands
[1];
18761 mem2
= operands
[3];
18765 reg1
= operands
[1];
18766 reg2
= operands
[3];
18767 mem1
= operands
[0];
18768 mem2
= operands
[2];
18771 if (REGNO (reg2
) == REGNO (reg1
) + 1)
18772 return umips_load_store_pair_p_1 (load_p
, false, reg1
, mem1
, mem2
);
18774 if (REGNO (reg1
) == REGNO (reg2
) + 1)
18775 return umips_load_store_pair_p_1 (load_p
, true, reg2
, mem2
, mem1
);
18780 /* Return the assembly instruction for a microMIPS LWP or SWP in which
18781 the first register is REG and the first memory slot is MEM.
18782 LOAD_P is true for LWP. */
18785 umips_output_load_store_pair_1 (bool load_p
, rtx reg
, rtx mem
)
18787 rtx ops
[] = {reg
, mem
};
18790 output_asm_insn ("lwp\t%0,%1", ops
);
18792 output_asm_insn ("swp\t%0,%1", ops
);
18795 /* Output the assembly instruction for a microMIPS LWP or SWP instruction.
18796 LOAD_P and OPERANDS are as for umips_load_store_pair_p. */
18799 umips_output_load_store_pair (bool load_p
, rtx
*operands
)
18801 rtx reg1
, reg2
, mem1
, mem2
;
18804 reg1
= operands
[0];
18805 reg2
= operands
[2];
18806 mem1
= operands
[1];
18807 mem2
= operands
[3];
18811 reg1
= operands
[1];
18812 reg2
= operands
[3];
18813 mem1
= operands
[0];
18814 mem2
= operands
[2];
18817 if (REGNO (reg2
) == REGNO (reg1
) + 1)
18819 umips_output_load_store_pair_1 (load_p
, reg1
, mem1
);
18823 gcc_assert (REGNO (reg1
) == REGNO (reg2
) + 1);
18824 umips_output_load_store_pair_1 (load_p
, reg2
, mem2
);
18827 /* Return true if REG1 and REG2 match the criteria for a movep insn. */
18830 umips_movep_target_p (rtx reg1
, rtx reg2
)
18832 int regno1
, regno2
, pair
;
18834 static const int match
[8] = {
18835 0x00000060, /* 5, 6 */
18836 0x000000a0, /* 5, 7 */
18837 0x000000c0, /* 6, 7 */
18838 0x00200010, /* 4, 21 */
18839 0x00400010, /* 4, 22 */
18840 0x00000030, /* 4, 5 */
18841 0x00000050, /* 4, 6 */
18842 0x00000090 /* 4, 7 */
18845 if (!REG_P (reg1
) || !REG_P (reg2
))
18848 regno1
= REGNO (reg1
);
18849 regno2
= REGNO (reg2
);
18851 if (!GP_REG_P (regno1
) || !GP_REG_P (regno2
))
18854 pair
= (1 << regno1
) | (1 << regno2
);
18856 for (i
= 0; i
< ARRAY_SIZE (match
); i
++)
18857 if (pair
== match
[i
])
18863 /* Return the size in bytes of the trampoline code, padded to
18864 TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target
18865 function address immediately follow. */
18868 mips_trampoline_code_size (void)
18870 if (TARGET_USE_PIC_FN_ADDR_REG
)
18872 else if (ptr_mode
== DImode
)
18874 else if (ISA_HAS_LOAD_DELAY
)
18880 /* Implement TARGET_TRAMPOLINE_INIT. */
18883 mips_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
18885 rtx addr
, end_addr
, high
, low
, opcode
, mem
;
18888 HOST_WIDE_INT end_addr_offset
, static_chain_offset
, target_function_offset
;
18890 /* Work out the offsets of the pointers from the start of the
18891 trampoline code. */
18892 end_addr_offset
= mips_trampoline_code_size ();
18893 static_chain_offset
= end_addr_offset
;
18894 target_function_offset
= static_chain_offset
+ GET_MODE_SIZE (ptr_mode
);
18896 /* Get pointers to the beginning and end of the code block. */
18897 addr
= force_reg (Pmode
, XEXP (m_tramp
, 0));
18898 end_addr
= mips_force_binary (Pmode
, PLUS
, addr
, GEN_INT (end_addr_offset
));
18900 #define OP(X) gen_int_mode (X, SImode)
18902 /* Build up the code in TRAMPOLINE. */
18904 if (TARGET_USE_PIC_FN_ADDR_REG
)
18906 /* $25 contains the address of the trampoline. Emit code of the form:
18908 l[wd] $1, target_function_offset($25)
18909 l[wd] $static_chain, static_chain_offset($25)
18912 trampoline
[i
++] = OP (MIPS_LOAD_PTR (AT_REGNUM
,
18913 target_function_offset
,
18914 PIC_FUNCTION_ADDR_REGNUM
));
18915 trampoline
[i
++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM
,
18916 static_chain_offset
,
18917 PIC_FUNCTION_ADDR_REGNUM
));
18918 trampoline
[i
++] = OP (MIPS_JR (AT_REGNUM
));
18919 trampoline
[i
++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM
, AT_REGNUM
));
18921 else if (ptr_mode
== DImode
)
18923 /* It's too cumbersome to create the full 64-bit address, so let's
18929 1: l[wd] $25, target_function_offset - 12($31)
18930 l[wd] $static_chain, static_chain_offset - 12($31)
18934 where 12 is the offset of "1:" from the start of the code block. */
18935 trampoline
[i
++] = OP (MIPS_MOVE (AT_REGNUM
, RETURN_ADDR_REGNUM
));
18936 trampoline
[i
++] = OP (MIPS_BAL (1));
18937 trampoline
[i
++] = OP (MIPS_NOP
);
18938 trampoline
[i
++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM
,
18939 target_function_offset
- 12,
18940 RETURN_ADDR_REGNUM
));
18941 trampoline
[i
++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM
,
18942 static_chain_offset
- 12,
18943 RETURN_ADDR_REGNUM
));
18944 trampoline
[i
++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM
));
18945 trampoline
[i
++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM
, AT_REGNUM
));
18949 /* If the target has load delays, emit:
18951 lui $1, %hi(end_addr)
18952 lw $25, %lo(end_addr + ...)($1)
18953 lw $static_chain, %lo(end_addr + ...)($1)
18959 lui $1, %hi(end_addr)
18960 lw $25, %lo(end_addr + ...)($1)
18962 lw $static_chain, %lo(end_addr + ...)($1). */
18964 /* Split END_ADDR into %hi and %lo values. Trampolines are aligned
18965 to 64 bits, so the %lo value will have the bottom 3 bits clear. */
18966 high
= expand_simple_binop (SImode
, PLUS
, end_addr
, GEN_INT (0x8000),
18967 NULL
, false, OPTAB_WIDEN
);
18968 high
= expand_simple_binop (SImode
, LSHIFTRT
, high
, GEN_INT (16),
18969 NULL
, false, OPTAB_WIDEN
);
18970 low
= convert_to_mode (SImode
, gen_lowpart (HImode
, end_addr
), true);
18972 /* Emit the LUI. */
18973 opcode
= OP (MIPS_LUI (AT_REGNUM
, 0));
18974 trampoline
[i
++] = expand_simple_binop (SImode
, IOR
, opcode
, high
,
18975 NULL
, false, OPTAB_WIDEN
);
18977 /* Emit the load of the target function. */
18978 opcode
= OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM
,
18979 target_function_offset
- end_addr_offset
,
18981 trampoline
[i
++] = expand_simple_binop (SImode
, IOR
, opcode
, low
,
18982 NULL
, false, OPTAB_WIDEN
);
18984 /* Emit the JR here, if we can. */
18985 if (!ISA_HAS_LOAD_DELAY
)
18986 trampoline
[i
++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM
));
18988 /* Emit the load of the static chain register. */
18989 opcode
= OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM
,
18990 static_chain_offset
- end_addr_offset
,
18992 trampoline
[i
++] = expand_simple_binop (SImode
, IOR
, opcode
, low
,
18993 NULL
, false, OPTAB_WIDEN
);
18995 /* Emit the JR, if we couldn't above. */
18996 if (ISA_HAS_LOAD_DELAY
)
18998 trampoline
[i
++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM
));
18999 trampoline
[i
++] = OP (MIPS_NOP
);
19005 /* If we are using compact branches we don't have delay slots so
19006 place the instruction that was in the delay slot before the JRC
19009 if (TARGET_CB_ALWAYS
)
19012 temp
= trampoline
[i
-2];
19013 trampoline
[i
-2] = trampoline
[i
-1];
19014 trampoline
[i
-1] = temp
;
19017 /* Copy the trampoline code. Leave any padding uninitialized. */
19018 for (j
= 0; j
< i
; j
++)
19020 mem
= adjust_address (m_tramp
, SImode
, j
* GET_MODE_SIZE (SImode
));
19021 mips_emit_move (mem
, trampoline
[j
]);
19024 /* Set up the static chain pointer field. */
19025 mem
= adjust_address (m_tramp
, ptr_mode
, static_chain_offset
);
19026 mips_emit_move (mem
, chain_value
);
19028 /* Set up the target function field. */
19029 mem
= adjust_address (m_tramp
, ptr_mode
, target_function_offset
);
19030 mips_emit_move (mem
, XEXP (DECL_RTL (fndecl
), 0));
19032 /* Flush the code part of the trampoline. */
19033 emit_insn (gen_add3_insn (end_addr
, addr
, GEN_INT (TRAMPOLINE_SIZE
)));
19034 emit_insn (gen_clear_cache (addr
, end_addr
));
19037 /* Implement FUNCTION_PROFILER. */
19039 void mips_function_profiler (FILE *file
)
19042 sorry ("mips16 function profiling");
19043 if (TARGET_LONG_CALLS
)
19045 /* For TARGET_LONG_CALLS use $3 for the address of _mcount. */
19046 if (Pmode
== DImode
)
19047 fprintf (file
, "\tdla\t%s,_mcount\n", reg_names
[3]);
19049 fprintf (file
, "\tla\t%s,_mcount\n", reg_names
[3]);
19051 mips_push_asm_switch (&mips_noat
);
19052 fprintf (file
, "\tmove\t%s,%s\t\t# save current return address\n",
19053 reg_names
[AT_REGNUM
], reg_names
[RETURN_ADDR_REGNUM
]);
19054 /* _mcount treats $2 as the static chain register. */
19055 if (cfun
->static_chain_decl
!= NULL
)
19056 fprintf (file
, "\tmove\t%s,%s\n", reg_names
[2],
19057 reg_names
[STATIC_CHAIN_REGNUM
]);
19058 if (TARGET_MCOUNT_RA_ADDRESS
)
19060 /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
19061 ra save location. */
19062 if (cfun
->machine
->frame
.ra_fp_offset
== 0)
19063 /* ra not saved, pass zero. */
19064 fprintf (file
, "\tmove\t%s,%s\n", reg_names
[12], reg_names
[0]);
19066 fprintf (file
, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC
"(%s)\n",
19067 Pmode
== DImode
? "dla" : "la", reg_names
[12],
19068 cfun
->machine
->frame
.ra_fp_offset
,
19069 reg_names
[STACK_POINTER_REGNUM
]);
19071 if (!TARGET_NEWABI
)
19073 "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from stack\n",
19074 TARGET_64BIT
? "dsubu" : "subu",
19075 reg_names
[STACK_POINTER_REGNUM
],
19076 reg_names
[STACK_POINTER_REGNUM
],
19077 Pmode
== DImode
? 16 : 8);
19079 if (TARGET_LONG_CALLS
)
19080 fprintf (file
, "\tjalr\t%s\n", reg_names
[3]);
19082 fprintf (file
, "\tjal\t_mcount\n");
19083 mips_pop_asm_switch (&mips_noat
);
19084 /* _mcount treats $2 as the static chain register. */
19085 if (cfun
->static_chain_decl
!= NULL
)
19086 fprintf (file
, "\tmove\t%s,%s\n", reg_names
[STATIC_CHAIN_REGNUM
],
19090 /* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default
19091 behaviour of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
19092 when TARGET_LOONGSON_VECTORS is true. */
19094 static unsigned HOST_WIDE_INT
19095 mips_shift_truncation_mask (machine_mode mode
)
19097 if (TARGET_LOONGSON_VECTORS
&& VECTOR_MODE_P (mode
))
19100 return GET_MODE_BITSIZE (mode
) - 1;
19103 /* Implement TARGET_PREPARE_PCH_SAVE. */
19106 mips_prepare_pch_save (void)
19108 /* We are called in a context where the current MIPS16 vs. non-MIPS16
19109 setting should be irrelevant. The question then is: which setting
19110 makes most sense at load time?
19112 The PCH is loaded before the first token is read. We should never
19113 have switched into MIPS16 mode by that point, and thus should not
19114 have populated mips16_globals. Nor can we load the entire contents
19115 of mips16_globals from the PCH file, because mips16_globals contains
19116 a combination of GGC and non-GGC data.
19118 There is therefore no point in trying save the GGC part of
19119 mips16_globals to the PCH file, or to preserve MIPS16ness across
19120 the PCH save and load. The loading compiler would not have access
19121 to the non-GGC parts of mips16_globals (either from the PCH file,
19122 or from a copy that the loading compiler generated itself) and would
19123 have to call target_reinit anyway.
19125 It therefore seems best to switch back to non-MIPS16 mode at
19126 save time, and to ensure that mips16_globals remains null after
19128 mips_set_compression_mode (0);
19129 mips16_globals
= 0;
19132 /* Generate or test for an insn that supports a constant permutation. */
19134 #define MAX_VECT_LEN 8
19136 struct expand_vec_perm_d
19138 rtx target
, op0
, op1
;
19139 unsigned char perm
[MAX_VECT_LEN
];
19140 machine_mode vmode
;
19141 unsigned char nelt
;
19146 /* Construct (set target (vec_select op0 (parallel perm))) and
19147 return true if that's a valid instruction in the active ISA. */
19150 mips_expand_vselect (rtx target
, rtx op0
,
19151 const unsigned char *perm
, unsigned nelt
)
19153 rtx rperm
[MAX_VECT_LEN
], x
;
19157 for (i
= 0; i
< nelt
; ++i
)
19158 rperm
[i
] = GEN_INT (perm
[i
]);
19160 x
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (nelt
, rperm
));
19161 x
= gen_rtx_VEC_SELECT (GET_MODE (target
), op0
, x
);
19162 x
= gen_rtx_SET (target
, x
);
19164 insn
= emit_insn (x
);
19165 if (recog_memoized (insn
) < 0)
19167 remove_insn (insn
);
19173 /* Similar, but generate a vec_concat from op0 and op1 as well. */
19176 mips_expand_vselect_vconcat (rtx target
, rtx op0
, rtx op1
,
19177 const unsigned char *perm
, unsigned nelt
)
19179 machine_mode v2mode
;
19182 v2mode
= GET_MODE_2XWIDER_MODE (GET_MODE (op0
));
19183 x
= gen_rtx_VEC_CONCAT (v2mode
, op0
, op1
);
19184 return mips_expand_vselect (target
, x
, perm
, nelt
);
19187 /* Recognize patterns for even-odd extraction. */
19190 mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d
*d
)
19192 unsigned i
, odd
, nelt
= d
->nelt
;
19193 rtx t0
, t1
, t2
, t3
;
19195 if (!(TARGET_HARD_FLOAT
&& TARGET_LOONGSON_VECTORS
))
19197 /* Even-odd for V2SI/V2SFmode is matched by interleave directly. */
19204 for (i
= 1; i
< nelt
; ++i
)
19205 if (d
->perm
[i
] != i
* 2 + odd
)
19211 /* We need 2*log2(N)-1 operations to achieve odd/even with interleave. */
19212 t0
= gen_reg_rtx (d
->vmode
);
19213 t1
= gen_reg_rtx (d
->vmode
);
19217 emit_insn (gen_loongson_punpckhhw (t0
, d
->op0
, d
->op1
));
19218 emit_insn (gen_loongson_punpcklhw (t1
, d
->op0
, d
->op1
));
19220 emit_insn (gen_loongson_punpckhhw (d
->target
, t1
, t0
));
19222 emit_insn (gen_loongson_punpcklhw (d
->target
, t1
, t0
));
19226 t2
= gen_reg_rtx (d
->vmode
);
19227 t3
= gen_reg_rtx (d
->vmode
);
19228 emit_insn (gen_loongson_punpckhbh (t0
, d
->op0
, d
->op1
));
19229 emit_insn (gen_loongson_punpcklbh (t1
, d
->op0
, d
->op1
));
19230 emit_insn (gen_loongson_punpckhbh (t2
, t1
, t0
));
19231 emit_insn (gen_loongson_punpcklbh (t3
, t1
, t0
));
19233 emit_insn (gen_loongson_punpckhbh (d
->target
, t3
, t2
));
19235 emit_insn (gen_loongson_punpcklbh (d
->target
, t3
, t2
));
19239 gcc_unreachable ();
19244 /* Recognize patterns for the Loongson PSHUFH instruction. */
19247 mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d
*d
)
19252 if (!(TARGET_HARD_FLOAT
&& TARGET_LOONGSON_VECTORS
))
19254 if (d
->vmode
!= V4HImode
)
19259 /* Convert the selector into the packed 8-bit form for pshufh. */
19260 /* Recall that loongson is little-endian only. No big-endian
19261 adjustment required. */
19262 for (i
= mask
= 0; i
< 4; i
++)
19263 mask
|= (d
->perm
[i
] & 3) << (i
* 2);
19264 rmask
= force_reg (SImode
, GEN_INT (mask
));
19266 if (d
->one_vector_p
)
19267 emit_insn (gen_loongson_pshufh (d
->target
, d
->op0
, rmask
));
19270 rtx t0
, t1
, x
, merge
, rmerge
[4];
19272 t0
= gen_reg_rtx (V4HImode
);
19273 t1
= gen_reg_rtx (V4HImode
);
19274 emit_insn (gen_loongson_pshufh (t1
, d
->op1
, rmask
));
19275 emit_insn (gen_loongson_pshufh (t0
, d
->op0
, rmask
));
19277 for (i
= 0; i
< 4; ++i
)
19278 rmerge
[i
] = (d
->perm
[i
] & 4 ? constm1_rtx
: const0_rtx
);
19279 merge
= gen_rtx_CONST_VECTOR (V4HImode
, gen_rtvec_v (4, rmerge
));
19280 merge
= force_reg (V4HImode
, merge
);
19282 x
= gen_rtx_AND (V4HImode
, merge
, t1
);
19283 emit_insn (gen_rtx_SET (t1
, x
));
19285 x
= gen_rtx_NOT (V4HImode
, merge
);
19286 x
= gen_rtx_AND (V4HImode
, x
, t0
);
19287 emit_insn (gen_rtx_SET (t0
, x
));
19289 x
= gen_rtx_IOR (V4HImode
, t0
, t1
);
19290 emit_insn (gen_rtx_SET (d
->target
, x
));
19296 /* Recognize broadcast patterns for the Loongson. */
19299 mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d
*d
)
19304 if (!(TARGET_HARD_FLOAT
&& TARGET_LOONGSON_VECTORS
))
19306 /* Note that we've already matched V2SI via punpck and V4HI via pshufh. */
19307 if (d
->vmode
!= V8QImode
)
19309 if (!d
->one_vector_p
)
19313 for (i
= 1; i
< 8; ++i
)
19314 if (d
->perm
[i
] != elt
)
19320 /* With one interleave we put two of the desired element adjacent. */
19321 t0
= gen_reg_rtx (V8QImode
);
19323 emit_insn (gen_loongson_punpcklbh (t0
, d
->op0
, d
->op0
));
19325 emit_insn (gen_loongson_punpckhbh (t0
, d
->op0
, d
->op0
));
19327 /* Shuffle that one HImode element into all locations. */
19330 t1
= gen_reg_rtx (V4HImode
);
19331 emit_insn (gen_loongson_pshufh (t1
, gen_lowpart (V4HImode
, t0
),
19332 force_reg (SImode
, GEN_INT (elt
))));
19334 emit_move_insn (d
->target
, gen_lowpart (V8QImode
, t1
));
19339 mips_expand_vec_perm_const_1 (struct expand_vec_perm_d
*d
)
19341 unsigned int i
, nelt
= d
->nelt
;
19342 unsigned char perm2
[MAX_VECT_LEN
];
19344 if (d
->one_vector_p
)
19346 /* Try interleave with alternating operands. */
19347 memcpy (perm2
, d
->perm
, sizeof(perm2
));
19348 for (i
= 1; i
< nelt
; i
+= 2)
19350 if (mips_expand_vselect_vconcat (d
->target
, d
->op0
, d
->op1
, perm2
, nelt
))
19355 if (mips_expand_vselect_vconcat (d
->target
, d
->op0
, d
->op1
,
19359 /* Try again with swapped operands. */
19360 for (i
= 0; i
< nelt
; ++i
)
19361 perm2
[i
] = (d
->perm
[i
] + nelt
) & (2 * nelt
- 1);
19362 if (mips_expand_vselect_vconcat (d
->target
, d
->op1
, d
->op0
, perm2
, nelt
))
19366 if (mips_expand_vpc_loongson_even_odd (d
))
19368 if (mips_expand_vpc_loongson_pshufh (d
))
19370 if (mips_expand_vpc_loongson_bcast (d
))
19375 /* Expand a vec_perm_const pattern. */
19378 mips_expand_vec_perm_const (rtx operands
[4])
19380 struct expand_vec_perm_d d
;
19381 int i
, nelt
, which
;
19382 unsigned char orig_perm
[MAX_VECT_LEN
];
19386 d
.target
= operands
[0];
19387 d
.op0
= operands
[1];
19388 d
.op1
= operands
[2];
19391 d
.vmode
= GET_MODE (d
.target
);
19392 gcc_assert (VECTOR_MODE_P (d
.vmode
));
19393 d
.nelt
= nelt
= GET_MODE_NUNITS (d
.vmode
);
19394 d
.testing_p
= false;
19396 for (i
= which
= 0; i
< nelt
; ++i
)
19398 rtx e
= XVECEXP (sel
, 0, i
);
19399 int ei
= INTVAL (e
) & (2 * nelt
- 1);
19400 which
|= (ei
< nelt
? 1 : 2);
19403 memcpy (d
.perm
, orig_perm
, MAX_VECT_LEN
);
19411 d
.one_vector_p
= false;
19412 if (!rtx_equal_p (d
.op0
, d
.op1
))
19417 for (i
= 0; i
< nelt
; ++i
)
19418 d
.perm
[i
] &= nelt
- 1;
19420 d
.one_vector_p
= true;
19425 d
.one_vector_p
= true;
19429 ok
= mips_expand_vec_perm_const_1 (&d
);
19431 /* If we were given a two-vector permutation which just happened to
19432 have both input vectors equal, we folded this into a one-vector
19433 permutation. There are several loongson patterns that are matched
19434 via direct vec_select+vec_concat expansion, but we do not have
19435 support in mips_expand_vec_perm_const_1 to guess the adjustment
19436 that should be made for a single operand. Just try again with
19437 the original permutation. */
19438 if (!ok
&& which
== 3)
19440 d
.op0
= operands
[1];
19441 d
.op1
= operands
[2];
19442 d
.one_vector_p
= false;
19443 memcpy (d
.perm
, orig_perm
, MAX_VECT_LEN
);
19444 ok
= mips_expand_vec_perm_const_1 (&d
);
19450 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST_OK. */
19453 mips_vectorize_vec_perm_const_ok (machine_mode vmode
,
19454 const unsigned char *sel
)
19456 struct expand_vec_perm_d d
;
19457 unsigned int i
, nelt
, which
;
19461 d
.nelt
= nelt
= GET_MODE_NUNITS (d
.vmode
);
19462 d
.testing_p
= true;
19463 memcpy (d
.perm
, sel
, nelt
);
19465 /* Categorize the set of elements in the selector. */
19466 for (i
= which
= 0; i
< nelt
; ++i
)
19468 unsigned char e
= d
.perm
[i
];
19469 gcc_assert (e
< 2 * nelt
);
19470 which
|= (e
< nelt
? 1 : 2);
19473 /* For all elements from second vector, fold the elements to first. */
19475 for (i
= 0; i
< nelt
; ++i
)
19478 /* Check whether the mask can be applied to the vector type. */
19479 d
.one_vector_p
= (which
!= 3);
19481 d
.target
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 1);
19482 d
.op1
= d
.op0
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 2);
19483 if (!d
.one_vector_p
)
19484 d
.op1
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 3);
19487 ret
= mips_expand_vec_perm_const_1 (&d
);
19493 /* Expand an integral vector unpack operation. */
19496 mips_expand_vec_unpack (rtx operands
[2], bool unsigned_p
, bool high_p
)
19498 machine_mode imode
= GET_MODE (operands
[1]);
19499 rtx (*unpack
) (rtx
, rtx
, rtx
);
19500 rtx (*cmpgt
) (rtx
, rtx
, rtx
);
19501 rtx tmp
, dest
, zero
;
19507 unpack
= gen_loongson_punpckhbh
;
19509 unpack
= gen_loongson_punpcklbh
;
19510 cmpgt
= gen_loongson_pcmpgtb
;
19514 unpack
= gen_loongson_punpckhhw
;
19516 unpack
= gen_loongson_punpcklhw
;
19517 cmpgt
= gen_loongson_pcmpgth
;
19520 gcc_unreachable ();
19523 zero
= force_reg (imode
, CONST0_RTX (imode
));
19528 tmp
= gen_reg_rtx (imode
);
19529 emit_insn (cmpgt (tmp
, zero
, operands
[1]));
19532 dest
= gen_reg_rtx (imode
);
19533 emit_insn (unpack (dest
, operands
[1], tmp
));
19535 emit_move_insn (operands
[0], gen_lowpart (GET_MODE (operands
[0]), dest
));
19538 /* A subroutine of mips_expand_vec_init, match constant vector elements. */
19541 mips_constant_elt_p (rtx x
)
19543 return CONST_INT_P (x
) || GET_CODE (x
) == CONST_DOUBLE
;
19546 /* A subroutine of mips_expand_vec_init, expand via broadcast. */
19549 mips_expand_vi_broadcast (machine_mode vmode
, rtx target
, rtx elt
)
19551 struct expand_vec_perm_d d
;
19555 if (elt
!= const0_rtx
)
19556 elt
= force_reg (GET_MODE_INNER (vmode
), elt
);
19558 elt
= gen_lowpart (DImode
, elt
);
19560 t1
= gen_reg_rtx (vmode
);
19564 emit_insn (gen_loongson_vec_init1_v8qi (t1
, elt
));
19567 emit_insn (gen_loongson_vec_init1_v4hi (t1
, elt
));
19570 gcc_unreachable ();
19573 memset (&d
, 0, sizeof (d
));
19578 d
.nelt
= GET_MODE_NUNITS (vmode
);
19579 d
.one_vector_p
= true;
19581 ok
= mips_expand_vec_perm_const_1 (&d
);
19585 /* A subroutine of mips_expand_vec_init, replacing all of the non-constant
19586 elements of VALS with zeros, copy the constant vector to TARGET. */
19589 mips_expand_vi_constant (machine_mode vmode
, unsigned nelt
,
19590 rtx target
, rtx vals
)
19592 rtvec vec
= shallow_copy_rtvec (XVEC (vals
, 0));
19595 for (i
= 0; i
< nelt
; ++i
)
19597 if (!mips_constant_elt_p (RTVEC_ELT (vec
, i
)))
19598 RTVEC_ELT (vec
, i
) = const0_rtx
;
19601 emit_move_insn (target
, gen_rtx_CONST_VECTOR (vmode
, vec
));
19605 /* A subroutine of mips_expand_vec_init, expand via pinsrh. */
19608 mips_expand_vi_loongson_one_pinsrh (rtx target
, rtx vals
, unsigned one_var
)
19610 mips_expand_vi_constant (V4HImode
, 4, target
, vals
);
19612 emit_insn (gen_vec_setv4hi (target
, target
, XVECEXP (vals
, 0, one_var
),
19613 GEN_INT (one_var
)));
19616 /* A subroutine of mips_expand_vec_init, expand anything via memory. */
19619 mips_expand_vi_general (machine_mode vmode
, machine_mode imode
,
19620 unsigned nelt
, unsigned nvar
, rtx target
, rtx vals
)
19622 rtx mem
= assign_stack_temp (vmode
, GET_MODE_SIZE (vmode
));
19623 unsigned int i
, isize
= GET_MODE_SIZE (imode
);
19626 mips_expand_vi_constant (vmode
, nelt
, mem
, vals
);
19628 for (i
= 0; i
< nelt
; ++i
)
19630 rtx x
= XVECEXP (vals
, 0, i
);
19631 if (!mips_constant_elt_p (x
))
19632 emit_move_insn (adjust_address (mem
, imode
, i
* isize
), x
);
19635 emit_move_insn (target
, mem
);
19638 /* Expand a vector initialization. */
19641 mips_expand_vector_init (rtx target
, rtx vals
)
19643 machine_mode vmode
= GET_MODE (target
);
19644 machine_mode imode
= GET_MODE_INNER (vmode
);
19645 unsigned i
, nelt
= GET_MODE_NUNITS (vmode
);
19646 unsigned nvar
= 0, one_var
= -1u;
19647 bool all_same
= true;
19650 for (i
= 0; i
< nelt
; ++i
)
19652 x
= XVECEXP (vals
, 0, i
);
19653 if (!mips_constant_elt_p (x
))
19654 nvar
++, one_var
= i
;
19655 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
19659 /* Load constants from the pool, or whatever's handy. */
19662 emit_move_insn (target
, gen_rtx_CONST_VECTOR (vmode
, XVEC (vals
, 0)));
19666 /* For two-part initialization, always use CONCAT. */
19669 rtx op0
= force_reg (imode
, XVECEXP (vals
, 0, 0));
19670 rtx op1
= force_reg (imode
, XVECEXP (vals
, 0, 1));
19671 x
= gen_rtx_VEC_CONCAT (vmode
, op0
, op1
);
19672 emit_insn (gen_rtx_SET (target
, x
));
19676 /* Loongson is the only cpu with vectors with more elements. */
19677 gcc_assert (TARGET_HARD_FLOAT
&& TARGET_LOONGSON_VECTORS
);
19679 /* If all values are identical, broadcast the value. */
19682 mips_expand_vi_broadcast (vmode
, target
, XVECEXP (vals
, 0, 0));
19686 /* If we've only got one non-variable V4HImode, use PINSRH. */
19687 if (nvar
== 1 && vmode
== V4HImode
)
19689 mips_expand_vi_loongson_one_pinsrh (target
, vals
, one_var
);
19693 mips_expand_vi_general (vmode
, imode
, nelt
, nvar
, target
, vals
);
19696 /* Expand a vector reduction. */
19699 mips_expand_vec_reduc (rtx target
, rtx in
, rtx (*gen
)(rtx
, rtx
, rtx
))
19701 machine_mode vmode
= GET_MODE (in
);
19702 unsigned char perm2
[2];
19703 rtx last
, next
, fold
, x
;
19707 fold
= gen_reg_rtx (vmode
);
19711 /* Use PUL/PLU to produce { L, H } op { H, L }.
19712 By reversing the pair order, rather than a pure interleave high,
19713 we avoid erroneous exceptional conditions that we might otherwise
19714 produce from the computation of H op H. */
19717 ok
= mips_expand_vselect_vconcat (fold
, last
, last
, perm2
, 2);
19722 /* Use interleave to produce { H, L } op { H, H }. */
19723 emit_insn (gen_loongson_punpckhwd (fold
, last
, last
));
19727 /* Perform the first reduction with interleave,
19728 and subsequent reductions with shifts. */
19729 emit_insn (gen_loongson_punpckhwd_hi (fold
, last
, last
));
19731 next
= gen_reg_rtx (vmode
);
19732 emit_insn (gen (next
, last
, fold
));
19735 fold
= gen_reg_rtx (vmode
);
19736 x
= force_reg (SImode
, GEN_INT (16));
19737 emit_insn (gen_vec_shr_v4hi (fold
, last
, x
));
19741 emit_insn (gen_loongson_punpckhwd_qi (fold
, last
, last
));
19743 next
= gen_reg_rtx (vmode
);
19744 emit_insn (gen (next
, last
, fold
));
19747 fold
= gen_reg_rtx (vmode
);
19748 x
= force_reg (SImode
, GEN_INT (16));
19749 emit_insn (gen_vec_shr_v8qi (fold
, last
, x
));
19751 next
= gen_reg_rtx (vmode
);
19752 emit_insn (gen (next
, last
, fold
));
19755 fold
= gen_reg_rtx (vmode
);
19756 x
= force_reg (SImode
, GEN_INT (8));
19757 emit_insn (gen_vec_shr_v8qi (fold
, last
, x
));
19761 gcc_unreachable ();
19764 emit_insn (gen (target
, last
, fold
));
19767 /* Expand a vector minimum/maximum. */
19770 mips_expand_vec_minmax (rtx target
, rtx op0
, rtx op1
,
19771 rtx (*cmp
) (rtx
, rtx
, rtx
), bool min_p
)
19773 machine_mode vmode
= GET_MODE (target
);
19776 tc
= gen_reg_rtx (vmode
);
19777 t0
= gen_reg_rtx (vmode
);
19778 t1
= gen_reg_rtx (vmode
);
19781 emit_insn (cmp (tc
, op0
, op1
));
19783 x
= gen_rtx_AND (vmode
, tc
, (min_p
? op1
: op0
));
19784 emit_insn (gen_rtx_SET (t0
, x
));
19786 x
= gen_rtx_NOT (vmode
, tc
);
19787 x
= gen_rtx_AND (vmode
, x
, (min_p
? op0
: op1
));
19788 emit_insn (gen_rtx_SET (t1
, x
));
19790 x
= gen_rtx_IOR (vmode
, t0
, t1
);
19791 emit_insn (gen_rtx_SET (target
, x
));
19794 /* Implement HARD_REGNO_CALLER_SAVE_MODE. */
19797 mips_hard_regno_caller_save_mode (unsigned int regno
,
19798 unsigned int nregs
,
19801 /* For performance, avoid saving/restoring upper parts of a register
19802 by returning MODE as save mode when the mode is known. */
19803 if (mode
== VOIDmode
)
19804 return choose_hard_reg_mode (regno
, nregs
, false);
19809 /* Implement TARGET_CASE_VALUES_THRESHOLD. */
19812 mips_case_values_threshold (void)
19814 /* In MIPS16 mode using a larger case threshold generates smaller code. */
19815 if (TARGET_MIPS16
&& optimize_size
)
19818 return default_case_values_threshold ();
19821 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
19824 mips_atomic_assign_expand_fenv (tree
*hold
, tree
*clear
, tree
*update
)
19826 if (!TARGET_HARD_FLOAT_ABI
)
19828 tree exceptions_var
= create_tmp_var (MIPS_ATYPE_USI
);
19829 tree fcsr_orig_var
= create_tmp_var (MIPS_ATYPE_USI
);
19830 tree fcsr_mod_var
= create_tmp_var (MIPS_ATYPE_USI
);
19831 tree get_fcsr
= mips_builtin_decls
[MIPS_GET_FCSR
];
19832 tree set_fcsr
= mips_builtin_decls
[MIPS_SET_FCSR
];
19833 tree get_fcsr_hold_call
= build_call_expr (get_fcsr
, 0);
19834 tree hold_assign_orig
= build2 (MODIFY_EXPR
, MIPS_ATYPE_USI
,
19835 fcsr_orig_var
, get_fcsr_hold_call
);
19836 tree hold_mod_val
= build2 (BIT_AND_EXPR
, MIPS_ATYPE_USI
, fcsr_orig_var
,
19837 build_int_cst (MIPS_ATYPE_USI
, 0xfffff003));
19838 tree hold_assign_mod
= build2 (MODIFY_EXPR
, MIPS_ATYPE_USI
,
19839 fcsr_mod_var
, hold_mod_val
);
19840 tree set_fcsr_hold_call
= build_call_expr (set_fcsr
, 1, fcsr_mod_var
);
19841 tree hold_all
= build2 (COMPOUND_EXPR
, MIPS_ATYPE_USI
,
19842 hold_assign_orig
, hold_assign_mod
);
19843 *hold
= build2 (COMPOUND_EXPR
, void_type_node
, hold_all
,
19844 set_fcsr_hold_call
);
19846 *clear
= build_call_expr (set_fcsr
, 1, fcsr_mod_var
);
19848 tree get_fcsr_update_call
= build_call_expr (get_fcsr
, 0);
19849 *update
= build2 (MODIFY_EXPR
, MIPS_ATYPE_USI
,
19850 exceptions_var
, get_fcsr_update_call
);
19851 tree set_fcsr_update_call
= build_call_expr (set_fcsr
, 1, fcsr_orig_var
);
19852 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
,
19853 set_fcsr_update_call
);
19854 tree atomic_feraiseexcept
19855 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT
);
19856 tree int_exceptions_var
= fold_convert (integer_type_node
,
19858 tree atomic_feraiseexcept_call
= build_call_expr (atomic_feraiseexcept
,
19859 1, int_exceptions_var
);
19860 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
,
19861 atomic_feraiseexcept_call
);
19864 /* Implement TARGET_SPILL_CLASS. */
19867 mips_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED
,
19868 machine_mode mode ATTRIBUTE_UNUSED
)
19875 /* Implement TARGET_LRA_P. */
19880 return mips_lra_flag
;
19883 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS. */
19886 mips_ira_change_pseudo_allocno_class (int regno
, reg_class_t allocno_class
)
19888 /* LRA will allocate an FPR for an integer mode pseudo instead of spilling
19889 to memory if an FPR is present in the allocno class. It is rare that
19890 we actually need to place an integer mode value in an FPR so where
19891 possible limit the allocation to GR_REGS. This will slightly pessimize
19892 code that involves integer to/from float conversions as these will have
19893 to reload into FPRs in LRA. Such reloads are sometimes eliminated and
19894 sometimes only partially eliminated. We choose to take this penalty
19895 in order to eliminate usage of FPRs in code that does not use floating
19898 This change has a similar effect to increasing the cost of FPR->GPR
19899 register moves for integer modes so that they are higher than the cost
19900 of memory but changing the allocno class is more reliable.
19902 This is also similar to forbidding integer mode values in FPRs entirely
19903 but this would lead to an inconsistency in the integer to/from float
19904 instructions that say integer mode values must be placed in FPRs. */
19905 if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno
)) && allocno_class
== ALL_REGS
)
19907 return allocno_class
;
19910 /* Implement TARGET_PROMOTE_FUNCTION_MODE */
19912 /* This function is equivalent to default_promote_function_mode_always_promote
19913 except that it returns a promoted mode even if type is NULL_TREE. This is
19914 needed by libcalls which have no type (only a mode) such as fixed conversion
19915 routines that take a signed or unsigned char/short argument and convert it
19916 to a fixed type. */
19918 static machine_mode
19919 mips_promote_function_mode (const_tree type ATTRIBUTE_UNUSED
,
19921 int *punsignedp ATTRIBUTE_UNUSED
,
19922 const_tree fntype ATTRIBUTE_UNUSED
,
19923 int for_return ATTRIBUTE_UNUSED
)
19927 if (type
!= NULL_TREE
)
19928 return promote_mode (type
, mode
, punsignedp
);
19930 unsignedp
= *punsignedp
;
19931 PROMOTE_MODE (mode
, unsignedp
, type
);
19932 *punsignedp
= unsignedp
;
19936 /* Initialize the GCC target structure. */
19937 #undef TARGET_ASM_ALIGNED_HI_OP
19938 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
19939 #undef TARGET_ASM_ALIGNED_SI_OP
19940 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
19941 #undef TARGET_ASM_ALIGNED_DI_OP
19942 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
19944 #undef TARGET_OPTION_OVERRIDE
19945 #define TARGET_OPTION_OVERRIDE mips_option_override
19947 #undef TARGET_LEGITIMIZE_ADDRESS
19948 #define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address
19950 #undef TARGET_ASM_FUNCTION_PROLOGUE
19951 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
19952 #undef TARGET_ASM_FUNCTION_EPILOGUE
19953 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
19954 #undef TARGET_ASM_SELECT_RTX_SECTION
19955 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
19956 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
19957 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
19959 #undef TARGET_SCHED_INIT
19960 #define TARGET_SCHED_INIT mips_sched_init
19961 #undef TARGET_SCHED_REORDER
19962 #define TARGET_SCHED_REORDER mips_sched_reorder
19963 #undef TARGET_SCHED_REORDER2
19964 #define TARGET_SCHED_REORDER2 mips_sched_reorder2
19965 #undef TARGET_SCHED_VARIABLE_ISSUE
19966 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
19967 #undef TARGET_SCHED_ADJUST_COST
19968 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
19969 #undef TARGET_SCHED_ISSUE_RATE
19970 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
19971 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
19972 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
19973 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
19974 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
19975 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
19976 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
19977 mips_multipass_dfa_lookahead
19978 #undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
19979 #define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
19980 mips_small_register_classes_for_mode_p
19982 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
19983 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
19985 #undef TARGET_INSERT_ATTRIBUTES
19986 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
19987 #undef TARGET_MERGE_DECL_ATTRIBUTES
19988 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
19989 #undef TARGET_CAN_INLINE_P
19990 #define TARGET_CAN_INLINE_P mips_can_inline_p
19991 #undef TARGET_SET_CURRENT_FUNCTION
19992 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
19994 #undef TARGET_VALID_POINTER_MODE
19995 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
19996 #undef TARGET_REGISTER_MOVE_COST
19997 #define TARGET_REGISTER_MOVE_COST mips_register_move_cost
19998 #undef TARGET_REGISTER_PRIORITY
19999 #define TARGET_REGISTER_PRIORITY mips_register_priority
20000 #undef TARGET_MEMORY_MOVE_COST
20001 #define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
20002 #undef TARGET_RTX_COSTS
20003 #define TARGET_RTX_COSTS mips_rtx_costs
20004 #undef TARGET_ADDRESS_COST
20005 #define TARGET_ADDRESS_COST mips_address_cost
20007 #undef TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P
20008 #define TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P mips_no_speculation_in_delay_slots_p
20010 #undef TARGET_IN_SMALL_DATA_P
20011 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
20013 #undef TARGET_MACHINE_DEPENDENT_REORG
20014 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
20016 #undef TARGET_PREFERRED_RELOAD_CLASS
20017 #define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class
20019 #undef TARGET_EXPAND_TO_RTL_HOOK
20020 #define TARGET_EXPAND_TO_RTL_HOOK mips_expand_to_rtl_hook
20021 #undef TARGET_ASM_FILE_START
20022 #define TARGET_ASM_FILE_START mips_file_start
20023 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
20024 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
20025 #undef TARGET_ASM_CODE_END
20026 #define TARGET_ASM_CODE_END mips_code_end
20028 #undef TARGET_INIT_LIBFUNCS
20029 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
20031 #undef TARGET_BUILD_BUILTIN_VA_LIST
20032 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
20033 #undef TARGET_EXPAND_BUILTIN_VA_START
20034 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
20035 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
20036 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
20038 #undef TARGET_PROMOTE_FUNCTION_MODE
20039 #define TARGET_PROMOTE_FUNCTION_MODE mips_promote_function_mode
20040 #undef TARGET_FUNCTION_VALUE
20041 #define TARGET_FUNCTION_VALUE mips_function_value
20042 #undef TARGET_LIBCALL_VALUE
20043 #define TARGET_LIBCALL_VALUE mips_libcall_value
20044 #undef TARGET_FUNCTION_VALUE_REGNO_P
20045 #define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
20046 #undef TARGET_RETURN_IN_MEMORY
20047 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
20048 #undef TARGET_RETURN_IN_MSB
20049 #define TARGET_RETURN_IN_MSB mips_return_in_msb
20051 #undef TARGET_ASM_OUTPUT_MI_THUNK
20052 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
20053 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
20054 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
20056 #undef TARGET_PRINT_OPERAND
20057 #define TARGET_PRINT_OPERAND mips_print_operand
20058 #undef TARGET_PRINT_OPERAND_ADDRESS
20059 #define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
20060 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
20061 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p
20063 #undef TARGET_SETUP_INCOMING_VARARGS
20064 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
20065 #undef TARGET_STRICT_ARGUMENT_NAMING
20066 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
20067 #undef TARGET_MUST_PASS_IN_STACK
20068 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
20069 #undef TARGET_PASS_BY_REFERENCE
20070 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
20071 #undef TARGET_CALLEE_COPIES
20072 #define TARGET_CALLEE_COPIES mips_callee_copies
20073 #undef TARGET_ARG_PARTIAL_BYTES
20074 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
20075 #undef TARGET_FUNCTION_ARG
20076 #define TARGET_FUNCTION_ARG mips_function_arg
20077 #undef TARGET_FUNCTION_ARG_ADVANCE
20078 #define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
20079 #undef TARGET_FUNCTION_ARG_BOUNDARY
20080 #define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
20081 #undef TARGET_GET_RAW_RESULT_MODE
20082 #define TARGET_GET_RAW_RESULT_MODE mips_get_reg_raw_mode
20083 #undef TARGET_GET_RAW_ARG_MODE
20084 #define TARGET_GET_RAW_ARG_MODE mips_get_reg_raw_mode
20086 #undef TARGET_MODE_REP_EXTENDED
20087 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
20089 #undef TARGET_VECTOR_MODE_SUPPORTED_P
20090 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
20092 #undef TARGET_SCALAR_MODE_SUPPORTED_P
20093 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
20095 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
20096 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode
20098 #undef TARGET_INIT_BUILTINS
20099 #define TARGET_INIT_BUILTINS mips_init_builtins
20100 #undef TARGET_BUILTIN_DECL
20101 #define TARGET_BUILTIN_DECL mips_builtin_decl
20102 #undef TARGET_EXPAND_BUILTIN
20103 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
20105 #undef TARGET_HAVE_TLS
20106 #define TARGET_HAVE_TLS HAVE_AS_TLS
20108 #undef TARGET_CANNOT_FORCE_CONST_MEM
20109 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
20111 #undef TARGET_LEGITIMATE_CONSTANT_P
20112 #define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p
20114 #undef TARGET_ENCODE_SECTION_INFO
20115 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
20117 #undef TARGET_ATTRIBUTE_TABLE
20118 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
20119 /* All our function attributes are related to how out-of-line copies should
20120 be compiled or called. They don't in themselves prevent inlining. */
20121 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
20122 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
20124 #undef TARGET_EXTRA_LIVE_ON_ENTRY
20125 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
20127 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
20128 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
20129 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
20130 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
20132 #undef TARGET_COMP_TYPE_ATTRIBUTES
20133 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
20135 #ifdef HAVE_AS_DTPRELWORD
20136 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
20137 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
20139 #undef TARGET_DWARF_REGISTER_SPAN
20140 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
20141 #undef TARGET_DWARF_FRAME_REG_MODE
20142 #define TARGET_DWARF_FRAME_REG_MODE mips_dwarf_frame_reg_mode
20144 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
20145 #define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
20147 #undef TARGET_LEGITIMATE_ADDRESS_P
20148 #define TARGET_LEGITIMATE_ADDRESS_P mips_legitimate_address_p
20150 #undef TARGET_FRAME_POINTER_REQUIRED
20151 #define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required
20153 #undef TARGET_CAN_ELIMINATE
20154 #define TARGET_CAN_ELIMINATE mips_can_eliminate
20156 #undef TARGET_CONDITIONAL_REGISTER_USAGE
20157 #define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage
20159 #undef TARGET_TRAMPOLINE_INIT
20160 #define TARGET_TRAMPOLINE_INIT mips_trampoline_init
20162 #undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
20163 #define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename
20165 #undef TARGET_SHIFT_TRUNCATION_MASK
20166 #define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask
20168 #undef TARGET_PREPARE_PCH_SAVE
20169 #define TARGET_PREPARE_PCH_SAVE mips_prepare_pch_save
20171 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
20172 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK mips_vectorize_vec_perm_const_ok
20174 #undef TARGET_CASE_VALUES_THRESHOLD
20175 #define TARGET_CASE_VALUES_THRESHOLD mips_case_values_threshold
20177 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
20178 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV mips_atomic_assign_expand_fenv
20180 #undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
20181 #define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
20183 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
20184 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
20185 mips_use_by_pieces_infrastructure_p
20187 #undef TARGET_SPILL_CLASS
20188 #define TARGET_SPILL_CLASS mips_spill_class
20189 #undef TARGET_LRA_P
20190 #define TARGET_LRA_P mips_lra_p
20191 #undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
20192 #define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS mips_ira_change_pseudo_allocno_class
20194 #undef TARGET_HARD_REGNO_SCRATCH_OK
20195 #define TARGET_HARD_REGNO_SCRATCH_OK mips_hard_regno_scratch_ok
20197 struct gcc_target targetm
= TARGET_INITIALIZER
;
20199 #include "gt-mips.h"