1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989-2019 Free Software Foundation, Inc.
3 Contributed by A. Lichnewsky, lich@inria.inria.fr.
4 Changes by Michael Meissner, meissner@osf.org.
5 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
6 Brendan Eich, brendan@microunity.com.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #define IN_TARGET_CODE 1
28 #include "coretypes.h"
38 #include "stringpool.h"
45 #include "diagnostic.h"
46 #include "insn-attr.h"
49 #include "fold-const.h"
51 #include "stor-layout.h"
57 #include "common/common-target.h"
58 #include "langhooks.h"
61 #include "sched-int.h"
63 #include "target-globals.h"
64 #include "tree-pass.h"
69 /* This file should be included last. */
70 #include "target-def.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.
   Such wrappers tag an address with a mips_symbol_type; the type is
   stored biased by UNSPEC_ADDRESS_FIRST in XINT (X, 1).  */
#define UNSPEC_ADDRESS_P(X) \
(GET_CODE (X) == UNSPEC \
&& XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
&& XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
78 /* Extract the symbol or label from UNSPEC wrapper X. */
79 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  The type is stored
   in XINT (X, 1) biased by UNSPEC_ADDRESS_FIRST, so subtracting the
   bias recovers the mips_symbol_type enumerator.  */
#define UNSPEC_ADDRESS_TYPE(X) \
((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   microMIPS LWM and SWM support 12-bit offsets (from -0x800 to 0x7ff),
   so we use a maximum of 0x7f0 for TARGET_MICROMIPS.
   NOTE(review): the macro below actually uses 0x7f8 for both the
   microMIPS and MIPS16e cases — confirm whether the comment or the
   code is the intended value.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP \
(!TARGET_COMPRESSION ? 0x7ff0 \
: TARGET_MICROMIPS || GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
: TARGET_64BIT ? 0x100 : 0x400)
/* True if INSN is a mips.md pattern or asm statement: a real,
   non-debug instruction whose pattern is neither a bare USE nor a
   bare CLOBBER.  */
/* ??? This test exists through the compiler; perhaps it should be
   moved into a shared RTL header rather than duplicated here.  */
#define USEFUL_INSN_P(INSN) \
(NONDEBUG_INSN_P (INSN) \
&& GET_CODE (PATTERN (INSN)) != USE \
&& GET_CODE (PATTERN (INSN)) != CLOBBER)
118 /* If INSN is a delayed branch sequence, return the first instruction
119 in the sequence, otherwise return INSN itself. */
120 #define SEQ_BEGIN(INSN) \
121 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
122 ? as_a <rtx_insn *> (XVECEXP (PATTERN (INSN), 0, 0)) \
125 /* Likewise for the last instruction in a delayed branch sequence. */
126 #define SEQ_END(INSN) \
127 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
128 ? as_a <rtx_insn *> (XVECEXP (PATTERN (INSN), \
130 XVECLEN (PATTERN (INSN), 0) - 1)) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive, walking the
   insn chain via NEXT_INSN.  For a non-SEQUENCE insn this visits just
   INSN itself.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN) \
for ((SUBINSN) = SEQ_BEGIN (INSN); \
(SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
(SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  Test by shifting VALUE right
   rather than shifting 1 left: "1 << 31" overflows a signed int
   (undefined behavior), and a left-shifted int mask would also only
   cover the low 32 bits when VALUE is a wider type such as
   HOST_WIDE_INT.  */
#define BITSET_P(VALUE, BIT) ((((VALUE) >> (BIT)) & 1) != 0)
143 /* Return the opcode for a ptr_mode load of the form:
145 l[wd] DEST, OFFSET(BASE). */
146 #define MIPS_LOAD_PTR(DEST, OFFSET, BASE) \
147 (((ptr_mode == DImode ? 0x37 : 0x23) << 26) \
152 /* Return the opcode to move register SRC into register DEST. */
153 #define MIPS_MOVE(DEST, SRC) \
154 ((TARGET_64BIT ? 0x2d : 0x21) \
/* Return the opcode for:

     lui DEST, VALUE

   where DEST is a 5-bit register number and VALUE the 16-bit
   immediate; 0xf is the LUI major opcode.  */
#define MIPS_LUI(DEST, VALUE) \
((0xf << 26) | ((DEST) << 16) | (VALUE))
/* Return the opcode to jump to register DEST.  When the JR opcode is not
   available use JALR $0, DEST (link register $0 discards the return
   address).  Under TARGET_CB_ALWAYS a compact-branch encoding is emitted
   instead — presumably JIC DEST, 0; confirm against the MIPSr6 opcode
   tables.  */
#define MIPS_JR(DEST) \
(TARGET_CB_ALWAYS ? ((0x1b << 27) | ((DEST) << 16)) \
: (((DEST) << 21) | (ISA_HAS_JR ? 0x8 : 0x9)))
/* Return the opcode for:

     bal . + (1 + OFFSET) * 4

   i.e. a branch-and-link whose 16-bit offset field is OFFSET
   instructions past the delay slot.  */
#define MIPS_BAL(OFFSET) \
((0x1 << 26) | (0x11 << 16) | (OFFSET))
176 /* Return the usual opcode for a nop. */
179 /* Classifies an address.
182 A natural register + offset address. The register satisfies
183 mips_valid_base_register_p and the offset is a const_arith_operand.
186 A LO_SUM rtx. The first operand is a valid base register and
187 the second operand is a symbolic address.
190 A signed 16-bit constant address.
193 A constant symbolic address. */
194 enum mips_address_type
{
201 /* Classifies an unconditional branch of interest for the P6600. */
203 enum mips_ucbranch_type
205 /* May not even be a branch. */
/* Macros to create an enumeration identifier for a function prototype.
   MIPS_FTYPE_NAMEn pastes return type A and n argument types into a
   single MIPS_<ret>_FTYPE_<arg1>_..._<argn> enumerator; used with
   mips-ftypes.def to build enum mips_function_type.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
217 /* Classifies the prototype of a built-in function. */
218 enum mips_function_type
{
219 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
220 #include "config/mips/mips-ftypes.def"
221 #undef DEF_MIPS_FTYPE
225 /* Specifies how a built-in function should be converted into rtl. */
226 enum mips_builtin_type
{
227 /* The function corresponds directly to an .md pattern. The return
228 value is mapped to operand 0 and the arguments are mapped to
229 operands 1 and above. */
232 /* The function corresponds directly to an .md pattern. There is no return
233 value and the arguments are mapped to operands 0 and above. */
234 MIPS_BUILTIN_DIRECT_NO_TARGET
,
236 /* The function corresponds to a comparison instruction followed by
237 a mips_cond_move_tf_ps pattern. The first two arguments are the
238 values to compare and the second two arguments are the vector
239 operands for the movt.ps or movf.ps instruction (in assembly order). */
243 /* The function corresponds to a V2SF comparison instruction. Operand 0
244 of this instruction is the result of the comparison, which has mode
245 CCV2 or CCV4. The function arguments are mapped to operands 1 and
246 above. The function's return value is an SImode boolean that is
247 true under the following conditions:
249 MIPS_BUILTIN_CMP_ANY: one of the registers is true
250 MIPS_BUILTIN_CMP_ALL: all of the registers are true
251 MIPS_BUILTIN_CMP_LOWER: the first register is true
252 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
253 MIPS_BUILTIN_CMP_ANY
,
254 MIPS_BUILTIN_CMP_ALL
,
255 MIPS_BUILTIN_CMP_UPPER
,
256 MIPS_BUILTIN_CMP_LOWER
,
258 /* As above, but the instruction only sets a single $fcc register. */
259 MIPS_BUILTIN_CMP_SINGLE
,
261 /* The function corresponds to an MSA conditional branch instruction
262 combined with a compare instruction. */
263 MIPS_BUILTIN_MSA_TEST_BRANCH
,
265 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
266 MIPS_BUILTIN_BPOSGE32
269 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
270 #define MIPS_FP_CONDITIONS(MACRO) \
288 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
289 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
290 enum mips_fp_condition
{
291 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND
)
293 #undef DECLARE_MIPS_COND
295 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
296 #define STRINGIFY(X) #X
297 static const char *const mips_fp_conditions
[] = {
298 MIPS_FP_CONDITIONS (STRINGIFY
)
302 /* A class used to control a comdat-style stub that we output in each
303 translation unit that needs it. */
304 class mips_one_only_stub
{
306 virtual ~mips_one_only_stub () {}
308 /* Return the name of the stub. */
309 virtual const char *get_name () = 0;
311 /* Output the body of the function to asm_out_file. */
312 virtual void output_body () = 0;
315 /* Tuning information that is automatically derived from other sources
316 (such as the scheduler). */
318 /* The architecture and tuning settings that this structure describes. */
322 /* True if this structure describes MIPS16 settings. */
325 /* True if the structure has been initialized. */
328 /* True if "MULT $0, $0" is preferable to "MTLO $0; MTHI $0"
329 when optimizing for speed. */
330 bool fast_mult_zero_zero_p
;
333 /* Information about a single argument. */
334 struct mips_arg_info
{
335 /* True if the argument is passed in a floating-point register, or
336 would have been if we hadn't run out of registers. */
339 /* The number of words passed in registers, rounded up. */
340 unsigned int reg_words
;
342 /* For EABI, the offset of the first register from GP_ARG_FIRST or
343 FP_ARG_FIRST. For other ABIs, the offset of the first register from
344 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
345 comment for details).
347 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
349 unsigned int reg_offset
;
351 /* The number of words that must be passed on the stack, rounded up. */
352 unsigned int stack_words
;
354 /* The offset from the start of the stack overflow area of the argument's
355 first stack word. Only meaningful when STACK_WORDS is nonzero. */
356 unsigned int stack_offset
;
359 /* Information about an address described by mips_address_type.
365 REG is the base register and OFFSET is the constant offset.
368 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
369 is the type of symbol it references.
372 SYMBOL_TYPE is the type of symbol that the address references. */
373 struct mips_address_info
{
374 enum mips_address_type type
;
377 enum mips_symbol_type symbol_type
;
380 /* One stage in a constant building sequence. These sequences have
384 A = A CODE[1] VALUE[1]
385 A = A CODE[2] VALUE[2]
388 where A is an accumulator, each CODE[i] is a binary rtl operation
389 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
390 struct mips_integer_op
{
392 unsigned HOST_WIDE_INT value
;
/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is the six-operation
   sequence LUI,ORI,SLL,ORI,SLL,ORI.  When the lowest bit is clear, we
   can try, but reject, a sequence with an extra SLL at the end — hence
   the limit of 7.  */
#define MIPS_MAX_INTEGER_OPS 7
401 /* Information about a MIPS16e SAVE or RESTORE instruction. */
402 struct mips16e_save_restore_info
{
403 /* The number of argument registers saved by a SAVE instruction.
404 0 for RESTORE instructions. */
407 /* Bit X is set if the instruction saves or restores GPR X. */
410 /* The total number of bytes to allocate. */
414 /* Costs of various operations on the different architectures. */
416 struct mips_rtx_cost_data
418 unsigned short fp_add
;
419 unsigned short fp_mult_sf
;
420 unsigned short fp_mult_df
;
421 unsigned short fp_div_sf
;
422 unsigned short fp_div_df
;
423 unsigned short int_mult_si
;
424 unsigned short int_mult_di
;
425 unsigned short int_div_si
;
426 unsigned short int_div_di
;
427 unsigned short branch_cost
;
428 unsigned short memory_latency
;
431 /* Global variables for machine-dependent things. */
433 /* The -G setting, or the configuration's default small-data limit if
434 no -G option is given. */
435 static unsigned int mips_small_data_threshold
;
437 /* The number of file directives written by mips_output_filename. */
438 int num_source_filenames
;
440 /* The name that appeared in the last .file directive written by
441 mips_output_filename, or "" if mips_output_filename hasn't
442 written anything yet. */
443 const char *current_function_file
= "";
445 /* Arrays that map GCC register numbers to debugger register numbers. */
446 int mips_dbx_regno
[FIRST_PSEUDO_REGISTER
];
447 int mips_dwarf_regno
[FIRST_PSEUDO_REGISTER
];
449 /* Information about the current function's epilogue, used only while
452 /* A list of queued REG_CFA_RESTORE notes. */
455 /* The CFA is currently defined as CFA_REG + CFA_OFFSET. */
457 HOST_WIDE_INT cfa_offset
;
459 /* The offset of the CFA from the stack pointer while restoring
461 HOST_WIDE_INT cfa_restore_sp_offset
;
464 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
465 struct mips_asm_switch mips_noreorder
= { "reorder", 0 };
466 struct mips_asm_switch mips_nomacro
= { "macro", 0 };
467 struct mips_asm_switch mips_noat
= { "at", 0 };
469 /* True if we're writing out a branch-likely instruction rather than a
471 static bool mips_branch_likely
;
473 /* The current instruction-set architecture. */
474 enum processor mips_arch
;
475 const struct mips_cpu_info
*mips_arch_info
;
477 /* The processor that we should tune the code for. */
478 enum processor mips_tune
;
479 const struct mips_cpu_info
*mips_tune_info
;
481 /* The ISA level associated with mips_arch. */
484 /* The ISA revision level. This is 0 for MIPS I to V and N for
488 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
489 static const struct mips_cpu_info
*mips_isa_option_info
;
491 /* Which cost information to use. */
492 static const struct mips_rtx_cost_data
*mips_cost
;
494 /* The ambient target flags, excluding MASK_MIPS16. */
495 static int mips_base_target_flags
;
497 /* The default compression mode. */
498 unsigned int mips_base_compression_flags
;
500 /* The ambient values of other global variables. */
501 static int mips_base_schedule_insns
; /* flag_schedule_insns */
502 static int mips_base_reorder_blocks_and_partition
; /* flag_reorder... */
503 static int mips_base_move_loop_invariants
; /* flag_move_loop_invariants */
504 static const char *mips_base_align_loops
; /* align_loops */
505 static const char *mips_base_align_jumps
; /* align_jumps */
506 static const char *mips_base_align_functions
; /* align_functions */
508 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
509 static bool mips_hard_regno_mode_ok_p
[MAX_MACHINE_MODE
][FIRST_PSEUDO_REGISTER
];
511 /* Index C is true if character C is a valid PRINT_OPERAND punctation
513 static bool mips_print_operand_punct
[256];
515 static GTY (()) int mips_output_filename_first_time
= 1;
517 /* mips_split_p[X] is true if symbols of type X can be split by
518 mips_split_symbol. */
519 bool mips_split_p
[NUM_SYMBOL_TYPES
];
521 /* mips_split_hi_p[X] is true if the high parts of symbols of type X
522 can be split by mips_split_symbol. */
523 bool mips_split_hi_p
[NUM_SYMBOL_TYPES
];
525 /* mips_use_pcrel_pool_p[X] is true if symbols of type X should be
526 forced into a PC-relative constant pool. */
527 bool mips_use_pcrel_pool_p
[NUM_SYMBOL_TYPES
];
529 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
530 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
531 if they are matched by a special .md file pattern. */
532 const char *mips_lo_relocs
[NUM_SYMBOL_TYPES
];
534 /* Likewise for HIGHs. */
535 const char *mips_hi_relocs
[NUM_SYMBOL_TYPES
];
537 /* Target state for MIPS16. */
538 struct target_globals
*mips16_globals
;
540 /* Target state for MICROMIPS. */
541 struct target_globals
*micromips_globals
;
543 /* Cached value of can_issue_more. This is cached in mips_variable_issue hook
544 and returned from mips_sched_reorder2. */
545 static int cached_can_issue_more
;
547 /* The stubs for various MIPS16 support functions, if used. */
548 static mips_one_only_stub
*mips16_rdhwr_stub
;
549 static mips_one_only_stub
*mips16_get_fcsr_stub
;
550 static mips_one_only_stub
*mips16_set_fcsr_stub
;
552 /* Index R is the smallest register class that contains register R. */
553 const enum reg_class mips_regno_to_class
[FIRST_PSEUDO_REGISTER
] = {
554 LEA_REGS
, LEA_REGS
, M16_STORE_REGS
, V1_REG
,
555 M16_STORE_REGS
, M16_STORE_REGS
, M16_STORE_REGS
, M16_STORE_REGS
,
556 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
557 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
558 M16_REGS
, M16_STORE_REGS
, LEA_REGS
, LEA_REGS
,
559 LEA_REGS
, LEA_REGS
, LEA_REGS
, LEA_REGS
,
560 T_REG
, PIC_FN_ADDR_REG
, LEA_REGS
, LEA_REGS
,
561 LEA_REGS
, M16_SP_REGS
, LEA_REGS
, LEA_REGS
,
563 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
564 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
565 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
566 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
567 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
568 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
569 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
570 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
571 MD0_REG
, MD1_REG
, NO_REGS
, ST_REGS
,
572 ST_REGS
, ST_REGS
, ST_REGS
, ST_REGS
,
573 ST_REGS
, ST_REGS
, ST_REGS
, NO_REGS
,
574 NO_REGS
, FRAME_REGS
, FRAME_REGS
, NO_REGS
,
575 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
576 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
577 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
578 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
579 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
580 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
581 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
582 COP0_REGS
, COP0_REGS
, COP0_REGS
, COP0_REGS
,
583 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
584 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
585 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
586 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
587 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
588 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
589 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
590 COP2_REGS
, COP2_REGS
, COP2_REGS
, COP2_REGS
,
591 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
592 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
593 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
594 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
595 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
596 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
597 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
598 COP3_REGS
, COP3_REGS
, COP3_REGS
, COP3_REGS
,
599 DSP_ACC_REGS
, DSP_ACC_REGS
, DSP_ACC_REGS
, DSP_ACC_REGS
,
600 DSP_ACC_REGS
, DSP_ACC_REGS
, ALL_REGS
, ALL_REGS
,
601 ALL_REGS
, ALL_REGS
, ALL_REGS
, ALL_REGS
604 static tree
mips_handle_interrupt_attr (tree
*, tree
, tree
, int, bool *);
605 static tree
mips_handle_use_shadow_register_set_attr (tree
*, tree
, tree
, int,
608 /* The value of TARGET_ATTRIBUTE_TABLE. */
609 static const struct attribute_spec mips_attribute_table
[] = {
610 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
611 affects_type_identity, handler, exclude } */
612 { "long_call", 0, 0, false, true, true, false, NULL
, NULL
},
613 { "short_call", 0, 0, false, true, true, false, NULL
, NULL
},
614 { "far", 0, 0, false, true, true, false, NULL
, NULL
},
615 { "near", 0, 0, false, true, true, false, NULL
, NULL
},
616 /* We would really like to treat "mips16" and "nomips16" as type
617 attributes, but GCC doesn't provide the hooks we need to support
618 the right conversion rules. As declaration attributes, they affect
619 code generation but don't carry other semantics. */
620 { "mips16", 0, 0, true, false, false, false, NULL
, NULL
},
621 { "nomips16", 0, 0, true, false, false, false, NULL
, NULL
},
622 { "micromips", 0, 0, true, false, false, false, NULL
, NULL
},
623 { "nomicromips", 0, 0, true, false, false, false, NULL
, NULL
},
624 { "nocompression", 0, 0, true, false, false, false, NULL
, NULL
},
625 /* Allow functions to be specified as interrupt handlers */
626 { "interrupt", 0, 1, false, true, true, false, mips_handle_interrupt_attr
,
628 { "use_shadow_register_set", 0, 1, false, true, true, false,
629 mips_handle_use_shadow_register_set_attr
, NULL
},
630 { "keep_interrupts_masked", 0, 0, false, true, true, false, NULL
, NULL
},
631 { "use_debug_exception_return", 0, 0, false, true, true, false, NULL
, NULL
},
632 { NULL
, 0, 0, false, false, false, false, NULL
, NULL
}
635 /* A table describing all the processors GCC knows about; see
636 mips-cpus.def for details. */
637 static const struct mips_cpu_info mips_cpu_info_table
[] = {
638 #define MIPS_CPU(NAME, CPU, ISA, FLAGS) \
639 { NAME, CPU, ISA, FLAGS },
640 #include "mips-cpus.def"
644 /* Default costs. If these are used for a processor we should look
645 up the actual costs. */
646 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
647 COSTS_N_INSNS (7), /* fp_mult_sf */ \
648 COSTS_N_INSNS (8), /* fp_mult_df */ \
649 COSTS_N_INSNS (23), /* fp_div_sf */ \
650 COSTS_N_INSNS (36), /* fp_div_df */ \
651 COSTS_N_INSNS (10), /* int_mult_si */ \
652 COSTS_N_INSNS (10), /* int_mult_di */ \
653 COSTS_N_INSNS (69), /* int_div_si */ \
654 COSTS_N_INSNS (69), /* int_div_di */ \
655 2, /* branch_cost */ \
656 4 /* memory_latency */
658 /* Floating-point costs for processors without an FPU. Just assume that
659 all floating-point libcalls are very expensive. */
660 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
661 COSTS_N_INSNS (256), /* fp_mult_sf */ \
662 COSTS_N_INSNS (256), /* fp_mult_df */ \
663 COSTS_N_INSNS (256), /* fp_div_sf */ \
664 COSTS_N_INSNS (256) /* fp_div_df */
666 /* Costs to use when optimizing for size. */
667 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size
= {
668 COSTS_N_INSNS (1), /* fp_add */
669 COSTS_N_INSNS (1), /* fp_mult_sf */
670 COSTS_N_INSNS (1), /* fp_mult_df */
671 COSTS_N_INSNS (1), /* fp_div_sf */
672 COSTS_N_INSNS (1), /* fp_div_df */
673 COSTS_N_INSNS (1), /* int_mult_si */
674 COSTS_N_INSNS (1), /* int_mult_di */
675 COSTS_N_INSNS (1), /* int_div_si */
676 COSTS_N_INSNS (1), /* int_div_di */
678 4 /* memory_latency */
681 /* Costs to use when optimizing for speed, indexed by processor. */
682 static const struct mips_rtx_cost_data
683 mips_rtx_cost_data
[NUM_PROCESSOR_VALUES
] = {
685 COSTS_N_INSNS (2), /* fp_add */
686 COSTS_N_INSNS (4), /* fp_mult_sf */
687 COSTS_N_INSNS (5), /* fp_mult_df */
688 COSTS_N_INSNS (12), /* fp_div_sf */
689 COSTS_N_INSNS (19), /* fp_div_df */
690 COSTS_N_INSNS (12), /* int_mult_si */
691 COSTS_N_INSNS (12), /* int_mult_di */
692 COSTS_N_INSNS (35), /* int_div_si */
693 COSTS_N_INSNS (35), /* int_div_di */
695 4 /* memory_latency */
699 COSTS_N_INSNS (6), /* int_mult_si */
700 COSTS_N_INSNS (6), /* int_mult_di */
701 COSTS_N_INSNS (36), /* int_div_si */
702 COSTS_N_INSNS (36), /* int_div_di */
704 4 /* memory_latency */
708 COSTS_N_INSNS (36), /* int_mult_si */
709 COSTS_N_INSNS (36), /* int_mult_di */
710 COSTS_N_INSNS (37), /* int_div_si */
711 COSTS_N_INSNS (37), /* int_div_di */
713 4 /* memory_latency */
717 COSTS_N_INSNS (4), /* int_mult_si */
718 COSTS_N_INSNS (11), /* int_mult_di */
719 COSTS_N_INSNS (36), /* int_div_si */
720 COSTS_N_INSNS (68), /* int_div_di */
722 4 /* memory_latency */
725 COSTS_N_INSNS (4), /* fp_add */
726 COSTS_N_INSNS (4), /* fp_mult_sf */
727 COSTS_N_INSNS (5), /* fp_mult_df */
728 COSTS_N_INSNS (17), /* fp_div_sf */
729 COSTS_N_INSNS (32), /* fp_div_df */
730 COSTS_N_INSNS (4), /* int_mult_si */
731 COSTS_N_INSNS (11), /* int_mult_di */
732 COSTS_N_INSNS (36), /* int_div_si */
733 COSTS_N_INSNS (68), /* int_div_di */
735 4 /* memory_latency */
738 COSTS_N_INSNS (4), /* fp_add */
739 COSTS_N_INSNS (4), /* fp_mult_sf */
740 COSTS_N_INSNS (5), /* fp_mult_df */
741 COSTS_N_INSNS (17), /* fp_div_sf */
742 COSTS_N_INSNS (32), /* fp_div_df */
743 COSTS_N_INSNS (4), /* int_mult_si */
744 COSTS_N_INSNS (7), /* int_mult_di */
745 COSTS_N_INSNS (42), /* int_div_si */
746 COSTS_N_INSNS (72), /* int_div_di */
748 4 /* memory_latency */
752 COSTS_N_INSNS (5), /* int_mult_si */
753 COSTS_N_INSNS (5), /* int_mult_di */
754 COSTS_N_INSNS (41), /* int_div_si */
755 COSTS_N_INSNS (41), /* int_div_di */
757 4 /* memory_latency */
760 COSTS_N_INSNS (8), /* fp_add */
761 COSTS_N_INSNS (8), /* fp_mult_sf */
762 COSTS_N_INSNS (10), /* fp_mult_df */
763 COSTS_N_INSNS (34), /* fp_div_sf */
764 COSTS_N_INSNS (64), /* fp_div_df */
765 COSTS_N_INSNS (5), /* int_mult_si */
766 COSTS_N_INSNS (5), /* int_mult_di */
767 COSTS_N_INSNS (41), /* int_div_si */
768 COSTS_N_INSNS (41), /* int_div_di */
770 4 /* memory_latency */
773 COSTS_N_INSNS (4), /* fp_add */
774 COSTS_N_INSNS (4), /* fp_mult_sf */
775 COSTS_N_INSNS (5), /* fp_mult_df */
776 COSTS_N_INSNS (17), /* fp_div_sf */
777 COSTS_N_INSNS (32), /* fp_div_df */
778 COSTS_N_INSNS (5), /* int_mult_si */
779 COSTS_N_INSNS (5), /* int_mult_di */
780 COSTS_N_INSNS (41), /* int_div_si */
781 COSTS_N_INSNS (41), /* int_div_di */
783 4 /* memory_latency */
787 COSTS_N_INSNS (5), /* int_mult_si */
788 COSTS_N_INSNS (5), /* int_mult_di */
789 COSTS_N_INSNS (41), /* int_div_si */
790 COSTS_N_INSNS (41), /* int_div_di */
792 4 /* memory_latency */
795 COSTS_N_INSNS (8), /* fp_add */
796 COSTS_N_INSNS (8), /* fp_mult_sf */
797 COSTS_N_INSNS (10), /* fp_mult_df */
798 COSTS_N_INSNS (34), /* fp_div_sf */
799 COSTS_N_INSNS (64), /* fp_div_df */
800 COSTS_N_INSNS (5), /* int_mult_si */
801 COSTS_N_INSNS (5), /* int_mult_di */
802 COSTS_N_INSNS (41), /* int_div_si */
803 COSTS_N_INSNS (41), /* int_div_di */
805 4 /* memory_latency */
808 COSTS_N_INSNS (4), /* fp_add */
809 COSTS_N_INSNS (4), /* fp_mult_sf */
810 COSTS_N_INSNS (5), /* fp_mult_df */
811 COSTS_N_INSNS (17), /* fp_div_sf */
812 COSTS_N_INSNS (32), /* fp_div_df */
813 COSTS_N_INSNS (5), /* int_mult_si */
814 COSTS_N_INSNS (5), /* int_mult_di */
815 COSTS_N_INSNS (41), /* int_div_si */
816 COSTS_N_INSNS (41), /* int_div_di */
818 4 /* memory_latency */
821 COSTS_N_INSNS (6), /* fp_add */
822 COSTS_N_INSNS (6), /* fp_mult_sf */
823 COSTS_N_INSNS (7), /* fp_mult_df */
824 COSTS_N_INSNS (25), /* fp_div_sf */
825 COSTS_N_INSNS (48), /* fp_div_df */
826 COSTS_N_INSNS (5), /* int_mult_si */
827 COSTS_N_INSNS (5), /* int_mult_di */
828 COSTS_N_INSNS (41), /* int_div_si */
829 COSTS_N_INSNS (41), /* int_div_di */
831 4 /* memory_latency */
839 { /* Loongson gs464. */
842 { /* Loongson gs464e. */
845 { /* Loongson gs264e. */
854 COSTS_N_INSNS (5), /* int_mult_si */
855 COSTS_N_INSNS (5), /* int_mult_di */
856 COSTS_N_INSNS (72), /* int_div_si */
857 COSTS_N_INSNS (72), /* int_div_di */
859 4 /* memory_latency */
864 COSTS_N_INSNS (6), /* int_mult_si */
865 COSTS_N_INSNS (6), /* int_mult_di */
866 COSTS_N_INSNS (18), /* int_div_si */
867 COSTS_N_INSNS (35), /* int_div_di */
869 4 /* memory_latency */
873 COSTS_N_INSNS (6), /* fp_add */
874 COSTS_N_INSNS (6), /* fp_mult_sf */
875 COSTS_N_INSNS (7), /* fp_mult_df */
876 COSTS_N_INSNS (25), /* fp_div_sf */
877 COSTS_N_INSNS (48), /* fp_div_df */
878 COSTS_N_INSNS (6), /* int_mult_si */
879 COSTS_N_INSNS (6), /* int_mult_di */
880 COSTS_N_INSNS (18), /* int_div_si */
881 COSTS_N_INSNS (35), /* int_div_di */
883 4 /* memory_latency */
886 COSTS_N_INSNS (2), /* fp_add */
887 COSTS_N_INSNS (4), /* fp_mult_sf */
888 COSTS_N_INSNS (5), /* fp_mult_df */
889 COSTS_N_INSNS (12), /* fp_div_sf */
890 COSTS_N_INSNS (19), /* fp_div_df */
891 COSTS_N_INSNS (2), /* int_mult_si */
892 COSTS_N_INSNS (2), /* int_mult_di */
893 COSTS_N_INSNS (35), /* int_div_si */
894 COSTS_N_INSNS (35), /* int_div_di */
896 4 /* memory_latency */
899 COSTS_N_INSNS (3), /* fp_add */
900 COSTS_N_INSNS (5), /* fp_mult_sf */
901 COSTS_N_INSNS (6), /* fp_mult_df */
902 COSTS_N_INSNS (15), /* fp_div_sf */
903 COSTS_N_INSNS (16), /* fp_div_df */
904 COSTS_N_INSNS (17), /* int_mult_si */
905 COSTS_N_INSNS (17), /* int_mult_di */
906 COSTS_N_INSNS (38), /* int_div_si */
907 COSTS_N_INSNS (38), /* int_div_di */
909 6 /* memory_latency */
912 COSTS_N_INSNS (6), /* fp_add */
913 COSTS_N_INSNS (7), /* fp_mult_sf */
914 COSTS_N_INSNS (8), /* fp_mult_df */
915 COSTS_N_INSNS (23), /* fp_div_sf */
916 COSTS_N_INSNS (36), /* fp_div_df */
917 COSTS_N_INSNS (10), /* int_mult_si */
918 COSTS_N_INSNS (10), /* int_mult_di */
919 COSTS_N_INSNS (69), /* int_div_si */
920 COSTS_N_INSNS (69), /* int_div_di */
922 6 /* memory_latency */
934 /* The only costs that appear to be updated here are
935 integer multiplication. */
937 COSTS_N_INSNS (4), /* int_mult_si */
938 COSTS_N_INSNS (6), /* int_mult_di */
939 COSTS_N_INSNS (69), /* int_div_si */
940 COSTS_N_INSNS (69), /* int_div_di */
942 4 /* memory_latency */
957 COSTS_N_INSNS (6), /* fp_add */
958 COSTS_N_INSNS (4), /* fp_mult_sf */
959 COSTS_N_INSNS (5), /* fp_mult_df */
960 COSTS_N_INSNS (23), /* fp_div_sf */
961 COSTS_N_INSNS (36), /* fp_div_df */
962 COSTS_N_INSNS (5), /* int_mult_si */
963 COSTS_N_INSNS (5), /* int_mult_di */
964 COSTS_N_INSNS (36), /* int_div_si */
965 COSTS_N_INSNS (36), /* int_div_di */
967 4 /* memory_latency */
970 COSTS_N_INSNS (6), /* fp_add */
971 COSTS_N_INSNS (5), /* fp_mult_sf */
972 COSTS_N_INSNS (6), /* fp_mult_df */
973 COSTS_N_INSNS (30), /* fp_div_sf */
974 COSTS_N_INSNS (59), /* fp_div_df */
975 COSTS_N_INSNS (3), /* int_mult_si */
976 COSTS_N_INSNS (4), /* int_mult_di */
977 COSTS_N_INSNS (42), /* int_div_si */
978 COSTS_N_INSNS (74), /* int_div_di */
980 4 /* memory_latency */
983 COSTS_N_INSNS (6), /* fp_add */
984 COSTS_N_INSNS (5), /* fp_mult_sf */
985 COSTS_N_INSNS (6), /* fp_mult_df */
986 COSTS_N_INSNS (30), /* fp_div_sf */
987 COSTS_N_INSNS (59), /* fp_div_df */
988 COSTS_N_INSNS (5), /* int_mult_si */
989 COSTS_N_INSNS (9), /* int_mult_di */
990 COSTS_N_INSNS (42), /* int_div_si */
991 COSTS_N_INSNS (74), /* int_div_di */
993 4 /* memory_latency */
996 COSTS_N_INSNS (4), /* fp_add */
997 COSTS_N_INSNS (4), /* fp_mult_sf */
998 COSTS_N_INSNS (256), /* fp_mult_df */
999 COSTS_N_INSNS (8), /* fp_div_sf */
1000 COSTS_N_INSNS (256), /* fp_div_df */
1001 COSTS_N_INSNS (4), /* int_mult_si */
1002 COSTS_N_INSNS (256), /* int_mult_di */
1003 COSTS_N_INSNS (37), /* int_div_si */
1004 COSTS_N_INSNS (256), /* int_div_di */
1005 1, /* branch_cost */
1006 4 /* memory_latency */
1009 /* The only costs that are changed here are
1010 integer multiplication. */
1011 COSTS_N_INSNS (6), /* fp_add */
1012 COSTS_N_INSNS (7), /* fp_mult_sf */
1013 COSTS_N_INSNS (8), /* fp_mult_df */
1014 COSTS_N_INSNS (23), /* fp_div_sf */
1015 COSTS_N_INSNS (36), /* fp_div_df */
1016 COSTS_N_INSNS (5), /* int_mult_si */
1017 COSTS_N_INSNS (9), /* int_mult_di */
1018 COSTS_N_INSNS (69), /* int_div_si */
1019 COSTS_N_INSNS (69), /* int_div_di */
1020 1, /* branch_cost */
1021 4 /* memory_latency */
1027 /* The only costs that are changed here are
1028 integer multiplication. */
1029 COSTS_N_INSNS (6), /* fp_add */
1030 COSTS_N_INSNS (7), /* fp_mult_sf */
1031 COSTS_N_INSNS (8), /* fp_mult_df */
1032 COSTS_N_INSNS (23), /* fp_div_sf */
1033 COSTS_N_INSNS (36), /* fp_div_df */
1034 COSTS_N_INSNS (3), /* int_mult_si */
1035 COSTS_N_INSNS (8), /* int_mult_di */
1036 COSTS_N_INSNS (69), /* int_div_si */
1037 COSTS_N_INSNS (69), /* int_div_di */
1038 1, /* branch_cost */
1039 4 /* memory_latency */
1042 COSTS_N_INSNS (2), /* fp_add */
1043 COSTS_N_INSNS (2), /* fp_mult_sf */
1044 COSTS_N_INSNS (2), /* fp_mult_df */
1045 COSTS_N_INSNS (12), /* fp_div_sf */
1046 COSTS_N_INSNS (19), /* fp_div_df */
1047 COSTS_N_INSNS (5), /* int_mult_si */
1048 COSTS_N_INSNS (9), /* int_mult_di */
1049 COSTS_N_INSNS (34), /* int_div_si */
1050 COSTS_N_INSNS (66), /* int_div_di */
1051 1, /* branch_cost */
1052 4 /* memory_latency */
1055 /* These costs are the same as the SB-1A below. */
1056 COSTS_N_INSNS (4), /* fp_add */
1057 COSTS_N_INSNS (4), /* fp_mult_sf */
1058 COSTS_N_INSNS (4), /* fp_mult_df */
1059 COSTS_N_INSNS (24), /* fp_div_sf */
1060 COSTS_N_INSNS (32), /* fp_div_df */
1061 COSTS_N_INSNS (3), /* int_mult_si */
1062 COSTS_N_INSNS (4), /* int_mult_di */
1063 COSTS_N_INSNS (36), /* int_div_si */
1064 COSTS_N_INSNS (68), /* int_div_di */
1065 1, /* branch_cost */
1066 4 /* memory_latency */
1069 /* These costs are the same as the SB-1 above. */
1070 COSTS_N_INSNS (4), /* fp_add */
1071 COSTS_N_INSNS (4), /* fp_mult_sf */
1072 COSTS_N_INSNS (4), /* fp_mult_df */
1073 COSTS_N_INSNS (24), /* fp_div_sf */
1074 COSTS_N_INSNS (32), /* fp_div_df */
1075 COSTS_N_INSNS (3), /* int_mult_si */
1076 COSTS_N_INSNS (4), /* int_mult_di */
1077 COSTS_N_INSNS (36), /* int_div_si */
1078 COSTS_N_INSNS (68), /* int_div_di */
1079 1, /* branch_cost */
1080 4 /* memory_latency */
1087 COSTS_N_INSNS (8), /* int_mult_si */
1088 COSTS_N_INSNS (8), /* int_mult_di */
1089 COSTS_N_INSNS (72), /* int_div_si */
1090 COSTS_N_INSNS (72), /* int_div_di */
1091 1, /* branch_cost */
1092 4 /* memory_latency */
1095 /* These costs are the same as 5KF above. */
1096 COSTS_N_INSNS (4), /* fp_add */
1097 COSTS_N_INSNS (4), /* fp_mult_sf */
1098 COSTS_N_INSNS (5), /* fp_mult_df */
1099 COSTS_N_INSNS (17), /* fp_div_sf */
1100 COSTS_N_INSNS (32), /* fp_div_df */
1101 COSTS_N_INSNS (4), /* int_mult_si */
1102 COSTS_N_INSNS (11), /* int_mult_di */
1103 COSTS_N_INSNS (36), /* int_div_si */
1104 COSTS_N_INSNS (68), /* int_div_di */
1105 1, /* branch_cost */
1106 4 /* memory_latency */
1109 COSTS_N_INSNS (4), /* fp_add */
1110 COSTS_N_INSNS (5), /* fp_mult_sf */
1111 COSTS_N_INSNS (5), /* fp_mult_df */
1112 COSTS_N_INSNS (17), /* fp_div_sf */
1113 COSTS_N_INSNS (17), /* fp_div_df */
1114 COSTS_N_INSNS (5), /* int_mult_si */
1115 COSTS_N_INSNS (5), /* int_mult_di */
1116 COSTS_N_INSNS (8), /* int_div_si */
1117 COSTS_N_INSNS (8), /* int_div_di */
1118 2, /* branch_cost */
1119 4 /* memory_latency */
1122 COSTS_N_INSNS (4), /* fp_add */
1123 COSTS_N_INSNS (4), /* fp_mult_sf */
1124 COSTS_N_INSNS (5), /* fp_mult_df */
1125 COSTS_N_INSNS (17), /* fp_div_sf */
1126 COSTS_N_INSNS (32), /* fp_div_df */
1127 COSTS_N_INSNS (5), /* int_mult_si */
1128 COSTS_N_INSNS (5), /* int_mult_di */
1129 COSTS_N_INSNS (34), /* int_div_si */
1130 COSTS_N_INSNS (68), /* int_div_di */
1131 1, /* branch_cost */
1132 4 /* memory_latency */
1135 COSTS_N_INSNS (4), /* fp_add */
1136 COSTS_N_INSNS (5), /* fp_mult_sf */
1137 COSTS_N_INSNS (5), /* fp_mult_df */
1138 COSTS_N_INSNS (32), /* fp_div_sf */
1139 COSTS_N_INSNS (32), /* fp_div_df */
1140 COSTS_N_INSNS (5), /* int_mult_si */
1141 COSTS_N_INSNS (5), /* int_mult_di */
1142 COSTS_N_INSNS (36), /* int_div_si */
1143 COSTS_N_INSNS (36), /* int_div_di */
1144 2, /* branch_cost */
1145 4 /* memory_latency */
1148 COSTS_N_INSNS (4), /* fp_add */
1149 COSTS_N_INSNS (5), /* fp_mult_sf */
1150 COSTS_N_INSNS (5), /* fp_mult_df */
1151 COSTS_N_INSNS (17), /* fp_div_sf */
1152 COSTS_N_INSNS (17), /* fp_div_df */
1153 COSTS_N_INSNS (5), /* int_mult_si */
1154 COSTS_N_INSNS (5), /* int_mult_di */
1155 COSTS_N_INSNS (8), /* int_div_si */
1156 COSTS_N_INSNS (8), /* int_div_di */
1157 2, /* branch_cost */
1158 4 /* memory_latency */
1162 static rtx
mips_find_pic_call_symbol (rtx_insn
*, rtx
, bool);
1163 static int mips_register_move_cost (machine_mode
, reg_class_t
,
1165 static unsigned int mips_function_arg_boundary (machine_mode
, const_tree
);
1166 static rtx
mips_gen_const_int_vector_shuffle (machine_mode
, int);
1168 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1169 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1170 static GTY (()) hash_map
<nofree_string_hash
, bool> *mflip_mips16_htab
;
1172 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1173 mode, false if it should next add an attribute for the opposite mode. */
1174 static GTY(()) bool mips16_flipper
;
1176 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1177 for -mflip-mips16. Return true if it should use "mips16" and false if
1178 it should use "nomips16". */
1181 mflip_mips16_use_mips16_p (tree decl
)
1184 bool base_is_mips16
= (mips_base_compression_flags
& MASK_MIPS16
) != 0;
1186 /* Use the opposite of the command-line setting for anonymous decls. */
1187 if (!DECL_NAME (decl
))
1188 return !base_is_mips16
;
1190 if (!mflip_mips16_htab
)
1191 mflip_mips16_htab
= hash_map
<nofree_string_hash
, bool>::create_ggc (37);
1193 name
= IDENTIFIER_POINTER (DECL_NAME (decl
));
1196 bool *slot
= &mflip_mips16_htab
->get_or_insert (name
, &existed
);
1199 mips16_flipper
= !mips16_flipper
;
1200 *slot
= mips16_flipper
? !base_is_mips16
: base_is_mips16
;
1205 /* Predicates to test for presence of "near"/"short_call" and "far"/"long_call"
1206 attributes on the given TYPE. */
1209 mips_near_type_p (const_tree type
)
1211 return (lookup_attribute ("short_call", TYPE_ATTRIBUTES (type
)) != NULL
1212 || lookup_attribute ("near", TYPE_ATTRIBUTES (type
)) != NULL
);
1216 mips_far_type_p (const_tree type
)
1218 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type
)) != NULL
1219 || lookup_attribute ("far", TYPE_ATTRIBUTES (type
)) != NULL
);
1223 /* Check if the interrupt attribute is set for a function. */
1226 mips_interrupt_type_p (tree type
)
1228 return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type
)) != NULL
;
1231 /* Return the mask for the "interrupt" attribute. */
1233 static enum mips_int_mask
1234 mips_interrupt_mask (tree type
)
1236 tree attr
= lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type
));
1240 /* For missing attributes or no arguments then return 'eic' as a safe
1243 return INT_MASK_EIC
;
1245 args
= TREE_VALUE (attr
);
1248 return INT_MASK_EIC
;
1250 cst
= TREE_VALUE (args
);
1252 if (strcmp (TREE_STRING_POINTER (cst
), "eic") == 0)
1253 return INT_MASK_EIC
;
1255 /* The validation code in mips_handle_interrupt_attr guarantees that the
1256 argument is now in the form:
1257 vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5). */
1258 str
= TREE_STRING_POINTER (cst
);
1260 gcc_assert (strlen (str
) == strlen ("vector=sw0"));
1263 return (enum mips_int_mask
) (INT_MASK_SW0
+ (str
[9] - '0'));
1265 return (enum mips_int_mask
) (INT_MASK_HW0
+ (str
[9] - '0'));
1268 /* Return the mips_shadow_set if the "use_shadow_register_set" attribute is
1269 set for a function. */
1271 static enum mips_shadow_set
1272 mips_use_shadow_register_set (tree type
)
1274 tree attr
= lookup_attribute ("use_shadow_register_set",
1275 TYPE_ATTRIBUTES (type
));
1278 /* The validation code in mips_handle_use_shadow_register_set_attr guarantees
1279 that if an argument is present then it means: Assume the shadow register
1280 set has a valid stack pointer in it. */
1282 return SHADOW_SET_NO
;
1284 args
= TREE_VALUE (attr
);
1287 return SHADOW_SET_YES
;
1289 return SHADOW_SET_INTSTACK
;
1292 /* Check if the attribute to keep interrupts masked is set for a function. */
1295 mips_keep_interrupts_masked_p (tree type
)
1297 return lookup_attribute ("keep_interrupts_masked",
1298 TYPE_ATTRIBUTES (type
)) != NULL
;
1301 /* Check if the attribute to use debug exception return is set for
1305 mips_use_debug_exception_return_p (tree type
)
1307 return lookup_attribute ("use_debug_exception_return",
1308 TYPE_ATTRIBUTES (type
)) != NULL
;
1311 /* Return the set of compression modes that are explicitly required
1312 by the attributes in ATTRIBUTES. */
1315 mips_get_compress_on_flags (tree attributes
)
1317 unsigned int flags
= 0;
1319 if (lookup_attribute ("mips16", attributes
) != NULL
)
1320 flags
|= MASK_MIPS16
;
1322 if (lookup_attribute ("micromips", attributes
) != NULL
)
1323 flags
|= MASK_MICROMIPS
;
1328 /* Return the set of compression modes that are explicitly forbidden
1329 by the attributes in ATTRIBUTES. */
1332 mips_get_compress_off_flags (tree attributes
)
1334 unsigned int flags
= 0;
1336 if (lookup_attribute ("nocompression", attributes
) != NULL
)
1337 flags
|= MASK_MIPS16
| MASK_MICROMIPS
;
1339 if (lookup_attribute ("nomips16", attributes
) != NULL
)
1340 flags
|= MASK_MIPS16
;
1342 if (lookup_attribute ("nomicromips", attributes
) != NULL
)
1343 flags
|= MASK_MICROMIPS
;
1348 /* Return the compression mode that should be used for function DECL.
1349 Return the ambient setting if DECL is null. */
1352 mips_get_compress_mode (tree decl
)
1354 unsigned int flags
, force_on
;
1356 flags
= mips_base_compression_flags
;
1359 /* Nested functions must use the same frame pointer as their
1360 parent and must therefore use the same ISA mode. */
1361 tree parent
= decl_function_context (decl
);
1364 force_on
= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl
));
1367 flags
&= ~mips_get_compress_off_flags (DECL_ATTRIBUTES (decl
));
1372 /* Return the attribute name associated with MASK_MIPS16 and MASK_MICROMIPS
1376 mips_get_compress_on_name (unsigned int flags
)
1378 if (flags
== MASK_MIPS16
)
1383 /* Return the attribute name that forbids MASK_MIPS16 and MASK_MICROMIPS
1387 mips_get_compress_off_name (unsigned int flags
)
1389 if (flags
== MASK_MIPS16
)
1391 if (flags
== MASK_MICROMIPS
)
1392 return "nomicromips";
1393 return "nocompression";
1396 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1399 mips_comp_type_attributes (const_tree type1
, const_tree type2
)
1401 /* Disallow mixed near/far attributes. */
1402 if (mips_far_type_p (type1
) && mips_near_type_p (type2
))
1404 if (mips_near_type_p (type1
) && mips_far_type_p (type2
))
1409 /* Implement TARGET_INSERT_ATTRIBUTES. */
1412 mips_insert_attributes (tree decl
, tree
*attributes
)
1415 unsigned int compression_flags
, nocompression_flags
;
1417 /* Check for "mips16" and "nomips16" attributes. */
1418 compression_flags
= mips_get_compress_on_flags (*attributes
);
1419 nocompression_flags
= mips_get_compress_off_flags (*attributes
);
1421 if (TREE_CODE (decl
) != FUNCTION_DECL
)
1423 if (nocompression_flags
)
1424 error ("%qs attribute only applies to functions",
1425 mips_get_compress_off_name (nocompression_flags
));
1427 if (compression_flags
)
1428 error ("%qs attribute only applies to functions",
1429 mips_get_compress_on_name (nocompression_flags
));
1433 compression_flags
|= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl
));
1434 nocompression_flags
|=
1435 mips_get_compress_off_flags (DECL_ATTRIBUTES (decl
));
1437 if (compression_flags
&& nocompression_flags
)
1438 error ("%qE cannot have both %qs and %qs attributes",
1439 DECL_NAME (decl
), mips_get_compress_on_name (compression_flags
),
1440 mips_get_compress_off_name (nocompression_flags
));
1442 if (compression_flags
& MASK_MIPS16
1443 && compression_flags
& MASK_MICROMIPS
)
1444 error ("%qE cannot have both %qs and %qs attributes",
1445 DECL_NAME (decl
), "mips16", "micromips");
1447 if (TARGET_FLIP_MIPS16
1448 && !DECL_ARTIFICIAL (decl
)
1449 && compression_flags
== 0
1450 && nocompression_flags
== 0)
1452 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1453 "mips16" attribute, arbitrarily pick one. We must pick the same
1454 setting for duplicate declarations of a function. */
1455 name
= mflip_mips16_use_mips16_p (decl
) ? "mips16" : "nomips16";
1456 *attributes
= tree_cons (get_identifier (name
), NULL
, *attributes
);
1457 name
= "nomicromips";
1458 *attributes
= tree_cons (get_identifier (name
), NULL
, *attributes
);
1463 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1466 mips_merge_decl_attributes (tree olddecl
, tree newdecl
)
1470 diff
= (mips_get_compress_on_flags (DECL_ATTRIBUTES (olddecl
))
1471 ^ mips_get_compress_on_flags (DECL_ATTRIBUTES (newdecl
)));
1473 error ("%qE redeclared with conflicting %qs attributes",
1474 DECL_NAME (newdecl
), mips_get_compress_on_name (diff
));
1476 diff
= (mips_get_compress_off_flags (DECL_ATTRIBUTES (olddecl
))
1477 ^ mips_get_compress_off_flags (DECL_ATTRIBUTES (newdecl
)));
1479 error ("%qE redeclared with conflicting %qs attributes",
1480 DECL_NAME (newdecl
), mips_get_compress_off_name (diff
));
1482 return merge_attributes (DECL_ATTRIBUTES (olddecl
),
1483 DECL_ATTRIBUTES (newdecl
));
1486 /* Implement TARGET_CAN_INLINE_P. */
1489 mips_can_inline_p (tree caller
, tree callee
)
1491 if (mips_get_compress_mode (callee
) != mips_get_compress_mode (caller
))
1493 return default_target_can_inline_p (caller
, callee
);
1496 /* Handle an "interrupt" attribute with an optional argument. */
1499 mips_handle_interrupt_attr (tree
*node ATTRIBUTE_UNUSED
, tree name
, tree args
,
1500 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
1502 /* Check for an argument. */
1503 if (is_attribute_p ("interrupt", name
) && args
!= NULL
)
1507 cst
= TREE_VALUE (args
);
1508 if (TREE_CODE (cst
) != STRING_CST
)
1510 warning (OPT_Wattributes
,
1511 "%qE attribute requires a string argument",
1513 *no_add_attrs
= true;
1515 else if (strcmp (TREE_STRING_POINTER (cst
), "eic") != 0
1516 && strncmp (TREE_STRING_POINTER (cst
), "vector=", 7) != 0)
1518 warning (OPT_Wattributes
,
1519 "argument to %qE attribute is neither eic, nor "
1520 "vector=<line>", name
);
1521 *no_add_attrs
= true;
1523 else if (strncmp (TREE_STRING_POINTER (cst
), "vector=", 7) == 0)
1525 const char *arg
= TREE_STRING_POINTER (cst
) + 7;
1527 /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5. */
1528 if (strlen (arg
) != 3
1529 || (arg
[0] != 's' && arg
[0] != 'h')
1531 || (arg
[0] == 's' && arg
[2] != '0' && arg
[2] != '1')
1532 || (arg
[0] == 'h' && (arg
[2] < '0' || arg
[2] > '5')))
1534 warning (OPT_Wattributes
,
1535 "interrupt vector to %qE attribute is not "
1536 "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)",
1538 *no_add_attrs
= true;
1548 /* Handle a "use_shadow_register_set" attribute with an optional argument. */
1551 mips_handle_use_shadow_register_set_attr (tree
*node ATTRIBUTE_UNUSED
,
1552 tree name
, tree args
,
1553 int flags ATTRIBUTE_UNUSED
,
1556 /* Check for an argument. */
1557 if (is_attribute_p ("use_shadow_register_set", name
) && args
!= NULL
)
1561 cst
= TREE_VALUE (args
);
1562 if (TREE_CODE (cst
) != STRING_CST
)
1564 warning (OPT_Wattributes
,
1565 "%qE attribute requires a string argument",
1567 *no_add_attrs
= true;
1569 else if (strcmp (TREE_STRING_POINTER (cst
), "intstack") != 0)
1571 warning (OPT_Wattributes
,
1572 "argument to %qE attribute is not intstack", name
);
1573 *no_add_attrs
= true;
1582 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1583 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1586 mips_split_plus (rtx x
, rtx
*base_ptr
, HOST_WIDE_INT
*offset_ptr
)
1588 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1)))
1590 *base_ptr
= XEXP (x
, 0);
1591 *offset_ptr
= INTVAL (XEXP (x
, 1));
/* Forward declaration: mips_build_shift and mips_build_lower are mutually
   recursive with mips_build_integer.  */
static unsigned int mips_build_integer (struct mips_integer_op *,
					unsigned HOST_WIDE_INT);
1603 /* A subroutine of mips_build_integer, with the same interface.
1604 Assume that the final action in the sequence should be a left shift. */
1607 mips_build_shift (struct mips_integer_op
*codes
, HOST_WIDE_INT value
)
1609 unsigned int i
, shift
;
1611 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1612 since signed numbers are easier to load than unsigned ones. */
1614 while ((value
& 1) == 0)
1615 value
/= 2, shift
++;
1617 i
= mips_build_integer (codes
, value
);
1618 codes
[i
].code
= ASHIFT
;
1619 codes
[i
].value
= shift
;
1623 /* As for mips_build_shift, but assume that the final action will be
1624 an IOR or PLUS operation. */
1627 mips_build_lower (struct mips_integer_op
*codes
, unsigned HOST_WIDE_INT value
)
1629 unsigned HOST_WIDE_INT high
;
1632 high
= value
& ~(unsigned HOST_WIDE_INT
) 0xffff;
1633 if (!LUI_OPERAND (high
) && (value
& 0x18000) == 0x18000)
1635 /* The constant is too complex to load with a simple LUI/ORI pair,
1636 so we want to give the recursive call as many trailing zeros as
1637 possible. In this case, we know bit 16 is set and that the
1638 low 16 bits form a negative number. If we subtract that number
1639 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1640 i
= mips_build_integer (codes
, CONST_HIGH_PART (value
));
1641 codes
[i
].code
= PLUS
;
1642 codes
[i
].value
= CONST_LOW_PART (value
);
1646 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1647 bits gives a value with at least 17 trailing zeros. */
1648 i
= mips_build_integer (codes
, high
);
1649 codes
[i
].code
= IOR
;
1650 codes
[i
].value
= value
& 0xffff;
1655 /* Fill CODES with a sequence of rtl operations to load VALUE.
1656 Return the number of operations needed. */
1659 mips_build_integer (struct mips_integer_op
*codes
,
1660 unsigned HOST_WIDE_INT value
)
1662 if (SMALL_OPERAND (value
)
1663 || SMALL_OPERAND_UNSIGNED (value
)
1664 || LUI_OPERAND (value
))
1666 /* The value can be loaded with a single instruction. */
1667 codes
[0].code
= UNKNOWN
;
1668 codes
[0].value
= value
;
1671 else if ((value
& 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value
)))
1673 /* Either the constant is a simple LUI/ORI combination or its
1674 lowest bit is set. We don't want to shift in this case. */
1675 return mips_build_lower (codes
, value
);
1677 else if ((value
& 0xffff) == 0)
1679 /* The constant will need at least three actions. The lowest
1680 16 bits are clear, so the final action will be a shift. */
1681 return mips_build_shift (codes
, value
);
1685 /* The final action could be a shift, add or inclusive OR.
1686 Rather than use a complex condition to select the best
1687 approach, try both mips_build_shift and mips_build_lower
1688 and pick the one that gives the shortest sequence.
1689 Note that this case is only used once per constant. */
1690 struct mips_integer_op alt_codes
[MIPS_MAX_INTEGER_OPS
];
1691 unsigned int cost
, alt_cost
;
1693 cost
= mips_build_shift (codes
, value
);
1694 alt_cost
= mips_build_lower (alt_codes
, value
);
1695 if (alt_cost
< cost
)
1697 memcpy (codes
, alt_codes
, alt_cost
* sizeof (codes
[0]));
1704 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
1707 mips_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
1709 return mips_const_insns (x
) > 0;
1712 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
1715 mips16_stub_function (const char *name
)
1719 x
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
1720 SYMBOL_REF_FLAGS (x
) |= (SYMBOL_FLAG_EXTERNAL
| SYMBOL_FLAG_FUNCTION
);
1724 /* Return a legitimate call address for STUB, given that STUB is a MIPS16
1725 support function. */
1728 mips16_stub_call_address (mips_one_only_stub
*stub
)
1730 rtx fn
= mips16_stub_function (stub
->get_name ());
1731 SYMBOL_REF_FLAGS (fn
) |= SYMBOL_FLAG_LOCAL
;
1732 if (!call_insn_operand (fn
, VOIDmode
))
1733 fn
= force_reg (Pmode
, fn
);
1737 /* A stub for moving the thread pointer into TLS_GET_TP_REGNUM. */
1739 class mips16_rdhwr_one_only_stub
: public mips_one_only_stub
1741 virtual const char *get_name ();
1742 virtual void output_body ();
1746 mips16_rdhwr_one_only_stub::get_name ()
1748 return "__mips16_rdhwr";
1752 mips16_rdhwr_one_only_stub::output_body ()
1754 fprintf (asm_out_file
,
1756 "\t.set\tmips32r2\n"
1757 "\t.set\tnoreorder\n"
1763 /* A stub for moving the FCSR into GET_FCSR_REGNUM. */
1764 class mips16_get_fcsr_one_only_stub
: public mips_one_only_stub
1766 virtual const char *get_name ();
1767 virtual void output_body ();
1771 mips16_get_fcsr_one_only_stub::get_name ()
1773 return "__mips16_get_fcsr";
1777 mips16_get_fcsr_one_only_stub::output_body ()
1779 fprintf (asm_out_file
,
1781 "\tj\t$31\n", reg_names
[GET_FCSR_REGNUM
]);
1784 /* A stub for moving SET_FCSR_REGNUM into the FCSR. */
1785 class mips16_set_fcsr_one_only_stub
: public mips_one_only_stub
1787 virtual const char *get_name ();
1788 virtual void output_body ();
1792 mips16_set_fcsr_one_only_stub::get_name ()
1794 return "__mips16_set_fcsr";
1798 mips16_set_fcsr_one_only_stub::output_body ()
1800 fprintf (asm_out_file
,
1802 "\tj\t$31\n", reg_names
[SET_FCSR_REGNUM
]);
1805 /* Return true if symbols of type TYPE require a GOT access. */
1808 mips_got_symbol_type_p (enum mips_symbol_type type
)
1812 case SYMBOL_GOT_PAGE_OFST
:
1813 case SYMBOL_GOT_DISP
:
1821 /* Return true if X is a thread-local symbol. */
1824 mips_tls_symbol_p (rtx x
)
1826 return GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
) != 0;
1829 /* Return true if SYMBOL_REF X is associated with a global symbol
1830 (in the STB_GLOBAL sense). */
1833 mips_global_symbol_p (const_rtx x
)
1835 const_tree decl
= SYMBOL_REF_DECL (x
);
1838 return !SYMBOL_REF_LOCAL_P (x
) || SYMBOL_REF_EXTERNAL_P (x
);
1840 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1841 or weak symbols. Relocations in the object file will be against
1842 the target symbol, so it's that symbol's binding that matters here. */
1843 return DECL_P (decl
) && (TREE_PUBLIC (decl
) || DECL_WEAK (decl
));
1846 /* Return true if function X is a libgcc MIPS16 stub function. */
1849 mips16_stub_function_p (const_rtx x
)
1851 return (GET_CODE (x
) == SYMBOL_REF
1852 && strncmp (XSTR (x
, 0), "__mips16_", 9) == 0);
1855 /* Return true if function X is a locally-defined and locally-binding
1859 mips16_local_function_p (const_rtx x
)
1861 return (GET_CODE (x
) == SYMBOL_REF
1862 && SYMBOL_REF_LOCAL_P (x
)
1863 && !SYMBOL_REF_EXTERNAL_P (x
)
1864 && (mips_get_compress_mode (SYMBOL_REF_DECL (x
)) & MASK_MIPS16
));
1867 /* Return true if SYMBOL_REF X binds locally. */
1870 mips_symbol_binds_local_p (const_rtx x
)
1872 return (SYMBOL_REF_DECL (x
)
1873 ? targetm
.binds_local_p (SYMBOL_REF_DECL (x
))
1874 : SYMBOL_REF_LOCAL_P (x
));
1877 /* Return true if OP is a constant vector with the number of units in MODE,
1878 and each unit has the same bit set. */
1881 mips_const_vector_bitimm_set_p (rtx op
, machine_mode mode
)
1883 if (GET_CODE (op
) == CONST_VECTOR
&& op
!= CONST0_RTX (mode
))
1885 unsigned HOST_WIDE_INT val
= UINTVAL (CONST_VECTOR_ELT (op
, 0));
1886 int vlog2
= exact_log2 (val
& GET_MODE_MASK (GET_MODE_INNER (mode
)));
1890 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
1891 gcc_assert (vlog2
>= 0 && vlog2
<= GET_MODE_UNIT_BITSIZE (mode
) - 1);
1892 return mips_const_vector_same_val_p (op
, mode
);
1899 /* Return true if OP is a constant vector with the number of units in MODE,
1900 and each unit has the same bit clear. */
1903 mips_const_vector_bitimm_clr_p (rtx op
, machine_mode mode
)
1905 if (GET_CODE (op
) == CONST_VECTOR
&& op
!= CONSTM1_RTX (mode
))
1907 unsigned HOST_WIDE_INT val
= ~UINTVAL (CONST_VECTOR_ELT (op
, 0));
1908 int vlog2
= exact_log2 (val
& GET_MODE_MASK (GET_MODE_INNER (mode
)));
1912 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
1913 gcc_assert (vlog2
>= 0 && vlog2
<= GET_MODE_UNIT_BITSIZE (mode
) - 1);
1914 return mips_const_vector_same_val_p (op
, mode
);
1921 /* Return true if OP is a constant vector with the number of units in MODE,
1922 and each unit has the same value. */
1925 mips_const_vector_same_val_p (rtx op
, machine_mode mode
)
1927 int i
, nunits
= GET_MODE_NUNITS (mode
);
1930 if (GET_CODE (op
) != CONST_VECTOR
|| GET_MODE (op
) != mode
)
1933 first
= CONST_VECTOR_ELT (op
, 0);
1934 for (i
= 1; i
< nunits
; i
++)
1935 if (!rtx_equal_p (first
, CONST_VECTOR_ELT (op
, i
)))
1941 /* Return true if OP is a constant vector with the number of units in MODE,
1942 and each unit has the same value as well as replicated bytes in the value.
1946 mips_const_vector_same_bytes_p (rtx op
, machine_mode mode
)
1949 HOST_WIDE_INT val
, first_byte
;
1952 if (!mips_const_vector_same_val_p (op
, mode
))
1955 first
= CONST_VECTOR_ELT (op
, 0);
1956 bytes
= GET_MODE_UNIT_SIZE (mode
);
1957 val
= INTVAL (first
);
1958 first_byte
= val
& 0xff;
1959 for (i
= 1; i
< bytes
; i
++)
1962 if ((val
& 0xff) != first_byte
)
1969 /* Return true if OP is a constant vector with the number of units in MODE,
1970 and each unit has the same integer value in the range [LOW, HIGH]. */
1973 mips_const_vector_same_int_p (rtx op
, machine_mode mode
, HOST_WIDE_INT low
,
1976 HOST_WIDE_INT value
;
1979 if (!mips_const_vector_same_val_p (op
, mode
))
1982 elem0
= CONST_VECTOR_ELT (op
, 0);
1983 if (!CONST_INT_P (elem0
))
1986 value
= INTVAL (elem0
);
1987 return (value
>= low
&& value
<= high
);
1990 /* Return true if OP is a constant vector with repeated 4-element sets
1994 mips_const_vector_shuffle_set_p (rtx op
, machine_mode mode
)
1996 int nunits
= GET_MODE_NUNITS (mode
);
1997 int nsets
= nunits
/ 4;
2001 /* Check if we have the same 4-element sets. */
2002 for (j
= 0; j
< nsets
; j
++, set
= 4 * j
)
2003 for (i
= 0; i
< 4; i
++)
2004 if ((INTVAL (XVECEXP (op
, 0, i
))
2005 != (INTVAL (XVECEXP (op
, 0, set
+ i
)) - set
))
2006 || !IN_RANGE (INTVAL (XVECEXP (op
, 0, set
+ i
)), 0, set
+ 3))
2011 /* Return true if rtx constants of mode MODE should be put into a small
2015 mips_rtx_constant_in_small_data_p (machine_mode mode
)
2017 return (!TARGET_EMBEDDED_DATA
2018 && TARGET_LOCAL_SDATA
2019 && GET_MODE_SIZE (mode
) <= mips_small_data_threshold
);
2022 /* Return true if X should not be moved directly into register $25.
2023 We need this because many versions of GAS will treat "la $25,foo" as
2024 part of a call sequence and so allow a global "foo" to be lazily bound. */
2027 mips_dangerous_for_la25_p (rtx x
)
2029 return (!TARGET_EXPLICIT_RELOCS
2031 && GET_CODE (x
) == SYMBOL_REF
2032 && mips_global_symbol_p (x
));
2035 /* Return true if calls to X might need $25 to be valid on entry. */
2038 mips_use_pic_fn_addr_reg_p (const_rtx x
)
2040 if (!TARGET_USE_PIC_FN_ADDR_REG
)
2043 /* MIPS16 stub functions are guaranteed not to use $25. */
2044 if (mips16_stub_function_p (x
))
2047 if (GET_CODE (x
) == SYMBOL_REF
)
2049 /* If PLTs and copy relocations are available, the static linker
2050 will make sure that $25 is valid on entry to the target function. */
2051 if (TARGET_ABICALLS_PIC0
)
2054 /* Locally-defined functions use absolute accesses to set up
2055 the global pointer. */
2056 if (TARGET_ABSOLUTE_ABICALLS
2057 && mips_symbol_binds_local_p (x
)
2058 && !SYMBOL_REF_EXTERNAL_P (x
))
2065 /* Return the method that should be used to access SYMBOL_REF or
2066 LABEL_REF X in context CONTEXT. */
2068 static enum mips_symbol_type
2069 mips_classify_symbol (const_rtx x
, enum mips_symbol_context context
)
2072 return SYMBOL_GOT_DISP
;
2074 if (GET_CODE (x
) == LABEL_REF
)
2076 /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
2077 code and if we know that the label is in the current function's
2078 text section. LABEL_REFs are used for jump tables as well as
2079 text labels, so we must check whether jump tables live in the
2081 if (TARGET_MIPS16_SHORT_JUMP_TABLES
2082 && !LABEL_REF_NONLOCAL_P (x
))
2083 return SYMBOL_PC_RELATIVE
;
2085 if (TARGET_ABICALLS
&& !TARGET_ABSOLUTE_ABICALLS
)
2086 return SYMBOL_GOT_PAGE_OFST
;
2088 return SYMBOL_ABSOLUTE
;
2091 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
2093 if (SYMBOL_REF_TLS_MODEL (x
))
2096 if (CONSTANT_POOL_ADDRESS_P (x
))
2098 if (TARGET_MIPS16_TEXT_LOADS
)
2099 return SYMBOL_PC_RELATIVE
;
2101 if (TARGET_MIPS16_PCREL_LOADS
&& context
== SYMBOL_CONTEXT_MEM
)
2102 return SYMBOL_PC_RELATIVE
;
2104 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x
)))
2105 return SYMBOL_GP_RELATIVE
;
2108 /* Do not use small-data accesses for weak symbols; they may end up
2110 if (TARGET_GPOPT
&& SYMBOL_REF_SMALL_P (x
) && !SYMBOL_REF_WEAK (x
))
2111 return SYMBOL_GP_RELATIVE
;
2113 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
2115 if (TARGET_ABICALLS_PIC2
2116 && !(TARGET_ABSOLUTE_ABICALLS
&& mips_symbol_binds_local_p (x
)))
2118 /* There are three cases to consider:
2120 - o32 PIC (either with or without explicit relocs)
2121 - n32/n64 PIC without explicit relocs
2122 - n32/n64 PIC with explicit relocs
2124 In the first case, both local and global accesses will use an
2125 R_MIPS_GOT16 relocation. We must correctly predict which of
2126 the two semantics (local or global) the assembler and linker
2127 will apply. The choice depends on the symbol's binding rather
2128 than its visibility.
2130 In the second case, the assembler will not use R_MIPS_GOT16
2131 relocations, but it chooses between local and global accesses
2132 in the same way as for o32 PIC.
2134 In the third case we have more freedom since both forms of
2135 access will work for any kind of symbol. However, there seems
2136 little point in doing things differently. */
2137 if (mips_global_symbol_p (x
))
2138 return SYMBOL_GOT_DISP
;
2140 return SYMBOL_GOT_PAGE_OFST
;
2143 return SYMBOL_ABSOLUTE
;
2146 /* Classify the base of symbolic expression X, given that X appears in
2149 static enum mips_symbol_type
2150 mips_classify_symbolic_expression (rtx x
, enum mips_symbol_context context
)
2154 split_const (x
, &x
, &offset
);
2155 if (UNSPEC_ADDRESS_P (x
))
2156 return UNSPEC_ADDRESS_TYPE (x
);
2158 return mips_classify_symbol (x
, context
);
2161 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
2162 is the alignment in bytes of SYMBOL_REF X. */
2165 mips_offset_within_alignment_p (rtx x
, HOST_WIDE_INT offset
)
2167 HOST_WIDE_INT align
;
2169 align
= SYMBOL_REF_DECL (x
) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x
)) : 1;
2170 return IN_RANGE (offset
, 0, align
- 1);
2173 /* Return true if X is a symbolic constant that can be used in context
2174 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
2177 mips_symbolic_constant_p (rtx x
, enum mips_symbol_context context
,
2178 enum mips_symbol_type
*symbol_type
)
2182 split_const (x
, &x
, &offset
);
2183 if (UNSPEC_ADDRESS_P (x
))
2185 *symbol_type
= UNSPEC_ADDRESS_TYPE (x
);
2186 x
= UNSPEC_ADDRESS (x
);
2188 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
2190 *symbol_type
= mips_classify_symbol (x
, context
);
2191 if (*symbol_type
== SYMBOL_TLS
)
2197 if (offset
== const0_rtx
)
2200 /* Check whether a nonzero offset is valid for the underlying
2202 switch (*symbol_type
)
2204 case SYMBOL_ABSOLUTE
:
2205 case SYMBOL_64_HIGH
:
2208 /* If the target has 64-bit pointers and the object file only
2209 supports 32-bit symbols, the values of those symbols will be
2210 sign-extended. In this case we can't allow an arbitrary offset
2211 in case the 32-bit value X + OFFSET has a different sign from X. */
2212 if (Pmode
== DImode
&& !ABI_HAS_64BIT_SYMBOLS
)
2213 return offset_within_block_p (x
, INTVAL (offset
));
2215 /* In other cases the relocations can handle any offset. */
2218 case SYMBOL_PC_RELATIVE
:
2219 /* Allow constant pool references to be converted to LABEL+CONSTANT.
2220 In this case, we no longer have access to the underlying constant,
2221 but the original symbol-based access was known to be valid. */
2222 if (GET_CODE (x
) == LABEL_REF
)
2227 case SYMBOL_GP_RELATIVE
:
2228 /* Make sure that the offset refers to something within the
2229 same object block. This should guarantee that the final
2230 PC- or GP-relative offset is within the 16-bit limit. */
2231 return offset_within_block_p (x
, INTVAL (offset
));
2233 case SYMBOL_GOT_PAGE_OFST
:
2234 case SYMBOL_GOTOFF_PAGE
:
2235 /* If the symbol is global, the GOT entry will contain the symbol's
2236 address, and we will apply a 16-bit offset after loading it.
2237 If the symbol is local, the linker should provide enough local
2238 GOT entries for a 16-bit offset, but larger offsets may lead
2240 return SMALL_INT (offset
);
2244 /* There is no carry between the HI and LO REL relocations, so the
2245 offset is only valid if we know it won't lead to such a carry. */
2246 return mips_offset_within_alignment_p (x
, INTVAL (offset
));
2248 case SYMBOL_GOT_DISP
:
2249 case SYMBOL_GOTOFF_DISP
:
2250 case SYMBOL_GOTOFF_CALL
:
2251 case SYMBOL_GOTOFF_LOADGP
:
2254 case SYMBOL_GOTTPREL
:
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2262 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
2263 single instruction. We rely on the fact that, in the worst case,
2264 all instructions involved in a MIPS16 address calculation are usually
/* Worker for mips_symbol_insns below; see that function's comment for
   the TYPE/MODE semantics and the meaning of a zero return value.  */
2268 mips_symbol_insns_1 (enum mips_symbol_type type
, machine_mode mode
)
2270 if (mips_use_pcrel_pool_p
[(int) type
])
2272 if (mode
== MAX_MACHINE_MODE
)
2273 /* LEAs will be converted into constant-pool references by
2275 type
= SYMBOL_PC_RELATIVE
;
2277 /* The constant must be loaded and then dereferenced. */
2283 case SYMBOL_ABSOLUTE
:
2284 /* When using 64-bit symbols, we need 5 preparatory instructions,
2287 lui $at,%highest(symbol)
2288 daddiu $at,$at,%higher(symbol)
2290 daddiu $at,$at,%hi(symbol)
2293 The final address is then $at + %lo(symbol). With 32-bit
2294 symbols we just need a preparatory LUI for normal mode and
2295 a preparatory LI and SLL for MIPS16. */
2296 return ABI_HAS_64BIT_SYMBOLS
? 6 : TARGET_MIPS16
? 3 : 2;
2298 case SYMBOL_GP_RELATIVE
:
2299 /* Treat GP-relative accesses as taking a single instruction on
2300 MIPS16 too; the copy of $gp can often be shared. */
2303 case SYMBOL_PC_RELATIVE
:
2304 /* PC-relative constants can only be used with ADDIUPC,
2305 DADDIUPC, LWPC and LDPC. */
2306 if (mode
== MAX_MACHINE_MODE
2307 || GET_MODE_SIZE (mode
) == 4
2308 || GET_MODE_SIZE (mode
) == 8)
2311 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
2314 case SYMBOL_GOT_DISP
:
2315 /* The constant will have to be loaded from the GOT before it
2316 is used in an address. */
2317 if (mode
!= MAX_MACHINE_MODE
)
2322 case SYMBOL_GOT_PAGE_OFST
:
2323 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
2324 local/global classification is accurate. The worst cases are:
2326 (1) For local symbols when generating o32 or o64 code. The assembler
2332 ...and the final address will be $at + %lo(symbol).
2334 (2) For global symbols when -mxgot. The assembler will use:
2336 lui $at,%got_hi(symbol)
2339 ...and the final address will be $at + %got_lo(symbol). */
2342 case SYMBOL_GOTOFF_PAGE
:
2343 case SYMBOL_GOTOFF_DISP
:
2344 case SYMBOL_GOTOFF_CALL
:
2345 case SYMBOL_GOTOFF_LOADGP
:
2346 case SYMBOL_64_HIGH
:
2352 case SYMBOL_GOTTPREL
:
2355 /* A 16-bit constant formed by a single relocation, or a 32-bit
2356 constant formed from a high 16-bit relocation and a low 16-bit
2357 relocation. Use mips_split_p to determine which. 32-bit
2358 constants need an "lui; addiu" sequence for normal mode and
2359 an "li; sll; addiu" sequence for MIPS16 mode. */
2360 return !mips_split_p
[type
] ? 1 : TARGET_MIPS16
? 3 : 2;
2363 /* We don't treat a bare TLS symbol as a constant. */
2369 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
2370 to load symbols of type TYPE into a register. Return 0 if the given
2371 type of symbol cannot be used as an immediate operand.
2373 Otherwise, return the number of instructions needed to load or store
2374 values of mode MODE to or from addresses of type TYPE. Return 0 if
2375 the given type of symbol is not valid in addresses.
2377 In both cases, instruction counts are based off BASE_INSN_LENGTH. */
2380 mips_symbol_insns (enum mips_symbol_type type
, machine_mode mode
)
2382 /* MSA LD.* and ST.* cannot support loading symbols via an immediate
2384 if (MSA_SUPPORTED_MODE_P (mode
))
/* MIPS16 instructions count double relative to BASE_INSN_LENGTH.  */
2387 return mips_symbol_insns_1 (type
, mode
) * (TARGET_MIPS16
? 2 : 1);
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2390 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2393 mips_cannot_force_const_mem (machine_mode mode
, rtx x
)
2395 enum mips_symbol_type type
;
2398 /* There is no assembler syntax for expressing an address-sized
2400 if (GET_CODE (x
) == HIGH
)
2403 /* As an optimization, reject constants that mips_legitimize_move
2406 Suppose we have a multi-instruction sequence that loads constant C
2407 into register R. If R does not get allocated a hard register, and
2408 R is used in an operand that allows both registers and memory
2409 references, reload will consider forcing C into memory and using
2410 one of the instruction's memory alternatives. Returning false
2411 here will force it to use an input reload instead. */
2412 if ((CONST_INT_P (x
) || GET_CODE (x
) == CONST_VECTOR
)
2413 && mips_legitimate_constant_p (mode
, x
))
/* Split X into a symbolic base and a CONST_INT offset and classify
   the base; offset-free symbol handling follows.  */
2416 split_const (x
, &base
, &offset
);
2417 if (mips_symbolic_constant_p (base
, SYMBOL_CONTEXT_LEA
, &type
))
2419 /* See whether we explicitly want these symbols in the pool. */
2420 if (mips_use_pcrel_pool_p
[(int) type
])
2423 /* The same optimization as for CONST_INT. */
2424 if (SMALL_INT (offset
) && mips_symbol_insns (type
, MAX_MACHINE_MODE
) > 0)
2427 /* If MIPS16 constant pools live in the text section, they should
2428 not refer to anything that might need run-time relocation. */
2429 if (TARGET_MIPS16_PCREL_LOADS
&& mips_got_symbol_type_p (type
))
2433 /* TLS symbols must be computed by mips_legitimize_move. */
2434 if (tls_referenced_p (x
))
2440 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
2441 constants when we're using a per-function constant pool. */
2444 mips_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED
,
2445 const_rtx x ATTRIBUTE_UNUSED
)
2447 return !TARGET_MIPS16_PCREL_LOADS
;
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2450 /* Return true if register REGNO is a valid base register for mode MODE.
2451 STRICT_P is true if REG_OK_STRICT is in effect. */
2454 mips_regno_mode_ok_for_base_p (int regno
, machine_mode mode
,
,
2457 if (!HARD_REGISTER_NUM_P (regno
))
/* Pseudo registers: map through reg_renumber to any assigned hard reg
   (strict handling; surrounding non-strict path is elided here).  */
2461 regno
= reg_renumber
[regno
];
2464 /* These fake registers will be eliminated to either the stack or
2465 hard frame pointer, both of which are usually valid base registers.
2466 Reload deals with the cases where the eliminated form isn't valid. */
2467 if (regno
== ARG_POINTER_REGNUM
|| regno
== FRAME_POINTER_REGNUM
)
2470 /* In MIPS16 mode, the stack pointer can only address word and doubleword
2471 values, nothing smaller. */
2472 if (TARGET_MIPS16
&& regno
== STACK_POINTER_REGNUM
)
2473 return GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8;
2475 return TARGET_MIPS16
? M16_REG_P (regno
) : GP_REG_P (regno
);
2478 /* Return true if X is a valid base register for mode MODE.
2479 STRICT_P is true if REG_OK_STRICT is in effect. */
2482 mips_valid_base_register_p (rtx x
, machine_mode mode
, bool strict_p
)
2484 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
2488 && mips_regno_mode_ok_for_base_p (REGNO (x
), mode
, strict_p
));
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2491 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2492 can address a value of mode MODE. */
2495 mips_valid_offset_p (rtx x
, machine_mode mode
)
2497 /* Check that X is a signed 16-bit number. */
2498 if (!const_arith_operand (x
, Pmode
))
2501 /* We may need to split multiword moves, so make sure that every word
2503 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2504 && !SMALL_OPERAND (INTVAL (x
) + GET_MODE_SIZE (mode
) - UNITS_PER_WORD
))
2507 /* MSA LD.* and ST.* supports 10-bit signed offsets. */
2508 if (MSA_SUPPORTED_MODE_P (mode
)
2509 && !mips_signed_immediate_p (INTVAL (x
), 10,
2510 mips_ldst_scaled_shift (mode
)))
2516 /* Return true if a LO_SUM can address a value of mode MODE when the
2517 LO_SUM symbol has type SYMBOL_TYPE. */
2520 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type
, machine_mode mode
)
2522 /* Check that symbols of type SYMBOL_TYPE can be used to access values
2524 if (mips_symbol_insns (symbol_type
, mode
) == 0)
2527 /* Check that there is a known low-part relocation. */
2528 if (mips_lo_relocs
[symbol_type
] == NULL
)
2531 /* We may need to split multiword moves, so make sure that each word
2532 can be accessed without inducing a carry. This is mainly needed
2533 for o64, which has historically only guaranteed 64-bit alignment
2534 for 128-bit types. */
2535 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2536 && GET_MODE_BITSIZE (mode
) > GET_MODE_ALIGNMENT (mode
))
2539 /* MSA LD.* and ST.* cannot support loading symbols via %lo($base). */
2540 if (MSA_SUPPORTED_MODE_P (mode
))
/* NOTE(review): elided extract -- return-type lines, braces, case labels
   and some statements are missing between the numbered lines; verify
   against the complete file before editing.  */
2546 /* Return true if X is a valid address for machine mode MODE. If it is,
2547 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
2551 mips_classify_address (struct mips_address_info
*info
, rtx x
,
2552 machine_mode mode
, bool strict_p
)
2554 switch (GET_CODE (x
))
/* Bare register: base reg with a zero offset (REG case label elided).  */
2558 info
->type
= ADDRESS_REG
;
2560 info
->offset
= const0_rtx
;
2561 return mips_valid_base_register_p (info
->reg
, mode
, strict_p
);
/* Base + offset (PLUS case label elided).  */
2564 info
->type
= ADDRESS_REG
;
2565 info
->reg
= XEXP (x
, 0);
2566 info
->offset
= XEXP (x
, 1);
2567 return (mips_valid_base_register_p (info
->reg
, mode
, strict_p
)
2568 && mips_valid_offset_p (info
->offset
, mode
));
/* LO_SUM of a base register and a symbolic low part.  */
2571 info
->type
= ADDRESS_LO_SUM
;
2572 info
->reg
= XEXP (x
, 0);
2573 info
->offset
= XEXP (x
, 1);
2574 /* We have to trust the creator of the LO_SUM to do something vaguely
2575 sane. Target-independent code that creates a LO_SUM should also
2576 create and verify the matching HIGH. Target-independent code that
2577 adds an offset to a LO_SUM must prove that the offset will not
2578 induce a carry. Failure to do either of these things would be
2579 a bug, and we are not required to check for it here. The MIPS
2580 backend itself should only create LO_SUMs for valid symbolic
2581 constants, with the high part being either a HIGH or a copy
2584 = mips_classify_symbolic_expression (info
->offset
, SYMBOL_CONTEXT_MEM
);
2585 return (mips_valid_base_register_p (info
->reg
, mode
, strict_p
)
2586 && mips_valid_lo_sum_p (info
->symbol_type
, mode
));
2589 /* Small-integer addresses don't occur very often, but they
2590 are legitimate if $0 is a valid base register. */
2591 info
->type
= ADDRESS_CONST_INT
;
2592 return !TARGET_MIPS16
&& SMALL_INT (x
);
/* Symbolic constants (default case; label elided).  */
2597 info
->type
= ADDRESS_SYMBOLIC
;
2598 return (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_MEM
,
2600 && mips_symbol_insns (info
->symbol_type
, mode
) > 0
2601 && !mips_split_p
[info
->symbol_type
]);
2608 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
2611 mips_legitimate_address_p (machine_mode mode
, rtx x
, bool strict_p
)
2613 struct mips_address_info addr
;
2615 return mips_classify_address (&addr
, x
, mode
, strict_p
);
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2618 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2621 mips_stack_address_p (rtx x
, machine_mode mode
)
2623 struct mips_address_info addr
;
2625 return (mips_classify_address (&addr
, x
, mode
, false)
2626 && addr
.type
== ADDRESS_REG
2627 && addr
.reg
== stack_pointer_rtx
);
2630 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2631 address instruction. Note that such addresses are not considered
2632 legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
2633 is so restricted. */
2636 mips_lwxs_address_p (rtx addr
)
2639 && GET_CODE (addr
) == PLUS
2640 && REG_P (XEXP (addr
, 1)))
2642 rtx offset
= XEXP (addr
, 0);
/* Accept (reg * 4) + reg, i.e. a word-scaled index.  */
2643 if (GET_CODE (offset
) == MULT
2644 && REG_P (XEXP (offset
, 0))
2645 && CONST_INT_P (XEXP (offset
, 1))
2646 && INTVAL (XEXP (offset
, 1)) == 4)
2652 /* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load
2653 indexed address instruction. Note that such addresses are
2654 not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P
2655 sense, because their use is so restricted. */
2658 mips_lx_address_p (rtx addr
, machine_mode mode
)
2660 if (GET_CODE (addr
) != PLUS
2661 || !REG_P (XEXP (addr
, 0))
2662 || !REG_P (XEXP (addr
, 1)))
2664 if (ISA_HAS_LBX
&& mode
== QImode
)
2666 if (ISA_HAS_LHX
&& mode
== HImode
)
2668 if (ISA_HAS_LWX
&& mode
== SImode
)
2670 if (ISA_HAS_LDX
&& mode
== DImode
)
2672 if (MSA_SUPPORTED_MODE_P (mode
))
/* NOTE(review): elided extract -- return-type lines, braces, case labels
   and some statements are missing between the numbered lines; verify
   against the complete file before editing.  */
2677 /* Return true if a value at OFFSET bytes from base register BASE can be
2678 accessed using an unextended MIPS16 instruction. MODE is the mode of
2681 Usually the offset in an unextended instruction is a 5-bit field.
2682 The offset is unsigned and shifted left once for LH and SH, twice
2683 for LW and SW, and so on. An exception is LWSP and SWSP, which have
2684 an 8-bit immediate field that's shifted left twice. */
2687 mips16_unextended_reference_p (machine_mode mode
, rtx base
,
2688 unsigned HOST_WIDE_INT offset
)
2690 if (mode
!= BLKmode
&& offset
% GET_MODE_SIZE (mode
) == 0)
/* $sp-relative word accesses get the 8-bit (LWSP/SWSP) field.  */
2692 if (GET_MODE_SIZE (mode
) == 4 && base
== stack_pointer_rtx
)
2693 return offset
< 256U * GET_MODE_SIZE (mode
);
2694 return offset
< 32U * GET_MODE_SIZE (mode
);
2699 /* Return the number of instructions needed to load or store a value
2700 of mode MODE at address X, assuming that BASE_INSN_LENGTH is the
2701 length of one instruction. Return 0 if X isn't valid for MODE.
2702 Assume that multiword moves may need to be split into word moves
2703 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2707 mips_address_insns (rtx x
, machine_mode mode
, bool might_split_p
)
2709 struct mips_address_info addr
;
2711 bool msa_p
= (!might_split_p
&& MSA_SUPPORTED_MODE_P (mode
));
2713 /* BLKmode is used for single unaligned loads and stores and should
2714 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2715 meaningless, so we have to single it out as a special case one way
2717 if (mode
!= BLKmode
&& might_split_p
)
2718 factor
= (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2722 if (mips_classify_address (&addr
, x
, mode
, false))
2728 /* MSA LD.* and ST.* supports 10-bit signed offsets. */
2729 if (mips_signed_immediate_p (INTVAL (addr
.offset
), 10,
2730 mips_ldst_scaled_shift (mode
)))
2736 && !mips16_unextended_reference_p (mode
, addr
.reg
,
,
2737 UINTVAL (addr
.offset
)))
2741 case ADDRESS_LO_SUM
:
2742 return msa_p
? 0 : TARGET_MIPS16
? factor
* 2 : factor
;
2744 case ADDRESS_CONST_INT
:
2745 return msa_p
? 0 : factor
;
2747 case ADDRESS_SYMBOLIC
:
2748 return msa_p
? 0 : factor
* mips_symbol_insns (addr
.symbol_type
, mode
);
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2753 /* Return true if X fits within an unsigned field of BITS bits that is
2754 shifted left SHIFT bits before being used. */
2757 mips_unsigned_immediate_p (unsigned HOST_WIDE_INT x
, int bits
, int shift
= 0)
2759 return (x
& ((1 << shift
) - 1)) == 0 && x
< ((unsigned) 1 << (shift
+ bits
));
2762 /* Return true if X fits within a signed field of BITS bits that is
2763 shifted left SHIFT bits before being used. */
2766 mips_signed_immediate_p (unsigned HOST_WIDE_INT x
, int bits
, int shift
= 0)
/* Bias into the unsigned range, then reuse the unsigned test.  */
2768 x
+= 1 << (bits
+ shift
- 1);
2769 return mips_unsigned_immediate_p (x
, bits
, shift
);
2772 /* Return the scale shift that is applied to an MSA LD/ST address offset. */
2775 mips_ldst_scaled_shift (machine_mode mode
)
2777 int shift
= exact_log2 (GET_MODE_UNIT_SIZE (mode
));
2779 if (shift
< 0 || shift
> 8)
2785 /* Return true if X is legitimate for accessing values of mode MODE,
2786 if it is based on a MIPS16 register, and if the offset satisfies
2787 OFFSET_PREDICATE. */
2790 m16_based_address_p (rtx x
, machine_mode mode
,
2791 insn_operand_predicate_fn offset_predicate
)
2793 struct mips_address_info addr
;
2795 return (mips_classify_address (&addr
, x
, mode
, false)
2796 && addr
.type
== ADDRESS_REG
2797 && M16_REG_P (REGNO (addr
.reg
))
2798 && offset_predicate (addr
.offset
, mode
));
/* NOTE(review): elided extract -- return-type lines and braces are
   missing between the numbered lines; verify against the complete file
   before editing.  */
2801 /* Return true if X is a legitimate address that conforms to the requirements
2802 for a microMIPS LWSP or SWSP insn. */
2805 lwsp_swsp_address_p (rtx x
, machine_mode mode
)
2807 struct mips_address_info addr
;
2809 return (mips_classify_address (&addr
, x
, mode
, false)
2810 && addr
.type
== ADDRESS_REG
2811 && REGNO (addr
.reg
) == STACK_POINTER_REGNUM
2812 && uw5_operand (addr
.offset
, mode
));
2815 /* Return true if X is a legitimate address with a 12-bit offset.
2816 MODE is the mode of the value being accessed. */
2819 umips_12bit_offset_address_p (rtx x
, machine_mode mode
)
2821 struct mips_address_info addr
;
2823 return (mips_classify_address (&addr
, x
, mode
, false)
2824 && addr
.type
== ADDRESS_REG
2825 && CONST_INT_P (addr
.offset
)
2826 && UMIPS_12BIT_OFFSET_P (INTVAL (addr
.offset
)));
2829 /* Return true if X is a legitimate address with a 9-bit offset.
2830 MODE is the mode of the value being accessed. */
2833 mips_9bit_offset_address_p (rtx x
, machine_mode mode
)
2835 struct mips_address_info addr
;
2837 return (mips_classify_address (&addr
, x
, mode
, false)
2838 && addr
.type
== ADDRESS_REG
2839 && CONST_INT_P (addr
.offset
)
2840 && MIPS_9BIT_OFFSET_P (INTVAL (addr
.offset
)));
/* NOTE(review): elided extract -- return-type lines, braces, case labels
   and some statements are missing between the numbered lines; verify
   against the complete file before editing.  */
2843 /* Return the number of instructions needed to load constant X,
2844 assuming that BASE_INSN_LENGTH is the length of one instruction.
2845 Return 0 if X isn't a valid constant. */
2848 mips_const_insns (rtx x
)
2850 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
2851 enum mips_symbol_type symbol_type
;
2854 switch (GET_CODE (x
))
/* HIGH of a splittable symbol (case label elided).  */
2857 if (!mips_symbolic_constant_p (XEXP (x
, 0), SYMBOL_CONTEXT_LEA
,
,
2859 || !mips_split_p
[symbol_type
])
2862 /* This is simply an LUI for normal mode. It is an extended
2863 LI followed by an extended SLL for MIPS16. */
2864 return TARGET_MIPS16
? 4 : 1;
2868 /* Unsigned 8-bit constants can be loaded using an unextended
2869 LI instruction. Unsigned 16-bit constants can be loaded
2870 using an extended LI. Negative constants must be loaded
2871 using LI and then negated. */
2872 return (IN_RANGE (INTVAL (x
), 0, 255) ? 1
2873 : SMALL_OPERAND_UNSIGNED (INTVAL (x
)) ? 2
2874 : IN_RANGE (-INTVAL (x
), 0, 255) ? 2
2875 : SMALL_OPERAND_UNSIGNED (-INTVAL (x
)) ? 3
2878 return mips_build_integer (codes
, INTVAL (x
));
/* CONST_VECTOR handling (label elided): small same-int MSA vectors.  */
2882 && mips_const_vector_same_int_p (x
, GET_MODE (x
), -512, 511))
2886 /* Allow zeros for normal mode, where we can use $0. */
2887 return !TARGET_MIPS16
&& x
== CONST0_RTX (GET_MODE (x
)) ? 1 : 0;
2893 /* See if we can refer to X directly. */
2894 if (mips_symbolic_constant_p (x
, SYMBOL_CONTEXT_LEA
, &symbol_type
))
2895 return mips_symbol_insns (symbol_type
, MAX_MACHINE_MODE
);
2897 /* Otherwise try splitting the constant into a base and offset.
2898 If the offset is a 16-bit value, we can load the base address
2899 into a register and then use (D)ADDIU to add in the offset.
2900 If the offset is larger, we can load the base and offset
2901 into separate registers and add them together with (D)ADDU.
2902 However, the latter is only possible before reload; during
2903 and after reload, we must have the option of forcing the
2904 constant into the pool instead. */
2905 split_const (x
, &x
, &offset
);
2908 int n
= mips_const_insns (x
);
2911 if (SMALL_INT (offset
))
2913 else if (!targetm
.cannot_force_const_mem (GET_MODE (x
), x
))
2914 return n
+ 1 + mips_build_integer (codes
, INTVAL (offset
));
/* SYMBOL_REF / LABEL_REF fall-through (labels elided).  */
2921 return mips_symbol_insns (mips_classify_symbol (x
, SYMBOL_CONTEXT_LEA
),
2929 /* X is a doubleword constant that can be handled by splitting it into
2930 two words and loading each word separately. Return the number of
2931 instructions required to do this, assuming that BASE_INSN_LENGTH
2932 is the length of one instruction. */
2935 mips_split_const_insns (rtx x
)
2937 unsigned int low
, high
;
2939 low
= mips_const_insns (mips_subword (x
, false));
2940 high
= mips_const_insns (mips_subword (x
, true));
2941 gcc_assert (low
> 0 && high
> 0);
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
2945 /* Return one word of 128-bit value OP, taking into account the fixed
2946 endianness of certain registers. BYTE selects from the byte address. */
2949 mips_subword_at_byte (rtx op
, unsigned int byte
)
2953 mode
= GET_MODE (op
);
2954 if (mode
== VOIDmode
)
2957 gcc_assert (!FP_REG_RTX_P (op
));
/* MEM operands go through the small-data rewriter (MEM_P test elided).  */
2960 return mips_rewrite_small_data (adjust_address (op
, word_mode
, byte
));
2962 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
2965 /* Return the number of instructions needed to implement INSN,
2966 given that it loads from or stores to MEM. Assume that
2967 BASE_INSN_LENGTH is the length of one instruction. */
2970 mips_load_store_insns (rtx mem
, rtx_insn
*insn
)
2976 gcc_assert (MEM_P (mem
));
2977 mode
= GET_MODE (mem
);
2979 /* Try to prove that INSN does not need to be split. */
2980 might_split_p
= GET_MODE_SIZE (mode
) > UNITS_PER_WORD
;
2983 set
= single_set (insn
);
2984 if (set
&& !mips_split_move_insn_p (SET_DEST (set
), SET_SRC (set
), insn
))
2985 might_split_p
= false;
2988 return mips_address_insns (XEXP (mem
, 0), mode
, might_split_p
);
2991 /* Return the number of instructions needed for an integer division,
2992 assuming that BASE_INSN_LENGTH is the length of one instruction. */
2995 mips_idiv_insns (machine_mode mode
)
3000 if (TARGET_CHECK_ZERO_DIV
)
3002 if (GENERATE_DIVIDE_TRAPS
&& !MSA_SUPPORTED_MODE_P (mode
))
3008 if (TARGET_FIX_R4000
|| TARGET_FIX_R4400
)
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
3014 /* Emit a move from SRC to DEST. Assume that the move expanders can
3015 handle all moves if !can_create_pseudo_p (). The distinction is
3016 important because, unlike emit_move_insn, the move expanders know
3017 how to force Pmode objects into the constant pool even when the
3018 constant pool address is not itself legitimate. */
3021 mips_emit_move (rtx dest
, rtx src
)
3023 return (can_create_pseudo_p ()
3024 ? emit_move_insn (dest
, src
)
3025 : emit_move_insn_1 (dest
, src
));
3028 /* Emit a move from SRC to DEST, splitting compound moves into individual
3029 instructions. SPLIT_TYPE is the type of split to perform. */
3032 mips_emit_move_or_split (rtx dest
, rtx src
, enum mips_split_type split_type
)
3034 if (mips_split_move_p (dest
, src
, split_type
))
3035 mips_split_move (dest
, src
, split_type
, NULL
);
3037 mips_emit_move (dest
, src
);
3040 /* Emit an instruction of the form (set TARGET (CODE OP0)). */
3043 mips_emit_unary (enum rtx_code code
, rtx target
, rtx op0
)
3045 emit_insn (gen_rtx_SET (target
, gen_rtx_fmt_e (code
, GET_MODE (op0
), op0
)));
3048 /* Compute (CODE OP0) and store the result in a new register of mode MODE.
3049 Return that new register. */
3052 mips_force_unary (machine_mode mode
, enum rtx_code code
, rtx op0
)
3056 reg
= gen_reg_rtx (mode
);
3057 mips_emit_unary (code
, reg
, op0
);
3061 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3064 mips_emit_binary (enum rtx_code code
, rtx target
, rtx op0
, rtx op1
)
3066 emit_insn (gen_rtx_SET (target
, gen_rtx_fmt_ee (code
, GET_MODE (target
),
3070 /* Compute (CODE OP0 OP1) and store the result in a new register
3071 of mode MODE. Return that new register. */
3074 mips_force_binary (machine_mode mode
, enum rtx_code code
, rtx op0
, rtx op1
)
3078 reg
= gen_reg_rtx (mode
);
3079 mips_emit_binary (code
, reg
, op0
, op1
);
3083 /* Copy VALUE to a register and return that register. If new pseudos
3084 are allowed, copy it into a new register, otherwise use DEST. */
3087 mips_force_temporary (rtx dest
, rtx value
)
3089 if (can_create_pseudo_p ())
3090 return force_reg (Pmode
, value
);
3093 mips_emit_move (dest
, value
);
/* NOTE(review): elided extract -- return-type lines, braces, conditions
   and some statements are missing between the numbered lines; verify
   against the complete file before editing.  */
3098 /* Emit a call sequence with call pattern PATTERN and return the call
3099 instruction itself (which is not necessarily the last instruction
3100 emitted). ORIG_ADDR is the original, unlegitimized address,
3101 ADDR is the legitimized form, and LAZY_P is true if the call
3102 address is lazily-bound. */
3105 mips_emit_call_insn (rtx pattern
, rtx orig_addr
, rtx addr
, bool lazy_p
)
3110 insn
= emit_call_insn (pattern
);
3112 if (TARGET_MIPS16
&& mips_use_pic_fn_addr_reg_p (orig_addr
))
3114 /* MIPS16 JALRs only take MIPS16 registers. If the target
3115 function requires $25 to be valid on entry, we must copy it
3116 there separately. The move instruction can be put in the
3117 call's delay slot. */
3118 reg
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
3119 emit_insn_before (gen_move_insn (reg
, addr
), insn
);
3120 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), reg
);
3124 /* Lazy-binding stubs require $gp to be valid on entry. */
3125 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), pic_offset_table_rtx
);
3129 /* See the comment above load_call<mode> for details. */
3130 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
),
3131 gen_rtx_REG (Pmode
, GOT_VERSION_REGNUM
));
3132 emit_insn (gen_update_got_version ());
3136 && TARGET_EXPLICIT_RELOCS
&& TARGET_CALL_CLOBBERED_GP
)
3139 rtx post_call_tmp_reg
= gen_rtx_REG (word_mode
, POST_CALL_TMP_REG
);
3140 clobber_reg (&CALL_INSN_FUNCTION_USAGE (insn
), post_call_tmp_reg
);
3146 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
3147 then add CONST_INT OFFSET to the result. */
3150 mips_unspec_address_offset (rtx base
, rtx offset
,
3151 enum mips_symbol_type symbol_type
)
3153 base
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, base
),
3154 UNSPEC_ADDRESS_FIRST
+ symbol_type
)
;
3155 if (offset
!= const0_rtx
)
3156 base
= gen_rtx_PLUS (Pmode
, base
, offset
);
3157 return gen_rtx_CONST (Pmode
, base
);
3160 /* Return an UNSPEC address with underlying address ADDRESS and symbol
3161 type SYMBOL_TYPE. */
3164 mips_unspec_address (rtx address
, enum mips_symbol_type symbol_type
)
3168 split_const (address
, &base
, &offset
);
3169 return mips_unspec_address_offset (base
, offset
, symbol_type
);
/* NOTE(review): elided extract -- return-type lines, braces, conditions
   and some statements are missing between the numbered lines; verify
   against the complete file before editing.  */
3172 /* If OP is an UNSPEC address, return the address to which it refers,
3173 otherwise return OP itself. */
3176 mips_strip_unspec_address (rtx op
)
3180 split_const (op
, &base
, &offset
);
3181 if (UNSPEC_ADDRESS_P (base
))
3182 op
= plus_constant (Pmode
, UNSPEC_ADDRESS (base
), INTVAL (offset
));
3186 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
3187 high part to BASE and return the result. Just return BASE otherwise.
3188 TEMP is as for mips_force_temporary.
3190 The returned expression can be used as the first operand to a LO_SUM. */
3193 mips_unspec_offset_high (rtx temp
, rtx base
, rtx addr
,
3194 enum mips_symbol_type symbol_type
)
3196 if (mips_split_p
[symbol_type
])
3198 addr
= gen_rtx_HIGH (Pmode
, mips_unspec_address (addr
, symbol_type
));
3199 addr
= mips_force_temporary (temp
, addr
);
3200 base
= mips_force_temporary (temp
, gen_rtx_PLUS (Pmode
, addr
, base
));
3205 /* Return an instruction that copies $gp into register REG. We want
3206 GCC to treat the register's value as constant, so that its value
3207 can be rematerialized on demand. */
3210 gen_load_const_gp (rtx reg
)
3212 return PMODE_INSN (gen_load_const_gp
, (reg
));
3215 /* Return a pseudo register that contains the value of $gp throughout
3216 the current function. Such registers are needed by MIPS16 functions,
3217 for which $gp itself is not a valid base register or addition operand. */
3220 mips16_gp_pseudo_reg (void)
3222 if (cfun
->machine
->mips16_gp_pseudo_rtx
== NULL_RTX
)
3226 cfun
->machine
->mips16_gp_pseudo_rtx
= gen_reg_rtx (Pmode
);
3228 push_topmost_sequence ();
/* Skip over any notes at the start of the function so that the $gp
   load is emitted after them, before the first real insn.  */
3230 scan
= get_insns ();
3231 while (NEXT_INSN (scan
) && !INSN_P (NEXT_INSN (scan
)))
3232 scan
= NEXT_INSN (scan
);
3234 rtx set
= gen_load_const_gp (cfun
->machine
->mips16_gp_pseudo_rtx
);
3235 rtx_insn
*insn
= emit_insn_after (set
, scan
);
3236 INSN_LOCATION (insn
) = 0;
3238 pop_topmost_sequence ();
3241 return cfun
->machine
->mips16_gp_pseudo_rtx
;
3244 /* Return a base register that holds pic_offset_table_rtx.
3245 TEMP, if nonnull, is a scratch Pmode base register. */
3248 mips_pic_base_register (rtx temp
)
3251 return pic_offset_table_rtx
;
3253 if (currently_expanding_to_rtl
)
3254 return mips16_gp_pseudo_reg ();
3256 if (can_create_pseudo_p ())
3257 temp
= gen_reg_rtx (Pmode
);
3260 /* The first post-reload split exposes all references to $gp
3261 (both uses and definitions). All references must remain
3262 explicit after that point.
3264 It is safe to introduce uses of $gp at any time, so for
3265 simplicity, we do that before the split too. */
3266 mips_emit_move (temp
, pic_offset_table_rtx
);
3268 emit_insn (gen_load_const_gp (temp
));
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
3272 /* Return the RHS of a load_call<mode> insn. */
3275 mips_unspec_call (rtx reg
, rtx symbol
)
3279 vec
= gen_rtvec (3, reg
, symbol
, gen_rtx_REG (SImode
, GOT_VERSION_REGNUM
));
3280 return gen_rtx_UNSPEC (Pmode
, vec
, UNSPEC_LOAD_CALL
);
3283 /* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
3284 reference. Return NULL_RTX otherwise. */
3287 mips_strip_unspec_call (rtx src
)
3289 if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == UNSPEC_LOAD_CALL
)
3290 return mips_strip_unspec_address (XVECEXP (src
, 0, 1));
3294 /* Create and return a GOT reference of type TYPE for address ADDR.
3295 TEMP, if nonnull, is a scratch Pmode base register. */
3298 mips_got_load (rtx temp
, rtx addr
, enum mips_symbol_type type
)
3300 rtx base
, high
, lo_sum_symbol
;
3302 base
= mips_pic_base_register (temp
);
3304 /* If we used the temporary register to load $gp, we can't use
3305 it for the high part as well. */
3306 if (temp
!= NULL
&& reg_overlap_mentioned_p (base
, temp
))
3309 high
= mips_unspec_offset_high (temp
, base
, addr
, type
);
3310 lo_sum_symbol
= mips_unspec_address (addr
, type
);
3312 if (type
== SYMBOL_GOTOFF_CALL
)
3313 return mips_unspec_call (high
, lo_sum_symbol
);
3315 return PMODE_INSN (gen_unspec_got
, (high
, lo_sum_symbol
));
/* NOTE(review): elided extract -- return-type lines, braces, case labels
   and some statements are missing between the numbered lines; verify
   against the complete file before editing.  */
3318 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
3319 it appears in a MEM of that mode. Return true if ADDR is a legitimate
3320 constant in that context and can be split into high and low parts.
3321 If so, and if LOW_OUT is nonnull, emit the high part and store the
3322 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
3324 TEMP is as for mips_force_temporary and is used to load the high
3325 part into a register.
3327 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
3328 a legitimate SET_SRC for an .md pattern, otherwise the low part
3329 is guaranteed to be a legitimate address for mode MODE. */
3332 mips_split_symbol (rtx temp
, rtx addr
, machine_mode mode
, rtx
*low_out
)
3334 enum mips_symbol_context context
;
3335 enum mips_symbol_type symbol_type
;
3338 context
= (mode
== MAX_MACHINE_MODE
3339 ? SYMBOL_CONTEXT_LEA
3340 : SYMBOL_CONTEXT_MEM
);
3341 if (GET_CODE (addr
) == HIGH
&& context
== SYMBOL_CONTEXT_LEA
)
3343 addr
= XEXP (addr
, 0);
/* First try the mips_split_hi_p path: symbols whose high part itself
   needs special (GOT-based) handling.  */
3344 if (mips_symbolic_constant_p (addr
, context
, &symbol_type
)
3345 && mips_symbol_insns (symbol_type
, mode
) > 0
3346 && mips_split_hi_p
[symbol_type
])
3349 switch (symbol_type
)
3351 case SYMBOL_GOT_PAGE_OFST
:
3352 /* The high part of a page/ofst pair is loaded from the GOT. */
3353 *low_out
= mips_got_load (temp
, addr
, SYMBOL_GOTOFF_PAGE
);
/* Otherwise fall back to the ordinary mips_split_p handling.  */
3364 if (mips_symbolic_constant_p (addr
, context
, &symbol_type
)
3365 && mips_symbol_insns (symbol_type
, mode
) > 0
3366 && mips_split_p
[symbol_type
])
3369 switch (symbol_type
)
3371 case SYMBOL_GOT_DISP
:
3372 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
3373 *low_out
= mips_got_load (temp
, addr
, SYMBOL_GOTOFF_DISP
);
3376 case SYMBOL_GP_RELATIVE
:
3377 high
= mips_pic_base_register (temp
);
3378 *low_out
= gen_rtx_LO_SUM (Pmode
, high
, addr
);
/* Default case (label elided): a plain HIGH/LO_SUM split.  */
3382 high
= gen_rtx_HIGH (Pmode
, copy_rtx (addr
));
3383 high
= mips_force_temporary (temp
, high
);
3384 *low_out
= gen_rtx_LO_SUM (Pmode
, high
, addr
);
/* NOTE(review): elided extract -- return-type lines, braces and some
   statements are missing between the numbered lines; verify against the
   complete file before editing.  */
3393 /* Return a legitimate address for REG + OFFSET. TEMP is as for
3394 mips_force_temporary; it is only needed when OFFSET is not a
3398 mips_add_offset (rtx temp
, rtx reg
, HOST_WIDE_INT offset
)
3400 if (!SMALL_OPERAND (offset
))
3406 /* Load the full offset into a register so that we can use
3407 an unextended instruction for the address itself. */
3408 high
= GEN_INT (offset
);
3413 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
3414 The addition inside the macro CONST_HIGH_PART may cause an
3415 overflow, so we need to force a sign-extension check. */
3416 high
= gen_int_mode (CONST_HIGH_PART (offset
), Pmode
);
3417 offset
= CONST_LOW_PART (offset
);
3419 high
= mips_force_temporary (temp
, high
);
3420 reg
= mips_force_temporary (temp
, gen_rtx_PLUS (Pmode
, high
, reg
));
3422 return plus_constant (Pmode
, reg
, offset
);
3425 /* The __tls_get_addr symbol. */
3426 static GTY(()) rtx mips_tls_symbol
;
/* NOTE(review): elided extract -- return-type lines, braces, conditions
   and some statements (including the enclosing mips_get_tp definition
   near the end) are missing between the numbered lines; verify against
   the complete file before editing.  */
3428 /* Return an instruction sequence that calls __tls_get_addr. SYM is
3429 the TLS symbol we are referencing and TYPE is the symbol type to use
3430 (either global dynamic or local dynamic). V0 is an RTX for the
3431 return value location. */
3434 mips_call_tls_get_addr (rtx sym
, enum mips_symbol_type type
, rtx v0
)
3439 a0
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
)
;
/* Lazily create the libfunc symbol on first use.  */
3441 if (!mips_tls_symbol
)
3442 mips_tls_symbol
= init_one_libfunc ("__tls_get_addr");
3444 loc
= mips_unspec_address (sym
, type
);
3448 emit_insn (gen_rtx_SET (a0
, gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
,
3450 insn
= mips_expand_call (MIPS_CALL_NORMAL
, v0
, mips_tls_symbol
,
3451 const0_rtx
, NULL_RTX
, false);
3452 RTL_CONST_CALL_P (insn
) = 1;
3453 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), a0
);
3454 insn
= get_insns ();
3461 /* Return a pseudo register that contains the current thread pointer. */
3464 mips_expand_thread_pointer (rtx tp
)
/* MIPS16 cannot use RDHWR directly; call a one-only stub instead.  */
3470 if (!mips16_rdhwr_stub
)
3471 mips16_rdhwr_stub
= new mips16_rdhwr_one_only_stub ();
3472 fn
= mips16_stub_call_address (mips16_rdhwr_stub
);
3473 emit_insn (PMODE_INSN (gen_tls_get_tp_mips16
, (tp
, fn
)));
3476 emit_insn (PMODE_INSN (gen_tls_get_tp
, (tp
)));
3483 return mips_expand_thread_pointer (gen_reg_rtx (Pmode
));
3486 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
3487 its address. The return value will be both a valid address and a valid
3488 SET_SRC (either a REG or a LO_SUM). */
3491 mips_legitimize_tls_address (rtx loc
)
3493 rtx dest
, v0
, tp
, tmp1
, tmp2
, eqv
, offset
;
3494 enum tls_model model
;
3496 model
= SYMBOL_REF_TLS_MODEL (loc
);
3497 /* Only TARGET_ABICALLS code can have more than one module; other
3498 code must be static and should not use a GOT. All TLS models
3499 reduce to local exec in this situation. */
3500 if (!TARGET_ABICALLS
)
3501 model
= TLS_MODEL_LOCAL_EXEC
;
3505 case TLS_MODEL_GLOBAL_DYNAMIC
:
3507 v0
= gen_rtx_REG (Pmode
, GP_RETURN
);
3508 rtx_insn
*insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSGD
, v0
);
3509 dest
= gen_reg_rtx (Pmode
);
3510 emit_libcall_block (insn
, dest
, v0
, loc
);
3514 case TLS_MODEL_LOCAL_DYNAMIC
:
3516 v0
= gen_rtx_REG (Pmode
, GP_RETURN
);
3517 rtx_insn
*insn
= mips_call_tls_get_addr (loc
, SYMBOL_TLSLDM
, v0
);
3518 tmp1
= gen_reg_rtx (Pmode
);
3520 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3521 share the LDM result with other LD model accesses. */
3522 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
3524 emit_libcall_block (insn
, tmp1
, v0
, eqv
);
3526 offset
= mips_unspec_address (loc
, SYMBOL_DTPREL
);
3527 if (mips_split_p
[SYMBOL_DTPREL
])
3529 tmp2
= mips_unspec_offset_high (NULL
, tmp1
, loc
, SYMBOL_DTPREL
);
3530 dest
= gen_rtx_LO_SUM (Pmode
, tmp2
, offset
);
3533 dest
= expand_binop (Pmode
, add_optab
, tmp1
, offset
,
3534 0, 0, OPTAB_DIRECT
);
3538 case TLS_MODEL_INITIAL_EXEC
:
3539 tp
= mips_get_tp ();
3540 tmp1
= gen_reg_rtx (Pmode
);
3541 tmp2
= mips_unspec_address (loc
, SYMBOL_GOTTPREL
);
3542 if (Pmode
== DImode
)
3543 emit_insn (gen_load_gotdi (tmp1
, pic_offset_table_rtx
, tmp2
));
3545 emit_insn (gen_load_gotsi (tmp1
, pic_offset_table_rtx
, tmp2
));
3546 dest
= gen_reg_rtx (Pmode
);
3547 emit_insn (gen_add3_insn (dest
, tmp1
, tp
));
3550 case TLS_MODEL_LOCAL_EXEC
:
3551 tmp1
= mips_get_tp ();
3552 offset
= mips_unspec_address (loc
, SYMBOL_TPREL
);
3553 if (mips_split_p
[SYMBOL_TPREL
])
3555 tmp2
= mips_unspec_offset_high (NULL
, tmp1
, loc
, SYMBOL_TPREL
);
3556 dest
= gen_rtx_LO_SUM (Pmode
, tmp2
, offset
);
3559 dest
= expand_binop (Pmode
, add_optab
, tmp1
, offset
,
3560 0, 0, OPTAB_DIRECT
);
3569 /* Implement "TARGET = __builtin_mips_get_fcsr ()" for MIPS16,
3573 mips16_expand_get_fcsr (rtx target
)
3575 if (!mips16_get_fcsr_stub
)
3576 mips16_get_fcsr_stub
= new mips16_get_fcsr_one_only_stub ();
3577 rtx fn
= mips16_stub_call_address (mips16_get_fcsr_stub
);
3578 emit_insn (PMODE_INSN (gen_mips_get_fcsr_mips16
, (fn
)));
3579 emit_move_insn (target
, gen_rtx_REG (SImode
, GET_FCSR_REGNUM
));
3582 /* Implement __builtin_mips_set_fcsr (TARGET) for MIPS16, using a stub. */
3585 mips16_expand_set_fcsr (rtx newval
)
3587 if (!mips16_set_fcsr_stub
)
3588 mips16_set_fcsr_stub
= new mips16_set_fcsr_one_only_stub ();
3589 rtx fn
= mips16_stub_call_address (mips16_set_fcsr_stub
);
3590 emit_move_insn (gen_rtx_REG (SImode
, SET_FCSR_REGNUM
), newval
);
3591 emit_insn (PMODE_INSN (gen_mips_set_fcsr_mips16
, (fn
)));
3594 /* If X is not a valid address for mode MODE, force it into a register. */
3597 mips_force_address (rtx x
, machine_mode mode
)
3599 if (!mips_legitimate_address_p (mode
, x
, false))
3600 x
= force_reg (Pmode
, x
);
3604 /* This function is used to implement LEGITIMIZE_ADDRESS. If X can
3605 be legitimized in a way that the generic machinery might not expect,
3606 return a new address, otherwise return NULL. MODE is the mode of
3607 the memory being accessed. */
3610 mips_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
3614 HOST_WIDE_INT offset
;
3616 if (mips_tls_symbol_p (x
))
3617 return mips_legitimize_tls_address (x
);
3619 /* See if the address can split into a high part and a LO_SUM. */
3620 if (mips_split_symbol (NULL
, x
, mode
, &addr
))
3621 return mips_force_address (addr
, mode
);
3623 /* Handle BASE + OFFSET using mips_add_offset. */
3624 mips_split_plus (x
, &base
, &offset
);
3627 if (!mips_valid_base_register_p (base
, mode
, false))
3628 base
= copy_to_mode_reg (Pmode
, base
);
3629 addr
= mips_add_offset (NULL
, base
, offset
);
3630 return mips_force_address (addr
, mode
);
3636 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
3639 mips_move_integer (rtx temp
, rtx dest
, unsigned HOST_WIDE_INT value
)
3641 struct mips_integer_op codes
[MIPS_MAX_INTEGER_OPS
];
3643 unsigned int i
, num_ops
;
3646 mode
= GET_MODE (dest
);
3647 num_ops
= mips_build_integer (codes
, value
);
3649 /* Apply each binary operation to X. Invariant: X is a legitimate
3650 source operand for a SET pattern. */
3651 x
= GEN_INT (codes
[0].value
);
3652 for (i
= 1; i
< num_ops
; i
++)
3654 if (!can_create_pseudo_p ())
3656 emit_insn (gen_rtx_SET (temp
, x
));
3660 x
= force_reg (mode
, x
);
3661 x
= gen_rtx_fmt_ee (codes
[i
].code
, mode
, x
, GEN_INT (codes
[i
].value
));
3664 emit_insn (gen_rtx_SET (dest
, x
));
3667 /* Subroutine of mips_legitimize_move. Move constant SRC into register
3668 DEST given that SRC satisfies immediate_operand but doesn't satisfy
3672 mips_legitimize_const_move (machine_mode mode
, rtx dest
, rtx src
)
3676 /* Split moves of big integers into smaller pieces. */
3677 if (splittable_const_int_operand (src
, mode
))
3679 mips_move_integer (dest
, dest
, INTVAL (src
));
3683 /* Split moves of symbolic constants into high/low pairs. */
3684 if (mips_split_symbol (dest
, src
, MAX_MACHINE_MODE
, &src
))
3686 emit_insn (gen_rtx_SET (dest
, src
));
3690 /* Generate the appropriate access sequences for TLS symbols. */
3691 if (mips_tls_symbol_p (src
))
3693 mips_emit_move (dest
, mips_legitimize_tls_address (src
));
3697 /* If we have (const (plus symbol offset)), and that expression cannot
3698 be forced into memory, load the symbol first and add in the offset.
3699 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
3700 forced into memory, as it usually produces better code. */
3701 split_const (src
, &base
, &offset
);
3702 if (offset
!= const0_rtx
3703 && (targetm
.cannot_force_const_mem (mode
, src
)
3704 || (!TARGET_MIPS16
&& can_create_pseudo_p ())))
3706 base
= mips_force_temporary (dest
, base
);
3707 mips_emit_move (dest
, mips_add_offset (NULL
, base
, INTVAL (offset
)));
3711 src
= force_const_mem (mode
, src
);
3713 /* When using explicit relocs, constant pool references are sometimes
3714 not legitimate addresses. */
3715 mips_split_symbol (dest
, XEXP (src
, 0), mode
, &XEXP (src
, 0));
3716 mips_emit_move (dest
, src
);
3719 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
3720 sequence that is valid. */
3723 mips_legitimize_move (machine_mode mode
, rtx dest
, rtx src
)
3725 /* Both src and dest are non-registers; one special case is supported where
3726 the source is (const_int 0) and the store can source the zero register.
3727 MIPS16 and MSA are never able to source the zero register directly in
3728 memory operations. */
3729 if (!register_operand (dest
, mode
)
3730 && !register_operand (src
, mode
)
3731 && (TARGET_MIPS16
|| !const_0_operand (src
, mode
)
3732 || MSA_SUPPORTED_MODE_P (mode
)))
3734 mips_emit_move (dest
, force_reg (mode
, src
));
3738 /* We need to deal with constants that would be legitimate
3739 immediate_operands but aren't legitimate move_operands. */
3740 if (CONSTANT_P (src
) && !move_operand (src
, mode
))
3742 mips_legitimize_const_move (mode
, dest
, src
);
3743 set_unique_reg_note (get_last_insn (), REG_EQUAL
, copy_rtx (src
));
3749 /* Return true if value X in context CONTEXT is a small-data address
3750 that can be rewritten as a LO_SUM. */
3753 mips_rewrite_small_data_p (rtx x
, enum mips_symbol_context context
)
3755 enum mips_symbol_type symbol_type
;
3757 return (mips_lo_relocs
[SYMBOL_GP_RELATIVE
]
3758 && !mips_split_p
[SYMBOL_GP_RELATIVE
]
3759 && mips_symbolic_constant_p (x
, context
, &symbol_type
)
3760 && symbol_type
== SYMBOL_GP_RELATIVE
);
3763 /* Return true if OP refers to small data symbols directly, not through
3764 a LO_SUM. CONTEXT is the context in which X appears. */
3767 mips_small_data_pattern_1 (rtx x
, enum mips_symbol_context context
)
3769 subrtx_var_iterator::array_type array
;
3770 FOR_EACH_SUBRTX_VAR (iter
, array
, x
, ALL
)
3774 /* Ignore things like "g" constraints in asms. We make no particular
3775 guarantee about which symbolic constants are acceptable as asm operands
3776 versus which must be forced into a GPR. */
3777 if (GET_CODE (x
) == LO_SUM
|| GET_CODE (x
) == ASM_OPERANDS
)
3778 iter
.skip_subrtxes ();
3781 if (mips_small_data_pattern_1 (XEXP (x
, 0), SYMBOL_CONTEXT_MEM
))
3783 iter
.skip_subrtxes ();
3785 else if (mips_rewrite_small_data_p (x
, context
))
3791 /* Return true if OP refers to small data symbols directly, not through
3795 mips_small_data_pattern_p (rtx op
)
3797 return mips_small_data_pattern_1 (op
, SYMBOL_CONTEXT_LEA
);
3800 /* Rewrite *LOC so that it refers to small data using explicit
3801 relocations. CONTEXT is the context in which *LOC appears. */
3804 mips_rewrite_small_data_1 (rtx
*loc
, enum mips_symbol_context context
)
3806 subrtx_ptr_iterator::array_type array
;
3807 FOR_EACH_SUBRTX_PTR (iter
, array
, loc
, ALL
)
3812 mips_rewrite_small_data_1 (&XEXP (*loc
, 0), SYMBOL_CONTEXT_MEM
);
3813 iter
.skip_subrtxes ();
3815 else if (mips_rewrite_small_data_p (*loc
, context
))
3817 *loc
= gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, *loc
);
3818 iter
.skip_subrtxes ();
3820 else if (GET_CODE (*loc
) == LO_SUM
)
3821 iter
.skip_subrtxes ();
3825 /* Rewrite instruction pattern PATTERN so that it refers to small data
3826 using explicit relocations. */
3829 mips_rewrite_small_data (rtx pattern
)
3831 pattern
= copy_insn (pattern
);
3832 mips_rewrite_small_data_1 (&pattern
, SYMBOL_CONTEXT_LEA
);
/* The cost of loading values from the constant pool.  It should be
   larger than the cost of any constant we want to synthesize inline.  */
#define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3840 /* Return the cost of X when used as an operand to the MIPS16 instruction
3841 that implements CODE. Return -1 if there is no such instruction, or if
3842 X is not a valid immediate operand for it. */
3845 mips16_constant_cost (int code
, HOST_WIDE_INT x
)
3852 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3853 other shifts are extended. The shift patterns truncate the shift
3854 count to the right size, so there are no out-of-range values. */
3855 if (IN_RANGE (x
, 1, 8))
3857 return COSTS_N_INSNS (1);
3860 if (IN_RANGE (x
, -128, 127))
3862 if (SMALL_OPERAND (x
))
3863 return COSTS_N_INSNS (1);
3867 /* Like LE, but reject the always-true case. */
3872 /* We add 1 to the immediate and use SLT. */
3876 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3879 if (IN_RANGE (x
, 0, 255))
3881 if (SMALL_OPERAND_UNSIGNED (x
))
3882 return COSTS_N_INSNS (1);
3887 /* Equality comparisons with 0 are cheap. */
3897 /* Return true if there is a non-MIPS16 instruction that implements CODE
3898 and if that instruction accepts X as an immediate operand. */
3901 mips_immediate_operand_p (int code
, HOST_WIDE_INT x
)
3908 /* All shift counts are truncated to a valid constant. */
3913 /* Likewise rotates, if the target supports rotates at all. */
3919 /* These instructions take 16-bit unsigned immediates. */
3920 return SMALL_OPERAND_UNSIGNED (x
);
3925 /* These instructions take 16-bit signed immediates. */
3926 return SMALL_OPERAND (x
);
3932 /* The "immediate" forms of these instructions are really
3933 implemented as comparisons with register 0. */
3938 /* Likewise, meaning that the only valid immediate operand is 1. */
3942 /* We add 1 to the immediate and use SLT. */
3943 return SMALL_OPERAND (x
+ 1);
3946 /* Likewise SLTU, but reject the always-true case. */
3947 return SMALL_OPERAND (x
+ 1) && x
+ 1 != 0;
3951 /* The bit position and size are immediate operands. */
3952 return ISA_HAS_EXT_INS
;
3955 /* By default assume that $0 can be used for 0. */
3960 /* Return the cost of binary operation X, given that the instruction
3961 sequence for a word-sized or smaller operation has cost SINGLE_COST
3962 and that the sequence of a double-word operation has cost DOUBLE_COST.
3963 If SPEED is true, optimize for speed otherwise optimize for size. */
3966 mips_binary_cost (rtx x
, int single_cost
, int double_cost
, bool speed
)
3970 if (GET_MODE_SIZE (GET_MODE (x
)) == UNITS_PER_WORD
* 2)
3975 + set_src_cost (XEXP (x
, 0), GET_MODE (x
), speed
)
3976 + rtx_cost (XEXP (x
, 1), GET_MODE (x
), GET_CODE (x
), 1, speed
));
3979 /* Return the cost of floating-point multiplications of mode MODE. */
3982 mips_fp_mult_cost (machine_mode mode
)
3984 return mode
== DFmode
? mips_cost
->fp_mult_df
: mips_cost
->fp_mult_sf
;
3987 /* Return the cost of floating-point divisions of mode MODE. */
3990 mips_fp_div_cost (machine_mode mode
)
3992 return mode
== DFmode
? mips_cost
->fp_div_df
: mips_cost
->fp_div_sf
;
3995 /* Return the cost of sign-extending OP to mode MODE, not including the
3996 cost of OP itself. */
3999 mips_sign_extend_cost (machine_mode mode
, rtx op
)
4002 /* Extended loads are as cheap as unextended ones. */
4005 if (TARGET_64BIT
&& mode
== DImode
&& GET_MODE (op
) == SImode
)
4006 /* A sign extension from SImode to DImode in 64-bit mode is free. */
4009 if (ISA_HAS_SEB_SEH
|| GENERATE_MIPS16E
)
4010 /* We can use SEB or SEH. */
4011 return COSTS_N_INSNS (1);
4013 /* We need to use a shift left and a shift right. */
4014 return COSTS_N_INSNS (TARGET_MIPS16
? 4 : 2);
4017 /* Return the cost of zero-extending OP to mode MODE, not including the
4018 cost of OP itself. */
4021 mips_zero_extend_cost (machine_mode mode
, rtx op
)
4024 /* Extended loads are as cheap as unextended ones. */
4027 if (TARGET_64BIT
&& mode
== DImode
&& GET_MODE (op
) == SImode
)
4028 /* We need a shift left by 32 bits and a shift right by 32 bits. */
4029 return COSTS_N_INSNS (TARGET_MIPS16
? 4 : 2);
4031 if (GENERATE_MIPS16E
)
4032 /* We can use ZEB or ZEH. */
4033 return COSTS_N_INSNS (1);
4036 /* We need to load 0xff or 0xffff into a register and use AND. */
4037 return COSTS_N_INSNS (GET_MODE (op
) == QImode
? 2 : 3);
4039 /* We can use ANDI. */
4040 return COSTS_N_INSNS (1);
4043 /* Return the cost of moving between two registers of mode MODE,
4044 assuming that the move will be in pieces of at most UNITS bytes. */
4047 mips_set_reg_reg_piece_cost (machine_mode mode
, unsigned int units
)
4049 return COSTS_N_INSNS ((GET_MODE_SIZE (mode
) + units
- 1) / units
);
4052 /* Return the cost of moving between two registers of mode MODE. */
4055 mips_set_reg_reg_cost (machine_mode mode
)
4057 switch (GET_MODE_CLASS (mode
))
4060 return mips_set_reg_reg_piece_cost (mode
, GET_MODE_SIZE (CCmode
));
4063 case MODE_COMPLEX_FLOAT
:
4064 case MODE_VECTOR_FLOAT
:
4065 if (TARGET_HARD_FLOAT
)
4066 return mips_set_reg_reg_piece_cost (mode
, UNITS_PER_HWFPVALUE
);
4070 return mips_set_reg_reg_piece_cost (mode
, UNITS_PER_WORD
);
4074 /* Implement TARGET_RTX_COSTS. */
4077 mips_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
4078 int opno ATTRIBUTE_UNUSED
, int *total
, bool speed
)
4080 int code
= GET_CODE (x
);
4081 bool float_mode_p
= FLOAT_MODE_P (mode
);
4085 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
4086 appear in the instruction stream, and the cost of a comparison is
4087 really the cost of the branch or scc condition. At the time of
4088 writing, GCC only uses an explicit outer COMPARE code when optabs
4089 is testing whether a constant is expensive enough to force into a
4090 register. We want optabs to pass such constants through the MIPS
4091 expanders instead, so make all constants very cheap here. */
4092 if (outer_code
== COMPARE
)
4094 gcc_assert (CONSTANT_P (x
));
4102 /* Treat *clear_upper32-style ANDs as having zero cost in the
4103 second operand. The cost is entirely in the first operand.
4105 ??? This is needed because we would otherwise try to CSE
4106 the constant operand. Although that's the right thing for
4107 instructions that continue to be a register operation throughout
4108 compilation, it is disastrous for instructions that could
4109 later be converted into a memory operation. */
4111 && outer_code
== AND
4112 && UINTVAL (x
) == 0xffffffff)
4120 cost
= mips16_constant_cost (outer_code
, INTVAL (x
));
4129 /* When not optimizing for size, we care more about the cost
4130 of hot code, and hot code is often in a loop. If a constant
4131 operand needs to be forced into a register, we will often be
4132 able to hoist the constant load out of the loop, so the load
4133 should not contribute to the cost. */
4134 if (speed
|| mips_immediate_operand_p (outer_code
, INTVAL (x
)))
4146 if (force_to_mem_operand (x
, VOIDmode
))
4148 *total
= COSTS_N_INSNS (1);
4151 cost
= mips_const_insns (x
);
4154 /* If the constant is likely to be stored in a GPR, SETs of
4155 single-insn constants are as cheap as register sets; we
4156 never want to CSE them.
4158 Don't reduce the cost of storing a floating-point zero in
4159 FPRs. If we have a zero in an FPR for other reasons, we
4160 can get better cfg-cleanup and delayed-branch results by
4161 using it consistently, rather than using $0 sometimes and
4162 an FPR at other times. Also, moves between floating-point
4163 registers are sometimes cheaper than (D)MTC1 $0. */
4165 && outer_code
== SET
4166 && !(float_mode_p
&& TARGET_HARD_FLOAT
))
4168 /* When non-MIPS16 code loads a constant N>1 times, we rarely
4169 want to CSE the constant itself. It is usually better to
4170 have N copies of the last operation in the sequence and one
4171 shared copy of the other operations. (Note that this is
4172 not true for MIPS16 code, where the final operation in the
4173 sequence is often an extended instruction.)
4175 Also, if we have a CONST_INT, we don't know whether it is
4176 for a word or doubleword operation, so we cannot rely on
4177 the result of mips_build_integer. */
4178 else if (!TARGET_MIPS16
4179 && (outer_code
== SET
|| GET_MODE (x
) == VOIDmode
))
4181 *total
= COSTS_N_INSNS (cost
);
4184 /* The value will need to be fetched from the constant pool. */
4185 *total
= CONSTANT_POOL_COST
;
4189 /* If the address is legitimate, return the number of
4190 instructions it needs. */
4192 cost
= mips_address_insns (addr
, mode
, true);
4195 *total
= COSTS_N_INSNS (cost
+ 1);
4198 /* Check for a scaled indexed address. */
4199 if (mips_lwxs_address_p (addr
)
4200 || mips_lx_address_p (addr
, mode
))
4202 *total
= COSTS_N_INSNS (2);
4205 /* Otherwise use the default handling. */
4209 *total
= COSTS_N_INSNS (6);
4213 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1);
4217 /* Check for a *clear_upper32 pattern and treat it like a zero
4218 extension. See the pattern's comment for details. */
4221 && CONST_INT_P (XEXP (x
, 1))
4222 && UINTVAL (XEXP (x
, 1)) == 0xffffffff)
4224 *total
= (mips_zero_extend_cost (mode
, XEXP (x
, 0))
4225 + set_src_cost (XEXP (x
, 0), mode
, speed
));
4228 if (ISA_HAS_CINS
&& CONST_INT_P (XEXP (x
, 1)))
4230 rtx op
= XEXP (x
, 0);
4231 if (GET_CODE (op
) == ASHIFT
4232 && CONST_INT_P (XEXP (op
, 1))
4233 && mask_low_and_shift_p (mode
, XEXP (x
, 1), XEXP (op
, 1), 32))
4235 *total
= COSTS_N_INSNS (1);
4236 *total
+= set_src_cost (XEXP (op
, 0), mode
, speed
);
4240 /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in
4241 a single instruction. */
4243 && GET_CODE (XEXP (x
, 0)) == NOT
4244 && GET_CODE (XEXP (x
, 1)) == NOT
)
4246 cost
= GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 2 : 1;
4247 *total
= (COSTS_N_INSNS (cost
)
4248 + set_src_cost (XEXP (XEXP (x
, 0), 0), mode
, speed
)
4249 + set_src_cost (XEXP (XEXP (x
, 1), 0), mode
, speed
));
4257 /* Double-word operations use two single-word operations. */
4258 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
4267 if (CONSTANT_P (XEXP (x
, 1)))
4268 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
4271 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
4277 *total
= mips_cost
->fp_add
;
4279 *total
= COSTS_N_INSNS (4);
4283 /* Low-part immediates need an extended MIPS16 instruction. */
4284 *total
= (COSTS_N_INSNS (TARGET_MIPS16
? 2 : 1)
4285 + set_src_cost (XEXP (x
, 0), mode
, speed
));
4304 /* Branch comparisons have VOIDmode, so use the first operand's
4306 mode
= GET_MODE (XEXP (x
, 0));
4307 if (FLOAT_MODE_P (mode
))
4309 *total
= mips_cost
->fp_add
;
4312 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
4317 if (float_mode_p
&& ISA_HAS_UNFUSED_MADD4
&& !HONOR_SIGNED_ZEROS (mode
))
4319 /* See if we can use NMADD or NMSUB via the *nmadd4<mode>_fastmath
4320 or *nmsub4<mode>_fastmath patterns. These patterns check for
4321 HONOR_SIGNED_ZEROS so we check here too. */
4322 rtx op0
= XEXP (x
, 0);
4323 rtx op1
= XEXP (x
, 1);
4324 if (GET_CODE (op0
) == MULT
&& GET_CODE (XEXP (op0
, 0)) == NEG
)
4326 *total
= (mips_fp_mult_cost (mode
)
4327 + set_src_cost (XEXP (XEXP (op0
, 0), 0), mode
, speed
)
4328 + set_src_cost (XEXP (op0
, 1), mode
, speed
)
4329 + set_src_cost (op1
, mode
, speed
));
4332 if (GET_CODE (op1
) == MULT
)
4334 *total
= (mips_fp_mult_cost (mode
)
4335 + set_src_cost (op0
, mode
, speed
)
4336 + set_src_cost (XEXP (op1
, 0), mode
, speed
)
4337 + set_src_cost (XEXP (op1
, 1), mode
, speed
));
4346 /* If this is part of a MADD or MSUB, treat the PLUS as
4348 if (ISA_HAS_UNFUSED_MADD4
&& GET_CODE (XEXP (x
, 0)) == MULT
)
4351 *total
= mips_cost
->fp_add
;
4355 /* If it's an add + mult (which is equivalent to shift left) and
4356 it's immediate operand satisfies const_immlsa_operand predicate. */
4357 if (((ISA_HAS_LSA
&& mode
== SImode
)
4358 || (ISA_HAS_DLSA
&& mode
== DImode
))
4359 && GET_CODE (XEXP (x
, 0)) == MULT
)
4361 rtx op2
= XEXP (XEXP (x
, 0), 1);
4362 if (const_immlsa_operand (op2
, mode
))
4364 *total
= (COSTS_N_INSNS (1)
4365 + set_src_cost (XEXP (XEXP (x
, 0), 0), mode
, speed
)
4366 + set_src_cost (XEXP (x
, 1), mode
, speed
));
4371 /* Double-word operations require three single-word operations and
4372 an SLTU. The MIPS16 version then needs to move the result of
4373 the SLTU from $24 to a MIPS16 register. */
4374 *total
= mips_binary_cost (x
, COSTS_N_INSNS (1),
4375 COSTS_N_INSNS (TARGET_MIPS16
? 5 : 4),
4380 if (float_mode_p
&& ISA_HAS_UNFUSED_MADD4
)
4382 /* See if we can use NMADD or NMSUB via the *nmadd4<mode> or
4383 *nmsub4<mode> patterns. */
4384 rtx op
= XEXP (x
, 0);
4385 if ((GET_CODE (op
) == PLUS
|| GET_CODE (op
) == MINUS
)
4386 && GET_CODE (XEXP (op
, 0)) == MULT
)
4388 *total
= (mips_fp_mult_cost (mode
)
4389 + set_src_cost (XEXP (XEXP (op
, 0), 0), mode
, speed
)
4390 + set_src_cost (XEXP (XEXP (op
, 0), 1), mode
, speed
)
4391 + set_src_cost (XEXP (op
, 1), mode
, speed
));
4397 *total
= mips_cost
->fp_add
;
4399 *total
= COSTS_N_INSNS (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
? 4 : 1);
4403 *total
= mips_fp_mult_cost (mode
);
4408 *total
= mips_fp_mult_cost (mode
);
4409 else if (mode
== DImode
&& !TARGET_64BIT
)
4410 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
4411 where the mulsidi3 always includes an MFHI and an MFLO. */
4413 ? mips_cost
->int_mult_si
* 3 + 6
4414 : COSTS_N_INSNS (ISA_HAS_MUL3
? 7 : 9));
4416 *total
= COSTS_N_INSNS ((ISA_HAS_MUL3
|| ISA_HAS_R6MUL
) ? 1 : 2) + 1;
4417 else if (mode
== DImode
)
4418 *total
= mips_cost
->int_mult_di
;
4420 *total
= mips_cost
->int_mult_si
;
4424 /* Check for a reciprocal. */
4426 && ISA_HAS_FP_RECIP_RSQRT (mode
)
4427 && flag_unsafe_math_optimizations
4428 && XEXP (x
, 0) == CONST1_RTX (mode
))
4430 if (outer_code
== SQRT
|| GET_CODE (XEXP (x
, 1)) == SQRT
)
4431 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
4432 division as being free. */
4433 *total
= set_src_cost (XEXP (x
, 1), mode
, speed
);
4435 *total
= (mips_fp_div_cost (mode
)
4436 + set_src_cost (XEXP (x
, 1), mode
, speed
));
4445 *total
= mips_fp_div_cost (mode
);
4454 /* It is our responsibility to make division by a power of 2
4455 as cheap as 2 register additions if we want the division
4456 expanders to be used for such operations; see the setting
4457 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
4458 should always produce shorter code than using
4459 expand_sdiv2_pow2. */
4461 && CONST_INT_P (XEXP (x
, 1))
4462 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
4464 *total
= COSTS_N_INSNS (2);
4465 *total
+= set_src_cost (XEXP (x
, 0), mode
, speed
);
4468 *total
= COSTS_N_INSNS (mips_idiv_insns (mode
));
4470 else if (mode
== DImode
)
4471 *total
= mips_cost
->int_div_di
;
4473 *total
= mips_cost
->int_div_si
;
4477 *total
= mips_sign_extend_cost (mode
, XEXP (x
, 0));
4481 if (outer_code
== SET
4483 && (GET_CODE (XEXP (x
, 0)) == TRUNCATE
4484 || GET_CODE (XEXP (x
, 0)) == SUBREG
)
4485 && GET_MODE (XEXP (x
, 0)) == QImode
4486 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
4488 *total
= set_src_cost (XEXP (XEXP (x
, 0), 0), VOIDmode
, speed
);
4491 *total
= mips_zero_extend_cost (mode
, XEXP (x
, 0));
4494 /* Costings for highpart multiplies. Matching patterns of the form:
4496 (lshiftrt:DI (mult:DI (sign_extend:DI (...)
4497 (sign_extend:DI (...))
4501 && (GET_CODE (XEXP (x
, 0)) == ASHIFTRT
4502 || GET_CODE (XEXP (x
, 0)) == LSHIFTRT
)
4503 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
4504 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) == 32
4505 && GET_MODE (XEXP (x
, 0)) == DImode
)
4507 && INTVAL (XEXP (XEXP (x
, 0), 1)) == 64
4508 && GET_MODE (XEXP (x
, 0)) == TImode
))
4509 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
4510 && ((GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
4511 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)) == SIGN_EXTEND
)
4512 || (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
4513 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1))
4517 *total
= COSTS_N_INSNS (1) + 1;
4518 else if (mode
== DImode
)
4519 *total
= mips_cost
->int_mult_di
;
4521 *total
= mips_cost
->int_mult_si
;
4523 /* Sign extension is free, zero extension costs for DImode when
4524 on a 64bit core / when DMUL is present. */
4525 for (int i
= 0; i
< 2; ++i
)
4527 rtx op
= XEXP (XEXP (XEXP (x
, 0), 0), i
);
4529 && GET_CODE (op
) == ZERO_EXTEND
4530 && GET_MODE (op
) == DImode
)
4531 *total
+= rtx_cost (op
, DImode
, MULT
, i
, speed
);
4533 *total
+= rtx_cost (XEXP (op
, 0), VOIDmode
, GET_CODE (op
),
4542 case UNSIGNED_FLOAT
:
4545 case FLOAT_TRUNCATE
:
4546 *total
= mips_cost
->fp_add
;
4550 if (register_operand (SET_DEST (x
), VOIDmode
)
4551 && reg_or_0_operand (SET_SRC (x
), VOIDmode
))
4553 *total
= mips_set_reg_reg_cost (GET_MODE (SET_DEST (x
)));
4563 /* Implement TARGET_ADDRESS_COST. */
4566 mips_address_cost (rtx addr
, machine_mode mode
,
4567 addr_space_t as ATTRIBUTE_UNUSED
,
4568 bool speed ATTRIBUTE_UNUSED
)
4570 return mips_address_insns (addr
, mode
, false);
4573 /* Implement TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P. */
4576 mips_no_speculation_in_delay_slots_p ()
4578 return TARGET_CB_MAYBE
;
4581 /* Information about a single instruction in a multi-instruction
4583 struct mips_multi_member
{
4584 /* True if this is a label, false if it is code. */
4587 /* The output_asm_insn format of the instruction. */
4590 /* The operands to the instruction. */
4591 rtx operands
[MAX_RECOG_OPERANDS
];
4593 typedef struct mips_multi_member mips_multi_member
;
4595 /* The instructions that make up the current multi-insn sequence. */
4596 static vec
<mips_multi_member
> mips_multi_members
;
4598 /* How many instructions (as opposed to labels) are in the current
4599 multi-insn sequence. */
4600 static unsigned int mips_multi_num_insns
;
4602 /* Start a new multi-insn sequence. */
4605 mips_multi_start (void)
4607 mips_multi_members
.truncate (0);
4608 mips_multi_num_insns
= 0;
4611 /* Add a new, zero initialized member to the current multi-insn sequence. */
4613 static struct mips_multi_member
*
4614 mips_multi_add (void)
4616 mips_multi_member empty
;
4617 memset (&empty
, 0, sizeof (empty
));
4618 return mips_multi_members
.safe_push (empty
);
4621 /* Add a normal insn with the given asm format to the current multi-insn
4622 sequence. The other arguments are a null-terminated list of operands. */
4625 mips_multi_add_insn (const char *format
, ...)
4627 struct mips_multi_member
*member
;
4632 member
= mips_multi_add ();
4633 member
->is_label_p
= false;
4634 member
->format
= format
;
4635 va_start (ap
, format
);
4637 while ((op
= va_arg (ap
, rtx
)))
4638 member
->operands
[i
++] = op
;
4640 mips_multi_num_insns
++;
4643 /* Add the given label definition to the current multi-insn sequence.
4644 The definition should include the colon. */
4647 mips_multi_add_label (const char *label
)
4649 struct mips_multi_member
*member
;
4651 member
= mips_multi_add ();
4652 member
->is_label_p
= true;
4653 member
->format
= label
;
4656 /* Return the index of the last member of the current multi-insn sequence. */
4659 mips_multi_last_index (void)
4661 return mips_multi_members
.length () - 1;
4664 /* Add a copy of an existing instruction to the current multi-insn
4665 sequence. I is the index of the instruction that should be copied. */
4668 mips_multi_copy_insn (unsigned int i
)
4670 struct mips_multi_member
*member
;
4672 member
= mips_multi_add ();
4673 memcpy (member
, &mips_multi_members
[i
], sizeof (*member
));
4674 gcc_assert (!member
->is_label_p
);
4677 /* Change the operand of an existing instruction in the current
4678 multi-insn sequence. I is the index of the instruction,
4679 OP is the index of the operand, and X is the new value. */
4682 mips_multi_set_operand (unsigned int i
, unsigned int op
, rtx x
)
4684 mips_multi_members
[i
].operands
[op
] = x
;
4687 /* Write out the asm code for the current multi-insn sequence. */
4690 mips_multi_write (void)
4692 struct mips_multi_member
*member
;
4695 FOR_EACH_VEC_ELT (mips_multi_members
, i
, member
)
4696 if (member
->is_label_p
)
4697 fprintf (asm_out_file
, "%s\n", member
->format
);
4699 output_asm_insn (member
->format
, member
->operands
);
4702 /* Return one word of double-word value OP, taking into account the fixed
4703 endianness of certain registers. HIGH_P is true to select the high part,
4704 false to select the low part. */
4707 mips_subword (rtx op
, bool high_p
)
4709 unsigned int byte
, offset
;
4712 mode
= GET_MODE (op
);
4713 if (mode
== VOIDmode
)
4714 mode
= TARGET_64BIT
? TImode
: DImode
;
4716 if (TARGET_BIG_ENDIAN
? !high_p
: high_p
)
4717 byte
= UNITS_PER_WORD
;
4721 if (FP_REG_RTX_P (op
))
4723 /* Paired FPRs are always ordered little-endian. */
4724 offset
= (UNITS_PER_WORD
< UNITS_PER_HWFPVALUE
? high_p
: byte
!= 0);
4725 return gen_rtx_REG (word_mode
, REGNO (op
) + offset
);
4729 return mips_rewrite_small_data (adjust_address (op
, word_mode
, byte
));
4731 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
4734 /* Return true if SRC should be moved into DEST using "MULT $0, $0".
4735 SPLIT_TYPE is the condition under which moves should be split. */
4738 mips_mult_move_p (rtx dest
, rtx src
, enum mips_split_type split_type
)
4740 return ((split_type
!= SPLIT_FOR_SPEED
4741 || mips_tuning_info
.fast_mult_zero_zero_p
)
4742 && src
== const0_rtx
4744 && GET_MODE_SIZE (GET_MODE (dest
)) == 2 * UNITS_PER_WORD
4745 && (ISA_HAS_DSP_MULT
4746 ? ACC_REG_P (REGNO (dest
))
4747 : MD_REG_P (REGNO (dest
))));
4750 /* Return true if a move from SRC to DEST should be split into two.
4751 SPLIT_TYPE describes the split condition. */
4754 mips_split_move_p (rtx dest
, rtx src
, enum mips_split_type split_type
)
4756 /* Check whether the move can be done using some variant of MULT $0,$0. */
4757 if (mips_mult_move_p (dest
, src
, split_type
))
4760 /* FPR-to-FPR moves can be done in a single instruction, if they're
4762 unsigned int size
= GET_MODE_SIZE (GET_MODE (dest
));
4763 if (size
== 8 && FP_REG_RTX_P (src
) && FP_REG_RTX_P (dest
))
4766 /* Check for floating-point loads and stores. */
4767 if (size
== 8 && ISA_HAS_LDC1_SDC1
)
4769 if (FP_REG_RTX_P (dest
) && MEM_P (src
))
4771 if (FP_REG_RTX_P (src
) && MEM_P (dest
))
4775 /* Check if MSA moves need splitting. */
4776 if (MSA_SUPPORTED_MODE_P (GET_MODE (dest
)))
4777 return mips_split_128bit_move_p (dest
, src
);
4779 /* Otherwise split all multiword moves. */
4780 return size
> UNITS_PER_WORD
;
4783 /* Split a move from SRC to DEST, given that mips_split_move_p holds.
4784 SPLIT_TYPE describes the split condition. INSN is the insn being
4785 split, if we know it, NULL otherwise. */
4788 mips_split_move (rtx dest
, rtx src
, enum mips_split_type split_type
, rtx insn_
)
4792 gcc_checking_assert (mips_split_move_p (dest
, src
, split_type
));
4793 if (MSA_SUPPORTED_MODE_P (GET_MODE (dest
)))
4794 mips_split_128bit_move (dest
, src
);
4795 else if (FP_REG_RTX_P (dest
) || FP_REG_RTX_P (src
))
4797 if (!TARGET_64BIT
&& GET_MODE (dest
) == DImode
)
4798 emit_insn (gen_move_doubleword_fprdi (dest
, src
));
4799 else if (!TARGET_64BIT
&& GET_MODE (dest
) == DFmode
)
4800 emit_insn (gen_move_doubleword_fprdf (dest
, src
));
4801 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V2SFmode
)
4802 emit_insn (gen_move_doubleword_fprv2sf (dest
, src
));
4803 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V2SImode
)
4804 emit_insn (gen_move_doubleword_fprv2si (dest
, src
));
4805 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V4HImode
)
4806 emit_insn (gen_move_doubleword_fprv4hi (dest
, src
));
4807 else if (!TARGET_64BIT
&& GET_MODE (dest
) == V8QImode
)
4808 emit_insn (gen_move_doubleword_fprv8qi (dest
, src
));
4809 else if (TARGET_64BIT
&& GET_MODE (dest
) == TFmode
)
4810 emit_insn (gen_move_doubleword_fprtf (dest
, src
));
4814 else if (REG_P (dest
) && REGNO (dest
) == MD_REG_FIRST
)
4816 low_dest
= mips_subword (dest
, false);
4817 mips_emit_move (low_dest
, mips_subword (src
, false));
4819 emit_insn (gen_mthidi_ti (dest
, mips_subword (src
, true), low_dest
));
4821 emit_insn (gen_mthisi_di (dest
, mips_subword (src
, true), low_dest
));
4823 else if (REG_P (src
) && REGNO (src
) == MD_REG_FIRST
)
4825 mips_emit_move (mips_subword (dest
, false), mips_subword (src
, false));
4827 emit_insn (gen_mfhidi_ti (mips_subword (dest
, true), src
));
4829 emit_insn (gen_mfhisi_di (mips_subword (dest
, true), src
));
4833 /* The operation can be split into two normal moves. Decide in
4834 which order to do them. */
4835 low_dest
= mips_subword (dest
, false);
4836 if (REG_P (low_dest
)
4837 && reg_overlap_mentioned_p (low_dest
, src
))
4839 mips_emit_move (mips_subword (dest
, true), mips_subword (src
, true));
4840 mips_emit_move (low_dest
, mips_subword (src
, false));
4844 mips_emit_move (low_dest
, mips_subword (src
, false));
4845 mips_emit_move (mips_subword (dest
, true), mips_subword (src
, true));
4849 /* This is a hack. See if the next insn uses DEST and if so, see if we
4850 can forward SRC for DEST. This is most useful if the next insn is a
4852 rtx_insn
*insn
= (rtx_insn
*)insn_
;
4853 struct mips_address_info addr
= {};
4856 rtx_insn
*next
= next_nonnote_nondebug_insn_bb (insn
);
4859 rtx set
= single_set (next
);
4860 if (set
&& SET_SRC (set
) == dest
)
4864 rtx tmp
= XEXP (src
, 0);
4865 mips_classify_address (&addr
, tmp
, GET_MODE (tmp
), true);
4866 if (addr
.reg
&& !reg_overlap_mentioned_p (dest
, addr
.reg
))
4867 validate_change (next
, &SET_SRC (set
), src
, false);
4870 validate_change (next
, &SET_SRC (set
), src
, false);
4876 /* Return the split type for instruction INSN. */
4878 static enum mips_split_type
4879 mips_insn_split_type (rtx insn
)
4881 basic_block bb
= BLOCK_FOR_INSN (insn
);
4884 if (optimize_bb_for_speed_p (bb
))
4885 return SPLIT_FOR_SPEED
;
4887 return SPLIT_FOR_SIZE
;
4889 /* Once CFG information has been removed, we should trust the optimization
4890 decisions made by previous passes and only split where necessary. */
4891 return SPLIT_IF_NECESSARY
;
4894 /* Return true if a 128-bit move from SRC to DEST should be split. */
4897 mips_split_128bit_move_p (rtx dest
, rtx src
)
4899 /* MSA-to-MSA moves can be done in a single instruction. */
4900 if (FP_REG_RTX_P (src
) && FP_REG_RTX_P (dest
))
4903 /* Check for MSA loads and stores. */
4904 if (FP_REG_RTX_P (dest
) && MEM_P (src
))
4906 if (FP_REG_RTX_P (src
) && MEM_P (dest
))
4909 /* Check for MSA set to an immediate const vector with valid replicated
4911 if (FP_REG_RTX_P (dest
)
4912 && mips_const_vector_same_int_p (src
, GET_MODE (src
), -512, 511))
4915 /* Check for MSA load zero immediate. */
4916 if (FP_REG_RTX_P (dest
) && src
== CONST0_RTX (GET_MODE (src
)))
4922 /* Split a 128-bit move from SRC to DEST. */
4925 mips_split_128bit_move (rtx dest
, rtx src
)
4928 rtx low_dest
, low_src
, d
, s
;
4930 if (FP_REG_RTX_P (dest
))
4932 gcc_assert (!MEM_P (src
));
4934 rtx new_dest
= dest
;
4937 if (GET_MODE (dest
) != V4SImode
)
4938 new_dest
= simplify_gen_subreg (V4SImode
, dest
, GET_MODE (dest
), 0);
4942 if (GET_MODE (dest
) != V2DImode
)
4943 new_dest
= simplify_gen_subreg (V2DImode
, dest
, GET_MODE (dest
), 0);
4946 for (byte
= 0, index
= 0; byte
< GET_MODE_SIZE (TImode
);
4947 byte
+= UNITS_PER_WORD
, index
++)
4949 s
= mips_subword_at_byte (src
, byte
);
4951 emit_insn (gen_msa_insert_w (new_dest
, s
, new_dest
,
4952 GEN_INT (1 << index
)));
4954 emit_insn (gen_msa_insert_d (new_dest
, s
, new_dest
,
4955 GEN_INT (1 << index
)));
4958 else if (FP_REG_RTX_P (src
))
4960 gcc_assert (!MEM_P (dest
));
4965 if (GET_MODE (src
) != V4SImode
)
4966 new_src
= simplify_gen_subreg (V4SImode
, src
, GET_MODE (src
), 0);
4970 if (GET_MODE (src
) != V2DImode
)
4971 new_src
= simplify_gen_subreg (V2DImode
, src
, GET_MODE (src
), 0);
4974 for (byte
= 0, index
= 0; byte
< GET_MODE_SIZE (TImode
);
4975 byte
+= UNITS_PER_WORD
, index
++)
4977 d
= mips_subword_at_byte (dest
, byte
);
4979 emit_insn (gen_msa_copy_s_w (d
, new_src
, GEN_INT (index
)));
4981 emit_insn (gen_msa_copy_s_d (d
, new_src
, GEN_INT (index
)));
4986 low_dest
= mips_subword_at_byte (dest
, 0);
4987 low_src
= mips_subword_at_byte (src
, 0);
4988 gcc_assert (REG_P (low_dest
) && REG_P (low_src
));
4989 /* Make sure the source register is not written before reading. */
4990 if (REGNO (low_dest
) <= REGNO (low_src
))
4992 for (byte
= 0; byte
< GET_MODE_SIZE (TImode
);
4993 byte
+= UNITS_PER_WORD
)
4995 d
= mips_subword_at_byte (dest
, byte
);
4996 s
= mips_subword_at_byte (src
, byte
);
4997 mips_emit_move (d
, s
);
5002 for (byte
= GET_MODE_SIZE (TImode
) - UNITS_PER_WORD
; byte
>= 0;
5003 byte
-= UNITS_PER_WORD
)
5005 d
= mips_subword_at_byte (dest
, byte
);
5006 s
= mips_subword_at_byte (src
, byte
);
5007 mips_emit_move (d
, s
);
5013 /* Split a COPY_S.D with operands DEST, SRC and INDEX. GEN is a function
5014 used to generate subregs. */
5017 mips_split_msa_copy_d (rtx dest
, rtx src
, rtx index
,
5018 rtx (*gen_fn
)(rtx
, rtx
, rtx
))
5020 gcc_assert ((GET_MODE (src
) == V2DImode
&& GET_MODE (dest
) == DImode
)
5021 || (GET_MODE (src
) == V2DFmode
&& GET_MODE (dest
) == DFmode
));
5023 /* Note that low is always from the lower index, and high is always
5024 from the higher index. */
5025 rtx low
= mips_subword (dest
, false);
5026 rtx high
= mips_subword (dest
, true);
5027 rtx new_src
= simplify_gen_subreg (V4SImode
, src
, GET_MODE (src
), 0);
5029 emit_insn (gen_fn (low
, new_src
, GEN_INT (INTVAL (index
) * 2)));
5030 emit_insn (gen_fn (high
, new_src
, GEN_INT (INTVAL (index
) * 2 + 1)));
5033 /* Split a INSERT.D with operand DEST, SRC1.INDEX and SRC2. */
5036 mips_split_msa_insert_d (rtx dest
, rtx src1
, rtx index
, rtx src2
)
5039 gcc_assert (GET_MODE (dest
) == GET_MODE (src1
));
5040 gcc_assert ((GET_MODE (dest
) == V2DImode
5041 && (GET_MODE (src2
) == DImode
|| src2
== const0_rtx
))
5042 || (GET_MODE (dest
) == V2DFmode
&& GET_MODE (src2
) == DFmode
));
5044 /* Note that low is always from the lower index, and high is always
5045 from the higher index. */
5046 rtx low
= mips_subword (src2
, false);
5047 rtx high
= mips_subword (src2
, true);
5048 rtx new_dest
= simplify_gen_subreg (V4SImode
, dest
, GET_MODE (dest
), 0);
5049 rtx new_src1
= simplify_gen_subreg (V4SImode
, src1
, GET_MODE (src1
), 0);
5050 i
= exact_log2 (INTVAL (index
));
5051 gcc_assert (i
!= -1);
5053 emit_insn (gen_msa_insert_w (new_dest
, low
, new_src1
,
5054 GEN_INT (1 << (i
* 2))));
5055 emit_insn (gen_msa_insert_w (new_dest
, high
, new_dest
,
5056 GEN_INT (1 << (i
* 2 + 1))));
5062 mips_split_msa_fill_d (rtx dest
, rtx src
)
5064 gcc_assert ((GET_MODE (dest
) == V2DImode
5065 && (GET_MODE (src
) == DImode
|| src
== const0_rtx
))
5066 || (GET_MODE (dest
) == V2DFmode
&& GET_MODE (src
) == DFmode
));
5068 /* Note that low is always from the lower index, and high is always
5069 from the higher index. */
5071 if (src
== const0_rtx
)
5078 low
= mips_subword (src
, false);
5079 high
= mips_subword (src
, true);
5081 rtx new_dest
= simplify_gen_subreg (V4SImode
, dest
, GET_MODE (dest
), 0);
5082 emit_insn (gen_msa_fill_w (new_dest
, low
));
5083 emit_insn (gen_msa_insert_w (new_dest
, high
, new_dest
, GEN_INT (1 << 1)));
5084 emit_insn (gen_msa_insert_w (new_dest
, high
, new_dest
, GEN_INT (1 << 3)));
5087 /* Return true if a move from SRC to DEST in INSN should be split. */
5090 mips_split_move_insn_p (rtx dest
, rtx src
, rtx insn
)
5092 return mips_split_move_p (dest
, src
, mips_insn_split_type (insn
));
5095 /* Split a move from SRC to DEST in INSN, given that mips_split_move_insn_p
5099 mips_split_move_insn (rtx dest
, rtx src
, rtx insn
)
5101 mips_split_move (dest
, src
, mips_insn_split_type (insn
), insn
);
5104 /* Return the appropriate instructions to move SRC into DEST. Assume
5105 that SRC is operand 1 and DEST is operand 0. */
5108 mips_output_move (rtx dest
, rtx src
)
5110 enum rtx_code dest_code
= GET_CODE (dest
);
5111 enum rtx_code src_code
= GET_CODE (src
);
5112 machine_mode mode
= GET_MODE (dest
);
5113 bool dbl_p
= (GET_MODE_SIZE (mode
) == 8);
5114 bool msa_p
= MSA_SUPPORTED_MODE_P (mode
);
5115 enum mips_symbol_type symbol_type
;
5117 if (mips_split_move_p (dest
, src
, SPLIT_IF_NECESSARY
))
5121 && dest_code
== REG
&& FP_REG_P (REGNO (dest
))
5122 && src_code
== CONST_VECTOR
5123 && CONST_INT_P (CONST_VECTOR_ELT (src
, 0)))
5125 gcc_assert (mips_const_vector_same_int_p (src
, mode
, -512, 511));
5126 return "ldi.%v0\t%w0,%E1";
5129 if ((src_code
== REG
&& GP_REG_P (REGNO (src
)))
5130 || (!TARGET_MIPS16
&& src
== CONST0_RTX (mode
)))
5132 if (dest_code
== REG
)
5134 if (GP_REG_P (REGNO (dest
)))
5135 return "move\t%0,%z1";
5137 if (mips_mult_move_p (dest
, src
, SPLIT_IF_NECESSARY
))
5139 if (ISA_HAS_DSP_MULT
)
5140 return "mult\t%q0,%.,%.";
5142 return "mult\t%.,%.";
5145 /* Moves to HI are handled by special .md insns. */
5146 if (REGNO (dest
) == LO_REGNUM
)
5149 if (DSP_ACC_REG_P (REGNO (dest
)))
5151 static char retval
[] = "mt__\t%z1,%q0";
5153 retval
[2] = reg_names
[REGNO (dest
)][4];
5154 retval
[3] = reg_names
[REGNO (dest
)][5];
5158 if (FP_REG_P (REGNO (dest
)))
5162 gcc_assert (src
== CONST0_RTX (GET_MODE (src
)));
5163 return "ldi.%v0\t%w0,0";
5166 return dbl_p
? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
5169 if (ALL_COP_REG_P (REGNO (dest
)))
5171 static char retval
[] = "dmtc_\t%z1,%0";
5173 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
5174 return dbl_p
? retval
: retval
+ 1;
5177 if (dest_code
== MEM
)
5178 switch (GET_MODE_SIZE (mode
))
5180 case 1: return "sb\t%z1,%0";
5181 case 2: return "sh\t%z1,%0";
5182 case 4: return "sw\t%z1,%0";
5183 case 8: return "sd\t%z1,%0";
5184 default: gcc_unreachable ();
5187 if (dest_code
== REG
&& GP_REG_P (REGNO (dest
)))
5189 if (src_code
== REG
)
5191 /* Moves from HI are handled by special .md insns. */
5192 if (REGNO (src
) == LO_REGNUM
)
5194 /* When generating VR4120 or VR4130 code, we use MACC and
5195 DMACC instead of MFLO. This avoids both the normal
5196 MIPS III HI/LO hazards and the errata related to
5199 return dbl_p
? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
5203 if (DSP_ACC_REG_P (REGNO (src
)))
5205 static char retval
[] = "mf__\t%0,%q1";
5207 retval
[2] = reg_names
[REGNO (src
)][4];
5208 retval
[3] = reg_names
[REGNO (src
)][5];
5212 if (FP_REG_P (REGNO (src
)))
5214 gcc_assert (!msa_p
);
5215 return dbl_p
? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
5218 if (ALL_COP_REG_P (REGNO (src
)))
5220 static char retval
[] = "dmfc_\t%0,%1";
5222 retval
[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
5223 return dbl_p
? retval
: retval
+ 1;
5227 if (src_code
== MEM
)
5228 switch (GET_MODE_SIZE (mode
))
5230 case 1: return "lbu\t%0,%1";
5231 case 2: return "lhu\t%0,%1";
5232 case 4: return "lw\t%0,%1";
5233 case 8: return "ld\t%0,%1";
5234 default: gcc_unreachable ();
5237 if (src_code
== CONST_INT
)
5239 /* Don't use the X format for the operand itself, because that
5240 will give out-of-range numbers for 64-bit hosts and 32-bit
5243 return "li\t%0,%1\t\t\t# %X1";
5245 if (SMALL_OPERAND_UNSIGNED (INTVAL (src
)))
5248 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src
)))
5252 if (src_code
== HIGH
)
5253 return TARGET_MIPS16
? "#" : "lui\t%0,%h1";
5255 if (CONST_GP_P (src
))
5256 return "move\t%0,%1";
5258 if (mips_symbolic_constant_p (src
, SYMBOL_CONTEXT_LEA
, &symbol_type
)
5259 && mips_lo_relocs
[symbol_type
] != 0)
5261 /* A signed 16-bit constant formed by applying a relocation
5262 operator to a symbolic address. */
5263 gcc_assert (!mips_split_p
[symbol_type
]);
5264 return "li\t%0,%R1";
5267 if (symbolic_operand (src
, VOIDmode
))
5269 gcc_assert (TARGET_MIPS16
5270 ? TARGET_MIPS16_TEXT_LOADS
5271 : !TARGET_EXPLICIT_RELOCS
);
5272 return dbl_p
? "dla\t%0,%1" : "la\t%0,%1";
5275 if (src_code
== REG
&& FP_REG_P (REGNO (src
)))
5277 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
5279 if (GET_MODE (dest
) == V2SFmode
)
5280 return "mov.ps\t%0,%1";
5282 return "move.v\t%w0,%w1";
5284 return dbl_p
? "mov.d\t%0,%1" : "mov.s\t%0,%1";
5287 if (dest_code
== MEM
)
5290 return "st.%v1\t%w1,%0";
5292 return dbl_p
? "sdc1\t%1,%0" : "swc1\t%1,%0";
5295 if (dest_code
== REG
&& FP_REG_P (REGNO (dest
)))
5297 if (src_code
== MEM
)
5300 return "ld.%v0\t%w0,%1";
5302 return dbl_p
? "ldc1\t%0,%1" : "lwc1\t%0,%1";
5305 if (dest_code
== REG
&& ALL_COP_REG_P (REGNO (dest
)) && src_code
== MEM
)
5307 static char retval
[] = "l_c_\t%0,%1";
5309 retval
[1] = (dbl_p
? 'd' : 'w');
5310 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest
));
5313 if (dest_code
== MEM
&& src_code
== REG
&& ALL_COP_REG_P (REGNO (src
)))
5315 static char retval
[] = "s_c_\t%1,%0";
5317 retval
[1] = (dbl_p
? 'd' : 'w');
5318 retval
[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src
));
5324 /* Return true if CMP1 is a suitable second operand for integer ordering
5325 test CODE. See also the *sCC patterns in mips.md. */
5328 mips_int_order_operand_ok_p (enum rtx_code code
, rtx cmp1
)
5334 return reg_or_0_operand (cmp1
, VOIDmode
);
5338 return !TARGET_MIPS16
&& cmp1
== const1_rtx
;
5342 return arith_operand (cmp1
, VOIDmode
);
5345 return sle_operand (cmp1
, VOIDmode
);
5348 return sleu_operand (cmp1
, VOIDmode
);
5355 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
5356 integer ordering test *CODE, or if an equivalent combination can
5357 be formed by adjusting *CODE and *CMP1. When returning true, update
5358 *CODE and *CMP1 with the chosen code and operand, otherwise leave
5362 mips_canonicalize_int_order_test (enum rtx_code
*code
, rtx
*cmp1
,
5365 HOST_WIDE_INT plus_one
;
5367 if (mips_int_order_operand_ok_p (*code
, *cmp1
))
5370 if (CONST_INT_P (*cmp1
))
5374 plus_one
= trunc_int_for_mode (UINTVAL (*cmp1
) + 1, mode
);
5375 if (INTVAL (*cmp1
) < plus_one
)
5378 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
5384 plus_one
= trunc_int_for_mode (UINTVAL (*cmp1
) + 1, mode
);
5388 *cmp1
= force_reg (mode
, GEN_INT (plus_one
));
5399 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
5400 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
5401 is nonnull, it's OK to set TARGET to the inverse of the result and
5402 flip *INVERT_PTR instead. */
5405 mips_emit_int_order_test (enum rtx_code code
, bool *invert_ptr
,
5406 rtx target
, rtx cmp0
, rtx cmp1
)
5410 /* First see if there is a MIPS instruction that can do this operation.
5411 If not, try doing the same for the inverse operation. If that also
5412 fails, force CMP1 into a register and try again. */
5413 mode
= GET_MODE (cmp0
);
5414 if (mips_canonicalize_int_order_test (&code
, &cmp1
, mode
))
5415 mips_emit_binary (code
, target
, cmp0
, cmp1
);
5418 enum rtx_code inv_code
= reverse_condition (code
);
5419 if (!mips_canonicalize_int_order_test (&inv_code
, &cmp1
, mode
))
5421 cmp1
= force_reg (mode
, cmp1
);
5422 mips_emit_int_order_test (code
, invert_ptr
, target
, cmp0
, cmp1
);
5424 else if (invert_ptr
== 0)
5428 inv_target
= mips_force_binary (GET_MODE (target
),
5429 inv_code
, cmp0
, cmp1
);
5430 mips_emit_binary (XOR
, target
, inv_target
, const1_rtx
);
5434 *invert_ptr
= !*invert_ptr
;
5435 mips_emit_binary (inv_code
, target
, cmp0
, cmp1
);
5440 /* Return a register that is zero iff CMP0 and CMP1 are equal.
5441 The register will have the same mode as CMP0. */
5444 mips_zero_if_equal (rtx cmp0
, rtx cmp1
)
5446 if (cmp1
== const0_rtx
)
5449 if (uns_arith_operand (cmp1
, VOIDmode
))
5450 return expand_binop (GET_MODE (cmp0
), xor_optab
,
5451 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
5453 return expand_binop (GET_MODE (cmp0
), sub_optab
,
5454 cmp0
, cmp1
, 0, 0, OPTAB_DIRECT
);
5457 /* Convert *CODE into a code that can be used in a floating-point
5458 scc instruction (C.cond.fmt). Return true if the values of
5459 the condition code registers will be inverted, with 0 indicating
5460 that the condition holds. */
5463 mips_reversed_fp_cond (enum rtx_code
*code
)
5470 *code
= reverse_condition_maybe_unordered (*code
);
5478 /* Allocate a floating-point condition-code register of mode MODE.
5480 These condition code registers are used for certain kinds
5481 of compound operation, such as compare and branches, vconds,
5482 and built-in functions. At expand time, their use is entirely
5483 controlled by MIPS-specific code and is entirely internal
5484 to these compound operations.
5486 We could (and did in the past) expose condition-code values
5487 as pseudo registers and leave the register allocator to pick
5488 appropriate registers. The problem is that it is not practically
5489 possible for the rtl optimizers to guarantee that no spills will
5490 be needed, even when AVOID_CCMODE_COPIES is defined. We would
5491 therefore need spill and reload sequences to handle the worst case.
5493 Although such sequences do exist, they are very expensive and are
5494 not something we'd want to use. This is especially true of CCV2 and
5495 CCV4, where all the shuffling would greatly outweigh whatever benefit
5496 the vectorization itself provides.
5498 The main benefit of having more than one condition-code register
5499 is to allow the pipelining of operations, especially those involving
5500 comparisons and conditional moves. We don't really expect the
5501 registers to be live for long periods, and certainly never want
5502 them to be live across calls.
5504 Also, there should be no penalty attached to using all the available
5505 registers. They are simply bits in the same underlying FPU control
5508 We therefore expose the hardware registers from the outset and use
5509 a simple round-robin allocation scheme. */
5512 mips_allocate_fcc (machine_mode mode
)
5514 unsigned int regno
, count
;
5516 gcc_assert (TARGET_HARD_FLOAT
&& ISA_HAS_8CC
);
5520 else if (mode
== CCV2mode
)
5522 else if (mode
== CCV4mode
)
5527 cfun
->machine
->next_fcc
+= -cfun
->machine
->next_fcc
& (count
- 1);
5528 if (cfun
->machine
->next_fcc
> ST_REG_LAST
- ST_REG_FIRST
)
5529 cfun
->machine
->next_fcc
= 0;
5530 regno
= ST_REG_FIRST
+ cfun
->machine
->next_fcc
;
5531 cfun
->machine
->next_fcc
+= count
;
5532 return gen_rtx_REG (mode
, regno
);
5535 /* Convert a comparison into something that can be used in a branch or
5536 conditional move. On entry, *OP0 and *OP1 are the values being
5537 compared and *CODE is the code used to compare them.
5539 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
5540 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
5541 otherwise any standard branch condition can be used. The standard branch
5544 - EQ or NE between two registers.
5545 - any comparison between a register and zero.
5546 - if compact branches are available then any condition is valid. */
5549 mips_emit_compare (enum rtx_code
*code
, rtx
*op0
, rtx
*op1
, bool need_eq_ne_p
)
5554 if (GET_MODE_CLASS (GET_MODE (*op0
)) == MODE_INT
)
5556 if (!need_eq_ne_p
&& *op1
== const0_rtx
)
5558 else if (*code
== EQ
|| *code
== NE
)
5562 *op0
= mips_zero_if_equal (cmp_op0
, cmp_op1
);
5566 *op1
= force_reg (GET_MODE (cmp_op0
), cmp_op1
);
5568 else if (!need_eq_ne_p
&& TARGET_CB_MAYBE
)
5598 *op1
= force_reg (GET_MODE (cmp_op0
), cmp_op1
);
5608 /* The comparison needs a separate scc instruction. Store the
5609 result of the scc in *OP0 and compare it against zero. */
5610 bool invert
= false;
5611 *op0
= gen_reg_rtx (GET_MODE (cmp_op0
));
5612 mips_emit_int_order_test (*code
, &invert
, *op0
, cmp_op0
, cmp_op1
);
5613 *code
= (invert
? EQ
: NE
);
5617 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0
)))
5619 *op0
= gen_rtx_REG (CCDSPmode
, CCDSP_CC_REGNUM
);
5620 mips_emit_binary (*code
, *op0
, cmp_op0
, cmp_op1
);
5626 enum rtx_code cmp_code
;
5628 /* Floating-point tests use a separate C.cond.fmt or CMP.cond.fmt
5629 comparison to set a register. The branch or conditional move will
5630 then compare that register against zero.
5632 Set CMP_CODE to the code of the comparison instruction and
5633 *CODE to the code that the branch or move should use. */
5637 /* All FP conditions can be implemented directly with CMP.cond.fmt
5638 or by reversing the operands. */
5640 *op0
= gen_reg_rtx (CCFmode
);
5644 /* Three FP conditions cannot be implemented by reversing the
5645 operands for C.cond.fmt, instead a reversed condition code is
5646 required and a test for false. */
5647 *code
= mips_reversed_fp_cond (&cmp_code
) ? EQ
: NE
;
5649 *op0
= mips_allocate_fcc (CCmode
);
5651 *op0
= gen_rtx_REG (CCmode
, FPSW_REGNUM
);
5655 mips_emit_binary (cmp_code
, *op0
, cmp_op0
, cmp_op1
);
5659 /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
5660 and OPERAND[3]. Store the result in OPERANDS[0].
5662 On 64-bit targets, the mode of the comparison and target will always be
5663 SImode, thus possibly narrower than that of the comparison's operands. */
5666 mips_expand_scc (rtx operands
[])
5668 rtx target
= operands
[0];
5669 enum rtx_code code
= GET_CODE (operands
[1]);
5670 rtx op0
= operands
[2];
5671 rtx op1
= operands
[3];
5673 gcc_assert (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_INT
);
5675 if (code
== EQ
|| code
== NE
)
5678 && reg_imm10_operand (op1
, GET_MODE (op1
)))
5679 mips_emit_binary (code
, target
, op0
, op1
);
5682 rtx zie
= mips_zero_if_equal (op0
, op1
);
5683 mips_emit_binary (code
, target
, zie
, const0_rtx
);
5687 mips_emit_int_order_test (code
, 0, target
, op0
, op1
);
5690 /* Compare OPERANDS[1] with OPERANDS[2] using comparison code
5691 CODE and jump to OPERANDS[3] if the condition holds. */
5694 mips_expand_conditional_branch (rtx
*operands
)
5696 enum rtx_code code
= GET_CODE (operands
[0]);
5697 rtx op0
= operands
[1];
5698 rtx op1
= operands
[2];
5701 mips_emit_compare (&code
, &op0
, &op1
, TARGET_MIPS16
);
5702 condition
= gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
5703 emit_jump_insn (gen_condjump (condition
, operands
[3]));
5708 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
5709 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
5712 mips_expand_vcondv2sf (rtx dest
, rtx true_src
, rtx false_src
,
5713 enum rtx_code cond
, rtx cmp_op0
, rtx cmp_op1
)
5718 reversed_p
= mips_reversed_fp_cond (&cond
);
5719 cmp_result
= mips_allocate_fcc (CCV2mode
);
5720 emit_insn (gen_scc_ps (cmp_result
,
5721 gen_rtx_fmt_ee (cond
, VOIDmode
, cmp_op0
, cmp_op1
)));
5723 emit_insn (gen_mips_cond_move_tf_ps (dest
, false_src
, true_src
,
5726 emit_insn (gen_mips_cond_move_tf_ps (dest
, true_src
, false_src
,
5730 /* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0]
5731 if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
5734 mips_expand_conditional_move (rtx
*operands
)
5737 enum rtx_code code
= GET_CODE (operands
[1]);
5738 rtx op0
= XEXP (operands
[1], 0);
5739 rtx op1
= XEXP (operands
[1], 1);
5741 mips_emit_compare (&code
, &op0
, &op1
, true);
5742 cond
= gen_rtx_fmt_ee (code
, GET_MODE (op0
), op0
, op1
);
5744 /* There is no direct support for general conditional GP move involving
5745 two registers using SEL. */
5747 && INTEGRAL_MODE_P (GET_MODE (operands
[2]))
5748 && register_operand (operands
[2], VOIDmode
)
5749 && register_operand (operands
[3], VOIDmode
))
5751 machine_mode mode
= GET_MODE (operands
[0]);
5752 rtx temp
= gen_reg_rtx (mode
);
5753 rtx temp2
= gen_reg_rtx (mode
);
5755 emit_insn (gen_rtx_SET (temp
,
5756 gen_rtx_IF_THEN_ELSE (mode
, cond
,
5757 operands
[2], const0_rtx
)));
5759 /* Flip the test for the second operand. */
5760 cond
= gen_rtx_fmt_ee ((code
== EQ
) ? NE
: EQ
, GET_MODE (op0
), op0
, op1
);
5762 emit_insn (gen_rtx_SET (temp2
,
5763 gen_rtx_IF_THEN_ELSE (mode
, cond
,
5764 operands
[3], const0_rtx
)));
5766 /* Merge the two results, at least one is guaranteed to be zero. */
5767 emit_insn (gen_rtx_SET (operands
[0], gen_rtx_IOR (mode
, temp
, temp2
)));
5771 if (FLOAT_MODE_P (GET_MODE (operands
[2])) && !ISA_HAS_SEL
)
5773 operands
[2] = force_reg (GET_MODE (operands
[0]), operands
[2]);
5774 operands
[3] = force_reg (GET_MODE (operands
[0]), operands
[3]);
5777 emit_insn (gen_rtx_SET (operands
[0],
5778 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]), cond
,
5779 operands
[2], operands
[3])));
5783 /* Perform the comparison in COMPARISON, then trap if the condition holds. */
5786 mips_expand_conditional_trap (rtx comparison
)
5792 /* MIPS conditional trap instructions don't have GT or LE flavors,
5793 so we must swap the operands and convert to LT and GE respectively. */
5794 code
= GET_CODE (comparison
);
5801 code
= swap_condition (code
);
5802 op0
= XEXP (comparison
, 1);
5803 op1
= XEXP (comparison
, 0);
5807 op0
= XEXP (comparison
, 0);
5808 op1
= XEXP (comparison
, 1);
5812 mode
= GET_MODE (XEXP (comparison
, 0));
5813 op0
= force_reg (mode
, op0
);
5814 if (!(ISA_HAS_COND_TRAPI
5815 ? arith_operand (op1
, mode
)
5816 : reg_or_0_operand (op1
, mode
)))
5817 op1
= force_reg (mode
, op1
);
5819 emit_insn (gen_rtx_TRAP_IF (VOIDmode
,
5820 gen_rtx_fmt_ee (code
, mode
, op0
, op1
),
5824 /* Initialize *CUM for a call to a function of type FNTYPE. */
5827 mips_init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
)
5829 memset (cum
, 0, sizeof (*cum
));
5830 cum
->prototype
= (fntype
&& prototype_p (fntype
));
5831 cum
->gp_reg_found
= (cum
->prototype
&& stdarg_p (fntype
));
5834 /* Fill INFO with information about a single argument. CUM is the
5835 cumulative state for earlier arguments. MODE is the mode of this
5836 argument and TYPE is its type (if known). NAMED is true if this
5837 is a named (fixed) argument rather than a variable one. */
5840 mips_get_arg_info (struct mips_arg_info
*info
, const CUMULATIVE_ARGS
*cum
,
5841 machine_mode mode
, const_tree type
, bool named
)
5843 bool doubleword_aligned_p
;
5844 unsigned int num_bytes
, num_words
, max_regs
;
5846 /* Work out the size of the argument. */
5847 num_bytes
= type
? int_size_in_bytes (type
) : GET_MODE_SIZE (mode
);
5848 num_words
= (num_bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
5850 /* Decide whether it should go in a floating-point register, assuming
5851 one is free. Later code checks for availability.
5853 The checks against UNITS_PER_FPVALUE handle the soft-float and
5854 single-float cases. */
5858 /* The EABI conventions have traditionally been defined in terms
5859 of TYPE_MODE, regardless of the actual type. */
5860 info
->fpr_p
= ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5861 || mode
== V2SFmode
)
5862 && GET_MODE_SIZE (mode
) <= UNITS_PER_FPVALUE
);
5867 /* Only leading floating-point scalars are passed in
5868 floating-point registers. We also handle vector floats the same
5869 say, which is OK because they are not covered by the standard ABI. */
5870 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
|| mode
!= V2SFmode
);
5871 info
->fpr_p
= (!cum
->gp_reg_found
5872 && cum
->arg_number
< 2
5874 || SCALAR_FLOAT_TYPE_P (type
)
5875 || VECTOR_FLOAT_TYPE_P (type
))
5876 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
5877 || mode
== V2SFmode
)
5878 && GET_MODE_SIZE (mode
) <= UNITS_PER_FPVALUE
);
5883 /* Scalar, complex and vector floating-point types are passed in
5884 floating-point registers, as long as this is a named rather
5885 than a variable argument. */
5886 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
|| mode
!= V2SFmode
);
5887 info
->fpr_p
= (named
5888 && (type
== 0 || FLOAT_TYPE_P (type
))
5889 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
5890 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5891 || mode
== V2SFmode
)
5892 && GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_FPVALUE
);
5894 /* ??? According to the ABI documentation, the real and imaginary
5895 parts of complex floats should be passed in individual registers.
5896 The real and imaginary parts of stack arguments are supposed
5897 to be contiguous and there should be an extra word of padding
5900 This has two problems. First, it makes it impossible to use a
5901 single "void *" va_list type, since register and stack arguments
5902 are passed differently. (At the time of writing, MIPSpro cannot
5903 handle complex float varargs correctly.) Second, it's unclear
5904 what should happen when there is only one register free.
5906 For now, we assume that named complex floats should go into FPRs
5907 if there are two FPRs free, otherwise they should be passed in the
5908 same way as a struct containing two floats. */
5910 && GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5911 && GET_MODE_UNIT_SIZE (mode
) < UNITS_PER_FPVALUE
)
5913 if (cum
->num_gprs
>= MAX_ARGS_IN_REGISTERS
- 1)
5914 info
->fpr_p
= false;
5924 /* See whether the argument has doubleword alignment. */
5925 doubleword_aligned_p
= (mips_function_arg_boundary (mode
, type
)
5928 /* Set REG_OFFSET to the register count we're interested in.
5929 The EABI allocates the floating-point registers separately,
5930 but the other ABIs allocate them like integer registers. */
5931 info
->reg_offset
= (mips_abi
== ABI_EABI
&& info
->fpr_p
5935 /* Advance to an even register if the argument is doubleword-aligned. */
5936 if (doubleword_aligned_p
)
5937 info
->reg_offset
+= info
->reg_offset
& 1;
5939 /* Work out the offset of a stack argument. */
5940 info
->stack_offset
= cum
->stack_words
;
5941 if (doubleword_aligned_p
)
5942 info
->stack_offset
+= info
->stack_offset
& 1;
5944 max_regs
= MAX_ARGS_IN_REGISTERS
- info
->reg_offset
;
5946 /* Partition the argument between registers and stack. */
5947 info
->reg_words
= MIN (num_words
, max_regs
);
5948 info
->stack_words
= num_words
- info
->reg_words
;
5951 /* INFO describes a register argument that has the normal format for the
5952 argument's mode. Return the register it uses, assuming that FPRs are
5953 available if HARD_FLOAT_P. */
5956 mips_arg_regno (const struct mips_arg_info
*info
, bool hard_float_p
)
5958 if (!info
->fpr_p
|| !hard_float_p
)
5959 return GP_ARG_FIRST
+ info
->reg_offset
;
5960 else if (mips_abi
== ABI_32
&& TARGET_DOUBLE_FLOAT
&& info
->reg_offset
> 0)
5961 /* In o32, the second argument is always passed in $f14
5962 for TARGET_DOUBLE_FLOAT, regardless of whether the
5963 first argument was a word or doubleword. */
5964 return FP_ARG_FIRST
+ 2;
5966 return FP_ARG_FIRST
+ info
->reg_offset
;
5969 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
5972 mips_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
5974 return !TARGET_OLDABI
;
/* Implement TARGET_FUNCTION_ARG.  Decide where (and whether) the argument
   described by ARG is passed in registers, given the allocation state in
   CUM_V.

   NOTE(review): the extraction that produced this file dropped a number of
   source lines (the embedded original numbering jumps); each gap is flagged
   below and must be restored from the upstream file before this compiles.  */

mips_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
/* NOTE(review): return type (rtx) and opening brace dropped here.  */
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  struct mips_arg_info info;

  /* We will be called with an end marker after the last argument
     has been seen.  Whatever we return will be passed to the call expander.
     If we need a MIPS16 fp_code, return a REG with the code stored as
     the mode -- presumably; the tail of this comment was lost in
     extraction.  */
  if (arg.end_marker_p ())
      if (TARGET_MIPS16 && cum->fp_code != 0)
	return gen_rtx_REG ((machine_mode) cum->fp_code, 0);
      /* NOTE(review): the non-MIPS16 arm of the end-marker case was
	 dropped here.  */

  mips_get_arg_info (&info, cum, arg.mode, arg.type, arg.named);

  /* Return straight away if the whole argument is passed on the stack.  */
  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
    /* NOTE(review): the return statement for the all-on-stack case was
       dropped here.  */

  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
     contains a double in its entirety, then that 64-bit chunk is passed
     in a floating-point register.  */
  /* NOTE(review): the leading conditions of this test (and its "if")
     were dropped here.  */
      && TARGET_HARD_FLOAT
      /* NOTE(review): further dropped conditions.  */
      && TREE_CODE (arg.type) == RECORD_TYPE
      && TYPE_SIZE_UNIT (arg.type)
      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (arg.type)))
      /* NOTE(review): block opening and field declaration dropped.  */

      /* First check to see if there is any such field.  */
      for (field = TYPE_FIELDS (arg.type); field; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL
	    && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
	    && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
	    && tree_fits_shwi_p (bit_position (field))
	    && int_bit_position (field) % BITS_PER_WORD == 0)
	  /* NOTE(review): the loop exit and the surrounding "was a field
	     found" test were dropped here.  */

	  /* Now handle the special case by returning a PARALLEL
	     indicating where each 64-bit chunk goes.  INFO.REG_WORDS
	     chunks are passed in registers.  */
	  HOST_WIDE_INT bitpos;
	  /* NOTE(review): declarations of the chunk index and the result
	     rtx were dropped here.  */

	  /* assign_parms checks the mode of ENTRY_PARM, so we must
	     use the actual mode here.  */
	  ret = gen_rtx_PARALLEL (arg.mode, rtvec_alloc (info.reg_words));

	  /* NOTE(review): initialization of the running bit position was
	     dropped here.  */
	  field = TYPE_FIELDS (arg.type);
	  for (i = 0; i < info.reg_words; i++)
	      /* NOTE(review): loop-body opening and per-chunk register
		 declaration dropped.  */
	      for (; field; field = DECL_CHAIN (field))
		if (TREE_CODE (field) == FIELD_DECL
		    && int_bit_position (field) >= bitpos)
		  /* NOTE(review): the "break" and the head of the
		     chunk-classification test were dropped here.  */
		  && int_bit_position (field) == bitpos
		  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
		  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
		/* Whole-double chunk: it goes in an FPR.  */
		reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
	      /* NOTE(review): "else" dropped.  */
		reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);

	      /* NOTE(review): the XVECEXP lvalue of this assignment was
		 dropped here.  */
		= gen_rtx_EXPR_LIST (VOIDmode, reg,
				     GEN_INT (bitpos / BITS_PER_UNIT));

	      bitpos += BITS_PER_WORD;
	  /* NOTE(review): loop close and the return of the PARALLEL were
	     dropped here.  */

  /* Handle the n32/n64 conventions for passing complex floating-point
     arguments in FPR pairs.  The real part goes in the lower register
     and the imaginary part goes in the upper register.  */
  /* NOTE(review): the leading conditions of this test (and its "if")
     were dropped here.  */
      && GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT)
      /* NOTE(review): block opening and local declarations dropped.  */
      inner = GET_MODE_INNER (arg.mode);
      regno = FP_ARG_FIRST + info.reg_offset;
      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
	  /* Real part in registers, imaginary part on stack.  */
	  gcc_assert (info.stack_words == info.reg_words);
	  return gen_rtx_REG (inner, regno);
      /* NOTE(review): "else" and its opening brace dropped.  */
	  gcc_assert (info.stack_words == 0);
	  real = gen_rtx_EXPR_LIST (VOIDmode,
				    gen_rtx_REG (inner, regno),
				    /* NOTE(review): the byte-offset operand
				       of this EXPR_LIST was dropped.  */
	  imag = gen_rtx_EXPR_LIST (VOIDmode,
				    /* NOTE(review): the head of the inner
				       gen_rtx_REG call was dropped.  */
				    regno + info.reg_words / 2),
				    GEN_INT (GET_MODE_SIZE (inner)));
	  return gen_rtx_PARALLEL (arg.mode, gen_rtvec (2, real, imag));

  /* Otherwise the argument goes in a single register of the usual kind.  */
  return gen_rtx_REG (arg.mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
6103 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
6106 mips_function_arg_advance (cumulative_args_t cum_v
,
6107 const function_arg_info
&arg
)
6109 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
6110 struct mips_arg_info info
;
6112 mips_get_arg_info (&info
, cum
, arg
.mode
, arg
.type
, arg
.named
);
6115 cum
->gp_reg_found
= true;
6117 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
6118 an explanation of what this code does. It assumes that we're using
6119 either the o32 or the o64 ABI, both of which pass at most 2 arguments
6121 if (cum
->arg_number
< 2 && info
.fpr_p
)
6122 cum
->fp_code
+= (arg
.mode
== SFmode
? 1 : 2) << (cum
->arg_number
* 2);
6124 /* Advance the register count. This has the effect of setting
6125 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
6126 argument required us to skip the final GPR and pass the whole
6127 argument on the stack. */
6128 if (mips_abi
!= ABI_EABI
|| !info
.fpr_p
)
6129 cum
->num_gprs
= info
.reg_offset
+ info
.reg_words
;
6130 else if (info
.reg_words
> 0)
6131 cum
->num_fprs
+= MAX_FPRS_PER_FMT
;
6133 /* Advance the stack word count. */
6134 if (info
.stack_words
> 0)
6135 cum
->stack_words
= info
.stack_offset
+ info
.stack_words
;
6140 /* Implement TARGET_ARG_PARTIAL_BYTES. */
6143 mips_arg_partial_bytes (cumulative_args_t cum
, const function_arg_info
&arg
)
6145 struct mips_arg_info info
;
6147 mips_get_arg_info (&info
, get_cumulative_args (cum
),
6148 arg
.mode
, arg
.type
, arg
.named
);
6149 return info
.stack_words
> 0 ? info
.reg_words
* UNITS_PER_WORD
: 0;
6152 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
6153 least PARM_BOUNDARY bits of alignment, but will be given anything up
6154 to STACK_BOUNDARY bits if the type requires it. */
6157 mips_function_arg_boundary (machine_mode mode
, const_tree type
)
6159 unsigned int alignment
;
6161 alignment
= type
? TYPE_ALIGN (type
) : GET_MODE_ALIGNMENT (mode
);
6162 if (alignment
< PARM_BOUNDARY
)
6163 alignment
= PARM_BOUNDARY
;
6164 if (alignment
> STACK_BOUNDARY
)
6165 alignment
= STACK_BOUNDARY
;
6169 /* Implement TARGET_GET_RAW_RESULT_MODE and TARGET_GET_RAW_ARG_MODE. */
6171 static fixed_size_mode
6172 mips_get_reg_raw_mode (int regno
)
6174 if (TARGET_FLOATXX
&& FP_REG_P (regno
))
6176 return default_get_reg_raw_mode (regno
);
6179 /* Implement TARGET_FUNCTION_ARG_PADDING; return PAD_UPWARD if the first
6180 byte of the stack slot has useful data, PAD_DOWNWARD if the last byte
6183 static pad_direction
6184 mips_function_arg_padding (machine_mode mode
, const_tree type
)
6186 /* On little-endian targets, the first byte of every stack argument
6187 is passed in the first byte of the stack slot. */
6188 if (!BYTES_BIG_ENDIAN
)
6191 /* Otherwise, integral types are padded downward: the last byte of a
6192 stack argument is passed in the last byte of the stack slot. */
6194 ? (INTEGRAL_TYPE_P (type
)
6195 || POINTER_TYPE_P (type
)
6196 || FIXED_POINT_TYPE_P (type
))
6197 : (SCALAR_INT_MODE_P (mode
)
6198 || ALL_SCALAR_FIXED_POINT_MODE_P (mode
)))
6199 return PAD_DOWNWARD
;
6201 /* Big-endian o64 pads floating-point arguments downward. */
6202 if (mips_abi
== ABI_O64
)
6203 if (type
!= 0 ? FLOAT_TYPE_P (type
) : GET_MODE_CLASS (mode
) == MODE_FLOAT
)
6204 return PAD_DOWNWARD
;
6206 /* Other types are padded upward for o32, o64, n32 and n64. */
6207 if (mips_abi
!= ABI_EABI
)
6210 /* Arguments smaller than a stack slot are padded downward. */
6212 ? GET_MODE_BITSIZE (mode
) >= PARM_BOUNDARY
6213 : int_size_in_bytes (type
) >= (PARM_BOUNDARY
/ BITS_PER_UNIT
))
6216 return PAD_DOWNWARD
;
6219 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
6220 if the least significant byte of the register has useful data. Return
6221 the opposite if the most significant byte does. */
6224 mips_pad_reg_upward (machine_mode mode
, tree type
)
6226 /* No shifting is required for floating-point arguments. */
6227 if (type
!= 0 ? FLOAT_TYPE_P (type
) : GET_MODE_CLASS (mode
) == MODE_FLOAT
)
6228 return !BYTES_BIG_ENDIAN
;
6230 /* Otherwise, apply the same padding to register arguments as we do
6231 to stack arguments. */
6232 return mips_function_arg_padding (mode
, type
) == PAD_UPWARD
;
6235 /* Return nonzero when an argument must be passed by reference. */
6238 mips_pass_by_reference (cumulative_args_t
, const function_arg_info
&arg
)
6240 if (mips_abi
== ABI_EABI
)
6244 /* ??? How should SCmode be handled? */
6245 if (arg
.mode
== DImode
|| arg
.mode
== DFmode
6246 || arg
.mode
== DQmode
|| arg
.mode
== UDQmode
6247 || arg
.mode
== DAmode
|| arg
.mode
== UDAmode
)
6250 size
= arg
.type_size_in_bytes ();
6251 return size
== -1 || size
> UNITS_PER_WORD
;
6255 /* If we have a variable-sized parameter, we have no choice. */
6256 return targetm
.calls
.must_pass_in_stack (arg
);
6260 /* Implement TARGET_CALLEE_COPIES. */
6263 mips_callee_copies (cumulative_args_t
, const function_arg_info
&arg
)
6265 return mips_abi
== ABI_EABI
&& arg
.named
;
6268 /* See whether VALTYPE is a record whose fields should be returned in
6269 floating-point registers. If so, return the number of fields and
6270 list them in FIELDS (which should have two elements). Return 0
6273 For n32 & n64, a structure with one or two fields is returned in
6274 floating-point registers as long as every field has a floating-point
6278 mips_fpr_return_fields (const_tree valtype
, tree
*fields
)
6286 if (TREE_CODE (valtype
) != RECORD_TYPE
)
6290 for (field
= TYPE_FIELDS (valtype
); field
!= 0; field
= DECL_CHAIN (field
))
6292 if (TREE_CODE (field
) != FIELD_DECL
)
6295 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field
)))
6301 fields
[i
++] = field
;
6306 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6307 a value in the most significant part of $2/$3 if:
6309 - the target is big-endian;
6311 - the value has a structure or union type (we generalize this to
6312 cover aggregates from other languages too); and
6314 - the structure is not returned in floating-point registers. */
6317 mips_return_in_msb (const_tree valtype
)
6321 return (TARGET_NEWABI
6322 && TARGET_BIG_ENDIAN
6323 && AGGREGATE_TYPE_P (valtype
)
6324 && mips_fpr_return_fields (valtype
, fields
) == 0);
6327 /* Return true if the function return value MODE will get returned in a
6328 floating-point register. */
6331 mips_return_mode_in_fpr_p (machine_mode mode
)
6333 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
|| mode
!= V2SFmode
);
6334 return ((GET_MODE_CLASS (mode
) == MODE_FLOAT
6336 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
6337 && GET_MODE_UNIT_SIZE (mode
) <= UNITS_PER_HWFPVALUE
);
6340 /* Return the representation of an FPR return register when the
6341 value being returned in FP_RETURN has mode VALUE_MODE and the
6342 return type itself has mode TYPE_MODE. On NewABI targets,
6343 the two modes may be different for structures like:
6345 struct __attribute__((packed)) foo { float f; }
6347 where we return the SFmode value of "f" in FP_RETURN, but where
6348 the structure itself has mode BLKmode. */
6351 mips_return_fpr_single (machine_mode type_mode
,
6352 machine_mode value_mode
)
6356 x
= gen_rtx_REG (value_mode
, FP_RETURN
);
6357 if (type_mode
!= value_mode
)
6359 x
= gen_rtx_EXPR_LIST (VOIDmode
, x
, const0_rtx
);
6360 x
= gen_rtx_PARALLEL (type_mode
, gen_rtvec (1, x
));
6365 /* Return a composite value in a pair of floating-point registers.
6366 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6367 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
6370 For n32 & n64, $f0 always holds the first value and $f2 the second.
6371 Otherwise the values are packed together as closely as possible. */
6374 mips_return_fpr_pair (machine_mode mode
,
6375 machine_mode mode1
, HOST_WIDE_INT offset1
,
6376 machine_mode mode2
, HOST_WIDE_INT offset2
)
6380 inc
= (TARGET_NEWABI
|| mips_abi
== ABI_32
? 2 : MAX_FPRS_PER_FMT
);
6381 return gen_rtx_PARALLEL
6384 gen_rtx_EXPR_LIST (VOIDmode
,
6385 gen_rtx_REG (mode1
, FP_RETURN
),
6387 gen_rtx_EXPR_LIST (VOIDmode
,
6388 gen_rtx_REG (mode2
, FP_RETURN
+ inc
),
6389 GEN_INT (offset2
))));
/* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
   For normal calls, VALTYPE is the return type and MODE is VOIDmode.
   For libcalls, VALTYPE is null and MODE is the mode of the return value.

   NOTE(review): extraction dropped several lines of this function (the
   embedded original numbering jumps); gaps are flagged below and must be
   restored from the upstream file.  */

mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
/* NOTE(review): the remaining parameter, the return type, the opening
   brace and the leading local declarations were dropped here.  */
  if (fn_decl_or_type && DECL_P (fn_decl_or_type))
    func = fn_decl_or_type;
/* NOTE(review): the guard selecting the VALTYPE (non-libcall) path was
   dropped here.  */
      mode = TYPE_MODE (valtype);
      unsigned_p = TYPE_UNSIGNED (valtype);

      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes,
	 return values, promote the mode here too.  */
      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

      /* Handle structures whose fields are returned in $f0/$f2.  */
      switch (mips_fpr_return_fields (valtype, fields))
	/* NOTE(review): the case label for a single FPR field was
	   dropped here.  */
	  return mips_return_fpr_single (mode,
					 TYPE_MODE (TREE_TYPE (fields[0])));

	/* NOTE(review): the case label for two FPR fields was dropped
	   here.  */
	  return mips_return_fpr_pair (mode,
				       TYPE_MODE (TREE_TYPE (fields[0])),
				       int_byte_position (fields[0]),
				       TYPE_MODE (TREE_TYPE (fields[1])),
				       int_byte_position (fields[1]));

      /* If a value is passed in the most significant part of a register, see
	 whether we have to round the mode up to a whole number of words.  */
      if (mips_return_in_msb (valtype))
	  HOST_WIDE_INT size = int_size_in_bytes (valtype);
	  if (size % UNITS_PER_WORD != 0)
	      size += UNITS_PER_WORD - size % UNITS_PER_WORD;
	      mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();

      /* For EABI, the class of return register depends entirely on MODE.
	 For example, "struct { some_type x; }" and "union { some_type x; }"
	 are returned in the same way as a bare "some_type" would be.
	 Other ABIs only use FPRs for scalar, complex or vector types.  */
      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
	return gen_rtx_REG (mode, GP_RETURN);

/* NOTE(review): the guard for the hard-float FPR-return path was
   dropped here.  */
      /* Handle long doubles for n32 & n64.  */
/* NOTE(review): the TFmode test and the first pair of FPR-pair operands
   were dropped here.  */
	return mips_return_fpr_pair (mode,
				     DImode, GET_MODE_SIZE (mode) / 2);

      if (mips_return_mode_in_fpr_p (mode))
	  if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	    return mips_return_fpr_pair (mode,
					 GET_MODE_INNER (mode), 0,
					 GET_MODE_INNER (mode),
					 GET_MODE_SIZE (mode) / 2);
	  /* NOTE(review): "else" dropped here.  */
	    return gen_rtx_REG (mode, FP_RETURN);

  return gen_rtx_REG (mode, GP_RETURN);
6477 /* Implement TARGET_FUNCTION_VALUE. */
6480 mips_function_value (const_tree valtype
, const_tree fn_decl_or_type
,
6481 bool outgoing ATTRIBUTE_UNUSED
)
6483 return mips_function_value_1 (valtype
, fn_decl_or_type
, VOIDmode
);
6486 /* Implement TARGET_LIBCALL_VALUE. */
6489 mips_libcall_value (machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
6491 return mips_function_value_1 (NULL_TREE
, NULL_TREE
, mode
);
6494 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
6496 On the MIPS, R2 R3 and F0 F2 are the only register thus used. */
6499 mips_function_value_regno_p (const unsigned int regno
)
6501 /* Most types only require one GPR or one FPR for return values but for
6502 hard-float two FPRs can be used for _Complex types (for all ABIs)
6503 and long doubles (for n64). */
6504 if (regno
== GP_RETURN
6505 || regno
== FP_RETURN
6506 || (FP_RETURN
!= GP_RETURN
6507 && regno
== FP_RETURN
+ 2))
6510 /* For o32 FP32, _Complex double will be returned in four 32-bit registers.
6511 This does not apply to o32 FPXX as floating-point function argument and
6512 return registers are described as 64-bit even though floating-point
6513 registers are primarily described as 32-bit internally.
6514 See: mips_get_reg_raw_mode. */
6515 if ((mips_abi
== ABI_32
&& TARGET_FLOAT32
)
6516 && FP_RETURN
!= GP_RETURN
6517 && (regno
== FP_RETURN
+ 1
6518 || regno
== FP_RETURN
+ 3))
6524 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
6525 all BLKmode objects are returned in memory. Under the n32, n64
6526 and embedded ABIs, small structures are returned in a register.
6527 Objects with varying size must still be returned in memory, of
6531 mips_return_in_memory (const_tree type
, const_tree fndecl ATTRIBUTE_UNUSED
)
6534 /* Ensure that any floating point vector types are returned via memory
6535 even if they are supported through a vector mode with some ASEs. */
6536 return (VECTOR_FLOAT_TYPE_P (type
)
6537 || TYPE_MODE (type
) == BLKmode
);
6539 return (!IN_RANGE (int_size_in_bytes (type
), 0, 2 * UNITS_PER_WORD
));
/* Implement TARGET_SETUP_INCOMING_VARARGS.  Save the anonymous argument
   registers (GPRs and, for EABI float varargs, FPRs) to the stack so
   that va_arg can later find them.

   NOTE(review): extraction dropped several lines (the embedded original
   numbering jumps); gaps are flagged below and must be restored from the
   upstream file.  */

mips_setup_incoming_varargs (cumulative_args_t cum,
			     const function_arg_info &arg,
			     int *pretend_size ATTRIBUTE_UNUSED, int no_rtl)
/* NOTE(review): return type and opening brace dropped here.  */
  CUMULATIVE_ARGS local_cum;
  int gp_saved, fp_saved;

  /* The caller has advanced CUM up to, but not beyond, the last named
     argument.  Advance a local copy of CUM past the last "real" named
     argument, to find out how many registers are left over.  */
  local_cum = *get_cumulative_args (cum);
  mips_function_arg_advance (pack_cumulative_args (&local_cum), arg);

  /* Found out how many registers we need to save.  */
  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
  fp_saved = (EABI_FLOAT_VARARGS_P
	      ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* NOTE(review): the ": 0" arm, the !no_rtl test, the gp_saved > 0 guard
   and the local rtx declarations were dropped here.  */
      ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
			   REG_PARM_STACK_SPACE (cfun->decl)
			   - gp_saved * UNITS_PER_WORD);
      mem = gen_frame_mem (BLKmode, ptr);
      set_mem_alias_set (mem, get_varargs_alias_set ());

      move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
/* NOTE(review): the remaining move_block_from_reg operands and the
   fp_saved > 0 guard were dropped here.  */
      /* We can't use move_block_from_reg, because it will use
	 the wrong mode -- presumably; the tail of this comment and the
	 local declarations that followed it were dropped.  */

      /* Set OFF to the offset from virtual_incoming_args_rtx of
	 the first float register.  The FP save area lies below
	 the integer one, and is aligned to UNITS_PER_FPVALUE bytes.  */
      off = ROUND_DOWN (-gp_saved * UNITS_PER_WORD, UNITS_PER_FPVALUE);
      off -= fp_saved * UNITS_PER_FPREG;

      mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;

      for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
	   i += MAX_FPRS_PER_FMT)
	  /* NOTE(review): the loop-body opening and the per-iteration
	     rtx declarations were dropped here.  */
	  ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
	  mem = gen_frame_mem (mode, ptr);
	  set_mem_alias_set (mem, get_varargs_alias_set ());
	  mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
	  off += UNITS_PER_HWFPVALUE;

  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
    cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
				   + fp_saved * UNITS_PER_FPREG);
/* Implement TARGET_BUILTIN_VA_LIST.

   NOTE(review): extraction dropped several lines (the embedded original
   numbering jumps); gaps are flagged below and must be restored from the
   upstream file.  */

mips_build_builtin_va_list (void)
/* NOTE(review): return type (tree) and opening brace dropped here.  */
  if (EABI_FLOAT_VARARGS_P)
      /* We keep 3 pointers, and two offsets.

	 Two pointers are to the overflow area, which starts at the CFA.
	 One of these is constant, for addressing into the GPR save area
	 below it.  The other is advanced up the stack through the
	 overflow region (NOTE(review): the tail of this sentence was
	 dropped).

	 The third pointer is to the bottom of the GPR save area.
	 Since the FPR save area is just below it, we can address
	 FPR slots off this pointer.

	 We also keep two one-byte offsets, which are to be subtracted
	 from the constant pointers to yield addresses in the GPR and
	 FPR save areas.  These are downcounted as float or non-float
	 arguments are used, and when they get to zero, the argument
	 must be obtained from the overflow region.  */
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
      /* NOTE(review): declarations of the index/array temporaries were
	 dropped here.  */

      record = lang_hooks.types.make_type (RECORD_TYPE);

      f_ovfl = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__overflow_argptr"),
/* NOTE(review): the field-type argument of this build_decl call was
   dropped (likewise for the two calls below).  */
      f_gtop = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__gpr_top"),
      f_ftop = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__fpr_top"),
      f_goff = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__gpr_offset"),
			   unsigned_char_type_node);
      f_foff = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__fpr_offset"),
			   unsigned_char_type_node);
      /* Explicitly pad to the size of a pointer, so that -Wpadded won't
	 warn on every user file.  */
      index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
      array = build_array_type (unsigned_char_type_node,
				build_index_type (index));
      f_res = build_decl (BUILTINS_LOCATION,
			  FIELD_DECL, get_identifier ("__reserved"), array);

      DECL_FIELD_CONTEXT (f_ovfl) = record;
      DECL_FIELD_CONTEXT (f_gtop) = record;
      DECL_FIELD_CONTEXT (f_ftop) = record;
      DECL_FIELD_CONTEXT (f_goff) = record;
      DECL_FIELD_CONTEXT (f_foff) = record;
      DECL_FIELD_CONTEXT (f_res) = record;

      TYPE_FIELDS (record) = f_ovfl;
      DECL_CHAIN (f_ovfl) = f_gtop;
      DECL_CHAIN (f_gtop) = f_ftop;
      DECL_CHAIN (f_ftop) = f_goff;
      DECL_CHAIN (f_goff) = f_foff;
      DECL_CHAIN (f_foff) = f_res;

      layout_type (record);
/* NOTE(review): the return of the laid-out record, the closing brace and
   the "else" were dropped here.  */
    /* Otherwise, we use 'void *'.  */
    return ptr_type_node;
/* Implement TARGET_EXPAND_BUILTIN_VA_START.

   NOTE(review): extraction dropped several lines (the embedded original
   numbering jumps); gaps are flagged below and must be restored from the
   upstream file.  */

mips_va_start (tree valist, rtx nextarg)
/* NOTE(review): return type and opening brace dropped here.  */
  if (EABI_FLOAT_VARARGS_P)
      const CUMULATIVE_ARGS *cum;
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, gtop, ftop, goff, foff;
      /* NOTE(review): declaration of the expression temporary was
	 dropped here.  */
      int gpr_save_area_size;
      int fpr_save_area_size;
      /* NOTE(review): declaration of fpr_offset was dropped here.  */

      cum = &crtl->args.info;

/* NOTE(review): the gpr_save_area_size lvalue of this assignment was
   dropped.  */
	= (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
/* NOTE(review): the fpr_save_area_size lvalue of this assignment was
   dropped.  */
	= (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;

      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = DECL_CHAIN (f_ovfl);
      f_ftop = DECL_CHAIN (f_gtop);
      f_goff = DECL_CHAIN (f_ftop);
      f_foff = DECL_CHAIN (f_goff);

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* NOTE(review): the trailing NULL_TREE operand of this build3 call was
   dropped (likewise for the four calls below).  */
      gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
      ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
      goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
      foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,

      /* Emit code to initialize OVFL, which points to the next varargs
	 stack argument.  CUM->STACK_WORDS gives the number of stack
	 words used by named arguments.  */
      t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
      if (cum->stack_words > 0)
	t = fold_build_pointer_plus_hwi (t, cum->stack_words * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GTOP, the top of the GPR save area.  */
      t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
      t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize FTOP, the top of the FPR save area.
	 This address is gpr_save_area_bytes below GTOP, rounded
	 down to the next fp-aligned boundary.  */
      t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
      fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
      fpr_offset &= -UNITS_PER_FPVALUE;
      /* NOTE(review): the guard testing fpr_offset before the pointer
	 adjustment was dropped here.  */
	t = fold_build_pointer_plus_hwi (t, -fpr_offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GOFF, the offset from GTOP of the
	 next GPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
		  build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Likewise emit code to initialize FOFF, the offset from FTOP
	 of the next FPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
		  build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* NOTE(review): the closing brace of the EABI branch and the "else"
   introducing the standard branch were dropped here.  */
      nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
      std_expand_builtin_va_start (valist, nextarg);
/* Like std_gimplify_va_arg_expr, but apply alignment to zero-sized
   types as well -- presumably; the tail of this comment was lost in
   extraction.

   NOTE(review): extraction dropped several lines (the embedded original
   numbering jumps); gaps are flagged below and must be restored from the
   upstream file.  */

mips_std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
/* NOTE(review): the return type, the post_p parameter and the opening
   brace were dropped here.  */
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  /* NOTE(review): declaration of the indirect flag was dropped here.  */

  indirect = pass_va_arg_by_reference (type);
  /* NOTE(review): the test of the indirect flag was dropped here.  */
    type = build_pointer_type (type);

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align)
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
/* NOTE(review): the first operand of this BIT_AND_EXPR was dropped
   here.  */
			       build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
/* NOTE(review): the else arm of the dynamic-alignment test was dropped
   here.  */

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
      /* NOTE(review): the block opening for this two-statement body was
	 dropped here.  */
      type = build_variant_type_copy (type);
      SET_TYPE_ALIGN (type, boundary);

  /* Compute the rounded size of the type.  */
  type_size = size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

/* NOTE(review): the statement seeding ADDR from the aligned valist was
   dropped here.  */
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
			   rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
		       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

/* NOTE(review): the test of the indirect flag was dropped here.  */
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.

   NOTE(review): extraction dropped many lines of this function (the
   embedded original numbering jumps); gaps are flagged below and must be
   restored from the upstream file.  */

mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
/* NOTE(review): the return type, the post_p parameter, the opening brace
   and the leading declarations were dropped here.  */
  indirect_p = pass_va_arg_by_reference (type);
  /* NOTE(review): the test of indirect_p was dropped here.  */
    type = build_pointer_type (type);

  if (!EABI_FLOAT_VARARGS_P)
    addr = mips_std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
  /* NOTE(review): "else" and its opening brace were dropped here.  */
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, top, off, align;
      HOST_WIDE_INT size, rsize, osize;
      /* NOTE(review): declarations of the expression temporaries were
	 dropped here.  */

      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = DECL_CHAIN (f_ovfl);
      f_ftop = DECL_CHAIN (f_gtop);
      f_goff = DECL_CHAIN (f_ftop);
      f_foff = DECL_CHAIN (f_goff);

      /* Let:

	 TOP be the top of the GPR or FPR save area;
	 OFF be the offset from TOP of the next register;
	 ADDR_RTX be the address of the argument;
	 SIZE be the number of bytes in the argument type;
	 RSIZE be the number of bytes used to store the argument
	   when it's in the register save area; and
	 OSIZE be the number of bytes used to store it when it's
	   in the stack overflow area.

	 The code we want is:

	 1: off &= -rsize;	  // round down
	 (NOTE(review): steps 2-3 and 5-8 of this pseudo-code comment
	 were dropped in extraction.)
	 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
	 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
	 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);

	 [1] and [9] can sometimes be optimized away.  */

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* NOTE(review): the trailing NULL_TREE operand was dropped here.  */
      size = int_size_in_bytes (type);

      if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
	  && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
	  /* NOTE(review): the block opening was dropped here.  */
	  top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
			unshare_expr (valist), f_ftop, NULL_TREE);
	  off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
			unshare_expr (valist), f_foff, NULL_TREE);

	  /* When va_start saves FPR arguments to the stack, each slot
	     takes up UNITS_PER_HWFPVALUE bytes, regardless of the
	     argument's precision.  */
	  rsize = UNITS_PER_HWFPVALUE;

	  /* Overflow arguments are padded to UNITS_PER_WORD bytes
	     (= PARM_BOUNDARY bits).  This can be different from RSIZE
	     in two cases:

	     (1) On 32-bit targets when TYPE is a structure such as:

	     struct s { float f; };

	     Such structures are passed in paired FPRs, so RSIZE
	     will be 8 bytes.  However, the structure only takes
	     up 4 bytes of memory, so OSIZE will only be 4.

	     (2) In combinations such as -mgp64 -msingle-float
	     -fshort-double.  Doubles passed in registers will then take
	     up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
	     stack take up UNITS_PER_WORD bytes.  */
	  osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
      /* NOTE(review): the closing brace, "else" and its opening brace
	 were dropped here.  */
	  top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
			unshare_expr (valist), f_gtop, NULL_TREE);
	  off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
			unshare_expr (valist), f_goff, NULL_TREE);
	  rsize = ROUND_UP (size, UNITS_PER_WORD);
	  if (rsize > UNITS_PER_WORD)
	      /* NOTE(review): the block opening was dropped here.  */
	      /* [1] Emit code for: off &= -rsize.	*/
	      t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
			  build_int_cst (TREE_TYPE (off), -rsize));
	      gimplify_assign (unshare_expr (off), t, pre_p);
	  /* NOTE(review): the closing braces and the assignment of OSIZE
	     for the GPR case were dropped here.  */

      /* [2] Emit code to branch if off == 0.  */
      t = build2 (NE_EXPR, boolean_type_node, unshare_expr (off),
		  build_int_cst (TREE_TYPE (off), 0));
      addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);

      /* [5] Emit code for: off -= rsize.  We do this as a form of
	 post-decrement not available to C.  */
      t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
      t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);

      /* [4] Emit code for:
	 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0).  */
      t = fold_convert (sizetype, t);
      t = fold_build1 (NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (top, t);
      if (BYTES_BIG_ENDIAN && rsize > size)
	t = fold_build_pointer_plus_hwi (t, rsize - size);
      COND_EXPR_THEN (addr) = t;

      if (osize > UNITS_PER_WORD)
	  /* NOTE(review): the block opening was dropped here.  */
	  /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize.  */
	  t = fold_build_pointer_plus_hwi (unshare_expr (ovfl), osize - 1);
	  u = build_int_cst (TREE_TYPE (t), -osize);
	  t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
	  align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
			  unshare_expr (ovfl), t);
      /* NOTE(review): the closing brace and the "else align = NULL"
	 arm were dropped here.  */

      /* [10, 11] Emit code for:
	 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
	 (NOTE(review): the rest of this comment was dropped).  */
      u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
      t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
      if (BYTES_BIG_ENDIAN && osize > size)
	t = fold_build_pointer_plus_hwi (t, osize - size);

      /* String [9] and [10, 11] together.  */
      /* NOTE(review): the test guarding this COMPOUND_EXPR (align
	 non-null) was dropped here.  */
	t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
      COND_EXPR_ELSE (addr) = t;

      addr = fold_convert (build_pointer_type (type), addr);
      addr = build_va_arg_indirect_ref (addr);
/* NOTE(review): the closing brace of the EABI branch and the test of
   indirect_p were dropped here.  */
    addr = build_va_arg_indirect_ref (addr);

/* NOTE(review): the final return of ADDR and the closing brace were
   dropped here.  */
7018 /* Declare a unique, locally-binding function called NAME, then start
7022 mips_start_unique_function (const char *name
)
7026 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
,
7027 get_identifier (name
),
7028 build_function_type_list (void_type_node
, NULL_TREE
));
7029 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
7030 NULL_TREE
, void_type_node
);
7031 TREE_PUBLIC (decl
) = 1;
7032 TREE_STATIC (decl
) = 1;
7034 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
7036 targetm
.asm_out
.unique_section (decl
, 0);
7037 switch_to_section (get_named_section (decl
, NULL
, 0));
7039 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
7040 fputs ("\t.hidden\t", asm_out_file
);
7041 assemble_name (asm_out_file
, name
);
7042 putc ('\n', asm_out_file
);
7045 /* Start a definition of function NAME. MIPS16_P indicates whether the
7046 function contains MIPS16 code. */
7049 mips_start_function_definition (const char *name
, bool mips16_p
)
7052 fprintf (asm_out_file
, "\t.set\tmips16\n");
7054 fprintf (asm_out_file
, "\t.set\tnomips16\n");
7056 if (TARGET_MICROMIPS
)
7057 fprintf (asm_out_file
, "\t.set\tmicromips\n");
7058 #ifdef HAVE_GAS_MICROMIPS
7060 fprintf (asm_out_file
, "\t.set\tnomicromips\n");
7063 if (!flag_inhibit_size_directive
)
7065 fputs ("\t.ent\t", asm_out_file
);
7066 assemble_name (asm_out_file
, name
);
7067 fputs ("\n", asm_out_file
);
7070 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file
, name
, "function");
7072 /* Start the definition proper. */
7073 assemble_name (asm_out_file
, name
);
7074 fputs (":\n", asm_out_file
);
7077 /* End a function definition started by mips_start_function_definition. */
7080 mips_end_function_definition (const char *name
)
7082 if (!flag_inhibit_size_directive
)
7084 fputs ("\t.end\t", asm_out_file
);
7085 assemble_name (asm_out_file
, name
);
7086 fputs ("\n", asm_out_file
);
7090 /* If *STUB_PTR points to a stub, output a comdat-style definition for it,
7091 then free *STUB_PTR. */
7094 mips_finish_stub (mips_one_only_stub
**stub_ptr
)
7096 mips_one_only_stub
*stub
= *stub_ptr
;
7100 const char *name
= stub
->get_name ();
7101 mips_start_unique_function (name
);
7102 mips_start_function_definition (name
, false);
7103 stub
->output_body ();
7104 mips_end_function_definition (name
);
7109 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
7112 mips_ok_for_lazy_binding_p (rtx x
)
7114 return (TARGET_USE_GOT
7115 && GET_CODE (x
) == SYMBOL_REF
7116 && !SYMBOL_REF_BIND_NOW_P (x
)
7117 && !mips_symbol_binds_local_p (x
));
7120 /* Load function address ADDR into register DEST. TYPE is as for
7121 mips_expand_call. Return true if we used an explicit lazy-binding
7125 mips_load_call_address (enum mips_call_type type
, rtx dest
, rtx addr
)
7127 /* If we're generating PIC, and this call is to a global function,
7128 try to allow its address to be resolved lazily. This isn't
7129 possible for sibcalls when $gp is call-saved because the value
7130 of $gp on entry to the stub would be our caller's gp, not ours. */
7131 if (TARGET_EXPLICIT_RELOCS
7132 && !(type
== MIPS_CALL_SIBCALL
&& TARGET_CALL_SAVED_GP
)
7133 && mips_ok_for_lazy_binding_p (addr
))
7135 addr
= mips_got_load (dest
, addr
, SYMBOL_GOTOFF_CALL
);
7136 emit_insn (gen_rtx_SET (dest
, addr
));
7141 mips_emit_move (dest
, addr
);
7146 /* Each locally-defined hard-float MIPS16 function has a local symbol
7147 associated with it. This hash table maps the function symbol (FUNC)
7148 to the local symbol (LOCAL). */
7149 static GTY (()) hash_map
<nofree_string_hash
, rtx
> *mips16_local_aliases
;
7151 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
7152 Return a local alias for it, creating a new one if necessary. */
7155 mips16_local_alias (rtx func
)
7157 /* Create the hash table if this is the first call. */
7158 if (mips16_local_aliases
== NULL
)
7159 mips16_local_aliases
= hash_map
<nofree_string_hash
, rtx
>::create_ggc (37);
7161 /* Look up the function symbol, creating a new entry if need be. */
7163 const char *func_name
= XSTR (func
, 0);
7164 rtx
*slot
= &mips16_local_aliases
->get_or_insert (func_name
, &existed
);
7165 gcc_assert (slot
!= NULL
);
7171 /* Create a new SYMBOL_REF for the local symbol. The choice of
7172 __fn_local_* is based on the __fn_stub_* names that we've
7173 traditionally used for the non-MIPS16 stub. */
7174 func_name
= targetm
.strip_name_encoding (XSTR (func
, 0));
7175 const char *local_name
= ACONCAT (("__fn_local_", func_name
, NULL
));
7176 local
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (local_name
));
7177 SYMBOL_REF_FLAGS (local
) = SYMBOL_REF_FLAGS (func
) | SYMBOL_FLAG_LOCAL
;
7179 /* Create a new structure to represent the mapping. */
/* A chained list of functions for which mips16_build_call_stub has already
   generated a stub.  NAME is the name of the function and FP_RET_P is true
   if the function returns a value in floating-point registers.  */
struct mips16_stub {
  struct mips16_stub *next;
  /* NOTE(review): field names recovered from their uses below
     (l->name, l->fp_ret_p) -- confirm against upstream.  */
  char *name;
  bool fp_ret_p;
};
static struct mips16_stub *mips16_stubs;
7195 /* Return the two-character string that identifies floating-point
7196 return mode MODE in the name of a MIPS16 function stub. */
7199 mips16_call_stub_mode_suffix (machine_mode mode
)
7203 else if (mode
== DFmode
)
7205 else if (mode
== SCmode
)
7207 else if (mode
== DCmode
)
7209 else if (mode
== V2SFmode
)
7211 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
);
7218 /* Write instructions to move a 32-bit value between general register
7219 GPREG and floating-point register FPREG. DIRECTION is 't' to move
7220 from GPREG to FPREG and 'f' to move in the opposite direction. */
7223 mips_output_32bit_xfer (char direction
, unsigned int gpreg
, unsigned int fpreg
)
7225 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
7226 reg_names
[gpreg
], reg_names
[fpreg
]);
7229 /* Likewise for 64-bit values. */
7232 mips_output_64bit_xfer (char direction
, unsigned int gpreg
, unsigned int fpreg
)
7235 fprintf (asm_out_file
, "\tdm%cc1\t%s,%s\n", direction
,
7236 reg_names
[gpreg
], reg_names
[fpreg
]);
7237 else if (ISA_HAS_MXHC1
)
7239 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
7240 reg_names
[gpreg
+ TARGET_BIG_ENDIAN
], reg_names
[fpreg
]);
7241 fprintf (asm_out_file
, "\tm%chc1\t%s,%s\n", direction
,
7242 reg_names
[gpreg
+ TARGET_LITTLE_ENDIAN
], reg_names
[fpreg
]);
7244 else if (TARGET_FLOATXX
&& direction
== 't')
7246 /* Use the argument save area to move via memory. */
7247 fprintf (asm_out_file
, "\tsw\t%s,0($sp)\n", reg_names
[gpreg
]);
7248 fprintf (asm_out_file
, "\tsw\t%s,4($sp)\n", reg_names
[gpreg
+ 1]);
7249 fprintf (asm_out_file
, "\tldc1\t%s,0($sp)\n", reg_names
[fpreg
]);
7251 else if (TARGET_FLOATXX
&& direction
== 'f')
7253 /* Use the argument save area to move via memory. */
7254 fprintf (asm_out_file
, "\tsdc1\t%s,0($sp)\n", reg_names
[fpreg
]);
7255 fprintf (asm_out_file
, "\tlw\t%s,0($sp)\n", reg_names
[gpreg
]);
7256 fprintf (asm_out_file
, "\tlw\t%s,4($sp)\n", reg_names
[gpreg
+ 1]);
7260 /* Move the least-significant word. */
7261 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
7262 reg_names
[gpreg
+ TARGET_BIG_ENDIAN
], reg_names
[fpreg
]);
7263 /* ...then the most significant word. */
7264 fprintf (asm_out_file
, "\tm%cc1\t%s,%s\n", direction
,
7265 reg_names
[gpreg
+ TARGET_LITTLE_ENDIAN
], reg_names
[fpreg
+ 1]);
7269 /* Write out code to move floating-point arguments into or out of
7270 general registers. FP_CODE is the code describing which arguments
7271 are present (see the comment above the definition of CUMULATIVE_ARGS
7272 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
7275 mips_output_args_xfer (int fp_code
, char direction
)
7277 unsigned int gparg
, fparg
, f
;
7278 CUMULATIVE_ARGS cum
;
7280 /* This code only works for o32 and o64. */
7281 gcc_assert (TARGET_OLDABI
);
7283 mips_init_cumulative_args (&cum
, NULL
);
7285 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
7288 struct mips_arg_info info
;
7292 else if ((f
& 3) == 2)
7297 mips_get_arg_info (&info
, &cum
, mode
, NULL
, true);
7298 gparg
= mips_arg_regno (&info
, false);
7299 fparg
= mips_arg_regno (&info
, true);
7302 mips_output_32bit_xfer (direction
, gparg
, fparg
);
7304 mips_output_64bit_xfer (direction
, gparg
, fparg
);
7306 function_arg_info
arg (mode
, /*named=*/true);
7307 mips_function_arg_advance (pack_cumulative_args (&cum
), arg
);
7311 /* Write a MIPS16 stub for the current function. This stub is used
7312 for functions which take arguments in the floating-point registers.
7313 It is normal-mode code that moves the floating-point arguments
7314 into the general registers and then jumps to the MIPS16 code. */
7317 mips16_build_function_stub (void)
7319 const char *fnname
, *alias_name
, *separator
;
7320 char *secname
, *stubname
;
7325 /* Create the name of the stub, and its unique section. */
7326 symbol
= XEXP (DECL_RTL (current_function_decl
), 0);
7327 alias
= mips16_local_alias (symbol
);
7329 fnname
= targetm
.strip_name_encoding (XSTR (symbol
, 0));
7330 alias_name
= targetm
.strip_name_encoding (XSTR (alias
, 0));
7331 secname
= ACONCAT ((".mips16.fn.", fnname
, NULL
));
7332 stubname
= ACONCAT (("__fn_stub_", fnname
, NULL
));
7334 /* Build a decl for the stub. */
7335 stubdecl
= build_decl (BUILTINS_LOCATION
,
7336 FUNCTION_DECL
, get_identifier (stubname
),
7337 build_function_type_list (void_type_node
, NULL_TREE
));
7338 set_decl_section_name (stubdecl
, secname
);
7339 DECL_RESULT (stubdecl
) = build_decl (BUILTINS_LOCATION
,
7340 RESULT_DECL
, NULL_TREE
, void_type_node
);
7342 /* Output a comment. */
7343 fprintf (asm_out_file
, "\t# Stub function for %s (",
7344 current_function_name ());
7346 for (f
= (unsigned int) crtl
->args
.info
.fp_code
; f
!= 0; f
>>= 2)
7348 fprintf (asm_out_file
, "%s%s", separator
,
7349 (f
& 3) == 1 ? "float" : "double");
7352 fprintf (asm_out_file
, ")\n");
7354 /* Start the function definition. */
7355 assemble_start_function (stubdecl
, stubname
);
7356 mips_start_function_definition (stubname
, false);
7358 /* If generating pic2 code, either set up the global pointer or
7360 if (TARGET_ABICALLS_PIC2
)
7362 if (TARGET_ABSOLUTE_ABICALLS
)
7363 fprintf (asm_out_file
, "\t.option\tpic0\n");
7366 output_asm_insn ("%(.cpload\t%^%)", NULL
);
7367 /* Emit an R_MIPS_NONE relocation to tell the linker what the
7368 target function is. Use a local GOT access when loading the
7369 symbol, to cut down on the number of unnecessary GOT entries
7370 for stubs that aren't needed. */
7371 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol
);
7376 /* Load the address of the MIPS16 function into $25. Do this first so
7377 that targets with coprocessor interlocks can use an MFC1 to fill the
7379 output_asm_insn ("la\t%^,%0", &symbol
);
7381 /* Move the arguments from floating-point registers to general registers. */
7382 mips_output_args_xfer (crtl
->args
.info
.fp_code
, 'f');
7384 /* Jump to the MIPS16 function. */
7385 output_asm_insn ("jr\t%^", NULL
);
7387 if (TARGET_ABICALLS_PIC2
&& TARGET_ABSOLUTE_ABICALLS
)
7388 fprintf (asm_out_file
, "\t.option\tpic2\n");
7390 mips_end_function_definition (stubname
);
7392 /* If the linker needs to create a dynamic symbol for the target
7393 function, it will associate the symbol with the stub (which,
7394 unlike the target function, follows the proper calling conventions).
7395 It is therefore useful to have a local alias for the target function,
7396 so that it can still be identified as MIPS16 code. As an optimization,
7397 this symbol can also be used for indirect MIPS16 references from
7398 within this file. */
7399 ASM_OUTPUT_DEF (asm_out_file
, alias_name
, fnname
);
7401 switch_to_section (function_section (current_function_decl
));
7404 /* The current function is a MIPS16 function that returns a value in an FPR.
7405 Copy the return value from its soft-float to its hard-float location.
7406 libgcc2 has special non-MIPS16 helper functions for each case. */
7409 mips16_copy_fpr_return_value (void)
7411 rtx fn
, insn
, retval
;
7413 machine_mode return_mode
;
7416 return_type
= DECL_RESULT (current_function_decl
);
7417 return_mode
= DECL_MODE (return_type
);
7419 name
= ACONCAT (("__mips16_ret_",
7420 mips16_call_stub_mode_suffix (return_mode
),
7422 fn
= mips16_stub_function (name
);
7424 /* The function takes arguments in $2 (and possibly $3), so calls
7425 to it cannot be lazily bound. */
7426 SYMBOL_REF_FLAGS (fn
) |= SYMBOL_FLAG_BIND_NOW
;
7428 /* Model the call as something that takes the GPR return value as
7429 argument and returns an "updated" value. */
7430 retval
= gen_rtx_REG (return_mode
, GP_RETURN
);
7431 insn
= mips_expand_call (MIPS_CALL_EPILOGUE
, retval
, fn
,
7432 const0_rtx
, NULL_RTX
, false);
7433 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), retval
);
7436 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
7437 RETVAL is the location of the return value, or null if this is
7438 a "call" rather than a "call_value". ARGS_SIZE is the size of the
7439 arguments and FP_CODE is the code built by mips_function_arg;
7440 see the comment before the fp_code field in CUMULATIVE_ARGS for details.
7442 There are three alternatives:
7444 - If a stub was needed, emit the call and return the call insn itself.
7446 - If we can avoid using a stub by redirecting the call, set *FN_PTR
7447 to the new target and return null.
7449 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
7452 A stub is needed for calls to functions that, in normal mode,
7453 receive arguments in FPRs or return values in FPRs. The stub
7454 copies the arguments from their soft-float positions to their
7455 hard-float positions, calls the real function, then copies the
7456 return value from its hard-float position to its soft-float
7459 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
7460 If *FN_PTR turns out to be to a non-MIPS16 function, the linker
7461 automatically redirects the JAL to the stub, otherwise the JAL
7462 continues to call FN directly. */
7465 mips16_build_call_stub (rtx retval
, rtx
*fn_ptr
, rtx args_size
, int fp_code
)
7469 struct mips16_stub
*l
;
7473 /* We don't need to do anything if we aren't in MIPS16 mode, or if
7474 we were invoked with the -msoft-float option. */
7475 if (!TARGET_MIPS16
|| TARGET_SOFT_FLOAT_ABI
)
7478 /* Figure out whether the value might come back in a floating-point
7480 fp_ret_p
= retval
&& mips_return_mode_in_fpr_p (GET_MODE (retval
));
7482 /* We don't need to do anything if there were no floating-point
7483 arguments and the value will not be returned in a floating-point
7485 if (fp_code
== 0 && !fp_ret_p
)
7488 /* We don't need to do anything if this is a call to a special
7489 MIPS16 support function. */
7491 if (mips16_stub_function_p (fn
))
7494 /* If we're calling a locally-defined MIPS16 function, we know that
7495 it will return values in both the "soft-float" and "hard-float"
7496 registers. There is no need to use a stub to move the latter
7498 if (fp_code
== 0 && mips16_local_function_p (fn
))
7501 /* This code will only work for o32 and o64 abis. The other ABI's
7502 require more sophisticated support. */
7503 gcc_assert (TARGET_OLDABI
);
7505 /* If we're calling via a function pointer, use one of the magic
7506 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
7507 Each stub expects the function address to arrive in register $2. */
7508 if (GET_CODE (fn
) != SYMBOL_REF
7509 || !call_insn_operand (fn
, VOIDmode
))
7516 /* If this is a locally-defined and locally-binding function,
7517 avoid the stub by calling the local alias directly. */
7518 if (mips16_local_function_p (fn
))
7520 *fn_ptr
= mips16_local_alias (fn
);
7524 /* Create a SYMBOL_REF for the libgcc.a function. */
7526 sprintf (buf
, "__mips16_call_stub_%s_%d",
7527 mips16_call_stub_mode_suffix (GET_MODE (retval
)),
7530 sprintf (buf
, "__mips16_call_stub_%d", fp_code
);
7531 stub_fn
= mips16_stub_function (buf
);
7533 /* The function uses $2 as an argument, so calls to it
7534 cannot be lazily bound. */
7535 SYMBOL_REF_FLAGS (stub_fn
) |= SYMBOL_FLAG_BIND_NOW
;
7537 /* Load the target function into $2. */
7538 addr
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 2);
7539 lazy_p
= mips_load_call_address (MIPS_CALL_NORMAL
, addr
, fn
);
7541 /* Emit the call. */
7542 insn
= mips_expand_call (MIPS_CALL_NORMAL
, retval
, stub_fn
,
7543 args_size
, NULL_RTX
, lazy_p
);
7545 /* Tell GCC that this call does indeed use the value of $2. */
7546 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), addr
);
7548 /* If we are handling a floating-point return value, we need to
7549 save $18 in the function prologue. Putting a note on the
7550 call will mean that df_regs_ever_live_p ($18) will be true if the
7551 call is not eliminated, and we can check that in the prologue
7554 CALL_INSN_FUNCTION_USAGE (insn
) =
7555 gen_rtx_EXPR_LIST (VOIDmode
,
7556 gen_rtx_CLOBBER (VOIDmode
,
7557 gen_rtx_REG (word_mode
, 18)),
7558 CALL_INSN_FUNCTION_USAGE (insn
));
7563 /* We know the function we are going to call. If we have already
7564 built a stub, we don't need to do anything further. */
7565 fnname
= targetm
.strip_name_encoding (XSTR (fn
, 0));
7566 for (l
= mips16_stubs
; l
!= NULL
; l
= l
->next
)
7567 if (strcmp (l
->name
, fnname
) == 0)
7572 const char *separator
;
7573 char *secname
, *stubname
;
7574 tree stubid
, stubdecl
;
7577 /* If the function does not return in FPRs, the special stub
7581 If the function does return in FPRs, the stub section is named
7582 .mips16.call.fp.FNNAME
7584 Build a decl for the stub. */
7585 secname
= ACONCAT ((".mips16.call.", fp_ret_p
? "fp." : "",
7587 stubname
= ACONCAT (("__call_stub_", fp_ret_p
? "fp_" : "",
7589 stubid
= get_identifier (stubname
);
7590 stubdecl
= build_decl (BUILTINS_LOCATION
,
7591 FUNCTION_DECL
, stubid
,
7592 build_function_type_list (void_type_node
,
7594 set_decl_section_name (stubdecl
, secname
);
7595 DECL_RESULT (stubdecl
) = build_decl (BUILTINS_LOCATION
,
7596 RESULT_DECL
, NULL_TREE
,
7599 /* Output a comment. */
7600 fprintf (asm_out_file
, "\t# Stub function to call %s%s (",
7602 ? (GET_MODE (retval
) == SFmode
? "float " : "double ")
7606 for (f
= (unsigned int) fp_code
; f
!= 0; f
>>= 2)
7608 fprintf (asm_out_file
, "%s%s", separator
,
7609 (f
& 3) == 1 ? "float" : "double");
7612 fprintf (asm_out_file
, ")\n");
7614 /* Start the function definition. */
7615 assemble_start_function (stubdecl
, stubname
);
7616 mips_start_function_definition (stubname
, false);
7620 fprintf (asm_out_file
, "\t.cfi_startproc\n");
7622 /* Create a fake CFA 4 bytes below the stack pointer.
7623 This works around unwinders (like libgcc's) that expect
7624 the CFA for non-signal frames to be unique. */
7625 fprintf (asm_out_file
, "\t.cfi_def_cfa 29,-4\n");
7627 /* "Save" $sp in itself so we don't use the fake CFA.
7628 This is: DW_CFA_val_expression r29, { DW_OP_reg29 }. */
7629 fprintf (asm_out_file
, "\t.cfi_escape 0x16,29,1,0x6d\n");
7631 /* Save the return address in $18. The stub's caller knows
7632 that $18 might be clobbered, even though $18 is usually
7633 a call-saved register.
7635 Do it early on in case the last move to a floating-point
7636 register can be scheduled into the delay slot of the
7637 call we are about to make. */
7638 fprintf (asm_out_file
, "\tmove\t%s,%s\n",
7639 reg_names
[GP_REG_FIRST
+ 18],
7640 reg_names
[RETURN_ADDR_REGNUM
]);
7644 /* Load the address of the MIPS16 function into $25. Do this
7645 first so that targets with coprocessor interlocks can use
7646 an MFC1 to fill the delay slot. */
7647 if (TARGET_EXPLICIT_RELOCS
)
7649 output_asm_insn ("lui\t%^,%%hi(%0)", &fn
);
7650 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn
);
7653 output_asm_insn ("la\t%^,%0", &fn
);
7656 /* Move the arguments from general registers to floating-point
7658 mips_output_args_xfer (fp_code
, 't');
7662 /* Now call the non-MIPS16 function. */
7663 output_asm_insn (mips_output_jump (&fn
, 0, -1, true), &fn
);
7664 fprintf (asm_out_file
, "\t.cfi_register 31,18\n");
7666 /* Move the result from floating-point registers to
7667 general registers. */
7668 switch (GET_MODE (retval
))
7671 mips_output_32bit_xfer ('f', GP_RETURN
+ TARGET_BIG_ENDIAN
,
7675 mips_output_32bit_xfer ('f', GP_RETURN
+ TARGET_LITTLE_ENDIAN
,
7676 TARGET_LITTLE_ENDIAN
7679 if (GET_MODE (retval
) == SCmode
&& TARGET_64BIT
)
7681 /* On 64-bit targets, complex floats are returned in
7682 a single GPR, such that "sd" on a suitably-aligned
7683 target would store the value correctly. */
7684 fprintf (asm_out_file
, "\tdsll\t%s,%s,32\n",
7685 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
],
7686 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
]);
7687 fprintf (asm_out_file
, "\tdsll\t%s,%s,32\n",
7688 reg_names
[GP_RETURN
+ TARGET_LITTLE_ENDIAN
],
7689 reg_names
[GP_RETURN
+ TARGET_LITTLE_ENDIAN
]);
7690 fprintf (asm_out_file
, "\tdsrl\t%s,%s,32\n",
7691 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
],
7692 reg_names
[GP_RETURN
+ TARGET_BIG_ENDIAN
]);
7693 fprintf (asm_out_file
, "\tor\t%s,%s,%s\n",
7694 reg_names
[GP_RETURN
],
7695 reg_names
[GP_RETURN
],
7696 reg_names
[GP_RETURN
+ 1]);
7701 mips_output_32bit_xfer ('f', GP_RETURN
, FP_REG_FIRST
);
7705 mips_output_64bit_xfer ('f', GP_RETURN
+ (8 / UNITS_PER_WORD
),
7710 gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
7711 || GET_MODE (retval
) != V2SFmode
);
7712 mips_output_64bit_xfer ('f', GP_RETURN
, FP_REG_FIRST
);
7718 fprintf (asm_out_file
, "\tjr\t%s\n", reg_names
[GP_REG_FIRST
+ 18]);
7719 fprintf (asm_out_file
, "\t.cfi_endproc\n");
7723 /* Jump to the previously-loaded address. */
7724 output_asm_insn ("jr\t%^", NULL
);
7727 #ifdef ASM_DECLARE_FUNCTION_SIZE
7728 ASM_DECLARE_FUNCTION_SIZE (asm_out_file
, stubname
, stubdecl
);
7731 mips_end_function_definition (stubname
);
7733 /* Record this stub. */
7734 l
= XNEW (struct mips16_stub
);
7735 l
->name
= xstrdup (fnname
);
7736 l
->fp_ret_p
= fp_ret_p
;
7737 l
->next
= mips16_stubs
;
7741 /* If we expect a floating-point return value, but we've built a
7742 stub which does not expect one, then we're in trouble. We can't
7743 use the existing stub, because it won't handle the floating-point
7744 value. We can't build a new stub, because the linker won't know
7745 which stub to use for the various calls in this object file.
7746 Fortunately, this case is illegal, since it means that a function
7747 was declared in two different ways in a single compilation. */
7748 if (fp_ret_p
&& !l
->fp_ret_p
)
7749 error ("cannot handle inconsistent calls to %qs", fnname
);
7751 if (retval
== NULL_RTX
)
7752 pattern
= gen_call_internal_direct (fn
, args_size
);
7754 pattern
= gen_call_value_internal_direct (retval
, fn
, args_size
);
7755 insn
= mips_emit_call_insn (pattern
, fn
, fn
, false);
7757 /* If we are calling a stub which handles a floating-point return
7758 value, we need to arrange to save $18 in the prologue. We do this
7759 by marking the function call as using the register. The prologue
7760 will later see that it is used, and emit code to save it. */
7762 CALL_INSN_FUNCTION_USAGE (insn
) =
7763 gen_rtx_EXPR_LIST (VOIDmode
,
7764 gen_rtx_CLOBBER (VOIDmode
,
7765 gen_rtx_REG (word_mode
, 18)),
7766 CALL_INSN_FUNCTION_USAGE (insn
));
7771 /* Expand a call of type TYPE. RESULT is where the result will go (null
7772 for "call"s and "sibcall"s), ADDR is the address of the function,
7773 ARGS_SIZE is the size of the arguments and AUX is the value passed
7774 to us by mips_function_arg. LAZY_P is true if this call already
7775 involves a lazily-bound function address (such as when calling
7776 functions through a MIPS16 hard-float stub).
7778 Return the call itself. */
7781 mips_expand_call (enum mips_call_type type
, rtx result
, rtx addr
,
7782 rtx args_size
, rtx aux
, bool lazy_p
)
7784 rtx orig_addr
, pattern
;
7788 fp_code
= aux
== 0 ? 0 : (int) GET_MODE (aux
);
7789 insn
= mips16_build_call_stub (result
, &addr
, args_size
, fp_code
);
7792 gcc_assert (!lazy_p
&& type
== MIPS_CALL_NORMAL
);
7797 if (!call_insn_operand (addr
, VOIDmode
))
7799 if (type
== MIPS_CALL_EPILOGUE
)
7800 addr
= MIPS_EPILOGUE_TEMP (Pmode
);
7802 addr
= gen_reg_rtx (Pmode
);
7803 lazy_p
|= mips_load_call_address (type
, addr
, orig_addr
);
7808 rtx (*fn
) (rtx
, rtx
);
7810 if (type
== MIPS_CALL_SIBCALL
)
7811 fn
= gen_sibcall_internal
;
7813 fn
= gen_call_internal
;
7815 pattern
= fn (addr
, args_size
);
7817 else if (GET_CODE (result
) == PARALLEL
&& XVECLEN (result
, 0) == 2)
7819 /* Handle return values created by mips_return_fpr_pair. */
7820 rtx (*fn
) (rtx
, rtx
, rtx
, rtx
);
7823 if (type
== MIPS_CALL_SIBCALL
)
7824 fn
= gen_sibcall_value_multiple_internal
;
7826 fn
= gen_call_value_multiple_internal
;
7828 reg1
= XEXP (XVECEXP (result
, 0, 0), 0);
7829 reg2
= XEXP (XVECEXP (result
, 0, 1), 0);
7830 pattern
= fn (reg1
, addr
, args_size
, reg2
);
7834 rtx (*fn
) (rtx
, rtx
, rtx
);
7836 if (type
== MIPS_CALL_SIBCALL
)
7837 fn
= gen_sibcall_value_internal
;
7839 fn
= gen_call_value_internal
;
7841 /* Handle return values created by mips_return_fpr_single. */
7842 if (GET_CODE (result
) == PARALLEL
&& XVECLEN (result
, 0) == 1)
7843 result
= XEXP (XVECEXP (result
, 0, 0), 0);
7844 pattern
= fn (result
, addr
, args_size
);
7847 return mips_emit_call_insn (pattern
, orig_addr
, addr
, lazy_p
);
7850 /* Split call instruction INSN into a $gp-clobbering call and
7851 (where necessary) an instruction to restore $gp from its save slot.
7852 CALL_PATTERN is the pattern of the new call. */
7855 mips_split_call (rtx insn
, rtx call_pattern
)
7857 emit_call_insn (call_pattern
);
7858 if (!find_reg_note (insn
, REG_NORETURN
, 0))
7859 mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode
,
7860 POST_CALL_TMP_REG
));
7863 /* Return true if a call to DECL may need to use JALX. */
7866 mips_call_may_need_jalx_p (tree decl
)
7868 /* If the current translation unit would use a different mode for DECL,
7869 assume that the call needs JALX. */
7870 if (mips_get_compress_mode (decl
) != TARGET_COMPRESSION
)
7873 /* mips_get_compress_mode is always accurate for locally-binding
7874 functions in the current translation unit. */
7875 if (!DECL_EXTERNAL (decl
) && targetm
.binds_local_p (decl
))
7878 /* When -minterlink-compressed is in effect, assume that functions
7879 could use a different encoding mode unless an attribute explicitly
7880 tells us otherwise. */
7881 if (TARGET_INTERLINK_COMPRESSED
)
7883 if (!TARGET_COMPRESSION
7884 && mips_get_compress_off_flags (DECL_ATTRIBUTES (decl
)) ==0)
7886 if (TARGET_COMPRESSION
7887 && mips_get_compress_on_flags (DECL_ATTRIBUTES (decl
)) == 0)
7894 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
7897 mips_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
7899 if (!TARGET_SIBCALLS
)
7902 /* Interrupt handlers need special epilogue code and therefore can't
7904 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl
)))
7907 /* Direct Js are only possible to functions that use the same ISA encoding.
7908 There is no JX counterpoart of JALX. */
7910 && const_call_insn_operand (XEXP (DECL_RTL (decl
), 0), VOIDmode
)
7911 && mips_call_may_need_jalx_p (decl
))
7914 /* Sibling calls should not prevent lazy binding. Lazy-binding stubs
7915 require $gp to be valid on entry, so sibcalls can only use stubs
7916 if $gp is call-clobbered. */
7918 && TARGET_CALL_SAVED_GP
7919 && !TARGET_ABICALLS_PIC0
7920 && !targetm
.binds_local_p (decl
))
7927 /* Implement TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */
7930 mips_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size
,
7932 enum by_pieces_operation op
,
7935 if (op
== STORE_BY_PIECES
)
7936 return mips_store_by_pieces_p (size
, align
);
7937 if (op
== MOVE_BY_PIECES
&& HAVE_cpymemsi
)
7939 /* cpymemsi is meant to generate code that is at least as good as
7940 move_by_pieces. However, cpymemsi effectively uses a by-pieces
7941 implementation both for moves smaller than a word and for
7942 word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
7943 bytes. We should allow the tree-level optimisers to do such
7944 moves by pieces, as it often exposes other optimization
7945 opportunities. We might as well continue to use cpymemsi at
7946 the rtl level though, as it produces better code when
7947 scheduling is disabled (such as at -O). */
7948 if (currently_expanding_to_rtl
)
7950 if (align
< BITS_PER_WORD
)
7951 return size
< UNITS_PER_WORD
;
7952 return size
<= MIPS_MAX_MOVE_BYTES_STRAIGHT
;
7955 return default_use_by_pieces_infrastructure_p (size
, align
, op
, speed_p
);
7958 /* Implement a handler for STORE_BY_PIECES operations
7959 for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */
7962 mips_store_by_pieces_p (unsigned HOST_WIDE_INT size
, unsigned int align
)
7964 /* Storing by pieces involves moving constants into registers
7965 of size MIN (ALIGN, BITS_PER_WORD), then storing them.
7966 We need to decide whether it is cheaper to load the address of
7967 constant data into a register and use a block move instead. */
7969 /* If the data is only byte aligned, then:
7971 (a1) A block move of less than 4 bytes would involve three 3 LBs and
7972 3 SBs. We might as well use 3 single-instruction LIs and 3 SBs
7975 (a2) A block move of 4 bytes from aligned source data can use an
7976 LW/SWL/SWR sequence. This is often better than the 4 LIs and
7977 4 SBs that we would generate when storing by pieces. */
7978 if (align
<= BITS_PER_UNIT
)
7981 /* If the data is 2-byte aligned, then:
7983 (b1) A block move of less than 4 bytes would use a combination of LBs,
7984 LHs, SBs and SHs. We get better code by using single-instruction
7985 LIs, SBs and SHs instead.
7987 (b2) A block move of 4 bytes from aligned source data would again use
7988 an LW/SWL/SWR sequence. In most cases, loading the address of
7989 the source data would require at least one extra instruction.
7990 It is often more efficient to use 2 single-instruction LIs and
7993 (b3) A block move of up to 3 additional bytes would be like (b1).
7995 (b4) A block move of 8 bytes from aligned source data can use two
7996 LW/SWL/SWR sequences or a single LD/SDL/SDR sequence. Both
7997 sequences are better than the 4 LIs and 4 SHs that we'd generate
7998 when storing by pieces.
8000 The reasoning for higher alignments is similar:
8002 (c1) A block move of less than 4 bytes would be the same as (b1).
8004 (c2) A block move of 4 bytes would use an LW/SW sequence. Again,
8005 loading the address of the source data would typically require
8006 at least one extra instruction. It is generally better to use
8009 (c3) A block move of up to 3 additional bytes would be like (b1).
8011 (c4) A block move of 8 bytes can use two LW/SW sequences or a single
8012 LD/SD sequence, and in these cases we've traditionally preferred
8013 the memory copy over the more bulky constant moves. */
8017 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
8018 Assume that the areas do not overlap. */
8021 mips_block_move_straight (rtx dest
, rtx src
, HOST_WIDE_INT length
)
8023 HOST_WIDE_INT offset
, delta
;
8024 unsigned HOST_WIDE_INT bits
;
8029 /* Work out how many bits to move at a time. If both operands have
8030 half-word alignment, it is usually better to move in half words.
8031 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
8032 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
8033 Otherwise move word-sized chunks.
8035 For ISA_HAS_LWL_LWR we rely on the lwl/lwr & swl/swr load. Otherwise
8036 picking the minimum of alignment or BITS_PER_WORD gets us the
8037 desired size for bits. */
8039 if (!ISA_HAS_LWL_LWR
)
8040 bits
= MIN (BITS_PER_WORD
, MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)));
8043 if (MEM_ALIGN (src
) == BITS_PER_WORD
/ 2
8044 && MEM_ALIGN (dest
) == BITS_PER_WORD
/ 2)
8045 bits
= BITS_PER_WORD
/ 2;
8047 bits
= BITS_PER_WORD
;
8050 mode
= int_mode_for_size (bits
, 0).require ();
8051 delta
= bits
/ BITS_PER_UNIT
;
8053 /* Allocate a buffer for the temporary registers. */
8054 regs
= XALLOCAVEC (rtx
, length
/ delta
);
8056 /* Load as many BITS-sized chunks as possible. Use a normal load if
8057 the source has enough alignment, otherwise use left/right pairs. */
8058 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
8060 regs
[i
] = gen_reg_rtx (mode
);
8061 if (MEM_ALIGN (src
) >= bits
)
8062 mips_emit_move (regs
[i
], adjust_address (src
, mode
, offset
));
8065 rtx part
= adjust_address (src
, BLKmode
, offset
);
8066 set_mem_size (part
, delta
);
8067 if (!mips_expand_ext_as_unaligned_load (regs
[i
], part
, bits
, 0, 0))
8072 /* Copy the chunks to the destination. */
8073 for (offset
= 0, i
= 0; offset
+ delta
<= length
; offset
+= delta
, i
++)
8074 if (MEM_ALIGN (dest
) >= bits
)
8075 mips_emit_move (adjust_address (dest
, mode
, offset
), regs
[i
]);
8078 rtx part
= adjust_address (dest
, BLKmode
, offset
);
8079 set_mem_size (part
, delta
);
8080 if (!mips_expand_ins_as_unaligned_store (part
, regs
[i
], bits
, 0))
8084 /* Mop up any left-over bytes. */
8085 if (offset
< length
)
8087 src
= adjust_address (src
, BLKmode
, offset
);
8088 dest
= adjust_address (dest
, BLKmode
, offset
);
8089 move_by_pieces (dest
, src
, length
- offset
,
8090 MIN (MEM_ALIGN (src
), MEM_ALIGN (dest
)), RETURN_BEGIN
);
8094 /* Helper function for doing a loop-based block operation on memory
8095 reference MEM. Each iteration of the loop will operate on LENGTH
8098 Create a new base register for use within the loop and point it to
8099 the start of MEM. Create a new memory reference that uses this
8100 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
8103 mips_adjust_block_mem (rtx mem
, HOST_WIDE_INT length
,
8104 rtx
*loop_reg
, rtx
*loop_mem
)
8106 *loop_reg
= copy_addr_to_reg (XEXP (mem
, 0));
8108 /* Although the new mem does not refer to a known location,
8109 it does keep up to LENGTH bytes of alignment. */
8110 *loop_mem
= change_address (mem
, BLKmode
, *loop_reg
);
8111 set_mem_align (*loop_mem
, MIN (MEM_ALIGN (mem
), length
* BITS_PER_UNIT
));
8114 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
8115 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
8116 the memory regions do not overlap. */
8119 mips_block_move_loop (rtx dest
, rtx src
, HOST_WIDE_INT length
,
8120 HOST_WIDE_INT bytes_per_iter
)
8122 rtx_code_label
*label
;
8123 rtx src_reg
, dest_reg
, final_src
, test
;
8124 HOST_WIDE_INT leftover
;
8126 leftover
= length
% bytes_per_iter
;
8129 /* Create registers and memory references for use within the loop. */
8130 mips_adjust_block_mem (src
, bytes_per_iter
, &src_reg
, &src
);
8131 mips_adjust_block_mem (dest
, bytes_per_iter
, &dest_reg
, &dest
);
8133 /* Calculate the value that SRC_REG should have after the last iteration
8135 final_src
= expand_simple_binop (Pmode
, PLUS
, src_reg
, GEN_INT (length
),
8138 /* Emit the start of the loop. */
8139 label
= gen_label_rtx ();
8142 /* Emit the loop body. */
8143 mips_block_move_straight (dest
, src
, bytes_per_iter
);
8145 /* Move on to the next block. */
8146 mips_emit_move (src_reg
, plus_constant (Pmode
, src_reg
, bytes_per_iter
));
8147 mips_emit_move (dest_reg
, plus_constant (Pmode
, dest_reg
, bytes_per_iter
));
8149 /* Emit the loop condition. */
8150 test
= gen_rtx_NE (VOIDmode
, src_reg
, final_src
);
8151 if (Pmode
== DImode
)
8152 emit_jump_insn (gen_cbranchdi4 (test
, src_reg
, final_src
, label
));
8154 emit_jump_insn (gen_cbranchsi4 (test
, src_reg
, final_src
, label
));
8156 /* Mop up any left-over bytes. */
8158 mips_block_move_straight (dest
, src
, leftover
);
8160 /* Temporary fix for PR79150. */
8161 emit_insn (gen_nop ());
8164 /* Expand a cpymemsi instruction, which copies LENGTH bytes from
8165 memory reference SRC to memory reference DEST. */
8168 mips_expand_block_move (rtx dest
, rtx src
, rtx length
)
8170 if (!ISA_HAS_LWL_LWR
8171 && (MEM_ALIGN (src
) < MIPS_MIN_MOVE_MEM_ALIGN
8172 || MEM_ALIGN (dest
) < MIPS_MIN_MOVE_MEM_ALIGN
))
8175 if (CONST_INT_P (length
))
8177 if (INTVAL (length
) <= MIPS_MAX_MOVE_BYTES_STRAIGHT
)
8179 mips_block_move_straight (dest
, src
, INTVAL (length
));
8184 mips_block_move_loop (dest
, src
, INTVAL (length
),
8185 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER
);
8192 /* Expand a loop of synci insns for the address range [BEGIN, END). */
8195 mips_expand_synci_loop (rtx begin
, rtx end
)
8197 rtx inc
, cmp_result
, mask
, length
;
8198 rtx_code_label
*label
, *end_label
;
8200 /* Create end_label. */
8201 end_label
= gen_label_rtx ();
8203 /* Check if begin equals end. */
8204 cmp_result
= gen_rtx_EQ (VOIDmode
, begin
, end
);
8205 emit_jump_insn (gen_condjump (cmp_result
, end_label
));
8207 /* Load INC with the cache line size (rdhwr INC,$1). */
8208 inc
= gen_reg_rtx (Pmode
);
8209 emit_insn (PMODE_INSN (gen_rdhwr_synci_step
, (inc
)));
8211 /* Check if inc is 0. */
8212 cmp_result
= gen_rtx_EQ (VOIDmode
, inc
, const0_rtx
);
8213 emit_jump_insn (gen_condjump (cmp_result
, end_label
));
8215 /* Calculate mask. */
8216 mask
= mips_force_unary (Pmode
, NEG
, inc
);
8218 /* Mask out begin by mask. */
8219 begin
= mips_force_binary (Pmode
, AND
, begin
, mask
);
8221 /* Calculate length. */
8222 length
= mips_force_binary (Pmode
, MINUS
, end
, begin
);
8224 /* Loop back to here. */
8225 label
= gen_label_rtx ();
8228 emit_insn (gen_synci (begin
));
8230 /* Update length. */
8231 mips_emit_binary (MINUS
, length
, length
, inc
);
8234 mips_emit_binary (PLUS
, begin
, begin
, inc
);
8236 /* Check if length is greater than 0. */
8237 cmp_result
= gen_rtx_GT (VOIDmode
, length
, const0_rtx
);
8238 emit_jump_insn (gen_condjump (cmp_result
, label
));
8240 emit_label (end_label
);
8243 /* Expand a QI or HI mode atomic memory operation.
8245 GENERATOR contains a pointer to the gen_* function that generates
8246 the SI mode underlying atomic operation using masks that we
8249 RESULT is the return register for the operation. Its value is NULL
8252 MEM is the location of the atomic access.
8254 OLDVAL is the first operand for the operation.
8256 NEWVAL is the optional second operand for the operation. Its value
8257 is NULL if unused. */
8260 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator
,
8261 rtx result
, rtx mem
, rtx oldval
, rtx newval
)
8263 rtx orig_addr
, memsi_addr
, memsi
, shift
, shiftsi
, unshifted_mask
;
8264 rtx unshifted_mask_reg
, mask
, inverted_mask
, si_op
;
8268 mode
= GET_MODE (mem
);
8270 /* Compute the address of the containing SImode value. */
8271 orig_addr
= force_reg (Pmode
, XEXP (mem
, 0));
8272 memsi_addr
= mips_force_binary (Pmode
, AND
, orig_addr
,
8273 force_reg (Pmode
, GEN_INT (-4)));
8275 /* Create a memory reference for it. */
8276 memsi
= gen_rtx_MEM (SImode
, memsi_addr
);
8277 set_mem_alias_set (memsi
, ALIAS_SET_MEMORY_BARRIER
);
8278 MEM_VOLATILE_P (memsi
) = MEM_VOLATILE_P (mem
);
8280 /* Work out the byte offset of the QImode or HImode value,
8281 counting from the least significant byte. */
8282 shift
= mips_force_binary (Pmode
, AND
, orig_addr
, GEN_INT (3));
8283 if (TARGET_BIG_ENDIAN
)
8284 mips_emit_binary (XOR
, shift
, shift
, GEN_INT (mode
== QImode
? 3 : 2));
8286 /* Multiply by eight to convert the shift value from bytes to bits. */
8287 mips_emit_binary (ASHIFT
, shift
, shift
, GEN_INT (3));
8289 /* Make the final shift an SImode value, so that it can be used in
8290 SImode operations. */
8291 shiftsi
= force_reg (SImode
, gen_lowpart (SImode
, shift
));
8293 /* Set MASK to an inclusive mask of the QImode or HImode value. */
8294 unshifted_mask
= GEN_INT (GET_MODE_MASK (mode
));
8295 unshifted_mask_reg
= force_reg (SImode
, unshifted_mask
);
8296 mask
= mips_force_binary (SImode
, ASHIFT
, unshifted_mask_reg
, shiftsi
);
8298 /* Compute the equivalent exclusive mask. */
8299 inverted_mask
= gen_reg_rtx (SImode
);
8300 emit_insn (gen_rtx_SET (inverted_mask
, gen_rtx_NOT (SImode
, mask
)));
8302 /* Shift the old value into place. */
8303 if (oldval
!= const0_rtx
)
8305 oldval
= convert_modes (SImode
, mode
, oldval
, true);
8306 oldval
= force_reg (SImode
, oldval
);
8307 oldval
= mips_force_binary (SImode
, ASHIFT
, oldval
, shiftsi
);
8310 /* Do the same for the new value. */
8311 if (newval
&& newval
!= const0_rtx
)
8313 newval
= convert_modes (SImode
, mode
, newval
, true);
8314 newval
= force_reg (SImode
, newval
);
8315 newval
= mips_force_binary (SImode
, ASHIFT
, newval
, shiftsi
);
8318 /* Do the SImode atomic access. */
8320 res
= gen_reg_rtx (SImode
);
8322 si_op
= generator
.fn_6 (res
, memsi
, mask
, inverted_mask
, oldval
, newval
);
8324 si_op
= generator
.fn_5 (res
, memsi
, mask
, inverted_mask
, oldval
);
8326 si_op
= generator
.fn_4 (memsi
, mask
, inverted_mask
, oldval
);
8332 /* Shift and convert the result. */
8333 mips_emit_binary (AND
, res
, res
, mask
);
8334 mips_emit_binary (LSHIFTRT
, res
, res
, shiftsi
);
8335 mips_emit_move (result
, gen_lowpart (GET_MODE (result
), res
));
8339 /* Return true if it is possible to use left/right accesses for a
8340 bitfield of WIDTH bits starting BITPOS bits into BLKmode memory OP.
8341 When returning true, update *LEFT and *RIGHT as follows:
8343 *LEFT is a QImode reference to the first byte if big endian or
8344 the last byte if little endian. This address can be used in the
8345 left-side instructions (LWL, SWL, LDL, SDL).
8347 *RIGHT is a QImode reference to the opposite end of the field and
8348 can be used in the patterning right-side instruction. */
8351 mips_get_unaligned_mem (rtx op
, HOST_WIDE_INT width
, HOST_WIDE_INT bitpos
,
8352 rtx
*left
, rtx
*right
)
8356 /* Check that the size is valid. */
8357 if (width
!= 32 && (!TARGET_64BIT
|| width
!= 64))
8360 /* We can only access byte-aligned values. Since we are always passed
8361 a reference to the first byte of the field, it is not necessary to
8362 do anything with BITPOS after this check. */
8363 if (bitpos
% BITS_PER_UNIT
!= 0)
8366 /* Reject aligned bitfields: we want to use a normal load or store
8367 instead of a left/right pair. */
8368 if (MEM_ALIGN (op
) >= width
)
8371 /* Get references to both ends of the field. */
8372 first
= adjust_address (op
, QImode
, 0);
8373 last
= adjust_address (op
, QImode
, width
/ BITS_PER_UNIT
- 1);
8375 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
8376 correspond to the MSB and RIGHT to the LSB. */
8377 if (TARGET_BIG_ENDIAN
)
8378 *left
= first
, *right
= last
;
8380 *left
= last
, *right
= first
;
8385 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
8386 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
8387 the operation is the equivalent of:
8389 (set DEST (*_extract SRC WIDTH BITPOS))
8391 Return true on success. */
8394 mips_expand_ext_as_unaligned_load (rtx dest
, rtx src
, HOST_WIDE_INT width
,
8395 HOST_WIDE_INT bitpos
, bool unsigned_p
)
8397 rtx left
, right
, temp
;
8398 rtx dest1
= NULL_RTX
;
8400 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
8401 be a DImode, create a new temp and emit a zero extend at the end. */
8402 if (GET_MODE (dest
) == DImode
8404 && GET_MODE_BITSIZE (SImode
) == width
)
8407 dest
= gen_reg_rtx (SImode
);
8410 if (!mips_get_unaligned_mem (src
, width
, bitpos
, &left
, &right
))
8413 temp
= gen_reg_rtx (GET_MODE (dest
));
8414 if (GET_MODE (dest
) == DImode
)
8416 emit_insn (gen_mov_ldl (temp
, src
, left
));
8417 emit_insn (gen_mov_ldr (dest
, copy_rtx (src
), right
, temp
));
8421 emit_insn (gen_mov_lwl (temp
, src
, left
));
8422 emit_insn (gen_mov_lwr (dest
, copy_rtx (src
), right
, temp
));
8425 /* If we were loading 32bits and the original register was DI then
8426 sign/zero extend into the orignal dest. */
8430 emit_insn (gen_zero_extendsidi2 (dest1
, dest
));
8432 emit_insn (gen_extendsidi2 (dest1
, dest
));
8437 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
8438 BITPOS and SRC are the operands passed to the expander; the operation
8439 is the equivalent of:
8441 (set (zero_extract DEST WIDTH BITPOS) SRC)
8443 Return true on success. */
8446 mips_expand_ins_as_unaligned_store (rtx dest
, rtx src
, HOST_WIDE_INT width
,
8447 HOST_WIDE_INT bitpos
)
8452 if (!mips_get_unaligned_mem (dest
, width
, bitpos
, &left
, &right
))
8455 mode
= int_mode_for_size (width
, 0).require ();
8456 src
= gen_lowpart (mode
, src
);
8459 emit_insn (gen_mov_sdl (dest
, src
, left
));
8460 emit_insn (gen_mov_sdr (copy_rtx (dest
), copy_rtx (src
), right
));
8464 emit_insn (gen_mov_swl (dest
, src
, left
));
8465 emit_insn (gen_mov_swr (copy_rtx (dest
), copy_rtx (src
), right
));
8470 /* Return true if X is a MEM with the same size as MODE. */
8473 mips_mem_fits_mode_p (machine_mode mode
, rtx x
)
8476 && MEM_SIZE_KNOWN_P (x
)
8477 && MEM_SIZE (x
) == GET_MODE_SIZE (mode
));
8480 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
8481 source of an "ext" instruction or the destination of an "ins"
8482 instruction. OP must be a register operand and the following
8483 conditions must hold:
8485 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
8486 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
8487 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
8489 Also reject lengths equal to a word as they are better handled
8490 by the move patterns. */
8493 mips_use_ins_ext_p (rtx op
, HOST_WIDE_INT width
, HOST_WIDE_INT bitpos
)
8495 if (!ISA_HAS_EXT_INS
8496 || !register_operand (op
, VOIDmode
)
8497 || GET_MODE_BITSIZE (GET_MODE (op
)) > BITS_PER_WORD
)
8500 if (!IN_RANGE (width
, 1, GET_MODE_BITSIZE (GET_MODE (op
)) - 1))
8503 if (bitpos
< 0 || bitpos
+ width
> GET_MODE_BITSIZE (GET_MODE (op
)))
8509 /* Check if MASK and SHIFT are valid in mask-low-and-shift-left
8510 operation if MAXLEN is the maxium length of consecutive bits that
8511 can make up MASK. MODE is the mode of the operation. See
8512 mask_low_and_shift_len for the actual definition. */
8515 mask_low_and_shift_p (machine_mode mode
, rtx mask
, rtx shift
, int maxlen
)
8517 return IN_RANGE (mask_low_and_shift_len (mode
, mask
, shift
), 1, maxlen
);
8520 /* Return true iff OP1 and OP2 are valid operands together for the
8521 *and<MODE>3 and *and<MODE>3_mips16 patterns. For the cases to consider,
8522 see the table in the comment before the pattern. */
8525 and_operands_ok (machine_mode mode
, rtx op1
, rtx op2
)
8528 if (memory_operand (op1
, mode
))
8530 if (TARGET_MIPS16
) {
8531 struct mips_address_info addr
;
8532 if (!mips_classify_address (&addr
, op1
, mode
, false))
8535 return and_load_operand (op2
, mode
);
8538 return and_reg_operand (op2
, mode
);
8541 /* The canonical form of a mask-low-and-shift-left operation is
8542 (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
8543 cleared. Thus we need to shift MASK to the right before checking if it
8544 is a valid mask value. MODE is the mode of the operation. If true
8545 return the length of the mask, otherwise return -1. */
8548 mask_low_and_shift_len (machine_mode mode
, rtx mask
, rtx shift
)
8550 HOST_WIDE_INT shval
;
8552 shval
= INTVAL (shift
) & (GET_MODE_BITSIZE (mode
) - 1);
8553 return exact_log2 ((UINTVAL (mask
) >> shval
) + 1);
8556 /* Return true if -msplit-addresses is selected and should be honored.
8558 -msplit-addresses is a half-way house between explicit relocations
8559 and the traditional assembler macros. It can split absolute 32-bit
8560 symbolic constants into a high/lo_sum pair but uses macros for other
8563 Like explicit relocation support for REL targets, it relies
8564 on GNU extensions in the assembler and the linker.
8566 Although this code should work for -O0, it has traditionally
8567 been treated as an optimization. */
8570 mips_split_addresses_p (void)
8572 return (TARGET_SPLIT_ADDRESSES
8576 && !ABI_HAS_64BIT_SYMBOLS
);
8579 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
8582 mips_init_relocs (void)
8584 memset (mips_split_p
, '\0', sizeof (mips_split_p
));
8585 memset (mips_split_hi_p
, '\0', sizeof (mips_split_hi_p
));
8586 memset (mips_use_pcrel_pool_p
, '\0', sizeof (mips_use_pcrel_pool_p
));
8587 memset (mips_hi_relocs
, '\0', sizeof (mips_hi_relocs
));
8588 memset (mips_lo_relocs
, '\0', sizeof (mips_lo_relocs
));
8590 if (TARGET_MIPS16_PCREL_LOADS
)
8591 mips_use_pcrel_pool_p
[SYMBOL_ABSOLUTE
] = true;
8594 if (ABI_HAS_64BIT_SYMBOLS
)
8596 if (TARGET_EXPLICIT_RELOCS
)
8598 mips_split_p
[SYMBOL_64_HIGH
] = true;
8599 mips_hi_relocs
[SYMBOL_64_HIGH
] = "%highest(";
8600 mips_lo_relocs
[SYMBOL_64_HIGH
] = "%higher(";
8602 mips_split_p
[SYMBOL_64_MID
] = true;
8603 mips_hi_relocs
[SYMBOL_64_MID
] = "%higher(";
8604 mips_lo_relocs
[SYMBOL_64_MID
] = "%hi(";
8606 mips_split_p
[SYMBOL_64_LOW
] = true;
8607 mips_hi_relocs
[SYMBOL_64_LOW
] = "%hi(";
8608 mips_lo_relocs
[SYMBOL_64_LOW
] = "%lo(";
8610 mips_split_p
[SYMBOL_ABSOLUTE
] = true;
8611 mips_lo_relocs
[SYMBOL_ABSOLUTE
] = "%lo(";
8616 if (TARGET_EXPLICIT_RELOCS
8617 || mips_split_addresses_p ()
8620 mips_split_p
[SYMBOL_ABSOLUTE
] = true;
8621 mips_hi_relocs
[SYMBOL_ABSOLUTE
] = "%hi(";
8622 mips_lo_relocs
[SYMBOL_ABSOLUTE
] = "%lo(";
8629 /* The high part is provided by a pseudo copy of $gp. */
8630 mips_split_p
[SYMBOL_GP_RELATIVE
] = true;
8631 mips_lo_relocs
[SYMBOL_GP_RELATIVE
] = "%gprel(";
8633 else if (TARGET_EXPLICIT_RELOCS
)
8634 /* Small data constants are kept whole until after reload,
8635 then lowered by mips_rewrite_small_data. */
8636 mips_lo_relocs
[SYMBOL_GP_RELATIVE
] = "%gp_rel(";
8638 if (TARGET_EXPLICIT_RELOCS
)
8640 mips_split_p
[SYMBOL_GOT_PAGE_OFST
] = true;
8643 mips_lo_relocs
[SYMBOL_GOTOFF_PAGE
] = "%got_page(";
8644 mips_lo_relocs
[SYMBOL_GOT_PAGE_OFST
] = "%got_ofst(";
8648 mips_lo_relocs
[SYMBOL_GOTOFF_PAGE
] = "%got(";
8649 mips_lo_relocs
[SYMBOL_GOT_PAGE_OFST
] = "%lo(";
8652 /* Expose the use of $28 as soon as possible. */
8653 mips_split_hi_p
[SYMBOL_GOT_PAGE_OFST
] = true;
8657 /* The HIGH and LO_SUM are matched by special .md patterns. */
8658 mips_split_p
[SYMBOL_GOT_DISP
] = true;
8660 mips_split_p
[SYMBOL_GOTOFF_DISP
] = true;
8661 mips_hi_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_hi(";
8662 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_lo(";
8664 mips_split_p
[SYMBOL_GOTOFF_CALL
] = true;
8665 mips_hi_relocs
[SYMBOL_GOTOFF_CALL
] = "%call_hi(";
8666 mips_lo_relocs
[SYMBOL_GOTOFF_CALL
] = "%call_lo(";
8671 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got_disp(";
8673 mips_lo_relocs
[SYMBOL_GOTOFF_DISP
] = "%got(";
8674 mips_lo_relocs
[SYMBOL_GOTOFF_CALL
] = "%call16(";
8676 /* Expose the use of $28 as soon as possible. */
8677 mips_split_p
[SYMBOL_GOT_DISP
] = true;
8683 mips_split_p
[SYMBOL_GOTOFF_LOADGP
] = true;
8684 mips_hi_relocs
[SYMBOL_GOTOFF_LOADGP
] = "%hi(%neg(%gp_rel(";
8685 mips_lo_relocs
[SYMBOL_GOTOFF_LOADGP
] = "%lo(%neg(%gp_rel(";
8688 mips_lo_relocs
[SYMBOL_TLSGD
] = "%tlsgd(";
8689 mips_lo_relocs
[SYMBOL_TLSLDM
] = "%tlsldm(";
8691 if (TARGET_MIPS16_PCREL_LOADS
)
8693 mips_use_pcrel_pool_p
[SYMBOL_DTPREL
] = true;
8694 mips_use_pcrel_pool_p
[SYMBOL_TPREL
] = true;
8698 mips_split_p
[SYMBOL_DTPREL
] = true;
8699 mips_hi_relocs
[SYMBOL_DTPREL
] = "%dtprel_hi(";
8700 mips_lo_relocs
[SYMBOL_DTPREL
] = "%dtprel_lo(";
8702 mips_split_p
[SYMBOL_TPREL
] = true;
8703 mips_hi_relocs
[SYMBOL_TPREL
] = "%tprel_hi(";
8704 mips_lo_relocs
[SYMBOL_TPREL
] = "%tprel_lo(";
8707 mips_lo_relocs
[SYMBOL_GOTTPREL
] = "%gottprel(";
8708 mips_lo_relocs
[SYMBOL_HALF
] = "%half(";
8711 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
8712 in context CONTEXT. RELOCS is the array of relocations to use. */
8715 mips_print_operand_reloc (FILE *file
, rtx op
, enum mips_symbol_context context
,
8716 const char **relocs
)
8718 enum mips_symbol_type symbol_type
;
8721 symbol_type
= mips_classify_symbolic_expression (op
, context
);
8722 gcc_assert (relocs
[symbol_type
]);
8724 fputs (relocs
[symbol_type
], file
);
8725 output_addr_const (file
, mips_strip_unspec_address (op
));
8726 for (p
= relocs
[symbol_type
]; *p
!= 0; p
++)
8731 /* Start a new block with the given asm switch enabled. If we need
8732 to print a directive, emit PREFIX before it and SUFFIX after it. */
8735 mips_push_asm_switch_1 (struct mips_asm_switch
*asm_switch
,
8736 const char *prefix
, const char *suffix
)
8738 if (asm_switch
->nesting_level
== 0)
8739 fprintf (asm_out_file
, "%s.set\tno%s%s", prefix
, asm_switch
->name
, suffix
);
8740 asm_switch
->nesting_level
++;
8743 /* Likewise, but end a block. */
8746 mips_pop_asm_switch_1 (struct mips_asm_switch
*asm_switch
,
8747 const char *prefix
, const char *suffix
)
8749 gcc_assert (asm_switch
->nesting_level
);
8750 asm_switch
->nesting_level
--;
8751 if (asm_switch
->nesting_level
== 0)
8752 fprintf (asm_out_file
, "%s.set\t%s%s", prefix
, asm_switch
->name
, suffix
);
/* Wrapper around mips_push_asm_switch_1 that either prints a complete
   line or prints nothing.  */

static void
mips_push_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_push_asm_switch_1 (asm_switch, "\t", "\n");
}
/* Wrapper around mips_pop_asm_switch_1 that either prints a complete
   line or prints nothing.  */

static void
mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
}
8770 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
8771 The punctuation characters are:
8773 '(' Start a nested ".set noreorder" block.
8774 ')' End a nested ".set noreorder" block.
8775 '[' Start a nested ".set noat" block.
8776 ']' End a nested ".set noat" block.
8777 '<' Start a nested ".set nomacro" block.
8778 '>' End a nested ".set nomacro" block.
8779 '*' Behave like %(%< if generating a delayed-branch sequence.
8780 '#' Print a nop if in a ".set noreorder" block.
8781 '/' Like '#', but do nothing within a delayed-branch sequence.
8782 '?' Print "l" if mips_branch_likely is true
8783 '~' Print a nop if mips_branch_likely is true
8784 '.' Print the name of the register with a hard-wired zero (zero or $0).
8785 '@' Print the name of the assembler temporary register (at or $1).
8786 '^' Print the name of the pic call-through register (t9 or $25).
8787 '+' Print the name of the gp register (usually gp or $28).
8788 '$' Print the name of the stack pointer register (sp or $29).
8789 ':' Print "c" to use the compact version if the delay slot is a nop.
8790 '!' Print "s" to use the short version if the delay slot contains a
8793 See also mips_init_print_operand_punct. */
8796 mips_print_operand_punctuation (FILE *file
, int ch
)
8801 mips_push_asm_switch_1 (&mips_noreorder
, "", "\n\t");
8805 mips_pop_asm_switch_1 (&mips_noreorder
, "\n\t", "");
8809 mips_push_asm_switch_1 (&mips_noat
, "", "\n\t");
8813 mips_pop_asm_switch_1 (&mips_noat
, "\n\t", "");
8817 mips_push_asm_switch_1 (&mips_nomacro
, "", "\n\t");
8821 mips_pop_asm_switch_1 (&mips_nomacro
, "\n\t", "");
8825 if (final_sequence
!= 0)
8827 mips_print_operand_punctuation (file
, '(');
8828 mips_print_operand_punctuation (file
, '<');
8833 if (mips_noreorder
.nesting_level
> 0)
8834 fputs ("\n\tnop", file
);
8838 /* Print an extra newline so that the delayed insn is separated
8839 from the following ones. This looks neater and is consistent
8840 with non-nop delayed sequences. */
8841 if (mips_noreorder
.nesting_level
> 0 && final_sequence
== 0)
8842 fputs ("\n\tnop\n", file
);
8846 if (mips_branch_likely
)
8851 if (mips_branch_likely
)
8852 fputs ("\n\tnop", file
);
8856 fputs (reg_names
[GP_REG_FIRST
+ 0], file
);
8860 fputs (reg_names
[AT_REGNUM
], file
);
8864 fputs (reg_names
[PIC_FUNCTION_ADDR_REGNUM
], file
);
8868 fputs (reg_names
[PIC_OFFSET_TABLE_REGNUM
], file
);
8872 fputs (reg_names
[STACK_POINTER_REGNUM
], file
);
8876 /* When final_sequence is 0, the delay slot will be a nop. We can
8877 use the compact version where available. The %: formatter will
8878 only be present if a compact form of the branch is available. */
8879 if (final_sequence
== 0)
8884 /* If the delay slot instruction is short, then use the
8886 if (TARGET_MICROMIPS
&& !TARGET_INTERLINK_COMPRESSED
&& mips_isa_rev
<= 5
8887 && (final_sequence
== 0
8888 || get_attr_length (final_sequence
->insn (1)) == 2))
8898 /* Initialize mips_print_operand_punct. */
8901 mips_init_print_operand_punct (void)
8905 for (p
= "()[]<>*#/?~.@^+$:!"; *p
; p
++)
8906 mips_print_operand_punct
[(unsigned char) *p
] = true;
8909 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
8910 associated with condition CODE. Print the condition part of the
8914 mips_print_int_branch_condition (FILE *file
, enum rtx_code code
, int letter
)
8928 /* Conveniently, the MIPS names for these conditions are the same
8929 as their RTL equivalents. */
8930 fputs (GET_RTX_NAME (code
), file
);
8934 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter
);
8939 /* Likewise floating-point branches. */
8942 mips_print_float_branch_condition (FILE *file
, enum rtx_code code
, int letter
)
8948 fputs ("c1eqz", file
);
8950 fputs ("c1f", file
);
8955 fputs ("c1nez", file
);
8957 fputs ("c1t", file
);
8961 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter
);
8966 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8969 mips_print_operand_punct_valid_p (unsigned char code
)
8971 return mips_print_operand_punct
[code
];
8974 /* Implement TARGET_PRINT_OPERAND. The MIPS-specific operand codes are:
8976 'E' Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal.
8977 'X' Print CONST_INT OP in hexadecimal format.
8978 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
8979 'd' Print CONST_INT OP in decimal.
8980 'B' Print CONST_INT OP element 0 of a replicated CONST_VECTOR
8981 as an unsigned byte [0..255].
8982 'm' Print one less than CONST_INT OP in decimal.
8983 'y' Print exact log2 of CONST_INT OP in decimal.
8984 'h' Print the high-part relocation associated with OP, after stripping
8986 'R' Print the low-part relocation associated with OP.
8987 'C' Print the integer branch condition for comparison OP.
8988 'N' Print the inverse of the integer branch condition for comparison OP.
8989 'F' Print the FPU branch condition for comparison OP.
8990 'W' Print the inverse of the FPU branch condition for comparison OP.
8991 'w' Print a MSA register.
8992 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
8993 'z' for (eq:?I ...), 'n' for (ne:?I ...).
8994 't' Like 'T', but with the EQ/NE cases reversed
8995 'Y' Print mips_fp_conditions[INTVAL (OP)]
8996 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
8997 'q' Print a DSP accumulator register.
8998 'D' Print the second part of a double-word register or memory operand.
8999 'L' Print the low-order register in a double-word register operand.
9000 'M' Print high-order register in a double-word register operand.
9001 'z' Print $0 if OP is zero, otherwise print OP normally.
9002 'b' Print the address of a memory operand, without offset.
9003 'v' Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI,
9004 V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively.
9005 'V' Print exact log2 of CONST_INT OP element 0 of a replicated
9006 CONST_VECTOR in decimal. */
9009 mips_print_operand (FILE *file
, rtx op
, int letter
)
9013 if (mips_print_operand_punct_valid_p (letter
))
9015 mips_print_operand_punctuation (file
, letter
);
9020 code
= GET_CODE (op
);
9025 if (GET_CODE (op
) == CONST_VECTOR
)
9027 gcc_assert (mips_const_vector_same_val_p (op
, GET_MODE (op
)));
9028 op
= CONST_VECTOR_ELT (op
, 0);
9029 gcc_assert (CONST_INT_P (op
));
9030 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (op
));
9033 output_operand_lossage ("invalid use of '%%%c'", letter
);
9037 if (CONST_INT_P (op
))
9038 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (op
));
9040 output_operand_lossage ("invalid use of '%%%c'", letter
);
9044 if (CONST_INT_P (op
))
9045 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (op
) & 0xffff);
9047 output_operand_lossage ("invalid use of '%%%c'", letter
);
9051 if (CONST_INT_P (op
))
9052 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (op
));
9054 output_operand_lossage ("invalid use of '%%%c'", letter
);
9058 if (GET_CODE (op
) == CONST_VECTOR
)
9060 gcc_assert (mips_const_vector_same_val_p (op
, GET_MODE (op
)));
9061 op
= CONST_VECTOR_ELT (op
, 0);
9062 gcc_assert (CONST_INT_P (op
));
9063 unsigned HOST_WIDE_INT val8
= UINTVAL (op
) & GET_MODE_MASK (QImode
);
9064 fprintf (file
, HOST_WIDE_INT_PRINT_UNSIGNED
, val8
);
9067 output_operand_lossage ("invalid use of '%%%c'", letter
);
9071 if (CONST_INT_P (op
))
9072 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (op
) - 1);
9074 output_operand_lossage ("invalid use of '%%%c'", letter
);
9078 if (CONST_INT_P (op
))
9080 int val
= exact_log2 (INTVAL (op
));
9082 fprintf (file
, "%d", val
);
9084 output_operand_lossage ("invalid use of '%%%c'", letter
);
9087 output_operand_lossage ("invalid use of '%%%c'", letter
);
9091 if (GET_CODE (op
) == CONST_VECTOR
)
9093 machine_mode mode
= GET_MODE_INNER (GET_MODE (op
));
9094 unsigned HOST_WIDE_INT val
= UINTVAL (CONST_VECTOR_ELT (op
, 0));
9095 int vlog2
= exact_log2 (val
& GET_MODE_MASK (mode
));
9097 fprintf (file
, "%d", vlog2
);
9099 output_operand_lossage ("invalid use of '%%%c'", letter
);
9102 output_operand_lossage ("invalid use of '%%%c'", letter
);
9108 mips_print_operand_reloc (file
, op
, SYMBOL_CONTEXT_LEA
, mips_hi_relocs
);
9112 mips_print_operand_reloc (file
, op
, SYMBOL_CONTEXT_LEA
, mips_lo_relocs
);
9116 mips_print_int_branch_condition (file
, code
, letter
);
9120 mips_print_int_branch_condition (file
, reverse_condition (code
), letter
);
9124 mips_print_float_branch_condition (file
, code
, letter
);
9128 mips_print_float_branch_condition (file
, reverse_condition (code
),
9135 int truth
= (code
== NE
) == (letter
== 'T');
9136 fputc ("zfnt"[truth
* 2 + ST_REG_P (REGNO (XEXP (op
, 0)))], file
);
9141 if (code
== CONST_INT
&& UINTVAL (op
) < ARRAY_SIZE (mips_fp_conditions
))
9142 fputs (mips_fp_conditions
[UINTVAL (op
)], file
);
9144 output_operand_lossage ("'%%%c' is not a valid operand prefix",
9149 if (ISA_HAS_8CC
|| ISA_HAS_CCF
)
9151 mips_print_operand (file
, op
, 0);
9157 if (code
== REG
&& MD_REG_P (REGNO (op
)))
9158 fprintf (file
, "$ac0");
9159 else if (code
== REG
&& DSP_ACC_REG_P (REGNO (op
)))
9160 fprintf (file
, "$ac%c", reg_names
[REGNO (op
)][3]);
9162 output_operand_lossage ("invalid use of '%%%c'", letter
);
9166 if (code
== REG
&& MSA_REG_P (REGNO (op
)))
9167 fprintf (file
, "$w%s", ®_names
[REGNO (op
)][2]);
9169 output_operand_lossage ("invalid use of '%%%c'", letter
);
9173 switch (GET_MODE (op
))
9176 fprintf (file
, "b");
9179 fprintf (file
, "h");
9183 fprintf (file
, "w");
9187 fprintf (file
, "d");
9190 output_operand_lossage ("invalid use of '%%%c'", letter
);
9199 unsigned int regno
= REGNO (op
);
9200 if ((letter
== 'M' && TARGET_LITTLE_ENDIAN
)
9201 || (letter
== 'L' && TARGET_BIG_ENDIAN
)
9204 else if (letter
&& letter
!= 'z' && letter
!= 'M' && letter
!= 'L')
9205 output_operand_lossage ("invalid use of '%%%c'", letter
);
9206 /* We need to print $0 .. $31 for COP0 registers. */
9207 if (COP0_REG_P (regno
))
9208 fprintf (file
, "$%s", ®_names
[regno
][4]);
9210 fprintf (file
, "%s", reg_names
[regno
]);
9216 output_address (GET_MODE (op
), plus_constant (Pmode
,
9218 else if (letter
== 'b')
9220 gcc_assert (REG_P (XEXP (op
, 0)));
9221 mips_print_operand (file
, XEXP (op
, 0), 0);
9223 else if (letter
&& letter
!= 'z')
9224 output_operand_lossage ("invalid use of '%%%c'", letter
);
9226 output_address (GET_MODE (op
), XEXP (op
, 0));
9230 if (letter
== 'z' && op
== CONST0_RTX (GET_MODE (op
)))
9231 fputs (reg_names
[GP_REG_FIRST
], file
);
9232 else if (letter
&& letter
!= 'z')
9233 output_operand_lossage ("invalid use of '%%%c'", letter
);
9234 else if (CONST_GP_P (op
))
9235 fputs (reg_names
[GLOBAL_POINTER_REGNUM
], file
);
9237 output_addr_const (file
, mips_strip_unspec_address (op
));
9243 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9246 mips_print_operand_address (FILE *file
, machine_mode
/*mode*/, rtx x
)
9248 struct mips_address_info addr
;
9250 if (mips_classify_address (&addr
, x
, word_mode
, true))
9254 mips_print_operand (file
, addr
.offset
, 0);
9255 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
9258 case ADDRESS_LO_SUM
:
9259 mips_print_operand_reloc (file
, addr
.offset
, SYMBOL_CONTEXT_MEM
,
9261 fprintf (file
, "(%s)", reg_names
[REGNO (addr
.reg
)]);
9264 case ADDRESS_CONST_INT
:
9265 output_addr_const (file
, x
);
9266 fprintf (file
, "(%s)", reg_names
[GP_REG_FIRST
]);
9269 case ADDRESS_SYMBOLIC
:
9270 output_addr_const (file
, mips_strip_unspec_address (x
));
9276 /* Implement TARGET_ENCODE_SECTION_INFO. */
9279 mips_encode_section_info (tree decl
, rtx rtl
, int first
)
9281 default_encode_section_info (decl
, rtl
, first
);
9283 if (TREE_CODE (decl
) == FUNCTION_DECL
)
9285 rtx symbol
= XEXP (rtl
, 0);
9286 tree type
= TREE_TYPE (decl
);
9288 /* Encode whether the symbol is short or long. */
9289 if ((TARGET_LONG_CALLS
&& !mips_near_type_p (type
))
9290 || mips_far_type_p (type
))
9291 SYMBOL_REF_FLAGS (symbol
) |= SYMBOL_FLAG_LONG_CALL
;
9295 /* Implement TARGET_SELECT_RTX_SECTION. */
9298 mips_select_rtx_section (machine_mode mode
, rtx x
,
9299 unsigned HOST_WIDE_INT align
)
9301 /* ??? Consider using mergeable small data sections. */
9302 if (mips_rtx_constant_in_small_data_p (mode
))
9303 return get_named_section (NULL
, ".sdata", 0);
9305 return default_elf_select_rtx_section (mode
, x
, align
);
9308 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
9310 The complication here is that, with the combination TARGET_ABICALLS
9311 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
9312 absolute addresses, and should therefore not be included in the
9313 read-only part of a DSO. Handle such cases by selecting a normal
9314 data section instead of a read-only one. The logic apes that in
9315 default_function_rodata_section. */
9318 mips_function_rodata_section (tree decl
)
9320 if (!TARGET_ABICALLS
|| TARGET_ABSOLUTE_ABICALLS
|| TARGET_GPWORD
)
9321 return default_function_rodata_section (decl
);
9323 if (decl
&& DECL_SECTION_NAME (decl
))
9325 const char *name
= DECL_SECTION_NAME (decl
);
9326 if (DECL_COMDAT_GROUP (decl
) && strncmp (name
, ".gnu.linkonce.t.", 16) == 0)
9328 char *rname
= ASTRDUP (name
);
9330 return get_section (rname
, SECTION_LINKONCE
| SECTION_WRITE
, decl
);
9332 else if (flag_function_sections
9333 && flag_data_sections
9334 && strncmp (name
, ".text.", 6) == 0)
9336 char *rname
= ASTRDUP (name
);
9337 memcpy (rname
+ 1, "data", 4);
9338 return get_section (rname
, SECTION_WRITE
, decl
);
9341 return data_section
;
9344 /* Implement TARGET_IN_SMALL_DATA_P. */
9347 mips_in_small_data_p (const_tree decl
)
9349 unsigned HOST_WIDE_INT size
;
9351 if (TREE_CODE (decl
) == STRING_CST
|| TREE_CODE (decl
) == FUNCTION_DECL
)
9354 /* We don't yet generate small-data references for -mabicalls
9355 or VxWorks RTP code. See the related -G handling in
9356 mips_option_override. */
9357 if (TARGET_ABICALLS
|| TARGET_VXWORKS_RTP
)
9360 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_SECTION_NAME (decl
) != 0)
9364 /* Reject anything that isn't in a known small-data section. */
9365 name
= DECL_SECTION_NAME (decl
);
9366 if (strcmp (name
, ".sdata") != 0 && strcmp (name
, ".sbss") != 0)
9369 /* If a symbol is defined externally, the assembler will use the
9370 usual -G rules when deciding how to implement macros. */
9371 if (mips_lo_relocs
[SYMBOL_GP_RELATIVE
] || !DECL_EXTERNAL (decl
))
9374 else if (TARGET_EMBEDDED_DATA
)
9376 /* Don't put constants into the small data section: we want them
9377 to be in ROM rather than RAM. */
9378 if (TREE_CODE (decl
) != VAR_DECL
)
9381 if (TREE_READONLY (decl
)
9382 && !TREE_SIDE_EFFECTS (decl
)
9383 && (!DECL_INITIAL (decl
) || TREE_CONSTANT (DECL_INITIAL (decl
))))
9387 /* Enforce -mlocal-sdata. */
9388 if (!TARGET_LOCAL_SDATA
&& !TREE_PUBLIC (decl
))
9391 /* Enforce -mextern-sdata. */
9392 if (!TARGET_EXTERN_SDATA
&& DECL_P (decl
))
9394 if (DECL_EXTERNAL (decl
))
9396 if (DECL_COMMON (decl
) && DECL_INITIAL (decl
) == NULL
)
9400 /* We have traditionally not treated zero-sized objects as small data,
9401 so this is now effectively part of the ABI. */
9402 size
= int_size_in_bytes (TREE_TYPE (decl
));
9403 return size
> 0 && size
<= mips_small_data_threshold
;
9406 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
9407 anchors for small data: the GP register acts as an anchor in that
9408 case. We also don't want to use them for PC-relative accesses,
9409 where the PC acts as an anchor. */
9412 mips_use_anchors_for_symbol_p (const_rtx symbol
)
9414 switch (mips_classify_symbol (symbol
, SYMBOL_CONTEXT_MEM
))
9416 case SYMBOL_PC_RELATIVE
:
9417 case SYMBOL_GP_RELATIVE
:
9421 return default_use_anchors_for_symbol_p (symbol
);
9425 /* The MIPS debug format wants all automatic variables and arguments
9426 to be in terms of the virtual frame pointer (stack pointer before
9427 any adjustment in the function), while the MIPS 3.0 linker wants
9428 the frame pointer to be the stack pointer after the initial
9429 adjustment. So, we do the adjustment here. The arg pointer (which
9430 is eliminated) points to the virtual frame pointer, while the frame
9431 pointer (which may be eliminated) points to the stack pointer after
9432 the initial adjustments. */
9435 mips_debugger_offset (rtx addr
, HOST_WIDE_INT offset
)
9437 rtx offset2
= const0_rtx
;
9438 rtx reg
= eliminate_constant_term (addr
, &offset2
);
9441 offset
= INTVAL (offset2
);
9443 if (reg
== stack_pointer_rtx
9444 || reg
== frame_pointer_rtx
9445 || reg
== hard_frame_pointer_rtx
)
9447 offset
-= cfun
->machine
->frame
.total_size
;
9448 if (reg
== hard_frame_pointer_rtx
)
9449 offset
+= cfun
->machine
->frame
.hard_frame_pointer_offset
;
9455 /* Implement ASM_OUTPUT_EXTERNAL. */
9458 mips_output_external (FILE *file
, tree decl
, const char *name
)
9460 default_elf_asm_output_external (file
, decl
, name
);
9462 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9463 set in order to avoid putting out names that are never really
9465 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl
)))
9467 if (!TARGET_EXPLICIT_RELOCS
&& mips_in_small_data_p (decl
))
9469 /* When using assembler macros, emit .extern directives for
9470 all small-data externs so that the assembler knows how
9473 In most cases it would be safe (though pointless) to emit
9474 .externs for other symbols too. One exception is when an
9475 object is within the -G limit but declared by the user to
9476 be in a section other than .sbss or .sdata. */
9477 fputs ("\t.extern\t", file
);
9478 assemble_name (file
, name
);
9479 fprintf (file
, ", " HOST_WIDE_INT_PRINT_DEC
"\n",
9480 int_size_in_bytes (TREE_TYPE (decl
)));
9485 /* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME. */
9488 mips_output_filename (FILE *stream
, const char *name
)
9490 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
9492 if (write_symbols
== DWARF2_DEBUG
)
9494 else if (mips_output_filename_first_time
)
9496 mips_output_filename_first_time
= 0;
9497 num_source_filenames
+= 1;
9498 current_function_file
= name
;
9499 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
9500 output_quoted_string (stream
, name
);
9501 putc ('\n', stream
);
9503 /* If we are emitting stabs, let dbxout.c handle this (except for
9504 the mips_output_filename_first_time case). */
9505 else if (write_symbols
== DBX_DEBUG
)
9507 else if (name
!= current_function_file
9508 && strcmp (name
, current_function_file
) != 0)
9510 num_source_filenames
+= 1;
9511 current_function_file
= name
;
9512 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
9513 output_quoted_string (stream
, name
);
9514 putc ('\n', stream
);
9518 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
9520 static void ATTRIBUTE_UNUSED
9521 mips_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
9526 fputs ("\t.dtprelword\t", file
);
9530 fputs ("\t.dtpreldword\t", file
);
9536 output_addr_const (file
, x
);
9537 fputs ("+0x8000", file
);
9540 /* Implement TARGET_DWARF_REGISTER_SPAN. */
9543 mips_dwarf_register_span (rtx reg
)
9548 /* TARGET_FLOATXX is implemented as 32-bit floating-point registers but
9549 ensures that double-precision registers are treated as if they were
9550 64-bit physical registers. The code will run correctly with 32-bit or
9551 64-bit registers which means that dwarf information cannot be precise
9552 for all scenarios. We choose to state that the 64-bit values are stored
9553 in a single 64-bit 'piece'. This slightly unusual construct can then be
9554 interpreted as either a pair of registers if the registers are 32-bit or
9555 a single 64-bit register depending on hardware. */
9556 mode
= GET_MODE (reg
);
9557 if (FP_REG_P (REGNO (reg
))
9559 && GET_MODE_SIZE (mode
) > UNITS_PER_FPREG
)
9561 return gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, reg
));
9563 /* By default, GCC maps increasing register numbers to increasing
9564 memory locations, but paired FPRs are always little-endian,
9565 regardless of the prevailing endianness. */
9566 else if (FP_REG_P (REGNO (reg
))
9567 && TARGET_BIG_ENDIAN
9568 && MAX_FPRS_PER_FMT
> 1
9569 && GET_MODE_SIZE (mode
) > UNITS_PER_FPREG
)
9571 gcc_assert (GET_MODE_SIZE (mode
) == UNITS_PER_HWFPVALUE
);
9572 high
= mips_subword (reg
, true);
9573 low
= mips_subword (reg
, false);
9574 return gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, high
, low
));
9580 /* Implement TARGET_DWARF_FRAME_REG_MODE. */
9583 mips_dwarf_frame_reg_mode (int regno
)
9585 machine_mode mode
= default_dwarf_frame_reg_mode (regno
);
9587 if (FP_REG_P (regno
) && mips_abi
== ABI_32
&& !TARGET_FLOAT32
)
9593 /* DSP ALU can bypass data with no delays for the following pairs. */
9594 enum insn_code dspalu_bypass_table
[][2] =
9596 {CODE_FOR_mips_addsc
, CODE_FOR_mips_addwc
},
9597 {CODE_FOR_mips_cmpu_eq_qb
, CODE_FOR_mips_pick_qb
},
9598 {CODE_FOR_mips_cmpu_lt_qb
, CODE_FOR_mips_pick_qb
},
9599 {CODE_FOR_mips_cmpu_le_qb
, CODE_FOR_mips_pick_qb
},
9600 {CODE_FOR_mips_cmp_eq_ph
, CODE_FOR_mips_pick_ph
},
9601 {CODE_FOR_mips_cmp_lt_ph
, CODE_FOR_mips_pick_ph
},
9602 {CODE_FOR_mips_cmp_le_ph
, CODE_FOR_mips_pick_ph
},
9603 {CODE_FOR_mips_wrdsp
, CODE_FOR_mips_insv
}
9607 mips_dspalu_bypass_p (rtx out_insn
, rtx in_insn
)
9610 int num_bypass
= ARRAY_SIZE (dspalu_bypass_table
);
9611 enum insn_code out_icode
= (enum insn_code
) INSN_CODE (out_insn
);
9612 enum insn_code in_icode
= (enum insn_code
) INSN_CODE (in_insn
);
9614 for (i
= 0; i
< num_bypass
; i
++)
9616 if (out_icode
== dspalu_bypass_table
[i
][0]
9617 && in_icode
== dspalu_bypass_table
[i
][1])
/* Implement ASM_OUTPUT_ASCII.  Emit LEN bytes of STRING to STREAM as
   one or more ".ascii" directives, escaping non-printable characters
   and breaking lines around column 72 for readability.  */

void
mips_output_ascii (FILE *stream, const char *string, size_t len)
{
  size_t i;
  int cur_pos;

  /* Column after "\t.ascii\t\"" has been printed.  */
  cur_pos = 17;
  fprintf (stream, "\t.ascii\t\"");
  for (i = 0; i < len; i++)
    {
      int c;

      c = (unsigned char) string[i];
      if (ISPRINT (c))
	{
	  if (c == '\\' || c == '\"')
	    {
	      putc ('\\', stream);
	      cur_pos++;
	    }
	  putc (c, stream);
	  cur_pos++;
	}
      else
	{
	  fprintf (stream, "\\%03o", c);
	  cur_pos += 4;
	}

      if (cur_pos > 72 && i+1 < len)
	{
	  cur_pos = 17;
	  fprintf (stream, "\"\n\t.ascii\t\"");
	}
    }
  fprintf (stream, "\"\n");
}
9663 /* Return the pseudo-op for full SYMBOL_(D)TPREL address *ADDR.
9664 Update *ADDR with the operand that should be printed. */
9667 mips_output_tls_reloc_directive (rtx
*addr
)
9669 enum mips_symbol_type type
;
9671 type
= mips_classify_symbolic_expression (*addr
, SYMBOL_CONTEXT_LEA
);
9672 *addr
= mips_strip_unspec_address (*addr
);
9676 return Pmode
== SImode
? ".dtprelword\t%0" : ".dtpreldword\t%0";
9679 return Pmode
== SImode
? ".tprelword\t%0" : ".tpreldword\t%0";
9686 /* Emit either a label, .comm, or .lcomm directive. When using assembler
9687 macros, mark the symbol as written so that mips_asm_output_external
9688 won't emit an .extern for it. STREAM is the output file, NAME is the
9689 name of the symbol, INIT_STRING is the string that should be written
9690 before the symbol and FINAL_STRING is the string that should be
9691 written after it. FINAL_STRING is a printf format that consumes the
9692 remaining arguments. */
9695 mips_declare_object (FILE *stream
, const char *name
, const char *init_string
,
9696 const char *final_string
, ...)
9700 fputs (init_string
, stream
);
9701 assemble_name (stream
, name
);
9702 va_start (ap
, final_string
);
9703 vfprintf (stream
, final_string
, ap
);
9706 if (!TARGET_EXPLICIT_RELOCS
)
9708 tree name_tree
= get_identifier (name
);
9709 TREE_ASM_WRITTEN (name_tree
) = 1;
9713 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
9714 NAME is the name of the object and ALIGN is the required alignment
9715 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
9716 alignment argument. */
9719 mips_declare_common_object (FILE *stream
, const char *name
,
9720 const char *init_string
,
9721 unsigned HOST_WIDE_INT size
,
9722 unsigned int align
, bool takes_alignment_p
)
9724 if (!takes_alignment_p
)
9726 size
+= (align
/ BITS_PER_UNIT
) - 1;
9727 size
-= size
% (align
/ BITS_PER_UNIT
);
9728 mips_declare_object (stream
, name
, init_string
,
9729 "," HOST_WIDE_INT_PRINT_UNSIGNED
"\n", size
);
9732 mips_declare_object (stream
, name
, init_string
,
9733 "," HOST_WIDE_INT_PRINT_UNSIGNED
",%u\n",
9734 size
, align
/ BITS_PER_UNIT
);
9737 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
9738 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
9741 mips_output_aligned_decl_common (FILE *stream
, tree decl
, const char *name
,
9742 unsigned HOST_WIDE_INT size
,
9745 /* If the target wants uninitialized const declarations in
9746 .rdata then don't put them in .comm. */
9747 if (TARGET_EMBEDDED_DATA
9748 && TARGET_UNINIT_CONST_IN_RODATA
9749 && TREE_CODE (decl
) == VAR_DECL
9750 && TREE_READONLY (decl
)
9751 && (DECL_INITIAL (decl
) == 0 || DECL_INITIAL (decl
) == error_mark_node
))
9753 if (TREE_PUBLIC (decl
) && DECL_NAME (decl
))
9754 targetm
.asm_out
.globalize_label (stream
, name
);
9756 switch_to_section (readonly_data_section
);
9757 ASM_OUTPUT_ALIGN (stream
, floor_log2 (align
/ BITS_PER_UNIT
));
9758 mips_declare_object (stream
, name
, "",
9759 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED
"\n",
9763 mips_declare_common_object (stream
, name
, "\n\t.comm\t",
9767 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
9768 extern int size_directive_output
;
9770 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
9771 definitions except that it uses mips_declare_object to emit the label. */
9774 mips_declare_object_name (FILE *stream
, const char *name
,
9775 tree decl ATTRIBUTE_UNUSED
)
9777 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
9778 ASM_OUTPUT_TYPE_DIRECTIVE (stream
, name
, "object");
9781 size_directive_output
= 0;
9782 if (!flag_inhibit_size_directive
&& DECL_SIZE (decl
))
9786 size_directive_output
= 1;
9787 size
= int_size_in_bytes (TREE_TYPE (decl
));
9788 ASM_OUTPUT_SIZE_DIRECTIVE (stream
, name
, size
);
9791 mips_declare_object (stream
, name
, "", ":\n");
9794 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
9797 mips_finish_declare_object (FILE *stream
, tree decl
, int top_level
, int at_end
)
9801 name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
9802 if (!flag_inhibit_size_directive
9803 && DECL_SIZE (decl
) != 0
9806 && DECL_INITIAL (decl
) == error_mark_node
9807 && !size_directive_output
)
9811 size_directive_output
= 1;
9812 size
= int_size_in_bytes (TREE_TYPE (decl
));
9813 ASM_OUTPUT_SIZE_DIRECTIVE (stream
, name
, size
);
9818 /* Mark text contents as code or data, mainly for the purpose of correct
9819 disassembly. Emit a local symbol and set its type appropriately for
9820 that purpose. Also emit `.insn' if marking contents as code so that
9821 the ISA mode is recorded and any padding that follows is disassembled
9822 as correct instructions. */
9825 mips_set_text_contents_type (FILE *file ATTRIBUTE_UNUSED
,
9826 const char *prefix ATTRIBUTE_UNUSED
,
9827 unsigned long num ATTRIBUTE_UNUSED
,
9828 bool function_p ATTRIBUTE_UNUSED
)
9830 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
9831 char buf
[(sizeof (num
) * 10) / 4 + 2];
9836 sprintf (buf
, "%lu", num
);
9837 symbol
= XEXP (DECL_RTL (current_function_decl
), 0);
9838 fnname
= targetm
.strip_name_encoding (XSTR (symbol
, 0));
9839 sname
= ACONCAT ((prefix
, fnname
, "_", buf
, NULL
));
9841 ASM_OUTPUT_TYPE_DIRECTIVE (file
, sname
, function_p
? "function" : "object");
9842 assemble_name (file
, sname
);
9843 fputs (":\n", file
);
9845 fputs ("\t.insn\n", file
);
9849 /* Return the FOO in the name of the ".mdebug.FOO" section associated
9850 with the current ABI. */
9853 mips_mdebug_abi_name (void)
9866 return TARGET_64BIT
? "eabi64" : "eabi32";
9872 /* Implement TARGET_ASM_FILE_START. */
9875 mips_file_start (void)
9877 default_file_start ();
9879 /* Generate a special section to describe the ABI switches used to
9880 produce the resultant binary. */
9882 /* Record the ABI itself. Modern versions of binutils encode
9883 this information in the ELF header flags, but GDB needs the
9884 information in order to correctly debug binaries produced by
9885 older binutils. See the function mips_gdbarch_init in
9887 fprintf (asm_out_file
, "\t.section .mdebug.%s\n\t.previous\n",
9888 mips_mdebug_abi_name ());
9890 /* There is no ELF header flag to distinguish long32 forms of the
9891 EABI from long64 forms. Emit a special section to help tools
9892 such as GDB. Do the same for o64, which is sometimes used with
9894 if (mips_abi
== ABI_EABI
|| mips_abi
== ABI_O64
)
9895 fprintf (asm_out_file
, "\t.section .gcc_compiled_long%d\n"
9896 "\t.previous\n", TARGET_LONG64
? 64 : 32);
9898 /* Record the NaN encoding. */
9899 if (HAVE_AS_NAN
|| mips_nan
!= MIPS_IEEE_754_DEFAULT
)
9900 fprintf (asm_out_file
, "\t.nan\t%s\n",
9901 mips_nan
== MIPS_IEEE_754_2008
? "2008" : "legacy");
9903 #ifdef HAVE_AS_DOT_MODULE
9904 /* Record the FP ABI. See below for comments. */
9905 if (TARGET_NO_FLOAT
)
9906 #ifdef HAVE_AS_GNU_ATTRIBUTE
9907 fputs ("\t.gnu_attribute 4, 0\n", asm_out_file
);
9911 else if (!TARGET_HARD_FLOAT_ABI
)
9912 fputs ("\t.module\tsoftfloat\n", asm_out_file
);
9913 else if (!TARGET_DOUBLE_FLOAT
)
9914 fputs ("\t.module\tsinglefloat\n", asm_out_file
);
9915 else if (TARGET_FLOATXX
)
9916 fputs ("\t.module\tfp=xx\n", asm_out_file
);
9917 else if (TARGET_FLOAT64
)
9918 fputs ("\t.module\tfp=64\n", asm_out_file
);
9920 fputs ("\t.module\tfp=32\n", asm_out_file
);
9922 if (TARGET_ODD_SPREG
)
9923 fputs ("\t.module\toddspreg\n", asm_out_file
);
9925 fputs ("\t.module\tnooddspreg\n", asm_out_file
);
9928 #ifdef HAVE_AS_GNU_ATTRIBUTE
9932 /* No floating-point operations, -mno-float. */
9933 if (TARGET_NO_FLOAT
)
9935 /* Soft-float code, -msoft-float. */
9936 else if (!TARGET_HARD_FLOAT_ABI
)
9938 /* Single-float code, -msingle-float. */
9939 else if (!TARGET_DOUBLE_FLOAT
)
9941 /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64.
9943 This case used 12 callee-saved double-precision registers
9944 and is deprecated. */
9945 /* 64-bit or 32-bit FP registers on a 32-bit target, -mfpxx. */
9946 else if (TARGET_FLOATXX
)
9948 /* 64-bit FP registers on a 32-bit target, -mfp64 -modd-spreg. */
9949 else if (mips_abi
== ABI_32
&& TARGET_FLOAT64
&& TARGET_ODD_SPREG
)
9951 /* 64-bit FP registers on a 32-bit target, -mfp64 -mno-odd-spreg. */
9952 else if (mips_abi
== ABI_32
&& TARGET_FLOAT64
)
9954 /* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
9958 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n", attr
);
9962 fprintf (asm_out_file
, "\t.gnu_attribute 8, 1\n");
9967 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
9968 if (TARGET_ABICALLS
)
9970 fprintf (asm_out_file
, "\t.abicalls\n");
9971 if (TARGET_ABICALLS_PIC0
)
9972 fprintf (asm_out_file
, "\t.option\tpic0\n");
9975 if (flag_verbose_asm
)
9976 fprintf (asm_out_file
, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
9978 mips_small_data_threshold
, mips_arch_info
->name
, mips_isa
);
9981 /* Implement TARGET_ASM_CODE_END. */
9984 mips_code_end (void)
9986 mips_finish_stub (&mips16_rdhwr_stub
);
9987 mips_finish_stub (&mips16_get_fcsr_stub
);
9988 mips_finish_stub (&mips16_set_fcsr_stub
);
9991 /* Make the last instruction frame-related and note that it performs
9992 the operation described by FRAME_PATTERN. */
9995 mips_set_frame_expr (rtx frame_pattern
)
9999 insn
= get_last_insn ();
10000 RTX_FRAME_RELATED_P (insn
) = 1;
10001 REG_NOTES (insn
) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
10006 /* Return a frame-related rtx that stores REG at MEM.
10007 REG must be a single register. */
10010 mips_frame_set (rtx mem
, rtx reg
)
10014 set
= gen_rtx_SET (mem
, reg
);
10015 RTX_FRAME_RELATED_P (set
) = 1;
10020 /* Record that the epilogue has restored call-saved register REG. */
10023 mips_add_cfa_restore (rtx reg
)
10025 mips_epilogue
.cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
10026 mips_epilogue
.cfa_restores
);
/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
static const unsigned char mips16e_a0_a3_regs[] = {
  7, 6, 5, 4
};

/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};
/* Return the index of the lowest X in the range [0, SIZE) for which
   bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */

static unsigned int
mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
			     unsigned int size)
{
  unsigned int i;

  for (i = 0; i < size; i++)
    if (BITSET_P (mask, regs[i]))
      break;

  return i;
}
/* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
   is the number of set bits.  If *MASK_PTR contains REGS[X] for some X
   in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
   is true for all indexes (X, SIZE).  */

static void
mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
			unsigned int size, unsigned int *num_regs_ptr)
{
  unsigned int i;

  i = mips16e_find_first_register (*mask_ptr, regs, size);
  for (i++; i < size; i++)
    if (!BITSET_P (*mask_ptr, regs[i]))
      {
	*num_regs_ptr += 1;
	*mask_ptr |= 1 << regs[i];
      }
}
10081 /* Return a simplified form of X using the register values in REG_VALUES.
10082 REG_VALUES[R] is the last value assigned to hard register R, or null
10083 if R has not been modified.
10085 This function is rather limited, but is good enough for our purposes. */
10088 mips16e_collect_propagate_value (rtx x
, rtx
*reg_values
)
10090 x
= avoid_constant_pool_reference (x
);
10094 rtx x0
= mips16e_collect_propagate_value (XEXP (x
, 0), reg_values
);
10095 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
),
10096 x0
, GET_MODE (XEXP (x
, 0)));
10099 if (ARITHMETIC_P (x
))
10101 rtx x0
= mips16e_collect_propagate_value (XEXP (x
, 0), reg_values
);
10102 rtx x1
= mips16e_collect_propagate_value (XEXP (x
, 1), reg_values
);
10103 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
), x0
, x1
);
10107 && reg_values
[REGNO (x
)]
10108 && !rtx_unstable_p (reg_values
[REGNO (x
)]))
10109 return reg_values
[REGNO (x
)];
10114 /* Return true if (set DEST SRC) stores an argument register into its
10115 caller-allocated save slot, storing the number of that argument
10116 register in *REGNO_PTR if so. REG_VALUES is as for
10117 mips16e_collect_propagate_value. */
10120 mips16e_collect_argument_save_p (rtx dest
, rtx src
, rtx
*reg_values
,
10121 unsigned int *regno_ptr
)
10123 unsigned int argno
, regno
;
10124 HOST_WIDE_INT offset
, required_offset
;
10127 /* Check that this is a word-mode store. */
10128 if (!MEM_P (dest
) || !REG_P (src
) || GET_MODE (dest
) != word_mode
)
10131 /* Check that the register being saved is an unmodified argument
10133 regno
= REGNO (src
);
10134 if (!IN_RANGE (regno
, GP_ARG_FIRST
, GP_ARG_LAST
) || reg_values
[regno
])
10136 argno
= regno
- GP_ARG_FIRST
;
10138 /* Check whether the address is an appropriate stack-pointer or
10139 frame-pointer access. */
10140 addr
= mips16e_collect_propagate_value (XEXP (dest
, 0), reg_values
);
10141 mips_split_plus (addr
, &base
, &offset
);
10142 required_offset
= cfun
->machine
->frame
.total_size
+ argno
* UNITS_PER_WORD
;
10143 if (base
== hard_frame_pointer_rtx
)
10144 required_offset
-= cfun
->machine
->frame
.hard_frame_pointer_offset
;
10145 else if (base
!= stack_pointer_rtx
)
10147 if (offset
!= required_offset
)
10150 *regno_ptr
= regno
;
10154 /* A subroutine of mips_expand_prologue, called only when generating
10155 MIPS16e SAVE instructions. Search the start of the function for any
10156 instructions that save argument registers into their caller-allocated
10157 save slots. Delete such instructions and return a value N such that
10158 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
10159 instructions redundant. */
10161 static unsigned int
10162 mips16e_collect_argument_saves (void)
10164 rtx reg_values
[FIRST_PSEUDO_REGISTER
];
10165 rtx_insn
*insn
, *next
;
10166 rtx set
, dest
, src
;
10167 unsigned int nargs
, regno
;
10169 push_topmost_sequence ();
10171 memset (reg_values
, 0, sizeof (reg_values
));
10172 for (insn
= get_insns (); insn
; insn
= next
)
10174 next
= NEXT_INSN (insn
);
10175 if (NOTE_P (insn
) || DEBUG_INSN_P (insn
))
10178 if (!INSN_P (insn
))
10181 set
= PATTERN (insn
);
10182 if (GET_CODE (set
) != SET
)
10185 dest
= SET_DEST (set
);
10186 src
= SET_SRC (set
);
10187 if (mips16e_collect_argument_save_p (dest
, src
, reg_values
, ®no
))
10189 if (!BITSET_P (cfun
->machine
->frame
.mask
, regno
))
10191 delete_insn (insn
);
10192 nargs
= MAX (nargs
, (regno
- GP_ARG_FIRST
) + 1);
10195 else if (REG_P (dest
) && GET_MODE (dest
) == word_mode
)
10196 reg_values
[REGNO (dest
)]
10197 = mips16e_collect_propagate_value (src
, reg_values
);
10201 pop_topmost_sequence ();
10206 /* Return a move between register REGNO and memory location SP + OFFSET.
10207 REG_PARM_P is true if SP + OFFSET belongs to REG_PARM_STACK_SPACE.
10208 Make the move a load if RESTORE_P, otherwise make it a store. */
10211 mips16e_save_restore_reg (bool restore_p
, bool reg_parm_p
,
10212 HOST_WIDE_INT offset
, unsigned int regno
)
10216 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, stack_pointer_rtx
,
10218 reg
= gen_rtx_REG (SImode
, regno
);
10221 mips_add_cfa_restore (reg
);
10222 return gen_rtx_SET (reg
, mem
);
10225 return gen_rtx_SET (mem
, reg
);
10226 return mips_frame_set (mem
, reg
);
10229 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
10230 The instruction must:
10232 - Allocate or deallocate SIZE bytes in total; SIZE is known
10235 - Save or restore as many registers in *MASK_PTR as possible.
10236 The instruction saves the first registers at the top of the
10237 allocated area, with the other registers below it.
10239 - Save NARGS argument registers above the allocated area.
10241 (NARGS is always zero if RESTORE_P.)
10243 The SAVE and RESTORE instructions cannot save and restore all general
10244 registers, so there may be some registers left over for the caller to
10245 handle. Destructively modify *MASK_PTR so that it contains the registers
10246 that still need to be saved or restored. The caller can save these
10247 registers in the memory immediately below *OFFSET_PTR, which is a
10248 byte offset from the bottom of the allocated stack area. */
10251 mips16e_build_save_restore (bool restore_p
, unsigned int *mask_ptr
,
10252 HOST_WIDE_INT
*offset_ptr
, unsigned int nargs
,
10253 HOST_WIDE_INT size
)
10256 HOST_WIDE_INT offset
, top_offset
;
10257 unsigned int i
, regno
;
10260 gcc_assert (cfun
->machine
->frame
.num_fp
== 0);
10262 /* Calculate the number of elements in the PARALLEL. We need one element
10263 for the stack adjustment, one for each argument register save, and one
10264 for each additional register move. */
10266 for (i
= 0; i
< ARRAY_SIZE (mips16e_save_restore_regs
); i
++)
10267 if (BITSET_P (*mask_ptr
, mips16e_save_restore_regs
[i
]))
10270 /* Create the final PARALLEL. */
10271 pattern
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (n
));
10274 /* Add the stack pointer adjustment. */
10275 set
= gen_rtx_SET (stack_pointer_rtx
,
10276 plus_constant (Pmode
, stack_pointer_rtx
,
10277 restore_p
? size
: -size
));
10278 RTX_FRAME_RELATED_P (set
) = 1;
10279 XVECEXP (pattern
, 0, n
++) = set
;
10281 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
10282 top_offset
= restore_p
? size
: 0;
10284 /* Save the arguments. */
10285 for (i
= 0; i
< nargs
; i
++)
10287 offset
= top_offset
+ i
* UNITS_PER_WORD
;
10288 set
= mips16e_save_restore_reg (restore_p
, true, offset
,
10290 XVECEXP (pattern
, 0, n
++) = set
;
10293 /* Then fill in the other register moves. */
10294 offset
= top_offset
;
10295 for (i
= 0; i
< ARRAY_SIZE (mips16e_save_restore_regs
); i
++)
10297 regno
= mips16e_save_restore_regs
[i
];
10298 if (BITSET_P (*mask_ptr
, regno
))
10300 offset
-= UNITS_PER_WORD
;
10301 set
= mips16e_save_restore_reg (restore_p
, false, offset
, regno
);
10302 XVECEXP (pattern
, 0, n
++) = set
;
10303 *mask_ptr
&= ~(1 << regno
);
10307 /* Tell the caller what offset it should use for the remaining registers. */
10308 *offset_ptr
= size
+ (offset
- top_offset
);
10310 gcc_assert (n
== XVECLEN (pattern
, 0));
10315 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
10316 pointer. Return true if PATTERN matches the kind of instruction
10317 generated by mips16e_build_save_restore. If INFO is nonnull,
10318 initialize it when returning true. */
10321 mips16e_save_restore_pattern_p (rtx pattern
, HOST_WIDE_INT adjust
,
10322 struct mips16e_save_restore_info
*info
)
10324 unsigned int i
, nargs
, mask
, extra
;
10325 HOST_WIDE_INT top_offset
, save_offset
, offset
;
10326 rtx set
, reg
, mem
, base
;
10329 if (!GENERATE_MIPS16E_SAVE_RESTORE
)
10332 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
10333 top_offset
= adjust
> 0 ? adjust
: 0;
10335 /* Interpret all other members of the PARALLEL. */
10336 save_offset
= top_offset
- UNITS_PER_WORD
;
10340 for (n
= 1; n
< XVECLEN (pattern
, 0); n
++)
10342 /* Check that we have a SET. */
10343 set
= XVECEXP (pattern
, 0, n
);
10344 if (GET_CODE (set
) != SET
)
10347 /* Check that the SET is a load (if restoring) or a store
10349 mem
= adjust
> 0 ? SET_SRC (set
) : SET_DEST (set
);
10353 /* Check that the address is the sum of the stack pointer and a
10354 possibly-zero constant offset. */
10355 mips_split_plus (XEXP (mem
, 0), &base
, &offset
);
10356 if (base
!= stack_pointer_rtx
)
10359 /* Check that SET's other operand is a register. */
10360 reg
= adjust
> 0 ? SET_DEST (set
) : SET_SRC (set
);
10364 /* Check for argument saves. */
10365 if (offset
== top_offset
+ nargs
* UNITS_PER_WORD
10366 && REGNO (reg
) == GP_ARG_FIRST
+ nargs
)
10368 else if (offset
== save_offset
)
10370 while (mips16e_save_restore_regs
[i
++] != REGNO (reg
))
10371 if (i
== ARRAY_SIZE (mips16e_save_restore_regs
))
10374 mask
|= 1 << REGNO (reg
);
10375 save_offset
-= UNITS_PER_WORD
;
10381 /* Check that the restrictions on register ranges are met. */
10383 mips16e_mask_registers (&mask
, mips16e_s2_s8_regs
,
10384 ARRAY_SIZE (mips16e_s2_s8_regs
), &extra
);
10385 mips16e_mask_registers (&mask
, mips16e_a0_a3_regs
,
10386 ARRAY_SIZE (mips16e_a0_a3_regs
), &extra
);
10390 /* Make sure that the topmost argument register is not saved twice.
10391 The checks above ensure that the same is then true for the other
10392 argument registers. */
10393 if (nargs
> 0 && BITSET_P (mask
, GP_ARG_FIRST
+ nargs
- 1))
10396 /* Pass back information, if requested. */
10399 info
->nargs
= nargs
;
10401 info
->size
= (adjust
> 0 ? adjust
: -adjust
);
10407 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
10408 for the register range [MIN_REG, MAX_REG]. Return a pointer to
10409 the null terminator. */
10412 mips16e_add_register_range (char *s
, unsigned int min_reg
,
10413 unsigned int max_reg
)
10415 if (min_reg
!= max_reg
)
10416 s
+= sprintf (s
, ",%s-%s", reg_names
[min_reg
], reg_names
[max_reg
]);
10418 s
+= sprintf (s
, ",%s", reg_names
[min_reg
]);
10422 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
10423 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
10426 mips16e_output_save_restore (rtx pattern
, HOST_WIDE_INT adjust
)
10428 static char buffer
[300];
10430 struct mips16e_save_restore_info info
;
10431 unsigned int i
, end
;
10434 /* Parse the pattern. */
10435 if (!mips16e_save_restore_pattern_p (pattern
, adjust
, &info
))
10436 gcc_unreachable ();
10438 /* Add the mnemonic. */
10439 s
= strcpy (buffer
, adjust
> 0 ? "restore\t" : "save\t");
10442 /* Save the arguments. */
10443 if (info
.nargs
> 1)
10444 s
+= sprintf (s
, "%s-%s,", reg_names
[GP_ARG_FIRST
],
10445 reg_names
[GP_ARG_FIRST
+ info
.nargs
- 1]);
10446 else if (info
.nargs
== 1)
10447 s
+= sprintf (s
, "%s,", reg_names
[GP_ARG_FIRST
]);
10449 /* Emit the amount of stack space to allocate or deallocate. */
10450 s
+= sprintf (s
, "%d", (int) info
.size
);
10452 /* Save or restore $16. */
10453 if (BITSET_P (info
.mask
, 16))
10454 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 16]);
10456 /* Save or restore $17. */
10457 if (BITSET_P (info
.mask
, 17))
10458 s
+= sprintf (s
, ",%s", reg_names
[GP_REG_FIRST
+ 17]);
10460 /* Save or restore registers in the range $s2...$s8, which
10461 mips16e_s2_s8_regs lists in decreasing order. Note that this
10462 is a software register range; the hardware registers are not
10463 numbered consecutively. */
10464 end
= ARRAY_SIZE (mips16e_s2_s8_regs
);
10465 i
= mips16e_find_first_register (info
.mask
, mips16e_s2_s8_regs
, end
);
10467 s
= mips16e_add_register_range (s
, mips16e_s2_s8_regs
[end
- 1],
10468 mips16e_s2_s8_regs
[i
]);
10470 /* Save or restore registers in the range $a0...$a3. */
10471 end
= ARRAY_SIZE (mips16e_a0_a3_regs
);
10472 i
= mips16e_find_first_register (info
.mask
, mips16e_a0_a3_regs
, end
);
10474 s
= mips16e_add_register_range (s
, mips16e_a0_a3_regs
[i
],
10475 mips16e_a0_a3_regs
[end
- 1]);
10477 /* Save or restore $31. */
10478 if (BITSET_P (info
.mask
, RETURN_ADDR_REGNUM
))
10479 s
+= sprintf (s
, ",%s", reg_names
[RETURN_ADDR_REGNUM
]);
10484 /* Return true if the current function returns its value in a floating-point
10485 register in MIPS16 mode. */
10488 mips16_cfun_returns_in_fpr_p (void)
10490 tree return_type
= DECL_RESULT (current_function_decl
);
10491 return (TARGET_MIPS16
10492 && TARGET_HARD_FLOAT_ABI
10493 && !aggregate_value_p (return_type
, current_function_decl
)
10494 && mips_return_mode_in_fpr_p (DECL_MODE (return_type
)));
10497 /* Return true if predicate PRED is true for at least one instruction.
10498 Cache the result in *CACHE, and assume that the result is true
10499 if *CACHE is already true. */
10502 mips_find_gp_ref (bool *cache
, bool (*pred
) (rtx_insn
*))
10504 rtx_insn
*insn
, *subinsn
;
10508 push_topmost_sequence ();
10509 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
10510 FOR_EACH_SUBINSN (subinsn
, insn
)
10511 if (USEFUL_INSN_P (subinsn
) && pred (subinsn
))
10516 pop_topmost_sequence ();
10521 /* Return true if INSN refers to the global pointer in an "inflexible" way.
10522 See mips_cfun_has_inflexible_gp_ref_p for details. */
10525 mips_insn_has_inflexible_gp_ref_p (rtx_insn
*insn
)
10527 /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
10528 indicate that the target could be a traditional MIPS
10529 lazily-binding stub. */
10530 return find_reg_fusage (insn
, USE
, pic_offset_table_rtx
);
10533 /* Return true if the current function refers to the global pointer
10534 in a way that forces $28 to be valid. This means that we can't
10535 change the choice of global pointer, even for NewABI code.
10537 One example of this (and one which needs several checks) is that
10538 $28 must be valid when calling traditional MIPS lazy-binding stubs.
10539 (This restriction does not apply to PLTs.) */
10542 mips_cfun_has_inflexible_gp_ref_p (void)
10544 /* If the function has a nonlocal goto, $28 must hold the correct
10545 global pointer for the target function. That is, the target
10546 of the goto implicitly uses $28. */
10547 if (crtl
->has_nonlocal_goto
)
10550 if (TARGET_ABICALLS_PIC2
)
10552 /* Symbolic accesses implicitly use the global pointer unless
10553 -mexplicit-relocs is in effect. JAL macros to symbolic addresses
10554 might go to traditional MIPS lazy-binding stubs. */
10555 if (!TARGET_EXPLICIT_RELOCS
)
10558 /* FUNCTION_PROFILER includes a JAL to _mcount, which again
10559 can be lazily-bound. */
10563 /* MIPS16 functions that return in FPRs need to call an
10564 external libgcc routine. This call is only made explict
10565 during mips_expand_epilogue, and it too might be lazily bound. */
10566 if (mips16_cfun_returns_in_fpr_p ())
10570 return mips_find_gp_ref (&cfun
->machine
->has_inflexible_gp_insn_p
,
10571 mips_insn_has_inflexible_gp_ref_p
);
10574 /* Return true if INSN refers to the global pointer in a "flexible" way.
10575 See mips_cfun_has_flexible_gp_ref_p for details. */
10578 mips_insn_has_flexible_gp_ref_p (rtx_insn
*insn
)
10580 return (get_attr_got (insn
) != GOT_UNSET
10581 || mips_small_data_pattern_p (PATTERN (insn
))
10582 || reg_overlap_mentioned_p (pic_offset_table_rtx
, PATTERN (insn
)));
10585 /* Return true if the current function references the global pointer,
10586 but if those references do not inherently require the global pointer
10587 to be $28. Assume !mips_cfun_has_inflexible_gp_ref_p (). */
10590 mips_cfun_has_flexible_gp_ref_p (void)
10592 /* Reload can sometimes introduce constant pool references
10593 into a function that otherwise didn't need them. For example,
10594 suppose we have an instruction like:
10596 (set (reg:DF R1) (float:DF (reg:SI R2)))
10598 If R2 turns out to be a constant such as 1, the instruction may
10599 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
10600 the option of using this constant if R2 doesn't get allocated
10603 In cases like these, reload will have added the constant to the
10604 pool but no instruction will yet refer to it. */
10605 if (TARGET_ABICALLS_PIC2
&& !reload_completed
&& crtl
->uses_const_pool
)
10608 return mips_find_gp_ref (&cfun
->machine
->has_flexible_gp_insn_p
,
10609 mips_insn_has_flexible_gp_ref_p
);
10612 /* Return the register that should be used as the global pointer
10613 within this function. Return INVALID_REGNUM if the function
10614 doesn't need a global pointer. */
10616 static unsigned int
10617 mips_global_pointer (void)
10619 unsigned int regno
;
10621 /* $gp is always available unless we're using a GOT. */
10622 if (!TARGET_USE_GOT
)
10623 return GLOBAL_POINTER_REGNUM
;
10625 /* If there are inflexible references to $gp, we must use the
10626 standard register. */
10627 if (mips_cfun_has_inflexible_gp_ref_p ())
10628 return GLOBAL_POINTER_REGNUM
;
10630 /* If there are no current references to $gp, then the only uses
10631 we can introduce later are those involved in long branches. */
10632 if (TARGET_ABSOLUTE_JUMPS
&& !mips_cfun_has_flexible_gp_ref_p ())
10633 return INVALID_REGNUM
;
10635 /* If the global pointer is call-saved, try to use a call-clobbered
10637 if (TARGET_CALL_SAVED_GP
&& crtl
->is_leaf
)
10638 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
10639 if (!df_regs_ever_live_p (regno
)
10640 && call_used_regs
[regno
]
10641 && !fixed_regs
[regno
]
10642 && regno
!= PIC_FUNCTION_ADDR_REGNUM
)
10645 return GLOBAL_POINTER_REGNUM
;
10648 /* Return true if the current function's prologue must load the global
10649 pointer value into pic_offset_table_rtx and store the same value in
10650 the function's cprestore slot (if any).
10652 One problem we have to deal with is that, when emitting GOT-based
10653 position independent code, long-branch sequences will need to load
10654 the address of the branch target from the GOT. We don't know until
10655 the very end of compilation whether (and where) the function needs
10656 long branches, so we must ensure that _any_ branch can access the
10657 global pointer in some form. However, we do not want to pessimize
10658 the usual case in which all branches are short.
10660 We handle this as follows:
10662 (1) During reload, we set cfun->machine->global_pointer to
10663 INVALID_REGNUM if we _know_ that the current function
10664 doesn't need a global pointer. This is only valid if
10665 long branches don't need the GOT.
10667 Otherwise, we assume that we might need a global pointer
10668 and pick an appropriate register.
10670 (2) If cfun->machine->global_pointer != INVALID_REGNUM,
10671 we ensure that the global pointer is available at every
10672 block boundary bar entry and exit. We do this in one of two ways:
10674 - If the function has a cprestore slot, we ensure that this
10675 slot is valid at every branch. However, as explained in
10676 point (6) below, there is no guarantee that pic_offset_table_rtx
10677 itself is valid if new uses of the global pointer are introduced
10678 after the first post-epilogue split.
10680 We guarantee that the cprestore slot is valid by loading it
10681 into a fake register, CPRESTORE_SLOT_REGNUM. We then make
10682 this register live at every block boundary bar function entry
10683 and exit. It is then invalid to move the load (and thus the
10684 preceding store) across a block boundary.
10686 - If the function has no cprestore slot, we guarantee that
10687 pic_offset_table_rtx itself is valid at every branch.
10689 See mips_eh_uses for the handling of the register liveness.
10691 (3) During prologue and epilogue generation, we emit "ghost"
10692 placeholder instructions to manipulate the global pointer.
10694 (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
10695 and cfun->machine->must_restore_gp_when_clobbered_p if we already know
10696 that the function needs a global pointer. (There is no need to set
10697 them earlier than this, and doing it as late as possible leads to
10698 fewer false positives.)
10700 (5) If cfun->machine->must_initialize_gp_p is true during a
10701 split_insns pass, we split the ghost instructions into real
10702 instructions. These split instructions can then be optimized in
10703 the usual way. Otherwise, we keep the ghost instructions intact,
10704 and optimize for the case where they aren't needed. We still
10705 have the option of splitting them later, if we need to introduce
10706 new uses of the global pointer.
10708 For example, the scheduler ignores a ghost instruction that
10709 stores $28 to the stack, but it handles the split form of
10710 the ghost instruction as an ordinary store.
10712 (6) [OldABI only.] If cfun->machine->must_restore_gp_when_clobbered_p
10713 is true during the first post-epilogue split_insns pass, we split
10714 calls and restore_gp patterns into instructions that explicitly
10715 load pic_offset_table_rtx from the cprestore slot. Otherwise,
10716 we split these patterns into instructions that _don't_ load from
10717 the cprestore slot.
10719 If cfun->machine->must_restore_gp_when_clobbered_p is true at the
10720 time of the split, then any instructions that exist at that time
10721 can make free use of pic_offset_table_rtx. However, if we want
10722 to introduce new uses of the global pointer after the split,
10723 we must explicitly load the value from the cprestore slot, since
10724 pic_offset_table_rtx itself might not be valid at a given point
10727 The idea is that we want to be able to delete redundant
10728 loads from the cprestore slot in the usual case where no
10729 long branches are needed.
10731 (7) If cfun->machine->must_initialize_gp_p is still false at the end
10732 of md_reorg, we decide whether the global pointer is needed for
10733 long branches. If so, we set cfun->machine->must_initialize_gp_p
10734 to true and split the ghost instructions into real instructions
10737 Note that the ghost instructions must have a zero length for three reasons:
10739 - Giving the length of the underlying $gp sequence might cause
10740 us to use long branches in cases where they aren't really needed.
10742 - They would perturb things like alignment calculations.
10744 - More importantly, the hazard detection in md_reorg relies on
10745 empty instructions having a zero length.
10747 If we find a long branch and split the ghost instructions at the
10748 end of md_reorg, the split could introduce more long branches.
10749 That isn't a problem though, because we still do the split before
10750 the final shorten_branches pass.
10752 This is extremely ugly, but it seems like the best compromise between
10753 correctness and efficiency. */
10756 mips_must_initialize_gp_p (void)
10758 return cfun
->machine
->must_initialize_gp_p
;
10761 /* Return true if REGNO is a register that is ordinarily call-clobbered
10762 but must nevertheless be preserved by an interrupt handler. */
10765 mips_interrupt_extra_call_saved_reg_p (unsigned int regno
)
10767 if ((ISA_HAS_HILO
|| TARGET_DSP
)
10768 && MD_REG_P (regno
))
10771 if (TARGET_DSP
&& DSP_ACC_REG_P (regno
))
10774 if (GP_REG_P (regno
)
10775 && cfun
->machine
->use_shadow_register_set
== SHADOW_SET_NO
)
10777 /* $0 is hard-wired. */
10778 if (regno
== GP_REG_FIRST
)
10781 /* The interrupt handler can treat kernel registers as
10782 scratch registers. */
10783 if (KERNEL_REG_P (regno
))
10786 /* The function will return the stack pointer to its original value
10788 if (regno
== STACK_POINTER_REGNUM
)
10791 /* Otherwise, return true for registers that aren't ordinarily
10793 return call_used_regs
[regno
];
10799 /* Return true if the current function should treat register REGNO
10803 mips_cfun_call_saved_reg_p (unsigned int regno
)
10805 /* If the user makes an ordinarily-call-saved register global,
10806 that register is no longer call-saved. */
10807 if (global_regs
[regno
])
10810 /* Interrupt handlers need to save extra registers. */
10811 if (cfun
->machine
->interrupt_handler_p
10812 && mips_interrupt_extra_call_saved_reg_p (regno
))
10815 /* call_insns preserve $28 unless they explicitly say otherwise,
10816 so call_used_regs[] treats $28 as call-saved. However,
10817 we want the ABI property rather than the default call_insn
10819 return (regno
== GLOBAL_POINTER_REGNUM
10820 ? TARGET_CALL_SAVED_GP
10821 : !call_used_regs
[regno
]);
10824 /* Return true if the function body might clobber register REGNO.
10825 We know that REGNO is call-saved. */
10828 mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno
)
10830 /* Some functions should be treated as clobbering all call-saved
10832 if (crtl
->saves_all_registers
)
10835 /* DF handles cases where a register is explicitly referenced in
10836 the rtl. Incoming values are passed in call-clobbered registers,
10837 so we can assume that any live call-saved register is set within
10839 if (df_regs_ever_live_p (regno
))
10842 /* Check for registers that are clobbered by FUNCTION_PROFILER.
10843 These clobbers are not explicit in the rtl. */
10844 if (crtl
->profile
&& MIPS_SAVE_REG_FOR_PROFILING_P (regno
))
10847 /* If we're using a call-saved global pointer, the function's
10848 prologue will need to set it up. */
10849 if (cfun
->machine
->global_pointer
== regno
)
10852 /* The function's prologue will need to set the frame pointer if
10853 frame_pointer_needed. */
10854 if (regno
== HARD_FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
10857 /* If a MIPS16 function returns a value in FPRs, its epilogue
10858 will need to call an external libgcc routine. This yet-to-be
10859 generated call_insn will clobber $31. */
10860 if (regno
== RETURN_ADDR_REGNUM
&& mips16_cfun_returns_in_fpr_p ())
10863 /* If REGNO is ordinarily call-clobbered, we must assume that any
10864 called function could modify it. */
10865 if (cfun
->machine
->interrupt_handler_p
10867 && mips_interrupt_extra_call_saved_reg_p (regno
))
10873 /* Return true if the current function must save register REGNO. */
10876 mips_save_reg_p (unsigned int regno
)
10878 if (mips_cfun_call_saved_reg_p (regno
))
10880 if (mips_cfun_might_clobber_call_saved_reg_p (regno
))
10883 /* Save both registers in an FPR pair if either one is used. This is
10884 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
10885 register to be used without the even register. */
10886 if (FP_REG_P (regno
)
10887 && MAX_FPRS_PER_FMT
== 2
10888 && mips_cfun_might_clobber_call_saved_reg_p (regno
+ 1))
10892 /* We need to save the incoming return address if __builtin_eh_return
10893 is being used to set a different return address. */
10894 if (regno
== RETURN_ADDR_REGNUM
&& crtl
->calls_eh_return
)
10900 /* Populate the current function's mips_frame_info structure.
10902 MIPS stack frames look like:
10904 +-------------------------------+
10906 | incoming stack arguments |
10908 +-------------------------------+
10910 | caller-allocated save area |
10911 A | for register arguments |
10913 +-------------------------------+ <-- incoming stack pointer
10915 | callee-allocated save area |
10916 B | for arguments that are |
10917 | split between registers and |
10920 +-------------------------------+ <-- arg_pointer_rtx
10922 C | callee-allocated save area |
10923 | for register varargs |
10925 +-------------------------------+ <-- frame_pointer_rtx
10926 | | + cop0_sp_offset
10927 | COP0 reg save area | + UNITS_PER_WORD
10929 +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
10930 | | + UNITS_PER_WORD
10931 | accumulator save area |
10933 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
10934 | | + UNITS_PER_HWFPVALUE
10937 +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
10938 | | + UNITS_PER_WORD
10941 +-------------------------------+ <-- frame_pointer_rtx with
10942 | | \ -fstack-protector
10943 | local variables | | var_size
10945 +-------------------------------+
10947 | $gp save area | | cprestore_size
10949 P +-------------------------------+ <-- hard_frame_pointer_rtx for
10951 | outgoing stack arguments | |
10953 +-------------------------------+ | args_size
10955 | caller-allocated save area | |
10956 | for register arguments | |
10958 +-------------------------------+ <-- stack_pointer_rtx
10959 frame_pointer_rtx without
10961 hard_frame_pointer_rtx for
10964 At least two of A, B and C will be empty.
10966 Dynamic stack allocations such as alloca insert data at point P.
10967 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
10968 hard_frame_pointer_rtx unchanged. */
10971 mips_compute_frame_info (void)
10973 struct mips_frame_info
*frame
;
10974 HOST_WIDE_INT offset
, size
;
10975 unsigned int regno
, i
;
10977 /* Skip re-computing the frame info after reload completed. */
10978 if (reload_completed
)
10981 /* Set this function's interrupt properties. */
10982 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl
)))
10984 if (mips_isa_rev
< 2)
10985 error ("the %<interrupt%> attribute requires a MIPS32r2 processor or greater");
10986 else if (TARGET_MIPS16
)
10987 error ("interrupt handlers cannot be MIPS16 functions");
10990 cfun
->machine
->interrupt_handler_p
= true;
10991 cfun
->machine
->int_mask
=
10992 mips_interrupt_mask (TREE_TYPE (current_function_decl
));
10993 cfun
->machine
->use_shadow_register_set
=
10994 mips_use_shadow_register_set (TREE_TYPE (current_function_decl
));
10995 cfun
->machine
->keep_interrupts_masked_p
=
10996 mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl
));
10997 cfun
->machine
->use_debug_exception_return_p
=
10998 mips_use_debug_exception_return_p (TREE_TYPE
10999 (current_function_decl
));
11003 frame
= &cfun
->machine
->frame
;
11004 memset (frame
, 0, sizeof (*frame
));
11005 size
= get_frame_size ();
11007 /* The first two blocks contain the outgoing argument area and the $gp save
11008 slot. This area isn't needed in leaf functions. We can also skip it
11009 if we know that none of the called functions will use this space.
11011 But if the target-independent frame size is nonzero, we have already
11012 committed to allocating these in TARGET_STARTING_FRAME_OFFSET for
11013 !FRAME_GROWS_DOWNWARD. */
11015 if ((size
== 0 || FRAME_GROWS_DOWNWARD
)
11016 && (crtl
->is_leaf
|| (cfun
->machine
->optimize_call_stack
&& !flag_pic
)))
11018 /* The MIPS 3.0 linker does not like functions that dynamically
11019 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
11020 looks like we are trying to create a second frame pointer to the
11021 function, so allocate some stack space to make it happy. */
11022 if (cfun
->calls_alloca
)
11023 frame
->args_size
= REG_PARM_STACK_SPACE (cfun
->decl
);
11025 frame
->args_size
= 0;
11026 frame
->cprestore_size
= 0;
11030 frame
->args_size
= crtl
->outgoing_args_size
;
11031 frame
->cprestore_size
= MIPS_GP_SAVE_AREA_SIZE
;
11034 /* MIPS16 code offsets the frame pointer by the size of the outgoing
11035 arguments. This tends to increase the chances of using unextended
11036 instructions for local variables and incoming arguments. */
11038 frame
->hard_frame_pointer_offset
= frame
->args_size
;
11040 /* PR 69129 / 69012: Beware of a possible race condition. mips_global_pointer
11041 might call mips_cfun_has_inflexible_gp_ref_p which in turn can call
11042 mips_find_gp_ref which will iterate over the current insn sequence.
11043 If any of these insns use the cprestore_save_slot_operand or
11044 cprestore_load_slot_operand predicates in order to be recognised then
11045 they will call mips_cprestore_address_p which calls
11046 mips_get_cprestore_base_and_offset which expects the frame information
11047 to be filled in... In fact mips_get_cprestore_base_and_offset only
11048 needs the args_size and hard_frame_pointer_offset fields to be filled
11049 in, which is why the global_pointer field is initialised here and not
11051 cfun
->machine
->global_pointer
= mips_global_pointer ();
11053 offset
= frame
->args_size
+ frame
->cprestore_size
;
11055 /* Move above the local variables. */
11056 frame
->var_size
= MIPS_STACK_ALIGN (size
);
11057 offset
+= frame
->var_size
;
11059 /* Find out which GPRs we need to save. */
11060 for (regno
= GP_REG_FIRST
; regno
<= GP_REG_LAST
; regno
++)
11061 if (mips_save_reg_p (regno
))
11064 frame
->mask
|= 1 << (regno
- GP_REG_FIRST
);
11067 /* If this function calls eh_return, we must also save and restore the
11068 EH data registers. */
11069 if (crtl
->calls_eh_return
)
11070 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; i
++)
11073 frame
->mask
|= 1 << (EH_RETURN_DATA_REGNO (i
) - GP_REG_FIRST
);
11076 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
11077 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
11078 save all later registers too. */
11079 if (GENERATE_MIPS16E_SAVE_RESTORE
)
11081 mips16e_mask_registers (&frame
->mask
, mips16e_s2_s8_regs
,
11082 ARRAY_SIZE (mips16e_s2_s8_regs
), &frame
->num_gp
);
11083 mips16e_mask_registers (&frame
->mask
, mips16e_a0_a3_regs
,
11084 ARRAY_SIZE (mips16e_a0_a3_regs
), &frame
->num_gp
);
11087 /* Move above the GPR save area. */
11088 if (frame
->num_gp
> 0)
11090 offset
+= MIPS_STACK_ALIGN (frame
->num_gp
* UNITS_PER_WORD
);
11091 frame
->gp_sp_offset
= offset
- UNITS_PER_WORD
;
11094 /* Find out which FPRs we need to save. This loop must iterate over
11095 the same space as its companion in mips_for_each_saved_gpr_and_fpr. */
11096 if (TARGET_HARD_FLOAT
)
11097 for (regno
= FP_REG_FIRST
; regno
<= FP_REG_LAST
; regno
+= MAX_FPRS_PER_FMT
)
11098 if (mips_save_reg_p (regno
))
11100 frame
->num_fp
+= MAX_FPRS_PER_FMT
;
11101 frame
->fmask
|= ~(~0U << MAX_FPRS_PER_FMT
) << (regno
- FP_REG_FIRST
);
11104 /* Move above the FPR save area. */
11105 if (frame
->num_fp
> 0)
11107 offset
+= MIPS_STACK_ALIGN (frame
->num_fp
* UNITS_PER_FPREG
);
11108 frame
->fp_sp_offset
= offset
- UNITS_PER_HWFPVALUE
;
11111 /* Add in space for the interrupt context information. */
11112 if (cfun
->machine
->interrupt_handler_p
)
11115 if (mips_save_reg_p (LO_REGNUM
) || mips_save_reg_p (HI_REGNUM
))
11118 frame
->acc_mask
|= (1 << 0);
11121 /* Check accumulators 1, 2, 3. */
11122 for (i
= DSP_ACC_REG_FIRST
; i
<= DSP_ACC_REG_LAST
; i
+= 2)
11123 if (mips_save_reg_p (i
) || mips_save_reg_p (i
+ 1))
11126 frame
->acc_mask
|= 1 << (((i
- DSP_ACC_REG_FIRST
) / 2) + 1);
11129 /* All interrupt context functions need space to preserve STATUS. */
11130 frame
->num_cop0_regs
++;
11132 /* We need to save EPC regardless of whether interrupts remain masked
11133 as exceptions will corrupt EPC. */
11134 frame
->num_cop0_regs
++;
11137 /* Move above the accumulator save area. */
11138 if (frame
->num_acc
> 0)
11140 /* Each accumulator needs 2 words. */
11141 offset
+= frame
->num_acc
* 2 * UNITS_PER_WORD
;
11142 frame
->acc_sp_offset
= offset
- UNITS_PER_WORD
;
11145 /* Move above the COP0 register save area. */
11146 if (frame
->num_cop0_regs
> 0)
11148 offset
+= frame
->num_cop0_regs
* UNITS_PER_WORD
;
11149 frame
->cop0_sp_offset
= offset
- UNITS_PER_WORD
;
11152 /* Determine if we can save the callee-saved registers in the frame
11153 header. Restrict this to functions where there is no other reason
11154 to allocate stack space so that we can eliminate the instructions
11155 that modify the stack pointer. */
11159 && flag_frame_header_optimization
11160 && !MAIN_NAME_P (DECL_NAME (current_function_decl
))
11161 && cfun
->machine
->varargs_size
== 0
11162 && crtl
->args
.pretend_args_size
== 0
11163 && frame
->var_size
== 0
11164 && frame
->num_acc
== 0
11165 && frame
->num_cop0_regs
== 0
11166 && frame
->num_fp
== 0
11167 && frame
->num_gp
> 0
11168 && frame
->num_gp
<= MAX_ARGS_IN_REGISTERS
11169 && !GENERATE_MIPS16E_SAVE_RESTORE
11170 && !cfun
->machine
->interrupt_handler_p
11171 && cfun
->machine
->does_not_use_frame_header
11172 && cfun
->machine
->optimize_call_stack
11173 && !cfun
->machine
->callers_may_not_allocate_frame
11174 && !mips_cfun_has_cprestore_slot_p ())
11177 frame
->gp_sp_offset
= REG_PARM_STACK_SPACE(cfun
) - UNITS_PER_WORD
;
11178 cfun
->machine
->use_frame_header_for_callee_saved_regs
= true;
11181 /* Move above the callee-allocated varargs save area. */
11182 offset
+= MIPS_STACK_ALIGN (cfun
->machine
->varargs_size
);
11183 frame
->arg_pointer_offset
= offset
;
11185 /* Move above the callee-allocated area for pretend stack arguments. */
11186 offset
+= crtl
->args
.pretend_args_size
;
11187 frame
->total_size
= offset
;
11189 /* Work out the offsets of the save areas from the top of the frame. */
11190 if (frame
->gp_sp_offset
> 0)
11191 frame
->gp_save_offset
= frame
->gp_sp_offset
- offset
;
11192 if (frame
->fp_sp_offset
> 0)
11193 frame
->fp_save_offset
= frame
->fp_sp_offset
- offset
;
11194 if (frame
->acc_sp_offset
> 0)
11195 frame
->acc_save_offset
= frame
->acc_sp_offset
- offset
;
11196 if (frame
->num_cop0_regs
> 0)
11197 frame
->cop0_save_offset
= frame
->cop0_sp_offset
- offset
;
11200 /* Return the style of GP load sequence that is being used for the
11201 current function. */
11203 enum mips_loadgp_style
11204 mips_current_loadgp_style (void)
11206 if (!TARGET_USE_GOT
|| cfun
->machine
->global_pointer
== INVALID_REGNUM
)
11207 return LOADGP_NONE
;
11209 if (TARGET_RTP_PIC
)
11212 if (TARGET_ABSOLUTE_ABICALLS
)
11213 return LOADGP_ABSOLUTE
;
11215 return TARGET_NEWABI
? LOADGP_NEWABI
: LOADGP_OLDABI
;
11218 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
11221 mips_frame_pointer_required (void)
11223 /* If the function contains dynamic stack allocations, we need to
11224 use the frame pointer to access the static parts of the frame. */
11225 if (cfun
->calls_alloca
)
11228 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
11229 reload may be unable to compute the address of a local variable,
11230 since there is no way to add a large constant to the stack pointer
11231 without using a second temporary register. */
11234 mips_compute_frame_info ();
11235 if (!SMALL_OPERAND (cfun
->machine
->frame
.total_size
))
11242 /* Make sure that we're not trying to eliminate to the wrong hard frame
11246 mips_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
11248 return (to
== HARD_FRAME_POINTER_REGNUM
|| to
== STACK_POINTER_REGNUM
);
11251 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
11252 or argument pointer. TO is either the stack pointer or hard frame
11256 mips_initial_elimination_offset (int from
, int to
)
11258 HOST_WIDE_INT offset
;
11260 mips_compute_frame_info ();
11262 /* Set OFFSET to the offset from the end-of-prologue stack pointer. */
11265 case FRAME_POINTER_REGNUM
:
11266 if (FRAME_GROWS_DOWNWARD
)
11267 offset
= (cfun
->machine
->frame
.args_size
11268 + cfun
->machine
->frame
.cprestore_size
11269 + cfun
->machine
->frame
.var_size
);
11274 case ARG_POINTER_REGNUM
:
11275 offset
= cfun
->machine
->frame
.arg_pointer_offset
;
11279 gcc_unreachable ();
11282 if (to
== HARD_FRAME_POINTER_REGNUM
)
11283 offset
-= cfun
->machine
->frame
.hard_frame_pointer_offset
;
11288 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
11291 mips_extra_live_on_entry (bitmap regs
)
11293 if (TARGET_USE_GOT
)
11295 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
11296 the global pointer. */
11297 if (!TARGET_ABSOLUTE_ABICALLS
)
11298 bitmap_set_bit (regs
, PIC_FUNCTION_ADDR_REGNUM
);
11300 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
11301 the global pointer. */
11303 bitmap_set_bit (regs
, MIPS16_PIC_TEMP_REGNUM
);
11305 /* See the comment above load_call<mode> for details. */
11306 bitmap_set_bit (regs
, GOT_VERSION_REGNUM
);
11310 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
11314 mips_return_addr (int count
, rtx frame ATTRIBUTE_UNUSED
)
11319 return get_hard_reg_initial_val (Pmode
, RETURN_ADDR_REGNUM
);
11322 /* Emit code to change the current function's return address to
11323 ADDRESS. SCRATCH is available as a scratch register, if needed.
11324 ADDRESS and SCRATCH are both word-mode GPRs. */
11327 mips_set_return_address (rtx address
, rtx scratch
)
11331 gcc_assert (BITSET_P (cfun
->machine
->frame
.mask
, RETURN_ADDR_REGNUM
));
11332 slot_address
= mips_add_offset (scratch
, stack_pointer_rtx
,
11333 cfun
->machine
->frame
.gp_sp_offset
);
11334 mips_emit_move (gen_frame_mem (GET_MODE (address
), slot_address
), address
);
11337 /* Return true if the current function has a cprestore slot. */
11340 mips_cfun_has_cprestore_slot_p (void)
11342 return (cfun
->machine
->global_pointer
!= INVALID_REGNUM
11343 && cfun
->machine
->frame
.cprestore_size
> 0);
11346 /* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
11347 cprestore slot. LOAD_P is true if the caller wants to load from
11348 the cprestore slot; it is false if the caller wants to store to
11352 mips_get_cprestore_base_and_offset (rtx
*base
, HOST_WIDE_INT
*offset
,
11355 const struct mips_frame_info
*frame
;
11357 frame
= &cfun
->machine
->frame
;
11358 /* .cprestore always uses the stack pointer instead of the frame pointer.
11359 We have a free choice for direct stores for non-MIPS16 functions,
11360 and for MIPS16 functions whose cprestore slot is in range of the
11361 stack pointer. Using the stack pointer would sometimes give more
11362 (early) scheduling freedom, but using the frame pointer would
11363 sometimes give more (late) scheduling freedom. It's hard to
11364 predict which applies to a given function, so let's keep things
11367 Loads must always use the frame pointer in functions that call
11368 alloca, and there's little benefit to using the stack pointer
11370 if (frame_pointer_needed
&& !(TARGET_CPRESTORE_DIRECTIVE
&& !load_p
))
11372 *base
= hard_frame_pointer_rtx
;
11373 *offset
= frame
->args_size
- frame
->hard_frame_pointer_offset
;
11377 *base
= stack_pointer_rtx
;
11378 *offset
= frame
->args_size
;
11382 /* Return true if X is the load or store address of the cprestore slot;
11383 LOAD_P says which. */
11386 mips_cprestore_address_p (rtx x
, bool load_p
)
11388 rtx given_base
, required_base
;
11389 HOST_WIDE_INT given_offset
, required_offset
;
11391 mips_split_plus (x
, &given_base
, &given_offset
);
11392 mips_get_cprestore_base_and_offset (&required_base
, &required_offset
, load_p
);
11393 return given_base
== required_base
&& given_offset
== required_offset
;
11396 /* Return a MEM rtx for the cprestore slot. LOAD_P is true if we are
11397 going to load from it, false if we are going to store to it.
11398 Use TEMP as a temporary register if need be. */
11401 mips_cprestore_slot (rtx temp
, bool load_p
)
11404 HOST_WIDE_INT offset
;
11406 mips_get_cprestore_base_and_offset (&base
, &offset
, load_p
);
11407 return gen_frame_mem (Pmode
, mips_add_offset (temp
, base
, offset
));
11410 /* Emit instructions to save global pointer value GP into cprestore
11411 slot MEM. OFFSET is the offset that MEM applies to the base register.
11413 MEM may not be a legitimate address. If it isn't, TEMP is a
11414 temporary register that can be used, otherwise it is a SCRATCH. */
11417 mips_save_gp_to_cprestore_slot (rtx mem
, rtx offset
, rtx gp
, rtx temp
)
11419 if (TARGET_CPRESTORE_DIRECTIVE
)
11421 gcc_assert (gp
== pic_offset_table_rtx
);
11422 emit_insn (PMODE_INSN (gen_cprestore
, (mem
, offset
)));
11425 mips_emit_move (mips_cprestore_slot (temp
, false), gp
);
11428 /* Restore $gp from its save slot, using TEMP as a temporary base register
11429 if need be. This function is for o32 and o64 abicalls only.
11431 See mips_must_initialize_gp_p for details about how we manage the
11435 mips_restore_gp_from_cprestore_slot (rtx temp
)
11437 gcc_assert (TARGET_ABICALLS
&& TARGET_OLDABI
&& epilogue_completed
);
11439 if (!cfun
->machine
->must_restore_gp_when_clobbered_p
)
11441 emit_note (NOTE_INSN_DELETED
);
11447 mips_emit_move (temp
, mips_cprestore_slot (temp
, true));
11448 mips_emit_move (pic_offset_table_rtx
, temp
);
11451 mips_emit_move (pic_offset_table_rtx
, mips_cprestore_slot (temp
, true));
11452 if (!TARGET_EXPLICIT_RELOCS
)
11453 emit_insn (gen_blockage ());
11456 /* A function to save or store a register. The first argument is the
11457 register and the second is the stack slot. */
11458 typedef void (*mips_save_restore_fn
) (rtx
, rtx
);
11460 /* Use FN to save or restore register REGNO. MODE is the register's
11461 mode and OFFSET is the offset of its save slot from the current
11465 mips_save_restore_reg (machine_mode mode
, int regno
,
11466 HOST_WIDE_INT offset
, mips_save_restore_fn fn
)
11470 mem
= gen_frame_mem (mode
, plus_constant (Pmode
, stack_pointer_rtx
,
11472 fn (gen_rtx_REG (mode
, regno
), mem
);
11475 /* Call FN for each accumulator that is saved by the current function.
11476 SP_OFFSET is the offset of the current stack pointer from the start
11480 mips_for_each_saved_acc (HOST_WIDE_INT sp_offset
, mips_save_restore_fn fn
)
11482 HOST_WIDE_INT offset
;
11485 offset
= cfun
->machine
->frame
.acc_sp_offset
- sp_offset
;
11486 if (BITSET_P (cfun
->machine
->frame
.acc_mask
, 0))
11488 mips_save_restore_reg (word_mode
, LO_REGNUM
, offset
, fn
);
11489 offset
-= UNITS_PER_WORD
;
11490 mips_save_restore_reg (word_mode
, HI_REGNUM
, offset
, fn
);
11491 offset
-= UNITS_PER_WORD
;
11494 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
++)
11495 if (BITSET_P (cfun
->machine
->frame
.acc_mask
,
11496 ((regno
- DSP_ACC_REG_FIRST
) / 2) + 1))
11498 mips_save_restore_reg (word_mode
, regno
, offset
, fn
);
11499 offset
-= UNITS_PER_WORD
;
11503 /* Save register REG to MEM. Make the instruction frame-related. */
11506 mips_save_reg (rtx reg
, rtx mem
)
11508 if (GET_MODE (reg
) == DFmode
11509 && (!TARGET_FLOAT64
11510 || mips_abi
== ABI_32
))
11514 mips_emit_move_or_split (mem
, reg
, SPLIT_IF_NECESSARY
);
11516 x1
= mips_frame_set (mips_subword (mem
, false),
11517 mips_subword (reg
, false));
11518 x2
= mips_frame_set (mips_subword (mem
, true),
11519 mips_subword (reg
, true));
11520 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, x1
, x2
)));
11523 mips_emit_save_slot_move (mem
, reg
, MIPS_PROLOGUE_TEMP (GET_MODE (reg
)));
/* Capture the register combinations that are allowed in a SWM or LWM
   instruction.  The entries are ordered by number of registers set in
   the mask.  We also ignore the single register encodings because a
   normal SW/LW is preferred.  */

static const unsigned int umips_swm_mask[17] = {
  0xc0ff0000, 0x80ff0000, 0x40ff0000, 0x807f0000,
  0x00ff0000, 0x803f0000, 0x007f0000, 0x801f0000,
  0x003f0000, 0x800f0000, 0x001f0000, 0x80070000,
  0x000f0000, 0x80030000, 0x00070000, 0x80010000,
  0x00030000
};

/* Encodings paired with umips_swm_mask: low nibble is the count of
   $16-$23/$30 registers, high nibble is nonzero when $31 is included.  */
static const unsigned int umips_swm_encoding[17] = {
  25, 24, 9, 23, 8, 22, 7, 21, 6, 20, 5, 19, 4, 18, 3, 17, 2
};
11543 /* Try to use a microMIPS LWM or SWM instruction to save or restore
11544 as many GPRs in *MASK as possible. *OFFSET is the offset from the
11545 stack pointer of the topmost save slot.
11547 Remove from *MASK all registers that were handled using LWM and SWM.
11548 Update *OFFSET so that it points to the first unused save slot. */
11551 umips_build_save_restore (mips_save_restore_fn fn
,
11552 unsigned *mask
, HOST_WIDE_INT
*offset
)
11556 rtx pattern
, set
, reg
, mem
;
11557 HOST_WIDE_INT this_offset
;
11560 /* Try matching $16 to $31 (s0 to ra). */
11561 for (i
= 0; i
< ARRAY_SIZE (umips_swm_mask
); i
++)
11562 if ((*mask
& 0xffff0000) == umips_swm_mask
[i
])
11565 if (i
== ARRAY_SIZE (umips_swm_mask
))
11568 /* Get the offset of the lowest save slot. */
11569 nregs
= (umips_swm_encoding
[i
] & 0xf) + (umips_swm_encoding
[i
] >> 4);
11570 this_offset
= *offset
- UNITS_PER_WORD
* (nregs
- 1);
11572 /* LWM/SWM can only support offsets from -2048 to 2047. */
11573 if (!UMIPS_12BIT_OFFSET_P (this_offset
))
11576 /* Create the final PARALLEL. */
11577 pattern
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (nregs
));
11578 this_base
= stack_pointer_rtx
;
11580 /* For registers $16-$23 and $30. */
11581 for (j
= 0; j
< (umips_swm_encoding
[i
] & 0xf); j
++)
11583 HOST_WIDE_INT offset
= this_offset
+ j
* UNITS_PER_WORD
;
11584 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, this_base
, offset
));
11585 unsigned int regno
= (j
!= 8) ? 16 + j
: 30;
11586 *mask
&= ~(1 << regno
);
11587 reg
= gen_rtx_REG (SImode
, regno
);
11588 if (fn
== mips_save_reg
)
11589 set
= mips_frame_set (mem
, reg
);
11592 set
= gen_rtx_SET (reg
, mem
);
11593 mips_add_cfa_restore (reg
);
11595 XVECEXP (pattern
, 0, j
) = set
;
11598 /* For register $31. */
11599 if (umips_swm_encoding
[i
] >> 4)
11601 HOST_WIDE_INT offset
= this_offset
+ j
* UNITS_PER_WORD
;
11602 *mask
&= ~(1 << 31);
11603 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, this_base
, offset
));
11604 reg
= gen_rtx_REG (SImode
, 31);
11605 if (fn
== mips_save_reg
)
11606 set
= mips_frame_set (mem
, reg
);
11609 set
= gen_rtx_SET (reg
, mem
);
11610 mips_add_cfa_restore (reg
);
11612 XVECEXP (pattern
, 0, j
) = set
;
11615 pattern
= emit_insn (pattern
);
11616 if (fn
== mips_save_reg
)
11617 RTX_FRAME_RELATED_P (pattern
) = 1;
11619 /* Adjust the last offset. */
11620 *offset
-= UNITS_PER_WORD
* nregs
;
11625 /* Call FN for each register that is saved by the current function.
11626 SP_OFFSET is the offset of the current stack pointer from the start
11630 mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset
,
11631 mips_save_restore_fn fn
)
11633 machine_mode fpr_mode
;
11635 const struct mips_frame_info
*frame
= &cfun
->machine
->frame
;
11636 HOST_WIDE_INT offset
;
11639 /* Save registers starting from high to low. The debuggers prefer at least
11640 the return register be stored at func+4, and also it allows us not to
11641 need a nop in the epilogue if at least one register is reloaded in
11642 addition to return address. */
11643 offset
= frame
->gp_sp_offset
- sp_offset
;
11644 mask
= frame
->mask
;
11646 if (TARGET_MICROMIPS
)
11647 umips_build_save_restore (fn
, &mask
, &offset
);
11649 for (regno
= GP_REG_LAST
; regno
>= GP_REG_FIRST
; regno
--)
11650 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
11652 /* Record the ra offset for use by mips_function_profiler. */
11653 if (regno
== RETURN_ADDR_REGNUM
)
11654 cfun
->machine
->frame
.ra_fp_offset
= offset
+ sp_offset
;
11655 mips_save_restore_reg (word_mode
, regno
, offset
, fn
);
11656 offset
-= UNITS_PER_WORD
;
11659 /* This loop must iterate over the same space as its companion in
11660 mips_compute_frame_info. */
11661 offset
= cfun
->machine
->frame
.fp_sp_offset
- sp_offset
;
11662 fpr_mode
= (TARGET_SINGLE_FLOAT
? SFmode
: DFmode
);
11663 for (regno
= FP_REG_LAST
- MAX_FPRS_PER_FMT
+ 1;
11664 regno
>= FP_REG_FIRST
;
11665 regno
-= MAX_FPRS_PER_FMT
)
11666 if (BITSET_P (cfun
->machine
->frame
.fmask
, regno
- FP_REG_FIRST
))
11668 if (!TARGET_FLOAT64
&& TARGET_DOUBLE_FLOAT
11669 && (fixed_regs
[regno
] || fixed_regs
[regno
+ 1]))
11671 if (fixed_regs
[regno
])
11672 mips_save_restore_reg (SFmode
, regno
+ 1, offset
, fn
);
11674 mips_save_restore_reg (SFmode
, regno
, offset
, fn
);
11677 mips_save_restore_reg (fpr_mode
, regno
, offset
, fn
);
11678 offset
-= GET_MODE_SIZE (fpr_mode
);
11682 /* Return true if a move between register REGNO and its save slot (MEM)
11683 can be done in a single move. LOAD_P is true if we are loading
11684 from the slot, false if we are storing to it. */
11687 mips_direct_save_slot_move_p (unsigned int regno
, rtx mem
, bool load_p
)
11689 /* There is a specific MIPS16 instruction for saving $31 to the stack. */
11690 if (TARGET_MIPS16
&& !load_p
&& regno
== RETURN_ADDR_REGNUM
)
11693 return mips_secondary_reload_class (REGNO_REG_CLASS (regno
),
11694 GET_MODE (mem
), mem
, load_p
) == NO_REGS
;
11697 /* Emit a move from SRC to DEST, given that one of them is a register
11698 save slot and that the other is a register. TEMP is a temporary
11699 GPR of the same mode that is available if need be. */
11702 mips_emit_save_slot_move (rtx dest
, rtx src
, rtx temp
)
11704 unsigned int regno
;
11709 regno
= REGNO (src
);
11714 regno
= REGNO (dest
);
11718 if (regno
== cfun
->machine
->global_pointer
&& !mips_must_initialize_gp_p ())
11720 /* We don't yet know whether we'll need this instruction or not.
11721 Postpone the decision by emitting a ghost move. This move
11722 is specifically not frame-related; only the split version is. */
11724 emit_insn (gen_move_gpdi (dest
, src
));
11726 emit_insn (gen_move_gpsi (dest
, src
));
11730 if (regno
== HI_REGNUM
)
11734 mips_emit_move (temp
, src
);
11736 emit_insn (gen_mthidi_ti (gen_rtx_REG (TImode
, MD_REG_FIRST
),
11737 temp
, gen_rtx_REG (DImode
, LO_REGNUM
)));
11739 emit_insn (gen_mthisi_di (gen_rtx_REG (DImode
, MD_REG_FIRST
),
11740 temp
, gen_rtx_REG (SImode
, LO_REGNUM
)));
11745 emit_insn (gen_mfhidi_ti (temp
,
11746 gen_rtx_REG (TImode
, MD_REG_FIRST
)));
11748 emit_insn (gen_mfhisi_di (temp
,
11749 gen_rtx_REG (DImode
, MD_REG_FIRST
)));
11750 mips_emit_move (dest
, temp
);
11753 else if (mips_direct_save_slot_move_p (regno
, mem
, mem
== src
))
11754 mips_emit_move (dest
, src
);
11757 gcc_assert (!reg_overlap_mentioned_p (dest
, temp
));
11758 mips_emit_move (temp
, src
);
11759 mips_emit_move (dest
, temp
);
11762 mips_set_frame_expr (mips_frame_set (dest
, src
));
11765 /* If we're generating n32 or n64 abicalls, and the current function
11766 does not use $28 as its global pointer, emit a cplocal directive.
11767 Use pic_offset_table_rtx as the argument to the directive. */
11770 mips_output_cplocal (void)
11772 if (!TARGET_EXPLICIT_RELOCS
11773 && mips_must_initialize_gp_p ()
11774 && cfun
->machine
->global_pointer
!= GLOBAL_POINTER_REGNUM
)
11775 output_asm_insn (".cplocal %+", 0);
11778 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
11781 mips_output_function_prologue (FILE *file
)
11783 const char *fnname
;
11785 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
11786 floating-point arguments. */
11788 && TARGET_HARD_FLOAT_ABI
11789 && crtl
->args
.info
.fp_code
!= 0)
11790 mips16_build_function_stub ();
11792 /* Get the function name the same way that toplev.c does before calling
11793 assemble_start_function. This is needed so that the name used here
11794 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
11795 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
11796 mips_start_function_definition (fnname
, TARGET_MIPS16
);
11798 /* Output MIPS-specific frame information. */
11799 if (!flag_inhibit_size_directive
)
11801 const struct mips_frame_info
*frame
;
11803 frame
= &cfun
->machine
->frame
;
11805 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
11807 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC
",%s\t\t"
11808 "# vars= " HOST_WIDE_INT_PRINT_DEC
11810 ", args= " HOST_WIDE_INT_PRINT_DEC
11811 ", gp= " HOST_WIDE_INT_PRINT_DEC
"\n",
11812 reg_names
[frame_pointer_needed
11813 ? HARD_FRAME_POINTER_REGNUM
11814 : STACK_POINTER_REGNUM
],
11815 (frame_pointer_needed
11816 ? frame
->total_size
- frame
->hard_frame_pointer_offset
11817 : frame
->total_size
),
11818 reg_names
[RETURN_ADDR_REGNUM
],
11820 frame
->num_gp
, frame
->num_fp
,
11822 frame
->cprestore_size
);
11824 /* .mask MASK, OFFSET. */
11825 fprintf (file
, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC
"\n",
11826 frame
->mask
, frame
->gp_save_offset
);
11828 /* .fmask MASK, OFFSET. */
11829 fprintf (file
, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC
"\n",
11830 frame
->fmask
, frame
->fp_save_offset
);
11833 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
11834 Also emit the ".set noreorder; .set nomacro" sequence for functions
11836 if (mips_must_initialize_gp_p ()
11837 && mips_current_loadgp_style () == LOADGP_OLDABI
)
11841 /* This is a fixed-form sequence. The position of the
11842 first two instructions is important because of the
11843 way _gp_disp is defined. */
11844 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
11845 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
11846 output_asm_insn ("sll\t$2,16", 0);
11847 output_asm_insn ("addu\t$2,$3", 0);
11851 /* .cpload must be in a .set noreorder but not a
11852 .set nomacro block. */
11853 mips_push_asm_switch (&mips_noreorder
);
11854 output_asm_insn (".cpload\t%^", 0);
11855 if (!cfun
->machine
->all_noreorder_p
)
11856 mips_pop_asm_switch (&mips_noreorder
);
11858 mips_push_asm_switch (&mips_nomacro
);
11861 else if (cfun
->machine
->all_noreorder_p
)
11863 mips_push_asm_switch (&mips_noreorder
);
11864 mips_push_asm_switch (&mips_nomacro
);
11867 /* Tell the assembler which register we're using as the global
11868 pointer. This is needed for thunks, since they can use either
11869 explicit relocs or assembler macros. */
11870 mips_output_cplocal ();
11873 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
11876 mips_output_function_epilogue (FILE *)
11878 const char *fnname
;
11880 /* Reinstate the normal $gp. */
11881 SET_REGNO (pic_offset_table_rtx
, GLOBAL_POINTER_REGNUM
);
11882 mips_output_cplocal ();
11884 if (cfun
->machine
->all_noreorder_p
)
11886 mips_pop_asm_switch (&mips_nomacro
);
11887 mips_pop_asm_switch (&mips_noreorder
);
11890 /* Get the function name the same way that toplev.c does before calling
11891 assemble_start_function. This is needed so that the name used here
11892 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
11893 fnname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
11894 mips_end_function_definition (fnname
);
11897 /* Emit an optimisation barrier for accesses to the current frame. */
11900 mips_frame_barrier (void)
11902 emit_clobber (gen_frame_mem (BLKmode
, stack_pointer_rtx
));
11906 /* The __gnu_local_gp symbol. */
11908 static GTY(()) rtx mips_gnu_local_gp
;
11910 /* If we're generating n32 or n64 abicalls, emit instructions
11911 to set up the global pointer. */
11914 mips_emit_loadgp (void)
11916 rtx addr
, offset
, incoming_address
, base
, index
, pic_reg
;
11918 pic_reg
= TARGET_MIPS16
? MIPS16_PIC_TEMP
: pic_offset_table_rtx
;
11919 switch (mips_current_loadgp_style ())
11921 case LOADGP_ABSOLUTE
:
11922 if (mips_gnu_local_gp
== NULL
)
11924 mips_gnu_local_gp
= gen_rtx_SYMBOL_REF (Pmode
, "__gnu_local_gp");
11925 SYMBOL_REF_FLAGS (mips_gnu_local_gp
) |= SYMBOL_FLAG_LOCAL
;
11927 emit_insn (PMODE_INSN (gen_loadgp_absolute
,
11928 (pic_reg
, mips_gnu_local_gp
)));
11931 case LOADGP_OLDABI
:
11932 /* Added by mips_output_function_prologue. */
11935 case LOADGP_NEWABI
:
11936 addr
= XEXP (DECL_RTL (current_function_decl
), 0);
11937 offset
= mips_unspec_address (addr
, SYMBOL_GOTOFF_LOADGP
);
11938 incoming_address
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
11939 emit_insn (PMODE_INSN (gen_loadgp_newabi
,
11940 (pic_reg
, offset
, incoming_address
)));
11944 base
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (VXWORKS_GOTT_BASE
));
11945 index
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (VXWORKS_GOTT_INDEX
));
11946 emit_insn (PMODE_INSN (gen_loadgp_rtp
, (pic_reg
, base
, index
)));
11954 emit_insn (PMODE_INSN (gen_copygp_mips16
,
11955 (pic_offset_table_rtx
, pic_reg
)));
11957 /* Emit a blockage if there are implicit uses of the GP register.
11958 This includes profiled functions, because FUNCTION_PROFILE uses
11960 if (!TARGET_EXPLICIT_RELOCS
|| crtl
->profile
)
11961 emit_insn (gen_loadgp_blockage ());
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
11970 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
11971 inclusive. These are offsets from the current stack pointer. */
11974 mips_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
11977 sorry ("%<-fstack-check=specific%> not implemented for MIPS16");
11979 /* See if we have a constant small number of probes to generate. If so,
11980 that's the easy case. */
11981 if (first
+ size
<= 32768)
11985 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
11986 it exceeds SIZE. If only one probe is needed, this will not
11987 generate any code. Then probe at FIRST + SIZE. */
11988 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
11989 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
11992 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
11996 /* Otherwise, do the same as above, but in a loop. Note that we must be
11997 extra careful with variables wrapping around because we might be at
11998 the very top (or the very bottom) of the address space and we have
11999 to be able to handle this case properly; in particular, we use an
12000 equality test for the loop condition. */
12003 HOST_WIDE_INT rounded_size
;
12004 rtx r3
= MIPS_PROLOGUE_TEMP (Pmode
);
12005 rtx r12
= MIPS_PROLOGUE_TEMP2 (Pmode
);
12007 /* Sanity check for the addressing mode we're going to use. */
12008 gcc_assert (first
<= 32768);
12011 /* Step 1: round SIZE to the previous multiple of the interval. */
12013 rounded_size
= ROUND_DOWN (size
, PROBE_INTERVAL
);
12016 /* Step 2: compute initial and final value of the loop counter. */
12018 /* TEST_ADDR = SP + FIRST. */
12019 emit_insn (gen_rtx_SET (r3
, plus_constant (Pmode
, stack_pointer_rtx
,
12022 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
12023 if (rounded_size
> 32768)
12025 emit_move_insn (r12
, GEN_INT (rounded_size
));
12026 emit_insn (gen_rtx_SET (r12
, gen_rtx_MINUS (Pmode
, r3
, r12
)));
12029 emit_insn (gen_rtx_SET (r12
, plus_constant (Pmode
, r3
,
12033 /* Step 3: the loop
12037 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
12040 while (TEST_ADDR != LAST_ADDR)
12042 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
12043 until it is equal to ROUNDED_SIZE. */
12045 emit_insn (PMODE_INSN (gen_probe_stack_range
, (r3
, r3
, r12
)));
12048 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
12049 that SIZE is equal to ROUNDED_SIZE. */
12051 if (size
!= rounded_size
)
12052 emit_stack_probe (plus_constant (Pmode
, r12
, rounded_size
- size
));
12055 /* Make sure nothing is scheduled before we are done. */
12056 emit_insn (gen_blockage ());
12059 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
12060 absolute addresses. */
12063 mips_output_probe_stack_range (rtx reg1
, rtx reg2
)
12065 static int labelno
= 0;
12066 char loop_lab
[32], tmp
[64];
12069 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
12072 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
12074 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
12076 xops
[1] = GEN_INT (-PROBE_INTERVAL
);
12077 if (TARGET_64BIT
&& TARGET_LONG64
)
12078 output_asm_insn ("daddiu\t%0,%0,%1", xops
);
12080 output_asm_insn ("addiu\t%0,%0,%1", xops
);
12082 /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */
12084 strcpy (tmp
, "%(%<bne\t%0,%1,");
12085 output_asm_insn (strcat (tmp
, &loop_lab
[1]), xops
);
12087 output_asm_insn ("sd\t$0,0(%0)%)", xops
);
12089 output_asm_insn ("sw\t$0,0(%0)%)", xops
);
12094 /* Return true if X contains a kernel register. */
12097 mips_refers_to_kernel_reg_p (const_rtx x
)
12099 subrtx_iterator::array_type array
;
12100 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
12101 if (REG_P (*iter
) && KERNEL_REG_P (REGNO (*iter
)))
12106 /* Expand the "prologue" pattern. */
12109 mips_expand_prologue (void)
12111 const struct mips_frame_info
*frame
;
12112 HOST_WIDE_INT size
;
12113 unsigned int nargs
;
12115 if (cfun
->machine
->global_pointer
!= INVALID_REGNUM
)
12117 /* Check whether an insn uses pic_offset_table_rtx, either explicitly
12118 or implicitly. If so, we can commit to using a global pointer
12119 straight away, otherwise we need to defer the decision. */
12120 if (mips_cfun_has_inflexible_gp_ref_p ()
12121 || mips_cfun_has_flexible_gp_ref_p ())
12123 cfun
->machine
->must_initialize_gp_p
= true;
12124 cfun
->machine
->must_restore_gp_when_clobbered_p
= true;
12127 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
12130 frame
= &cfun
->machine
->frame
;
12131 size
= frame
->total_size
;
12133 if (flag_stack_usage_info
)
12134 current_function_static_stack_size
= size
;
12136 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
12137 || flag_stack_clash_protection
)
12139 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
12141 if (size
> PROBE_INTERVAL
&& size
> get_stack_check_protect ())
12142 mips_emit_probe_stack_range (get_stack_check_protect (),
12143 size
- get_stack_check_protect ());
12146 mips_emit_probe_stack_range (get_stack_check_protect (), size
);
12149 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
12150 bytes beforehand; this is enough to cover the register save area
12151 without going out of range. */
12152 if (((frame
->mask
| frame
->fmask
| frame
->acc_mask
) != 0)
12153 || frame
->num_cop0_regs
> 0)
12155 HOST_WIDE_INT step1
;
12157 step1
= MIN (size
, MIPS_MAX_FIRST_STACK_STEP
);
12158 if (GENERATE_MIPS16E_SAVE_RESTORE
)
12160 HOST_WIDE_INT offset
;
12161 unsigned int mask
, regno
;
12163 /* Try to merge argument stores into the save instruction. */
12164 nargs
= mips16e_collect_argument_saves ();
12166 /* Build the save instruction. */
12167 mask
= frame
->mask
;
12168 rtx insn
= mips16e_build_save_restore (false, &mask
, &offset
,
12170 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
12171 mips_frame_barrier ();
12174 /* Check if we need to save other registers. */
12175 for (regno
= GP_REG_FIRST
; regno
< GP_REG_LAST
; regno
++)
12176 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
12178 offset
-= UNITS_PER_WORD
;
12179 mips_save_restore_reg (word_mode
, regno
,
12180 offset
, mips_save_reg
);
12185 if (cfun
->machine
->interrupt_handler_p
)
12187 HOST_WIDE_INT offset
;
12190 /* If this interrupt is using a shadow register set, we need to
12191 get the stack pointer from the previous register set. */
12192 if (cfun
->machine
->use_shadow_register_set
== SHADOW_SET_YES
)
12193 emit_insn (PMODE_INSN (gen_mips_rdpgpr
, (stack_pointer_rtx
,
12194 stack_pointer_rtx
)));
12196 if (!cfun
->machine
->keep_interrupts_masked_p
)
12198 if (cfun
->machine
->int_mask
== INT_MASK_EIC
)
12199 /* Move from COP0 Cause to K0. */
12200 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, K0_REG_NUM
),
12201 gen_rtx_REG (SImode
, COP0_CAUSE_REG_NUM
)));
12203 /* Move from COP0 EPC to K1. */
12204 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, K1_REG_NUM
),
12205 gen_rtx_REG (SImode
,
12206 COP0_EPC_REG_NUM
)));
12208 /* Allocate the first part of the frame. */
12209 rtx insn
= gen_add3_insn (stack_pointer_rtx
, stack_pointer_rtx
,
12211 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
12212 mips_frame_barrier ();
12215 /* Start at the uppermost location for saving. */
12216 offset
= frame
->cop0_sp_offset
- size
;
12218 /* Push EPC into its stack slot. */
12219 mem
= gen_frame_mem (word_mode
,
12220 plus_constant (Pmode
, stack_pointer_rtx
,
12222 mips_emit_move (mem
, gen_rtx_REG (word_mode
, K1_REG_NUM
));
12223 offset
-= UNITS_PER_WORD
;
12225 /* Move from COP0 Status to K1. */
12226 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, K1_REG_NUM
),
12227 gen_rtx_REG (SImode
,
12228 COP0_STATUS_REG_NUM
)));
12230 /* Right justify the RIPL in k0. */
12231 if (!cfun
->machine
->keep_interrupts_masked_p
12232 && cfun
->machine
->int_mask
== INT_MASK_EIC
)
12233 emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode
, K0_REG_NUM
),
12234 gen_rtx_REG (SImode
, K0_REG_NUM
),
12235 GEN_INT (CAUSE_IPL
)));
12237 /* Push Status into its stack slot. */
12238 mem
= gen_frame_mem (word_mode
,
12239 plus_constant (Pmode
, stack_pointer_rtx
,
12241 mips_emit_move (mem
, gen_rtx_REG (word_mode
, K1_REG_NUM
));
12242 offset
-= UNITS_PER_WORD
;
12244 /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
12245 if (!cfun
->machine
->keep_interrupts_masked_p
12246 && cfun
->machine
->int_mask
== INT_MASK_EIC
)
12247 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
12250 gen_rtx_REG (SImode
, K0_REG_NUM
)));
12252 /* Clear all interrupt mask bits up to and including the
12253 handler's interrupt line. */
12254 if (!cfun
->machine
->keep_interrupts_masked_p
12255 && cfun
->machine
->int_mask
!= INT_MASK_EIC
)
12256 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
12257 GEN_INT (cfun
->machine
->int_mask
+ 1),
12259 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
12261 if (!cfun
->machine
->keep_interrupts_masked_p
)
12262 /* Enable interrupts by clearing the KSU ERL and EXL bits.
12263 IE is already the correct value, so we don't have to do
12264 anything explicit. */
12265 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
12268 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
12270 /* Disable interrupts by clearing the KSU, ERL, EXL,
12272 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
12275 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
12277 if (TARGET_HARD_FLOAT
)
12278 /* Disable COP1 for hard-float. This will lead to an exception
12279 if floating-point code is executed in an ISR. */
12280 emit_insn (gen_insvsi (gen_rtx_REG (SImode
, K1_REG_NUM
),
12283 gen_rtx_REG (SImode
, GP_REG_FIRST
)));
12289 rtx insn
= gen_add3_insn (stack_pointer_rtx
,
12292 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
12293 mips_frame_barrier ();
12297 mips_for_each_saved_acc (size
, mips_save_reg
);
12298 mips_for_each_saved_gpr_and_fpr (size
, mips_save_reg
);
12302 /* Allocate the rest of the frame. */
12305 if (SMALL_OPERAND (-size
))
12306 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx
,
12308 GEN_INT (-size
)))) = 1;
12311 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode
), GEN_INT (size
));
12314 /* There are no instructions to add or subtract registers
12315 from the stack pointer, so use the frame pointer as a
12316 temporary. We should always be using a frame pointer
12317 in this case anyway. */
12318 gcc_assert (frame_pointer_needed
);
12319 mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
12320 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx
,
12321 hard_frame_pointer_rtx
,
12322 MIPS_PROLOGUE_TEMP (Pmode
)));
12323 mips_emit_move (stack_pointer_rtx
, hard_frame_pointer_rtx
);
12326 emit_insn (gen_sub3_insn (stack_pointer_rtx
,
12328 MIPS_PROLOGUE_TEMP (Pmode
)));
12330 /* Describe the combined effect of the previous instructions. */
12331 mips_set_frame_expr
12332 (gen_rtx_SET (stack_pointer_rtx
,
12333 plus_constant (Pmode
, stack_pointer_rtx
, -size
)));
12335 mips_frame_barrier ();
12338 /* Set up the frame pointer, if we're using one. */
12339 if (frame_pointer_needed
)
12341 HOST_WIDE_INT offset
;
12343 offset
= frame
->hard_frame_pointer_offset
;
12346 rtx insn
= mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
12347 RTX_FRAME_RELATED_P (insn
) = 1;
12349 else if (SMALL_OPERAND (offset
))
12351 rtx insn
= gen_add3_insn (hard_frame_pointer_rtx
,
12352 stack_pointer_rtx
, GEN_INT (offset
));
12353 RTX_FRAME_RELATED_P (emit_insn (insn
)) = 1;
12357 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode
), GEN_INT (offset
));
12358 mips_emit_move (hard_frame_pointer_rtx
, stack_pointer_rtx
);
12359 emit_insn (gen_add3_insn (hard_frame_pointer_rtx
,
12360 hard_frame_pointer_rtx
,
12361 MIPS_PROLOGUE_TEMP (Pmode
)));
12362 mips_set_frame_expr
12363 (gen_rtx_SET (hard_frame_pointer_rtx
,
12364 plus_constant (Pmode
, stack_pointer_rtx
, offset
)));
12368 mips_emit_loadgp ();
12370 /* Initialize the $gp save slot. */
12371 if (mips_cfun_has_cprestore_slot_p ())
12373 rtx base
, mem
, gp
, temp
;
12374 HOST_WIDE_INT offset
;
12376 mips_get_cprestore_base_and_offset (&base
, &offset
, false);
12377 mem
= gen_frame_mem (Pmode
, plus_constant (Pmode
, base
, offset
));
12378 gp
= TARGET_MIPS16
? MIPS16_PIC_TEMP
: pic_offset_table_rtx
;
12379 temp
= (SMALL_OPERAND (offset
)
12380 ? gen_rtx_SCRATCH (Pmode
)
12381 : MIPS_PROLOGUE_TEMP (Pmode
));
12382 emit_insn (PMODE_INSN (gen_potential_cprestore
,
12383 (mem
, GEN_INT (offset
), gp
, temp
)));
12385 mips_get_cprestore_base_and_offset (&base
, &offset
, true);
12386 mem
= gen_frame_mem (Pmode
, plus_constant (Pmode
, base
, offset
));
12387 emit_insn (PMODE_INSN (gen_use_cprestore
, (mem
)));
12390 /* We need to search back to the last use of K0 or K1. */
12391 if (cfun
->machine
->interrupt_handler_p
)
12394 for (insn
= get_last_insn (); insn
!= NULL_RTX
; insn
= PREV_INSN (insn
))
12396 && mips_refers_to_kernel_reg_p (PATTERN (insn
)))
12398 /* Emit a move from K1 to COP0 Status after insn. */
12399 gcc_assert (insn
!= NULL_RTX
);
12400 emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode
, COP0_STATUS_REG_NUM
),
12401 gen_rtx_REG (SImode
, K1_REG_NUM
)),
12405 /* If we are profiling, make sure no instructions are scheduled before
12406 the call to mcount. */
12408 emit_insn (gen_blockage ());
12411 /* Attach all pending register saves to the previous instruction.
12412 Return that instruction. */
12415 mips_epilogue_emit_cfa_restores (void)
12419 insn
= get_last_insn ();
12420 if (mips_epilogue
.cfa_restores
)
12422 gcc_assert (insn
&& !REG_NOTES (insn
));
12423 RTX_FRAME_RELATED_P (insn
) = 1;
12424 REG_NOTES (insn
) = mips_epilogue
.cfa_restores
;
12425 mips_epilogue
.cfa_restores
= 0;
12430 /* Like mips_epilogue_emit_cfa_restores, but also record that the CFA is
12431 now at REG + OFFSET. */
12434 mips_epilogue_set_cfa (rtx reg
, HOST_WIDE_INT offset
)
12438 insn
= mips_epilogue_emit_cfa_restores ();
12439 if (reg
!= mips_epilogue
.cfa_reg
|| offset
!= mips_epilogue
.cfa_offset
)
12441 RTX_FRAME_RELATED_P (insn
) = 1;
12442 REG_NOTES (insn
) = alloc_reg_note (REG_CFA_DEF_CFA
,
12443 plus_constant (Pmode
, reg
, offset
),
12445 mips_epilogue
.cfa_reg
= reg
;
12446 mips_epilogue
.cfa_offset
= offset
;
12450 /* Emit instructions to restore register REG from slot MEM. Also update
12451 the cfa_restores list. */
12454 mips_restore_reg (rtx reg
, rtx mem
)
12456 /* There's no MIPS16 instruction to load $31 directly. Load into
12457 $7 instead and adjust the return insn appropriately. */
12458 if (TARGET_MIPS16
&& REGNO (reg
) == RETURN_ADDR_REGNUM
)
12459 reg
= gen_rtx_REG (GET_MODE (reg
), GP_REG_FIRST
+ 7);
12460 else if (GET_MODE (reg
) == DFmode
12461 && (!TARGET_FLOAT64
12462 || mips_abi
== ABI_32
))
12464 mips_add_cfa_restore (mips_subword (reg
, true));
12465 mips_add_cfa_restore (mips_subword (reg
, false));
12468 mips_add_cfa_restore (reg
);
12470 mips_emit_save_slot_move (reg
, mem
, MIPS_EPILOGUE_TEMP (GET_MODE (reg
)));
12471 if (REGNO (reg
) == REGNO (mips_epilogue
.cfa_reg
))
12472 /* The CFA is currently defined in terms of the register whose
12473 value we have just restored. Redefine the CFA in terms of
12474 the stack pointer. */
12475 mips_epilogue_set_cfa (stack_pointer_rtx
,
12476 mips_epilogue
.cfa_restore_sp_offset
);
12479 /* Emit code to set the stack pointer to BASE + OFFSET, given that
12480 BASE + OFFSET is NEW_FRAME_SIZE bytes below the top of the frame.
12481 BASE, if not the stack pointer, is available as a temporary. */
12484 mips_deallocate_stack (rtx base
, rtx offset
, HOST_WIDE_INT new_frame_size
)
12486 if (base
== stack_pointer_rtx
&& offset
== const0_rtx
)
12489 mips_frame_barrier ();
12490 if (offset
== const0_rtx
)
12492 emit_move_insn (stack_pointer_rtx
, base
);
12493 mips_epilogue_set_cfa (stack_pointer_rtx
, new_frame_size
);
12495 else if (TARGET_MIPS16
&& base
!= stack_pointer_rtx
)
12497 emit_insn (gen_add3_insn (base
, base
, offset
));
12498 mips_epilogue_set_cfa (base
, new_frame_size
);
12499 emit_move_insn (stack_pointer_rtx
, base
);
12503 emit_insn (gen_add3_insn (stack_pointer_rtx
, base
, offset
));
12504 mips_epilogue_set_cfa (stack_pointer_rtx
, new_frame_size
);
12508 /* Emit any instructions needed before a return. */
12511 mips_expand_before_return (void)
12513 /* When using a call-clobbered gp, we start out with unified call
12514 insns that include instructions to restore the gp. We then split
12515 these unified calls after reload. These split calls explicitly
12516 clobber gp, so there is no need to define
12517 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
12519 For consistency, we should also insert an explicit clobber of $28
12520 before return insns, so that the post-reload optimizers know that
12521 the register is not live on exit. */
12522 if (TARGET_CALL_CLOBBERED_GP
)
12523 emit_clobber (pic_offset_table_rtx
);
12526 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
12530 mips_expand_epilogue (bool sibcall_p
)
12532 const struct mips_frame_info
*frame
;
12533 HOST_WIDE_INT step1
, step2
;
12536 bool use_jraddiusp_p
= false;
12538 if (!sibcall_p
&& mips_can_use_return_insn ())
12540 emit_jump_insn (gen_return ());
12544 /* In MIPS16 mode, if the return value should go into a floating-point
12545 register, we need to call a helper routine to copy it over. */
12546 if (mips16_cfun_returns_in_fpr_p ())
12547 mips16_copy_fpr_return_value ();
12549 /* Split the frame into two. STEP1 is the amount of stack we should
12550 deallocate before restoring the registers. STEP2 is the amount we
12551 should deallocate afterwards.
12553 Start off by assuming that no registers need to be restored. */
12554 frame
= &cfun
->machine
->frame
;
12555 step1
= frame
->total_size
;
12558 /* Work out which register holds the frame address. */
12559 if (!frame_pointer_needed
)
12560 base
= stack_pointer_rtx
;
12563 base
= hard_frame_pointer_rtx
;
12564 step1
-= frame
->hard_frame_pointer_offset
;
12566 mips_epilogue
.cfa_reg
= base
;
12567 mips_epilogue
.cfa_offset
= step1
;
12568 mips_epilogue
.cfa_restores
= NULL_RTX
;
12570 /* If we need to restore registers, deallocate as much stack as
12571 possible in the second step without going out of range. */
12572 if ((frame
->mask
| frame
->fmask
| frame
->acc_mask
) != 0
12573 || frame
->num_cop0_regs
> 0)
12575 step2
= MIN (step1
, MIPS_MAX_FIRST_STACK_STEP
);
12579 /* Get an rtx for STEP1 that we can add to BASE. */
12580 adjust
= GEN_INT (step1
);
12581 if (!SMALL_OPERAND (step1
))
12583 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), adjust
);
12584 adjust
= MIPS_EPILOGUE_TEMP (Pmode
);
12586 mips_deallocate_stack (base
, adjust
, step2
);
12588 /* If we're using addressing macros, $gp is implicitly used by all
12589 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
12591 if (TARGET_CALL_SAVED_GP
&& !TARGET_EXPLICIT_RELOCS
)
12592 emit_insn (gen_blockage ());
12594 mips_epilogue
.cfa_restore_sp_offset
= step2
;
12595 if (GENERATE_MIPS16E_SAVE_RESTORE
&& frame
->mask
!= 0)
12597 unsigned int regno
, mask
;
12598 HOST_WIDE_INT offset
;
12601 /* Generate the restore instruction. */
12602 mask
= frame
->mask
;
12603 restore
= mips16e_build_save_restore (true, &mask
, &offset
, 0, step2
);
12605 /* Restore any other registers manually. */
12606 for (regno
= GP_REG_FIRST
; regno
< GP_REG_LAST
; regno
++)
12607 if (BITSET_P (mask
, regno
- GP_REG_FIRST
))
12609 offset
-= UNITS_PER_WORD
;
12610 mips_save_restore_reg (word_mode
, regno
, offset
, mips_restore_reg
);
12613 /* Restore the remaining registers and deallocate the final bit
12615 mips_frame_barrier ();
12616 emit_insn (restore
);
12617 mips_epilogue_set_cfa (stack_pointer_rtx
, 0);
12621 /* Restore the registers. */
12622 mips_for_each_saved_acc (frame
->total_size
- step2
, mips_restore_reg
);
12623 mips_for_each_saved_gpr_and_fpr (frame
->total_size
- step2
,
12626 if (cfun
->machine
->interrupt_handler_p
)
12628 HOST_WIDE_INT offset
;
12631 offset
= frame
->cop0_sp_offset
- (frame
->total_size
- step2
);
12633 /* Restore the original EPC. */
12634 mem
= gen_frame_mem (word_mode
,
12635 plus_constant (Pmode
, stack_pointer_rtx
,
12637 mips_emit_move (gen_rtx_REG (word_mode
, K1_REG_NUM
), mem
);
12638 offset
-= UNITS_PER_WORD
;
12640 /* Move to COP0 EPC. */
12641 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, COP0_EPC_REG_NUM
),
12642 gen_rtx_REG (SImode
, K1_REG_NUM
)));
12644 /* Restore the original Status. */
12645 mem
= gen_frame_mem (word_mode
,
12646 plus_constant (Pmode
, stack_pointer_rtx
,
12648 mips_emit_move (gen_rtx_REG (word_mode
, K1_REG_NUM
), mem
);
12649 offset
-= UNITS_PER_WORD
;
12651 /* If we don't use shadow register set, we need to update SP. */
12652 if (cfun
->machine
->use_shadow_register_set
== SHADOW_SET_NO
)
12653 mips_deallocate_stack (stack_pointer_rtx
, GEN_INT (step2
), 0);
12655 /* The choice of position is somewhat arbitrary in this case. */
12656 mips_epilogue_emit_cfa_restores ();
12658 /* Move to COP0 Status. */
12659 emit_insn (gen_cop0_move (gen_rtx_REG (SImode
, COP0_STATUS_REG_NUM
),
12660 gen_rtx_REG (SImode
, K1_REG_NUM
)));
12662 else if (TARGET_MICROMIPS
12663 && !crtl
->calls_eh_return
12666 && mips_unsigned_immediate_p (step2
, 5, 2))
12667 use_jraddiusp_p
= true;
12669 /* Deallocate the final bit of the frame. */
12670 mips_deallocate_stack (stack_pointer_rtx
, GEN_INT (step2
), 0);
12673 if (cfun
->machine
->use_frame_header_for_callee_saved_regs
)
12674 mips_epilogue_emit_cfa_restores ();
12675 else if (!use_jraddiusp_p
)
12676 gcc_assert (!mips_epilogue
.cfa_restores
);
12678 /* Add in the __builtin_eh_return stack adjustment. We need to
12679 use a temporary in MIPS16 code. */
12680 if (crtl
->calls_eh_return
)
12684 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode
), stack_pointer_rtx
);
12685 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode
),
12686 MIPS_EPILOGUE_TEMP (Pmode
),
12687 EH_RETURN_STACKADJ_RTX
));
12688 mips_emit_move (stack_pointer_rtx
, MIPS_EPILOGUE_TEMP (Pmode
));
12691 emit_insn (gen_add3_insn (stack_pointer_rtx
,
12693 EH_RETURN_STACKADJ_RTX
));
12698 mips_expand_before_return ();
12699 if (cfun
->machine
->interrupt_handler_p
)
12701 /* Interrupt handlers generate eret or deret. */
12702 if (cfun
->machine
->use_debug_exception_return_p
)
12703 emit_jump_insn (gen_mips_deret ());
12705 emit_jump_insn (gen_mips_eret ());
12711 /* When generating MIPS16 code, the normal
12712 mips_for_each_saved_gpr_and_fpr path will restore the return
12713 address into $7 rather than $31. */
12715 && !GENERATE_MIPS16E_SAVE_RESTORE
12716 && BITSET_P (frame
->mask
, RETURN_ADDR_REGNUM
))
12718 /* simple_returns cannot rely on values that are only available
12719 on paths through the epilogue (because return paths that do
12720 not pass through the epilogue may nevertheless reuse a
12721 simple_return that occurs at the end of the epilogue).
12722 Use a normal return here instead. */
12723 rtx reg
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 7);
12724 pat
= gen_return_internal (reg
);
12726 else if (use_jraddiusp_p
)
12727 pat
= gen_jraddiusp (GEN_INT (step2
));
12730 rtx reg
= gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
);
12731 pat
= gen_simple_return_internal (reg
);
12733 emit_jump_insn (pat
);
12734 if (use_jraddiusp_p
)
12735 mips_epilogue_set_cfa (stack_pointer_rtx
, step2
);
12739 /* Search from the beginning to the first use of K0 or K1. */
12740 if (cfun
->machine
->interrupt_handler_p
12741 && !cfun
->machine
->keep_interrupts_masked_p
)
12743 for (insn
= get_insns (); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
12745 && mips_refers_to_kernel_reg_p (PATTERN (insn
)))
12747 gcc_assert (insn
!= NULL_RTX
);
12748 /* Insert disable interrupts before the first use of K0 or K1. */
12749 emit_insn_before (gen_mips_di (), insn
);
12750 emit_insn_before (gen_mips_ehb (), insn
);
12754 /* Return nonzero if this function is known to have a null epilogue.
12755 This allows the optimizer to omit jumps to jumps if no stack
12759 mips_can_use_return_insn (void)
12761 /* Interrupt handlers need to go through the epilogue. */
12762 if (cfun
->machine
->interrupt_handler_p
)
12765 if (!reload_completed
)
12771 /* In MIPS16 mode, a function that returns a floating-point value
12772 needs to arrange to copy the return value into the floating-point
12774 if (mips16_cfun_returns_in_fpr_p ())
12777 return (cfun
->machine
->frame
.total_size
== 0
12778 && !cfun
->machine
->use_frame_header_for_callee_saved_regs
);
12781 /* Return true if register REGNO can store a value of mode MODE.
12782 The result of this function is cached in mips_hard_regno_mode_ok. */
12785 mips_hard_regno_mode_ok_uncached (unsigned int regno
, machine_mode mode
)
12788 enum mode_class mclass
;
12790 if (mode
== CCV2mode
)
12791 return (ISA_HAS_8CC
12792 && ST_REG_P (regno
)
12793 && (regno
- ST_REG_FIRST
) % 2 == 0);
12795 if (mode
== CCV4mode
)
12796 return (ISA_HAS_8CC
12797 && ST_REG_P (regno
)
12798 && (regno
- ST_REG_FIRST
) % 4 == 0);
12800 if (mode
== CCmode
)
12801 return ISA_HAS_8CC
? ST_REG_P (regno
) : regno
== FPSW_REGNUM
;
12803 size
= GET_MODE_SIZE (mode
);
12804 mclass
= GET_MODE_CLASS (mode
);
12806 if (GP_REG_P (regno
) && mode
!= CCFmode
&& !MSA_SUPPORTED_MODE_P (mode
))
12807 return ((regno
- GP_REG_FIRST
) & 1) == 0 || size
<= UNITS_PER_WORD
;
12809 /* For MSA, allow TImode and 128-bit vector modes in all FPR. */
12810 if (FP_REG_P (regno
) && MSA_SUPPORTED_MODE_P (mode
))
12813 if (FP_REG_P (regno
)
12814 && (((regno
- FP_REG_FIRST
) % MAX_FPRS_PER_FMT
) == 0
12815 || (MIN_FPRS_PER_FMT
== 1 && size
<= UNITS_PER_FPREG
)))
12817 /* Deny use of odd-numbered registers for 32-bit data for
12818 the o32 FP64A ABI. */
12819 if (TARGET_O32_FP64A_ABI
&& size
<= 4 && (regno
& 1) != 0)
12822 /* The FPXX ABI requires double-precision values to be placed in
12823 even-numbered registers. Disallow odd-numbered registers with
12824 CCFmode because CCFmode double-precision compares will write a
12825 64-bit value to a register. */
12826 if (mode
== CCFmode
)
12827 return !(TARGET_FLOATXX
&& (regno
& 1) != 0);
12829 /* Allow 64-bit vector modes for Loongson MultiMedia extensions
12830 Instructions (MMI). */
12831 if (TARGET_LOONGSON_MMI
12832 && (mode
== V2SImode
12833 || mode
== V4HImode
12834 || mode
== V8QImode
12835 || mode
== DImode
))
12838 if (mclass
== MODE_FLOAT
12839 || mclass
== MODE_COMPLEX_FLOAT
12840 || mclass
== MODE_VECTOR_FLOAT
)
12841 return size
<= UNITS_PER_FPVALUE
;
12843 /* Allow integer modes that fit into a single register. We need
12844 to put integers into FPRs when using instructions like CVT
12845 and TRUNC. There's no point allowing sizes smaller than a word,
12846 because the FPU has no appropriate load/store instructions. */
12847 if (mclass
== MODE_INT
)
12848 return size
>= MIN_UNITS_PER_WORD
&& size
<= UNITS_PER_FPREG
;
12851 /* Don't allow vector modes in accumulators. */
12852 if (ACC_REG_P (regno
)
12853 && !VECTOR_MODE_P (mode
)
12854 && (INTEGRAL_MODE_P (mode
) || ALL_FIXED_POINT_MODE_P (mode
)))
12856 if (MD_REG_P (regno
))
12858 /* After a multiplication or division, clobbering HI makes
12859 the value of LO unpredictable, and vice versa. This means
12860 that, for all interesting cases, HI and LO are effectively
12863 We model this by requiring that any value that uses HI
12865 if (size
<= UNITS_PER_WORD
* 2)
12866 return regno
== (size
<= UNITS_PER_WORD
? LO_REGNUM
: MD_REG_FIRST
);
12870 /* DSP accumulators do not have the same restrictions as
12871 HI and LO, so we can treat them as normal doubleword
12873 if (size
<= UNITS_PER_WORD
)
12876 if (size
<= UNITS_PER_WORD
* 2
12877 && ((regno
- DSP_ACC_REG_FIRST
) & 1) == 0)
12882 if (ALL_COP_REG_P (regno
))
12883 return mclass
== MODE_INT
&& size
<= UNITS_PER_WORD
;
12885 if (regno
== GOT_VERSION_REGNUM
)
12886 return mode
== SImode
;
12891 /* Implement TARGET_HARD_REGNO_MODE_OK. */
12894 mips_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
12896 return mips_hard_regno_mode_ok_p
[mode
][regno
];
12899 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
12902 mips_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED
,
12903 unsigned int new_reg
)
12905 /* Interrupt functions can only use registers that have already been
12906 saved by the prologue, even if they would normally be call-clobbered. */
12907 if (cfun
->machine
->interrupt_handler_p
&& !df_regs_ever_live_p (new_reg
))
12913 /* Return nonzero if register REGNO can be used as a scratch register
12917 mips_hard_regno_scratch_ok (unsigned int regno
)
12919 /* See mips_hard_regno_rename_ok. */
12920 if (cfun
->machine
->interrupt_handler_p
&& !df_regs_ever_live_p (regno
))
12926 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. Odd-numbered
12927 single-precision registers are not considered callee-saved for o32
12928 FPXX as they will be clobbered when run on an FR=1 FPU. MSA vector
12929 registers with MODE > 64 bits are part clobbered too. */
12932 mips_hard_regno_call_part_clobbered (unsigned int, unsigned int regno
,
12936 && hard_regno_nregs (regno
, mode
) == 1
12937 && FP_REG_P (regno
)
12938 && (regno
& 1) != 0)
12941 if (ISA_HAS_MSA
&& FP_REG_P (regno
) && GET_MODE_SIZE (mode
) > 8)
12947 /* Implement TARGET_HARD_REGNO_NREGS. */
12949 static unsigned int
12950 mips_hard_regno_nregs (unsigned int regno
, machine_mode mode
)
12952 if (ST_REG_P (regno
))
12953 /* The size of FP status registers is always 4, because they only hold
12954 CCmode values, and CCmode is always considered to be 4 bytes wide. */
12955 return (GET_MODE_SIZE (mode
) + 3) / 4;
12957 if (FP_REG_P (regno
))
12959 if (MSA_SUPPORTED_MODE_P (mode
))
12962 return (GET_MODE_SIZE (mode
) + UNITS_PER_FPREG
- 1) / UNITS_PER_FPREG
;
12965 /* All other registers are word-sized. */
12966 return (GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
12969 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
12970 in mips_hard_regno_nregs. */
12973 mips_class_max_nregs (enum reg_class rclass
, machine_mode mode
)
12979 left
= reg_class_contents
[rclass
];
12980 if (hard_reg_set_intersect_p (left
, reg_class_contents
[(int) ST_REGS
]))
12982 if (mips_hard_regno_mode_ok (ST_REG_FIRST
, mode
))
12983 size
= MIN (size
, 4);
12985 left
&= ~reg_class_contents
[ST_REGS
];
12987 if (hard_reg_set_intersect_p (left
, reg_class_contents
[(int) FP_REGS
]))
12989 if (mips_hard_regno_mode_ok (FP_REG_FIRST
, mode
))
12991 if (MSA_SUPPORTED_MODE_P (mode
))
12992 size
= MIN (size
, UNITS_PER_MSA_REG
);
12994 size
= MIN (size
, UNITS_PER_FPREG
);
12997 left
&= ~reg_class_contents
[FP_REGS
];
12999 if (!hard_reg_set_empty_p (left
))
13000 size
= MIN (size
, UNITS_PER_WORD
);
13001 return (GET_MODE_SIZE (mode
) + size
- 1) / size
;
13004 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
13007 mips_can_change_mode_class (machine_mode from
,
13008 machine_mode to
, reg_class_t rclass
)
13010 /* Allow conversions between different Loongson integer vectors,
13011 and between those vectors and DImode. */
13012 if (GET_MODE_SIZE (from
) == 8 && GET_MODE_SIZE (to
) == 8
13013 && INTEGRAL_MODE_P (from
) && INTEGRAL_MODE_P (to
))
13016 /* Allow conversions between different MSA vector modes. */
13017 if (MSA_SUPPORTED_MODE_P (from
) && MSA_SUPPORTED_MODE_P (to
))
13020 /* Otherwise, there are several problems with changing the modes of
13021 values in floating-point registers:
13023 - When a multi-word value is stored in paired floating-point
13024 registers, the first register always holds the low word. We
13025 therefore can't allow FPRs to change between single-word and
13026 multi-word modes on big-endian targets.
13028 - GCC assumes that each word of a multiword register can be
13029 accessed individually using SUBREGs. This is not true for
13030 floating-point registers if they are bigger than a word.
13032 - Loading a 32-bit value into a 64-bit floating-point register
13033 will not sign-extend the value, despite what LOAD_EXTEND_OP
13034 says. We can't allow FPRs to change from SImode to a wider
13035 mode on 64-bit targets.
13037 - If the FPU has already interpreted a value in one format, we
13038 must not ask it to treat the value as having a different
13041 We therefore disallow all mode changes involving FPRs. */
13043 return !reg_classes_intersect_p (FP_REGS
, rclass
);
13046 /* Implement target hook small_register_classes_for_mode_p. */
13049 mips_small_register_classes_for_mode_p (machine_mode mode
13052 return TARGET_MIPS16
;
13055 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction,
13056 or use the MSA's move.v instruction. */
13059 mips_mode_ok_for_mov_fmt_p (machine_mode mode
)
13065 return TARGET_HARD_FLOAT
;
13068 return TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
;
13071 return TARGET_HARD_FLOAT
&& TARGET_PAIRED_SINGLE_FLOAT
;
13074 return MSA_SUPPORTED_MODE_P (mode
);
13078 /* Implement TARGET_MODES_TIEABLE_P. */
13081 mips_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
13083 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
13084 prefer to put one of them in FPRs. */
13085 return (mode1
== mode2
13086 || (!mips_mode_ok_for_mov_fmt_p (mode1
)
13087 && !mips_mode_ok_for_mov_fmt_p (mode2
)));
13090 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
13093 mips_preferred_reload_class (rtx x
, reg_class_t rclass
)
13095 if (mips_dangerous_for_la25_p (x
) && reg_class_subset_p (LEA_REGS
, rclass
))
13098 if (reg_class_subset_p (FP_REGS
, rclass
)
13099 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x
)))
13102 if (reg_class_subset_p (GR_REGS
, rclass
))
13105 if (TARGET_MIPS16
&& reg_class_subset_p (M16_REGS
, rclass
))
13111 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
13112 Return a "canonical" class to represent it in later calculations. */
13115 mips_canonicalize_move_class (reg_class_t rclass
)
13117 /* All moves involving accumulator registers have the same cost. */
13118 if (reg_class_subset_p (rclass
, ACC_REGS
))
13121 /* Likewise promote subclasses of general registers to the most
13122 interesting containing class. */
13123 if (TARGET_MIPS16
&& reg_class_subset_p (rclass
, M16_REGS
))
13125 else if (reg_class_subset_p (rclass
, GENERAL_REGS
))
13126 rclass
= GENERAL_REGS
;
13131 /* Return the cost of moving a value from a register of class FROM to a GPR.
13132 Return 0 for classes that are unions of other classes handled by this
13136 mips_move_to_gpr_cost (reg_class_t from
)
13142 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
13146 /* MFLO and MFHI. */
13156 /* This choice of value is historical. */
13164 /* Return the cost of moving a value from a GPR to a register of class TO.
13165 Return 0 for classes that are unions of other classes handled by this
13169 mips_move_from_gpr_cost (reg_class_t to
)
13175 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
13179 /* MTLO and MTHI. */
13189 /* This choice of value is historical. */
13197 /* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
13198 maximum of the move costs for subclasses; regclass will work out
13199 the maximum for us. */
13202 mips_register_move_cost (machine_mode mode
,
13203 reg_class_t from
, reg_class_t to
)
13208 from
= mips_canonicalize_move_class (from
);
13209 to
= mips_canonicalize_move_class (to
);
13211 /* Handle moves that can be done without using general-purpose registers. */
13212 if (from
== FP_REGS
)
13214 if (to
== FP_REGS
&& mips_mode_ok_for_mov_fmt_p (mode
))
13219 /* Handle cases in which only one class deviates from the ideal. */
13220 dregs
= TARGET_MIPS16
? M16_REGS
: GENERAL_REGS
;
13222 return mips_move_from_gpr_cost (to
);
13224 return mips_move_to_gpr_cost (from
);
13226 /* Handles cases that require a GPR temporary. */
13227 cost1
= mips_move_to_gpr_cost (from
);
13230 cost2
= mips_move_from_gpr_cost (to
);
13232 return cost1
+ cost2
;
13238 /* Implement TARGET_REGISTER_PRIORITY. */
13241 mips_register_priority (int hard_regno
)
13243 /* Treat MIPS16 registers with higher priority than other regs. */
13245 && TEST_HARD_REG_BIT (reg_class_contents
[M16_REGS
], hard_regno
))
13250 /* Implement TARGET_MEMORY_MOVE_COST. */
13253 mips_memory_move_cost (machine_mode mode
, reg_class_t rclass
, bool in
)
13255 return (mips_cost
->memory_latency
13256 + memory_move_secondary_cost (mode
, rclass
, in
));
13259 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
13261 When targeting the o32 FPXX ABI, all moves with a length of doubleword
13262 or greater must be performed by FR-mode-aware instructions.
13263 This can be achieved using MFHC1/MTHC1 when these instructions are
13264 available but otherwise moves must go via memory.
13265 For the o32 FP64A ABI, all odd-numbered moves with a length of
13266 doubleword or greater are required to use memory. Using MTC1/MFC1
13267 to access the lower-half of these registers would require a forbidden
13268 single-precision access. We require all double-word moves to use
13269 memory because adding even and odd floating-point registers classes
13270 would have a significant impact on the backend. */
13273 mips_secondary_memory_needed (machine_mode mode
, reg_class_t class1
,
13274 reg_class_t class2
)
13276 /* Ignore spilled pseudos. */
13277 if (lra_in_progress
&& (class1
== NO_REGS
|| class2
== NO_REGS
))
13280 if (((class1
== FP_REGS
) != (class2
== FP_REGS
))
13281 && ((TARGET_FLOATXX
&& !ISA_HAS_MXHC1
)
13282 || TARGET_O32_FP64A_ABI
)
13283 && GET_MODE_SIZE (mode
) >= 8)
13289 /* Return the register class required for a secondary register when
13290 copying between one of the registers in RCLASS and value X, which
13291 has mode MODE. X is the source of the move if IN_P, otherwise it
13292 is the destination. Return NO_REGS if no secondary register is
13296 mips_secondary_reload_class (enum reg_class rclass
,
13297 machine_mode mode
, rtx x
, bool)
13301 /* If X is a constant that cannot be loaded into $25, it must be loaded
13302 into some other GPR. No other register class allows a direct move. */
13303 if (mips_dangerous_for_la25_p (x
))
13304 return reg_class_subset_p (rclass
, LEA_REGS
) ? NO_REGS
: LEA_REGS
;
13306 regno
= true_regnum (x
);
13309 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
13310 if (!reg_class_subset_p (rclass
, M16_REGS
) && !M16_REG_P (regno
))
13316 /* Copying from accumulator registers to anywhere other than a general
13317 register requires a temporary general register. */
13318 if (reg_class_subset_p (rclass
, ACC_REGS
))
13319 return GP_REG_P (regno
) ? NO_REGS
: GR_REGS
;
13320 if (ACC_REG_P (regno
))
13321 return reg_class_subset_p (rclass
, GR_REGS
) ? NO_REGS
: GR_REGS
;
13323 if (reg_class_subset_p (rclass
, FP_REGS
))
13327 && (GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)))
13328 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
13329 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
13332 if (MEM_P (x
) && MSA_SUPPORTED_MODE_P (mode
))
13333 /* In this case we can use MSA LD.* and ST.*. */
13336 if (GP_REG_P (regno
) || x
== CONST0_RTX (mode
))
13337 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
13340 if (CONSTANT_P (x
) && !targetm
.cannot_force_const_mem (mode
, x
))
13341 /* We can force the constant to memory and use lwc1
13342 and ldc1. As above, we will use pairs of lwc1s if
13343 ldc1 is not supported. */
13346 if (FP_REG_P (regno
) && mips_mode_ok_for_mov_fmt_p (mode
))
13347 /* In this case we can use mov.fmt. */
13350 /* Otherwise, we need to reload through an integer register. */
13353 if (FP_REG_P (regno
))
13354 return reg_class_subset_p (rclass
, GR_REGS
) ? NO_REGS
: GR_REGS
;
13359 /* Implement TARGET_MODE_REP_EXTENDED. */
13362 mips_mode_rep_extended (scalar_int_mode mode
, scalar_int_mode mode_rep
)
13364 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
13365 if (TARGET_64BIT
&& mode
== SImode
&& mode_rep
== DImode
)
13366 return SIGN_EXTEND
;
13371 /* Implement TARGET_VALID_POINTER_MODE. */
13374 mips_valid_pointer_mode (scalar_int_mode mode
)
13376 return mode
== SImode
|| (TARGET_64BIT
&& mode
== DImode
);
13379 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
13382 mips_vector_mode_supported_p (machine_mode mode
)
13387 return TARGET_PAIRED_SINGLE_FLOAT
;
13402 return TARGET_LOONGSON_MMI
;
13405 return MSA_SUPPORTED_MODE_P (mode
);
13409 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
13412 mips_scalar_mode_supported_p (scalar_mode mode
)
13414 if (ALL_FIXED_POINT_MODE_P (mode
)
13415 && GET_MODE_PRECISION (mode
) <= 2 * BITS_PER_WORD
)
13418 return default_scalar_mode_supported_p (mode
);
13421 /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
13423 static machine_mode
13424 mips_preferred_simd_mode (scalar_mode mode
)
13426 if (TARGET_PAIRED_SINGLE_FLOAT
13456 /* Implement TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES. */
13459 mips_autovectorize_vector_sizes (vector_sizes
*sizes
, bool)
13462 sizes
->safe_push (16);
13465 /* Implement TARGET_INIT_LIBFUNCS. */
13468 mips_init_libfuncs (void)
13470 if (TARGET_FIX_VR4120
)
13472 /* Register the special divsi3 and modsi3 functions needed to work
13473 around VR4120 division errata. */
13474 set_optab_libfunc (sdiv_optab
, SImode
, "__vr4120_divsi3");
13475 set_optab_libfunc (smod_optab
, SImode
, "__vr4120_modsi3");
13478 if (TARGET_MIPS16
&& TARGET_HARD_FLOAT_ABI
)
13480 /* Register the MIPS16 -mhard-float stubs. */
13481 set_optab_libfunc (add_optab
, SFmode
, "__mips16_addsf3");
13482 set_optab_libfunc (sub_optab
, SFmode
, "__mips16_subsf3");
13483 set_optab_libfunc (smul_optab
, SFmode
, "__mips16_mulsf3");
13484 set_optab_libfunc (sdiv_optab
, SFmode
, "__mips16_divsf3");
13486 set_optab_libfunc (eq_optab
, SFmode
, "__mips16_eqsf2");
13487 set_optab_libfunc (ne_optab
, SFmode
, "__mips16_nesf2");
13488 set_optab_libfunc (gt_optab
, SFmode
, "__mips16_gtsf2");
13489 set_optab_libfunc (ge_optab
, SFmode
, "__mips16_gesf2");
13490 set_optab_libfunc (lt_optab
, SFmode
, "__mips16_ltsf2");
13491 set_optab_libfunc (le_optab
, SFmode
, "__mips16_lesf2");
13492 set_optab_libfunc (unord_optab
, SFmode
, "__mips16_unordsf2");
13494 set_conv_libfunc (sfix_optab
, SImode
, SFmode
, "__mips16_fix_truncsfsi");
13495 set_conv_libfunc (sfloat_optab
, SFmode
, SImode
, "__mips16_floatsisf");
13496 set_conv_libfunc (ufloat_optab
, SFmode
, SImode
, "__mips16_floatunsisf");
13498 if (TARGET_DOUBLE_FLOAT
)
13500 set_optab_libfunc (add_optab
, DFmode
, "__mips16_adddf3");
13501 set_optab_libfunc (sub_optab
, DFmode
, "__mips16_subdf3");
13502 set_optab_libfunc (smul_optab
, DFmode
, "__mips16_muldf3");
13503 set_optab_libfunc (sdiv_optab
, DFmode
, "__mips16_divdf3");
13505 set_optab_libfunc (eq_optab
, DFmode
, "__mips16_eqdf2");
13506 set_optab_libfunc (ne_optab
, DFmode
, "__mips16_nedf2");
13507 set_optab_libfunc (gt_optab
, DFmode
, "__mips16_gtdf2");
13508 set_optab_libfunc (ge_optab
, DFmode
, "__mips16_gedf2");
13509 set_optab_libfunc (lt_optab
, DFmode
, "__mips16_ltdf2");
13510 set_optab_libfunc (le_optab
, DFmode
, "__mips16_ledf2");
13511 set_optab_libfunc (unord_optab
, DFmode
, "__mips16_unorddf2");
13513 set_conv_libfunc (sext_optab
, DFmode
, SFmode
,
13514 "__mips16_extendsfdf2");
13515 set_conv_libfunc (trunc_optab
, SFmode
, DFmode
,
13516 "__mips16_truncdfsf2");
13517 set_conv_libfunc (sfix_optab
, SImode
, DFmode
,
13518 "__mips16_fix_truncdfsi");
13519 set_conv_libfunc (sfloat_optab
, DFmode
, SImode
,
13520 "__mips16_floatsidf");
13521 set_conv_libfunc (ufloat_optab
, DFmode
, SImode
,
13522 "__mips16_floatunsidf");
13526 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
13527 on an external non-MIPS16 routine to implement __sync_synchronize.
13528 Similarly for the rest of the ll/sc libfuncs. */
13531 synchronize_libfunc
= init_one_libfunc ("__sync_synchronize");
13532 init_sync_libfuncs (UNITS_PER_WORD
);
13536 /* Build up a multi-insn sequence that loads label TARGET into $AT. */
13539 mips_process_load_label (rtx target
)
13541 rtx base
, gp
, intop
;
13542 HOST_WIDE_INT offset
;
13544 mips_multi_start ();
13548 mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target
, 0);
13549 mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target
, 0);
13553 mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target
, 0);
13554 mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target
, 0);
13558 gp
= pic_offset_table_rtx
;
13559 if (mips_cfun_has_cprestore_slot_p ())
13561 gp
= gen_rtx_REG (Pmode
, AT_REGNUM
);
13562 mips_get_cprestore_base_and_offset (&base
, &offset
, true);
13563 if (!SMALL_OPERAND (offset
))
13565 intop
= GEN_INT (CONST_HIGH_PART (offset
));
13566 mips_multi_add_insn ("lui\t%0,%1", gp
, intop
, 0);
13567 mips_multi_add_insn ("addu\t%0,%0,%1", gp
, base
, 0);
13570 offset
= CONST_LOW_PART (offset
);
13572 intop
= GEN_INT (offset
);
13573 if (ISA_HAS_LOAD_DELAY
)
13574 mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp
, intop
, base
, 0);
13576 mips_multi_add_insn ("lw\t%0,%1(%2)", gp
, intop
, base
, 0);
13578 if (ISA_HAS_LOAD_DELAY
)
13579 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target
, gp
, 0);
13581 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target
, gp
, 0);
13582 mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target
, 0);
13587 /* Return the number of instructions needed to load a label into $AT. */
13589 static unsigned int
13590 mips_load_label_num_insns (void)
13592 if (cfun
->machine
->load_label_num_insns
== 0)
13594 mips_process_load_label (pc_rtx
);
13595 cfun
->machine
->load_label_num_insns
= mips_multi_num_insns
;
13597 return cfun
->machine
->load_label_num_insns
;
13600 /* Emit an asm sequence to start a noat block and load the address
13601 of a label into $1. */
13604 mips_output_load_label (rtx target
)
13606 mips_push_asm_switch (&mips_noat
);
13607 if (TARGET_EXPLICIT_RELOCS
)
13609 mips_process_load_label (target
);
13610 mips_multi_write ();
13614 if (Pmode
== DImode
)
13615 output_asm_insn ("dla\t%@,%0", &target
);
13617 output_asm_insn ("la\t%@,%0", &target
);
13621 /* Return the length of INSN. LENGTH is the initial length computed by
13622 attributes in the machine-description file. */
13625 mips_adjust_insn_length (rtx_insn
*insn
, int length
)
13627 /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
13628 of a PIC long-branch sequence. Substitute the correct value. */
13629 if (length
== MAX_PIC_BRANCH_LENGTH
13631 && INSN_CODE (insn
) >= 0
13632 && get_attr_type (insn
) == TYPE_BRANCH
)
13634 /* Add the branch-over instruction and its delay slot, if this
13635 is a conditional branch. */
13636 length
= simplejump_p (insn
) ? 0 : 8;
13638 /* Add the size of a load into $AT. */
13639 length
+= BASE_INSN_LENGTH
* mips_load_label_num_insns ();
13641 /* Add the length of an indirect jump, ignoring the delay slot. */
13642 length
+= TARGET_COMPRESSION
? 2 : 4;
13645 /* A unconditional jump has an unfilled delay slot if it is not part
13646 of a sequence. A conditional jump normally has a delay slot, but
13647 does not on MIPS16. */
13648 if (CALL_P (insn
) || (TARGET_MIPS16
? simplejump_p (insn
) : JUMP_P (insn
)))
13649 length
+= TARGET_MIPS16
? 2 : 4;
13651 /* See how many nops might be needed to avoid hardware hazards. */
13652 if (!cfun
->machine
->ignore_hazard_length_p
13654 && INSN_CODE (insn
) >= 0)
13655 switch (get_attr_hazard (insn
))
13661 case HAZARD_FORBIDDEN_SLOT
:
13662 length
+= NOP_INSN_LENGTH
;
13666 length
+= NOP_INSN_LENGTH
* 2;
13673 /* Return the asm template for a call. OPERANDS are the operands, TARGET_OPNO
13674 is the operand number of the target. SIZE_OPNO is the operand number of
13675 the argument size operand that can optionally hold the call attributes. If
13676 SIZE_OPNO is not -1 and the call is indirect, use the function symbol from
13677 the call attributes to attach a R_MIPS_JALR relocation to the call. LINK_P
13678 indicates whether the jump is a call and needs to set the link register.
13680 When generating GOT code without explicit relocation operators, all calls
13681 should use assembly macros. Otherwise, all indirect calls should use "jr"
13682 or "jalr"; we will arrange to restore $gp afterwards if necessary. Finally,
13683 we can only generate direct calls for -mabicalls by temporarily switching
13686 For microMIPS jal(r), we try to generate jal(r)s when a 16-bit
13687 instruction is in the delay slot of jal(r).
13689 Where compact branches are available, we try to use them if the delay slot
13690 has a NOP (or equivalently delay slots were not enabled for the instruction
13694 mips_output_jump (rtx
*operands
, int target_opno
, int size_opno
, bool link_p
)
13696 static char buffer
[300];
13698 bool reg_p
= REG_P (operands
[target_opno
]);
13700 const char *and_link
= link_p
? "al" : "";
13701 const char *reg
= reg_p
? "r" : "";
13702 const char *compact
= "";
13703 const char *nop
= "%/";
13704 const char *short_delay
= link_p
? "%!" : "";
13705 const char *insn_name
= TARGET_CB_NEVER
|| reg_p
? "j" : "b";
13707 /* Compact branches can only be described when the ISA has support for them
13708 as both the compact formatter '%:' and the delay slot NOP formatter '%/'
13709 work as a mutually exclusive pair. I.e. a NOP is never required if a
13710 compact form is available. */
13711 if (!final_sequence
13712 && (TARGET_CB_MAYBE
13713 || (ISA_HAS_JRC
&& !link_p
&& reg_p
)))
13719 if (TARGET_USE_GOT
&& !TARGET_EXPLICIT_RELOCS
)
13720 sprintf (s
, "%%*%s%s\t%%%d%%/", insn_name
, and_link
, target_opno
);
13723 if (!reg_p
&& TARGET_ABICALLS_PIC2
)
13724 s
+= sprintf (s
, ".option\tpic0\n\t");
13726 if (reg_p
&& mips_get_pic_call_symbol (operands
, size_opno
))
13727 s
+= sprintf (s
, "%%*.reloc\t1f,%s,%%%d\n1:\t",
13728 TARGET_MICROMIPS
? "R_MICROMIPS_JALR" : "R_MIPS_JALR",
13731 s
+= sprintf (s
, "%%*");
13733 s
+= sprintf (s
, "%s%s%s%s%s\t%%%d%s",
13734 insn_name
, and_link
, reg
, compact
, short_delay
,
13737 if (!reg_p
&& TARGET_ABICALLS_PIC2
)
13738 s
+= sprintf (s
, "\n\t.option\tpic2");
13743 /* Return the assembly code for INSN, which has the operands given by
13744 OPERANDS, and which branches to OPERANDS[0] if some condition is true.
13745 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
13746 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
13747 version of BRANCH_IF_TRUE. */
13750 mips_output_conditional_branch (rtx_insn
*insn
, rtx
*operands
,
13751 const char *branch_if_true
,
13752 const char *branch_if_false
)
13754 unsigned int length
;
13757 gcc_assert (LABEL_P (operands
[0]));
13759 length
= get_attr_length (insn
);
13762 /* Just a simple conditional branch. */
13763 mips_branch_likely
= (final_sequence
&& INSN_ANNULLED_BRANCH_P (insn
));
13764 return branch_if_true
;
13767 /* Generate a reversed branch around a direct jump. This fallback does
13768 not use branch-likely instructions. */
13769 mips_branch_likely
= false;
13770 rtx_code_label
*not_taken
= gen_label_rtx ();
13771 taken
= operands
[0];
13773 /* Generate the reversed branch to NOT_TAKEN. */
13774 operands
[0] = not_taken
;
13775 output_asm_insn (branch_if_false
, operands
);
13777 /* If INSN has a delay slot, we must provide delay slots for both the
13778 branch to NOT_TAKEN and the conditional jump. We must also ensure
13779 that INSN's delay slot is executed in the appropriate cases. */
13780 if (final_sequence
)
13782 /* This first delay slot will always be executed, so use INSN's
13783 delay slot if is not annulled. */
13784 if (!INSN_ANNULLED_BRANCH_P (insn
))
13786 final_scan_insn (final_sequence
->insn (1),
13787 asm_out_file
, optimize
, 1, NULL
);
13788 final_sequence
->insn (1)->set_deleted ();
13791 output_asm_insn ("nop", 0);
13792 fprintf (asm_out_file
, "\n");
13795 /* Output the unconditional branch to TAKEN. */
13796 if (TARGET_ABSOLUTE_JUMPS
&& TARGET_CB_MAYBE
)
13798 /* Add a hazard nop. */
13799 if (!final_sequence
)
13801 output_asm_insn ("nop\t\t# hazard nop", 0);
13802 fprintf (asm_out_file
, "\n");
13804 output_asm_insn (MIPS_ABSOLUTE_JUMP ("bc\t%0"), &taken
);
13806 else if (TARGET_ABSOLUTE_JUMPS
)
13807 output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken
);
13810 mips_output_load_label (taken
);
13811 if (TARGET_CB_MAYBE
)
13812 output_asm_insn ("jrc\t%@%]", 0);
13814 output_asm_insn ("jr\t%@%]%/", 0);
13817 /* Now deal with its delay slot; see above. */
13818 if (final_sequence
)
13820 /* This delay slot will only be executed if the branch is taken.
13821 Use INSN's delay slot if is annulled. */
13822 if (INSN_ANNULLED_BRANCH_P (insn
))
13824 final_scan_insn (final_sequence
->insn (1),
13825 asm_out_file
, optimize
, 1, NULL
);
13826 final_sequence
->insn (1)->set_deleted ();
13828 else if (TARGET_CB_NEVER
)
13829 output_asm_insn ("nop", 0);
13830 fprintf (asm_out_file
, "\n");
13833 /* Output NOT_TAKEN. */
13834 targetm
.asm_out
.internal_label (asm_out_file
, "L",
13835 CODE_LABEL_NUMBER (not_taken
));
13839 /* Return the assembly code for INSN, which branches to OPERANDS[0]
13840 if some equality condition is true. The condition is given by
13841 OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
13842 OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
13843 OPERANDS[3] is the second operand and may be zero or a register. */
13846 mips_output_equal_conditional_branch (rtx_insn
* insn
, rtx
*operands
,
13849 const char *branch
[2];
13850 /* For a simple BNEZ or BEQZ microMIPSr3 branch. */
13851 if (TARGET_MICROMIPS
13852 && mips_isa_rev
<= 5
13853 && operands
[3] == const0_rtx
13854 && get_attr_length (insn
) <= 8)
13856 if (mips_cb
== MIPS_CB_OPTIMAL
)
13858 branch
[!inverted_p
] = "%*b%C1z%:\t%2,%0";
13859 branch
[inverted_p
] = "%*b%N1z%:\t%2,%0";
13863 branch
[!inverted_p
] = "%*b%C1z\t%2,%0%/";
13864 branch
[inverted_p
] = "%*b%N1z\t%2,%0%/";
13867 else if (TARGET_CB_MAYBE
)
13869 if (operands
[3] == const0_rtx
)
13871 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1z", "%2,%0");
13872 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1z", "%2,%0");
13874 else if (REGNO (operands
[2]) != REGNO (operands
[3]))
13876 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1", "%2,%3,%0");
13877 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1", "%2,%3,%0");
13881 /* This case is degenerate. It should not happen, but does. */
13882 if (GET_CODE (operands
[1]) == NE
)
13883 inverted_p
= !inverted_p
;
13885 branch
[!inverted_p
] = MIPS_BRANCH_C ("b", "%0");
13886 branch
[inverted_p
] = "%*\t\t# branch never";
13891 branch
[!inverted_p
] = MIPS_BRANCH ("b%C1", "%2,%z3,%0");
13892 branch
[inverted_p
] = MIPS_BRANCH ("b%N1", "%2,%z3,%0");
13895 return mips_output_conditional_branch (insn
, operands
, branch
[1], branch
[0]);
13898 /* Return the assembly code for INSN, which branches to OPERANDS[0]
13899 if some ordering condition is true. The condition is given by
13900 OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
13901 OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
13902 OPERANDS[3] is the second operand and may be zero or a register. */
13905 mips_output_order_conditional_branch (rtx_insn
*insn
, rtx
*operands
,
13908 const char *branch
[2];
13910 /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
13911 Make BRANCH[0] branch on the inverse condition. */
13912 if (operands
[3] != const0_rtx
)
13914 /* Handle degenerate cases that should not, but do, occur. */
13915 if (REGNO (operands
[2]) == REGNO (operands
[3]))
13917 switch (GET_CODE (operands
[1]))
13921 inverted_p
= !inverted_p
;
13922 /* Fall through. */
13925 branch
[!inverted_p
] = MIPS_BRANCH_C ("b", "%0");
13926 branch
[inverted_p
] = "%*\t\t# branch never";
13929 gcc_unreachable ();
13934 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1", "%2,%3,%0");
13935 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1", "%2,%3,%0");
13940 switch (GET_CODE (operands
[1]))
13942 /* These cases are equivalent to comparisons against zero. */
13944 inverted_p
= !inverted_p
;
13945 /* Fall through. */
13947 if (TARGET_CB_MAYBE
)
13949 branch
[!inverted_p
] = MIPS_BRANCH_C ("bnez", "%2,%0");
13950 branch
[inverted_p
] = MIPS_BRANCH_C ("beqz", "%2,%0");
13954 branch
[!inverted_p
] = MIPS_BRANCH ("bne", "%2,%.,%0");
13955 branch
[inverted_p
] = MIPS_BRANCH ("beq", "%2,%.,%0");
13959 /* These cases are always true or always false. */
13961 inverted_p
= !inverted_p
;
13962 /* Fall through. */
13964 if (TARGET_CB_MAYBE
)
13966 branch
[!inverted_p
] = MIPS_BRANCH_C ("b", "%0");
13967 branch
[inverted_p
] = "%*\t\t# branch never";
13971 branch
[!inverted_p
] = MIPS_BRANCH ("beq", "%.,%.,%0");
13972 branch
[inverted_p
] = MIPS_BRANCH ("bne", "%.,%.,%0");
13977 if (TARGET_CB_MAYBE
)
13979 branch
[!inverted_p
] = MIPS_BRANCH_C ("b%C1z", "%2,%0");
13980 branch
[inverted_p
] = MIPS_BRANCH_C ("b%N1z", "%2,%0");
13984 branch
[!inverted_p
] = MIPS_BRANCH ("b%C1z", "%2,%0");
13985 branch
[inverted_p
] = MIPS_BRANCH ("b%N1z", "%2,%0");
13990 return mips_output_conditional_branch (insn
, operands
, branch
[1], branch
[0]);
13993 /* Start a block of code that needs access to the LL, SC and SYNC
13997 mips_start_ll_sc_sync_block (void)
13999 if (!ISA_HAS_LL_SC
)
14001 output_asm_insn (".set\tpush", 0);
14003 output_asm_insn (".set\tmips3", 0);
14005 output_asm_insn (".set\tmips2", 0);
14009 /* End a block started by mips_start_ll_sc_sync_block. */
14012 mips_end_ll_sc_sync_block (void)
14014 if (!ISA_HAS_LL_SC
)
14015 output_asm_insn (".set\tpop", 0);
14018 /* Output and/or return the asm template for a sync instruction. */
14021 mips_output_sync (void)
14023 mips_start_ll_sc_sync_block ();
14024 output_asm_insn ("sync", 0);
14025 mips_end_ll_sc_sync_block ();
14029 /* Return the asm template associated with sync_insn1 value TYPE.
14030 IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation. */
14032 static const char *
14033 mips_sync_insn1_template (enum attr_sync_insn1 type
, bool is_64bit_p
)
14037 case SYNC_INSN1_MOVE
:
14038 return "move\t%0,%z2";
14039 case SYNC_INSN1_LI
:
14040 return "li\t%0,%2";
14041 case SYNC_INSN1_ADDU
:
14042 return is_64bit_p
? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
14043 case SYNC_INSN1_ADDIU
:
14044 return is_64bit_p
? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
14045 case SYNC_INSN1_SUBU
:
14046 return is_64bit_p
? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
14047 case SYNC_INSN1_AND
:
14048 return "and\t%0,%1,%z2";
14049 case SYNC_INSN1_ANDI
:
14050 return "andi\t%0,%1,%2";
14051 case SYNC_INSN1_OR
:
14052 return "or\t%0,%1,%z2";
14053 case SYNC_INSN1_ORI
:
14054 return "ori\t%0,%1,%2";
14055 case SYNC_INSN1_XOR
:
14056 return "xor\t%0,%1,%z2";
14057 case SYNC_INSN1_XORI
:
14058 return "xori\t%0,%1,%2";
14060 gcc_unreachable ();
14063 /* Return the asm template associated with sync_insn2 value TYPE. */
14065 static const char *
14066 mips_sync_insn2_template (enum attr_sync_insn2 type
)
14070 case SYNC_INSN2_NOP
:
14071 gcc_unreachable ();
14072 case SYNC_INSN2_AND
:
14073 return "and\t%0,%1,%z2";
14074 case SYNC_INSN2_XOR
:
14075 return "xor\t%0,%1,%z2";
14076 case SYNC_INSN2_NOT
:
14077 return "nor\t%0,%1,%.";
14079 gcc_unreachable ();
14082 /* OPERANDS are the operands to a sync loop instruction and INDEX is
14083 the value of the one of the sync_* attributes. Return the operand
14084 referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
14085 have the associated attribute. */
14088 mips_get_sync_operand (rtx
*operands
, int index
, rtx default_value
)
14091 default_value
= operands
[index
- 1];
14092 return default_value
;
14095 /* INSN is a sync loop with operands OPERANDS. Build up a multi-insn
14096 sequence for it. */
14099 mips_process_sync_loop (rtx_insn
*insn
, rtx
*operands
)
14101 rtx at
, mem
, oldval
, newval
, inclusive_mask
, exclusive_mask
;
14102 rtx required_oldval
, insn1_op2
, tmp1
, tmp2
, tmp3
, cmp
;
14103 unsigned int tmp3_insn
;
14104 enum attr_sync_insn1 insn1
;
14105 enum attr_sync_insn2 insn2
;
14108 enum memmodel model
;
14110 /* Read an operand from the sync_WHAT attribute and store it in
14111 variable WHAT. DEFAULT is the default value if no attribute
14113 #define READ_OPERAND(WHAT, DEFAULT) \
14114 WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
14117 /* Read the memory. */
14118 READ_OPERAND (mem
, 0);
14120 is_64bit_p
= (GET_MODE_BITSIZE (GET_MODE (mem
)) == 64);
14122 /* Read the other attributes. */
14123 at
= gen_rtx_REG (GET_MODE (mem
), AT_REGNUM
);
14124 READ_OPERAND (oldval
, at
);
14125 READ_OPERAND (cmp
, 0);
14126 READ_OPERAND (newval
, at
);
14127 READ_OPERAND (inclusive_mask
, 0);
14128 READ_OPERAND (exclusive_mask
, 0);
14129 READ_OPERAND (required_oldval
, 0);
14130 READ_OPERAND (insn1_op2
, 0);
14131 insn1
= get_attr_sync_insn1 (insn
);
14132 insn2
= get_attr_sync_insn2 (insn
);
14134 /* Don't bother setting CMP result that is never used. */
14135 if (cmp
&& find_reg_note (insn
, REG_UNUSED
, cmp
))
14138 memmodel_attr
= get_attr_sync_memmodel (insn
);
14139 switch (memmodel_attr
)
14142 model
= MEMMODEL_ACQ_REL
;
14145 model
= MEMMODEL_ACQUIRE
;
14148 model
= memmodel_from_int (INTVAL (operands
[memmodel_attr
]));
14151 mips_multi_start ();
14153 /* Output the release side of the memory barrier. */
14154 if (need_atomic_barrier_p (model
, true))
14156 if (required_oldval
== 0 && TARGET_OCTEON
)
14158 /* Octeon doesn't reorder reads, so a full barrier can be
14159 created by using SYNCW to order writes combined with the
14160 write from the following SC. When the SC successfully
14161 completes, we know that all preceding writes are also
14162 committed to the coherent memory system. It is possible
14163 for a single SYNCW to fail, but a pair of them will never
14164 fail, so we use two. */
14165 mips_multi_add_insn ("syncw", NULL
);
14166 mips_multi_add_insn ("syncw", NULL
);
14169 mips_multi_add_insn ("sync", NULL
);
14172 /* Output the branch-back label. */
14173 mips_multi_add_label ("1:");
14175 /* OLDVAL = *MEM. */
14176 mips_multi_add_insn (is_64bit_p
? "lld\t%0,%1" : "ll\t%0,%1",
14177 oldval
, mem
, NULL
);
14179 /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2. */
14180 if (required_oldval
)
14182 if (inclusive_mask
== 0)
14186 gcc_assert (oldval
!= at
);
14187 mips_multi_add_insn ("and\t%0,%1,%2",
14188 at
, oldval
, inclusive_mask
, NULL
);
14191 if (TARGET_CB_NEVER
)
14192 mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1
, required_oldval
, NULL
);
14194 /* CMP = 0 [delay slot]. */
14196 mips_multi_add_insn ("li\t%0,0", cmp
, NULL
);
14198 if (TARGET_CB_MAYBE
&& required_oldval
== const0_rtx
)
14199 mips_multi_add_insn ("bnezc\t%0,2f", tmp1
, NULL
);
14200 else if (TARGET_CB_MAYBE
)
14201 mips_multi_add_insn ("bnec\t%0,%1,2f", tmp1
, required_oldval
, NULL
);
14205 /* $TMP1 = OLDVAL & EXCLUSIVE_MASK. */
14206 if (exclusive_mask
== 0)
14210 gcc_assert (oldval
!= at
);
14211 mips_multi_add_insn ("and\t%0,%1,%z2",
14212 at
, oldval
, exclusive_mask
, NULL
);
14216 /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).
14218 We can ignore moves if $TMP4 != INSN1_OP2, since we'll still emit
14219 at least one instruction in that case. */
14220 if (insn1
== SYNC_INSN1_MOVE
14221 && (tmp1
!= const0_rtx
|| insn2
!= SYNC_INSN2_NOP
))
14225 mips_multi_add_insn (mips_sync_insn1_template (insn1
, is_64bit_p
),
14226 newval
, oldval
, insn1_op2
, NULL
);
14230 /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK). */
14231 if (insn2
== SYNC_INSN2_NOP
)
14235 mips_multi_add_insn (mips_sync_insn2_template (insn2
),
14236 newval
, tmp2
, inclusive_mask
, NULL
);
14239 tmp3_insn
= mips_multi_last_index ();
14241 /* $AT = $TMP1 | $TMP3. */
14242 if (tmp1
== const0_rtx
|| tmp3
== const0_rtx
)
14244 mips_multi_set_operand (tmp3_insn
, 0, at
);
14249 gcc_assert (tmp1
!= tmp3
);
14250 mips_multi_add_insn ("or\t%0,%1,%2", at
, tmp1
, tmp3
, NULL
);
14253 /* if (!commit (*MEM = $AT)) goto 1.
14255 This will sometimes be a delayed branch; see the write code below
14257 mips_multi_add_insn (is_64bit_p
? "scd\t%0,%1" : "sc\t%0,%1", at
, mem
, NULL
);
14259 /* When using branch likely (-mfix-r10000), the delay slot instruction
14260 will be annulled on false. The normal delay slot instructions
14261 calculate the overall result of the atomic operation and must not
14262 be annulled. To ensure this behavior unconditionally use a NOP
14263 in the delay slot for the branch likely case. */
14265 if (TARGET_CB_MAYBE
)
14266 mips_multi_add_insn ("beqzc\t%0,1b", at
, NULL
);
14268 mips_multi_add_insn ("beq%?\t%0,%.,1b%~", at
, NULL
);
14270 /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot]. */
14271 if (insn1
!= SYNC_INSN1_MOVE
&& insn1
!= SYNC_INSN1_LI
&& tmp3
!= newval
)
14273 mips_multi_copy_insn (tmp3_insn
);
14274 mips_multi_set_operand (mips_multi_last_index (), 0, newval
);
14276 else if (!(required_oldval
&& cmp
) && !mips_branch_likely
)
14277 mips_multi_add_insn ("nop", NULL
);
14279 /* CMP = 1 -- either standalone or in a delay slot. */
14280 if (required_oldval
&& cmp
)
14281 mips_multi_add_insn ("li\t%0,1", cmp
, NULL
);
14283 /* Output the acquire side of the memory barrier. */
14284 if (TARGET_SYNC_AFTER_SC
&& need_atomic_barrier_p (model
, false))
14285 mips_multi_add_insn ("sync", NULL
);
14287 /* Output the exit label, if needed. */
14288 if (required_oldval
)
14289 mips_multi_add_label ("2:");
14291 #undef READ_OPERAND
14294 /* Output and/or return the asm template for sync loop INSN, which has
14295 the operands given by OPERANDS. */
14298 mips_output_sync_loop (rtx_insn
*insn
, rtx
*operands
)
14300 /* Use branch-likely instructions to work around the LL/SC R10000
14302 mips_branch_likely
= TARGET_FIX_R10000
;
14304 mips_process_sync_loop (insn
, operands
);
14306 mips_push_asm_switch (&mips_noreorder
);
14307 mips_push_asm_switch (&mips_nomacro
);
14308 mips_push_asm_switch (&mips_noat
);
14309 mips_start_ll_sc_sync_block ();
14311 mips_multi_write ();
14313 mips_end_ll_sc_sync_block ();
14314 mips_pop_asm_switch (&mips_noat
);
14315 mips_pop_asm_switch (&mips_nomacro
);
14316 mips_pop_asm_switch (&mips_noreorder
);
14321 /* Return the number of individual instructions in sync loop INSN,
14322 which has the operands given by OPERANDS. */
14325 mips_sync_loop_insns (rtx_insn
*insn
, rtx
*operands
)
14327 /* Use branch-likely instructions to work around the LL/SC R10000
14329 mips_branch_likely
= TARGET_FIX_R10000
;
14330 mips_process_sync_loop (insn
, operands
);
14331 return mips_multi_num_insns
;
14334 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
14335 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
14337 When working around R4000 and R4400 errata, we need to make sure that
14338 the division is not immediately followed by a shift[1][2]. We also
14339 need to stop the division from being put into a branch delay slot[3].
14340 The easiest way to avoid both problems is to add a nop after the
14341 division. When a divide-by-zero check is needed, this nop can be
14342 used to fill the branch delay slot.
14344 [1] If a double-word or a variable shift executes immediately
14345 after starting an integer division, the shift may give an
14346 incorrect result. See quotations of errata #16 and #28 from
14347 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
14348 in mips.md for details.
14350 [2] A similar bug to [1] exists for all revisions of the
14351 R4000 and the R4400 when run in an MC configuration.
14352 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
14354 "19. In this following sequence:
14356 ddiv (or ddivu or div or divu)
14357 dsll32 (or dsrl32, dsra32)
14359 if an MPT stall occurs, while the divide is slipping the cpu
14360 pipeline, then the following double shift would end up with an
14363 Workaround: The compiler needs to avoid generating any
14364 sequence with divide followed by extended double shift."
14366 This erratum is also present in "MIPS R4400MC Errata, Processor
14367 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
14368 & 3.0" as errata #10 and #4, respectively.
14370 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
14371 (also valid for MIPS R4000MC processors):
14373 "52. R4000SC: This bug does not apply for the R4000PC.
14375 There are two flavors of this bug:
14377 1) If the instruction just after divide takes an RF exception
14378 (tlb-refill, tlb-invalid) and gets an instruction cache
14379 miss (both primary and secondary) and the line which is
14380 currently in secondary cache at this index had the first
14381 data word, where the bits 5..2 are set, then R4000 would
14382 get a wrong result for the div.
14387 ------------------- # end-of page. -tlb-refill
14392 ------------------- # end-of page. -tlb-invalid
14395 2) If the divide is in the taken branch delay slot, where the
14396 target takes RF exception and gets an I-cache miss for the
14397 exception vector or where I-cache miss occurs for the
14398 target address, under the above mentioned scenarios, the
14399 div would get wrong results.
14402 j r2 # to next page mapped or unmapped
14403 div r8,r9 # this bug would be there as long
14404 # as there is an ICache miss and
14405 nop # the "data pattern" is present
14408 beq r0, r0, NextPage # to Next page
14412 This bug is present for div, divu, ddiv, and ddivu
14415 Workaround: For item 1), OS could make sure that the next page
14416 after the divide instruction is also mapped. For item 2), the
14417 compiler could make sure that the divide instruction is not in
14418 the branch delay slot."
14420 These processors have PRId values of 0x00004220 and 0x00004300 for
14421 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
14424 mips_output_division (const char *division
, rtx
*operands
)
14429 if (TARGET_FIX_R4000
|| TARGET_FIX_R4400
)
14431 output_asm_insn (s
, operands
);
14434 if (TARGET_CHECK_ZERO_DIV
)
14438 output_asm_insn (s
, operands
);
14439 s
= "bnez\t%2,1f\n\tbreak\t7\n1:";
14441 else if (GENERATE_DIVIDE_TRAPS
)
14443 /* Avoid long replay penalty on load miss by putting the trap before
14446 output_asm_insn ("teq\t%2,%.,7", operands
);
14449 output_asm_insn (s
, operands
);
14450 s
= "teq\t%2,%.,7";
14455 if (flag_delayed_branch
)
14457 output_asm_insn ("%(bne\t%2,%.,1f", operands
);
14458 output_asm_insn (s
, operands
);
14459 s
= "break\t7%)\n1:";
14463 output_asm_insn (s
, operands
);
14464 s
= "bne\t%2,%.,1f\n\tnop\n\tbreak\t7\n1:";
14471 /* Return the assembly code for MSA DIV_{S,U}.DF or MOD_{S,U}.DF instructions,
14472 which has the operands given by OPERANDS. Add in a divide-by-zero check
14476 mips_msa_output_division (const char *division
, rtx
*operands
)
14481 if (TARGET_CHECK_ZERO_DIV
)
14483 output_asm_insn ("%(bnz.%v0\t%w2,1f", operands
);
14484 output_asm_insn (s
, operands
);
14485 s
= "break\t7%)\n1:";
14490 /* Return true if destination of IN_INSN is used as add source in
14491 OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example:
14492 madd.s dst, x, y, z
14493 madd.s a, dst, b, c */
14496 mips_fmadd_bypass (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
14498 int dst_reg
, src_reg
;
14500 gcc_assert (get_attr_type (in_insn
) == TYPE_FMADD
);
14501 gcc_assert (get_attr_type (out_insn
) == TYPE_FMADD
);
14503 extract_insn (in_insn
);
14504 dst_reg
= REG_P (recog_data
.operand
[0]);
14506 extract_insn (out_insn
);
14507 src_reg
= REG_P (recog_data
.operand
[1]);
14509 if (dst_reg
== src_reg
)
14515 /* Return true if IN_INSN is a multiply-add or multiply-subtract
14516 instruction and if OUT_INSN assigns to the accumulator operand. */
14519 mips_linked_madd_p (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
14521 enum attr_accum_in accum_in
;
14522 int accum_in_opnum
;
14525 if (recog_memoized (in_insn
) < 0)
14528 accum_in
= get_attr_accum_in (in_insn
);
14529 if (accum_in
== ACCUM_IN_NONE
)
14532 accum_in_opnum
= accum_in
- ACCUM_IN_0
;
14534 extract_insn (in_insn
);
14535 gcc_assert (accum_in_opnum
< recog_data
.n_operands
);
14536 accum_in_op
= recog_data
.operand
[accum_in_opnum
];
14538 return reg_set_p (accum_in_op
, out_insn
);
14541 /* True if the dependency between OUT_INSN and IN_INSN is on the store
14542 data rather than the address. We need this because the cprestore
14543 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
14544 which causes the default routine to abort. We just return false
14548 mips_store_data_bypass_p (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
14550 if (GET_CODE (PATTERN (in_insn
)) == UNSPEC_VOLATILE
)
14553 return store_data_bypass_p (out_insn
, in_insn
);
14557 /* Variables and flags used in scheduler hooks when tuning for
14561 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
14564 /* If true, then next ALU1/2 instruction will go to ALU1. */
14567 /* If true, then next FALU1/2 unstruction will go to FALU1. */
14570 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
14571 int alu1_core_unit_code
;
14572 int alu2_core_unit_code
;
14573 int falu1_core_unit_code
;
14574 int falu2_core_unit_code
;
14576 /* True if current cycle has a multi instruction.
14577 This flag is used in mips_ls2_dfa_post_advance_cycle. */
14578 bool cycle_has_multi_p
;
14580 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
14581 These are used in mips_ls2_dfa_post_advance_cycle to initialize
14583 E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
14584 instruction to go ALU1. */
14585 rtx_insn
*alu1_turn_enabled_insn
;
14586 rtx_insn
*alu2_turn_enabled_insn
;
14587 rtx_insn
*falu1_turn_enabled_insn
;
14588 rtx_insn
*falu2_turn_enabled_insn
;
14591 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
14592 dependencies have no cost, except on the 20Kc where output-dependence
14593 is treated like input-dependence. */
14596 mips_adjust_cost (rtx_insn
*, int dep_type
, rtx_insn
*, int cost
, unsigned int)
14598 if (dep_type
!= 0 && (dep_type
!= REG_DEP_OUTPUT
|| !TUNE_20KC
))
14603 /* Return the number of instructions that can be issued per cycle. */
14606 mips_issue_rate (void)
14610 case PROCESSOR_74KC
:
14611 case PROCESSOR_74KF2_1
:
14612 case PROCESSOR_74KF1_1
:
14613 case PROCESSOR_74KF3_2
:
14614 /* The 74k is not strictly quad-issue cpu, but can be seen as one
14615 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
14616 but in reality only a maximum of 3 insns can be issued as
14617 floating-point loads and stores also require a slot in the
14619 case PROCESSOR_R10000
:
14620 /* All R10K Processors are quad-issue (being the first MIPS
14621 processors to support this feature). */
14624 case PROCESSOR_20KC
:
14625 case PROCESSOR_R4130
:
14626 case PROCESSOR_R5400
:
14627 case PROCESSOR_R5500
:
14628 case PROCESSOR_R5900
:
14629 case PROCESSOR_R7000
:
14630 case PROCESSOR_R9000
:
14631 case PROCESSOR_OCTEON
:
14632 case PROCESSOR_OCTEON2
:
14633 case PROCESSOR_OCTEON3
:
14634 case PROCESSOR_I6400
:
14635 case PROCESSOR_GS264E
:
14638 case PROCESSOR_SB1
:
14639 case PROCESSOR_SB1A
:
14640 /* This is actually 4, but we get better performance if we claim 3.
14641 This is partly because of unwanted speculative code motion with the
14642 larger number, and partly because in most common cases we can't
14643 reach the theoretical max of 4. */
14646 case PROCESSOR_LOONGSON_2E
:
14647 case PROCESSOR_LOONGSON_2F
:
14648 case PROCESSOR_GS464
:
14649 case PROCESSOR_GS464E
:
14650 case PROCESSOR_P5600
:
14651 case PROCESSOR_P6600
:
14654 case PROCESSOR_XLP
:
14655 return (reload_completed
? 4 : 3);
14662 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
14665 mips_ls2_init_dfa_post_cycle_insn (void)
14668 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
14669 mips_ls2
.alu1_turn_enabled_insn
= get_insns ();
14673 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
14674 mips_ls2
.alu2_turn_enabled_insn
= get_insns ();
14678 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
14679 mips_ls2
.falu1_turn_enabled_insn
= get_insns ();
14683 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
14684 mips_ls2
.falu2_turn_enabled_insn
= get_insns ();
14687 mips_ls2
.alu1_core_unit_code
= get_cpu_unit_code ("ls2_alu1_core");
14688 mips_ls2
.alu2_core_unit_code
= get_cpu_unit_code ("ls2_alu2_core");
14689 mips_ls2
.falu1_core_unit_code
= get_cpu_unit_code ("ls2_falu1_core");
14690 mips_ls2
.falu2_core_unit_code
= get_cpu_unit_code ("ls2_falu2_core");
14693 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
14694 Init data used in mips_dfa_post_advance_cycle. */
14697 mips_init_dfa_post_cycle_insn (void)
14699 if (TUNE_LOONGSON_2EF
)
14700 mips_ls2_init_dfa_post_cycle_insn ();
14703 /* Initialize STATE when scheduling for Loongson 2E/2F.
14704 Support round-robin dispatch scheme by enabling only one of
14705 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
14709 mips_ls2_dfa_post_advance_cycle (state_t state
)
14711 if (cpu_unit_reservation_p (state
, mips_ls2
.alu1_core_unit_code
))
14713 /* Though there are no non-pipelined ALU1 insns,
14714 we can get an instruction of type 'multi' before reload. */
14715 gcc_assert (mips_ls2
.cycle_has_multi_p
);
14716 mips_ls2
.alu1_turn_p
= false;
14719 mips_ls2
.cycle_has_multi_p
= false;
14721 if (cpu_unit_reservation_p (state
, mips_ls2
.alu2_core_unit_code
))
14722 /* We have a non-pipelined alu instruction in the core,
14723 adjust round-robin counter. */
14724 mips_ls2
.alu1_turn_p
= true;
14726 if (mips_ls2
.alu1_turn_p
)
14728 if (state_transition (state
, mips_ls2
.alu1_turn_enabled_insn
) >= 0)
14729 gcc_unreachable ();
14733 if (state_transition (state
, mips_ls2
.alu2_turn_enabled_insn
) >= 0)
14734 gcc_unreachable ();
14737 if (cpu_unit_reservation_p (state
, mips_ls2
.falu1_core_unit_code
))
14739 /* There are no non-pipelined FALU1 insns. */
14740 gcc_unreachable ();
14741 mips_ls2
.falu1_turn_p
= false;
14744 if (cpu_unit_reservation_p (state
, mips_ls2
.falu2_core_unit_code
))
14745 /* We have a non-pipelined falu instruction in the core,
14746 adjust round-robin counter. */
14747 mips_ls2
.falu1_turn_p
= true;
14749 if (mips_ls2
.falu1_turn_p
)
14751 if (state_transition (state
, mips_ls2
.falu1_turn_enabled_insn
) >= 0)
14752 gcc_unreachable ();
14756 if (state_transition (state
, mips_ls2
.falu2_turn_enabled_insn
) >= 0)
14757 gcc_unreachable ();
14761 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
14762 This hook is being called at the start of each cycle. */
14765 mips_dfa_post_advance_cycle (void)
14767 if (TUNE_LOONGSON_2EF
)
14768 mips_ls2_dfa_post_advance_cycle (curr_state
);
14771 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
14772 be as wide as the scheduling freedom in the DFA. */
14775 mips_multipass_dfa_lookahead (void)
14777 /* Can schedule up to 4 of the 6 function units in any one cycle. */
14781 if (TUNE_LOONGSON_2EF
|| TUNE_GS464
|| TUNE_GS464E
)
14784 if (TUNE_OCTEON
|| TUNE_GS264E
)
14787 if (TUNE_P5600
|| TUNE_P6600
|| TUNE_I6400
)
14793 /* Remove the instruction at index LOWER from ready queue READY and
14794 reinsert it in front of the instruction at index HIGHER. LOWER must
14798 mips_promote_ready (rtx_insn
**ready
, int lower
, int higher
)
14800 rtx_insn
*new_head
;
14803 new_head
= ready
[lower
];
14804 for (i
= lower
; i
< higher
; i
++)
14805 ready
[i
] = ready
[i
+ 1];
14806 ready
[i
] = new_head
;
14809 /* If the priority of the instruction at POS2 in the ready queue READY
14810 is within LIMIT units of that of the instruction at POS1, swap the
14811 instructions if POS2 is not already less than POS1. */
14814 mips_maybe_swap_ready (rtx_insn
**ready
, int pos1
, int pos2
, int limit
)
14817 && INSN_PRIORITY (ready
[pos1
]) + limit
>= INSN_PRIORITY (ready
[pos2
]))
14821 temp
= ready
[pos1
];
14822 ready
[pos1
] = ready
[pos2
];
14823 ready
[pos2
] = temp
;
/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
   that may clobber hi or lo.  */
static rtx_insn *mips_macc_chains_last_hilo;
14831 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
14832 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
14835 mips_macc_chains_record (rtx_insn
*insn
)
14837 if (get_attr_may_clobber_hilo (insn
))
14838 mips_macc_chains_last_hilo
= insn
;
14841 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
14842 has NREADY elements, looking for a multiply-add or multiply-subtract
14843 instruction that is cumulative with mips_macc_chains_last_hilo.
14844 If there is one, promote it ahead of anything else that might
14845 clobber hi or lo. */
14848 mips_macc_chains_reorder (rtx_insn
**ready
, int nready
)
14852 if (mips_macc_chains_last_hilo
!= 0)
14853 for (i
= nready
- 1; i
>= 0; i
--)
14854 if (mips_linked_madd_p (mips_macc_chains_last_hilo
, ready
[i
]))
14856 for (j
= nready
- 1; j
> i
; j
--)
14857 if (recog_memoized (ready
[j
]) >= 0
14858 && get_attr_may_clobber_hilo (ready
[j
]))
14860 mips_promote_ready (ready
, i
, j
);
/* The last instruction to be scheduled.  */
static rtx_insn *vr4130_last_insn;
14870 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
14871 points to an rtx that is initially an instruction. Nullify the rtx
14872 if the instruction uses the value of register X. */
14875 vr4130_true_reg_dependence_p_1 (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
,
14880 insn_ptr
= (rtx
*) data
;
14883 && reg_referenced_p (x
, PATTERN (*insn_ptr
)))
14887 /* Return true if there is true register dependence between vr4130_last_insn
14891 vr4130_true_reg_dependence_p (rtx insn
)
14893 note_stores (vr4130_last_insn
, vr4130_true_reg_dependence_p_1
, &insn
);
14897 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
14898 the ready queue and that INSN2 is the instruction after it, return
14899 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
14900 in which INSN1 and INSN2 can probably issue in parallel, but for
14901 which (INSN2, INSN1) should be less sensitive to instruction
14902 alignment than (INSN1, INSN2). See 4130.md for more details. */
14905 vr4130_swap_insns_p (rtx_insn
*insn1
, rtx_insn
*insn2
)
14907 sd_iterator_def sd_it
;
14910 /* Check for the following case:
14912 1) there is some other instruction X with an anti dependence on INSN1;
14913 2) X has a higher priority than INSN2; and
14914 3) X is an arithmetic instruction (and thus has no unit restrictions).
14916 If INSN1 is the last instruction blocking X, it would better to
14917 choose (INSN1, X) over (INSN2, INSN1). */
14918 FOR_EACH_DEP (insn1
, SD_LIST_FORW
, sd_it
, dep
)
14919 if (DEP_TYPE (dep
) == REG_DEP_ANTI
14920 && INSN_PRIORITY (DEP_CON (dep
)) > INSN_PRIORITY (insn2
)
14921 && recog_memoized (DEP_CON (dep
)) >= 0
14922 && get_attr_vr4130_class (DEP_CON (dep
)) == VR4130_CLASS_ALU
)
14925 if (vr4130_last_insn
!= 0
14926 && recog_memoized (insn1
) >= 0
14927 && recog_memoized (insn2
) >= 0)
14929 /* See whether INSN1 and INSN2 use different execution units,
14930 or if they are both ALU-type instructions. If so, they can
14931 probably execute in parallel. */
14932 enum attr_vr4130_class class1
= get_attr_vr4130_class (insn1
);
14933 enum attr_vr4130_class class2
= get_attr_vr4130_class (insn2
);
14934 if (class1
!= class2
|| class1
== VR4130_CLASS_ALU
)
14936 /* If only one of the instructions has a dependence on
14937 vr4130_last_insn, prefer to schedule the other one first. */
14938 bool dep1_p
= vr4130_true_reg_dependence_p (insn1
);
14939 bool dep2_p
= vr4130_true_reg_dependence_p (insn2
);
14940 if (dep1_p
!= dep2_p
)
14943 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
14944 is not an ALU-type instruction and if INSN1 uses the same
14945 execution unit. (Note that if this condition holds, we already
14946 know that INSN2 uses a different execution unit.) */
14947 if (class1
!= VR4130_CLASS_ALU
14948 && recog_memoized (vr4130_last_insn
) >= 0
14949 && class1
== get_attr_vr4130_class (vr4130_last_insn
))
14956 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
14957 queue with at least two instructions. Swap the first two if
14958 vr4130_swap_insns_p says that it could be worthwhile. */
14961 vr4130_reorder (rtx_insn
**ready
, int nready
)
14963 if (vr4130_swap_insns_p (ready
[nready
- 1], ready
[nready
- 2]))
14964 mips_promote_ready (ready
, nready
- 2, nready
- 1);
/* Record whether last 74k AGEN instruction was a load or store.  */
static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
14970 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
14971 resets to TYPE_UNKNOWN state. */
14974 mips_74k_agen_init (rtx_insn
*insn
)
14976 if (!insn
|| CALL_P (insn
) || JUMP_P (insn
))
14977 mips_last_74k_agen_insn
= TYPE_UNKNOWN
;
14980 enum attr_type type
= get_attr_type (insn
);
14981 if (type
== TYPE_LOAD
|| type
== TYPE_STORE
)
14982 mips_last_74k_agen_insn
= type
;
14986 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
14987 loads to be grouped together, and multiple stores to be grouped
14988 together. Swap things around in the ready queue to make this happen. */
14991 mips_74k_agen_reorder (rtx_insn
**ready
, int nready
)
14994 int store_pos
, load_pos
;
14999 for (i
= nready
- 1; i
>= 0; i
--)
15001 rtx_insn
*insn
= ready
[i
];
15002 if (USEFUL_INSN_P (insn
))
15003 switch (get_attr_type (insn
))
15006 if (store_pos
== -1)
15011 if (load_pos
== -1)
15020 if (load_pos
== -1 || store_pos
== -1)
15023 switch (mips_last_74k_agen_insn
)
15026 /* Prefer to schedule loads since they have a higher latency. */
15028 /* Swap loads to the front of the queue. */
15029 mips_maybe_swap_ready (ready
, load_pos
, store_pos
, 4);
15032 /* Swap stores to the front of the queue. */
15033 mips_maybe_swap_ready (ready
, store_pos
, load_pos
, 4);
15040 /* Implement TARGET_SCHED_INIT. */
15043 mips_sched_init (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
15044 int max_ready ATTRIBUTE_UNUSED
)
15046 mips_macc_chains_last_hilo
= 0;
15047 vr4130_last_insn
= 0;
15048 mips_74k_agen_init (NULL
);
15050 /* When scheduling for Loongson2, branch instructions go to ALU1,
15051 therefore basic block is most likely to start with round-robin counter
15052 pointed to ALU2. */
15053 mips_ls2
.alu1_turn_p
= false;
15054 mips_ls2
.falu1_turn_p
= true;
15057 /* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
15060 mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
15061 rtx_insn
**ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
15063 if (!reload_completed
15064 && TUNE_MACC_CHAINS
15066 mips_macc_chains_reorder (ready
, *nreadyp
);
15068 if (reload_completed
15070 && !TARGET_VR4130_ALIGN
15072 vr4130_reorder (ready
, *nreadyp
);
15075 mips_74k_agen_reorder (ready
, *nreadyp
);
15078 /* Implement TARGET_SCHED_REORDER. */
15081 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
15082 rtx_insn
**ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
15084 mips_sched_reorder_1 (file
, verbose
, ready
, nreadyp
, cycle
);
15085 return mips_issue_rate ();
15088 /* Implement TARGET_SCHED_REORDER2. */
15091 mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
15092 rtx_insn
**ready
, int *nreadyp
, int cycle ATTRIBUTE_UNUSED
)
15094 mips_sched_reorder_1 (file
, verbose
, ready
, nreadyp
, cycle
);
15095 return cached_can_issue_more
;
15098 /* Update round-robin counters for ALU1/2 and FALU1/2. */
15101 mips_ls2_variable_issue (rtx_insn
*insn
)
15103 if (mips_ls2
.alu1_turn_p
)
15105 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.alu1_core_unit_code
))
15106 mips_ls2
.alu1_turn_p
= false;
15110 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.alu2_core_unit_code
))
15111 mips_ls2
.alu1_turn_p
= true;
15114 if (mips_ls2
.falu1_turn_p
)
15116 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.falu1_core_unit_code
))
15117 mips_ls2
.falu1_turn_p
= false;
15121 if (cpu_unit_reservation_p (curr_state
, mips_ls2
.falu2_core_unit_code
))
15122 mips_ls2
.falu1_turn_p
= true;
15125 if (recog_memoized (insn
) >= 0)
15126 mips_ls2
.cycle_has_multi_p
|= (get_attr_type (insn
) == TYPE_MULTI
);
15129 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
15132 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED
, int verbose ATTRIBUTE_UNUSED
,
15133 rtx_insn
*insn
, int more
)
15135 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
15136 if (USEFUL_INSN_P (insn
))
15138 if (get_attr_type (insn
) != TYPE_GHOST
)
15140 if (!reload_completed
&& TUNE_MACC_CHAINS
)
15141 mips_macc_chains_record (insn
);
15142 vr4130_last_insn
= insn
;
15144 mips_74k_agen_init (insn
);
15145 else if (TUNE_LOONGSON_2EF
)
15146 mips_ls2_variable_issue (insn
);
15149 /* Instructions of type 'multi' should all be split before
15150 the second scheduling pass. */
15151 gcc_assert (!reload_completed
15152 || recog_memoized (insn
) < 0
15153 || get_attr_type (insn
) != TYPE_MULTI
);
15155 cached_can_issue_more
= more
;
15159 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
15160 return the first operand of the associated PREF or PREFX insn. */
15163 mips_prefetch_cookie (rtx write
, rtx locality
)
15165 /* store_streamed / load_streamed. */
15166 if (INTVAL (locality
) <= 0)
15167 return GEN_INT (INTVAL (write
) + 4);
15169 /* store / load. */
15170 if (INTVAL (locality
) <= 2)
15173 /* store_retained / load_retained. */
15174 return GEN_INT (INTVAL (write
) + 6);
15177 /* Loongson EXT2 only implements pref hint=0 (prefetch for load) and hint=1
15178 (prefetch for store), other hint just scale to hint = 0 and hint = 1. */
15181 mips_loongson_ext2_prefetch_cookie (rtx write
, rtx
)
15184 if (INTVAL (write
) == 1)
15185 return GEN_INT (INTVAL (write
));
15188 if (INTVAL (write
) == 0)
15189 return GEN_INT (INTVAL (write
));
15191 gcc_unreachable ();
/* Flags that indicate when a built-in function is available.

   BUILTIN_AVAIL_NON_MIPS16
	The function is available on the current target if !TARGET_MIPS16.

   BUILTIN_AVAIL_MIPS16
	The function is available on the current target if TARGET_MIPS16.  */
#define BUILTIN_AVAIL_NON_MIPS16 1
#define BUILTIN_AVAIL_MIPS16 2

/* Declare an availability predicate for built-in functions that
   require non-MIPS16 mode and also require COND to be true.
   NAME is the main part of the predicate's name.  */
#define AVAIL_NON_MIPS16(NAME, COND)					\
 static unsigned int							\
 mips_builtin_avail_##NAME (void)					\
 {									\
   return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0;			\
 }

/* Declare an availability predicate for built-in functions that
   support both MIPS16 and non-MIPS16 code and also require COND
   to be true.  NAME is the main part of the predicate's name.  */
#define AVAIL_ALL(NAME, COND)						\
 static unsigned int							\
 mips_builtin_avail_##NAME (void)					\
 {									\
   return (COND) ? BUILTIN_AVAIL_NON_MIPS16 | BUILTIN_AVAIL_MIPS16 : 0;	\
 }
/* This structure describes a single built-in function.  */
struct mips_builtin_description {
  /* The code of the main .md file instruction.  See mips_builtin_type
     for more information.  */
  enum insn_code icode;

  /* The floating-point comparison code to use with ICODE, if any.  */
  enum mips_fp_condition cond;

  /* The name of the built-in function.  */
  const char *name;

  /* Specifies how the function should be expanded.  */
  enum mips_builtin_type builtin_type;

  /* The function's prototype.  */
  enum mips_function_type function_type;

  /* Whether the function is available.  */
  unsigned int (*avail) (void);
};
/* Instantiate the availability predicates used by the built-in
   function table below.  */
AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_MMI)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
AVAIL_NON_MIPS16 (msa, TARGET_MSA)
/* Construct a mips_builtin_description from the given arguments.

   INSN is the name of the associated instruction pattern, without the
   leading CODE_FOR_mips_.

   CODE is the floating-point condition code associated with the
   function.  It can be 'f' if the field is not applicable.

   NAME is the name of the function itself, without the leading
   "__builtin_mips_".

   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.

   AVAIL is the name of the availability predicate, without the leading
   mips_builtin_avail_.  */
#define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE,			\
		     FUNCTION_TYPE, AVAIL)				\
  { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND,			\
    "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE,		\
    mips_builtin_avail_ ## AVAIL }

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
   mapped to instruction CODE_FOR_mips_<INSN>,  FUNCTION_TYPE and AVAIL
   are as for MIPS_BUILTIN.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
   are subject to mips_builtin_avail_<AVAIL>.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL),	\
  MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
   while the any and all forms are subject to mips_builtin_avail_mips3d.  */
#define CMP_PS_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
		MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
		MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL)

/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   are subject to mips_builtin_avail_mips3d.  */
#define CMP_4S_BUILTINS(INSN, COND)					\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ANY,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d),		\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ALL,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires mips_builtin_avail_<AVAIL>.  */
#define MOVTF_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,	\
		AVAIL)

/* Define all the built-in functions related to C.cond.fmt condition COND.  */
#define CMP_BUILTINS(COND)						\
  MOVTF_BUILTINS (c, COND, paired_single),				\
  MOVTF_BUILTINS (cabs, COND, mips3d),					\
  CMP_SCALAR_BUILTINS (cabs, COND, mips3d),				\
  CMP_PS_BUILTINS (c, COND, paired_single),				\
  CMP_PS_BUILTINS (cabs, COND, mips3d),					\
  CMP_4S_BUILTINS (c, COND),						\
  CMP_4S_BUILTINS (cabs, COND)

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
   function mapped to instruction CODE_FOR_mips_<INSN>,  FUNCTION_TYPE
   and AVAIL are as for MIPS_BUILTIN.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET,		\
		FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  AVAIL is as for MIPS_BUILTIN.  */
#define BPOSGE_BUILTIN(VALUE, AVAIL)					\
  MIPS_BUILTIN (bposge, f, "bposge" #VALUE,				\
		MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE)		\
  { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT,		\
    FUNCTION_TYPE, mips_builtin_avail_loongson }

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE)				\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)

/* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
   We use functions of this form when the same insn can be usefully applied
   to more than one datatype.  */
#define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE)		\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)

/* Define an MSA MIPS_BUILTIN_DIRECT function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN>.  FUNCTION_TYPE is a builtin_description
   field.  */
#define MSA_BUILTIN(INSN, FUNCTION_TYPE)				\
    { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN,  MIPS_BUILTIN_DIRECT,			\
    FUNCTION_TYPE, mips_builtin_avail_msa }

/* Define a remapped MSA MIPS_BUILTIN_DIRECT function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN2>.  FUNCTION_TYPE is
   a builtin_description field.  */
#define MSA_BUILTIN_REMAP(INSN, INSN2, FUNCTION_TYPE)			\
    { CODE_FOR_msa_ ## INSN2, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN,  MIPS_BUILTIN_DIRECT,			\
    FUNCTION_TYPE, mips_builtin_avail_msa }

/* Define an MSA MIPS_BUILTIN_MSA_TEST_BRANCH function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN>.  FUNCTION_TYPE is a builtin_description
   field.  */
#define MSA_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE)			\
    { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN, MIPS_BUILTIN_MSA_TEST_BRANCH,		\
    FUNCTION_TYPE, mips_builtin_avail_msa }

/* Define an MSA MIPS_BUILTIN_DIRECT_NO_TARGET function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN>.  FUNCTION_TYPE is a builtin_description
   field.  */
#define MSA_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE)			\
    { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN,  MIPS_BUILTIN_DIRECT_NO_TARGET,		\
    FUNCTION_TYPE, mips_builtin_avail_msa }
/* Map built-in instruction names onto the .md pattern names that
   actually implement them.  First the DSP/paired-single remaps.  */
#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
#define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
#define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit

/* Loongson MMI remaps onto generic vector patterns.  */
#define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
#define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
#define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
#define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
#define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
#define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
#define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
#define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
#define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
#define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
#define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
#define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
#define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
#define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
#define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
#define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
#define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
#define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
#define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
#define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
#define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
#define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
#define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
#define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
#define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
#define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
#define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
#define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3

/* MSA remaps onto generic vector patterns.  Note that the immediate
   (…i_…) forms map to the same patterns as the register forms.  */
#define CODE_FOR_msa_adds_s_b CODE_FOR_ssaddv16qi3
#define CODE_FOR_msa_adds_s_h CODE_FOR_ssaddv8hi3
#define CODE_FOR_msa_adds_s_w CODE_FOR_ssaddv4si3
#define CODE_FOR_msa_adds_s_d CODE_FOR_ssaddv2di3
#define CODE_FOR_msa_adds_u_b CODE_FOR_usaddv16qi3
#define CODE_FOR_msa_adds_u_h CODE_FOR_usaddv8hi3
#define CODE_FOR_msa_adds_u_w CODE_FOR_usaddv4si3
#define CODE_FOR_msa_adds_u_d CODE_FOR_usaddv2di3
#define CODE_FOR_msa_addv_b CODE_FOR_addv16qi3
#define CODE_FOR_msa_addv_h CODE_FOR_addv8hi3
#define CODE_FOR_msa_addv_w CODE_FOR_addv4si3
#define CODE_FOR_msa_addv_d CODE_FOR_addv2di3
#define CODE_FOR_msa_addvi_b CODE_FOR_addv16qi3
#define CODE_FOR_msa_addvi_h CODE_FOR_addv8hi3
#define CODE_FOR_msa_addvi_w CODE_FOR_addv4si3
#define CODE_FOR_msa_addvi_d CODE_FOR_addv2di3
#define CODE_FOR_msa_and_v CODE_FOR_andv16qi3
#define CODE_FOR_msa_andi_b CODE_FOR_andv16qi3
#define CODE_FOR_msa_bmnz_v CODE_FOR_msa_bmnz_b
#define CODE_FOR_msa_bmnzi_b CODE_FOR_msa_bmnz_b
#define CODE_FOR_msa_bmz_v CODE_FOR_msa_bmz_b
#define CODE_FOR_msa_bmzi_b CODE_FOR_msa_bmz_b
#define CODE_FOR_msa_bnz_v CODE_FOR_msa_bnz_v_b
#define CODE_FOR_msa_bz_v CODE_FOR_msa_bz_v_b
#define CODE_FOR_msa_bsel_v CODE_FOR_msa_bsel_b
#define CODE_FOR_msa_bseli_b CODE_FOR_msa_bsel_b
#define CODE_FOR_msa_ceqi_b CODE_FOR_msa_ceq_b
#define CODE_FOR_msa_ceqi_h CODE_FOR_msa_ceq_h
#define CODE_FOR_msa_ceqi_w CODE_FOR_msa_ceq_w
#define CODE_FOR_msa_ceqi_d CODE_FOR_msa_ceq_d
#define CODE_FOR_msa_clti_s_b CODE_FOR_msa_clt_s_b
#define CODE_FOR_msa_clti_s_h CODE_FOR_msa_clt_s_h
#define CODE_FOR_msa_clti_s_w CODE_FOR_msa_clt_s_w
#define CODE_FOR_msa_clti_s_d CODE_FOR_msa_clt_s_d
#define CODE_FOR_msa_clti_u_b CODE_FOR_msa_clt_u_b
#define CODE_FOR_msa_clti_u_h CODE_FOR_msa_clt_u_h
#define CODE_FOR_msa_clti_u_w CODE_FOR_msa_clt_u_w
#define CODE_FOR_msa_clti_u_d CODE_FOR_msa_clt_u_d
#define CODE_FOR_msa_clei_s_b CODE_FOR_msa_cle_s_b
#define CODE_FOR_msa_clei_s_h CODE_FOR_msa_cle_s_h
#define CODE_FOR_msa_clei_s_w CODE_FOR_msa_cle_s_w
#define CODE_FOR_msa_clei_s_d CODE_FOR_msa_cle_s_d
#define CODE_FOR_msa_clei_u_b CODE_FOR_msa_cle_u_b
#define CODE_FOR_msa_clei_u_h CODE_FOR_msa_cle_u_h
#define CODE_FOR_msa_clei_u_w CODE_FOR_msa_cle_u_w
#define CODE_FOR_msa_clei_u_d CODE_FOR_msa_cle_u_d
#define CODE_FOR_msa_div_s_b CODE_FOR_divv16qi3
#define CODE_FOR_msa_div_s_h CODE_FOR_divv8hi3
#define CODE_FOR_msa_div_s_w CODE_FOR_divv4si3
#define CODE_FOR_msa_div_s_d CODE_FOR_divv2di3
#define CODE_FOR_msa_div_u_b CODE_FOR_udivv16qi3
#define CODE_FOR_msa_div_u_h CODE_FOR_udivv8hi3
#define CODE_FOR_msa_div_u_w CODE_FOR_udivv4si3
#define CODE_FOR_msa_div_u_d CODE_FOR_udivv2di3
#define CODE_FOR_msa_fadd_w CODE_FOR_addv4sf3
#define CODE_FOR_msa_fadd_d CODE_FOR_addv2df3
#define CODE_FOR_msa_fexdo_w CODE_FOR_vec_pack_trunc_v2df
#define CODE_FOR_msa_ftrunc_s_w CODE_FOR_fix_truncv4sfv4si2
#define CODE_FOR_msa_ftrunc_s_d CODE_FOR_fix_truncv2dfv2di2
#define CODE_FOR_msa_ftrunc_u_w CODE_FOR_fixuns_truncv4sfv4si2
#define CODE_FOR_msa_ftrunc_u_d CODE_FOR_fixuns_truncv2dfv2di2
#define CODE_FOR_msa_ffint_s_w CODE_FOR_floatv4siv4sf2
#define CODE_FOR_msa_ffint_s_d CODE_FOR_floatv2div2df2
#define CODE_FOR_msa_ffint_u_w CODE_FOR_floatunsv4siv4sf2
#define CODE_FOR_msa_ffint_u_d CODE_FOR_floatunsv2div2df2
#define CODE_FOR_msa_fsub_w CODE_FOR_subv4sf3
#define CODE_FOR_msa_fsub_d CODE_FOR_subv2df3
#define CODE_FOR_msa_fmadd_w CODE_FOR_fmav4sf4
#define CODE_FOR_msa_fmadd_d CODE_FOR_fmav2df4
#define CODE_FOR_msa_fmsub_w CODE_FOR_fnmav4sf4
#define CODE_FOR_msa_fmsub_d CODE_FOR_fnmav2df4
#define CODE_FOR_msa_fmul_w CODE_FOR_mulv4sf3
#define CODE_FOR_msa_fmul_d CODE_FOR_mulv2df3
#define CODE_FOR_msa_fdiv_w CODE_FOR_divv4sf3
#define CODE_FOR_msa_fdiv_d CODE_FOR_divv2df3
#define CODE_FOR_msa_fmax_w CODE_FOR_smaxv4sf3
#define CODE_FOR_msa_fmax_d CODE_FOR_smaxv2df3
#define CODE_FOR_msa_fmin_w CODE_FOR_sminv4sf3
#define CODE_FOR_msa_fmin_d CODE_FOR_sminv2df3
#define CODE_FOR_msa_fsqrt_w CODE_FOR_sqrtv4sf2
#define CODE_FOR_msa_fsqrt_d CODE_FOR_sqrtv2df2
#define CODE_FOR_msa_max_s_b CODE_FOR_smaxv16qi3
#define CODE_FOR_msa_max_s_h CODE_FOR_smaxv8hi3
#define CODE_FOR_msa_max_s_w CODE_FOR_smaxv4si3
#define CODE_FOR_msa_max_s_d CODE_FOR_smaxv2di3
#define CODE_FOR_msa_maxi_s_b CODE_FOR_smaxv16qi3
#define CODE_FOR_msa_maxi_s_h CODE_FOR_smaxv8hi3
#define CODE_FOR_msa_maxi_s_w CODE_FOR_smaxv4si3
#define CODE_FOR_msa_maxi_s_d CODE_FOR_smaxv2di3
#define CODE_FOR_msa_max_u_b CODE_FOR_umaxv16qi3
#define CODE_FOR_msa_max_u_h CODE_FOR_umaxv8hi3
#define CODE_FOR_msa_max_u_w CODE_FOR_umaxv4si3
#define CODE_FOR_msa_max_u_d CODE_FOR_umaxv2di3
#define CODE_FOR_msa_maxi_u_b CODE_FOR_umaxv16qi3
#define CODE_FOR_msa_maxi_u_h CODE_FOR_umaxv8hi3
#define CODE_FOR_msa_maxi_u_w CODE_FOR_umaxv4si3
#define CODE_FOR_msa_maxi_u_d CODE_FOR_umaxv2di3
#define CODE_FOR_msa_min_s_b CODE_FOR_sminv16qi3
#define CODE_FOR_msa_min_s_h CODE_FOR_sminv8hi3
#define CODE_FOR_msa_min_s_w CODE_FOR_sminv4si3
#define CODE_FOR_msa_min_s_d CODE_FOR_sminv2di3
#define CODE_FOR_msa_mini_s_b CODE_FOR_sminv16qi3
#define CODE_FOR_msa_mini_s_h CODE_FOR_sminv8hi3
#define CODE_FOR_msa_mini_s_w CODE_FOR_sminv4si3
#define CODE_FOR_msa_mini_s_d CODE_FOR_sminv2di3
#define CODE_FOR_msa_min_u_b CODE_FOR_uminv16qi3
#define CODE_FOR_msa_min_u_h CODE_FOR_uminv8hi3
#define CODE_FOR_msa_min_u_w CODE_FOR_uminv4si3
#define CODE_FOR_msa_min_u_d CODE_FOR_uminv2di3
#define CODE_FOR_msa_mini_u_b CODE_FOR_uminv16qi3
#define CODE_FOR_msa_mini_u_h CODE_FOR_uminv8hi3
#define CODE_FOR_msa_mini_u_w CODE_FOR_uminv4si3
#define CODE_FOR_msa_mini_u_d CODE_FOR_uminv2di3
/* Integer modulo: map the MSA mod built-ins onto the standard
   modv/umodv patterns.  The original source defined these eight
   macros twice in a row (identical expansions, so a benign but
   redundant redefinition); define them once.  */
#define CODE_FOR_msa_mod_s_b CODE_FOR_modv16qi3
#define CODE_FOR_msa_mod_s_h CODE_FOR_modv8hi3
#define CODE_FOR_msa_mod_s_w CODE_FOR_modv4si3
#define CODE_FOR_msa_mod_s_d CODE_FOR_modv2di3
#define CODE_FOR_msa_mod_u_b CODE_FOR_umodv16qi3
#define CODE_FOR_msa_mod_u_h CODE_FOR_umodv8hi3
#define CODE_FOR_msa_mod_u_w CODE_FOR_umodv4si3
#define CODE_FOR_msa_mod_u_d CODE_FOR_umodv2di3
/* Remaining MSA remaps onto generic vector patterns.  */
#define CODE_FOR_msa_mulv_b CODE_FOR_mulv16qi3
#define CODE_FOR_msa_mulv_h CODE_FOR_mulv8hi3
#define CODE_FOR_msa_mulv_w CODE_FOR_mulv4si3
#define CODE_FOR_msa_mulv_d CODE_FOR_mulv2di3
#define CODE_FOR_msa_nlzc_b CODE_FOR_clzv16qi2
#define CODE_FOR_msa_nlzc_h CODE_FOR_clzv8hi2
#define CODE_FOR_msa_nlzc_w CODE_FOR_clzv4si2
#define CODE_FOR_msa_nlzc_d CODE_FOR_clzv2di2
#define CODE_FOR_msa_nor_v CODE_FOR_msa_nor_b
#define CODE_FOR_msa_or_v CODE_FOR_iorv16qi3
#define CODE_FOR_msa_ori_b CODE_FOR_iorv16qi3
#define CODE_FOR_msa_nori_b CODE_FOR_msa_nor_b
#define CODE_FOR_msa_pcnt_b CODE_FOR_popcountv16qi2
#define CODE_FOR_msa_pcnt_h CODE_FOR_popcountv8hi2
#define CODE_FOR_msa_pcnt_w CODE_FOR_popcountv4si2
#define CODE_FOR_msa_pcnt_d CODE_FOR_popcountv2di2
#define CODE_FOR_msa_xor_v CODE_FOR_xorv16qi3
#define CODE_FOR_msa_xori_b CODE_FOR_xorv16qi3
#define CODE_FOR_msa_sll_b CODE_FOR_vashlv16qi3
#define CODE_FOR_msa_sll_h CODE_FOR_vashlv8hi3
#define CODE_FOR_msa_sll_w CODE_FOR_vashlv4si3
#define CODE_FOR_msa_sll_d CODE_FOR_vashlv2di3
#define CODE_FOR_msa_slli_b CODE_FOR_vashlv16qi3
#define CODE_FOR_msa_slli_h CODE_FOR_vashlv8hi3
#define CODE_FOR_msa_slli_w CODE_FOR_vashlv4si3
#define CODE_FOR_msa_slli_d CODE_FOR_vashlv2di3
#define CODE_FOR_msa_sra_b CODE_FOR_vashrv16qi3
#define CODE_FOR_msa_sra_h CODE_FOR_vashrv8hi3
#define CODE_FOR_msa_sra_w CODE_FOR_vashrv4si3
#define CODE_FOR_msa_sra_d CODE_FOR_vashrv2di3
#define CODE_FOR_msa_srai_b CODE_FOR_vashrv16qi3
#define CODE_FOR_msa_srai_h CODE_FOR_vashrv8hi3
#define CODE_FOR_msa_srai_w CODE_FOR_vashrv4si3
#define CODE_FOR_msa_srai_d CODE_FOR_vashrv2di3
#define CODE_FOR_msa_srl_b CODE_FOR_vlshrv16qi3
#define CODE_FOR_msa_srl_h CODE_FOR_vlshrv8hi3
#define CODE_FOR_msa_srl_w CODE_FOR_vlshrv4si3
#define CODE_FOR_msa_srl_d CODE_FOR_vlshrv2di3
#define CODE_FOR_msa_srli_b CODE_FOR_vlshrv16qi3
#define CODE_FOR_msa_srli_h CODE_FOR_vlshrv8hi3
#define CODE_FOR_msa_srli_w CODE_FOR_vlshrv4si3
#define CODE_FOR_msa_srli_d CODE_FOR_vlshrv2di3
#define CODE_FOR_msa_subv_b CODE_FOR_subv16qi3
#define CODE_FOR_msa_subv_h CODE_FOR_subv8hi3
#define CODE_FOR_msa_subv_w CODE_FOR_subv4si3
#define CODE_FOR_msa_subv_d CODE_FOR_subv2di3
#define CODE_FOR_msa_subvi_b CODE_FOR_subv16qi3
#define CODE_FOR_msa_subvi_h CODE_FOR_subv8hi3
#define CODE_FOR_msa_subvi_w CODE_FOR_subv4si3
#define CODE_FOR_msa_subvi_d CODE_FOR_subv2di3

#define CODE_FOR_msa_move_v CODE_FOR_movv16qi

#define CODE_FOR_msa_vshf_b CODE_FOR_vec_permv16qi
#define CODE_FOR_msa_vshf_h CODE_FOR_vec_permv8hi
#define CODE_FOR_msa_vshf_w CODE_FOR_vec_permv4si
#define CODE_FOR_msa_vshf_d CODE_FOR_vec_permv2di

/* For doubleword elements, the even/odd interleave and pack
   operations coincide with the left/right interleaves.  */
#define CODE_FOR_msa_ilvod_d CODE_FOR_msa_ilvl_d
#define CODE_FOR_msa_ilvev_d CODE_FOR_msa_ilvr_d
#define CODE_FOR_msa_pckod_d CODE_FOR_msa_ilvl_d
#define CODE_FOR_msa_pckev_d CODE_FOR_msa_ilvr_d

#define CODE_FOR_msa_ldi_b CODE_FOR_msa_ldiv16qi
#define CODE_FOR_msa_ldi_h CODE_FOR_msa_ldiv8hi
#define CODE_FOR_msa_ldi_w CODE_FOR_msa_ldiv4si
#define CODE_FOR_msa_ldi_d CODE_FOR_msa_ldiv2di
15644 static const struct mips_builtin_description mips_builtins
[] = {
15645 #define MIPS_GET_FCSR 0
15646 DIRECT_BUILTIN (get_fcsr
, MIPS_USI_FTYPE_VOID
, hard_float
),
15647 #define MIPS_SET_FCSR 1
15648 DIRECT_NO_TARGET_BUILTIN (set_fcsr
, MIPS_VOID_FTYPE_USI
, hard_float
),
15650 DIRECT_BUILTIN (pll_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
15651 DIRECT_BUILTIN (pul_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
15652 DIRECT_BUILTIN (plu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
15653 DIRECT_BUILTIN (puu_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, paired_single
),
15654 DIRECT_BUILTIN (cvt_ps_s
, MIPS_V2SF_FTYPE_SF_SF
, paired_single
),
15655 DIRECT_BUILTIN (cvt_s_pl
, MIPS_SF_FTYPE_V2SF
, paired_single
),
15656 DIRECT_BUILTIN (cvt_s_pu
, MIPS_SF_FTYPE_V2SF
, paired_single
),
15657 DIRECT_BUILTIN (abs_ps
, MIPS_V2SF_FTYPE_V2SF
, paired_single
),
15659 DIRECT_BUILTIN (alnv_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF_INT
, paired_single
),
15660 DIRECT_BUILTIN (addr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
15661 DIRECT_BUILTIN (mulr_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
15662 DIRECT_BUILTIN (cvt_pw_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
15663 DIRECT_BUILTIN (cvt_ps_pw
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
15665 DIRECT_BUILTIN (recip1_s
, MIPS_SF_FTYPE_SF
, mips3d
),
15666 DIRECT_BUILTIN (recip1_d
, MIPS_DF_FTYPE_DF
, mips3d
),
15667 DIRECT_BUILTIN (recip1_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
15668 DIRECT_BUILTIN (recip2_s
, MIPS_SF_FTYPE_SF_SF
, mips3d
),
15669 DIRECT_BUILTIN (recip2_d
, MIPS_DF_FTYPE_DF_DF
, mips3d
),
15670 DIRECT_BUILTIN (recip2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
15672 DIRECT_BUILTIN (rsqrt1_s
, MIPS_SF_FTYPE_SF
, mips3d
),
15673 DIRECT_BUILTIN (rsqrt1_d
, MIPS_DF_FTYPE_DF
, mips3d
),
15674 DIRECT_BUILTIN (rsqrt1_ps
, MIPS_V2SF_FTYPE_V2SF
, mips3d
),
15675 DIRECT_BUILTIN (rsqrt2_s
, MIPS_SF_FTYPE_SF_SF
, mips3d
),
15676 DIRECT_BUILTIN (rsqrt2_d
, MIPS_DF_FTYPE_DF_DF
, mips3d
),
15677 DIRECT_BUILTIN (rsqrt2_ps
, MIPS_V2SF_FTYPE_V2SF_V2SF
, mips3d
),
15679 MIPS_FP_CONDITIONS (CMP_BUILTINS
),
15681 /* Built-in functions for the SB-1 processor. */
15682 DIRECT_BUILTIN (sqrt_ps
, MIPS_V2SF_FTYPE_V2SF
, sb1_paired_single
),
15684 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
15685 DIRECT_BUILTIN (addq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15686 DIRECT_BUILTIN (addq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15687 DIRECT_BUILTIN (addq_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15688 DIRECT_BUILTIN (addu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
15689 DIRECT_BUILTIN (addu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
15690 DIRECT_BUILTIN (subq_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15691 DIRECT_BUILTIN (subq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15692 DIRECT_BUILTIN (subq_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15693 DIRECT_BUILTIN (subu_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
15694 DIRECT_BUILTIN (subu_s_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
15695 DIRECT_BUILTIN (addsc
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15696 DIRECT_BUILTIN (addwc
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15697 DIRECT_BUILTIN (modsub
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15698 DIRECT_BUILTIN (raddu_w_qb
, MIPS_SI_FTYPE_V4QI
, dsp
),
15699 DIRECT_BUILTIN (absq_s_ph
, MIPS_V2HI_FTYPE_V2HI
, dsp
),
15700 DIRECT_BUILTIN (absq_s_w
, MIPS_SI_FTYPE_SI
, dsp
),
15701 DIRECT_BUILTIN (precrq_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dsp
),
15702 DIRECT_BUILTIN (precrq_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, dsp
),
15703 DIRECT_BUILTIN (precrq_rs_ph_w
, MIPS_V2HI_FTYPE_SI_SI
, dsp
),
15704 DIRECT_BUILTIN (precrqu_s_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dsp
),
15705 DIRECT_BUILTIN (preceq_w_phl
, MIPS_SI_FTYPE_V2HI
, dsp
),
15706 DIRECT_BUILTIN (preceq_w_phr
, MIPS_SI_FTYPE_V2HI
, dsp
),
15707 DIRECT_BUILTIN (precequ_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15708 DIRECT_BUILTIN (precequ_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15709 DIRECT_BUILTIN (precequ_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15710 DIRECT_BUILTIN (precequ_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15711 DIRECT_BUILTIN (preceu_ph_qbl
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15712 DIRECT_BUILTIN (preceu_ph_qbr
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15713 DIRECT_BUILTIN (preceu_ph_qbla
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15714 DIRECT_BUILTIN (preceu_ph_qbra
, MIPS_V2HI_FTYPE_V4QI
, dsp
),
15715 DIRECT_BUILTIN (shll_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dsp
),
15716 DIRECT_BUILTIN (shll_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
15717 DIRECT_BUILTIN (shll_s_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
15718 DIRECT_BUILTIN (shll_s_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15719 DIRECT_BUILTIN (shrl_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dsp
),
15720 DIRECT_BUILTIN (shra_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
15721 DIRECT_BUILTIN (shra_r_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dsp
),
15722 DIRECT_BUILTIN (shra_r_w
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15723 DIRECT_BUILTIN (muleu_s_ph_qbl
, MIPS_V2HI_FTYPE_V4QI_V2HI
, dsp
),
15724 DIRECT_BUILTIN (muleu_s_ph_qbr
, MIPS_V2HI_FTYPE_V4QI_V2HI
, dsp
),
15725 DIRECT_BUILTIN (mulq_rs_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15726 DIRECT_BUILTIN (muleq_s_w_phl
, MIPS_SI_FTYPE_V2HI_V2HI
, dsp
),
15727 DIRECT_BUILTIN (muleq_s_w_phr
, MIPS_SI_FTYPE_V2HI_V2HI
, dsp
),
15728 DIRECT_BUILTIN (bitrev
, MIPS_SI_FTYPE_SI
, dsp
),
15729 DIRECT_BUILTIN (insv
, MIPS_SI_FTYPE_SI_SI
, dsp
),
15730 DIRECT_BUILTIN (repl_qb
, MIPS_V4QI_FTYPE_SI
, dsp
),
15731 DIRECT_BUILTIN (repl_ph
, MIPS_V2HI_FTYPE_SI
, dsp
),
15732 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
15733 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
15734 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb
, MIPS_VOID_FTYPE_V4QI_V4QI
, dsp
),
15735 DIRECT_BUILTIN (cmpgu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
15736 DIRECT_BUILTIN (cmpgu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
15737 DIRECT_BUILTIN (cmpgu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dsp
),
15738 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
15739 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
15740 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph
, MIPS_VOID_FTYPE_V2HI_V2HI
, dsp
),
15741 DIRECT_BUILTIN (pick_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dsp
),
15742 DIRECT_BUILTIN (pick_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15743 DIRECT_BUILTIN (packrl_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dsp
),
15744 DIRECT_NO_TARGET_BUILTIN (wrdsp
, MIPS_VOID_FTYPE_SI_SI
, dsp
),
15745 DIRECT_BUILTIN (rddsp
, MIPS_SI_FTYPE_SI
, dsp
),
15746 DIRECT_BUILTIN (lbux
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
15747 DIRECT_BUILTIN (lhx
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
15748 DIRECT_BUILTIN (lwx
, MIPS_SI_FTYPE_POINTER_SI
, dsp
),
15749 BPOSGE_BUILTIN (32, dsp
),
15751 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
15752 DIRECT_BUILTIN (absq_s_qb
, MIPS_V4QI_FTYPE_V4QI
, dspr2
),
15753 DIRECT_BUILTIN (addu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15754 DIRECT_BUILTIN (addu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15755 DIRECT_BUILTIN (adduh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
15756 DIRECT_BUILTIN (adduh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
15757 DIRECT_BUILTIN (append
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
15758 DIRECT_BUILTIN (balign
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
15759 DIRECT_BUILTIN (cmpgdu_eq_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
15760 DIRECT_BUILTIN (cmpgdu_lt_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
15761 DIRECT_BUILTIN (cmpgdu_le_qb
, MIPS_SI_FTYPE_V4QI_V4QI
, dspr2
),
15762 DIRECT_BUILTIN (mul_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15763 DIRECT_BUILTIN (mul_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15764 DIRECT_BUILTIN (mulq_rs_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
15765 DIRECT_BUILTIN (mulq_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15766 DIRECT_BUILTIN (mulq_s_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
15767 DIRECT_BUILTIN (precr_qb_ph
, MIPS_V4QI_FTYPE_V2HI_V2HI
, dspr2
),
15768 DIRECT_BUILTIN (precr_sra_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, dspr2
),
15769 DIRECT_BUILTIN (precr_sra_r_ph_w
, MIPS_V2HI_FTYPE_SI_SI_SI
, dspr2
),
15770 DIRECT_BUILTIN (prepend
, MIPS_SI_FTYPE_SI_SI_SI
, dspr2
),
15771 DIRECT_BUILTIN (shra_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dspr2
),
15772 DIRECT_BUILTIN (shra_r_qb
, MIPS_V4QI_FTYPE_V4QI_SI
, dspr2
),
15773 DIRECT_BUILTIN (shrl_ph
, MIPS_V2HI_FTYPE_V2HI_SI
, dspr2
),
15774 DIRECT_BUILTIN (subu_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15775 DIRECT_BUILTIN (subu_s_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15776 DIRECT_BUILTIN (subuh_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
15777 DIRECT_BUILTIN (subuh_r_qb
, MIPS_V4QI_FTYPE_V4QI_V4QI
, dspr2
),
15778 DIRECT_BUILTIN (addqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15779 DIRECT_BUILTIN (addqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15780 DIRECT_BUILTIN (addqh_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
15781 DIRECT_BUILTIN (addqh_r_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
15782 DIRECT_BUILTIN (subqh_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15783 DIRECT_BUILTIN (subqh_r_ph
, MIPS_V2HI_FTYPE_V2HI_V2HI
, dspr2
),
15784 DIRECT_BUILTIN (subqh_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
15785 DIRECT_BUILTIN (subqh_r_w
, MIPS_SI_FTYPE_SI_SI
, dspr2
),
15787 /* Built-in functions for the DSP ASE (32-bit only). */
15788 DIRECT_BUILTIN (dpau_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
15789 DIRECT_BUILTIN (dpau_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
15790 DIRECT_BUILTIN (dpsu_h_qbl
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
15791 DIRECT_BUILTIN (dpsu_h_qbr
, MIPS_DI_FTYPE_DI_V4QI_V4QI
, dsp_32
),
15792 DIRECT_BUILTIN (dpaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15793 DIRECT_BUILTIN (dpsq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15794 DIRECT_BUILTIN (mulsaq_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15795 DIRECT_BUILTIN (dpaq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
15796 DIRECT_BUILTIN (dpsq_sa_l_w
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
15797 DIRECT_BUILTIN (maq_s_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15798 DIRECT_BUILTIN (maq_s_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15799 DIRECT_BUILTIN (maq_sa_w_phl
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15800 DIRECT_BUILTIN (maq_sa_w_phr
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dsp_32
),
15801 DIRECT_BUILTIN (extr_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
15802 DIRECT_BUILTIN (extr_r_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
15803 DIRECT_BUILTIN (extr_rs_w
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
15804 DIRECT_BUILTIN (extr_s_h
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
15805 DIRECT_BUILTIN (extp
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
15806 DIRECT_BUILTIN (extpdp
, MIPS_SI_FTYPE_DI_SI
, dsp_32
),
15807 DIRECT_BUILTIN (shilo
, MIPS_DI_FTYPE_DI_SI
, dsp_32
),
15808 DIRECT_BUILTIN (mthlip
, MIPS_DI_FTYPE_DI_SI
, dsp_32
),
15809 DIRECT_BUILTIN (madd
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
15810 DIRECT_BUILTIN (maddu
, MIPS_DI_FTYPE_DI_USI_USI
, dsp_32
),
15811 DIRECT_BUILTIN (msub
, MIPS_DI_FTYPE_DI_SI_SI
, dsp_32
),
15812 DIRECT_BUILTIN (msubu
, MIPS_DI_FTYPE_DI_USI_USI
, dsp_32
),
15813 DIRECT_BUILTIN (mult
, MIPS_DI_FTYPE_SI_SI
, dsp_32
),
15814 DIRECT_BUILTIN (multu
, MIPS_DI_FTYPE_USI_USI
, dsp_32
),
15816 /* Built-in functions for the DSP ASE (64-bit only). */
15817 DIRECT_BUILTIN (ldx
, MIPS_DI_FTYPE_POINTER_SI
, dsp_64
),
15819 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
15820 DIRECT_BUILTIN (dpa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15821 DIRECT_BUILTIN (dps_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15822 DIRECT_BUILTIN (mulsa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15823 DIRECT_BUILTIN (dpax_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15824 DIRECT_BUILTIN (dpsx_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15825 DIRECT_BUILTIN (dpaqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15826 DIRECT_BUILTIN (dpaqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15827 DIRECT_BUILTIN (dpsqx_s_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15828 DIRECT_BUILTIN (dpsqx_sa_w_ph
, MIPS_DI_FTYPE_DI_V2HI_V2HI
, dspr2_32
),
15830 /* Builtin functions for ST Microelectronics Loongson-2E/2F cores. */
15831 LOONGSON_BUILTIN (packsswh
, MIPS_V4HI_FTYPE_V2SI_V2SI
),
15832 LOONGSON_BUILTIN (packsshb
, MIPS_V8QI_FTYPE_V4HI_V4HI
),
15833 LOONGSON_BUILTIN (packushb
, MIPS_UV8QI_FTYPE_UV4HI_UV4HI
),
15834 LOONGSON_BUILTIN_SUFFIX (paddw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15835 LOONGSON_BUILTIN_SUFFIX (paddh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15836 LOONGSON_BUILTIN_SUFFIX (paddb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15837 LOONGSON_BUILTIN_SUFFIX (paddw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15838 LOONGSON_BUILTIN_SUFFIX (paddh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15839 LOONGSON_BUILTIN_SUFFIX (paddb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15840 LOONGSON_BUILTIN_SUFFIX (paddd
, u
, MIPS_UDI_FTYPE_UDI_UDI
),
15841 LOONGSON_BUILTIN_SUFFIX (paddd
, s
, MIPS_DI_FTYPE_DI_DI
),
15842 LOONGSON_BUILTIN (paddsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15843 LOONGSON_BUILTIN (paddsb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15844 LOONGSON_BUILTIN (paddush
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15845 LOONGSON_BUILTIN (paddusb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15846 LOONGSON_BUILTIN_ALIAS (pandn_d
, pandn_ud
, MIPS_UDI_FTYPE_UDI_UDI
),
15847 LOONGSON_BUILTIN_ALIAS (pandn_w
, pandn_uw
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15848 LOONGSON_BUILTIN_ALIAS (pandn_h
, pandn_uh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15849 LOONGSON_BUILTIN_ALIAS (pandn_b
, pandn_ub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15850 LOONGSON_BUILTIN_ALIAS (pandn_d
, pandn_sd
, MIPS_DI_FTYPE_DI_DI
),
15851 LOONGSON_BUILTIN_ALIAS (pandn_w
, pandn_sw
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15852 LOONGSON_BUILTIN_ALIAS (pandn_h
, pandn_sh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15853 LOONGSON_BUILTIN_ALIAS (pandn_b
, pandn_sb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15854 LOONGSON_BUILTIN (pavgh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15855 LOONGSON_BUILTIN (pavgb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15856 LOONGSON_BUILTIN_SUFFIX (pcmpeqw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15857 LOONGSON_BUILTIN_SUFFIX (pcmpeqh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15858 LOONGSON_BUILTIN_SUFFIX (pcmpeqb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15859 LOONGSON_BUILTIN_SUFFIX (pcmpeqw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15860 LOONGSON_BUILTIN_SUFFIX (pcmpeqh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15861 LOONGSON_BUILTIN_SUFFIX (pcmpeqb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15862 LOONGSON_BUILTIN_SUFFIX (pcmpgtw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15863 LOONGSON_BUILTIN_SUFFIX (pcmpgth
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15864 LOONGSON_BUILTIN_SUFFIX (pcmpgtb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15865 LOONGSON_BUILTIN_SUFFIX (pcmpgtw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15866 LOONGSON_BUILTIN_SUFFIX (pcmpgth
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15867 LOONGSON_BUILTIN_SUFFIX (pcmpgtb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15868 LOONGSON_BUILTIN_SUFFIX (pextrh
, u
, MIPS_UV4HI_FTYPE_UV4HI_USI
),
15869 LOONGSON_BUILTIN_SUFFIX (pextrh
, s
, MIPS_V4HI_FTYPE_V4HI_USI
),
15870 LOONGSON_BUILTIN_SUFFIX (pinsrh_0
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15871 LOONGSON_BUILTIN_SUFFIX (pinsrh_1
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15872 LOONGSON_BUILTIN_SUFFIX (pinsrh_2
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15873 LOONGSON_BUILTIN_SUFFIX (pinsrh_3
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15874 LOONGSON_BUILTIN_SUFFIX (pinsrh_0
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15875 LOONGSON_BUILTIN_SUFFIX (pinsrh_1
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15876 LOONGSON_BUILTIN_SUFFIX (pinsrh_2
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15877 LOONGSON_BUILTIN_SUFFIX (pinsrh_3
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15878 LOONGSON_BUILTIN (pmaddhw
, MIPS_V2SI_FTYPE_V4HI_V4HI
),
15879 LOONGSON_BUILTIN (pmaxsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15880 LOONGSON_BUILTIN (pmaxub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15881 LOONGSON_BUILTIN (pminsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15882 LOONGSON_BUILTIN (pminub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15883 LOONGSON_BUILTIN_SUFFIX (pmovmskb
, u
, MIPS_UV8QI_FTYPE_UV8QI
),
15884 LOONGSON_BUILTIN_SUFFIX (pmovmskb
, s
, MIPS_V8QI_FTYPE_V8QI
),
15885 LOONGSON_BUILTIN (pmulhuh
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15886 LOONGSON_BUILTIN (pmulhh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15887 LOONGSON_BUILTIN (pmullh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15888 LOONGSON_BUILTIN (pmuluw
, MIPS_UDI_FTYPE_UV2SI_UV2SI
),
15889 LOONGSON_BUILTIN (pasubub
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15890 LOONGSON_BUILTIN (biadd
, MIPS_UV4HI_FTYPE_UV8QI
),
15891 LOONGSON_BUILTIN (psadbh
, MIPS_UV4HI_FTYPE_UV8QI_UV8QI
),
15892 LOONGSON_BUILTIN_SUFFIX (pshufh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
15893 LOONGSON_BUILTIN_SUFFIX (pshufh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
15894 LOONGSON_BUILTIN_SUFFIX (psllh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
15895 LOONGSON_BUILTIN_SUFFIX (psllh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
15896 LOONGSON_BUILTIN_SUFFIX (psllw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
15897 LOONGSON_BUILTIN_SUFFIX (psllw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
15898 LOONGSON_BUILTIN_SUFFIX (psrah
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
15899 LOONGSON_BUILTIN_SUFFIX (psrah
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
15900 LOONGSON_BUILTIN_SUFFIX (psraw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
15901 LOONGSON_BUILTIN_SUFFIX (psraw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
15902 LOONGSON_BUILTIN_SUFFIX (psrlh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UQI
),
15903 LOONGSON_BUILTIN_SUFFIX (psrlh
, s
, MIPS_V4HI_FTYPE_V4HI_UQI
),
15904 LOONGSON_BUILTIN_SUFFIX (psrlw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UQI
),
15905 LOONGSON_BUILTIN_SUFFIX (psrlw
, s
, MIPS_V2SI_FTYPE_V2SI_UQI
),
15906 LOONGSON_BUILTIN_SUFFIX (psubw
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15907 LOONGSON_BUILTIN_SUFFIX (psubh
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15908 LOONGSON_BUILTIN_SUFFIX (psubb
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15909 LOONGSON_BUILTIN_SUFFIX (psubw
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15910 LOONGSON_BUILTIN_SUFFIX (psubh
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15911 LOONGSON_BUILTIN_SUFFIX (psubb
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15912 LOONGSON_BUILTIN_SUFFIX (psubd
, u
, MIPS_UDI_FTYPE_UDI_UDI
),
15913 LOONGSON_BUILTIN_SUFFIX (psubd
, s
, MIPS_DI_FTYPE_DI_DI
),
15914 LOONGSON_BUILTIN (psubsh
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15915 LOONGSON_BUILTIN (psubsb
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15916 LOONGSON_BUILTIN (psubush
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15917 LOONGSON_BUILTIN (psubusb
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15918 LOONGSON_BUILTIN_SUFFIX (punpckhbh
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15919 LOONGSON_BUILTIN_SUFFIX (punpckhhw
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15920 LOONGSON_BUILTIN_SUFFIX (punpckhwd
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15921 LOONGSON_BUILTIN_SUFFIX (punpckhbh
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15922 LOONGSON_BUILTIN_SUFFIX (punpckhhw
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15923 LOONGSON_BUILTIN_SUFFIX (punpckhwd
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15924 LOONGSON_BUILTIN_SUFFIX (punpcklbh
, u
, MIPS_UV8QI_FTYPE_UV8QI_UV8QI
),
15925 LOONGSON_BUILTIN_SUFFIX (punpcklhw
, u
, MIPS_UV4HI_FTYPE_UV4HI_UV4HI
),
15926 LOONGSON_BUILTIN_SUFFIX (punpcklwd
, u
, MIPS_UV2SI_FTYPE_UV2SI_UV2SI
),
15927 LOONGSON_BUILTIN_SUFFIX (punpcklbh
, s
, MIPS_V8QI_FTYPE_V8QI_V8QI
),
15928 LOONGSON_BUILTIN_SUFFIX (punpcklhw
, s
, MIPS_V4HI_FTYPE_V4HI_V4HI
),
15929 LOONGSON_BUILTIN_SUFFIX (punpcklwd
, s
, MIPS_V2SI_FTYPE_V2SI_V2SI
),
15931 /* Sundry other built-in functions. */
15932 DIRECT_NO_TARGET_BUILTIN (cache
, MIPS_VOID_FTYPE_SI_CVPOINTER
, cache
),
15934 /* Built-in functions for MSA. */
15935 MSA_BUILTIN (sll_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
15936 MSA_BUILTIN (sll_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
15937 MSA_BUILTIN (sll_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
15938 MSA_BUILTIN (sll_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
15939 MSA_BUILTIN (slli_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
15940 MSA_BUILTIN (slli_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
15941 MSA_BUILTIN (slli_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
15942 MSA_BUILTIN (slli_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
15943 MSA_BUILTIN (sra_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
15944 MSA_BUILTIN (sra_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
15945 MSA_BUILTIN (sra_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
15946 MSA_BUILTIN (sra_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
15947 MSA_BUILTIN (srai_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
15948 MSA_BUILTIN (srai_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
15949 MSA_BUILTIN (srai_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
15950 MSA_BUILTIN (srai_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
15951 MSA_BUILTIN (srar_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
15952 MSA_BUILTIN (srar_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
15953 MSA_BUILTIN (srar_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
15954 MSA_BUILTIN (srar_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
15955 MSA_BUILTIN (srari_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
15956 MSA_BUILTIN (srari_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
15957 MSA_BUILTIN (srari_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
15958 MSA_BUILTIN (srari_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
15959 MSA_BUILTIN (srl_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
15960 MSA_BUILTIN (srl_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
15961 MSA_BUILTIN (srl_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
15962 MSA_BUILTIN (srl_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
15963 MSA_BUILTIN (srli_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
15964 MSA_BUILTIN (srli_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
15965 MSA_BUILTIN (srli_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
15966 MSA_BUILTIN (srli_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
15967 MSA_BUILTIN (srlr_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
15968 MSA_BUILTIN (srlr_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
15969 MSA_BUILTIN (srlr_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
15970 MSA_BUILTIN (srlr_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
15971 MSA_BUILTIN (srlri_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
15972 MSA_BUILTIN (srlri_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
15973 MSA_BUILTIN (srlri_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
15974 MSA_BUILTIN (srlri_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
15975 MSA_BUILTIN (bclr_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
15976 MSA_BUILTIN (bclr_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
15977 MSA_BUILTIN (bclr_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
15978 MSA_BUILTIN (bclr_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
15979 MSA_BUILTIN (bclri_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
15980 MSA_BUILTIN (bclri_h
, MIPS_UV8HI_FTYPE_UV8HI_UQI
),
15981 MSA_BUILTIN (bclri_w
, MIPS_UV4SI_FTYPE_UV4SI_UQI
),
15982 MSA_BUILTIN (bclri_d
, MIPS_UV2DI_FTYPE_UV2DI_UQI
),
15983 MSA_BUILTIN (bset_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
15984 MSA_BUILTIN (bset_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
15985 MSA_BUILTIN (bset_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
15986 MSA_BUILTIN (bset_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
15987 MSA_BUILTIN (bseti_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
15988 MSA_BUILTIN (bseti_h
, MIPS_UV8HI_FTYPE_UV8HI_UQI
),
15989 MSA_BUILTIN (bseti_w
, MIPS_UV4SI_FTYPE_UV4SI_UQI
),
15990 MSA_BUILTIN (bseti_d
, MIPS_UV2DI_FTYPE_UV2DI_UQI
),
15991 MSA_BUILTIN (bneg_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
15992 MSA_BUILTIN (bneg_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
15993 MSA_BUILTIN (bneg_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
15994 MSA_BUILTIN (bneg_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
15995 MSA_BUILTIN (bnegi_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
15996 MSA_BUILTIN (bnegi_h
, MIPS_UV8HI_FTYPE_UV8HI_UQI
),
15997 MSA_BUILTIN (bnegi_w
, MIPS_UV4SI_FTYPE_UV4SI_UQI
),
15998 MSA_BUILTIN (bnegi_d
, MIPS_UV2DI_FTYPE_UV2DI_UQI
),
15999 MSA_BUILTIN (binsl_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI
),
16000 MSA_BUILTIN (binsl_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UV8HI
),
16001 MSA_BUILTIN (binsl_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UV4SI
),
16002 MSA_BUILTIN (binsl_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI
),
16003 MSA_BUILTIN (binsli_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI
),
16004 MSA_BUILTIN (binsli_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UQI
),
16005 MSA_BUILTIN (binsli_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UQI
),
16006 MSA_BUILTIN (binsli_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UQI
),
16007 MSA_BUILTIN (binsr_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI
),
16008 MSA_BUILTIN (binsr_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UV8HI
),
16009 MSA_BUILTIN (binsr_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UV4SI
),
16010 MSA_BUILTIN (binsr_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI
),
16011 MSA_BUILTIN (binsri_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI
),
16012 MSA_BUILTIN (binsri_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UQI
),
16013 MSA_BUILTIN (binsri_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UQI
),
16014 MSA_BUILTIN (binsri_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UQI
),
16015 MSA_BUILTIN (addv_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16016 MSA_BUILTIN (addv_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16017 MSA_BUILTIN (addv_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16018 MSA_BUILTIN (addv_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16019 MSA_BUILTIN (addvi_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
16020 MSA_BUILTIN (addvi_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
16021 MSA_BUILTIN (addvi_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
16022 MSA_BUILTIN (addvi_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
16023 MSA_BUILTIN (subv_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16024 MSA_BUILTIN (subv_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16025 MSA_BUILTIN (subv_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16026 MSA_BUILTIN (subv_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16027 MSA_BUILTIN (subvi_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
16028 MSA_BUILTIN (subvi_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
16029 MSA_BUILTIN (subvi_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
16030 MSA_BUILTIN (subvi_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
16031 MSA_BUILTIN (max_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16032 MSA_BUILTIN (max_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16033 MSA_BUILTIN (max_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16034 MSA_BUILTIN (max_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16035 MSA_BUILTIN (maxi_s_b
, MIPS_V16QI_FTYPE_V16QI_QI
),
16036 MSA_BUILTIN (maxi_s_h
, MIPS_V8HI_FTYPE_V8HI_QI
),
16037 MSA_BUILTIN (maxi_s_w
, MIPS_V4SI_FTYPE_V4SI_QI
),
16038 MSA_BUILTIN (maxi_s_d
, MIPS_V2DI_FTYPE_V2DI_QI
),
16039 MSA_BUILTIN (max_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16040 MSA_BUILTIN (max_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16041 MSA_BUILTIN (max_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16042 MSA_BUILTIN (max_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16043 MSA_BUILTIN (maxi_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16044 MSA_BUILTIN (maxi_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UQI
),
16045 MSA_BUILTIN (maxi_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UQI
),
16046 MSA_BUILTIN (maxi_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UQI
),
16047 MSA_BUILTIN (min_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16048 MSA_BUILTIN (min_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16049 MSA_BUILTIN (min_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16050 MSA_BUILTIN (min_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16051 MSA_BUILTIN (mini_s_b
, MIPS_V16QI_FTYPE_V16QI_QI
),
16052 MSA_BUILTIN (mini_s_h
, MIPS_V8HI_FTYPE_V8HI_QI
),
16053 MSA_BUILTIN (mini_s_w
, MIPS_V4SI_FTYPE_V4SI_QI
),
16054 MSA_BUILTIN (mini_s_d
, MIPS_V2DI_FTYPE_V2DI_QI
),
16055 MSA_BUILTIN (min_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16056 MSA_BUILTIN (min_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16057 MSA_BUILTIN (min_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16058 MSA_BUILTIN (min_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16059 MSA_BUILTIN (mini_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16060 MSA_BUILTIN (mini_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UQI
),
16061 MSA_BUILTIN (mini_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UQI
),
16062 MSA_BUILTIN (mini_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UQI
),
16063 MSA_BUILTIN (max_a_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16064 MSA_BUILTIN (max_a_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16065 MSA_BUILTIN (max_a_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16066 MSA_BUILTIN (max_a_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16067 MSA_BUILTIN (min_a_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16068 MSA_BUILTIN (min_a_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16069 MSA_BUILTIN (min_a_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16070 MSA_BUILTIN (min_a_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16071 MSA_BUILTIN (ceq_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16072 MSA_BUILTIN (ceq_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16073 MSA_BUILTIN (ceq_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16074 MSA_BUILTIN (ceq_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16075 MSA_BUILTIN (ceqi_b
, MIPS_V16QI_FTYPE_V16QI_QI
),
16076 MSA_BUILTIN (ceqi_h
, MIPS_V8HI_FTYPE_V8HI_QI
),
16077 MSA_BUILTIN (ceqi_w
, MIPS_V4SI_FTYPE_V4SI_QI
),
16078 MSA_BUILTIN (ceqi_d
, MIPS_V2DI_FTYPE_V2DI_QI
),
16079 MSA_BUILTIN (clt_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16080 MSA_BUILTIN (clt_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16081 MSA_BUILTIN (clt_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16082 MSA_BUILTIN (clt_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16083 MSA_BUILTIN (clti_s_b
, MIPS_V16QI_FTYPE_V16QI_QI
),
16084 MSA_BUILTIN (clti_s_h
, MIPS_V8HI_FTYPE_V8HI_QI
),
16085 MSA_BUILTIN (clti_s_w
, MIPS_V4SI_FTYPE_V4SI_QI
),
16086 MSA_BUILTIN (clti_s_d
, MIPS_V2DI_FTYPE_V2DI_QI
),
16087 MSA_BUILTIN (clt_u_b
, MIPS_V16QI_FTYPE_UV16QI_UV16QI
),
16088 MSA_BUILTIN (clt_u_h
, MIPS_V8HI_FTYPE_UV8HI_UV8HI
),
16089 MSA_BUILTIN (clt_u_w
, MIPS_V4SI_FTYPE_UV4SI_UV4SI
),
16090 MSA_BUILTIN (clt_u_d
, MIPS_V2DI_FTYPE_UV2DI_UV2DI
),
16091 MSA_BUILTIN (clti_u_b
, MIPS_V16QI_FTYPE_UV16QI_UQI
),
16092 MSA_BUILTIN (clti_u_h
, MIPS_V8HI_FTYPE_UV8HI_UQI
),
16093 MSA_BUILTIN (clti_u_w
, MIPS_V4SI_FTYPE_UV4SI_UQI
),
16094 MSA_BUILTIN (clti_u_d
, MIPS_V2DI_FTYPE_UV2DI_UQI
),
16095 MSA_BUILTIN (cle_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16096 MSA_BUILTIN (cle_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16097 MSA_BUILTIN (cle_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16098 MSA_BUILTIN (cle_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16099 MSA_BUILTIN (clei_s_b
, MIPS_V16QI_FTYPE_V16QI_QI
),
16100 MSA_BUILTIN (clei_s_h
, MIPS_V8HI_FTYPE_V8HI_QI
),
16101 MSA_BUILTIN (clei_s_w
, MIPS_V4SI_FTYPE_V4SI_QI
),
16102 MSA_BUILTIN (clei_s_d
, MIPS_V2DI_FTYPE_V2DI_QI
),
16103 MSA_BUILTIN (cle_u_b
, MIPS_V16QI_FTYPE_UV16QI_UV16QI
),
16104 MSA_BUILTIN (cle_u_h
, MIPS_V8HI_FTYPE_UV8HI_UV8HI
),
16105 MSA_BUILTIN (cle_u_w
, MIPS_V4SI_FTYPE_UV4SI_UV4SI
),
16106 MSA_BUILTIN (cle_u_d
, MIPS_V2DI_FTYPE_UV2DI_UV2DI
),
16107 MSA_BUILTIN (clei_u_b
, MIPS_V16QI_FTYPE_UV16QI_UQI
),
16108 MSA_BUILTIN (clei_u_h
, MIPS_V8HI_FTYPE_UV8HI_UQI
),
16109 MSA_BUILTIN (clei_u_w
, MIPS_V4SI_FTYPE_UV4SI_UQI
),
16110 MSA_BUILTIN (clei_u_d
, MIPS_V2DI_FTYPE_UV2DI_UQI
),
16111 MSA_BUILTIN (ld_b
, MIPS_V16QI_FTYPE_CVPOINTER_SI
),
16112 MSA_BUILTIN (ld_h
, MIPS_V8HI_FTYPE_CVPOINTER_SI
),
16113 MSA_BUILTIN (ld_w
, MIPS_V4SI_FTYPE_CVPOINTER_SI
),
16114 MSA_BUILTIN (ld_d
, MIPS_V2DI_FTYPE_CVPOINTER_SI
),
16115 MSA_NO_TARGET_BUILTIN (st_b
, MIPS_VOID_FTYPE_V16QI_CVPOINTER_SI
),
16116 MSA_NO_TARGET_BUILTIN (st_h
, MIPS_VOID_FTYPE_V8HI_CVPOINTER_SI
),
16117 MSA_NO_TARGET_BUILTIN (st_w
, MIPS_VOID_FTYPE_V4SI_CVPOINTER_SI
),
16118 MSA_NO_TARGET_BUILTIN (st_d
, MIPS_VOID_FTYPE_V2DI_CVPOINTER_SI
),
16119 MSA_BUILTIN (sat_s_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
16120 MSA_BUILTIN (sat_s_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
16121 MSA_BUILTIN (sat_s_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
16122 MSA_BUILTIN (sat_s_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
16123 MSA_BUILTIN (sat_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16124 MSA_BUILTIN (sat_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UQI
),
16125 MSA_BUILTIN (sat_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UQI
),
16126 MSA_BUILTIN (sat_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UQI
),
16127 MSA_BUILTIN (add_a_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16128 MSA_BUILTIN (add_a_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16129 MSA_BUILTIN (add_a_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16130 MSA_BUILTIN (add_a_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16131 MSA_BUILTIN (adds_a_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16132 MSA_BUILTIN (adds_a_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16133 MSA_BUILTIN (adds_a_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16134 MSA_BUILTIN (adds_a_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16135 MSA_BUILTIN (adds_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16136 MSA_BUILTIN (adds_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16137 MSA_BUILTIN (adds_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16138 MSA_BUILTIN (adds_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16139 MSA_BUILTIN (adds_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16140 MSA_BUILTIN (adds_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16141 MSA_BUILTIN (adds_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16142 MSA_BUILTIN (adds_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16143 MSA_BUILTIN (ave_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16144 MSA_BUILTIN (ave_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16145 MSA_BUILTIN (ave_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16146 MSA_BUILTIN (ave_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16147 MSA_BUILTIN (ave_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16148 MSA_BUILTIN (ave_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16149 MSA_BUILTIN (ave_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16150 MSA_BUILTIN (ave_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16151 MSA_BUILTIN (aver_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16152 MSA_BUILTIN (aver_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16153 MSA_BUILTIN (aver_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16154 MSA_BUILTIN (aver_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16155 MSA_BUILTIN (aver_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16156 MSA_BUILTIN (aver_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16157 MSA_BUILTIN (aver_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16158 MSA_BUILTIN (aver_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16159 MSA_BUILTIN (subs_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16160 MSA_BUILTIN (subs_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16161 MSA_BUILTIN (subs_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16162 MSA_BUILTIN (subs_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16163 MSA_BUILTIN (subs_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16164 MSA_BUILTIN (subs_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16165 MSA_BUILTIN (subs_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16166 MSA_BUILTIN (subs_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16167 MSA_BUILTIN (subsuu_s_b
, MIPS_V16QI_FTYPE_UV16QI_UV16QI
),
16168 MSA_BUILTIN (subsuu_s_h
, MIPS_V8HI_FTYPE_UV8HI_UV8HI
),
16169 MSA_BUILTIN (subsuu_s_w
, MIPS_V4SI_FTYPE_UV4SI_UV4SI
),
16170 MSA_BUILTIN (subsuu_s_d
, MIPS_V2DI_FTYPE_UV2DI_UV2DI
),
16171 MSA_BUILTIN (subsus_u_b
, MIPS_UV16QI_FTYPE_UV16QI_V16QI
),
16172 MSA_BUILTIN (subsus_u_h
, MIPS_UV8HI_FTYPE_UV8HI_V8HI
),
16173 MSA_BUILTIN (subsus_u_w
, MIPS_UV4SI_FTYPE_UV4SI_V4SI
),
16174 MSA_BUILTIN (subsus_u_d
, MIPS_UV2DI_FTYPE_UV2DI_V2DI
),
16175 MSA_BUILTIN (asub_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16176 MSA_BUILTIN (asub_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16177 MSA_BUILTIN (asub_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16178 MSA_BUILTIN (asub_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16179 MSA_BUILTIN (asub_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16180 MSA_BUILTIN (asub_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16181 MSA_BUILTIN (asub_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16182 MSA_BUILTIN (asub_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16183 MSA_BUILTIN (mulv_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16184 MSA_BUILTIN (mulv_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16185 MSA_BUILTIN (mulv_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16186 MSA_BUILTIN (mulv_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16187 MSA_BUILTIN (maddv_b
, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI
),
16188 MSA_BUILTIN (maddv_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16189 MSA_BUILTIN (maddv_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16190 MSA_BUILTIN (maddv_d
, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI
),
16191 MSA_BUILTIN (msubv_b
, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI
),
16192 MSA_BUILTIN (msubv_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16193 MSA_BUILTIN (msubv_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16194 MSA_BUILTIN (msubv_d
, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI
),
16195 MSA_BUILTIN (div_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16196 MSA_BUILTIN (div_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16197 MSA_BUILTIN (div_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16198 MSA_BUILTIN (div_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16199 MSA_BUILTIN (div_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16200 MSA_BUILTIN (div_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16201 MSA_BUILTIN (div_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16202 MSA_BUILTIN (div_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16203 MSA_BUILTIN (hadd_s_h
, MIPS_V8HI_FTYPE_V16QI_V16QI
),
16204 MSA_BUILTIN (hadd_s_w
, MIPS_V4SI_FTYPE_V8HI_V8HI
),
16205 MSA_BUILTIN (hadd_s_d
, MIPS_V2DI_FTYPE_V4SI_V4SI
),
16206 MSA_BUILTIN (hadd_u_h
, MIPS_UV8HI_FTYPE_UV16QI_UV16QI
),
16207 MSA_BUILTIN (hadd_u_w
, MIPS_UV4SI_FTYPE_UV8HI_UV8HI
),
16208 MSA_BUILTIN (hadd_u_d
, MIPS_UV2DI_FTYPE_UV4SI_UV4SI
),
16209 MSA_BUILTIN (hsub_s_h
, MIPS_V8HI_FTYPE_V16QI_V16QI
),
16210 MSA_BUILTIN (hsub_s_w
, MIPS_V4SI_FTYPE_V8HI_V8HI
),
16211 MSA_BUILTIN (hsub_s_d
, MIPS_V2DI_FTYPE_V4SI_V4SI
),
16212 MSA_BUILTIN (hsub_u_h
, MIPS_V8HI_FTYPE_UV16QI_UV16QI
),
16213 MSA_BUILTIN (hsub_u_w
, MIPS_V4SI_FTYPE_UV8HI_UV8HI
),
16214 MSA_BUILTIN (hsub_u_d
, MIPS_V2DI_FTYPE_UV4SI_UV4SI
),
16215 MSA_BUILTIN (mod_s_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16216 MSA_BUILTIN (mod_s_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16217 MSA_BUILTIN (mod_s_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16218 MSA_BUILTIN (mod_s_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16219 MSA_BUILTIN (mod_u_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16220 MSA_BUILTIN (mod_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV8HI
),
16221 MSA_BUILTIN (mod_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV4SI
),
16222 MSA_BUILTIN (mod_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV2DI
),
16223 MSA_BUILTIN (dotp_s_h
, MIPS_V8HI_FTYPE_V16QI_V16QI
),
16224 MSA_BUILTIN (dotp_s_w
, MIPS_V4SI_FTYPE_V8HI_V8HI
),
16225 MSA_BUILTIN (dotp_s_d
, MIPS_V2DI_FTYPE_V4SI_V4SI
),
16226 MSA_BUILTIN (dotp_u_h
, MIPS_UV8HI_FTYPE_UV16QI_UV16QI
),
16227 MSA_BUILTIN (dotp_u_w
, MIPS_UV4SI_FTYPE_UV8HI_UV8HI
),
16228 MSA_BUILTIN (dotp_u_d
, MIPS_UV2DI_FTYPE_UV4SI_UV4SI
),
16229 MSA_BUILTIN (dpadd_s_h
, MIPS_V8HI_FTYPE_V8HI_V16QI_V16QI
),
16230 MSA_BUILTIN (dpadd_s_w
, MIPS_V4SI_FTYPE_V4SI_V8HI_V8HI
),
16231 MSA_BUILTIN (dpadd_s_d
, MIPS_V2DI_FTYPE_V2DI_V4SI_V4SI
),
16232 MSA_BUILTIN (dpadd_u_h
, MIPS_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI
),
16233 MSA_BUILTIN (dpadd_u_w
, MIPS_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI
),
16234 MSA_BUILTIN (dpadd_u_d
, MIPS_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI
),
16235 MSA_BUILTIN (dpsub_s_h
, MIPS_V8HI_FTYPE_V8HI_V16QI_V16QI
),
16236 MSA_BUILTIN (dpsub_s_w
, MIPS_V4SI_FTYPE_V4SI_V8HI_V8HI
),
16237 MSA_BUILTIN (dpsub_s_d
, MIPS_V2DI_FTYPE_V2DI_V4SI_V4SI
),
16238 MSA_BUILTIN (dpsub_u_h
, MIPS_V8HI_FTYPE_V8HI_UV16QI_UV16QI
),
16239 MSA_BUILTIN (dpsub_u_w
, MIPS_V4SI_FTYPE_V4SI_UV8HI_UV8HI
),
16240 MSA_BUILTIN (dpsub_u_d
, MIPS_V2DI_FTYPE_V2DI_UV4SI_UV4SI
),
16241 MSA_BUILTIN (sld_b
, MIPS_V16QI_FTYPE_V16QI_V16QI_SI
),
16242 MSA_BUILTIN (sld_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_SI
),
16243 MSA_BUILTIN (sld_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_SI
),
16244 MSA_BUILTIN (sld_d
, MIPS_V2DI_FTYPE_V2DI_V2DI_SI
),
16245 MSA_BUILTIN (sldi_b
, MIPS_V16QI_FTYPE_V16QI_V16QI_UQI
),
16246 MSA_BUILTIN (sldi_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_UQI
),
16247 MSA_BUILTIN (sldi_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_UQI
),
16248 MSA_BUILTIN (sldi_d
, MIPS_V2DI_FTYPE_V2DI_V2DI_UQI
),
16249 MSA_BUILTIN (splat_b
, MIPS_V16QI_FTYPE_V16QI_SI
),
16250 MSA_BUILTIN (splat_h
, MIPS_V8HI_FTYPE_V8HI_SI
),
16251 MSA_BUILTIN (splat_w
, MIPS_V4SI_FTYPE_V4SI_SI
),
16252 MSA_BUILTIN (splat_d
, MIPS_V2DI_FTYPE_V2DI_SI
),
16253 MSA_BUILTIN (splati_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
16254 MSA_BUILTIN (splati_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
16255 MSA_BUILTIN (splati_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
16256 MSA_BUILTIN (splati_d
, MIPS_V2DI_FTYPE_V2DI_UQI
),
16257 MSA_BUILTIN (pckev_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16258 MSA_BUILTIN (pckev_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16259 MSA_BUILTIN (pckev_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16260 MSA_BUILTIN (pckev_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16261 MSA_BUILTIN (pckod_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16262 MSA_BUILTIN (pckod_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16263 MSA_BUILTIN (pckod_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16264 MSA_BUILTIN (pckod_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16265 MSA_BUILTIN (ilvl_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16266 MSA_BUILTIN (ilvl_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16267 MSA_BUILTIN (ilvl_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16268 MSA_BUILTIN (ilvl_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16269 MSA_BUILTIN (ilvr_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16270 MSA_BUILTIN (ilvr_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16271 MSA_BUILTIN (ilvr_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16272 MSA_BUILTIN (ilvr_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16273 MSA_BUILTIN (ilvev_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16274 MSA_BUILTIN (ilvev_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16275 MSA_BUILTIN (ilvev_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16276 MSA_BUILTIN (ilvev_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16277 MSA_BUILTIN (ilvod_b
, MIPS_V16QI_FTYPE_V16QI_V16QI
),
16278 MSA_BUILTIN (ilvod_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16279 MSA_BUILTIN (ilvod_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16280 MSA_BUILTIN (ilvod_d
, MIPS_V2DI_FTYPE_V2DI_V2DI
),
16281 MSA_BUILTIN (vshf_b
, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI
),
16282 MSA_BUILTIN (vshf_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16283 MSA_BUILTIN (vshf_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16284 MSA_BUILTIN (vshf_d
, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI
),
16285 MSA_BUILTIN (and_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16286 MSA_BUILTIN (andi_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16287 MSA_BUILTIN (or_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16288 MSA_BUILTIN (ori_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16289 MSA_BUILTIN (nor_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16290 MSA_BUILTIN (nori_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16291 MSA_BUILTIN (xor_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI
),
16292 MSA_BUILTIN (xori_b
, MIPS_UV16QI_FTYPE_UV16QI_UQI
),
16293 MSA_BUILTIN (bmnz_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI
),
16294 MSA_BUILTIN (bmnzi_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI
),
16295 MSA_BUILTIN (bmz_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI
),
16296 MSA_BUILTIN (bmzi_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI
),
16297 MSA_BUILTIN (bsel_v
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI
),
16298 MSA_BUILTIN (bseli_b
, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI
),
16299 MSA_BUILTIN (shf_b
, MIPS_V16QI_FTYPE_V16QI_UQI
),
16300 MSA_BUILTIN (shf_h
, MIPS_V8HI_FTYPE_V8HI_UQI
),
16301 MSA_BUILTIN (shf_w
, MIPS_V4SI_FTYPE_V4SI_UQI
),
16302 MSA_BUILTIN_TEST_BRANCH (bnz_v
, MIPS_SI_FTYPE_UV16QI
),
16303 MSA_BUILTIN_TEST_BRANCH (bz_v
, MIPS_SI_FTYPE_UV16QI
),
16304 MSA_BUILTIN (fill_b
, MIPS_V16QI_FTYPE_SI
),
16305 MSA_BUILTIN (fill_h
, MIPS_V8HI_FTYPE_SI
),
16306 MSA_BUILTIN (fill_w
, MIPS_V4SI_FTYPE_SI
),
16307 MSA_BUILTIN (fill_d
, MIPS_V2DI_FTYPE_DI
),
16308 MSA_BUILTIN (pcnt_b
, MIPS_V16QI_FTYPE_V16QI
),
16309 MSA_BUILTIN (pcnt_h
, MIPS_V8HI_FTYPE_V8HI
),
16310 MSA_BUILTIN (pcnt_w
, MIPS_V4SI_FTYPE_V4SI
),
16311 MSA_BUILTIN (pcnt_d
, MIPS_V2DI_FTYPE_V2DI
),
16312 MSA_BUILTIN (nloc_b
, MIPS_V16QI_FTYPE_V16QI
),
16313 MSA_BUILTIN (nloc_h
, MIPS_V8HI_FTYPE_V8HI
),
16314 MSA_BUILTIN (nloc_w
, MIPS_V4SI_FTYPE_V4SI
),
16315 MSA_BUILTIN (nloc_d
, MIPS_V2DI_FTYPE_V2DI
),
16316 MSA_BUILTIN (nlzc_b
, MIPS_V16QI_FTYPE_V16QI
),
16317 MSA_BUILTIN (nlzc_h
, MIPS_V8HI_FTYPE_V8HI
),
16318 MSA_BUILTIN (nlzc_w
, MIPS_V4SI_FTYPE_V4SI
),
16319 MSA_BUILTIN (nlzc_d
, MIPS_V2DI_FTYPE_V2DI
),
16320 MSA_BUILTIN (copy_s_b
, MIPS_SI_FTYPE_V16QI_UQI
),
16321 MSA_BUILTIN (copy_s_h
, MIPS_SI_FTYPE_V8HI_UQI
),
16322 MSA_BUILTIN (copy_s_w
, MIPS_SI_FTYPE_V4SI_UQI
),
16323 MSA_BUILTIN (copy_s_d
, MIPS_DI_FTYPE_V2DI_UQI
),
16324 MSA_BUILTIN (copy_u_b
, MIPS_USI_FTYPE_V16QI_UQI
),
16325 MSA_BUILTIN (copy_u_h
, MIPS_USI_FTYPE_V8HI_UQI
),
16326 MSA_BUILTIN_REMAP (copy_u_w
, copy_s_w
, MIPS_USI_FTYPE_V4SI_UQI
),
16327 MSA_BUILTIN_REMAP (copy_u_d
, copy_s_d
, MIPS_UDI_FTYPE_V2DI_UQI
),
16328 MSA_BUILTIN (insert_b
, MIPS_V16QI_FTYPE_V16QI_UQI_SI
),
16329 MSA_BUILTIN (insert_h
, MIPS_V8HI_FTYPE_V8HI_UQI_SI
),
16330 MSA_BUILTIN (insert_w
, MIPS_V4SI_FTYPE_V4SI_UQI_SI
),
16331 MSA_BUILTIN (insert_d
, MIPS_V2DI_FTYPE_V2DI_UQI_DI
),
16332 MSA_BUILTIN (insve_b
, MIPS_V16QI_FTYPE_V16QI_UQI_V16QI
),
16333 MSA_BUILTIN (insve_h
, MIPS_V8HI_FTYPE_V8HI_UQI_V8HI
),
16334 MSA_BUILTIN (insve_w
, MIPS_V4SI_FTYPE_V4SI_UQI_V4SI
),
16335 MSA_BUILTIN (insve_d
, MIPS_V2DI_FTYPE_V2DI_UQI_V2DI
),
16336 MSA_BUILTIN_TEST_BRANCH (bnz_b
, MIPS_SI_FTYPE_UV16QI
),
16337 MSA_BUILTIN_TEST_BRANCH (bnz_h
, MIPS_SI_FTYPE_UV8HI
),
16338 MSA_BUILTIN_TEST_BRANCH (bnz_w
, MIPS_SI_FTYPE_UV4SI
),
16339 MSA_BUILTIN_TEST_BRANCH (bnz_d
, MIPS_SI_FTYPE_UV2DI
),
16340 MSA_BUILTIN_TEST_BRANCH (bz_b
, MIPS_SI_FTYPE_UV16QI
),
16341 MSA_BUILTIN_TEST_BRANCH (bz_h
, MIPS_SI_FTYPE_UV8HI
),
16342 MSA_BUILTIN_TEST_BRANCH (bz_w
, MIPS_SI_FTYPE_UV4SI
),
16343 MSA_BUILTIN_TEST_BRANCH (bz_d
, MIPS_SI_FTYPE_UV2DI
),
16344 MSA_BUILTIN (ldi_b
, MIPS_V16QI_FTYPE_HI
),
16345 MSA_BUILTIN (ldi_h
, MIPS_V8HI_FTYPE_HI
),
16346 MSA_BUILTIN (ldi_w
, MIPS_V4SI_FTYPE_HI
),
16347 MSA_BUILTIN (ldi_d
, MIPS_V2DI_FTYPE_HI
),
16348 MSA_BUILTIN (fcaf_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16349 MSA_BUILTIN (fcaf_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16350 MSA_BUILTIN (fcor_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16351 MSA_BUILTIN (fcor_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16352 MSA_BUILTIN (fcun_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16353 MSA_BUILTIN (fcun_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16354 MSA_BUILTIN (fcune_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16355 MSA_BUILTIN (fcune_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16356 MSA_BUILTIN (fcueq_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16357 MSA_BUILTIN (fcueq_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16358 MSA_BUILTIN (fceq_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16359 MSA_BUILTIN (fceq_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16360 MSA_BUILTIN (fcne_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16361 MSA_BUILTIN (fcne_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16362 MSA_BUILTIN (fclt_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16363 MSA_BUILTIN (fclt_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16364 MSA_BUILTIN (fcult_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16365 MSA_BUILTIN (fcult_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16366 MSA_BUILTIN (fcle_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16367 MSA_BUILTIN (fcle_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16368 MSA_BUILTIN (fcule_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16369 MSA_BUILTIN (fcule_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16370 MSA_BUILTIN (fsaf_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16371 MSA_BUILTIN (fsaf_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16372 MSA_BUILTIN (fsor_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16373 MSA_BUILTIN (fsor_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16374 MSA_BUILTIN (fsun_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16375 MSA_BUILTIN (fsun_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16376 MSA_BUILTIN (fsune_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16377 MSA_BUILTIN (fsune_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16378 MSA_BUILTIN (fsueq_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16379 MSA_BUILTIN (fsueq_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16380 MSA_BUILTIN (fseq_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16381 MSA_BUILTIN (fseq_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16382 MSA_BUILTIN (fsne_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16383 MSA_BUILTIN (fsne_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16384 MSA_BUILTIN (fslt_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16385 MSA_BUILTIN (fslt_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16386 MSA_BUILTIN (fsult_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16387 MSA_BUILTIN (fsult_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16388 MSA_BUILTIN (fsle_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16389 MSA_BUILTIN (fsle_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16390 MSA_BUILTIN (fsule_w
, MIPS_V4SI_FTYPE_V4SF_V4SF
),
16391 MSA_BUILTIN (fsule_d
, MIPS_V2DI_FTYPE_V2DF_V2DF
),
16392 MSA_BUILTIN (fadd_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16393 MSA_BUILTIN (fadd_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16394 MSA_BUILTIN (fsub_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16395 MSA_BUILTIN (fsub_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16396 MSA_BUILTIN (fmul_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16397 MSA_BUILTIN (fmul_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16398 MSA_BUILTIN (fdiv_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16399 MSA_BUILTIN (fdiv_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16400 MSA_BUILTIN (fmadd_w
, MIPS_V4SF_FTYPE_V4SF_V4SF_V4SF
),
16401 MSA_BUILTIN (fmadd_d
, MIPS_V2DF_FTYPE_V2DF_V2DF_V2DF
),
16402 MSA_BUILTIN (fmsub_w
, MIPS_V4SF_FTYPE_V4SF_V4SF_V4SF
),
16403 MSA_BUILTIN (fmsub_d
, MIPS_V2DF_FTYPE_V2DF_V2DF_V2DF
),
16404 MSA_BUILTIN (fexp2_w
, MIPS_V4SF_FTYPE_V4SF_V4SI
),
16405 MSA_BUILTIN (fexp2_d
, MIPS_V2DF_FTYPE_V2DF_V2DI
),
16406 MSA_BUILTIN (fexdo_h
, MIPS_V8HI_FTYPE_V4SF_V4SF
),
16407 MSA_BUILTIN (fexdo_w
, MIPS_V4SF_FTYPE_V2DF_V2DF
),
16408 MSA_BUILTIN (ftq_h
, MIPS_V8HI_FTYPE_V4SF_V4SF
),
16409 MSA_BUILTIN (ftq_w
, MIPS_V4SI_FTYPE_V2DF_V2DF
),
16410 MSA_BUILTIN (fmin_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16411 MSA_BUILTIN (fmin_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16412 MSA_BUILTIN (fmin_a_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16413 MSA_BUILTIN (fmin_a_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16414 MSA_BUILTIN (fmax_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16415 MSA_BUILTIN (fmax_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16416 MSA_BUILTIN (fmax_a_w
, MIPS_V4SF_FTYPE_V4SF_V4SF
),
16417 MSA_BUILTIN (fmax_a_d
, MIPS_V2DF_FTYPE_V2DF_V2DF
),
16418 MSA_BUILTIN (mul_q_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16419 MSA_BUILTIN (mul_q_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16420 MSA_BUILTIN (mulr_q_h
, MIPS_V8HI_FTYPE_V8HI_V8HI
),
16421 MSA_BUILTIN (mulr_q_w
, MIPS_V4SI_FTYPE_V4SI_V4SI
),
16422 MSA_BUILTIN (madd_q_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16423 MSA_BUILTIN (madd_q_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16424 MSA_BUILTIN (maddr_q_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16425 MSA_BUILTIN (maddr_q_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16426 MSA_BUILTIN (msub_q_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16427 MSA_BUILTIN (msub_q_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16428 MSA_BUILTIN (msubr_q_h
, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI
),
16429 MSA_BUILTIN (msubr_q_w
, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI
),
16430 MSA_BUILTIN (fclass_w
, MIPS_V4SI_FTYPE_V4SF
),
16431 MSA_BUILTIN (fclass_d
, MIPS_V2DI_FTYPE_V2DF
),
16432 MSA_BUILTIN (fsqrt_w
, MIPS_V4SF_FTYPE_V4SF
),
16433 MSA_BUILTIN (fsqrt_d
, MIPS_V2DF_FTYPE_V2DF
),
16434 MSA_BUILTIN (frcp_w
, MIPS_V4SF_FTYPE_V4SF
),
16435 MSA_BUILTIN (frcp_d
, MIPS_V2DF_FTYPE_V2DF
),
16436 MSA_BUILTIN (frint_w
, MIPS_V4SF_FTYPE_V4SF
),
16437 MSA_BUILTIN (frint_d
, MIPS_V2DF_FTYPE_V2DF
),
16438 MSA_BUILTIN (frsqrt_w
, MIPS_V4SF_FTYPE_V4SF
),
16439 MSA_BUILTIN (frsqrt_d
, MIPS_V2DF_FTYPE_V2DF
),
16440 MSA_BUILTIN (flog2_w
, MIPS_V4SF_FTYPE_V4SF
),
16441 MSA_BUILTIN (flog2_d
, MIPS_V2DF_FTYPE_V2DF
),
16442 MSA_BUILTIN (fexupl_w
, MIPS_V4SF_FTYPE_V8HI
),
16443 MSA_BUILTIN (fexupl_d
, MIPS_V2DF_FTYPE_V4SF
),
16444 MSA_BUILTIN (fexupr_w
, MIPS_V4SF_FTYPE_V8HI
),
16445 MSA_BUILTIN (fexupr_d
, MIPS_V2DF_FTYPE_V4SF
),
16446 MSA_BUILTIN (ffql_w
, MIPS_V4SF_FTYPE_V8HI
),
16447 MSA_BUILTIN (ffql_d
, MIPS_V2DF_FTYPE_V4SI
),
16448 MSA_BUILTIN (ffqr_w
, MIPS_V4SF_FTYPE_V8HI
),
16449 MSA_BUILTIN (ffqr_d
, MIPS_V2DF_FTYPE_V4SI
),
16450 MSA_BUILTIN (ftint_s_w
, MIPS_V4SI_FTYPE_V4SF
),
16451 MSA_BUILTIN (ftint_s_d
, MIPS_V2DI_FTYPE_V2DF
),
16452 MSA_BUILTIN (ftint_u_w
, MIPS_UV4SI_FTYPE_V4SF
),
16453 MSA_BUILTIN (ftint_u_d
, MIPS_UV2DI_FTYPE_V2DF
),
16454 MSA_BUILTIN (ftrunc_s_w
, MIPS_V4SI_FTYPE_V4SF
),
16455 MSA_BUILTIN (ftrunc_s_d
, MIPS_V2DI_FTYPE_V2DF
),
16456 MSA_BUILTIN (ftrunc_u_w
, MIPS_UV4SI_FTYPE_V4SF
),
16457 MSA_BUILTIN (ftrunc_u_d
, MIPS_UV2DI_FTYPE_V2DF
),
16458 MSA_BUILTIN (ffint_s_w
, MIPS_V4SF_FTYPE_V4SI
),
16459 MSA_BUILTIN (ffint_s_d
, MIPS_V2DF_FTYPE_V2DI
),
16460 MSA_BUILTIN (ffint_u_w
, MIPS_V4SF_FTYPE_UV4SI
),
16461 MSA_BUILTIN (ffint_u_d
, MIPS_V2DF_FTYPE_UV2DI
),
16462 MSA_NO_TARGET_BUILTIN (ctcmsa
, MIPS_VOID_FTYPE_UQI_SI
),
16463 MSA_BUILTIN (cfcmsa
, MIPS_SI_FTYPE_UQI
),
16464 MSA_BUILTIN (move_v
, MIPS_V16QI_FTYPE_V16QI
),
16467 /* Index I is the function declaration for mips_builtins[I], or null if the
16468 function isn't defined on this target. */
16469 static GTY(()) tree mips_builtin_decls
[ARRAY_SIZE (mips_builtins
)];
16470 /* Get the index I of the function declaration for mips_builtin_decls[I]
16471 using the instruction code or return null if not defined for the target. */
16472 static GTY(()) int mips_get_builtin_decl_index
[NUM_INSN_CODES
];
16474 /* MODE is a vector mode whose elements have type TYPE. Return the type
16475 of the vector itself. */
16478 mips_builtin_vector_type (tree type
, machine_mode mode
)
16480 static tree types
[2 * (int) MAX_MACHINE_MODE
];
16483 mode_index
= (int) mode
;
16485 if (TREE_CODE (type
) == INTEGER_TYPE
&& TYPE_UNSIGNED (type
))
16486 mode_index
+= MAX_MACHINE_MODE
;
16488 if (types
[mode_index
] == NULL_TREE
)
16489 types
[mode_index
] = build_vector_type_for_mode (type
, mode
);
16490 return types
[mode_index
];
16493 /* Return a type for 'const volatile void *'. */
16496 mips_build_cvpointer_type (void)
16500 if (cache
== NULL_TREE
)
16501 cache
= build_pointer_type (build_qualified_type
16503 TYPE_QUAL_CONST
| TYPE_QUAL_VOLATILE
));
/* Source-level argument types.  */
#define MIPS_ATYPE_VOID void_type_node
#define MIPS_ATYPE_INT integer_type_node
#define MIPS_ATYPE_POINTER ptr_type_node
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()

/* Standard mode-based argument types.  */
#define MIPS_ATYPE_QI intQI_type_node
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
#define MIPS_ATYPE_HI intHI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
#define MIPS_ATYPE_UDI unsigned_intDI_type_node
#define MIPS_ATYPE_SF float_type_node
#define MIPS_ATYPE_DF double_type_node

/* Vector argument types.  */
#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)

#define MIPS_ATYPE_V2DI \
  mips_builtin_vector_type (long_long_integer_type_node, V2DImode)
#define MIPS_ATYPE_V4SI mips_builtin_vector_type (intSI_type_node, V4SImode)
#define MIPS_ATYPE_V8HI mips_builtin_vector_type (intHI_type_node, V8HImode)
#define MIPS_ATYPE_V16QI mips_builtin_vector_type (intQI_type_node, V16QImode)
#define MIPS_ATYPE_V2DF mips_builtin_vector_type (double_type_node, V2DFmode)
#define MIPS_ATYPE_V4SF mips_builtin_vector_type (float_type_node, V4SFmode)

#define MIPS_ATYPE_UV2DI \
  mips_builtin_vector_type (long_long_unsigned_type_node, V2DImode)
#define MIPS_ATYPE_UV4SI \
  mips_builtin_vector_type (unsigned_intSI_type_node, V4SImode)
#define MIPS_ATYPE_UV8HI \
  mips_builtin_vector_type (unsigned_intHI_type_node, V8HImode)
#define MIPS_ATYPE_UV16QI \
  mips_builtin_vector_type (unsigned_intQI_type_node, V16QImode)

#define MIPS_ATYPE_UV2SI \
  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI \
  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
#define MIPS_ATYPE_UV8QI \
  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)

/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
   their associated MIPS_ATYPEs.  */
#define MIPS_FTYPE_ATYPES1(A, B) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B

#define MIPS_FTYPE_ATYPES2(A, B, C) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C

#define MIPS_FTYPE_ATYPES3(A, B, C, D) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D

#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E
16571 /* Return the function type associated with function prototype TYPE. */
16574 mips_build_function_type (enum mips_function_type type
)
16576 static tree types
[(int) MIPS_MAX_FTYPE_MAX
];
16578 if (types
[(int) type
] == NULL_TREE
)
16581 #define DEF_MIPS_FTYPE(NUM, ARGS) \
16582 case MIPS_FTYPE_NAME##NUM ARGS: \
16583 types[(int) type] \
16584 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
16587 #include "config/mips/mips-ftypes.def"
16588 #undef DEF_MIPS_FTYPE
16590 gcc_unreachable ();
16593 return types
[(int) type
];
16596 /* Implement TARGET_INIT_BUILTINS. */
16599 mips_init_builtins (void)
16601 const struct mips_builtin_description
*d
;
16604 /* Iterate through all of the bdesc arrays, initializing all of the
16605 builtin functions. */
16606 for (i
= 0; i
< ARRAY_SIZE (mips_builtins
); i
++)
16608 d
= &mips_builtins
[i
];
16611 mips_builtin_decls
[i
]
16612 = add_builtin_function (d
->name
,
16613 mips_build_function_type (d
->function_type
),
16614 i
, BUILT_IN_MD
, NULL
, NULL
);
16615 mips_get_builtin_decl_index
[d
->icode
] = i
;
16620 /* Implement TARGET_BUILTIN_DECL. */
16623 mips_builtin_decl (unsigned int code
, bool initialize_p ATTRIBUTE_UNUSED
)
16625 if (code
>= ARRAY_SIZE (mips_builtins
))
16626 return error_mark_node
;
16627 return mips_builtin_decls
[code
];
16630 /* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION. */
16633 mips_builtin_vectorized_function (unsigned int fn
, tree type_out
, tree type_in
)
16635 machine_mode in_mode
, out_mode
;
16638 if (TREE_CODE (type_out
) != VECTOR_TYPE
16639 || TREE_CODE (type_in
) != VECTOR_TYPE
16643 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
16644 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
16645 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
16646 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
16648 /* INSN is the name of the associated instruction pattern, without
16649 the leading CODE_FOR_. */
16650 #define MIPS_GET_BUILTIN(INSN) \
16651 mips_builtin_decls[mips_get_builtin_decl_index[CODE_FOR_##INSN]]
16655 case BUILT_IN_SQRT
:
16656 if (out_mode
== DFmode
&& out_n
== 2
16657 && in_mode
== DFmode
&& in_n
== 2)
16658 return MIPS_GET_BUILTIN (msa_fsqrt_d
);
16660 case BUILT_IN_SQRTF
:
16661 if (out_mode
== SFmode
&& out_n
== 4
16662 && in_mode
== SFmode
&& in_n
== 4)
16663 return MIPS_GET_BUILTIN (msa_fsqrt_w
);
16672 /* Take argument ARGNO from EXP's argument list and convert it into
16673 an expand operand. Store the operand in *OP. */
16676 mips_prepare_builtin_arg (struct expand_operand
*op
, tree exp
,
16677 unsigned int argno
)
16682 arg
= CALL_EXPR_ARG (exp
, argno
);
16683 value
= expand_normal (arg
);
16684 create_input_operand (op
, value
, TYPE_MODE (TREE_TYPE (arg
)));
16687 /* Expand instruction ICODE as part of a built-in function sequence.
16688 Use the first NOPS elements of OPS as the instruction's operands.
16689 HAS_TARGET_P is true if operand 0 is a target; it is false if the
16690 instruction has no target.
16692 Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */
16695 mips_expand_builtin_insn (enum insn_code icode
, unsigned int nops
,
16696 struct expand_operand
*ops
, bool has_target_p
)
16698 machine_mode imode
;
16699 int rangelo
= 0, rangehi
= 0, error_opno
= 0;
16704 /* The third operand of these instructions is in SImode, so we need to
16705 bring the corresponding builtin argument from QImode into SImode. */
16706 case CODE_FOR_loongson_pshufh
:
16707 case CODE_FOR_loongson_psllh
:
16708 case CODE_FOR_loongson_psllw
:
16709 case CODE_FOR_loongson_psrah
:
16710 case CODE_FOR_loongson_psraw
:
16711 case CODE_FOR_loongson_psrlh
:
16712 case CODE_FOR_loongson_psrlw
:
16713 gcc_assert (has_target_p
&& nops
== 3 && ops
[2].mode
== QImode
);
16714 sireg
= gen_reg_rtx (SImode
);
16715 emit_insn (gen_zero_extendqisi2 (sireg
,
16716 force_reg (QImode
, ops
[2].value
)));
16717 ops
[2].value
= sireg
;
16718 ops
[2].mode
= SImode
;
16721 case CODE_FOR_msa_addvi_b
:
16722 case CODE_FOR_msa_addvi_h
:
16723 case CODE_FOR_msa_addvi_w
:
16724 case CODE_FOR_msa_addvi_d
:
16725 case CODE_FOR_msa_clti_u_b
:
16726 case CODE_FOR_msa_clti_u_h
:
16727 case CODE_FOR_msa_clti_u_w
:
16728 case CODE_FOR_msa_clti_u_d
:
16729 case CODE_FOR_msa_clei_u_b
:
16730 case CODE_FOR_msa_clei_u_h
:
16731 case CODE_FOR_msa_clei_u_w
:
16732 case CODE_FOR_msa_clei_u_d
:
16733 case CODE_FOR_msa_maxi_u_b
:
16734 case CODE_FOR_msa_maxi_u_h
:
16735 case CODE_FOR_msa_maxi_u_w
:
16736 case CODE_FOR_msa_maxi_u_d
:
16737 case CODE_FOR_msa_mini_u_b
:
16738 case CODE_FOR_msa_mini_u_h
:
16739 case CODE_FOR_msa_mini_u_w
:
16740 case CODE_FOR_msa_mini_u_d
:
16741 case CODE_FOR_msa_subvi_b
:
16742 case CODE_FOR_msa_subvi_h
:
16743 case CODE_FOR_msa_subvi_w
:
16744 case CODE_FOR_msa_subvi_d
:
16745 gcc_assert (has_target_p
&& nops
== 3);
16746 /* We only generate a vector of constants iff the second argument
16747 is an immediate. We also validate the range of the immediate. */
16748 if (CONST_INT_P (ops
[2].value
))
16752 if (IN_RANGE (INTVAL (ops
[2].value
), rangelo
, rangehi
))
16754 ops
[2].mode
= ops
[0].mode
;
16755 ops
[2].value
= mips_gen_const_int_vector (ops
[2].mode
,
16756 INTVAL (ops
[2].value
));
16763 case CODE_FOR_msa_ceqi_b
:
16764 case CODE_FOR_msa_ceqi_h
:
16765 case CODE_FOR_msa_ceqi_w
:
16766 case CODE_FOR_msa_ceqi_d
:
16767 case CODE_FOR_msa_clti_s_b
:
16768 case CODE_FOR_msa_clti_s_h
:
16769 case CODE_FOR_msa_clti_s_w
:
16770 case CODE_FOR_msa_clti_s_d
:
16771 case CODE_FOR_msa_clei_s_b
:
16772 case CODE_FOR_msa_clei_s_h
:
16773 case CODE_FOR_msa_clei_s_w
:
16774 case CODE_FOR_msa_clei_s_d
:
16775 case CODE_FOR_msa_maxi_s_b
:
16776 case CODE_FOR_msa_maxi_s_h
:
16777 case CODE_FOR_msa_maxi_s_w
:
16778 case CODE_FOR_msa_maxi_s_d
:
16779 case CODE_FOR_msa_mini_s_b
:
16780 case CODE_FOR_msa_mini_s_h
:
16781 case CODE_FOR_msa_mini_s_w
:
16782 case CODE_FOR_msa_mini_s_d
:
16783 gcc_assert (has_target_p
&& nops
== 3);
16784 /* We only generate a vector of constants iff the second argument
16785 is an immediate. We also validate the range of the immediate. */
16786 if (CONST_INT_P (ops
[2].value
))
16790 if (IN_RANGE (INTVAL (ops
[2].value
), rangelo
, rangehi
))
16792 ops
[2].mode
= ops
[0].mode
;
16793 ops
[2].value
= mips_gen_const_int_vector (ops
[2].mode
,
16794 INTVAL (ops
[2].value
));
16801 case CODE_FOR_msa_andi_b
:
16802 case CODE_FOR_msa_ori_b
:
16803 case CODE_FOR_msa_nori_b
:
16804 case CODE_FOR_msa_xori_b
:
16805 gcc_assert (has_target_p
&& nops
== 3);
16806 if (!CONST_INT_P (ops
[2].value
))
16808 ops
[2].mode
= ops
[0].mode
;
16809 ops
[2].value
= mips_gen_const_int_vector (ops
[2].mode
,
16810 INTVAL (ops
[2].value
));
16813 case CODE_FOR_msa_bmzi_b
:
16814 case CODE_FOR_msa_bmnzi_b
:
16815 case CODE_FOR_msa_bseli_b
:
16816 gcc_assert (has_target_p
&& nops
== 4);
16817 if (!CONST_INT_P (ops
[3].value
))
16819 ops
[3].mode
= ops
[0].mode
;
16820 ops
[3].value
= mips_gen_const_int_vector (ops
[3].mode
,
16821 INTVAL (ops
[3].value
));
16824 case CODE_FOR_msa_fill_b
:
16825 case CODE_FOR_msa_fill_h
:
16826 case CODE_FOR_msa_fill_w
:
16827 case CODE_FOR_msa_fill_d
:
16828 /* Map the built-ins to vector fill operations. We need fix up the mode
16829 for the element being inserted. */
16830 gcc_assert (has_target_p
&& nops
== 2);
16831 imode
= GET_MODE_INNER (ops
[0].mode
);
16832 ops
[1].value
= lowpart_subreg (imode
, ops
[1].value
, ops
[1].mode
);
16833 ops
[1].mode
= imode
;
16836 case CODE_FOR_msa_ilvl_b
:
16837 case CODE_FOR_msa_ilvl_h
:
16838 case CODE_FOR_msa_ilvl_w
:
16839 case CODE_FOR_msa_ilvl_d
:
16840 case CODE_FOR_msa_ilvr_b
:
16841 case CODE_FOR_msa_ilvr_h
:
16842 case CODE_FOR_msa_ilvr_w
:
16843 case CODE_FOR_msa_ilvr_d
:
16844 case CODE_FOR_msa_ilvev_b
:
16845 case CODE_FOR_msa_ilvev_h
:
16846 case CODE_FOR_msa_ilvev_w
:
16847 case CODE_FOR_msa_ilvod_b
:
16848 case CODE_FOR_msa_ilvod_h
:
16849 case CODE_FOR_msa_ilvod_w
:
16850 case CODE_FOR_msa_pckev_b
:
16851 case CODE_FOR_msa_pckev_h
:
16852 case CODE_FOR_msa_pckev_w
:
16853 case CODE_FOR_msa_pckod_b
:
16854 case CODE_FOR_msa_pckod_h
:
16855 case CODE_FOR_msa_pckod_w
:
16856 /* Swap the operands 1 and 2 for interleave operations. Built-ins follow
16857 convention of ISA, which have op1 as higher component and op2 as lower
16858 component. However, the VEC_PERM op in tree and vec_concat in RTL
16859 expects first operand to be lower component, because of which this
16860 swap is needed for builtins. */
16861 gcc_assert (has_target_p
&& nops
== 3);
16862 std::swap (ops
[1], ops
[2]);
16865 case CODE_FOR_msa_maddv_b
:
16866 case CODE_FOR_msa_maddv_h
:
16867 case CODE_FOR_msa_maddv_w
:
16868 case CODE_FOR_msa_maddv_d
:
16869 case CODE_FOR_msa_fmadd_w
:
16870 case CODE_FOR_msa_fmadd_d
:
16871 case CODE_FOR_msa_fmsub_w
:
16872 case CODE_FOR_msa_fmsub_d
:
16873 /* fma(a, b, c) results into (a * b + c), however builtin_msa_fmadd expects
16874 it to be (a + b * c). Swap the 1st and 3rd operands. */
16875 std::swap (ops
[1], ops
[3]);
16878 case CODE_FOR_msa_slli_b
:
16879 case CODE_FOR_msa_slli_h
:
16880 case CODE_FOR_msa_slli_w
:
16881 case CODE_FOR_msa_slli_d
:
16882 case CODE_FOR_msa_srai_b
:
16883 case CODE_FOR_msa_srai_h
:
16884 case CODE_FOR_msa_srai_w
:
16885 case CODE_FOR_msa_srai_d
:
16886 case CODE_FOR_msa_srli_b
:
16887 case CODE_FOR_msa_srli_h
:
16888 case CODE_FOR_msa_srli_w
:
16889 case CODE_FOR_msa_srli_d
:
16890 gcc_assert (has_target_p
&& nops
== 3);
16891 if (CONST_INT_P (ops
[2].value
))
16894 rangehi
= GET_MODE_UNIT_BITSIZE (ops
[0].mode
) - 1;
16895 if (IN_RANGE (INTVAL (ops
[2].value
), rangelo
, rangehi
))
16897 ops
[2].mode
= ops
[0].mode
;
16898 ops
[2].value
= mips_gen_const_int_vector (ops
[2].mode
,
16899 INTVAL (ops
[2].value
));
16906 case CODE_FOR_msa_insert_b
:
16907 case CODE_FOR_msa_insert_h
:
16908 case CODE_FOR_msa_insert_w
:
16909 case CODE_FOR_msa_insert_d
:
16910 /* Map the built-ins to insert operations. We need to swap operands,
16911 fix up the mode for the element being inserted, and generate
16912 a bit mask for vec_merge. */
16913 gcc_assert (has_target_p
&& nops
== 4);
16914 std::swap (ops
[1], ops
[2]);
16915 std::swap (ops
[1], ops
[3]);
16916 imode
= GET_MODE_INNER (ops
[0].mode
);
16917 ops
[1].value
= lowpart_subreg (imode
, ops
[1].value
, ops
[1].mode
);
16918 ops
[1].mode
= imode
;
16920 rangehi
= GET_MODE_NUNITS (ops
[0].mode
) - 1;
16921 if (CONST_INT_P (ops
[3].value
)
16922 && IN_RANGE (INTVAL (ops
[3].value
), rangelo
, rangehi
))
16923 ops
[3].value
= GEN_INT (1 << INTVAL (ops
[3].value
));
16928 case CODE_FOR_msa_insve_b
:
16929 case CODE_FOR_msa_insve_h
:
16930 case CODE_FOR_msa_insve_w
:
16931 case CODE_FOR_msa_insve_d
:
16932 /* Map the built-ins to element insert operations. We need to swap
16933 operands and generate a bit mask. */
16934 gcc_assert (has_target_p
&& nops
== 4);
16935 std::swap (ops
[1], ops
[2]);
16936 std::swap (ops
[1], ops
[3]);
16938 rangehi
= GET_MODE_NUNITS (ops
[0].mode
) - 1;
16939 if (CONST_INT_P (ops
[3].value
)
16940 && IN_RANGE (INTVAL (ops
[3].value
), rangelo
, rangehi
))
16941 ops
[3].value
= GEN_INT (1 << INTVAL (ops
[3].value
));
16946 case CODE_FOR_msa_shf_b
:
16947 case CODE_FOR_msa_shf_h
:
16948 case CODE_FOR_msa_shf_w
:
16949 case CODE_FOR_msa_shf_w_f
:
16950 gcc_assert (has_target_p
&& nops
== 3);
16951 ops
[2].value
= mips_gen_const_int_vector_shuffle (ops
[0].mode
,
16952 INTVAL (ops
[2].value
));
16955 case CODE_FOR_msa_vshf_b
:
16956 case CODE_FOR_msa_vshf_h
:
16957 case CODE_FOR_msa_vshf_w
:
16958 case CODE_FOR_msa_vshf_d
:
16959 gcc_assert (has_target_p
&& nops
== 4);
16960 std::swap (ops
[1], ops
[3]);
16963 case CODE_FOR_msa_dpadd_s_w
:
16964 case CODE_FOR_msa_dpadd_s_h
:
16965 case CODE_FOR_msa_dpadd_s_d
:
16966 case CODE_FOR_msa_dpadd_u_w
:
16967 case CODE_FOR_msa_dpadd_u_h
:
16968 case CODE_FOR_msa_dpadd_u_d
:
16969 case CODE_FOR_msa_dpsub_s_w
:
16970 case CODE_FOR_msa_dpsub_s_h
:
16971 case CODE_FOR_msa_dpsub_s_d
:
16972 case CODE_FOR_msa_dpsub_u_w
:
16973 case CODE_FOR_msa_dpsub_u_h
:
16974 case CODE_FOR_msa_dpsub_u_d
:
16975 /* Force the operands which correspond to the same in-out register
16976 to have the same pseudo assigned to them. If the input operand
16977 is not REG, create one for it. */
16978 if (!REG_P (ops
[1].value
))
16979 ops
[1].value
= copy_to_mode_reg (ops
[1].mode
, ops
[1].value
);
16980 create_output_operand (&ops
[0], ops
[1].value
, ops
[1].mode
);
16987 if (error_opno
!= 0)
16989 error ("argument %d to the built-in must be a constant"
16990 " in range %d to %d", error_opno
, rangelo
, rangehi
);
16991 return has_target_p
? gen_reg_rtx (ops
[0].mode
) : const0_rtx
;
16993 else if (!maybe_expand_insn (icode
, nops
, ops
))
16995 error ("invalid argument to built-in function");
16996 return has_target_p
? gen_reg_rtx (ops
[0].mode
) : const0_rtx
;
16998 return has_target_p
? ops
[0].value
: const0_rtx
;
17001 /* Expand a floating-point comparison for built-in function call EXP.
17002 The first NARGS arguments are the values to be compared. ICODE is
17003 the .md pattern that does the comparison and COND is the condition
17004 that is being tested. Return an rtx for the result. */
/* NOTE(review): the declarations of OPNO, ARGNO and OUTPUT live on lines
   elided from this extraction; OPNO counts operands filled so far and
   OUTPUT is the FCC register allocated below — confirm against the
   upstream source. */
17007 mips_expand_builtin_compare_1 (enum insn_code icode
,
17008 enum mips_fp_condition cond
,
17009 tree exp
, int nargs
)
17011 struct expand_operand ops
[MAX_RECOG_OPERANDS
];
17015 /* The instruction should have a target operand, an operand for each
17016 argument, and an operand for COND. */
17017 gcc_assert (nargs
+ 2 == insn_data
[(int) icode
].n_generator_args
);
/* OUTPUT is a condition-code (FCC) register in the mode the pattern's
   operand 0 expects. */
17019 output
= mips_allocate_fcc (insn_data
[(int) icode
].operand
[0].mode
);
17021 create_fixed_operand (&ops
[opno
++], output
);
17022 for (argno
= 0; argno
< nargs
; argno
++)
17023 mips_prepare_builtin_arg (&ops
[opno
++], exp
, argno
);
/* The condition code is passed as the final, integer, operand.  */
17024 create_integer_operand (&ops
[opno
++], (int) cond
);
17025 return mips_expand_builtin_insn (icode
, opno
, ops
, true);
17028 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
17029 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
17030 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
17031 suggests a good place to put the result. */
/* NOTE(review): the HAS_TARGET_P parameter and the OPNO/ARGNO declarations
   are on lines elided from this extraction — confirm upstream. */
17034 mips_expand_builtin_direct (enum insn_code icode
, rtx target
, tree exp
,
17037 struct expand_operand ops
[MAX_RECOG_OPERANDS
];
17040 /* Map any target to operand 0. */
17043 create_output_operand (&ops
[opno
++], target
, TYPE_MODE (TREE_TYPE (exp
)));
17045 /* Map the arguments to the other operands. */
/* Sanity check: every generator argument of the pattern must be
   accounted for by the target operand plus the call's arguments.  */
17046 gcc_assert (opno
+ call_expr_nargs (exp
)
17047 == insn_data
[icode
].n_generator_args
);
17048 for (argno
= 0; argno
< call_expr_nargs (exp
); argno
++)
17049 mips_prepare_builtin_arg (&ops
[opno
++], exp
, argno
);
17051 return mips_expand_builtin_insn (icode
, opno
, ops
, has_target_p
);
17054 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
17055 function; TYPE says which. EXP is the CALL_EXPR that calls the
17056 function, ICODE is the instruction that should be used to compare
17057 the first two arguments, and COND is the condition it should test.
17058 TARGET, if nonnull, suggests a good place to put the result. */
17061 mips_expand_builtin_movtf (enum mips_builtin_type type
,
17062 enum insn_code icode
, enum mips_fp_condition cond
,
17063 rtx target
, tree exp
)
17065 struct expand_operand ops
[4];
/* Arguments 0 and 1 are the values being compared; the comparison
   result (an FCC register) becomes the fixed operand 3 below.  */
17068 cmp_result
= mips_expand_builtin_compare_1 (icode
, cond
, exp
, 2);
17069 create_output_operand (&ops
[0], target
, TYPE_MODE (TREE_TYPE (exp
)));
/* MOVT and MOVF use the same pattern; they differ only in which of
   arguments 2 and 3 is routed to which operand.  */
17070 if (type
== MIPS_BUILTIN_MOVT
)
17072 mips_prepare_builtin_arg (&ops
[2], exp
, 2);
17073 mips_prepare_builtin_arg (&ops
[1], exp
, 3);
17077 mips_prepare_builtin_arg (&ops
[1], exp
, 2);
17078 mips_prepare_builtin_arg (&ops
[2], exp
, 3);
17080 create_fixed_operand (&ops
[3], cmp_result
);
17081 return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps
,
17085 /* Expand an MSA built-in for a compare and branch instruction specified by
17086 ICODE, set a general-purpose register to 1 if the branch was taken,
/* (Rest of the original comment — "...otherwise set it to 0" and the
   declarations of CBRANCH and CMP_RESULT — is on lines elided from this
   extraction.)  */
17090 mips_expand_builtin_msa_test_branch (enum insn_code icode
, tree exp
)
17092 struct expand_operand ops
[3];
17094 rtx_code_label
*true_label
, *done_label
;
17097 true_label
= gen_label_rtx ();
17098 done_label
= gen_label_rtx ();
/* Operand 0 is the branch target, operand 1 the vector being tested,
   operand 2 the (fixed) fall-through value.  */
17100 create_input_operand (&ops
[0], true_label
, TYPE_MODE (TREE_TYPE (exp
)));
17101 mips_prepare_builtin_arg (&ops
[1], exp
, 0);
17102 create_fixed_operand (&ops
[2], const0_rtx
);
17104 /* Make sure that the operand 1 is a REG. */
17105 if (GET_CODE (ops
[1].value
) != REG
)
17106 ops
[1].value
= force_reg (ops
[1].mode
, ops
[1].value
);
17108 if ((cbranch
= maybe_gen_insn (icode
, 3, ops
)) == NULL_RTX
)
17109 error ("failed to expand built-in function");
17111 cmp_result
= gen_reg_rtx (SImode
);
17113 /* First assume that CMP_RESULT is false. */
17114 mips_emit_move (cmp_result
, const0_rtx
);
17116 /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise. */
17117 emit_jump_insn (cbranch
);
17118 emit_jump_insn (gen_jump (done_label
));
17121 /* Set CMP_RESULT to true if the branch was taken. */
17122 emit_label (true_label
);
17123 mips_emit_move (cmp_result
, const1_rtx
);
17125 emit_label (done_label
);
17129 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
17130 into TARGET otherwise. Return TARGET. */
/* Implemented as a small diamond: store the false value, conditionally
   jump over a re-store of the true value.  */
17133 mips_builtin_branch_and_move (rtx condition
, rtx target
,
17134 rtx value_if_true
, rtx value_if_false
)
17136 rtx_code_label
*true_label
, *done_label
;
17138 true_label
= gen_label_rtx ();
17139 done_label
= gen_label_rtx ();
17141 /* First assume that CONDITION is false. */
17142 mips_emit_move (target
, value_if_false
);
17144 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
17145 emit_jump_insn (gen_condjump (condition
, true_label
));
17146 emit_jump_insn (gen_jump (done_label
));
17149 /* Fix TARGET if CONDITION is true. */
17150 emit_label (true_label
);
17151 mips_emit_move (target
, value_if_true
);
17153 emit_label (done_label
);
17157 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
17158 the CALL_EXPR that calls the function, ICODE is the code of the
17159 comparison instruction, and COND is the condition it should test.
17160 TARGET, if nonnull, suggests a good place to put the boolean result. */
17163 mips_expand_builtin_compare (enum mips_builtin_type builtin_type
,
17164 enum insn_code icode
, enum mips_fp_condition cond
,
17165 rtx target
, tree exp
)
17167 rtx offset
, condition
, cmp_result
;
/* The result is always an SImode boolean, regardless of TARGET's mode.  */
17169 if (target
== 0 || GET_MODE (target
) != SImode
)
17170 target
= gen_reg_rtx (SImode
);
17171 cmp_result
= mips_expand_builtin_compare_1 (icode
, cond
, exp
,
17172 call_expr_nargs (exp
));
17174 /* If the comparison sets more than one register, we define the result
17175 to be 0 if all registers are false and -1 if all registers are true.
17176 The value of the complete result is indeterminate otherwise. */
17177 switch (builtin_type
)
/* CMP_ALL: result != -1 means "not all true", so branch_and_move's
   true/false values are inverted relative to the other cases.  */
17179 case MIPS_BUILTIN_CMP_ALL
:
17180 condition
= gen_rtx_NE (VOIDmode
, cmp_result
, constm1_rtx
);
17181 return mips_builtin_branch_and_move (condition
, target
,
17182 const0_rtx
, const1_rtx
);
/* CMP_UPPER/CMP_LOWER test one half of a paired condition code;
   OFFSET selects which half (1 for UPPER, 0 for LOWER).  */
17184 case MIPS_BUILTIN_CMP_UPPER
:
17185 case MIPS_BUILTIN_CMP_LOWER
:
17186 offset
= GEN_INT (builtin_type
== MIPS_BUILTIN_CMP_UPPER
);
17187 condition
= gen_single_cc (cmp_result
, offset
);
17188 return mips_builtin_branch_and_move (condition
, target
,
17189 const1_rtx
, const0_rtx
);
/* Default (CMP_ANY/CMP_SINGLE): any nonzero comparison result counts
   as true.  */
17192 condition
= gen_rtx_NE (VOIDmode
, cmp_result
, const0_rtx
);
17193 return mips_builtin_branch_and_move (condition
, target
,
17194 const1_rtx
, const0_rtx
);
17198 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
17199 if nonnull, suggests a good place to put the boolean result. */
17202 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type
, rtx target
)
17204 rtx condition
, cmp_result
;
17207 if (target
== 0 || GET_MODE (target
) != SImode
)
17208 target
= gen_reg_rtx (SImode
);
/* The DSP "pos" field lives in the control register CCDSP_PO_REGNUM.  */
17210 cmp_result
= gen_rtx_REG (CCDSPmode
, CCDSP_PO_REGNUM
);
/* NOTE(review): the assignments to CMP_VALUE (32 for BPOSGE32, another
   value otherwise) are on lines elided from this extraction — confirm
   against the upstream source.  */
17212 if (builtin_type
== MIPS_BUILTIN_BPOSGE32
)
17217 condition
= gen_rtx_GE (VOIDmode
, cmp_result
, GEN_INT (cmp_value
));
17218 return mips_builtin_branch_and_move (condition
, target
,
17219 const1_rtx
, const0_rtx
);
17222 /* Implement TARGET_EXPAND_BUILTIN. */
/* Dispatch a MIPS built-in call EXP to the matching expander, using the
   mips_builtins[] descriptor selected by the function's machine-specific
   code.  Returns the result rtx, or a zero constant when the call is
   unsupported or has no usable result.  */
17225 mips_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
17226 machine_mode mode
, int ignore
)
17229 unsigned int fcode
, avail
;
17230 const struct mips_builtin_description
*d
;
17232 fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
17233 fcode
= DECL_MD_FUNCTION_CODE (fndecl
);
17234 gcc_assert (fcode
< ARRAY_SIZE (mips_builtins
));
17235 d
= &mips_builtins
[fcode
];
17236 avail
= d
->avail ();
17237 gcc_assert (avail
!= 0);
/* Most builtins are not usable from MIPS16 code; diagnose and return a
   harmless constant rather than ICEing.  */
17238 if (TARGET_MIPS16
&& !(avail
& BUILTIN_AVAIL_MIPS16
))
17240 error ("built-in function %qE not supported for MIPS16",
17241 DECL_NAME (fndecl
));
17242 return ignore
? const0_rtx
: CONST0_RTX (mode
);
17244 switch (d
->builtin_type
)
17246 case MIPS_BUILTIN_DIRECT
:
17247 return mips_expand_builtin_direct (d
->icode
, target
, exp
, true);
17249 case MIPS_BUILTIN_DIRECT_NO_TARGET
:
17250 return mips_expand_builtin_direct (d
->icode
, target
, exp
, false);
17252 case MIPS_BUILTIN_MOVT
:
17253 case MIPS_BUILTIN_MOVF
:
17254 return mips_expand_builtin_movtf (d
->builtin_type
, d
->icode
,
17255 d
->cond
, target
, exp
);
17257 case MIPS_BUILTIN_CMP_ANY
:
17258 case MIPS_BUILTIN_CMP_ALL
:
17259 case MIPS_BUILTIN_CMP_UPPER
:
17260 case MIPS_BUILTIN_CMP_LOWER
:
17261 case MIPS_BUILTIN_CMP_SINGLE
:
17262 return mips_expand_builtin_compare (d
->builtin_type
, d
->icode
,
17263 d
->cond
, target
, exp
);
17265 case MIPS_BUILTIN_MSA_TEST_BRANCH
:
17266 return mips_expand_builtin_msa_test_branch (d
->icode
, exp
);
17268 case MIPS_BUILTIN_BPOSGE32
:
17269 return mips_expand_builtin_bposge (d
->builtin_type
, target
);
17271 gcc_unreachable ();
17274 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
17275 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
/* NOTE(review): the VALUE and MODE members (and the closing brace) are
   on lines elided from this extraction.  */
17276 struct mips16_constant
{
/* Next entry in the pool's singly-linked list, kept sorted by
   increasing mode size (see mips16_add_constant).  */
17277 struct mips16_constant
*next
;
17279 rtx_code_label
*label
;
17283 /* Information about an incomplete MIPS16 constant pool. FIRST is the
17284 first constant, HIGHEST_ADDRESS is the highest address that the first
17285 byte of the pool can have, and INSN_ADDRESS is the current instruction
/* NOTE(review): the INSN_ADDRESS member (and the closing brace) are on
   lines elided from this extraction.  */
17287 struct mips16_constant_pool
{
17288 struct mips16_constant
*first
;
17289 int highest_address
;
17293 /* Add constant VALUE to POOL and return its label. MODE is the
17294 value's mode (used for CONST_INTs, etc.). */
17296 static rtx_code_label
*
17297 mips16_add_constant (struct mips16_constant_pool
*pool
,
17298 rtx value
, machine_mode mode
)
17300 struct mips16_constant
**p
, *c
;
17301 bool first_of_size_p
;
17303 /* See whether the constant is already in the pool. If so, return the
17304 existing label, otherwise leave P pointing to the place where the
17305 constant should be added.
17307 Keep the pool sorted in increasing order of mode size so that we can
17308 reduce the number of alignments needed. */
17309 first_of_size_p
= true;
/* P walks the list by address-of-link so a new node can be spliced in
   without a separate "previous" pointer.  */
17310 for (p
= &pool
->first
; *p
!= 0; p
= &(*p
)->next
)
17312 if (mode
== (*p
)->mode
&& rtx_equal_p (value
, (*p
)->value
))
17313 return (*p
)->label
;
17314 if (GET_MODE_SIZE (mode
) < GET_MODE_SIZE ((*p
)->mode
))
17316 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE ((*p
)->mode
))
17317 first_of_size_p
= false;
17320 /* In the worst case, the constant needed by the earliest instruction
17321 will end up at the end of the pool. The entire pool must then be
17322 accessible from that instruction.
17324 When adding the first constant, set the pool's highest address to
17325 the address of the first out-of-range byte. Adjust this address
17326 downwards each time a new constant is added. */
17327 if (pool
->first
== 0)
17328 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
17329 of the instruction with the lowest two bits clear. The base PC
17330 value for LDPC has the lowest three bits clear. Assume the worst
17331 case here; namely that the PC-relative instruction occupies the
17332 last 2 bytes in an aligned word. */
17333 pool
->highest_address
= pool
->insn_address
- (UNITS_PER_WORD
- 2) + 0x8000;
17334 pool
->highest_address
-= GET_MODE_SIZE (mode
);
17335 if (first_of_size_p
)
17336 /* Take into account the worst possible padding due to alignment. */
17337 pool
->highest_address
-= GET_MODE_SIZE (mode
) - 1;
17339 /* Create a new entry. */
/* NOTE(review): the lines initialising C->value, C->mode and C->next,
   and linking *p = c, are elided from this extraction — confirm
   upstream.  */
17340 c
= XNEW (struct mips16_constant
);
17343 c
->label
= gen_label_rtx ();
17350 /* Output constant VALUE after instruction INSN and return the last
17351 instruction emitted. MODE is the mode of the constant. */
17354 mips16_emit_constants_1 (machine_mode mode
, rtx value
, rtx_insn
*insn
)
/* Integer and fixed-point constants are emitted via consttable_int with
   an explicit byte size.  */
17356 if (SCALAR_INT_MODE_P (mode
) || ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
17358 rtx size
= GEN_INT (GET_MODE_SIZE (mode
));
17359 return emit_insn_after (gen_consttable_int (value
, size
), insn
);
17362 if (SCALAR_FLOAT_MODE_P (mode
))
17363 return emit_insn_after (gen_consttable_float (value
), insn
);
/* Vector constants recurse element-by-element in inner-element mode.  */
17365 if (VECTOR_MODE_P (mode
))
17369 for (i
= 0; i
< CONST_VECTOR_NUNITS (value
); i
++)
17370 insn
= mips16_emit_constants_1 (GET_MODE_INNER (mode
),
17371 CONST_VECTOR_ELT (value
, i
), insn
);
17375 gcc_unreachable ();
17378 /* Dump out the constants in CONSTANTS after INSN. Record the initial
17379 label number in the `consttable' and `consttable_end' insns emitted
17380 at the beginning and the end of the constant pool respectively, so
17381 that individual pools can be uniquely marked as data for the purpose
/* (End of this comment, the declaration of ALIGN, and the frees of the
   list nodes are on lines elided from this extraction.)  */
17385 mips16_emit_constants (struct mips16_constant
*constants
, rtx_insn
*insn
)
17387 int label_num
= constants
? CODE_LABEL_NUMBER (constants
->label
) : 0;
17388 struct mips16_constant
*c
, *next
;
17393 insn
= emit_insn_after (gen_consttable (GEN_INT (label_num
)), insn
);
17394 for (c
= constants
; c
!= NULL
; c
= next
)
17396 /* If necessary, increase the alignment of PC. */
/* Entries are sorted by increasing size (see mips16_add_constant), so
   alignment only ever needs to grow.  */
17397 if (align
< GET_MODE_SIZE (c
->mode
))
17399 int align_log
= floor_log2 (GET_MODE_SIZE (c
->mode
));
17400 insn
= emit_insn_after (gen_align (GEN_INT (align_log
)), insn
);
17402 align
= GET_MODE_SIZE (c
->mode
);
17404 insn
= emit_label_after (c
->label
, insn
);
17405 insn
= mips16_emit_constants_1 (c
->mode
, c
->value
, insn
);
17411 insn
= emit_insn_after (gen_consttable_end (GEN_INT (label_num
)), insn
);
/* A barrier stops the pool being treated as fall-through code.  */
17413 emit_barrier_after (insn
);
17416 /* Return the length of instruction INSN. */
/* Jump tables have no length attribute; compute their size from the
   table mode and element count instead.  */
17419 mips16_insn_length (rtx_insn
*insn
)
17421 if (JUMP_TABLE_DATA_P (insn
))
17423 rtx body
= PATTERN (insn
);
17424 if (GET_CODE (body
) == ADDR_VEC
)
17425 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 0);
/* ADDR_DIFF_VEC keeps its labels in operand vector 1.  */
17426 else if (GET_CODE (body
) == ADDR_DIFF_VEC
)
17427 return GET_MODE_SIZE (GET_MODE (body
)) * XVECLEN (body
, 1);
17429 gcc_unreachable ();
17431 return get_attr_length (insn
);
17434 /* If *X is a symbolic constant that refers to the constant pool, add
17435 the constant to POOL and rewrite *X to use the constant's label. */
17438 mips16_rewrite_pool_constant (struct mips16_constant_pool
*pool
, rtx
*x
)
17441 rtx_code_label
*label
;
/* Split *X into BASE + OFFSET so pool references hidden inside a CONST
   plus are also caught.  */
17443 split_const (*x
, &base
, &offset
);
17444 if (GET_CODE (base
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (base
))
17446 label
= mips16_add_constant (pool
, copy_rtx (get_pool_constant (base
)),
17447 get_pool_mode (base
));
17448 base
= gen_rtx_LABEL_REF (Pmode
, label
);
/* The new reference is PC-relative to the in-text pool label.  */
17449 *x
= mips_unspec_address_offset (base
, offset
, SYMBOL_PC_RELATIVE
);
17453 /* Rewrite INSN so that constant pool references refer to the constant's
/* (End of this comment — "...label instead" — is on a line elided from
   this extraction.)  */
17457 mips16_rewrite_pool_refs (rtx_insn
*insn
, struct mips16_constant_pool
*pool
)
17459 subrtx_ptr_iterator::array_type array
;
17460 FOR_EACH_SUBRTX_PTR (iter
, array
, &PATTERN (insn
), ALL
)
/* Operands that must live in memory are first forced into the pool,
   then rewritten like any other pool reference.  */
17464 if (force_to_mem_operand (*loc
, Pmode
))
17466 rtx mem
= force_const_mem (GET_MODE (*loc
), *loc
);
17467 validate_change (insn
, loc
, mem
, false);
17472 mips16_rewrite_pool_constant (pool
, &XEXP (*loc
, 0));
17473 iter
.skip_subrtxes ();
17477 if (TARGET_MIPS16_TEXT_LOADS
)
17478 mips16_rewrite_pool_constant (pool
, loc
);
17479 if (GET_CODE (*loc
) == CONST
17480 /* Don't rewrite the __mips16_rdwr symbol. */
17481 || (GET_CODE (*loc
) == UNSPEC
17482 && XINT (*loc
, 1) == UNSPEC_TLS_GET_TP
))
17483 iter
.skip_subrtxes ();
17488 /* Return whether CFG is used in mips_reorg. */
/* True when either the R10K cache-barrier pass or -mrelax-pic-calls
   needs the CFG kept up to date during machine reorg.  */
17491 mips_cfg_in_reorg (void)
17493 return (mips_r10k_cache_barrier
!= R10K_CACHE_BARRIER_NONE
17494 || TARGET_RELAX_PIC_CALLS
);
17497 /* Build MIPS16 constant pools. Split the instructions if SPLIT_P,
17498 otherwise assume that they are already split. */
17501 mips16_lay_out_constants (bool split_p
)
17503 struct mips16_constant_pool pool
;
17504 rtx_insn
*insn
, *barrier
;
/* Nothing to do unless MIPS16 PC-relative loads are in use.  */
17506 if (!TARGET_MIPS16_PCREL_LOADS
)
17511 if (mips_cfg_in_reorg ())
17512 split_all_insns ();
17514 split_all_insns_noflow ();
17517 memset (&pool
, 0, sizeof (pool
));
17518 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
17520 /* Rewrite constant pool references in INSN. */
17521 if (USEFUL_INSN_P (insn
))
17522 mips16_rewrite_pool_refs (insn
, &pool
);
/* Track the running byte address so pool range checks work.  */
17524 pool
.insn_address
+= mips16_insn_length (insn
);
17526 if (pool
.first
!= NULL
)
17528 /* If there are no natural barriers between the first user of
17529 the pool and the highest acceptable address, we'll need to
17530 create a new instruction to jump around the constant pool.
17531 In the worst case, this instruction will be 4 bytes long.
17533 If it's too late to do this transformation after INSN,
17534 do it immediately before INSN. */
17535 if (barrier
== 0 && pool
.insn_address
+ 4 > pool
.highest_address
)
17537 rtx_code_label
*label
;
17540 label
= gen_label_rtx ();
17542 jump
= emit_jump_insn_before (gen_jump (label
), insn
);
17543 JUMP_LABEL (jump
) = label
;
17544 LABEL_NUSES (label
) = 1;
17545 barrier
= emit_barrier_after (jump
);
17547 emit_label_after (label
, barrier
);
17548 pool
.insn_address
+= 4;
17551 /* See whether the constant pool is now out of range of the first
17552 user. If so, output the constants after the previous barrier.
17553 Note that any instructions between BARRIER and INSN (inclusive)
17554 will use negative offsets to refer to the pool. */
17555 if (pool
.insn_address
> pool
.highest_address
)
17557 mips16_emit_constants (pool
.first
, barrier
);
/* A pre-existing barrier is a free place to dump the pool later.  */
17561 else if (BARRIER_P (insn
))
/* Flush anything still pending at end of function.  */
17565 mips16_emit_constants (pool
.first
, get_last_insn ());
17568 /* Return true if it is worth r10k_simplify_address's while replacing
17569 an address with X. We are looking for constants, and for addresses
17570 at a known offset from the incoming stack pointer. */
17573 r10k_simplified_address_p (rtx x
)
/* NOTE(review): the body of this `if' (presumably stripping the
   constant offset so the base is tested) is on a line elided from this
   extraction — confirm upstream.  */
17575 if (GET_CODE (x
) == PLUS
&& CONST_INT_P (XEXP (x
, 1)))
17577 return x
== virtual_incoming_args_rtx
|| CONSTANT_P (x
);
17580 /* X is an expression that appears in INSN. Try to use the UD chains
17581 to simplify it, returning the simplified form on success and the
17582 original form otherwise. Replace the incoming value of $sp with
17583 virtual_incoming_args_rtx (which should never occur in X otherwise). */
17586 r10k_simplify_address (rtx x
, rtx_insn
*insn
)
17588 rtx newx
, op0
, op1
, set
, note
;
17589 rtx_insn
*def_insn
;
17591 struct df_link
*defs
;
/* Unary case: recursively simplify the single operand and rebuild.  */
17596 op0
= r10k_simplify_address (XEXP (x
, 0), insn
);
17597 if (op0
!= XEXP (x
, 0))
17598 newx
= simplify_gen_unary (GET_CODE (x
), GET_MODE (x
),
17599 op0
, GET_MODE (XEXP (x
, 0)));
17601 else if (BINARY_P (x
))
17603 op0
= r10k_simplify_address (XEXP (x
, 0), insn
);
17604 op1
= r10k_simplify_address (XEXP (x
, 1), insn
);
17605 if (op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1))
17606 newx
= simplify_gen_binary (GET_CODE (x
), GET_MODE (x
), op0
, op1
);
17608 else if (GET_CODE (x
) == LO_SUM
)
17610 /* LO_SUMs can be offset from HIGHs, if we know they won't
17611 overflow. See mips_classify_address for the rationale behind
17613 op0
= r10k_simplify_address (XEXP (x
, 0), insn
);
17614 if (GET_CODE (op0
) == HIGH
)
17615 newx
= XEXP (x
, 1);
17617 else if (REG_P (x
))
17619 /* Uses are recorded by regno_reg_rtx, not X itself. */
17620 use
= df_find_use (insn
, regno_reg_rtx
[REGNO (x
)]);
17622 defs
= DF_REF_CHAIN (use
);
17624 /* Require a single definition. */
17625 if (defs
&& defs
->next
== NULL
)
/* Artificial defs have no defining insn; the only interesting one is
   the entry-block definition of $sp.  */
17628 if (DF_REF_IS_ARTIFICIAL (def
))
17630 /* Replace the incoming value of $sp with
17631 virtual_incoming_args_rtx. */
17632 if (x
== stack_pointer_rtx
17633 && DF_REF_BB (def
) == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
17634 newx
= virtual_incoming_args_rtx
;
/* The definition must dominate the use for its value to be valid
   here.  */
17636 else if (dominated_by_p (CDI_DOMINATORS
, DF_REF_BB (use
),
17639 /* Make sure that DEF_INSN is a single set of REG. */
17640 def_insn
= DF_REF_INSN (def
);
17641 if (NONJUMP_INSN_P (def_insn
))
17643 set
= single_set (def_insn
);
17644 if (set
&& rtx_equal_p (SET_DEST (set
), x
))
17646 /* Prefer to use notes, since the def-use chains
17647 are often shorter. */
17648 note
= find_reg_equal_equiv_note (def_insn
);
17650 newx
= XEXP (note
, 0);
17652 newx
= SET_SRC (set
);
/* Recurse relative to the defining insn, not INSN.  */
17653 newx
= r10k_simplify_address (newx
, def_insn
);
/* Only accept the simplification if it is one we can reason about;
   otherwise fall back to the original X.  */
17659 if (newx
&& r10k_simplified_address_p (newx
))
17664 /* Return true if ADDRESS is known to be an uncached address
17665 on R10K systems. */
17668 r10k_uncached_address_p (unsigned HOST_WIDE_INT address
)
17670 unsigned HOST_WIDE_INT upper
;
17672 /* Check for KSEG1. */
/* Single unsigned range test: 0xa0000000 <= address < 0xc0000000
   becomes address + 0x60000000 < 0x20000000 after wraparound.  */
17673 if (address
+ 0x60000000 < 0x20000000)
17676 /* Check for uncached XKPHYS addresses. */
17677 if (Pmode
== DImode
)
/* Mask out the cache-algorithm "don't care" bits before comparing the
   XKPHYS segment selector.  */
17679 upper
= (address
>> 40) & 0xf9ffff;
17680 if (upper
== 0x900000 || upper
== 0xb80000)
17686 /* Return true if we can prove that an access to address X in instruction
17687 INSN would be safe from R10K speculation. This X is a general
17688 expression; it might not be a legitimate address. */
17691 r10k_safe_address_p (rtx x
, rtx_insn
*insn
)
17694 HOST_WIDE_INT offset_val
;
17696 x
= r10k_simplify_address (x
, insn
);
17698 /* Check for references to the stack frame. It doesn't really matter
17699 how much of the frame has been allocated at INSN; -mr10k-cache-barrier
17700 allows us to assume that accesses to any part of the eventual frame
17701 is safe from speculation at any point in the function. */
17702 mips_split_plus (x
, &base
, &offset_val
);
/* OFFSET_VAL is relative to the incoming arguments; the valid window
   spans from the bottom of the frame to the end of the argument area.  */
17703 if (base
== virtual_incoming_args_rtx
17704 && offset_val
>= -cfun
->machine
->frame
.total_size
17705 && offset_val
< cfun
->machine
->frame
.args_size
)
17708 /* Check for uncached addresses. */
17709 if (CONST_INT_P (x
))
17710 return r10k_uncached_address_p (INTVAL (x
))
;
17712 /* Check for accesses to a static object. */
17713 split_const (x
, &base
, &offset
);
17714 return offset_within_block_p (base
, INTVAL (offset
));
17717 /* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
17718 an in-range access to an automatic variable, or to an object with
17719 a link-time-constant address. */
17722 r10k_safe_mem_expr_p (tree expr
, unsigned HOST_WIDE_INT offset
)
17724 poly_int64 bitoffset
, bitsize
;
17725 tree inner
, var_offset
;
17727 int unsigned_p
, reverse_p
, volatile_p
;
17729 inner
= get_inner_reference (expr
, &bitsize
, &bitoffset
, &var_offset
, &mode
,
17730 &unsigned_p
, &reverse_p
, &volatile_p
);
/* Reject anything whose base is not a DECL with a known size, or whose
   offset within the base is variable.  */
17731 if (!DECL_P (inner
) || !DECL_SIZE_UNIT (inner
) || var_offset
)
/* The access is safe only if it stays inside the declared object.  */
17734 offset
+= bitoffset
/ BITS_PER_UNIT
;
17735 return offset
< tree_to_uhwi (DECL_SIZE_UNIT (inner
));
17738 /* Return true if X contains a MEM that is not safe from R10K speculation.
17739 INSN is the instruction that contains X. */
17742 r10k_needs_protection_p_1 (rtx x
, rtx_insn
*insn
)
17744 subrtx_var_iterator::array_type array
;
17745 FOR_EACH_SUBRTX_VAR (iter
, array
, x
, NONCONST
)
/* A MEM is safe if its tree-level MEM_EXPR proves an in-range access,
   or if its address can be proven safe directly.  */
17750 if ((MEM_EXPR (mem
)
17751 && MEM_OFFSET_KNOWN_P (mem
)
17752 && r10k_safe_mem_expr_p (MEM_EXPR (mem
), MEM_OFFSET (mem
)))
17753 || r10k_safe_address_p (XEXP (mem
, 0), insn
))
17754 iter
.skip_subrtxes ();
17762 /* A note_stores callback for which DATA points to an instruction pointer.
17763 If *DATA is nonnull, make it null if it X contains a MEM that is not
17764 safe from R10K speculation. */
17767 r10k_needs_protection_p_store (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
,
17770 rtx_insn
**insn_ptr
;
17772 insn_ptr
= (rtx_insn
**) data
;
/* Nulling *INSN_PTR is the "unsafe store found" signal read back by
   r10k_needs_protection_p.  */
17773 if (*insn_ptr
&& r10k_needs_protection_p_1 (x
, *insn_ptr
))
17777 /* X is the pattern of a call instruction. Return true if the call is
17778 not to a declared function. */
17781 r10k_needs_protection_p_call (const_rtx x
)
17783 subrtx_iterator::array_type array
;
17784 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
17786 const_rtx mem
= *iter
;
/* A direct call to a symbol with an associated declaration is assumed
   safe; anything else (indirect call, undeclared symbol) is not.  */
17789 const_rtx addr
= XEXP (mem
, 0);
17790 if (GET_CODE (addr
) == SYMBOL_REF
&& SYMBOL_REF_DECL (addr
))
17791 iter
.skip_subrtxes ();
17799 /* Return true if instruction INSN needs to be protected by an R10K
/* (End of this comment — "...cache barrier" — is on a line elided from
   this extraction.)  */
17803 r10k_needs_protection_p (rtx_insn
*insn
)
17806 return r10k_needs_protection_p_call (PATTERN (insn
));
/* In store-only mode, only stores need barriers; the callback nulls
   INSN when an unsafe store is found.  */
17808 if (mips_r10k_cache_barrier
== R10K_CACHE_BARRIER_STORE
)
17810 note_stores (insn
, r10k_needs_protection_p_store
, &insn
);
17811 return insn
== NULL_RTX
;
17814 return r10k_needs_protection_p_1 (PATTERN (insn
), insn
);
17817 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
17818 edge is unconditional. */
17821 r10k_protected_bb_p (basic_block bb
, sbitmap protected_bbs
)
/* Every predecessor must end in an unconditional, non-complex edge and
   itself be marked protected; otherwise speculation can reach BB.  */
17826 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
17827 if (!single_succ_p (e
->src
)
17828 || !bitmap_bit_p (protected_bbs
, e
->src
->index
)
17829 || (e
->flags
& EDGE_COMPLEX
) != 0)
17834 /* Implement -mr10k-cache-barrier= for the current function. */
17837 r10k_insert_cache_barriers (void)
17839 int *rev_post_order
;
17842 sbitmap protected_bbs
;
17843 rtx_insn
*insn
, *end
;
17844 rtx unprotected_region
;
/* MIPS16 code cannot emit the CACHE instruction used as a barrier.  */
17848 sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
17852 /* Calculate dominators. */
17853 calculate_dominance_info (CDI_DOMINATORS
);
17855 /* Bit X of PROTECTED_BBS is set if the last operation in basic block
17856 X is protected by a cache barrier. */
17857 protected_bbs
= sbitmap_alloc (last_basic_block_for_fn (cfun
));
17858 bitmap_clear (protected_bbs
);
17860 /* Iterate over the basic blocks in reverse post-order. */
/* Reverse post-order guarantees predecessors are processed before BB,
   which r10k_protected_bb_p relies on.  */
17861 rev_post_order
= XNEWVEC (int, last_basic_block_for_fn (cfun
));
17862 n
= pre_and_rev_post_order_compute (NULL
, rev_post_order
, false);
17863 for (i
= 0; i
< n
; i
++)
17865 bb
= BASIC_BLOCK_FOR_FN (cfun
, rev_post_order
[i
]);
17867 /* If this block is only reached by unconditional edges, and if the
17868 source of every edge is protected, the beginning of the block is
17870 if (r10k_protected_bb_p (bb
, protected_bbs
))
17871 unprotected_region
= NULL_RTX
;
17873 unprotected_region
= pc_rtx
;
17874 end
= NEXT_INSN (BB_END (bb
));
17876 /* UNPROTECTED_REGION is:
17878 - null if we are processing a protected region,
17879 - pc_rtx if we are processing an unprotected region but have
17880 not yet found the first instruction in it
17881 - the first instruction in an unprotected region otherwise. */
17882 for (insn
= BB_HEAD (bb
); insn
!= end
; insn
= NEXT_INSN (insn
))
17884 if (unprotected_region
&& USEFUL_INSN_P (insn
))
17886 if (recog_memoized (insn
) == CODE_FOR_mips_cache
)
17887 /* This CACHE instruction protects the following code. */
17888 unprotected_region
= NULL_RTX
;
17891 /* See if INSN is the first instruction in this
17892 unprotected region. */
17893 if (unprotected_region
== pc_rtx
)
17894 unprotected_region
= insn
;
17896 /* See if INSN needs to be protected. If so,
17897 we must insert a cache barrier somewhere between
17898 PREV_INSN (UNPROTECTED_REGION) and INSN. It isn't
17899 clear which position is better performance-wise,
17900 but as a tie-breaker, we assume that it is better
17901 to allow delay slots to be back-filled where
17902 possible, and that it is better not to insert
17903 barriers in the middle of already-scheduled code.
17904 We therefore insert the barrier at the beginning
17906 if (r10k_needs_protection_p (insn
))
17908 emit_insn_before (gen_r10k_cache_barrier (),
17909 as_a
<rtx_insn
*> (unprotected_region
));
17910 unprotected_region
= NULL_RTX
;
17916 /* The called function is not required to protect the exit path.
17917 The code that follows a call is therefore unprotected. */
17918 unprotected_region
= pc_rtx
;
17921 /* Record whether the end of this block is protected. */
17922 if (unprotected_region
== NULL_RTX
)
17923 bitmap_set_bit (protected_bbs
, bb
->index
);
17925 XDELETEVEC (rev_post_order
);
17927 sbitmap_free (protected_bbs
);
17929 free_dominance_info (CDI_DOMINATORS
);
17932 /* If INSN is a call, return the underlying CALL expr. Return NULL_RTX
17933 otherwise. If INSN has two call rtx, then store the second one in
17937 mips_call_expr_from_insn (rtx_insn
*insn
, rtx
*second_call
)
17942 if (!CALL_P (insn
))
17945 x
= PATTERN (insn
);
17946 if (GET_CODE (x
) == PARALLEL
)
17948 /* Calls returning complex values have two CALL rtx. Look for the second
17949 one here, and return it via the SECOND_CALL arg. */
17950 x2
= XVECEXP (x
, 0, 1);
17951 if (GET_CODE (x2
) == SET
)
17953 if (GET_CODE (x2
) == CALL
)
17956 x
= XVECEXP (x
, 0, 0);
17958 if (GET_CODE (x
) == SET
)
17960 gcc_assert (GET_CODE (x
) == CALL
);
17965 /* REG is set in DEF. See if the definition is one of the ways we load a
17966 register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
17967 If it is, return the symbol reference of the function, otherwise return
17970 If RECURSE_P is true, use mips_find_pic_call_symbol to interpret
17971 the values of source registers, otherwise treat such registers as
17972 having an unknown value. */
17975 mips_pic_call_symbol_from_set (df_ref def
, rtx reg
, bool recurse_p
)
17977 rtx_insn
*def_insn
;
17980 if (DF_REF_IS_ARTIFICIAL (def
))
17983 def_insn
= DF_REF_INSN (def
);
17984 set
= single_set (def_insn
);
17985 if (set
&& rtx_equal_p (SET_DEST (set
), reg
))
17987 rtx note
, src
, symbol
;
17989 /* First see whether the source is a plain symbol. This is used
17990 when calling symbols that are not lazily bound. */
17991 src
= SET_SRC (set
);
17992 if (GET_CODE (src
) == SYMBOL_REF
)
17995 /* Handle %call16 references. */
17996 symbol
= mips_strip_unspec_call (src
);
17999 gcc_assert (GET_CODE (symbol
) == SYMBOL_REF
);
18003 /* If we have something more complicated, look for a
18004 REG_EQUAL or REG_EQUIV note. */
18005 note
= find_reg_equal_equiv_note (def_insn
);
18006 if (note
&& GET_CODE (XEXP (note
, 0)) == SYMBOL_REF
)
18007 return XEXP (note
, 0);
18009 /* Follow at most one simple register copy. Such copies are
18010 interesting in cases like:
18014 locally_binding_fn (...);
18019 locally_binding_fn (...);
18021 locally_binding_fn (...);
18023 where the load of locally_binding_fn can legitimately be
18024 hoisted or shared. However, we do not expect to see complex
18025 chains of copies, so a full worklist solution to the problem
18026 would probably be overkill. */
18027 if (recurse_p
&& REG_P (src
))
18028 return mips_find_pic_call_symbol (def_insn
, src
, false);
18034 /* Find the definition of the use of REG in INSN. See if the definition
18035 is one of the ways we load a register with a symbol address for a
18036 mips_use_pic_fn_addr_reg_p call. If it is return the symbol reference
18037 of the function, otherwise return NULL_RTX. RECURSE_P is as for
18038 mips_pic_call_symbol_from_set. */
18041 mips_find_pic_call_symbol (rtx_insn
*insn
, rtx reg
, bool recurse_p
)
18044 struct df_link
*defs
;
18047 use
= df_find_use (insn
, regno_reg_rtx
[REGNO (reg
)]);
18050 defs
= DF_REF_CHAIN (use
);
18053 symbol
= mips_pic_call_symbol_from_set (defs
->ref
, reg
, recurse_p
);
18057 /* If we have more than one definition, they need to be identical. */
18058 for (defs
= defs
->next
; defs
; defs
= defs
->next
)
18062 other
= mips_pic_call_symbol_from_set (defs
->ref
, reg
, recurse_p
);
18063 if (!rtx_equal_p (symbol
, other
))
18070 /* Replace the args_size operand of the call expression CALL with the
18071 call-attribute UNSPEC and fill in SYMBOL as the function symbol. */
18074 mips_annotate_pic_call_expr (rtx call
, rtx symbol
)
18078 args_size
= XEXP (call
, 1);
18079 XEXP (call
, 1) = gen_rtx_UNSPEC (GET_MODE (args_size
),
18080 gen_rtvec (2, args_size
, symbol
),
18084 /* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See
18085 if instead of the arg_size argument it contains the call attributes. If
18086 yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function
18087 symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is
18091 mips_get_pic_call_symbol (rtx
*operands
, int args_size_opno
)
18093 rtx args_size
, symbol
;
18095 if (!TARGET_RELAX_PIC_CALLS
|| args_size_opno
== -1)
18098 args_size
= operands
[args_size_opno
];
18099 if (GET_CODE (args_size
) != UNSPEC
)
18101 gcc_assert (XINT (args_size
, 1) == UNSPEC_CALL_ATTR
);
18103 symbol
= XVECEXP (args_size
, 0, 1);
18104 gcc_assert (GET_CODE (symbol
) == SYMBOL_REF
);
18106 operands
[args_size_opno
] = symbol
;
18110 /* Use DF to annotate PIC indirect calls with the function symbol they
18114 mips_annotate_pic_calls (void)
18119 FOR_EACH_BB_FN (bb
, cfun
)
18120 FOR_BB_INSNS (bb
, insn
)
18122 rtx call
, reg
, symbol
, second_call
;
18125 call
= mips_call_expr_from_insn (insn
, &second_call
);
18128 gcc_assert (MEM_P (XEXP (call
, 0)));
18129 reg
= XEXP (XEXP (call
, 0), 0);
18133 symbol
= mips_find_pic_call_symbol (insn
, reg
, true);
18136 mips_annotate_pic_call_expr (call
, symbol
);
18138 mips_annotate_pic_call_expr (second_call
, symbol
);
18143 /* A temporary variable used by note_uses callbacks, etc. */
18144 static rtx_insn
*mips_sim_insn
;
18146 /* A structure representing the state of the processor pipeline.
18147 Used by the mips_sim_* family of functions. */
18149 /* The maximum number of instructions that can be issued in a cycle.
18150 (Caches mips_issue_rate.) */
18151 unsigned int issue_rate
;
18153 /* The current simulation time. */
18156 /* How many more instructions can be issued in the current cycle. */
18157 unsigned int insns_left
;
18159 /* LAST_SET[X].INSN is the last instruction to set register X.
18160 LAST_SET[X].TIME is the time at which that instruction was issued.
18161 INSN is null if no instruction has yet set register X. */
18165 } last_set
[FIRST_PSEUDO_REGISTER
];
18167 /* The pipeline's current DFA state. */
18171 /* Reset STATE to the initial simulation state. */
18174 mips_sim_reset (struct mips_sim
*state
)
18176 curr_state
= state
->dfa_state
;
18179 state
->insns_left
= state
->issue_rate
;
18180 memset (&state
->last_set
, 0, sizeof (state
->last_set
));
18181 state_reset (curr_state
);
18183 targetm
.sched
.init (0, false, 0);
18184 advance_state (curr_state
);
18187 /* Initialize STATE before its first use. DFA_STATE points to an
18188 allocated but uninitialized DFA state. */
18191 mips_sim_init (struct mips_sim
*state
, state_t dfa_state
)
18193 if (targetm
.sched
.init_dfa_pre_cycle_insn
)
18194 targetm
.sched
.init_dfa_pre_cycle_insn ();
18196 if (targetm
.sched
.init_dfa_post_cycle_insn
)
18197 targetm
.sched
.init_dfa_post_cycle_insn ();
18199 state
->issue_rate
= mips_issue_rate ();
18200 state
->dfa_state
= dfa_state
;
18201 mips_sim_reset (state
);
18204 /* Advance STATE by one clock cycle. */
18207 mips_sim_next_cycle (struct mips_sim
*state
)
18209 curr_state
= state
->dfa_state
;
18212 state
->insns_left
= state
->issue_rate
;
18213 advance_state (curr_state
);
18216 /* Advance simulation state STATE until instruction INSN can read
18220 mips_sim_wait_reg (struct mips_sim
*state
, rtx_insn
*insn
, rtx reg
)
18222 unsigned int regno
, end_regno
;
18224 end_regno
= END_REGNO (reg
);
18225 for (regno
= REGNO (reg
); regno
< end_regno
; regno
++)
18226 if (state
->last_set
[regno
].insn
!= 0)
18230 t
= (state
->last_set
[regno
].time
18231 + insn_latency (state
->last_set
[regno
].insn
, insn
));
18232 while (state
->time
< t
)
18233 mips_sim_next_cycle (state
);
18237 /* A note_uses callback. For each register in *X, advance simulation
18238 state DATA until mips_sim_insn can read the register's value. */
18241 mips_sim_wait_regs_1 (rtx
*x
, void *data
)
18243 subrtx_var_iterator::array_type array
;
18244 FOR_EACH_SUBRTX_VAR (iter
, array
, *x
, NONCONST
)
18246 mips_sim_wait_reg ((struct mips_sim
*) data
, mips_sim_insn
, *iter
);
18249 /* Advance simulation state STATE until all of INSN's register
18250 dependencies are satisfied. */
18253 mips_sim_wait_regs (struct mips_sim
*state
, rtx_insn
*insn
)
18255 mips_sim_insn
= insn
;
18256 note_uses (&PATTERN (insn
), mips_sim_wait_regs_1
, state
);
18259 /* Advance simulation state STATE until the units required by
18260 instruction INSN are available. */
18263 mips_sim_wait_units (struct mips_sim
*state
, rtx_insn
*insn
)
18267 tmp_state
= alloca (state_size ());
18268 while (state
->insns_left
== 0
18269 || (memcpy (tmp_state
, state
->dfa_state
, state_size ()),
18270 state_transition (tmp_state
, insn
) >= 0))
18271 mips_sim_next_cycle (state
);
18274 /* Advance simulation state STATE until INSN is ready to issue. */
18277 mips_sim_wait_insn (struct mips_sim
*state
, rtx_insn
*insn
)
18279 mips_sim_wait_regs (state
, insn
);
18280 mips_sim_wait_units (state
, insn
);
18283 /* mips_sim_insn has just set X. Update the LAST_SET array
18284 in simulation state DATA. */
18287 mips_sim_record_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
18289 struct mips_sim
*state
;
18291 state
= (struct mips_sim
*) data
;
18294 unsigned int regno
, end_regno
;
18296 end_regno
= END_REGNO (x
);
18297 for (regno
= REGNO (x
); regno
< end_regno
; regno
++)
18299 state
->last_set
[regno
].insn
= mips_sim_insn
;
18300 state
->last_set
[regno
].time
= state
->time
;
18305 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
18306 can issue immediately (i.e., that mips_sim_wait_insn has already
18310 mips_sim_issue_insn (struct mips_sim
*state
, rtx_insn
*insn
)
18312 curr_state
= state
->dfa_state
;
18314 state_transition (curr_state
, insn
);
18315 state
->insns_left
= targetm
.sched
.variable_issue (0, false, insn
,
18316 state
->insns_left
);
18318 mips_sim_insn
= insn
;
18319 note_stores (insn
, mips_sim_record_set
, state
);
18322 /* Simulate issuing a NOP in state STATE. */
18325 mips_sim_issue_nop (struct mips_sim
*state
)
18327 if (state
->insns_left
== 0)
18328 mips_sim_next_cycle (state
);
18329 state
->insns_left
--;
18332 /* Update simulation state STATE so that it's ready to accept the instruction
18333 after INSN. INSN should be part of the main rtl chain, not a member of a
18337 mips_sim_finish_insn (struct mips_sim
*state
, rtx_insn
*insn
)
18339 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
18341 mips_sim_issue_nop (state
);
18343 switch (GET_CODE (SEQ_BEGIN (insn
)))
18347 /* We can't predict the processor state after a call or label. */
18348 mips_sim_reset (state
);
18352 /* The delay slots of branch likely instructions are only executed
18353 when the branch is taken. Therefore, if the caller has simulated
18354 the delay slot instruction, STATE does not really reflect the state
18355 of the pipeline for the instruction after the delay slot. Also,
18356 branch likely instructions tend to incur a penalty when not taken,
18357 so there will probably be an extra delay between the branch and
18358 the instruction after the delay slot. */
18359 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn
)))
18360 mips_sim_reset (state
);
18368 /* Use simulator state STATE to calculate the execution time of
18369 instruction sequence SEQ. */
18371 static unsigned int
18372 mips_seq_time (struct mips_sim
*state
, rtx_insn
*seq
)
18374 mips_sim_reset (state
);
18375 for (rtx_insn
*insn
= seq
; insn
; insn
= NEXT_INSN (insn
))
18377 mips_sim_wait_insn (state
, insn
);
18378 mips_sim_issue_insn (state
, insn
);
18380 return state
->time
;
18383 /* Return the execution-time cost of mips_tuning_info.fast_mult_zero_zero_p
18384 setting SETTING, using STATE to simulate instruction sequences. */
18386 static unsigned int
18387 mips_mult_zero_zero_cost (struct mips_sim
*state
, bool setting
)
18389 mips_tuning_info
.fast_mult_zero_zero_p
= setting
;
18392 machine_mode dword_mode
= TARGET_64BIT
? TImode
: DImode
;
18393 rtx hilo
= gen_rtx_REG (dword_mode
, MD_REG_FIRST
);
18394 mips_emit_move_or_split (hilo
, const0_rtx
, SPLIT_FOR_SPEED
);
18396 /* If the target provides mulsidi3_32bit then that's the most likely
18397 consumer of the result. Test for bypasses. */
18398 if (dword_mode
== DImode
&& HAVE_maddsidi4
)
18400 rtx gpr
= gen_rtx_REG (SImode
, GP_REG_FIRST
+ 4);
18401 emit_insn (gen_maddsidi4 (hilo
, gpr
, gpr
, hilo
));
18404 unsigned int time
= mips_seq_time (state
, get_insns ());
18409 /* Check the relative speeds of "MULT $0,$0" and "MTLO $0; MTHI $0"
18410 and set up mips_tuning_info.fast_mult_zero_zero_p accordingly.
18411 Prefer MULT -- which is shorter -- in the event of a tie. */
18414 mips_set_fast_mult_zero_zero_p (struct mips_sim
*state
)
18416 if (TARGET_MIPS16
|| !ISA_HAS_HILO
)
18417 /* No MTLO or MTHI available for MIPS16. Also, when there are no HI or LO
18418 registers then there is no reason to zero them, arbitrarily choose to
18419 say that "MULT $0,$0" would be faster. */
18420 mips_tuning_info
.fast_mult_zero_zero_p
= true;
18423 unsigned int true_time
= mips_mult_zero_zero_cost (state
, true);
18424 unsigned int false_time
= mips_mult_zero_zero_cost (state
, false);
18425 mips_tuning_info
.fast_mult_zero_zero_p
= (true_time
<= false_time
);
18429 /* Set up costs based on the current architecture and tuning settings. */
18432 mips_set_tuning_info (void)
18434 if (mips_tuning_info
.initialized_p
18435 && mips_tuning_info
.arch
== mips_arch
18436 && mips_tuning_info
.tune
== mips_tune
18437 && mips_tuning_info
.mips16_p
== TARGET_MIPS16
)
18440 mips_tuning_info
.arch
= mips_arch
;
18441 mips_tuning_info
.tune
= mips_tune
;
18442 mips_tuning_info
.mips16_p
= TARGET_MIPS16
;
18443 mips_tuning_info
.initialized_p
= true;
18447 struct mips_sim state
;
18448 mips_sim_init (&state
, alloca (state_size ()));
18450 mips_set_fast_mult_zero_zero_p (&state
);
18455 /* Implement TARGET_EXPAND_TO_RTL_HOOK. */
18458 mips_expand_to_rtl_hook (void)
18460 /* We need to call this at a point where we can safely create sequences
18461 of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also
18462 need to call it at a point where the DFA infrastructure is not
18463 already in use, so we can't just call it lazily on demand.
18465 At present, mips_tuning_info is only needed during post-expand
18466 RTL passes such as split_insns, so this hook should be early enough.
18467 We may need to move the call elsewhere if mips_tuning_info starts
18468 to be used for other things (such as rtx_costs, or expanders that
18469 could be called during gimple optimization). */
18470 mips_set_tuning_info ();
18473 /* The VR4130 pipeline issues aligned pairs of instructions together,
18474 but it stalls the second instruction if it depends on the first.
18475 In order to cut down the amount of logic required, this dependence
18476 check is not based on a full instruction decode. Instead, any non-SPECIAL
18477 instruction is assumed to modify the register specified by bits 20-16
18478 (which is usually the "rt" field).
18480 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
18481 input, so we can end up with a false dependence between the branch
18482 and its delay slot. If this situation occurs in instruction INSN,
18483 try to avoid it by swapping rs and rt. */
18486 vr4130_avoid_branch_rt_conflict (rtx_insn
*insn
)
18488 rtx_insn
*first
, *second
;
18490 first
= SEQ_BEGIN (insn
);
18491 second
= SEQ_END (insn
);
18493 && NONJUMP_INSN_P (second
)
18494 && GET_CODE (PATTERN (first
)) == SET
18495 && GET_CODE (SET_DEST (PATTERN (first
))) == PC
18496 && GET_CODE (SET_SRC (PATTERN (first
))) == IF_THEN_ELSE
)
18498 /* Check for the right kind of condition. */
18499 rtx cond
= XEXP (SET_SRC (PATTERN (first
)), 0);
18500 if ((GET_CODE (cond
) == EQ
|| GET_CODE (cond
) == NE
)
18501 && REG_P (XEXP (cond
, 0))
18502 && REG_P (XEXP (cond
, 1))
18503 && reg_referenced_p (XEXP (cond
, 1), PATTERN (second
))
18504 && !reg_referenced_p (XEXP (cond
, 0), PATTERN (second
)))
18506 /* SECOND mentions the rt register but not the rs register. */
18507 rtx tmp
= XEXP (cond
, 0);
18508 XEXP (cond
, 0) = XEXP (cond
, 1);
18509 XEXP (cond
, 1) = tmp
;
18514 /* Implement -mvr4130-align. Go through each basic block and simulate the
18515 processor pipeline. If we find that a pair of instructions could execute
18516 in parallel, and the first of those instructions is not 8-byte aligned,
18517 insert a nop to make it aligned. */
18520 vr4130_align_insns (void)
18522 struct mips_sim state
;
18523 rtx_insn
*insn
, *subinsn
, *last
, *last2
, *next
;
18528 /* LAST is the last instruction before INSN to have a nonzero length.
18529 LAST2 is the last such instruction before LAST. */
18533 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
18536 mips_sim_init (&state
, alloca (state_size ()));
18537 for (insn
= get_insns (); insn
!= 0; insn
= next
)
18539 unsigned int length
;
18541 next
= NEXT_INSN (insn
);
18543 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
18544 This isn't really related to the alignment pass, but we do it on
18545 the fly to avoid a separate instruction walk. */
18546 vr4130_avoid_branch_rt_conflict (insn
);
18548 length
= get_attr_length (insn
);
18549 if (length
> 0 && USEFUL_INSN_P (insn
))
18550 FOR_EACH_SUBINSN (subinsn
, insn
)
18552 mips_sim_wait_insn (&state
, subinsn
);
18554 /* If we want this instruction to issue in parallel with the
18555 previous one, make sure that the previous instruction is
18556 aligned. There are several reasons why this isn't worthwhile
18557 when the second instruction is a call:
18559 - Calls are less likely to be performance critical,
18560 - There's a good chance that the delay slot can execute
18561 in parallel with the call.
18562 - The return address would then be unaligned.
18564 In general, if we're going to insert a nop between instructions
18565 X and Y, it's better to insert it immediately after X. That
18566 way, if the nop makes Y aligned, it will also align any labels
18567 between X and Y. */
18568 if (state
.insns_left
!= state
.issue_rate
18569 && !CALL_P (subinsn
))
18571 if (subinsn
== SEQ_BEGIN (insn
) && aligned_p
)
18573 /* SUBINSN is the first instruction in INSN and INSN is
18574 aligned. We want to align the previous instruction
18575 instead, so insert a nop between LAST2 and LAST.
18577 Note that LAST could be either a single instruction
18578 or a branch with a delay slot. In the latter case,
18579 LAST, like INSN, is already aligned, but the delay
18580 slot must have some extra delay that stops it from
18581 issuing at the same time as the branch. We therefore
18582 insert a nop before the branch in order to align its
18584 gcc_assert (last2
);
18585 emit_insn_after (gen_nop (), last2
);
18588 else if (subinsn
!= SEQ_BEGIN (insn
) && !aligned_p
)
18590 /* SUBINSN is the delay slot of INSN, but INSN is
18591 currently unaligned. Insert a nop between
18592 LAST and INSN to align it. */
18594 emit_insn_after (gen_nop (), last
);
18598 mips_sim_issue_insn (&state
, subinsn
);
18600 mips_sim_finish_insn (&state
, insn
);
18602 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
18603 length
= get_attr_length (insn
);
18606 /* If the instruction is an asm statement or multi-instruction
18607 mips.md patern, the length is only an estimate. Insert an
18608 8 byte alignment after it so that the following instructions
18609 can be handled correctly. */
18610 if (NONJUMP_INSN_P (SEQ_BEGIN (insn
))
18611 && (recog_memoized (insn
) < 0 || length
>= 8))
18613 next
= emit_insn_after (gen_align (GEN_INT (3)), insn
);
18614 next
= NEXT_INSN (next
);
18615 mips_sim_next_cycle (&state
);
18618 else if (length
& 4)
18619 aligned_p
= !aligned_p
;
18624 /* See whether INSN is an aligned label. */
18625 if (LABEL_P (insn
) && label_to_alignment (insn
).levels
[0].log
>= 3)
18631 /* This structure records that the current function has a LO_SUM
18632 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
18633 the largest offset applied to BASE by all such LO_SUMs. */
18634 struct mips_lo_sum_offset
{
18636 HOST_WIDE_INT offset
;
18639 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
18642 mips_hash_base (rtx base
)
18644 int do_not_record_p
;
18646 return hash_rtx (base
, GET_MODE (base
), &do_not_record_p
, NULL
, false);
18649 /* Hashtable helpers. */
18651 struct mips_lo_sum_offset_hasher
: free_ptr_hash
<mips_lo_sum_offset
>
18653 typedef rtx_def
*compare_type
;
18654 static inline hashval_t
hash (const mips_lo_sum_offset
*);
18655 static inline bool equal (const mips_lo_sum_offset
*, const rtx_def
*);
18658 /* Hash-table callbacks for mips_lo_sum_offsets. */
18661 mips_lo_sum_offset_hasher::hash (const mips_lo_sum_offset
*entry
)
18663 return mips_hash_base (entry
->base
);
18667 mips_lo_sum_offset_hasher::equal (const mips_lo_sum_offset
*entry
,
18668 const rtx_def
*value
)
18670 return rtx_equal_p (entry
->base
, value
);
18673 typedef hash_table
<mips_lo_sum_offset_hasher
> mips_offset_table
;
18675 /* Look up symbolic constant X in HTAB, which is a hash table of
18676 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
18677 paired with a recorded LO_SUM, otherwise record X in the table. */
18680 mips_lo_sum_offset_lookup (mips_offset_table
*htab
, rtx x
,
18681 enum insert_option option
)
18684 mips_lo_sum_offset
**slot
;
18685 struct mips_lo_sum_offset
*entry
;
18687 /* Split X into a base and offset. */
18688 split_const (x
, &base
, &offset
);
18689 if (UNSPEC_ADDRESS_P (base
))
18690 base
= UNSPEC_ADDRESS (base
);
18692 /* Look up the base in the hash table. */
18693 slot
= htab
->find_slot_with_hash (base
, mips_hash_base (base
), option
);
18697 entry
= (struct mips_lo_sum_offset
*) *slot
;
18698 if (option
== INSERT
)
18702 entry
= XNEW (struct mips_lo_sum_offset
);
18703 entry
->base
= base
;
18704 entry
->offset
= INTVAL (offset
);
18709 if (INTVAL (offset
) > entry
->offset
)
18710 entry
->offset
= INTVAL (offset
);
18713 return INTVAL (offset
) <= entry
->offset
;
18716 /* Search X for LO_SUMs and record them in HTAB. */
18719 mips_record_lo_sums (const_rtx x
, mips_offset_table
*htab
)
18721 subrtx_iterator::array_type array
;
18722 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
18723 if (GET_CODE (*iter
) == LO_SUM
)
18724 mips_lo_sum_offset_lookup (htab
, XEXP (*iter
, 1), INSERT
);
18727 /* Return true if INSN is a SET of an orphaned high-part relocation.
18728 HTAB is a hash table of mips_lo_sum_offsets that describes all the
18729 LO_SUMs in the current function. */
18732 mips_orphaned_high_part_p (mips_offset_table
*htab
, rtx_insn
*insn
)
18734 enum mips_symbol_type type
;
18737 set
= single_set (insn
);
18740 /* Check for %his. */
18742 if (GET_CODE (x
) == HIGH
18743 && absolute_symbolic_operand (XEXP (x
, 0), VOIDmode
))
18744 return !mips_lo_sum_offset_lookup (htab
, XEXP (x
, 0), NO_INSERT
);
18746 /* Check for local %gots (and %got_pages, which is redundant but OK). */
18747 if (GET_CODE (x
) == UNSPEC
18748 && XINT (x
, 1) == UNSPEC_LOAD_GOT
18749 && mips_symbolic_constant_p (XVECEXP (x
, 0, 1),
18750 SYMBOL_CONTEXT_LEA
, &type
)
18751 && type
== SYMBOL_GOTOFF_PAGE
)
18752 return !mips_lo_sum_offset_lookup (htab
, XVECEXP (x
, 0, 1), NO_INSERT
);
18757 /* Subroutine of mips_avoid_hazard. We classify unconditional branches
18758 of interest for the P6600 for performance reasons. We're interested
18759 in differentiating BALC from JIC, JIALC and BC. */
18761 static enum mips_ucbranch_type
18762 mips_classify_branch_p6600 (rtx_insn
*insn
)
18764 /* We ignore sequences here as they represent a filled delay slot. */
18766 || !USEFUL_INSN_P (insn
)
18767 || GET_CODE (PATTERN (insn
)) == SEQUENCE
)
18768 return UC_UNDEFINED
;
18770 if (get_attr_jal (insn
) == JAL_INDIRECT
/* JIC and JIALC. */
18771 || get_attr_type (insn
) == TYPE_JUMP
) /* BC. */
18774 if (CALL_P (insn
) && get_attr_jal (insn
) == JAL_DIRECT
)
18777 return UC_UNDEFINED
;
18780 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
18781 INSN and a previous instruction, avoid it by inserting nops after
18784 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
18785 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
18786 before using the value of that register. *HILO_DELAY counts the
18787 number of instructions since the last hilo hazard (that is,
18788 the number of instructions since the last MFLO or MFHI).
18790 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
18791 for the next instruction.
18793 LO_REG is an rtx for the LO register, used in dependence checking. */
18796 mips_avoid_hazard (rtx_insn
*after
, rtx_insn
*insn
, int *hilo_delay
,
18797 rtx
*delayed_reg
, rtx lo_reg
, bool *fs_delay
)
18802 pattern
= PATTERN (insn
);
18804 /* Do not put the whole function in .set noreorder if it contains
18805 an asm statement. We don't know whether there will be hazards
18806 between the asm statement and the gcc-generated code. */
18807 if (GET_CODE (pattern
) == ASM_INPUT
|| asm_noperands (pattern
) >= 0)
18808 cfun
->machine
->all_noreorder_p
= false;
18810 /* Ignore zero-length instructions (barriers and the like). */
18811 ninsns
= get_attr_length (insn
) / 4;
18815 /* Work out how many nops are needed. Note that we only care about
18816 registers that are explicitly mentioned in the instruction's pattern.
18817 It doesn't matter that calls use the argument registers or that they
18818 clobber hi and lo. */
18819 if (*hilo_delay
< 2 && reg_set_p (lo_reg
, pattern
))
18820 nops
= 2 - *hilo_delay
;
18821 else if (*delayed_reg
!= 0 && reg_referenced_p (*delayed_reg
, pattern
))
18823 /* If processing a forbidden slot hazard then a NOP is required if the
18824 branch instruction was not in a sequence (as the sequence would
18825 imply it is not actually a compact branch anyway) and the current
18826 insn is not an inline asm, and can't go in a delay slot. */
18827 else if (*fs_delay
&& get_attr_can_delay (insn
) == CAN_DELAY_NO
18828 && GET_CODE (PATTERN (after
)) != SEQUENCE
18829 && GET_CODE (pattern
) != ASM_INPUT
18830 && asm_noperands (pattern
) < 0)
18832 /* The P6600's branch predictor can handle static sequences of back-to-back
18833 branches in the following cases:
18835 (1) BALC followed by any conditional compact branch
18836 (2) BALC followed by BALC
18838 Any other combinations of compact branches will incur performance
18839 penalty. Inserting a no-op only costs space as the dispatch unit will
18840 disregard the nop. */
18841 else if (TUNE_P6600
&& TARGET_CB_MAYBE
&& !optimize_size
18842 && ((mips_classify_branch_p6600 (after
) == UC_BALC
18843 && mips_classify_branch_p6600 (insn
) == UC_OTHER
)
18844 || (mips_classify_branch_p6600 (insn
) == UC_BALC
18845 && mips_classify_branch_p6600 (after
) == UC_OTHER
)))
18850 /* Insert the nops between this instruction and the previous one.
18851 Each new nop takes us further from the last hilo hazard. */
18852 *hilo_delay
+= nops
;
18854 /* Move to the next real instruction if we are inserting a NOP and this
18855 instruction is a call with debug information. The reason being that
18856 we can't separate the call from the debug info. */
18857 rtx_insn
*real_after
= after
;
18858 if (real_after
&& nops
&& CALL_P (real_after
))
18860 && (NOTE_P (NEXT_INSN (real_after
))
18861 || BARRIER_P (NEXT_INSN (real_after
))))
18862 real_after
= NEXT_INSN (real_after
);
18865 emit_insn_after (gen_hazard_nop (), real_after
);
18867 /* Set up the state for the next instruction. */
18868 *hilo_delay
+= ninsns
;
18871 if (INSN_CODE (insn
) >= 0)
18872 switch (get_attr_hazard (insn
))
18875 /* For the P6600, flag some unconditional branches as having a
18876 pseudo-forbidden slot. This will cause additional nop insertion
18877 or SEQUENCE breaking as required. This is for performance
18878 reasons not correctness. */
18882 && mips_classify_branch_p6600 (insn
) == UC_OTHER
)
18886 case HAZARD_FORBIDDEN_SLOT
:
18887 if (TARGET_CB_MAYBE
)
18896 set
= single_set (insn
);
18898 *delayed_reg
= SET_DEST (set
);
18903 /* A SEQUENCE is breakable iff the branch inside it has a compact form
18904 and the target has compact branches. */
18907 mips_breakable_sequence_p (rtx_insn
*insn
)
18909 return (insn
&& GET_CODE (PATTERN (insn
)) == SEQUENCE
18911 && get_attr_compact_form (SEQ_BEGIN (insn
)) != COMPACT_FORM_NEVER
);
18914 /* Remove a SEQUENCE and replace it with the delay slot instruction
18915 followed by the branch and return the instruction in the delay slot.
18916 Return the first of the two new instructions.
18917 Subroutine of mips_reorg_process_insns. */
18920 mips_break_sequence (rtx_insn
*insn
)
18922 rtx_insn
*before
= PREV_INSN (insn
);
18923 rtx_insn
*branch
= SEQ_BEGIN (insn
);
18924 rtx_insn
*ds
= SEQ_END (insn
);
18925 remove_insn (insn
);
18926 add_insn_after (ds
, before
, NULL
);
18927 add_insn_after (branch
, ds
, NULL
);
18931 /* Go through the instruction stream and insert nops where necessary.
18932 Also delete any high-part relocations whose partnering low parts
18933 are now all dead. See if the whole function can then be put into
18934 .set noreorder and .set nomacro. */
18937 mips_reorg_process_insns (void)
18939 rtx_insn
*insn
, *last_insn
, *subinsn
, *next_insn
;
18940 rtx lo_reg
, delayed_reg
;
18944 /* Force all instructions to be split into their final form. */
18945 split_all_insns_noflow ();
18947 /* Recalculate instruction lengths without taking nops into account. */
18948 cfun
->machine
->ignore_hazard_length_p
= true;
18949 shorten_branches (get_insns ());
18951 cfun
->machine
->all_noreorder_p
= true;
18953 /* We don't track MIPS16 PC-relative offsets closely enough to make
18954 a good job of "set .noreorder" code in MIPS16 mode. */
18956 cfun
->machine
->all_noreorder_p
= false;
18958 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
18959 if (!TARGET_EXPLICIT_RELOCS
)
18960 cfun
->machine
->all_noreorder_p
= false;
18962 /* Profiled functions can't be all noreorder because the profiler
18963 support uses assembler macros. */
18965 cfun
->machine
->all_noreorder_p
= false;
18967 /* Code compiled with -mfix-vr4120, -mfix-r5900, -mfix-rm7000 or
18968 -mfix-24k can't be all noreorder because we rely on the assembler
18969 to work around some errata. The R5900 target has several bugs. */
18970 if (TARGET_FIX_VR4120
18971 || TARGET_FIX_RM7000
18973 || TARGET_FIX_R5900
)
18974 cfun
->machine
->all_noreorder_p
= false;
18976 /* The same is true for -mfix-vr4130 if we might generate MFLO or
18977 MFHI instructions. Note that we avoid using MFLO and MFHI if
18978 the VR4130 MACC and DMACC instructions are available instead;
18979 see the *mfhilo_{si,di}_macc patterns. */
18980 if (TARGET_FIX_VR4130
&& !ISA_HAS_MACCHI
)
18981 cfun
->machine
->all_noreorder_p
= false;
18983 mips_offset_table
htab (37);
18985 /* Make a first pass over the instructions, recording all the LO_SUMs. */
18986 for (insn
= get_insns (); insn
!= 0; insn
= NEXT_INSN (insn
))
18987 FOR_EACH_SUBINSN (subinsn
, insn
)
18988 if (USEFUL_INSN_P (subinsn
))
18990 rtx body
= PATTERN (insn
);
18991 int noperands
= asm_noperands (body
);
18992 if (noperands
>= 0)
18994 rtx
*ops
= XALLOCAVEC (rtx
, noperands
);
18995 bool *used
= XALLOCAVEC (bool, noperands
);
18996 const char *string
= decode_asm_operands (body
, ops
, NULL
, NULL
,
18998 get_referenced_operands (string
, used
, noperands
);
18999 for (int i
= 0; i
< noperands
; ++i
)
19001 mips_record_lo_sums (ops
[i
], &htab
);
19004 mips_record_lo_sums (PATTERN (subinsn
), &htab
);
19010 lo_reg
= gen_rtx_REG (SImode
, LO_REGNUM
);
19013 /* Make a second pass over the instructions. Delete orphaned
19014 high-part relocations or turn them into NOPs. Avoid hazards
19015 by inserting NOPs. */
19016 for (insn
= get_insns (); insn
!= 0; insn
= next_insn
)
19018 next_insn
= NEXT_INSN (insn
);
19019 if (USEFUL_INSN_P (insn
))
19021 if (GET_CODE (PATTERN (insn
)) == SEQUENCE
)
19023 rtx_insn
*next_active
= next_active_insn (insn
);
19024 /* Undo delay slots to avoid bubbles if the next instruction can
19025 be placed in a forbidden slot or the cost of adding an
19026 explicit NOP in a forbidden slot is OK and if the SEQUENCE is
19027 safely breakable. */
19028 if (TARGET_CB_MAYBE
19029 && mips_breakable_sequence_p (insn
)
19030 && INSN_P (SEQ_BEGIN (insn
))
19031 && INSN_P (SEQ_END (insn
))
19033 && INSN_P (next_active
)
19034 && GET_CODE (PATTERN (next_active
)) != SEQUENCE
19035 && get_attr_can_delay (next_active
) == CAN_DELAY_YES
)
19036 || !optimize_size
))
19038 /* To hide a potential pipeline bubble, if we scan backwards
19039 from the current SEQUENCE and find that there is a load
19040 of a value that is used in the CTI and there are no
19041 dependencies between the CTI and instruction in the delay
19042 slot, break the sequence so the load delay is hidden. */
19044 CLEAR_HARD_REG_SET (uses
);
19045 note_uses (&PATTERN (SEQ_BEGIN (insn
)), record_hard_reg_uses
,
19047 HARD_REG_SET delay_sets
;
19048 CLEAR_HARD_REG_SET (delay_sets
);
19049 note_stores (SEQ_END (insn
), record_hard_reg_sets
,
19052 rtx_insn
*prev
= prev_active_insn (insn
);
19054 && GET_CODE (PATTERN (prev
)) == SET
19055 && MEM_P (SET_SRC (PATTERN (prev
))))
19058 CLEAR_HARD_REG_SET (sets
);
19059 note_stores (prev
, record_hard_reg_sets
, &sets
);
19061 /* Re-order if safe. */
19062 if (!hard_reg_set_intersect_p (delay_sets
, uses
)
19063 && hard_reg_set_intersect_p (uses
, sets
))
19065 next_insn
= mips_break_sequence (insn
);
19066 /* Need to process the hazards of the newly
19067 introduced instructions. */
19072 /* If we find an orphaned high-part relocation in a delay
19073 slot then we can convert to a compact branch and get
19074 the orphaned high part deleted. */
19075 if (mips_orphaned_high_part_p (&htab
, SEQ_END (insn
)))
19077 next_insn
= mips_break_sequence (insn
);
19078 /* Need to process the hazards of the newly
19079 introduced instructions. */
19084 /* If we find an orphaned high-part relocation in a delay
19085 slot, it's easier to turn that instruction into a NOP than
19086 to delete it. The delay slot will be a NOP either way. */
19087 FOR_EACH_SUBINSN (subinsn
, insn
)
19088 if (INSN_P (subinsn
))
19090 if (mips_orphaned_high_part_p (&htab
, subinsn
))
19092 PATTERN (subinsn
) = gen_nop ();
19093 INSN_CODE (subinsn
) = CODE_FOR_nop
;
19095 mips_avoid_hazard (last_insn
, subinsn
, &hilo_delay
,
19096 &delayed_reg
, lo_reg
, &fs_delay
);
19102 /* INSN is a single instruction. Delete it if it's an
19103 orphaned high-part relocation. */
19104 if (mips_orphaned_high_part_p (&htab
, insn
))
19105 delete_insn (insn
);
19106 /* Also delete cache barriers if the last instruction
19107 was an annulled branch. INSN will not be speculatively
19109 else if (recog_memoized (insn
) == CODE_FOR_r10k_cache_barrier
19111 && JUMP_P (SEQ_BEGIN (last_insn
))
19112 && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn
)))
19113 delete_insn (insn
);
19116 mips_avoid_hazard (last_insn
, insn
, &hilo_delay
,
19117 &delayed_reg
, lo_reg
, &fs_delay
);
19118 /* When a compact branch introduces a forbidden slot hazard
19119 and the next useful instruction is a SEQUENCE of a jump
19120 and a non-nop instruction in the delay slot, remove the
19121 sequence and replace it with the delay slot instruction
19122 then the jump to clear the forbidden slot hazard.
19124 For the P6600, this optimisation solves the performance
19125 penalty associated with BALC followed by a delay slot
19126 branch. We do not set fs_delay as we do not want
19127 the full logic of a forbidden slot; the penalty exists
19128 only against branches not the full class of forbidden
19129 slot instructions. */
19131 if (fs_delay
|| (TUNE_P6600
19133 && mips_classify_branch_p6600 (insn
)
19136 /* Search onwards from the current position looking for
19137 a SEQUENCE. We are looking for pipeline hazards here
19138 and do not need to worry about labels or barriers as
19139 the optimization only undoes delay slot filling which
19140 only affects the order of the branch and its delay
19142 rtx_insn
*next
= next_active_insn (insn
);
19144 && USEFUL_INSN_P (next
)
19145 && GET_CODE (PATTERN (next
)) == SEQUENCE
19146 && mips_breakable_sequence_p (next
))
19149 next_insn
= mips_break_sequence (next
);
19150 /* Need to process the hazards of the newly
19151 introduced instructions. */
19162 /* Return true if the function has a long branch instruction. */
19165 mips_has_long_branch_p (void)
19167 rtx_insn
*insn
, *subinsn
;
19170 /* We need up-to-date instruction lengths. */
19171 shorten_branches (get_insns ());
19173 /* Look for a branch that is longer than normal. The normal length for
19174 non-MIPS16 branches is 8, because the length includes the delay slot.
19175 It is 4 for MIPS16, because MIPS16 branches are extended instructions,
19176 but they have no delay slot. */
19177 normal_length
= (TARGET_MIPS16
? 4 : 8);
19178 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
19179 FOR_EACH_SUBINSN (subinsn
, insn
)
19180 if (JUMP_P (subinsn
)
19181 && get_attr_length (subinsn
) > normal_length
19182 && (any_condjump_p (subinsn
) || any_uncondjump_p (subinsn
)))
19188 /* If we are using a GOT, but have not decided to use a global pointer yet,
19189 see whether we need one to implement long branches. Convert the ghost
19190 global-pointer instructions into real ones if so. */
19193 mips_expand_ghost_gp_insns (void)
19195 /* Quick exit if we already know that we will or won't need a
19197 if (!TARGET_USE_GOT
19198 || cfun
->machine
->global_pointer
== INVALID_REGNUM
19199 || mips_must_initialize_gp_p ())
19202 /* Run a full check for long branches. */
19203 if (!mips_has_long_branch_p ())
19206 /* We've now established that we need $gp. */
19207 cfun
->machine
->must_initialize_gp_p
= true;
19208 split_all_insns_noflow ();
19213 /* Subroutine of mips_reorg to manage passes that require DF. */
19216 mips_df_reorg (void)
19218 /* Create def-use chains. */
19219 df_set_flags (DF_EQ_NOTES
);
19220 df_chain_add_problem (DF_UD_CHAIN
);
19223 if (TARGET_RELAX_PIC_CALLS
)
19224 mips_annotate_pic_calls ();
19226 if (mips_r10k_cache_barrier
!= R10K_CACHE_BARRIER_NONE
)
19227 r10k_insert_cache_barriers ();
19229 df_finish_pass (false);
19232 /* Emit code to load LABEL_REF SRC into MIPS16 register DEST. This is
19233 called very late in mips_reorg, but the caller is required to run
19234 mips16_lay_out_constants on the result. */
19237 mips16_load_branch_target (rtx dest
, rtx src
)
19239 if (TARGET_ABICALLS
&& !TARGET_ABSOLUTE_ABICALLS
)
19243 if (mips_cfun_has_cprestore_slot_p ())
19244 mips_emit_move (dest
, mips_cprestore_slot (dest
, true));
19246 mips_emit_move (dest
, pic_offset_table_rtx
);
19247 page
= mips_unspec_address (src
, SYMBOL_GOTOFF_PAGE
);
19248 low
= mips_unspec_address (src
, SYMBOL_GOT_PAGE_OFST
);
19249 emit_insn (gen_rtx_SET (dest
,
19250 PMODE_INSN (gen_unspec_got
, (dest
, page
))));
19251 emit_insn (gen_rtx_SET (dest
, gen_rtx_LO_SUM (Pmode
, dest
, low
)));
19255 src
= mips_unspec_address (src
, SYMBOL_ABSOLUTE
);
19256 mips_emit_move (dest
, src
);
19260 /* If we're compiling a MIPS16 function, look for and split any long branches.
19261 This must be called after all other instruction modifications in
19265 mips16_split_long_branches (void)
19267 bool something_changed
;
19269 if (!TARGET_MIPS16
)
19272 /* Loop until the alignments for all targets are sufficient. */
19276 rtx_jump_insn
*jump_insn
;
19278 shorten_branches (get_insns ());
19279 something_changed
= false;
19280 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
19281 if ((jump_insn
= dyn_cast
<rtx_jump_insn
*> (insn
))
19282 && get_attr_length (jump_insn
) > 4
19283 && (any_condjump_p (jump_insn
) || any_uncondjump_p (jump_insn
)))
19285 rtx old_label
, temp
, saved_temp
;
19286 rtx_code_label
*new_label
;
19288 rtx_insn
*jump
, *jump_sequence
;
19292 /* Free up a MIPS16 register by saving it in $1. */
19293 saved_temp
= gen_rtx_REG (Pmode
, AT_REGNUM
);
19294 temp
= gen_rtx_REG (Pmode
, GP_REG_FIRST
+ 2);
19295 emit_move_insn (saved_temp
, temp
);
19297 /* Load the branch target into TEMP. */
19298 old_label
= JUMP_LABEL (jump_insn
);
19299 target
= gen_rtx_LABEL_REF (Pmode
, old_label
);
19300 mips16_load_branch_target (temp
, target
);
19302 /* Jump to the target and restore the register's
19304 jump
= emit_jump_insn (PMODE_INSN (gen_indirect_jump_and_restore
,
19305 (temp
, temp
, saved_temp
)));
19306 JUMP_LABEL (jump
) = old_label
;
19307 LABEL_NUSES (old_label
)++;
19309 /* Rewrite any symbolic references that are supposed to use
19310 a PC-relative constant pool. */
19311 mips16_lay_out_constants (false);
19313 if (simplejump_p (jump_insn
))
19314 /* We're going to replace INSN with a longer form. */
19318 /* Create a branch-around label for the original
19320 new_label
= gen_label_rtx ();
19321 emit_label (new_label
);
19324 jump_sequence
= get_insns ();
19327 emit_insn_after (jump_sequence
, jump_insn
);
19329 invert_jump (jump_insn
, new_label
, false);
19331 delete_insn (jump_insn
);
19332 something_changed
= true;
19335 while (something_changed
);
19338 /* Insert a `.insn' assembly pseudo-op after any labels followed by
19339 a MIPS16 constant pool or no insn at all. This is needed so that
19340 targets that have been optimized away are still marked as code
19341 and therefore branches that remained and point to them are known
19342 to retain the ISA mode and as such can be successfully assembled. */
19345 mips_insert_insn_pseudos (void)
19347 bool insn_pseudo_needed
= TRUE
;
19350 for (insn
= get_last_insn (); insn
!= NULL_RTX
; insn
= PREV_INSN (insn
))
19351 switch (GET_CODE (insn
))
19354 if (GET_CODE (PATTERN (insn
)) == UNSPEC_VOLATILE
19355 && XINT (PATTERN (insn
), 1) == UNSPEC_CONSTTABLE
)
19357 insn_pseudo_needed
= TRUE
;
19360 /* Fall through. */
19363 case JUMP_TABLE_DATA
:
19364 insn_pseudo_needed
= FALSE
;
19367 if (insn_pseudo_needed
)
19369 emit_insn_after (gen_insn_pseudo (), insn
);
19370 insn_pseudo_needed
= FALSE
;
19378 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
19383 /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. Also during
19384 insn splitting in mips16_lay_out_constants, DF insn info is only kept up
19385 to date if the CFG is available. */
19386 if (mips_cfg_in_reorg ())
19387 compute_bb_for_insn ();
19388 mips16_lay_out_constants (true);
19389 if (mips_cfg_in_reorg ())
19392 free_bb_for_insn ();
19396 /* We use a machine specific pass to do a second machine dependent reorg
19397 pass after delay branch scheduling. */
19399 static unsigned int
19400 mips_machine_reorg2 (void)
19402 mips_reorg_process_insns ();
19404 && TARGET_EXPLICIT_RELOCS
19406 && TARGET_VR4130_ALIGN
)
19407 vr4130_align_insns ();
19408 if (mips_expand_ghost_gp_insns ())
19409 /* The expansion could invalidate some of the VR4130 alignment
19410 optimizations, but this should be an extremely rare case anyhow. */
19411 mips_reorg_process_insns ();
19412 mips16_split_long_branches ();
19413 mips_insert_insn_pseudos ();
19419 const pass_data pass_data_mips_machine_reorg2
=
19421 RTL_PASS
, /* type */
19422 "mach2", /* name */
19423 OPTGROUP_NONE
, /* optinfo_flags */
19424 TV_MACH_DEP
, /* tv_id */
19425 0, /* properties_required */
19426 0, /* properties_provided */
19427 0, /* properties_destroyed */
19428 0, /* todo_flags_start */
19429 0, /* todo_flags_finish */
19432 class pass_mips_machine_reorg2
: public rtl_opt_pass
19435 pass_mips_machine_reorg2(gcc::context
*ctxt
)
19436 : rtl_opt_pass(pass_data_mips_machine_reorg2
, ctxt
)
19439 /* opt_pass methods: */
19440 virtual unsigned int execute (function
*) { return mips_machine_reorg2 (); }
19442 }; // class pass_mips_machine_reorg2
19444 } // anon namespace
19447 make_pass_mips_machine_reorg2 (gcc::context
*ctxt
)
19449 return new pass_mips_machine_reorg2 (ctxt
);
19453 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
19454 in order to avoid duplicating too much logic from elsewhere. */
19457 mips_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
19458 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
19461 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl
));
19462 rtx this_rtx
, temp1
, temp2
, fnaddr
;
19464 bool use_sibcall_p
;
19466 /* Pretend to be a post-reload pass while generating rtl. */
19467 reload_completed
= 1;
19469 /* Mark the end of the (empty) prologue. */
19470 emit_note (NOTE_INSN_PROLOGUE_END
);
19472 /* Determine if we can use a sibcall to call FUNCTION directly. */
19473 fnaddr
= XEXP (DECL_RTL (function
), 0);
19474 use_sibcall_p
= (mips_function_ok_for_sibcall (function
, NULL
)
19475 && const_call_insn_operand (fnaddr
, Pmode
));
19477 /* Determine if we need to load FNADDR from the GOT. */
19479 && (mips_got_symbol_type_p
19480 (mips_classify_symbol (fnaddr
, SYMBOL_CONTEXT_LEA
))))
19482 /* Pick a global pointer. Use a call-clobbered register if
19483 TARGET_CALL_SAVED_GP. */
19484 cfun
->machine
->global_pointer
19485 = TARGET_CALL_SAVED_GP
? 15 : GLOBAL_POINTER_REGNUM
;
19486 cfun
->machine
->must_initialize_gp_p
= true;
19487 SET_REGNO (pic_offset_table_rtx
, cfun
->machine
->global_pointer
);
19489 /* Set up the global pointer for n32 or n64 abicalls. */
19490 mips_emit_loadgp ();
19493 /* We need two temporary registers in some cases. */
19494 temp1
= gen_rtx_REG (Pmode
, 2);
19495 temp2
= gen_rtx_REG (Pmode
, 3);
19497 /* Find out which register contains the "this" pointer. */
19498 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
19499 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
+ 1);
19501 this_rtx
= gen_rtx_REG (Pmode
, GP_ARG_FIRST
);
19503 /* Add DELTA to THIS_RTX. */
19506 rtx offset
= GEN_INT (delta
);
19507 if (!SMALL_OPERAND (delta
))
19509 mips_emit_move (temp1
, offset
);
19512 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, offset
));
19515 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
19516 if (vcall_offset
!= 0)
19520 /* Set TEMP1 to *THIS_RTX. */
19521 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, this_rtx
));
19523 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
19524 addr
= mips_add_offset (temp2
, temp1
, vcall_offset
);
19526 /* Load the offset and add it to THIS_RTX. */
19527 mips_emit_move (temp1
, gen_rtx_MEM (Pmode
, addr
));
19528 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, temp1
));
19531 /* Jump to the target function. Use a sibcall if direct jumps are
19532 allowed, otherwise load the address into a register first. */
19535 insn
= emit_call_insn (gen_sibcall_internal (fnaddr
, const0_rtx
));
19536 SIBLING_CALL_P (insn
) = 1;
19540 /* This is messy. GAS treats "la $25,foo" as part of a call
19541 sequence and may allow a global "foo" to be lazily bound.
19542 The general move patterns therefore reject this combination.
19544 In this context, lazy binding would actually be OK
19545 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
19546 TARGET_CALL_SAVED_GP; see mips_load_call_address.
19547 We must therefore load the address via a temporary
19548 register if mips_dangerous_for_la25_p.
19550 If we jump to the temporary register rather than $25,
19551 the assembler can use the move insn to fill the jump's
19554 We can use the same technique for MIPS16 code, where $25
19555 is not a valid JR register. */
19556 if (TARGET_USE_PIC_FN_ADDR_REG
19558 && !mips_dangerous_for_la25_p (fnaddr
))
19559 temp1
= gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
);
19560 mips_load_call_address (MIPS_CALL_SIBCALL
, temp1
, fnaddr
);
19562 if (TARGET_USE_PIC_FN_ADDR_REG
19563 && REGNO (temp1
) != PIC_FUNCTION_ADDR_REGNUM
)
19564 mips_emit_move (gen_rtx_REG (Pmode
, PIC_FUNCTION_ADDR_REGNUM
), temp1
);
19565 emit_jump_insn (gen_indirect_jump (temp1
));
19568 /* Run just enough of rest_of_compilation. This sequence was
19569 "borrowed" from alpha.c. */
19570 insn
= get_insns ();
19571 split_all_insns_noflow ();
19572 mips16_lay_out_constants (true);
19573 shorten_branches (insn
);
19574 assemble_start_function (thunk_fndecl
, fnname
);
19575 final_start_function (insn
, file
, 1);
19576 final (insn
, file
, 1);
19577 final_end_function ();
19578 assemble_end_function (thunk_fndecl
, fnname
);
19580 /* Clean up the vars set above. Note that final_end_function resets
19581 the global pointer for us. */
19582 reload_completed
= 0;
/* The last argument passed to mips_set_compression_mode,
   or negative if the function hasn't been called yet.  */
static unsigned int old_compression_mode = -1;
19590 /* Set up the target-dependent global state for ISA mode COMPRESSION_MODE,
19591 which is either MASK_MIPS16 or MASK_MICROMIPS. */
19594 mips_set_compression_mode (unsigned int compression_mode
)
19597 if (compression_mode
== old_compression_mode
)
19600 /* Restore base settings of various flags. */
19601 target_flags
= mips_base_target_flags
;
19602 flag_schedule_insns
= mips_base_schedule_insns
;
19603 flag_reorder_blocks_and_partition
= mips_base_reorder_blocks_and_partition
;
19604 flag_move_loop_invariants
= mips_base_move_loop_invariants
;
19605 str_align_loops
= mips_base_align_loops
;
19606 str_align_jumps
= mips_base_align_jumps
;
19607 str_align_functions
= mips_base_align_functions
;
19608 target_flags
&= ~(MASK_MIPS16
| MASK_MICROMIPS
);
19609 target_flags
|= compression_mode
;
19611 if (compression_mode
& MASK_MIPS16
)
19613 /* Switch to MIPS16 mode. */
19614 target_flags
|= MASK_MIPS16
;
19616 /* Turn off SYNCI if it was on, MIPS16 doesn't support it. */
19617 target_flags
&= ~MASK_SYNCI
;
19619 /* Don't run the scheduler before reload, since it tends to
19620 increase register pressure. */
19621 flag_schedule_insns
= 0;
19623 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
19624 the whole function to be in a single section. */
19625 flag_reorder_blocks_and_partition
= 0;
19627 /* Don't move loop invariants, because it tends to increase
19628 register pressure. It also introduces an extra move in cases
19629 where the constant is the first operand in a two-operand binary
19630 instruction, or when it forms a register argument to a functon
19632 flag_move_loop_invariants
= 0;
19634 target_flags
|= MASK_EXPLICIT_RELOCS
;
19636 /* Experiments suggest we get the best overall section-anchor
19637 results from using the range of an unextended LW or SW. Code
19638 that makes heavy use of byte or short accesses can do better
19639 with ranges of 0...31 and 0...63 respectively, but most code is
19640 sensitive to the range of LW and SW instead. */
19641 targetm
.min_anchor_offset
= 0;
19642 targetm
.max_anchor_offset
= 127;
19644 targetm
.const_anchor
= 0;
19646 /* MIPS16 has no BAL instruction. */
19647 target_flags
&= ~MASK_RELAX_PIC_CALLS
;
19649 /* The R4000 errata don't apply to any known MIPS16 cores.
19650 It's simpler to make the R4000 fixes and MIPS16 mode
19651 mutually exclusive. */
19652 target_flags
&= ~MASK_FIX_R4000
;
19654 if (flag_pic
&& !TARGET_OLDABI
)
19655 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
19658 sorry ("MIPS16 %<-mxgot%> code");
19660 if (TARGET_HARD_FLOAT_ABI
&& !TARGET_OLDABI
)
19661 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
19664 sorry ("MSA MIPS16 code");
19668 /* Switch to microMIPS or the standard encoding. */
19670 if (TARGET_MICROMIPS
)
19671 /* Avoid branch likely. */
19672 target_flags
&= ~MASK_BRANCHLIKELY
;
19674 /* Provide default values for align_* for 64-bit targets. */
19677 if (flag_align_loops
&& !str_align_loops
)
19678 str_align_loops
= "8";
19679 if (flag_align_jumps
&& !str_align_jumps
)
19680 str_align_jumps
= "8";
19681 if (flag_align_functions
&& !str_align_functions
)
19682 str_align_functions
= "8";
19685 targetm
.min_anchor_offset
= -32768;
19686 targetm
.max_anchor_offset
= 32767;
19688 targetm
.const_anchor
= 0x8000;
19691 /* (Re)initialize MIPS target internals for new ISA. */
19692 mips_init_relocs ();
19694 if (compression_mode
& MASK_MIPS16
)
19696 if (!mips16_globals
)
19697 mips16_globals
= save_target_globals_default_opts ();
19699 restore_target_globals (mips16_globals
);
19701 else if (compression_mode
& MASK_MICROMIPS
)
19703 if (!micromips_globals
)
19704 micromips_globals
= save_target_globals_default_opts ();
19706 restore_target_globals (micromips_globals
);
19709 restore_target_globals (&default_target_globals
);
19711 old_compression_mode
= compression_mode
;
19714 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
19715 function should use the MIPS16 or microMIPS ISA and switch modes
19719 mips_set_current_function (tree fndecl
)
19721 mips_set_compression_mode (mips_get_compress_mode (fndecl
));
19724 /* Allocate a chunk of memory for per-function machine-dependent data. */
19726 static struct machine_function
*
19727 mips_init_machine_status (void)
19729 return ggc_cleared_alloc
<machine_function
> ();
19732 /* Return the processor associated with the given ISA level, or null
19733 if the ISA isn't valid. */
19735 static const struct mips_cpu_info
*
19736 mips_cpu_info_from_isa (int isa
)
19740 for (i
= 0; i
< ARRAY_SIZE (mips_cpu_info_table
); i
++)
19741 if (mips_cpu_info_table
[i
].isa
== isa
)
19742 return mips_cpu_info_table
+ i
;
19747 /* Return a mips_cpu_info entry determined by an option valued
19750 static const struct mips_cpu_info
*
19751 mips_cpu_info_from_opt (int opt
)
19755 case MIPS_ARCH_OPTION_FROM_ABI
:
19756 /* 'from-abi' selects the most compatible architecture for the
19757 given ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit
19758 ABIs. For the EABIs, we have to decide whether we're using
19759 the 32-bit or 64-bit version. */
19760 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS
? 1
19761 : ABI_NEEDS_64BIT_REGS
? 3
19762 : (TARGET_64BIT
? 3 : 1));
19764 case MIPS_ARCH_OPTION_NATIVE
:
19765 gcc_unreachable ();
19768 return &mips_cpu_info_table
[opt
];
19772 /* Return a default mips_cpu_info entry, given that no -march= option
19773 was explicitly specified. */
19775 static const struct mips_cpu_info
*
19776 mips_default_arch (void)
19778 #if defined (MIPS_CPU_STRING_DEFAULT)
19780 for (i
= 0; i
< ARRAY_SIZE (mips_cpu_info_table
); i
++)
19781 if (strcmp (mips_cpu_info_table
[i
].name
, MIPS_CPU_STRING_DEFAULT
) == 0)
19782 return mips_cpu_info_table
+ i
;
19783 gcc_unreachable ();
19784 #elif defined (MIPS_ISA_DEFAULT)
19785 return mips_cpu_info_from_isa (MIPS_ISA_DEFAULT
);
19787 /* 'from-abi' makes a good default: you get whatever the ABI
19789 return mips_cpu_info_from_opt (MIPS_ARCH_OPTION_FROM_ABI
);
19793 /* Set up globals to generate code for the ISA or processor
19794 described by INFO. */
19797 mips_set_architecture (const struct mips_cpu_info
*info
)
19801 mips_arch_info
= info
;
19802 mips_arch
= info
->cpu
;
19803 mips_isa
= info
->isa
;
19807 mips_isa_rev
= (mips_isa
& 31) + 1;
19811 /* Likewise for tuning. */
19814 mips_set_tune (const struct mips_cpu_info
*info
)
19818 mips_tune_info
= info
;
19819 mips_tune
= info
->cpu
;
19823 /* Implement TARGET_OPTION_OVERRIDE. */
19826 mips_option_override (void)
19828 int i
, start
, regno
, mode
;
19830 if (global_options_set
.x_mips_isa_option
)
19831 mips_isa_option_info
= &mips_cpu_info_table
[mips_isa_option
];
19833 #ifdef SUBTARGET_OVERRIDE_OPTIONS
19834 SUBTARGET_OVERRIDE_OPTIONS
;
19837 /* MIPS16 and microMIPS cannot coexist. */
19838 if (TARGET_MICROMIPS
&& TARGET_MIPS16
)
19839 error ("unsupported combination: %s", "-mips16 -mmicromips");
19841 /* Prohibit Paired-Single and MSA combination. This is software restriction
19842 rather than architectural. */
19843 if (ISA_HAS_MSA
&& TARGET_PAIRED_SINGLE_FLOAT
)
19844 error ("unsupported combination: %s", "-mmsa -mpaired-single");
19846 /* Save the base compression state and process flags as though we
19847 were generating uncompressed code. */
19848 mips_base_compression_flags
= TARGET_COMPRESSION
;
19849 target_flags
&= ~TARGET_COMPRESSION
;
19851 /* -mno-float overrides -mhard-float and -msoft-float. */
19852 if (TARGET_NO_FLOAT
)
19854 target_flags
|= MASK_SOFT_FLOAT_ABI
;
19855 target_flags_explicit
|= MASK_SOFT_FLOAT_ABI
;
19858 if (TARGET_FLIP_MIPS16
)
19859 TARGET_INTERLINK_COMPRESSED
= 1;
19861 /* Set the small data limit. */
19862 mips_small_data_threshold
= (global_options_set
.x_g_switch_value
19864 : MIPS_DEFAULT_GVALUE
);
19866 /* The following code determines the architecture and register size.
19867 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
19868 The GAS and GCC code should be kept in sync as much as possible. */
19870 if (global_options_set
.x_mips_arch_option
)
19871 mips_set_architecture (mips_cpu_info_from_opt (mips_arch_option
));
19873 if (mips_isa_option_info
!= 0)
19875 if (mips_arch_info
== 0)
19876 mips_set_architecture (mips_isa_option_info
);
19877 else if (mips_arch_info
->isa
!= mips_isa_option_info
->isa
)
19878 error ("%<-%s%> conflicts with the other architecture options, "
19879 "which specify a %s processor",
19880 mips_isa_option_info
->name
,
19881 mips_cpu_info_from_isa (mips_arch_info
->isa
)->name
);
19884 if (mips_arch_info
== 0)
19885 mips_set_architecture (mips_default_arch ());
19887 if (ABI_NEEDS_64BIT_REGS
&& !ISA_HAS_64BIT_REGS
)
19888 error ("%<-march=%s%> is not compatible with the selected ABI",
19889 mips_arch_info
->name
);
19891 /* Optimize for mips_arch, unless -mtune selects a different processor. */
19892 if (global_options_set
.x_mips_tune_option
)
19893 mips_set_tune (mips_cpu_info_from_opt (mips_tune_option
));
19895 if (mips_tune_info
== 0)
19896 mips_set_tune (mips_arch_info
);
19898 if ((target_flags_explicit
& MASK_64BIT
) != 0)
19900 /* The user specified the size of the integer registers. Make sure
19901 it agrees with the ABI and ISA. */
19902 if (TARGET_64BIT
&& !ISA_HAS_64BIT_REGS
)
19903 error ("%<-mgp64%> used with a 32-bit processor");
19904 else if (!TARGET_64BIT
&& ABI_NEEDS_64BIT_REGS
)
19905 error ("%<-mgp32%> used with a 64-bit ABI");
19906 else if (TARGET_64BIT
&& ABI_NEEDS_32BIT_REGS
)
19907 error ("%<-mgp64%> used with a 32-bit ABI");
19911 /* Infer the integer register size from the ABI and processor.
19912 Restrict ourselves to 32-bit registers if that's all the
19913 processor has, or if the ABI cannot handle 64-bit registers. */
19914 if (ABI_NEEDS_32BIT_REGS
|| !ISA_HAS_64BIT_REGS
)
19915 target_flags
&= ~MASK_64BIT
;
19917 target_flags
|= MASK_64BIT
;
19920 if ((target_flags_explicit
& MASK_FLOAT64
) != 0)
19922 if (mips_isa_rev
>= 6 && !TARGET_FLOAT64
)
19923 error ("the %qs architecture does not support %<-mfp32%>",
19924 mips_arch_info
->name
);
19925 else if (TARGET_SINGLE_FLOAT
&& TARGET_FLOAT64
)
19926 error ("unsupported combination: %s", "-mfp64 -msingle-float");
19927 else if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
&& !TARGET_FLOAT64
)
19928 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
19929 else if (!TARGET_64BIT
&& TARGET_FLOAT64
)
19931 if (!ISA_HAS_MXHC1
)
19932 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
19933 " the target supports the mfhc1 and mthc1 instructions");
19934 else if (mips_abi
!= ABI_32
)
19935 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
19941 /* -msingle-float selects 32-bit float registers. On r6 and later,
19942 -mdouble-float selects 64-bit float registers, since the old paired
19943 register model is not supported. In other cases the float registers
19944 should be the same size as the integer ones. */
19945 if (mips_isa_rev
>= 6 && TARGET_DOUBLE_FLOAT
&& !TARGET_FLOATXX
)
19946 target_flags
|= MASK_FLOAT64
;
19947 else if (TARGET_64BIT
&& TARGET_DOUBLE_FLOAT
)
19948 target_flags
|= MASK_FLOAT64
;
19949 else if (mips_abi
== ABI_32
&& ISA_HAS_MSA
&& !TARGET_FLOATXX
)
19950 target_flags
|= MASK_FLOAT64
;
19952 target_flags
&= ~MASK_FLOAT64
;
19955 if (mips_abi
!= ABI_32
&& TARGET_FLOATXX
)
19956 error ("%<-mfpxx%> can only be used with the o32 ABI");
19957 else if (TARGET_FLOAT64
&& TARGET_FLOATXX
)
19958 error ("unsupported combination: %s", "-mfp64 -mfpxx");
19959 else if (ISA_MIPS1
&& !TARGET_FLOAT32
)
19960 error ("%<-march=%s%> requires %<-mfp32%>", mips_arch_info
->name
);
19961 else if (TARGET_FLOATXX
&& !mips_lra_flag
)
19962 error ("%<-mfpxx%> requires %<-mlra%>");
19964 /* End of code shared with GAS. */
19966 /* The R5900 FPU only supports single precision. */
19967 if (TARGET_MIPS5900
&& TARGET_HARD_FLOAT_ABI
&& TARGET_DOUBLE_FLOAT
)
19968 error ("unsupported combination: %s",
19969 "-march=r5900 -mhard-float -mdouble-float");
19971 /* If a -mlong* option was given, check that it matches the ABI,
19972 otherwise infer the -mlong* setting from the other options. */
19973 if ((target_flags_explicit
& MASK_LONG64
) != 0)
19977 if (mips_abi
== ABI_N32
)
19978 error ("%qs is incompatible with %qs", "-mabi=n32", "-mlong64");
19979 else if (mips_abi
== ABI_32
)
19980 error ("%qs is incompatible with %qs", "-mabi=32", "-mlong64");
19981 else if (mips_abi
== ABI_O64
&& TARGET_ABICALLS
)
19982 /* We have traditionally allowed non-abicalls code to use
19983 an LP64 form of o64. However, it would take a bit more
19984 effort to support the combination of 32-bit GOT entries
19985 and 64-bit pointers, so we treat the abicalls case as
19987 error ("the combination of %qs and %qs is incompatible with %qs",
19988 "-mabi=o64", "-mabicalls", "-mlong64");
19992 if (mips_abi
== ABI_64
)
19993 error ("%qs is incompatible with %qs", "-mabi=64", "-mlong32");
19998 if ((mips_abi
== ABI_EABI
&& TARGET_64BIT
) || mips_abi
== ABI_64
)
19999 target_flags
|= MASK_LONG64
;
20001 target_flags
&= ~MASK_LONG64
;
20004 if (!TARGET_OLDABI
)
20005 flag_pcc_struct_return
= 0;
20007 /* Decide which rtx_costs structure to use. */
20009 mips_cost
= &mips_rtx_cost_optimize_size
;
20011 mips_cost
= &mips_rtx_cost_data
[mips_tune
];
20013 /* If the user hasn't specified a branch cost, use the processor's
20015 if (mips_branch_cost
== 0)
20016 mips_branch_cost
= mips_cost
->branch_cost
;
20018 /* If neither -mbranch-likely nor -mno-branch-likely was given
20019 on the command line, set MASK_BRANCHLIKELY based on the target
20020 architecture and tuning flags. Annulled delay slots are a
20021 size win, so we only consider the processor-specific tuning
20022 for !optimize_size. */
20023 if ((target_flags_explicit
& MASK_BRANCHLIKELY
) == 0)
20025 if (ISA_HAS_BRANCHLIKELY
20027 && (mips_tune_info
->tune_flags
20028 & PTF_AVOID_BRANCHLIKELY_SIZE
) == 0)
20031 && (mips_tune_info
->tune_flags
20032 & PTF_AVOID_BRANCHLIKELY_SPEED
) == 0)
20033 || (mips_tune_info
->tune_flags
20034 & PTF_AVOID_BRANCHLIKELY_ALWAYS
) == 0))
20035 target_flags
|= MASK_BRANCHLIKELY
;
20037 target_flags
&= ~MASK_BRANCHLIKELY
;
20039 else if (TARGET_BRANCHLIKELY
&& !ISA_HAS_BRANCHLIKELY
)
20040 warning (0, "the %qs architecture does not support branch-likely"
20041 " instructions", mips_arch_info
->name
);
20043 /* If the user hasn't specified -mimadd or -mno-imadd set
20044 MASK_IMADD based on the target architecture and tuning
20046 if ((target_flags_explicit
& MASK_IMADD
) == 0)
20048 if (ISA_HAS_MADD_MSUB
&&
20049 (mips_tune_info
->tune_flags
& PTF_AVOID_IMADD
) == 0)
20050 target_flags
|= MASK_IMADD
;
20052 target_flags
&= ~MASK_IMADD
;
20054 else if (TARGET_IMADD
&& !ISA_HAS_MADD_MSUB
)
20055 warning (0, "the %qs architecture does not support madd or msub"
20056 " instructions", mips_arch_info
->name
);
20058 /* If neither -modd-spreg nor -mno-odd-spreg was given on the command
20059 line, set MASK_ODD_SPREG based on the ISA and ABI. */
20060 if ((target_flags_explicit
& MASK_ODD_SPREG
) == 0)
20062 /* Disable TARGET_ODD_SPREG when using the o32 FPXX ABI. */
20063 if (!ISA_HAS_ODD_SPREG
|| TARGET_FLOATXX
)
20064 target_flags
&= ~MASK_ODD_SPREG
;
20066 target_flags
|= MASK_ODD_SPREG
;
20068 else if (TARGET_ODD_SPREG
&& !ISA_HAS_ODD_SPREG
)
20069 warning (0, "the %qs architecture does not support odd single-precision"
20070 " registers", mips_arch_info
->name
);
20072 if (!TARGET_ODD_SPREG
&& TARGET_64BIT
)
20074 error ("unsupported combination: %s", "-mgp64 -mno-odd-spreg");
20075 /* Allow compilation to continue further even though invalid output
20076 will be produced. */
20077 target_flags
|= MASK_ODD_SPREG
;
20080 if (!ISA_HAS_COMPACT_BRANCHES
&& mips_cb
== MIPS_CB_ALWAYS
)
20082 error ("unsupported combination: %qs%s %s",
20083 mips_arch_info
->name
, TARGET_MICROMIPS
? " -mmicromips" : "",
20084 "-mcompact-branches=always");
20086 else if (!ISA_HAS_DELAY_SLOTS
&& mips_cb
== MIPS_CB_NEVER
)
20088 error ("unsupported combination: %qs%s %s",
20089 mips_arch_info
->name
, TARGET_MICROMIPS
? " -mmicromips" : "",
20090 "-mcompact-branches=never");
20093 /* Require explicit relocs for MIPS R6 onwards. This enables simplification
20094 of the compact branch and jump support through the backend. */
20095 if (!TARGET_EXPLICIT_RELOCS
&& mips_isa_rev
>= 6)
20097 error ("unsupported combination: %qs %s",
20098 mips_arch_info
->name
, "-mno-explicit-relocs");
20101 /* The effect of -mabicalls isn't defined for the EABI. */
20102 if (mips_abi
== ABI_EABI
&& TARGET_ABICALLS
)
20104 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
20105 target_flags
&= ~MASK_ABICALLS
;
20108 /* PIC requires -mabicalls. */
20111 if (mips_abi
== ABI_EABI
)
20112 error ("cannot generate position-independent code for %qs",
20114 else if (!TARGET_ABICALLS
)
20115 error ("position-independent code requires %qs", "-mabicalls");
20118 if (TARGET_ABICALLS_PIC2
)
20119 /* We need to set flag_pic for executables as well as DSOs
20120 because we may reference symbols that are not defined in
20121 the final executable. (MIPS does not use things like
20122 copy relocs, for example.)
20124 There is a body of code that uses __PIC__ to distinguish
20125 between -mabicalls and -mno-abicalls code. The non-__PIC__
20126 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
20127 long as any indirect jumps use $25. */
20130 /* -mvr4130-align is a "speed over size" optimization: it usually produces
20131 faster code, but at the expense of more nops. Enable it at -O3 and
20133 if (optimize
> 2 && (target_flags_explicit
& MASK_VR4130_ALIGN
) == 0)
20134 target_flags
|= MASK_VR4130_ALIGN
;
20136 /* Prefer a call to memcpy over inline code when optimizing for size,
20137 though see MOVE_RATIO in mips.h. */
20138 if (optimize_size
&& (target_flags_explicit
& MASK_MEMCPY
) == 0)
20139 target_flags
|= MASK_MEMCPY
;
20141 /* If we have a nonzero small-data limit, check that the -mgpopt
20142 setting is consistent with the other target flags. */
20143 if (mips_small_data_threshold
> 0)
20147 if (!TARGET_EXPLICIT_RELOCS
)
20148 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
20150 TARGET_LOCAL_SDATA
= false;
20151 TARGET_EXTERN_SDATA
= false;
20155 if (TARGET_VXWORKS_RTP
)
20156 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
20158 if (TARGET_ABICALLS
)
20159 warning (0, "cannot use small-data accesses for %qs",
20164 /* Set NaN and ABS defaults. */
20165 if (mips_nan
== MIPS_IEEE_754_DEFAULT
&& !ISA_HAS_IEEE_754_LEGACY
)
20166 mips_nan
= MIPS_IEEE_754_2008
;
20167 if (mips_abs
== MIPS_IEEE_754_DEFAULT
&& !ISA_HAS_IEEE_754_LEGACY
)
20168 mips_abs
= MIPS_IEEE_754_2008
;
20170 /* Check for IEEE 754 legacy/2008 support. */
20171 if ((mips_nan
== MIPS_IEEE_754_LEGACY
20172 || mips_abs
== MIPS_IEEE_754_LEGACY
)
20173 && !ISA_HAS_IEEE_754_LEGACY
)
20174 warning (0, "the %qs architecture does not support %<-m%s=legacy%>",
20175 mips_arch_info
->name
,
20176 mips_nan
== MIPS_IEEE_754_LEGACY
? "nan" : "abs");
20178 if ((mips_nan
== MIPS_IEEE_754_2008
20179 || mips_abs
== MIPS_IEEE_754_2008
)
20180 && !ISA_HAS_IEEE_754_2008
)
20181 warning (0, "the %qs architecture does not support %<-m%s=2008%>",
20182 mips_arch_info
->name
,
20183 mips_nan
== MIPS_IEEE_754_2008
? "nan" : "abs");
20185 /* Pre-IEEE 754-2008 MIPS hardware has a quirky almost-IEEE format
20186 for all its floating point. */
20187 if (mips_nan
!= MIPS_IEEE_754_2008
)
20189 REAL_MODE_FORMAT (SFmode
) = &mips_single_format
;
20190 REAL_MODE_FORMAT (DFmode
) = &mips_double_format
;
20191 REAL_MODE_FORMAT (TFmode
) = &mips_quad_format
;
20194 /* Make sure that the user didn't turn off paired single support when
20195 MIPS-3D support is requested. */
20197 && (target_flags_explicit
& MASK_PAIRED_SINGLE_FLOAT
)
20198 && !TARGET_PAIRED_SINGLE_FLOAT
)
20199 error ("%<-mips3d%> requires %<-mpaired-single%>");
20201 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
20203 target_flags
|= MASK_PAIRED_SINGLE_FLOAT
;
20205 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
20206 and TARGET_HARD_FLOAT_ABI are both true. */
20207 if (TARGET_PAIRED_SINGLE_FLOAT
&& !(TARGET_FLOAT64
&& TARGET_HARD_FLOAT_ABI
))
20209 error ("%qs must be used with %qs",
20210 TARGET_MIPS3D
? "-mips3d" : "-mpaired-single",
20211 TARGET_HARD_FLOAT_ABI
? "-mfp64" : "-mhard-float");
20212 target_flags
&= ~MASK_PAIRED_SINGLE_FLOAT
;
20216 /* Make sure that when ISA_HAS_MSA is true, TARGET_FLOAT64 and
20217 TARGET_HARD_FLOAT_ABI and both true. */
20218 if (ISA_HAS_MSA
&& !(TARGET_FLOAT64
&& TARGET_HARD_FLOAT_ABI
))
20219 error ("%<-mmsa%> must be used with %<-mfp64%> and %<-mhard-float%>");
20221 /* Make sure that -mpaired-single is only used on ISAs that support it.
20222 We must disable it otherwise since it relies on other ISA properties
20223 like ISA_HAS_8CC having their normal values. */
20224 if (TARGET_PAIRED_SINGLE_FLOAT
&& !ISA_HAS_PAIRED_SINGLE
)
20226 error ("the %qs architecture does not support paired-single"
20227 " instructions", mips_arch_info
->name
);
20228 target_flags
&= ~MASK_PAIRED_SINGLE_FLOAT
;
20232 if (mips_r10k_cache_barrier
!= R10K_CACHE_BARRIER_NONE
20233 && !TARGET_CACHE_BUILTIN
)
20235 error ("%qs requires a target that provides the %qs instruction",
20236 "-mr10k-cache-barrier", "cache");
20237 mips_r10k_cache_barrier
= R10K_CACHE_BARRIER_NONE
;
20240 /* If TARGET_DSPR2, enable TARGET_DSP. */
20244 if (TARGET_DSP
&& mips_isa_rev
>= 6)
20246 error ("the %qs architecture does not support DSP instructions",
20247 mips_arch_info
->name
);
20248 TARGET_DSP
= false;
20249 TARGET_DSPR2
= false;
20252 /* Make sure that when TARGET_LOONGSON_MMI is true, TARGET_HARD_FLOAT_ABI
20253 is true. In o32 pairs of floating-point registers provide 64-bit
20255 if (TARGET_LOONGSON_MMI
&& !TARGET_HARD_FLOAT_ABI
)
20256 error ("%<-mloongson-mmi%> must be used with %<-mhard-float%>");
20258 /* If TARGET_LOONGSON_EXT2, enable TARGET_LOONGSON_EXT. */
20259 if (TARGET_LOONGSON_EXT2
)
20261 /* Make sure that when TARGET_LOONGSON_EXT2 is true, TARGET_LOONGSON_EXT
20262 is true. If a user explicitly says -mloongson-ext2 -mno-loongson-ext
20263 then that is an error. */
20264 if (!TARGET_LOONGSON_EXT
20265 && (target_flags_explicit
& MASK_LOONGSON_EXT
) != 0)
20266 error ("%<-mloongson-ext2%> must be used with %<-mloongson-ext%>");
20267 target_flags
|= MASK_LOONGSON_EXT
;
20270 /* .eh_frame addresses should be the same width as a C pointer.
20271 Most MIPS ABIs support only one pointer size, so the assembler
20272 will usually know exactly how big an .eh_frame address is.
20274 Unfortunately, this is not true of the 64-bit EABI. The ABI was
20275 originally defined to use 64-bit pointers (i.e. it is LP64), and
20276 this is still the default mode. However, we also support an n32-like
20277 ILP32 mode, which is selected by -mlong32. The problem is that the
20278 assembler has traditionally not had an -mlong option, so it has
20279 traditionally not known whether we're using the ILP32 or LP64 form.
20281 As it happens, gas versions up to and including 2.19 use _32-bit_
20282 addresses for EABI64 .cfi_* directives. This is wrong for the
20283 default LP64 mode, so we can't use the directives by default.
20284 Moreover, since gas's current behavior is at odds with gcc's
20285 default behavior, it seems unwise to rely on future versions
20286 of gas behaving the same way. We therefore avoid using .cfi
20287 directives for -mlong32 as well. */
20288 if (mips_abi
== ABI_EABI
&& TARGET_64BIT
)
20289 flag_dwarf2_cfi_asm
= 0;
20291 /* .cfi_* directives generate a read-only section, so fall back on
20292 manual .eh_frame creation if we need the section to be writable. */
20293 if (TARGET_WRITABLE_EH_FRAME
)
20294 flag_dwarf2_cfi_asm
= 0;
20296 mips_init_print_operand_punct ();
20298 /* Set up array to map GCC register number to debug register number.
20299 Ignore the special purpose register numbers. */
20301 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
20303 mips_dbx_regno
[i
] = IGNORED_DWARF_REGNUM
;
20304 if (GP_REG_P (i
) || FP_REG_P (i
) || ALL_COP_REG_P (i
))
20305 mips_dwarf_regno
[i
] = i
;
20307 mips_dwarf_regno
[i
] = INVALID_REGNUM
;
20310 start
= GP_DBX_FIRST
- GP_REG_FIRST
;
20311 for (i
= GP_REG_FIRST
; i
<= GP_REG_LAST
; i
++)
20312 mips_dbx_regno
[i
] = i
+ start
;
20314 start
= FP_DBX_FIRST
- FP_REG_FIRST
;
20315 for (i
= FP_REG_FIRST
; i
<= FP_REG_LAST
; i
++)
20316 mips_dbx_regno
[i
] = i
+ start
;
20318 /* Accumulator debug registers use big-endian ordering. */
20319 mips_dbx_regno
[HI_REGNUM
] = MD_DBX_FIRST
+ 0;
20320 mips_dbx_regno
[LO_REGNUM
] = MD_DBX_FIRST
+ 1;
20321 mips_dwarf_regno
[HI_REGNUM
] = MD_REG_FIRST
+ 0;
20322 mips_dwarf_regno
[LO_REGNUM
] = MD_REG_FIRST
+ 1;
20323 for (i
= DSP_ACC_REG_FIRST
; i
<= DSP_ACC_REG_LAST
; i
+= 2)
20325 mips_dwarf_regno
[i
+ TARGET_LITTLE_ENDIAN
] = i
;
20326 mips_dwarf_regno
[i
+ TARGET_BIG_ENDIAN
] = i
+ 1;
20329 /* Set up mips_hard_regno_mode_ok. */
20330 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
20331 for (regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
20332 mips_hard_regno_mode_ok_p
[mode
][regno
]
20333 = mips_hard_regno_mode_ok_uncached (regno
, (machine_mode
) mode
);
20335 /* Function to allocate machine-dependent function status. */
20336 init_machine_status
= &mips_init_machine_status
;
20338 /* Default to working around R4000 errata only if the processor
20339 was selected explicitly. */
20340 if ((target_flags_explicit
& MASK_FIX_R4000
) == 0
20341 && strcmp (mips_arch_info
->name
, "r4000") == 0)
20342 target_flags
|= MASK_FIX_R4000
;
20344 /* Default to working around R4400 errata only if the processor
20345 was selected explicitly. */
20346 if ((target_flags_explicit
& MASK_FIX_R4400
) == 0
20347 && strcmp (mips_arch_info
->name
, "r4400") == 0)
20348 target_flags
|= MASK_FIX_R4400
;
20350 /* Default to working around R5900 errata only if the processor
20351 was selected explicitly. */
20352 if ((target_flags_explicit
& MASK_FIX_R5900
) == 0
20353 && strcmp (mips_arch_info
->name
, "r5900") == 0)
20354 target_flags
|= MASK_FIX_R5900
;
20356 /* Default to working around R10000 errata only if the processor
20357 was selected explicitly. */
20358 if ((target_flags_explicit
& MASK_FIX_R10000
) == 0
20359 && strcmp (mips_arch_info
->name
, "r10000") == 0)
20360 target_flags
|= MASK_FIX_R10000
;
20362 /* Make sure that branch-likely instructions available when using
20363 -mfix-r10000. The instructions are not available if either:
20365 1. -mno-branch-likely was passed.
20366 2. The selected ISA does not support branch-likely and
20367 the command line does not include -mbranch-likely. */
20368 if (TARGET_FIX_R10000
20369 && ((target_flags_explicit
& MASK_BRANCHLIKELY
) == 0
20370 ? !ISA_HAS_BRANCHLIKELY
20371 : !TARGET_BRANCHLIKELY
))
20372 sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
20374 if (TARGET_SYNCI
&& !ISA_HAS_SYNCI
)
20376 warning (0, "the %qs architecture does not support the synci "
20377 "instruction", mips_arch_info
->name
);
20378 target_flags
&= ~MASK_SYNCI
;
20381 /* Only optimize PIC indirect calls if they are actually required. */
20382 if (!TARGET_USE_GOT
|| !TARGET_EXPLICIT_RELOCS
)
20383 target_flags
&= ~MASK_RELAX_PIC_CALLS
;
20385 /* Save base state of options. */
20386 mips_base_target_flags
= target_flags
;
20387 mips_base_schedule_insns
= flag_schedule_insns
;
20388 mips_base_reorder_blocks_and_partition
= flag_reorder_blocks_and_partition
;
20389 mips_base_move_loop_invariants
= flag_move_loop_invariants
;
20390 mips_base_align_loops
= str_align_loops
;
20391 mips_base_align_jumps
= str_align_jumps
;
20392 mips_base_align_functions
= str_align_functions
;
20394 /* Now select the ISA mode.
20396 Do all CPP-sensitive stuff in uncompressed mode; we'll switch modes
20397 later if required. */
20398 mips_set_compression_mode (0);
20400 /* We register a second machine specific reorg pass after delay slot
20401 filling. Registering the pass must be done at start up. It's
20402 convenient to do it here. */
20403 opt_pass
*new_pass
= make_pass_mips_machine_reorg2 (g
);
20404 struct register_pass_info insert_pass_mips_machine_reorg2
=
20406 new_pass
, /* pass */
20407 "dbr", /* reference_pass_name */
20408 1, /* ref_pass_instance_number */
20409 PASS_POS_INSERT_AFTER
/* po_op */
20411 register_pass (&insert_pass_mips_machine_reorg2
);
20413 if (TARGET_HARD_FLOAT_ABI
&& TARGET_MIPS5900
)
20414 REAL_MODE_FORMAT (SFmode
) = &spu_single_format
;
20416 mips_register_frame_header_opt ();
20419 /* Swap the register information for registers I and I + 1, which
20420 currently have the wrong endianness. Note that the registers'
20421 fixedness and call-clobberedness might have been set on the
20425 mips_swap_registers (unsigned int i
)
20430 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
20431 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
20433 SWAP_INT (fixed_regs
[i
], fixed_regs
[i
+ 1]);
20434 SWAP_INT (call_used_regs
[i
], call_used_regs
[i
+ 1]);
20435 SWAP_STRING (reg_names
[i
], reg_names
[i
+ 1]);
20441 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
20444 mips_conditional_register_usage (void)
20449 /* These DSP control register fields are global. */
20450 global_regs
[CCDSP_PO_REGNUM
] = 1;
20451 global_regs
[CCDSP_SC_REGNUM
] = 1;
20454 accessible_reg_set
&= ~reg_class_contents
[DSP_ACC_REGS
];
20457 accessible_reg_set
&= ~reg_class_contents
[MD_REGS
];
20459 if (!TARGET_HARD_FLOAT
)
20460 accessible_reg_set
&= ~(reg_class_contents
[FP_REGS
]
20461 | reg_class_contents
[ST_REGS
]);
20462 else if (!ISA_HAS_8CC
)
20464 /* We only have a single condition-code register. We implement
20465 this by fixing all the condition-code registers and generating
20466 RTL that refers directly to ST_REG_FIRST. */
20467 accessible_reg_set
&= ~reg_class_contents
[ST_REGS
];
20469 SET_HARD_REG_BIT (accessible_reg_set
, FPSW_REGNUM
);
20470 fixed_regs
[FPSW_REGNUM
] = 1;
20474 /* In MIPS16 mode, we prohibit the unused $s registers, since they
20475 are call-saved, and saving them via a MIPS16 register would
20476 probably waste more time than just reloading the value.
20478 We permit the $t temporary registers when optimizing for speed
20479 but not when optimizing for space because using them results in
20480 code that is larger (but faster) then not using them. We do
20481 allow $24 (t8) because it is used in CMP and CMPI instructions
20482 and $25 (t9) because it is used as the function call address in
20485 fixed_regs
[18] = 1;
20486 fixed_regs
[19] = 1;
20487 fixed_regs
[20] = 1;
20488 fixed_regs
[21] = 1;
20489 fixed_regs
[22] = 1;
20490 fixed_regs
[23] = 1;
20491 fixed_regs
[26] = 1;
20492 fixed_regs
[27] = 1;
20493 fixed_regs
[30] = 1;
20498 fixed_regs
[10] = 1;
20499 fixed_regs
[11] = 1;
20500 fixed_regs
[12] = 1;
20501 fixed_regs
[13] = 1;
20502 fixed_regs
[14] = 1;
20503 fixed_regs
[15] = 1;
20506 /* Do not allow HI and LO to be treated as register operands.
20507 There are no MTHI or MTLO instructions (or any real need
20508 for them) and one-way registers cannot easily be reloaded. */
20509 operand_reg_set
&= ~reg_class_contents
[MD_REGS
];
20511 /* $f20-$f23 are call-clobbered for n64. */
20512 if (mips_abi
== ABI_64
)
20515 for (regno
= FP_REG_FIRST
+ 20; regno
< FP_REG_FIRST
+ 24; regno
++)
20516 call_used_regs
[regno
] = 1;
20518 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
20519 for n32 and o32 FP64. */
20520 if (mips_abi
== ABI_N32
20521 || (mips_abi
== ABI_32
20522 && TARGET_FLOAT64
))
20525 for (regno
= FP_REG_FIRST
+ 21; regno
<= FP_REG_FIRST
+ 31; regno
+=2)
20526 call_used_regs
[regno
] = 1;
20528 /* Make sure that double-register accumulator values are correctly
20529 ordered for the current endianness. */
20530 if (TARGET_LITTLE_ENDIAN
)
20532 unsigned int regno
;
20534 mips_swap_registers (MD_REG_FIRST
);
20535 for (regno
= DSP_ACC_REG_FIRST
; regno
<= DSP_ACC_REG_LAST
; regno
+= 2)
20536 mips_swap_registers (regno
);
20540 /* Implement EH_USES. */
20543 mips_eh_uses (unsigned int regno
)
20545 if (reload_completed
&& !TARGET_ABSOLUTE_JUMPS
)
20547 /* We need to force certain registers to be live in order to handle
20548 PIC long branches correctly. See mips_must_initialize_gp_p for
20550 if (mips_cfun_has_cprestore_slot_p ())
20552 if (regno
== CPRESTORE_SLOT_REGNUM
)
20557 if (cfun
->machine
->global_pointer
== regno
)
20565 /* Implement EPILOGUE_USES. */
20568 mips_epilogue_uses (unsigned int regno
)
20570 /* Say that the epilogue uses the return address register. Note that
20571 in the case of sibcalls, the values "used by the epilogue" are
20572 considered live at the start of the called function. */
20573 if (regno
== RETURN_ADDR_REGNUM
)
20576 /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
20577 See the comment above load_call<mode> for details. */
20578 if (TARGET_USE_GOT
&& (regno
) == GOT_VERSION_REGNUM
)
20581 /* An interrupt handler must preserve some registers that are
20582 ordinarily call-clobbered. */
20583 if (cfun
->machine
->interrupt_handler_p
20584 && mips_interrupt_extra_call_saved_reg_p (regno
))
20590 /* Return true if INSN needs to be wrapped in ".set noat".
20591 INSN has NOPERANDS operands, stored in OPVEC. */
20594 mips_need_noat_wrapper_p (rtx_insn
*insn
, rtx
*opvec
, int noperands
)
20596 if (recog_memoized (insn
) >= 0)
20598 subrtx_iterator::array_type array
;
20599 for (int i
= 0; i
< noperands
; i
++)
20600 FOR_EACH_SUBRTX (iter
, array
, opvec
[i
], NONCONST
)
20601 if (REG_P (*iter
) && REGNO (*iter
) == AT_REGNUM
)
20607 /* Implement FINAL_PRESCAN_INSN. Mark MIPS16 inline constant pools
20608 as data for the purpose of disassembly. For simplicity embed the
20609 pool's initial label number in the local symbol produced so that
20610 multiple pools within a single function end up marked with unique
20611 symbols. The label number is carried by the `consttable' insn
20612 emitted at the beginning of each pool. */
20615 mips_final_prescan_insn (rtx_insn
*insn
, rtx
*opvec
, int noperands
)
20618 && GET_CODE (PATTERN (insn
)) == UNSPEC_VOLATILE
20619 && XINT (PATTERN (insn
), 1) == UNSPEC_CONSTTABLE
)
20620 mips_set_text_contents_type (asm_out_file
, "__pool_",
20621 INTVAL (XVECEXP (PATTERN (insn
), 0, 0)),
20624 if (mips_need_noat_wrapper_p (insn
, opvec
, noperands
))
20625 mips_push_asm_switch (&mips_noat
);
20628 /* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. Reset text marking to
20629 code after a MIPS16 inline constant pool. Like with the beginning
20630 of a pool table use the pool's initial label number to keep symbols
20631 unique. The label number is carried by the `consttable_end' insn
20632 emitted at the end of each pool. */
20635 mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED
, rtx_insn
*insn
,
20636 rtx
*opvec
, int noperands
)
20638 if (mips_need_noat_wrapper_p (insn
, opvec
, noperands
))
20639 mips_pop_asm_switch (&mips_noat
);
20642 && GET_CODE (PATTERN (insn
)) == UNSPEC_VOLATILE
20643 && XINT (PATTERN (insn
), 1) == UNSPEC_CONSTTABLE_END
)
20645 rtx_insn
*next_insn
= next_real_nondebug_insn (insn
);
20646 bool code_p
= (next_insn
!= NULL
20647 && INSN_P (next_insn
)
20648 && (GET_CODE (PATTERN (next_insn
)) != UNSPEC_VOLATILE
20649 || XINT (PATTERN (next_insn
), 1) != UNSPEC_CONSTTABLE
));
20651 /* Switch content type depending on whether there is code beyond
20652 the constant pool. */
20653 mips_set_text_contents_type (asm_out_file
, "__pend_",
20654 INTVAL (XVECEXP (PATTERN (insn
), 0, 0)),
20659 /* Return the function that is used to expand the <u>mulsidi3 pattern.
20660 EXT_CODE is the code of the extension used. Return NULL if widening
20661 multiplication shouldn't be used. */
20664 mips_mulsidi3_gen_fn (enum rtx_code ext_code
)
20668 signed_p
= ext_code
== SIGN_EXTEND
;
20671 /* Don't use widening multiplication with MULT when we have DMUL. Even
20672 with the extension of its input operands DMUL is faster. Note that
20673 the extension is not needed for signed multiplication. In order to
20674 ensure that we always remove the redundant sign-extension in this
20675 case we still expand mulsidi3 for DMUL. */
20676 if (ISA_HAS_R6DMUL
)
20677 return signed_p
? gen_mulsidi3_64bit_r6dmul
: NULL
;
20679 return signed_p
? gen_mulsidi3_64bit_dmul
: NULL
;
20682 ? gen_mulsidi3_64bit_mips16
20683 : gen_umulsidi3_64bit_mips16
);
20684 if (TARGET_FIX_R4000
)
20686 return signed_p
? gen_mulsidi3_64bit
: gen_umulsidi3_64bit
;
20691 return (signed_p
? gen_mulsidi3_32bit_r6
: gen_umulsidi3_32bit_r6
);
20694 ? gen_mulsidi3_32bit_mips16
20695 : gen_umulsidi3_32bit_mips16
);
20696 if (TARGET_FIX_R4000
&& !ISA_HAS_DSP
)
20697 return signed_p
? gen_mulsidi3_32bit_r4000
: gen_umulsidi3_32bit_r4000
;
20698 return signed_p
? gen_mulsidi3_32bit
: gen_umulsidi3_32bit
;
20702 /* Return true if PATTERN matches the kind of instruction generated by
20703 umips_build_save_restore. SAVE_P is true for store. */
20706 umips_save_restore_pattern_p (bool save_p
, rtx pattern
)
20710 HOST_WIDE_INT first_offset
= 0;
20711 rtx first_base
= 0;
20712 unsigned int regmask
= 0;
20714 for (n
= 0; n
< XVECLEN (pattern
, 0); n
++)
20716 rtx set
, reg
, mem
, this_base
;
20717 HOST_WIDE_INT this_offset
;
20719 /* Check that we have a SET. */
20720 set
= XVECEXP (pattern
, 0, n
);
20721 if (GET_CODE (set
) != SET
)
20724 /* Check that the SET is a load (if restoring) or a store
20726 mem
= save_p
? SET_DEST (set
) : SET_SRC (set
);
20727 if (!MEM_P (mem
) || MEM_VOLATILE_P (mem
))
20730 /* Check that the address is the sum of base and a possibly-zero
20731 constant offset. Determine if the offset is in range. */
20732 mips_split_plus (XEXP (mem
, 0), &this_base
, &this_offset
);
20733 if (!REG_P (this_base
))
20738 if (!UMIPS_12BIT_OFFSET_P (this_offset
))
20740 first_base
= this_base
;
20741 first_offset
= this_offset
;
20745 /* Check that the save slots are consecutive. */
20746 if (REGNO (this_base
) != REGNO (first_base
)
20747 || this_offset
!= first_offset
+ UNITS_PER_WORD
* n
)
20751 /* Check that SET's other operand is a register. */
20752 reg
= save_p
? SET_SRC (set
) : SET_DEST (set
);
20756 regmask
|= 1 << REGNO (reg
);
20759 for (i
= 0; i
< ARRAY_SIZE (umips_swm_mask
); i
++)
20760 if (regmask
== umips_swm_mask
[i
])
20766 /* Return the assembly instruction for microMIPS LWM or SWM.
20767 SAVE_P and PATTERN are as for umips_save_restore_pattern_p. */
20770 umips_output_save_restore (bool save_p
, rtx pattern
)
20772 static char buffer
[300];
20775 HOST_WIDE_INT offset
;
20776 rtx base
, mem
, set
, last_set
, last_reg
;
20778 /* Parse the pattern. */
20779 gcc_assert (umips_save_restore_pattern_p (save_p
, pattern
));
20781 s
= strcpy (buffer
, save_p
? "swm\t" : "lwm\t");
20783 n
= XVECLEN (pattern
, 0);
20785 set
= XVECEXP (pattern
, 0, 0);
20786 mem
= save_p
? SET_DEST (set
) : SET_SRC (set
);
20787 mips_split_plus (XEXP (mem
, 0), &base
, &offset
);
20789 last_set
= XVECEXP (pattern
, 0, n
- 1);
20790 last_reg
= save_p
? SET_SRC (last_set
) : SET_DEST (last_set
);
20792 if (REGNO (last_reg
) == 31)
20795 gcc_assert (n
<= 9);
20799 s
+= sprintf (s
, "%s,", reg_names
[16]);
20801 s
+= sprintf (s
, "%s-%s,", reg_names
[16], reg_names
[15 + n
]);
20803 s
+= sprintf (s
, "%s-%s,%s,", reg_names
[16], reg_names
[23],
20806 if (REGNO (last_reg
) == 31)
20807 s
+= sprintf (s
, "%s,", reg_names
[31]);
20809 s
+= sprintf (s
, "%d(%s)", (int)offset
, reg_names
[REGNO (base
)]);
20813 /* Return true if MEM1 and MEM2 use the same base register, and the
20814 offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the
20815 register into (from) which the contents of MEM1 will be loaded
20816 (stored), depending on the value of LOAD_P.
20817 SWAP_P is true when the 1st and 2nd instructions are swapped. */
20820 umips_load_store_pair_p_1 (bool load_p
, bool swap_p
,
20821 rtx first_reg
, rtx mem1
, rtx mem2
)
20824 HOST_WIDE_INT offset1
, offset2
;
20826 if (!MEM_P (mem1
) || !MEM_P (mem2
))
20829 mips_split_plus (XEXP (mem1
, 0), &base1
, &offset1
);
20830 mips_split_plus (XEXP (mem2
, 0), &base2
, &offset2
);
20832 if (!REG_P (base1
) || !rtx_equal_p (base1
, base2
))
20835 /* Avoid invalid load pair instructions. */
20836 if (load_p
&& REGNO (first_reg
) == REGNO (base1
))
20839 /* We must avoid this case for anti-dependence.
20842 first_reg is $2, but the base is $3. */
20845 && REGNO (first_reg
) + 1 == REGNO (base1
))
20848 if (offset2
!= offset1
+ 4)
20851 if (!UMIPS_12BIT_OFFSET_P (offset1
))
20858 mips_load_store_bonding_p (rtx
*operands
, machine_mode mode
, bool load_p
)
20860 rtx reg1
, reg2
, mem1
, mem2
, base1
, base2
;
20861 enum reg_class rc1
, rc2
;
20862 HOST_WIDE_INT offset1
, offset2
;
20866 reg1
= operands
[0];
20867 reg2
= operands
[2];
20868 mem1
= operands
[1];
20869 mem2
= operands
[3];
20873 reg1
= operands
[1];
20874 reg2
= operands
[3];
20875 mem1
= operands
[0];
20876 mem2
= operands
[2];
20879 if (mips_address_insns (XEXP (mem1
, 0), mode
, false) == 0
20880 || mips_address_insns (XEXP (mem2
, 0), mode
, false) == 0)
20883 mips_split_plus (XEXP (mem1
, 0), &base1
, &offset1
);
20884 mips_split_plus (XEXP (mem2
, 0), &base2
, &offset2
);
20886 /* Base regs do not match. */
20887 if (!REG_P (base1
) || !rtx_equal_p (base1
, base2
))
20890 /* Either of the loads is clobbering base register. It is legitimate to bond
20891 loads if second load clobbers base register. However, hardware does not
20892 support such bonding. */
20894 && (REGNO (reg1
) == REGNO (base1
)
20895 || (REGNO (reg2
) == REGNO (base1
))))
20898 /* Loading in same registers. */
20900 && REGNO (reg1
) == REGNO (reg2
))
20903 /* The loads/stores are not of same type. */
20904 rc1
= REGNO_REG_CLASS (REGNO (reg1
));
20905 rc2
= REGNO_REG_CLASS (REGNO (reg2
));
20907 && !reg_class_subset_p (rc1
, rc2
)
20908 && !reg_class_subset_p (rc2
, rc1
))
20911 if (abs (offset1
- offset2
) != GET_MODE_SIZE (mode
))
20917 /* OPERANDS describes the operands to a pair of SETs, in the order
20918 dest1, src1, dest2, src2. Return true if the operands can be used
20919 in an LWP or SWP instruction; LOAD_P says which. */
20922 umips_load_store_pair_p (bool load_p
, rtx
*operands
)
20924 rtx reg1
, reg2
, mem1
, mem2
;
20928 reg1
= operands
[0];
20929 reg2
= operands
[2];
20930 mem1
= operands
[1];
20931 mem2
= operands
[3];
20935 reg1
= operands
[1];
20936 reg2
= operands
[3];
20937 mem1
= operands
[0];
20938 mem2
= operands
[2];
20941 if (REGNO (reg2
) == REGNO (reg1
) + 1)
20942 return umips_load_store_pair_p_1 (load_p
, false, reg1
, mem1
, mem2
);
20944 if (REGNO (reg1
) == REGNO (reg2
) + 1)
20945 return umips_load_store_pair_p_1 (load_p
, true, reg2
, mem2
, mem1
);
20950 /* Return the assembly instruction for a microMIPS LWP or SWP in which
20951 the first register is REG and the first memory slot is MEM.
20952 LOAD_P is true for LWP. */
20955 umips_output_load_store_pair_1 (bool load_p
, rtx reg
, rtx mem
)
20957 rtx ops
[] = {reg
, mem
};
20960 output_asm_insn ("lwp\t%0,%1", ops
);
20962 output_asm_insn ("swp\t%0,%1", ops
);
20965 /* Output the assembly instruction for a microMIPS LWP or SWP instruction.
20966 LOAD_P and OPERANDS are as for umips_load_store_pair_p. */
20969 umips_output_load_store_pair (bool load_p
, rtx
*operands
)
20971 rtx reg1
, reg2
, mem1
, mem2
;
20974 reg1
= operands
[0];
20975 reg2
= operands
[2];
20976 mem1
= operands
[1];
20977 mem2
= operands
[3];
20981 reg1
= operands
[1];
20982 reg2
= operands
[3];
20983 mem1
= operands
[0];
20984 mem2
= operands
[2];
20987 if (REGNO (reg2
) == REGNO (reg1
) + 1)
20989 umips_output_load_store_pair_1 (load_p
, reg1
, mem1
);
20993 gcc_assert (REGNO (reg1
) == REGNO (reg2
) + 1);
20994 umips_output_load_store_pair_1 (load_p
, reg2
, mem2
);
20997 /* Return true if REG1 and REG2 match the criteria for a movep insn. */
21000 umips_movep_target_p (rtx reg1
, rtx reg2
)
21002 int regno1
, regno2
, pair
;
21004 static const int match
[8] = {
21005 0x00000060, /* 5, 6 */
21006 0x000000a0, /* 5, 7 */
21007 0x000000c0, /* 6, 7 */
21008 0x00200010, /* 4, 21 */
21009 0x00400010, /* 4, 22 */
21010 0x00000030, /* 4, 5 */
21011 0x00000050, /* 4, 6 */
21012 0x00000090 /* 4, 7 */
21015 if (!REG_P (reg1
) || !REG_P (reg2
))
21018 regno1
= REGNO (reg1
);
21019 regno2
= REGNO (reg2
);
21021 if (!GP_REG_P (regno1
) || !GP_REG_P (regno2
))
21024 pair
= (1 << regno1
) | (1 << regno2
);
21026 for (i
= 0; i
< ARRAY_SIZE (match
); i
++)
21027 if (pair
== match
[i
])
21033 /* Return the size in bytes of the trampoline code, padded to
21034 TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target
21035 function address immediately follow. */
21038 mips_trampoline_code_size (void)
21040 if (TARGET_USE_PIC_FN_ADDR_REG
)
21042 else if (ptr_mode
== DImode
)
21044 else if (ISA_HAS_LOAD_DELAY
)
21050 /* Implement TARGET_TRAMPOLINE_INIT. */
21053 mips_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
21055 rtx addr
, end_addr
, high
, low
, opcode
, mem
;
21058 HOST_WIDE_INT end_addr_offset
, static_chain_offset
, target_function_offset
;
21060 /* Work out the offsets of the pointers from the start of the
21061 trampoline code. */
21062 end_addr_offset
= mips_trampoline_code_size ();
21063 static_chain_offset
= end_addr_offset
;
21064 target_function_offset
= static_chain_offset
+ GET_MODE_SIZE (ptr_mode
);
21066 /* Get pointers to the beginning and end of the code block. */
21067 addr
= force_reg (Pmode
, XEXP (m_tramp
, 0));
21068 end_addr
= mips_force_binary (Pmode
, PLUS
, addr
, GEN_INT (end_addr_offset
));
21070 #define OP(X) gen_int_mode (X, SImode)
21072 /* Build up the code in TRAMPOLINE. */
21074 if (TARGET_USE_PIC_FN_ADDR_REG
)
21076 /* $25 contains the address of the trampoline. Emit code of the form:
21078 l[wd] $1, target_function_offset($25)
21079 l[wd] $static_chain, static_chain_offset($25)
21082 trampoline
[i
++] = OP (MIPS_LOAD_PTR (AT_REGNUM
,
21083 target_function_offset
,
21084 PIC_FUNCTION_ADDR_REGNUM
));
21085 trampoline
[i
++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM
,
21086 static_chain_offset
,
21087 PIC_FUNCTION_ADDR_REGNUM
));
21088 trampoline
[i
++] = OP (MIPS_JR (AT_REGNUM
));
21089 trampoline
[i
++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM
, AT_REGNUM
));
21091 else if (ptr_mode
== DImode
)
21093 /* It's too cumbersome to create the full 64-bit address, so let's
21099 1: l[wd] $25, target_function_offset - 12($31)
21100 l[wd] $static_chain, static_chain_offset - 12($31)
21104 where 12 is the offset of "1:" from the start of the code block. */
21105 trampoline
[i
++] = OP (MIPS_MOVE (AT_REGNUM
, RETURN_ADDR_REGNUM
));
21106 trampoline
[i
++] = OP (MIPS_BAL (1));
21107 trampoline
[i
++] = OP (MIPS_NOP
);
21108 trampoline
[i
++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM
,
21109 target_function_offset
- 12,
21110 RETURN_ADDR_REGNUM
));
21111 trampoline
[i
++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM
,
21112 static_chain_offset
- 12,
21113 RETURN_ADDR_REGNUM
));
21114 trampoline
[i
++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM
));
21115 trampoline
[i
++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM
, AT_REGNUM
));
21119 /* If the target has load delays, emit:
21121 lui $1, %hi(end_addr)
21122 lw $25, %lo(end_addr + ...)($1)
21123 lw $static_chain, %lo(end_addr + ...)($1)
21129 lui $1, %hi(end_addr)
21130 lw $25, %lo(end_addr + ...)($1)
21132 lw $static_chain, %lo(end_addr + ...)($1). */
21134 /* Split END_ADDR into %hi and %lo values. Trampolines are aligned
21135 to 64 bits, so the %lo value will have the bottom 3 bits clear. */
21136 high
= expand_simple_binop (SImode
, PLUS
, end_addr
, GEN_INT (0x8000),
21137 NULL
, false, OPTAB_WIDEN
);
21138 high
= expand_simple_binop (SImode
, LSHIFTRT
, high
, GEN_INT (16),
21139 NULL
, false, OPTAB_WIDEN
);
21140 low
= convert_to_mode (SImode
, gen_lowpart (HImode
, end_addr
), true);
21142 /* Emit the LUI. */
21143 opcode
= OP (MIPS_LUI (AT_REGNUM
, 0));
21144 trampoline
[i
++] = expand_simple_binop (SImode
, IOR
, opcode
, high
,
21145 NULL
, false, OPTAB_WIDEN
);
21147 /* Emit the load of the target function. */
21148 opcode
= OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM
,
21149 target_function_offset
- end_addr_offset
,
21151 trampoline
[i
++] = expand_simple_binop (SImode
, IOR
, opcode
, low
,
21152 NULL
, false, OPTAB_WIDEN
);
21154 /* Emit the JR here, if we can. */
21155 if (!ISA_HAS_LOAD_DELAY
)
21156 trampoline
[i
++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM
));
21158 /* Emit the load of the static chain register. */
21159 opcode
= OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM
,
21160 static_chain_offset
- end_addr_offset
,
21162 trampoline
[i
++] = expand_simple_binop (SImode
, IOR
, opcode
, low
,
21163 NULL
, false, OPTAB_WIDEN
);
21165 /* Emit the JR, if we couldn't above. */
21166 if (ISA_HAS_LOAD_DELAY
)
21168 trampoline
[i
++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM
));
21169 trampoline
[i
++] = OP (MIPS_NOP
);
21175 /* If we are using compact branches we don't have delay slots so
21176 place the instruction that was in the delay slot before the JRC
21179 if (TARGET_CB_ALWAYS
)
21182 temp
= trampoline
[i
-2];
21183 trampoline
[i
-2] = trampoline
[i
-1];
21184 trampoline
[i
-1] = temp
;
21187 /* Copy the trampoline code. Leave any padding uninitialized. */
21188 for (j
= 0; j
< i
; j
++)
21190 mem
= adjust_address (m_tramp
, SImode
, j
* GET_MODE_SIZE (SImode
));
21191 mips_emit_move (mem
, trampoline
[j
]);
21194 /* Set up the static chain pointer field. */
21195 mem
= adjust_address (m_tramp
, ptr_mode
, static_chain_offset
);
21196 mips_emit_move (mem
, chain_value
);
21198 /* Set up the target function field. */
21199 mem
= adjust_address (m_tramp
, ptr_mode
, target_function_offset
);
21200 mips_emit_move (mem
, XEXP (DECL_RTL (fndecl
), 0));
21202 /* Flush the code part of the trampoline. */
21203 emit_insn (gen_add3_insn (end_addr
, addr
, GEN_INT (TRAMPOLINE_SIZE
)));
21204 emit_insn (gen_clear_cache (addr
, end_addr
));
21207 /* Implement FUNCTION_PROFILER. */
21209 void mips_function_profiler (FILE *file
)
21212 sorry ("mips16 function profiling");
21213 if (TARGET_LONG_CALLS
)
21215 /* For TARGET_LONG_CALLS use $3 for the address of _mcount. */
21216 if (Pmode
== DImode
)
21217 fprintf (file
, "\tdla\t%s,_mcount\n", reg_names
[3]);
21219 fprintf (file
, "\tla\t%s,_mcount\n", reg_names
[3]);
21221 mips_push_asm_switch (&mips_noat
);
21222 fprintf (file
, "\tmove\t%s,%s\t\t# save current return address\n",
21223 reg_names
[AT_REGNUM
], reg_names
[RETURN_ADDR_REGNUM
]);
21224 /* _mcount treats $2 as the static chain register. */
21225 if (cfun
->static_chain_decl
!= NULL
)
21226 fprintf (file
, "\tmove\t%s,%s\n", reg_names
[2],
21227 reg_names
[STATIC_CHAIN_REGNUM
]);
21228 if (TARGET_MCOUNT_RA_ADDRESS
)
21230 /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
21231 ra save location. */
21232 if (cfun
->machine
->frame
.ra_fp_offset
== 0)
21233 /* ra not saved, pass zero. */
21234 fprintf (file
, "\tmove\t%s,%s\n", reg_names
[12], reg_names
[0]);
21236 fprintf (file
, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC
"(%s)\n",
21237 Pmode
== DImode
? "dla" : "la", reg_names
[12],
21238 cfun
->machine
->frame
.ra_fp_offset
,
21239 reg_names
[STACK_POINTER_REGNUM
]);
21241 if (!TARGET_NEWABI
)
21243 "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from stack\n",
21244 TARGET_64BIT
? "dsubu" : "subu",
21245 reg_names
[STACK_POINTER_REGNUM
],
21246 reg_names
[STACK_POINTER_REGNUM
],
21247 Pmode
== DImode
? 16 : 8);
21249 if (TARGET_LONG_CALLS
)
21250 fprintf (file
, "\tjalr\t%s\n", reg_names
[3]);
21252 fprintf (file
, "\tjal\t_mcount\n");
21253 mips_pop_asm_switch (&mips_noat
);
21254 /* _mcount treats $2 as the static chain register. */
21255 if (cfun
->static_chain_decl
!= NULL
)
21256 fprintf (file
, "\tmove\t%s,%s\n", reg_names
[STATIC_CHAIN_REGNUM
],
21260 /* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default
21261 behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
21262 when TARGET_LOONGSON_MMI is true. */
21264 static unsigned HOST_WIDE_INT
21265 mips_shift_truncation_mask (machine_mode mode
)
21267 if (TARGET_LOONGSON_MMI
&& VECTOR_MODE_P (mode
))
21270 return GET_MODE_BITSIZE (mode
) - 1;
21273 /* Implement TARGET_PREPARE_PCH_SAVE. */
21276 mips_prepare_pch_save (void)
21278 /* We are called in a context where the current compression vs.
21279 non-compression setting should be irrelevant. The question then is:
21280 which setting makes most sense at load time?
21282 The PCH is loaded before the first token is read. We should never have
21283 switched into a compression mode by that point, and thus should not have
21284 populated mips16_globals or micromips_globals. Nor can we load the
21285 entire contents of mips16_globals or micromips_globals from the PCH file,
21286 because they contain a combination of GGC and non-GGC data.
21288 There is therefore no point in trying save the GGC part of
21289 mips16_globals/micromips_globals to the PCH file, or to preserve a
21290 compression setting across the PCH save and load. The loading compiler
21291 would not have access to the non-GGC parts of mips16_globals or
21292 micromips_globals (either from the PCH file, or from a copy that the
21293 loading compiler generated itself) and would have to call target_reinit
21296 It therefore seems best to switch back to non-MIPS16 mode and
21297 non-microMIPS mode to save time, and to ensure that mips16_globals and
21298 micromips_globals remain null after a PCH load. */
21299 mips_set_compression_mode (0);
21300 mips16_globals
= 0;
21301 micromips_globals
= 0;
21304 /* Generate or test for an insn that supports a constant permutation. */
21306 #define MAX_VECT_LEN 16
21308 struct expand_vec_perm_d
21310 rtx target
, op0
, op1
;
21311 unsigned char perm
[MAX_VECT_LEN
];
21312 machine_mode vmode
;
21313 unsigned char nelt
;
21318 /* Construct (set target (vec_select op0 (parallel perm))) and
21319 return true if that's a valid instruction in the active ISA. */
21322 mips_expand_vselect (rtx target
, rtx op0
,
21323 const unsigned char *perm
, unsigned nelt
)
21325 rtx rperm
[MAX_VECT_LEN
], x
;
21329 for (i
= 0; i
< nelt
; ++i
)
21330 rperm
[i
] = GEN_INT (perm
[i
]);
21332 x
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (nelt
, rperm
));
21333 x
= gen_rtx_VEC_SELECT (GET_MODE (target
), op0
, x
);
21334 x
= gen_rtx_SET (target
, x
);
21336 insn
= emit_insn (x
);
21337 if (recog_memoized (insn
) < 0)
21339 remove_insn (insn
);
21345 /* Similar, but generate a vec_concat from op0 and op1 as well. */
21348 mips_expand_vselect_vconcat (rtx target
, rtx op0
, rtx op1
,
21349 const unsigned char *perm
, unsigned nelt
)
21351 machine_mode v2mode
;
21354 if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0
)).exists (&v2mode
))
21356 x
= gen_rtx_VEC_CONCAT (v2mode
, op0
, op1
);
21357 return mips_expand_vselect (target
, x
, perm
, nelt
);
21360 /* Recognize patterns for even-odd extraction. */
21363 mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d
*d
)
21365 unsigned i
, odd
, nelt
= d
->nelt
;
21366 rtx t0
, t1
, t2
, t3
;
21368 if (!(TARGET_HARD_FLOAT
&& TARGET_LOONGSON_MMI
))
21370 /* Even-odd for V2SI/V2SFmode is matched by interleave directly. */
21377 for (i
= 1; i
< nelt
; ++i
)
21378 if (d
->perm
[i
] != i
* 2 + odd
)
21384 /* We need 2*log2(N)-1 operations to achieve odd/even with interleave. */
21385 t0
= gen_reg_rtx (d
->vmode
);
21386 t1
= gen_reg_rtx (d
->vmode
);
21390 emit_insn (gen_loongson_punpckhhw (t0
, d
->op0
, d
->op1
));
21391 emit_insn (gen_loongson_punpcklhw (t1
, d
->op0
, d
->op1
));
21393 emit_insn (gen_loongson_punpckhhw (d
->target
, t1
, t0
));
21395 emit_insn (gen_loongson_punpcklhw (d
->target
, t1
, t0
));
21399 t2
= gen_reg_rtx (d
->vmode
);
21400 t3
= gen_reg_rtx (d
->vmode
);
21401 emit_insn (gen_loongson_punpckhbh (t0
, d
->op0
, d
->op1
));
21402 emit_insn (gen_loongson_punpcklbh (t1
, d
->op0
, d
->op1
));
21403 emit_insn (gen_loongson_punpckhbh (t2
, t1
, t0
));
21404 emit_insn (gen_loongson_punpcklbh (t3
, t1
, t0
));
21406 emit_insn (gen_loongson_punpckhbh (d
->target
, t3
, t2
));
21408 emit_insn (gen_loongson_punpcklbh (d
->target
, t3
, t2
));
21412 gcc_unreachable ();
21417 /* Recognize patterns for the Loongson PSHUFH instruction. */
21420 mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d
*d
)
21425 if (!(TARGET_HARD_FLOAT
&& TARGET_LOONGSON_MMI
))
21427 if (d
->vmode
!= V4HImode
)
21432 /* Convert the selector into the packed 8-bit form for pshufh. */
21433 /* Recall that loongson is little-endian only. No big-endian
21434 adjustment required. */
21435 for (i
= mask
= 0; i
< 4; i
++)
21436 mask
|= (d
->perm
[i
] & 3) << (i
* 2);
21437 rmask
= force_reg (SImode
, GEN_INT (mask
));
21439 if (d
->one_vector_p
)
21440 emit_insn (gen_loongson_pshufh (d
->target
, d
->op0
, rmask
));
21443 rtx t0
, t1
, x
, merge
, rmerge
[4];
21445 t0
= gen_reg_rtx (V4HImode
);
21446 t1
= gen_reg_rtx (V4HImode
);
21447 emit_insn (gen_loongson_pshufh (t1
, d
->op1
, rmask
));
21448 emit_insn (gen_loongson_pshufh (t0
, d
->op0
, rmask
));
21450 for (i
= 0; i
< 4; ++i
)
21451 rmerge
[i
] = (d
->perm
[i
] & 4 ? constm1_rtx
: const0_rtx
);
21452 merge
= gen_rtx_CONST_VECTOR (V4HImode
, gen_rtvec_v (4, rmerge
));
21453 merge
= force_reg (V4HImode
, merge
);
21455 x
= gen_rtx_AND (V4HImode
, merge
, t1
);
21456 emit_insn (gen_rtx_SET (t1
, x
));
21458 x
= gen_rtx_NOT (V4HImode
, merge
);
21459 x
= gen_rtx_AND (V4HImode
, x
, t0
);
21460 emit_insn (gen_rtx_SET (t0
, x
));
21462 x
= gen_rtx_IOR (V4HImode
, t0
, t1
);
21463 emit_insn (gen_rtx_SET (d
->target
, x
));
21469 /* Recognize broadcast patterns for the Loongson. */
21472 mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d
*d
)
21477 if (!(TARGET_HARD_FLOAT
&& TARGET_LOONGSON_MMI
))
21479 /* Note that we've already matched V2SI via punpck and V4HI via pshufh. */
21480 if (d
->vmode
!= V8QImode
)
21482 if (!d
->one_vector_p
)
21486 for (i
= 1; i
< 8; ++i
)
21487 if (d
->perm
[i
] != elt
)
21493 /* With one interleave we put two of the desired element adjacent. */
21494 t0
= gen_reg_rtx (V8QImode
);
21496 emit_insn (gen_loongson_punpcklbh (t0
, d
->op0
, d
->op0
));
21498 emit_insn (gen_loongson_punpckhbh (t0
, d
->op0
, d
->op0
));
21500 /* Shuffle that one HImode element into all locations. */
21503 t1
= gen_reg_rtx (V4HImode
);
21504 emit_insn (gen_loongson_pshufh (t1
, gen_lowpart (V4HImode
, t0
),
21505 force_reg (SImode
, GEN_INT (elt
))));
21507 emit_move_insn (d
->target
, gen_lowpart (V8QImode
, t1
));
21511 /* Construct (set target (vec_select op0 (parallel selector))) and
21512 return true if that's a valid instruction in the active ISA. */
21515 mips_expand_msa_shuffle (struct expand_vec_perm_d
*d
)
21517 rtx x
, elts
[MAX_VECT_LEN
];
21525 for (i
= 0; i
< d
->nelt
; i
++)
21526 elts
[i
] = GEN_INT (d
->perm
[i
]);
21528 v
= gen_rtvec_v (d
->nelt
, elts
);
21529 x
= gen_rtx_PARALLEL (VOIDmode
, v
);
21531 if (!mips_const_vector_shuffle_set_p (x
, d
->vmode
))
21534 x
= gen_rtx_VEC_SELECT (d
->vmode
, d
->op0
, x
);
21535 x
= gen_rtx_SET (d
->target
, x
);
21537 insn
= emit_insn (x
);
21538 if (recog_memoized (insn
) < 0)
21540 remove_insn (insn
);
21547 mips_expand_vec_perm_const_1 (struct expand_vec_perm_d
*d
)
21549 unsigned int i
, nelt
= d
->nelt
;
21550 unsigned char perm2
[MAX_VECT_LEN
];
21552 if (d
->one_vector_p
)
21554 /* Try interleave with alternating operands. */
21555 memcpy (perm2
, d
->perm
, sizeof(perm2
));
21556 for (i
= 1; i
< nelt
; i
+= 2)
21558 if (mips_expand_vselect_vconcat (d
->target
, d
->op0
, d
->op1
, perm2
, nelt
))
21563 if (mips_expand_vselect_vconcat (d
->target
, d
->op0
, d
->op1
,
21567 /* Try again with swapped operands. */
21568 for (i
= 0; i
< nelt
; ++i
)
21569 perm2
[i
] = (d
->perm
[i
] + nelt
) & (2 * nelt
- 1);
21570 if (mips_expand_vselect_vconcat (d
->target
, d
->op1
, d
->op0
, perm2
, nelt
))
21574 if (mips_expand_vpc_loongson_even_odd (d
))
21576 if (mips_expand_vpc_loongson_pshufh (d
))
21578 if (mips_expand_vpc_loongson_bcast (d
))
21580 if (mips_expand_msa_shuffle (d
))
21585 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
21588 mips_vectorize_vec_perm_const (machine_mode vmode
, rtx target
, rtx op0
,
21589 rtx op1
, const vec_perm_indices
&sel
)
21591 struct expand_vec_perm_d d
;
21592 int i
, nelt
, which
;
21593 unsigned char orig_perm
[MAX_VECT_LEN
];
21601 gcc_assert (VECTOR_MODE_P (vmode
));
21602 d
.nelt
= nelt
= GET_MODE_NUNITS (vmode
);
21603 d
.testing_p
= !target
;
21605 /* This is overly conservative, but ensures we don't get an
21606 uninitialized warning on ORIG_PERM. */
21607 memset (orig_perm
, 0, MAX_VECT_LEN
);
21608 for (i
= which
= 0; i
< nelt
; ++i
)
21610 int ei
= sel
[i
] & (2 * nelt
- 1);
21611 which
|= (ei
< nelt
? 1 : 2);
21614 memcpy (d
.perm
, orig_perm
, MAX_VECT_LEN
);
21622 d
.one_vector_p
= false;
21623 if (d
.testing_p
|| !rtx_equal_p (d
.op0
, d
.op1
))
21628 for (i
= 0; i
< nelt
; ++i
)
21629 d
.perm
[i
] &= nelt
- 1;
21631 d
.one_vector_p
= true;
21636 d
.one_vector_p
= true;
21642 d
.target
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 1);
21643 d
.op1
= d
.op0
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 2);
21644 if (!d
.one_vector_p
)
21645 d
.op1
= gen_raw_REG (d
.vmode
, LAST_VIRTUAL_REGISTER
+ 3);
21648 ok
= mips_expand_vec_perm_const_1 (&d
);
21653 ok
= mips_expand_vec_perm_const_1 (&d
);
21655 /* If we were given a two-vector permutation which just happened to
21656 have both input vectors equal, we folded this into a one-vector
21657 permutation. There are several loongson patterns that are matched
21658 via direct vec_select+vec_concat expansion, but we do not have
21659 support in mips_expand_vec_perm_const_1 to guess the adjustment
21660 that should be made for a single operand. Just try again with
21661 the original permutation. */
21662 if (!ok
&& which
== 3)
21666 d
.one_vector_p
= false;
21667 memcpy (d
.perm
, orig_perm
, MAX_VECT_LEN
);
21668 ok
= mips_expand_vec_perm_const_1 (&d
);
21674 /* Implement TARGET_SCHED_REASSOCIATION_WIDTH. */
21677 mips_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED
,
21680 if (MSA_SUPPORTED_MODE_P (mode
))
21685 /* Expand an integral vector unpack operation. */
21688 mips_expand_vec_unpack (rtx operands
[2], bool unsigned_p
, bool high_p
)
21690 machine_mode imode
= GET_MODE (operands
[1]);
21691 rtx (*unpack
) (rtx
, rtx
, rtx
);
21692 rtx (*cmpFunc
) (rtx
, rtx
, rtx
);
21693 rtx tmp
, dest
, zero
;
21700 if (BYTES_BIG_ENDIAN
!= high_p
)
21701 unpack
= gen_msa_ilvl_w
;
21703 unpack
= gen_msa_ilvr_w
;
21705 cmpFunc
= gen_msa_clt_s_w
;
21709 if (BYTES_BIG_ENDIAN
!= high_p
)
21710 unpack
= gen_msa_ilvl_h
;
21712 unpack
= gen_msa_ilvr_h
;
21714 cmpFunc
= gen_msa_clt_s_h
;
21718 if (BYTES_BIG_ENDIAN
!= high_p
)
21719 unpack
= gen_msa_ilvl_b
;
21721 unpack
= gen_msa_ilvr_b
;
21723 cmpFunc
= gen_msa_clt_s_b
;
21727 gcc_unreachable ();
21733 /* Extract sign extention for each element comparing each element
21734 with immediate zero. */
21735 tmp
= gen_reg_rtx (imode
);
21736 emit_insn (cmpFunc (tmp
, operands
[1], CONST0_RTX (imode
)));
21739 tmp
= force_reg (imode
, CONST0_RTX (imode
));
21741 dest
= gen_reg_rtx (imode
);
21743 emit_insn (unpack (dest
, operands
[1], tmp
));
21744 emit_move_insn (operands
[0], gen_lowpart (GET_MODE (operands
[0]), dest
));
21752 unpack
= gen_loongson_punpckhbh
;
21754 unpack
= gen_loongson_punpcklbh
;
21755 cmpFunc
= gen_loongson_pcmpgtb
;
21759 unpack
= gen_loongson_punpckhhw
;
21761 unpack
= gen_loongson_punpcklhw
;
21762 cmpFunc
= gen_loongson_pcmpgth
;
21765 gcc_unreachable ();
21768 zero
= force_reg (imode
, CONST0_RTX (imode
));
21773 tmp
= gen_reg_rtx (imode
);
21774 emit_insn (cmpFunc (tmp
, zero
, operands
[1]));
21777 dest
= gen_reg_rtx (imode
);
21778 emit_insn (unpack (dest
, operands
[1], tmp
));
21780 emit_move_insn (operands
[0], gen_lowpart (GET_MODE (operands
[0]), dest
));
21783 /* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE)
21784 or LOW (high_p == FALSE) half of a vector for mode MODE. */
21787 mips_msa_vec_parallel_const_half (machine_mode mode
, bool high_p
)
21789 int nunits
= GET_MODE_NUNITS (mode
);
21790 rtvec v
= rtvec_alloc (nunits
/ 2);
21794 if (BYTES_BIG_ENDIAN
)
21795 base
= high_p
? 0 : nunits
/ 2;
21797 base
= high_p
? nunits
/ 2 : 0;
21799 for (i
= 0; i
< nunits
/ 2; i
++)
21800 RTVEC_ELT (v
, i
) = GEN_INT (base
+ i
);
21802 return gen_rtx_PARALLEL (VOIDmode
, v
);
21805 /* A subroutine of mips_expand_vec_init, match constant vector elements. */
21808 mips_constant_elt_p (rtx x
)
21810 return CONST_INT_P (x
) || GET_CODE (x
) == CONST_DOUBLE
;
21813 /* A subroutine of mips_expand_vec_init, expand via broadcast. */
21816 mips_expand_vi_broadcast (machine_mode vmode
, rtx target
, rtx elt
)
21818 struct expand_vec_perm_d d
;
21822 if (elt
!= const0_rtx
)
21823 elt
= force_reg (GET_MODE_INNER (vmode
), elt
);
21825 elt
= gen_lowpart (DImode
, elt
);
21827 t1
= gen_reg_rtx (vmode
);
21831 emit_insn (gen_loongson_vec_init1_v8qi (t1
, elt
));
21834 emit_insn (gen_loongson_vec_init1_v4hi (t1
, elt
));
21837 gcc_unreachable ();
21840 memset (&d
, 0, sizeof (d
));
21845 d
.nelt
= GET_MODE_NUNITS (vmode
);
21846 d
.one_vector_p
= true;
21848 ok
= mips_expand_vec_perm_const_1 (&d
);
21852 /* Return a const_int vector of VAL with mode MODE. */
21855 mips_gen_const_int_vector (machine_mode mode
, HOST_WIDE_INT val
)
21857 rtx c
= gen_int_mode (val
, GET_MODE_INNER (mode
));
21858 return gen_const_vec_duplicate (mode
, c
);
21861 /* Return a vector of repeated 4-element sets generated from
21862 immediate VAL in mode MODE. */
21865 mips_gen_const_int_vector_shuffle (machine_mode mode
, int val
)
21867 int nunits
= GET_MODE_NUNITS (mode
);
21868 int nsets
= nunits
/ 4;
21869 rtx elts
[MAX_VECT_LEN
];
21873 /* Generate a const_int vector replicating the same 4-element set
21874 from an immediate. */
21875 for (j
= 0; j
< nsets
; j
++, set
= 4 * j
)
21876 for (i
= 0; i
< 4; i
++)
21877 elts
[set
+ i
] = GEN_INT (set
+ ((val
>> (2 * i
)) & 0x3));
21879 return gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (nunits
, elts
));
21882 /* A subroutine of mips_expand_vec_init, replacing all of the non-constant
21883 elements of VALS with zeros, copy the constant vector to TARGET. */
21886 mips_expand_vi_constant (machine_mode vmode
, unsigned nelt
,
21887 rtx target
, rtx vals
)
21889 rtvec vec
= shallow_copy_rtvec (XVEC (vals
, 0));
21892 for (i
= 0; i
< nelt
; ++i
)
21894 rtx elem
= RTVEC_ELT (vec
, i
);
21895 if (!mips_constant_elt_p (elem
))
21896 RTVEC_ELT (vec
, i
) = CONST0_RTX (GET_MODE (elem
));
21899 emit_move_insn (target
, gen_rtx_CONST_VECTOR (vmode
, vec
));
21903 /* A subroutine of mips_expand_vec_init, expand via pinsrh. */
21906 mips_expand_vi_loongson_one_pinsrh (rtx target
, rtx vals
, unsigned one_var
)
21908 mips_expand_vi_constant (V4HImode
, 4, target
, vals
);
21910 emit_insn (gen_vec_setv4hi (target
, target
, XVECEXP (vals
, 0, one_var
),
21911 GEN_INT (one_var
)));
21914 /* A subroutine of mips_expand_vec_init, expand anything via memory. */
21917 mips_expand_vi_general (machine_mode vmode
, machine_mode imode
,
21918 unsigned nelt
, unsigned nvar
, rtx target
, rtx vals
)
21920 rtx mem
= assign_stack_temp (vmode
, GET_MODE_SIZE (vmode
));
21921 unsigned int i
, isize
= GET_MODE_SIZE (imode
);
21924 mips_expand_vi_constant (vmode
, nelt
, mem
, vals
);
21926 for (i
= 0; i
< nelt
; ++i
)
21928 rtx x
= XVECEXP (vals
, 0, i
);
21929 if (!mips_constant_elt_p (x
))
21930 emit_move_insn (adjust_address (mem
, imode
, i
* isize
), x
);
21933 emit_move_insn (target
, mem
);
21936 /* Expand a vector initialization. */
21939 mips_expand_vector_init (rtx target
, rtx vals
)
21941 machine_mode vmode
= GET_MODE (target
);
21942 machine_mode imode
= GET_MODE_INNER (vmode
);
21943 unsigned i
, nelt
= GET_MODE_NUNITS (vmode
);
21944 unsigned nvar
= 0, one_var
= -1u;
21945 bool all_same
= true;
21948 for (i
= 0; i
< nelt
; ++i
)
21950 x
= XVECEXP (vals
, 0, i
);
21951 if (!mips_constant_elt_p (x
))
21952 nvar
++, one_var
= i
;
21953 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
21961 rtx same
= XVECEXP (vals
, 0, 0);
21964 if (CONST_INT_P (same
) && nvar
== 0
21965 && mips_signed_immediate_p (INTVAL (same
), 10, 0))
21973 temp
= gen_rtx_CONST_VECTOR (vmode
, XVEC (vals
, 0));
21974 emit_move_insn (target
, temp
);
21978 gcc_unreachable ();
21981 temp
= gen_reg_rtx (imode
);
21982 if (imode
== GET_MODE (same
))
21984 else if (GET_MODE_SIZE (imode
) >= UNITS_PER_WORD
)
21985 temp2
= simplify_gen_subreg (imode
, same
, GET_MODE (same
), 0);
21987 temp2
= lowpart_subreg (imode
, same
, GET_MODE (same
));
21988 emit_move_insn (temp
, temp2
);
21996 mips_emit_move (target
, gen_rtx_VEC_DUPLICATE (vmode
, temp
));
22000 emit_insn (gen_msa_splati_w_f_scalar (target
, temp
));
22004 emit_insn (gen_msa_splati_d_f_scalar (target
, temp
));
22008 gcc_unreachable ();
22013 emit_move_insn (target
, CONST0_RTX (vmode
));
22015 for (i
= 0; i
< nelt
; ++i
)
22017 rtx temp
= gen_reg_rtx (imode
);
22018 emit_move_insn (temp
, XVECEXP (vals
, 0, i
));
22022 emit_insn (gen_vec_setv16qi (target
, temp
, GEN_INT (i
)));
22026 emit_insn (gen_vec_setv8hi (target
, temp
, GEN_INT (i
)));
22030 emit_insn (gen_vec_setv4si (target
, temp
, GEN_INT (i
)));
22034 emit_insn (gen_vec_setv2di (target
, temp
, GEN_INT (i
)));
22038 emit_insn (gen_vec_setv4sf (target
, temp
, GEN_INT (i
)));
22042 emit_insn (gen_vec_setv2df (target
, temp
, GEN_INT (i
)));
22046 gcc_unreachable ();
22053 /* Load constants from the pool, or whatever's handy. */
22056 emit_move_insn (target
, gen_rtx_CONST_VECTOR (vmode
, XVEC (vals
, 0)));
22060 /* For two-part initialization, always use CONCAT. */
22063 rtx op0
= force_reg (imode
, XVECEXP (vals
, 0, 0));
22064 rtx op1
= force_reg (imode
, XVECEXP (vals
, 0, 1));
22065 x
= gen_rtx_VEC_CONCAT (vmode
, op0
, op1
);
22066 emit_insn (gen_rtx_SET (target
, x
));
22070 /* Loongson is the only cpu with vectors with more elements. */
22071 gcc_assert (TARGET_HARD_FLOAT
&& TARGET_LOONGSON_MMI
);
22073 /* If all values are identical, broadcast the value. */
22076 mips_expand_vi_broadcast (vmode
, target
, XVECEXP (vals
, 0, 0));
22080 /* If we've only got one non-variable V4HImode, use PINSRH. */
22081 if (nvar
== 1 && vmode
== V4HImode
)
22083 mips_expand_vi_loongson_one_pinsrh (target
, vals
, one_var
);
22087 mips_expand_vi_general (vmode
, imode
, nelt
, nvar
, target
, vals
);
22090 /* Expand a vector reduction. */
22093 mips_expand_vec_reduc (rtx target
, rtx in
, rtx (*gen
)(rtx
, rtx
, rtx
))
22095 machine_mode vmode
= GET_MODE (in
);
22096 unsigned char perm2
[2];
22097 rtx last
, next
, fold
, x
;
22101 fold
= gen_reg_rtx (vmode
);
22105 /* Use PUL/PLU to produce { L, H } op { H, L }.
22106 By reversing the pair order, rather than a pure interleave high,
22107 we avoid erroneous exceptional conditions that we might otherwise
22108 produce from the computation of H op H. */
22111 ok
= mips_expand_vselect_vconcat (fold
, last
, last
, perm2
, 2);
22116 /* Use interleave to produce { H, L } op { H, H }. */
22117 emit_insn (gen_loongson_punpckhwd (fold
, last
, last
));
22121 /* Perform the first reduction with interleave,
22122 and subsequent reductions with shifts. */
22123 emit_insn (gen_loongson_punpckhwd_hi (fold
, last
, last
));
22125 next
= gen_reg_rtx (vmode
);
22126 emit_insn (gen (next
, last
, fold
));
22129 fold
= gen_reg_rtx (vmode
);
22130 x
= force_reg (SImode
, GEN_INT (16));
22131 emit_insn (gen_vec_shr_v4hi (fold
, last
, x
));
22135 emit_insn (gen_loongson_punpckhwd_qi (fold
, last
, last
));
22137 next
= gen_reg_rtx (vmode
);
22138 emit_insn (gen (next
, last
, fold
));
22141 fold
= gen_reg_rtx (vmode
);
22142 x
= force_reg (SImode
, GEN_INT (16));
22143 emit_insn (gen_vec_shr_v8qi (fold
, last
, x
));
22145 next
= gen_reg_rtx (vmode
);
22146 emit_insn (gen (next
, last
, fold
));
22149 fold
= gen_reg_rtx (vmode
);
22150 x
= force_reg (SImode
, GEN_INT (8));
22151 emit_insn (gen_vec_shr_v8qi (fold
, last
, x
));
22155 gcc_unreachable ();
22158 emit_insn (gen (target
, last
, fold
));
22161 /* Expand a vector minimum/maximum. */
22164 mips_expand_vec_minmax (rtx target
, rtx op0
, rtx op1
,
22165 rtx (*cmp
) (rtx
, rtx
, rtx
), bool min_p
)
22167 machine_mode vmode
= GET_MODE (target
);
22170 tc
= gen_reg_rtx (vmode
);
22171 t0
= gen_reg_rtx (vmode
);
22172 t1
= gen_reg_rtx (vmode
);
22175 emit_insn (cmp (tc
, op0
, op1
));
22177 x
= gen_rtx_AND (vmode
, tc
, (min_p
? op1
: op0
));
22178 emit_insn (gen_rtx_SET (t0
, x
));
22180 x
= gen_rtx_NOT (vmode
, tc
);
22181 x
= gen_rtx_AND (vmode
, x
, (min_p
? op0
: op1
));
22182 emit_insn (gen_rtx_SET (t1
, x
));
22184 x
= gen_rtx_IOR (vmode
, t0
, t1
);
22185 emit_insn (gen_rtx_SET (target
, x
));
22188 /* Implement HARD_REGNO_CALLER_SAVE_MODE. */
22191 mips_hard_regno_caller_save_mode (unsigned int regno
,
22192 unsigned int nregs
,
22195 /* For performance, avoid saving/restoring upper parts of a register
22196 by returning MODE as save mode when the mode is known. */
22197 if (mode
== VOIDmode
)
22198 return choose_hard_reg_mode (regno
, nregs
, NULL
);
22203 /* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and
22204 store the result -1 or 0 in DEST. */
22207 mips_expand_msa_cmp (rtx dest
, enum rtx_code cond
, rtx op0
, rtx op1
)
22209 machine_mode cmp_mode
= GET_MODE (op0
);
22211 bool negate
= false;
22222 cond
= reverse_condition (cond
);
22235 std::swap (op0
, op1
);
22236 cond
= swap_condition (cond
);
22239 gcc_unreachable ();
22241 mips_emit_binary (cond
, dest
, op0
, op1
);
22243 emit_move_insn (dest
, gen_rtx_NOT (GET_MODE (dest
), dest
));
22258 case LTGT
: cond
= NE
; break;
22259 case UNGE
: cond
= UNLE
; std::swap (op0
, op1
); break;
22260 case UNGT
: cond
= UNLT
; std::swap (op0
, op1
); break;
22261 case LE
: unspec
= UNSPEC_MSA_FSLE
; break;
22262 case LT
: unspec
= UNSPEC_MSA_FSLT
; break;
22263 case GE
: unspec
= UNSPEC_MSA_FSLE
; std::swap (op0
, op1
); break;
22264 case GT
: unspec
= UNSPEC_MSA_FSLT
; std::swap (op0
, op1
); break;
22266 gcc_unreachable ();
22269 mips_emit_binary (cond
, dest
, op0
, op1
);
22272 rtx x
= gen_rtx_UNSPEC (GET_MODE (dest
),
22273 gen_rtvec (2, op0
, op1
), unspec
);
22274 emit_insn (gen_rtx_SET (dest
, x
));
22279 gcc_unreachable ();
22284 /* Expand VEC_COND_EXPR, where:
22285 MODE is mode of the result
22286 VIMODE equivalent integer mode
22287 OPERANDS operands of VEC_COND_EXPR. */
22290 mips_expand_vec_cond_expr (machine_mode mode
, machine_mode vimode
,
22293 rtx cond
= operands
[3];
22294 rtx cmp_op0
= operands
[4];
22295 rtx cmp_op1
= operands
[5];
22296 rtx cmp_res
= gen_reg_rtx (vimode
);
22298 mips_expand_msa_cmp (cmp_res
, GET_CODE (cond
), cmp_op0
, cmp_op1
);
22300 /* We handle the following cases:
22301 1) r = a CMP b ? -1 : 0
22302 2) r = a CMP b ? -1 : v
22303 3) r = a CMP b ? v : 0
22304 4) r = a CMP b ? v1 : v2 */
22306 /* Case (1) above. We only move the results. */
22307 if (operands
[1] == CONSTM1_RTX (vimode
)
22308 && operands
[2] == CONST0_RTX (vimode
))
22309 emit_move_insn (operands
[0], cmp_res
);
22312 rtx src1
= gen_reg_rtx (vimode
);
22313 rtx src2
= gen_reg_rtx (vimode
);
22314 rtx mask
= gen_reg_rtx (vimode
);
22317 /* Move the vector result to use it as a mask. */
22318 emit_move_insn (mask
, cmp_res
);
22320 if (register_operand (operands
[1], mode
))
22322 rtx xop1
= operands
[1];
22323 if (mode
!= vimode
)
22325 xop1
= gen_reg_rtx (vimode
);
22326 emit_move_insn (xop1
, gen_lowpart (vimode
, operands
[1]));
22328 emit_move_insn (src1
, xop1
);
22332 gcc_assert (operands
[1] == CONSTM1_RTX (vimode
));
22333 /* Case (2) if the below doesn't move the mask to src2. */
22334 emit_move_insn (src1
, mask
);
22337 if (register_operand (operands
[2], mode
))
22339 rtx xop2
= operands
[2];
22340 if (mode
!= vimode
)
22342 xop2
= gen_reg_rtx (vimode
);
22343 emit_move_insn (xop2
, gen_lowpart (vimode
, operands
[2]));
22345 emit_move_insn (src2
, xop2
);
22349 gcc_assert (operands
[2] == CONST0_RTX (mode
));
22350 /* Case (3) if the above didn't move the mask to src1. */
22351 emit_move_insn (src2
, mask
);
22354 /* We deal with case (4) if the mask wasn't moved to either src1 or src2.
22355 In any case, we eventually do vector mask-based copy. */
22356 bsel
= gen_rtx_IOR (vimode
,
22357 gen_rtx_AND (vimode
,
22358 gen_rtx_NOT (vimode
, mask
), src2
),
22359 gen_rtx_AND (vimode
, mask
, src1
));
22360 /* The result is placed back to a register with the mask. */
22361 emit_insn (gen_rtx_SET (mask
, bsel
));
22362 emit_move_insn (operands
[0], gen_rtx_SUBREG (mode
, mask
, 0));
22366 /* Implement TARGET_CASE_VALUES_THRESHOLD. */
22369 mips_case_values_threshold (void)
22371 /* In MIPS16 mode using a larger case threshold generates smaller code. */
22372 if (TARGET_MIPS16
&& optimize_size
)
22375 return default_case_values_threshold ();
22378 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
22381 mips_atomic_assign_expand_fenv (tree
*hold
, tree
*clear
, tree
*update
)
22383 if (!TARGET_HARD_FLOAT_ABI
)
22385 tree exceptions_var
= create_tmp_var_raw (MIPS_ATYPE_USI
);
22386 tree fcsr_orig_var
= create_tmp_var_raw (MIPS_ATYPE_USI
);
22387 tree fcsr_mod_var
= create_tmp_var_raw (MIPS_ATYPE_USI
);
22388 tree get_fcsr
= mips_builtin_decls
[MIPS_GET_FCSR
];
22389 tree set_fcsr
= mips_builtin_decls
[MIPS_SET_FCSR
];
22390 tree get_fcsr_hold_call
= build_call_expr (get_fcsr
, 0);
22391 tree hold_assign_orig
= build2 (MODIFY_EXPR
, MIPS_ATYPE_USI
,
22392 fcsr_orig_var
, get_fcsr_hold_call
);
22393 tree hold_mod_val
= build2 (BIT_AND_EXPR
, MIPS_ATYPE_USI
, fcsr_orig_var
,
22394 build_int_cst (MIPS_ATYPE_USI
, 0xfffff003));
22395 tree hold_assign_mod
= build2 (MODIFY_EXPR
, MIPS_ATYPE_USI
,
22396 fcsr_mod_var
, hold_mod_val
);
22397 tree set_fcsr_hold_call
= build_call_expr (set_fcsr
, 1, fcsr_mod_var
);
22398 tree hold_all
= build2 (COMPOUND_EXPR
, MIPS_ATYPE_USI
,
22399 hold_assign_orig
, hold_assign_mod
);
22400 *hold
= build2 (COMPOUND_EXPR
, void_type_node
, hold_all
,
22401 set_fcsr_hold_call
);
22403 *clear
= build_call_expr (set_fcsr
, 1, fcsr_mod_var
);
22405 tree get_fcsr_update_call
= build_call_expr (get_fcsr
, 0);
22406 *update
= build2 (MODIFY_EXPR
, MIPS_ATYPE_USI
,
22407 exceptions_var
, get_fcsr_update_call
);
22408 tree set_fcsr_update_call
= build_call_expr (set_fcsr
, 1, fcsr_orig_var
);
22409 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
,
22410 set_fcsr_update_call
);
22411 tree atomic_feraiseexcept
22412 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT
);
22413 tree int_exceptions_var
= fold_convert (integer_type_node
,
22415 tree atomic_feraiseexcept_call
= build_call_expr (atomic_feraiseexcept
,
22416 1, int_exceptions_var
);
22417 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
,
22418 atomic_feraiseexcept_call
);
22421 /* Implement TARGET_SPILL_CLASS. */
22424 mips_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED
,
22425 machine_mode mode ATTRIBUTE_UNUSED
)
22432 /* Implement TARGET_LRA_P. */
22437 return mips_lra_flag
;
22440 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS. */
22443 mips_ira_change_pseudo_allocno_class (int regno
, reg_class_t allocno_class
,
22444 reg_class_t best_class ATTRIBUTE_UNUSED
)
22446 /* LRA will allocate an FPR for an integer mode pseudo instead of spilling
22447 to memory if an FPR is present in the allocno class. It is rare that
22448 we actually need to place an integer mode value in an FPR so where
22449 possible limit the allocation to GR_REGS. This will slightly pessimize
22450 code that involves integer to/from float conversions as these will have
22451 to reload into FPRs in LRA. Such reloads are sometimes eliminated and
22452 sometimes only partially eliminated. We choose to take this penalty
22453 in order to eliminate usage of FPRs in code that does not use floating
22456 This change has a similar effect to increasing the cost of FPR->GPR
22457 register moves for integer modes so that they are higher than the cost
22458 of memory but changing the allocno class is more reliable.
22460 This is also similar to forbidding integer mode values in FPRs entirely
22461 but this would lead to an inconsistency in the integer to/from float
22462 instructions that say integer mode values must be placed in FPRs. */
22463 if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno
)) && allocno_class
== ALL_REGS
)
22465 return allocno_class
;
22468 /* Implement TARGET_PROMOTE_FUNCTION_MODE */
22470 /* This function is equivalent to default_promote_function_mode_always_promote
22471 except that it returns a promoted mode even if type is NULL_TREE. This is
22472 needed by libcalls which have no type (only a mode) such as fixed conversion
22473 routines that take a signed or unsigned char/short argument and convert it
22474 to a fixed type. */
22476 static machine_mode
22477 mips_promote_function_mode (const_tree type ATTRIBUTE_UNUSED
,
22479 int *punsignedp ATTRIBUTE_UNUSED
,
22480 const_tree fntype ATTRIBUTE_UNUSED
,
22481 int for_return ATTRIBUTE_UNUSED
)
22485 if (type
!= NULL_TREE
)
22486 return promote_mode (type
, mode
, punsignedp
);
22488 unsignedp
= *punsignedp
;
22489 PROMOTE_MODE (mode
, unsignedp
, type
);
22490 *punsignedp
= unsignedp
;
22494 /* Implement TARGET_TRULY_NOOP_TRUNCATION. */
22497 mips_truly_noop_truncation (poly_uint64 outprec
, poly_uint64 inprec
)
22499 return !TARGET_64BIT
|| inprec
<= 32 || outprec
> 32;
22502 /* Implement TARGET_CONSTANT_ALIGNMENT. */
22504 static HOST_WIDE_INT
22505 mips_constant_alignment (const_tree exp
, HOST_WIDE_INT align
)
22507 if (TREE_CODE (exp
) == STRING_CST
|| TREE_CODE (exp
) == CONSTRUCTOR
)
22508 return MAX (align
, BITS_PER_WORD
);
22512 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
22514 static unsigned HOST_WIDE_INT
22515 mips_asan_shadow_offset (void)
22520 /* Implement TARGET_STARTING_FRAME_OFFSET. See mips_compute_frame_info
22521 for details about the frame layout. */
22523 static HOST_WIDE_INT
22524 mips_starting_frame_offset (void)
22526 if (FRAME_GROWS_DOWNWARD
)
22528 return crtl
->outgoing_args_size
+ MIPS_GP_SAVE_AREA_SIZE
;
22531 /* Initialize the GCC target structure. */
22532 #undef TARGET_ASM_ALIGNED_HI_OP
22533 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
22534 #undef TARGET_ASM_ALIGNED_SI_OP
22535 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
22536 #undef TARGET_ASM_ALIGNED_DI_OP
22537 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
22539 #undef TARGET_OPTION_OVERRIDE
22540 #define TARGET_OPTION_OVERRIDE mips_option_override
22542 #undef TARGET_LEGITIMIZE_ADDRESS
22543 #define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address
22545 #undef TARGET_ASM_FUNCTION_PROLOGUE
22546 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
22547 #undef TARGET_ASM_FUNCTION_EPILOGUE
22548 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
22549 #undef TARGET_ASM_SELECT_RTX_SECTION
22550 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
22551 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
22552 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
22554 #undef TARGET_SCHED_INIT
22555 #define TARGET_SCHED_INIT mips_sched_init
22556 #undef TARGET_SCHED_REORDER
22557 #define TARGET_SCHED_REORDER mips_sched_reorder
22558 #undef TARGET_SCHED_REORDER2
22559 #define TARGET_SCHED_REORDER2 mips_sched_reorder2
22560 #undef TARGET_SCHED_VARIABLE_ISSUE
22561 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
22562 #undef TARGET_SCHED_ADJUST_COST
22563 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
22564 #undef TARGET_SCHED_ISSUE_RATE
22565 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
22566 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
22567 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
22568 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
22569 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
22570 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
22571 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
22572 mips_multipass_dfa_lookahead
22573 #undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
22574 #define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
22575 mips_small_register_classes_for_mode_p
22577 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
22578 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
22580 #undef TARGET_INSERT_ATTRIBUTES
22581 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
22582 #undef TARGET_MERGE_DECL_ATTRIBUTES
22583 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
22584 #undef TARGET_CAN_INLINE_P
22585 #define TARGET_CAN_INLINE_P mips_can_inline_p
22586 #undef TARGET_SET_CURRENT_FUNCTION
22587 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
22589 #undef TARGET_VALID_POINTER_MODE
22590 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
22591 #undef TARGET_REGISTER_MOVE_COST
22592 #define TARGET_REGISTER_MOVE_COST mips_register_move_cost
22593 #undef TARGET_REGISTER_PRIORITY
22594 #define TARGET_REGISTER_PRIORITY mips_register_priority
22595 #undef TARGET_MEMORY_MOVE_COST
22596 #define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
22597 #undef TARGET_RTX_COSTS
22598 #define TARGET_RTX_COSTS mips_rtx_costs
22599 #undef TARGET_ADDRESS_COST
22600 #define TARGET_ADDRESS_COST mips_address_cost
22602 #undef TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P
22603 #define TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P mips_no_speculation_in_delay_slots_p
22605 #undef TARGET_IN_SMALL_DATA_P
22606 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
22608 #undef TARGET_MACHINE_DEPENDENT_REORG
22609 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
22611 #undef TARGET_PREFERRED_RELOAD_CLASS
22612 #define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class
22614 #undef TARGET_EXPAND_TO_RTL_HOOK
22615 #define TARGET_EXPAND_TO_RTL_HOOK mips_expand_to_rtl_hook
22616 #undef TARGET_ASM_FILE_START
22617 #define TARGET_ASM_FILE_START mips_file_start
22618 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
22619 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
22620 #undef TARGET_ASM_CODE_END
22621 #define TARGET_ASM_CODE_END mips_code_end
22623 #undef TARGET_INIT_LIBFUNCS
22624 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
22626 #undef TARGET_BUILD_BUILTIN_VA_LIST
22627 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
22628 #undef TARGET_EXPAND_BUILTIN_VA_START
22629 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
22630 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
22631 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
22633 #undef TARGET_PROMOTE_FUNCTION_MODE
22634 #define TARGET_PROMOTE_FUNCTION_MODE mips_promote_function_mode
22635 #undef TARGET_FUNCTION_VALUE
22636 #define TARGET_FUNCTION_VALUE mips_function_value
22637 #undef TARGET_LIBCALL_VALUE
22638 #define TARGET_LIBCALL_VALUE mips_libcall_value
22639 #undef TARGET_FUNCTION_VALUE_REGNO_P
22640 #define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
22641 #undef TARGET_RETURN_IN_MEMORY
22642 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
22643 #undef TARGET_RETURN_IN_MSB
22644 #define TARGET_RETURN_IN_MSB mips_return_in_msb
22646 #undef TARGET_ASM_OUTPUT_MI_THUNK
22647 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
22648 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
22649 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
22651 #undef TARGET_PRINT_OPERAND
22652 #define TARGET_PRINT_OPERAND mips_print_operand
22653 #undef TARGET_PRINT_OPERAND_ADDRESS
22654 #define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
22655 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
22656 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p
22658 #undef TARGET_SETUP_INCOMING_VARARGS
22659 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
22660 #undef TARGET_STRICT_ARGUMENT_NAMING
22661 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
22662 #undef TARGET_MUST_PASS_IN_STACK
22663 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
22664 #undef TARGET_PASS_BY_REFERENCE
22665 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
22666 #undef TARGET_CALLEE_COPIES
22667 #define TARGET_CALLEE_COPIES mips_callee_copies
22668 #undef TARGET_ARG_PARTIAL_BYTES
22669 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
22670 #undef TARGET_FUNCTION_ARG
22671 #define TARGET_FUNCTION_ARG mips_function_arg
22672 #undef TARGET_FUNCTION_ARG_ADVANCE
22673 #define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
22674 #undef TARGET_FUNCTION_ARG_PADDING
22675 #define TARGET_FUNCTION_ARG_PADDING mips_function_arg_padding
22676 #undef TARGET_FUNCTION_ARG_BOUNDARY
22677 #define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
22678 #undef TARGET_GET_RAW_RESULT_MODE
22679 #define TARGET_GET_RAW_RESULT_MODE mips_get_reg_raw_mode
22680 #undef TARGET_GET_RAW_ARG_MODE
22681 #define TARGET_GET_RAW_ARG_MODE mips_get_reg_raw_mode
22683 #undef TARGET_MODE_REP_EXTENDED
22684 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
22686 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
22687 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
22688 mips_builtin_vectorized_function
22689 #undef TARGET_VECTOR_MODE_SUPPORTED_P
22690 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
22692 #undef TARGET_SCALAR_MODE_SUPPORTED_P
22693 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
22695 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
22696 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode
22697 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
22698 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
22699 mips_autovectorize_vector_sizes
22701 #undef TARGET_INIT_BUILTINS
22702 #define TARGET_INIT_BUILTINS mips_init_builtins
22703 #undef TARGET_BUILTIN_DECL
22704 #define TARGET_BUILTIN_DECL mips_builtin_decl
22705 #undef TARGET_EXPAND_BUILTIN
22706 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
22708 #undef TARGET_HAVE_TLS
22709 #define TARGET_HAVE_TLS HAVE_AS_TLS
22711 #undef TARGET_CANNOT_FORCE_CONST_MEM
22712 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
22714 #undef TARGET_LEGITIMATE_CONSTANT_P
22715 #define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p
22717 #undef TARGET_ENCODE_SECTION_INFO
22718 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
22720 #undef TARGET_ATTRIBUTE_TABLE
22721 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
22722 /* All our function attributes are related to how out-of-line copies should
22723 be compiled or called. They don't in themselves prevent inlining. */
22724 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
22725 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
22727 #undef TARGET_EXTRA_LIVE_ON_ENTRY
22728 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
22730 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
22731 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
22732 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
22733 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
22735 #undef TARGET_COMP_TYPE_ATTRIBUTES
22736 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
22738 #ifdef HAVE_AS_DTPRELWORD
22739 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
22740 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
22742 #undef TARGET_DWARF_REGISTER_SPAN
22743 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
22744 #undef TARGET_DWARF_FRAME_REG_MODE
22745 #define TARGET_DWARF_FRAME_REG_MODE mips_dwarf_frame_reg_mode
22747 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
22748 #define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
22750 #undef TARGET_LEGITIMATE_ADDRESS_P
22751 #define TARGET_LEGITIMATE_ADDRESS_P mips_legitimate_address_p
22753 #undef TARGET_FRAME_POINTER_REQUIRED
22754 #define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required
22756 #undef TARGET_CAN_ELIMINATE
22757 #define TARGET_CAN_ELIMINATE mips_can_eliminate
22759 #undef TARGET_CONDITIONAL_REGISTER_USAGE
22760 #define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage
22762 #undef TARGET_TRAMPOLINE_INIT
22763 #define TARGET_TRAMPOLINE_INIT mips_trampoline_init
22765 #undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
22766 #define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename
22768 #undef TARGET_SHIFT_TRUNCATION_MASK
22769 #define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask
22771 #undef TARGET_PREPARE_PCH_SAVE
22772 #define TARGET_PREPARE_PCH_SAVE mips_prepare_pch_save
22774 #undef TARGET_VECTORIZE_VEC_PERM_CONST
22775 #define TARGET_VECTORIZE_VEC_PERM_CONST mips_vectorize_vec_perm_const
22777 #undef TARGET_SCHED_REASSOCIATION_WIDTH
22778 #define TARGET_SCHED_REASSOCIATION_WIDTH mips_sched_reassociation_width
22780 #undef TARGET_CASE_VALUES_THRESHOLD
22781 #define TARGET_CASE_VALUES_THRESHOLD mips_case_values_threshold
22783 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
22784 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV mips_atomic_assign_expand_fenv
22786 #undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
22787 #define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
22789 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
22790 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
22791 mips_use_by_pieces_infrastructure_p
22793 #undef TARGET_SPILL_CLASS
22794 #define TARGET_SPILL_CLASS mips_spill_class
22795 #undef TARGET_LRA_P
22796 #define TARGET_LRA_P mips_lra_p
22797 #undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
22798 #define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS mips_ira_change_pseudo_allocno_class
22800 #undef TARGET_HARD_REGNO_SCRATCH_OK
22801 #define TARGET_HARD_REGNO_SCRATCH_OK mips_hard_regno_scratch_ok
22803 #undef TARGET_HARD_REGNO_NREGS
22804 #define TARGET_HARD_REGNO_NREGS mips_hard_regno_nregs
22805 #undef TARGET_HARD_REGNO_MODE_OK
22806 #define TARGET_HARD_REGNO_MODE_OK mips_hard_regno_mode_ok
22808 #undef TARGET_MODES_TIEABLE_P
22809 #define TARGET_MODES_TIEABLE_P mips_modes_tieable_p
22811 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
22812 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
22813 mips_hard_regno_call_part_clobbered
22815 /* The architecture reserves bit 0 for MIPS16 so use bit 1 for descriptors. */
22816 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
22817 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2
22819 #undef TARGET_SECONDARY_MEMORY_NEEDED
22820 #define TARGET_SECONDARY_MEMORY_NEEDED mips_secondary_memory_needed
22822 #undef TARGET_CAN_CHANGE_MODE_CLASS
22823 #define TARGET_CAN_CHANGE_MODE_CLASS mips_can_change_mode_class
22825 #undef TARGET_TRULY_NOOP_TRUNCATION
22826 #define TARGET_TRULY_NOOP_TRUNCATION mips_truly_noop_truncation
22828 #undef TARGET_CONSTANT_ALIGNMENT
22829 #define TARGET_CONSTANT_ALIGNMENT mips_constant_alignment
22831 #undef TARGET_ASAN_SHADOW_OFFSET
22832 #define TARGET_ASAN_SHADOW_OFFSET mips_asan_shadow_offset
22834 #undef TARGET_STARTING_FRAME_OFFSET
22835 #define TARGET_STARTING_FRAME_OFFSET mips_starting_frame_offset
22837 struct gcc_target targetm
= TARGET_INITIALIZER
;
22839 #include "gt-mips.h"